From 92da916ffffd5c9b8f3b83e360b251855cca81cc Mon Sep 17 00:00:00 2001 From: Chris Novakovic Date: Wed, 8 Sep 2021 11:37:45 +0100 Subject: [PATCH 01/56] mklove: make zlib test program compilable The test program that is used at compile-time to detect whether zlib is available fails to compile due to `NULL` being undefined: ``` _mkltmpyos55w.c:5:20: error: use of undeclared identifier 'NULL' z_stream *p = NULL; ^ 1 error generated. ``` This means that zlib availability is only automatically detected when using pkg-config. Import `stddef.h` (which defines `NULL`) in the test program, allowing zlib to be automatically detected via a compilation check. --- CHANGELOG.md | 1 + mklove/modules/configure.zlib | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 197bcafa9c..4a89dfc589 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ librdkafka v1.8.0 is a security release: ### General fixes + * Correctly detect presence of zlib via compilation check. (Chris Novakovic) * `ERR__ALL_BROKERS_DOWN` is no longer emitted when the coordinator connection goes down, only when all standard named brokers have been tried. 
This fixes the issue with `ERR__ALL_BROKERS_DOWN` being triggered on diff --git a/mklove/modules/configure.zlib b/mklove/modules/configure.zlib index 811cc032fa..9f9f4c178f 100644 --- a/mklove/modules/configure.zlib +++ b/mklove/modules/configure.zlib @@ -23,6 +23,7 @@ function manual_checks { mkl_meta_set "zlib" "static" "libz.a" mkl_lib_check "zlib" "WITH_ZLIB" $action CC "-lz" \ " +#include #include void foo (void) { From 3f837a734ddd340fa6db8e2237b05765c8530fa9 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 15 Sep 2021 19:07:01 +0200 Subject: [PATCH 02/56] Travis: New secure env vars --- .travis.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5caeda3553..64b110bbc3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,8 +13,8 @@ addons: env: global: - - secure: "N6iA5+uNcMVvl6KWIiNMdm8MbTX1sviOG2cKBehL+t36ebQOmsi6amRhNseVHdgboALHeWqKf7N8OdMZGbkE8MkgC4mZk+oBunCAZCyBFqwFmVTYuCvsk5Zr1RFw/GbeiGHTmHQYKOol5ct18L6zQnsxV136uxDOOgVk2TizRO8=" - - secure: "XcfZ5u15+saEPVmqM6hBfrdNcZdAZgZhRwTSpTMtFagByb7kE/XtIL/n3qNoDrmgAG2cu8chWI6Tj8Jd1gQU6TjbePfoDK3hRH7gtRumw4vGwFf3bCalOCED6ekYy9qTE+Eymfax8QsH4Qs1e7duFK4d6AeIljG3M01bjnutCIY=" + - secure: "q7DQ6KCiQyMEpBf8mxPFl6hY9JEoaOUdIaLh1IuYn5TctiNIA+J6O/bL/dyDSy2Yjor61WAiiMOh77eMykm1wPl72kqjR97ui0uCq7BQQn4MWtKrXXi0eWLF3bYt2FbUGJZvrM0xeoWzSYT6np7CKu8ssgL8Fvr4bmf152IpdQ8=" + - secure: "XpFExynXwbSr6vTuGsZVyqF4sti+UmRxX2sztjpTdaIH0yo60d6KYT0SRW7BLdZNA6/XI1l1GPTAwcDwTM1XasnnFrD7i88uZsAneA/xEgZTGXtnVVWPJAcVoX/75Rxeibc8CfSc5MO9QmBMiGGuI3S6HHCj4RzCJacBhOjIhfA=" matrix: include: @@ -147,9 +147,9 @@ script: deploy: provider: s3 access_key_id: - secure: "m8FQrFesK0xSS1wHo2S7cuWkpO7VB91dBmj1XIYLRXZSkbMpKBJATcFcHNbrAp3slEp7wLAnT7CHrQ4ccQi4H68Z7mjEwdq4VKRE+7zqJ/feK8MOFNeSHWLQzgwLUYlRlc9+tzLNwxMuL2ilWgdjKOArsUVHo9LEKNfQ3T6zCJU=" + secure: 
"sRsKY1YoPDb3b+9hHnBv4tDSdyB/FraYEKI1/+aKmqWxvOI6xYYFFP0Tvn6f4Rgk0wzYmxO/5V+cR+fmKxVhb1pItFXOdVqML0ilOTP5gtlOPUeHu9fytqw3q7GgMV8JR75g60BNVko9vZegtd2LIq6FWzAIvPSUJOAw7qekjGU=" secret_access_key: - secure: "GE6O0gk5VRervntCKAmczfBdSOvbr9bouJ15H2rpcOgHi8KTDEjI/NS69eLiRRSHBCARtcRqN4wfgy+/dn7D1VklY8a1rAKu02wGjw+fq7k7GVSSmynR/aF619R4SIABsaAhNCwswXnLHuLlq8HFk5ulG3z8DUvYBczB45bWZfQ=" + secure: "ZDjH6Z9CJr2yo7Splm+0xpo30QbO+cpeqxFUn1d9XOyLZQ0dapr6iboxdPlJaCOIhqVUWXS0IJgFwCW+5vWb9Za6tFumP1MtJGiwE6bqr820G8E02umwSvbNijr44h+EyxQcxP71Ljjk22Pfu7SLKWqMJ/iIzcYe6Z6Sz8obSWA=" bucket: librdkafka-ci-packages region: us-west-1 skip_cleanup: true From 3cbc7a13e7927849c6c4b74f9bab4100c6731e80 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 15 Sep 2021 20:18:44 +0200 Subject: [PATCH 03/56] AppVeyor: rotate access keys --- .appveyor.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index 71ec26a28f..b215134155 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -86,9 +86,9 @@ after_test: deploy: - provider: S3 access_key_id: - secure: iBK0xb23FMYOrOsOb8cw3YGyU+6vvPX5BF+PXuMub8M= + secure: 3SmFFB3J1WWjLqxouvH8zLdcmrFNVHHbkROb+2BBVJE= secret_access_key: - secure: jJsj373UiOtuXf/u0LLL0Q8XQMyu4s/ucx0+vH4GpKbAfZJUwYB4dEO1//mQDNuC + secure: VT0D5uzlaJI6gfZbemKCnf0MMh6qnlcmioVADK0oCkW6syz+n17VzWScRjvAifPm region: us-west-1 bucket: librdkafka-ci-packages folder: librdkafka/p-librdkafka__bld-appveyor__plat-windows__arch-$(platform)__bldtype-$(configuration)__tag-$(APPVEYOR_REPO_TAG_NAME)__sha-$(APPVEYOR_REPO_COMMIT)__bid-$(APPVEYOR_BUILD_ID) From 33de703cdabfcb3331f1191812d479f99224b84c Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 15 Sep 2021 20:18:54 +0200 Subject: [PATCH 04/56] Travis: show sha256sums of artifacts prior to deploy --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 64b110bbc3..db2226bf89 100644 --- a/.travis.yml +++ b/.travis.yml @@ -143,6 +143,7 @@ script: - if 
[[ $DOC_CHECK == y ]]; then make docs || travis_terminate 1 ; fi - if [[ -z $TRAVIS_TAG && $RUN_INTEGRATION_TESTS == y ]]; then (cd tests && travis_retry ./interactive_broker_version.py -c "make quick" 2.7.0) || travis_terminate 1 ; fi - if [[ -f tests/core ]] && (which gdb >/dev/null); then (cd tests && LD_LIBRARY_PATH=../src:../src-cpp gdb ./test-runner core < backtrace.gdb) ; fi +- sha256sum artifacts/* || true deploy: provider: s3 From 9ded5eefaf3ba3b65ebc95b0dff7a6d5faaaa38d Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 15 Sep 2021 21:20:47 +0200 Subject: [PATCH 05/56] Add MSVC 140 runtimes (for packaging) --- .../msvcr140.zip | Bin 0 -> 516022 bytes .../msvcr140.zip | Bin 0 -> 621912 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip create mode 100644 packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip diff --git a/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip b/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip new file mode 100644 index 0000000000000000000000000000000000000000..1529381383c631ad6fca9bec3b7f8b456a146267 GIT binary patch literal 516022 zcmY(qWl$Z@(*_E`9YS!I;O_1Yf#43o-Q5G6;O_43?(PZh&cWR|*ugHp|66r$-48o8 zwX;3l^Yqg_yR$Pt<)NUzK|nyjLpU2EsBW~YYIcu8Ld3N~L7+eoLYO$%n>f3&am_e?vEN^F)Gxgnod8fcT&Ixv!J2>f+|3l}fR7@>x|3?R=TK6B{yi zGr1U|9qo)54U7&AM?Q*OUK8t&`fH8U)zf*MnTh&vR8GYfIHL94kj4(hLYA^dmsJU~ z#=A)_u14iY07|pHz{8vWRX5qAfjw@IPwvOo%kyi`?Cl@_^xFo0*A5Q&eHB$f%kEv@lHL)49LWD#15ySUb;VcWNky4#q4!*e~Q4v@BdTx zPMf#31#F~UTmGEoYoPCv?^{ztn=oBz%@j8ctkRfrYcM#eb|9~1b+-sdKhU;-zhVq< zE4FoV8a_BO&3^P^#Som!Uny8;5R;KvS(TSzMVU|M$!1Jy^c|{lrhdPMq6g0_Q{p5f zk^j@?ecoygBSHeC?X*S+a)eYFFH|7dW~u{fhInnUUTQFRXR5`%|bj6xzL1sVCNG=al9tc5Z+B zEv4}2w}8xEVypIQvgRpRc3~wKdcsxrhrqNbmw8$LM)ub050c!6FgtV=!mocQxr?N1 z!0WRy$kEA10tP_?@MyFWGDM9+b+jUttL`$8S 
z!|TgJRJs0px^k({(FovT}|XVgMR4Sx4vh7);;k zgq%-h4bUwO`o#_*j^O!^fN=nH)^3ZekOjy+<6F$SPtA{pdmdWL2S zCP;_ior3nv*5H+cOqdbiS%Yjy#=qJM7t4i>m%k}s*urTAeWH$#FS_-&w!s9UM)O1r z{%Zk%vj2wN9DM+QE7~O`9E58e@j2#zDeq69uG~uCTMX+WKda0m-};~B)rh~Z0Dm#S zY0qYw3JvK6T`gy+!^?_+xEbUfy9V{dv_gzVxF^yN^+&|uM^0Y(!Ef>NU;Ce_`Jfo` zAJfPs;HRK%jl>duy|5p#hyh67zc~I6c%gxo?rE(p3+4(yh3 z3&Yq2P>SwD419vB7&rB01%3)Cy@Lk{1dY=#4Ubs>J^H-}PU}C&72~MB*g{kPAA4So z{z}7Rkbff45!DVLB~!d!#U&wAl=+&K3MKZ5lRM07f+owJoCtvQfSp4ufwab)=~EKJ zHW3$6v?B!zqrmVN^fD{>@5B@NU>h;%c@qX9G!B`}r$zves6sKxe>@5Tu#m}xVGf~y zD0twR#Ns|(jNlYRllT8?FcY9(7%HGMgNr@`!9W5>%#lkQK8DvdK0ocdT$+4X zQi2)N*>{;cTzoRNTqY!$=D1<(&%mMtAdpe-RhlLPR=)CS&K74d(T}N&v$jVFwr6ZM z#R$qeR+UD7#_(q(0??^tCeL@xHKx?!(8>Peo*mSu3T#-3INu3U9(W_MEYi)X2LFmr zc8iQ z1>vN8Z;PoLdNCFH7uDN1#Cq#Dw4q!AzZ3iS5y2=5C!K$sM$}Y$1_QffY9BV~7lp(q zs~a{cd>sHm`O_U!=1iZig=7#C3qz-9n=%SVinHlOk4xGiPe~I|5xZ3pd1WhsMkdrFHG8zvzDw|`Xv`xE=Fs4aP+AgxiB&lDSr zt|Og#Z;MQ}q@YW0C&YE{{!o*P)qL{>UGGATU+aqM;nJMV(>@q7~0 z+O^I_CPyRqg^6v`K4W{E{kAQ|AIL)D$c4k({vzS4nhVEP2*0j4uNM4{)n4j+ zg?K(fYfPkX%1vba)x*h1Sb5$ARkM+Vp#R0&sU0hYAD>_I?izM zWk!F6#t?Nb{CS0*=5XH~>cSq6Bq7nU%W`Sd;Cs>-ZYXHam9XBQY;?z4Oa^kuNLZ3ky}IRrE7mTW z|0i(YYTJYBKBtAtL4MD4y=2LKJ>;R~cePvL=+z^ar%Joq7Pg^Jo;_5;rIZpu?;HY$ zmu-c;?)-*MuRo%uCN8ydQ~i{7zG=V4k>1?eWvzwEH=hhaNHsH?hnA|=XTj*Lmfr!1 zcF`7gl|U#z@iVEzj?7FmfA405`=5LE_uQVZVf^Zky-~m;dsjFCx4>$c5xWi|2N{kqAti@BAZ5df9Mdxb&=1$&@2<{xEZB z^yEhGO; zIJ~^}%EUJh?HX^!S6bY2YZpoSRxIu67qOa~T5Q*5*0x&4uIxZ9ZBW(zF%-g=N*_mG#?|lnc9< ztw%U^G`N38a=xVfbM=OQxaMzz+xmlIm78(MPJ_CcfBN9Dcy$*PQ^t56Ax~y8L45Pw z#rq#D^@X*Z!is@kSzWIQM`x#};rmp`px1f@s^U5(PfBO8ujTFKVSG5hb*KfhD{+_q^fCYb^T}<7U8p>+<_aK`pb- zSqX{V>vh))PLI%m4`V<#sDANk%J-&5si8*?bbFxG7U1!&iT&Yx%HB#MSn07 zc6UTv^L)=Z?Erp?@{#lRUThTz=(@I%2X@`8KA=r)>fGEh`+E$&JYPZxb=*VY+$ajfC0agG-25jeDKw#}=4sU8gnG#U(GfuZ9dMs z$Ed4md}?eIx7OkDC4bQlv|!??kXwVgBfI=B(^g45GkqS`Ls~!6gS`uo)-~kIi1k%_ z&VFCgr6Gy6Fo#w--b_cz;3#T5VK!Dv@S)*#+% z;21Fk!XtKnZM zFT0FhWN@^$^M=q5z7B=?oP!hBW~y%0L`Ca4_KNuv;ve8VD!WSA(oD`1{~vrll6)5n 
zMIM%O2t~t_$FZ3WMYvN!S$^_u=2i|5zl5xG%JueUq8Og>JIxC|?8U*@EB2DS$Eab@ znMfT_{^rX(5w5~*TCpam=N`rr-|}k@k4Gcs^N(Y6T!8Smi87<+7P4GL!8|XAO)X`$ z#bI1k+NYT*(2h!BCF+NJ1CTzr%d~CqW@Po>Wtc^L4ub-OjB6(nM#ZsSv&PVvRn~~B zLSiv50J#pt=k)0ESi8Z!(1PtVT^F}og%y9VoVa=3s);fIhflj%;nMyA^TUlX!fqf< zEF0fO#C?-1hVR_K{H#RD)92#*0gBfGm=y-ZAr#{XOOXogDg9jpn&yf7)|QuurJDN| zPpC3Gd-E5>Df%gQ|!{k;j@A@VFcu%_iW)uDf)2!rZ0RNYpD7NU*KNAN3w12oMTc(cA;?5&i+R z(zmb_VY-jb%ZO*8pD^gLr#w5SEI3M6*kN6^-20BS-W9^^us0EmUveGn&`7L4!Vy2O zuV-4gFJ%j86wESMk({@r-kX4UjO);hUi0m5*!M*fuJ2Lk{HK-|Y_g5hq8YoZ!r1p9Cpf3*0z)Aw1|I$`(M_i)Z5cH! zN*qeC$Ng5rFZ)*++4ZR)Kf=q}LYfc$YZQyWcNGG_f*8_MXX1gkTdKo<6xwt79TRpq z*Cy)}y?+zdi8aRW4CIn?_xaj=p@nL+XJW}NmHI+&-H(YC>m!${22Et2e8;=qMLNxS zG+J6Zt=Ke)Etq$v`w5NKGM$@v*)!UbkHL z>+D>nC3?#(nY^lb>biC`VCoJTE%uaDZ(St|cQLswSK5d~`FJ$)Nl!Wj`yvq6bXk1E zq^bnpSJ0ZdfV->xQ8hTR&%0)>MQr@&?eBMgFkYf;c~b)uB{Y+Ed6}+8Q+#ykDi4jK zd^RVyCjWgwujkjz?7q*G%MjGVVYs&a`p3r6K|jLAAFt*O1{aMh3a;n^XBQbk@i$6B z7raSZ(V!|&u|iHMvja-$zFhKnUhE3~Q=(cBE38o;d9LHatA!))gy zh9Krm>T40fO#X+=H#3xZ@mf8V#uR;^mFFo#iZEfjiYCcJ7B6audFts0K`-W>hu3RQ zl;O=yCYS&8n8W1L1!1qo-m;zct)>%sdND^3P@R|IAGd(0e`r55(-Rg7fu*{Ek*kPa zT%NkuFbiUMC38O?si;iS6?G0I(N};3neXa6B!XCh?~MzgtBfOIZ>lOE^BAf?BybAh z8D@5S^`8X6zG-G8yF}K1S17wg)Tme2+|8+ONdd|7iiUOk@^u06a;JoBDg}`w_}5^$ zumeydz!HAo z+_YDJ$ApW)H!pQ!&^o@K;n3#4cG*d!lssh01%MU-3lPV|Zjbe9S}XYWvXFuk&qxnc zHN3G2Z=kdmn+xQ@N1i6xM_OEN$F|BUmKx@s@K)~qtyHCavk3{*N|ZFPQ6|jX2Eyhl zf+YFZdhxtNB{NpS+$uE73n8uQQ7?WAJkP`mdAIB?Mc_C9A1g0~7xe;;UM*e^deR$7 z2)k{>2m9^}+pZcK{R19~Rgc^Q?l!pjuPR*)ALbIn?>og@+}lzBIgjtww@=rcDSHZ?%c*_nabkG-0)!r_Or$K02T zAHirdGqlvB_KA z@7%$vf|si?J#ejpT+KBFFFCEbmHIEo6>~WLn%?i}scJvgZ#NpJ4O#4cR=JtCdabsu zU-mX|e-iv5xQl!&YE~%*$7>`5^{>*`FYPRKnNnJwlp5wVm-g>_8d3@(hLsCP4G-yee?eSmQ+I2=e%zL+ufJXq ziiZhbwSZoxyEgCgl{D-oln#o)>yWc4I~ak6)gvQ^+~QX=jbrA#nO|3FA0e zY^Jr6t9+)bHkkdMm2&2?KOOu)804>|8bB^?h=$+Rfnl-5AgC$i`clQA=@HU1o3f{j zBAgmH4%e+ZsGc1-ZnnLK(z^*;19=>bU$cqV=1$NS;`N52$BU}RES+J)Gk?$R$mi9jq5*(d}EG7hB~%S 
zK1@7?&OH8|eL+>DD7IQ>u=K1}w@RgNdmY6u(%Pg4FQBh)5xN&LI{I-F9kfI42b`Zi z?;&8h{7`3GJ=voDH( zoZ2;av+d=S9ai{WIw&P5koh84T43qFK8ndEIet0No)I_5FU}L;@eiVfTRFHY%SwoN zY$xNhj7dEn6}K~Xp1S#BOR?=x600WWg@t)u6Qs)1Sapx$FW7D3Ua; zB!)a8Dvz)#)k_|wwOY*y_`P99R3|v(>1hdVBGBUi5uCQ!m{w)vzJbY+v}G>a6n*=s!yEw8ME+5{&Gdj)Ex5k2aI(J9LEttTg+Sr#($`D3N$l%*=I7oj+I?52{E=fMi zB^W-y`&a2wnq>5@v$&ugj*1cr`Z}v58Im-NRR4QL@wxV zg4bp|(srm@9FBCamza;kq)=;jVlq<9NWG4&RZIn?2?w%A!SS1D%RtsMJJ7I-4YVNv zrGqX4q$6^j(&vCMyLV~)M<;l z^}Cy?AJd!SLv`cPxq>WE-My%A3R(_rUU5i^)@_|xT?tbl-)}M1JFQaA-(KS zABs=`jIRmF;1;AFf@XVF!z0eVwyBsKU4ZP? z9b8P$pZoz^1ONJf+x8$DYt=@M*Y;UOWq&UQ-;j#7GA`cDPTLAtj^B>y8YgGL3gG}b z*{^4(qo%F4=|c=x`fvh?^((GAcaeRKpbXLd{}zMKXj6<(niT3;4b!eEQ3P0{atF*p z?$JOMW+$xJ<18be+#!_6-W&(nZNU1K244LN;c_QfHu@Zg2*5F+? zGA_HU=1*(o9A?D-=e1BY|4G;y#bZHyCPUa7bEsv-r+b1n*nE-&$BT)+MTc$t5JUIo z9D1~lYMncM5m+WWUg9~Gr~s%`B=5%mF-s9j6Ytd>x6NJrYS}UQa}qJyKK*St(8q}= z*hw|H7#~P)v>nmspc9=_iQAz%i1oLxm~oT>p*Js(9@;N7gg!lx-g28x&pbHem{+eAIzBR5j3)8hsD7g>tdA_A1Q(g{8AfnH2 zZAftXq)#^N)aEq-+ckmZgIbkap9*$?aqIhK=_-|fZ0R~4c)gMP z`1H*w?1#KjHdG6l5v=P&DSWuOtSO9PW|{aOImkC2HFK%qVkByI6nOt6G)~T@D(mG6 zD?!T~7Mu?dmGk%Wqo5ZNKO(X|bm zqnpT;dugqqG;9qO)H1~X8y&E0ju)Sqp&?-DxSjGCbGhinkpn8Vg)mmOS)+WTPaUZn zVXk`Z#030|b8BNWS*z|D{SKDm@Lel}I|xPY8Jt0@Y{@}>75{v5c{*4;TaQQEG{`-~ zADcn7<88m%KB`zA-4EW%Uz1`V+Go2?s&0_$Je{8w^tT7132HdJpBMJ7T71yP*kW!i zU;r3vML(h@Lk-}|gFqtg70)0}AF0OI9Cf^&aqF2!>7NJ5FXBWtyu17LDIKZNUgbW# z_nDA(ujRq@bz3{?cn_?^gYR(OU*5SEb2`_$7}*8DElECeW=(KslUysMfsLvriOT^q z9=N9^xhLT1&qcdlGI4?b4WR_t4wvm3u#?)VL82)HvO+&v<1pkA*JB_s%r+wF)BsLr z!zx~gqE`1#D2KWw1yUgT!6*DTZR>wf^fpd)p*0ZPzojgv9nKSDB()Ow63vXO=~6=V zwjjqEJ-Kn5uFpdGWro=Bi1YIy>)EQd^i#E0&5?;`Asovtj`%k6Xom#cH(7>Cx{AY+ zN|R{{TVqtm;ry>=jzLcb{EaLo^jq(tO_Cv00D>0+FIWw*1NF?lzedeafYN|;%RR;k zYzm_ryNG)s0|s^>@S?VZWg{KMElQ2wr&VZfkMn?mgi$spgNjWsK21K=7kZ~68& zN*Q;}!*FhC_96B$!`}F@>%YvQcp=Bvf?J=vg3I6d&zJ<#>h*=Q^b3apHcT) z<8G$4B`{0{P5D2q6A!Bfk{^omkxh$i1*$R88r(iTmI`(E9_{c7x95Y;UVQB7Qh<>e 
z24^iw)zzJai@iN9c`el~?$NEo7mwa+*8GSLw?1DMY`S6oa4#<1Qcu12Em%F!E|fmu zFBQRd-bQ7xl{V1zdMjf5SWUPU_-Xg5T{yP5o-&tG%G?>$g21;fknT{hMfI}k%2j%v zTH88VVwbQ*m6~1EsQ%aN!aMVv8>MCA?w5@o{YON<6YU}Y*tcHr=y@yIM@Lc)^M@j1 zE5Zx&ID)Qj=;^=5_v|IihsjKC?kcPOtY*>bWs}P0$Z}t)5qcrG<|ZrJJ(oX59l|Uh#WxPbip2U7B0=j|QvJ9D@G4-xy`8`niZn zNtKnVe_YvnXwNP1;hB3*?YNchqIvynnTzfNNPvHVW3bl6^`wCHMK1_d}1L2(I2=aENtaQ4|r+g56LuSk6$UN zi}@w`GBNrqC#ooC3??{-FV!lOiG>Z*b3O4r30X`YF~^MmE_}Qe=~saiSc&jvti=U@MNwyb^7Jv+w4~%Q)U{7+;pi&vGI@wEW$sNwc-nWXca7^Uy zsyq(e-#9DZm~GQ2LsITHbJia_5`-U?PP_fIecWDQltZWLs;(@T*9hZRyrfbbAG&k4 zr(ra01d`62l$ze6+*TK*9gReO+e_!t`w6;hEHfIviqCdi_4LeT)c`C%CJu^u!x57o zv<1MiujTadSXvTkVq;KE>tHVHT|ws_!U>B-6+^Ldf`ZCN_hwUz=2-rp8G~2_JtA8` zaedW>wdo7`$@k-#@3S>!&8-~N32p@lwea~IqVE$+0V(YLhIz|XYd%UGaeE4{PcGO_ z`<9}viw46sHmk|4s4=}B(2sMwLndQYCj?zbi+#Mhku~QLqdwy?wQKzDi^mrlvhLIB zGQy3`lsJR1u8p;#zTew9Ij8C$vWD&%VkfNnuhKap% zTw2iebN0Si8_#h#OT-H}Gv2n?qvYXyJ=@8rL`xz}#nBdfK%yHFGF05{jx<`yZNpbD zQ%9})$96}T$)PT!`%B|sTUOnkkwq=tFI?{cjXLk2Ti|HPu)$B!)E{CsXk+sAQN2rH_8@`Wm^yy5o-f z71db%L-6K2?Rq~^?7Rx_%<_RAG^|9+`#s&^@{4(O1xa6B-rat-;{|z_Z*v5LAO4E^ zy-ECBrWL*SPZ2eHY-KCE5&+KtXjUo$Kj$G6kU0KcVMNf-rC`C^-sNaKg^9CZ1}5774K@4#^74f zLAGgovFB@ft0{!XWXJiI`illA=H>WtPnHC8uuJLNR3dcA2z4!@BjpQkg{Seb|;Ku zGaHQ93+1fDJ{OXDZ%@SgF1RuW>~8uvh>XNAxF^i*sUP1g*51+)nN_+lsnpIbnXST2 zRrTn|z?gK;`*2%4+K;GZ1z(m84WH`M+@~DgZ*h{{OrIUR`@f1U#Pq0Y8?^r%Z@9<^ z7{091Ko)xtW~pxkrl}^Jjj=2u|*~_b)Cp38GV86PH(c z9QfgJ>)^Dzk}TdOpAAAY;WARELx)KJ@vm#|t4IlbmxlPGFY$>pxL35p<4$GoId=7* zXUma`fRf6Hi^H(YRf#XMtQSa#HyuiysM%vm#Kus`6iCP20V(iL9DkYdW*|Mw)fqL* z>zqxEp=0R~QQ@Xd>m#)3EH<{Ha7W^Bd0b9Z*092P3sKJQCqaCdDGfPci2^!5tVZCHz^Hyp4LHd7&p~F zN&qhD?L*ef)~n|oJ52z~-Im~^&s6v4{>5qDN6PxfJ~q!BijzZ9tFtCR5zE6H&G`k* z`S)_#nX&Q3F5x>2k7s7(ci^q+xd-`=z_7# z8xO5II~V4Fm9zx6M0WizORWWLqKwJ5ke^@GU1E*w1{(Z1EQzJ^n=yW$%J~qv zL-_}udL9L$uI3r+e0jJ~s|`3~qPcQv^OY7Cv$KG8ZmqQGd z4R=;lLVT?=5-!ZRt#+VL`e(a%0HcDXJVU;pJorM8KYZ1IFN7$Ar3RP>T{!R+H&JHW z7g8T1P}>Diq*Q4+9itq}oL`ML`_4sWE+9t8L3Zh>fG`SW;e75nQ32haf1|Me2Q2*A 
zc0;*RM{g~V?S*_%J!>mae?tdd;PVEH=uLqQGp&xsZzqBgg~oZAV$3YbD*8Za-MX z78nr(qFdO9RLf)Tw+M3g{lNW_*>eSyn@o&xhofT3*Ot>K)-w&vaV6B7b*qKDb&IGS z&7`ngdoBFN#%|V$Tk#17h%MrnN2ddC+LE$Yyz#U(lq?`cP42AmK_ z%l$sRkh=FbE^Qw?OTCP!LMO==+xwG&RIIs}sn_TeYtXZ!{KrWGoEYdJ9c$}67{*w; zpDic_Evz4LZrKyz!aMCSsNm!j!Lryy7WnZyTv%&7{=r+8!jZL^8oL-6J1dQ?XaSs%{FS7pA3m<+e?SG=xx@P9O_+u3y?BwE%eC zMjySbt^aD^XX8PVg@CMw*h&jIVGEI3%<>C6Mt?Vrg1v%bOxEwN@%V8keT!1ZK?O?v zpgW=<#E+3*BGS2xq09U;M2(jstt0G^ZRUv}+>%@KsK;ZXqZ^r_zPK$!!@n32y}!QG zWiJqC)#Ha<>0uWSb(m))P8=ctVYdt;(QqC6@@N2PH}f}$m2QjW-e#Oif}zb#)GI-*Gjf>BW&m}isQ$t5gtG( z$E;dzPMBuS6t|04I#Spr<5v1Y5JW13jH2`Ie!rs z^k&JGbRjYKfB6o@4YgN4{s;Nw&g^^;lz8BFU#J>AS>vo6{oZl?r;ln{2U_*kaHFm<=o)#-m5 z?Y0^2WVjV=iN2)F{^I(R;$@=Pr}J0h@BB2l{^?dte(Wc-4V>MF7g@SiLnW1VL2lUt zjY_A^3w|db#&04f*Gkel={c7Lk~_EWiIJ!JNl1_mk8q#H zqW>`IWQANt;4jS+os98iN#(iB6--szB$3levM}mI(EW%I#v?CE!y|(M!?}VzKJVu` z)NT^4gWPD2t&+AoEErzF}DdR2wR*vN*=SatbB6=scLN|>Y+7WVsP>}_t z&!<3b@S{Kl!vfiM3Rh@X{Mgdy_azZgAVxg_vq98Gw7SWCr<2xonu-GSM-%FM*GUE! z{|R&_nl%!nqzT{VGoI9{louuy+j07|#9XMjZ*#}~o8VAY&7R$L zL%^wBGP`Se0;Q3Lt~I6+zjc0$G1zIm*icWSJ-R!?)E2i^4QoD1UWPy0TdEL$(c^-@ zl14{0dr7U7vm5%y;3jPvGT9_jv)_$%O$YOYCJIB5@}KDiK&_5gBy*+tbtyv;i}Z_h zxZ-g}+xslsyX%C;%prrPmi`(W1RG(=d^)mX#{mJ=plzWkQrHj3sYLONFWX#ZQhbWi zeNGt`C9SVt35!9vPT2a50%>-6hSA|k7BA51#FzN{UZ)62r7ZD#OPsr^lI>jTbf|WT zp;MNXWHTtKyE-vp0nWTv+`rmR^O6heY4V)B*I#h2?^y6fYQ7B}2r{thQfOY5*7hM> z4Kb_l-I&`7Z^jRPrDqg}RlxbRuS~v-@K267G`q4hCSgw2M{vmc{PG{&@~zz3m!gR7 zCS&fy-+(|0^lzsKLG?Cid88>2IwW)u#hfOcc@7SxM1_6G<+xF&unFNmX94gMtx~g! zl`3}7lPret) z7#|yTVoP)s_}ARu`A_!cVWVJ1-x!c`7r|lc%mfSIFg-B)wPT)Fla!!}9iEgOfQrPC zlV`RmPw-A}d5r%A$c#5&l?(t`+Npy}LnIwgcbG}A{fyv0VoX7vs`n7kH!Emk(TXhH z;bXy|NLSD(oa!ty(p;>$X00MvEQksJZA>K0U$D?HJIh~+lu{`taZUAK<4@__#038z zX+8b2(|*|z?{P8y9&+Hn-T60sE1tv9`W)bhE%csM*=*Vy0#}e~;@qG=ft-CVhHUo- zXuscM%SIyjw8){ycDqBGJoM({XHwaZZ_K)$KVGlDs!!MQ4P6b#hD@U!?X?DuJ&)nZ za0cX_9Z%~kNCrgO_jr^R8CDbm;}YJV>w2~wl04?t{!^$_a8gQHn!A8g;9v&!%onD8 z+hBR)lLF>sy|-I}4XH}6(B^A@f#ehCfdOO^wK{I{&ft3-u{-A9Z}K99B_F{t?@xc! 
zKqR3Df7LGxKL_q-5b*8obRQ&)IG>68cD@e^9R&S}65{858;Z=oNeO&^EXKbYdb6>lR=RcDT@6+-o5=WQ=nb{kJ}6mt+^zrs?>T&N0iog&X%t-I&aVl~2EP z^nPafQov1Gf%-9zku$N~GI60(_UBTCMD|KyDOl2(jJeI<`kBInaCZ(dA%wM-3q0BT zn;mNUD+4?vM2mHL*PIui_Sko5j+ziv)^`1*Q0ZB(b_Dj4_^teE0~?=Ci;(|L$-C$0 zY$`Px3$FjH)*Iy?6YW^&cZz)x0FLt2kU7#f%9aQnM8*f%g%P^W7)zZB_U}Tqlt^j3 z!E`&zn*UIv=4RhVL1B2mEbSyqvOTuA@#qbHA3#6It$57!N404i^XuYBjcxxhx;Rs9 zYAv=DPAL$KEAaD~b1#)Q`1{h%D?(YjCvG;-@BR~kf^|pmct8VkIbs2qQAlg3eqTtG zVMic@?b>;m8F@>nIVo=z*{9)96IQVSWwH55fXWLqV-vq2S5dH-X448gSH}w*Q<<#S z`DdwfiK15THkwgUxU zM?n280i4 z&M!MvR$Jk9olRm&a15(qdhWD>j>www9=&o~%};XXc*{}Lk4Tmka`ul=5`OhlY&9L| zb_HZPOIFa~-WnI7%_Cz`^gFG2Op?F0A3~lVV}8f(t&hPIc|j`Xco>- zAzxC1sm{udq{agi=!~MnP0@SQcezt_D*@_$lopVJ&}wdTg7)PGWrjolj&IjWdLWG6 zaaP_Ex-b&;P(ROHb>DKO2}Fg=8pLDWxjn$IQ4{y;loMlL!igFzh$zB|x|c%m`s0dR zYKW=@hd@bu5B955yEzOpQ|~fwMvoz$I@E$`ny6tdj(+-Frz4oX8XX=)Cpj~_M^u_S zn51ekyUz^&Q*v16<$LH_dTk8_zV&MMobLV}&H@Ccx&cdwJ)+A8BSQI}~>vouc9~`8JwaW{o%Ns;7RZ` zaG4rP+h_he!FMj}=6lZ?L!fKmoq6P>3lE!I#ra?Ff9|5Tr#tW6Q2DjQrp5luu6Rwk zYscRew@$xlq?^7Q&b1$=Zf_FA^4Y#hF09nvE$}{!I&uZ6Dx3INgH#>M6(?dG&S^joBm~p=Ds9P& zO}7>kKZxp0?f(a1+sU3TbR(tp%xat$%0r}ig}KmMp9g14nfe4lxH7#7C3)q@llrgv z@b{3zGcNpVY0oJ`SPqRiO>daPJJ`jGsI#gsGsN&IC$}yWHI*-&b|2X%@!z%VdfUZ6 z1T1HT=kdT6stb4g?Dx|sVYNAAmeakkz#b#}nVeb@2%nf?j#KNyM1&>!sj5<6?M^>a ztTbh5Enht$ErcfCQKyitH)QTSr0Oph06P_B8ch`y&5>5uqei?M{nq}LV&*|2f312ekp~Gea<4Jp~*}|0=9_S6h|XLy9*IwB~u- zu}!UO)()f5rF)N@$n^5&(R}xD{5%klZ$gjowjO0LGjH;P5RggIhCexH8yK^JeOvgs zY)SDnV-W-JN8C3O%@|!?8ZtfU6*}qZ*9=&MBajOVeg-dDPW0+gnD2qOE;Z|>^mBLl z2Hov=lkcAbeiK=9sN#sDk$R^@7(gFimt^9k!tt~lmq1+!ZJmhh1tTgGm=J$a;Z&N_ z@38s8(UUBmhw&o&yXVn^B~Wv|tMI@&UpaqNqEAP6fi3R7C{__NU1TW4jbTJc{&Tld zR#GD>xD%;vP4vbgn&N%QqF@;4&lB@x(i{0?-u<*kF>L5A#oQxF?XQ$e#^=wT;8EQO zedWm(ISTXL#mlC6d$R9Vb`cIW^_=ZvWh|f0(>#6^t0svg7lxg(LGZ z@Bvr!IDX=8BI)0ASd_YOx4#LE2@D&fR673My8ZAbXqFnbx{HuIsq|m?k!7mVi|qKJ zaRS}}e_RPf+;+=vs31BY(aO2x=DWWUzJ4&@yN8i=xUUjFcc?n|%sfR-5>MSZ*gB>; znCy&{_+0;Dg2Guc(4#`d??N(r+4wz>|8m+~zI-KMcL7nJIX;A1*w|m6$lfoT3OP~> 
zzj1e{a^FNnUvlQZJu8!1vrpP`rHDool)}1JF*&{ZNWOf2c{d9_ciovz6H1C@NRYsv z1-0Lt3_{C?K!rV$IKFEjHq~Nw`;v7S^p@F?)f8QrG(}?Pz9M6n=)(R&{osMJxb|LM zy7_1l^zp?sYe{ssgn}ku5rNeI>6=&QQC}{@xTUL;7&MolVtG=GraxQV{_dVn32%&n z3}eyDU!dvamp6uZ8c_%HZJfV&rbNX{Z3Yob`KPe$B8^@jC!f#M<0K`!EaI_4Ye1jr zVW!K}BOlO9Dbb{LOl|SIz}L5Q!7`BeUa_TTQ>%G-i}cmx%~!(oZ^SgeqH2FDu0<=K zW1E&KWvhNe-^ZMLU3g&~t6`}mqO}+3wTLwu>B(_yr%KeHyLH?T>@Yla>{V+y9qoBP zOM5FI9_`DO5j_7f4NWKzDNxFB*aSr;026!6PyFYg2R_9|8owO%rf7~wrzoZv=42q) zC<%k9+ag%LvTPzs8ZkzlvipKqx$ni&Xn^t}Ca^2DO8M+^>E^El%Y5;VWYGN{ zXp$elq2MS5uwOAArqL-*JO~LS+0Vp#I?9LP?ymVm{9L~%D2h}df;J#Zt ziu1>9b7G0CU#Z6YKLAHSxWC|hQ#lp-)TVBFe`MQxj63;wWGB1iT-e7h`e#4vV=msn zb|!6f&uK-mH_Jlz$5o~<- zEkJ|pcp0^97u+}KBgjy)Pow_uGQAc4r&lB7 zNInIcj9l%goLN+3QFc2s7BSJ5h4761h$(ptA=>99Zwt%MHp}@lkQB*T3cEpa-UUru zG_f6#1)7|!b_#9$9&oN}V+&jZ7;K9GJakcj8;%+PdJ$lm5#S0Pz@QOeB?mZ}0i4PJ z-mL*FWdN^1fEFXbd>z2$27r&(3Y-B*ia&YO13JUG8sf8G>)+2wg-R?nLo_+ib?8v}PhqVqzuutT$V zh7XhUz52uC>n{vAG=Ba24`AXxl6~RxWY#-;x|l$eLvq-$H8LXSTR6(S^iyCEAao(q z8K&oP2;`4-FqMZ7GpQ@gmRG&2XUk>r>1jR8=?R}7CpKLoJp~t_=NNQ~I-=;Y8R&WM za|1oVSbaO0!Hzq2qOIxW6*2Z!@}w%u59B7{Nih+Kx^;o@;VAT85^(s#=snW#Trv5c zoG&VNn`qB1EZPt{o(~;&!My}}ZZmVb29-OfL zRx!3-&tu-zxMux&U3oHQy~bbfsyPmh!vYkYyvfY|-!Xo{BU1(M&1B+aICy)>IY7*p zpsRv-b`@EI+q#Hpms82ULUo##;$0Y+jzkd4E0`gnvQ%{?ObF=pz;2wjUyA<^Vf?|k z_=8>JZ)wuwuQ?GH|9CzAu?+uj4gLRs;UB=xalm~=7&luG@$gPI;O%uHT3_J$7)wfn zbABfAT58T}6jZtsre#!!o&qv)4AQ`K)q6M%w~?e`CZ?k6U`E)v4;xL&wfG*f2%dg& zuDB%C_oE3o(nK3QFar<$4zoLPDyOsDNa&hIJ)vl&P{(Hotm;OGA{5Jc$7LV0RnvwI zu-~^b%d`hw$MD<)y0+v)+cmVKts4blLA4^d6Dqt;QVlvB3e~cY*{RVvurES1TA6(d zxW?X(B-)i?y}=roLr0=Jv7($b@u4-2AI2gBeEZP?G)pJP4hcZ7LFFPZZN+?|knz|Jf2zJUC zjO{SwfN-ih+EM#aGdMqb0Qu>Ht;^Yfc7xgJm@oStHmMH7DeF0R)f$*FBt*&W7zG`V zbeXtB*~k2}lXRkbu;#3%XQK~aG}&3_E4U!nM*m|2y2QPGNY5fN(=gzqiEiI-65@@I z%5PGEJ=3<`dVX`$@fd!iofm)I&(90hU(-T2)~e^lR>r-(mlL)C$X>~ZJ{tP;C&2Y` zuu74^nvCa2$BlR_)PgElsBFZy$>`W1a%i!km7A9rw{r7xGvb}M9&;o1X@&;YpH$7e zD(6_G>hqH0Oi&TX(m7n?814(8+AtQ>25vCo$)+}zY^uYG*`_3yQOrwbG&1*#$zTje 
z@~>wpWd~6TEUXE+@uZ&$gr@xqED`~^lo?@Uo~f#hSwfO0G` z!+Je3fs%tbtZ+KVqw5SUXLOyRX&PNoNF(c`7Ptzn+ofM0y^ofR@#VpNObUfIu8NjTZHz3 zaRz)VKaMZ2G5aM9h^K<&lorJ`k(P7wz!^HcN>f~KCpl<-y#lx#niFl@T#AdA&i)@k zV7nXNnCpc%Cg@e&Gt^bDXRX~MbZPtJ(UvHC!BcDX`gk5_;+_;-5HHNs=($r>%*PlM z^JJ!y&xldUed{ndY_Hz(OzCF7UbL!ekJZ36`aatmWiL4WW_PN@sf4j>pr_vwVHuZdX<%jsTq?KDQkM@~D#wl(=LGSw5GiiLwWy0x)ZPwN>EC|& zNc4MHX5(XbL@xaeb!)4*GLV8d^rHyr_z z*O^f=%-aplAT^Q{mg!5!G;>P}4ZOkDj}5gZs$Zh8e*Ww@O>MlRXe8%Fq)hZ~)ZBNZ zGVgzER2pwt$y%1Nme#IX!m_4m)(bow)|)>x1gE$$U4O%8sOs`ogSu?Bjkk(jpPjfq z=WNrj&x0RcgrBEhh@X2>uvUb+U-+a>GdQZ&Y&FQgAL{L;pEXAD=hLsL{IARcbK_u2 z;%`-smmXDqu`e1}@>;MSRIZu+@VbC&imiaJ;X2JvCp8-R-c?)meDBByvG(OjjZysg zo?lJR{W^NeI6b@e80mR(i=Li4boA);>3$@e4en*qlgQG7O13q|UTNfCY}52m=On4t zq;auPf7ElaBZo~<^2Md=i?=k#>Z_%frmrsFtX~dO4*x2B^_@VhzFK-|`sx)1{D%+y zDt+~qfKgu^a&diiRh=Gh*`a8CmF+j+%Qih~3&1fzDK|jruR-~x`r3{JV3Ge~eGPzv zww8t3nv0~6nXU_HYtStNPEB8H-E~2IZS^bv7JY60DxJROiqhA5?TW9jq5s+~xE0w6 z>!IOw=8l7{_n^I1ejNP(%$o;ea$m9LFfD!!i&2H6f3<>COaBHc87o)i+ifeVbUmwc zrKS*8%__b(HJSFEjNa5*Qw6dEeNebtkjFCr!i}7;=mcDNYPgl=H@2vas{gF6&$%AK z!>Yp$KcXni+S;Nr`Rg|6Pn1Uvnqu@No&UB=vtXR|sa>=;I&d!rQO#e8GMXpR-K&{l zTh14;SF_Hkaj)U_!Kn668`|eYwbvWXlW3)(ZU1oFaQ{F_lMBrDv!n^#MVU>o05Pp1 zPFgYUk84+>|8pnP_j5!y-w=<(+TrR#r<@lgQ`$2f$7dzVmPawYAYHBw^50_~CI7`d zwXzRK;9MKoH^@Je;Wyf<|?$a{}%(C^Mq-|NzzG-*5bkDWt>a!xuEf$n+< z?HZ+F?Lah$fOraW55%`Q!ZZ}FO=}rkptsN-`Jwp{SEWKC3^l}U`DohBr;>$P(;|d9Z2k{jKp|& zgDZ^E|KHAOYTnLiI)FeiGlE{LCn7xmTcYRxQt&^}HGZ#(f4m<5eLDphx45bQ}Bcu7$*=8yB9=JC`4y5xC`+Lfw9G%>)fF{^=E zI$?A4=mPxFb*~`&WeA@s=C`6F+8A?`$08McesclHxGwt13N7S_1nF-uQooUk2)i=t zYFyFVX#Lvoir(g;4>d=YT_u)MsymM;uSS%47E)%F=L6Md&l-mf?~`pi0n zjA-ZwwRp+;mV@Wr=67%?OR)&BjJ=?<-qc2u_0~mi2*7|A6@$dnICiQjUWJkH{8svb zfV$gW#Um5o63|f`HXQPr34pNXY|5h50(z}*z%9Npiv`a-Hri)o*@YC1Sk2Ehxu#M< z_bU6m!|nLwx|jKAq{=leJ{l$$EqD=DgN&eIOe7PL$6+pvxRAf28xFc`E%ut>$iow_ zVN;~n>3Fw#O}N+iyJL|0Fbf~v6?uriU9?}5IwV8l4F^6*jXO?ctcV{cshfb6TGeDu zHOWm%^7#kX({8$Cx6s=#gqQj12TM+|{#3X6PZJ8@{HH4#nZH@hdUlI-58`;IY%=kh 
z&S(vo2(q6=O12-B21q3$J;_2qZ1!d;7gQ-LOR1B}LQ;hda7h)Z9G#5l@~nua&?V(s z=qMFY6I}q0c3A~wrbuVNWeL*mRw|!wk|_=I(>slFqFyC&Wr->iw-a@cC_z) zhtp-ZAN4g3uK%VRXiQhKlAvn?QN7bv4hfc^=WP6BCQ z-+D-voVJW|o43DIMw=X+{w1(}@{ZHTo&)|_Zu6X`H@?LauRw*ybO`4V62R!U)j+Y^ zB$wlG1iET9ZImY>Ze}VMV39hV`#E;y6H$Z{MhfSqEXt zSmq2cp1vg4n&`B+u=`HYrHrCgKLI~e{HebJ$GVVRFM$I|xj2fH7--5;ioPuxJ9TbP92Bv&MN!UWb171uj6zpv_;R^zWo zS?1S$T){WrCV0Ei8(=;bR9ir$C;8TnM3QeKW3A*M&0TYl--{ND3&ClGYu1Gnj9g%)PwZJ zXRu~=7C!TSTi)DYs9F8QaI9H9-B7bS?0-r5yTejMZWe&4;Q4U1Y)Q%tR1GRn`W~8@ zXyuLrUeojW$Pv0NG#;(0>Lq5qZbyEXqueLVXz7dv_=GeSz=0|={3C#0-Igq$&> z3uwWd>4G;Ehd~)tOxOBN{Buco+w(Qb!Db=M#9dNSqf->^Fq*>c={E&$- zUR{4bU9YIp3EWNl3(Ua%>mpoJO(n);*(3nmk2pD!%wV~P(d7a51L z<5dgiSQZ=?tlnh$r6>p;$!-&Em~Rr!whi?E>|f;9((g7E2}0!qjq0`^T5>v7;eS@=&T-4BX%X3&!cI52c7(KC!IohNAy3{^3vXvq^Tv_}Zf2`YfZ;5|t< zY(vlR|GpVJ>S(2BRrZPumC3^;CyJDrGsw&bvJG}wb{Wj<~ zdEf5m_FL~nKoTWEx)V!abc)T`>&ymE0%`O8nBcuuelWd=p5KZCd@p(>8gr+c=r{0N z&V_yX(^e+Lm6QDy(@x!wInN1Gk*0-n`P?U!ok|i=fWi+gPGR*L)7IFUX_jAq+1Dd7z~NqOwF?ay<1gSZ0Q~p{9Q6M} zi?mUv^R+C}%#T@^a6aq;JXRq)py>=OR|W$0;X zx0NZx2^2VisSu~zJDv1@FnOKW1NYfPa*w#^t3NPswgU;MB5b%V>mE$Y+?KTp|Hx}S z^4c|Uu;jI~xniOZLi-&4wSa z6MtQd4;_ZdyXE>>=$`I~0|pqsWb$(T9Q6zCikG$h!GO9miTAOc`5*_U_EC8JZl~3B5RAap# zsnB}Xjn89}f>E9^z$@4LjP(cMOvE8|&X^#c{wR4?s4l$5l|zfC7GDh94QrOpoe`SmhBf0kv52sZgSE#JxTG}r0LN`u zTY!;RSX0D{dt6BWo&xA<0`anq&RXuI-=IH(Rs?G*c`5XV2S*e-JyeM8BkaW_;>BHY z($|7m6`#`WcoXXJe389v>FOm&S1-XDkGy6LO^*+ao9tJity5kj&|l+~_~#3FCA`Vl zQ1qG5t*B3Coy>nb6a>bNbl>6^*dZ($zz92IFsj+lc}^Y;5cQCB8{m6+qCW;n8=#k5 ze2B#$I_n`A_)UoB0BP}Qvpl0o2WbU>dD1_%{t#4~;n&NRytIbrM7>{J2mmu|SAUf|{vmE7J$YY3{ljXzc z*8eOvQC9P9I-EIm!f;B?7oUuz|Ne8lWMIvCoZ8h~$$t}9z2JhmJ4lVdLg;1lz}QJX zp!~Q09!?5y;^SnXCB+!{D(A2$314e3jB*Y-rB5L1RcrAP%=7tivN2rMc-d*tBx5S2 zJZ!#m__wj&}Dx6=1G9{Z_bi%VN81Q&vZ2Fv6V*y|F zM=cicbYP=d&b}D7Kif~j{;-tEQ18^U|5aFEHCN5eaDm8F;uJ{Bye6uj-fU|YFIl-) zFMS?M<~)dTcJT9NZAwJ#@aq9Gd}8T z!Q93p`p5Yk1z4KJlLuyDVsYRq5^ZBsmg`NPONhQ8a?{bQA&YO@#LD46Qk!|Bf=+3b zABCn?q{dfc{0#bmn==dLc;8q0%=&hU!8?a>g_^vE)BqD^ 
zbEJDxNQXB^2lVndSmJMsF+$x+pI&VO+EQCVFSt!U&&R5D(=a9ePmE{VPmISjay;r9 zq6l+T5@u0T4>l>_5lY%V@x;ih5tN4Og!O6k;J1;Yj6ZEh?kcrW_aaaxS3RRrumlG!fh8IuErR{C@eWBnbK5Akw44%_)7e4_Cm)K2B-{|Gq z6QtGzh(AZJ?VH{~_9V3fSqwysy=U_7vj?Z=2IO37^~t3J>15C09+DfV&)_i-;||@z zg7c$@qk!{ez*SpY=z^zpESFtMqZLQp`LrsZ?jyMdYZoapNuLKvlbmaS8$eEzg~Gh) zfOe>OCFbcZ9l$V1Lq6NNCWIIU5|GPyd8#)m{>ODHLGQU-PVrkX?!oeZg%|vYv-gkC ztb8*Rm1_h#@d=mOWUIH zHt+z0ybhF?JkEqj-aLck8-=u(la6Ca76hu?&(Qy9!tReO_%Bs%#BV?v`jM?+j|;G5 zH|E;wDlDWA{zXOJ?B2rQ(s|pebS7!IVr8qo>03PoH&f6z zjqtyurygb*46gf$d~8bO>kH*$dy^Fqt~)Ou+J8>Fu-f7+;BBV>=zeeYxn6nY6}iAd z7xoRue&rX4<{$Fc)cie8lBH(3wBLURahP+QG-V8C^(7r8*+9*}+5KAjdeHVmWh^-$ z=LUgy+AEg^iNhkz-gdijvjeGH7wn`Z>0r_UQafOLJ=d^MZ)hyQEmle0A``I(QAUbV z{hHrFFNLTEC^OT)jmD|biP&QoKv*D7OZ$zRzkz`lyXbBM65bYIBWE{q;#lBR;UPbU zV~&Kmf;WRt%dm)}Bt>tO-ogdRfI&6FppfCJS3CymA#xz|@E1_luXjf+A@=^H7MPQ* zqdJe9pR@*I(u^ksG@in=+XjfpOC<%y*ycO@|g0F+g9zzG1E?7|!kS0-bGv{T-bS>^j+hxW~F1 zSbNd|j{r8=DF%>)(`sw0DzW;Y38`n$__|%t10b?@0%NXkwzXA8lTVY1C>&mOjm(PJ zIdl+m`%s^}+ap*HWxyeWi&1m}Scu#{(hTj}_EtVeYWqn`Q%@8OFQ|w|Xp%Y!>u%`$ za#9o=*)hL01FH2RytW6VyN+?O!qMn6{ z=N&%sww08?;UAq1$QRC=Hn2f#6$aJ=H~D=Wn7!K;tV%=WSerb908$GA)zE2 z5iUxxm*425_W~~ubbc|t^CiXw)OP{B3RibA#x!Q<)-+_#s)pV=Nfw*oP?zx#~{XalFOVOX0G&2`DYXYd;nx zoO?+}QZC#-v+W?<(u2dU9fLPR7ZojEf%DpB$jrt?fF9ry;oU!6`6%2vmwM{}%UT+p zh-E9EP_b+`o;fs_#NigxbukRucUOIyEk?B3&WYn$>%oPz zJen<=e=@-NHhVu(7Epv)Q8ljxTKYB)s3~BPZ@azv2B|g(tgSn0DY2WA4#+9QZjl8% zmj`S|YocJ!?Qyc;<ni%Hlcg)&pGi^VT!aU2TwLS-gBh1~ek)2GUZ*sj}EfIyDj#Y0HoAgL+m^ zKY>x^25bkaKgNhE!|xCCzihI0xw4?0V##MFr*Yc=bQ|(qateJClbZz*gTN*Q0gIsi z|4+D2xj{g5cD5xYmNfHwuotbO5q7e;fc{;t?!*+iD7+I;Vmxle%`hry_IsS?6xN>o z9w39Hfbn%v(5JqK2ZQOwsqO_Np$(0{!fP`BG4+~Ipd4(arkX_z&kRPf%aRX$71I>9 z_u&Xg0T|ocl+E|kp;^x*S95q~0d9>`ia4?(%gY&&b*yc2~3+NHV?C1f-oEgK3=w&s$_ zyXB&OYZs8~?O6;n!`V+do%93n{~Ha=<}^seR8)>`13Vr$xJSHvrTHkeFKdTZ$m zFT6m`!R3R$UXemM_~N&1s2$`ym%^M3!xvc=d7i~@ zAoDCx7K+L9Eigej(+mi%T+T8N2^uv0IpsVCiRwIY_%YTWTM-8*(SIqDzs7P13IpxbSTy+ip1z?;E`T!5BdN8QDZ*r0`o>m2}kr%J}D_F 
z16C%gw1vvgYNd(r8ypiub%&b%Z5|tXf_n81gThne9$7CUC z1SO4ry|pWg7!QA0xDZLUdZtMP*z1lWb1kxNF+H|T$F*pL6kJ-U94Ct>+U!&XUftAG z4Z!PW0A57`vG>`osIrm!EM!vsA@OoqJknATv+<|nufD1%{K&=3Tjnu}w z^@}mI?akt4ve@5d531AeqvJz!0vOrG!@+=30N#rP?|nQufKp3d=65)xL4D;Zso4Su zD};Yy3+XQCuOyt1q1K?Uykv-Ti7lW@aXU&|!s=)sWW0I2t;zUU^E~N`FQw*q*qJ;a zh&FPb=yY3i<*71O8XDPnjx%vwFDwiZsf(GT!`|1IM{V@z|NNmWKyK+SdCd*=mb!pqw_9jtR_81F5oXFO=C7be8>}t*DEpR2%3f_@z{v=sLnQHR|Iy`YZN0p5CQB z26{zW%X4*!BnyZZ^c3={8y6N z0ESSq)%LB6LDhl6k!ERkXf*yCt-Fb9w#_xGb`)`>%7$%3hY)+rD3a6%YZZ zka)os9gDjc@UC%svw39C+F{qJ>sOjH{LSb=uo?HN8a0dSr8)~I-a}kRFpP7DC#&8k znn)(!)WfserHE7fc_fE#`kZe%$4loFPkQ*Rt*OomKvcSSw&EG%Tu4{NG9}OzR-ZS| zlDl$-{TWfU1lck0m3KkT5Y+LI%h{K_~Lhm;BDIjaKS#4tz{70jxI zRXA^#tJwPv!e9S*k|)s4`IvGH0QV`yK7cv84mjz(C_M?lv*Y9dy_?nF-Qo@rR*&6@ z^eZCRYhQwG73Hx%KI;hoa*|<4motXk(n*F%VUJ893j{;X$oF&ckx5ICquj)ts`Yrl zooc3#r^av`GP{?PB18QlShyTP9_=G6#^E)*#@1bvw|#2K%|=ntTj>XKY=s{V;a zKs#o$VZv zk4tDF8SCJG3kE$R8_@NjWoT83$-Neq&(kZ;&$; zx|}rjE=9rE)F(JnVAG#GWu{1S7okde9)V$_@0X!17NLM#%yi$-)%3$Me-Ksf*hj8F ziUn!6&acO>Fs#EsnxGjoLQf6iz5BO6#BH?h zHonczur~j);}IRF%aG3TmEV$P>!Guvp^P*$o?ayq*017>5B>>vrPPpCUSWLbcgV=C zpRE`-VH{(UU17_fZS+3RzNbR-wt#pANe`j&PVo{Ms^x^PZ?3 zy0Wb;OEn)V_>>N62t&STCL4nHtiMH{zm%#Zn-$cUSMio|WN@A7k z@z4a;4$$dp?N~KNX$xub2fdj(ITn8O{z2f>Y>PDsE{%F9UIu%AcLE1YdWjZrisYx# zvj%nfJ-6eE-@;b>Odi{~vGBFi5ZYNN5{E@5u_A||h@3qud=UJx(%rKV>&n@)%gfN@ zX>NRP*B~RmFzO7sk0(*2QqZ^Bg0Q;t4Aak|DH{I_W?*curc*bM(<4 zFyK{)>K)Ml+&6%Lc+uvWrk|DhlkB3MWEZV+=%QspC$eF?*26^TVV~+%2BkB2vWMQF zbz%s2Qi+}1C+OR3fm#ch7dL&7jq)9@1KWTW^{wX(bx$nEU3927sY8vw*ihplhKk*^ zU1+?^wVqg-SYU8i{GZTu_3L5vovP4-zKVuwz-Wl$q`C^A#7SO-Er&=2to&>9^5jFTL+OKo9fd@#;-MPr$VcI3i&@bWgOB zF8K9IdIiafBMptHp4jjfd?q#3`|5s1*2YPFCQ@htbP$WV5_T<=1z>DtK;tD?e;7?i%LqPXA$$rIP!0HAFL|UWue$G;Ub2%w+9HO z7L5!LQaeDBptIJ_W^ZeJ^)~0x@K*)rUWHv*R z2k6e(3`rW{YXLHb!FokJ3~y@qrmuLQ1!0M#J&GzdIJFK{+bA_KMo0~Iw_s)@71r0p z0|d{q?w0f8iNk4gEU3G*008-yXbhikanUQ^1pZ-qTHY$pXnejm$n1~ua^R&~r&P}Y zM$AQvkyJQPUe>$w6a(!nsu*)fvYsL4r-4qNqGq%NeztTVZ3zOxQiEqaIYm0^4~c%O 
zPg^^{DAB0#iIQWKxjbq1f6b0H(E5zYCV9!EEb;L7^QV^@Y%ZWtTCw9iz&EM3gX=C$ zI!M+v46GTKN!GK3%Rzvmv$qi=D!K*i8#rpilj!lqEVzO;`+%`tV!|tLzOWcxT(+Us z#Cd+>QYHjfp)H1B-072=ECn{loVtbHI-yvZ5P%oO>U(&JFVa!|;OJxwS*rDrH!mkM z>{pfmMf;(sQk~$t718?Gtp~~qX(c%Z7yUAyHxW>Wl4vfl5d%lX11w$s8p6> zw8~PDbTcvvEd?5&euM=G3)yjJtc$d7Wx?i<__^sDMR6x1aDL0fGB3CE{(T>(f{jr6 z@M71$qi@P1GGHuljCv=g>>3t7PPsG)Zit~jt}!V-SoG=fniL-PV)^(&A`1R?Sc6wwaY~(;Gw>W zs;F-&pFIzI-oXO zufGwo8@L=98i>_p8#b&rb8>k+aNx1mW?P88Ge^!AG=3+a2hK|kZhSd5eHglkV0?-M zX%A93MFNgyFN{VuH;iO_9{Y{T`24z3WqejtGPVwmPQGETzKJfa3~@kGUwR>#(g`;K z*00o6?7q!wZ^qCaC+=r@5VXqSqPPzYw{RwJZ6}=1^u;S=yAS;U?Yp+x4Gs3c!O&h{ zv19s)-3*7=Yxg;o8;?Mn%8ea($2=}wSZs&FUI0g)iKt)Z=yBj$kR6I+?2u;yam-G0 z+*3PQ3SvrF2eL{`D`8OCrAK`%j$g=~J=bu{*nQAL&E#j>;m?av9yB{rL{kAHq|hB| zmr1j>A_tlOK~klGWb!${mc`O;Kyv;JTP0t6i*4q*o5(x?4h+zZ`%!#p!BajJo*{#g z89#l$%3yGQcHCLu#TZ}^v`mF6oi{fgP*-X1gJ|@k=I^MRLt+c9U4RR(a>21D{1f$P zF_D5&mkPX_z&gNf{+7gM**e!9E5jhwst8}H5oq~Fwh2qiTQu{s4-xmH-e!9144wMd z8U^HRCV(y=d3hviGl9%BoTp$x<;ovE2jDZ2v@4aovMQBcb5flO85dASjdYkey>0jd zy8Lr=E=zA2YEN1raCFxagn9dE9Gttd7Lvigi$g?55s#keh6FgFcm^VKg&Kt#2w4Y< z2;*i$RXIQ65RzKNOXM3SV-Uk)BUI3=H6b_m;bkzOi8_Tm5Y3$zbkaQ%7q;mlT# zzKhv8gC!SGI}g(_tNQY!zp1YB7t$gRS4l(R!CfKyfP@R`v!y-*z4=5~qV!j2TlIS1 zVBxrmXZC2uE(z<=7^8PPv=-5RF&+4l`#;dftK(m}D`Na32v%X(6pHEZhqwP9^!Lzy zG5uwziUq1*KdXm(sx#Cc&To$S7>ntR5p`IV5ylkilKg5?Gr6*Am+A3q+~vePuADXrV>`>c+S2 zChKlJM&htAeSZV$`|}Jk;CUd9^5PpDiGV}%5*qTZ4e{SdJ0~V6=;&AAB339b5 zEHPrSM>ILq_ZX)v#>XyWnap04i_PhjDE9^=!H87d#LA8^rWXkhBdK`k9Qs-jT1p0*gaNLC}q zp(eh>Gumx%(FcARp^8UthQDYuHq0N0UTYa~>4@=1yAuoOT-HRWOz#2RFnGo|C97{{ z7ann}nr&u@Sg!#(XRrkrmWJ|c0Z>|`yA2$H;}Q`89`mJ?0byMWEtYTUTIw0gtW^a% z;V$eE8HsTGSp!;npC1hamNHbS=T62Euu8+nXZ4|%am3hW6#{AJ0+N*NftzzV=`ZIn zbKKVKYjv2ZfsIxlIarpe0S4OvHOeD0B8OvjTyT7)7mCL$^~KTpYUnShuU+A?o=LlL z77$J160|QU&iE)5eyCOk;$_N&0pP8k!}nUL8CKtxSpQYB*wr@J6>ur(u(gT*g2VnKqY6-%3q)j4U9@}o0yWj@#jHt6Kz})i>jkZC*OiaVzx~tgO}cPU&MYFRT>2w`L710AJpxlm9&h!*34k%DMeP4H-2XWBJ1<*cgGOHAA@LTof3Yg{ 
zXFOLIA}<8o=~PcFU4^RVXgYyoV49b2BZCY^cP~ZbnbM&f@zeH-ushlDup5;V&bv84 z7aKRMzm@~|k5JhECG2k5P@9Gew23jrFhA0y^Mm`~L5Axz%)FHC2de|U%usU{rZAbu z@C*>+&qD*D(>4OZXzi&}sv3>$GDh(7pJ*GJDOYWN`z%;57L&`mq`Lx~OY_3;V}>mH zRt}3vH;SQ0@n8O#bj*7!vz9kED2qYT%H(~mK*V-n3zs54n;TSxfTEw?ctlaOj_~kp zsIzdMJa3(uCa-3IbzK~Q^$O=S9m5T&Wz6nVj#V{%*SO=+#ZGWl1fT%@<@bu>ET)dz z@XW>KoMo}Bh8(-qE?5t<>Bp9NsMK^9a>pp8!xmufH}HITyAE zTAPZ(>H|(%iX9yQmUJ4qnYN)24JBP)v>_+QVa>BplRkBaj%y*YvYtMVMQQ`*jm3X> zsQ*9Sp~fQ@Sl62~nKPVCC)W;$*ODq;Udyu>o-t%UhEkHwvD2a6KNu7mPkkxduK4no0CmxQyNC-G|;aZTfxt z0A5&}YLG!7-SN#I*qdX=VAop&e1Li@>8>8mdPXkoCg@I#X^l@fX@8JSa}3F)fZpU- z8Q=g*HpL93>=2b(G4(o|KvQ3*ED;_`8$se>*D9c-i<*-h&<&r{_rlqZLa=EF40?4Wkied(%Y zom;2eMBjpgQX<>m_uAh_e+D^*{s?4yC|Sk94A`j$I+&Fw^~{4GE6dLdkOuv=VSo3t z{?Ikh-p6q7e3~x?8eZ9t zkQjs*wK&V`RmIuA&@Qq7XJPMdvv;q$o@KX&sj-w8=Wf>`rQfC#V_NM2o&QocyHCzx zFe~b{(~=T7MxE7m`a?RVwG41#oK$;4=b7k^D)APbV6@0+>^GV|c>rTr(z>z>#o%+N zG!Ei?|EEl|Tx9ur#Mqg>-j{K%KPb$Bjd}*rDxmkbNT4Rt{!-PJ#j2=AFk{>pKs51` zX3OHu-NlAiV~~8jlP2DdYm)9{>VklhLJc?*^j|G0DTLFPLjU$XJ9t^RGxO$$wrm%j z3X`Kp_bUqG95TuHXbY(v9?Tyqyp#hxWD9+&R7>vOLtU?i4^xkmCjB~s%Xi<|nDPqV z|59{n6EKFh2ffhLB&Swi?{fTAYnJvZhzO?Cqsr$8aRM2IMdwDLxV;n5fA6d)oD@1- zqfdx&)Admpce}OnB-9XWKX^2oX{YT6Z|j0N%SAnC6UuZD z_C~$^RW1K@?l!`2gY9M+R=>KHfeFR#LpVV=BnxMwUInf#$?bD`eo1G2$CmJNWoU}vYUq&z%JRfe(5%JEbW2oo~gHQhw-7I&L_tE zSD?hLfF9_F@ncggupzPcz^tpTC#Bm-tQ=(3fsU zL#SSLX+vGzb{oe6A{y&4Y#AwfUuqVlTi~9(gXb)&Bz%mGo77MOVmuMwS7}TKW1dvY z=V^6@2FJC%XjltNe;={GFRd|i;%fTZFL7_*M-+JtV&Fi$&Qwg_a;f;&VFGZLiBLS@ zW$ncH*CH}5c2#UcRDApt2ByWR7mYVDYz*Zu!O(2|FNIH7yMI3H3$^ZJ0oBC!{r&Jk z94M$Cn`Cda9!j#mdRE*{I^i79{Qv+^|FHyvcL%S>=#5%M@yJ~=fXMIwz;hs!4;gQPYNi&Q(Z{_#Alf9DN9T_4~5bp5AT{lyolzxbl{ z%UJyd7pcGCqV=b;`i&Q<-+0mb$8)iM;zjBwUVQ$ne(FW)r(S&itbY1M>Zf0P{;Yn- zMfShr;`_e<>pydm{-3${{8{~1FH-;2i_f3cUv-iCt1do&R=?pQ^&2j}|MRi_x{K6b zck%hN`nxW&{<|)^{$=w=#4A3p{`RDc&Y#tP^djSb^y2fMhxNNJGXCz1j{m877t)Um zyT8|8us-uTfdWj?ZXiuMC<^^E(8^~CVfd|S+%sTf}R&v)s<0#K8Sl10Nkok)fnEUw64 z^)bgmI6^m|Ae>i)_$zR}Fp)e#c9`Wniz*o>nSi_!Q1_dcva&r@b|jL_6yw_q1T@8K 
z>JjwqC__&kGPqNXhJY}QVy##5a-~78gw``z>y?6>X_hPF#w zlq*w6MzWllOq$sWv7q*^yz&ZZDsLHfx3#lDGR(^AL}hgX$uZE&fci!Ox%BH4omh%u zIr_gy=j36~`rAmJxn40o$|xhHI#H=kpuW!-lAobW-~iD%F;dih4y<5y(Iua0Us8L3 z?HEU&XP$=e`{>s!%)Gk(FxV0H-3co9AFoNpfe2;t{T`B+;?2;>juas5>K|oNAq#eZ z!xZlwIkKz%$wqc}p*l3g<|hhi4zLs}T?v>kt8fL$;a9+>5!@^_PQP*I0+WHHn%Ii1 zkaPRJxyUcjKiOH7SQnI{btklG75;Ioz@XJQ+k9*iI9Vfmr&9@H`z`LBgnY4-|1bImol=nfC_aX|I)I3r#?|?uH|Az5O+hR^Y$HnMj`r$LEW%>}!q3L(Y99Kf%SZ{t2fBIho$}diK zSm;9qXzJ024$c$gHG;gxAg?jWYs_uwn4*wFr!J`G+}MP)qs+KwdWlX|-0i11JSuEx zL&0#T(xLm^Y`+k_GxYwC_6yN7L+|gmUkKlYM0qZ9jxNU%u?VR!o^dvd+o?%#{&UPr zb+zb~QS2~0^|xV)mmy`4rbl~83HwKGXa14Xox^<|f6{lcD8hH~HqCc2X~=g`_usoN z{_X!+*F|6c`L2sAmNVByG5`M?*Ttt(FXX!Tw-K(3^hed5Lm=&?y_fQ6S|dbK zBk;F&7}nd!Lu*hR=ho5h{{k(_?8!*`5wx)zu^xb%V0+2jIO=*q zix4Bwzt2_GXQm6~R}cl|?N{---*BA@JAk#Lm%YGrz#}LE>E2`=hW?D~FT)HVlnG4r z>HcV(S&p1x4(0lt2aI&ZzI_^0{I)a{C-^7i@w6FUq0oLOOaW#}y)jdTqrsvvnqA2ZM_D_pkxb0`$P(k%fhKr)9B>;@r4J>5j!x%U@flaK>Nhg%5 z;GWL?`%j-64d};?NT4?|KekC-7TO80%NFqD7Lqan(db2vHV^m?E*JA1bR_4LFBOfU zQ*DC&0l?vLuNm+E4g>O5pM;@53Pbmg#h-u1pUc=x0!})A+3Zr`FJrDeG|+a^8#kdD#w@u^ z7}60HgiRn|vGI}|oUP;9VwR<-RuE2(S#T_38y7NrEfQ9cOn!wegBM4WD4QodCXTF)Q#H23k0(|Qs8BO&0W%zk^=9!~X8CU; zd+8T|P^Gm_&SSP!IaQJ=^}7(sbQ9Uzmc}+m4qOLK?~=34yrdpJ-OT|uf^<3<#EGx%cRict?dU))(6!8%9E+D0Nw>ck<6KB zAOkDV?w6nF72yQ-zwp8f%mMqnkjzq((A&_jD8uh#XaMzHp5@YdCnp+l)fe=K=2?oG z$YCiKNg%DpDIQfXWcGp_(L(Z_-t0wN&a@!!&W73er^2dP3uzxlE$JjIZ+gK66u%?lQ>(V^h2&_PJ%|QYu3~9z0;kb&YkAUw z(YMaiNjqu!Re19f078gM$lCP6^EF0m)4jRDg7d19Ds;EsE3Zw5YeeG_Q=MTdhy8Rl zrmKsEdsaV1$EB@J@cfB^a_NqA)w-zQ;?^P&`MTg#CXxbE{ULE2(phoT3N7LhVW!5? 
z9VX66_D~)6*IThoJ2j(ji99vUMdz6lUI3oeUl5+5$4MN?LfQv0*Lff<2(Uu>1%_lc zpk6|-`n>bw)3@k~VNhFs08IKV=ZQxN`((Ec2*fIbQe&X=rZc4!l(hP`=-Ayd#c6O% z7&`k|X_2!SSfr&>V4khku0r&;sU7g<^KG%uKn+^IQ&Bejc?V2mfV8#6HX;_t=6m3e z{4|n}^3#mFd-{3y`6By7;-ul}?d{=au(~7d-K1Wgh3L}wu*E}sSb*_i|C>1+g%*Y4 zdy=E{B}_Y87*B$*ywbbqBgNHSPu9kxYt%kG0<-bfQ_CSmDI9JuVXVIfVI=C@jhirU znof;*Isa>hH#nUrL($p965kw-*Q+Y8_Nmk8Qmt~ z#69S$y7*xj@)Zjywvy>n`1Jmif8aFmr~n7MRAXT8GG%N>~qqjgDe9eY}$+O zppaw$#?PJCKBrFXvk&1m#>1CSFtu`3oELAh37a|5!PbmEd#?uSLwtmrZ`{Q6e;I0u z%z?9?O;)B4!bEBVYKG#bu$v+V$*e|d{W5AK;$I~5;sX<>{s`>5acQ8s0^SHiuG~;~K=WE|*5v4w1(#H6* z-gS?uNyZA6p}S@=(5Y}IG=7BVZRYLF-sP@7uGM}@V;#No zc{wYG%DbWIwJZ;>Au=yEE)feF)b1YDX76JM?}HCjCYv5vfC1M|>!e@|Q^aa2b4pi! zsH#*w2Dc8n($O1c0tE6_@farHJ=l)tUS+puFIJ*SI%ZgAMAuG$l@0dOOo_VTIRx)N zVz}#W*?uBT!ZkN;!khxLB!u7MRfOO(nsz{1`D?hm&5Jr(!lW z7paTdPNHN4ry;fC-AuoSdf!#eL#BSt=1})8+x)9r6@~37dhdLMN1yYQ6?)NVcQ0K6 zWj1{D23u5iEj$(%sSXo1u!nzUHA?)GpsA=NcKWnuF8a}{>>f>cmtCF@08o}KMH@P_ zr3>?xM}eX)w(bU&(tR1(fdej3s^VnKY!M`NIpl!m&FjreR6fO3LlYsXHlQXCi`m%wOC$SM!jCUCyYia=2n6xlbW*Gcs!m!2=B&Tz(4ngsson)K(Qlqw;n)8(P z45sES_P&Dzi*1rP&bm9~-YZ6tQ^WHgA->P~N)5JP7sv)2JK2EvQ`O3ojo4{qd{3gL z*iOAL5sJag=p-1C^dS?ydB&-dIMRqH)~}Zi-~l|dQx!R~fFoMwX=y*;Q~^*#YGa-_ zwU{>Wn5Tdo00iFj?`X`nFrPnr$ZL2fFl{^wA{|b2m?5bAlU0#(2eQdoY~u-s?W`DA zNWVuHhP_RGqyr9rjR~yh^620@DoqPS%`U;2>=a(Su29;G++w+TK9H_x4mzA<&F72h z9{`QvCY85XeatXax;Hrr<#A}j_~D!Es^_V9a6{v3YyobUK4cK8dXoAapsv(3TiK3l zOfdkH^d}X9q5r~V(P96E1>d8EQW;C~Gel*bgIfw0>#uJkFk+|?s5^!l(S-4LENi9$ zxLp?eG-Ox{`}AH5K8I?0DHu+qOItqCOa1#9Zc~Zh!q5aRSjrqWIjP@(D2R z)Cob(Uz%<12%zAC1Zr?KDDUFWZbR^lA1;rXnrbqi~@oKX+O zNG2?L3&;mOoSZFD@Eo3$^=7~$jfnW?M`7Y34&_fPa=6ybjPcR3tGZ(q(d1p7h`Osf zplAVLY2EL~SQSGPj^tNI%M92sIY#0Pm6xkYisa=|vO_W{6H6(c42X3^hU@nxWBih2 z&T~o?1hCdkg`@paH+5zHL~FY*m`K2AW{+#FIl>k^j_=cz!!6 zp>{kchXuXBQ-csM?PfqY!$<$)@P2iJnv>Epa>D+@4We65aH3Zr6oj!hai!` z9fjoZUl9=BPIfT*+}>y1?bZSMwP5;^-2MxoUysoprHK7l^$nex0C+$L*J;L^2y6z> zcK*F5v@3UBD3*|LsV6D~mGaoD1@6E!Lk@f+)<_&}r&s=E87z@E$ zmE0zU;G$pNLSQv$Srr;%L#x?< 
z_1Q805NJZA066;C8%Wp)+*WBj8{~kmtjN)3l@^}9t2<{Uc@1rd8NjD=VNum}_cTcf5jgxc3?Sm;kv5eo(AD?ot|Ju>6N zMXCM?>}f${S9|d&7&s5hotyKzih;uV5py`4=i4&eA(xtE`4d}*r`DU7tje5qIck_s zlzLy_nhvPfl&5hhj{z`_eskWn18rH!Y#|P)kt<_W*~uz1$+7^~>7-r0;XEh1W14E| zG-|~re;T1$>Z1!j4Xc*=oHPIo{yuhr6sBJz>=XOGTDvU3w=v4Zn?Kt{pE`pO{{Zbi ziI)kBzJoW@)>N4$UZXNhu*OlXjD^zCTc^N6v!{jBce#%1eGtB$k&yL@^2Fz;D!n1} zJlvPzE|ny{{dE@QR3C}4P@Jzdn|-;8vDDjh2^5yzW7Ly61|+i7dCZR>vL62~%qF6T zoW?Od$e_jBo3Wl#sYcBUO;`OQX2MzD8blhFRq|Yja?AWKOu4BVs0jZPGeULknhnrV zuU!mY4}{?Lr5}!j*G3gyN5etrMWI+E$Q~65vKO4sF&)5tt?ko!ssK`Nc@Pq*MT^X>0&pyU6`(JCd2NCVO`wF6p;D1It zfU&Q_APwO#Xm&H&&tZ5nfi#7jw!N!O%1ulzdH64glYpESQ2YbDIalv1Gd>m$6Fygf zy2P-tez(WOLKUTD@E!vi(u?0&Sv*nFxV~RzY~xk{Sak4xdZfZXr$b)$B=}r zp~b*B=xjN{V4sDUn1~?zC{+K~V0t+W*C#DEaPlgl zjY)&G_zSx~)l_s0uRlCnQ#BMEvkOPRJ@)x$J!f{!xsN|~ba%_~qxd8GzEK($_VXy= z>aq_UD-9mA2h~{5N5A!E2rhT}66={`&fqZSZ5a;;f*ZN zyIqhs#<4F3`30*@7|d1=;c6X%NtGDM$@q; z1z||leoP0Gfd0Chlnm3+-ExNI*k#@A-5J8M5+NK+4m$3_7(St;B6dUA0+&^2B=Q{a8+(SICoAD97 z6G|Wb*18*xLk|3BmUAr7{9=Jdb6Rp+I5q`lI5uS@%C-^fc&@Q+#6~<Q;D9b4EhH8Uk!+p*5F5R_<^dTwv(iPkrzInAC+j(&@3Y-zcL zJ7$7DWv3ZC-1;ycp-qR-Ynz=Ws}FuW=F>9BNcWneYo@}dzS2zh06>AooRR~JiB}*) zL>MsyPT&w_n1IZ!_lf+`Z+EwZceOhW)@>tV*rbiPdPK%>+P0>eDALptR_KwP>?Z7| zsAwUuqxz??Fx`q)ZI9z(kan`uq}`U>0q^XIwqR8hX|sJ&J(lcA+HY&Gh6Vv9*qTMb z)>5s{@$1rUo$wIjuiDz7Y*UzwHfYJ6HRq~7qIsr-D_rcNGqA_^@V22leUS+hh3X03 zJN+Yg?`-?4#@cetRRD34nqZtAWCvbTX;4==PdcZovP<9Vs?w!eGbi3ccHpeCWmcW+ zmsztiYlcB+{brmbOHeumFN}l}?NzNs&E9OCW1&@%S@Q+*d#sJU@_SfYZHc^x<(+gE za7o8X`(X$*W90Yn#Pse%$p8S_Z-0dK+`&#F-p0?^Pn_v7JH|@f4?A`&E`c6KD%Cz9 ztub+))wY1=S13Y0^COZ`#!YzIl^tvk_Nr}B9rV~u5Y7%`N=!XuD74>&E z(QS!3RC-JVa0)#=4=iH0y}zV{u7T(NDaShfmtkiz>kO!Yz4R{ls%H&i47#H~GWWeU zO5!bP)RXko{G8Q*4761}e?io1eyGWp|#1lEZsDx&k@9k%a}>~KN2LmZ@Nr~|kaVZJ9iul~&XE;Z|wOSuu(hyD#NQwbua0BH}Y zJIb__YbMnW8fSC_qcyUoq~Lwx?aED>e{!&-UO|7le)@dnP!te<1|T|C2Zmq>ufgSh z6Q0eVT$MLJj$Dz8C z8@PXU#K3PYWEeWE`GH;ZsrTup2L0o{$U$$khK)^GxYh~ptH&}LAjVHL(MR9i7irdc 
zn>9pI(Kk|%Rpr-R3A=@-oKA{kyJaAOVWTYbPsiAjYL95J3V#||s7`~`w}LSO11u)n z?(hMg#m<#-RVNffD+(@jaNTEE| zHoL`hC&O&i)Wjp_GY-fsC4~*>EliH#OAnpN&^XQi>5kpGH*#a>j0;`r;Q{ol`VX^sYFO)6Mz;QHsJ)@h z<3f*^ZS*+Rf^wq(5IEg$0H7R=lm#EH3P1#9hH9!i+{m52@&@qvK}F=OmoJkBcUWQp5fGpY9yY^wGOjAh(4Dv3Q@!mQ^n?qnZ%C3;hOqihj z+&1(&78E(glR-bdPK|nTd7-N!{nu(tjh@THF*Q7i{wePGLKabD8@#5IcCv^X{oiBY z$2kA7uqq30@U_2=3_vt^!*CleIBkX$xC+@@pVsHV0I z9>27({AKV1GpDs^<=1ekQwXSPBdW<#I z7G?Xi#^%jbP;O>?JsM9aTzBgs;FZyL> zEi-St*T8u!T9`Gn7G}*-K);x*(*hu)`o-#VM=kN@nY0khlZwf0=gX1&xVf*gRFLdS z9-G9n+6{|=7BV#@H3qt22WrJ!PP+CiUQB~G-^BXSI+;LjKVR&LHjHx>HJJbl9VOMl z_%F8}@;XeJTTIgTQQ}zo=Lb;eTh<%T?v~4X##>-=F#n~Q(C!rJ4q*_}JdO;PRUV-} zE>sORNj9$CHSnj2M!l#LFC(|R&a+R&uy4FHU*OA!bnH#&d5|s~q)=%gYb%@m@XRSh z*=DvNwFuIe-@}9CaN}1<>kXV3_fuWyrM-xXM=chXmPabbPGTG?RV~Ki8I8f6KA{Z4 z#$#x(lfxnh!u@%1aeR%t9BjCE}=w z9|4YP|5(FOORf!j8bl854mPxSZD^KaTJ@uChsKtR%Q5ZzcvPIWIW3EOY!3`1cz_@ndS@M~>f{&<{>zWRw)^%;zl_(3fWH_X?%mHv+V0>#0cF&%2P%FKD06wu zYW|${45k%n@)pM*}M#=x{l{aEKK!k@`*Pu zj>Xd(54Um>9D@BQc!(&A<;vx1OWA+hT>P-+)?c`}xRLi0_Ujo&rJ}TJpXv{yUG{cw zG0)s6aNUqf7GUvE*2OEaED#;rAMK(dLWm708T@t~fan%?aEUh~0nU#}`r-%a`!N72 z=En720t8-*766ju^U~FvoEL{V>&nY|&zw_~LifN@uPXu9cDlCSOJC$Y`eG@LxWcr9!!o?WPEYeI({ zfEP1hRo=x66$Tf&#C{H6p(Y06>ZZ4xWg4+@ZcOyhgw`+Hf|~JBMY0AemcAPaDxOXl z;UY1NSR7g(Hhi4$=NKhRq*kA|I8MXg(_qfUq+QOo(1%g!cvoPMnav)cUk$L-RV-9q zB#{---or!f#cS=|3boyOIXljqpRgo+*y4)Onl@#42S?Nn#^S0UaMJI8P@{-OAXqow z_$}J|W?qU(4cr#G`w}$+Bf1A-sH}0SE#fFlq!PgVgs9da2MS&EAyt_=8bNj(bhHB< zTuRR<%GMUzbctqZ?((iqfd1zR;k>0|$iy?vdQTq`E=a%(TTa#gD&Hbq!;#fI;1}5Y z$*`29Nds27^sg8#FTh@TZbw6jePR1Lm2e#i1b zwk$0l+5JB0-UPwu%|(Eik?>-avA%Qw@2Ij`hzJF)d{Uy>L7@`@8AJ}`l~-P&ssBcf zD`bG_HZ@-TDlF~S za^$)Na-CDITO`*lmg|Z_>N@rRxOw zL)4G?uPChGQaG*x6TTNK6CMD_CU1U#X+AyIwQ{;*dQBh3nsx#Va2U>h8FpqrZNgLF zSVUS&=yxU-^X>$Fy*$i$o^aAl)9`9YcNkbq4HFkWzZySwN%V!{k+=nAbppN76e8CD zi(t?vL+h4SXQ`fo!S!><0!uv0?Wv~){ew!j)RJhcvsC_*o=HLul zL&x``j5nFapZbaD(*G5c^~Ko>IqDFdn#lRTT^mvgWV!B)U5N5rThVi`&g88dP;Pc7 
zy6F33b(l=VxYC#iv`#@W-9L`G_O$uqU=ykn=yvuSij2nMe^lSLj+oG}I1wk^2Bn&( zHRH4pF&HC8aM8QR4R@m@8JOP#V3Hs&w8#tN{F6z#Mb5NfjK<73d<#FF_(gVF6rEha z52=dmArjFRFiKm|CWMrlq#tzRXp+5*aPlhN?~%oUF8e^FP65?Pz`)?s5$Z^o-i<)< zKwa~8l4&CI5|SFw);@9R>%V&_FQaj%gLl$X7Ix=i2sbUpuLndw$O2{@c@8Jy zUkF7CS#u@O9?OAq#&aX|aT`ADkc*(^At2($vOd0N?W3SKlb1(BD~GH;*~O=?kYlhr z`9^G$7D3eu3mH3T+S_3vV~3M|5C>pPxp5s^6bx48dqM(WCNI7!|pY9EH*6 zQj%^;n{P30MopTl*gdc$sl&U9my1qVm9*Q8oAF{|xdGxxwjM$c8Vos7)DJi{tx%{I zN!B95Z-Yi;dr(?)4_948a!=^@>vK$`AVps^Al(uz-cIZRX%;W8lh#-;Zi!g2;l313 z{JB)y$BD~Grdi=O+&6=3i$bBDIJ&ri)UJ{D7~Pg;hDEsr|&2y&1INV z=_x#&Eu&Q&*-ac4>uz^yWMGxM;oZtSODgHKDspLOL2)4+H(3>))>6O;#?Al4DIbtp zcsch3Ubc7GQ>C z1KN$h`8C3VpOm@(Y)7Qb{V$gdCwNB&H0k|0Dno_FPTaMzqgVpsS{Bkk!1|FgAnTb5 zgequ?<`LBRt4On87GLPMpb%(cy72+D{la1VqCt&+Brw7edbsj6=;0Ok`ObE@h?oQn z=q%+~C;ZT8Q_;-wiP4D>t^2Qb($935Q41|gVrDS2x0}>*-^PZ^MX>~#?R4>(5Lw$$ zI)H)}7kw6LKZm`P$WyRq7}V)lHM#liI3$a}A~DZTwkovjUubxDCyVJb*8+bMR~8AW zEA56r+c?RhG(sLz(hIT7a{PumrX|y9!3FIz#Vv8Hyh;aIR$9G{g+8F#~QqeiAzXpxFE_qc;o;0 z&UTJza?%4ac+|}3QX!{!b9lHeyH{pg66O3HjDJLbIf+qB8WJ%+z@s6<6a=o;-PS|s zic&O?HWs0*adQCPP0I}sJ=&Q4JO@6F^%x;~R&)i!Mz1iS>aB`j*_?=0k@kL!|9ut7 z&tW>~oAm7r&n_&cPfx+8LmptsTJPx?TQd8*asRIdO)x zHlFh&L6L+|ThPa;L}*FBpB7?U`8gy@Q0xQpoN;CVv!n{MB~B&9bM!?4n-dRb9Z%;RW>6rNwyegGQZB zSgXCbPBqwOQ7{uoXK2a4B(x$o;KJrZQLAsL@VtrYLKFpwL&${k@|-bdsEK$62$nuL z192pOjvjWv172_+ySY{BDGo&0cnd-hsbOvy~a{qh?7ez zHdkkm>>N@)2ZIaG(HRg(X7Qp4Kq}GK^J%1(isJx)!srdQVsqse{_!iD4X`IBEiCy<5tWT^%I@E<<@3&|_xb&yP7;AmkV|`{wRdvHP4z6vd)1wAWTHv6 z1u+WQFN-JW-lK|=#;lGj$4VbE=St(|>vU+Fv!DJ2USck=G=Ppts%i3mG>ms(*lps# zwDJT{3bwsOJRz3`NNJB}0x1eW zi)e?>ZbNOp=eThL*+?|!cOS~5p4 zqbFQpvBFWi3`|;Xzu&ZBoiKy*7z%On{f{I463Ai{OTbpnSH2HDV~F>)mW-`M3KA^` zjQ3AOnuTm;CS#k?5J?>6Mi;o5Nz7VoWoKx8%QWlO(EeD^#LG;M7R#k9j=h4o|5lQ< zj2IfFdtvBTlZ-@LMsoF3MYMSBCJfD10EfYVp25@9eByc~SID*vR_;b;Jnw@{1a86FzAA`#+B(-Bq?OXv1iMl+vxY@Q~Weac_?=Z^H ztRj~iWsG0SpO_;07*~NHUj^*^TTJl#%@8wEV=p$_0bX406ibKOBDljYzT8|sj&s>pDz>XA?&&WYo?WQkb 
zR~{z@Tg0yX0|p7taaa1aU7mZUGky#W{rr=E2-YLluOwj~s|WM6%k*c@rBs=AHR>n1U; zl1vNwtTsU@@>_hKRFI%6 z>Q5`MROv~9MJ`BGy&{q_5(tZQmeicI+5!ZB6hYEPUY)Eq2k(mia8Pf7>$d!%~-_ZZuBsFqjK zg)FG+pF~Pbwvsq_FB;l(G3-s8k{$1mniFiR_{tUpMk1iFRzP9Eiw14Qmdf7|u^*G! z>rcZ240tb&N~?)CZbprs6pS)T82u0pLQpfSjAQ7u3lVA{EnhHhK7uC&)+==zKwE9S z90^sEf$Nu38!`|C!|vv3oIgQFUO=l2WaBIQMnu-2S|Xv zFp1ML!P*CGtCk>(pKcvZ0)eF?MY2==KQhblcZ*NtcZPzNDCHXOBuj+Un>Ta zj^p8CVvXyh_3>QwH3fxA4Tco?;M<|DZkdG9X5eAY0MvrlVo=FWAlV7M|3XuS1N7vT zTsV^{%dAXF%`PB&imi%%bq)#x`MR`TND z66Og2JXSWR78b*0cG4;d7WY>soyUNrL6HIY&IJD(Ll&A+UkWqRJ+U&^lv)T(u&Y?k z?I4Ry`lFb6BG)XJ_LIV7+n%aeWCw%(XmSeR49SWo=ivMe!Uc#TL9#TFoJ=~K^p7Oi z^V3yPTnbHJp@>3(0_<$=iG={j=yk~`%<0^kj$K133`^2#@8LWXHu>PM4i*Jg zG$+wNM#)JABG4`36zEsIh z6MB_Fjh#2Cp3J!d9fw>SM~&ZcT#41ENvD5_?9ALBI28Qfxs)CQlo@Rp#H1Yo>5w9= zPmGd_sBsg@IlMK=ve*Y`Td8rXhF3_DP+Ho9YPCeX-UCUkp5LI8Vco`BifpflnaWGh zIp`|7v6D&Pi)fM$Q{6g`He{bnW_amwZEAT%vwIwaNmjE>P2*^ z|7X&A6O?u%h;zbg7;n;XKcPGg~+7-^dEN&wJMM6(@^B3T8e@PxHXsx|9mF(8-j5~CYR6@SdujCeRM zBqJGv94xfRr9QcKA5al;=@HDyT=bT$*(iO20dV=vr1r$hj#Rm*J#E=7aez3C^|vxF zh!d#R5Ib$2TlmURfD0DQpS^+~gUc4YSAwH82vJ;u$iFd0xgsqMF=7^TM;Oj}IE9+JA9xJ=}J z4Xjn)MHT{&IzBFG&iDk`{=TP(lqJINWVy86)>5UH?d@dI_;|UfF{#7mO0LusS2D4` zpJZ>eIa1)4BL&t}-=QyR2Zjg+!zHze3t%Z%?nxY4O&i(IFj>3pKo#P& zi6r2^#8%r`#oO9y>TPYkH8?z=r1}e8aDlhe!G8gV!-6}L3I853JRY2C5Mcpe8rRs}L$VEMAOX9>iaBPSRc9zuHDJF)GI!^i?@b^$PkU4U7)0FSWgkY};YPCbi}VYm0EqZriQS@|@<5H3qF zMN%#)<87U=WSt8OmDGn|OiHzh{tGjMxstCfK9>I)mR#v4vDY%e-BI3;D{$J2y_i$vwtF*W_AG<4K0h)3~PYogW-PkFDx2Zc@9{d zAY9+-p8)~9a(5!ozyeo2#f|BqfL_`&*n*=f4zjbS2)$d?W5+K1BbuDKKP}g6+>BBB zHg$>jkj%xz&@hbF%OM3OxaZQkNy!}00&^zCC(W6Z?1_=?h2L=fC&Bffl;D{v-J8Ny zTUgIKbkJ`B`qAaEz72}NWGej`(4Q^WT=^(a027cDnPZHz%`qZr?nqYhct8FvFWcG@ zLijG@&KXvp^-%E&M)S<&SCCr4xU+es&!BHwk+z2S2$?H@xbCwKrHd=jG@0uJWx^Ay zF!3fqIU%=UmJS1F4ivbf+&Vx*4eXtpZ+rw1*@k|OW6~{P$5X;G!#$a5_M*@m;CL3s zY6VN$qOlnA-qh3|!U<+(JVw7rQ0=#i(axMU9zZNpI%xG7cNPsuUtUp?PWB$|u_`1F zX@;}{gP2&a^CVagr4{himyy-+QmbHmJI2=HSqt=Of~|nBM8+9w15K3Z|CnWqmx6lF 
zBz3kr@lG{^jvnYMtIwaWOn4O0Y^T)+-3{vvI*PwQN0&r)#5(yz&2t$gj}OQHMvHa& z2XrCt&@9&9*GEcxRl~|?bBf-SuI5MJ;m!?d$=UCC_%EElAcv$UjCdcBWde4*0NqZL zRHZXk9!jPjes7^E;r9vnUR8G`WyHJO2~U=t9V|-+uB=K}=}XXO#G~qH$Sx@C6qp+tQ~PN-1mc-(OccKx1_$K#cB?ReZ1J|5O+tzhKwIC)Ytv|{r!4~@U~-7AOL zbM^k}$D!c1_a#2-!X#=p;@s8BPbk7KTB z2LUJ-1(9X0Hl<|({`ro<0jV{)5J8Pp*AK8^H$Ll1E9*3F#(-MWO8YT1T7cB?`k-_> zP{b$?->`lLr*iE8d;pNf>oItAA3!RA-qJapI32_4JB}1O>GI83JcVP-?EsJo01?J1 zITru?q?|abZ&r4S@ll_xy*drxC{V;Xa)tnLK#so&i+e6ZZ->krZyr#weR{Y^#Rb6J z#!q7+2es*cN}SfLWDlffN%sB(6t2P0@6Tm;iKfF*Mr9SsCkAXV<@+b0>}P3Nm<481 z2^XT4y~XsUws7>14kSv>CcTH;tNszBkdjs>g{lCb3;903BH+Fvu5a5j;U9~38bAI#<=+2 z3?*}Vy9i*m8Pjc`onH{PsZ4MuEcpp7#;9*q(~wws7Smyny(=Ajf}9ucEG(dZ8dPm_ z52uy!)r){}CqIx}Oa?RhMV%*&I1GjUOSanMqFONX3jD-WHbB?p#p9O)`W84m&P*5F zSGpGo)?5ptg9i@MEEu}Y#aF(?cEQ4_8GxY$n?tCo*8AX3<+(zodJ#;N{^nn3%d(i> z&HgT<GYTG;l*o={ONm3k z)D^{O4D;HLS_0w4p6Nr+7t_an#I8fXm(hAw&Z3@(XAzW1eHMVDqv<~{0WgXWLP`$N zL|1*LC=t)#1OSFLK41m{zCuwjnF>;|b(8!}-4@Al#aKMZV|4ah~L zx=CzOi*eINWPGU3vs6`j{EppBpgULC@~4s(xHMZ2!=v(?SLW{Hi9IPHL3i?g-Sr~{v3#=7WCy^gk`Ya`|Mx_{J zQI8@x|~^r)Q++VUNI7pAKgAH#+K{j*NmqE~56h0I|*v2GnL>O#OKPg$;d!>R-w znu!-`WHhhLAZ(2$b;6L}nw$CaxdFIfJ&(IG86X=@p8}2sRw@IquD;2MXn`L? 
z4me$Oezf|wAKp%PD@iTvsejba)3MsKP6K;(fF2*G4r)K3o&l{6YVg}aFa!p|a3rK;V7?5;$^|*wL{^y)DL#&id#I57LoV%0E3l{>3TXy91rfPx zK%SW93RK^LI$&34bqO+`dr$-e0MfKS+M_#*bjsfT@43D&brga+N&Oe&hFQ* zLJWK-q97P*)*8Pae=#zrUh&*4tzE>4(YCdBR^K?&M`{D!`RNT=cM!)S+0e*TBxQGc z#%jt+WBm;p`frim+l2$K-U~gXeE{$%SmoQU^M9yeXlC$Kic0LX`GRpXMl?ii=^`?_ zfz+5ZwPp3FVsz6l!FWmQ&0KXVd5Wn8%FSuz3Dwi#+D!T|d~I5)jcAsEp^!S&Kn`+w z0tQK9moY6nq3UXQ{}sk$X7QWUYDo`bPe5O)T?uDk$v_uGq}gD& zOvkKd8w!&+(~!d9w*!eQy`v2t=&A$s_x*~(h?~#KH7Acm&$} zIkZ=zm0FX?iSvMM0$}7crIlN%}f;KASY?-ublv+$lPt1~fNVcOvQZ%D&D62rakgwr)g7et1o<9?Bm9G@`etl59Ys9TQ16z>S4|{Y zOyXn9;;W+5GErGw8YJhSLWV83r*bzCyS7hZymD<2hU_sSD(4@R`QH+Ix6D6?lyZkU zUKktUpRhR-HDM%dUuh?-nC-an{v%AjcqB5y-W=(!ZjLk+GD0`4-vzx{7#StcF=PJq@9jo@rh2LQP9Kenmo)B4iufRbyQEx zrhV_iRRR2pkX>&-f$9i7%UiZYe@gmQpH6&5E`?)~h1%X9jGKArKFw}2ZuucfO~D>v z)06Tg4y~UAy6ol>Bt0@05dn$jaS=AMCGjLiEzIAlv7%kaPg-{$D|#PxD+3zn+g!&vZS{x& zoqz$Iu(efshX#W_LL>%;kZU;BnU=#>XA=G?1Q%P`9_H{#TUj{rvB2r}FGNwuLjbq3 zY@bzr3O6aY-_60YCfN;6e6@&2Oxx|E3wxNQI^1q1a?~7y`E*T0qhc}^M>B3C5L29v z^TbxX1q`NtJhn6AQ$=B}f}tdnU%}NM3P}5OfE#ZA=;j+gKylI~J*YO*0!s7qRb#8L zSZUGqunjjdQ$^H_(cGixrg0#t{p``nyHO)GZ7jZ&Z^Q~|H48j9d-Id23E1tn>_k+7 z!_SZs>)keJtHz17)Bpx<__=_d`jC0|AD}xix(dp3is|v=$jgxyn)#V3%`DK!n`~=W zTAff09P1(^@JxhnR8}#cXN*R<$>)jMq06?_36;m)+@`Lj9#bJpN{I#hsL$#gN&6My zG@0zbSo(fY9IYnoBOU%LrSBDSGT`tlF%q}Ubi333i`+ax2}-B zb_#=91fT>86D3wfVpW7fJc$@;W4#dNF%6A*U>C4{E3H0jSGt^?m{z_(oF!*3092K2 z7%sio2n4sBy_mg;cdS#i$u3T6Wf0^NrXwBD2W+CD@+;MALz5>aNVzAJP|mImYb!W7pq|lgj&^;` z063X%0xG-842xWBLGDNF12iF*_S7Hp6iQ8kRY|*dp!!Cj0<339L83H!rVj4JoS9~u zMKo>Kwed5VnTrl_iyjb5HZUl*dj~2HqoG?7Wu|ZNZK1{XkIEfH>|<&k47in|Q;nK> z@cvBKG3IBb`NLFTzF~KP?=K1@mG*nqA`aKfO9XF?&bG=@RRMT+rD10(+Gl16X#Cew z9V3?ra%neQFMWoE%!|{{v*_*J`g!bid)2s=W+-12fK!wk^iLKi!cE<UIOM=t}rpk!KD`$oVWX-KCDa5B>?I0d-s7&Rtiw$zepyN|DY1A3BvE^xMU zP#X9}!-q&%@*m2E-=dKeERSuC#d!ahOyt%NpsOhax(W&~W{VGPkvh^Gmg@AvViF{I zIr;-AN`$4!h(i!F75MJ}@|rDDOM(>7Nz3Cnhix@q=_hDYlqt0(+FX{(R+K>_nBCZy z0OK1<*zB#x7;bIs6NOuFS+{9^HYh75^?m4pPoq6qD3X~;>I3XI=NXT`uWXBEIsqu@ 
zFIKM=_5Dn=q&aSg7YVCUNr%fEk}-0=gi-ZU?;s zz>cbWh*|dn9S6*m|0(Gz{LOj2-OwOymO@&hdi)KXZ!Z^$_H^`HWbI20M}5bleJHFg z>T?kwyav$e7+L10aKNcMk2wVT@;^}S$YW|G!%l#lT^W3&lNLr*bU@ASUt-`%+v&=7 zVB4Xoey>9ZkCFcGu(ed#o4x`*uW3L z9^H)rCC}{2@ch$uvgT)S__6;(YP`7B@bhTP@z0Mp9o>7(Dd4ZYN;>a=9^N?C3I85F z)(JSN4)_}xRRchb!&Fu@eF0P1Nes6T&ZeuniL{bWa0XvQ`L8>=n+;%Y_i}+Xd30~M z``#)>Z~C9bR{ej1IblC<1Knop(B}F$Q&y{U?MKh4K3I+WRUX*8nkN$hK8=rzF!8Tp z2`J)OLU&+X(()|E&D-JN7OG~10lAbG6GslBF1QnDy>}loGQ?1;$Y?p^^`DYVv-EQ_ zhv?C9XFaAlT4SkKJSiyI$u^M%`V>7}d=`m^u#h~uOtY;HZK$&?BnXJ;C@Dor4%!ww za!{6&$^_^#>%x}9S3ZH-rJ_KF)RG8%FuDPhQW%^KMtgr5EDrO!Vq-sr!Y2iWUK<4N z2AZ=T(zhTl>g0`^-&YkoY^JEoTqju2DjC2AsY8P!81#K)J&wH!S<5X9&23?5Za&Tg zW-$jQ(=OeczyV2R57=r2(O}DnGd|8Tu=}Twq8?j8yf_MNsbk$bKfVNNp&-6`G-eJy zKreGclaX_O61iUBM`1*coJ?)*O$jh$^jd&X-@lN#ZQ<&$`M04v7t9Tp2Ip@Hslw7_rv9;Qh5(dOx@qEosR2~W;gzdA~j&ogQL%|B?8c_xrAiegzBg%ub&r{ed_ zGyW~QZ?{Gn_hv@1fUfn57+di?lF~iKodM=-LRx{-j9@}vIl`j++WSi6(hlbR)8#Q$ zIB(LKTy8@l&3tE2@kgVa0-bWuaMIz;;xXC1Xh2S?EvW^0a(lONr@`W_^^u8IpY*F9 zowwTX$_jamcd0=x47~CR5dSOW++A{!PyB?G_93eY)Zz&gee7T-fgFb2_2%d>JydR| zZNHJxF7AP{q|<=fz4w`9u>(+tap%plc*MAKj)h-g3m7+D!Ndaac+OOQMVehTLOM}3 zj+A!jKa+F2;ru~&8B&|Uw!iWtf>8*7%sjPnx4~Qc)(b1hoXhwZUVuvIt4R3EH$s%PWbK>P(!MLN5-9PD}A_V+VI<}CK`7x84b%0LtJ6Y?Wl^$fTxxHD{S*d z8#m2iRhm}%lF1xX4k_xK#$S$hTRmgUmTb8Q$Pnmmu<|9TwudurnurMD)7FrHUK6%c zAZXnJef$uTk{zf-6IRG6@?xg`du|z%(uXzZuS2O8JVw=Z;gHI2Iq7X*0#b1&G~SOV zHgG6>Vmqj5+K3bTB^UBY<-}-OAB1HZa!dWSkGZ9eIm8XQrA|b5R5HjMM)5SGPbzZ~ zrC$R9`7HXmPKzP8!4ZFe9q}Wyw>5OcC8k+m`iG{o9rbM<)0lIr(`+$d^9`#)IxOI= z(EqlWb~Urb*m{r)`MN@fvou|(s-t)={xg!Q!(F1c+6~9b;u{=^ zBx{jIpvx&_?VSMXN>zyG0$AqBa;HGM=QJPU*L+-u>glbHEJ zny&gvAP!b1OS32P;wUm_B5-)A{WieAY#k!&Fc zC6n0#vjPW(8`qw3W&3Gvqh>K$+ouypF=JQcFV4$XKzj~w;Z%xlzVR?lYw!T_d>X$O zpc#J+^LupC&*2s3Of{nA67ZCJ>GFTF@U>x1=@84AMYpI8Gusvmru$;aWX^4 zv^r8aCC9AHOWaU5gY(2FITmGJGRZYdX)$OqV3MZ6`#2>(UYQ%`kAvbm1?cO{Q1Nt& zl9^2GW+goit*y*q>zJ$qL)hT@uo%NTMYE0x;|b$;8lH$awSV6q5rn9>?%Z(um-qn| zFA=TG(T7@#nTT3_Ohxa_R9L{oWVtM*%`uCM!i?av*}*JJm&^TFVl4$|Bd 
zj8gyb>K{SCxi=@(q-JjAz=+B2s(C zNI8N6*L1OFY>Tb zA7n?e7Ol~M$9?SUu()4gf&&Zyf>D&pcwpdSWShK}B`k0lSo9CGTxXH%;tE-ik9hjX zXjH!eoJ)2m)0?sXA?H~WTtzc!FH~-#m6PxYK?CTP;4qU`Iv2_|n{Zoi#-8tq=-Ikk z0lpw6h~&D1m-7TU&miZSpJ9}h6fo0TXFaykb6_uA<~>p&32KcV0wa{CMJN-js`@HiPyPz1|`6IwYD6$@Zc?rs! zpe#?H{*eXnlb&M%IN@@N2I)BielLdi(Nw~yj4kvAoPa^i+EJ`b=x5<{{YG!z0J+I+ zD4>7bi-LywGvdFz*|@jo71$UqKx$0z+`*nZ=`V+#m!|-Ea}%nX66?%F?=~o@+v!|* z?>&4M!XjfVj;a@uL@m{L*+e6O)A~ISB<*SQaCh74C8Z6HfAjijS8Pk z$VLf>3BHZV#_S{5@hvX3=ZOHV4BIsv{cr};Y$dMyM(umL*7uG*Lw&cGVdtY*=S#J~ zBda0R*7V#_mmqKU)ck0Fk@ByM<41%wBL~U6LbgLCn7n8b~%u6IObEoGDWrS}yF9IPnilI?p2yHPq zJq>|oHFLo@KIUjV7vQ(fexf@`D(SK($eYEG zt-aqSc$~yUz6Xf1-a@j?`YeOzJCbd(Wtyr}QQLli$!Nw9bd(6%nmsWw~rG$zo64WT=bNH-{cJAULB7fnz z*)lx2A8frhZvow{X8{}>vof{_UPrRi`w#=>mv}-!CPV$cvx^bY^9A}>AA{fC`wSQj zE{m5vHqry3S;)|T<)A(lz;dQ=;=`ajc_+pW_e3?Rukw5vv2m2n~MT``?W)N_lIMLX&= zj%|AHgh7=o_u2`qfLET;Svj$U>M^z#BtxW-KS zy!K#m+vYf(214h<@_VP^)rxb{IOO|*7hTef2B2)WN|0a}hv8x8^Yj`8t@{$ZjyNY$ z2XjM~&IS&(cDnUrNQTpvv0%Mk4a}+fq^g-_@_Cv%nSt|Y0NDP~_~4Cc%o=Y#@3r?h z>9|rfk`sWTKdsvK$t>=ee+Co<)_W-{jA5m4B%d$CRn6_8u`n~+aS;RL$aVq6)B<~M ze^gNl;UG0@z&;@oh6B4bYnI*Mv3m0b%9}7(VBrNzjbV=u&>i2S_E3@)=TVYAE=khGkLfKFNxgr*q zznB0YQ}|vd*Uc_4 zkL&@=A`M6i6%YV{?E}Voi4i%lAB*9|Wyqg-e&bRm1X=D0461~2_H2$hbql?90+81o0pRr4&a%f5&S^$dVwc%m|PXU7M+Q5iI_#+r8XA< z#0?zFD#ci&&3J8`xEjyQ#AymZ5SC)xVlAE~N}a}&HxN?MR)pgF0bu=`77kG&K$(-usyR+PLBGxkZ2|q`rlI@J%;nsOG91ZDN(ib|3^Gu88F4rP zHQO8u){mlp+`&>R#!?%6nK|V4rGhiGPKd>XnENB`BLU+fV84k|LuwBNXdeQDSjn3m z?;M^gkhGWf;+0V0JlcUU8JckuOSmX;ETa;plOSs_m*_75%n-sbScZzhGR)rjiB4Jy zK%2feK?AU83|u)I$vtu6hPnlungiVvt)vV7#Y%dGzfh^M(38JMr)MOTGTd==)d#9* zasWk>lh8i`og-w%xwkniEN5Jl^^EtScEl4jc#opEu$6aykH}1M2*6^Wjbh}V8Rb#K zl9|FgvIPlD@(_}KN$vH!>!yUWi_Tg|r3Eaj?C>m#X!jCt_(aj3_J$R4+bdW_oIqIQ(dy4b$%Wv~5JhM(T}j$IHV$$b^L^`DPN^!uvcPBt^T z!B>|ytMoljYT8o_geMo`K<1oedCVtDYoHqw$#L3p7OWRAUXTbxYL<jj4%Z|_I{0)lii@~dqzJ#g%#cn)aJwseNGhN5PkS8q#1z4+7%*NcD$1#q{3$W~K* z2$&MIX?Kg;+?cu@P}Y)n5oO7>0VZ?it!LU|*TGN<(Ml^l4Ysi+MXpJeYi7wcv*ns} 
zxn_=BlOxwGkZYWB%_6yGu_l#5(UQxXzZ|gsR6iizEpKV;llLLyD5O85%@dZ2p-=uU zB)A&7-QRozQ+mjvHWW)8zr&xY>Q6!5iFO|J4t%%;g`ska_n~F@H=V}>m0NNUYAM+3 z0_~f;(~0kQE@GdH;qybC2|Kzh8C$MUTOQ@2qu{snkO!+-&&WI1u<{$&=O$d>Ee#sg zCGUKIJ>8~O9WC#Cl>L4J@y!;>zCXi0pJ$&hvd``8vypwi&pv(Za~Jz;XP+JH^9W)r zdFKfni7Hav5>QPg)E~jfA1D{regHEie}p1^ddDn4$2|PZ7HAQEk0pCo$k`^EgFkfm z!a!%wLa0mc0<7266J=|y9!*Qs@BUF(hE9e8@|JzL;AenSuyz6UQbvvT8%59*>7J=vdS{*)0gEwys ze??b6o_g@5nrjecsds6QOSQ*sx>NvO|DDM8oC@jaY5<$wod^*CV@#QY!c&17utD$6 zW$M#d?dh4f@F@dR)cQCA5`tm`j!aB8gU3G`(Y5MhvO zDEWc<_@{zg56M?6dc*S7Sxmkv?YOW+_5$+T*)Cf26QzfjzsJhT!?>>cuga%dL-OSB z-uw^p;ctEnj~_E#xoFFq!{x)U;sX)qCI{IpMQ5Bq6j$S#@1YxOz34s2`5KkVW%Q&71bl>LA7v06rcoi)8gx~$%s$D#+}cI>Cy;aJ zNXl?=Z+-#^5W^eNJF2L63_dWtLD%M@hkiWD5eD%0p&WpV+2#PVM$NL78Y=%1;*ly+ zRUT>2^N~D~G41X6tI*W#k8ZJrR`~@g+mR7 zLdu_{LM+GGT0K+Tn3e;>Zzykj7GlqV#}{ox%iUC`B?L;e`P`|7=x7 zkf8*ICByo=75j4)!zZ`F`!Ld&7_q{~lK0HC!8tZ=x-oIczyQGnb)>P*q^YP5#P3hN z#iXpBxMH%FM@fKXShD!Z^1hn!tZ4dxCojl>grvkZv=vnZcRF5Ju0C>;$p_r!l?B*5nzcIsuAi z_G=eZom{x`G*=|KvE`ITZVam#(zn!VVMao~U&gD1sfwDS;{deh+=}ZfUP4z+;<$Hk z3XS+UFii-@&q;@&z4MG_`SHjPgUYkp&>j%3WBPOKTTmw^O1E}u_E%>gR(EPc{f*d? 
zIF=4fr@qS2 zLUyvl7FhQW>lrAEQ45F4zCIVq`dHahuV7h+ttNS0w^Fl=&V#y2O+3wHe-r2}uj2KN z15!)N2=drjq*g36m%z&0ZQ;mA$g-52r#ypm(mNx8Q^{HH({u37?4^k>G8M=p*K(YH z5_&|(Gh+j4dJ%>C2k6XeIW4XIi%c8PtsOUXKDw))gQL#c!&9X-DVGk=Lw`mM;cUhv zcy94Njo+BBl8K}Xwk^obRmR)wLd|68D(5Q- zUvA!-brkCUT21|&MtR5y@VCgqQnh1Ii(i|+*`!@zA z3n;~-MkmCgAfCkoo`H$K#=sy}n{s_ z)A4Q@ls7sVGmHjjEs+Ykttbn+kdDs#(vXgB%M0v)?uyjW75$0n=u%%Af}fbgMN!`F zW<9L}h(>QgyR!#Fpm0mD@;fR5 za#_z{^aG69FdEl9I|;F!N+>+22D{xoi3M97hay;1GY-%xt3#1f!cEw>kRoSmbxhc@pyg~Bbt2mR9;6mB4 zF1#{U#Va8u+A<3eYF56~16F$P8p0s=`w@`bUwu0%Dic zh7iS}sV1yGKjz3Zl3cTkMs=f6lxSG@FOqBWx|A{`O-(~ZVHj#@mp6YFJ<>J2bU7<- z2q!FpxwFxt7GepGI5Vl)BP2B&>mOvgOoNk7#aoGr1E3t@;0cqZFyXZF*&!`UKg> zy82zX&6Bq?J=3eiK9B_?vy~^Wdk!f?*xVme!Ms`6(=I#W{%5nkz zH*oY7){3$I=L$07M|D%6QnVhx`-Z+RWP4ht#zIU(=k)@OKoztFQf0Z|b%- zYlx6ES$7xCME800)5u9SKKK&HB1)tuz=ol?nbY}gb6~5|G{jHVV5Vt zuZqu^*nKsoySY>Sepfb>wADG+9RzBBQL*rp2+($i+gg?tjB9{i} zz#Ggq8?)^__^&~Q6rs~)n?1l3*-`AAB(-?%#$z%MKW@g;T~0Cu?}HWWboXKEJ{Fko zXP>dqUE~y|hWz4+rf;JO|Az6E4w-+$e;t`85Jx-)kMIfpaqL$dlYR*LCNvQI1?I0o ztCq<>Ftg&6=T!Smp)vXm7M#)3@T+^YQ{?rF)^$vm2NV@$(@f z@nOI@NQvHzgM0gr46TLa!F@~?htTl&hk2_C>KYLk||_>9gSj!4=j8%K)nO>-6@*> zZ!+D!UQv>^Fj00WPq#uTSuOSWk+9%AG^b8)RuJPB zPw$jV`^2ko80o`EolbI~5MI%n0*V67HmIa%w_D|s+oa!XItTI*qNYNYg=d&`K$j;$ z`h|&;eqrYPmy_HR$l&zRH%w^n-p4go_UeGrDD5LH)b#8Sr>#*YsAWhGktquzenda8 z9HSSqRK_2sGW1Rt0c!O=$n+LZT&k^7Y@37G;lgWCR(U{{s+u`fA4LlgGBMLuWT?A@ zWTzLqoC|6G_kffS0puN9>_VmQ-i37b_soDp1GvnXY;}UNo#mIh5gRvbPRbP+HZ*J| zO=_BLRC+&-x((nS7LwF$bPQ5ZTiBCUO|4xdcr$fRp1n7jWF!!No7Bj}1Dc+;CYtp$$5NjSVCpk7+9YK1F!#17kCijYnS$R0U>B_pS3R@h{ zEMk`glxX}_@ghr+rrt&%Vs2qVK>FlUoSH@}prz4@XA;AT?V8xoi|OxH0+4KF(Mp(( zyWuJP3K#QtPpNSVa0`lrp>;C0^aa$6ljJ+wy7R>v3Kni*4<72j&}1)hQ&OY z{S>kTSJ+N_7Q5&#nZqGz9WE7o?+-YD4q%vy2H^IMBiRDxUE#N8 z*!EVQN;>W3f7kR?lnc9@hh5eP^x;2*yUZ3s=6M@Y2*5-FCRo#qfBH4i4fbt23 z1+WK*KvoOGoyyab5~`VdYdcRLaL;Lb>2L8tE+UkR{` zGI1Ctev!^V_;=Z(cQKq=ZC^Gfwd3U4Bay;X=nlg)(fZxS`bs$dOcFz%_#HbRjapW< zOE6+-dm-(-iJ80DsXrQHpim~_VU8Yms}Cfna9d%boaO#OWzgw>r*8Md+Lg7pDLS%B$@ 
zlw>7fG^5;hIdvP!ikA)B)Llp!wh4;3o#i~nLqWfM5}Exo#(Ik9O6>E_pY5XEKx4VQ z^K5J0PNU{sZqg~jyOZshC4TigbE%M%fZ@Dk}tOz_T7A)L-k zOd@)ST>der&YHkTkVsSAhU^%^>*(^@HP$8C49mYYwt5p>GhDTAJ zLrF;QKVhBem9U7K^y!EPeSRAvvdXGHBrsg>7Br-9 znkNi()5B@q;FpMQFvpXVUh*X>e9*jquak~dr}7?jfx$>@u$&LP!Q#jC(RbDj1nX;y#M>ldOzq@l)?xZ>mMpr5#|Jxvx-K*ncs|OcBuT! zF8`<8!=jZycQ9JqKM)O6kj}~S1F@m z2DQK(j3x(AMUv3VR;6WQ6W&LaG1R5Q$oi$A;`w)rYM5Iq04DlWzL~kijE7SkC?9UK z5K=*CGn8`&tX;HmwJJn8d-izARb`cFE+ZXCZu1JPQ!_xYTF^B%uCvM`0>ZOn!l|6_`5#9->>zO@JgPJj`4y z2b}0;OBieXSdXGG;99XvQ(lkVJfyt#Pro4Q?xRy|VbtB{q|ZKko_(21+K;-7H*nGN z#Ay1>0OF~T+a(-w6xu`@y_;(%vCB!*=O7~Botk01e-!FY+<%7!R*!_bQV)ez$~5EQ^58UY~fl{AqHcDhvaI8%ojj2vnQahRKWqHP_u zCRWoB9_VGNsp};qc9`g!^;J5=9i9ZZ;oTg;dXr9Xp(@|`hbN<$d>EF|3>z;3HOz-e zC!?{{i25Py!&JpEOLdC|A zaX?x^sc}GRLz$2kGS;hVA$g3SQN^r5K8Vyp-YYT*^j{u`a0AjPQcZX`sfvZ)mFf&p zqQT`QOpJKlk4P*V@cqrACIbvPZA(qd-8yBJ#Ytxl4)-8pmM6><`~751{74;KA-2$d72~j)NNI3GEXd(^U898!=lTk9UfMe`y81%}* zBvYk9TdBc#fB8_t4$T^g(%Bor0G&GNcmK;^HFI`xsl>_tqd=V8xQQj3_E=yil+kE0 z?V@jQg=M&E6Qe{+hKQ9i)l8#;nc_T+r>>AzMlpVdd{VOU3uGGMwUBfKSE% z;qa|Sd1%T!CN10WArtv|Vgt%p6d`_uB1G+&uaJVKs?n}6HehB-zxb4iQd`b4qZJ$n z5a}u^e*TxJ$hG=f=x0i3rFy>=m=f3JW%%eK@y$lq7lf#({9lF?l@p=&X9Pw`je)PX z3V4A1G2|oM9rVv*K0@a~f8j#**F)|9yCT%S88M*R{wVl*EEaBP4B3LMt4>Nk>|q8x zZ2y31y$6}r*JPC22z1(5R4hnLrNW7#rnm{ve|67uJ)ZK1xK;?~_ zr{6ZHfkdobSq-Hq%wuzEr#XTub$JdeBR4v9B!J$Fq5TakEKv`=;s&4&Oh+{9bLmM< zQBO|7&Bc^4c0m)&|DGy}ycvZHLl&MURe)M+ATjlAp35*UKCsBXEm4d*Iq@r$t<|>y z8C(|1*3HPrNu>Gz$KKz-MO|!<2GEg+)n4iDhXw6e-?KftX;LqM2b@m&$5rsjx_~i1+`TnfH62 zVbMOH@BRLNzn?E>mwms_IWu$S%$YMYXU>c=>Jp4P^OYVbFCz+A(xcLK3F$gZr?@MB zuY56$njLl5frSSq;^s?~9Z`%7o9DQ@PRE1M(4GtNJ#Nbix!v;4>^#5$BMxTmbi^0T zs>HkB@uVfr2ESSXGtk^_FfVrkF1Iej9kF!IY;tj=ntoJnq8qldOTHp$Vi_UtU`Jpw z-$KiA#;=@cw{*IAPOaIb+)1XBk8%o@ZeeY2{JL@LmgDYUM1AdXb2hTYdS^9RH%qki z*plM#jB(`K%AV)-VrsE?XsoG1T-zh0R)~gDv1RORvyfsIjXIScMmM8Yr4@~Ol|C7e z6RN7$ydzq=#kIXDkX6FEacdqF@5ZAXF~`N$v43F)Iqt?!@$LpT4;_0i07pQ$zdP1( zcMG$*JV?erhBLo)<+i(VYL)>dyL+d6jB2JhTMGrt(u*lNVYVJi6(*#Fxsf=?h|sk^ 
zuzlLg!0Z7d9Vul|!x@+hL}bx#Yf-wBcm@e9yET7x8Rj#by|JJOuN^?2q#Se?ueoz3 zdV}sm-zPdvjdLX!6;nNq*WwMOcm4>yb)vN=z@oR^_mI4Q5(txsO)Br{l&pHbcTA*` z@w$Sw63K&!-9CCe00Z^OA|4UbAiJ5N8&JGD^xo4r7;H95n~i;R=9f6Thc>*Cg?s@f zY?&{G;I;1Y?1;^}D7GsZ#n$aCVd#c-DWKRt0I)h0P_PHVUt5fQ@$A%&MF%{*oujVL z5QmD5XWGD&S0^-vKG*=Sj2z6oa^qo3CD3~9q#QH?j}U5EL_NH^122m2!qsMaV;HVg z9i%VzvgVnL3{kTp-A~x58*!1cNeYwmmV`aH4}~}1nuos8t-_G;RRJQ~tu8z&U)8|< z7j^VAHmvI$_tzd^zqN80G(NFBSfZk1ct%${Gdh^Crz&J@S0>d1O?x1HfL(?wAIldv zu$h6dpXZL&@S3rW)}F#DT}epH;hZz4tPO$oqDkY>nN8bB)z*ekd*-BU7z6<#P==Uj zPnvWBbV*f6BMk^d9|-$|%y)JVL4Vt-;U#JY7C07=?3Yg377Bnv<%jA$_TWP_P#cNY z(!b=`3$%6`GQ`W#vl8SoD>JKPvdy>)_lT}}fH@5{8#{3UiTg?n*-Hdt z!xp1-}rys;t>d)Ap7h2yz0VnGMO9Haa2zj&0i;K=T+)0uIuSC}x9DRWK=2%!JxE}W zMfRD%o|&~Ge+PpWpF2XXH-3yhcM136i&YxTf}_MUP>V)vWVe#B_wsa7J`8NW&GriR zV)Q{aKv%|ae?zk}sqrjZlWskOmrX@lqFpVEPh$2aLgx|jz+0lIW-i$k(_Ae+`?LuS z_NjPP@IeJI4FF4s2OPY73v-{n;Tz!p;~OOAsf()O>%$neLpFS6`@%7QrHMLJD_UZ^ z7qQKkb0J*iM?Or@xEk?G@ib;9>awQL`0SRp!!rp};vNT=nU=7FT^fl2?81*h)OkzbJRz z>BM7Mpt4F4F$XM)G!ebX1L*DY1B<34z)S|h53 z3N3w4PdXi!|Tuu4N#O|(h@=3A_i$~-~L)n#N+ z9LsZZvY?Q0AVNKT?3@A2jI=_;8!(1X$E=lTCt1`g=iNm=yGCY(J&fMrc$(@gQJ0C% zyiY!0uvRC5oeGn=9?3i;-l(h6OlZh$s#E1QmBiKyX#ZZkp6_m-EEz+`qn9lT(mORU z|H%>AnV;i7J%h{SFh|l>%>dPARpEh?ie77jMo@`{UKp>eeM44VY|5lR%S<(wibk3w zXkNF!pGk4$6U(@tMqT4*#QpXHy!j+Zs?bQl=Q$E;0aRkP^ai@%8o4z<%i80CIg?Rc z9bU`5tHz*vP4-&MDdu=ahW*k^xU_+aU8t+vahIy+5TO$d5SH$2P%#Fv| z2PUbQs!2Ft3BmI-!GcL^zbxVXO1vm8RcjBmR%;~7kc1ys1&BWi6&gpZ25V2qN_9y{ zwy3Gy4vfl`AA`DrH=&!JfZ|9|wZn%8=%E2f=mU+8OND#Rt_igE1g}|Z?FltkTYFBK zt0s{yP%Whs_S*5V4KXi@PuJOh0AQSdO~TpV%GuC-_7fYRG4WR-wyOODb|4mP_RAV< zsVe(8#9jHO0&}_jB_LzI7@yjoWOPH|0T>~DEmyZ}Y0PbsMt84J>uqUd`LF=}(~w?= z^bw>za#{uHVMt#?It3{Zs>4npVMndj5;J^8g3d3|+BJKdppRjcDBeBZigN89@7{xF zc=YvqP~0Z3k;M%vXqypfU92I^>iV0AUP#i>rxxHE1$u@jL7xzkss-kLCGb1Htn3ya z>+|fBIVLbiV1eB%Dp+^3Pqa|kf|0gFB_wI-wqL=(3IJl(dzcR#{#4^n+z6(3;m<() ziI;oOD*PFQKOexKh4@p8Khe>e&WE3=XWA}n=xf;U7IVFZhTCk#JF+Yn%VBcq_t}yZ 
z`c6X*$H6PgJhsP$q&4(~yP=*U7RdHW4TAOH)!v8k7WTK$H^-JJshiK7iD#v%3gM*_ zo9&_ETliPZVOxr%Zai~FJSyPk?9E_ho)+KQO!##Zhb>_oBURljyz(^samLbRp9Mr? z4y*UX>^*ZjG3M~t761)>Nv^p%y>sj-K}1+vssx3BHwgzTL#z#bP!LO}U7sqvf(OFK zp2go?>OBGr`6|xP(eEF2T+U{`u{aLqxwK=odo)d$(^0WSs#-&DpM?%bF%1}r8XB1< z;_05ndcyYZ1w8FVT+jclmiO<&;zfM5GE__o6Lgys6WGtnQ(_Z+Yc-2G$&XvE82GrQ zv{En5(lrO6ywS5O@BnTYwX9~LM|HIJVGMQ{Nnf~~-D%)FZQ0~8%A6H9A*aO0k#*>& znJTaccGV2@tDdi0zALswBPB*i?9P!qbz(GzY>rYrj#A4ODu>L)5ao9a2h;Oc74GA9 zV~C^6WA=(oF77*rsvY4>j+@8f98?UIS4md5pT5GoGTni41!vJOBmG94YGC*_+MUb71dGP5jOBMm#V(&QCRQ})t&KWyT5zLJqg{iUZQs+3qgntO8I7ubU zQ4Th4XvDso+kqObL|5O%h9$S6;r%L^xwsC+rbfLml9{KmTR*j8#0D6$<6QkTmd}We zUEUgtH^NT~h_*K9)Gf9ot%+u{yN){zcng|QSGfzlQ;c0QL$kH$3ZirK_b@^RI@MeHNAwJGTjBT{dlFpp6>D0H2ucMHtPeFrSmM%#NU6nW;4oydUAi<)oRD%hV?{*B?#op11 z$6%WO2XC)b7)nKTmRJ$td5i$Jt2RdSdzkW;yWYce$tLAgVz}6dCxS8XfBaVLX`$>q zfxL=~w^zrUR`0`wlJuVV6`GaHFq(9)m>4ZC(Bt$fQM^44gQen=UlZV&IS`&ezdtUn zkATX3Sj_jIqC9079r`SL@$eFcn+E#xd+|z;XV774ewUcrIn~+}%kE0(OyC8(tDLXe zGQ}fR#L>wXv<%pl^;fr{%gpxjQ7b7CkFw}}k;rW3fOLR#@`ucS!P=$ z%kJN9lceQIQL*A|y?FZ=>_Zvq7O*$c!O*V`cgS3N7YJQ?r`WUn!`MA3MwOTr4ZTnO zzAZ~Tm;McR0Uc7P&1DPzZ<|>cEN;Jvu;a@2EOWHz39x@^nf(K-sK)Z|GNTeK$);)M zeh<$+dmTvkQnMo6ICETXf`V6!5fk;xPsg$z^*+vm=0%6+S*{j0uwh{X8^-47&=1xS zKUKeSh*T-Z^;R~)a4Tqet~M6tRmAm8b@M!f}t`eo$<0N)c+FCwFWtj{B z>L%%49PXvmsG9W?h*>`u-FI%mV8iLXdr~HFT^hzZdU&*TjW&u{f~0%viEOpYA!{JV z+SBF>X&cNni(7H+o`zTP;0~Z}9z@psvkJ`;>`ztt1vc_PzbG*KAW>7RMS3|0}(bQ$DisbiPr7^nvo~zsS zB5_fG=~~c$EwSRvq0yksmnTKXIyK-w0%82Q(?~2suofPQ?Nty`QO$aRyCW#gaw(H+ zOfxq;uRaVGvY4pLj5#e;RM5;hd}BXoyV??fXQ=a>_r>vHG+zU~MK8|QSsROK>rCuH zLOx8rq}S1Sl3qtE%zcXoH8q|@%o{6*3T4OL<*xq43b_&E^TW*JpvxDmaPRU%vv74( zig4-e-Ip^D=rhpcSFBL{z^t9qGGdN1zoA@;*A_?8*D)g7e684`zK`8I%tm(gu7no6 zpV%IVul9+%g!I<2jS27L5emFed5`foRqbK3dUNh?Ta0?>Hp2V{m>q@G?zIH%G4`qK z!JBjYHe%S!*}4q+#Ts_;l|4w9e-fSxxp;-?bn!kmfczE5lY~>Nw4pB@73LqYUpV%Z zZK@Rd()*z=b-~zi6fZnIY>x1b&~B?>JZQJ8h1_;1h4@pfO2}=o?v*whQG~<~h1@pK z<9Gr1{LXkItqj41)X>^{aVhApOvzgB!o{I1y?TC|Fn@RaZQ-ki#NQSXdUqZ^2E5#E 
zA@_s}Y7#zA?@mC!`JPZK+ku=Otxi87r0=xGt3zwqNpt|UT$T!GW-vKUCKx*{I`m21 zp)v<;X|ij!d|9VzR-uQTG*z~wHl8#O!wtLnc>gx8F$3@NWn=6hjcR%hmbl!ODDjynpyseb+g|u+!oI6S}?Z7b}ZzVrQ!iAlVoX! zLG3E_r3JWu;SqyEL&V{C~kA!Q8oiz!^~8=|DUQS-;m9&a9o zS|7B%icV6?=JOa-`GD@w`okr50ir!f9!l=UaYRZP=ekKD9?Qaoj3gZ@r#EKG!4^U{ zq91{^UCnkOzPc3DK^0c*O6xa$;(EQcSu3XCWw=2#)zgS&5x_Je%na){zh&A`S-CnK z;X%;#kz&d?YcmF~3PBvRCWfC96D~#l>bmTQ;JLkdV8Ml8r0CdR`Q%96x=?OLL?;O} zpfkaumF;QyX2dwf*CbZC8Bt&VE^5NIQsTIi0Oup0#Nv$={I=)pXuMKqMpQFKXCw*t zV!yEf$5=0#5p~_Y*io#DqhO#Ij}sLfa&a%px+ii^QZ(MUZaGQ!&f({y#3Za|Hdyv} zh8;ReLXsXNJ2eh$3%drdX5=>uU(dJ~wZ||`CFW_t054IYxy@Xl++D0GSV3YVNW!bw zFC2qY_&l&n4lZ|C-M|?>ZOEjDcywxg$;8a&AU2fVx}1&fip7k)v}osuX*wErFLz}q ziLUQpL&kDG&1k`U6!=VIH|;d>%E8n+MJLSPZ|%{pi|~w1a}#3D-!Hc1*wwb&Rxw4- zHUb$VMkS=6$(xEM?_SOTCJGh}9*<$Q`xG6mL}Sa`4D=K-?iUFlREI-jR);x<^8LUO z2Mb1I;C^XqLYp}_gT7k=RiuYYoKa4k|5#1CVK&?3^82m7!EeRUmxW1QZqdc6E2G)9 zCBst%_E4H0IibO>Gw@$^Drsuu=H&4noA8P=F!$NI!fWyYW9)rVd`HM-vT-WUb+YlV zKk~_jOeBoQICNVn!4QWqw2$3^{x2mC(_3$wt;HDJt28mkg-eAT4L!c2PqNL{L+38W zfP7Rq(I?^Z((E`PKSIclWS;m!epE3FgPcD`NEs*H%#KMk15?D=7&#|c+`yKRQli1C zUb@WpJr?(3%r1;omL%~wA)dk38~|4<06a9w-Tb+UhB%)4h-da)pGQ2iPl|awvyx(u zcxH{oay&D(u{k@AEuaDm=*wsNSOAV~#dz>ZM;}7K9Oc6G^9%r0_+JYE)w+~;1%O&v z%>qExvRD860ia%*Ij8_o^0iUlzQf{<+mhl;7#T!~p?{UF9wmT9{KPoa86LKW{ZcW3 zJ%+Bd!2K`{(-W3n^ToDVy?Zv{n5Yt%EhDh=)Ll|ilsHsk%MI4XXnNx~Hppycacb4^ zTk*V_B@N8_>M*h{G0Qe1j-J9(HA72ObiqmHbuk5$C;H?3d=k1%i#AF&MAW zb6G#YVxoga7OndYy}H>Z^u2R&VbU23`xr&PGz`>XtLVjp7ve~9s4YojvNlenzy2Ft zxXf2)fzG}j%21^hdt#0^{t%dBZND-}{7`K9@Qd28ck;~v6ON<9hvRnT)zvYlYo&Ej z7*8}Y4)}}i7f<}1XB3~EZPq0xoLL(v8hhVq1Ip2mUuBn%+e(M z1&`ZU0ztvsuY*olRlP1!cI!yvbcc$YaY-j(w$`joMvsklM%m#Xr-K;+J8GVpB%aE` z=;&9n*Z{TMbLzqj+*@D)mD6UibKcQ-=)QX0wYKCqJTd>p-#p>2;&9rOgmu+xj!ihu zT&i<-g*#4{hzVZDv7nb`n53%ck^nmEW;P|vcg%_AqvAYJNLcvrFn(XwdkdLIFB`fK z%JE;EBaBdhtx1aum5C6$+S;BaPR0X3$+}SoPir=-%p-71)Dj9dm`&G0gIM@!;A^r@ zJUD8WHs-iZvw2ha=_Zx=4``<0GwYXqB?N)L`;>X{jj*Mtgdi~}Az8ELNe5p+AqLuJ 
ztAycVlfAPftlowd7*^<9bO8-#U8*5(p&qv`)shlJMy7BJRXwPLG+T&I=&1*g6S|na z!lrt*1llh5nETKw%o14iSl4q!BC~l+Y)EK*1bT3a7T8d*m<_JpqX2Gnppu_-wx3Zz z0f;#3J__*XCaO6*z$oxdkH;u*WDy?)3Kw~d0&gu+Mgj4Zb%~bPW-a7lA!q6HI*fRB z_ONXO)^+M8eUp6MNYHz%wUn4yjv}q-k&EPg-H0RJ)f#Xx^vy|~x+JH2}ECSG2> zm2O_W8yX{_`3DgLe&lXe)xmf&j{!mJ-8+CMGrPnHG+wGss>9oRl-o;8BllW&Ewp}K zr+%M)F1yJ(kG0!oX_|vs&wBVjeIQjDu~}H9b$GO+DVDgP@rz*V!~oUGpxh3;Y&b?a zZw0U9J6*D3q!ckMDBhVkKxJw8K;B7&BN&8Af`vSdqxn8 z?-GKameD0ps}dru)Z)EkS3ZiDIA}o!4XfFVS9q$LgH&u7);f+Fp!Rg7aFLV3L3aJq z7@)CEo6j+wHw3VW%h9oO!r7L5^TY=z` zDy_TRrWyY587X1D&U_*6+8ZK8%~p`E;V=@d`O!--w%3%kM%TV^U=a3~%yN6=h8 zxZMO@sab``wS*PumJUpeqUC?*_XuFCL+e<2Y?kh3V>d zK|2M#&@f`U=M-BJ$lCXw3jhAI`b#WO} z7tx?gOfC&19nAriG)qhM)1h&aHK<26vnEK>(7pR4Ne%}lZeT|>@5iX;s7l6m!Eiou z7wQo0hCX)ma46rfpy7J}%c(&W*D9?~nOO9TFPYHDG7*Lvw2ArZY0=~A!QWV2lTJ5< zm`93D;(x|ouPAbBfZjeFez!)m-xVl zwdUE@$$CpZw5;{4%5o!&E;eJYwK3L2Zw6B=^tB!qGi`0Jc_N!Mw7=AcqegQOio1PG z386D)vyJr2TbL_X^H25;; z4S`)aR_ue;0rh%FZW;TC)5b86729T*a7pw#9rF(!j&~BJa<$xGAHiIbzQfyqaF~JO zu~?1jJ%&XpT>Tb@iiz_CBMq&sfIMqfFC>IO@5Ulv{2!{1M3ZsuXuc9mc}X}%gYz&@rHx+F`dWa*YHFbk~F@StTQ zR2Z>3EW~_0!Wmg+VTULHD#~D>Xf1^%1TH4y-2GMf8}~l&FcpXZ z2!OawOP?HqOPo4HJQKujfVoOk+tQB&;wF@d{`2cTDHE@ao^n0!WY{1HYiZ~MjiImY zodbLkhtf|{34vVl@^44o%ogz<_AQ^Yes#!aG?UNWvF`HO{fmoyZa+O}`F!kGpYoX= z;*`(rue!+R5t{z9ANlMGtu;C2Gki)v@=3XT=5qNYHxoSuW`oI9dY3T2Tb9pXi7Z)R zg0UT>^9C*hiUf3W379(vm&0#RI|Yn62Xr$ryT*ZnLgt2Xig1iCiL3l3F@$begh76( ziOJWyDh`bY)Vw6?1ucKcyts@fYt~X?4nwbj5*6<&)7hmmmOHuF#WaBU0uP8_?56cQ zaIdgiY!PAR$?9E;J4TB9eI(1@Y>g~^E3`BU14&!DLLX?r+K4byNYyEGjLY`+F&P5^ zTb=ABVD)_9LnB3O%-}a!uf_Wc%^(|*S**@DtWe`=-nn|&i{#DgoCRT={-OZO;5UyDGJLP_>C(F)El%6_JL?WZYPfujH>$kI47t|B0X` zc_njZ+a&OcK~dXD1w|U;OOQwLc)G z*_uo``DN~I9kyL%P8Te_;-1>m<}oI2#;*%tP+Qt0OYg?xp|vlwZKf~kK>KRE6az z(g-sf6&e#kF?tfob1R@FERz~Nh(J5;X$F zcWoxU`6Z>TPPnwyUjwkM96X^|{o1Znt=!BAtSz>tUOv^UPV8JO6bDpUB}XV-&sk9wzx$*-{p=+tzxRxM#k| zbjSx&Dt>TbN`cIjTfunGzh7oc9F(8UV7++)obq7H zXxb>V(~~iiUXU5{cUQ)+dG9mws@blw8}JaKwWn}pC`MztAAboOFcPLbHIbpv9oz}U 
zoWnPut8%V$8yQB71dGmsv!*K1sJCU}%Kq3U^KFjMu{rXUfYZd#4UK3H&ZY5@tVt(; z*x@axxVv%r-`MWPxVYRfd$^2op%WvRV+?ymWgo)dhCbM|rRjEDyyNqD7Lf!)hug#P zDDDL%M7dp;nKhP6O9-D?ndpM=mm4w^d!yPOeePT~C`PrctKStkczw2T^@=37Q2+=bs}O$o7IB(~7^z%H_1C_fA- zQO!lq*GrV^iXHxDz!YM=hR_ltP6RdLo}4_7`JaE^Z|h{3?%ZCI1a}V~*G4395 zJZ#O!5BEvMCw9xb-_PI9cfXY|t=2uDbC0s!m>jDlnZs=}G&bE?x>}BD4F+uN+j=mm zGwIvYSv3AEnv3NUf^5ke=W-v;^Cw5H4M*uu*31R!<;$!&X{eswmEn^Jf`UP*1QQv_V$?pda+q}mmLgANptgugwn(` z0PAsoBow88$Se)Jb2MB?){jVP!Mm#jOcCuoq`fq~pHnlW<$wJWk9i6c1W=6^j$ZJ;JJDwzYnNkWvie#Oy-x zV{2ov9Fodz*IH*+tE@|l3ElHJ4(fCw3>6!evVsennEzNa{XWH)KZ{b|fK=J5X~QgD z-&qrZ*-)8vT41+>x4`u{ntGxSPYWa$3(2Km2?)v6!riR0cVlIN`$HUDr!UvDW)Kfx z-Gk`7p?D4UL~4X7zp%29Ud+pT4@^flEaBz7o0WGDot)w(2p;8~o8sL*$Ax^Yknd=p z{BR*Zj{f~P}l2ycXceg%)$7t-p-Sn0kG z@aE>WWF8I1d0z92Tk*W+Hs^UwX@#a)wF5={F?*ks(_~$(MfW+Fk>&w%PE)fAyN7rD zLD09VR~`D6Exh`;4UaAX*%eo5VOYEp>@^+Cvd!oT_iHfo4|Tnz4js7YEii=QX%zrz z0}u?rqW=N(X6YU|0E;=(D_EAJJXg4t$LIZaXJd}jogr*6y*>jndH!NTPO}4DTiW8+ z7s6a)c5&@q^KfxRF@1d~`bLB5{ZU+1Do6Xk6Z3eL+$Yo69XhMRtzY+=FSho|d1yUi_vjOHn+KjfraZ5NhT;3;rlE>Y6O<2=wzIUzZmibCe=g>dQ z$EI+rFCUwlgILpj+0n-?qRW9HRF_F7PV>09j>Fq#Y3boUeq~*A5H_oMSn)&5SPyZ> znD26*9D`%`$$gdNexDqkkG|$$pM+iyc(1+fUKBgJ={a`FSzaH`vb}*%(Y#>s`rlm7 zCXw-Mg&w#%K}@bbL}3zk9cU);AV2Snr}7s!_Mv@p6%@F5D=TCX%RgpszSzhX?Qydq zITDW>QTh%5Cr63~e%u{BJ;e--n2_-Cn%OYkfTCHW-IJtKO=z&EXIh)JJW{M{Q0t{Y zTpTKPHGUp2N2-F_M$BUisX_vskFPQ`=xJ1?%@b#6ar0Vpf1OH7(&Rh$+4Ov)ZkR)$G4G-47iLW{c1 z{HL(jhUwW>bT&fAEvSV#(idy21zIu(a;iaXA?LCFadgAN0P2O;j@g;`+oF2k^McqbbMU_C&O1<5sP;59N7C-j&#VvqfG>pwO!)sWi_ z71ZEyROZ~GMda^;<#QzRt~Rp`j6KEMZpZ6t(|g2pZE*t(()5Y_FhsBIB`YIn$3v2Y zfeb}{xWd?L4pb#VeN>61d->390zE}=|gEMVi> zXTkFk@Ah#gr;0-JWQOP27}{gSZFXj;jk|&lg{pC zXNB%J$icyQFd?WUef?#5R3gb_S+8)#;C03UNWTQLq zA1*{U4^TDXU^DPNJ0?hY1q;TQ5_@PD+tf9qLBBv=2}|fR55uX!fh_u06p67Jw!Ano z0guFww@QKLaV6>;=A|(-q1o~cgDg>H(NJj0+ak zaPgdhs#=r1No{|0zqF!QOe+Mb$dI@XEUt~&hw;gk4x4FZF(~>U6t%_YQRr!w-YZ7O z952yegx3$j{DNuWMa&ywY_mD2_OQ9!9$YfS9#Epb4c+}&{7rOAZ%sG~()FYLz~!`hjGyY=e-+` 
zn*&8-_og^m8cVeGe?%!dH5sMImKR}5(%`u~Jkv}2qPRB{T4)xXbzL-B7b>dRy*$`t zMVK4L1dkD`vB!+V)5PIZH|dKn?L{Bbr|I*r^dO2}n; z<{C+&lPCMK*A#yt++%b7ZSHgYmitm~riljY*>Nrry*LBuqkry`oTkD7YJ=bIyM0Rh zV)Rio>Jqe}4>aU@zsLOM0Isr2Xa()`p2GLQullb;E5*6Y<2aczbce!8JdYK+l7|9{nHa z277v}yY(9;BNa~s2#FEa#>kE@BP1z4tuXY#2Db*}F~j0P9ct-_7np{M&kJY! z+Ve9&CO#U*c8f>g+ypQ2!+8ok9*xH*j)P+0-3!NJoSF_Z;rW&&JDKC5K0d#C-7o+? z#Jn5K0k5dGG&wGYcF8}>x-OPj!bRpFbqVHlAGjei{NHpzV!MWD^ORT9oUdwJUO|7a7DG=r27pa}2#lup13+LeNoX`*Cbn!3Fxw%sJwG{2 z%aC|iFZnS_)?bELqVQNtv#}e+JpLlfw`jZzS3tK*7-Iej zeZkUu>>njGT9WZFo*cO`)!L)6T!_t`-m6$=&%xj;&t&6Q7MfF_OjmM$ky2n=vAD9Z zcS25@4@hKYN&RD@h<5y|TkubBt zNx+B@wqDQi=9t8Ng7G9JZ=(&RBZ8&ZJmqvsxH`1S~`k8zo&Bqm|CRh1!HGzL+HKiLXUm)yEkEm1T;~^+l6$UCD%b$&*+X~ zbS=qZdOOtWc2K&hKmkqDH>BBmjAAER-K|q`g=UUx*6niE#!ma)Fy9B*NL%`Dx_lF; z)~DI5Zg6jb9aRo(!eD95)0Nc8vj4aUW_Se~>_l36f zdRlY0qpJ1xxnL^VZ|V<`> zaTZSE$BTz>8Xul8BhGvguFO@S(LZeq@F`O(7~4_*?~{A=Tzbo!JQ}4fD^5A$5@PL9 zhi>c%V1aRo8FlGhQ`wMaDKo@e_@vre7^clWm>wh?9yF^SW1%paT)!-F~x()md&e}HNtZg4( zDWpkFIBOHq+n^8C?zLnB7GCgiRjS7NPd<;r2caAPg6g~j}jJdp!f924E&1Gb2oJ)pNhx$0MEVHX79 z%snBlrPv3p>$JptHM&58etH#8tf5291t4-JD%9eXpq<{g8T%qknv-JXMm3kiJZ4Il zOOMP!LN0wPdk5=VQ}LFV@9uI`|B;Rd*@V<*aW8i%?o&NVaj^?e28PkEdR+3m&Lcni ziqe<2(zVX~w%W&FUyhi-`ZAMyT-OWe!G_sSGn|n7P4tne#QS_9-?$3}(ufea=3~1|*#Ee)L+OeBKZ@TW$x-qqzDLd9!88RN}S&?Lq$rpY$K*^jVyc zO!~c?g_dQ})8?ARiA2{dO2X=IMSb+vc&3lgl+}jTvO_1iz4R>G$c5axK8KmZI<0sg zTkK8m#52&mL!J$&%wnsKmdFx4{d+V<0m7NVFyRUzPb1`MH|Oc>0pd@>s#0L-OCZd2 z#bzFWw}svgx)mqsKe3w{EI|^ZPyZav%zc1gkMJ0H2^n_-P6u0H)DmWHs<0o!Aw$Le z0odIEBcN~cFl!fJaN+=Gt=moXYlBa9jP=iA^^bO~|1PEe-w&q#x4G2+MnCoc8SB3Z z>(2wa(37um7IfL&Sg`d*CX0Zw22my}D4Fz?*Ic=Pr>f|$KRUS(YIWm6?@?DSv`-<< zxI8#TihUR8P7yDz{n#=R?;zRmIb_~2G+oQXT7u1l!7sm>AfMYn$7y%|$=laP-CS|H zRx)Vlx^A3t=s(a-SMJuF)Z})CNLx|VW6JuL#<$^uY>6gOj$vg zPi;KTBT*lq0Vo&8^I+PnEM9s|kb{3@N6A>|+TgK9?P@kj-uJ%J#~d39ne?&A?yJJc zu8ot~RvO)mb6ul0Cx>s?*;a&$7dtOyv3}m`;ECmX$39`AHfApdOhte`bD)@5Y*&LK z!pKsG%>hCPJL;8Q_*Qt_O8zQ!DSDWn^jU1zrbGS3R*d^kuEpXLzuTF*@ 
zBDOw@ugqWNHD%dWhhrRUYkMyaO*+eT2e_7B9%F`5+ds=J(bA9`6oDL)NpTZ6hn;l1 z#I+>G_M@M76Lx=8rh2y3VgJm#l@H(*;E*`zWv)0 zQvV#*rgEGd%f<*p))N!?mi8Pj8DPfKec#}2(}`Pfx2ZWtF>N7OPv8}dAEit z(?4xtiowH&0Qub}Ih#b`&lit6*8AA+2cMKLv}c#CCgKfq8A4)M%<;^ahGx|fXr3%_ zwI)eO4Bwm>A>>2~iBT9MtJmW=2Pmb>!=u&uB+=4aLWI0AbdLs)vgvHe<4j_57`wPH zvv}*G0DWV72s*(0I10DqBKT>yA&#i3(n=k_e_dRs@j6J<&*eRJAA;_`=eT@%&&hZ~ zT=!QmY(t6HJNH-l*5q?8t|`8@C8brm=3uNpFAm;V`W41XU$4pWzRSja-_+OmD@$>W zBE=yk(V6s`F)ku3tp42_WQGe6`git$-#Ar+b4iQmgBQ1E;L( zB8kQGu7T=dxXchq%s=Mg${%RrNW-Qe784mMV?bnFGt5)=r*XxFyCk+*Zqpo87|%5yt~ZlC$>T_#yan_SmM&H)o%I@Go29cZ(GdD%Fkic;2LsvROeoax zYoO0Jt8_rCiA&S$M;y}RdzV|h5<~A}sF`rLP%{{A%g1vRw?`nq9NTOit@q7^DyL=At-g?a&<6o78i>9pye5<0)Gs`=Kr^<$ zzelrpc++V7XixPygayO<@i$!m9L+B_*VLazq_yyqj#a}T9ZgkUcuduIA=;?X{I>NE zUj(F7%;~Ik3d}TYt91M*{@o$Gt?}uQEcyuA5kj!@F#a{eFu)o;9;ICA<62(~d}-9h zCx)AYO!T&1IdH%8z2aCF(;&_YOVHIuAzMZnVB)=o#ljQSnIeX}af=O!jI!OSwM}ex zIZYi{zfi4TyU&(9(Q#SV5pQ|G_yp`Wnmfl4FhFO8#Weg6=Zcigt;#w4R@?O5>?-d= z=A9tWf1EhigRuCqEoHZS+4pDmhuq35uRf14$b8MfY2ZGLX_!A|hUL;)$vi>~#c(t$ z!rbH0h)uW4`U;~-K^tUd#Ypd9Ep177pgm#us>_s%#dAkD$(TGvLVZE z^eeLTWO#FLteD&FpMhd-uW0G9HfgQjt2imkD^Asb0TTA4j8oCOBpwsc!f;i zcg`@H7%fSS?Ez2WZ--QO94E|>imZpOa{1M=TBI>Ua2lcG5oe)aH3w~tM@JT1 z%3;$e`h~sD9fHTpnC(WHfp^1Io_c_fXc#RwK7q z%KagH$HdorEt{6C*87qRCw;|#CPuR5Y2p(snKbwhlC%Ye#UAyV%jRSjN0$WXrIlJb zY$Td;dI5K^Z?@mK5mc_YP7BirbnL*+MJK-w^B+9!(wkXq0xVMo4gD32k=+sYeX_Gv z7fmg5?09yW=;hIF3mGiF<)#}XJR*qs!z`~a9Jph@fPRj9;}T5RPW*x-br_B$KhLAZ zOTy}>qbSe4lFfguJ*ui;y6XNu$(|gu7sKkNpWK|@35IR6v3)nn?AWb+k}bU*ckglG zlKy%huE}EgZt_O@vX+%Q>3v7M~6hsrXAD;=q%F{Of^dx(lqEQ|<^2c5L zu~vEfFaB7Yz#i+A$A9LJBlu&z@_5Z9=-iSLCN}#qNO*tFu=WfIWdUD8U#ofk4iJ~9 z+HLp+pkD{ySZNlhfzM4>gn_ZU_+ns0LYH}DCe6k_(Nyi?r@-Z9aeGL~dR`f363b^D zR_Fow8Yl8df5;PDk#&IFF@ht9XVT9`Fk}qJ+Ty?*reH#)V-7p-C3CrYXSU1LJI{?{ z3xu|{ZO#jLTrS?x;blAQ@*VwV<9l9L?xYc?FI%QK9&|?*O_d9HZ6=+>3m7hTWwBTf z2iWO1jFg~tlviD{J{y%(m|RDgg3v&j#1)zJmoP>GFNj6|BTvjc$1Of9G=R82%)!~l zFyEY&u{;KF>!da|JO)w>08@{G(WG2-HXQWb8$0BS&I*<(chPZ#_>yqC_A-7vVF+GR 
zEC-M*Zh+Utk)R4nf{XWq>L_jnaS~geV%^{aP%+JcJCqyH{&<eA^;@-hH2xKd(SojxpC4wv8fJu+Fq?5_ zNhBCn(ZEGe{&vV9Tu5&ajbVauC+~hX<3@Ms7|U*!?v43?re8{kXS@KqnZ4HCnyOhf z0xG08ht82tN6z?--_X>=34>7|C*8Cg{@N3W{gy~KHL&ZaTNDQVg5l7@rJERY-wnrW zAKUn~kJyT9a3n~*lnwAkZH&Z%R|%6dgmoGrIjlKZI|XTc6OJcabf5+Ca$4FJ&i(qJ zUBgNYV#{qpye&;DtnNj|{A3Rhn=`Vcl{z6=PnTZD-HrFqm&0AAuY76kb<}Ik!Lg`W zF+5^aLpW%q5 zKN5FmKW)c-w+xT)cX`5{YXIV+)0-{JW1k0@tiwM9jf^5*Lv=Rg;)# zl3r$qLbYtrRw8h1T76!}I+`;8OhA-s17MGQ>mdaIft)t(Z{TXI^3L3$la^{m!%F@In0@;(y(Flq&@VA9m7 zgy}jMuJ$r#uc2Z~!a?&t#j~{sES+*x8=ZXr!Rl=JlIU$0OG0{s`;sicg?H%mx^ogq_3HQ=}u&{gQmcoD} z{)GwgkK$~N^@}STzYi2OFNZ$YDrjDgX^2nK0X+~pgDKNCYYe^j4fJX-iE-Z9f;LL; z%rZ%@Gmii@ef+CF$z;2aou``q6m&Nh!qPS)HIpts)+eE_^Rq|baf(S~=U{x%Oz+Jw zm1s&-G&az2PuF~Yhq=7FE2gaB$6@f826M#Kz^FXiR6@idbOE||r8xTtfgh}o5CB71 z>?0}8KEmHaA0f6B?WX8_$1#UFxcF%^_Ta5-s*+F>Odsa4SUjN{ZCG@eI`;*b!-1gr zBCmOn-3+tqJ)2%Ym4clhPTYfi;416`5&d@o`y>`i=b^1k{=qmyj!7`8*`wg`8)YoU zC-IbUd$637s~ydQOD375s%(*n^>~N6_p<8@Q;Et%FL;hc7n8#@{ddCv;)ePl!T^Su z2O9>kt_p_gpI7%62GDgp;_+T_j0It`ZNhGB0*ShNBaA91c z0#yrWQY~Kj1f~EJDXFT^+8CEXpZiU|om4!8!;;o^7wR|@Em-Yv*=PMZxmau+n^_?w zSJ*b;jk#T+je0Q|@7tYa7Lv`jO%42^Hs5W%hp#UjgE>O{oomdO2y1&>_A2&w|Avn( zKf$P4d)k~7zb*oX*DStWKL2C%m(TQ!@(%s7YK^aPNj_(vuFcA#J7>tpci#cEpaG`m zCd%op-dA?&`=1?;|3y>do9V@obJ0}3h=Vd%c*D3}*d$dvVWYHH@5ltn7~&kxz5 zhD>_$K%bOnzi{Jm^KeN4p=t0I3!cAlb4o!z=hy|-T~}s;H3og(+MHjNT(F=hGA^=H)iQ zYs`S|4rC|PUEVq7K?a>N4?2zqq_5x++0msZd~;rykf&4TLmv;t`A~#1A8HxuIUl+d z=R-M;`B0OP90^mP7l$f)H7{d!hla`raBZt9GHLmbF!?!dn>{m=owAJ+@>5yFM&Sw! 
z{43;-6V}JlbtM@7*C002MML=-8{4a%$O9Ni6`DE4b?XE4*2W0BuZpj?^BC+EbXhQz zbTY?>-!RV#7E&}g$4W5?DTSL;77CzdQ{rIam7mvfxt0)L)LUihcgWNo_Wv9)Z6dv5 zIz08Y{w2S!n}P3@;LnH9(fTY@p6@ud1i1!(KB{sL{(Mg`J2bpJgtJgze;iqOFOoPW z&d>|#C+Q~T@}eXSsQl&5w9>{WQe#}@T$`vnRmYt&|TM@iSL>B9W3gqXFDh(@|8PE0?k zl=>(XiM~EWxdYJJI5W4yfL`mJ zTcFv8tE+%QURFUVEbSu4sQ{d&E&E^!u~{oNiRoQ#i|blvsO%yZC_BP4P#Xanqte5&>S zQ|xf!O(q%ht3LT2`m4klsi0{u5L2}AsXEIL`sG%1QnCjy8@4sK4}E!Mk4;F#HqXmBS%4idgIsJGWD3Uc!oZu|8Cj;{t)OO$G5<_DBM`fgb$4XU zaaN*QY4vdNS9=Ki!8{*f$Qs+60RnHbY1WzOhZ?p@X5AeozHeI*utZc@8v~XkG+BNg zdm#VN*}U7qIEyL?$zBpaZF-;ivJGeP)L7N9C4y!;j&0MX2kdRauRibim-?8)Q!xq! zGn0f{@O&1u)YUFWvQV*qZvRa}%iC!Ocd%6*w>=+G6?1r#;`k90_y{xx^WckVFqv2c z(h;RFt}ywKMx>$*jektTJFdUWmZT@T`QQ}{{XVur#^vK;U>Cfygws7PU?CVsOd1jG zfb7&a2R;PvuzzP3Kf=bdY_>B2Q>>VqaIp!>fg_WA>8dnh9v|nlqOm*1*cF3!jqR1M zxsH%5;Y#Q;<`MN(J2R4Zhl8niIlp0i7#1rLk5lW|C6`@NU=J%G6kqx8p1i!Jzl*t>{!9XWrgF^T++TG_Qj1&QBVuj2v zr<~|GDfR~wj@C?H(8Wa%ZeEt9mExpZmXR6P5B*ZK)+sFz}>&xy3f}jRCsH5 zG$>iN93P&`VcdJ!yH2o31{P7GUW@jb?Gh?uK` z;eD+>^rdD{r0Om~ww9V_3u{pbMvVOl9*OB;GP@2)^TX_)*Qquezfp{bD{wgSFc9F7-5G3ud*^SDK>FaNqF&gEW}Mv2zFB;ivWHC<8@6QyH+Qxb;GP- zra}p$mJ!>uDBak?D2!ntj1j4_2_ac}q1q^Yj_rshKPUoT0(|aRMdyVDE$182O)x{e>Ob0kJqdFbkHb{yeB{}XDd#N(hH?gdK;v%?TSahcw;XsF-1$p*elBgXgBB{ zUfJ)Lms_Np>EA+e#tkSpW|t;^7MfrM7HmqE>!CS){GGMTX*+ZPxgGOixM?a;=Cx0_&Mw8ySuC>yN)#+ zpQtVKTzuR_OILH#`?g`I%?Q;&200x;39PSUWI0Rlp1@*2P|=+@NAA3p6-+as z0Vw{5{|EuRBk*UAy+c0fQZs28>S#RnkHftMqag%Z`&5=iG=-Pqn%dKrt9eP9b&NHZ z(Bf+RSO#yE2Vty*%KTMzl=?}p9{UzGs z7F5l6_c9Iy_35_}vLrqbZ9Pja9(mBSYvc4LBO#WVZwyu8>&@Z#dNK$OGYZExn&!|~ ze#732cA&L7imbUx?t*nH`;~Yeh3jlHkoJi+G}1$rpR*-*~Q4Y=5HrY&b= z!1 zrPT4D^y%~hJT$ToyXr~!D2~v>j5i!U=*A(a2cVx_N3%I_WN{e2CHQtBd&>!;%hH77 zkfGAnlTb??w}jKaI%@fc*ow;(M~AaZTPdJOusEcGcyN=gydo@5U8OMgM0vq&&XI@%UJv3=QNg*MjOCmBFGSHwL0} zsUlHtqLq=s?s_R9EKp*gRZ?40IgP1K(&n4^qiL)V_9(o!UNuLGF0Q#BeeM(k{x5ha zdv`c$G_-9MZh13`867w6hrt($#ySEUj}CA;&ShqKoeu}BK>DVJxJ(vce#|^~0HmWP zh*_pMJC9UWD5Btx;vpK+AdLv;>jn|Z^m9XJ6uR_Z0&n+PnydhmH6- 
zq6R-NUxS}FRpaL*Gkh+&XT&!4HS7WQb$JE*3M^$`ql?+s4TbRa$bEqi!Ix+xPu;`5 zHBW6_gq};t3H(<=xT4L`&){jQ(jaWOQtix)6rm7@F;acd%XK|~qsEfojfaw{t*S7XV zM(C5`Pqz!f!YYk0TN|@i$kK_%6VNRLWA}zLW#*z%(MWGvR#ue8c7NM%N+~L{zziKE zfTP-SPY7xZJ6)w2+C0?QBh1hW89GZ6P#N7dMGBqN5TqjJQIi%11rT$@qze2RG07C9 zCg$Pr5Wa%pp_*7O5GQSil3Nd|1onqLh(CaNP)TsMklq__>|S$Y%wCslAy3E-?*Pcq z!LW3u33nsR!>tDcWD;sIx9bVyg0&@}IS?6xP4W~zInxMZKMCE4cb!^WH2iIV>sz8V zuV&wR^NN_gp{iD#9L(=Rdmz25Bq(pxo`gM?ESbEP6HqGzPB%rG6Ra(f{BfZ27_1R> z>!`MbHp>-2=goFL!j9UNu*))xx0gY<2{EA4#Wdj2toXo<@D!~_M?c7L?FcZ2e{fxi znw}-x0XP6xw@_4Zi?SntZBYuf_kd;)Y@6}(!HxL&Xbpb8x&}Yru7=Mg_dNJC`?5X6 zz8*ERui6Us^-L-IdZ8G;9=Wgf5mYorjZlj*#BBJ-HavsH^2N>}8hT4iLhV-eb~}~? z<0b+I+b`Wdf)GQq5gi#|e!N2(5r*rP#Fj0cIS$}B?_rcKI}U=@*Thu>u6}VHfjew4 zRB55o7>6YXw3Bu8^CubYX?|>$g5)PU*wFfNM&<9XQwj><11ivvGUfPGl z(~w~oq!7~B4t9^gFwY#y7g)FEE=Q;mFSJfV6^a*nqsG%P4`2go#NxU_T>Zc!Z%yoP znuW$gQ++-B-9$t2RDP<)s*Ms-HS`-v{uKbfn(7-6BqQ^k9d{7@TlkUiQ`Mze;$*zI z+!@@-x<`}nQ)LIbhXCwOgxxs^SZTBY&peKv#y(To=S=o#VxRNaXEys>z&;nU&qDTD z%sxxmX9fE-v(IYwxrTk#u+NR`b2Izg3ZG7UOd9=w$M1(2)FbS38~c2keQsx;&$G`u z_PLvVHn7i~?6aPI?qHv-?DGiwY-gV**k?EU+|NFH*(X7^jMlJEE&J55Pd)n#W1n3N zvXgyM_+*X0yb6C<=7qkPi@r71t|{9$n5R6efevB`uD=&1 z8GDR6sSN&G@1#_CcO{O_>6`$|haZRyvm>NS{)P9}b@X~yb$TaId8ZSoRJ;pRPyrUV zvY<5e^zKeHEP=tht?jDNjXGxg?e4Rsw{%<#MQidTwF5|10ggDm7yep?MEAb~M6cvU zvo>lvj(y>~1QQXs6-)4AFR#Zoqm=<|sbj7U+t0O@TRDcFg$r5~ijc0k(Q z2uPb{q_po4sk3A17c#H#aBN4ckCIZr{}bYG#cHMYI;u6ryIEOgMF^~U6M#oLffx4! 
z9EZSTb^`GCzw(kv)HmdCgw(wakh^8bK)YOa^x5hN{@gd-(Qg57BjVj7<0ZJ^9c}lG z_ZZ^sz>>|A@m45!Xk2tW)(_qi#3R_M5i;ICzmf6QAYSQrzVZHmcqg%(Cx791*S5LoL21%D-<_!zk!B)N zf{gTMCz95Y{Um4hpMK^$`vjyEj+9RI$lcOO>7BK{vkyX~BdGm$G9--Y(9vG7E zjk6GEBI0Pgi!vTx?!a<>a|Q{vbx3=?tA7ObA40YvnYS6_{*DKIK(+wnr(Xl)W~BLN z23g+d?>v-=|{Bt+{S?*1A z%g$bA(M)jA9fNciAl=Wpk**2p_I!avrs1F0`ydkC$B2CQr9$LO_^ZAii9Fox7=a)C zgKvHE8Ux-9z@^=8{SqlX{H4;-XT!^qaoF;OU$U0>q;&$oOAve^(u#Dag$8LlzPtxt zsvWe#ebd5nU4h`S2!7-zZW?|Z<6VPxfb9MPAXE1MxEZ?k3*2?C-IE0+p43J;^$RSgw^s$=%c-*TLtb70RIgW&gQTL-;#%-wo@MSN){ImPZ zlkl?E&;>*f!>p&A;-Y36>$^_*2)Pj< zNA#1?2|A|#j3N;|7157&@xkE5UA_l{)BgcT!#;p~zKd(bpUo&WbscMbU_1{PyZ@so ziZ>BA3~?)D+|+(>wj$&U}qo7@IUh;?@1uGSQ(<~5H;!tb`TYWIR3MKg?I{rKaJo& ze=qCkaUE|q^zlE7J05!7yUU}p3r4&X2hsldo(q3FJ^X3Ea@6*z($)vyuX=4OAa4Ag z3*?cKST})erGM?8(0z!$9rHDQFY_=Lr_Jc*2`xO^Hzsm_K4MP9jEBmY>H~c|z3!1*-G`i8TL) zkky!>Nrp`A2eJYo^$2;5ocovieG6kULK={fy`5YwzTpEBRY(Lvu0hDdo!rQLA0V;i zx=M5&E%)hhpIoa0G9MjBiy4b1GBT|lRv#H+8$Wkk5tb(q=V`>r>U85Y)?njth10io za;}`&=bJ0HA*2p#pzm}TB4O9`vC+SiUgo>dm`5aHp1?LbYUiE%4ZU}Vz&uWWf)>*u z)PHt4kBE-C(R|?CK3eWju^oE7{5HEo2$lHwg=e5UJ{s;#7}@_Q5++DELoUMV(T+-N zyG{Nxx?`sDbYe%M@@Hzt&B~wiIz}mlSlCe#CFhK1P&-~5CAlm_yuDK%RTto|-LvFh z#rW&BS>6mrgX`=uG@P-FFMSKWF^(-_bW zx2O}lTy$brpO~)gm>SjRtrL-jw;}4z!`S}c@?N2RzmG}G!W?g=W4n(*jD?ulXE4hz zt|Pp7=rr3;g-*k(`f-Tye21H;Vpl(gFAK2%_aQm0nv;8?e{%msjID@~+ric3?h?JT zv&A%6n>3++YBS_9$hh?DvA;?3;$4*Cb*Sv~|=#$uKP zYw#`1?MvdYgwn^{o|%Y0e*C!K#_54q^$#HKiNeMOVHFm5bAI9bMp?GID$=B9o(ld^zRn?Gqo`|m+-3yeh-2#|HjKs ze(0#8>t=n!=deS4)(|KIqwsI9QU0Q(XSV^c^p)4RGVUW~viZeVUJViIc=_H}l43n{ z(T|Vx+peF&tNKTd;r!<-u6+LJgPoZ1D+t+(kjuW}6R*;Hy(^2xws1sgy7h?Df;cx} zc^;C>lhS{A(h*|}GD_`Uz3uQa8efh>#(aI!^`)}sp~v?XnB5+?_pGsg`VdV!eEF>R z%cJEEZ#0we)@;~R^Egd z`s08Rgcw~iyTbd&xB@YX5#z|0fI(%9uMhW&(RmmRO~iQeOD?lBpZ6`ZyAZM!A*;WX zly&S$ulknR2N8JQ2^{|bxMUiZVqyQKSc2eV5Ih3GqY-?3|KPVEcrO;^yDxa5N4@F0 z(8Cb21_?KP!KL(go$t>3hK*)yyvY5!t^TwfmOYt&dv?In+xfEH+vKE@zu{hZ` 
zW7Z&2*r$Lr;R~)ClOlZU#<>Vth>+SZobid@n7B}_?^yGkaVbQH1_6;7J%k!T~hOOVm-#5TFKHCT1==^bX8>fMe1q=Pq*!z?-zo-4cUg`qd z>aT2%IbnPKz?MGagdOwdz_5?IlwqD9*d!O&8b7d+e|N$@-w&+PM$=r{sLc=El&77r znzsgq{fi49q6dIQ7%WSTAA0XU!{2}O4402j<=^;?K0Xw_5$@`L@aH%DgXfPjwA~Nw z5?S^tW!YOR%id-e**oEf##b&f61j6=*vnn`IMWa8_b#y2eqhay^Y?2Wch>!BKR8oe z+NsSC?1hiJwEx=!!$!zFjgonKqs-G=U3ePphsH#?KGR(4WA=j+?oyxaeqc|^^gCVX zAMt||^aOuD{0UyKOP}DlD1+K}2FAVfY5v~&v@^eHesHQETy#z~gTIO|c)&jl`J%=UwO$5Z^h z@AiXJD&v@4aKRJEY98bU7iFd*WUNn>wfB*ImrwonTJ+KVD>;jwZ2iDcD+vo>&sSDk@ z0bpJ0*(ujkkw>L&%C{>X!bQ9npdSL?JvVb+jQ*VH4K8p~;kVK+dUo@EtbNbDEcnco z#sgov(|8?zyS~51<9*tfJP))XJKy8w-|Zuh06D4U?;05fe{1At3*U3r8UH={PhMtM zUb*rl^Pf&0hwT}-J;u4f8U}#He@c6-*(2MbdtCb5Ek63yC+_mPL&j@x!BhC>);~UR z=jEhM;wV>uT%HqqyyXC&U$dP*zhb*nACqRzug*Q*{72Ys7amM&?w<#)WlA|s29!fg z70pilTO*fYYqNJ5TQ{-T03)kEG;hVNNWnqC28ETi?evOZJk>Bw;-1RJPb43qf-kQAw%e~zP zPMjZ{#%JASPQk^xl={K_=XU=7rR}`E-rnxS-R=kXl4qUrd2#^QKDlf=q)G(5l@6CU?%>Vq9 zYu>+P`T3Vi`|leV=Xw|VaeiP&KF8mWdd^9|&=1a57y1wRfj#*z&eu~e<=^cGC;d4m z{hk3}@t@KLvHN)&81_5cpw*)d@JB_w%B{ba%6Qc-c>4K1>x4_W z9vNgFZE_wbUGktl@`x_wdDOk&T_18_U>^MZJb(Y2{5|Xir!I>1gM0T2&N@~2fwjEo zQhz_Ne|V9jLmZ7rCg8&ugx#+D zZ%+5Nf4j*40A)ekBN?3kw|A)g+L_UZopcR{dD%^eecRA0UUu?q^WlN{@oJqDc84F> zr(WiD-TyMr|A_p3@@1!e-|mOT)h=a=`fy;_t~z;MQs=C@$q&xO|K?>J<iR;GFP*6aMkQIH^0lac26#(eL2zNA7T$ z>-)i}lyTO&;Oy{&Lx^uVM_gT-`+C=(G6LNCN?V3AANn!x2R$Eq_XDJVWQVgYO>Ki) z3%Tlc|9x>yo3jk}wDB@L(&k+TSGpr_^H2AP3*D14-EJRr{qfLr%$p3v-tnrpe0KW5+2=w(@{@sK|K-y5GY5dhe~SLv z{0Y}z4}9X(UuGZkm98XS_QzgxS^xD@97ma8NYs}Br0yHwRvBSw+mHa#0I3U7YaiHkUxBR- zNq?n^M8bQ+aurz!sR9!IT?1(=q}DZ{FCZaI9sF*`XVw5>N(A~-1IWT8$YWUm(XWNE z7E%|a_8XxP(LfiH0n$Q9HIVeBDzXi}8z4nPzU}Z$#seHA1EhtJY9MWc)Bvd+5}5$+ zAsHYogj54*8>9wE?T|7HBR{geQ*?N>q~;O`UhF!zR}gTEJ! 
zggUiD`8PoOz~2p+0InUsgL%>$pgkg>K7WO>!FSz5P#^f-3VAia->s0^Aw^yd^#|M< zgn<;<3*#u{5nT&)I0iBbsSDCVcwP$6*FahTsU2Vql8VfPzk8uvTj7257Y+>|;{c`s-giMVr9inVARkEhZGf~5Qak=$0cE7XXNp&WgUn6`S%uVk0?Ict zfVAHYzad3l2C%<@EG~fW&!B9;FB3f53VBzQz;`*X<3e~=htJ@BD|~N*wln+y@Q_*| zb)A9)c|~>r{h#1@H?#%(T?dJL3-$RK;2>3;20Zv}g4A^ee#3WN50rBjlx21RiH5Qe zQvj)fd@v!e@L6jm)C&@RcR`A*g7$#EXMk^06_yidK^Y_GLLKKpTW15$a)4);ZVO=i z?1Fsi0B#$=wPHOWwL@xv-zLaoYYCKLIiw1}T>)hVq`nhXC?NNKZlf5RwEb zqAP$TLRt*zPmtb$^b@4frvgYiq#8(dkUoI)Go;IY3?Q>0t%vjqq?3?F{uDrFLs|=I zJEYGbUC<5rLAndl8<4((bkom}KcrV6bwC<%I)F@uR0K(Y)CB1lNMp_fkPJv`A-xFc z1f=ku0FnjiK1lCF(wv31gR}wCzaX_jIs<9!F9DJbb$o?f>a6V?~wLD`X16y zq9!*$nh$9mq~{=g1nFWGJcINAr0*cb1*l0eqz53q4(S-ApCOG?t4SuLhaeq;6roX* zB1j^nS0S}Q3JFw`>mbd6^f084A@xCu3{n$}Jy#9sX-KV*RKaR;Go%to+aY}cDNL&- z(;!tqdLGhgNLLS0lRQYzLHZfem=HC|g0vda>yXYsx@oAIR6_bEq%R;{p;MFFAw3Q0 zCrG1)smUTpe}(i3q$`K3Nd~05AiW2PgsMq2q~(yFfYb(QBv_r(A>9w@ZAg9i`vNt& z7g7VHYcGU6A?=2A?L}&0g|rXSrD1AP1?dY&(IeDkBcu-?U3syZltMZHDeMw8DS-46 zBoYo~f%Fih4(7a@HM>DmaWFQl!I zf-Z-0L3$rj^cB$7kUoJFc_r`~(mx=53F+!c;0dI^LHZEVFOWuGr6#i>ZHCkeDeP*H z4M_JwItZy3()FW24j`?D^c1AskWNFI90m1;^cbXlkbZ-7^Jpjwq!%F_g!Db6*lW~e z38c-Cc0w9{E#N|`fb2uay=PK#z8On1G$kzlksE%i6OD%CUP^mg~X9~aw|z76UihpnM{GXbt0Kc zlE^e-B*`R&q>?l;oy;IJNjjNDW)l;cL*|kUG7oxOCdne%B!}EaZYK*!F3BVLWFaXa zi^yV9NQ%f3vXm5)Wu$~GC#B>LF!#zy1zABVNfj{@3t354k!rG<{E^&A){wQ(f7g>5 zvVq)1tYjmpC3lleWHS-S79x^+h>hG!?ju{t{p3&N&*TB}Ao&aVD|v`KO#VjxP97nT zlE=v7WE**cJV~A+Pm^cJKgd7HcJeIw7ifj&$qVE~@)D^dFOz?hSI7?XDtV2(PU^`U zm$S34DIYB-ppOOENcJev-f_zC%lCQ|ujz|Dz!?Z3RDHDf>m185LJk3s7j|ArW&paRq0h1 zs4i4pqzY4wP+hFLL=~>ORCSqZq$)ynx#|klm8wY9RjR91Nwa4q&7C(sHu?{RrKM!b zl;|l@`56_<%CZWUnGI3V(T2hzbHTFGDnn9v* zD%XF;<0lxLITS8qm04L(xvXGOY0+S+V=k&T571bNnF@>}pIPN)bBmxLl?7$ysiozM zmoF<@IupM9=H`lMGR+(m&fKDc!lKHoO61lc5zeP*4y<#79&{}%W#(mf6y=vMDk)k# z0J~ES=LRp6cS!tz4e5k(G-bx(@`|FueB_kj+?ppbF?0Nc7@u^`n+WI+Kk!M@67#QHzH><7++n6qdFW^=*f;=vI1kJE1^ z*dfj}lm2k}&7`tuNfE*oom)NwpvhU4tt=>A28wZ6*>eAVXHd}Ygzo@hSyoY6q!bFq zyFp|yC@lAk%2yUuR#_HRf$T4Waj61r7oVcD%M$KdTm 
zIla(Vl~*o@uf@QXbINv*c&=GGO;Y~@HnT(x-k5`Fynb-=5%-)4Sjws_6&2-`D8l6f zSqlCDS(}d^++2A4dCY~!pW|FOeE`J4PldfHtb`^#H_UUJB#%Ge`R@4hnu?D99dj~Y z1kN4X?<`CKJm)ExjCii#{m+Y>GUa39GT-rXkoNlcfsE<#gI43?&rS1q4Y6`DxP(hN%FD1GxL^oJ3P%He0crst6A& znj26?BFiSJxV*Ax5ZU01x$_1Mm~W~qU%3pV*e@`aY%pZeWB@yuj~7lJde#YA-BUcc z*vJI>#WbCN`}X`UeCSy$!g?n^uV>SFR-3z~p7hGMR{mFssC zW@Y4PF4E#lYB-+|ewu8F`)d*gyQ~UF>3Nj}i@m0)If>H|#8~DJBDu1t$n!-uh&u8i z1!viVuXm+fXFxMn7cI6RN978q6?*28CPU7$+_9*ra`uwUWtC=2K`Ds%Vwh8vS62DW zS)njHBNL|4UL#0O;*`X!Sm(=rga`;I8qZIDQxbiMu^Wj@Hmw|3lTJ0JWySJ0#?qoY zAj26&N`F8?O8t{yHfipUX%47a<%LB9LOKf5e+y(37oeqrw!|E?Ien{8rvDIw6Y?d+ zY0B%tG+bI$;!4N`#dD_8`}`s1&jMBA(O1)|WGr{viCgWm(EBQE{wyp7nsb9tlrzhh zPUw#)-Skx%;uVqjXD+_ey%b-jLIvt8hzjb<(@E6P~*$9};XOJ}<-C-pl( z@x^kmc})eCFi|ZkO)jq-1Pj~|U>a0>Zb)fmE6bM;W>B14v~*b&*k~EW7IR_wsxl)t zJ^d*}nP1S!i_FvbQn6=899WxrsE0w6)J@-?E4-g>q`;z752Ern%M6G%{0?02fzW|9 znA#PYUEa-HcE>Vvl^W@sQs%qKNvfo7qdX=*8oIiE7xdBdC#mhY~^5>Vq096XZWg(l1`QchpX;DF$rGjl#pLai# zXJ|=~9kfDGWj}LP9N4E$i)PLY3JzN5{o4Vlrm6W^x5?m3mQ~)7zgXtexhza_YbW1h zHZt)1npwOk46dm8Wngg{9K(b!1P(u~sLEVfzS;mnxw>reV0dX%csZ?{tHnXBFQg5O z)!!;p9ANxvMF;mriic z!HfRjdXk5Rs#;Z0aX#(g;^jC1u|aDNU)0RS*m;Vo7sHUpjKg!Tt4B8EQK28JaE?Qb&MI2vYIG0U&XbqBV+L{+ zRhm&TI?$r48ni(;BikSD0A?Xac3(^LXDG{yYB2Fryxs<3*YG|xw+L1G`A^V&;`^QR zx}*NCe9vWe?u~SgB|S&3p4VJn#y!8eJX?P}$C-;S9KZ9Hxt6lTvO)%Z&blK7cSbyv z1@=2jl|k;7o-=5rKf31nznz!T*5|XJ@PC-D$z|o#E0_P-GXL+Dlr0wzdRFDJ=A55V zv=moQ^Ouzs7FFk$FFEINaYmwS{ba!W5IO?i)*7^rfnVH#{0(H};&MxAA)+KMEm&4I z=w2+3hXbPz(4iH40G^Fw$6)I`Fx~+5bwM1&0yOkiT!0?<5QYL;UNJaGw)To6q~m=E2wvUD|+lS@E<%a070Z=~cqd)9y}Eu5fVlL=wvrA*&v zYj{az6*7n=$HMZmB44B&J#0y3`5pOXmOBatvxu33QnIwj%-w187bC{GyZTNwGAD~= zRr!i4G=rAK3B9F4UdZ*~n8AxXI4`+u2k0O?+#JAz_K9N{Wl+JDMft@AWrZ*V`7KhS zqe;rj&C8aoMkX1~#l1A!1s5CjoN+6eMdjORkpcLB%^Rvn@ndncxOW#xF(rb&nLB&F zDL*keIc-+jyc`45G3P{O2E$;gcA)N1#Z4#QAZTi_H~BIcZw3A%A3dV`K?JK{N>o|~ zE1JQBh8wI*(OkKDkWQSu)DHMTNSr%H{u1a@gUE%A;0~n0N;l8TQg#kqO$Hup*YQ5c zkt4eivNz`>+0d`N%E)avM$1RjW+eI$3GQs>EQ9ZUkmdkjO>iy+&7bAFq;3?MQt(-1 
zTwms8GI8x}O$@1aA>u02J%{$RM??i5){(t}e-}lczoUsSDh4NT< z%0Ct}F z07eR)CoZV0ELfdiR-)hR`}O4WInV0d@#k~D@2R*y(H|?^`8b#mHXbI@%p^E-zZAN0zK%y`K7 zcM$Zbi|>Bq1t<3(zii+}s|KFc(^fQM4UU~aps`t5%`{$k0Iq8xA1NHg6Ir+9*ez#sZ zKPmLvkN&6W4Qz)Do_7Bx$~iQQPwQon_Q$zq<4Ko`5tlFJ%+3F-iajSg+ohghwQqM{OSMn?gh75fqxcl`h2^1wiC%F6Fk4pSa4x9`Rq2ESK3-jHUR>LvC5mwn_O;kI7nB70Z~kO_HDVNeT690Fs_ z_*nP%P4~Q)zhYv&m%n1!@>f6oWw7m*=Gi!DZcULk0Ke1RnA%UD9=QJhcQ0RhPUB-; z=I-ZL-~Zc}p<}(5p<}Um2j;}@;=})y+YJAI#E1X>eDFNEH((xajQ!u?>wk5-^1KAh zgAa*igAJ?lCk8Rk!qXPF1bwe;Drc(@`t>NAW6CVrl7s7+&yYs|O8_F5-oQt&F$qrt@WjMD%`1EiVP_ z;(8s&dGv>Kil*y5tzL!`zL|IYr;@rwo3>)Axfns7Uku$*PZrafNfl`cook`PXCf z)4Y)eloew&KYe?yy45pS#oY4pemC6R%I6cYe;!~cxAV*^Q#o0Dj&*b4q3==24R&5R z2yOY&)#t`{CmseG9Du$8Hk-RNq?DE~!V2{ld&fU6%g_IK=o!;-P<$IO;HZ1sxY?Zp zv`oJ^gN2ii&kfZ0?luh|W8B12_uK~4Cz1=6RSpKr&BtYcKuVr{wV#k}9_{8gK9iRo z?x)T&=LT0cue$re_F1oUl!Hpp2-W=r=<(`r9!P_0_AF7Xb_JKXP z8Vss@oQmgYIk^5iSS&Z0AK*NySNkI<3`B-0H&5yJ?2ZS0^XiJC{AC!%%fZh7@8AE! z{@r`0n#|jvCbvS0f)oPjbd8#P0%;GV?U3$)R1Iknq-02OkgkR_9Mb9aYVsMRosgb} zbQh!wNDCoNhjc5XF_6L`1wi_Kotm^k+70Q&|6hA=16Nb_w-4`gDnf{2LI@!y?DzMV z5K18mF_}t5jo#FW5GoA~%?KgHgb*4+Xhz6;LqpyhLTG4AM#zY1%=6v*6bvZa9h4F%zd}Kv&Kyb?C@2)KI1{b^9Ae-;p(H^`hms4W0Lob?x1sz7 zML)v?v4(;}afcELB?U?@luJ;4gVKGbu{|$%{S3+sD9KPVp?n9W1j-F4&!G6knjnLq z*h6UrrLO*1msuuA9(*o{HGVFE*VFay+rxb~SMMFl4XFDHiWlrZ1xh@WO;CdK(bF6Ld`La0NdOc0Akb=gp73w6~|@9H7}!${=2&;J+i{}=E7RlKL@ z6<=@l%UKO;puXDgUT72Z#-|B20BHC`WvQ1M1}pM=MbphqE9mO9bt&| zRTnJahi`O)&&|LHu|<2`P^8q&Rig07~dPexQlq0`3?>b{A(*CjCCKhGS*nH z>Hx&e_!_ug^F zzhJdg-5wER@Qs+_t9>8hz3ppYyYG;OuU~i=q8c8x`tGsu$m;j^ssBqI?{-=F!5-=Z z8|$|lpOc5Af3S~n*xpt(YH#iP`;H0Ho|IO9;aYnmJjV_8X!vv5AfSAG2gk+^jq(o* za?u_w5Jez}g!o3rL`@h9IO{ea+i%pJ>mSM3C+u)YlItqC?b^ zXyf$u>L4bNu%NK$AQ(kQI1;exA2T)B@a}M6TXl~Bwe1eD#~3f?S>gU+ApwKcp|^z^ zBRU%&()sUded}jA5eliVtbdF`6sbOiGYpRF*>IN)I`S>_{)y5kubg zoPhW?OzcJatKDbNeoIF?HXIDKAtnm$zy_HDnq9sAFb#tU(gga#H7GU`L0YQsqNhh0 zkk62Jy_=2942TYgoA!-{SoZbx4~T*wiI}WKGt$&}ogoqR7w$V_%FM{9kZ{8k1hM*H 
zTXg+4wW*oNlqsPx(ZL8}j%b7s9yBu;8Ho5s2L=012?YX1$5R0SzTy5+ zYHayOO`U7RK2<;012txm5kBIP~*bsy9q16Z?BlXL3K~W7`8u$sZ zN9rHX@ZLdP;aZ~9uMnwq$Vc!=dxNmX^QfJMaPQvx1|j~DA^pOl`^^jq?*~WrtN*G} zzfi0n*3UR_@9hyBW-vfrE@eWZYn%_1)>CYn~a!9x# zYL=(FhO_9uY=hxyv@x>rwxQ}+8taFej*bb6nx#EY*jQ7C7^&vn-xEQQ&ieN$A3>6l z!J!e+^?{G{(>cdPhN^Su4K)GkG42p;LsW`Hkd9`aLB_vngdn%g)J3DA5mCk!B;&;4 zkXWQcqw~~TM5N0H=XQm&2g3a{fMsy~lL|bI(;?7KVf{U5ryCMG9zhnEd4>iBMMA#u z44E44A8I^h{iD#3FU>q>h8O~Zy&@X!?nOf{)PD|CufD-PheW^`XCY=L!#=3XGV=KC#b{~dyGHNE;9 zx$Tb+#z=dI%ti(Ws|n}*dRv`yP-xKK?600_M1(r8VjMN-sa$>i-=`TE7#O7v0Axbr z)1&=eLZ$`-v#C?N#%D7k$exD1Vb6h6)aUUUW{vQVo`xV3-u2jDHhg2+@CQQv1_MZe znflDLciRz!&<#u(QfTi*-s*KgKo0a1S!+9Q|- zX^G)lYZ0H#j<{zm}yducsdL+E9($IeRyo5 zbc`Az@U5a5swGk2aP`Xm;pXLLUJbdvfgX{?Ca%$MkvSCZ7ILqL@mbBt0@$XF(^Pdn z+F0jdWPFdHD2-6U1A^-3pplLrVjZaV7q!-N7CfNdD5IbGNA28FppuStw zQHe&}hKw0G#GSy6S?zz@KbUN$ z8h`i{q_sT)S_*)6Bamn)Q{WtS+TKyHxBAl`YC_;05GyhR=?!f)oZA^nw03L=?E7x7 zziV?>eXHS69|K#W;8^wjET|s?HR0Ov>NVNH6^9^`wKGS-cJ<$6a1HOyI}S<|wEXUT z7~E}t1c&?Xj}UOz#$Mt8G{-)ZM zM`^f@hVA};!C^DgT+_Ef1OZ>B#usXiTA-GwJ?bPo%Pz8?94H6Np>m{bkW=L}IbF_> zGvzEfTh5Vl=b*&L2*=2g;ZEYRGbuN#YJ&fJQOd*Tk%o+lt3j|2~{E$gA%L6De+2@lB}dC zsY;rXu4E{gN|utXda+Mu?m9o(e@>WHE!iL$7OI-$;}3+j$~pkAmq>Vx{BfoL!qibkRaG!~6R zA~Qv&HN%d&~iI#88aHSWLv6FlWpKbH_X|FU%YB!Thj5EEo&L zA~6FNi^XB_SQ3_urC_O88kUY_V3}AJmW}0LxmX^Sj}>5r*h#DiE5=H&D_AL3hLvL# zSS414Rbw?+EmnsixG8RqTi}+sHEx63;&!+_?tnYuC{E%mF5)h@JMMvd;oi6p?uQ5B z!FVVhi5u`(JPwb?lkj9b1y9A(@N_%_&&0FvY&-|g#q;odyZ|r6PvS**F=q!MXFI*~zS5?MqxkwfGXc|<-@Kok-ui6Wwy zC?T#8r9>G~PE-(;L={m@)DX2q9f6Rhq&aCpT9VeJ4QWf-k@lnm=}4j^NwTC!I+4z# z3+YaJkY1!W8At|`p=2a!AY;imGM-E#lgSh^l}sbk$qX`+%p$YN95R>8BlF1uvXDGU z7Lmnd33-JqCCkWivVyE6tH^4yhO8y)NQ5$_%qa`XlCq|3C|k;ovZovwR0@?!rBUfr29-%=QQ1@ul}qJO z`BVW_NS&mLsA8&wxsW*{<;*z? 
z&XTj{Y&cuaj^iP%e@)aIst*7tbYe z$y^GT%B6AXTn3lPWpUYD4wuX2ars;USIC{@inwB~guB9(a+yMwkS*j0xk8?hFBAxc z!bzb>C>BbDD?+JICX@@6VwG4e)`+!Yorp-LlDT9dSxVNDjZ`jGNR?8RR4vs=wNjmg z$fmNnY$039*0POkE8EHTvV-g>qcSP8vM3w5(p~nDy<~6M=P#@oE62(4a*~`Zr~H3n z(EtC~^8?;Q)p>;f1aJOlxbk1I)BhX(`QOPqf6pp^ALZ#x29wETG1*KGlgs2W`Ah** z$ed(~m|~`cxx$n(WlT9!!BjF;Of^%()G~Dp!v1e#fd6FVqXH?gf+#o%&Vq~JE_euD zTI8z{?=E_X-lCsaB&soPC)rDmlC$I`c}u}ks1zy1N~uz&lqKa!`BH&YC=~$*sL^kX zduxbwHOd>}I}l=8jplTSU^RA)QCk68M~zcsgrX3YYCIYvF%F_njXPt+6+x`2QKpGN zq)y+UyR0Fq)Oa#RlAjhmX?~)M2!$*Y*=dA!CBJ^szy-NK?*1WRSPtzfHKJZ zYR)%idk5SJcZTRu<3^1aV;;|j+^t59F&4X$Nw1=#x<~udhg+g{y^IGGKc2dh?wUjw+L0i(+ zv<+=Xv$PBCPJ2K;i=^Y}Bszu8qVwo{x`-~O%jj~tny#U1X@s$1Y#EYqW?UE_#*c|) z3`{H&Lor$vVv6%FxV3NfIf--vq;4eLo7wpC24XjXBoBG`ys3&>b1Vx0h`Dn3mcyr#4M|qNW=3RJi-iHt6Bl&nfiBI9P_&h$JFXD^&68;Kb z#@Fz*yrp0#*b5FCc2(5+2nHcZNEXtC4B=fK%h!-xCDaLsXeyeE7NVtSBch^{=q$R3 zMubO-@nVvgCZ>zoVvblK7K$a}6|q9Btj}`hfPGXV0sAhJJK#T5ij(32`)N`-;6ER5 zUo4er`L7DFuVy=Ao?`*~YF0Dmvn0r2IrZ2tf~-}k>3n03vW2|lT#tP}$VBm4<}v1+ zJjgR;_1Ldf!1`$39f7RkP>+0P$RmMT-Y_cdbjTQLt}tea3djsi5cu8u-zbHF}IleDvje=jekPO)(w>}1nAwN`U`M{V7?124M-ZwISLkt+1Js;Ry<#8i}n*wXA zip|K_9uNg8KO31j1EN6X;Zm-QD~CuxcuU@zx91&rM~DP>hyp)|0s|ilk&w=3^ErG0 zU&xScrlYAyr6&NXXL^oKd?~KpdzsU^bp4QwUi>SlfRRD!-xf| zxi}I*uBh{#s4FneYcveIh5d$^fj5rAm*8vhAMmC`7Xl+T69)-vvJ?3^xtaV1BK$eo zgyJYSDjM!+Ep?o_PW?&=^c2WZYv?WXeaK3t%phh06AD>qKXZz?z&vGIv0XrU4QHpa zGuZ@oGy5HTfz^X1>ch>3SlY-vD=HEw4l? 
zsh6}sS|vS~nt;j7vOI`==?(ErlQW&q8irhfL%ub!ahMKgECJ-m!e;z z`_LokRbbUxR1e5X#`a)`u=Ch2mTelewrqT0wuf;&~OE~ zuN~QkoIp+?XOla~@5uAyGxAT;2E45ZV;n{;q*hTosPoiK@S&!V!$sPao=iv33xKa~ z(NAfaabtX$5M~XtjX4DUQpa><`>_LAH#U*o!tP_wuoqc#?qkTe?%X(T8RXdm+*$5A zSIzy&wd4EnG(QSBC5q4F5A$dFa=w-DG3dXMLY#0xxFtLl`U5NY02}NS4~u0G|A-_= zBcvJ7XKSRZkl}EM=~?nxK)`+3QrW8PhZsdt5yVHG7Y5~(0~x11(Ss0(g~SSCC$WTS z#AUI8L-BF>)X|n)D~9lL_Q9atnD8;-@7<$qwo& zWk&mf-YKTLFbosM%x0D_x0v4;N0wvP0WY@z?DPeeJp{_)N4}}h4|Ky3;igb2Sc!vy z7dMM9fd3`}<86`-N*AQt(l3&UtjH7Pxsc75$(!Xn@@u&>Fc_vBQJyPEE`k)nwe~|- zphwZur~@_(^T)P%wNh|<$dyHxe2)Be#%hA z6MXO~1t~<-|A+%R4E-G40q*lYYK0BPqOo$UBkelXL^F+CiNF=T%Gfr3V8a#nM~pLvTmWX&uvwQ9xUM%iL#p@JYY07Tg+c zD`-Jez8C+LHxVWX3xpp96LEsLK>Pu&-&W#5eFaF%AlF=l*tM74>J7R8eTmv)5}f}SFyk7$Gq`6zVlQ!?xK7Tc#zAz? zgr3<){|sjw!yJH^u3$cbGY{hyaUb#X;e1*AalVf*6#67js1|I+A)>1|Q%n%Q60615 zpiGB?CN-BY$<073BGtxNyot`h=0MJBNwfkr)fAM{WlE->&fX+{kdF^2$CVd~aWx2jq8aFHJcPJRVw4l* z1MPJIr z7~McOVIhvPdY9=m3-tK@-5KL;0gBCvi{=&rQ$_Ks_-`Pi5O7UdLJ#q}_!@FZYtSql zX!Q)>s0*OX5iMcl!HWfof+~pw9i9t1q#Ber3VJ&dw017|%W6;6gDa{9m@{M#3qW(AFRze;D_}nL-lW)i^l=}+e zq(kE1njKLBG*ke(2)zJaxevHwe{3bz2HbA|TDv(DujuYfe^AU;kb%ngJ3MSHA>1W9u&i|F#UmiMD z(jif(0b7lo0cOF#VMw5vDq(y&kQ0FyuaoUT=Vwvl>6LUhW;yec=?HmlGJ6;@TrZB| zO1LN7X}*&%Q@AH|0*rK((_{~2ow7qY2zpVch9J@%4MV@f%7|$43$4XlkR5&jSN)1? 
zO?9HWQN2Nh45CKDc@|MCp_h+RKT>a~E;LO$(G$Qod`>R`{ggw0OP`>xLi{vkESR=T z2k;6kGng681Tiz2#Y`4+g!zGa$TVd;gEMB>aqOpT1p5`co;}E3Wgi2kKjM0D60pf< zTogBtTf%LD@%o;-#r@2g@Et(wf66C9q~8Tz=`9QvoW#rG1M!*I3wULU6eaxxxPK)D z$Y*4GK;2R0R|SdGX$#6$pid^D+tDM~Icy}L{S1B?e}OL`SCIp$iPS85Asx+ZWY2>e zk+?~qkxIDdoSwJgLqG$Z=jucNCcFuykI!BHfcJ!FRNfpUZ#9Cg3JWWgqmVu9=_|^d?hUS@ine8@fnQ$Elri>P1xN6%{7QZkzmI>; z$ASy36x_viqNntk6a##e0@`~c+)<(Qy);|)R*-Td2X#RGP!s6gFzjn=KlTc91NXHZ z{}z9O+W|i&5nmEJfE#k?$>0d)G3%M5(8Jfko&3uD$yh<|m&_g*)FSbp_>?3+mEL=?y)JRO^r|I5&xoLl>i+fvHDeF~HQjvF|ZY zJP=goLmVR(le@{&<7tDFhx}ip7|aQqi!H)7K~Cv`9|2!_1FrTJ zZbFPD7m~lzof%hV6tGh$bDVv|MsZ(qmORRP@cH~l!Z6_ks4H`^hd5W?~y2ufYx55jP>m zb!02@EAXuZH4NNp8mNtO>N)ip-HKTTe)31gfgJ~VS9h{Hu?r_1wBWCB0quc!vhEe{CGcd zC^??o1LIpsuA|O@=W?X;7$m!+vVf>pB3=vEufF?f*3iAok6x2j4sI4;KUKaf2bE+lXlkQIo zkgXShV#@;MbA&FV2LM|9m_%k3vj;q7XLbr34Sbaj*Imkf1lbnnT(}wB=Uf&ykoV{3 z@p<4i%!L3UMc60&Ae4dM_(NzWb`TxJ0pL4^h-1J-hKSK(ytr6gEp8O=i;u+T;vb@k z)Jp0kb(4Ba{UjMQ-DqjNmf>Qh1ZknPq<%#qTPg!y?;!V-J!M~c2K3BvV0TH0R+cMY zE8CPk&_nl>hmZ%KE3Y9J>XB?17c;as8iam}mY_G#a?mjZ#zB+}#l~Zkfmfbl&iH6N z2uF!e$#`(yZGgjGP@$k%7BYvK=imwt0k&?l&A=7*;ywn4>H~Ku2xEkmpk96!o(OH> z{?3XI#crUEhfAxa4bo;QN7^TSC!Lh81ETLsk049y<(6`9@B$;jCw?Z!$Q81ptXC>j zoX6{tN_goFY`X+v$O_*CWA+E$mT(|s;yE#Z97K*F$B};IG;$7b?MiK~WFL8iJV#z4 z+fw$_U}`j|u5i#QDWJRxs9V$%sw-#}7ut*djE<(~f?8P*ZXy@P^fhe?8q66qiZ}4{ z3?`mg2&3DcbzogUn-qY1JkM4_HhlxU*%Fv@Ie7Cy{58Irpa?PGY%U7F3WDe)CW>98 zcyL_Ka4*^NdC=Z9;Cd0IiDIF&)qLY1#Z?)l_yccUR?5};Oa-3MshrgjbwQ_~*DyWa z8gws(kHkH3Ke+21_$^?o7q~iS{3Y=H+l|^(W;-52rohu1%O0%uvPy7c@@sSqf9CPYF$iW3qcCw(QP32TK+VJr0UULjvN0{Q8@FiZ@9EO}Wp2W%~nN+mOy zmm}oez>&=q0(y3{Ixm%}*XBa3P=9nc`Uffj{}*6QZ~;8|=g_N1@#oN^JP}B&A_|E| zgf%$wC~_AluhyV)1E?>lV^jzFQ@}_zeVwkMdoUxQhtiny%r8tkb{HGTCV)PvU|WGw zoyFyI_rS@y0aNYaZ}M#gXCWMP{AEEe@(|a1#VV1OJV7OHlP*fXOFiXKIa59?Ka{&E zZYrzi>a{sE7w~>Lfb|ZT8y1YM#aiO7cs%IC8@MS!5R<_7YzO{qN-CfcHv-OU$#xV@ z1yWy8$Eh0dJQz5OX`ug)(of-f$1?LFo7`lYvm~g(RQ446gw=5@=gb9ip)ithTry~% zJnjl;DulNHU9HYD1!}H6moMPU_)3UrYZxIX7?mVoFLjPQ3q~SOC{%yD0sM8XU@F>+ 
zsOSL<7AGc)*2khY2zuiR#7v!RuB~v2px^_QP$d>rSf-Mr6f33bx=x`UVYPJ~3)B%*suSp; zV8}CR;I*^SBH*+#(0Znr1!j%e0js%y3N>i+*eO^#mX8&JyR5=$F&l7gj^Hr8>R0?j zp|_Lq3_MF)u_@A4Ez7`n)`F)quI<|pjs!`#Xsb5rN`5F2udUmttCv|sJ|L+`Td6Mt zcUlWMQ(c+2CLO@1ilh%2OeT|QWFA>amO}Hzbcsth|C3dTR@+rOX*6wj<#lO83%A#E{qow$7I5&=P_46 zPn)t9tRpM3!E7X(#%614Eyfk}5lz9>O_8MRuTWx&e_ZDC@FPSTm9{g zQmP8rpbi{|1C7!m?E?-mnNFkg05L_{8eSc^5PQIj2V}cICW%R5(t-C2nPSEqINt`c z9to)NV*S`;@Lwus$|3JrLauk`yug=-a z0p8P+bz%){CR@!qalu?Fpud(&=1qb3;^8V|;X1rQWfe)5>I$9qI~69{@5ZudJeYt;SB6@g|Gzfh$OQ>m0D6RRIJuAOB?7MXxkDrivi-jg0W^j zV6^fes;wcGQ@BE|inHTgU^EK(8t{0|Laa~#eeD3foDBV2E4lzD=Rtp3$RzY*Cd7}E z5(m*z271~Au~03HI)NE!e9z*@f9YRp5BQ;nJeSxbu}QPO3tbkzY^iIi&rIwQ4OJ0( zoepb_wrtYOZ=s7W$i!5yGec0HCe1%?qBGScGJ2h9=19~X{isn*YqXh(4#_m#q>o2X z?7d@6?FPm7D}L%Sbi{$+`GMqx{0&~W^;G}tUEN2_>+{9LlO<)Di7gY*M6*B9M3aY^ zCVHJ-Z$%)8?%vb6%k`~XYv-#Mt-Gf-TYs^-7H~=Puoh^uCMIJ|n^@_`dSX^+8});^ zRrB%w(ZM0%Qw)yDdp#M!0b**8nb9meSRm{vV&q?62S8!7KhHz{?;u#Iz@AHLaQ?X#5|JKyiSYKUXU zqhY;IUpZ^TtXlo&sgEW;d+L5{Uv$dHZ)zXyTGGGp-0CAGW)D8`IaD_1>UF14Z3dKQ zSUvnRvoy`+jqT`PD@QvXAncZ|pR%HNaOXm^84F^rFEI9r-45| zjP<@b%Kg_Kb1y6_?~GYLy*srkbf>d2YP0R3LBH+2`X##d(9a_Z`mgT0Sr0(fZBEq9 zgC3cUc7?v|+|$$sZT}X{%}v@hF^BhNW=%~@(9Y@(txVgSTDNK0B5A{{*FxdF(6IF8 z{qBYz$ZdvtsGHlEx}w9fhMaJ#1EpYa*->ZR_`)pP{v zhPq}B%N(+BaInD;$qyJXbLPzcVGXD49}p2XAaYtrL}c`UfT+*`a6N!xz%am9MX#^= zqaVujL)reY9rbUxXPwS;Eb57lZupGq7Y_NCCxF|E{uie(pcd+dTj_Lvn(9&HyTFk-F?5eY`5#$xmxP$)jPkv^<^{b{vij}7o2;u37$3=7+duREYw2J8-!C|fTcj>(= zarfAtR(Dvwa93|jzu){%{l38vv${j>$Y*myv(h|g`rmv#{czQUyH(${c{R8$JMQye z+#&~W*kJqkC+=x=z?0dQb9;S8#pd%oPNUyq&bMjZU~ z$Tz<_hEBX{*gdZQt>pz&Y@eBRrz#F#)s4!O`uK<3ci($%c!%Dj^Fr8)r#Cb6HpW#~ zM^5NAJbQ+VGGRme<(;OR&+;fq*&7^W^0M%#HTz&vJb&-I4L|%*SX=sXz}Aiyb$d_E zkdl?NgI+CNR%$!tWMIbP1w9kfuH#Qv>N`kJzv@#6c4+`~?PoNQXX{{Fz}pFNqM z;qWb5^&qKybH<}z8yo**@55B z+w(PE@EQKq!A-?h;*ilj2Q2zz{Imsq;sQ2~t1$>+cOGtYYkl-Z#jxNmV{3mLwFmF= z$6cGS=pG{;&wkPSCnEKBm)7fs>pr1f4t*QCI~7fAIvdi!WMc|w?%yi)l9sPb-e>-V zt@7^;5! 
zA5T(=`VZ5AUQGvjNCyyeJF8zuS=#phrZjviXUM6Bb==G1E@BEp1WF9$(WWw}`u{v&IHp{-Wiqz(w=Y{Om(_Z*a}{Ir!5{ch-6C zZSHtt_mA5@-Zi_~?(3^2ob~Hy_Vbh(mH3!mZ3jHuY3^}hpYy>EF#Ow@yelF}e z@3XOe&Cu`4*}$CeKq_`?WB;Z+Kb~mW zb;LM_O|zrQ+wuopKP`Ryc<{0XKhD`ZXJMxw$#WX#c& zv_%Dl-3&c_Z5$31_jX`=rgOvT3vqjv@3QT&ZTple|E{0ib#Ptpn|!b5q|4phXWZ`*WJpUa_`vcggK9|J$8HO z+Gm@U!`5wcr^Y{6JUP~P?|`K1<1>7Y&UA2i`Z(-_!&1kkPRyudcRzRj;$$=TQ&+YO z9$;AcGQ2j{&fC#ylHba;@+czdMsD&)w>ONeS(RHbG&3~)Qu(#ylok!?@G+#rpHa&u z&FZsYJDn*+A<`)S{cSSp@b&;}y{Sc)=7=W}gG@#SBLmSE>XEHY)n*o=t>1FL8EOJA ze@Syu{dhf=I`~^_vypcnZ99Wq?C}MOPBf-D$VBL<%n_Mx3tj885M-+lvyhhKBBLS# zV*-qs2;vr^7J~I{Ch|oIUmVwx-KUL`-59VaBO4{VLH{h-y~}j}qTiN3EI^fp)9H2XiGP(_10w>W;m$(B{G(n`Ftz)- z7OUFa4OqTr(5ktYW?7^j3-aynDA%5dx)`?L%@L)t`B~xUuf9wmJ;s(=ERVa?(arX`IB3b3lH5P0-Its8@3ZcK)M?9={heo~arb@>{N7oLo!#TP z)!ePAh6P{Noa?0@+V}Wk%R^b)%`DbE4t^cne`RK0rSCLv*MP3gLc%Aeuc}y3Q@Hf` z(2sA|2_=WAC*eJJ{j|IH@&90yJ{&=%p^5`MnEIz$=$N76|de`1|19#!Oy#qH_Sfdf}ZL&ugEx-?+B--5<8B zF8Os*z`*;TG|gBfH<{VwVv{{FU9FG!`|qzVJKf3j$en>_S~>iBJ7_@F>fbi{tSUvW zZ5(=d!t>Qznz^}Hu8Hqjg7i7HXWbTM$jmO(=}Vh7eLZ`2_t!2fyYBpbSdaJ@8)}bE zJLtCh-Y+q+9jhKOYi8NF{dsL)kKmXGyIN@k#S|muY{@`uf=6 zu&~d*9}{iWa<<10SYkT^n%FK9>>rx2@_#yU{X+-X$XI43>{1D+9>~p1FpI_}6|Usn zrxsW%wDHz<=zw9U8b`oiYQ41R9Ddr7B7hlckHA#;kS zj6be?d$`}wZ4TpZ&;KN(AnH+#GN*dZ}5 zetgq=-yoMSE_a!DxYvMrM8CBAOD^3!lQzmJ_SsFvDa@hG-0D?P?KX@@I@pBdnnxG+ zS(d(~8c8;tF6Qa?a$hYOtJd!y&IF-3BI2^xF;d*n_Bv%D=2dSq|v>gZb-6|ui%oVgA3@!Nzsyavy$)5E^*WOe=Oz+$6PmPW7)(o@X=b9%1^U(*HaG#A>zhuR(A-4# zHc&y63px3(|c zdu>goPxAR=(|22#KYP4s>E5sBRF8h+_4MxVGwgr6PPUO=JzRLxE$Y7{3(tf57^qD9+NSe?%MYz9=RY!YERRo|JoMt@y)E_?cE9+s!}Inj7x=A1h7a5N&Bigi zOx8BJd%NXCx5=LPiXRKM9UmKb=1BiTJ5D_w_3fjFgHAM~*G31lzQ4Ma?a~enLbnt2 z$VZK2?VU&8lvO^-@!W&|&LyAnJGNnZ;r_p<)2jiz&tfd~`QSNmLpz-!f+v0+HWnRc zRHviR5vW_HYv!=Gb0q(sI)(9r(F4G$$`RG(NH9&APHd!1)k)Ujjg+bU?_sgW_1I(rm8|7N~diSy5{ZG%|x;Vebj{B1;|ID58!_oY> zE`_5ePJZe7Wb4!?e|BqrYRTG)1396Ud0FPQJJXB2r)GP_rME1v?Ux$y;CR-Kk4AQV 
zl2BkWdDwBw=efC)%TMb9$y$fKa~(`FC#3drTW}CcNJTo5kGiQy0ATU+QKMiY7J{1;OSO;$P2ds5=|NBB=`&+f^7HoI?gtop{tCd&>an^Tpswz)sA z`9XX*!1=pvHD!T^vvkMonyrZap{}6td*?}?w$IR+jp{z4fvFxqkKFsGmi%w2=|5u0 zul_df+|tTTK9hqlN@=N|TtC0o=H!2s^xj(LdSasc;lzK6^!`EHV8t^vE3V!qn3{y; zXu{IJxbXy3YUIYTe|R$-AMD{dp#Bd_{Tp7~<1b#^^WXL2O&=tWZh7x?dAgD? z-9%cyd1JSTeZOoiet{jiV0Lf&o7dyRI-vWGpPKUhXLnvatW3RiSX9p&H%z0XD4j}| zbk_nRN+aDJ(%rC2C`c>aAdQsdN-o{7bhC8F(!KER_xD`y^*n!`nR90D_}nqOGxx+v z1~LbHfId{?(5hWfx;&lj$?Y~mK!@oXMw@fqXE7EmB_W4Hfl~KVyE7$pU-qvzm+{oF z^huAU#GWr7ZAX?PqmJtY>%zII!j5bVL6;UC72opc!0=ohx1C29TU!Pq=hC_$pR+E@ z#P{VXs(YHQYlKrRzf3o~1D<)kZ-Rxff%j1&0t-IJqi3oBfF#65(S9g5S0x8@IDJYw z?7y=4M+`5Q_AgKs{Pzt-rf_8SF4Xu+-~MR!uaXbHiC2}S(v-cbi(R|D`RqDAjZZp( z!A}SPXzVlV41(sOm7B~`XOr>#wmn=xG@bDasjN;|xgByZf_5R5qfHG(mEo1K0PXYc z#sn4*GObmTV&NA4!Jn{|sE+*H;{!2fP7QHR=X^-Meytt*&2Kt^o5WU5;qwBPp zLc95%mi@DlXF=ArZMx6|-bp#Yo?SQWiA!y4e!%d` zK0_ql96L`ncas0%K*bO|bHda%4H?5o@jH}N@tQ};E8V%mFHM3`1NW{5WF^O4HIIV~ zg*OQtj!plFkH-?smDjPl*z#)`mO~xmGRh;;k$;GSYu4MG&36J)9TR>A*ZZAatu=d( z3`~9bG4F|V$t5DFC(LI zW`UX>mHM(9kH@{%yE_tOmB0m_(M3L+E6PsGUAH$E{HBJqr&QOX*&6MuK?CX#(QfAN zeJ<@n;HJkGpv0k|w=xRJwaUQ#$A~Ti^GN3RoytN}#pyba(6~LoWo;}=`fIQ5iMUwX zbb}}B+V!x=!u`}*hnpE1QHhok!}Z#ZZpbT$l4h!utMS!vW*N34y~b|A%W5jYJi1^e z3E4Na`7cSncA0CobgFWMLo(pYK25`00da;P#MNcdQYYaEU2Gtt=6Um4iA*%_<^D6S z>^fYWz!?VrY{1!kNYX8OBA|TxZzWt`iS+8O;bFlYq2|#y@IeRoWKyeX+mR5+kB5YZ2Rj-)G^D3}U?j0nk$=^A@vbSWe1bq0K$5!*! 
ztMU4TkLzxK%gb0cU?)a##Z);f-E6_`IR$svrVdEwlm*H%^74a2on?Q#S_?)E#S;BGiSc?86^&5| zsGOjJ=gGkEl2ICsUp&Cur?!rt<$_@WbNY*>l3^8%OF=kHtKVTdxG|3M9ec7}EM;>2 z2bbx2`RsCHODeC5%alet4*=X_>}V`&MEw*7IXR>?S-rVHoJl}$?w&*dK3(Af(S5|- zJjLwUqL$ig)oXivF}T(D8{hjn=cwE6)pb`V%7yV+YEu#w2(2v&bR<#~i#&87 zPS)pa9KUM);qzZ(k$1*_Br|e_QLldYC>%C;^c7qBJOeM{6IS8^op@fyIvcYG7`UIx zv3o>weB$FUP9Nd)*uEYiGfE#@-Wsx)O)PM}8%1_OeLY$PX5@Yd%k6XC#7{nLrWNYmQ!q0 zkFO}KHtwTn04K`=z$1ff+}dP$G=hmc8q1<7?L6oLQ3Fi~T&X9OyfV~JIwfB9NS_0j z`*iF3uYXK;WUguzpAB8zJwCoXvNDK@$rg3Reom>o)bXKRe+HQB6ZwI?gEfwg!zofK zI7pJIi`eg0!YMTPlBt;v!P?yV#j1CIHMGh~wZ8Dd7)%qa50a|claa6O!O3kD%JPVH zYH7b@Yv>JX{bAFGaL_4gZx1JUohyDU>VlUM7A0tCu{}Qg(TrmMo^+5jQ(d4n?pxs_ zI3||O05}#}iZvBmjLyRnAXzkoUo*?=vZ`Zft7B)p%*O9P{iu$mohSNB^A*;YwO};X zP`=V&%JCQlOkx=dt&rl+1k$KU!Nm$EGUReHB-6pns+D;!hVpO@q&1m!^Dx+@rFD8}<)KVz1_dr4l9crDHn@aye zQl3C0-dt&Hk%c(q;@xjN`;J>4(mXtNrr-BO1~O-x0>LDSZLCl*;>DgI57H zJEJ%lm>KvkHOwBEG7GJdk@`fw#f~JuLu_D!hLPR9Z+D+e3lNXvnp5~fR&4_3;H~G6 zGxw$HP)gVK;8gBO(HB9HTryjBTEQ2#6_=?m@xMuv%ORP_LqSeIl!x}mv^d`O2d$j+ zI)#=r%{TTvZF01x)7(u$Eu%+FcGFIN9>OUi9!L~zY1rP}>A4^pi)|nc|0_&_(hxD8 zxN)4Ww`kPrC`{^D$qYS@_>|JyR3mhvo0%dg1U!&lMj$T9tDuy(GPkTs-bwVO!D6LP zOeRlJVL%fvjD&%=+_HQ3)9A!}dQ=tq#`Kt9V z8H{i7Q>T(CM=mv(OzNm!W;xP?axz3o|Miv-B*|b-BZ&ugh)Azv<54C_V`|C7>Rd!% zTz+82CYFwD=4)avMVIWi>$3M{) zp#^BNp#JkrIGWKOoN6zpCs`? 
z@nr6O05hzv4pb&}%sw_D<{zOOZ1)IJbCgZ8`A{C#P@Gcqa#i#n!Lr$c_e{}`@til+ z^F-W}RHdy0Nsn(5FQAtXuWxnlk-`XJFAZCSO!V7PO4H~)tNwe7ll7c;f@dDgS{nM( zM7Fs+Z<1*`Wa7OX651;pTJLb!L(Q`ph{t}DOtVo=CDma_?&1*i(zAxKmrj}GMC1I( zQsVfb`e|EG7N30grMWs<-<>bkJ2Mkm0xj;}t8@9g8+-=b{T8G-s~xX)G+MDha~r^S z_;WAE=(C=fv(Y!Pw4{&aMJv%gcu!njcXhNjMPu8$;_7wDaWY{KNh%Lk%36zhy{!$U z|Jzkxm5(Z?gOp>NEYY-oq&>}92<6{de&j`>>q}{x>R5tiu>WLme*BN#_KCEys?&wU5~i0KZ15Crm4W0>8fmjNzvy{{YfduPs{9?NQ9xWhT!m=6p+aM*G58a7>UF^ z#vj9dcdC;R4*)))8CDD{ItHdXmVAs<@Ec22nMex?xeWm%ObLEcnFR|CPCnL(B&3G{ViWtviS54<|wtc1G0FIdTLUA{lZX$$u@R?G;y zvHKosP|Vd)NYt^%u*r#7!~SGGybXAf77&T~AK`hA!b4uVB(HK5L?~ zL|QV(l?IrHW6o7x75(y2ng2QUP)eSYgB?B=Eomzb|D|tTi0R%hv}Sgn>^@JSDCV_> z>6lrSq1&jkN_|HEOG_{C3iG47zqmhKnotX8zCATK_zrvi#M&|59QPpa{?k10#~Cmw z5_20dGTi?aDR1;0mc3&X{CLYVjES(3Wzl|&u$m#8dp zk~F@aM;SVkI{K{3ICm+E2Q51?VBHbRVCv1idQ}>7+VBsp56$4eD2l`lrW~n8D*-eQ z>!3qV4$^`4?*Dh=uS=Ur-x*Ywko*r@BuGNZh0RboU!j?+w-UAW?x7ws|2M|}6M#kx z|LTSIYH<4h{YMgQt;gAKfl11-&#Ek4&TIG~wav4Mw9YH7jiT-R9XMzj3lKK;HAJ=r7{OQSue~P>~)*5gEPnaP0F4V!q^~k-iAw`4Oyv zLlW`jq=OBKhf;Kgu=oEphSHbXd>LHlR_j)!l*S~tObh7}X_Nec$wSvi;wruEDI%aKTx}adiU$?LbvRA0c zHQ%q4Ufp%fAcc*usgCm~I#9U)ibG$F5%F)6X z2#8UPir}q;Rgo;wMhsk^mv->Y~!p@tb!qsRI+vfKgbl5O0bx$U#xI|5*lV zUx^zs{9%po{SKioMLR?~s{f+%i=dQo>06I^+8$h1P);+88%n;~1j<3g-c2+s=p_{D z3e*xYZs+TLwluoTl-*(#RflhIE3AaR{7=FEV*#0eA*~0Q`XB{niSrbUDf>T%jZ6_f zs-sz}qiU<8dzmTs(*2cwfiy3<5cK~9uW9(qs=Hv3w)22D0B;8Sfamo%0zRjvErkN_lxbHwrOAGDM$9vXLn)aO*QM@Nt)n1cnu19 zJ<|MZoZ(@B3fzrZS#0VI!K|5%r`4U)=Iff2=>@H}C+#@WRq-i)J0%KPZ@&kuS}NQ< z4e~W+Y6)p^AwDD6)1h{pfS-@Q&6y|99sA`c9Jbz@&ZjGjb%RSE3^$|uYaj2XM2hZ* zQm4H=qcXo*>LfHKN5wA6L;C`w1YS|W7k;Y#8gL$3D&)Kyy+&c%P8P!7RiFU>xL!Ch-HHCT)#u2+h<2A4wStzaVl zKJ7)b!W9EGA$wtk{+yo+k@utQ_{sxXN=wX^dW-n!cJ}%u8Wyt1Zp9sDMpBb#pG$IZ zc~ZttKiRB8^UbQ5<* zyd%AYUqQmj)yZRz!K!+@nw@Hd<)e494u)sq;S|Yuh=!ql?Wgv_gSdu?_vNr(G1QuL ztD0i(3}@cQuz^^|OAZ+3^PY{fH_r_p8qH6DB+V=qm6k8B%8#QHw}(~_ntu~(Q=1n> z9SzM7nQHG7?MJi=l$rj7CM0~1O<>Dxw$iUHC7$%^Utj!yrzp6P+I~B9e0YgKC_FHo 
z4LYp6Gd4y3id=0+c-dd;trlPP84;P4+~DzaHCjy<3K&w|*jL^M&^H7}cr(EdW zmk^0M3;~JkJ9f3%Whcdt1N@}!ng(k_baJO%1gpHD(I1cQ4jZr$#2N+_^*w6CNnMI< ziigSfN%V@DRX2~W-p3p3*@2LAMc$$jwC!XI+<_a8)#^Y<%_r{wM7>&Bp=XDhb-HFi~RK36_Lf|gJ=6_ z`Fo|Ad3KVku2F8Ev?#M>#67<7KzX}ccu;51XxW<7SsQ1bRL3h_=ihKe#U39+xpj%V z+})!4Oj)4v{ftsG^QD9E(5LDl`)UqTEnt-;d5|9y{#&;6r20AgEF#G`V^DW&v`h0H zvd(I?;on(ju4;p}VxQw?Jy2i=ZL4!Rmv`C_w1G4bTm2_DqB+2)aZt6oz)7nJw z!E@OrbrEh-jz+WIK64lV_rRp+(twE;##vN9>B1^$?h8-~sOE-W9)kpXFRt>rp9fQm zSAo%Rmo-5X8h}cZ{C=Pc4?xAz&4%OSqewGC09u{9{?IgV)7^YJB@FZnM|^01vVjCP zxCD%(!mkfa(>CtXPaay6$RDM+zckD2XDhOSoGpN@fD~idbJqk() zpItZ(1a`i;;}qhso(x1dAnrMK{rETsn_3hT-uM)4y*+#<`VFA)-J&9SnX5_0UCuq* zD&a1Bw{nJR$B(z&D@QoI_oB5aTNE0-F1+LCR+YNkNj-M4VL(3^&kIl6t-P>TaMo%| z5Tl6|Ob};Um4LQjU5{MsJU05l)OcM|I6rfx2BvORCOop*moWlB2Z>vsRD>1JFPQpB z_3hJQ7JgZcU@BNFa`;tkl`!h}cPGT`e`EA;)kq*qok|#iU($A4soE&=>0U6UXI>?Y zjOr-N&i=BB^H!=YeqdS%Q&Xy?`;%}KmY7g&%dNymrs*Q6e7(r2up_`YcsCwUelybe zG2zOaF?~}qVZ==#;Yx~iFoQ0|3Zl)Fewe}NVU%hW7i%3pNS&E5az?EH>~7Xe*gd#P zINJVE$mP`+ycL!}s8mZD|Jl9jC^1m4mUHmVO)mkVlV}Bd0wn^OgkGEhKRGeMH~ zQ^JK!IwM3F#yBXNm>9?aWLkaaC!Ijoug!S3VyK-Usg>Ajbe_Z*;M1qr-5TYgk`Une zwdm$U$ABVS*ze2We1~-3m`!Ep{8#!ivbFl0p7XfGv8~nzy8_8ywdczLRMy0n;#Z%^ z4%KQuQQa*0iY3=>=D2b4C($)%igKh6>@b@aupCbL70Ecfm84YqirxfY&jASo(#8#s zgR^68U#qIR^iIeA;T0+rW_(V6yS|s*b*LGyr&F;S-`X@r`2N>9@F4bK=Q>8R^fx}n z!Q<|PFLUJt!MykzH^a@(ZcnU*M)V{XSq~S#YJ9fdP`Hc*9(!U5?+41?Jq3C%lm85b zx$?sVmI_pzm;d%3`AYRO8iehSeC6B)p}O`v$J=exn!vK9Mya6y_KVt8fg*oqMZaIz zI<6v>WzZ4<^fPl%SxgV|c+&3(dNgX47B$sKt4Z1@rpl+YtV-K68b5K8Px_ml0uKJ= zb76_epX)lBl*R0dXv8-`(F%p`IAZW|^UVctO`kI;cGrA~)@^t{5=t#k@6cNj89C9g zXlTlj={p`PXua!Q#LuwE(4sNA-G?SPH2PuM!vq9$Rqq}atNCXbN63RgD~J0dDhGom z=da}RoNA9wQ0;_NX5|?zL6h#14P@qj3r%sZ9(HS=!WQ;flKrMu-YLpm$My3c6UNS7on~P^N2% z$hPF18s|4iIrC7+S`*$^C(m>bTYmAJ(FNt69(_f!6X(-=wf@aJWsd+U!5X@XEY;2V zp8+cx0aBuFg2O!;i^%J`!>NLGK9uP7t;#*w3^2(%?H?{W$8Wc~E-QVL=?>l&qR4_3 z)FhEPr|^$uuSfn{s#lw>X{l2YTR59|^5i4(p{gi^r6yGT8XF0`=(W~m9_4Vb#PZl| 
zZr`C=bwVV1ALhXbSO54B7v*cnMEYEz|EXqofh`ySK%b!unOCs7k8q`%y*7nGEzy$u zpH{|KDF^g7zV{NK)0mE+v;fYVFDKuZu~$I%=DyJM<*+8Gd-ax-KE-)^6RD+fal!QC zsMo+Otzu6s)5|ufP>|^l=H}&|n-Fyaw-VwotC@ZKhoJTGzZP{Z%!uwv(`T2@pbVc-YR{fL2?Ly2(sqndk}*rGt7Kcym|BuLnXXZuzZR0SCxkU2k?ID zm-7SXI)7`h9jmqt$Md~2|6{Q;e-d8coBhO-WBV2rv7t}Ne-4?FyQrE+S0<}ZvI8Ty zj9KY^Ip5GUO9S=6enw*J%rr_@+WR74?@ZiJf<7uvUNhR$W7H=E_20Qpgm8-=F%M#s z4_KC+Xs3*(rEbq_w6-A?t-k{!pERpsI}4`WHtiPtV6`eugF%6;rpzb_Sk`_2F)A!~#?xJ52U47B3 z;5YxlE*=|gg8(i6Ih(q@raxaW<`-0)!f#OM;W{gK{bWC|na-Vn%e_whP zzvft0z)n8DZnnN(_?*&B&f@W^Wm6B}dY;V6mY6QUaUZ96tB(rMJi_s;6knoYsvo#N z+R{GtU?y_TO(*3E4Tuk41%uZf&4!{*m*csVj`beoZY!M;H{Gj`Teqf?H&C_8(k0;Vp;d15~jo1%KJupD>X?s^_^*UgE7k1H+atOPQ z2R>wJ6-=HGW%PGBq0o%ou0lJ{y4{A^OLM^oioAJ+Q{PD4YrMu7)(9_@r z*XrH+O%{YV@Me^s%#mwF`_5ps67uMxney$q;qf;M8R$tDN)6eXCRr$;(v_;c=~}3h zI?v{Y7$A_vl0(F+>@J?>v_Za|S{n_4Wt+99(al1nbO$;Agmi8W?(hFi_BW4SHPykH zb}V6Sg~X1UL3;bW(m^V~5w$(3I?bE8V&G!XQgvX(6(vOK`M^o_iBhO7s#0v(pPMaE z%ZV-l23+}+By6RLC~)@3^&D1#;`gFAr;c2mFlJY+Qa`)0Uq=I^PN%G&&-TDlp1W#| z+>muJ@O-OT$wco--TG{Nk?h3yEFdToaJ|U^Y2T=$JMlLzJ6udAYv~u)g>BQC8ZXS} z=5TU3NRoANP&T>S+y(&x?!!-~jbU}5%bz=@Wsh1nJ$0bX?`*fUQ@32dg&RXI_t2jX z|2@cCJ@Q?3JAUoQ9nQV&C6sD8ho2bp!Y9;nZ+xPBf`a-uHxRvu)PH9=EoH{7);AJ= z<7u}uYR#WMAG}Yz*j$kb4}Y`tDFM0Ntd1dZblc~Ik_t3l*LFV{&E0C&cek7C19K25 z4(M+h{WQG%Z2P2R>~VF(!38;*Os{VH_ETl5^>hDsm|I9d{n_ry@$~qNr1gR5)L8aY z#>H5!TjzH&zpYgYUr=r|HQXB%#&?Ebqg6V0@>{JMgsf)U4{=jvjsO;u0K9#x%PlNG zKw?eAQ4RKTat8bywJnbMN;OC&zQHa{&62>BVwrZTEDP3swMe~*m^ai^%LG6c6_yb5 z17vj{j~~zD9u(UM`uS>n$Hw%|tYWJ3s-V|riwnL-B;waenPPf-K+;s{}io{<-<(&@lcGM>cG5 zb7e}lBwt++2%dRegk68X;C;9Sw7+F=1==8p6U(*-8i9S{pPposV z3}Mo+kJb;1)CyH=Ke_ocp%^f8qF(#IkHDmlou5{#Xpuyhi;e>PVu$s@;3OC%<;Wb?8#>t+~)vV4yIWpRMff? 
z6*bK1V~uWJI&`xb9PKdt+O375*` zd3+X|0-2fcRZcPj<#l0M@VD81O0b80D@&46=apr%Ot1u(ukn2m@|KXI(Y zU6|WDs7i_Pcu50hT3XLq-amIJn>n3yzj-izvgqtd2T9BR!9^5V(Z(JvMk0UFT%w77 z0xvv?vwnRtcRpJ4OQ<2-usU|8YH&WS-f6cYMesy8a0RN4>dzz3tIl5nK&-0Jv`q5nG>!rSody6x!$|!`91aAh1N6iP5RJzH7fDYAHsfUmFZ(140~~pPNPlx zce`su6yd2M+oH{i;3%~Y{pu}9g@IL2KGxdTThqs>L5%Aa0^1z|$g0_j- zH27bQI{f~r$;=?A70m@ieVge<`Chi9JQi63F>Ym&ir|(~)6XZddW9xXaB0Fs=kBMe zuE{cxvq<{6ztD$o{8@=zt<7hWOzij`KyfOFD7M?#2rvw~`a^AZ{#{kvbkv)gXrBbF zJ&GOfa89znR{rX=LDwIO6_~AlS2#KS(0boS6KL*pT5R_Q6mRETcjA!!{Bua@u{m62 z+_Vn=leT}iidP^ywdUQg2ZPNI)h)4*^l$jl0v? zLEeHXXQ#|#5!F4305^w21LYN)G=6YHPW)bjVKf=~Q!z|r^k`+4a)~*)V2L^C_Y_y7 zjrby+wqLyIjc0^lps%6BbK7~4ui>F@T-jsbTdd2)*j?9-RqFlGR*09{$QQpsgzbYI zcqQj+<8G-PXZH*_#0k(!(s_c>*58_AQf+gp%zwyhHz}RcyJ7g(5Do7mN z;dHRmd!F{@A#O~^P{a({CPNh>q*?NhA*mCMNAA$X-a^gsKF>~g(547$n?<4;uwPH( zP8}Pvr{=T?CO0XDxsl)jXFf^(EIlz?c3HhL$VH)m+%>3XR2o zUo!0<#`tp(fu4 z2bTNeN1=x`k>5`@{f3*8rRQ~L$ka$K!k0>Fd9{;O#Z+umVr81li~sh93f*?~VZsSa zriyKGoNQv1=LPzxJ%*n}W9x#Qi~rE)rgu>fDT_PYlu715PxSEn*L?*9b<6?xXEG0# zpPY=bk90r};>#R|sGu*g?;OE#!ujXHLD#+oG<{2dU8o}p!7OVYkfDghTZ!q;-JD=3 zg=F&+W?*~ac}%C!Txrg=l$y|8Pyg$MaO}2bb+w0wfyd5S5%)H;_HjFK zifYH4%)TVe=~cy0e`X-f(OUV!Sa$ro(cYiBHyRN&t>k;4h05k<{D$k*lJgX1)4lpb z$U@wv`M%_9qy=pwRN84z)((FKSAUokCq2Dt9lH8GXj}OKX{JY%aBQd&l+w9P{+rf` zb`$~FgkjtX?D7|!(f|VZ{u>l-jQ`W>^6x6UbfIxu)c2p)vSH!Pk?Y|`>yg& zCC3lBp4Xg*r(CHoTA!Ji5rTB{h9UkIESBfsP*RyU=UD2(%37B&grBE!Q&d`72xYE)~3s$MM z2ouxsT#6-m*!f2Vq<7k=`dr=I=0!NIB{f4VE_Qq^-Cb3qT2dq&P$+o)o#7Gd=xAQe z=SQ3Q>c1UkZLXMlKcL|R4Cv7Dq{*(kIPbD=25~9$0`?W^65a|V_@5}`9D7uRLH#>y zwf)-$b#gPcXq`9f7CUXb+ucS?%XU|Tq3G;@l|Gi{UATO;l^P1M{HZJ|%l>!$WF zDDt6u5;qI@m9HEwR}En%9)^$4&TSuTcu&_tTXj&HIR6#n)F9?)p?Ak z?SpK@EhUh$A9eq|61Z=s0nBC5$kprMk%OuU=0#OSN0wAq;f~BppX;xI>PpkOc zQ=l|tCCd^2)~nEm=hdNkay&S4X8+Y;XE2l@20%v+8N$7=eUO;6d%V-ht(&udyqiU& z^yE%H1wJ5d##@??jlo3U`?hpiL)@)E{^N_+ z9uSWmSWAFU9qOVK^r9VZ8!!lK;`bQK67?8MNA}GZO!Y$}rv6>=U6nt#kR2SxbwKN* z>S7FMeC`4sI;zF5ZvIp!7D&rW04u2=82!zwR`2(%nfmFbgE(v(_M=(i)TBYI9(E~B 
z7f^j8KkX$%vskUzeSNpy=H+#JKeHe;TkZX6+O`L=DY$;6zb0v?;v1C4Z4h)&d%G$! zup3kA!q&W%Gb#^$nd^Y zjV-fhuo3@Mde(pq|4u#l4r(TP?#hZxZ|pd^OV)k14d)jLH!f1*mM-Js0%&tcecQj=o_?Vb zM$d8gr48F9mk2ZPGD4LAg!B`t5lS$?X51)Fm+_%CzSmuEf=l+ur(Z(3d|}^q-}uV; zzFwVjOFz}>xWJH5ISs^BJ9nem4c%q?ubWgmtsNoYjJ1)oI!+J5+M@f)!dbKkgb z{>va5FkVjfX?)ml7C4rDaqYSFp-07@)owM3GMF2In^<3sS_3TP~ z6s7@V<&dq3qQ;-Sp%KQ;VfVf6@F2?h4Z_aInsJNoKn@=E^n3H@wBq}&pYPL4=nplo zWewkhHPIJH&MBU!-8k0#P;=DQ74pSS2z(p6E(j$0ylI4~a;{E~zL&hop#n^~-E>_z z_%5zdP>;J^tosqu0wh*D`HMdEBVOegp9Z3-ZS0P7pE`!@zW2Q}Maf?C!^r7Ajk$k) z|2hY8+T$sk+j)T^0SPW4p*_}>4)6uOKnhnXxc{;tE)x^#?h8bRy7}U*N@uRQb=Q&P zF#9&IHJxR}wYac%SjfDb?Uoz z_g=#3^b(F1xOVqWLVQgWEB+}zE)z!$RU-NHks!xseIlr2(>Z)s$~WiRE|0`&H~-n| zOMZ#?&I?N9ez)BW=QToK_I1CPIgn2NtpFM@3Q~+ucu;*`tZQlU^(LyE%2R>PDNJZA zef|F>N~W1J*cM*`6uc`SfiCFJg_}cidh`uh1Dlj&gl*s{-_#xkZ6d=cA)`2xKTOLGN(*{B=o-Sj`d|-ELL)tf- zxdKeb%zCf_}|!%I%HZigpt>r2TIz{NQ_IOP4kq$Y61+zzbdGlTWLs3y?=bHF>)$rBF9 zGb}mJr62tzbJqh7TVnx;cVP4|>BefGaYO9VlS*Z zYVNA*iwwP~qpmgKMJt`I3hE^*T-!=`N{Q#@N9|)kcM0UacDU_IBgH+aQB^hI=bh+T zu#stQ)s|uRGUh|W(1XE_tqs@&S|s`|R**hDyH|U|6IQS6td|i|-sgVqaAk#_`ll6L z!CQ^jRXZ#UeiY+okQ1VWu?X`7Z*hE#9A)brQI^;J>I>7bH#At3+?HmI!Ho)u+LB02 zA8VfLcq|&UJCfQwz6qZZ#GDdZwOwncB3I5FGSi{>R}|yTp}U-zB^3D^)vlr0Y|)Bq z2;^)jSqi|V5!dXWp;PA9%Q(&^IsNq4$2Zl>W0W^-Rx~H<(wW+aMBMU<)b=~J#=f5P z?BO0MNANu9eI13^SirdK;kwHv(KeuzPOJQT`^&N`AH^$=gL~fNe}A5^^$^Sz&!g|$ z5m}@Mxr}oVkyc=AX8AlCRtJavDIo%gr)A!GWH6w!?AxQgH5+wicp{`1wc6oWm`Gjp zad7W(zf33A8gqW!sRd2IT@?2;N zVKGzFZ5nmhvORtPHBj4+cExZw;z(tM653ea3dFzM2(!bZW7yd2NuaO~%;~hxv`m57 z|6Swe(|sOkJ2jlIr-8j4=AaJEj=Fu{+m%}Usk0jMOeq7KVl{l-q=uTGHWL|ixTl2KxP<}5X#9k}C{I^$+0la}`s$Ug7ulx|`YewOc>idgiXbEEU&!tuqag^0 z)9=nu$lqr$ch3`;>*qZPpY*<${A+w-1Asv>;Ll%4y)57}MZGPXO%|s$rz=TOF1xyc zR#82xyz`gj<@`kl!!_OGhH6VD2b+k75(`m>0V2mAxq@47p8}jDcO)fZ1#K1nO6ueO zl@$Mtf6FnQbLAeux5n+%Ot7f={cD2Vm&H13)FGCU+jX|DP|^|Kn|j!}UC45VT}jcb zS?Dyt30LpAH4L;@b@b!Ou5&Ajrb54D{id!G_pg3G(T0y^_x*0N_h&~CqV6r|%ZQcq zqYr~YM2UPS0N3DAk;SH!^z6p6Z&mqNkd~LK8+66?5~Td|wAy&!bTt^nIvUjh!e`d4 
z=X(_HhTE3^>_PvtZYm~;_PqMR;$aOMb_?%PZYppKq&IMzINi+(es&%a$wNPd_r&Bb zFGoM6#jb-)BfbHc3vC#lJhJhg^=~8c$--<&Eq73T#E72pl&jV*e>?}?A{T;ypz4+He!01?d8H=L zYa|cr&a=dor#;s3-iT zBypbXqEVjt)6U|LOG$V)8TQ-yu#raEKXhvu`t3R6Bi=LJ2HhebKiV^na@Bm-sVN~O z*SP(~*-Zz;(--5(0Z09rp@)v}H4TfsSD z&? zEEo%%kcAPsX_bD7FcduG)H)k6-ts5|l=p0wphuN>cd}Yq3&}q^dT`TCc_n$%QEt*ulUl|$LVArzw zb}#_-A-O7seSE8P|E_Za)t3lYzlsb;JntE^fJc{Gq*;Me{{=|gyo&+vjm{y_RmO)L z@_R02Y=t@;$Mh(=q3%5SeLgd8crC%$Zm^ve%cp+tXnJ=_H_un-!SvUv=5O^oPaqU?IZ=t(Q?{}E_wT)U=fe@N7O;a$)-wQ9Vce^Vl7`!lb3zf>QIeH@+fz{}qCztU03_SPnPk%Pp8oMD zl8cB*1Y%yY(HbutW0WbU%4ja6`D9^ksFZ}JY@+R-UoL?C%LX*vtV5%tSUCwZaHLm_ zp0inTh7*H4RNg*(f9i5<(+Y{YN};yLsNVzg4Aa|X+K7_wWbx{f$&zH+ydUaZy_kmw z=t%kY8S%d^hnY>JJ5-!TZyDG!_eyxj?q;MHoI%&p=hk!or5nvAQhMO`{bFvas1$k2 zs_$?m#ND8sW4!(9j)3AQZJ&zdk47aS+TYQ=jyl(G|D{u*b`hnfLR9a}iaEc|nXDTV>3&Qw0|(Ngb| zIFIwIcUZJxY@`f<3}LHok)83{Yn$yR2tG~@T~kP?c0D?~d#7|Ew^w97LK@jM-}K9*LQVD_M;Pr^+%;V(DXIm!fTzPa$J`(=(% z#pL|%!TS*+`IB%mu9cy0)7s(;Bdg=zPl`86vedT;^Y07Cx7vmW{NQ|Uqm+z%!>9~y(Wu6hqbO_M4h&4!j3yl z2>`%s;jWWpG7iK_q@fcEpG?&~KeM=Qy4qt;P?B%ZzrUOmeye~R`?fJ{|ccta-NXj;)qBp7{pDE;({0>UiBHtrj?Dv$ zx(AiZ)th8*TAulihc9(#wr3mTuc_tukb=#H^oy`3)mQMPC!O=UI_dvl zzr?u;`}dli@csz6*NUKrP%(CQ7QOXIb>iS)iaWUk+d=hcd`iDiw)}qL6bPA?8>3<+ zceS6U!!I*xb1iQdNXSllg079k^P7a-5sga}0|9!3u{eOD?-L~`_F$ZA9SgNrM?N2b12m6_#bQd<-T!j8?>-`(z8c0Pe(weK3VDX5=)z|L;sSe)3(#Wn7m$?gi@ z0F}fve~8;YX$!36&cuZrO!el|@8$EK0P(0cHI`q9#Jv~|u9kYUJ1o2iqgD)`t7 z>&P`81gXfGRpcizs`XI})|QMg=|N(6n9#I*di6?M6!0cJh_oWbTv>Y8Xqcf6|1IVKu97^@0^1F{xjm`GmckXO>auA^qUQFiE(*ySs^<%E%~l z{`o6*1zUDtjG-ar-rP9<(1Tq(SRq2k_h0L!cbe=b>XD^}T_url4;k-iOq!0}&TyY1A!h^^5CV4~jOYN{GG427$53Fq`_78^) z#DnO91471J7!;lSt9b^O;%9)7-}Ng)%@y(jx^!W9)%5cjvWdIQ@*m=1vkmW30;f%& z>s**m@kE08fomUfpHj@wCCZ(!Q7$rHT}%m^SxQT&&jb=W!88koRrV{?qnC zq*94hluSo*jhBEIN#VJF3BTE^JGqYhQfZ`zX~=tPrhLNy&GwiSRyVI-l6ffl{{US; zqQ9R?7Ypc1_)eOA%V?0x{7J}Vz2u?HPw^^#vNaX^mScx@GU`dN)!c(9GCqOwsw`m8 zMzB~EeIA}EAw1LN>a#|#uRx4Hha@gBy0sN}*htVpGf>jxfVm8W9EF0q3$v`#zfy%_ zq_a84hc=1cB*tk4fd8gTi2ll4tO;6m 
z`fkZ0nBLBi&efmSFV=Wg!w>Ra!*@pW-nYze=;t<2v-gY!P@U}7))scxmVDo2@8K2) zCO|F6Nn=A72-r@!{e_)aXrF%WSPa?}cq`psThiTR|DMajfpgC1=yL1w!S3mHGC~&7 z?=PVRl>|uWNn>EJw@YK*Ywz;(Gw*d=Y&c_#YgkfN8o7S;uhHW!g&q&f^q_sxgS)3U zJ+%L8^w1(bKL74tq(?af8c7O#0b%MZm z!X)xGAg0y!jb}EPfkL%ov1E`T59p zxT5|F2-1zVldEqhdDeVq0jMpy1R-|%7HIP6Y|!8!wQ*6`DU?}+)mG{q52By%_8v?m z_O2%VNZ@G8inuJNIE~swwDN^%xRAc&5KR5)XLvqAQmtUTgMoSSb%aNnipXu2lm?i& z*$$W=*@9f2WA)7#1v1}8@ZG4OwnCNqGucq_Xs!a6|ZoNtCkSZNh1yYp8}BqI^U0F^SeWr)8b6`Y)lODjYycMN<5q)_aTl#O(&w{My z$^;#0MAR)DofwWfDYm#6?tc1fk2Zr}u*Pj`xRkwA)zk$1^T}y9TB=xFkzI1+$#o+=a(dDEx!-E?{U{ zHkgyg4LYO;50==k42f!N}R zRT5z)fURl<)ZVe2vR(_-lCQkib4|H0|2}NSCF$TwElkTvLsPb1*%N^Gbbq;! zttSnQvOdU9_v6Auwo%9)CuFAz*=eMqS#xh1X8}nG&F;Zw7XnaRmj>+*;yKf9fq8>r zIph&?;s0qu?i3+6qdC`z#c(S{TG${4$up{yM#HDy4i2tJG3Gb zQKmPNf_@c#-Q$wjxZIri*C&O+aYUXZNKK}dJEhCa3fXbQ8+fHwU+ zIV!B4Lf=LDH`6u4l=Dr}T0+LV!L@{Fi^=3&+c~Kg;uF$WcH}x{8F7ZZM&I_Eu-JZu?C@$ImBe zxng%*v2UG`bfUEdHk7I~pmuNl-H2KtqIOa!VXId;dL+8fv8AGQ0%wt{`b`YX@9YOb z^2>FCP+4dE<~hhlo8kcTM*4YtsLeQ!+MHI<8$yI8*FL{yXV*MMf_eK zBGU^*6w)|XpMI*@fORk&eU4pFH|^0Pj;Eb;8Px0B0BtUYFIX`hgD3*I-=S-h&~m|f zet+}c5|j5E&251pOhDiAYY-arYg`m0StcRZM4E(B3mrNnB)DY$kYuvJcMG`nH5WzJ z*Vi(^9Kj$;e}6mpGW^NP&t|yqC+|4v@2-{l`$tCB-@h0f*5AG18?pR~%fja`2cMNc zpb5giL5BZjEQ3E(1>Z%!a?yQv;$lh@9-?U0=u<%I2X{f0_r_=Nd}(t`1!zwEMV#`% z#z)q}8~X@K!&`eh{Lsp}is7rT4Tr0^-0OMBY%S&_$gKG2j^jAQ^A{Xo2V;ecP6-o03skjvBSqBQ?7&NTn z99GSD10We{2%5_zxGVrcsy5ds(>ijy6!9-Q2KdJ)_@|#Lpt0Q=4q3lcUt4MvN=>z; z7ErUL)^Z^?K`2dxKS@Gqa&752p)|F&G!1v;GVLbMHIj05(H~c_T{*y=SeS08umF*1 z)p%_r_#pfmiwgdaFZ~ZI1tu`Bn+=<9nFw{7r98YJ`%;|SS&$+EnF5ne$y-zSyabo!t5z}OKg}eC4a{e zq9F|>iawC?(@#Ex+70uucBu$%hmZ!`pJ~q4-wx@`8vNSW3qPs+Z&>+`KFXhCJ&ZK*Y}p2r0>Co$jVpV0?o(V&_yr}fC(ZF44+Fm%yJ#)y<5*Mf|+ol zd>)`TgZF~h)(aoGy?6Iudut~K+Z!gj#N~PzX<`~Z!G@WfEmFJUeps_+>qK^tM`gas zlX9H66_GW1VV$1DMwr!1)kQteAmcaqG~!ovV+4MTI53Y~W3|9U7}P;?k1Ag;f`sZ@ z@jd3-9;(lsRKE#_;)DUxP&^L9!{=yo!Y}%lZ>94mK6@MPE(78)iT-0ZC@n`D^$tgp 
zNHx(vn!>DR99^?GPd{<0yeu2oIK1icCpz5ygBv0*)V~0xd0%!S&Nr2T=W(r@@Q#ed*LP+$VwyA`WWm8wpat4dC;=h6c zzxd$`F@&=m$u=b$&bJNs_b`}93TTEEng{I&Kg?5B5CZP4cz(IO^@NpoPRBvs&Jf6Vx6ZLgz$>ZO!_8=n*v zJ{A}KiGkrWh?pDxrk5~!Gd2oFkkxb*82vl+X$|6&;G#V-3VjkJY^EvLjFuJA#X*W9 zVu(SRm71#|)xV8jXAr+RE?OGIZ;piD69yT-c=~0RC^DQN-^bil;q>p|le9;L&pY}E zd}2fR{8WX{!v86JQdRtO%Y-BT?ud`XGfqzaUcxa!$B1HvcJo!0sBU3(@ConLfAcqtFNPKGk!T^vV3!`1xf8 zp9~oPT7`cyB}4DJ73dztgzq4VR=g>Bv1DCfsj2fjZa5IXQ}b+R2e3UX?|znYz}C4s&ZnPjYgrLX8t}Y_C3r?hv9H#p zg4!SD5?AZVYA|aG*ntW8tm-X*RgK91y|iqJGS4iKjHU*`-tFCJwU9ELY~JF%WLY_Y zTp(R;Gp<>d1}2h5wz=nRF-up5e7PDYo%1)g_b6z5w`uahDyucIeMRptA zcv(6$oUNaGp>2f!LhG1@2pd6KpA9YHuuBrz33y4uiR-j%OY5AICJ)!*ZE|myrp|8S zb#?Z0(wUYHU?ugeUD}!SE;}dH(#5g8XSW9TjBzKLKJ^WlR|l;%{ubnLJwMOoqRt;1W*mMO!fy3O2dTlgv&w> zq%_;?rWHEeh}gi^AqJ+j>W&I7i>@K%{n?A0`SjKPI%!D|7q=qzPw>E`G5u5#xk%n4 zdE>x%zdm!h-ZQV}7;nm4Y~*7z7n^u3X(f0P;2$-|Jh8$GJvk5<=eE$>Q8bbk!O_i* zUFVGxG{QrAq1Y%Cn*v&LSXelYG^m87JEOh=gg1?cMg#Wgr-Twbh>({#iqCNyX$(|H zI*SFqb96~J%-}~7hlxHE2XY?QmluL0&1p8^mX^0%&*9ddFztM!#$&{<4}B$yZtY_F z#3gM=1_Hb%5Z-f_BzWh^M-s%G%xU~B?kV&!lx>2&Q{cOy;S8Qef%oxeIB+_1TuyrM zEN(ogpy_02$Hm!CwUM7XbX{y2Ig*NBDyj_+Pe*A_C6AeWkE1r3l~#8F*?qa2368 z{;m&tkLm73@1ONk+TR|e_wE0Pp!Z{6N+4gk7}?%$5#E~@qryv|f4S5P-e?7$NQ3aI zBjJsa;mzv>FZnp`e_aft??RyOZSFbLekq*3*CV_?8D)4sr4oHXO6179I32oP#Qpsj z66B*@09k?ekFKcj{`y@Mc-acPQDN}duFd-1@G@n1We5-Ce^`fte4ukpT3lzxMQ$Nq z-)p_V+hOt9J8`-{stt*V6Y#ELX?HG~z3uHt_1TYO{*6P}&LAsNOuD{_QlNdwPc-TT zJnJLIC!=KT#1%f5Z=s(46I$R>)_wByWu@$hcv6adrDvWV1)S|6(@GP`HWK}A_&gGb z;ICS19u|@#wTKR4%Mzg=)ZbnI6fphG5uJk#S;S+6VL=tM(Aru7CAC6k!a=$ znnvl}Nb(bj);Cup^l$7GB~u2KXr)O53I?>nSD*}PbLZBU&ZOI=y)7ZDJ5;HE+IEj$W0F6^6tdGw4kSqB&@j5_%hL;aCf{nK zu-f7)vkGO2zM=%7D9N`lSy-6r1ErLg=DnT+uAeNV31HKumkE=kJNB33PjZodXp#1= zz0LA|c$D>sApb-DYyS!(4)b1?|7Sq{0xWQnb{v;Cv~jDPZ9KaXq_hX0Ai-46G=k?} z&cT?3ernFOYP1|zP#~113AkAY5<3AMk_x4XLMd+332d8={#}pj;B@DCDA!dC{b_M) za0~ngm3U4jPY=ralCU9-7N66Gj>pJLb>t|885>va_ZYn88C;-0F0N+Ksvc1+iDQI&SQ3KE4JQr5iPD>-nW`A`}wIPEy84 
zTJ-UehT|bg84pEdt;v@?BQusCO7fj9^3d(Vu#~T1lnE9XIzn1$StEHD$nvnsw_*l~ zwKaj!b8Dh7+ev1p5>BTPX5TJMBeN6BgfvJgZD!6C~OpUsx&QP@~$0(=LbAQES zluZT>k1ok)mP#YYujZF})iQkWKS{n?)biCzi`v!l)hf$Z3ra>Qze$}xk=_uNKT*!_ zPnGn87;g=iw-pZnQ_^GoVBT1<6MY^1vtMuuiS?twZwlW#WQS1;NQntQ=UK@@OG<;S zZS{>fS@Y?yK}QbFz{jz8OU3A$0u$x&B2JJ@c=D7qmj=xE0qWwr3+Q7&xH2XhTj>9)X0+Vc01Jx#LB=-THIFgMgB>MTX6C3My_S$W}n*9)Z%{n|KbL50{ zO6Cgd&)DMZ449(h{L_1;+w0fI;ql>QC?L<&-FOa;oxbk99HHD7>qzvT8HQS9p2an$ z#gkK2Y2x_V=Nt5%*w?p#9TYIXz8n6WZ_x5FueZQ2&}uN@qSFTB;RUA)j~A?mX>-MT zHX6@51N|7K@csv#0Y>~0Sxsh^&i5qJ*I+Uru9|P&+jZgN5#pF0F@BqL7r+BjmAq41 z%57C(UF4(}wXuZMYyyaLr)~N#-LX@R9A3x%xNIN*BGM8>(NKMi!>gPdXl7+k^VPP`K8Hd9cU{4`!WTg>?Y)s}#ljmA-r5*%% z+}BXSQDM0eUssywrLXYrx`~XaImN#VfXT|#zyRDvTVqNC*dp@k;FBFxp7^xB`_9bxZG_J_Zx`@L(K8Pce{hks}L zC+59=WRFJnJuA{)B@R44=GMV*b|j(cpjljUAW?OGhaK2k*2R&+1k^?Bz98H4N)imh zut2)4Fwv9a%fSVNE+<$WDGkp2MRf4r*+PQ1tH*Oa&ixZ%Txha125+pE3?l<=4h!Fu z?q}LMkpp`=a1L+_eA$~n!CnCR0AhIf9$+UG2}1@4M;#fVJwP-~5OV65+k-;+d7P-cLr6&Omb2#ZLSU$~e%!>SEVQqxX|o zx2`l0=lw)ib+LQId2dsUw?~IZ=_*Yl{8@M->^vXqTc{D@0<+0GDJ?*v?389(gL(5= z)Gqce_RZF`GP!Pa-9ZWAYZ%~z-ZQr}d}4@eg-i>wG0*zbz^FP%Bm6m%Yn+J_-W4(R z_y>>Fe*zV5`VLoaR!sm(k+&M!`)~S01Y^Y8p0()^*M?_`q2bW%O>c3h*?FM$9y}#q z*{NG`J#>oa-a1XKUG!yZ>Kvk=QwS(v?^-9Svw52^{rIq?MajO_q_ z1mPWnE`maS=tMz>+_E>spX~HwNfNuchbOUPk%@Kfe*u(j_C)DsWnF*l+y1`U8uQ+- zgxJ83SofZ0t@;YP7p98dPeuWZc^hMh!{W=) zq`YNoS-)hqFs!b>FPl;6XHZ|mrv{l&owvcano}Fk0CMcUd3RMN#}8`KXwGLbn)NW6 zjmunaTyY1{329!Pp6JLhTOp8bHr{meXBL`5L?c{5kX-D}!!#ASx^aa~!ygAgO}K|| z63l^SXs!^8)Ot-do_yKpaY4RyMgvdcFtwD(XJCTgVHyr!%4=Yz$!-0R2W{vMQ{-ZMI{-_UFRS!XwvsLzuF?Nc1ad}cOS>1{ZH0Qq^4dBDK$CGGzN z`zlnPEhi#@OrDWQGQ3&3&p-}4=^K!*HrCsu&4Z8pKN&fC(>vmbpb5k@yX6B?n#pKK zCy43F4q;gPBRC=oVMJ6jxs#Z3z}pqGel)Rndi_SHd7q}hxrqK!hbt!?z!Nq8weyjA z?gHMlpCvhZ!G1=ams#CE!0atJnRU>bLu-y=wXwE_^)vJFvp9|3`YjTG0s%lKXkumA zZ?0L%7#x2-?g#cQV{5zLe~Qc9!0Pt1AgJwUgd9B?7-)-VZvrV~N1jm`2wE8JMds`OM0J4echkw(8KC#h?oUL(O~bVnh32Z>Of}P-nBf z3q4Kf{Ju3B!H&0_Q|#viJGC{NH$ToXZD!u5h4z+%`n-YIJ3w@3JtKX2x=cG=zlzw; 
zHE~~x24ZM}=CD6+hz74#TU9A?IMBMhUAmPfVjz{BwMNKb$Kg2g2Q84*+ioE5Eqw2@ zs!wjIdaJ6|A{uJMjTAs(Sx-zupno7H-S1nh0RyF6NFZ999lC8higO2w^Zp^p#MHOi zUa$fOCNNzM_C3fVqcD7qZ6(k{=dpp7%p#-tkx!Z8ViY&jeUL5&ZYLjX z`i_w^t|4GZ7po5Y^8&X(5>m8(L%>vZ_zbAvR7^=?DTAvHcY>TAjwzfTQxd8Uce(We zV?$tM)!}Y;Oc}HU@BdF|;-FF8jFh8^Bhi%f@!dOF#ixU`|-s-5$r-Ju5I6X*B?Bu5>?W zMcbed5rgsoXD1*JDlMMtUFLmeqC;6imRr=jDKNP)dYiS7i9gNkNm+njeR)q`c zSbd4CzI|BVRLEK`7(nt#RkjDK48!k72d@&p+=Ewv-{`By@BF39;1{x|7e@%|p!`zF z>q4VszF1vF>7r2B31Ux+4%23%A1*{~5y(*hsp|o)-icEA{Kgo!wk$9iwCI8qovo>I z2ngkL7?d4(V9`1#%p(Hv%zp=y!0EK*OE#g^q=T5nJDMnHxBd+Ut%VEXD`?UW!SKi~ zw;55>X=T(E{ictu-z@Y8UxxIXN!D*gfA2%TC8Bo`=n_|d7h=LS6Cy~E0p|oU=L`*%BPWeNuly=KF-$9iA!&r zt>@#2!Dcswj>JW|-;|H89ZcjRj#6AIVbdrTcqjcy8!~#Y#fKxx{1K-353w0$wuRo< zDT<-~BH5P6`JQW9o%7td9Z2g@?_*azAcjvwdZG?8yJ)gl8S*gaYuYU@cL*qZj^;_IF7gsTGcf5;k z%S>cJ8uMr=-It`_oDfB~1}ZuDF~f$w`jM84*uO<;Cn2?&DIF`Bi(pdYA#qdiT!GZ@ zp^31nJRtjAJFA|&<1L@Txs!z440`uKJh7X>)#hgL?1IEQZqD1HchV=n5Jg;1@x;2Q z<9a*{ToQJ_5bD9z3%kiP0&km}_gb>ZLQ^?0<(CTseXF#{KA+*~Mw4_N)VvpF!}A~m zsO1`czREZcwaGbs!e&dXtAM9v@LK1*cgUi^x(^4l|DRr@15J5-+)>q8qH=?l6i zIkHxZd}fsy2hCaf)-=iYg);sueT+Zp{ECJjgkGQGD_U{o@q6H$z+jxReDokYu?ec- z&u_}yXq0`(uGNc^5S3wMg)!pzt9nho{$Ti`T`&5KyCQwU^JiRD{z!UF=}oUI@o(bQ z@~^jJiu&LNTRZb!68YG4e|sBRWO+$0VyrpjxsDX2k;y(iSUxL<`WA>ne^QhL2@5nr z|Jsr5J0Y)k<9yCsjgFqkQQG-WTq4h?JIs72@dk1A{QN*(t)q=_fF9nORGs*+AA#qj zlkH*T1y6rCj>9yTa-Pwl!yj{?8j)|K*S&!<5bU0b-os+xM#8r$hd);LI{aaQYMb(A zYBKFdJm|ePG^|^Z7H<)Y0J!2s^wYNfJEBGc`p~aH`GR{>F ztuXJ^me)+l<_)tUng6Pk98gKHX!6c_d3o1I{hq6?KjHHA=d;si7fBIJ8)jr``QhG5 ztA@9FHv&ft#D$R(i?>Piz9$9@@Dmbn5P2%-t`9^J_yg>ixJKT9h46-;wZMe~AbyX; z+%T?Yp?$#8c1mur{CoDd_vmviY6@+Io_;>((IJyA`yju?6QuVc{Hd)FtF)(HH;Ts||=YLw~VRSZsn8 zlm>=|Fp^UjTZF|{TvJc#V}Bz${z=kiVx^IT;m!{LGd9j{;bYwT#mJv*G~a+u7jYP@ z0>t+R`v4trt(p&Z##d-(G|5LGfDcGRxCP3_uN6(MxYlSEm$YLwhPn8|&9Dn6%=_LV zdDbzVabF3mt-TZm8{Z~&jgGenS~RB$O(|b1yRFgeUoZh>7lJ7#gftKs_)l7?U{OwT 
zW&>lJ*sjrw|3KT($VHULkLWP{=s(n^rRsa%<hVTh4QA2aGRAB$y+xrvN|!eeX}xAC`cfylR+y93voAxb|x7N!b$sU#;&xGj||OcV0pN1A+86t5RuWW#R)-Y5|xY^74%w-~~B zcDuAcwKfKXbXjA%U%6w%^o3iC*H<>Zq2cNr#=2{rlMI&H0r)Q_e}>78OAe{#2Q$zG zZ~ly+gL{-K(X5{6`*jhsg{!hJKoN&M)XOExJz6OJtVM7*dp~SU$sixgrcS;JQJmB{iTfnA^$pQy|`WBk)6etwyqc zK0^Cver8z)e+?KmFmH40m6iTU^IrRj_NC~&t)xvTq0ROV<_e{x%_)1zmV7#aY194( zjvYs9qeWe~FVkTmt20Pp+wem@4FcbS2`dH=p6b39_*TNV1cnhi_3>@GCiLOi1Sx|3 zNcAC)PAEJ=4A3X&a1`tzW_Z2t#B(zu*iLy_B7Y=kcC|=mR}30l8ITRYXWISz90m*nZ7$6@M)gMxXXvNf7(9O#{SJ~P*Hu?T>}&4D)a>j{CILv`>JtLfBz zRdasBaHwH{`E|WPc3*^NB{sys_rRy-y(RAxdk3D3m1Of#1s~= z11*prCi(|#tt(@I3CbEtY=J9cfRHxe2U{CnP%DTS&gfyPOukrsp>=e&qzn46|9cHo z#r}62>n|d|#y~4!;*D;NoClP<&K*wrIB*>4D#l98l*2Aj%qm- z{4o2b*O2c$UzEpV+SQK7u=-Q4V4vkDgOUo_ugPG)CWY+RB={cYPZ8`*CVw@rYB+N9 zxLZN&!VF~q`vJ*rOm0$nvCaoWMb@@8{*C7K`kM26Kh#t0&1^=K!FkM1=DzqSytn_E zO^Ff<$VW^x*ccapaRYYkL|fB}7}6y0-A-H%qh+V@;Ac0U8@C%%PU`Ub0c0O~qkwOb;@_xXfn?4|G$~S?}d-VrK0YJm^J=`lT>|m?K z&4RtN=9GE!OVHSjM)Vx|9G;|Lt2OA&(C>-GWgt8X>wl7Tk^{`o>3Z8nAfD%DTieFC z^?F;+${2D%C_z7>=a3xEQwlj!dPbCVK?UQ}a1kjD=l~NqjhfSN|C0H$^Nd4#tr_j( z^z*s3Fl*>W$%YPvfBGPZne~WW%{}N~-`+_IJ4#cU%SODn=dMY7<(-J(ccY7ixIccMk>o?!LbIPL^-e{)+J*4{K$AX~%@Z&dfsQ zFYO>YM{^|!EXjbJ zDdaMkfVgC@kS=uh-3cs!9u7fdeGKm~G4V|q;R-p3q|4ri?keeqNz*}7xu73O>|J0y z8&?eS>FpR`0XKrJe`IbVM@grjOdLJyw*uRsZD_GV$noFxu0718?vA)jd$;S+rN zsUSD3`PB7S+;M{U^z=jJf+?>IcV^hif^>fpD-B)> z>H#{xlcxV3@}Y4)|5@W5j0tVCtO=sPx5ZC z8Vh`hgNlSw3+Y**C9{0`!R#8KX<`NQ%PpDd1 z8Be>5)epa>Fn{Y2-r!mOiqpWINXO^D7sU`e?Y>K1{E;Io1oN}6Eexk(^ixsN7}?(C zYX9;CZ+y0()=od=o&rK>B1}EqQ+)QV@O#WLHgIZ7eqLMhSV0AJq}Ht~aK~2Q$q1Ji zKMwt(cY1q74@~P?z)FqJVia?kAGiM;v@l>1S*j?a)YkA)k4dF6N`B7w&nS6}ACIKJ z9)5X75&K!2eK$V@B%}={gK;30Z1#h^F8x#vX+%pRcx)%PQ7e01kL+J`?d)jFM<4S_ z=f=YHiyR0TN#VJ4f1JHby6CEFRQk``F(V>AMNzs~B)6(aZk1DM6_CL_rN}Ap2kECU zCxQtyu(ItN&nOjrME)S@l-da@GlrCO;_VIG4SyfqSYZpQ4@fPzdi;lw!cN4$BZjML zxN+05(F^YGk(!cmK+CE8{Yd<0WPY1duY&(d+GBN`YW>+Q@n5p!H9scZpHF0gEt&rd4a^-_*7@sD^gs8#VyrRetXy(uwLJ^<~Jm|hjNLyT< 
zS%W^D`RV1EFCTy(p23iUKlpKwau9!vA`FTAq5_KX>QSL(rt_&q(&!D0^|2b91@yY_ zKo-A@2+#|FtW*r6sc6UrM0ezO?7}u)*2TW8_lMDcQ^)OL^gs>ft9b#NBhN8Ws7#T_ zJIj3&qTYu1-Cpi}?F0A$^9V@7Uwk$soyH`7npA~S)O}Npe=+|ilO3*Roe|aR4_Ne@ z;2;^z&=Oz9Vss*&ZP)o2Akgty7g>V>#}s*T@Gi-tbC|GENfVu!Er^rkOvzpY9J#Bo3GuTQ)UtRkQvIv#a=z{`4yibO>$? zxKR4f!O&~UNX6bf)a#1MVKaff0rd z>I3OgTfAL2d&bU0p7o6Mc4ve6f z@Dh@|m>+ij(tAh*ktH~A(MoU}7aVPZ3^T?{_+!M>d6rV^6yL|jv7+~J- zjKfd$G{PTHsh#DS>lgHRlHgPIDG$DT;r&jJxd@qkMHj9az$EP~yh#OGeD?ipVb5pZ z;RN`Z8>M(_kN2U>L&f!3iv~7GAxG6?(_2u4=erYyIJ=YO42ERi5m!Td_T3vDdp2!9siCu2km z4rM&}444wZlNJRUfd4ObIs1zV?U z(P514E%9uqOWd>YbnYYy?9-Wf0Bcg~Ys zbIf!7uKu3uFcu8E*JG8!DnfS;bjsIg@Kyz=2%edD6l27&nF$P$f7FPcc(>M#rbHE6 zalnE)SKv+36nN{Z@U%|qjc3B75T5uw8lH9B74W?Os`2clFQ4jBpEoOMPY3j*#-N~^ zQqv(BbYgt9RF~2v($~e4f2f7hRkI4b z1t##_^yBBHz0E?&@o}>O3!>xINdMgOd8lKx%%AC`pufv}YJLomk*6nkKw=y_C4v7~ z@E<*f)2N>T)xw`7>5pFeb8ZqRv9()Ye;2N=y!ncjlNODQ`T69ClNy0$EU;5A`Nl*p zt~!a!^2b$y@+nz>Iu?H%V|N_xhfXG0AbIn|oFx-G0gaom&NCnj6FbQ} zI1T=!XVdNIc+cufmDkB6t>Z_DdRO%Qd@0Eaf#*Y?+hpO1AL>$|c3=k`eTGg7A^sb{!w zvrW?@ZMIcac5e~n$N5H*oe^uXhg!j6|nmAgIu)*%gx&ZT zTq(AqP{ZZpBXx0j_}pG!eaK^}Yas7UZ{Rh#v+J~ARh|R=55_)D?VpIj*=3d!1Malj zz^J$H@!5A{RAMLn&6B92eU9CNy#-rE5B(mjlViShmah`Zch3xoleqBFF*VX<7 zUfcPu&SaY&XRdC7m+C|2>eq26pRXs5BXr+1^Z;S5A&^v6nV`XVU`MLf=s6f+Ewm{a z@Ae!?KLmiM&yEQc!QWc@3&h?c@-N_d$fQQUQ(9mId|Uw^ z?g+KF);b9a3B13~@qR8)&;}HI1Q*4j%WlBN@<#xrmZ{iWeyC_~g||BNKJW0uM5VPI z`5%unnBG7G$WLv5(sJ_RuPirgCCT1}A@+m3Xy_>RM1;0-8Ze)6?% zFb-rlEgGr*kVZay78%jX6^dvj{E0aW!O%?0!E`de?ywZ(BM-?jtzLdz+gD7h`?f~L zv>N(`vN2$Z9@FX^ZVWsTxiJtE!lC2mOq|Lwt<0|%vVDsgc<)P!Yb6w(Q$kx=5w%K7 z5!*`brg8X+p{+8M&{j>4g+g2X^EGS-n$hQq=$Y5FQfRAbxRI#d+PMAoURxWzqH?YH zK^XRVyMx%@^m7D=p}0t8m(O8R_E~~K47apNv%UW&tW{Z`h z2VTRS3OO>=|gUkw+c1r7PpGmjK zr=OPZq{6E>ppw}hRzkG%E$9Nj70{xYfDQWIl)SV`zK1XD3Uk28%uOphfY4+Z>&w-g zKak|nH*qOh=MPx$$C#`-ho!ukyXi)+UyH}MYK>Xstv689jyZb%fZlW6ri&cF@myy+ z%Ip7>9heI57LEf!?=v~x)!yTH4yuDXpgt@ 
zxW&i&AsLjbVa7(!WKy!f+P`6(`Sljkn9^)(S$Qoe*p!pd7PqE=3TYTrLc?v)CJ4s9f@cJCZwNdhYpQ+^1RScv@o1|5q)}u44&V{F!0`!#Ch*Y=A8Ms9t#{~rTba!(A|Ezey?Pg zc=j^;eCIB-&lP{0&+UrpZ}ag#MDe$&fj*Y}Up~S7QKATo;%`&)Yv4@%VtT`_aDST& z-p$eBjg1EHXZvJ$Ka$|>hz{>(zmnkX`yKj4oKb%q$MxI43xjh9z;U?~=xe{1;dDrF zCLx^h5}bM(`-JT05(*Un>%&;WEp%Dxy)h`lMPfZ2DubT!yI%e-*VhkyAMjYi@JNs@ zK;RA3JdnGIV^N3*C7T7l7DSCuSUr=?Fya>#}Q-6BS5@WnBX7sQ1~&>%3b7e5#5u*Y~q}Z96gxrhv~Zs zFzv{RnQjm8g8@27eR9~hTtn{!$s9 zOPWj{qPKB?Xy1vxMuGC00J_gYMQ*99Lc-P>aVvrWhbiVBV$l19f9(L0>DtxwB$;sk3e?vd;OyQOjmvpgzy6+r{WOAV;O(9;S# z&K`>Xf2!)1#L&V&i{kV)bImWZ5UttA8Kt=*j4=D&0H>x;eayN3brAm%Dg?s*4@lkeOZr0~FvOlr;>B=a&Gx9k&}0AA9wt z4CHrn^$WOOQMrvX*PzEQ%s*GcYAWG(1`)d}ULHti_yno?1+>#7S888uqifeq3tys;`~Q}LD1@+hFUY}vemF8 z1CX*Qb`tb!o&6=`$;le2{@fd+E*_-`b#a+hroiW(Yl8CSRwc2+T=O-MugS2C9L}XD z;(~+vB@PDl`Rf@)GnKlAScB3lrpb^JNC;MVaY#^L#WfiwU}b_*8GT8rCL43tIbN!D zyu`}9iDeuw$=x$Trq!Zgu{Eri=TJZoV_T0MBc(5us+*}`RXIf6KO;U@`X|vV8Y}w; zdnQi?@Kt|cT1F%9K&Htc*VVg`|vHofU2-zG>J>ks|dX^&<@+8S?@w$}bMFre@< zjt0TL1;i75C7Hn}QDB@HB-hi*&x2QYKZ$35E@hjnWP3@;*6xW9;_yo)k@EfE^`?I4 z)xOF8w1)T|U|v-uEeI-bZbe%9~JObLUUebO?nD(nzv|qiX z{pw}ycf?m%4{s6L@7HSWH`gL-zax+lwO{S@!e2|;ub!T1l(b({tlUD{ah;_7l9~3y zGLrUF!`M*9v>$#Xg`44fLET_fee+TEUE?dM7fNa|yAEhUN@}NDZKR}%zY)}5Lhp}5 z;2WyI%}fQpBouBA{1I3;Oa*Qh9JL`8SPSESvy`LS7_F$nt%OwzMg(ZXg|ar>8n6eeF^9FK z_Mb{yCPG_mtSyyM(SKM5^_wIH>ZLmS(`wa4l6tGIodXngqf%WzRT4i_RM&FUHX~w! 
zgYjupUAClf)wOs)P{Kv1u2}UW|t>w1#8?LjqW-;BP)?OQ>CfuPz>e-XenL!W(Tu@mo* zg+s%TUf$3Vhc;Xk>+y)<2ZQ4*ps^bn5M~@Z;T<$$oOURIYIu9%8tr<~%>hZ7dDI zCn+6r54BzJ3{LlZdyJ^myl1}l`iDZ-i2N3q*t<2f_A@5t7#Y3(Q79JEQS;K-*GldA zR;`8Z=&#a3$DQtr+9lvETb0YK8{L#I(Kb(nim%sF6kiEWxo|#?*itT==$qdLkJ1`}vzeFI~f0UbtT< z+zyHfz7}qWxmjWB`h==A8g9djS%K$~d_Xw9UlW&>g)P|()i#02mb4SsmUhyFoosDs z?k`kM;$gG}*MApicH#1`CMA295&w?KCY2UnzNVmVRMn;V9)0aKfw(5kyz*x4)G6Kr zTGF!VZ^+|IK=CmK-~FS&Q1}fD?un_((**kY&*;v#CMTH|*J?>qS<`F{S1R3p7zmnW zApJa%t&Ds0ll_4h@AcenO>M5ew9!4Kz73_qCy(PAzO3ZBSTy>$KWV_y6L`OnYZP)V zLavpa*n}@%?bdL0B_`(Py12Od6j;)EMxWy#9?YbPXTfaAWnTK-Tm=S&p#4~qMNY!F z4D|fpaZf23DupgGHszXb+MOCOkyy~I7ok$;)4A5MNVl0x6kO}e)7lzwQ96??N@s>GN{g$&_TEjudsMOD zcRT4V>w=5SyTK-;_YquVF2L2p??3HhiFv=%r;l^y6R@bBIEe19S+&nT%GP5fTR!zu z)f)3|f!|LDlBjFU`$6(bYsI+E99l1)S?%9|{7T>4i$b_QaQ$Ugk8c~^YxQ^+TRE=B zHROM5^?0{x_4sbXzqxw6TdCnpOt01B-NDsko#6_r$Gce_+heX~^|(HA^?0YVh~A}u z=U?b+^>{-}aP@fB4r%px>JB!-%YQD7aJR93G4kiKpDSB*?n(`=xUz#PC6>%G)9Puy zl^So)xRpBhxKh&8a7ODCR~yCn)AD(4&w3%x7#M)QG4hPwx>5Z^P@Keorc%0|=c4a&(F(qz^-gF)ZvWe!+qclVIA0NZJ8O`jemGrrsZZ zuMQyNo>5=paUn6)lN=+uuQkC3&=<5psKUpJM(T=%>mDVq5>%o6e%vw@m`AAaHc3&YzgVaW7I{d=;*uR z9FQh3jP$QL#Or*w>Z>wuL?1)*G@vJqF7g(=A83Nd3Q8KKfP2^cJ{({4nXlB$0xzI| z#f?j+Z$UCm`7tg6vE%y`S0gc8uxba-ZK`j;{`$vHW&6(RJ)^~o&h`$^{8!FG=h6FX zaZkVI6rbWwpa*}3UYa_Q@&i~%g%pMTQR@9`H>;QJq|>$^QXee_VD(`p8KiLH2;P^9s#_ zEZ;vW(|SBmWbqbk9t}|h1G6cuYrGVs^bC(w-)@#8 zqT3E|T)#342quy%MN7&}bNUu0P_Y_0jqdeH2X5SA#Yb(I4|GEBdrCT`+{tbPpl7J& zEVhB)#6<)#Q8cn*T$pLARWdGOYI9=(2Cy#ba$}lu^_tS~^Yp;ViFmXJhGl|zZ>-N@ zanej6VV?I34gWOE-mR$4e>{;;qUicKpk^<48|+bO%{!;-EKV-|y9{Gq*z(qC%1 z(~eH*09}3`HSWNBC0Li;T3k%bthSRS-KCx+sP3dv-ASnKNvIAI;Afko8?v0VcOOC{ zOND;6%If0UkO-r^~9bGW|LrVp$}9lYR2ybn)vKi;93)#I2X}zUrE*( z6b02!x@gNLQLMAK!W<2{xF!8ivktc_%LQYgaK;25-)i1!KMJFSe-~}hBD_4~qJP%A=gea-j{r1o|n}W-@dfCVJ4! 
zCjL5aoCcS{q(j@ti!`qeOxPq&zAt2QVW>@!PlAyrxp55otasASa3DI8QP*LZFah0f z3(NHaUKB{;+!^&VkX{*eL3$lj=(S&=*AY5ny+W^pz3J8Ur9!WRNUu)1tqMJqwxWAF z#}TPj-qBp(CU%8Vav#(R_WsY1LPzi@h6WNc?HJywvW4>)q#_CxULUBU!hSUsvIUt6 zs{WVTQz2ZoJrAtAOnb&fwx?y|)wHMK7gyJwF9!70o`-6CwTInrO4WtkzqBX%9zWBK zx}qdm@`+w)Tm(2+n`6zet4NG!-GGU-*A}r4sP(Y*5@X9}#m`vX?<3X^K zBLV|IY&-cyvjLkP==`uvY5tccx%mS%*!E^@y&2sa6zQcaV)K5C{xp-gQ2WRIr|thZ zto=)UzBal2r`7FG{7%*8524NY)syCZt&O$$26daShc;iw+MNGkVzL;YAl>!(u+L_9 z#a*Gi72<9S=nWykaO)t4F1ivtl2=vpU_=fT9T zjAxjefrS{D6yBsiZV79WkgxA^SP$#p*RcL&b!b>C?P;mLY^nq$H`YT>{tRc>n69O*TX4YzXOy#lk9$ztnfIv6RHYBA>L;qIYPR5l z4ckp)yJ&8W)2&rHj20e+<{a`Kw5Bw8taz!ee{GyJ5CXAHI!!=Z8^^YC?MGzyboE(+ z9gn93-P3J{inamvkD$4;9e1L(fk^Azz;>d5u5x#6iwzr0P1sul1(%*CKEis7uJzzb z(PkhV`txuS`!;cT7-Qjz`vj`6)@EQW+;}~*M&_-!CLJeZP7oszVQ-b#>iRm36tsm6 z`>mcNJWIh3uQ_CX++VAMKh=dewY;b?SI zx02P?dUhtZQ(p0~b@H*^!y;)67)V(H$xC!O3+SFBs16d*XIP8iIJl{Yo!p&u(tq-A zr>|WKbo%(r`GeW~1QZLh>asGwHZ8C0cvQnZiGEuSq#U^4{{+u*PhPl490zqRzU;{# zwz<)XWGiiAC*^=~DBf@8kI;wTlV+7I-paGs|2PO*0R>ijLB7bJancj9Y!xj$58?f| z8YhLGLhk`k+1u|S6P)!POp=E~tK_Y*CCpo6i<7>!G3c!k8bbfJPC6A$Z`ugluJTc? 
z(0B31{~r3@e)xZwzU47N`p#$c9e(V}^j&4RO8P!{>?-N&-VjFLO%KcTeWx;1x$H%)dY<%nwA0O*zhxdg(8oX`)jD%Mngy)`LKji})|Lc)I>w!7z>h6W&I23r<@pK zIQ|`W#ODRD6na*OVx-5|s>(#12a}PqPIY=LVLj>^za*9$<4jmg~l_>Bhd#w2dl0J1vC#?w_XDLiFs|9Qc+f6q7|&mKTgHe*G; zo<8&iv(h0S+OmSfBota;=oR9+G_JohhZgX+w&V>`(ncCr>U{P#k*93I{RLLV7oTIG zSs)U>OImWj56qzZ*pfSZ#wzU=+K<6WtEzm1^Gqc6jx3?1Ua;>8j(|PTgP`Lp*5L@) zLrUt4i(%kzTrH1)BQO*~_crRX2lQT_u%bSzx+kd5luQ2PV%0`H$H&kFz%zLHF79&{ znJ<<%WEwn%0RF4pt&zQ-x{dXlkUy3I)O1m(>`XtEQ-GbCu9HGj2-*4~pQA%4>7>qO z8cyy*7B8B#5xdE@SZ}WWDVj<;^;-U6^etO+$}^bg+!~S=mTotZ#rgvJCp{J_j<~c; zR$9ED-s0`Kg&#_C64>fsP9l1(kypHLNcWSY(lU3G#~Lk5o>z?7a1s~UM*+eZRAt#( zX+bfcT_eNQsD&2WTPqfWNCrcu#Ke}R(v>ACZx3$M#-_aGn_|iK=||?foOH>L+1(op zE>TVVF}tuvngbT};CHlO5nG-rIE2ee(%rk~ek{9Pvm1T+7Mpgp1%no#_c1}+q}OnP zIAF>Yt&@Jx5#fMb^55&t;5^rmyj1wRVG#5ATNd)>+(=?0-T0O&?!6WNzv%gOfLuY0 zd9S8SDwNQx5Z*H~@0FM6k`V3)l9z_Xh9KM|87xLw&<2KLfa3@k-lVOxj5M-q=JmU8 zHAoScRWnH3!cuSHKaY|Rr?jOir0bqG#$r6opCWo!WxZAe@j zS897olp-WHvM4FNFO*}%xWU96wt*yFT6P5zh=IKGY$vXU<6T0$`VXT42j0z?SaY8L zNLsdZ8)@;{U_c0rY>(atj4T?29@oHGC5IZH>AE%S!nj*s|2&STco@#<=Y5V&nBRin zBr`Fez<1Y(yxm0~#hdkAMf61tu4PD@QwtNC^DMXE*y?%^gy>A%xWdMTU2J;>Q=jdC zf2t~toOGQ&ECWEK1nq9$9HlB+Qk`g!b;GDIA`AT%WG7w7 z%k+VK53UaOwMp7l`UbKbYE1X{_SaA~v-?)`*D$zZ_pKUBQw+XoX!`3o%d{Jx?YdR% zk;l!^_V3|KJ(c)`(Sx4&9>YG-jiTfsNOE7Pc#JF}czr*G4cM#V?9#F@e`@*I+1>Ov zWg4ymk)hAwfEkNFxnlfcT&hv~8^a627%=%9^M%5KI$8vKm(Skm);irW%o(wc?T||T z?y&3rltglk#ATxUfys1+AuheVsmJQefcnypNgmwnZR8uMun25n-#Q~Zxz$4K32r0U z$6yH-3qw*Gg#!II&ynwh!qem%q42Cwctj{Xj`xfCF2PQTDUIBo1_X*bfSap@9lUXL zu4QztHQ+>~Ne|)ArJq`qFM2*a4Nn`TJl+ND)HQjJahlj6MS9;xBl(&Xb_lDxz4vu< zIo?a!^+!o%w@+^a9KH88>N#ObBjLMQ$z_O-$D+c8!kJADJ`$JHG4)m^Q9V&u~S_XEC`WrD{#^ScqI5> zQq6gDZ7T|uIUnvu|5fyT{USOQKPKwt1FnUg zLp&eu0bl}D-=kTvef2=w(G|Z47y&@wIPND@o+U>G$8mC@=9Kv-t!QXKM;4wF`A(qZ2mh?j@@3=LK zM;w3_v|^-K`#It`o{hfG*66){;LLLX*mijRQ!Ia(&T$48JsoF#CXgh@VGa~iId%Lb z@{oSS*W`WDBs`>#tQPQX6+k&Pc^izP{Va0salR>FEkM)Hm!kn|vV%S-Ik6!2GoL{ozGJ)*sfpik!3w z+P?%P`$dGzw+Xi&)^&3o&h~D 
z+H+W+9oNKvT((5^%~6Q6HIw&1m(Th&(Z*qnLEoaT1*Cu6tuQv2djzyXT@_}&Y1-BBIsWa9Pr>Y60K5%p)La+XY^XAp4LOlKx%;lHl;+=4EyfLSB`C1U=HDJ=g$%#UUH zuhCalWM(OUy5AX`B2c7q))T^pf^h*{OZ_@k_i8k*$YXOm`_pmh^53zDp|8 zEqeWWr}WJzxhyq^@lL2ZO8iIWKaaT%tL~jm@>hu_Mnr{j*up|3ekE{pFo#~9qgVeh zm%+V5-(Uo$MkSt;hdJ=Y33G9YI2MhnlEz-;Bcts!s=m9&xixH8Kje&r*JTCbT=WqT zBgMir$$8z8_LU*ubCLBM`@kzSSUl`rpcC+wPcRUu6dvE_@e^j&#=O?q-f17PFrd)94QL=QEYQf?pgR zFr5LNC5xrPj$(lPrIF*%Ka9%hv_x<3vMvr+g`6}Iozd94vC(%iTHzT67S|kXw6i`I zdmFwBc9k{ zj&t;Z@A0@+H{Gdc?hkuOL|6KGG5#F7No~bp9rQPuA-lBnp6P~h5oV7{(YGB|=37Y| zJNIt%`+7v(-eiohD8_eSwf4?1fZ|vL7A}urLLoAsoaA)YGg&&G04bf4C(+T8r^^_2 z9w7>ThZ^1b#fKV${TbohQ)03|^~cE>vYCtC%zguo-2g!T{13u07$^Q(C2Kq#`UksEkqRIt&PC4U7slVEm$_lIz-|6#@Yd|*^EAOiBy#gk&{Of}Gqkb3T&qVM?;?i*a z?VUSL6z8T&`iAKvFa=kq!dzTxaNamxlUvryS+6{PuS$RB=~W8Uw@@!@P#JzuWCitU zT2y^%W``MvA)Js-?PEMsf+sx|j1zHoU7pGn3x=rpOa5lx>1MIvqkNa;t-~m0rwZBQ zdim3om&JVD(tRu)>cPb&>iNAjOn;{z!igZvq#fscj*fg##oa+i#WeNaUjCDj9Nr(L zYWQos3!yQ2dY8(SVU#QdLWJn&){604?q=pE2;SxJE12T$FUId;D}b`tk?H>|%B7OV zb{52`h^$Sn5iulLq)AfRNKdcRMA~y0ydFuI#m?o&w*s-u`xGzaVE%WS8)8gQe-=CM z&eiKr=`57LM|S<{Fw#04Y4G<3Tzt!f4+m01@qLDg@keA&_Mq_J9xskXd$E9=EF$X= z%_K6+cF6^-Z{F)CD=cWWgl?bRlgyQtEs3!2kw3h%I8U57RgB*cb$hSpN>%+Odprb< zwo32Ti}Cds5dgCK;V;-Bha&qQv%5K0MeCiQ@Z&ig=>|Zzzl*+v>q}5|Y0%yJkzOu4 z^P~9I?~l2!W9RI515_hw{d&rtgcTt%-ZK!?y# zU3mYxn{HkJGp3(xVL=K(Z99D%jNVJu%DG^VHPjsQOa<1=T-eRuMDoyAfl;=xhApC{ zXsxW)(_fr}kp`oc)j=NJ!j@HYyv)VG#$gCQX4^(V}pKgsT3%Z0^APq$RcwlC_-?z}{&4jMZ@-|q#UDowq2gd!N z(<;l`IkWWW3T)0S45$Tv(3w2^v4}rLARs#0pLS@X%@-uHpX z{|nPdv+dG~QDl&>o}0StAQm|QMM$&v1P9q|mnzQ+*+35q`8dTgV!1wLvC+21SZRWa zvD!vfZLv`p1et5juOGNkUvq4QG42F!t~s?a4%&m;QXTS# zuHjIqee^mXdQ0w3_v`Fk#olAw1dK|s=>v>O5ZPYVdAMkF6Yvkns2d9C9vCPEU^72hRFW7TVZx~r2fuHDtiEDU2me_ z66O)`$y|AxQ<_=VXJm!NyKjFRD^R^R!n>>#<2~rL-Mp`*o*j<1web4MxQc@De~h~| zN(3e*&(^Szp07w58(0(zsp*ixybnF?ukW2h@ozY;M7caQVJ@x_Eq53!ubfu-H~ej( z?BCF_tS|qD>32!$JHo%=@_#^qDEbMH2K?+j!_x6R<7k5WR>eSijNO@$WewW9P`eF$p^?ikU3W5BY?#D=g*+wvC1T<;V 
zNJF#cAoMG69$?>t*#C8j5>L+$v)W8Mi>KELdDxxKG$D72kekt*Ys6wOEThKLJCYTQ zr`ILL(~~0UCFq%J$AwG6BI!y0Dc&Iq5&lKDu{m}Q)IuF4?9vW;4M*|;*AF?1%PP5g zcJSb+uzCv3U&d~)blsucoqfg+qNF)y$3lYze-WqtneFgfc%&QN*hf4 zxE?28VUX`;Lp9*!iJ=R%ctdh7=fQ3MUul^SY#SGL@g;IWlVo>E@dCRTb>I!-1p;~c zpJN$a%dAw4x;+9v(j#XO`(vtO?iU(iTqOF%6WP8%v!v)c89iBg`nwmqXu3<~!?G|UA`6&qt|As~^Ny-@V20gJbnNm(k$Qpd zuy~@}*5z4A)@86b)EMy#OfnXe__WiPlcloq(hsFHct)X2yp}~2o>hC>tlgU6MC*ga zs16{F@NG~yrPE`LjG}4lyg<&aQlkCKIdz6^%WsT%rFw!43`Buyzv9>195Mj2H%uXfH z&xwTd>E{ZwZx^P)OPP=c>7|mrhTHX0?D9lf(ZeD#oA-9g4i2OM8(`wVXu>!erkO%Xh$SFzu%2 z?q=)SdLXEVnhXI74@myd;+r$2$$!;TRxLKH3p^8G`*d*nQ(LzAjC{2u?8n-`7~o$1pIZQoOk(w7`W&!t{WCNgFP= zCgQ&c^^0#qvr+L+i_e9v^@mbdmv-)T^LH6ZwvgK@U03zukOXqk3dCDd9yT` z_9N!%70{LCq_O=+k*i6d&k;#g2TAjTa$bQ3R}j=i?fA0~Yeci@deK#bTH5xmynB$IoKM zE2izdTRtk00(j5vmRNRm7TGw5IMAx~E;VxAr6!IYs~47Agf-T{a5U$PqIa%dR6gUa zN$*lCn-@t@FOP(`hZLH0g;p@yhu9NXHy3^`!!u5={MeM$ZJXS!Cq=w{3t_X-1DJr9!aP7CU zsWx;U-=7V@_Dq~d9;H1PtuAkVW?2S5TsnFGr*pDIv9}hRYX(EMz}t}rJmvGu&h}s9 zzvyJZu|{?>0kURw1}SVCeu&s9I%Dau2z<+mSlt1^ek5ShH9`*fRei{#B_E(8i3BMu z*a;?h30*2^6aV66Hm+qTyR_~W*S_~TL-VHQGuKnaUL?z2wq6-6Fn`vxZUjv^nLpEh{LOR?nL zTF?%6lnX4F`~f+b{L)}Ra@S=7lK+SF*c+tB3*1%2W50#bBQpv;pdlcjUqKKxDQKCFZi90VBnqA>P7N5Pw5hXtRnXF#%+1Y%ukNE7T zvo05({lK@(vC75yeZ*&HrT^3T?5vW$;aashW!GB1@eH%PVcFb&-oX^18^5g1**8 z&O4*7i!{C$eO&}WAi$qaW$Pj*ZpC$xe-x<}Yd*bIK`L-FB6Tk!bw-8My$VuseIoUz z{HRFX)CW@c%1C{8GeatyA>}NJ&^LXq*F2CNwq7&#)+p;WYqEQ-*I==})@wZ3(bsEg zwQRlS>3Nq~uX%2svR>1H>ovc+Epol)o@0^gHO~~V^_sc!g6lQA7vg%&8X!CV!u1*t zp4xt-1K!w2wqA2^;lI3IlbX%eYo6}JYZ>x-jYIRVt=BN>^s!#k5n8WVe~YwUGtvIv zv0lS~_O)KKWy{sco7rD#F27#mofENM(`!ZJ+x*~)#$qQ5pF5qsRy0)W3&t<5yuPqC z>iR-xec;F?d41scqTcHRg^PNv53u-cQP&6N7KPRax_{Dpec(TwzZ~wl%=|^MziM)W z_E&3$%Kln+=}Puj%1p`rO3IC9e<}KPY;F|&TASTVzq0Y)hkmWjzMOu2L<&pyq`eRQ z%1ZyI`ju7Emww&hyE=aV(`QV-E(h81>Y04huP8=(>sL%A1J>NyOTS7V*57(H`n4il zzgGMk`n5vQuO20&;ctJ63U!V0`UHDjAB=za(`fotK0X>SoA()f4y%*CfT2s8v^L(t zOEE#Mq%c9{>!^2RzbQ@^-N?z2{od{BD*#{yWLtdYY(pX2q$_8`n1lTzvw^C$IZU1* 
z7*TK^SDuMmr43q0;=jR*8tysuZk%=Bo%8Pm2YmQ(ypSga2Rw2Zx4=We0c*xb1P6Q} z^Z$n6fR5u4!2w4r!2$b|7CQ0~JehdJyl<`{{TS3}vo!02P}P09UO4!1HFR-ez`yj4mdI!gVo6FP>{{o zA^lL0AR}k8YUv#o_F-f1aGefAuf0#+QsW%==91%5A_LZ^COidR*8a;*N>OB<>It z?ii&G9m!S6p(D!)K)1~{1PWMJo7UwcUnE8Hg?S&Ckb8ox(XdfwjRl6N`S(M`nf5*Z zQ-LG*Me@e%ch$Tx#~tF0)iQ6qH(%ln?JX+an0Cv*&l~f87UGSkn!|bH)o&trW8KaF zSMkP>*30q6*~|+ASwZba~ z#ia)|==&|tL~;|9q(3por^%2ykVOkLN}Hu=s_iOQkvoo@^zoKrUu7$vbvNOXtQ(KV zVhulHmFBQSlB<{NW!J^Jn$OnqjCAi@t$Z3^_7Vw3 zvnAa>+J0`dy{nu#xITx0KkP=_(sR<>G!0iy@Cvo{kW?ViyR3urF6%@moJMlkRTK(k zoAOrrc|0y{?;>4z$c%X>?m$0--33m1X`%*iW7POJ#Om5|Q@Z#wP(iL+>ynPc3HEc$ z2k=bqva=lN@-Cz3&vY!SDE6m{ZD@CzDL|NMHlq2L0tHe8V-RXYFRW9gDiM

#}D%9 z?acLrIE~J{HZoFfM_#Wuvpv1y%>FPe&TQvHYE%x>wYbH}v-r3U>>E3^n@)OL(YR3rCBt2C0X_kXjOf)Y*p+sRRWns|qPg7+&{< z;guM|Ye58FR*t@rE8&%&kN!;UT`E^`-+jX_=F6_)WG`{SLsGg7Wr@o;2A8<31sIuj zQD}*4Q<}WQH7={KC9Y`+QG>wV=8wL_g&+{%urX|jt8^MJaXmOkweI%AX$n#+uS29L z;Pc8>6;d4&WuzXvr7xuBL`Q1x8_|(M5D4&*>ljjprXo^pIVz-1PF0ZlXfz^~fRys9 zkSbD;>Y3FSQu)!58r=s{2{NTVAI*@O!jQ^QBQ+<8)I5e%2U2RA3aJA($w=MbCsN7L zk=p!vbfh|DN-dUc!bAq+YsNL26GDB9(}gI;cWwf`ZiXnSCL3VOUh8F1!{U zsYIDljY$lt{tT&W)JP2vA~lL3)rpkau0m?t1R1GY`b6rj=t$k&2U49frEZdt;-?@| z)!8bfYNjYi)r~@=l8{m@Dx`D@Qh%7y7gE29j?~-xqa&3hQ|if445>5eh}4%^Dx^Bo z6{Nl$iAbG8O6^b~^}|#dsrWvTS{@y#j6RS$CsRrJBwhOM*x(Wk`)fO0}wx zI+-FPRb}f7shsFYJ^NqLks2pc>PI6PQhO&OQg7a(LaJ%9g4E$_5vgQE>WB)dISNvN z>3tzJIXY4U`#>sLM(TrW8B(JeQfX?WCIyk2%8=^D?%Sn8>ZKcHq+ER>bzO9%*1j4Y zscyOZ>=II&Cm~W#%~Bz?eUgIIGl_^)7b3M&h175bsfNtHkQx*nsk5&{N2*IkYIh<- z>OvYKWl$p(8$`;)kTPQT?N%X$%RUUL)IO2YMn`I4A4nPH?i(Q?bvHxmp_wYAR-`FN ztsa3$okmJ+Rw30nPDbj9jJ}ZiaA;JdUagOg)M=SgkB(qSy*&|;Iz2;$)Q1xlq&^vr zNSTmQdsIj*QIOL1iBx@bq{jAvlu4%4SHl@n84RgBHBxhfNI4i%XOL1`R7f@5AS3nQ zw7!sfEILwK_C-hPj7+IJC8VCc36c7n{zXp-AAf@V6NKI0ZYMa^@QsvQ+ zy0kYsQWlv~&DSub1~R0sRU7w`}HzXIej8EKRQz7eIRvKrqpB! 
zskIXjsoLo(q=X3yQa?#Rq*9Sm$5oVyRgik|=Dv`c5*?{`UyhDcs!XY0B`~DUrXo_` zWU7!lm#QFjaTp@SAyO}>kXkWTM(UbAkupa|YDOPOaWYay38{q)sZupkWkICwWk{tV zQf(@vK1`O8s-DspQlG^~Me28Zq9c_iBeilEL+aHOMCwq63aPhJ6r_$0Wga*Xsah3M za}}h%Oz#V+zeY!Da34tNWu!hD%8(k%keb4flJ}c$PQm>y@Ab?PM7ag;AEIB|=3Fa= z^q^^L*_L(3jjBjca>+|K21^>Tq`FwPRP5y&LtAZ1v4b~W7GT4@Y+64w%bzhfO8bKt^+L$iBlggH&4%td zoQ1Al%$oiB_z2`x<=-41z5I-vQ27l}%FFmkd%6POE#SrYadr$qZ{+9(R%?2Ff|*x04>=$TC+i)!dXg#pE73m`lP}aa>+zl zu?i#BM`mE(S)&*A^<7+Ll$sa~it-(s6!Bf!+9nGyrsrT>bz1rw$0-M~cwLi06WXE2 z17z%;v~S7?pp~hJ4uSchPd|(BR(MZvQr{1lw@sfu9hZ4bd2E5vDwmb~w~|Ve?!2q0 zfWCh-mK{l$Q`1D4y#4?eNv%F>p03Gf${}`33cSfnIE(V>?{3EBuw+(cLa+X2BwC6j zyyW3HJY^uYP&#WcELnBdAe4Ux^a?#3CI3IiM9ELBq4NtC7i|pB(;FWuey`5|*k$u$ zK8BNwgwaLs?z4b`|8ZoA`+q4x2^LLo(Ho-S-)yjAdo!ctzi?gnSqyfVz0ycddN*2) zBo7zcR%ppB-Y(6AULbgcyd??UB+aPcVH!uV!(N$^f;`mxMH%c^0xU~b;4 zpPb%sf6HSUPBYt9Xj$<^DQ?T6ytudi49bg^v04_D1Vc(C5IzaRJeO<}9J_I=z0MFB zZ!gFCD)Y^Jz;?hB`>H;jvjnd57$j>{eR~9c7rh&5juNYH zmv{pHL2veSVjixZzF^+hDiqe+_}!i)_(Iz!OGudawJ4h;q_7_PW_du0QvHG)>n{}N zFP)UUQ0?C^J`~w+t zxzK*bh|qr9l^R~Yr&nmd?ZMD~YnH1+`x$$O_S??tc<3G_w4X6Dv|qg(+OMA2x3SQE z^*;R?CAQyIXA!+wfzNO0E4JU)KMKb7yE{$VTxwdvjDWi9LSb~3@lb!=|Gnek7OQGJ ze0CyyJiL&g8V?)JUvWIx76r${KZmHs!yVs5j)z4FR~iqa=0qD0Cnx^*j)zHm{-?)7 z^Mn8CczABtuR*I zhQ;*NYgKSsuZ;@FKPn2GR0U3d3C``aqryp*;Ve?asfZ3|aWpswQ)M{6F)=u4(c5!y zF!Q}cKTA}>;iALo92o`9oeG@k5}f7H+jFN3=MgoWZHZCg)JB7IE=7iOERMmM8@)Z} z21#&+tKpKy)TW7^>7I!3hNC*K>EdW+BG5bDG1LV9p25+;5{== zhPP3Iw<|j;yk`bV@R}2%!24@LRCsR=j{K!WgsRHlup?$#f_J;S>SQ*~Ym=h)DPqn}HkZZ$lIc843w_!yk{jCt}Fk63bYeM+q;lhgpf> z*d{!_T*MHMkjBR^D#|awg+{t>0JDtScN(M!J3bF&qT^!aX;BTnf(`nqG&6$sNI8U; zAHaNdKpQfU#g==>8l1=Y@Ec5JY7nxNAF7sTO@e)Mll@m*ll|8mZ7^U|O8hQOl=kn# zXg)qUK|{Lecm!a^BYqX%KrdCCg@4FHsbQfBDz}ltMfCdLvh$CQj2t~5OAgbR+wr7n zj-lLU|CEoHaB1RF$YI}d4efUeka=v?Iz7k7LEe)E!MtZ7LC!lNaE$Pqi4Vz^?oU6~ zWJnWUUIQ<=0>61vCb|o&EqvPixL+u&o+}h?8G9NX;`@^^vYt>=!F*`Kl&p4zG&QTO z<7R9$SI^S2L#aQR3V$;Tck?kqVO3y=L^C>mswkTG0W85@1@pc|=>5B1C=A93EUagH 
z2di0Nb7%m6K!CqCM(;}(>5b1x2VK$KC~FiFKrc5+x=g?RsB}ZG|H=~LsBW@9t;zHu ziW>)r_AM|PM?!~a0vOQ-*I+GmRY>VtLvA*-Rd7_xQ2>V@!i)4rEG!ye)r^ddp2a?P z}=??ntc(^@rSI_Ftb?yaoiJhvfk=6wYQeh}by?;?)95DZ;b zUv4X`;%}7V7S?5mVjkqUXC@xyg=W%uJ6Qv(M=|HHm>&k(H2_EjWy2kL0l6|#SPS15 z)D5mJe4^^Rjh;E)FEyU{Q1&{<6Vpv#sT|?2uffS?*oXd*zJ4H7F}n&X-6Is%2L22j zt3k?Rwh7tLmx7~e(?#Wueie|o)=38s#--z0yrW;0@1)}gvenGWZAH+4+GnL6dJTF= z;m~{XIZo<#^S+e^`aLwR+}3HX`30U}mKHC7{SLDg3eqd8<+CtlFIO_RR0jt#do`za8>9WbCFKP11ifWy~dca4x~_kN~Q@HJM@(;U#lxYuJxN$Jn+2*<5@fx5rC-%|Q3(JLA& z`v-d}PX@3G00dqJ8dg>QisSX@AD=MQg~3P-9KDE3$o4IC+(1PQs`cN%e^>ua8yHpp zapC%J+sA$AKmC8E|HPg!{dfO?UiwcT)PE_tp#Ktc`_z9=N9aGt<@8@hxc-~}e~bQ` zcy;=(DnkGLI-35QAEy6W{zd(Vl>VR9f4B9i|0c=$Z)#Bg{p$Y({TCOm|6&2e=?88y%}BqsT$K;|H%+9{qlcB|NSzG{+k=bTjOok z*4n);a#(l`2adqlT40)(e)t4~c8>yWzdcyr9^L^7N7B$e(P6qr$~aZY_*f|8r=DR! zeAe4leAo{XHbeOUj7zVE_-<#qRZ^y)N#}-i=r-uVrA&ub3pFStSM(#wPf3%G&`Fwf z2u8@O+{*F&)BOS)l!2jAC()BPFnubeqI;C>cvZ`>U9i87oFjAJZL+h%rziVUj zvSq5X65h()xJ@T%*4<3AZm*h=w$W1xsgh>h-lIuxAfZhM@`stKrHx6>PM1RDl2=_bOMmfK7}eOywpgPDrOQZ?sM#j3$= zSf3k^t0a$OM?jVb>jtBWE%$z*@r+@O0rL6~+zNw4cf#JLTPHzDetOM${<_O$8w9!5 zTczrm27X*%zXOlKSO_ZNTe2likgAYX@n|2b$mwIBoOF{8YGuDc53`J*mVYj4P%X1& zS5Uy!Ft``hq>Y5)et7=J4$I2DZ-%Wtr z42WF4xh6v+jhppS#o6RFyjiNn@BrQo&M#wn{8KL=7mAt!Cl)*A*7FODr70zABS2$i6I}gdu%BRRZ{nT8J*(Fs(Uz0LdGumRXe0nk4o3fppNFfnm$OttH>N38t!kglQiZxA2R}`UK+=+>CiL|E3ZWCd zJxu4y^v#vQXaP*%&p^+r%10EP%k+*Um?i#disb~5nbmal0FhNd0^2+o1mSq5fTdb^7=4mT>+1hgp}?znf?E zp?|lIzbgG(vL#&q&X^UUf9H>v7=Ox^aQ!=ER#g4lJu^c8n#TXH>EGg{tJS|dAL^xl zZ~ti&{d?0)HHR;Lfhtjqv_v|$4Z5oH~%~O_h*}<=-(eqSF`Zr zd#_smE{~=iXUPa&zqxPydofcDam?O6^>0)Nr;{ZJ2T4@@`$y7S|JG(k)xQsA_Mv~* zCil|6E)uTVZqDpm|6Y?BMgNXf)k%#cs{TEb5ncbbk5QF9QrD;c{Zm~yl3Oy=gno5T zr2btVu74jH^RMXN3o%Up-q0oMU$>?HeYXD#34)^o)T_^NR$QJ&-Ji+o)qd7VE1nAJ zRt)vrN&or<49k*nq%x`f1TGj}9Lwhrdq<9&Lq9Ib{h4GVWQgvXjxWOrG?{-_`Z6YbL!%%51DI{ za0KbRX01{&C$PSRBn@*RVj_i-4|KZ!yoNiFtUlqd(D%*r5%lffuA=YD&qvai|NMWR zz8SUQ^c^i9is2WA4jF{yEk8vzDJWTOWz$o?u)*6ej7>OidwWE`+bB*Cg7uD 
zHa;Mkjj~d*Cprat6@8=$ub27kwZ%?!mS|O*Y}gfGuW)=n?{iGl2Oyb#G)y)<*uoxP zMa#BHRn!+dE6iy4nJdmk*-d8~8|1LaR{BYkWV+IpN7+EPD+B#^Q~F+bBU4lc*N`T| z1YfQOEIp~BpN>NMCAj-<`ji{Uab-ef)utX!&%y9_8w35}w-J8FxGkjtgX}<4!UQD7 zqa4k8R1&s7RjnhB2*Io8*X{;bwe}y^O2%Z;C=_H;1yuKF_!h}0=_)B5G8hksk}01A zCPs|6$HGr^TPFL-;nGmu2S)-AVrW~d+gKVHGT9&e1Z9TWE_eo~`$6uZS?N9Vwbwrs zI#PTYzxy0J+#33YU74OMLXFf9xJTo&?`X3BSR+|}dzj_7gY9X6<+nq! z{C41;#zIoK2U3V*hxa=)#Tp=!Wce*uTYfo`<>#9$p$CPaM7fRepJe&%pt}>Koekky zRku7)Em?lU@s*XT5k1!eL>0IVy1YRD$)^JSqzrW>KlWB`hZfW#*0t5AJoiDF{JMmy zOB+46;%>(%04F%AWTX)r_Et4LF%#1Gj2e-@h7qA~hfr9xLWf@qtELZ!jO#~KtQudXn@r#1eo z;+Wao+_@>_+}omvO*5)yp;+4U_29trNAZF z{luPx*GvV+5y5_ZV@&$_+(19ba?eJiTU#pdtpRJge^X_`KXpLQr~O{PPO!I!qKnByp&h|8+>v^kK=j!iT*99sFR)v}*$!DjT5khcoi@tnT+xgLS((=F>1EtnL{pd`zwVbRc%d z1l|rmeD)6k%RsuVq_r|9h@tOFwWRSwb0FWN!(lDGPHiZ{9W{Y(3oHm$ zIsFP%Vsx{)tyLHMtsaNCjHxyF?3ZkvEA?0p(qMQH7AL{lS#yfl`FLnTOZq9NyT8wV zuy!N{qY_G5oHYF$bID7Nl1?Z^k5sZUnjH9^X+P*0=B;eu_(3+_y55w@C#;VpcE6AY zX5e8hadczwCJcI28qjcu8Y_084jB9~=zu>3PzQibn220aGapCkvhAE;uf^D`_HD$T zAh6wVADes%uiehp#BdOy(f#;`Jz`a*mGcbuu_32KzM*fR8;>@jWJe$&_=Vk8rqk=$ z7yFK4sY-h4Z#`oAp<2i9Y8}rBj$PW^b?HO#M9#~@N(nq;m{hXgOxo2xqv_#@U_ z9jC`1w|T~4G#qpFV8~Es|DDL{sP;>voG_@K7uxmxa8&F__mg)7do{gnJs!-digG1y zhj?PsVUB|tP{|f(n76W(gGLN6S7%^k01$brOp+e9Z!WI^AvYS=5AhD39q-`T{Z$zo z&0dohS3U*2W)_THgu%`nZBUn){?aV6<19yE+F!O#QVIXWZoptiTZ6+OeLPCuJcCmr z7=7e9WlK4`t_sOU9JRG<%~E_B|E06dawsHwo1_FfP~JAO)xFywk%c~L#Kpa7iW^et zGv~1*R0IYZ^C*2~oCFy(=4R=TN*n#hKwQLQ7($NvA=38vQTpqLC24O*a+hoYRhg{v zak>?2CyuQfbh!cZZcVNIcY!$KXamXQ*b2SHC|Bqg3b&$2$G;$?7Sx$h`L;)F|Lyl^w{;*JKt<+99s_)!vdw~h zYp}AdPOOlM|HR6+Vr845vZ1W9z%qiAa?zD*nGkl7lGL^WOReE*<<&imm5BRcfL2vr{wGwOXWUg|7rca@fB@6!( z*DGDzWUhIHVp-YY4N(y6r`>I8u z?7igy4)Vi#ke2<}0ifeXAMb}k?AC3NGTyNgWt!~2)s$x%@F>o2x$}*B&p_|rF9AoP zoc!rXCa+QG;yp>mKq03762tIms&S&726&1u<1fGu z>hn2vIVG7zpH9YCKfMmBY^UD^DUlS75-3*>8-Wu2BPo#-jS?2jZhNz}kvJQFWNkSvf+*i2Lua(213S)M`OzrVp+%W&D6sA_N$e*Bm< zn0;KGD76`6BB+{+GPV;#*d|2576qeJUee1TW&rcp^I29SKLLrS<(a@4tql6VUB@EU 
z;dN*_iKRVjvxOvp4&$e}XQiI%MyG{{J27V|x>TS}}c>_4E06tA(m3UqUaTm1` zJqitGbU51DySs5Sgf+}u{kIFUh``RF?JF5FPCzCI_TXq-3{)&_m7UGb+A&l6T2@>` zUo}bS{AML?x>PWO!?PDb(~==10A3 z5bXz2?Lt!Rsrfq3YBB4L85=UD19CAF`dVCc5m5hC7hJschrN&-_QW@!Dg3 z*v4=Fu`kwg$#8~Z*}*~aw2%h<-Rj_7S0Hye~r{o{ju+Qyqkgd^PggvvG!a;3^P z-l9=#dj$SZyWz1qO1_T>{Z*wciexqw(+Q8y=>zNTFDrVzyH6-Hh#OJw{5&@=vCXszt%|T zJX-O;ZW|A&2=A7!+gOfhCr9VO&^#F! znrDWS=2=->p8C^ioCPEE6l36>xlTHfrCaMwd6+&?N}unf!>n>No~+_>B1hqwBmEj! zCIzn~Uy*?|hde{5`~HyEO^<%EKM>=+o`asq)t6$l_jzcl?>i2r`bnXr6QiZmgih#` z##GXYacwSr^PF;TtCMtrQO9?pqbH2QY87%53Y_BVM2sVqG6wx~LaLFxQ{?98@+UAh zDK@h%Z(KlEmp9)0x?jjmtnsgj@pf%oulJTGf`yxRldm+%34d=S@8F*lX94vNLEATP zqHjSmCZ*y(6P)?bEB6nF0%&Mq#BzSP7>5Hi7SUQr8z!-NnV8SQh08J;`k)Ss*aB<-448!)V~*93xW5Rl%jg z1muQom|~GqjF=L?4T+E}C2wb1;?;(+iJ-6=Xja*0MFndbe!H=aDTFg>^<@okdv3T2=`yTmQM#GS(`>gQdyC zkYMd9a?;->1>035wQJ!M!FCl%?V9;SSi6$hCurBXUxeCK1noNe3)U`}>I5t#H;s+1 z92jR)Fn}X8macqP)5|?D`WOBkwr?4HBwvMC%miG9?(Hy|(mbTcU+#k;oNJ}mU@ajh ztV;Q@mnpvn%deEn|9{k7e_WH*{(m+m%$RIlu`ux)CMGvcK`IiITt5@GYvhkefe4g_;pC9LQe!V|OTYc)&l$1ZY2SgLx#0!@Daw7<^$sBo}Fk>SEJ4Qh4W?A8dUk52b8fIKpC%Z>T+wD9GGUgHU@xXtkqa# zk=Sz7R`fQ5y(}f0gUNbw*CnkNVKP>m^|fekV^>d@jJ57BcuYmWW%S}IPB7XQ+`$1p z%+FnJ3e)Hs9uR2J{)h)?k;}CL0RUR8Bp{1cfLpvIpv7tt&|+ciB?2v03luVx-GCOW zJwS`@LA?MiRudW>Q2~J#Yk8o>T8_06pv77d(Bei-KB|opfEKF)0WHoOn>!z1|_4R)mJo=ixp?7^f_gj~suW#0g^!2(o z{Pi`e;nMW=qaQ=i*O=*7TVJ;hyj*=PRRq!32L^_uuagy*uCEG3ulm{?2=*)GUNe1QuU0XE$J`atL>@ z2*E?I$?(*GXAjH`vsgtby0S2f=(lpp;7an4=5ax*_ru5r{L5GK{PV@674tpC{a>fW zQ)zKd4ock&sZFO&oygfg`}ULbrd)+?I!Pgi*!{Gx^yyj^aIof}NGR74c@SjQf;Ps3 zdn!n!^EMx;{Pi{Z*>A~X6_=|xyFX5sMG~-~wta$3^nb?kxb2`{=SENRcJ6wfZhz>C zxMh@TsfemfX{-QgL&7p-*yojYe+iMc3A{e z>pf7I5_BV>ZSO{VDy8izhJKXt{WxhX)fvM6-8MmI`Yor$!}wytYJ#Wu-7;Dn&KHyB z_VpBZ^0oh+;uVrgX{^HyC;4K~@j8$A=W8dSam^CI(gWrO0dqE8Kf;+E^;tsz=3WNlo_mpOD20 zCGG9zKVNC`GY^2~6wKqNF;qs7*a@B4!&_92f0geNk-BVoj4_Z8MjNm_4Z|cE+LbglP zXOFM{ENJZJKM8L1U`KcVS(D@=mG!0cvm26p{U_v#L~Ck&{O8$^{5to+iQdkgktEVR 
zy@gW!Yf^Xr3Aq)~UA5i(XSqN1ViEN)N}Z$|MB%l(v>*4cvPe{=HIY_@MY$D=UC)z-?0IOehx8W>z57odL05vd`~b8mjIWsXz(kS%%;)=2 zmPmDmupjZEpfivBofdyTnidmQ(Qcu5AN*?$6)0r<|8$V}ug1rJj;ZhFKj%~l{&USJkq$qvr!*H)n&=P?((7q%6HAtKCMPlM zlR^geUq&3r`=m2pE49p%z-+^CydJ!jrZ>VK_opn3iT@CFxT$8Fs%|IoH=LFW9ew z^-it3=(rtB)FH~fw)Dp5Zpa$Zj=!gsBA)goPusBH`{wHMqP?Ig}eRa(l1*|_ODz+xY-FoU%?aFrQ9Kn z=#56!$l!-=y%Ep!G-&v5aFbvu+I$w)2W+@qt)yK?_;%SiB2!@>N|%BOWSneKK2iqIv(u@5;M`_Lu2QbBHV z?#mImDHHK*{nKPydeb1=IY$_q61IPFE*EBZ4G2>smnux%>WZ9T=nB*I*&)HyGYws` zS!-!?4RiU~|6DTrFOEz93D5q2BD4STboNiNFAN>yb;)G@ua_A=)hFcfmt)w4Le}ni zePMbWjVr?J8}54M!t!%2W?BJfLX2>%rC2!DVwCX5T4qT2V=ZGP^jM3M9&33O|LJ@W z|6_9N2OCOK@oYXGX8rMQeG1Il$+;ru&$#oUAD*Q83%52)mcuqvDSG|@EREQR$v*+l zny_N?oA(yeB(OZb<#lq>a`*_pb`ejT@AJJ3_#Qbhz^067A}uP3HobLk- zy;^NC;n>-oouxrf{VF6jCzoM4I7*>HpEP?LYFh@YN|>}7n1wt$_*?ndUdnib(ri|^9Oa@`c_gB3ne?X5?+v|Xugr# zBS>$Er1%oK#!X~DOprUlK7rR?j_?tCt_m1qjpMPv;Xu>N;c$R~MTQQ^>_pPOxo*$; z4R*n#DL_ku?mzEM`%XVhb(H@kN6vv5rpGac>G4ckudvH8PVX8Q*&v9$DLbx8a!6K(&OOK|J5D_ zk5{$@A3T1%H^jkX<9!!9c%1+0QU{OJ`~K*I$A8XJAOCdw6&^fZ;|WK%|NkC5GQ$2C zgU5Ia9X#%6@cQJDmSBU&%hKOz@EH9|PlHEUy|{3_!a zCz+kWmNlNWa+%Rwo@&JYbo-o9h6T~`982sq`Q}0u*ELJZ-I;}z)*{QtruIu0OoE}C zWz+DC^&yuM7vbUyaIV6Dt%gh#dAg8oG<`wzoSO+n?l>m(U-|fb^8!^48Ydz(pKE-` zrNAYaP(G@g<91II=-@7rbB+1QZtlyOX?N=STEB+GY3GWGy?Kq80Z;S|Eq#jpWZ2pM_()^EpK z+aZ)$gsp?iP2ku#u0YP|Kas~?i6SQq zb>qlBXbE_s5}V=iS_YCpY8kjzpN}Hx8KkrltF9joA>ddW`+i%Dt52K55GCb-TO#2^ zVvY=z?7{rkqj$IYK)7f#tnuIA_Byta%L}-Tu*+PMkWG>a#&v6NJ77Gptrmc^sZBnM z%g6zorzh~uj+CV7YxR8$ky2M7S5V6x&dWjD@A5w~f2;luk`DJUR!-$i>^YLp&NNFh zUBkt%!5hlGvP9*sQkI-{Y{G9xK%U6V^wYCG=L)QT9KK#lI6Syj#GxRMj;*1|W8@)U zd6bR62zksOauM=4^&x5P;H^E%WAfI}QA;o=(734Gl3|2eQA} z8}Q%ziNg4 zYC#!mh5l;j-d|Hb5c;bD`l}t4Z-DtyE2EF%)Gd@N!w|+b%EH<;dC=tq?fa+qpkXa= zFi?UYq_H+#IG_gT!f6$u7oNRC)C;xZUYM|j?*);6pGVJ0aC%9gU4g2a@2#?$JT6s5ZyZADY5}bys_v zLtuYHy2s-I?@hqH60(rz%aT8yR`?LQ=N$>tEOZ0mKjCuuU+%;Ibc@Bo^M6dte{8IX z=QhkUl;Zi_9tYuhxR~dgG0!&?{c4^t}P{3AhLiH8xYBn~dxsavp)qiL(X 
z#!v(&y2R#?hvx0@Gl!bB)EsoT_!@w1XEy_gBnA)#*RxuhB6!2`;Qyt0ao}*5)Dzzh zj{S+ZJ$4}2bNSmr?YUxA5PP<6_hX=A4R6nX;#k3+VFn{(TuHtxy}QGQC3BW6pjjVC z4P@4<-}0KZz>oc{p!~Fy2jK@*`|*&q-KuP#>_OYqbbomwfdSE@k=XOmNV@PfVqJzN1-OwDR5 zaF}e%GOO(twN)!=Vq5sRnib7DjiZBb{+cSe%Cb&$xUT{Z_q{JC$WG*NAG?mEC%0-) z&G9Y*TZC((wBvB*5K_q$|1XXsaN8wh#VH^unGdOFM5KObgC({W{GOF0y}{FbtyGX1 zTJWeFbA+cGBlhW{VVUTfMUm7NToH|75|?yQB*BJcTVP^4mQ!0j6Wa>2y4-UmutGyl zb+>MWc}4|XiYUjv?ceLjr3l=?yxb<<=3`ZjAhsxTo(%QhNW_MU{{crPOL6!0flWk? zEq53DXqX#|!MIh>u~MvKX0{B~ujZ93vN4R|+s{FZ-0Tv#vYlqij`ROH$P9k4?@e)I8L<(om{x2hFE;ieN ze%qB2D1Li&!xfI-=57e;PhKbs;!o!K`x8fbDDh6x=FsAu?a6^0$xG`4#&4DDgYr|g zJP1FP+x+;Ehs2L$TWI`zGbRu}+t*#4_-)*>km9#({jWs)c1=R?`0d-IUIH2O8&^7h z8~0jB@f+k?ziRQDE3p^0m#w{;@mu-Y!13D~OM}L5tE&9Wp|h;#_>HN$xcJR6IzasP z&YEuVn`2E-dp@=#h&?;D_%V=L7HYh6sxstw=bF)h%=)W}?(v&EI6sPD{K))zLith1SYQ@FGZ@2zk6u+%|S{%PsEbBdf+u^*#_^tDZH|UyoM2O$2 zKfi?dZL5gXs3XDRx33<$qVb#Rb)Wcc_)5R{?d6dH;tqFjtLfF2ZqOiQYn#$o2-{hm*~%q9F!1Mbun*@1r7;tpJ8@ekNckToQY6vMHaKOvY4GcM z{7Y$?AqAv}c5I*qn@pokcRhgrvam>_RcIwb-{qtGv1M{mhKATkyE*FYKB>~wKu}r` zRCFrN9Uas>RE^A&<<2S*LAgeIeYW#O+?b+=Sw`htCLv{n{G-`k>g}8Tsm}!JEHc?P+y!ZS zCJ9Z9)o8~$0X?Ir94NOlq=mSyHp4lA>|X-Xe0M5)Zov%{Va)F&l6XeBq@74&CTZa# zN~4Y81@Ia#fSK^m41+UFX#}_s-0r*J50iuO_Ki>1fA)@l=iCD~&)Im~`}de9tDLt> zygeSPDS8I~3A;=5fkLCbQB2MB4g>+QeZ!q_f4yiDW;zgyU#)e!oiRcBVBVh@#(548 z(*|d;()cRnW*+|UNz9Q*I1%P>=L)80(f?BE%bosjm@PvmtF*vYtYP0{&aU6Ea4JDK zIixL=uNaLs+_?^lo`RxOkl;f2H41imi;D42&sR`*4%w?U+2-{zL`hu=Lx*HR1dtgo zS9tb+61-uIFZTY471+Hb`0sxS_Wb<{Uwa<0NMz4n-0N%4<)wcRdtUJN)wJihZq%=$ zJwG@6O4#$Mn}08R4twiL+Vjp$A=|UIq8EF9XV~T2^RP|7gFWB&K=1baV0<8ZRxRt% zo@1?55bq6G2J;CsGz*(F0)V(FM!}?!2IY;VCe1mpQ6tod&7C`h7OcbnYd7mpn$GGY zn%I**>z_lhcuV2{EGZnI+C2$F)_sEeDh$$&$iyfp2LS;^f9wD8_WaRJlv^fgk_KCt z;44r|feJjuEh=kpr7x~a#k3#>QzI$pNl}lEeenlSGE^v~utQyDKkCzS)CY|E)l@}Dn%bt&tu45m1R4m)x>Hgyx^5{@o4xm&q)FP8mLKjvlOOYa_ujd`XXg9v zr5X`371I}wVxpwN0Me!?krbBe4z43yH{Y+|W)T!^hM8~DT2SjKyMV>wEANHFV^TpZ z;c;D?=|Ys5e7~HVW|lO{5@juped;|)rBZ{B#Rv!_63{;*YAyy`LKB=5yQ+ij(W_cG 
z9fyfV=k(_Pg$#ZNKB*JGr}p zvtk4+8j*m7aRkgBL_qd*sjk&|;L-pQJ>Lu`DPTO1yBL|DI_91(7c~nM9I$Lie;ToH zdV-hX`@v(s0#iU^Qs9Fw*aErOXW{y{86YI{V2eiqx&9Ir(9+@L{qA1|U` znl@Y)AWKc${b|HsJySEpU)5H{`787N3I1y07V59s-X7jxWlbB#Uxiw))?Yn5?F#+X ziFZfuum1k-$o^`{v|;^KR_jPAd6OfilD|$#RLLKv46=zshpyBnb{rasfbE~f2-q-) zfaj+q@mB?hu8+U^^OWo1ug1SKqQ5$~GpWDYcyJ`vRDK#`&6C-QtSQPKB%3=AUa4$; zZ5fGxzPcCzfys#kv`-$+U+r$WCVz$XR7Un!JKi3xzgqeBX#CZf?4aSa4 z{tEZ#$-=8~mQ5CU|5q4WuBah~9A2Xc!spD63y=mUg0d!iemof7gNv>Ko-MupD9!)2 z4$c48$LD{!iTU3~e8eX4-=!(CO^Og>5huTeg*0<0TRC$~^dB}4uY_DKj`-m3xU>u< zg?Df^S-hma-Vv1)HjX2y)lP27fPO!GIncM_h$R5pO!NN?+r1G|5$ubJU=zi7^5r3K zZ({uVua}4LYu36rejU*#@axUW;rJEYo}6Fnn?~Z-x=)7Z*Bjd}!>?(V15Nog^^@fM zx^Mes`1Nsh5`IPWRTHoO4-BubnWNPIcz6#MTmw9trvAdWsQ=aUc?Jrz8-@9Jc~qF) zCUNM;H-#8NAbGymP^sdbPX}+{ zd!OE*QoY_A4h#7igIM_dmccBH(#LH{r0>hMarCiy3H04|S^Cz$dG++Q@`LHa?a+qw zT!V%W^y5zF(y!mi-Yj0S(rwh%A?4eo@fxydrXa11rt76MalQ1mWy7wgrsv6*$<|Zz zo^S$^DEypw_!&5V_VDoO`P`)NZ4cFJP6PjE@%TPuFD{Wa*3!2-b+|>$P>;g6gNq8I z)+ElpB_fPo5Js)IbDgx)Yc|$bEp5bZJ@we&BW}-^F5G0;C(EnQ+OznI3=MIYDM$*| z!_ynOxm0>qUP#XCE3|8V9~;0pl&ee#RQ%h0(F)1x0{t3o*>M~MLnh~*uTfT5Ojt?j znPS|t(k_eC(Bjgwyf3mMEwKEuinkbuh)I&{DvIDWqLbErRHQnzyh-G z%ly8cM%8*Xe3knGe$|{pnO1k)+i^0`)0n@0reF1h4r?O#S~g$W`a79y&4SL0y!!@U zi}x@cQSrUsTU3N^Vm^cG$?Dqu^(Gp=h z#Z7CX(%K2mXl9)0>=d{?Vz3RlK4L5HVV7ty4&-lG2@t+<;#=lRD zuhU#)`vBLAMTHBwzE|9a`yiV|)21Q*BBHM^ZMuxUZY+z_*W2z+(AVy@@%oCrXi8hJ zv%dE0N7mO*A5E^W-+n%j>TCO|IDM_q z4c1pZH!6MopGU4$U+WGHqpvR)538?#YLxV~^E0ZiyDF2!55D`M*kTYfu|jPB#X`6Y z%*4rs@Y;-08SodMum3l0Cenfu)!F8o#Me}`G;ke+olJ{_&|EB%Js95l_{KTJEiJ?t zQ;7>PGG(L3*LN4ixA*%5d^ab>H+cxY9UJ2C+2+>w;`#t_1L{{R#->V;xf3^AwhM+p zv*C=?y`i0yXVlB)$Ye7@5&7stTq=JkOx!^=S&>N!vn?jGc*fHo_8YRXl!!+#Hg4|6 zg5=zNKBwaKs&d@zxJ)3jlqCZT@)|s!?}C(d2X4k{$Zhze^Us`2w&VN-Vr)$D z)a2jg_hLF_Y!`bJAk1$qFU9Og4awyB6LnHiC&IqHS$!_}RENmQ0QI^08~l z;_L|gwy-%nIF2mNjQ!4RGVG;o4{&u1gzVQGQuNID1o%-kQ?_A3m71^0k*ys|%5(Ib zxK@?RiF8AH;Xo}@`z&Bl9Z(r1`f3kQr#6FgkGJkt6jgr4(i$=-cvuhIgD_<4i?~*lF 
z9j}Tm^(Tv0?+><*ma?mlS1)uXnB<9z$xL$V`-9_$rR*x>)vw1B@V%H6U*-^e->)7V zujZ9q*LaowbbP$pxL_b&RhIrH@#@rGDPBGO;kCu9f8Kx1@ha<+QN*kN{OStg)rnOJ z@#=q{kJ!TftA@ra@HcfR&(c-sZyHg1D$jsX?4Jj>q5LzgGQkR-$FqAkyPJQelNQSf zM1;`#?}nKyv)`B#@jqLfoSGGkV(+kRF@1#3r-U7+b|x@j5C4prWm4 z^R3}>xZym5UZE52DKk-8?f9hjFsDrE2_B?F!3$cQQt6@qHl#7O)?~mvX{%JYs1<9f zSMz(O$6+S=9uHlh0);FQNiyOY$dL{53oYczt_IP@Illv~VP8NCl> zN&dL_0t_}~{RQNg3P$LT3)T?fG{RI+&k}4>C~-zj4BA{ndPgtlatcN%Z($p!7uQQqbNyN*SjTk9~waTWD*eHrg^$ z=gdpx)Ol*VI*3QbUb$4YLwX{cm#;0G#IMWHk`BND?1z_WY zrTnMvknUcvmF zB=d6y=3}uGGrdHkh7@b{9c5Ms@T7@Y({mg2-tu`h$}FK!OPcw9d=!?NyHry%cVSI> zNsV$Tb%2&4@#Xb$nHjKH#K6Z=d~FT_N-dgKANj^CR_d^(%Hu5{FYz$k;9)h2iQj?t z*uX{5sv-ink1SGxPpNLPPa^r5AnlZz{A~Kj?c}5VVVf#=qtsTC_FJHxC2i`*Q6Ea+ zG`5uc1HOO#?$=1X_CmY{C$?|E^6F_w760%&T3$U`-afFrqEfWH9=F`CK~zt!qYvR; zdoI>nMN$)ezP4Or65ln1!?aXPZobwe>OT*MqpuQ>gN51rJemC_QaBqvH;xZ$$ivwy z$oQK36@r{`DBL8~65rAe)To4UI9Ir1^N9woe{2MRigCzSNRu*?PfFB33L*zd$R>qz z5JCo$kQ1Q*u7VX5!HED6tIMuR9|F^w*iVLP%(qXdwKlAXE8g-fvMN$OXyE)AI3KB( zYReQ~M3)bmWRsQd0uh-6d3Zu69T6QLh>fo?07vJ7tMD7N+n4jAjM=gc7chu&`$jyh z^PTgb!Zh)L3eIoP;B!ssfv3tB@GsSLPML4UG}0C>DuRCj+WP5!uTO+*&m{hUU{&)9 zWmuI6CU;&q;p#L3W_tD;IJ+II@rpA07(m5oZJMCMrWJo4CzCA?D*RrU0%N^acN{;8 zq=%h-HigMHJvgaobX`X3u>1qFC1AmTV35 z{mU>AP|P?iYSKUsxN^;HG(FCVHGo4Q-^`haK;T zN1_QRfIv>#iHDn{>tXZCl^g>{1viGxyNB75FUPemzhG@_;sU6go^d>^WE{l`#^KT( zXB(7H|L`?47Leuo4_w8!O=W4#shq{LDB!tlsbo!{gE! 
z00t+DA)V?ArjVt*ENNg}T@33InUo$jEyOsQ1cP+^g?7U2u2J5In9@9eANSZ9dQ)2H zERlgf;jB%|3aN{iF^(2i4jc`+i@0V5;y(T&UOUDprSc|@-QgW+OHXh z8`uEXtYn391u!_xAOdzZOWg4S!F!SY5w%>bCN5OUCaRQ8TuzURk#ubV)wKmw*HY{S zsE#)(u*s^En_w1m`ms`Fug&&SiV9HwwVKW z4wVY9je>B%TC)WJCq-`0V)+y~m%;K26mo7Xd?@AU zb2fuSQI^#k@JrreB~x7{eN!;irSw~xeSB9*Kc-o%P@zWTLW<5ARjtk4ugVhl)M`Q1 z(xrZi=mV;cGh4=jr2MJAbH}=1^QQ8CuL;A!>fjtYrGQp{xPttU<Ji7!CM>5^IwDrwBuvH21l@|th>DyBOG0ZzOewB71_Ehw%JaFnn zgRe!L`wW)q^)Lrqpxws}eSTFtops`D=U68W>H?6Er}&({fcF@pQ?0)li9@_uelK&t zkii^iN%0g}{8Po17x4{ra1rD&&PCo9zbZh1b*g$JpBljLb9Jgj1bxs4|Hu~7Z%!{z zbK>-gpE{ikpk+w(CT(smQ)x>pR8%aWvHDf22+<-=NX!Ew1f(z{84QZ;jg!(?V={q| z`|W+sz4J&Wk=EMpzt+cEVP?*ApV!{|>~qdXwXeOg+M9fsoZLf!Qw{1(xa5gZ>j;`!SQoQn3OU*z*cgvQmiOm`8{SKNXJl%0G_ zR`;W~n+^_fC5+Ao#fWGhjrS7C2$JU`p$g=XOBV;^VAAz}5iqz;0HqVMfB0t=k(8iw zcsJaTFX*9bLCrWR5>4at1yE(a<)g>`3dI49@u;+bawpVAGzeKa86MQ3PQ5f2Y!cb${f@rWp7AOp;V3nNOd{eT=@=@Wqe6CQV z^2QWRr3!;LFW_YY-f=k2Bh<7;P#cb*+Hl*G7Yd2m5F1|!N{$BAhA*%iy+m!`E) z!lRm5-{~RhCa8klR0D*ua-;(@NNyu8JWU7hK(Q^Hp)$Um3iy0fB8YtM2H^}s9d$K6 zmV~0-5MT8+oqh{6`Ie&~qK9_GsvX9BE|v)8@e&H1OcfggGcrQR4kE$8TKbW!nlQ<#B9c`DYd1C| zS#JnQR!t!kH**N7#D5P;(~O`red$VcX*vN*e5a5~Q?BxH z5f`F2uNBv#Nvl$2(-ji1iJxA@jL3yx=UC@lZ`Z5>9_d(4#^rKX)TUqjR)YfH zyIR+>1-~&6(ef1C)S39l;Y(@kJORF&-plHS4hjfIy_b^fxUTlXdlVgGHEd%}&aoVG8oj!w6;=7~u6Yu)TRX>Z zwj5)P!co7$JOAl!@0h0-S&kRB!+sA2A<>&>ZaIWbx{NCW;u5G)7?tsXWr#6bbB?yY(Xyh1AGHaR1^`r z8wU(15pM-`Rmf^b_70uTGaatG+qXk!xSl1eo9VC{^OKL{=4;WKVj}~U)Cq$I-9mr; zF@UECZHR=V@~kz>6`QF#hAPQ;f0LB-h8?9`5# zY{KR{yq93bn7uFo=`vON<)_!9oRV>~f7ea7WX22&o3QDW&$Al19bI`#mkL%R=uWCkZ++$$U9dTstN1)5!a=0y42Mvu&t0$6gT=c3O~2WklLgOAQvQdI`DT<`hH$X}uFD7?IyU@suUn8_M9 zCrd1L13|)4`FbRo`hky_GCb+QM|Q@>(MzaF5OgD!q4GhIbfu zL>*`Opoyb(~NG6Gh7;VnH_X{8dDGry<#>(>;Woj_KyA$6(PG zCSlWFzZpoN^}c?Uys}ssoOiJ581via?~UFy;-yhHwJQwQx=&5ny?N?7|p`@&5!u)Q%sx2!rjmiLMSRJ5hK#7URhi zk*-6q*s(A(SLuWre;OpmnNs|P%<_mp9`+^9i~#ZBXViE{dVXkzx)Cm`QI)9&Jqegix5wJ;pH6r6s2#zs9;+hz z`td$5*|87mzz&Gm)whscy}^GI)Pza?hHVYs)EnhZ{qGQf(e5 
zWh~kF`(eM!40;83LA{cP*%A8Ke{hKLA*BKSmqxs{MsM!)jCSXb$He6P32#RvTMZ|> zmcsN=(g0Hgy;eDo(rNzv^CC@*UI`~dK4>b>Rr*StmWzbSy^lDRJMHc?(Emf zS~v9@TUkGw9lr0|U=8m3NcTSTx!liJ;7kAZ#$Agd_CJ?<*&9XK-dMCKn!OPl(Q@pX z?Tyv8ueUe0-E*z>#)Cv__b(0F8`oh|Dv>>uE|T{@i-^T>Z~=Ta#cW)p*d4bnieh(s zv5(y`tiQO{1X#entk$NO8F%BRs@==9`?3P!w);S8kzW<%P^u3^Iccz`^88UB6FC0~ z>QrrB5f{E2rb;FL#^hcV=hk_?pjsAcU+uHqP{5{($V9rE2B$zZ=f9w7#an{4c-FRI zc}!b<ovM)7~PlWQw{hMC+_p)Q1$qF*E? z5cc5yk3AtDQ$h68DmzS(My*+Vc##_4vszolb<=>?OkbXVBE1EK1bQ1xVDZ z%fpCzZGl46pKe2<)*w;oVMI+*h&p+1e?(cL6V-kqI#Fw6qTII;qGAbABg2Ur6(nj5 zA*uk0S{O#uZ|;(bO6!-XA<>Ci-49U(GEvEtsP!30)RUQEL_L+E5cTt^NYsN!)ci1_ zR0>fqFX@k{tGcK}9X%eMs0U@Demj*Akjds3`alDEwlsOd+4w2DB{z+UD5};)VG# zZEcjcu^v=p{J)bR= z>#3Sb>e&>%p3m-pdbBIV`0#pej9$;sX!SUidREeUwneYUDc7@m4%U-au?0u=?Ku(k z`~d1%ZC@e2GEc6jmDV#0>ls1osSv+Rp@TOMGU0t>`5^$Ed8Ml+if2vuE8!k|dL2eX zh1hjFrD0q+4bvlOco1k<12o*!iv|nQ@JpLa!;c4|p(bOZN?b;WXui7-5e}J%-@#Om zxL=-DaT@8_n@;KJ5YAJI7R|haW$g3CU&El}@=PnNb=W~oKzh^)Js;nSY^-%&wyiX; z8?uSdUy%^cEL82)>3D~Cg*aZR7RFkOc&*dZBif${?Z4M<6mOavIo6X%`|p*<`p37@ z_Cw1FLzst5$0lwxsd|rliCpiicSSZgg#UaOzCUOs045g^gW&SiFu>TmZte-aYQKma zdoA>zkT+3qCix4Cx^80A1*UbLU4`$|CkV$)2edW{=h+9YNqK|PDxC>~@bceY)Rp9T zFa)pj(x0GwJ-(42Dy$3hI@~6mnWbf|nB?YHP^)d5R~BDfZz|b<@YXXz1+0YvOb4t( zK`ZajEnMjEVBnYKJwa5gQPn+CvD@*1|1f9LD1jOp7zH%yCwCIBb-mv&an8gzg7R<9 z%qEEP=pDhxTN{Bz7RID@WccGC$qxz*cFii_j()sU;Y|FSR;4n%;O5*~o@o%;Tuc)T zJmXpFpKLkrOeHt?V}%pqi~I1B_G#0Bdt-!j+KiD9Jf$2l78>Wl*JV|N-g@{6zvh!) zrV=#!TF#qXXt|%UKu5~Tg0jjM#FR31P$Kp#!4wGV+0)XQv}6_%H2GuGrm&0Pi;M9A zRsK6Hc@545L8{`frKA*gB+~cP%Y;<;R(^y~x?f;DCFeG;tkI`r@T|wbELiOR*T244 z$n09!Skx7_VItBrzEtmGu2|Y0)nQq}iFx9%V0|-aeY;A|p{E#VN_FZvn|+XrahHxo zvkRYBZ4<{b#JVG#5ZWM@sH1t*^B^8^`W449Y{DkvOg3>-tiT-NQ~X-)mwnW94i?Eh_FjIys|4fa=RG#@Qaf-H51+TopBRiGZx6(Wz9KQqu3TEASaTbq`j=W? 
zF17CEt>wJ6qLw*C9xhtT1uf*$p&e-5WEXJ`Y!f1&_)1gV#Ib2@MgqyArv@YH9-UlP z-ippF@^*@UnF8xc@ujY}iKD*LgRTV-yq~7#-$JvKcn6RpJ0Lz@um?C*!FE(}3Li!( z&J%kW`Z?wf8(>`=E{-WsJX|LmWDnO}dfCJEcpdd{{T=GzdJrTiV_ud>8O3gad^+A} z`4qwhc2}xfILJh=unh|rtlKtgGyJA~>Qd%qf4pGz zpf{3jFi529vfO;uw)J{}d=|UeC3x&3K!=nKE4o~0D#lAm+$FWc9HO~$RzeEYWD>Hr6+NL=Z9X&?%JLtmVO~}f@ImwX$?3ec2zoL2>}3nA`68P*ZzgioEoka2 zvMm+wn~sVunXXp1_;5y#)PjdLPzcTerJyZ5vn^N3isQQ^^oKpa*{bdy-Xr0-f`+ zMKGb6VpGW{$j@s`u74m{qcUTPY3|Yz8<`UHf1EtxtiQ`M$He^+J2o_X7L!Eyzjzs@ zwZs%dLFZpoMpp;5`UPyoiEOd+5G8#Q8S2tty+)MnT<5V;;+9-O zv>-|RIaCP+{OCp6#@~_(952Bu+%t*;yf(IarEn<`S2ac<{k%`9*2P~%SbYl z>u&Ui?kr7iv!_5ow?esi(*I9^aMG5^7)u**o7@O5Dq_qolqh3xU($R1<3ZFh4Gg*g z9l--1c+3_`-j`h9-rv$;*B~fa>6QVXT3bn{CX?`lC1w_Q^j_pWKFtEwkc`0w>3Sh@6Ovn0CP->Lj z8-rZiGt0ov;@(BUsbwvkvqHULI_B)Ch|D=WT299xDliVbFVRRAB?0T94CPH)#5^Hy zJBhSnq}3xZqPEh~=9n0qD+l*dR;XtagcBL!JnGpOa$?vEF77;DK4w(Yw-kz4( zmw^J>3R7`Qr@{hoAjD)!$1LGFR7Zlf&e@B51W>N$QRRA+a016sY}iRTRQo|v{Rya^ zDi%eDh-#8#Dk+5uaIj1zo8cRgbuAmPQfmd*rGCuGIS)Z!lw)6%moa-wnY|e)Uc%1^0duziYM4V$v#;=PEf#vDS4HbbzMr&DbcL>rZBqg~`T6>Z}{S)-Flk#4EC z$S>H7rji%z_LPc6AA_QcKLs!BSjl7V3yWdlR81Dz?D6CbLK>k%Bn!4p2#`y_XxPax z^BDSG^nCRD*E%=`4u$#{qQ{vtj z!vzXA>A3tcpdv1~#kDHQ2A`9Jw-Zb5lZZ zWP|6SUq~ayz#4_rt|bO?<6_!G6tdm2$!=A>lErSQcnbFn(^Jqy3LW5TMRA*udP;Db z=R&8~efY`A%OzPx$Nl6GMSK+sf<2AJZbr{7F}I<8fM;8CK@}T$<0_qONPB-?xFJnq z+p=@S2hfLhwb4I3Q)m;HKpV1cIpX5yC|U?(EKhP+$&rY-a3Z3xLnYs{8^ql(#ykV% zyB+a2uF%!0)=*=L8*>kliyWFM{z@+J%FbYcpte5)f|VR;h+lq}7HTR<#EXgs@$TLQ zZwf2;EmDe+96X2>kkxJgqDO_Y7}48Wt!i6Xo+srzXo44uG0zI@qEAv36B2MJB;b z!N1YS$K^;u>Lxkz4LRri&jyFbY$~n*iplAKxcp{$%%8B&7hk`X+PI)i!^m3z$KXtC zPM!FHC`s7ggl;HqFeRKA^F=e1VK)jM@yEA@&C2Vkg$nYA8OHz8P0H!WgvQtZjyC?5 zO!0QP2?u^hn?QP~L^ubz)>@FdlXA)2` z6oa-L<&d`kA3s%mge&jTWO=uq$dlbw88fD|#6iBK@h)WdrDU@${Un9h!^pl?JU3R} zq>l>jf7Lc|F$|s%;GJ9iBZ9#in|PE4!@h&&#&czH{CEoFnSdyt;fEI}gu1ojmcx>? 
z16b$fQz^Xl9{f zH3U+m4*03sYy*xiKC=WRIiz=%momkW)LAaD(~{r9TFPS63LO)Q-J6qu@mgvgy-s3_ zliQ#=yEQ6x$r*g~uj`^p-8qM5dT0 z7g8V{(W?A63Xe@3FbSw?Tf=}aAw>;S3~Jw2ppQVxjD8OFyshHi8ECk4tGKm@PI?-N zs$twJfmk{e-N-QwiiOlfp-haZIJ{W5O|0HYi(LZ6-gYXrzBEfn?JylKp1zLSC>--S zQRkS&iLs6pv`ZM!F5yV=&%og_2xpVCrp<+!ek#>%sU7X^^k|uB+9PFIg0{{-LTeTM zX05f+A3p~@6~w%uhSD>*9^LxqDY@Ln393!Q3-#Q_B-LZ_&`*{x9@8a%L4a}6DKHLC z^k0O>Nu7<`7*MMY(YZ@_QP5_Z3!6sCe&xPh@aj; zjMzr;P7J7p$!wDokqwPP_Cp#%Hr7B0hiWM85tXH(@prSpOiQjMv^Xd5L=&V_biGN4 z41k(8k(#EV7pJ8*@xT@3MguWO3%KPsi|Go9&mv$qmTSc}S|^Z-a}q-VN)luOzpp0U5H@yC zzDxE!IN~=cqfIv3m^@V-G1yt?7XcO&@WW!Xvpt2cAJFevK>gS)guacELSjxjAp}dZ ziO*jS-SnW<(s?OYC=f#ww+4NmPEajdnk~LVZqJ{FJnxZBBIujb1*;n?=3Ut5mRe9Y zfhWk9;KVmjEntd4(IL^GY}SlNWfNDgfD$OUfaS70{g;*Br>rOBvP9ev${ebO@uF9N3QQ7u z7iK5$S#DR>#bq|)Wkzu=+F%pcwn*|Z4n5$w@m2YNV+d?ln^cZFIA=4g7es?{HgId} zREW)FCw_2q#6Oe!K<1gwYR6!7Sqs|}#_db=$M`(l5RH5~67RA$pcCOKsX%g-n0FI7 zCkfJQXx(j%PuAw*NsUJ6=7L ztz%=jc#S_>$g4wdapI@YCcF7Q$UifynZQ~V&6vWb3MXdfB*4DNRK{FN?I=}e_>;nt zm=5bw$`;4$7iwv$*ivIkhULrkO+sFEua;%rMq0*Ah-g{l`58MQ(%<5rM}{4rsp5Mg z{4Gu#{lCQD;w_(joBkFXFa8((Eou({=lsV07SV6df5+eAo1gwC{VlHj^qT!G4*KM4 z{4LJh4+*}Zzs0|N@?Y?`_{tA^zSiI3)R%(%uGfL?uB52`785HTM}D<8Nj7bSPQ=Aw zVM}^SY~+b}tC%?o7WFYP9`68n@YK<+S*cL*>fRnn_FYEKezMGamRUZj zHod1?SSf1C)x;-K$Zgm~Izr3H`0RK?lAvt=Pq-@@c9O)lQb$XD{46cK5cIPou&w^} z*fUqu?6_o9xh~-Aa?Dgq6FTmNQduwuY#AM4@Iu7&VtmL%GJv z)WCo!LGgR&F~J+EFmGk!k*KUzQr6tCp-IPnkBuAGQmhJjDeHUDLHuH01TW#P=-4c~ zcBS72qu`jxU7@bYmPK=X_1lk?;TTT`cUhm&ee~nt=qjyXcB5E9M`|g-My%QzIW{9P zu=VGm-Y`QZe?##8!|q-qGj{7WkIZkwN`2?|QAXy}(@{odO<1nt-}f1rpU@!UtL%MM zxUaI}r=t(g&Dhb61B^@V>mVt9f~jumuX9J94|h>^g}Nw!;molSXNC>xPPq2hV|ndFA1fY;j_MyPve?KzfBO1s^N)4g|McJg*X+-V zxjxcglwb_=7uBfh3HKM(Wgf_1l>OYl?=NcSb^ZE_3OS29{$)?dSyag1lT8!VA5(h8 zt0+IWUx)X}4iW!0`-y)WH}P*%jg5UiUTN%poA{}8HRRuBzi2d5vlz-B8@VP@u;re< z{%z`rf15<^rwL~2zNSnyi@8L-+th`7w~?bw2Xraq8LT+wcwg@}X2rXW1-;ueh_9+~ z!`~F{-R2^OzkerM@h|rAZUY;apIwJhbpq=q-fa#^Y;~A-n=O8{el&=No5_B2Mxug> zZC_LIH|(#1#D4wT>S!UA_I=;3M3if15pGTbwdldu-zLo~xm$ 
zxd;8*tQ?~Fw+TC#mHpeGXPQ!`T=YDI{aJ19-?_~gf!P(sxsAk@L!L}gBVg~A_v!e3 zHgRwwM(vE|-Dc$H)Vs~ECJ^s7M@IHt;P%NCqyLKjA4a0Shd2402DClE?(@ZBn0GuQ z?kdGkXG1B|%B^iy$q4W@OrxrdnGnKWHk2_*rOfC4IQWlz^$X4*@^Yh?UWQ!|LR*&} zg_R-Z5IlBi#6)N?QDq74asx@oHVRpd^k&aR^ZFRqG_yZW3-YCrT|`$OxeJ6~oBUUah!y1nC^Ru3`=L2mTD<8B;W4V&5H$ci4hB#>tG) zahC#Ryb{kyHp7Y|@WsLsv$ihuGlXW`^}Pr*<7$$3+)lDE;zvZFD>pQxx^slC^fpsv z%)Y65Gkab3;i62vIa^$AghFZ(bD!)4y5F?>V;a`Sq5N&!W+aQf-eL!#aBO0~S z^sqZI26NlQj6VPm2;PAY{|7m_n^iO$B&g!*B&G7lgO%6D_o_T}rB+ZwwR-6$)%RgQ zBP&&7rM>amQr%G+x1s;?3owQFvsaX&2p$h2ImG5kHxZ&419u^oH#}Ua)SHWm!|-Y$ z1<mLt*h`&Z;UAs#4B;6$}g zy>XO}F(4ajss|kdg5aC_@iw1VhGj_dg|K3Lc_O~${jHi{#fbE2dhYxEVa443es8Rp zm1@L_N!k~}ib>el2P@`wHNlFxcub& zn=HkOF;J`+ytd`ib^0h)Ofz|c@5^fzU#X77im4?JW+_%oK;0KB=C9GPVtl$217OAY z=AQ`o#+(RY#rP6^dFB=uHthO-uwvFxtQa4`it!Cop5&+cJQQ=qj8!g&a^-6MzU3bW ze3}q8OFRNS5XcgrSHXpWTE(}k`ryL+mEgiWt>D63-y0WZ#F21Z81oSY7sjV*T5I-2 zAi~_xA9eSC9EiYzfl+C}2kpN3odG&`a5zNq`l=8X%nu0`%oa`AxFZHc zZSlffh{xWs8+^;ZYFdT0hmc#IBA^5f#37?EuR(OW@iec&em%&M_sF#KP=k5*VI;f( z*8~A*IeD2hgsV0*t!uK*LL~J>Y zh%Gluy%1X-7#t0;-m?g+UiC`dPwddfZtC8nS@!Mn&fvHl| zmA4OvP*+UF*fXj1uFVF5yyAmoW1-*1wNT8Sb{y$Z?*@@q+HK;ncX4Ffaim8!{m|^o zYW0ocu4p#j7sdsDItOaNf0$z{OQID`eX0*h+;t}olnUD0kH8=iNl zZ&jjyh;J3>nS>lTG<;PfFOi?@@^2&7=481n4rnNd|N=m3#$p zEq^5mBJo`G-sTKnmcbXJS{wp%i9KK6@@rgiK|Gg<2(HjnXOAr?zlUITWZ?jbuXkeW z3@}`t`twJD1*Q065Q#*_aR~vrBmq-Gw2?X3p9G790+Cz*9)VCF6Jb$e{EhDt6c=+J z6qnPl0}t_06c_(k+RPAaix06}Ob3}E6t-oMw;ALhRA4pNSIPMKXe1%H_&9UQh8Le= zO(D2KqDUZ+XkuwrH&xA%!Bw1u1lLJ^1a34PI7+%AU4ic` zvl@%*HxdNNvdRg~X>Qg`lGSq;#*u_1ye+MnHIqP-T|*OS{plvgvZ*o{=ZfL zV=L_Kzu|P4{|4%`fdn_n!M*Gw#8*=aeRQn&6OqoqcEYH#CsjOo3N}#B7^s2mbOB;N z;4NSFv^eIAtMXRb1Cq>0a*HDaS0%hc+d860s@3m=)ad`#KyD{WQ^oT;dZb?Kk31s^ z03CJ}UNHnsqQHGXVgoALQ|hoGO<*h?e)?hKe%WZ%&U@tGKnT#Qp_6Bff_y7@$r;XTdCl zi4tcYT#yFK*^k%uTE)0Fq`p;jZzonL@syt4=B$_oX{su)?rYpNe0NWOyM|r6qwg9% z-yPaDoZsEwuHlcbB6~@>?LWP1*s%NmuU*5>fB!G<8lHVNeAn=u=fBNegZSdt?;5IK z4BIvI{4Q+Q@U!Rsy4kR{w)vjJ5AoP< 
z$lX}~cesES7!z@r;x};t9sWSI&yPkml9nSQu7cf+V}x9YxEH!9m-;uDY5)n~BjKqZ zh?lRZ!a(=0VLo!L>-w9FOu38MW#idheBL&3l|h++&aF{qAL19}xR~vrmy}EFu59s7 zsIJEBC*xx#MKuL|ApGVP(%~?uoU>fT4ug@Q5V@OZgda-T2|r`$&za8h&36iU<=EL_ zt`2%$CgZVl0Udyk%E@>%$g#$~%6QZXdE3Yw+m3Ur)h14Qp3bogDtV3_>P9WPoBe>v>lI9y?J}Y z_n)P6XdIbC)Cqt*hbTBb&LK$1Ik%G8GmM{5`rdeOd}Tw3eyB zbG{7x<>-POrUPyePHo>O!pSj1j<-17&iFF@I}>7xI<1*rYE(^bxQI6__Thj z{18e(Fb~IYfpPcDk2&TaRJ3XKF%>KwL9!_xt(1? zW%3c8sov4q)%nqA&}ut9=$}*CJWt4~hUt*p$dgZPr9IAcI7NGeXWgrIboL1B{*qH{ z9MlJ`+wYHkSoh{2E#rnvb9mjGmJ^$f3G5;0*0l~@8p9UwY&CXrvSU!%6o%Jk6VsZv<`C)G0hu!&DfstaGmIO88aG01e_POi35+tsVpz9Q$bDXgaRW3`2I z?o+W^fvpzuDugP*`Wiw4k0*Xcr+BgA~80P9uzu*5&NR8zgO} z11i5xuB=tAtgUQp(UmEVxS}h`4&9r{esu;tiO1qoSAOqr89>lI(-I-*zTa|9@;gV2kl&MABINg5pA96x_rG+F^7|A0fb!cX z4$v&O7||?2ye9cw^tTB4oh3%d@ADV?liwHiMv>oDdn4s{-PADo?V0*@^82%2_9MS% zwf~Fq`o+7?j&Dfl^bSAM`H@gb)>LW-iSKtMA~6Q;xbz+tEYt{R{dI_niw-n%QY9f3 z2<+VeecBW@#%@^c*QF)1T4Z-WG8O*`%1T>eDt-k&A97@*Epp5S$#Z*IhB|E{@WvHv za#@BZ?G{#BmZ7E6r%vXC2D^r<(X8-mQ{CzDffi!(KA~#T>3FlWA-8qrHVIF}(8_^K# z18v%B$LDF^gRy>84V(vBq2W`|Se-&#iS>5qcTM_V0t^iL_iQJn8R!@YUqaPUW|6*> zkwC-@0#O5EM#70CIL#%JsT944aT4qfjpWM&)6-h=z;E*?PBODr6wV9jsN4yoGTT3c zC`?4q8y~#i(way-Z6&F`c5He=W788hHa}3t=00U?mWGYZR2-YhbO62s zV{@Z_4C+oXFf{%VG}r*+qJs_cCEEW&4bQfhsr=foK@!Oz@z&N-tAs;!i@*flF<4dR zOq*Rb=_t>%v_w@wl~eshttX9_fojqUVsAF9Gf`a}QVu#rwZ z6>=W8uwR9`f+*DPSWu|P-^a5Jy!8;#rt5g7(Q*W|>8!>GZMr%_n{Kd=gpRfqtboRg z)^2%@J1lZKi?gwC*c6N`D_RHtgQDwM;MYjn49usO^NmpQu_vI!K8kfim||TWsaQ8a zA@-4SF~_ec)z2KLddMAK`F152dlJeMpwYF?(M6kLJcdm=(6i4>yDv`iWi zz$%4CyrbAlUcihb*!0Pu&kD|jWG9}p4wdt-(9r_@G4NGT0SQxt8t5C1P^;XnIoqWS z={!rP<1S05={dJchX;g`e=0r=GUGXJ0p2Uk6kmJ)D&DAv#KV<^HxjyvoU!_~oT%v~ z(`4UMgu(>ixXGZvfbx^<#B;+jGLiUBqFZiw9XaXj1|~d5C&cmSA36ZFjfUi-ES(!n z*)r9i1_;%{dH)={dMQGwU`zpmVcEoybx36r0c`bHY756giFUQb26FI3I+X5GElIa} zl~A=Zxc{Np&f?s+uS&K0Ey$+m#Zm47ol2{Vm+%#l~}C z(Lp$vu>7YgFhNCs`L~`6i;kB6S0sORfB9|a!lI+)Uqp2=qzNTM&uYvSBGj8#Rgu1g zd~By5s(ZgdP9W#prYegx=8LMDC)(hFxa=9l2_ 
z|G?jV`1>>b9f7}k_-ll}v+#Ew{yu}hR`~l8{w~9x8mLwDKcmaqF8=zht5U(9SUkaR zw~2LM(UXEuycJ(nt{5lBm-mjxHFO^)Sv*!wf(HSS@lRlUsS>}oPdvI?ET~rEnLhFF zV7y8p;ih1`*=21bYZ&9VlK3{87}F>J7Z{(WG1qdK$6W8=pM_+y($<9yq zoRr8dbA?;Nk3L-7s#3W4T;Sq&MsV?z2qtb5e|`knUgAJZT)sJK&dveJHAcuX;47g< z)4^#P)4>_}mQHpE=hom2qE^(A^QuO>x#pg9f4hv24;hk=AlnDUDLK!ab27l4kWOCV z8B`1v*#w0y)Inkpmy2}JwGfqP@oX@=bfd9V%S7p)l*eE3|2P|@?rCzR1u=$l@D)U+ z!p9}x6T0fFk`TZ~N$hx=Eh0{=3dP+(wy*w?sr4n214%F)&`r(fb$8FPoLK*5wu0A1 zFDA(RC5Hfm+b3t|h>uc=q!I0qjE|K@7@+s~1v*m!erG0rV z&qzTxPv4$B?Y_8g={&6V!T^S+KU|0~Jh2z9$)09!j}fsDYqX~& z4jRy&*48{gvp#H&Xx8)1*JMxo^~VwRw1Vacds?-xKYLoTGm1U!&rf`#^RVA+?#G_? zdGoh)9_H2m=gz}Qe$mg7(j&i!I8y3#4RjuMX-mX;*il!+d002!?|Inu+}E9l{W-S( z^RPNsKg7T8iXeVgRN@u#riO^e^RU6Li1V7d@8a-TrFk&min z+{B(x1F}yKA&##gr%<<48qqE97;ll2Se$i*VaH;ybElbX!K7>x>WQ7K8l==1d&&w7Ns!Bx;votG932oJQJ!OR)T&8_$coP+(M+AZUvG#Z`}JeN z{d)AXF*j`1|JvtltS;hgtb(2Zc|eB0jn&C#V|9^dV~6P3SOwjzAIjj_3gv9<5M&CS zjbXdEz*0jhe|E9d0r1^K{Wnkd*r)eh!5GH#b;ASa=t3u5} zdHI(QBIM<&qy5Rt2ZSi{a-Q&w&Y#w9=to|@;|=mXVB}CwQkHdOPo>79hK)xf63uKr|ib)W6OiL015rdEjiss_V zfTeE8w^-XT|4~y3Iyu+Y(lC$b)ZJYl= zx#9<~MRQnFkhCEv1>*1x$2lRzKU>>r*RAB)Kt6ZpL_V*boh+Q!er!632SM$j{?>AV z)sDaMc>y7>oeO-A)k6oMWj^dsXf^y_)t?pm6tzA-To)UZXDsmBLwr^Pa9!SdI!Cz7 zb&cPww~vS6I}IX)h%%HT!)loZS%je!2|z{||B8i~DzkkgZ><)rr|$y}oIf)~fSBSU zObXk(&(&;8NLCr3@xXqmlxhSiHCmQZ!&6Vq1tP+QR3nsVIyjQlz-LtpSq-(!>5=5| zfLx2?9<1Sqr(p+aIpJiBHVvjGZE6Gl)Y?a+6w8 zB;wolo#=@PQAi(lRYMDc8TN0SE;&W#dB`-)TityxgVYg$$5x|#x(!X!m8QdHZ4IAw zdPk?z4KfyF9qfQlr=vEJ!1+yrjssCg%+sfN-LGM*30e^j0nDv+IJa!<6ZR=Q>jqly z1DOXAFn~DGVo(900`GJbf!D`O{WSvbbeOF=t3k0Q)Bw$4-U@{4~KNOHo0@j2AR*--PLjjMFfPzrK783ACC}0l>SQiR7L;@V4 zfJPFqF%)o-1Z)ZgsIoAiC=`%H0*ZqHyG!9~L0VR*3vDFDmQa=lNx;@nfP(~V3k7T= z0oy|X`$)jfFiJ_lu24WL3D^?~&}Cylc^IW6;I&Xd8VT4J3b2uY{h@#Y5>OEeC?)}i zLIFETfIAdWK?177C?x@Pp#YHtG=u`WNkC&Lz?g#pr$Yf_AYiu#e#?Ejy9vIhx>tqn zTa#x~n?mou$nSOGzeCub(uiG=FTzNC1lkixwNpt&{jDp(6CddHoUB)TY47J|z2Z0a ziI;BL4q?60)5LXs5|i}X`lb)3{J}o$%NC}S$-){+Ivhp?<3&2-*GnU|Lh5ky6qLVZ 
z;PT6(^j|Qt&vUy5VXzHkUP=AqgGD?j7V3BsK+I|{xU2e>zb8uh;qk=-)$bsYw9PnE zdp+-~is;`WCGk>xr3N{0St8&$qBC^|acrbBT;K^d2F-FBn0NJ6@D1MyPOPXVKr4+Z z@=mZd^_j=GlhjffiPzViW9E6n{(^_FKu@M*2J~;0X$#~QFDd5 zw)a{uwc4+rwHCz%m0C~;RK`8n&bmNwF>j%F7JptM<%^WPCM7 ztA8v9tFQuO)l-3$U#9B=Y!zQ^l5&YsVX87&s^Qmx>PlO3C!QAc^QayW1=P{ z;%X{wMORaq#7I(22aZuj?{08dHJx7yR@ZVBS5wlT3aY8%3Q&F0id56*#e6kQyaZIg zW2)=LVycXmG?jX`RjQ%EGEntuNvM2I@l<^;0+l<5 zs1BTx1aEo^K6ss{TMb^sPn6WLSL_a+Aqj+5o=_aT3r~~;Z}}x4YT1GW@6{qccx`_H zqS2UWWs$g=hQCBtQ<%g^QcZocD5LxohgH+JOTcQvamCf-^i)ALjlT#~oExd8hlPAK zRi6i{Z!pzFgP5wb8%@Ppr*b_?QFV13)#SxM)#sREsxHqIP5vrjl zd8)v2pqhiJa!*Qvm)MLCp4Vin!HYgfNu6$CcktGHi0_Pgo=eMA96ZAdCBfTx7KmPJ zMuPWNE+4#3Wk7To6Q$*ft7&{wx|$|Pj3m_*cz`m(_ZoLlHGLKeR`5vMQJRu35PRj@HM6}i5J=#Y}4Rf_ScolPD*VO5d;^6r>HBu0~ z*GqwDrwa+*{$qUbcAo~K37DwUad9=>)6msaB{7mzQ`uh1Xw))?RnxwBuzGetaWzFd zE2yTYB|sIgA=R`bo3Ew?#XyyYsh($xsZKPesocM?O11R|iYkwF7**mNpeo(3m}+e! z1ypBF1J$s`glf!BJXP2!pt^*q%6^grZ?&2ap8hkd!JD;*lDgc??%-w22BFQR zxw$By`mhM7E;|vbn@4!6YXv~H7*mC1N`m)KCq8&*CRz=i)Ay9r$6C9C7c~QfJbzFe zyzW{h!RuxKqQfc@yqv>)@UrrOCS(I2kyfep@1Us8E^-)E%ddc{ zVwYknrl|s|tNB3naUG$GI>=K^I0;lesu9(-gOcEFe(}N_ybs4&4PMfAO6rcPn; z9MuLtpc=nZF_m+31ytj5f$Gk4LiJ!DPgQjssBU1Y2|r4L_vUjxcvmB=2G2Evl8SM% zJ9tkAfROJF#lh?7sw8;t0%UG+`RJEBh znl{&A)l}jOR;{-yuBKo&1=ZB*1W>JiN~$U22fmt8vwqUP33K!DrXZ# z^=rJts5bim)xE8Xsb;^Vfa>lspo;m6P|euGQ^o!SR4Xx6)gDRk_CDc*H|`^=!AnV} zq#mPB4?BbRUJM))6lEw5UTO;^!7I!LqQOr{@P_Z^gZDue5EWyhqTS+ZN`6dN({hQC zq?%&WD5Kh0jyrz(0<1P}QCv-@S}LffZ+`+R_s67~I`87EspC(~CSwQvp5uvL8j;DHt&RI&>5H0JgOL$&yyk%OP zF5#pT6QWHhS24c2Jk#H^!Sk;Y-cS|l)v9_A?%Adn_2y??!SCZebI{9?j9!i2i{87S7Hhw1j(%!Rmp!A@hjk%yf1 z3D?DSHn@0=P`Mcb+5 z->nuAhnmKO#`Ev{__F*C8Q)3ZJ6Y;!$;!Z;6jJ-w6%)#(Yp`!FS&};=5$K z(03Bvo;BNLd?$hLWT`LN9*OUXk#&?W*$g?~2~yuj+j!q>Jf6neWqgCdccj!8$H9{C zSlTzv*P8E;27J3lSn>^COML%lo6xs$4SxQ^HW}Yw@Es}jCEFwQji7zWX2|&tk@~LQ z%KP@dfd4FatNGx@==(a_DdE&axHAzxOoSg35oplAEZQ+>*my01C9BEtFt3Iqs|nDu zAL!8Diq$Yfb>!fJ+f#rNhu_1xphFD=W}w0DuTiO(v0Ao}8LQ)JGAK6le@x?joM9U8 
zIc9iGpW&^{;|+6X!hD!8KPJpP-r=V4;>Y!xRK&w&!n917Zdwag1Ap)g1Fp>{yY%sR zFm9CBv<&h5nbxm&XY@Xd-VaTmY5o3`mpqk*6i+bsY85^xMD@Q#G(U|qWag*RM>0R6 z9Sc1_6+YJI=iR||7UpN(H`M$DZ?QB#!?(!H4;im;e#m%b=SMtT`T1Ekh><*C;l$XPv~aR{7l>|Ge1eblKBzsnCSUg7()szRE62w_>8AD7Gx0Tj;>r3Z<8aPJ|9CKSdoXjo402%6sU-ta!Y&f_ONm z3O!n<%6O3R2t9=PpgqJRqCAe=0*~a45)ae;d7njT4KBCdK_?YofgbD!Zmb78(4BQ-2fDE;#)%DZXV9^g2ZR1? z7qmCSueQUl>fqIO_|-1Pd%@z{n;D$&O4(e~`HNsYe)uHGS$;fy1>j{ugqmHyPEd$wSwIvL}_ljgBZabMVP& z_oeu3JDZDINsee%ZGf|FWBYBE+E3_?+COBVeP?JNB5Xg=Qu|2*QTzSI_V-qh_Ad$B zce2!eCqB82HMajA+Miu7Xn%XXh4!29_`4h1M?w2l!uH!Nwcm=z|1in8{m?!{*nXm= z_P6l(_Z!>aTMpZAZr>Z94|L+&qmO(w`usF@VbBB)My=y;Yya#*|5_*f_;CM8^`x^@ z=q!Jo>9gL{--UG+#zT7SAnftcr_keI=rML3e1~A_FL7OijgR!WP}t*YV~@L_$8&3i;@J z>G8&5L64rs9-Bdr-TCoFx98kzB%`WKA9DZhsFd{{$o zSVP1b>G;Fg2duu5oPm2(k}rzTQm{IC06Np|Ucu^X;H4V)a;e7raa_%}A}51r$#gVZ zLEqt4(Eo&n`-d-AWcw#4GdA@OraTVpO^5VpJ1S z=jCwqVA4(F#VGtZR5T{`eRZ&ct5HSxvcZ&Lc>LvDUp(%hTUo04_)bUKe#LW|WYy18 zsrnVqZITdF?HtLtRe47;T71Qdv;B(I2XB)rR-6r2tg3^#R$Jg9v+J*t=w__quUMs~ zQxBCR_ka92m%jg_>0Z6)wA&vZG~B(2Jn4_okZ1V`3WT&@pHH_Bzw2BiT>kvDaQS?x zaQWSH!sYE3gv+j%h08N4gv-DEDqJ3PQ?Na3km)!5!?mMi>L2X_g0!ZS1OA=TFyQ)o zIn((8{{81lkWVPf{spcAvBz*!eDOpS_st0vm{VWm+)TdHL_m)u~*CziC1Jf<+V}l)p;i6 z)&7#%Yhyh2ay{a&4ThJU(XJ}FRiCAUd_o8fTZ&|e$;ThtVN&O1yUC|1VB%L!Or9k2 zCN-;tCdgwl_J~+4@dzuUJT{0u{yBs4XnxV`v1|_ZK&OC&0-emX@MhWGSF7~(=M|oWT+3Yc9rqScFzsLjM zYFLaCZV!s@ySAfr{2EZ4N(jn<1w6{06%-2c@WLML7fL+ZoVMg~Wh&)Sdd}=IWQNh> znk^ndAKBp%(%*KER;$3{K{5RO_k8l@DE$5Ra-l~Q_GrF9;-M+FJ^$9p6v(;n`b-(ZirdB*bt{QcVb5$vO+@cZ1KMKAD? 
zomcp!xMAoxE%8*`;Dn-jOn|?(&~Yd552}lppj%Z&qYq8K!n|5hzv8Bs*_REYhK&z02EVS}ap@2tsGJ;jun>!~`<#{F;X zNIObtx3BG`bTk=CDaAk#0^|8z%>Y(r|N9hs{17kk*j`9^q>4SFzN9=NO3fY>Q_UVa zIC|AKNRmFn4w4fCY$w^i3`i0R2}#d6e7fFQDj-1~o&Utg|8u0r{{@yjM#s>{|7ZB) z|5)2S%EIjMC>vHO zobAD=@UtC*Sqd1D`2^$TS$tkvB+2AuIrf-4OXBfW9_4XV?9n%xK7M<@&g8LsvhC-G zu16X^L2Man2Ujz_?OdZ7;Od=6xbDv2xo$3&oFF2v9PBl8ru6v#q^0x2rk~TF-+^YY zIbTZ753!x=5orba46}ozeSqyG0ZBm8@gyPnWjaq%o+vp#e26{z&yXJf=TaWuN{;_O zqda~HFndJDNX`$jh0F=!gJ3&2-VU^#!)pm}w9X|QhOemm-z^efZ)lg<@i9LBpDsQA z&#|8AgBq{Ql!GXYX12yVhB=*4k(9jd9p55{`GR zF8oUd@aSKxj{iS&$K%3cipM6S$Ybim8hMVU~Dof7At! z3gFSpqK^NMxQ+kwC>~`KL>_SuDe^;+hpYI1j5knnr+5(mZxG`D!zBJMlAu(rm*+zV zfk%6hipSMM?syzrM8~h=MII6PihRf|kNc9n;88eLJC7@m3iE1*Fc0f;DMBe-C+E=u zJT5I)@%Wz)-0|4Ckd9x+i9AC7;t3DaR4;g}Pu9-Ev7Yca`T^!qR4DOyWUV|uy!|T7 ze;2yQ59J!!1)u#0FyzIav2^Ep2Jn2dKj)Z-%l;@sG_~ThRUu7GcEFh@ zcPrAweyGJkG7)g5<6N;A=ZZy;D=vjxF~NgeahZMBG%8mlGSdm;7#9iSyU%Dy7^~Ki zgfagh+5e$HN*Hf11FXotl7Q!?0(Ja*z-|0HpRQky747R#o<@H>Zn8J=?{p91-?c*g zdw|5h%c%Ib)P+A@1w6`@spDU}yZmT$E*<|qD8;{v)cH|INS00V0?AJ)+L08O5hQ7L zjKqJb^tAD-mHE*w;IVY6dj4a-J064Q(e=wQB9G@5YUlA{k{3MqRP8+SN(qng`!SDS zAC`Fhv`We2Yv7UnhJdUU&Iok8FxZn#5zx{F{?6em}trT(L8?;~Ko0;Of2?<2sN}o&T`X zRlYb5c=dls9sj=XE?>Mbm*RDItQ7y|YR(q}y+JZ9T|1J#s|b=S?_ngb{Y66Z(h60+ zXa*j=^40P0d+zeZkLOT4ZW}Gdzw@-_i$}+Kfy0!c9fz)j;P~NPjN|Dg5{}Kq>U{Av z;L*NB9sj=L&c7U-P4Vy@CB?rv8vV=930~lMI8!^08!HHo^Y36B6^kVtk6K*#mwMoF zX|X!~ecK(6ommu*U!tY>H(Mi*?eSjlSTjpIkEUY6B&{~NOl9zCu@{F|rZ zaej}xeBhZ`6p!ymiahitPx8e-dIM#1mUbw076Rp+Js8S@MG_QKkvu<&10JUqsdyaQ z?T$xfCdK322$4tY98dD2wg8$B~8V`JcDk z@mQ5X@u(Rt@@SduQU2@=k9X#1=kb0K;j#HG%wzlliN}~iIggis$Ll&DD-#$8v(=v0WHN%zWul8CKxJf1Cv#FU(iZ|GepT{%0n| zrO)~$JOU?f*Q^xatHz5DbRp-Au-SL>5M)BD4 zfRz7cYUlA`tQS1K$=1%}u>!(l!A{Jh&pe5TVX2bGNZ?U6Po4knaQD27o)F?7%Q|Inwje?Gfek0_UsqOVSXMg)cL(^-G3uC^Cw+uPl58 z{#8{N%AVqiPVjg^QATaRR#>(j^*XHRNxrhs24}J}S=lF)M`L=||GC$0ZU04{^grsH z*ne6^rs^Bm|F9;V^?RSIMqM}ape3R2F@aTOt$cKTQU>=zHp7%(G#ef=Bq6<~W^!L3 zePm^^wasC)IF475IOx6}(B`(;_?Baoc*62*m3%`cwKl;GM9z>-GS&d@s@Rr7Pvz 
zlugOEm6ES+*8h=wOfDtg-nk0-E}1C#W>WGEcQ2oIQ8-DI>MO|R3LQw4d70c`QIvT^ zlm)aXw#VCuDCo`|nKxgQdCPqyQC^&*5T$-DCCUqbY!^l8e+xvZE}=vTQi_sHi&7LM ziLxc*CW&&LqGAUHT|uIxWOBCIPEk^bC?CzCM5&k|C1P8 zxLp#ZH2vm_a;BIPrAR5tuGy3*-x=CPQGTAO^$FJ|mE3fCoqJH(r z{zdupg8gTR_I7`u@FR=vJDqaN?Coul7y6zfGZpe&OPBn^kakg?vA0g1GqgNImGUf> zwX91*V`1=gAOYCQ(WIrT<`cm zA>SeDnoWq{b?l@N)+gTPQ$t@{tGl89e-#5R=as zwpLg9mBVT|lyaqTy7_JVuuTF>z@-b{U zg05kM1lWqL*N zzv`EiWEo;jZsj*zG%pd>(M47Job|R!5(3P?Y@KTp8?<9)3x)Rm80UD{){HRBN8aSjlS&^-+vfg@GblP z7VzDcE$4eE!dU1dUiMQS{JhW@Hie4d9oH0)16 z-H=;k-T59=M%e!2E11@^gpn_Z4}! zfccT4t}bBy_Z+f-dD)YA0dvU|sWu4-kK8;|yyvc#cn6Y=PdOc-t{2wBi~!!drx4x_ z;9W2BZsZ=`inkp=wDWzq{x^U&z3aeU7>O~^X5`AN4I5v_S9+VVWvvcf!?tA#eZ;{s z?BlGl#ahjy+pOsvzw(i_e&yBX#nv0V#XpKqY&MNf4l>VTjk*e5JbTJ!)NxO|M5{zM z8rGDv=$B3-{!Dv|8RK`brH$h5JkZ@lo$o183ySBI;C~3_cZ@m&0p$e+Ttp_820YiNfbA7j9s8JWk+bpeVTf6? zV&VO|tE|a^q%}$>BxI06aca!93wNBge%EUK$ggLrzGr&N3~qWBsT9{efn=-UzD{D8 zny9N88Qg^oClkRx^znt)2}L(<4nRorY&zHbUD5XY-1Rp63*g3IYy;kxk~;7gyEe=G z#jb0#zj#MwpZc%bReK&x2EUOPi@ZiW?0&$7JMU5Ke9-bdZKd0zk$&E{=dJM0alE#o ztB&KHOyW33ZpJnnpC~zw(dlyiNW3GiNxWV9i(van;9Zu;2>W}`mCfSpBdUM%t!bBH~m z7@Rna+~;Uj^c@MR@Zf>Svfo)kcJpFO9UeN2(HYobM4iZkZPgmM{bWDRqejVPe2^x( z4CF7`falPO?)=4%n`Hjt$19Y-=Je#u=0@Ocy)r)alAZ(QRTBT`_#90>JRS!9p3+#(1AaAu1aQ~tykRpgVy@a z{Xy`TuKhv7HrfcEh{FD$c(r%_VBhMl`h%C#h(EZ8>;M`$Uh)TnQx*RDRi7^U>xWxA z^&h#Qp9jZx;6GYfnSSQ}(q;dV@I}}CN7&zK)qU}R$Ns}q+ExGYTdLfDtRS0vu1}Es z$Li^{|JVXNFD1D1AFo%+{73hnwfGO8^P2p}GoO3mKQ@$g#eaN}Lj1?vO5#5j#XJ2+ z?lc$vBa^obU{P=x%3DI%Xx0WR7P@SuH=i}pbtv=4OAKG1{qdKc~W9<(3sqW$oW+DAb9aOfvm=m+>gM{X#F zGk8C?!|;3!d=QqvI9OvyRq`3WAd<_Ebe1yCq;Pu%po`>Ip1qufKDJpdbDs}KPn;=~ zEe*Hc%``oQOxhaD7|M5}KdOvDDOJgO?w*=99yzu0t>}N1-I0?6@6B5p*@AGXuyL6` zA2(1qs?T!Su+P9w#8o>(`Fe(&cL943>$w1UIWmB^^p=%HQ0q!VWj$-WT#?+sTN+p$ zTPXi{biJY67lcB6EP5Tw+~kn;ylW3+kjcHBU%cTsLvA05PZB0$nf(!r#eAX(b`eXA zEsY?VAq)$?9mZA9GRl!v7`bD{7ADSU*2hgZ>*7+)esOnOhCq9BNrkZmn%4=<>s&VH z(GQ`~!CN>`Et7^6i?qrX45%_52O~K}w435=x6V9BUsll5$RpiO5>@#B7rMQ%7;_WsU&h 
zEEW0*mGzSsd)-jpTQHYFdC*5_(|=^<^7AV6UnBI~fgl@>3ii{qs9m(5Pd|Zin(a;h zKp$Fbx?v)3i-UquGsPi`l)`>`L{Rp#_ma|nERmY*$8z%4*pI%%d;4ix z;L3j9U8&W6b}W$Wr|RSX5B5_Ut+F3g{Jpr7dS9r$Un{)-98J~x>csfFfm^cwN}}%G9BtyXi@EUu=4#r7wGm z+qaPpwHpq%@y#VH0-wwYCBn~mTQXOEVr|bge&y8`|NL$_%}^X^)E(9k@{!jJIQAc{m^N?GzIk4Yh(xc z65d}@%9lQI(D~B0vV7?roi80zXbJkicK7ehU_ zhPR&K@i$>jT_l=PCRWEQ>DFZfkg`~h603ccs>Di2mb9vC+XRva_b#lJw z-fxxZYvccRs;^SeSN8A@^c7Mn)7P4=>nr!TCVgetXyPrkp6M%C*80?49_uS_qqq8M z8Yi58y$0)Ri!U4wGdm{+;5=G+UP7>a5B7h;s$XPy!IxC=R9`8YIXtMoml~5Z>cd~Bo7In7c zqR>Yv+{Od~xA$rc_l^hfiCY+MpRq7M9BlsuxAyoDXZkXCd+v3m3Zx>c|Grj zR}AJgOyU*&FK20znr(Ely~p1K8`aKjW<25c zWeMi?{rwWRuf{O4dgN_wu>Lo)BYr(gFv{i->XA`A9A=r$-3w=oVmj-n@(b6c3O;Zz+CwWGs@`oaKJd1;16*k z$4wrH9RW9XsEY%5U?1?r5Nix$?#?DgupoH#w(_%U0@n6L!NzS+hAIf6!`ti}<@~v+ zsk`F5i$ukK`A>gMU&kivkg9TH7;A1EV`_Tl&kQ3uDfQ<&WuNEk>^JJ*{HOaEhA)hS zb66VxEFIOt4Gcnqy`F^_;6=V{VK^;3y1lZV8-+Y$Wes=#eJ*%7`yV0ozmD#|?2MxS z7L0$ODz5(4lYH}Hd9if>ECsKJx!uHCV+($S9`tAi_tX}6 z28|AQCB@W87gC)|eOy&it71td^-mT&gBvo0VQTP07(9wRglN3)!gx0gajtiQ^L)Z` zbYVIAFq0$GX5zR>X7Y-F2~vhB7v!%9;F>Y`p5|rp5pVyn9l4(4hW$s8uwn=^KI-Cl z7`f(Oy{Y!&TZOevXz(UZ%gE2o;I@t?1Mg>s60)&{3mfbl`kgWI^^p!d{=sg>Uqp@n zP0iy!a&+EWJFn;WKSK#m}{|>T`0CvdE<3{CA zD?G&0hjTxL694;QxQpjisLuvNeHLo5P@l!8^G*v_m&Jqd`%;R!Eq8$DNuY;5cAkF; zDf!VVBwS0d&DhEY=Lcl5t?1@oUS+35I5YBAK8+OY=bR8L#)3S7AP<(KJ6jM*4=TqQ z=?i>2XCDe8N+4oA#W#*fsZ{~ z(Rbu&aUKx=1iG8~dp(-`*lsAqKei+9m0nVd9HH26BG?`S<9Xp;)p(vNP>g4uYCOFT zNi(5c8Pt})Yai6vkz`PN3hGWk%?D>yR`El_4G>!<3amC_4JFpeFyTpo_XnD{Greu*SDgD@1GW;oHhwj zf4jT(!OHf*ZrZz$KiuWanNoh|{LP{P!u(B;Fn_c2c543Sryk1rn?85o`fAfROlOlb z=H7;V-K&!IDV!e)9h!EM36KGX4Ic_k8-y8*2K5X^<#hC0(Q?elF%`*mE~Y^Sm>;Py za-GhiAeYGDX^<8m#R#Md^mrBo3aFh-febK|2O}rid?#d4Zs>27$y({8mQd#A$oiXN648Zro(gCbK%~?rhF%lFgrJcAdsH0%{aAafzBan2=bc$u3D8zzOp9 z-g>X0e?W!InRDRPd-bYr-TU48xpiMvwF>n?1?sdvt5Cb^qZrsnhR(OE1>L-cgE@UW z9klvKo6AuBW4(?QJEOynk2&{KZ3t;U`u6+di=iSo=>hmSU_rkaU zzLyH9=X_9ozJ&GJmZ;!+fm1#NgyYm-msNnz^Z>yB76kC&fHNI(qGBVn8xH384dqi; 
z5PlpHzPV4mZ4h)nxp_qn06ZB1o>YiX#`a)EZiR{*0Jpn40^FI&K@}Oszn9tgm#o@* zBnFK5M`5!5sK6=b2uhANu&Brqq5FblqhMFH_Nk^-(@MHH?Ydlo!_tDs*tA|T3cvoC z&FTA%Lf6A2Ca~XMJw@?d16x5YW`YgpJ~_Po{n@pr{zbI`9c>ZTU-;PRE1>^)r!sa< zdQ3V0Vd}uxIdvWv<}W*rc)U1uWO&TIQ^mtv8HC4-(_wg2MBwqmXpZYQB}N9yJx-yDxGNBv|xzS}!2Jf51T z;L%mV@Zcs7;PK}iXeuSseC7fkAxxhSQCB19jAUhHOzd16l|C&6+`n=xq(Dk|} z2E1XeUo5IJZWYlbjtjXqq4{Yu~XvI|z9sJ9jyT^Ua8@a)j+ z+DZE&7TMj%6wR5dAa$yonWE<>4LGx!W=Gi9Ynl8>L&?7}bbaEm<&XPI$iIJh@~5iu zZ&c*JElB=Z5%RytNYrawQ-#i=G zd4~Nm^frNhhsRDHC;vmQB{Jc{<pm1kD%_b5*}M3c|J(k5Dk z=1HJQ3I#^?i$0a|(E|EQAX|icOSj-bk-2S_F0?8^-rGMyc`4C?#B6t8q7W@UOtKQw z?aa&4Q3HG@*4R!lUkh`z^xJA3=xNFQM4Nkx*jmQfn!t`uX~6wH(^PgWUhT>LoqGKn z4gYTjwyQSkTF<4kq2}K+%p)=iTcRWnNq+ zRrMB8+cBn05)B;<1>uS>Ry$xp&;q`mv3>CH**3RQ(jM4bjj0x2hlEo8ju`o7)xr{LNZr|Kr$j z{p7LsKUUQb!`_dFRBD+EF@W?Vh%`X#j8f3PG1RBDxt|bQ(>PnpEkd1jy8`vJwG8TW zV^pXC-)C9`u$uSnWkX%&ubV~xnn}G z3fSv5URsi_n&tRhh<-ym5sgw5S&~eOh1BmQBUzFrrG@&sPYs>Exo?OZgY19CUv38R z#!geWK!?+h@V_VD!2gcl`?A^h+X*Clt~*`xyg}w6hVV-y-ta-JKY>cGT$kOAvbD|? zOHR`dUtt?i>(p<4L*;jKQG_!bjEeu9j6>2vhQa3-CQ5)lnGMFenTRRgyEn!oFBISOXN%J zCzb@~}&fJxsxicq|WG~*Cmr3#$OF4_zNqLLad2Vp}J?Z#uEj!b2Fh8?GVp%Gr zLGpyRuV#|VTbo@bg5OoNbp?T6w0KgMBU;P|(iag2Z%3T)7d*7S0`&m1D7lfX1<22R2f&Kdvf;ykI<2e`i6yxaFYGEGPoD%x=AsimtK)|YA z^;;QK&@DK3rN1a)-vHqabmlOXSdIjK1q3W!|Jq^9TJNrHepi{bMod`?1@{TvO1ftf zjt;GKIuLg^$n=i*z$f216Z`#JrrJv=&u`*`<#}{fs5}*ihnD9J`yleXvuXtL?0xUX z@-*G1$Wy#Zk*6#!AkRaILGp~&fIO$;g5`OoC{&)V8_08^co2EMC>nu0PrZL*c{o)b zPmvY+D2@durR;u&TY|BME*kAP7gNiX06R9j$#@(&JS;r+n-n}|=P^7ix&R(` zjt$4-G~)3aUF7ev3ScIK62E5%}#fN-Q)4Pk~i*!A(uPSwhFqe z0-axHZE_hcUUH(-@SmeSUa=DwV&Xzex>3mxLwCW5>`$aWf?^8jLg23yqP6qAK6(c{ zyshz}_ng(9Qp0YD{gnx~n2IdL)5?7e{W;wDzlFqf1KiOvxWEI5MtkG29C4z%O3;Yo za0ZlXD(GJ_A{U#K^(gdxBc5aE@X?bjgG;FF02%0iVYH8C072Z?bjZl+3%w2ym$rAq z6%E-co)cq^>nUW=(8DkS($B{+#&q+&K(R@Ah=Y6|VLpHR+Zu*@bw)d6;aoa=#_58m z%cXbsX~ajUP2pmOr%SwD;bex9K_eO}d+>3B^5{yP@Aap_v{o90siaYz?0S`5d)T#? 
zUAf|D<8eKfrvdyOKBu038;1+~`O3sqP|@Q-A6T<$b031{xCpud&L}$eE&zu>c)x~V zJ07=SEUV;9 zr{X>?&eFI&9mskOFV~SeA1SA5g85#svV+f#WO~brV*nVc5!_gd_YhX8zYE|-8E( zStzoXNV5nHoWUuJy&yiMllxEO^xY(jgu5Y!pR0V(T!l3&%zSEt`?5h>H>okcuC)Cy z$kiVk6Nud(&jtJkT{d20C*RdaZ!O`B%Hf~y{D>-7Kkhn(%5%B}I0MeZS9{Sh*URSPV>9y3nwz;wABtRW!QpO|257s-ObTK!Mr* z%LJEdw-We+S!ElswQWz|u;u5iu9@|=7LKiUX$h*Wk?c^)YD61M5P`;%I=TRwl9WbE z1f{R_kHV_u5tR0dtJ&!s_2FxW4>}Louw!4`b-BGE0qV}vdUl9Brs{;^O4Vvevdl1! zn2j(ltS}2=EIjMFFGJ}?-IqxVzU|&&I1>~% z3xz_0{l)PRb`l>DGy=in7;2mSIjndi6fa%}a$BlUA8c zmWy!5!yVW#-7W45963SGkAp(Dz?|%oB3Jq=Pl=`!{0sf+;=7=|Vb zP$~amBkKk>+9v`s1(RGjx?7CdK4JKN25nfip zrLxn=87nDV=&P|fu)@GKo5m=KlPlZsEr)Mi?5#o&-PiLkLVcGvlgy6*Qv6$LYrqNAqA(Mc;sndpbjj zrC!7#tf&OWaH-*gPW;W5lx|6E3kY~Sb}z@nIU`kVve-(ydXRAM+PsT4cu;PTYGXgApTZ#pMc( zr30j03zeuW#)RL@EZ{;pt#Xe~J8+B)wZW{weJl|>UF^aGXhj0WoU999fR+NRv4$`% zusm?U!aL6#+ssS@3s6rV06hzLvqNXRIN)^e`V5D7sHL~1#449PG8=k7BaiQV|4l?WJ{oH47V z+}=gDt6T51GzJw5WkPWzw--tzj@E7;2aV7*+I=QUC4oy__aX}JhNyZy0)x$dAI5ao zf#t^;cKicYxdxr`HY)fn$Ge_mq0#k}rO}{i!d+3toorWsr!%@N<+$~@SfY~W6$07- zfi7RN%j#}OR-*NRXy_8PO(`wb7I6yDIozsP2q{OcN5xT!zA~kSm;-7EcCHrhB7HT) z;@kn0yS>8 z;^b;p%iR|tIaD-7g4-@aMeFm)39I3Kjjg9ZPFTyN76s^6c@SEMjR~5Pd%6iLeZS3H zx&DOKs9KgdtV)~A#v1rfZ4DeGe-~6?G%u+vLbdKy8z~lT>r{=*(C-a6!IAH#FKV&< znWSi?5!x%0|DZKiB1Njf(}RyCl`(r9`cB3+YstuUWvq_#$_cCB7UU>^z%iiFV1+uP zN7cMPf{CCy?&N@=hUr! z+{@Kh&s7_a@=VxNLt3Hlf##m(f4lPO#a(`tgXw^~yoUocUs{h+^;m3Ip~A=sSJyNB$_Xo! 
z8{Cp9C%nfJb=KteT5KH_P=6haRg77C$sh8*&iwEQXjZnKap^p##ToQv?6VuJXT+F8 z`S8*|8e-!{jmhD-x>m4bC6=?hd7Z@MEAv&RuLf-=EQeB*d)kea(^s1b_dfc!xW9b4 z&dr;ppMQs5s(p8i>s3pi<*?g_%;J%z`ouX3pOuWq*aTOep^a zWp5~V-!r{?LLo+rd;oC?#4Dw^)EF$33|-rAy}u{IzmBJ|dcQMn5qJP_&$d=BArpu$3-u&@^vd|}~te5G+j*^=T~jhA}J z{c2g2ScHwHf=p3$%tD}1sQPDCYSFG|s>Hd>h`FeCHwZ*XeL=kHP*`KLu+k+5-3{Lh zit<1=1|8l&(2rXm2n2N6t!pAP4Gme_IWrfTR%BkfOJV!^1!oVVgz_gS*VhxuUs>5- zp-dFYMNkfea?M@pxyB0RCZU`NWjT~@u+2;7xsSb!y~XNqeFBgtpq>n6=2sEqYJ5`6 zyod3D?XtP?uI2)A9fH(s_~`WfRiW$h{zWo>d7WgV_WN9G%S&O0kJG5&>;2Hs*Y zpWS1?!JC+*hHV z3}VkK1}9=@Hqdw!ySRd4=;k@+;*!w+W^seSc%YfbRnP?Tj3VwZ^qWK356boxnu){9 zxN%l9ahx(RH+Ts8@*R}pZWMLDryL3KQ2I0!$|P3qR45Tqi=ixs&9!%Ee7@aFZF3To zzKw)3;?4tsiOc|GW}?8ccc=+IR+vF0nm}oVvfUkO;Fz=M*95~|F|0QWqR3)v=v)h8 zqABLTKf&%WayTXerHvg%s!S&v1J63!S}!cDGi|Mat(Ydl)|Y=$^p;wly(Gi&;ed$_ zN;C$DC`iHVc(c&i@(y9IKkRwIw=vSU7^7bi)hnUUD$~9)oo2*2TUa?qY&eCH@+&s? zu3o|5@)YJO{{wSN4dWc}IAe-5x70W-(}~6eg}Dea_k(F}Bh2l9xu2xDO-37OCdp{3 zsLxVC{LN%v13Mh-yOK>d<_&P}TMUUlCeuYSXpI2LBp98~QBQqWK&{f|9QD-N0_XDM zF#a#wY!zL`<^Vl)qnu(ZI=f04TRWi)L5Th)m5Ig@S<6!66$OD_TLNM^h;`)@1r|%d zMT&wTi{Ywpu0&vgz~X_0mOD-F-7ni#>A~gHUfmSYh|=B6I^Mm6&0%Bg|0I)YR4V!f z^J3N|8*>Cq37EXw#2i*I=$EfeW*rz$^yT-Gi89s}QW(`gL*Z4M&y}aGGJm$h=5k?a zjA?T)Y=***@zQ3dvH!H%j}|7=08AkIw5?>~jrxTYABz<%b|0rpY-~meMjF_nTk6?; zzv#9UbbENFI#8luq90t2y>(x1^#&-EnAoO5nU6r8EtGHiVAX1KUb$*P-V5@0$cNpc ziT9mI^~75bO4nvW+4>gEgUj=sYo3hwzi7^X7tzvSF5M(1B%el8`Wa_7UJ&1#?3Z8* z+X!~cO`7OCDau0~-i2~UYoT0tlje$d&QRZ6mtXDMVN5UER06zP_NBA#V49Xc+v3I%+Ur&#(hZx>v!S!~ z^LBrQuNZ@iU~f5UyQYk`C8d`BdV4+d6&}j@A9l)MXAJCkAW&|e({69-Lmt~)$lC~c zbTk$Kqt0m>dmo>&+CI9SPs8)AQ_iz|IL-10&fD5%r?^U7&;s+{4HylDh3)yYUOXj@ z%8$O0AH^Ci6-I-}=$od|Fc?jP(U~wBRARiQ2%KV}>E81MiGde$iPLs zm(y&pax^tWMUkx`u8XV0bcA@-HJY7rcR5cYm>N={9FCyNxkj6gf}YMw49Txd>oH)8 zF(l*I`eKE3Oa#-R^hSTqyGD!nv{TfUms;JE=@(B?8y`QZUc}p*rM7{s9VoT)HQPd) zU0^P>8-)RkWs7B>{ljJEhnwvm&ccVo@!`4x<>daw7Cj4 z%Q5|Ilr}BKN(GZ6lvALbgCLm2$}oizjs7&0qoEunm79&eITRkdS~v$7)^s=1pSy(! 
z542?i$;KL$CuwE-K*7cc;%K3bJpeWmY{pgEOYGDrUkVkOXAPnNUXgwruK#PMOp!k7uf9!n$BoGvW0Nt9mBPH8UQX36so> zhi1a>GpsZdewE?8Bn+e>cKwX4Axh4my}U(3lv`|zeQX}j1*OKc3@d;5sr|#?3@b0= zv-6Y_1U7)HVA!QC;@?6OMEwkNR#=#2Ue3v)mM%R(O;>V)nzHbOv-dnf_?zB$L?NM% zibDEYD7b!b#$<3=PurMVr{NfCT$`C$3I=n3n#t@3;|J!%6 z3f(}jlQEHQG*Fa+pqy%!G6`%yun|{iTR1V3hG@odXBI=Wqse-Mbp`8pg>DfxDwtH% zKi|e!@^KplrsA@Cm`yycd5E(WXbyUJ-(|XE{I}*tUvAU)vicqThDdG}c&yJ<)_kTc+I3nt4RC-NYZ<>_kjST3I0!`p(3MDAkpQzrLn z$Tc97f(7|cCKoUn#AKv~oCwl;6rTSxIh4s}Oper$kAf^6E6A=)x-)rt2$3Ed@?s96 z58mFsK;((Xi2Rnx>|<(j3&{A9g51O8dM2ka`Gtmj7vutXyO7EMW3mgAlQm=?kcHy~ zIf6+aCT|WVvb~19dlLN#ZyPXq`B5TwGkNuQHF*%E2fWQXPh>okA2FGrAwLB<1K$3F z$=OT}V{)E`3<8-x0`I?=?8jt1CI@QBW*}pq!~U1aDsLh)1`%0XL!QnCIYE#{F_EcE ze$HgtQ8oE3NGsxY8c1(t7$SxrL;B5etEt$OX43TX# zu zj{E^9FXRvY>OhC*Oyr`}McgQ4;jrgKM^sclYxO)uD-qPOYiU^8xo+K@X8 z<)G!f&QbtuNTF8}ClsBAXJ)5P2%-W-UoIpfJcVBhyBjb6riT6r$PaVX_CneV#wnC| zeD!xIa~>DUPYdbk>QpcJ&sQ702GFK;x`M=}^%;=OdkQkBkVb*87mb3y`rlV>#2bK! z1ksku`m+_4b)-L)RjFtMl$CWHPw)rf`DX!@mDGyLiq@3%ErxcP|%L?YQ+G)z_1tJ~9FfMDJB#MpBiZbwgp%w@$Mt2h?hBHx-q9!uZ1rgp3 z&;QR5(Ugh98scjZ=^(Z;QQVS<^%~+W5WSxe#D`4$%*0d;(H}%Ch%rnoV8UNR)CH0A zq##-`@f;IXhtx#wG3*lBWB-4e2rnkmHN^KIHi7u%G|k7C70);D_WXS)gF9gV%eIqR zknNco+oM2ufSAa{N+t$rh!!ARQR5CwOk%=ALtOkFdlV42^NHxl#F>L?VmpZSh^K>0 zls6}0vxZm*BDAX@K4an#6Z14g5QtO|uQCzC#BdGK7KC3nL3CkaG80WTgmD!00P$BI z5q?Y*E551*VmAmYh$Bo?HY4I^4e<$x-u{C4oQX6h7HEhUK*WN0orw)hJf|VrfpG01 zh#q+qf#J>65qKvJyXP)={?E1tu5pxOW8apgIYnArnKGC|7*l4a6}JWgrT2 ziD<;cAr0{*h)KS9|HnjO6Cz?X#2X-zLHwPGe={*zL-YaR`xNefn3&ImpN4n@L|A7* zG-F~U6P5eaMAl(E!UAzVhlu7(q-lt6LHHxCwlQ(RlZXu(;%yLLfcS`s1SY~YL;#2w z)O;)xA2ZQYL(~IN0irb%flRn)h`iqf;e$TEa?zZv%XNqPpjCrQ1{iGea5 zgCyRylvHI2xtsT!~5fUpb}#8M_+W5QoU z3_!IRekg1DYV#8D=`(h%_=%KPK} zR~C8okA~z`gvP56KtzF9nMIqz_xGwdg~3p!Viz8kMUggMp@hwjP)@?uyKfdv{Y|77 zImU%<&R$>%C_jXUHoXPm$~7(WpqeHq2-Nfth|NO;k)27zRwh2y5HTPs(3){fe8@zg zhL{YZc^^T%!^9XS+Gq$r5T4HpVjvT(n6PwJ6O{)s5e5jNex~|qw~?rL+Ku;a2Y~uv z$CHu4^K=91><<-%p<|*!c=Z*;*G$Z3VvL4(1w=6_^cE8%nP{aUo&w=NP!Rnys0n5? 
za9;OO_4~=VzwaoNjafNVqs)ae3M=cS6Qs=9t^O|PPf#vJ=>5vd4fUx5-tbd*!22K~ z(IzXH2xp>?h8P86^ib@7ndr&HBO0Owh%eA49hh)oBCCsz)t{JH#>7Mo@i!1I@alag;_IqM3brPK5Wk~NBZP& zGv9)Ar(aBao?}INgI%8(DALR9>13oYPceDVAdzloPtQmCCA&T!=|A7G`-Ak&Ff-k6 zoJe=H>xUq{%x*6R>4SE?1?fM3H1%ajzhu`B886an?E1|}AGYfgkUllr%y$VE=_dAc z1kxMq`ZSrIYv#{H`ur?2?K?rFr`z@ZNYAs|i$r?6U7wG1U%S2->5+E5|MMcf$gZFN zf=Jgf^-s91Ywc3a5B{zd|KiCvCm!*H7JqJ;Z)IK%Ep@p)~wiF4nF()g(>eg`psBZJO&_*|RY z5r0meR7YK3TbT*WB=R{W!_HPz!y;p8&pMutzGFWa7e(zBr^s_zO?e4?UQn0Xak)4L z*QK(O6kKrhgWL}0dNT)Y*l~LwC)ZW`U5o8PYqJjqwn{aO8#B-5JRfnoigX*^;S#JRV_TvDi*OFR>ZaqZ=tSG=9_+CP-n9CiGE z${Vi8OV~zvHh)4qXnqRwu=^6OC@(nSK`u(@(I=b4*+cyLir=yZb{mn3vWEztk#3pm zF#mZ}-^L$1XiIC*hB#=w8vbwn5z>(QBV3##8`}Nfr0_r4wHEcEqJ4baYIzQEC}(y3 zT2AYwqYwCv73JAtIKCmRy-5%GZL8Z2hq{$lQ#WWgJ7|3xRj(bJZ`TI?mzHyZj-p8S zc;_Jki!uvQq6RX&BoH7&2oORD9psTfAj3@lw660Z?a$GhA$tGJFU3b-ia ziVI5AVL`AmM%go%%vQs=HdwJ(qL-=d13n`hHbkSAW%y zAvSd%sA1RMBQ@%^S0-qq)M*bJw(%&&Ja{k_Soy=v}MZ0TP0 zKEc^h++XE>$S0_y_+P#!?(#aW*wuU3y0w<<+jRUrvgb504%+xm+oQSvv#EQRrfyq@ zxb8F6EEdbF)pM62Xrt6ouJ5Hbew%i)F`n7Y*6^5WW|i0|wjJ>lUo|$qig9-tZhfK zU20Sxn{0f%vYVa5w}}4*_3JH7KbmcPd{wQ^ne0yDx;O0BUq{_{t8p&2X}fK&X8pIR z`?98P*rx7jP2KZ?y2`)S9G}@azU4mBxK3&3L-&2^d?@HFzAlu%cDY7S=l_z^91)h& zx<>ftEu-x~?XNNM(;MxT4yJ4TbfS8lP3mW%-t{Pt*VIomsaI&xuclh{yev!jb=Tn9 zAJmwK1Z`ULa^iJ1YuWeuyfE#1mXBwVy^D_DL-u4Ho7d;-)PCd%7_E6MVABt09rK@B z$F9@Cb^1J(Q>W&{MjKxbkS`zkC^z_ej*j2+gMRD_e^9U0gzLqO5BJgY^gibEE3)70 zQ}c@3#z*CT=A&vqYrBhV?S9qAG8-QY_N#NK#-{Gowd~v_)G}^DtvcTLF5B(qN*$$- z=WYBx8ewDp6WM3Uu8OdC21?%*yC%Zk87SL|t)RS>t(jxlH;7rc(HvV)%jWW9G}ji? 
zs&l&7#>cb!R9=Nm-PeitE^*$~%;_4Nwl8YNm2jiD?sm=mlx^y+*3>PrsrzM}T6cv_ z-5LAzb9l}^b*z(+bhXF2bBuyw2bor>+eXic?b08Z`uKU4tkKbJJvJ% z*8?mEt*8B!2UrfOqwQbF{`UcW4tnwci`#z=GQ0Pn8n2T!eSU%3ysT-H($zi(`2=-Z zu8Dd!=Cx%1N%rGP#t)RB+3~k$Hn7k+XR^N6PwXHX_O)+WfF|>6gv$oD;{kx|; zlePgv+k#~F-b>rA^;j_-D@|69m6-Sp8sZS3{sayE@f+GI8xizJ+5U4>Jy%u9;`7@5 znCAMosq4C+x&EEjb!E-=8)MjgvZ6RT?sl^0r12-&94{oR{nxq{mO9xxERTU_D2mNf#8PlC{T;0W-y~BATX;@NPsX{9=5trUye{a;IGH`goQ@-Tj-NP96t6RaxNRa%z=o5Y z!*iO6)05(rDu`QBFUD!xOUy|d#dAEp7-t6gpCjNbA&%FEvnZG6Y$MJF^8dbo(@2~e z8_r}oPWO&}rK~=Ouj)-Xzc=rrRk5u;d$T;>@~lbwT)kO;pD^jC@t?L2OP9wvjQ%Uh zSIkB8F)Q4L-fYafO!}+!zwkref3KUF|4(E1b)nUN-p$N^#?9v6BXXs9_4f}qi`Sxi zGIt*l-W*biXe3R$DfsE7f6!YKZl;HV~IG%xG&W#gHp8trGOaAi(oLb^+ zv*CQ~HhKO_W1RUk&+Zk>w=rq-{Ff%??3iNm{70PSCQh*pr*M+V^B-}l$^U)hW~Ebnm;#@|A3AEt^?ROw7EVyDE{)gk(o~C zH{JZ6ePfjM;ln8Fr$qhyOtY=!J(8whqkfmh)+ps$Y*jj2qt=+PjO&q?>b887qJ5rp z7MG*cr&ESLX{}k`_CCO$)AQtW>RIOV_Oq(bR@-_`or77dIt&)a{DV@SGjA~CIEj-Y z;FJ@m)P^%+p+44q7OJt%%wX|zWZ1{Lb}%37D(desnuEWbxw2SSm-8_w&R{;{&oiGL z&RfS|X$FfynMwb(oWRpOCrq3diSu^>=Ol3=Hk`wM=Q)`}=zY*7#_4*=nlpO{wQ{U=M`ljW)xSHo=4`rOkE;7y& z7p*y-p^Q^7RLr@1GtXH=oYWY~!7=fcoNdI}WW!m#k>_+6#{54oG5eBa&2bH5$C8Gb zpVK-zrw29Xv~-yDIc>RTB5oH+{hjAJK^yIOI^55%CABo3O2JwpWzu^OlW}PMFUa86 zlI%=%Em=Z-3Np>(@l+G*!*fmgT=KUj^R;BrP| z%U0^sj;1TiZ&M%T=T6>m=JU~W%;%rZSzik!!&$6~hTFWGdBo)Thd2eqnJ(b$B2LJL zv*clYti2Dbv0jqJ;x{|XKGrE&e5@0*SbuM%{`NR`WwFj!YV!Vrd=^uV_X=`+O%}cX zFzLURljt{j|1pAbE)u7WAdku;=>5kCo3&(($@>rDJVZI;alu-$jyTI~I4?YJ^8SN3 zU(>o!Ef|j@o8EueaJIZ+^8O>6zRA$Ka73^!lo4lMwwSZ@1D^8{ah{=dVYy&kh!CgB zhBN9Np3`e2^M8i&bButKH)@p>G~XUC|%7V+j3Z52;|u3kdhoehs@5Q_jc5$xmTF4J1sK# z{zX0;=$Q!+ATvbK*zS zw^Pd3?+fyE%4o((7%k>3exK)*5N8hM>-mB_>L<=(8_s>3OrC#;^9AMWuLbw<&BUoU z;V5laA7k%%K0Kzzq%{|3-y*)BpGa-*KBkZ7AC9TN2Q9PlvFAARaq2kF|H!88|4`dk zj_dgq$2I@|!^TH8`6x2sXO9uLZJ_bT8~7bH{4yIK!E@^Gl0LGj+at<&{i5nKSA$KP z>C|SnrcKgVahtuGKDlk`&bX-7T`Z_8YqtBh%jABG(2Y><26|DrV#J8Mm=wCqd70{93RiTJeB7sUQwPo???mVooLYW&NZ0x zzCO+KHfwlNK7HS@;Eg2SSOag81@G3AJTE0*<#~yhnQzW}=10a`{-d6^`bTqKU6kjo 
z(C{L}TW7)h{t)9GKBVV09Wv)F@66Aawi8sIdjjJnPB7=ad6e&l&RB-z@Uk!t;DQ6DMibze%+IO)}^8rg#o8 z#B+#6JkK`symAdMOuQ8qyay?tJ!Z&fPgvx$$4{B8e;S@VnbyC_=Dh8c&vqK}*&d60 z*7j$fS3Fs>{t>Utf_D?~dK!4C7QBBYn5=&qp0kkFKMUTEG(V3U=I3dP`6;#Kd3lAJ z^^bT(h3359QGU7KkY66M$S+UC^SsR(p5&(W&w}?M<(JP4`QjK_h{zIchfSFRze zY~S%ooa6pa*z<7f@m-qZimeFgk8jf)KZj$=k*k>6&WM*bmBdS)CGk?|k1?0!xh_uf{{!m~9Fo$#EmvLGGL4BlYrU>Uq{sVmBTzGLTJP-Z(FZe|{igEzu5Xwe{mE(%PP5iM|IUhz$UVtx1 zMmY!@hCI|JsEi3^=^gY5+&;6sh9`v!ar@#%pa3bX)OF$5ARL1NP!I}1VJHGc zAt@d8ASdKX*ZYvM?H`P~8K?_6hoBB5LvF|ec_AOn8g7qd>`8$`AvV~I5qeUhPK$P2llBB&C_BIrvw+IvwBqg;lv?7{dUX)eZ#Yt(~lG=yu^ zgE2ej<2u51=_x^6KEXERf@H`Ic_6PM9QQ$fC;$ba5EO1)v}l zqHU=e`(`08%s?MtL(7m?x1bK>z8c%x!E75N6(L5^Evmw(>k3IN^}`d|NMV$tB>7YL zfI=F{?;%Haj=8MRzm(f~jFS|=zTisiUj@IPp-+&b5`BamTj39iLY~iY?0V##*Wl+L z$i-cd+h8M5;L%t~UI#yrwgnB91=SKW}kkKmVB}}%pEPaW7kV4zd75EF*nWGrfF^mUg53C}u(hlP+ z8q`Lb2Ngr%cBlhM?GexQu`M}0h&ka#e*?eA{PHSm4aS1@auc*uYEh&=#yJ3EfIsg* zu+nfWANBIt{A!g9pKi#f_=dtz%dzc}uAZXE1oUAdVg@-1&}aCJp1}4QY(Inc)6pJ^ zK++6sgB5|~_NoG3NtYFZA~*+%iVN!a$zrt9e{^b^-h0NW0Xqr+h28swFvamXDgyJ5Xh63QpB zzdy=u#9wy9|6H_%g7BeT^F39#9>8gpze*o3&koEpD6~U)jzwF@t6YN%F?S(HDf(QE zH5~FoK`0DGAxBND z&D7c%ImAHhJ zV~|vh7*(rQVYp%C8XVul%8`+{_H!_&A*YO3*OTkY~Ih5}vh7BI7m*(t+o0hpXG@%j45C z4<(D^a~v@|0!VXNAVLs>gpS0B;$G;)vCbSrJftgW5#5csxVz^&2+#U=$%{zDzsEY@ zeah}ZKE%Sm4r9r)jpmq#=W}hYMf6=v3;&*%wt*j9+D7*=7t>!+TVIh-OI?g}1+gv* zxkh|H`WcZQQ3v>cOdla(z%}UK#ELv8*h=D6hnSmoF^_LUsk(mhZ5=-uiUWM!fWQp_pgQ zO1dC~Ap2fEBLp$mY_EF`^EojU{`(mJ#PEo@L7XS{V9p_KUveI{+4o=FCUEYu>_e^! 
z=yyt#POx^~Ky0ncOP1$2f7fWA^V=U0lz%Tgh zsoM=(&vPJ>{mAi{fQj_`B=S!tF8RcdbwefQ`MUZd|KI4pWYLIAmPq>eDfHP?>M)Ji z)9I5L)a9UD5zpt4nI^?H@{n91tW%OWc3C9lCvyC0JZ@f zyS$C#TnxuU_FS+)3qCqlRAkt`!iW zE=r`Vqy?1s4gTc1r6RsMb0zEQZ8exzgP31y`bj`-=0UcJYy(LXNUV6~F;87$Jb93m z%(L&GL>gpYWUdI}bM#-F_vA(33EB~mZ zE$OrKy&{%v0;DAKc2m|lo>4*-$C^F9CC{){d}coeN$tqPHt{U|vY9p9Sn9Nc z=NGX;B1i}S|AN z;*p3X1T>+Ih(;{pA$%8!nMb|mUHZ=C`MwlD*&)P=4JHjD&lCRweT0w;p7+;m`JNL< zx&Wb15F1ZB5U`xJ9zs^r9we=04Y!W9-zv^+po||lh9LGs=mXh8K1KZ?`qFo!$a;*w z`rLmAL=ZyiUwSVQgGA5g2J9n8`k zSipAb0tulUL?f2tF-PeSMDu+nZVYW7#g@+rNJ92M$crF^AQ~}9ByF5$izH+}Mmofh zU#MRs=Y#kh={bgYgwWnN%B>J%H193J&2I=HM`D z4Ye`|r|{gTRz~9&R2ydHBOFJQ;Z`PKKdO%)FV3UvNGmh&z-LxkVLUQ_ZlxiH;XJC1 zvhp$h!KD9Wgw2C%mmtoB-ET}Weom9^+{G{LMB_OhuN@GtTe)WWSnXx7)z0Rnw2Q5 zK;G$Ax?&CdXHYhN#tGz_Y2|5DLj(q4I#yy2{zlF(X&)+~DY{`OW?>!nBiC1?L1l!X zJ7yvgx#O(VKp%XEQz$meN^6Y97G(XJ_~?Oeun(DLTX_ae(FbENA3wr>4)w)SWS>jB zF$-#*mD-qqlc+eK>x4Pjh$G0jfcz+pT4;m!FcPz|5eFd)tvrHiXpcddjV<^W`MtojW_+M0W@0POq3{ygjd1kG8f5&AenelafQ=^>dSNN_ zQY+=r3(Jsg8U2PWD7)Os`&f+s@Y45O3oOKGlwM(_Gv?zTlwN723%*CGRn!+J@$72S zVLnddg*C*%BK(J!*IJ3jGT3#r0iWWY^;~~!M3xO!s-iEJLw+D1x??_0qTEKV8~hTi zyp1>QSZRzNn1l^DiOj!Pc^EIEAGV-KBK?6SxOXr20H)zAs_tV<;}Fz-D-WSGY9Smw zFbt>A;DD8R$nz`xjy=fv8}k`@U=9w${~&e4U~GUp#I-^fe1(H3beMLcBSvEt{zB#> zR*E7JF-XKSztiW~j7N?V2dk0)4=e4k1UZgbc@h;7igz&*v#|l0{-m$b84Ix&Ig%Kg zXpi0)j``S%e^KWz;$Sv*BI|K00jQ787>Nbgihn%kPH^Ah0Lq=@`GX;thaKqrH)Wop zpKuoC{~V1rLw_tsiGQt(MfNk?gE)$2XRWNqOaF1-BmX(Z8qT5EdFF2P!I#*C z6Ucpm`lGfuIf)_4NlESGXS{EmJYt<}hILNH;CVkMThS?llbjiy%ts>3Jx+?CB069Q zwjg&VCyg)y+wfp!C$Hmk>_MR{PFi6aR^s8Tl#AWCfV}rQX@RjggreD;yp1>27lweJWgJ~>*$IhSb`(S zbH9^U5rr|>g?tY!J0Z!qm2Z@CkEXHBv$nT^U24EeIBi}<#%Ah6s zU>QsSClxRN3CK~9cz6{d=#F?8fBGDKFdZ9l68Q=_sfqWn04EUeFnx!w@i(3=%yq;9 zoJQ#)PP$+|PUEGbPTs*{oJYAwoba@;y)Apef$L zD6BvtE}*~*j5Tyc91?H}0WVS(zQ7LTdC5r-+F<~uVjmtU!b$y;%ht_NL`StoRdoEh8g$;t~}QS6)I2$dRBDe|0>r3%_~uFjIB()tB@Y= zSEUcDIr$c^zvko&MpozkuR)#AttR(YEhh`nEXawfO&_6Z9VdU_le&zrdejrq^|@~w 
zI9ZFxhP1nplcjjGF>^>0_7U+qMj|6aYSsngx(_H<2kRrk!??mqY16T3Ug-r}w9x3L>ZIw4ZY z-Wqe;tF_QfTdPowf0*mzag+~z1jiD7ur<`voXNA*JNgWLCC+bTjrOq-#+74cwzf7n z>d;XCn*5W`15_KFaR`mE z7TcuwV|ggX7m(GiqN-2FgQ$;a%UeqTHki!;di zwJ^g9&V04@kd2rONjaEI&AB)vH82p}yEsOuS?U9ha8`dGwXQ%sqd$H(rLgUB8*BE} z!_2A&Y;4c6r-T`~a%HhXH<8*=PK#Izpg7PN=?al{Lp5r#YV8fu7)ub+*lCe|tjL>;~x{7UMf7p+3Di!ha< z2K<%Xp0@^m8USR1E0*_xy@z)A{IhMyEcr1}k7IS)qq?Mx6-g2I32I&dX#IK7vFi(lziJhJ(}za;D6?Q!y~$`et;12Hq!BD zgle4%X?dPGXZUa)7>yZu2Vw`0Tb27g!vl%7kJ7MZ5AgU zp|IlT>7#(5X@qoSz|Mq6fj_j%qRZ9$*bhwLlF-Bx3F=ODtik;(HYZIY+UAe6weHmj z%rvD9N&YhVh_K*@PM+qlh@47^ng8vW^pFy`eRJZ|05We6_c}F;h!q(%;*`ZFswdrN zi6@i)o!gmB~l!2!GX9C)@7B8&#VML^g}Oy888 zras<6L+S-#LvB;~%m)Tb}U_~?kHauD@54!OmNDGFG(N1X;&?yH{kW7;J0Kx zfe@0PAC}*3O1PVPwDU?J?@A0QhhM!s;;RItu?QVcsMrmQt^ntP5Nd-cR7ZgMgD{+^ zJ{-9|j;R62l0Iql4t+)tZIA0f{_mhNbAQ#7NOMnr^@#x?nn6wLA%crxLV1Xf18+1T zMxKadh3|v(c$G5<__O%4Cn0`V!Luo$*eOvWF9hees82gYvU@yCuS(?Md1ShRDh)#1 zP9nAg`e@{PQWIi4ONOw(T|IGP{?SnD7D9+yEXf@aWdWlYHc%?bWa`Gf;)ER}=|*YY4!qq2?j-r#>Ext*6aL2CMJB|V9dc6APLIRh<#mvm9GJL?j7jDs#NR0jQjL`>N>E9m zmr^aJ+?Gkfmr^#8iCaqHmr_3#bg}#mo)gEBLUq$glVrA_>qQ~SofFcOr2ZMdG$*?C z*Un98E3Royc1?NSh@4Dc$u*`(0KCJujK2yCnUqfV&joDfP#J z@T>US_%M^eCk2?v-vNeugd}1nWxhlMCT-CX@mFcSWCbS8p%L}4MEkp-$mE|)3gn~4 zsU%7!Y0}BqOiEn|)nu}!qsvTMMl9E9#8w9;x<$O>vQ5gZ<1g!kfn#*5v>peT5(%Aka!=zRuPWa0 z0S@8_5WU6dqIDwRv;1l;uhd%UawuAxcon3fx5#6{pLNl`S4S+yy&JbQMh&#?R1IR3XY7 z$~ww&sLIq-eyM1vD5$8gKBkpbS(c=gCBY~*s^F`rEh;^#fGx_iDn_ZGKDJqwrIj90 zsRxzFFA6m(S1PAiR<4z>E4R2Sbr!Cb0n0kWTL|W$9;CTME}@ z&kOHmA>pdLD)f(jXO(XyU1iyC@?HgR*kl3PsBqEi1xR_7^ZMJJNE<%Oou8((;PREG*m7a!t$V7uBd_Cl&R|S1qH{%8yE4 zE&Vi0x)dT7nk{QJ^SYF7ElV|vlFF|aVHy=H70VZK8dWQm&KDsc1J+7V7Gf;p*D9Jy zvKOW;%h!sU%JD6W*9w8i(V z_8$!hGryc9J|gJ~vlpjN=ib~t{Jj;r3&u~9AF10a5Ec%d+;jxh7OKxA+v=+F%Fis{ zSao!kXIPzS@e8cZx!S6#iqFp>+j4CRaL>u#oHv!(=O>+O5N31Rf^15)P8l{en`doL z>^_RTrJ5HnojmY!NgvnS0wVr3a zocTX;KgFMy3C}^^eB5&g3XC2o+-nH(LeAtK8F;m0W=J2+-W+))W0s5_)bY!QPw5|A zc?IPb;vbB8MdTLeADnoFrx*0js+}^j3cHu;AMtqArswY;K|AGr@-a_s9}PSI_!RFy 
zDt1cKFK?ftyBF#fs-3gCSLzlopToaFbt+Jt5I&N3YS_;5pJqR*cFNc;b3e#-N?k8` zJkWC~Tra*qczX$amHEs=JraAV6V7&@=f5F(Nq?2bKj^)+@a4*#bw5&m#PC&qEt)-O zd}Q+#PoKj|e~g(MMY2x4q{5Nsw+qX3+~&(q=v{CC=H;o#-xZ%8Xei+2fEGin1rY99a*62OQysfVJnUCSYKCCNR>ct4V91nvbR475SH%>#`4YS@-$Q`GJ@++nrFe4S1}zV^VH6g$} zGLRxSq0c-3$C{mHVA_IhAeuHkacr7^4H!zPC0~kU)=RlH^-nb?PaQQ4Pqi#cJv9wZ z{nnhnC89?(J*bN4Vs3%P7S0Nnx<;lbg@%UNY;;4uvtxL z)=yQi<4z!&=1VZ}r^%SsuNrzY@uydsrmxxnQ!-7HR{>8cdeV8TR!^xPN4~F%|4b3m zgpX=C4ZYdLCJB#HUgybDyC>`&`!ZO3(v^+~9oPO@O{LTy@jFgsuw%O3P?uNME^(ck zF|R~E5u}frHfdO^dQ?xE)~~-?Bc#tC)wKc$Ji5Jljv8Mt0@%8}Mlfn1D!XZo37did z$XW}0hJ=kVtHYP5PZIA5w5Ad3#Q2=JDh^Jp#LtWN-wI+ZZ zTC?$Wn5I0hMb)}Q0J_$qYJK(USyOPOh1DvN*7{PdGk{lXo2>>OK)SxdR$IMZ)RYRa z2>^t`H_}>5t&at-E;U7Y^#`xBUurc~0?gLep>Zv?oA9m0a80;Zrx<6JBYQE~hE{SlB_-g|$k+>JUEkjm!fr0oI z{Po%E3cv<@qv17m?kR7B;q{-uyi8+?bxL4arfKmyHn8Yp&f8@Fg66$36L7rJ3e3v1 z#asz`!fhY%w!&Qh^~8;9o4jTMZ2j1(s-eE5YG13WwYubL-?FLUUx{Mt{WND^odiaF zjDA`-uK|GVn+7gxI>6>lBcIwZVD+Zi^E%j*+Iu>$IpS*nQ|QMgL8H%#(UV-d@mJmO z?b&Aq9S{~@UN^3K%YJ~+7(=9aG(dtlR9%;6-UBP2s zZ8_PtHdSR>U6W`Rc&t0y;p-%vA8@tPRh6F~wY9r!v|sT)cb)I%>BudQd+oTl^Ho)! 
zAM-g8wtik2vG;sT!#f3NN4^ih1BEx_pG$F%g4>a83f?EYu0&gBZ6a~c4L3QSgK)3O zTf%JO(l3fyN^R28uS{D~ucV$6BOL2DmTj8S_mA4#Y`U~|FxqT4NS-UTE|OZVwL7%V zl3EY1JevC|oys>Xn};f$&o|tgdp~yA+Q6Ul*ALj*X|E`r>(|ekS}$!v)=z=0n>I1; zgTLB3ZL-&|5nA*%ZLe~;4-nc(t3)>;(fuV8V193v0kkN!n#Ao6Yf7ea0o;&~>~J(% zWKUQZhB|YKrV;DcLu@;GG8gwFLD+BuT`?vx{MVz23)Vr-?lWYTl(9vf|I0TD~ zHHP}bC8~aY%b(4bt!lUs>@;d!f_9IgNxPR?V&F;8aMlj-byOad<#+HVDzYJU{>Yo)E>MpUK8V$ z_ZSar_kxG72agwrEwn=1!Od_8F(_1YD}t&qv{dabUyjyiyeMHCyZ6vgTV znR?gCgl6qhl{1V0x<|`RlKACtx9McbMY^N5$3z;%0${5Z)b!SzyktPH&9G^3_rP83OgW(Y!71S{2L38~Q!KtoyJNh+B)}HtIUg=yA`s!SoJj z*kw!J>WYLQ)Y=vHT{j;FR4i$UHo#b|rdH3QpF=I1&oWZ6gkNVZ!wsKz3#r0CFlvPd zCSKEYHCt1nKrVOIgL+Op<89ZVk4D&e*tEN{wc(Dl)_=^t+A;{p}nd!5p!BxGN^TnkC4* z5l?{}h?-E*Jgn99`ifJOTfDA#^$jIh(WQv{nU|eQQQZSfHj{pu-c9iqybB;&dKjfD z#_id3^nYm_(>mqqjm>nAd1}uwUyF3SPGG-I`oer|;6aMYD;owD!7O76y}p6kwC5Phpl$z0j> z&wB)6Uz;d|YN+{kol*#b!KD_k-3dF6*>%Xsl9hV*GGqy5%bS*V_wjiOh40H%gMX2R zJriGX+99I-_o6gk2TZ3#*LrnqYwVPia>`G@J%?j$4|056 zi2@8d`16Z}zCu8)yU_qk)h{L3``qzbf2$qD7W{M1PnAq>M3 zU&uYwwQBUcw3{6;ed0Q zC0p4Zcqb2oJWCVK#jgczq#a-7o+S(GjvfepiR4lHl&hUJeWg%8R<%#^H{MZ>RQiaw zTckXg%ek{q=xD|hs8R6J$-ZghZu_*z0;IRE5Qn=4ZW?))MJ2pGyHctSoNd7+A=FNg z&^P!hr0mLTnhX?{iz>4>#2%~vbh@oM99B!Dj!{P@G)iAu#7dUo9=|EaZE#!`#}({O zqVjmO`_L!jC*sZ_!hfXJ z$bTfmH`)5r3vtH;=bqPhIc(w?X{)GiJ@oGp^)7(xp4C4Fb?q|f{*t2m%jo*t=zyc@ zU-;heC|{QF3s&~U_mbA|3tGtB=KfXI zhnh`Oay?r)f_2(;dzM zDU;iz@DkC}ckJ898BJxE$%ZCQ(*{yFb>gv@i|f8K&cFVTaa?3Bwi>Ia8-qwP&`F@? 
z-L10tS`yKS-*c8Ryy&Qs_{*K#cHc%mD}ht&h+=Z=B$s!QfgqWXIWH1c=U66_kf~(* z%|uHk@S1fxa+Q0QXJy!>s`7Hqqg^%d?Wx4rIdoJ>q4_Sm{k%W6hLnY3BXFPRVkM~X}sw`?cM!5Btm|8@Kvw$2rX>gJ#HvILKz^Ue>Hk2Nu4DR`18UPmLkz-4Qfi& zFD;T%3l0Gwda*7IL~C-wVj9xe5-n8+HY-pSEz$-=nmCRV8EI_1jr@CR+8w5SGHPZB zo)*)$z=aq$2*QRKH}l6H7`O2G&KY?AS9IeXzzuSy1p|;Za*>R$A?=KuIismI!Zpk; zSPzhlMd+^#w%IFysnC-fXPxoI;1stp_F{*;vlyqG@nng!Z^g)kMRzh7O$25CnHr4w zc0~MRlzP;DkvP!)2d)!wL=HG`76iH0=3Ds zEeRAh(zz`-)-zW=0OBOo2Jl9t|?mxExu-Am%CK2t?D*eCv-dWSL(_%dhb8=02A~^+y!oTBsqmE^JHVX zoBk*l6D}yL^c6~7zf6{G)IHp_d_ykybXDJiE%auajXU!xGqL9f8d>VsRo_;4doyyK z(c+_K?Cz*oxiXs~&zZC}qJ|b72JdnFPID5@@w!pZ$D_&{P^gR^f}l-%YI_&QB*B!p z%v1}4O-j?l+mg{ByJ$cT<{DM)t8wm_mn%eWKB`Iuqt?+8@gk~dC-2f0PM z;8@E1T(WL|uC)G%Q)LQkIGWq(h%PI#j7qpjd|RxQ-24i!QM7H-P3)^XEVrt%<^d*I z(`H$>HU?1>5c>YgX*iOA7N&#qdVFzSg~8q|u7YgqhWS0R$SnJe>t|58#?-^G?4)6* zPTfVrQSqpcgZE|0$f`$8nq3Y@i6vHpF!@(AdQmEP;f@xdNS;!^J@H-6Vi2}`eQQGd z8Etr(@vAjPlwGXhN;Z8@tT;1Yz1~dXiga%J1g6(yQ0nK4-xsgD@O+=uRB0;CL{AYh z#@C9LYu=aOc7c_mEXPF;MJmziyR#1{mXWAEVDOdir`cRU!Bn(8BL6E z==M@<+`6Mk^G`oOo1aTpMpV7SAg;;+iPN)VRB8DZgN~UL(8V1TdGyMVq6b5cPzB1@ zWL!SGkaMRm4?cND`_w_zDwr!ue0fhQx!aO?(dy5iP~5SSMtHAu){u4~qmHIcW+xA7 zZ@!Z8&X-f7Ra?Za5C4~;He5L*EDHhp<63wR+;zNxU&Z6=b!Cf6IAHso_pm>I+-}W) z|A^DheI?@hd)T1SLr_W*q()UO`Ij(NjbJKH@tDT{ac0m(plD zEM1_hXbTeM9n*H~+6-;b7EJQdO?lPpYh#tmwhx!;8L837~^ zn(p8BwtNhgU&6Rkf3f$BJ1>y-jk~syj7>V{{)0hr{m1SWBw|2Vyt{3Mk%d@I7LpXv zwpltj%E-4iNg#^@aNY3d!8_Yu0nk?d4%GOeP(CH{xV|on`4hg{#DtNqv-waywqJ35 zpBBmoR-9cRz!1(b|hNFjCGo0Y|lr^!hhREG8xtK-9t6k3z- zFA-)w^L8}iN3iBT{jrq-PwZKNNk)_&v&!r$UM7vi=6xKSb`#e}mA{0u`{4g9vX^!O zbTkLEPyJ;@^Cg&&Rkgv_2nRKq5z?I&h8A~=0NhE#dAsBSxP5R8 z{c)l59D%`x4L8#8bNw(r9&~CKL^>!Wk_<;@)gacD&+O-{fSkXkB5>m$nM&;PVZ-kh}=j%L}z^lijb`*O9oUDoM}T8uI{ z%t!aQiJn5;yLaX83B0R*zQa}6>8)|>kU)aZ%$I+|9JFX)q0S^nf&+=Uq0sjK3{eJD z145x|e;6_is1}C8)&2-$+W1H;a^Wy|y{e{3F4m;cxZ09L$sWr^(MeK~ialu+t3J*A zr3606`*{LC^jOUb9kIpe&cMH-8e-Ns!YG-dVF9xxW?_p>Zq_Rf`9O`LWAp?s>=?Q^ 
z`f5_>)2uSAkdZ^?B4V#eBXO0r<|?_5m(0pWGq|D3wI!%wlgPbhzx3Vl|gkwI@sgIdpKG|ST7bdtq8x_K}6H)_KK3q%t zH=9%NVX?gT&|5-tT*Q!>pxrT@eW@=AXtac@-wFyL!~AkrS)@xTo;Q2PnT0if8SOUw z`Q4FAi?n<_JkXYJ?)zgJOCo$lU;tl0ZBcf>%$NN)6p5C7tc^yX5h>)a&yiSloO_0Q zb6P~^Vt)wz{3$x|4fMA0q(=Z~vd)FqH#|i3X>J(QE&UC&HMk&^G$83HI*VO8aAn03 zrA+!|>kgY2LKwUx%bv7MTnn*UX9jv1^rM>IdaN_+f=^q!f;o$hwl+(*L;QP-J${%p zvGB-C>p7N)f0Z!oVEJ29MQM+f4TAd$tfXsrdlltdQ75|Pk!}TJ&98jw)H=S226~_t zV>`|RI-ZTvsxnYG*2>IWB2HB1=UTAMY8?X3uG8=HBLNEd;3)2g)q3mnXf-@D#8vxB z0beIMLqZqlJ z3fGJ|1Eh)zUpnrKNb71Gr5eLq7)aESH3Sx33HDG<{{F#)^GOJ0`hTSW_A>lyvENwu z_M~W$rmo0$0HUJx9z2`(!gW`G6_dFJx08Buhd+@{2rB*R9ps5glE@jefui|-Zn*#nt^9HaBz+BM+ z%L!dcVykPW!Z>)D3Yk4*a1WFz3S(f`UC*De1jU4zb$MAe-51Z+KZq$YBge!9d&^UB z)0K#w%d>3bvWziG1J;yB4{0eWCBzUO53bZtBF(TdyGE-u5jTvqD?wNqPv=6BH0REI zMou;N^U>NOm=I?+2byWTv!3BpaaDQ3@Fj1ibybtMoUWP}!+o>W92n82BI+qzhl;y~ z_@ky%d(epE+=6_jj1DDFJdbh_G)p(3cU>M4R?T)NYo> zu~$WFeoyV>QEc9*(qOL|Y1bMXHzyj7j@Zx-#>Mg;pi2$K2)COedViPLK{2Exrx?^3 zi1`r!+$==V*212DtQ3a1P3lLJ>WJ>c!6_q@_u&*qBmDQ4ydrZ3%RaFLVM{>xEW#i# zZueg7Gx8U|INXpV-3Q2YEbRDXU$`=<-qp{5u?qRQ20*$?5RK1&ecB}3{y z0@eenE{s43+6tI&u=yx=uM0!|Jj%X)y;H8==z+pa!kuL7h_kM%aV;X!uE6NEtgUGg zCDCTE2+EP5mgdL;`AK*M6V%=pxR0HDv%B=e?;!AQoYL$b^!lDU1 z$|X@hR&#f+2l5UXG>!tPk!iXB`<@5dK4?d%U{sukNtuQv0yp;4uo`A(O!ZJKhfH|Pkrl#~ zi>6Un$IxK_3#3F>h>}yOAj2&A0*QX5E5q&(StSyO#V-)K1_KGT-&={`JhP(Yl?ok= z3|k|f)NNpPHrwHtFx0F4L5bF-HE}vL6~kurAyu|i`p%agIu@aQdv0u}E;MPQJuk|>L8Mv_ zXr_ku>Z+q_Pj=ws34u~`0w7!X8NJt!N2)CbUI{Hkf?)tBfmF-!h=;LYInL#B?1FKv zWDhO;E>~|1HYQh&8(Yn4FaMEXU(=L}&3)i$;e<#Z8)#$2xg~&=AdIY4`3I%0GRWnY zBVl*46>>92swg;^qX6gOU840c_KzcOf$>q27|JBzFB4_6wxym8R9j;qkoiJ`Z6vF( zc^zXAd80~t0C}@i8WLqA?>|U~1Gy~07l~1#Ch)6=pAx=1RYvg(SujL_L>tR}Uk4M;%XP#cF+ZP6s8a$D-BUgn73#N>tkj29=xh zlKP8~_Zl}5<5#TyuS_;1P7FscD5fWDxNx@JhpNQ-9wO;d$j`*1rKgS-L%pdGbR1aQ zQ^#5C2TPTyd{9J%-dZUc2^qU=b{2{t!vIy)H!_6P0z-CTjFM(mK+woS_cA zHIoVQKO^3^5#MTuD%wuOro6(>3cRXW3>X^C>5eWXFodK^1A-luuMWzDBVG$`9BDp? 
zQgc(8!r1g}Jo=$VgRGIS-AhK}DfJZNLi(OH6q*$K?FZ6tpiztuqn-dA3#+`_Ai*yrtnxDd!$TQSn8!m2tNvV7k`L@|jB)0-+}3(8`SHHv zYNgjGpav|U)wnhfe=Zz2tbcirnW7$2XggdJgU8O5tMXEgp5;1NQ|rX4WUun7c%5zA zTN@rDZ2fMz@(Y=3_rF3e)c;ww*QPC(m36TFg0yr~Oz~vh^D8p-ZWs0_F8a_1xAHH-+Lu)MXM<{4V_@TOd5NVYj6 zHil^yWcxHq#}?z%w4;!C&!v^|c<&34(*@^mG)q#Y=OoGPnWqbG31#_?f9=l>6dcSBkei;2>23+K6L1W3U?XPOn(rGO^Z4N*X6$d+@z{0EcWGaI3fKzU zXHX^n`H5@zmsVjUiJ_o@JUonYUqDQM5)F!bkDM{h%ZuQSk0k_|!!RrW-%II-u~;{R zx#62}40)9t!64Uf43~j3_Pj<&O!sfS6EqtE+&i9w$ej+082kn|AF&tGZF47Cx=6zUPP!&zerv zzDjTzW*|+s`QE>G%r9i~v8r>)6R+pVV0d4Q6~OUh4-64Xz_Wq)N3HC&jvNxN5%=bS z&BsEi6uiqm3$IJSbb5(HEhUA+Z~;wDU9AP|4WiPs_CrKc+Vdj_q}d!o$9>^vRK-tAMA627%6B^4ga4* z|LVYxSWfkVlvN7S80_Ro`~P2lJ8UtXv--wUC{RN*)67m!!RL1-%stHX`YJ_@PqR-3_s zX^3nZujn@Pcz!d`e!LYdTCYnwsLS`goVajzPV;n3sKku(!@oNCqU-bXxc#m1Kj=IZ zt587dlS8e91CiA!S|F?PUpsVe2i#jy3~AAoDk28a z#$VASZ^7eX${rO37j+F!ybxd$I9lg?hN@f*87*s}4bdA=urt+=2MmF_B1n;oZs+omeGu4G<_TxYW@&$wr*vERK0sFu`LcX83|daIDhGGYAc+E9ws?Low)U- zLlfHddE%Q7A-$w5fdAezNuHD`0TzB6Zi@+QrKJrLmCUHfE8wz54Y=JA8QSf9#+iP%KJ`~dsoUOQqQVMEnKKi%Z>EQfg! zo3btYXd4QQ?pe6H6F%GkF`({k^Sq33u-G(d z2PML!n5z=LJE9}3b=d^$4>DQx_#fb=={%twZX@YI8=NCrux`}EKKKRdT)#++Cq>E< zrJzZL5F-n?YR3-HxQhD@(0KCV4qYb=d2Ht{=%U%(bdT+&xXU^rD)1Mf7e^%f zoso^lant%dBbeK-M5U76qM&6=ykYXtWu(5g0^6Cz6^(^V`KE?KChYqrAY{7Jvp|?! 
zmg+BbhXcw^pI|*bk|Yifno-1)CzRWa)nCO8`eT(j?Z4CR*bF#o!cuGYIA;<%`qB%w zKXD|lj4VcCjM$whjkkNlO5Id9{1MhKSE+QKFSi4wFu=)v5?Jm{3;AL0p9M8NcH)p9 z-qu^Lx49hhk61^4*vaiDKnbLw+qaUS(zTijE}9a% zF4vt3qr>uUw?w%Tb*_zF#Nxq;RDUr3=nE3y-%TEb^q;1GLP?&cf6GfGsnI){>C_0_ zRBXrQVPg#)CO>oe4MTELK}qmuK~4!YJSU(C98GXq+B+e$$&l;Xb~q5Knh8jWKR#OR z8KSn>Um@Y1Jd%=9WNK|XVF71mShnJ!+N5m65wEDi9iC+gBRX93TW>C*cyL!LOP3@1 zR$KKcXDs4TJC~_M6K^+fzcgSrToEEzDI^jhQM=8+`+~MBnb@@9+`xQT*{iyFZuQlb zyUrR(uwT_hE(xB*diCsb>}pK&Azhr>>0)#8LD7QoMlmcR_8-oWNBO`Yk{dF!XJtdW z1-lnxP5O6q;*lw3Y7pVy+%}UE^IE+5VEkzYl!{`kyNcK1xJAS+Yjq~`>(QV$L%KVn ziq4~omwxv%Sa;%ew5wVxBUnX7Wng}rT8P#D6IsNJ5wnWy&Kmj4FAL&kV!(VzcbZ~h z^M~hxsxr!#xi$YUsG`ynwoin?o1s2-84>mgf`4l;Q?C=tP-jgQf-KtCPq8dgAf`HP z8XDk7MyLm)`Y?)wVVBI^5oC6a7_qTBevH2YdbK7bXt?t ze~G@Q8T(;S;+NW{$1R3X1+Ycu-@t)MMBK588DX$Xudi722X#_6vO*|Nugu!hjer%0 z?#7B_yPh4iV_SlIhw3R~xvw{@BfzQtLF>%Dn= zux!okd1QlWH9w{+N9w+-J+o9__V6~fh;dTbVUTj=jEj?B*}|?2k8S!K*j8ty5pJAWa(elX3-^q@~ADy zt{QXVehAtFX9udPd|-PVK%7@)UOD2UVUMq9`x>x+)gzL`f&??RP44wZb&CEToLdRn zIU1=!ivKH&!QE?wRJpy(mJMs#6o`$uoMBclW_F_yle*bck6uXe+zhAmlYjdhfR(m3 zq$5k@PwV{K^7m5(TTN4vOn#;M5{`|aQh<#bBQ3>}asq^Nf5K#EBdtg_P_|rK^}?@| zn`LI0OV|(0C`Bq@D>$9y&%P`km%@pbNk=zBBfD6n;U*|ZIK-YR^85=y)wdAKLVv3| z`IDh!f1h}HK@c}fxxC+e3ofTeFkgCz_etIsq+KCz4Z0_1q`V3ITcpKE5zJ}jZvahn z`d&vPoJKr*qHO+laSTQg=An0vsF$2J-!^G6iE4|+(_U&Lnk!!gi6^U73Nc1AogM8? 
z)p{LkSO-?@dl@(?D#T@*WwiIF+Cew7p3pEt&$XXdkN0bJuF+WY+M zd@n8^F8--nmYH+j{qv^RY%Fm)2*ROE>08p7~)rNWbCl(tBrNHb!z`TnSV$A>u-+KC5|4fDK4M;D2z!6m(w;Ps4S|{OQokhfOP0EO2^126y;+cjXuWBZ04(>njrt=G{XDeqBi>YJk>X*U+5bzVAkKdQ zU6ijtqqTD26nz3`QJ4QF^~TO!2M#&!W>7D-NXA{ZJl{NgQkLQkKg%w4uoXPD6S0V^qWEEwt@sj7i~Hfqov|)7tGQJ*A}BZYdIh91{`!UE0htg zq}Wt%cmCNv4-m+g;fzgVc9N2i0t*tkq78jBd_Xeu{fDB%xM{$@_H*cQx|VwxZ)*Tk z?&pU)hHh`?(+V(ywfZOb{m(mc`{8A|q=rq0sXl6R0;H3B_i&TZ^zgyc=c*aGmEp(N z{MRndZoP@)%J4AUe`7Aj6|*d!7yu4Mi2Y5J{o8pIT#>LEJ23?kxQ!DvKGH*S+Td_n zg5m+{FJ!O38ByFQK?dh8(bsj%LzS_vF^A{C2pzF1n`VxGSvJl8{O=#QZNqLJxaGp^ zoa@l#0ns*&<3W#uI$65Ms%X)=$BJl81T66nzT7?!=Ym_Y zgxZISlofn4P4o|`FCEDn4&*T=Xa9Q~?FvFGA<+r1_}k(4Jrh}56AHp2krQS3+kyAV zL5j^}3%q~?9uxCG?BXJ|Ate!I1li&BOljtT8#w#2t{k5YTeMF97HmLG>U82m_QpOd>qL zoA_!%!LOfvh(tVKyVn9Qkb%QQp727bLJ)nJ#Ga6Sok-p=ub+&8-whz_z9ZdiOKFMC zxDfGx3vqya&kk4Cppb#-NT1Ms_efcRH^W+J=do5|Gg>$atE}!{ z+XVUDxHle%Jnv|Mh`VpoL={~2Aq%ADxP5p7n;;qd7qiD^AZ8d@{4l#Lf!hHu8m^|G z+t0ht&n%McAk*havV9-?_H!TP`r5vB{DE>H*Wc3-#IlR;5!1{+=^ZL!Y%iRlD zjvGjykFOoxUKeb)oKQ>jywjNJ=BwAGh0_JZVd!$SH|SvpmGG-m$n;h)&vu|$%zKts z=(ru&nUCYkg(KGqGf(;Z$y9&6R?mCe^{Q=OJ>LxeBkw!F^$BK%_pIN8APXNrC>Y}D zRNY^n>}xard_i$@!LhUXbhsZslavM4hRfG6e^UYOTAsOEuVcP_GP~RPb$#qLHvOKE z{Y-iKo-l1>1+)HaBW{WN!Taxu^WR3}0JmQh3m!KwJ%$%(78_R#JdMXH(R#|skGLWi z7qw<-qhSxyAbo$*-QoJ84W*K>P|n~Pnls8yHdz-0Jp)*&2PA)u{L-Cgr;}woiyzI! 
zXDjLzL8mpbc$O$zw5V@zIpJAXf+rwHj`?VIx&G!;FLfA&0_s~%0(O} zetS`qUze~akod8U{V>Zake_6&Cm8ph)&~&)`h}O69ZT{PqY2Hz;`g++Db^Dm@K43LZT*fSU6~dfefc}|>gBh7YTVJ6n{ z3rI6!&>Adc>L*O!lM9M5VfykX%tkD%HfhWK584$WR2G>E(3Z#ca_H1R< zSW`R7^p^5(lxYmU{=*T#Pk^b7P+2PO^XqcUOU91RA6^Ng0LHJoPo3!w6b1T{`;1uZ{y9zsTfHwst*>FLy@pA$*XXNOV`*g` z#yOHhC*d|JTiLQ_@U|90e}M3D6q>*P?34~Z(m@TN{}*DJMgPWu86A)M?i}nF4>T43 z{mp}<@^C4k7Y@vl%rDYC(*}$k2Yoif?)&Yl&g*UTmaCl1sF{phe`1H1xyrzsaV9s@;zBK{WTL4Wt~%8qKcw^j{mB#h8szGrBb zO`pxf(;9v{16|lO6oNzgyZG4>RQk3;_YsNyZ~5}>C@dO-(u7hAnNK71?+z{~@^;v) z8b+0aaW>cg{P3!|#{~Q|)Dkwv2|xcYhc4UNwA{zmcPqBszBUD6)g)uew-3%+p85GK zXadkI<1;NVEd0TN1T%K@e^A6g-Y~BL>fx2aMo+cF&!0x&V*vQ^9xlm6qchMH*T5b0 zeEASL-LkBhq;ctOW!gFRaw!rs>m7diuNaH=GpjJ!1)RTekY(C_>Try)IeHC+4i=#Q zff&aM&_6xEJqCdOl24Pu2L69~PryvyXPVTL(8diQXW)%$nC zb>MLRd9*IEB|S0@N6qX1exx4p({F_95lZMK{0wKm(dmF%LAhLr={0fh_bt%Krm`{E z+*C|g6aVGmD>|JvcD(UvmP~yRvao#I=+8!L0e$E2sypv=AED1h>7e0n`djk?%z@@` z%YS(0^3u%0ScQ(gKmRMn-vRQ{<37Al2tCJk+9k&GPaMMg1PDEP{Rp~oFp7BNAT&R8 z73Sp<{6B*(!__>xh?d}f9{!(&Yy9_b4*k!cVZ2{OpMtyff%{u?a3C3Clkfj~?;gyt z0xMe+sj#}(1T+Nph5~NcB6{li?T!^Icd0!e=A(((N#4bb59z+$$;T3!KY(gRw|*C(lA^bW++=6fCP4uISW)B@(h zWz~nYa4BrU8W5iY`h7@Oyj&lIm~ zC7KY`ZoIp4h)8Y{9kw7|4(Qw`4!v|J+A4j2(BL1(?W=Te>1gR#>3HeB(uvZ^(y7w@ zrPHNS=}c*&biPzCHA{1)h0=0qwX|M(r1WU1Q`#zNrGCjS?Uo)dT`%1zJyCkH^i=8T z(le!JOV5>_FTGHDvGh{u<1OHe(mSPhOSf?5P9pRObSy8O zd->ce=iWMZ^W59#Zk_w!-0gGsP9B{+HhFyVzR44lCnrx$-amPIvNU;Sa$@rQWPNgO za$$0L^6|;*lQ$-xn0#{bsmW(1pPhVe@`cG4CtsR;dGeLXS0`Vae1G!Ro_gKlIK+?>_Y2L+?Ly>!A-G`tYGU525Oz>b=#Y)nnD;)%&U^swbHzE*v``bPE5>RZ*D)wiqfRNt+>SAD;FtNKCpcJ;&R zohqsws@+>VT02%dUc0Y$qIR-&s&;?vbgfi7Q=6!ruhnbK+FWg+wp?4St=Ar@JzDG3 zwrX0fU$bkwwa07MYd307)Sj$8ReQSjZtcC=`?XuO4{En-AJ*>Fu=2b?I_+#V%dS;i zlWr~EDt%D8UHY(er-aIf%J-I!mXDQ>m+vc|D4#5^mmeuVTJDs$%38T!w#&Qa$II8t zH_A_xpDe#$zE%F9JXcw$ELR?_bShgFt4`HF=O>yI3lpmok4$tXvl1HI+?;r4;=PGm6SpVsOdL9Q^xW}tC(fNZclz9!bLY=B&n=uA z*3mPyXKT;Zp0B-7d$IOX?d94lwO4De)n2c?QG2uYR_$i(?bX 
zaa(*pwkA)N?=PP&m&#|#6Xo;edbwGiD=(Cn%d3$(#`XJD`RVdAXl}NYRqb7z4FM&^yY)g?aGIhI~6o`XzbpxqhrU$j*s0p zc4F-0*r~Dm$4-xx#?FjQjGZ5=k2S~U#umo@UwdByRpZ|Fy?47(2~h|kib8bH^AIIb z2qA=0D2k{Ep$TyeA%swb42Q@Na+He9A#{u(#0i-@gzviVGMs04pJ%P_eb@7@Z++HT zvfJ&u|NXzN-}LWlA5Yv1_r`s2UpxR0#DnorJRFb2qwr`v9#6m*a~O9T*sL@*IbgcFfO6cJ6t6A8p(B9TZU zl8F=|l}INth}}dcae&Asa)?}_fG8r)6U9UcQA*q<%7}8Jf~X{_h_^&FQA21*ebR_D zCCx|+(vq|ytw~$bo^&7)k|3QpmpaDv;=DN@&X)_|0=Zx=lndt~|H8sabv&H@H%wf> z6>;aeVy=WMC^!#n<}EtQ^iy%RYhrN zMJa8e_ZhYOJ!x+`l#T{{Orn!jear!6ET+pq87t^2+6eGq1)7L}Dl)1r`Y^ssBp_rl zlc*}<0noxCrWCZWjHzHWtUjoqf)y9mjrC@I*kCr4jRveFu#4G5HW~1e%ND4P?>3;O zg4L*gPjPw(;6>q?9}E+(vP!nfBDMTc$?2}2z~d?{e3 z60oBY^aUfq6!7DqV#ibPQt{&}1PalBpd>(0hOk@61RUiEB|^DSAyf-BbvQB;?L`J~ z+3@0N2 zY0+eYinJW^JfN(EEG2IP))b@}0m{rM3(AtRqO2)~(jq`!1zin(R8b-e zRf`zL4ZNs=x&S7S2?gXO1L}4&nSi`(CKr%b0V-9aBF~651EpfXe|msYd8znQ^eRwA zVI<%$2^1>_5Lo;jf#ra~+U!-sD!Hpxx&D#jN2@CTL&3@w4OdTw2BKA|j#x-6C)N`OiK9^4>)>h>H}i#POtvGt zkljgl(vO@%#*oX&_2fP>k32^{BA-D84WWLmsdkhd)s12)nd(WoQv;|Wln*t9noiB4 z;;4nxa%w$wkU9$0E2ZvIFR3@wpVU{%m~I61Ye~1IJJ4O|?sPBOogPFFqy6ZK^gMbI zaOXCt;c+^TK22Ywf2Z%!kLW+>SM+)9>r4t5`V zjJ?L*gz8nZU)V;RIoE<~%XQ)$AuDs7%=P5lIX`YZH<1hDW^ggwFWgpcC)Dl~FzFrc zKKF!s!Trg7DxDb@@&b#wt`3bQBJeHV8?1d^n zAl?zpNE>njnMUT47s!vGd-bR`R2NXZQPfy!8dTyVJsy%;BiO-6b^$2OW!RYw++D5- zs6`OJir>#$2&mvH1PN<^>2C`yL|1XFxK+F=>PvP~jIerrR;zOd4~Rc>}9#%KEVz*ev!ERI(}8m-FZ1xP{zV?hV(7Zwhr- z3A>-glfocjvJfF06s`&{g?9ob_7MB1I^-jc5yy+6Vw|{H{4APD&anP`$p%y-Q9dav z8~j!|Y5@h1RChH8Nkj6G4+sj*X*#&2JYa82@I(EuMJhX=!R})naSZPbm|TsY#IHeG z`4iVC8WK&3PLM|=q9-wqm`ZFTvVc+VfNL=XE@i-}i~{zIC6mYkxc~R09@Ug`rbbW; zsO{7fsuA6To(dRA2Tp6k3u(l+R@Q&JasguFq1B5QJM532jZ zU0y_AqgL2x3DRO&c$_ZGU}g%VV>_{q zY#7%HvWOGx{Xp3JdBSRN2kzoHF-rVJTn%b}Oe};xFN0mLlnmu2a+Z7)d~TVnDOQiR zG?oaBh=?sQh#X2gfhVaa{wg^reP539Q*4TL?UP3VV+^;A8Ox zcuVk#LDU(FrTfzQ%yDKbIG}Rw8E3-};p6y2{6oH(;4DlNRtl}e<>CdgsWe-^)oFc9eFNu02PdbsV;4#OL z5#%f~nBE6_F_EbUT;c-EKNtEjojVF_|ACYF0sJUFf?vQV@lU`}*$M8#6d_934u0vY zU@UTCZ*hXSQ9LDflx9k6r4qosz3eUzmuJY!O)Q) zLk2j+*DfR^0f^Al)`<^w1 
zH6r{&=mIz3ffzAM%!9PsOmdNu!Bt+89!d@6PQV8K@*KHD{-j(N(Q3TlLnFiyaRx@% ziJV5BAq~*E=n`}qbm1OwXScA=SbMw+CbUZpA zT@6~3gFZxc&~XE>2y8iclvh|iycZsezrzQDuV@M>Y#_M8SaJiomnySV6L`$(FWR1sS1BsTVNxS4C z`HuWr*7#}_`P9?{6%x@t=umVVx*R6AmO`FAh@HhQVK#uDAUq6T0x2~I ze}n4*{+)?lM1Nv1F`V!RKQ)I~P242j6CFv6><=1thI~!70zJ*8PEmnyHy@d9>_X5` zf-mN03QL78!V>YaI8oXFIiL$o|k{#|}3YZ7aDRD(2yuAc;GUIlpG3;o^z?TpHh-;&YI z=n1q0n6)cLV_sMgHVHck>FX)rQil)4{qQ;XO2}80_X08%+2DCA=nr%hVD>Pikta+R`;Ps}w&PIP zh1*=dU?4Vvd+RO^5-Y{?;H9*hNM+fu<|~L9IslDCk3f=a2f1+}mI*kt#E0S0_*LM0 zePSp$j)kDx&xi)Frpe?=NMt%#PZ)K8dPv!V58nt3--2;wvY`G!z!Oi|j^J|}@fLhr zeva@(43tPYT&ZuSR>Q!Ddq_t}u#F(Cw#DY)Ux+}G1gCb7?#Qf!dOl#DGgr7B!gBGA zI7r^6ye?O(9K~zQk&(z4Bm^0Vb%H$#Bd=06^lJJpZOg`FC(-t}ddv2D2t+*+;^AIb0J zI|?hs$5OhiDXZn{21pZRJ<=QPhTp@Zh+PCv&LX%BiY(}ttC<=i1|Bp=In z5-1@IyunsMCz^pKPZVd14@Da(2>k6zDH^#;;g^L0%9u$9(Zc3)|XjMn`lw)L#9Wo8cKs2BvV=*3F zN)FzLxB*=1O=glfy$igQKXZ$rSe(1c4H7nii`*^5K}sqVUyB34BkAP+vX|@w8mgyy z3mgeRB9P697VV851}|X;X?i8-u{m(}NFt0FN=^bFP)gcSZq!Q9AV*q;#50e%0bbgf z{gusU6~|)3*AvOBt8@0iZvoF$x<(= zzcfrL21Ph0Uju!r-QynUEKs2fm@jDRcH$UdSVy)XZGqc+LXS-&;~|?L0nfIS+QxMj zy`@-bk+cH*{6%T1yjm^?UD4B2z;z5@PtuU1h!+};-Uh{Pf!P9^$=Go9^maQa_Yo`) zJB?k&8sc_1f(POe_)h#No{N{@&w$xl5}gTzZ~?s=M|>bG$W`PW(6|qzJ2jY^3K?ZF zRR&64kG7)Q(Gm1qIt4uNA=-qoU{J=Dafg(?gE<9NEMo{rDOuLcTp0%hpoePgc0!zL6Z^mQ~DR?DC5Td!X9PaxHa4vt|wn4 zJ{Px0PbIBvE(`J)c@reQx1b5N*J=g+u0P_BBqQ6vqrOA*Q55ZmPDEEiX8nS8!|6B*E%5sd}0KxRyO~5{<(@;X|N)4@pOgpagKT zF7#k|NyKvE1!)XQpG@rqjW37%+>Z99{pcC=2B=Ct{Rg;tb4FzR znDI;%bDZxivvQ&QT;*v!jSqZih@i+>ghLx+H^GGu#rNTph&E&w@HVF*nVV2NHJReU zv0b4%F^j<8TxQn+)-g!hZ}^_V2O&~yEOnMp$yjp@d~xGm_`(32hLPT-eOnxt7;q`T8S>E7T* z=F*$NJwKrJnD$H$W&qQV-OM)UJh*+}H_EuD(2a9ow{P-&gm!?`58`+!PnrXIqNfSh zYBb#t9{llL#29T3>17xii;e-;Xac!^0MU}PBj=EbWD`n5OR($t^jEqE(+3>SbY>7c z7o1fou>E895B3%NfvthOWW+U4&!5_Gwp?eY^ryK{DRK#6`%>O%g50LbXtK!~spHxJd@=bV4zBS*T@5FcG37+F+ zz9;X_dqNiW;r+lxE3PUM?lz8J$S;Fzx{gofxAMFAeUOxM_>wxmMmlldBPLn z3n-T)4g^h&71Kb!3V{b7h+3(Qgh|elA1LfbX{U5hIw#$hUVv8IRr<0@PxTLuhzVpw 
z60+@3JOhs-t`d95FJv+FcMa{q*s;^u-rPDaf-mG#!4aMWmUtmaYF^ffzCG9mFOnq9kobEGTc z1U#`4`4u>=6tRKy-ysqv6AQwyWmY_QR4yU)0wNxV#xU^+#7B;uM-Xn=Hgj!jyEM-Ia~A9Y0}j*+d)V; z=t>q+f*7LaC;|C10!@PK*AQ!siP#>fz#Xg~J`$ge|BhGSMnnKOmzB_OUx0^ak_*Wl zu-_J-BZ<@|>IBshcG?TLy^t=aU75j5ICGtO1gWYU%hsI@USxk~n{gdD7hsVY;BpHg z!MTFMhV$3?N06QeiNUa2evpbZp|yLhL-G(7U5VL)8_*F?h(TmMsH-2HM&pbnR89sz zHU$_WOz;--#krE9>?+S!PNt)Es)iUMuE>1k6k-bOxDG8vKcL}Q7FGd%@hJ3_3DE(( z#D2IVTS!SC$<|aKYAj^Z*Wkwj=r^=JXyHCUfFE@G1=v3W*aK@G3berK}cr&O&Ge|DZ(031MU+CpH zxT-&A1B|JIj;SyDi#tFiF3NwZc}b_vZc?BWdEg+N@$=xXQvgT7WEL=&GqseuO0@x$ zq`=jZz&Gz;b=(oo9InbIJY0Lik?iKMk8BFs z>PG$wD4s(dqpnd6=;pLFq|su!6n0KQtRwJNUnUA1@=_*+*~**-hkplFdIA*eDJYkc z)MtTTSma92P-@2aP*Ron;e3ugcdWX`wb+kyU|JPX>>j~~PjQ4x#h4YYB-hc7lzjkL z90&@2f_uSP@*aEwzn}lYdkU#Sh0t6K7W2eE#LmDNBjx4tG4;$_r%6!Ht0It_NJF#- zx*E+zpP?PGG1x}z3f2H8aFJL|d?FlymzTgA3@8>7`9|bdL@2SDC?O0@oHcxUt^QTfc&%Y0NtVVz={` z`A>XTVYILuu=Y-{gAPa%PeE1NOCHiRDML+{5-9d&xkS}{eRYo05Mhup$YSI-qzY+| zdZF{daXtWd+YUpq?$`j#51WF;V9T*INJ4p#g6?ClFfHB~Z-=Am&shENDR>N^C=Hx& z9)20WkH5mTgmNy865WXbgdZ`5hymZ72HCHi&?n6RRRZZl29gP65_y2k1(#7tT2QtW zLNRdnp;RO=w{ixQOBGSY)a|jJ(a6l$Ep=CDO<1>~$6){3)NXQ_O+C@ulg#R2d+Wt@gL;w``v zIza~n^NS%{C=(w={CU0@{9YyhmNyd20GAGe6ZD3sI`0u6L<-T6jFpLxWI$;uI6Gx( zsu(<1jbJ6(12#RsPk2O$@qo$<@w`|HD6AIsC0pPvPsvA}iAn^%HI*%7TiHP-z_kR( z@$zCh379Nh-Yp-HbLBF*68x9GhS5`gc4!HebV59kP$UtOSt^o&WFk4h(B()a@)oH9 zjx|LsP;1l)bwic38->P$-%D0!nTo*U+(v6qeRYn>8gs%tApM48QCI?&fgQj~urf@8 zo8k_Drf*Y=p13z2fG6N(pu+Z$fxQV|V9#VCohTsAL$zxND|H5ffO@--9+3KdA(f|- zyP@hiWC2-BR*}`v^G1{zWvNa)5TH&zQ~(tWJ&+7NkU?cag1HTBVMLoj2iU4}x86|y zcsd;_U-ECw)+PY5GnpJn*cx?$!IHIR?bT^JC)R@vX2aE4yF@k@?I{q8)#>qXJ>Xq86&?zmuvki}n*!erE>@Y4 ztDX9;kZL4llBagc+g4_vOWf3H9v^j5E>w>E-Z2?!&lCYu-BzZ5!T0KGO0>!>k3B*l zKF~Fhz*{LuI#K}cw+yLJ`^7=+7Eg6PCkf3!Gof2b)J`$OETK;X%moX;BC%-blXPr1 zb{_hq0;|GIaSP~_+8zl9Jy7N^vhiH}HeQbFt5a+CgbU$81VhIp5Q)TY;s7vV1ti*P z!h*C?=hM8^en}=%p;O9%Ba}WtC>PL=KwyakDv3G(98pon5B4-dd)CckC(_Bl3bhm1 z)wG7OuIme5CJ?%zcFMYlDTah!J6Emrf_GgfB(te(E?c5@f(EjuJtsgXcz^>Bg=Coy 
zneu?z3)P${u!EI4A?6ESF^Z1|XOaP3Q3^@0lCNb3A4nyUzy&G50=1n`Bj|%xGtdKW z&;`NJ1*yOQnZN-h;#*Of>9UjvNdOKAl_I5RsC&9Pzok@LskK90jf3TIsBDHhqgC>) zrii|#RIAKCDwA0P;(`RIRZW1Z?nch5Rjoo479iBBdaEpu@|UVwVYX^jy#W6T`V#^D zMOZObiW%WnxGgY%Qq@pA9$$vXpTBNQ%z_{g$Z2L ziUvYW7pp9gtgMIW*g$nQDM@AgVz!hmQ>SQcAyX>9@evIEECDK;3!Jaa z94V}C&kMW@c#l9n6tJGo@8%DvoL|i=6Mj|#1B~wrZY&Din1b~p;kHmNR0_%@9|FGA zL-Z0u#Yo_G1>42I?$x3x)YV#IBqzXjAXHXCbtY8yyi@{pwUn)8d)YP%VzQ(8AOZ3?v|SU0Gy zkBZvGkmIu10`|Ph-^!$!J=9m=ywrKmcrIB*Zh<=I`Ia+MkxM|8J@^1V5|F!Ct#S_d zyb93u8eXZggWv+J9v}oml@|ko4?vwupwe#v!=@^Rl}alZjshmnsGHEd4PIIwYHcey zNG{;Ke57zGLFMlQQVvwRT&k98r!O630hl@va2%y_^lot5rBHFubWeR{BuJx=m?Bod z&TfboFmyN)1yxUnnil|;6;-c6EKqyEvY^&I81y_DO-FaDb-#_iMUCoaCIwJ)g{9-6 z_UY;@P6@EIQhR0c*a6gBQSx9F(<%4?sC@}kU%|Ag`nv+2>fB=@kqL-a=KM8?q3U%t z>fC3b`a2)Rh!0wg#$%~)_1n->ws6$|JQ`2O3xM;DfbZSlD#|rV2qV%Hc>MtIx)Jn9 zH1tIoWeWY^3EnOV`k;(91s4~pl0n0|<+I*qDrX0eB1OHZY0`P4%@T@gdC<&O= z6lxJK=0f^oKsh5pA?<-B0|7~8>KTWj`ZEItRvWGUM8z98Paja>f&~H&EWp$Fz*h$%r2GC80CS44*`r24pPphBW54>#i zpFYzFG1h4{srsAsqBRKmH?P&V-i01NKEL(X$Gew)y)Kel*WtZ`F8ujo2tRN12~E1k z++nG)4Pua3gRe-e?n$anPphYA4rSCncr|;4Uc=tC|Klm`1Ld-Hi`CYLCCx$WBgTfh z!TN^gdV>d{=161Zfswh%P`|Liph@GyCr?7nkcP@9>zUUZI5r?;@}vN?4PvEy#>Bj7 z-yr{~lfx#D3%7QjJT-LkRP{INQG2AL^1RNx?a$8#jI|yVG=36X)!Ng!yETTODAJ~R z1NexcB!=QBK@W$AD2gFi-9se&e`I}i&+Gol^GK|={m&Z(`>%`DHiBAfP4r^5TFthD z-CbuKA2TW=(PQkO55p&vo%k)S^w^ho$wTQubYcFh(_0Vp7-KpyVx`^e)eHQ~QdbN! 
zzunHIZ?v=P$}=a^X5~-zp6HD=nt1HbD-X0p!Ar5lKrI*1u)|xifSze79yr}jOx)Qk zZMMnrTlLqyUX|ARVdvF79a^3Uu{%|A){1cu_IH;Sg@UY{R&fc7d&P4W`9K-1x;H z5bL+=A$lx7?14+uW3K|9K8f_c)!*ZV&Fo9d%UYurukMep4Bq7;_upc5tjGKP*Onmb zjy>(0(``-XEqc(X+AXo#Ij|!$k+!gxtvl#jBF(;cvx%;$p%MJsz@VN^hqP9{prL*< zeT&8o>c?-K@sZDc5FCD@xUecNvSlS3RsCk9Op z4RiFL8te$`fi8v~hW1suceL_XSA_10Fx}vJ#P8cZYqk1=kwM6SZ;uhZ`91&TH-Os; z`xoCL95GcEZm!jS)z?Ebe*m#oWJ7Ip~;;piWt~ZC> z6PL6v^q@K#gq(P`&DU;lpT?OzO0VC-3U92?I~$LkSr^qNnXPWu=usOv`>gI^50|S^ zlUohGw_7AxlOu`~01?^pO~V|c`x7McBC&kjyc8Z_PS z*2gKwD~H{$JlgnA=bDVDd4ITvI&a))_52b0s>=W6OtaZGed8bQC!1G&7#=w;c*BL*UQ<|p{F=6Zs@Tad=%L5{ zbA4La4LA_Ql)t)_dLS*Tsw#9?yFMAy+~i>!o2_Uy#b}0S@#6h~V|5>LPgpRIM@DlG zj&8j0Ik&p>gX6ZArZxKur-=#j*&cr`TV854t{@;~QCx@Eq#M|)mHR^01%D*1r@cEf zIrF&F=Yo1w8yUCWEh*1lNPA4OPxxjH$#kf0>=_Va;_6@iob$iJ zw(Pa>#jqu>X3Y1^2skrm-zF?)0=D$<=JV!4&jB4A7mOG>F|JdTf7+0@;e5!wC)?c{ zop@K?E3nPr>fihC!`ghlZy6G1)A#wzcXp5P#Jg=8t?#29LAf0}61+DNiLEyiWME9K z1eo|WOuVA%mF{nuKPKUSgjz8KsioFVKb{{rc`|$v^mov>AmyFTt-D8r2Tq?y&UK%a-aut0GQiV^>M11RkvSDC+&q{Jc zeaO4rrXH<2PkcJub7!_8(^9|p(Yg7iZSK$$Pc-grLHkV{^7sC}qsyL|#(Qt99(LBZrNPs2(<-om9h*2l*=6K; zDa+;X$kKvtx`&fW#suu16hKC9OZ9Km^{Ug#>W6phHMr(8V{7kD zPa6D|YCiq9B=O~^hh2u7we35^e)G(!Wlgxly+@Tkf9||I?)O>yXU%W*yR>id$clyi z<6FK?a~xKl$amd?dlw#-zF=3gbmG4Kdsi@Xu55^Qc-L>p@^)m0e0Ea6+(AcE8vWYB zCjQ*pqq_M^K8&s^9(ZDL(t@1acHtdHTiPEwZ)eYRNM`#`m!kHq*kfh0eaE;;zqS+Z z+xOluI^jWwkyqQfO9KlI4wY?nRhMUuaJ**oFmz<20j|@tsx|j>cI(BCzLncF>v*e@ z!R}Akv_?;DdgoYXyF~RY&&!`Wvux@U+j}Qm*A~7!X*Kj#+>*+^y^-y^mfouzv1xD3 zoy>6$^47%6dVc-6`;*?Cwwv2;+dg}I^y5WiB1i9cjK487#plFyd;3?A2u8UCSR z-2%fy5U1}K&_jqen04!)-h^ee`lc$u>f4sgP~MJI6Ry5J$XN9)O_aBJ>1N&fAF1#7 z*Ymd~mwC--+3jj+`1tng>#uHn-+#s09;;_xnPHlkH+FP42dO%L>gABQFTcqZCTIB* zy>@JQ6Lib}M0;}UnvrAU6KDV8>N&X7bVbyamhM(>gdV>PEYAEq@u5_&Tc`Dp#a3HO z4z`}2#6EZ$aK=T9oN4pMeD=1)@VF&!&vn#u?R;{P*|GE;2Bzzu2Yw9fwkox=+!o4f3dYnQt%wD5Y{ ze9ZDeRbkhQBQ_rB&6=DtIeF^WJ&*SPeyeH1fS%qAHpQ;x++J4d*t(|p#Yq3|4@cBXSs)osH@s}PFQTo*Z+?CUt8SlarT^Q#?x!2t 
zzqmWrv2xA(G@sR_n(Jw<$A`UHv(?z$&1`LS+hR?p!hP$v$~~vIAx~Y|ym`~ine9Ki zt!lgLQ!kt7cN?ouOg!wq=D{Bkku58q(Q9W|x_`Z%WfK_jc<;xWU;d~c{Va&zTZ25; z_gH%GUPOrha`EzpA^rNFh#uNLEwVAzZsyDGCi~=18RyeS=A|X9A39}7KewK_JW_ZZo@FaAf=qhe&-b1^Gwr9E+c z22;XMcey-cFw*0vmz$ylf~8tSH~IVb2TE%hZTZhI{WnUiAFI`@duJd$r+p`Ma_Qan zx%=IAHMhEK-Fi8Gd#hF%ClhVfhFVYmFmc7u#}l63deJU_+tlF$vNxOe(QAV3-*4PG z=m_`FEi`N41lK`boB#2$-_yYI>y6Ip=v+th1@yCq^}KCeOHETcv`LEcO79fj+u?KC zi-qsEr00b^&n$Ht)bc|zvEqKGD)HWy`g8ThL=4`nKlbXn!t;T59lpLgMLYEUi7yNd_u~)kvSZ^?EPhjio%|E2L^}c4A z7Te?#65Hg%_s!Mmv}kOTC-Aw4e>t0IQUaeU*3;_#{%j&vJIt{Dw_=)U^;2WDZm_p5 z@Prd|bKRxmhneWK|KM<9^E;#FTQ^;vIw0t^zwSxFn7$1gl;vo<2lQFebwCJ~l%r?p zTatLdXQZV$WP7lBPBI2nq=b9~*#nL~LunytP-L^0v&@UV{c%_Z-xZa{*V{mFVH>(zR#L9^KIn zh)pdNt$tiqc}v5tgTnnnLahglojNVZe{5=O!#+r?{+~#!?*30lGu`*j9k|Bl_3fT2 zFuVV)L67<$*@(J3)0wyahbyk54y5j220=-b#u!XJm{BMNt9$tWWS#!yxzFp)=Q57x zMC_O&tm^2wGV}PtubE}>7fgRYD@Tsp*Z-iUbA0v9*iUD<&#uvD8y=Zeo_^s4zMOke zv(%_&!aM}um1%pA)gKu;xpJN9s~HDQud+%p?liES{pHFO*HP;?#ASE6#n9~Bb_?~R@I zPB;_s&FHp*9cliPt?gJH6)5`7A;c-#E{Ta&1+J z2M(VXG8h?B>rVS4eG&K6-l@I5pOO4~?iA_=)q{proFk&1k)WzG9sZLuRSsGE{Nzlf zf6tlzlWYI&`HOk&$(Jc&^8K{S=2St# z`tzMuSq{@yjy>XB^Ts7?dvW#L|1drOE2i4fx^2AN}3q% zWp-_=i6(a5r{__D?hd01^B?)0oOW0m*JVUi(?NFnXxr|sTCQq+(b#-xlpiu|AL7#J zm2X14m$T`OzS)dXR*yR?cV7=fhIWkI(z8MFvv<8>E}XFS#k_r8UwikOkHxnM_Zz=_ zbd1-&ODjEAFSkKL8DotafL@W?prN*N|<)yq>p zrdtM>mhCS?;_!?Y8?8#qy3zE6{>w}PZWUJt-NDo3G^y#*ly~q190__HN}O_@13JncGKPXC^QR4n~A1qw0Uwq%jr({4O_AMAFlh|p2)+W*z|7=!ndXu zp=p|m)k`MQ+xpZ*_Wj`@r)1Edmckbv#uWslC6s}m4)L2DO{{#^a?GKmjX*i12B0vt z=M-k#fKa|7yT=@FHrbc$Ci%@ZhggH&RXcBlzP%q&)R#|5RJ_L<<2&~lBrs-I+j45P z=(Kz!HcJT$HjaxJtCO9)9xX=K+JlLaJOY~7%7mbUX{pwiYYC;py9Ep^F78jJlQ-d- zJW(yM!FJ|1N5k3u2OF;p>U8Jrv9gc%4c@7q_fL*;jx1)w(WxDd=}?T1kLUoT30WOaALnA@daix_4ei!P%@(Jpdc7=rSg++C zymrV7gBQ13?Gd^VT$-VnD2%S_Wx0})8ro`{l-AdaKn8~!P^|V1}7MW^vCB#<%Fe&g%}&lHnAQt&F8+;rz?z$032LU zMQ>LR{pg=s*eGLXgAkbwZljXDd{O8OoTrWL*Uk3%-xpB?4OM4V`_tZ8bIHp){H`l* zZ|o}S)7sd*InDEP$!n^M%(Uw2iGc#_8CJ$?ZBH{x6DJxA 
z*tOx)Q|a^NrMZdgV6sQ+c+?7TJ26?sdiKD<#vhl5A!970))Dt$FfGjUNA>e`b7x-8 zxwG7{>cB>KEn1nHpNxf$y$Z)N20L*n$M?*iTH7xWMPT5lY`C&2aez$9y*&owWX@mz!q!07EDZ+pGYc;_TV~^PJO>XLqCd2 zHSnHzam^S|rHJR&wS&bHM=dcGC;hD1e=p#FJU}W`Hp9)|pAZ#;`EZ!Mb|@6yugq-f zXR;}WOonqCC+%Gwa#{|0Ql%6*cy!yx&eP&LiRtrxI|A~EF&|?fK7fQ#i0MaPXu!ob z_3xYL?O_QGd~g*8N80v`2Kl(DF?+t&v*<~SeYvBBNqp=02KaQ^***+6WS=;lEz`mM z99YfA8HGGYt{J{idZKsPSUql}Ou?;xSItw5=z#WVUE#G^@fpabbm>%4d^u#86xmE$ zCIpFEw2ZyXtat6KJ>K3oLK>X#MNpjX7=AYq6?05@GVY5uMX%jkfBAP;QB~0A+wZPR zs9ms$dS=ZQO?_5a)v(^Q?>ilLj&xi$OEqGp88TiPi1dCuD@`q@#q9KM;>2+lPaV-S z6O-BOkHobcE_Y1Q#`J$}H&Ih}FB*@r?n^3t@S;r7c4e$MpD7;XlYu{7r{$pq;C-9fmEngC0BW;0RV_q004jmzy`2#a5k`IV5Zk$pf$9#R8oWl01o+t*CY9Y*K=}( z1^@(k1_S{3@AX&ikctXp^S4vOq>{*TioK<|tpIG$lKjsia&r*=xrAU4@PWPrdJ2gm zI1Kr%2b{azFuT#v{`rK}kxY~Y(fRqJ0wTZTt-_|ie0q}Ort6{mhEHeu?EJd?{C8ZJ zUw7Z7#Yf1~|0xhOw;HNNl)7^m{_cwj)LRFRwlD};tocJt5Y@qG`!A8+4}mT^0{OuJ zL(emgT&5oag?{iKf)KfWtL&}8;BXK$bz>v5R3hQzW)0bJYBsZ#WW=YWv~Vo6KaF$& zzRO`!sxZ7;ngn&9g88}3qNH@7u=gZ!+Ndm|qgZxE86ls@qF0cfy!drYaPLEJtsOBY z>Wmjh0gOrn4CZP;p;aJ;popxWBpVQaNFb6Dr#z`aDwH8+Dj}Ko9h8CfCkAi_X9s-L zX*g-PW*W>$ErV_kH@zcc9z-z9KTZm;uS3{i?8bP0K0h9Y$!EG^!WNSjj)3v>^6|KmjnXffGvt8Tb&r z@w)p&uXL{%S$HJ0Vsx&0z&^HnEZ&tK^!=Z&Z0h%r9@O-La=zh$vL2qr>r6yPL3K8~L#$bDQusc5bz#C$;>nkcx| zX)XYcdy!DdqkKCQcBxp5%6>ArT`moePFM#t`)TH?GpRc|j(eUEnWN8~l)PqqD1kUE z(x7CZ9bxW>V}8i}0LCY}YOp70T_Kk=^>GQkus^YABtJ4mnBYB(DmqVi(P_n0NiGRC zgoFZKe0zVj6*G+@Sg>tw5b)Ju550;zyQs>-$myu6V}73Q4DP`f9=A^Gd)MBS9cgWb#fgrwgD-w< zZ#K6M-cmayF-=bhqFRVe5oG2Nx&TCD~DI=wyW+)#(Kd`v;G` z?2A6FTg<%EnW*u|WZ4{1JT!XijdFRRbf{2>#UvZ*5uk{QIi!Xg>arQA8L{Z7Gl#nb zQqY05p>S|Yj^ZkT^w4ZO5~P0Cv=Aa!;Tit6=32JfAUVb=*nMI*{s#!2?pk&rIRa># z#Qrn2RMES@e10^7t)mcnu0WOGr7OL=G=t%-uph7*!qycTp^#cnN%qLCfrdwFfNyQk zIGtr>WqWyXzCT^ESUtp(gBwA!Ma9t8PXgTAI_be37`q%^f4NpqIbMo|4=ybr` zmD7vW^PMXc8oonKVy3df>5SDIJa9A@>f>p(RS1kN{ZBEb4WtW@Eesw%A(q2g}!uRx35>wRwt?15XLZry6oydxN zuPVmo2ax2^YiwJmr^Ua>{Tp9@PuxmKC0(Ut;%&no!e^0>h*Y+VS7SJ*v~KjLoM>>M znHA3OORX&*Wf(Gms07_*4bAn9|OMzz-G-Y$q}o^I2k*k 
zw&9Q%64);rVpiAjsdW?X8t{p^jl+;rZ2SV8+(TGwIMZZQ8$ zzBc0Q{>v6SeE2YJmf6(1ZTeFuFNzz~QRndP6yHq#c7a*|I{8R0f-q&;-+{lXFb~Rp z?Ot#T_ZP6pSZM|b#P;$S_yxu(kC=*e5cnxBt8y!s+1D_~(XwzaqG^s)JK^`a(wI6=F#Mj3A`PSN*iC4qNy z(KG@tK7x!wGg{N6b6cF);s?dGqnU32ib2n&cw$VoDnm$VRH+&hpxcZ{$1*`4R(~ky z?Tg7BC=^ilg_PxG=f`;G$8J;-%t46|T30%p%MqbhC0`xe5-Sm$kUEezP<%mVX02Ju zJfo(6h1yUU)j0Vi%20`1dBR;`lO+P8>8s&leO5u+d!@?qu?38!v5<$CFF4EZ%W}08 zK!(aNwbA@6`b79H;1PbY3(dsE1w-4%7^e3Ywboqklptff$Q>PQjXSfxhBZgzAS&gj zPlKo9UTH%m`t=KHo-CE~LFN#ze-ZPQKXZ8^Ok;j>yJ+Tcf2=*$F0f_Ii~b@-(na_g zP37uy-}G62KuB-g0l}aJk_cR)S4o z=a6#XT;d+VPSA`LgprvDWlZ1*vl1RRA$U}{z{)YAQSKFK3J~M@~E$t z=na_WQK|JF*5Aoy%4Q0N$b48Jp8HqK6WlAA?rGNckSg)80){@V`!3C%b6^h0tjv38 zj*_Z-zp@Z(M8F1}p~$1@k~N)U3DSkvZ)(A$H56iwqmZybjO!5OX)jWc>$~}(%0z(fkMihDS$=)u zE_*R~#dt)X+z(Ld5mzoTom^vJ-q_8&9N&ERpfRCyMhtF^LBm^s0;^>b=IBT%rC^pP z-5h^D#*%L&2WlSGp?5h%8Pkpu3J0syb>!^Kr6R)+uhKwa`H7i5vcaK6tc?*lazXvL zKz?<8KH$}&T*te~5V5{M5f``6wu+$06F{0G3^y#z1(gIQC7Y5d1IXtfeh?{{BE*8Y zQ+~RDGjC={ROKCsv+1g-m0fn)%*s6evcw)feofFkF$DjXsq9V5S?m}9>{BSMKA(h+!1 zE)FCm%+z1jzT-cN?tS|Iz5vq10{$4N80C)i+h-}27?s4;C^FV|hdG*VOO*x!I?l)0 zEv+4moQEbP&Rg5}TT`ByrKdKctrv+fM?z?E%^{FNtThB06tU|XF{kz>zKXn{E7QQ1YH|8f@aNc-4~eBu7!f)D2;^+`kSwVxS3)! 
zO*7Yd3EBqYgf>r%V+laRpY}IK%htud`f)V`mSGJyMEwaymwHZBM)x9wt2g^=v3GG==6J66A7+^f8Ot zAuzO*prd$`u%&)l;c6#5r*9E|TYXUD1J1SM{eiqyd|^N%>?3w^D=-_!Kyb#7Ie(c> z*B;Q`e3xPE#B~kGt)fxwcrIV_S2m6twMTw$fU}hc~B& zp-qW#g)y0Y16gTZMLn6tIQMnG9oa?!#X*2LVQ8G&XjLGgA6qHKLunHx4ZK9(H~bok zw1fYRv4SrB8ss8w5)XHJ!$`T&huPmP{&2Gbg^FmQHYmu1KhCLng2TkYuy^DOF1uxN z^kJ;{1PY+Upb5{39J2xsl@^7T`EoV+{G*=d`AWbIUh1v5(hk&XN?Gkeym?${ve1v@ z(A8gZe%t?u*VR@={~{o(*%|=W|1tsWNnkL*XsW9q9`=~ql`bZA!emU)N}(v*m{1fF z3gJ1l7$dZbgvzI%O-G+?gt@j#7}Sm4cORZliV~9cmym(Tt$?;w=!u`z(9ofnt6*Tf zga7fg4tI;1|FMrI`T^+&%c#3y~3c9%*fIb)DbP5=siLh0(m^8?{-A>;LWu z62*+r_)loo4~kwtj911`omAL35CNkqD2Q>vn2^cF-|~Pl9Rmvoy0$7#{Y<@dA?b-67yIC_sdlQIQeE0@l)bnB;#7*?1?bnJ7H?&d(iFf$JsRc#Z-feOjP&ni?v-vs&~ ztC%xW2_7_KLkT8PqEUm=D=gP%``sCgZjGqF_$gGejx71j|Kx>cRQ_GX8wpntr;Os8 zD=B#@bw4OkJv6%u`V3|4#++&nM86Ei$ypElY=_D2KomGcB6~7LqP!LAg>$3&E>;1Mv0CLOF421iX*$^$LJ}5scteL?#SVZA0ISj@k(RHW^l+=)sJRy{5(C%=Tvl59X{!qJ@7Y2-cI&vgI~~lZTYN z35)>89;Drm>bi0P>3U%Qxv2|@yhKJ^wR-rykHKWjUP=PlhQRa*2>Ab)?L-jh+1*i` z+MsXU?fHX*vD1P&yM}7m-xreGX~v6%CR7#Xm?Z}G)oPg*hLR09%_MbHzWorIQ(rN znT=isXt+Qo5o(Zv+mD@4273Z62y%0u-=9D7i6{nDe6vuWA<~a58XAe1*fqppII9-Q z&Uf$}VVRCZfG{)*B&&ni6)KO7TrUp!<~i>pDq!r{pHvlfh^tQvo-!&DIn-@8CpHas z{!=8J1I|W=BYO~}cP;#wEcP?D6TOgvB@)R4>J>4yfVIr42M4s-pB1uLhkRcW#ERUq zGUUcJ#?CUizxe0n!{tj{IM&ZV+uenm6MA>2ZvY4MQ((kp^twkA>OB*~_f3xgfQMd( z+|2-~_D=2v>=e@Ik*cWsJ^TY+r>WsO6cLU!Aklf z1A5UzSTooXjejUc|wDYssjmmFFaLB|dA%Sl`IxjvU?7PtzEdl<&0xl@apv@*0Vcf|qaA6;n1PJGwy`pkMQW22-7=ywEme%tB&A7~ zk(RDf!(pYdGPGoZw8=#s%<7+JnvDpaXr3sZ0G#P){gDQ=x`$P)wU5=IOQB0dXO50w z8WXl!ja789sDa&GY}k>kgP}dUeD<+Sj!cqF&1uSM(P@HdwP{cpm=oHPWQ7s4UAZdU zRo)gatBUcfA)Ha1X(M|v2d_5I>c9?P3)7b2>&PvZ4$%%5=k@3I=RY5EAEqDD&sbmL% z`)cb(WYb*}WPO?*k`a4h0r-U>8d6rv+{Ym0=LY(n zPtVVG%xersTI60@z)4vtoX%;}Z2+o7Qf3YZJFXmqA!1lrfJIs9jOwWfqFkvVBFsQZ znQ5dPfxJW-t05wb4>vFD%>raWK!t+1Dz6hX2hKp+*7`q;95&VI>Byc3Q_3f#L*!jW` z@ydT{|KQ}zXdA$*s|D{$%1`6JzEay=WfkQYmQp8WLlMIXN$dzoaaKgdm9&&}1lTv_ z`_qP73HnV>&p#jr2JIm25DXp?v_CsyRuoOp?Ozcx~{SSAT# 
z%Mm@anZ)_ovkvZ~*}UTq)o{Y^V>~%pw=}N}q=I-qX2T7F?o_q#;3>FKGnWwb-qEfKJG&p>fA7o;e^G)m+Wu}Ukcq13R>KDtta(f<(N5m95HYbma4S+mYF$a&ES1n_ch6_Wb~~Bj62$Bbt!y|J~+>Mg=7kx46DKA4JGsJ zFc_5nq-GCQv#{ONPH(9TbhGCuHF-zd;kLNtP8%4M#788GRbTf^gi`OfT}8jp?8aCR{mapL`}rQWWdWx?xI z**Q6VROso%#$Y-ME+p^WpFtt3iVq=a%B-O35=f7|p^v2VWWMe#V(TEj>IEwa8 zrvaZP;iR^2KYh1byT=>|ZP>#?Gf6Y9ZXa^*viG@sPnA`U9CT5#8!DRCnJ#Cwc0XjK zdSB{{G9I}NCVQ}GvG)9)uahe%v>fj9-H)5%aofYd%+_!a_=wv6WF$Xv8t=#G z=&v}w{z-FUOmBKTO`d2k;3iF1|7Lc&pRCk#=!XJICw{Jb$+^a^PE@TUpo^D1zF%M3%o={0F$cj^h4;O>k#U;ij&CO>E{s_@jw7H_F1X5qM)7RfQuLLNPzHQj@i|+aU*#mgq+#Z(IZ_gW|W69@9bHtmzV-o`c|Gr%Jd7U&-aP|1B zX!#-E6*E&Y%ly4yeEg=(!h>lwQ>j4F-PS5!F%JTL8 z1>+6QEUJwBRp%kBSMEMn#Ug-a@>swVA)eUa8S+@~*{%}ha+Uirx0~L8ihQA_{Bp(7eg_{!bW=(mfL0V zj_RT%06F9fJY0Eox~rSQ>#*YvZ~|OCP1#mt@8L7rX7R1)X7?>6Jz4?v*b~C6@|i}X z{=6jN2$-ap0L)m77LKKXW$GZhuA9~5CtPr#kIO}j`4(K5kN#mBx&AcI?A8W&g!4U{ zhkI=-aQ@ZeTyX*`0B;EVLh4b9b2XBOy)7ma!@flGN7w9U=WB%b2=$a3>(XmJchDmiy_mKjH;w>1Z$_w_HjLOjn29o{bSK% zRYt1;hiidwA%_}P8kR$s<7fFWYGFKna#Fh|w2EGX)jq9ZDsTY~wbdj?3D4#F<;pra zehE)_YAmuhuDy>*xyB{4H`}MfRX)cfV}z}Wg7+*9FTthj`8c11XT5(98X{!IwmrO^ z5$Zglv=+Kf=I*AWuhhl^n1@kmK^6GA=N~n)^%Jct+~U8G@ImT9r*$trm=gYFlF6{C ztII^a-?*z#%eKVF`V^6K6ofOoK^%bSHODNCI31 z{m_zdbQld)_O0ir-@zq6fp%Qw(rBz_`>^& zaqlyLE(Oo;z2T0N4+YsRm$F4x4>wuewc2N|zD5?7)hVEv`P>$ZmWr6C{2EC-xhNQ} z7M}rUvKV56iiedJ%Qp@dr&+R^CE#aoTpsqXt>feRG_jFtFI76}>y_J0W3BMM)#VIu za50G-WH0l$r5AA@+D1C_J{`Fcnl2w5OswY3y%lM|1G2;-SzN`~%>)+ur=*Bs?~<7g zS{rCytyQqYR#Y9 z$fcjF-lqX9-I53+Z&3n!{_&tn89j67?m~Z3oF-f;t4<~*yp~H-aUVCIts+QgD-_i=vnJXX)Bpo%X(%XU7(C zsVQQ;V7lWE9onk(>J}^JF%BbHtx{{<>#JkZ$>7qjM+UF!FoPk91UrXa}Y-E;ZI_*LW%< zc6aIM8wIBZ7f0TkZ0|HQIl+9bmNrI@({`^SdsPE7{)>WQHP+@6nzaEUZPHq{ePh7d>W5nYvHb`lO_(`}{ z2@+)T4@?r;x*9I{>;vY?;n{eF0t8+scKMg4^JncA!$BW}^qG<;({38bwQM|=`U|Il z#tE;&Kd$xTkvk4>(g*0L{ad)v?(|QVq$y@reT{muePw5c{}jAMrurNO2r(>hOc<`a$WO+7prxxK}*Ie{(t=MY^{ z($N+YQUSeyj90lD9*mlxzfMXghs!d$_p*uOV+40)` z)6-b-Sm^k&DPIwCtBjgr+=99?-IM<|B2sd=`-KFH)jDO-Zv3D5(O5tsL$F$Mo4^Ky>B|i`6Lp`WiN! 
z^F#Ma{erRE$LYI&^PtALN9~#OaL>xCZfeP}`tx`i&{bz9;PSCEOu?2REZ)}KZ1rFR zPG8m(_HG;g&1k~$-*j$`U0xMZxw=@plv*(*5IvxPHN3+rR%e}-`%Lqy)k9Tw`(w2j&)GtL}Y+Yk*I&8cZ0%T~|W zGv6nx@(QSf?+1Y43uhpk;CLA~S$yqZQHMEMGJ4;-&$@m22}ClTar9vcXNfXo3Xh|l z#S3GLcR5*7KKLCU9}(62N0LY@rao8yK8{r(E{55jzH=Kf8%$xM&P;H`tS~-mb*#qp zTT&GmiqD07+T7G#?KwOQuQSyq4=o}b$k_EhJft$Jx&n|BE3&`uw^q&?0!*Y`&^5Bz z88uzMb~kc(awN#ErajJ6It*@x+Eq-VzZd1YZts?ihS6}9wF;NL*z29+o41z;QQ~e) z9InO~CwJ%ONtzE!lpFHZpA&J-+*%ce!hKlpEymef$aKsP8KJ*nlHC}_tlskdTgjKw zDVjSA(LDaZMN4T?u=`2FxVu%$(w4188x(dJQWE`aKAn5yJZ~GlV8)`;Z(7>iNV0_p z!%AtmZGTA|WKYy|B26E$fOY9Kx1APgfA6))YAstPr)sYI5PhL%mjR6sn#*kz1iBB~ z?#ugs@;%G@+xh}n%^omstdB!+@KcE-1(g37y?nZ4o43R*j}>M|T%p6zv|H?WrITs0 zC@_Y38+3prx9vVeaL5~o1!q>cPVyzVE;8s3E{b~fOZv zF-bMe8I^Z2CfYZLnQTL3TI`PDJPx9slr@L8T)mUW`M`eyI@oI_v^H;Ywuh8*sMAZI zZauBC6^5z92a2mefe&DpZ(2X2-Oe$u_O5GkNJpNB;IcW;c*KM(W%^y{`SZzvfg}l zvay;eWd7Nyl*DM*M5C%-I(wLABoVqDxgKD;pseZcNev&08rqTui)w%NulJg&&($M~ z@yz25{YhgnK1KT>oTw4Qr8CNbfKl@?sefo#QCSf=q&16z&3ZX$W8g`Ka~rif;{DQmO3GTQnXkvhTb=pW zgGW7vRXE(EHOUlB<@E*7@sn8AZdJ4e-gj!~_CQz;pj=B@|3 znp6f&2Q5`MpQ!bh*@kg0HU3hJnl7WTr?jvsgZ@80H)tAQ7QN_7N~T1O6o=l(UY0Wl z^?D&J&$N_sICd;$BNQht3d#!iO!c>sk5&#p6Mhv7qD6D`j4%}w734$U7^jU|fBe8r z>`EGmDia|z{WqL81pU&&&e z%1BPH^e)LUg_{EwU zdr*GVoI@saCQURyF5Pj~AO9P^SP6lyUW{2?cqKG3de1mh0yXy0d<)}diL;})y*{(s zxU%P%lnO`PBwh+*`z4WYoqg_!G(SKqkedD3u6aO8P)`0UAx7j6882R^Tpxogpt`B;||~rE$x~U&E$L zqi5uEK3}bHj<2bql}f9(wDSLc)VbcnaQgv%?t6g&4^YYZ+JA(IxGPzElCFrR;rfUE z{xG`|kDu{%F`uFR#9=wlGoN=t*OB)%4bz3GV4)Trb?u&hJ7*}&U$ z+O+bH^If z4ePdHAb(3|LH7>fv+Y7)dON5Gsq9jN)5PvkZMzzh>M=kpHk{>c+K+vG;o|V(M;7dm zGfjnIg^KJpDuUeZ4%}>;s9<+VJ|p5ZXXhtmWU)&9B8XDcx$Ey#_cfzjNg3T^@9<2F z;Jhb1Zi-j2Qr>;owP_@@-^Isl`7!Gv?|M^`fP<(FL(i;gZe2GlZ6CYsbNa4;iU+3B zU3HohO!xW~WW9EN;X;(A$EH4#8JT*w&|qA44mPCo@cn-nS1 z`2R7;FFoD36?cAqM3(<8R3i(;)tiiQzD%#y2g`{Y(>J3}M8;|rHL4@3hmFT0(ok+< z7{U=GqHk0F&$xuP4_``)JU7ntwL;ya@mG8b1mT_Af<+KRrL$-;ucK<47QfqrqJ8 
z55K?5*IFZ2K1XM%%F>b5|Ka1?8D3)?w3Q5&crNVc@KH@xJq)eAD4r9P4Mb^Fajf!?2Wf=g560VpB?+Q3&|4C6L*XJs-d90Rd;r2Ym*J>&Yx z+dpnpwZQQ46ho0XrNT&ImjDR|A;BCCzzkS&0?xgrH~@h+fB;E9#~8?$Z%1lr^}uMW z-*ugTqOkde*j*kjy{yvAUSeP)8vhQ52@?_Mg#|UdOa~Yn|46`=rO+O*5{Vz?lwqkx z*52z+Z3=D|k;;eh)384D4B1%V--#vte_ z6Zw`2IOOVv10jIgAot6M3J`z>X(FGJGw06w0U?3rqSAnZQbRfZ#^INp&6g7w$O8n; zg{^`CC4^!ipOx3G7qlwflvF%KaYq4tw_t_=C52+ZF{Q{d9V2IU%1v&gLPQNh!GZ4y1pkt);|I8fn7TA;1%hP}PkLUTO z2_MOs^7lPb0sRNKZpkd?{{Y{`0)mIy`-KfqJjW@ApC9Wl2oAM+V~Pny@w;{aww6FQ zT#%Lv2#35@8^~S~{x@nS=!L2%#U?BUl9MHRVFrD2q8Boq4^%StuK06tn_C{I*4w}n zipLKWS8Qf2v5w9^{A9uB5%*XMT4t3-?h+kCYzf6hO<=|J8mu8u$%p%XcUO5$0NjwC zLuzk`k{@{G0YbJt2;Epw1Kt_Na|D<{ei@&!p-=c}k#n&|tOebeRbNcjWmPnOPXL6m zm_Z8!)-36f>1a1*d*nDVFc2_#%Ve>|*dv6le*E!3fkvnRQv@)1!aw3)Ops0 zp`9)LU!QGd4Vf{v8y|2Ryo6G}?mCr~po6XBJi2i6K$f>dZ&{8smTy}v=ZKo?xKbAJ zZ9QSmTJI)B^V=X-4g%nsQ#_oFBI3FfiEU~pTBhqoLNjKZ6~oytxDtk^t*vIPe^qFf zc|5&gRJ6AJ=*;mCY1gQ98A0tm%F{ zTm?9GD}X5wRih~xdHlh}aI5&Uumm$QY(r^?9m&RxqNf^l;$zbq=f2)ZueNcTt!UBP z)-5S|J`~a@Yvjq;?syWqT7~a$km3%575HTziL_n2v${?-PEuVbC+UYtpEmOp~L9H5HFwf13Dk;_8qsSr)|Kz@?kC zsATp)%mr65X@=pjxfb2@&Xar1XQlrNw8THE&iAMbpT*g2GgX07Av4ayS76$f^i|@UL!{}XQ|MOJ1tjNL1>&az2AHK*jm@X!wf&8>gHWk z6M0|=RV;x?({N4KBg#>o|0Me!_hIpr#&~kV!+wMLhP!9L2wXuHL?+E`V+^{`F?8Fe zi7Eo82Yx?#$HK=ny!le(Ses`ge>ov~;tKe+TgI6qaFtFjv9uFw*`$V6E=Td&Qq(g| z&fH%48Udjmp3g)CiO7n5yWd*{KlxFcZn^PsS0$cp=X)czQ zwf7ZJD@rp=12|VnTg_QAYJ?=-OEj4!F6qQ|{aW@Y4w75j0oUM&@wko2(d|BZzzum> zupgrA2aLn6ST9e`f{|=pt}z(hS8->mnAuZQ=Vki}vnpX!E_?ayQRo15QbbH6Ug8 zbu5_7O0qK7WSVWvHcpe@?r8Eo;6`ycn;S@<083l(Gk6b|HBP0FUVS+AIxh-JV&%7^ z-;E{%<_yU)-_9J})9s{I-zK~V4@I`afN?}(3@SJDK$Y);qVH5FeDETp%2>gH%Q6T>@jUOvmOS)j_SQY=t6$gx?t z$D(Vp_qGoG)7>?lU8UBQYNd!IecvpEMWh{3Vix$8TkT zx5->}HfdG}r7n5NLcFv{g-;;pG`>Q;7A{{Rgga>gFYYy?%D9L?JStnUmel9HX>>Nd zG1_4RQ(ZlR1y`i(&|-T^QDq;D;O%`g_X!`cK4pPZ_4YN1bW-3h#l%@E(|LXAphwEU zdF8qLh*=@r`a(l!1?R=o7)WJO?j@>-Z>hGJA7&lYZeu?3is@GI-{C{Um zg!W!c`fBu$clTq-PAQBEn?p;%ju$&Vur$BIJzm!rsfURXtKa{2ymayccc&~;3bm#{ 
zUvPkEqy3rieMosktq&vX>kO2&ioquEOSbC_8h++lC+y$78XxTD5f=i2OP+KnUB z_eCik(i4E2@t3!<^a~7C>#=?z3Kwun^jRjj${muCG;~MdrGHmka(SBWD-6F55q+xC z9t^b18bL$H8pJ6Zy=s%<3i1REj}Qb;HgLwq9Zh@03x`y>#+;+n8z+19_k34mSmD}d=xMWD=z90Di8x##)E}$5f*t$w4q8h~E+2#t_L(?6 zS!%+U#GD|AzKPN60(GMzErCT}Mbz=eWChiN`=e+7H}vdh z`~6$f&2l1K%mpdxV_;oy^(lNJ6I88V@nWke6TG{sZA_Ji3FdDvmgiz;f`vR6Z-gW5 z=t}&w42$p`RRMM$goR1!$k!PYY)Rk>XWj23^z(xWR;)l*rkLOkcJ$+o2@-&C)_riZ zzD}4R8*AYTo!_(1aQN(eA3MuDv&=rj;j{gKon@X`W}juYSqA<;V@EHly|)_x%*Dwb zYqP@bo`Lxi-9y=5GQg1TRh|NusbFXCkyV|=@}PC~cHLpmK3r?2?uUZC*7Np5RJU}~ zo5Vh_abuzBNwG4Jtxb|U?a^VdEGJZMH%IAA!yhgvCpa#tfoqck!!l|mV1RfTzvq=X*jEy~ zC#4(%hNX$!|3US_P|WOyL@m|#%cPFTy^0#;eP00youE=sb^Dt@)(t>WTz{Jj0>EqdoEeXSY<+3eDu z@ty&d8t_LKD42tHR$d+Ib>{?k8orEK%ae;M`dIPyyRSZuy6-fNi@!$$_{IGhSN|FO zXFUDS7*9XV<*DAT_W$CygiN(beUj*LJl6*Q_bcHfd>#L$A=$(;_`2aF1E;J*_(9V; zLTdXye78%>a+MeZJkjr)I`>{tJm!@_?#Gj(&@t1o-tUr+ApckS{p*B-p~!=d(`ybZ zK!#D^#9v*HKn!T^{Pdz;U31x*$H(1jObbApyx4%gC_9auv6dKtH z-Djk^lCdnStAZ{JZ@4_>P9Sy$n60d%;J@*Xg?=zM^c z;xh1Ss~`Qa3>|0IXNON0CF3&hHm*w-<-}zq88)xTPti2aEFGM*SJLFq5rsuBm1}5^ zU)?XZ$U>u#F16Hj%^UUL8lv3zHj=s{@9^4|(<5q?O3ey7N1ub1eS-*Rf}rTW0OJQ2 zK#e?{)yDT1K^#=G$F%qwNNTjKPcNl|g=ri5UO(ZZ+Z(J&ZNz#)Yu`@jn{6zo^E-s+ z^>wbn-%N^Vj|y>NT!c5V19x)qyA>NBS>=r~Y9jf&WrF1hukzDOD3_B6 z{koZ=w0&hI9qU7GmIu8hw2SP1MHAvK-6ohiIa0Dt>%|t89TgwMN*_HX4OYDq(z3Ed z-CginIodD5U~v>zaumb7keA0kS?l(ljOSMvcIZP(_b$| zjjzC6^ZKSmBliWqYY6lwCnl~wr2*BOt7UIlqoHIhy>3zdC3VnU*+O`|ocbH(nSSA3 z+1sZeUJ4g~2+5%jUxy{{`}ZvvO^5H-8OSc)o&&pb5A29LT?!}m1T2cpS_old&Qw(F zq|hysVh4seE1(sw>FT;&YZ;HOIjUyvb;UE<8{epy+-69e+K|w;YYd-}bCqesEnjld z>w`dFv=b<7j^CSSke#P39T)sfpmy~DY z8?K;VR9xop*}ssE)9pX~acdcLu_*NY;l6SxfUr3~8-E*;7rQQ)r+Nh1U=%T2Xt*8n z>X;IeY+cD<`di7Ua4*J-CEEi&hd2W_zW@nc*IwCl6Ay_8%J6h~VYmU^QZO6n05G?LGT9=~Y!b5L=FvI}f_9{RtZ zm+z?DtYECr10@KY_iDM1$jd8g!gkrvu6EgNacPe8_sKbgy%cU@MJEEcTRk*g=+k2% z66A4P`ao*)=qn>-dXi?TVq5o{_%?wV1fK#O$hoUJtLA(jyi^u8!^p zdI*V7-h`|r14P}Jw|6d zZ_^)s=**Ke7K6e%@{IlXqu`C0MQK;|pCWK?P8cjs-awc#d{U>hJ_FBHr(_VrCyKY! 
zCFcxO#{BJhB`I)lm5A;ry>D=ZU;Nx^Ez9%?fm7M^l3HE)n;-645DEn0(a;*Gz@pSCn9qQ{xX*1-YGpszhP$xlb_YdIXCzSi|L(XxG*-CAwjMA&x0%VPZs zQvw_mwv1$1im#T+icpc4#~(Bvx>}u=FyD?&8(mt>J5$Elq|FT%CL9<`lzVram$PQn zvh`c*gWDKHhb7%W!zBIX`U^h(GRgG0cF*Nov-`k_tD_DK|H}N=(?Zq^dU;+%#L{hy z%;j54zR6C~YyEPwcni&J>6*VQhnbU^IJ*toc=kI*~5GzV!3Ce)!k*ch{<5xODZ z?;W0b4!R~Y&_A4thvw>gTpXSDG(`-b9p1o4C{@R6iSNrU%TU7W8f`C5rU>Any5%-{ z;a!k$kWj(x>0|Tv_uDn>z~9?@_rQO!<{kL=rRp8NPRO*$%Na{x$^5ml<3^p?ghuCA zNNDa&NLItTAd|8A^`1X=;g4PTV;BD4*#%La8=D_~QXv>Gi0gSG`<-#COib2ccLqN2 zU4X{}O^6}NlfF&+Q7PnPONwB3*-Gb!PrmUHB@slk&I@05nZ_p{o_4A^b`0-0Iz;o@ z%#Zi#T&=XC{Lp;)@~5g41r+a;fGz=V8+nN(Wr`Qwqm4Hb#@Bt(wkRrNG}y1-mega0 z$JC!|qEKt;BdN~s%sk~GzEe*J4rYEPFfB?4zZJz3PIg_FewHIlcuC#YYMP#cf2?U> zc&kwW|LGZH!hbY+zCPnA9x;uZ8l`D>E~u?t#G%F4U7)4KHvhiA>Uj&;j^h;{>Rrl&AoxDT1 zuN1ReK~s^S;B$~u{6G$Vf{Ri4nuf)D-tcMH4mdtv|Nr*!T`te8o}`ozq?UP&iuT77 zj-Q}i(CD%(ffRQ|YIaQG@wX>Id!-W$TkdK6o<$4s*FGP9X3|KdpSm*5GZrvHkkoPg zbej7VVRPxRH$pvY2s<`)2QO2)h;QzUY;M$DjjvhDJmhrkujjR>eJsoFOSjf7i;Gcl zcbiIc$s~XJv_-H(3AVgPoFZT&Ejr-RMo^M zT$|K=qZ?0W)(z5n`Zcuo9EQr&+9oM)A{{dr$jS?QJ|N2 z1hWJ`?8r>})+dYSS?1Y^S#{>mc>gor|BUxPQa#- z^ZOKi@xIkBpYlU&>SnDEHm!mlbar0mOv6CF?Ps`^^(Mhi_t-ePK{L3`c>RSmr9?1G z=8(Z~bmjayJ!=`o@-euS}FFd>Fx|WpT zqH&K%g;At?a?;hTS}5jdQkS|tchaKQvsc0O&dMS79Dup-lo6vOT;Uk5QUKd z)hT?r!JU$|K}WCa@>d31l7lf7GLdOb);R%s3#^AV({sBkUQu#`P41g+_nBB1>$|bI zvUQ^PE~eanU8gD$WNxTqmB92P0J)m|c%lO{WS>qkUf7Jsza?RO*>q)P^lZ zW}ZQ8VZk?`#<$tFyx%KI&AF<0*flJ-H!;mW9UbSW-vGZ`NRldKH!`V~C7rS7vrRs=?siaI7$@GPInpJUoJ}c? 
zIO^#7Jf$Y6S)>P38SR{M@}W~zIiEeZx<#+12&f--beL?p2ii9#65Y4jBZSm`V57^; z#4lHwiy8fqPpCRu`0QNY*?9@@HK{@LtLAG0d##(W3xOQ?a|MNz7+^A}M z=~JI~nl@C0WAaNqFt%M(r=i^UtKC8dwE;6v$>!c3iu=e=6qI$xSw4PksG4`?2vDF;li109pcpA;z`c^M7uzfjf7QTD!fqR0tn&>4Hu zU+aD0mZRGhKO22+G!`^|86dG`fg{I?LtnDPV21Z6sRMSo{+9f>XOpF|hF|@%!-7QH zbyfsTZ<=Z}xx8B|urYY;^ooL{{@`y54)m!Sd=U?|Qh6R6ccO9H=6Oi-F^|x&pX`^1 z3(3=y^pDTf(f)q@k3IQgPyX1GKlbFe_T=x!g8_XCjqFPyd0>Ayu%_ed>*Xtjbs$q| z*!`@1WvUm}ndFQ0CsF5CGJk)z_un{I{F^n|nSDNVRrX)cgYNU8Ir4-oR^;Xsf$#A; zZ0#?ef&>WVWUw-+#(Sa1T4V+&K=|ZZcedmTWWueYJ3k@tL zrr7;2_wek4j?`VIsaN_y zkKS+`3>I^Nv^$SyKBgE#1$c`$0|5e1SMekAOXD&y60zb5S)OnQ{q9Lci4c&vbyLq6 zPcKfZ%gAX#VbT0~laZxDr+Shy;JtoAn1D^D;I3}|wD{bv;+BKQ*zetKf}8f0r}llz zhx;o=qm3k;;Z33%gqkgr^YhQT(vv>@Ta#dwLyfLemhxa?mGk~Q;RnS{6(+zol`7cn zZo6S&B0Jrzk$3m@55n}K*X?IcZsnj~ZE*M7=MF(xxOQrxgAwFeCXs&WI0WsLl)T>> zz7}dG7DTF*HOg>?BJ8NvdT}cN2Jm3H9=lk>g=5IeK091EGu9FbI2MXowkJqE5 zfXBp7epzH+54&ipo_*DN1-{Ui%ok_v4f82pGTf}be*QTXTyZ|dU(^bINm$ynaA^+6 zXB&S26X649?+#sVA}s=L)bkyERG3<<<2+pox3d+Fv-bUm;b#rkFNt5*p^I&#c3ex3 zqN^G)6`C&Hrw;^~ac3NVL6?tr#yJs~^i4sck;^dSbj7vpIj1TH>0wI`W);`h()q

e2FaA<=Ey?;O{UhPLsGVU)U|#VU!7W07i+WnX#Z1d?9uKkcZow~>s zfll8+^W3u!yKa607hdfWUa_+iT!&peOfY!>=0`fqayN#9mS!J$E(A$~H}%il+%LUlcq>(O&#KceVDC#u)^Fu}0auHV5A^Dm!Q-(OTO1OTU@W7_ zrp?=QzTPk^^$uY<9;i4`uvWHjArxe;`Hqw_4JMjuz7J6A0JCjFIEcq{z{q{R-%s)q z=j~nS{=BAPuFvp6yO?yBweP_7?NMXPrE1|xp>P^;>p8HXIwn{r)D)Hvy;L2|$S%H= z&d@R&TuInvH16j;wSlnq-m!?SsaV2V@d44hxfsGg>eYNk*aRLNYZ@1zJ;QIYe-N}N zI|rXsYxiaU=McQn$8giPBRcqZM~(;+?s4H4wC(1(aRi3U)eD1&HtrDTrU~JA{slAZ zVgJT?$Uo2TfA8~CBm)?^+25;YAYn``8>SH9lqWN3D$W3=qlOJ*gmBC&HVkoy4by$Mh5?ZBV}2;V!wJ9; zk@f*>>=-7%j*;s@>hEI1cy}Ucy@)Il2as?~2a<-w#k>K)$Pe*O0N{bJ2f`Le*)czq zL4VDLk?Uu}FcEG*JcNYFIso8p3IG|1EHK+2fGZM4`eOyc-m~qcAz?bwzM}}sA^jnT zv^V#M_M*;m_I%sSx=93#S%63Wyl3M+FpqFBlffMZp0!R+K>px4`I&W;m-G6GH+JC? zya+&YRf+NU_kIG`DZiGm>uU$oq)4@u53)g?HGF(kULJfS%g zH^t4AL!O;~hT~+Qm>Qo$HMo@VBYUmNhrzj_v0E< zi@Tjy_v89<6Lq3*zQ>(t9|=0~d=wY0tPb->PvE@n0k{=>pK+U2eQl2a7d2M{7ge?G z&y0+XIm&3LSd?1$vN9wgqtwm-j$cm-3>1{_4+YdDV9FE|#gB0Gcg-yOpsuFY?b`jt zdrysSU72ApXZSEO(sN|aI2h_Aq@bv17;mj}_F+Dlu}R8s}z6aH&qFiqdgjXathOk0_gXW|XP^g~xqG@J^bLU-(b zSN_J%sdQAE*>wN#a60qkj2S&A!|4m37&m0KhST-Nznp$x-!yvEw>cZwP&J)5$aj0z zwHN84cZOvTc{Gk5Gh%zsr`M9`XT~nQcJ_KIJ@vwf#)_b9`o+N1_9wfRqaW6&^gQ~* z_s6%q-S7&%@xZ5Leus*S6A^KB9-bqWdPRV=t|LZ?I-n@;GaK0N=;k7pmzo3b|tp{gZNWUZx5UW|NbY>|2^Um zy=dZqvaay2Xd%yE)4xN_FMmrMroEH1rm|p}t z#ced7_V?Q8gNHMX9gBbE<;R*X@o|9XO{f14W&P7aJE=fuXYLl-=QBI_HfZ8yz75r0 zrbGDa3i2w_Qtd5Qh`mm_z78*FC$Cxb@vdvU{DaWz(5f<+^y@^gi}-tk@ZI#1O9$TS zTG;J;9>+F`D)RE_#=~z*RH_Gd1`V6qPmrtnsrEdPvrT!E@RX0L^_vNsht~V3a_0MK z@2!*(xm+d8Ge9+9`Inlw&jt(fzlp7{s%+kSQ7?V&OZ1hb^-~3vZ+hYPuZ9VH1!<|_ zLhCnNr4Aau@GpLZ=g$o({#rfKpUgLs$OBZ-`3J6C^$iesC28XWRQ+&Qh$lQXinOCi z`>^Wxn2zLRKjPS1ONRRp}n+**e7il zX`>IP75m>>KceZddQ$2GJNmq3s`rY=vynvzEQd0v6qi$f8SEMQDoWLyu`d-3s0L91o(yiqQd@;7n*MQiEO3g}zki>vN#rpSVI*Qe_MHHVZzN#Rxt` zD=Fo>_(3DN?fGVRdsWPkdbD2#e(gxIpRb0`Kzo7zg2=F6?8EqCs{(QygI6)=nH^*p z*EZn09BiSE#EmVpi>-Lj8<%XC;qiT8z|Tx}wugFBH})LI^7uh-MY6MRZvwv~*>0=C zgKsNbY!y$FDyv<5YXrUpWhs<0EQMg5=H53PTQSZ3TmdQ0K8{auw%IsM$c5ZxTx^PS 
z{xX4I>N3W4F2^x=wS!)ni=8ikKR{&d9_)x?@VqX3k{+*yI(M*xvQFUTsgyDhNClLb zD)ggnKQ}gUykq=CJJD|9C-#Z)bBvK=pNLmZWZ5wHmQ-gy%YZL;<7c)9KgIE*$@smX zXC*S!AAoTK-aoA`{?N3(^Wb>7n|aWt=!4RO58`D7z05S{Ix56-%F^t)L>+haCyvV| zc!H-(mE~!6Jb3n~4*0e-?=hi`0dD3hPjmJ=I#Q|(NO!4!;KS1GYZP_duA6jcTfv~0 zo$kCwR{;N}i#^nHw??yF>@~vPP~qa65%@+2Ta6y}4;)uK-Tgd@3}LK<_!t0uJLr#x zHkI`4n(ltx&CGCKtMM7aIOKI?jE~EhB9}2+fZyRV#&sUYF?jh$!2aiArxAEFkzv0( zJJl|B!az@v>1-ze_&_2H^I*qu3|_IY-eWVJbDar%VWx9_Vm<7~u`c#HA&+}q>>U8U z+QnX{hkMUn7kf5{YiFiC&zKXRELcBTeRGYv?)I5Z7n{0psd8qP-KGpE9{9X0XFs

u*a*ow{;L8>{?+0S6P~TmwL>me>cEu|jWY$H_@n{ELx!C=D zVmxr1yLeb#=5(|#Vs?~?*A#f(%UuEm4D=z84Jcrkl$s8d2eb$1 z1ke>AZ6zB#} z#QjogDNqH_X`pVPCmxVeNkE%`3_#a`CI(2UM4-7ZO?zo>@f)wN`ouqS+rgXirsuD& z=AU1mOH2!XDlz4?<@rkzLlO%T6Vr2EPQ%|@6IX;y%Fil9J~1aGY*J$4vVvDL@}3F} zPMH{lf}p(2bRi*1A+JG^b7KSJBl&l5A7|Mdl*vIh-Y$znS*Ww#0+fY0$!78PrZ~%Z zJ(ZIzn%A4=EaUZNI>};ry*bV@UJu?8sdfl=(BefonFV0~KA_pa=L=_`7biiy28w)O zK8}I+Oe#>)D=Sz*&Q-^E+;RPSVDz3LMQkuBqo-p2Lu&OlLb1%JFTE+~P2Jri}dfy_D_ zWIPB^qU`6{2W1gpDk=XCFPaQ3UVl0=D4bj75uu^Yaf-y*d+7Cw8us^0UiAP?kN{$yO*|d$CaS1A1kX_&HOh zR4O04g8YoY;HP*I$hP(Cl})7FTyfgA7}mB`-4h)X!vzG|)ZiP(lvFEB2L2mR*Z!EC z+xI6exqbhcrG5LM_v_uT|3%wEUXf6wgIuIP+BOfm5=@3(e!2^Pz=gjjznAA^{t1-Q z?!)*nNv{v?`k!{_&xS%$+AJS{F$3{uo>rrnuU#$gc~znH?RizMour-R)9?Ej?XjL= z_;pOr2^gw9nv$Hv&kp9d#ZpS!yxkIt)rFW?eK>oOQ!80ps}E~pYfGcLo7CMKOt1Gh z%i0%EN-n93Q+Juwa z+P|b)2_A6%Av%*2f*rg5&m+jWErNL=*QVQ!{S{GYkFk0Ygp@V?F$BG(#asg*&cLXj zMX#FxL@jqlWKRKG1DaRZDy9xb9b-*Q6Ysu8dfmb_J0g;;8Y&>7ny<=y*S!w+d_*-` zs}DJzo0)LEgJZcSfX74wJl#_Qyr)+=z+*&~(?uvTw5 z$Ej@qz8>Z>tF2s;l{J{hq9$XqT!5Hd=HeP4P*&HFn>AgQda%yGNC-Xy-kGDoU87mu z4h}*OCu@NCF)SZc-wpd6hy4aO8g z)OBRJ_^ul$MxATIe<0j!498^e!bQW_tZ*ZQ=s~mrmEJtn7LNf1(B*@ z6~T?jJZKxl+nfbBbTF|k8)eIMwpeIe$8gLqIs9C#)fNE~F3AXf*cPHvl+mndhpcjt zRJmD(W+1IFv02ZlE6h<=*dI)m8q1B|Wn0pw>YHfY|74(8d4| z7uU|#S~}o%qldIN!+)DCdWPe7qLn`2uK`7Z0mm-f)C`cqDy9}|O{Rng0`qU?ss4#L z5e#E$APV4hGo(HWmMFv2!n}Is7}E^kE%g@MA(LzzkGHiyn5t!K?At9PQvxV9she5W zZ8n;-Af{Hc03*qYiPgbf?+9}YY*ty0nQyW6i~*_y7~G7nf;KFJb6A1xY#mc$F@Ohr zS2bJZ3iBR>iGvN$Q{{|OxXohNst|zrCRCRdZ_o%*4`RKg&hj&>2aI664(EaDWeqS_ zH54vZJHQ1V?$yZa0W-^sVXhjOtBRB1){o`Hu~?v^pmxI{@j_(8N;UtE)dQ8zS1a7c zW3u0z!B^?rLi^s|!0-I9_}rArJ?}Y{3Z~xXQZJX;8myNFd%XZ?zFw*=b>`pjsf3Rw z+{kM9im7v7F}>Kt`t6~n5#;cjelUp0`2YS^H_c}u;y8%m=JwjG?O-O?!(%~`e%LDs`o zB@|N?V!;hXM zzJd+Cr2|ud%X){>`2}V(;cSl241q$?H1w$(MyQ)pKz%A_S-!n;m_C)GRm-R18)5!{ zXdAruQVB_h4Z1>kv`x1WcC?NVT@umSI`fgqM&FIErAVk4+aBml&>A!5 z2W;(7OlljTzc$@R0!*PCShyqd;#U8jVX5HrQECjBshGTqQW`i}JXDxAsxz|`f1Rch zQj!Na>0LT`X6lmp9J1+j}aCiI4th26#* 
z2w+K@5`&x=DCa1AODkQ1QC5m+!}$8JiGe;W1&-LdbHPMH5{c~CA>Kq*Ek>5NzpF%y zX+naF!z+ut315Ua;S=U$FTuH^8sn1@K311AOv&Z-5WI?@odDEAj~Z(0XrxPq@!p;Qj(| zB>;DRKHBeq-`^Gg25<1s@e+TVg1@D!d|ed1joWG)*$X1;u*H;=i!uM)wwkrsTyRR}*Lh>y5J*Y6xogYSm?&lG)z zK4e>L%Uk+L$L=F%6F(+93QFo+(BaAe@NmfG4-3gNsR7KOv+MQ)t6X!$5h** z`PBUSpo9|mKC}P(o8HtS_vavs0X=fdz7Sm(LG_#7z`pq>Hlp5i4Zh{E5}zye2JQ$y zuQy-zBEJja-sA9+tLP{3xC8yG)EnTTeqI3=riU8GjYq}5i1H3RBbD@SKd!fc=Qn#3 zgBWjP;GFW3-#w@NtIggZf7v_a&MB|+obo5NUTi)J;Tw{nTh$7fNFHtbk4Omx;)I0g z9*x}gokk*V0Bct(sbGrI_~Z_1eW+x$0<`2s{nz+y&}ozd)e4RLS+&x4hc3yG+drNk z!1DJAwwXwgTD`o9=au9aC|~VYz;maV#)pP6{m-MnE1+N=SQ$&->2XD3QkTwEt7lgHur`OJ@~FVPGUz7Usa?&Vngn$l$8 z7_JGw>lgM;)`VYwiJ#3zvy8&8l;M*_FGy`!B@(jHnxi^}4`8KDl2N;Ct7pBE$=pVmNr z^na6c_2E%fXMQGRV5A8%(5O+PkG8Pl@>zmHH{d4PD@q-A5Hd010IR6k?5aGF0R$uH zB(~!?mVRjIv)ZKsK5VOP?4#S*wRA#a%uGg%+vsyI+&c~$?HO<2nLxyW24~;jId?wt zfdWF??f#S8xgYO&-}k)lc|XrN4BQ7}%wByInfoNiPX@J+2ZuGV$o#muq9N!x=9&_& zsx=25G*_k0PdM}J{zP?k03zds8XfZijvGBBxIp@GZCd))ldL z9--$RvQs5~DcPQXnaP{9ul`E8Gc||Zd?J;quJ+AAoqcI&Pl1U&fAh&kT#_Bpk{yxh zsnp_>$4x%;O#=D$Vz`$Fc!QdXkfRvHeCU1SxZrfd1gl|oeuYSN+*}rJ@41cN1 z$#!q1&+6^O>i(2xfc-aqN(1frbLn5Q-05OvgrK+Wa{ZFUPWF8~_1InXauwUM8S_%y zoP3XQ9jNzjGEg6rMt$?2vZ%j{r=B80-Co5)n+(*irC0lNQGa#e7o)!VUk9N*HjDPb zI0gj1{`zFVQ9xJ|$|+BA%2On;^rq~qFXY(2HOFiUEHD$A%5jK*K8CRT?HuzUjEYR` z%2UDU{y`Y6zcz!>(vx6RGBQ%$3{v09B6a@|q>4@K2REg4!e0sN<*zR;JHF0Mvf&R; zd8@2%-K~u&PrkohcaX|l=Wm_z#;7j8R1_#E^;Hf`dHYt|?wKy1C%H&3q1h{Pu~`pu z9d8WQT|rZL!<2Ti3rQC{|6ve<2S+{cdOK3wtV}tgKQGjSmN|`ANaRoy6*??;!DtIg zHK;Z)HLi53leSi)bQ0}UTGYZAudOO%Cbyulg2F-{FIn)~O<%M<`nnQU3xo8KAW8{! 
z5;HE71Rxv@sf9P<#xWWN1!J`k#m?w4_I5oQj5u1ehoGg>dtyA9eswE*IASgGk!W0V z_oOidc+K5U_jxr%bN7l$DO*gCwaSPl^(!=DFN&8vsDY>kZk?LcEeMN-qQ|2LXh?TA zPzws=*fj(|t?w3XNU9?OVC1!4{|D}lHw5b}niQc0np+5ihPkNc_S(7QCBgS#%gpdR zlA_Mbe*F+UFqwX_&W8+V%qkG3F)X5@9>aGVZ^w5uHT>7O1b)T9k&=lT4-e?>bQE7u z4o(8L3jn1U%Gopl4msKalDgrx#ZhT$V`K{Ju@M-AUtxCUdBDRxk^qG*sb3HQzHzl+EaMQP9^+YGJZcl*v(p z%$pT@yk+pv*@1R%Br{EjCgry?ML#`HQwSOvM{86WK05D85VAwE8H$Eghn-h&_o$P& zQEibsl{PW)D2}D2QHiK1i1n$K%_!ox@nTq;Xg>oWd9S=zX(zcxLH@019M>{klN3*i z;a(?-ziMGKpbem2tn5__SK*CA%A2aC8CT&bFKE(qw75?uC38`{&q&D=8!}R|bHmA` zB!TZLZ!_eA|6&IXnHeM{)`46pnYm$zl*BVq^34rHq=W=Wm9kPo5VJjb=n+rHtK_TQ zVIqnO=nYI73eA=c7ihBZc!h5*{N7Nj!i0cXHoljIHoMDEDo;eSm{8--g*292AWQij z&@#=EtV%a%7!QQ{p?WF=$dKO+i_}ZOG4W<8t=M8H=lgspS~DOuvCRFsbYH!XjDLaP zGzZDlmgxJ@J~rV&vq}G>dTG+Ah?+g)AYXDwy_&BeEr$LVclnZ+PT0JX`z6vOR0_{9S?BbIaacGgUCvzsDOh2lAy1hmIbL}}8nzYH~Cx(pHEZ?oph=6B?+7XZvwI*kcX)0-Sq0pQp=?I(G zWP*c@>E?cS9BviKW*_?9%GQ~=B3ZVfZ)kK@5~pc$KmsDQ7dYv~n)Oq$V7(}o_|XK6 zscX68u6h!Bg<*+8YoS+32n^XX7wnm8DL4MFs^UI>p_6{AmaQ(nV6@N^Pa+yxc51{W zCJ!NQ`L8~QxMgckk+_AmqP=Z8UGz9V{Pa8tsv|mExIe8(` zj-c-0P6j7d9T$HxhujA|r%3J=8ipPIMGm?4;pCpE%ON)?$egxqsUWw;Aa@1Goz}7R z%*ZYN`yu3B`{HSkySeTZ$=yuDuu-|>UQsuU+;Lx;+;Z;iiJlrOR@7*Vi`)9mw`a+p z$bPd1VMZ~xUP;@3jW+DUO`nk<*Ta+5uhL}ZUjq!S^rDNN^LO!9{0~^V@fLHhf!CMF zHa5d~sL>Sf-9Zz#UyPeVUY$@ooWzkgDCW7&W}?fO;5uCeo#>2Vu#3DQ5GK~m*g&7j zM1c^kMPxRTH60fdhf4nmlM|zGNp&PnljuFGMb zKfGla(U0^tKnf3Om2KR1w@WG0dohaqFZa1sSCx>#aY~Htn>Dx9+nGl`o$Rll7?7-0 zPPX6;vKk2rZT3q^Br>v1n=!b+yzBIRv^PBg??!49zF~!DDzYQIZH*gDzoR7AW|juE z@qR~fxpq+;`!3S?tH-|so2ej~qwPz#yYqf*-T(#zbhI^(v{V+rHrJ;}OCog@M_bKx z^SmFK$qoxtojG`_QcTR>#vX8|jb>(FTx5yH3ZS}0L#G*Gq+LX@L;RCG?7h(Tk?tO@ z(%i3!zBnYhlo0tC0qI9HEzdYTW34_fC3-NUfyszj87+B;(Rhq=dc@-DW|GThlF) zv;1xqxlRc2WdUi2COg@R13A;i2XVZtI%0g4N$6uWC3;Mi68KPR&_~gKl3kjYi!!j& zoWrQ3GXUJ+Was6ACG2(xu=*H6RL_eZLn~l$X^^KSzWmy;to3ha?YQv3Ib1u!hlZ>j zle_*J){gzr|9fl4!h@%^c6{{d7p)x=!+(~w<8|$#EkK62-1CKjaYl?=PHCtwx>@~N|;;dv_p6A}SW@&~# 
z#nnlhmvT4P(nP;?ry(|P3n&XA&N03thCFv`DZ%NO1^0a}`NZV*9RS1+(-7xY3y4jX z0%AGOk$T(cPpB0;XJkNv%m2$7v&ict*(SQIHBofntXW?Q6j{xU^q*Dk1XM;3RM6q9 zkumzNQl55ocd@H?Vy0#C(daGpz8KH=8JV7?1XG>A3z3AQ>#VVH{d7X0e7a!cO*_w+ zjS0YMnm&|`KizrSY`pj03>)X(n_=UM8G?;7XCT7aF@%jlx?8hlMw*R>wx67hMfVP8 z<7DCSql!oj&1}(AP-D!jLk9E846+6L%4gHS*4cu6V|N(DCcr*y!3?RBk_h)Et%o&O zIGBOo(iDP36J)?@2CVf4th>{&YO1raR$;N5v}$RFb$Wd?mvvL`F<571C+^O$Zrjy@ zbv5*#-6L2-55OA1y_xi&COKPhPiEI|=iEi0z*#5BJg=E#%H4cR(`|fu`v?lyb|{_O z9jz8x@5m(8&zz-`XA(Av8G;SJdG1TGbQ@r;m?>aQ*nS2qRaRzL`ty|;mVWmef~8LS z&%XNHaF!O+gPPdPG)ogdI|)l4TY2g%%?7A0u}wz=sUyUwm#rKzMr}GYC`LVe*N8Ey zvn>;&w%s*CjC#+o7?tr(p`a2oB2?l$g{z2DvCWBrR34m?_Yk9yl^Ew_zc?^FLd5~H zpA(jU5!I9L8j))G;2^38?i`Wor&=>q@4fS!QTgtAI-Nl)52QO!J_3-Ly{9nk4Z9M83Z0~*`}}e8 zv{;sdV>Bf`n>)X#RwelMK`EBWbJ-pba#v`PCiBk~cwRuO0KcjUSjto6Z~w`y&YFRX zp?O_w)qGS0ifK&SlixQZm39k{8@hz&Tbozya}1FCwI43165@QoW@ObE03m9swY-m4ac8WGTj2tB*C}n zk>gm1(fBq>W2^%1^NS$qvgk4WVxm(C<fYM_mXaDiciA(+3+=}N)%%J7#6rP$P=D?Y6=3<(^_S(=58QBhI!6= z%VZ+psP0zwYi>fnNs}Txf|0DqrCd-uYNmMmFFBBfayY$QO{8 zS$oAY5pP4qzKRX>GEzNSvACiNAJhkI)Hj+5c)?sdW2Z84v5=as1ZCr!`4?1a#cz>g9kREyFYl!~$DIw%E z2xCKvUit)r(j(9!aYD~d<+k`cW`1e{Z>-ec)MO~AQBvU5D}J;>Gjuqj9W**fo4A&v z*%4kYf$YGZ@_(;}$E<2nkJ_Pntjv$!k`-AIHM67L*2vpWjns&WY4|$Q81~!*%xIuW za3vLZN><%Mh$|t#yMqJ<$%lC~h2!}o^=d}uZK@_$1*B#i|6tEuIF?M}izDw)YS!G{ z`epPThMhn##%WT2I(3(?|DI=5D!1`WRu5oH{WQrD=ziQX2Hsi|PfA7vgXn$)C-{hE zl@^L#gFipu9ku~_bssu`aXgF6DpXj9hM=wC>%>0q?=bfnM)cSkMscR{hOgXall2;= z4P3?16Qq3Gfc)3UE2TH(!SDXvelf-18-eMLOUQ-F@qdOK?MF*1@_J-q_>J5-n56CJ z6RRIuoD-r0;`O(h%N=R*>rOzF%LI+I>P4P{ux=pJWUaE zB$faddvF@x>m6T)-Y6yhF#UrLsPXeTf?GuLFKru1cwi!`0*xz?j|(*3k(-tiENJ{W zAeMm5@9NREgFYC#y~)@_(>`n7d7sQ}hN z$n!Q_^;D}NB~p!iN04k|Se!|~q3&{}qdVf1I)4QeTnf@h9A!X=;Kxe-Ly1INaF`9$ z5$#YM)&brhhq>%F1QEj48GksRA46g!kV#joDEzfG))HTgG-aUi=0R-X*!SVl5H9iyW@h)kWpBin+VliEm&`wKP;gVpZ0+dJ}*) zVsWAw-S2b21mvFFLM({LdqnHtv9wbmAJ(K@+}%ho1!VZ7Zq&iMqbC%mhAWh?IB%hc z5GdQbb_YMG0HSZrH3Irre#0(0)D3;OfrMfeKU0P1UqTn;LF6m~*Sx|Lv^-O;I*9X2 
zc*a)6O!P!Q<_(gSJ$ey02x*n15BL~iem)ZBfF?73A`hHiB{;ogxBzy(%q4dT*4PsE zA4|-p{rv5!T9b?DA|{B@<&a>ojD|>bPw83kJjBR za^_?TBk)~?{mng+ho1Dyyl-)B8i#R_^hok^OK)$~!AB+u1yPWLU>&R(yKS+C)yH_H?FRrD2)zPC0 z)vXV*-M5<4Yq=W03lOkwExw-3RYe3H_gu~D`awvB( z|8$e76ltjnQnxiEs9&IQTWJhv9A<-`TSNNRH(cM;0kyuFq*-C$ax-x`qjZBbP9F_h zDXApy+#EJQ^Q0bHfWo4dVQYBOEX*O?HKVW*;S2q%a827F)j{rD2e=a>1vCDxt?_$U zm^EJT*^EmyAZ_Dft-wxhXoDk4X2QuHpiA4Y==upKyGvY?yNJjS3=m|a*VjO>nKPu{FF+M1dsP$zE*D5c6ECK}>^_J>7azV&Xpp14HRS^P^#k-1dp1 zhqRuwr@0RU(gT$r7IQq>qA!QzN;nX{#Rzc>L|d|GmfP+G5GfA8kqKChu+eDe5M}5{ntQC83*qxB17{O_p!ArgrP*W7bVTv*MoD zlx|Yd;*r4|GNw)6#_VYwtteCr7Y=5_aN$^YjSONH(iMcGI?gPY!Fn8e@#+;$=}+P* zlCc$eGS*^jbiRzSSYHITp2radg5z(i5yp5q9-rTckLrxEo91(PBa$)KA*eBTp$y4@ z&r&oD*G=3|#>9dDk#So`NKpCiOG@!9K(Ecb#GS@Eky>v~-Rcdj$8~ zhaPY^j0|l)n7o@ix5AJl%-}*ZP{!Tr?C8`y6HB1YCeUfk9QY+N0Yt?V>WmwUP;02# z@J7Q=Ss>gwQ-&@LkAl&4;R>hIQ{;<0`b`yb|8uZxRw@3AbwfiJrGyU=p7J!*T|*2 zup>Jj=nTYDXLJq3LzkrhF{!85ALtQ?aJ}#Zy_u#EpZNMPF$ioCIATkq z1Hl%a46z7%6YBMQ>A2u5LGWX!RikDUai-% z|3WOZ6GA$YZmBUX*$+mARF(HMJ*NVJ_QU z?u%q!^)d@U>tU$daFGQd_jE-TpzHY31c07X!4#ST(0Wc^qqW*Vw7gg7xx}Lg^tJF6xY%yAz+Z8qMHUk-8|sKe}+9sCyAS{xIzfe^9MB= z^))ACcn$|(Q2+c1IWD(@R$4O&b$g=5ErR8!O|}5T^SFDOj`)*o6wFws;_&V5vMT-Y zcw8){HZ1bfYG4e3xs}M}J#Q2d1bS1bcERNMnibEjSXQBtZOh zl|$B-B05BBTd_3@?tQ5q4G;w&S&Vn5-@X)xrFsy3qX-GiZu zCTB%7Ip6?hTaMFKH`nG(_?_(M9`zv*NnSYKj_WhTInnVu9JcY!j^b0G$Fnju|9PoQ zMSbGh=UJIr<5%$wh{$GDid4hj6KRIp!)tK`M~b2Dn~ylCR}x_j<162-XmLL|dHcle zxU;Z=BvhEg;a54#Au^6;%jhBi4HY+kg{PU}i<~mf!7X0+mY*1f(eN00C}zkOOpux8 zw_@eDw%CQ)XR#eZJUHt!)7;pe(E7$V6b9QK1j?2HS5Abv=Yk(H)q z0Xsnr0qjx96{}@+B@r1M+!NdjKm6K?j> zLV`1&wn%Ww)4e9aOljKGYNgr9Ps+3SU;=TSj7?CRIQ2mIGlzuq#q{#k39!`zPRz=T zcD){D$wKw20OZ%9{YL&m#McgOL}X1k{1Shi=AS{WSh8&sW!y;{u``Ik?V&nEIB|bS zC~8~=u|D*#H9(R1wCd)gOd}y9N|Ktc2y2G?mw&}q=(a@dyy7hOtPg>q!->yX>zkJ z+I4x+jvH&UJefzTGiVBXb5eJk+37*n;KFd75=~BY*h-<%+mzu{t1Hvkk4Pbu8hMsD zQkqopNFFnzrAi*4DKV!9bBPyLann6#76-)sjkKKY2$9=|Dw^_4;d~U@ZYgnHr`MU6 zfkX9r1^)ZbfSZUJ19u9O^qx>V4T0UEM-XSFUJmCxFMIUbP^-%Ltwq|fPs}p8>7nl; 
z0n^lT3U-+mEaEI8v0xEAkurQ-ks38KnY8CGSmH1~f5(?gTc_@AG2Y?!EtpTo9x}6` z0;S-DpD0Bses;5iwWlWE0+J%;i_teDh`460*F`|ZM>QhzW|n0t0(m#_#-gLZ7>i*R z!-^ipJZcC=Tqm#*n*@1DNEEC$rN@bSgSmHeQ3N#98uyQs2h?vKRefiQ%f~q#k<+;r z=^lAb7%S)y5todEsh>5oj2Mf-jY1zLq*2flX$)VSOd9EnJCR0f$?+o%?^=sA#^!WQ8l!WL zku*L%H-$7l!vkcKZBnE$cWz~&yV9Gvy4FAVGDzV|({R^5e2q6&S^dyHB7>`8w2gc&!DSe&l(ndE%GJ8g_gr4TRTlBM>XM?+QW?D{cI z&R1(DRFJ7;=E4a-$6q}h6Z)@=XCmW)!t=kdefrb&HtpH=0Clw`5(HcKj*(O zJ8Aw8WGBzxloOIze@0i5M6wj|3!4;?ECM9+_KYrsgya?*orxAOtPWS6t60Vz(#K6@ zpFTO89iy@I;$lgt)8Zr$vS31BRZfeqL%Z;MP~-%n+%&!vXd~%)y)$doOn78BA=yiJ z7cl77!=ow}>8wYFIAuUQ_=-za^;InwaZwGM)niuq*ifw)Ga5%$n>`2oq|q!=!eWC{ z%fyIuV;94%55$1cI49dUA&tsVa|RN~#C`bJ4d04RPT3q=UqOpldjPFUEaCX z$-o$(oe^jf#otW790qkPs4q!l zYkBgT{_}-oU|Y+2!_(ukZtuLTcTS*X4SD$lrJ?pNY3RB$Y`_Nic*mVP7yk2z%|Ii1 z>=WzCP><0kqM!A8lP9?Gtnz#eq9TiinGnr1A=;hN%#dzVkZws?&^d21SD{G|u$3TR zahq9c!)1A^c-vCeyzum1u6cl3^Mm0NWz97^JFofCTYFqHCcMO9uWMrF$#Wy{G$!kt zSeJu3G~eDr3k)fQLl3hL= z4v!N2mut^Ob>wz3p@I}eI&bWt#{R}Z&S0x-?&YUK718D*yA#d9P24(iiwc-hwh-u> zqx3o$AM|RiH55?g>6S%EQ?26?b>`*dnGj<_5u;i?SoS579TDq*88fI`wNi2%xMGX( z2F3jLpm;$zSP~^LmbU#8UOlT4KJpAPC{fXOaaf+Hpk3euBh9hhC3nkH_Fb`q8a4pZ8?wKqH3L*| zqUIcSr3nQUZyd}UHE7TM@^m-T|xH$Y}Pauq0Ud#y)X8ggy z6)-Fi&Kw31#`*xld%Fc8W@$~a!;L9Mda5$}JFx;2cGZ;?JIR0Q5nqdP&7gtSH$xu)uWa@I2wuTqjSYVG0UyCcI zui%O-b3)q(Kf4=qJP^@t%t2jOQxdq(Ml3nDuiWx0k8JKTo3{)}G#{CSJkr9G5%`LI zCSBcoy_yVOk6aUvm>gCv7eDgo4dvoxL!_!FLtX#-I&4kIkNO@ zn5q5?zS_Qkr(tYU#_!qcCZ`#L%$u|q;|$XGb?o3w3w$-IY&6Q>0H;%U2oh6fI2I-B zoD)F3T0$)rmvYQxGg$04(;+eZp;HEbn}rs#D&K~2sTQj=CB-3P^+`@g_(e|qxDuk& zKc580s}`fInSd7e$BcYDCg=r70}cq+3iu;5n|Je>?|EZR1UsBZpC!u0&GL^z{HLfW zjKz&=ozp&eTa5EpvV_tS$pHFVxN0UM(2PWRgR{B@dYHO*a9lzF>Go0GIF|66$Xc~U z%7ePtJOp+sYSbcrhyQv5Mh&d_1TpO^psA=quO}?ABS1jsa+WO9?AGgXrPRVHFl>o1 zets69*a9>b8q`Y&Cb_!DmxH`;9!HW(`cB4-FpX9gZ4 zD4V0w5zXBOXqg6&`_E+bO3eP#k$BuFWXyWoa5Z5!V;>iyedQ-Ww2iK0qTS%?k!T0q z-!;+R<2o^j*6Ff{_QOH_L9|bL`b@OXbW5~%4C-g1b)!}S)2(^M+FhkfNqIt1o;2hs 
z+7sQ=-u2E@+8bk=T*d&tc(l>n81@!5&%c43aNH;pDWuedOvJ)j~Aw?qt=r1vp$7z(#;;0!SKF)AD{PHQX~sM4*+46x!cZD}q}n5{*2gI?9m z)-!F)ggFxzBH=)T^ebRT-62aubmNw3vDlIy%JxEHh5_3ava57uk@Ems0* zIcnvjyAI2d=)Q>gq6_IsnhbxGdPNy)J8;TE!=+a;_$3J0+0eI# zn)P~2qn_1>6z1^z37`h*KL-LgGX+esgeYCIg{A~(r=6`_!AB-`{cHkRZ?-+Hw^@vH zVuJ)=1vHdB6mXo(f(nWDNzNBu25WAzR#w}tv^JoLz*vv%G#V_CIHdNq8JL|>?v^AK zhAT#}gkGD2E#qt(G->S8q6Q7Qm-dNu4#jlc;t2;hTtE$tXCB)GW+8WYS%5QJHK3M` zY=<;3DJ@6QR1MS~8_u3kOelDz=0OrrE)t6a!I^U-vHBT;#IsT3J^CIchP?he}FL4B%X>Rt$Qv> z5cCE%Se60Sm8#0l4I~txvU9HT5-&wPjs~_2?xrKRo%*OZ#hBGk$lGV>MOxHCNZAj` zWQTS@=5c7&8$$a5ttuU{QFZckl%G5OXOR3elxI(aHj6Y&vakW^+r_sX4teL?0*Q!Z z4gg;?b*RSlv^jCW`grX$v#*D(p}$fVVd zJ!V8QWf?R9t86ns<@F@f<%Ot=0U2vJfYQ$OWW5FnW61Zj!o|7m+Efiwb<-|9n0rwb zL1+RvbbF)55AqK3Ig|->vTxg5L%>p->f?KhJFf{Dq4a zLTv)_GsD}$o_dfiA3DOaFJxDbNr-dJG)gY<_z{Ph-0Xh-Sfw*7tia*YP%JBxxy7AF z;L;9oRg0_8u&hdfC;WGUr(S2&aJD!4@irK>bOg!VkMPNIVad{;S-Atbf_u;+auTfL zMq#$LL$4(mTe`nIvP^^>$l+D-ft71Ju&T@ht3jfr8I6DI!HHGHqHlnG6!u6dB8ggN z12&0FajPyq1RE_OIomQOSiACtcBQ9`v`HIBxFAGukC=Q|Zm>?OM-@pLRYEJ1;Qs=f zC#>WQ$)w!`&k~Q^M3q~=Ln1JCwb*yaktBqSu(T2gAVP!0tuLt&bHHf!o*QP z-LmMALz6*WyrL?wyk-*-Q|aRbRQQ0>Qy?HadOoH^dZ-7;B?ei=s*RK+75=c?_t}F# z4)+~@@YuNTv_5d(AtSyg_g&xa_@3N%SljX8zW>MGwE#y|rRh#cW2=coMD69F?EqtT z)YM1zcqlftoTRC?qwtguK8lFG>L=oYI}JFI1mal|n)idn_6 z^8nJFh;cJox0BmV^AJzlBzyI8O`zGC;hk}pYDXK#E>3h%l?|=W-`Op8D zI$+VY@5KX_5w!2>?y+j$`%ipb+V}QY3hjGs%+S6k`s3PnMl6~3jV({EeM8I7uJ(Ph z-_X8~_s6yG>i%Tf_n!W5hW1_4Gk)#+Oy5Y__qIMm`>yXx(7r4ChH2lKeF@rkQD1`g zozOR;_MOsaXy4C2HEG}X)(zFZy`RRlZ_}q=t@i!Zc(iZnl&@C%zJ78o1V=atG(GvAq6Ci~s-fKL_ zUdCdh_<`Q$SIj>Fv`58YFJxYnW!~TNSS(---D!#h0R-RE+iEr3sU?BO{G4a_r#ra~ zAeuJCG`S~`147pjA1J*Xt{`f8k=WV6z~{p-aBuI}FfgxIV&L@!nGZe**MdIW@IBVJGjG%J z)Zf))Eg{Xu-O*)Y z8mA?dmU3Fsq^&rvA)EfN9=F-f+D#F+qLZsDhIc?PXsL7#R`K{bw~Gt7s9Bsn>s9CP zSc(k{z^rniVE(EJRW6$WwZ9(8;p$vj6uHp75V-QgY$Xeut-ErME}b$@!Pc(jP|zu2 zf2YLpgSBc_NY)V7t;IOl_W_GlF1xYr`}(?d@pS_oT>q6<9#ZH1+0V2E?O3p+Q&OI3 z#hCh_JyMCThPsu^Swh@!zF8yaNn3lQ`c}l0P8mDow2O1`vDgFA=YdDP9(A;38TRrf 
zdSgdX4G-z>mSRGqc3!XyvyeV)4|+w@ap2jb>Jds>aK7LXJ<>2kKBi_k ztb&MOGr;QM)(bVn1tsFO2v-xD)2C(?@JU~-tjyw6#7hw%x`v)g?BI{pgck91L{_P< zL1uk5b_ZNZvmWJHi53`f%>nK!eiE$`8_{@CkWuOM$XBCsO?8jIL3JvM4~d=5R#YHF zIpnnN{pDaI3@FLG0^j3%eh^n{a)u7UPHdz$n_D>kW^eAdx?NY6=I?DU2c~}!f zQLW*VBxznz8I3i{QvL?@EK~CoTuX$dw5WNxxTce76klWbX&?64O0$~PRKgcF68wB| zHZI;v%PTZ{D=yuO@&h?Y?(yM*ZG1tnnFBi+c!CULVd=!sNY4B15w5*rMB&;d&YEyd zkC}z*N^#bO>%a7yh3mk_Uskxj@v$LXcYgdOh3f+!|DO@A=)oLC>lawu!QkG?`9Wpkv{u?J_yN`Z(87oEXvTuTnEgH6Um{d7qf{bvIg&KIs z#zS_=2)YZ{+p{N}bZ61wwJ9#-or^xVJ%W%WCNMX8Ez!e@M>&!A34! zcDvHdFGZSPA(Q!)cNmnb%;{Uit+1I(h4}l!TG_Rj_%46+AWCG~VJQ)snBNSE-~&-X zB81F*Pu3tkEBHs?Qw*Pw<$?kRkN{DR0BHqESP#<52$<8W?Zx^3XD0s71pmMBHuyip z71b3~*{el0ikcuXy+5SD0`)QrBOeo0Wa}h#)BkxP5dLgYBELD90U!amw^*~9w?&RL z7(7-N^r?9jsI6gb#djYOV?BIoH6rT}c8N91*ln63E+i#C%}(T}>_mS0Y&buCGMu0M z!}+OcWPU0Qje(!u?jDVwE)1Pje%f`=;HMW3rr@VvA3QVsq!|2EjtvZJ22^Q*tDzR3 zn#>F%th_9dZBv4_4HFRY>Qm(CA!c z16S>S)5KLOb5$?6>I8Gu5w6TtFO{bs_D%h`xvDWGS2Yt!HU#To5!8s+17qf^TDS79rd#4*ifixI>~XR^MMsC+_s}GI4-7_oJ&3U(CQo4IKf;~IWpP@A^<4PKFs{`mbwx;8*f?I-GcDr&aXSaWL~$_TW!Xoo7;e3o z2}j>Qjthm>b1SNb8A|0a+w?w|Y;3%7bz`WS;DG3F=T{XjLWw+u8*Y88Y52Cn<*H?>@h+S!V@(z*0-i>&k6X$Z4)(0R&**oUmiS%-{M~!K&96HwsOZr?zGlm z7&uQ(9A*>!le{86@Zdxl01@c$H*+gcv>nyY7~)(4#i+&HU|0)&Ar0HC18Xy`^x=>r zN>tL!fgBFF4(gP>un-!Qy{vrS4IV;=MGv1v`^;z2{?ptW4hO2FS;KxG&tb<#vegtl zqrIXFZ8pxilr@xJH*HKfTcngg{2pypp*Qgu?x$TJW9b>e2WUu`Oe5;AOx9OSK*M#9( zJ?Ly3zEx|!nJQ;3UFEDUSyCjXE5%pa#r!P>xoo!#)b`ln_ zzYWqDt~7C=O>;UaqM=Q5Iz>foo9=X~L&i`o{RhLd0j)mSCJsEknq%E|C_S|6<5Owf z>|QLLG5UG-Tm%?t49l)RANUk5t0)_rK7?ijdpK~7asS96elwA2z;yt=+Mrh^fzyw- z9h7B&;qOvk2Mh=T@<^lAgO;ZRte3bAow;J{A2ZwSP3-o<=3RAd*IS1={q>8?)_kix zfvAuB%h;~Rn82xp&iAXtZ^SF#0^1Ejhj&YD3{at}clwP4G;!QV_MYoQm?!UZoB zUvMvW9l=IOb*{eLJ`^vTifD(3>dEVFDWihk6Y=hZdwuK8AF4KHv z^!~Rc`pSZx091389S=BZReWVv*?%nCqNmj^yG?u^;~R}J|UX~-A5#;GAYgNYjQ*7v@+hP>##@oLBif@e%a z_8&bH8nQhwk`~$%_&PP@f)QbJ1ioDwGWHKiG~_!isWjvpEry2N*!{#bCkk8bBxFhQ^`$xZpVe%Lyu ziBc|BfS(gq%*TpjS#~jQO1Ad!c~b&VgrAc`u^0f18I)4onG&9)%cKNqIes1ivQoe^ 
zKJb2y+ee$X0uzmC@+C_^kuF+6&pJ+{r6z4&!wD1G6O4;ljH?*W!FW#M_$-XiN*s4!+>tmw9plpz$L$!mCyr0S z_>{!)Y>a0oj%Q*#GjZI8aa-be2F5cI#?yknjd~B+FnuENM+b zyKI%kW|rWbs6xDiLcESbyqL7WLV7ZFA=Y1U-KU=tJaf zrn^jHxIi5f{KWOTRL?a;yMyIB{s4#q`4E;!=@=gZe+o$K;8tT2yD@Q&u~@!TAqCpw z$O3iTzq@D&YNX9F*P!DXX*P4oQ*+rmSqNPI`q4S&f~ah`BbAyWy@=<{E5B-=ErKq_jE~X73dlj`l?I`3Z+ueu6Pu`JrLR?X9NUr$1mW>K^ z(@iwp)p4k|d~EM4^sw@2f&U@`a4T2{|! ztNbwKD&5Z{TjiJLRodP*uktg0S0%h&D$*G_!Kq|U-+t$Dtn-9B8Yuothtqb4S@zS$ zsikW5ASdb`=43$Bz;b57z0|mQaMI=)d9o*ur2LXtud0TF=OhDR6L%2^Ppm(sK6o-v zzNf=^YV%-x`jaRe={Jb_3?@tVC3R|2S)a6YIL9ly`MIeoA#wC&HjB) zxK9OpR9ciVLR+l{+DslG7>6Pt=$qI7a1(n}XxB(f9|bHe;R^`;UnO4Kes9KW#-7pf zdhD$;gV&F)ONQ6U+s`Uqm&ApqE(XsxRT|GWYFXO@@ec7bxKcB+@j`sd$~5syS(1}0 z?I&*&zgTNEz0!havzwpf`pzNlQyNnSiYk;Dh^Vn>?t$6MA%SMs$dbw8mR_@r$+?UW z)7YcS7_NfM#hYc98${)3LGo%I<}s={FQ3WsKa$N^z4Axlc#}&!1IB7oOX?C-0N0|Z ziz+HE12{6gC?*#7#K932)c_}t!4U+GYR%<`;D*v~{1-dH(c4gj_wX~f8a1uc+_%d` z*gscPA$SqZLn5XPStR#zTEu2r#1UWQHq#=JvVb+aEWW~$-~hL?UVXWF?suZ*xjmSB zXEZ*yJwA6|ZG3KQ*%10xg<@O5o}vnyLk6L7A?wyMS+F|yF8<$Y(+XKP$#H9GnO4Yp zc20>bWZlxOy~0eoT0$1E+RfdGxmA&PxE2D|rn+L0f25aC#*u09M!xH&iX!6*n!r?v z>^!|JRb--?WkdTyr){hx{nmspOVU4|HUden>o$|Lhb|R8UE?R|2OLRA+V#d5NZK;p zOw#TyGes28Z{nV=q$GWG_c%y;!*ny*EgrH+6BfzqN=nj??H-+^vuBv+KHh1byOe$t z{?4Q%J#+U6B)wzCmm=wb<6|S~-@QH>Nw0r>oFq-6yo*%K=&7#;G5;MDs z5y2*rCcDTE-I~fn9@;c)8kq3N?L(^+WYnZZ(tqID>~)i775iQT*kodF8c1_b<|aUy z=+setQg#}8+~Y3RTgomGr$5!I8CMma=BBRDvBS2-RfU5|ejuw=dsvCmm*SJU$smoq z_>IBHYkx|Pk(-lZR za;AnglPb%&#%_*r3`M$PtA&j{RFBct+RpWOLLA^Gi&9+4=OHwUY~M$FI*q>aWG7=% z3#GbRn1#L*L{w{LSuFjExe>J1O&UT@o$nwR;`Id^n`mll;~lVZ*yP8hKhP zx2sJ!D`4GX`g*RULhg=02JdY2RgOFdNsl!vHr!^(uCSvK$E$i|Bti>hh`7ofOSQ%X z#X{uO$Vf)7CTz7p)2;u?T8v{GoArL8h3?BFPEiek>_#(A!!Mj391MEZ*&Itrs>3Tn zuV@MSLNvT$(b}W*A|~D^b1G&1)LxzG;*;2~N{D z@L#dz7BUR-u108k|K%3FE6LdCw&;~JMYr2#eF3_yyW-5!?Z!WjgKl%IUp?J=iimDY z>5mvVHY(k&fBB2iEsVoa=r-qNgQ(7bd0cdR!SKmb(e2)y#uC*#Q`7A%(CwNT2Hj>? 
z7Tf)zq|!Y-Fqz){WrzUCmYIu)0D9ZvxGW}lv0sp%)$%gY>BmPF^K=3m@ zGB1v*6KSSqrP1%D4Kryibwv!hPo!Ab(AJA;Sl4E}3MKAEbWJ+F|H*{keRNYJVwwY1 z1$}c`WK;)tNuu}dj6LpkZ@`J~);c6B%UllK$Mx-h93}oZOO|X}Z?m9URwk`l=bnhm zybX8E8T`@Q^|nV*BG4bn<%oOQXUWJXBiiK-aS4Zp!F@)n{Ak{K+rxjs+;?kp(@$iF zsh$ZQqA%4oCBgVoUDJbM!@WD`&Kz{x#3j?g45Dm^I=>`f%{Sy%;i_}Q{lArcwKA)S zZj~asaiyiAdMY}gPP15AhEDnDn5Mia<&@XyQ^MK&ro|F{jNq?yotopB>6_g$0R~?IJQ*7NGB8!WTb0KbLCev4|nZ5}0%*}o7 zi}kwOc-5O^W1PqXa`9F7RxQ$u$J>Wnk8?Wc#{XsSS)iM$vQ27#O0ZFjX0(p=gMuic z$WKNrpMV0dYmSNq9#$N<8w38(2>9LOqFb`OVR;FK%z1VVse!q( z04(oBGeCCHE)E+)qlcBec89?LHGtvCx|0p1jb<3Zn`p;KUKm1cj9N0vprRo(Glz|i z3eGwsR0DX84gUUOv^s`9AR#8Ryup11tz`~dPo)8@RCUsu=)&PqFgW!R4q(g8JyByW zLUkLg#@Jxda%iHF?=0^#W&y9Ar$+gFZ0ED7fg`{G;R(jFFzqoc(9BtEO@~q&7P?J= z9{h-gGqmutt_L-LcI`QC^!;!CgKYG%p+Cn)zt<6WqaVG18~x_#^WSJJd5Tw!p<=62 z_s*(%FmwvjU6!u)+c|a}pu30yNpn#t7ReAwYYOV;3WEw&VQ}4lm|`k3r+NNjR-Ss& zF@66`VII23Mm%o3j1Gx4_y}9_9^ZQH@m0ZmriJu&j-W z(pIPn7Nug0ZnMX3EB6&8R1C}4%i^O+79WJN__qE+7Vi^1q;2ep9-g`U+~@%rsXo!e zybV7oJ)kTXhaNiCUkG}5?Y!vWmI&YOg7pS^co#^a;C`fsE%m&?l=X4xVc7a}r-w|W zhd@7_PLWrOCf^0UWeeBv;2&`fl}vOZSS8K#mYGL*DM4H>x7*3`w| z6{sHqc9 zyAus~D1Bxf;^spex3j2jlgoZ#s^9p9o zV;I)HOdy6HSmcToti^Jk1xsddfp>o$pVG>`2yn`e>-1B;L3NXD6g*N}hNSSGmecHF)%} z$=|O1xi)#_+8?>eqaRW>dGe z39WN;U8>3^HIU2)R-!h$88y7zP=*A@3_sD;>X{bsbDJW&?ltH%NpH^_g+7x0f2_aT_%$;ph*%I~^pL?oYHIUK;te^3P zxHINW*Uva#oAHB(_>A}AjGPA*;>HywuH4gpLDSQ|!X>aC0Mg^nQAgCv`P@mBmGIIj zKjBXLwDCbK65#N$f4N71>DZFPR>PNq3^+<&hkuv7!_1z7AK*g*TMKfW^V4DY$nMK= zvM-kqRl*l&>SSvlgpTqR)RDdNAYuUxUv{#EooI{qI$nlRE@E%Z#U-$BfdO7o2G~La z%z{2%yB|O*V^b#K=Q8M#=wt_P)?xEnc`w*hjnQHA{+JkSHjNS3)SWeAlVZf?bB7L_ zT@DQ!s{xz(IVv{078|gsaA?>ppQyvetjA_gSufZeAFadYo6#}Yd^uWR)6rqXX3Sy( zHXYM-*tAdAu&L|NW0S05)BdCZn@^@|*qoW5!)C!i9X6*+d%7zpfX(Q96`Na)*nBrl!{&B9HXCo!VRKt=*vuNG!)EHJ7;MIj64=Z= zW5j0TA_F#ar|GcCo2FrN_ZdAlg)SAFyhR3VuG3+&SdUGL9-I8ty#!MfW9%*WA#67p zHQ*K{q}XJfH;OViT8fZ8h!Rt+^_g+DuYG3V8(8bO=Cty=(laa5*mF-eJ?Hhkr`Bm{ zES&ESQu?GT-S$R%Z}WE7p}+m9hc1nzD>f@&FOsRu1SSzm#zL#UBDXvMgT{3ku-s`{ 
z6&}CT5_aNW{E4^|WFRuxl`o+fDc_u%tFOjzGZ(JHzhoU=;B%AxO~&WRob6C-VG(4Z zoP}7zsbCLmG&|7QJc0W^{-utNs9c4K z7rqTA;Xg;kA{-Hm5NyJ9r~*H^0;@nPT6jk*y2Qk>I~iip)Q5`cXHb_Qj4sxp&ve!y zH1KK)DavxNKXBAm4HLoqJa3a0fEan3!Dje-jVO4+))*I>C zkV_Y0)$`C3`O0^#Xc`wf!i_YtHr^a&@K70rI(VA-CutFO3X?r&=&s9h*Nw ziy~`9J_WAHb`fHR;=j~_!oAmCN0DU@r-*nTCL+tMsJz?%7*;r|Bc(tl+weSv%yl#0 zk#m~wXmY&y-ZRX1n{K|JoM1nWN3de6x~YyCNP0t4&=xHK@Nq;BuO3EQXowm&ZsF?1 z@wV`R=g43$v;a2<|F&b$Q>*hO%G--L4||(9!aoPlRdIoSrTn~shDp)iWtOKTb?nLC zrARHIzmiqVhV=@E!MGLf4yg%^=_ib)cGkaKXz#J*(K7KRXBKOu07gK$zpQZ?-o%j= zc+a&$XBaQmjE^pFyNB#5^_ewupRonv>rF#LT#1=muDWpu81XJu4Po9gs_Hf;zjw@X z%6hOjt;B>{;IeFV_OWISm}fOaYTtUTsWB}of z6~ZOjCt)6JCDA?l;%3(=nrL2%@Z!J`bn$8o-x!b-<|p(6)#l|4XKpT54Q`aE+8=rS zs*3F56zsYuJRl&kX)Ac~fKyb228|?~b&y?kocu2hBr$SWuY-#z{0qvNtco_khX-ph zJD01@U(FMJRRvE=JP8?Qvj3d75o z&gui%J+3Faf+Rt9%|^0wCJFs0z8B$<&`#uG9(Jt7jmjHNjnGOwypJBrdQuH!*JFI{ ztHnsAn*;WGR)!z5-`7!VJ=YbGI)GSTC0v2XwJn^%0~J_aF)9!0>GhndC=f8)eIR=u zPj$bRa-Jp?r%NIQJx>dWfQ0)6>@wvtto0u!AR|D2*;}#1w2!KOWp9vL0^f1bGnG?t z_%hn79rFB$R4e=Z2k?9}!w#ZN)=s}sd3UV*tvf4Zq`V&|y-0TNp!KU~#mkY7n~{#U zkCz?qgUlHW$v{yNu?hh@0J9ms)C7Ov_2l;g@a_$P>}J^^2dh+i-c0l?H6m&}^Z!<& z>skYQ3O*%Vtp1~c|cFSmm~Emq&u5Pm)r=Y+Cmc$?&g9=rVs$^zaP;EcLUId z4VR+74FsQ_$#-``eDZxKAujp4BR!Dsqmk~&_wZ7ce0MHAU-I3!^dFggKl$B1$+vs* zz3<^ZlCPGcO7YU%n5=VT@ACACl%AfBsu7Xpai1RAX?SWih>EG24@kP2KEtB;1d*JS z=2>JGg}Soa)NIFTo?LVR6#PS^h-d!Rxd>9iX-ZuoLP3gRy$Fl5Mck%HVlhdt^e7%l z@$_v0$q7e=%|V+G$p1W&(+ejVe|=ytp{B%ly4xrWg6ImlmRv!~X=`3Yp}9rNZcVjac^iM7YkKG+Zmka27QDeZ$~ zv57ZyMx*eJ?zlE|QdaM>)J}8-YJ(^F!bJy3mvUn19J}&xcXX?rRD65Xoe?+!0H9Nr z4v3G)<2n^}RQ)q&lLri@L$KQd{$Lfx-t%eya8@+4gaZL`LWHH^oSks;szxxYc>Z;eSUH}*L`|N3OYj)L!pDB9fw)n;es3q%u9iLUw8<3mP(%)}#vu$=mapBS zY;f@)+<@RIb%Z;*j*tQ)gwv#|U{&xGa)>Mye#n_cUY4KvoI-EIKE=pgCkU4;hmH9Y z_wZu)y)lOk{*&;!%AAw0xU07kmVqQcRk~r@o}yy8eLQTXk#yV_R~y+c$N`so-+GoN zpz&F;i@mp-Y(QncwDP&Un3AH~FsGGGH#B`-YdV60yk9o@_>C{Vr*H()zVOAGG$R4u zb8u0^s2VxK*3!F>h#>;-hE94BbG{WB*{VNw?ANKxYAWAf8 
zDisjQH)%MC`o@pZ#p%LwtY2{WZT_z@9kcjabd1tT1MtZ_tF!_1Aq0Ax3VNIBE`8Z) z<=BVk#lS5cj0bnkL4rFpG0JaJKh>Xt%SxPkI6agOJw0yFf@04@ExAu(aIRbM>>`Hq z$>DRhx@~D?8+a-Ky5i7YFOvTA&U^lM)soPby&(dTXCD~PIh zax@ilj>WO7o!r$SoqJ;ko19$=OAV$eGVr7SWAPdc9Pz-JleDXBOFPyxfE5#w$MMme z*9FaWeM55%Ic?uWJSbu+pcd$sA6C!}-+O*I-deBX*!EF(I37Kz!?AFBWyXL6D|6MeQpG((oj-g+=)-UTW>^D*L zTY~)t>iXSd=y&WbwO{iv?ALFh5|P}+Q{uU5al;i2gljVy&t5(}lgJUm>`q~Gpl>*d zJ-m>oc!|0#fn=j+!!pc}nHA{X8{>xF<~Xx}=4e^a^BkjSjzUZ2Cc@u^p$}e1BsyTL zGr{_1*^~sUr~g9uh$~THZ;cLE246gmR43bWhsoq=EkEI&>~Z5i_tC<&I^o84;)y72 zj~4yPQ_@;d!e)_=RO@W$kdo_JlIS@Q9k2n8jptzWFm$dMf%DL;n{o7AtT@kPs$nT3 zaBEU44mY*m}^mK;D3fS1tp|~6f+86?@ zm4lWY7qp6dw;15s&5q7SxE9~7gX@RcF>r0AHCy702iPk^y92CWe89Hl|NdWQ#eevF zCo90Z2^W6HY}g%*?UqdZGy8RwS=a^Ov8OyLuxAQ zjrS%(gEa3X_`7aM^`Z9CV`i_#K|Uo892rwt+UsS@&dLcUYXjzLtp{;AlmbJXs4=IN ze`CUELg?ygJ=D3Xa}|7DQMl?1=80M{FOc|m?1l+469bqr3Z;CKp#e&EWdU$X6QfQ{ zm8s`{yp!>;pR6lwG^_3{jbdsC{10N|J`jOJrNmGvqwV<(@?9-BS`Mpj>5O{q-@%uC zGugPm@Y`%6-1yspO8PX_4%x9onQD8;MoiuJP7;EwP8gHRhoLB}Tjka*W@`=qiykZ} zQ-i$Iu8QvIT$hW?`0e=03e_O1)gpEu_>@=8To@z&9j;mcAo*SbFh?n zLS+DD7a7rH60QCh<1OLl<771Ph~Pc8!@wY6=mZ=xII^6xBI~J5<{_>)FG4~4PECTl zPyw@@Tk??4dh`ios;%;(?Fb?z%8^~Xs7mqm^ft+pw1tG3w~G`*p+gidFSARYK+TPe zYLX`TUP@J@3~&_-X%;H6sj z)_jCuJ1m9l%YmgprS}fLJi1lM>OekkJJzDfF!13*XjsV)Io~1naSmKZL>Z~hf|XZe z7*`^Oamioe+h!mBSD004y%K)9W>)NF@KyZF;A8z|a4iB&=dhP6Vb3Q&>y=P2uFqG( zh44S+MlnS_&R`8&@4|Lk3VR1`;^ z@0n?5TAhS3jygsXCkUPi=%Kg~1api!gG2`hSO$#&j2h3!+uKt!E&4VjrCAP$DzHMt_NIlMIlHpIE_x4L_NFpRqE zyZ??qnC`A`eScNer@mEPRUKZ-fMr<1@6}R-lLX#xC-6SAHx46)KGQIwqmthJdL+WY zz@iFnpC%ZO_|+hxBP_NjL&8>l93;H;8bSj5|9=7rbN2qJkRbf03JL8gAi={Rq5VdX zU<_jGYL7vJbB_uM?WrK4^gm+01j+5wA>qr%ZUPCyGzKXk;rH(4Y(E}oz82!QM0g;#4RB||L3l5IDa5hRED5>*?$Jp{H zWj0ySC125H)|mQhVc){K%r@5@W!w)$wP{ne$z{(u^+3zwYW5{}c;3REA5}KvL5}_) zn?JgW11a@nFLhwQpPi38_|fHGv(x{xIQAJbWsje#NU!?aI1< zhPFk61IR}6RE$Yq0+UW;vQl$|jc$hVk<6sjIGT4!su?S&G5fOR27Abq4-p4B7b>w7 z!v!1k?tqbL$Y{menyHX;3>PC-;vzP3o^HT+sKi2w`Os#QB?t4u-4OK-QC+bu3O+hotID@-?l_`EWS*+!4@*vnK~);ASPv9p&}+dCWbOG 
zCaF%4>df%OaGv_t0DXx5*H#d0hM|6-9R+{26$RG|A=5UevR6yKj!eDC4qji;o|>!;X+K6vTq!%S_^V$&kf;@OAew8*=($=_(p2K5n3vHT@bEgVp0 zamb}8r?`zBt3rB9XqoBHr+!iW?Yy zbeU@Ss7v!dQ-e!V!+aAp26eZg#?=f}jmKR{{uf;{`Jd^L!v7UI)&EQnml+jN{m-;W ztp83`Vm%*Nho;X$c>`!zqfkwc%;LC^p;r|nmzX{BRx`D;r#39iU}%#$6E5gKI~kfW z{6#$&1h1Qg6AYlk735Hs9+6xY$z_#XHqsL^+LZc%*%=<6jsMg15uNe+e9EKbscHH= z&z|;b`dmbP?w(1XAM-cTr&rPE;tlorWOKYeUM5+vChEhnr-2GppSSV+^b?zB&}Y#N z^$95DhuN7v>f-C^J6zaK2dCLzDnulQnRE>idWC6ge{iL~ z>@cv;F;$Ijz?q*TZVb&Lg9C9(VgB$hY<>TrOThL0RafZsa;&fXC0D|yE3bv!*TRpF z(ubu$MFb08Fa7qAjyM%#G(t`_~25W`d zKF!F?n;BU(dJ{Xtik+L2?aXLLJ5eF9lP;do$kDKSH2nA#`rjT1Qy)xe-fuiH^TO&I z+DnP%fpMMF%p+AoWi_DKBBF-|Pad`8H(&D2CkAi)tCsg7AJ&pk-L9JH^&J=}^^_h4 zJP8^1K@BB6wKZ{pmYQC{(htVc{g3e+>GD4&a3KeO6rP84m>NvM|1OsQWQzPRv-~HM z@`rQ?exxf|pKcEM^~U(A8>f}9(0|lwIjkp~hxWwNCAIfB`x)I-BEMK0en=OW9Al#d zqN0E$&^Q$`VGEbH^H2vQ9UWebwLy?d1mZGFC1wK69!3YDx)=kLPp--?K4jdC;9%$c zyLpc7bL-?ibZ;5!GGYp87CI+jEQBxCWUrPrgY)m_}PO zdssSR-i-cO#oVPPC-_6npTY89ylMF?kEdhtIv;ozA#KlJD7`cGyRRe}=x)a!Vwg;3H}|DA zj{^nkT@ty6evrCJ7fuobFKuv?{}9GADib5gf-LaabXyG2*c#;^#DC7J4{hp@*P^9tp(QPNC@QBM1HoN_F24;Fj}v zf3UV#Z^?THU*hk>l{_3;t40b}w0^(LLfe<)8w0h9d+1IaixT&>eG(plz`+17s8zdL z5zw0~pfXXw#KX5GK<&%p1^iD|f`FG&2w=s{dg*U2V8tED@<8oWYmsi0J!8CbIzPt7URreDTo_fWMY zQtJCE61h*DYHGZ~r5$f!b1m4yo&ARMp2&i2P&gj8vLW+wB(hU>_meSdL`BkYueejd zLv%Nz%}14MIspiuw~C{2HHolQ8H)+=MJ%)mah?D+a5 z%l`fkw`SSHgHg+JuKzKXg)(k#*$=H?8Q*(D%bvC>mi_i?X4%RDW*L+#Zc$ab-9~G5 zrI*&hNG|P{&UC|dBHFy3YWfHc?VMIpWy!v(S9<4&ZqQ+y>9g&F4xzv6a$oqErN3`* zr@Gc8HTghi_%UHda?8xzZT(mK-vuG~`SZb@$_jMc(+zFS3T+U)pPSg&q5%bKA_DQ6 z4evuhoc7ZTQWe{)^+EL5dfX1BXobt{je{O-#ofJ9nGhSYfb@uNU?^M)Dh`%gKquwA zo56Bhvc}<3`(U~Ka^L02sBOf-lX-@2Fj3nK-apHNtD@NsY|u*C-=CnHa+r1an1F6l ztgab;&cb?}n&C;f@XCkm01*o(Fv_s9slHmJ<0m?Qmn~H%9Umbep`Z{>uqePW5$)fu zC4>_4p#vamN3yJF`~}{oe~IbJsJ=Ea+EvnhH%Wiy2I<+QQ^QyBP{6o*qb~7&$+Y_u ziTD2D6~jN$9{GpO!>5wd|1CbfH97sA`1C+rI^OqByPueLk8j_#dj-Q4Z$=iNE61IJ z&NruiiGl#{vAvvX{-;Y!Tgb zHB|a}Gts}2n*Pq2=#QkPpBuB!?xpS-2ovdx66o)_Dg9q1(Eo1DP3S|$9hF#rJhA!% 
z;r1IdWK;QnJI3GUrSi@A$HjL)E9N-Nxf8OQy|iLlR-ngJrjzPP-b-`VBvt_8G~-7x zxn&TNd8V?v{hd?(iKPv^G_NKY$$GS$#f3=Gm*y>WTN;)S(M&!hPRskBWp@{jHO!Uy zHHE#0O{8nY08eX`6$e#TcG5?WF-vyo#=Gr4zK6>$9`Cl`#j@fPZp;1h$WutaiD4udWv-%>8S&y>URfT!4T!u)fmBMqSZAv=zAsSNvY8)ik+7)RTz{Z)Cwq@SNm4RljP-e#se#Ea0#^bH*24~3eDWehS!9e2=H0E5*iPTNhGSiF&?-ASJ<)F4-Ht(cJ3ChK6uyBI zJ6fEl3&l3y5_n9vLhy?UoahA=nCupF?ud~%Pk}N1#)}qN?1aZ+Cph$J-x^ZWalG@X z6=}chCBf`aVeb?7M8wnC17J$Mu9E%`hMY}L>A0tKE+iK(lA1IA?_@$OL?!3xC+9zH z^nb_B9hpm>uosRU;j_os%fU_7tWI3EHc^~*+z%euz`>obfYb}+}Cs4)?+J9YL;Zi`f9WvW+uD#}M5QT?envPP&@7U#DrEY<-F@M0Eg<9R|EC1L{ChGvPq`OW0Ci{|*} ztFOLF97UNKMS5Qjj-D;eANZCLi1qS`BVT%`@WNp|>CO&GE}l4Y36~u*bW1K_Bx~GJ z#EVObBc0%wZ6QR>q%?06Fc4<|?^ z?$J$^UXLP|&K-ufF*cL9w3UiiVQ7MmEj(m?!b}_j;oD%ecrPf~*>udx`K)wr4#yQ9 zG(Q1FhpCtn1zxv6y4Whgt|9$+mtHjZd*N5`!LKmh#pAEGek+EiWx7@bQ(d;BRlwh6 z4$^3m%XO6}mNtF0&BwPa9+}&tE8QQ`Ke*Q4ttY+D{F=Ly<8}cy@`#she?77guEA>G ztP>@=k=dO?hVcuTcJq!FJs@SzMjZ!eS?R4NlX3|!m6+uQL29tbbyle^U9PrC)pmJ% zrnEgrE?FU!1Wq*xgq2L9)M~9h6|1(uF*y< z#S%+tVkvvI6e}|H2|X_JsRkkvU^#lT_cS|;Y8HaTdqK)8J z+Fp~yR>|Gbd|fb|On3H^fXY$N9qa6a&>D9*P* zH$ijRN9HTB0nh(91v~9TqD%S zAbSJgbpa&Pe^|_6k2SJ93BUtU0RN383BX$!cn1*1|2P28N1y|P;*NyfoB&UiOUt=2h z_X2+JW%ymK;jrU!g60IoqvGZhn0C4JX$o>1myEF7sHuap&)U z$IE9JNYl+R62x0NKt4IfZkIPO3i2i01S1z#_-XEDmTsfpH9j@mQmpR)~5y_>( z>(?U?HM`7teUd<2>G&@P|5@;#9slJ@f`5O}b*U`BHVDikM*#Om@#W|di*{jEE^t$K zkj<`J{S%hMbBjHZ2k9$qD51y5;gTeEIZ|!#Kr5gieFhIiKj6VHls<`<)uaG=wfStq z)0{l9iEng{TA`k@*B%uBk-$rvgsH^PgiMkWE7@+z{$KXK2P~>G?fc9hV9e1O6?7C- zR4f%M1T0)M26Yr&u~7uU#1UZ>2!R)7{8N(JKsg>Mx7_!&UANUQ+qKPn-Es}fuLj2z zC`u~V#nhH}m#ss?wo{T~k>|bd=Q%TT=0Cgde%JMV*Y$C6KWEN!-}mpipZ|Z(d7jgp z-nn(AWoNXyQ9hPDF9MCIyv9_8>K&aj2WFymsbpu<1^EPqZCU3~@i6+on8TX~a=64d z?(l|3+UP17o2$rctdZ=M&abzbZzo<}={lxvohV?Xcfd3J0>(GpAn-hn@d=i`0!M<~ zA`TXpl*oC4PegCM+ww%TI*0uXkS3t{t6_xpgqnLhMSx0&`t*oHNnTu zz8Qu(ZKm<%65i}=T->SW{&a=sVem7Q&5CcVjt4_zUqdtVP=dRe35q|@g@0yB2z>d~ zK=_|c36Fn$ihy4b3P0gmAp9o~zg*E&h(HJ61~r{f;wG5~*vIklzyf^A5RLm6yq*vw 
zVJB(Dy_lvE&_Qm4(k*a51-Anq@pDD>f^*E?x7vR%XeC)b_JYZdbk6gL!AOItv zfFRI$L&M0pcxo|^d`sSrgDteqt127(*4y@GD!sh$?8kq@Et1^iKjHx;3ZEqd!dD<) z>fw{Ei{v9XxO>s{wYlHK3LACt^r?u$YZ$eg^=vF|HNfr1f;N=u>k*m)M%d}Rp_=Xk z>7=g&1DEP;JFpt%^M_x0P$40R?p3vNIrFN3U>|V(Np+yR&(j!;A2y1pb4oK=?xMYK& zzplfo>uS+;V<)#q*THU$ZX(MpPGO{*?lebP-sf4nBa$^}*u`;qFcm)e${qf?E)I$U zwhiZZ-ythN57AR$`q#LQzQ4Co*Xr85H4Q71uJz2}rzFe^pom}HEVKQGnPFbIb(_sT zFKcbgOupWUS(V+v9lMKR_^ptK9C+~Nb}spEBo)|JOI;g!Cz^6=WU>5~DTP@b=K~EZ*V0 zGcHhgtxn(YK96;USH7&tPU?Rgmd_?AoO^W|2-8}4?AGFFMvF!BlzE!!v@VXF1+b^s z6#67Dmx?WQ3 z3p;n!#etTModBg?inAq?n*DK{yZ6nBa|~yHtnu7yF6d680$pa+bI*Lk^Ef420qvY+ zCro8yU@E&4rn1A>t?}t*Zc9X>nIn_g#qP_zP)tz#e|5JKgdFG-xEdQt2!lr@(sP?hrT;(5r zxXLCSZw&JvZ$#sG;`I;3;|cGvGn=VSyo^KLbvV{tNru5nsAflAxkJ8tO<&Evg$1K` zES+qUS3#0+S#fS247s?CpVC7_cGWo<(bhX_eq6 zoJC2z!V}Rw<9ams5xiEKthA_pl*i7GW0PPop3cr!rYCJl#YbDWdDGAzE$2;WmB3RGQcGBLf61O)o0*NP8*8LTP=J^L4!>zE+(-L z3OlHlN4w?oqM=|K)(Nk5gi%v_)YNV@d?lBi*l^V}j=YJh_IeYy-e!_n?qLj%(8qFW z-&w9&bfP?<(bs&*mk-<}y;b#O8WycwCHr-vGyR*|2==lSZVIE*KQYJS9G<-hB_8MK zYFV?MTbMv*!fZ>=cL19B%_5jyrGHvGJMlf)`__j+7*$*fjj576ZY0huvz1U;V3sHQ zaD|>@2!_V-7HdyDhkfs27}+=zo9E~TR8QlkKo(|I&DOepOPv$vVKotK6qHS{T&C-X zf-E}+aRrS=`M7jl-#wpw-}2c+_+&~E|9@T4syiz4ck&{#X6I&%|_3bk3y za0|Yo-@VEqi};PpC3_@ z0!{9L@9;wb^t$$3y@e|+g=*BcSRpuYb?j%88;g%Olnu51v z$@!EvYnH;ArL<*Klt=cK(EKjSIS(m3_lTxT* zp@B%V>NqSkeXK6pn(MT1j49W`Nlc@_Q8ip9g{SWYwx2!c$Ykx7F4Lj}U8<`8B~IbnDC>D-H9P*2!l&3|b;?mXH7R<1FU-m9-@3g< zp5DK;mdzbtTZZcTO(PLi-te($J>Zyd*=IGYbIMx4KIDu7j*88TgC;tcOY9(R912ba z!ATg&iMc+IpF-2PH z`We%@o9Adk`VQGe@Dry5Btw3#j=Z&;QGWU=v*FlbuN2#iak=tXk4=NS+`+3*BMwl z+5J$iZc2`WagKZh?#Y3~Va*C_q;-*6Sl-s~oh2Uj?d<>a$~%Wy3e#)xY%?OIjz&=K)NGt28R?8pAFm5-ULCue%? 
z?c~1wgy;9#Ig`qjQttAsWtvxN>2I*@NZ*Oed$ZF!cMjblPrn15;4SDhpp)ps?sMsQVf3R+yb}w3 zN#@MyNq{Ixg3{=i!AjWbIL}kJ0VpM{IJB6CWwnL3e8Q*PT(k$O@__7uRi^;!p4|-H z$_})yp_p_qPByo9U1EzN_7gHhY!4s5SKaYDLgN=NieHjn{N|AOrTD}TH&N)Z7c7hO ziC_{&FkX^GV+U0LMDbukQ6%wTwn?builW3!;`VRuxNQm)x2A22rTFxS42RS(5)r<3 z7e~%KDk#`!>6eJDFkeXgs2xq|vi{2&$=4eD?!*6t(P z{V)fOx|Kf)TW{0FuFyV0TTpJp9o`5zsk{ErS}|3l&IUo|>t`43}% z(ID;bjR{fyW5U?KHJtqiVuF_cn9%nBER6jcSNZp|f4Qgpd)q(zx=uJh`c|}vp>f2K zh;j6jL?n6|61|@YHfX%pqppxMm1zXui1vx#Qdb0Dh!#gM1$sf^{iMiS?T91shMusG z#Je9qqKC0RB!sMFy!)evq679vUt0tAN28+y^+$8|`t?UjcYl9<6b7Ia0vJX6dsk2Y z_+Ib6T9n^3&M%L`0F`3pev+*aZ2n&Fj#!+Z8sQ*lOn%R)aJQeu#Zyh8fBNJ2;tKhg zppt%iP=TSJeuljHR_JA*PyBuu86tj{tzqK#`;o!p_l7li{5;{;g$n;C;lkI22){CD z_@eT2XGFmAvovt|xjZ6J`FYM7qWt_~#Q!9HjU$4`Z)Aw_;~B4x9}#xEnz1+dc(q#{ zZoFFWQwMzGuMQFaws7^ws16?g2v&$cK9MkM=pw1Gj`{8lm{L^B`6NDOA9=q%uEDk) zC!1u+tWg(@?}xgj-{#3vd-@((ol;`Py_&5%O_^Bo0TevT!yL=sFhiVK=lo4Bn zH^YUS&kOCAIt9!bU+U!fo(u&;(^$c+fS(10Ui02Gr^qD%#8jf0lXjA4}EJ9ni(euPPJrNg+A!v~)YK*sjh^U>7FHRc|g!P^jMe zS{3{8#@)C1jGBp1+8%(P6STE)8*g3NxV!lw?UjM+ou6>{XuQpizwu*1TSqi}ynPs+ z=(fuoiJoot&hh#9CEzCXvkUdXy?@J2mg31lHo$?f$qb%zs0>v-3)^%eNa1*7N zCSBIL0h$_8^P}8l@Z(;CD#FdgkB?3zsYImb;M;F_#u7ESKy}(lPH?(qHFvurI2jk4 zFZf!<)^+pdE{>O>;jFovJR(JVA5);c%2IU-*!v}>Nu9ERH>>8%LrZt!bHXbOJ?`1U zOH6lJc&RCZEmS*Zfs=!8GjeShq7zJLxC!k5+)(dd{6Ka?Q15+i?{nV?-T-I52O6*` zIpl@u-++I^Rac363rlSd9@oK@<4QVjim>o9a~8#z?}0YWrd|t)5sm~lyO))$;FC(b zq~x3-q2XRmU&pqyMh@TfbETGlLXjuHvrE?E0T^ad5k}!7mJcb8t4O$>e33pNf_sI0 zdEFJ(i95V&Fh!Tp^VqDpvR2ibnbD0aG_nr(5&5Wdn(UkO%g|9~E7O-MYwlv_D`fMP z>8&+N%eiUkm(5?|#t%Dt&T%`ByR5)d1x)cJN3ia_a~{8apqD%3=^Z90mh5{@)o+JG zWpF0j*4a-=*4eq2sX`R@4dk~&!~Fy|t#CC%Xqzodw%NHy;pU{o4wbY6YlN);Qr^yO zAA-NlL}D}j9mSQv569m?m|KH&F_&sW)VO&h9P?O`3?!YhqxGI-%8jPyJagY*X8^qc zr=*4{OWUzWyN%vco?EN>Ow>-whr86d}aKSf*2-G zwgF-by}1kY&Z3^9=cI&m&%5TeVZAZQ@g6tbKv{{f+>w|!a8E@MEAECIo=hz%R*(z6Sv<5Y0uVI*tbMV=z+FW>KE?jnF`>odN7{IEK=+9FJ1B zItyZLw>qZi6oqzSM9>Q6Q18wx7&VAbSSnqMCoGi+?=%bV;C~A8rOQ%;ckm@(a(mCL 
zNV=zi;bRo!95EP%bmxy+9nc7M&_nwE#p=Trdv+!*Z z=A~Y8mA~Z7LZcNg4d8vlH5fLuM5JY5?T6QlPm8A4f?MfAgca~Iqv3HPPjAPu=L$aDHhv8gx~VJy7~mSI{Dk(A%% zN(5E?E9CrsCzE1>flf+WW8JkJ)&F8|JYtQ)U0Dw+QrfJmm8|X>bQ&;--+imnWUOFy z-0ZSS;}Y`ZvTOEYW2G6!n$SK{)yBeWRvD`su-3|l;$uwfj}_J&rFj9MOm$}Ps(H3Z z31gZvZ+)bcF~!ZR#!JjReWZ*r4F^xSM1iM_F^^#9DZp~eMTwRCa70oMHoLn#D&Lwt zFh|$7hTe+rBzP3QZbiNn6{Iv+VpQJZ7N9z?gz|uVy(+|mPf9eav}YKd9?tqpa3!# zz)gF+Dc#Zc`)I(dXMDx~11}iCc z>+Gd!u#u0M2BFa3T)h;V^d=4bwsXaw16-EeqDX)_2=@gq+)3fi-jskM?mC(yuB0$& z`CxjvjBJEzktej{7Y|4^Du-Syj(*G4+OQ9rL~G+7bbnfp?k{$s`@0?J{_kzzZru0N zqvX=Yj}6L)?xjrshot00b(QGzt?D&*E3J8i!3 z$Y|PrLD!>%QK|7f`& zPrp&q14RX8=c4S~LCChZmnNyCYGLLBlM}VIB0=2{xb?&*pSZh-yMwsfh-)HlHE}D5TT0v#aOt3G z1)WApc5K{Tr#K~LY^EXb<9G1Gyh?SVZ~*eb;>p zj8_zL;`tNmsaBA?=jmO%8t#TSJxlJI_LB`R5<}G@nN1He~+ZU$Y#U9mqW9ubGX^X;Xm7`fK98+ooN}tn}9$h0IQ5 zuJG5q`4upe5`dZNuXzQTdyqNCU-K+7yO89zMPxQ1a~Loi8}TUlo(E6kd%p1J5#E~w49tI&XJ%sle@EUz z?^$^Qug7|pl51zrd~)62^CNPt@40@`4{Yf!K+45^5laW)*N+kNcp_k03Ff7qizobb zj)LSQl+4G_4T5Cke+HJk<-peCUXYwENX`o)`2kAKM9iT?vX`HFl)Svd^}HQGh2%e~ zFyk@*Z+odYd-eoS!F1k)Dg$WG%eTEUp$V8vk+}yGV2?NRBrxwqW;`+Ha-yyXj;#q6BMZDSdf^02Q&Sq zLzviU!BO%~1c~Wc)Sipl-`xtTt#R4V5X6Qph|+&QpqvVh^6_f|%Hbd=Nr=*og|iY- zpnyH|n!jW(NJgUMNt9ejCF$XrfkdxzAi5S!8!LeRDM0Wj`CF81#iaQAO|MMv2j&4} z)?uC5;LIBi*84Pa$EXbeS-T@yfK ziJ);>(AXYC#(EsFk`U`m zpI_duLuNZR6^Fc;Sl%)KPe8E0k*UFa*$K?@nY`J_jryDMyn#B$dnW&eCe7*q zy4a;W=GLu7T@&h_xIrz8{Y_xY7NKM`wtV$BJnehHBG#u1%h6yJtmh$z&)fKMGR-Dl$j1P{1GJ$x$wmlr@iTl0~ zcqqGNv`4lngsfI{Uw%d6PA}J0EQ{1V-jgnU0U!Jb zAKW(wftwod!Ce?naaYBM#x30u9(SJz_mF^lJPdC4AaJ`yxP1byd{SuKg1Ydyb0>Ll zmrbI$Ys29FVGy_nM7T!<+*4t2bDju~+bhBy5O9@~L*srp2;7B}J-DkTQ{0j;xXX5i z$2}y%Jucv$4TIY<2;4pqu6zo`jh+%3SGOxX?y@N!+_eI3MHpP$AaIY0a8C)i=fmJW zv^zZRfCyKaKyl*|LgStp1n#N?4{nKoYYKz=;FIBTkBe~63b^fIa9U0g~6R(A0AhEw+A=wZi+ke?$EeL2Z386 z!ZiuF^RNrYQ3;2sEr`}!bo+eNt71l-;* zxYMlRaTBI{a8svK+=bJ_)Z;fB492U&hv#d{+tHM7kEU4fN7KjspG4EA_E)3n ze2I3)CWD{nFmQ{&l^76!L_jKJ53+PW}0V?}d6 z#l4|r*N^eYriPK7BTA3;VPwY%c0VviRM*4k>=dPoEsU&9So>%bt$p-{(fL4_Pk%a= 
zmK(gf=DG3rh1NY0>scqk>*-jJ?%FWAE}h3>J+>YUqqAGE(K?ov{htWe7sh(lY1+eJ zJU@(_C#It{EPFSE5GGT>*Hz-lcel0`Qn- z%5#NzIEl7f$3@{a1<>mfFfRq*bqJVA@6&4^;6%YSdX`k31hfpORlqNW-}OM4zuHW1 zteNP%E!gM7iwD|&R=^Kn!+yaA&3vg^DdI&VmW}e#=2Jbyk_!|s@SN|_>&aQv_71rA z0x6QE>V#x}+e_yMu)T?5bj|m-y;_J@(gJ_mlLQ;|0eICyywpP4>XWH`)o|ShbSyb= z+Ey>{w{O=OiqXHo-#$i2+f@q#lqbQyJAz+_aCO~}l1oGKugE>+za4(N{4a^*eitEC zUyAU?=@jvQt&pnUQb@hZ?%EVucExqY_4j7lSL;c7&6d(}l0XMjq)b+-l<5aLc|*cD zfs*kosHj2U-ubAIJ%$zX>ux>RTWW)B%*#4}2gekp4FCfz;mtAE*OJ z|Gk8<0Xc!<2LKP~Fi-~`hxmhp(E{xPvH>x701s$A&|#o1pm-kcfer&1ATB$Aeg^a= z&>w&}pznc3KOkk2fwFxpiy(A%uFC1(0ZU9K>t1mb4CJVGa1yg2quz= zVie2}<}PL^l&N9Na7M+bnGwuLW)#%X7-lpxhS4x%nQ=@k6UU5aCNLABTux#pGgFuZ zW-4+XvWFACd!k`$*T688PtVz_FG!MhxCqO^LT$1(SbHbN3vEv@NHN~VqGF>-i$WCv^U|;g1-!D_s~j5^)R;&Y zQ(~+znmjW8c2}A=8>-DF?Lzon<<5Qs3|?xefuRyLNWK7OwYjp`XQNd@y% z!T&4%?prHmmI7@Ass}mu@~=^voIve+~8P z5_oIjej`u~&|^TUKr?_21Air4_rrA$P!|0D3aMK4f8T^X8CdQ*Dwq#%`oKq4A=`Y z^MUmYa_AsmJoW*me*Qwhd=~jtW|N@`rd~yrn~XmEi4hslj24+Um6{ChK}M}hC<{WU zaM?@RQ@2@EojK~V*_#V`t&$AGVxy^ei%ILlpRXg`#}?BTW3iUR?|mw}v8Vz%8$W3X z3WWZP%9v`*K}DEfN;60YsfB?^K~YVmt9S57%#vhe%r7hP>i59lCCD>lO#QpXB@mw_ z7}r-ZuKxUN8ebKp^peUlZ{C7TJ@lk#?dEEjYHAJ30q74jCYvg%Hqc%gq(dp8Z4X9d zY%MD>Rzl2*v3C>2Y$0YuT5vwZ9R8E^nJ9N!$dw|KuVhZ<5`CtAzW2Bw8S*(V6~+&1 znAad}D~v@oTJHfMBwl_Ni56p*Wo$BoAV20JNA2f*%)a%XxW5tL)XgfngTJ4GI-Ag`tZYCR`<|EHW9HNrI1ODp6cjy`5nu`}vm`p$b)P zXC#ba!M1IyN^7dN>Re3R+ipPbMKwj6jafyNB^Ab+6oy&IY&LE#F0El09r?An8hT6e z3&U89zc$0Hp2@;rkOvGi)n(y-xAz`URV3TLa34Sg1VJ%iLd<{&)j4+~Nfc2O6N&-? 
z3QCe7!33Doh#51O9TS)X3Mx2eF*8QQtf;6sW_`PlVVrdDUF&=Qb>DmIV=d0VEC7(P}*{36m+4tTaR8{dv@~i_&#zGgBHO1M1%&$ z25TLIVFyOX=v$4Bi3>|ejDw!ToB%{d$6(m6SXgpw6plT{2FJk`R*6N1YVRK~aFKW{ z)&L99S0;v8YgdE;qhrvwkoeeW%;HPGBVxXGCvZf3Y@D`e7Z{J#!M+41Bqn-<_HBef zw@$$c!O@tJwiOZ+uYJ1yWh)%3u5D=##cF6n64W3ki#W87jjDN5;g&X;09G z2D&{QYluaC9VMsDUqphIEExa$c3>FR0Sk-|3lAI|5vfh7HrWlKg_!8U5knx=0;7Y0 zbCLpsMMJ(| zAWddh>-h~Qz8%7*8g`2Rx)T%U85RsR1SiAdwLtnBbx#b7OYseh8yph{uo)8O76Wq( ztvC8>>zzU|n;(vT84Mi69&1PaQXt|{Js~_D;-QCO+7>p%u+#T6@#_LP;RUx~N(r#GzD7?$?uMi?L{c-P76`8%SP$6hgmuHZ!hR=s^@MHhXTQ<2Pg;9Twf{C6>znxPUH#gQ_POz& zokgXAc84F9fW=`ESTr^ST8jX67zQmzV}mh(RqUkx8iY~s%j@4=wQ*owu=;S^4X%m8 zVzFSjJ_ULc40~bvwtb+b5NJ0Bi-+IA@C;l1)p2mO_8JUFB47((4hx5Ky6?|+hu?Vp zy%BKj?^pd@o168m+QO_P!Zq5p+VgFpcM;ei{ZrzgU$!t;1l&Ie81wh%^oHL!XzBO% zaTuKwT*<>L38VZnYA5)$g?khA_a*4JQ!oMEM?i0qpp9YBN-S(^KMJskPCtJM`u3xt z50O92OcY$J-50BW8q)X72@8d5z9f4Z?CGHYP%-*DwKhKS%MOW2h+_Eth-iv*NFrjsYhb z>3bfh&*ykJAN+^>Gcn3F`b&XA!E66Imk}~XX2=rNMb@YZvPG?s9dbknxsdLp7a2r` zlHp_|8A~RRndDqDi`-4-lZ9jvd6Ya!mXH_7QnHM^Lp~rY$V#$`d_#U9G0KQCrpzcy zsxD;Cm|Qq7X_dMG#*VxS!fB$Myt>| zl#7m{QgjVfph{GQ-k=W%!;Nrb+zhwGt#O1?co#eX55i;d1Uwl}!?W;p_(nV*FT{)R zqxea@1iyfn;@9vp{0?4$8xeI0Yr>9jBoM(7zC;iaN+b};#3&+-m`r37xy1Ibk-0!r z5S4`S_lP(`6toxw!Mslm7JD#1+u41>dyV;}cNw$Q&z?QPt*gNb8 z){L{{Y`In(#c`a(xp3~B7uSXJ<@$25TpBl?o6BW!>$r_vK3B*c<;u9DavQ}{307t* ztCiDAQ`JKqqGqZ$RRAwc`|s*%AbaG8^3XkGi33K%K4mdZ?}={&Pw%VNluWl zOu%BOf6)fGvMCsq+#h+^U;LBohHkPTr3qo`TbQfe)=iz=or zQnz3fuP8&>obFGDKtwH=dW;+6&jd4Z%ot_{Q_S38o-;LAbG8%Pja|e3!ZznzxpZy; zw-iQjntRIC;%#|59`PKn@B{e-K9!%vZ{SbxkNJ;$HNjk{Co~pB!B6lP;)F56B4Mv^ zMW_&}gb#v|Xev60uA-M13>h&+TrVCIuZp+D$Kp%ztyo{Omkz)Q%?qV4JY5SVuMh>NJ%d z3mJZ!eGgHr%{AnjbM~AYRAN^ygd4`ib1S(G+(C%w3(l4oc$N3%1NcGwP<|pmjbFmA z<2OTG@A03Z7TOD5LcB0Tm@TXnwg@|f{lc$M8_$IIf=w)|bejjfm_zLw!e{rlhTRbIF z(r4)>`H3cql_4X}kPnt}G9yC?!5i|62L@kjUz{0(kS*b*e65+TG)VjHoC zCE0yNkypO!Dlm{MP{0d$K{<|vDmmC6PsPdTIVTP>{pA_QuTp~l>psAjnqL6kPiw$!_Ww{0Bu4$P$7DU3~&>?DR|aCz+bB? 
zuEA*D;nfJ55Q!edI-)BeK`J?myid-cwo!Yi3&0{TC1Z#t45MUSWF z(Mw^Li|GGh6nEh^){G~<}>-l{5JkD z)Z8`x4qw675(L3j=r05bQ-oY$r%(hH_egjl7>YGTE0Gpm#J*y<7%Qej&CL?Gi8saj zA|_RnoFq@FKj3kmbW5s~tYv4}MeYg_SR20(z!~f&R$%>4j1g62su$mJmW$$|0+c>(CW!}3YccW-1XrMcpv1SlcU z*ZIl{nAa=HZKb+eN9BR7gP_JW<({eeVRAnCmNcUpQ0~CPvjDgAA@{1$HEBC~4eh~r zGn<)%%t_`g;$EGLYqGj&mbAG+vp7b052t)1D58H z2SK;jpjuGBQa34E`YBB@0@H~N=6oPty@ln%IiZ=TimBoRXx~-p1=&?5x$A4XT<)t3 zgluZBZcy{I_HA|8HF#@`(vThQglFQv0;+isy9jg8f9_C!3qkRnBp;D4NGEUw9-#9L zX^M8G{plI>e0nXtlRi#grpuuhAL*J*W5$jl7>Vi3_%Z{S3CuL+3B#}oy8`NJC^s6a z=`1j>32)DL9PDlgJUjjBg}t!3QlO*OMH0tYYdc z^^Rg_6|gRV9tK!9g`Pt%r#I7ufOXda?W$-a#+0ecv}154mp{sv@o!*8?Lfu!6=H?S zLbkA5I4!h=eCPwSIZ0eD?h?-c@_ZH>NX}3@Z&l1!hxG)UHbHF=4*D$!Z9xU#J?^8A zs6HNv&%)2)ukqG|8?gesfeB>AXmT<6j`X5}LF2cjok5*;qk93{h0>|Oc5CRv^m)*! zPw1Dl3Dbz-m@Z5Y(Cg96SY{E_&354WtBfHia%9=tuiH|%`P>>n;zD5hYupn+<7Rwoz8&9z?*Ubo&QIc(0#aN8&HI>t$-n0f zK>;-v90YHnyD&@`C5(ss+9;e6%0d4O6qCf=;sNoNSS8kyVx=k4e(9uiN2&lc)5&$@ zMsh1a4+3@zlm%iyxPUjVgr zd|hCstNbm#itj4y6AlUw1XIwj-Na$QI=iL2(n~p18LH%frn#o|GggOLL+pLPnOs9& z_;`E`z5~CAf52-Itw7CoBgTT7T}rGZHiJ&T1fIQu=nF1zCs|D11$?vtOz@=wshQMV zY7=nu0dRWv!08#$Ui1KZHocK90vGp;ZqD#bH)ar%#7xn1P7Y}F9n5JMLwnX0cz71X z{UCU+>YNSut&Us(7tM_Tmo<^gEMLyQ;A;rD zzykAb7dndlfgQ$*iy*%BK*ti2j}$IVlx|C}!S^?o8Hi?{Tnu{Vp=_kIP{x9mS)%NM zimX(8)L@9?1T`BXs4bA`uk#sPaBBF=!` zdIlPB3weO7MR|euT|}*?-clsp9TaXlJ&j&Qf1<7QJZlNcmSzXAA>i3ALFBBt?l9KL zpkPk|-}MFjp208Vw?ZA4!??^r!@CMy0Ut*T3xNOXi<}rFMvA#&rC0};&r@;$t>7si zkT1wrz?0~(jhOZ~VExf}v1=hOVK!YC2@{;Pt+y_$h08v^ef3c zP_B)rKGY;oSUBC49u2OlkUj;dZpO$^yITSKo--b79Gl5jvgY80qq(u%bg0M^+y|({ z;rvuS7xXnQbQ6Y&OF`NEA{I!$N+wW40wCgac^%C73;Cl=DW{YQsF_hJmaW4cXlret z1C4PK55%YA^YKQ6Cy`3r2JUH0dVm`qP2M6sC_gHM3a6r}k<>EkFjWGq_muhs8D>J) z2h0hlhl9eMPcNZYK)+vu&NT%$afz7-?(Y}y^A)TO*9UlKEw>9Q9}+SSknm9*kXAr_~DnpoP~him5QZGt1{?? 
zZfY+z5IlaAnh0Kag1SImsU8GJd{(`#-d3NeuhjS87j)QUs4qigj;v5S=%PCL?sJ_DwMV=;t^9n6elrZe-PVvjQwj44~2b%y+P zW8v)G-$g~wS#sM*%w$vShrxZ&JP$cycO)JM5DTz$SNF9Jq)45kMkG# z=e&hbU!XuOdkM+Hd|{~`r_O*fvJqR0ZenM#mlzMMze3y~o`$GA5)Gv$P|Kn;P>PpQ zr8&|@>5z0ndMX*prhwIMa&I|Yo+xjW-^neMVaiEMQ6Hmjl@dBt0W1>EhOpE~!^)q<)a^fAJu?rbW4kJeZ8fQVJ z901fSAuB)`np1Ttd&-CE2YH!DO{F%2t35+qq8?MV=q7XxbJus~V@HE>lDA^Y#h zW{LwS=4r|rrKV~Rj2En?sI%1d>M!a;y${h=%eA?6fPM@EkGBKWz+2<-_;twM^MnkQ z%u-#c1=MbE#&+O|H`Bj>GHA$52UkyVfx=9v!3v?N>?4oX{!g4ro&N6$2ciU21{y^3 zh6TV$Y8kXV7JI9oRgK$$8{vavRKy0cA?bR7a{86$?7+3H6ye0L)sC$zmkX^D{up-(z2~W?UW4 z8uGgp=fENGg#EZ+aLy^*IPNjzw=J;JP*AbsA-mTCYVYAsg3>SJ?|~b90Xpn6UmY;K zE}(b|!5;9D7di+<~Vh!JtYKVYcT9tA)+N0pX}{R!G)6!y?G}%VL@M3RGo#JsSB) zy+K`$m6l1Hq@B`XKvpZciQH1Q2W%2SoeY;{WdNwmG-ZnN6EOP@WiR0DE#;Hqt@;8! z#i<$UWc4R?uDS@=Z4J1`t?F)IxgzzrTB2T3uc_tW)}N|X+B%-40oF>tK3yG|p*qMK z*`QVsSA-b->Vp^Rih2TqX?`verJ&EK3C`oaA=a7rCj2;lACygVP&NIblI9Uxfg3A8 zF}49D3Ia4=MDEhB1J$D10Z)ce8Pry)I*sUl;NWM|yP#Sg(KVP>;MzAZN15873Fm6`1j*UJH-neVW<+ROja_LT%|}ksayb*e*-w* zM3q!`wXd42rm55Q>-mM+x`wR*wo0e1by_1^z>-iDi;_X%<)fp3D3z!#Zj0ODUZC`n z_1IE`mw+RGgEt}U2t@c23Hnv{b;L=clqdtdZbc%JBm0u0fQ>WBT$rIU@&Rc~IZ_>+%BE70b+JvYB--e-+Fd z1_Ood&ib-_L0vD=uXL2KRp70SL0fm>`f@>_u~u=p+-~svA2?&)l1F?`P+8%?ksAS< z3L%?~1WUmhW;#F!2S2QUt!M4Agn$Up{4V#3_iPFCGe!Oane%g0I3i;@k1v zKYY$|0bF?{h)JNM)|2dcT*_N;V zbCpmMb1}d|^?m*OPxtzQH(!EwqYJ1Cs?7zSZB1~{b{T=7jac#x*^^3x{I~$w;Rt!L ziY}uOlLlE($UM-a8K(cful9G?OYpyZviSf1{9o{s*%Dq13^0_@a%^=IhlySjUz_V{ z7-VO(jDN+D`25X=*%I*r>+~9{J7Rba< zhh-bBFi69|_W$u-BU|Bt&ma@Fv$lZO7Qkv`=t+ctrG&gqUdD64lZU*08G4BP=v3COQlZO38d|mWb?abDARI?F4b0)4z-a993BI(Vk+A2e`XW|uI( z*Zqc;6&%PZ-S_ELRv*@nEk6AGm(|-{2bm2^oYQvn{7E5Y*|Ytvt~Gb}Nptg@bL>#g z$ip#xhxH{ahV6TMu3SeIJ(C;u&u)=Fc5-Mw{-xT658hc{xpQ zJIwcRY*Y}{_Q-|f*6jQRpNkvxd-1%>!EN!=+kJZTX!A6u!V?P)lvKOdKVaXrkryw! 
zbgS)Lw#4fG=j_tCUZ0wDfBK-i<4)3c=F-7)+J-kRtTtj?;^k@8!ilaMZsYq7$Cta; z+xI;5;r-;kSG#q2+H&-nS!GS}y3cP7sfgU*u6A45WS{GATQAN)i}pSA$#+`du+jh$ zRkt!jHwI=T6*YsoY}(q$8rA=v&8CL+svE<0wQ4mC4N+6=f?7uPjq27muQ`5M$~&>J zJTfZFwEeBrq-=ujtUbzSYiW{M>+LTkMEhcRbNx9_p!G z(9FmOd81C*9kV?qx`ihs#0t*NNl8ggQQscx6cQ8V96Kx`CN|zVBreh!`U6=EISlEm zWpAMN(jKwx5$6QwQSi4h>vTrFkRR&)Z66s-^!T?AfMLb|n@1!dGp*xRI^Aa@1BCsl zm<=-wbQonYv2?iQlPw8b9FAGm+kEN78+!SI4STN7FsbVlv2$tuiBBttex5g@x&~!W zFwNWFZ~$&bp9>Cd`D((ML9UkuobxZRx_tU-*=^4&{x{_rEsDD^ZK_2TJX#afwpZuc zxvr&`t`fzUXB!+JPaIn`$|j3{)7;{&jk^1|;cZv<^P^%K_qsWI;oLjJeZ!+_?XYRP zIb%z&y9*l5p18TKWzcWI#qXCTBra%}+x5lh$W?Rwl7g?k8@|87|5nA`+Hc)HtQ$4q zv3IQ7vSm%4+~uEFhCEBP9Np4q{OzrDgUZ+ak_SgF{nY(<@!4ob_j0yX`<5H(`Rp2a zVAE5_$bJ_Sw)A$oK0BXIwom#{d}sefUAJu6J~-lbm#rr{H*DK|djxmq`PJ<0Iio5o zWBr?VUN^!^^m-$7@K-u8(CEMb=m2VNL*=_T%O*~nN~3?yu=Jt!zS!Bf`Eu9B4#OVy^Zj{ub*_<7 zr@dnjo7r4vhaIU?TCaQU0Q?F7A4Mig`-tq%m`YT19SI)@jMZ@PX%UF814M z>Uem|uj|`wPBqzbdA|Sgphndm4j%D<=+UN*^ZgCRzGt?%?;22Av+y=7eJ#^_Kwc};eRMf>wEbKcy(QN#RVK+5V)_V>*XWLqU2m@7Yff4f~j%Vs{k z?N+45mDLe;bsAXuz3Kvm~%_h9A9(XK=FF(P*_)fRT)Td7MR2pBx9mDC4V+xwdcgVS-b#<69evbR zhLvYi`#WE3c{_H1MR(7nZEvt!`5O%~0)gk#kcrGk{cBo08tK()=_;f;RH~e(7qW|(OAFk&PEOB5(pCthWNp^P6pF|zDo9Q^yh3$6m)&%z{MJ8Q}FRXTRPMG&P z`c1NJUq`C}LGuxc?8vNhWtS#TulbD*p8y>`M3&V}zOrCF zoe@+a_5=U@Eg3cZenVXYBQqOQ%nwV%24QYk2UJtL)51t=W+Jlqj{Vh;A-w&8=Hl85 z26(31p1Re#-g>n57yN9?DHQ7Wh30^XP`7NKZ10JkzOoRo)n8di&vCJFF`mK7heJ~>_pMrA&1~_L@OR-(^RgXOhhcp?g)}pXi0+p)|IWCo!kI5U+uisemh7jW zMYrC3cT3wRCD&ip+Op7YL6%P~x#kPwDVLfZCalXVi`$>>zifLa-t>s+p(C3%-`)D_ z)q0b=d-Uaq;cXj@&8uqr=7ytS8IEfF2ZVIE-M_|? 
zNlNvk>SwFxB{r*jAUJqO<+US?jSk%G@JlVbr#Hf!D;E5g6EMFNyOiU(-~Z);)h6Cv zmJ8FGm0`lS2UET}IA15X^s(8dM zOtJRI0R_3X`r|^t= z!%;@PD6oGhecpeYxc<=r{$MP#(>H4X^fi%93~{p`O)B){_r02UE%f8rddT_r8;$T5 zMz+7jY~D=7)Oj_!*L&~SMW1gj+y0=M>xb}yxlt=AhaHYLk?Rk)o8c59vg@YR`RjMW za~h>t|5Hu>iHVIebeL7r<+c-oURRIq<~)C}>G~N%1FTOE%{jzgPg%9Z#>IEv)#vrs z<~X-VJ{i!^VRZMV-Yv@PRlkUtXx{a%r;dE^Q}6~#=>%>nIvo3PtfQaRxfnO28M7-c z?V6O{c$0OlSL6DH7)9@V^w_M??uW@m_U{fn8<}sGX26m6ff z=3IgQsg=%q&Si!yd)MIL$4M>P3=DdGv3}ao9FK+;tJz$OF?;&-EA4*zr-Bx*KYC6t zEB7C0KHiXNi3v}du0Sm-hICxj=JkvGkX0$`?pL<7@bqdk>aIF*$eB6wJNQ=1i=29) zhvkY^<-g@`&YluC>FloG_7&efq;p?+D<`AQdz4Y<_4mm&G}Pf4b?(El+kaS_sB-}h zl^Ey@|GGAjq4Td^^II@2bw=44Ixm=8cR1k!+5ENX@&2ZUy1!YRF#gTJ<l&f~Nb$#DIUQJR6S zDm%S&Rr*Dge(ryyX4<26Urc4I-_Li73yTbn4*k(wc8SpbA+Auo4Qln}@}@n*gCoP@ zZF~Cp*?RbO72H9Uwx?V@-P?P3xOTu>qn2MvvB~evYJc|B-Y+3ID%RF7EN(vEs&`14`OvKlb8Co^{+a7;~4)k?l2H`Z$ z5*(pln9(4G_`3W5WSsu(y3Y%T6YKWpC$1kO&1>U3CwKpp&$(sePnrFCTumO3*KLQj z+xRzEGTt8-K6<7dueE37omHovlCy-TAAU0aFmwVUH{`au$r}xbjj32<_B>_#FY}r# zF|qH_-0p0}63>B)muBd$t@rOga$DH~rXaXiN%q9dz0cOanE5=g{7Stz_H;7;;L_S= zsG;XJGA*lnKNP1kj6d*G$lWWqqV}1!>9lyr#}xy0Gg^F#8T7f^vCobjRxMqPA8cjC z_9!vEx!DM%b-FRHgN>`>)?T|->YP1xFums&xtJX4(e!A;bC>MA_gm95&K&Q)V0&Ke z0TxNO9Cn|HICX1fd#a-U;|D{jzRT>l3_X_kP3Sg;8ENo?tZi?nIz~HXe)NbP_Ueqe z*r8gTBSn8urx!znfT?)qOSh4uJR292;r%8=^+LVBsMBu92YF|A%I^4mjpRR5rw~7g z9we-$9FcyF1lKFmem^KvZIQL}56V>e&y?vuwEgGx7ppHTUuMYk`)Zf*Ut}TVO^?)@ zEk>8x4K+0#SfofRyvrMv?Vjouc_6a2N!v}A4o2k9A{{eF4abwuOwH`3VtF+KM%G%R z^UpL%d;03rz>x!n50t$NM>vK?oZI$tx#z>gp6j#j`(>`#aQx1*L%SwbluW#PdZx|M z@?*!1?!4Z)pl7YRr?*s(?(x2Ttv=H*>)Ss~=M3)C!^1!PynpA1^0n3J;G!X)Qo6X^ z`&jy5np5yaqJ6H;ZPw(RnD82<{W#yGCqFmY<~U+b*gmYk|I^|InYvtoYHBsTzioE$ zxsn_2He4g?G#k;|!EQtS6PMZyX+C<+5%+tQKc9ToDZV(X(*L)a%eFkNo4sSq{SNb& zCA2z`cQI`b%BT*q$jJZOqJ&AhgMLxM;Q!b~2|WP9E2}L|lMGF307!q?rL^7uGCcn~ zq*~w9JU^-SRKd35gVlmp@5V;V9hT73^5SY!EMvm^C!@l>9RrIG-wi%AVwWcMVP5j+ zRmb#G1+9XJzCoTZ`gWX1jJHV$9x^L1y=UH;IbCKSELt<*Y_L_d-H}FbC#LPY8T`TX 
zVB4mxhSZ5`)O_j|@6ai}gYpJ^Oll@q_DS5hf2ymGOZT%DOR6hF#=aU{XgJ?}%(D{j zkI{D*9nV|$BGthZue{Q=`mCLkjp+x;O>!SDJSE+CcHg_U>RRajRl0+=CUcTceaJ67 z=00Fx{Uy3;-CFp3L#lf)Bjx|nlK(9<{co}47k}%0V&;Pt0fWNN%5yXOUp~Fay6FEa z=zX`$UxA5fPmuq+K<^*g1}mPex8m9`!PKPBM(H#E;KtLD{DT|E|M6+Q-krW!aW`K- z=g_dh!HJOx|A80x{eu_x`%k@ijeC>3o0lIc%Tm*a8_G*p<}{Dl_IS<7SNMT5)ymg@ zde%8CNH~nN)P@SnGPP({s zSbQy@!RLUt0~^p;t$KSWJae@?x}{+3k+Cn>(f!mWFBYbBp49`}!dWzU<(!fp_Url` z!|wcY`nTh$Sd2GvZ`DFN^TcrV{F(mt`8z)v-HKgKbZ+~x*B0kbJMaEP?i&=i=xQIA zhtE$h9Gp4*^Uy(KuNQZ&Ijc=MU(vAh@Q|e=n@1g9vT}v=ZtUp!Lo<`tM4h{}3v)$49ZfM2ps)8{g{;e%io%lW@Rt*2S`c#W4 z_4fv^t$kuqX|0Zn(uzw?tu1*rdR*l#ak$CN0SQ?xjDxS_`r5?oe|`(Qc^|K_YL4(a ztYh+nH|xAMyuJ}PCU{s(()#ohYfU!t`znr{D9zX&cdBXC?g=`ZwD*mtIP~PV_P=t} zyw}3iN%5lby{z_+W|Wamb?jdiI`?eb{Pod|U9Z^f&Rc(V{?@gwD~_FM*ukf4;#-GB zD{Bu%=l3NL-Zw)=zFIidx$!T}xA^bae9c{2;5+2%!USY1D51e@;VRENBIT;3EcMN3B zi-Sqb`G416tY~QpW2{lVTDw|?28|5sVL#t3u-{?2VRBB5cBhkS9&mZpZ=PXx`Z$!H zwrb3OnYex^)N0i*gKF5Fti$_J3u}vS#t^PS1mExb0s89!lz@caU8s=`cSH`z^lJke zqyzR~ATGvz-Kg#S=bJ}2nlN|rt-ViorOxtgzjl(1L%k>0(&qUd*mU)L6*_jLkIL2h zX?IaagI>o{8?Fg#J>&WQ5jCQIwj$U)OM7phJ++|ZM)kbYb;EvA3#YAUXyLVU{)zPG z&v)dUdfe{k$|p1Cq`eq;EXVHGQS^`@RwDjpmAB#3>#BtmKGmeGWXPrs`B7-jjzvZQ7}DW_!O!8 z%b6(s-46{IIOFM$Q2Kq8eg~!RH?r-n>ZIw9;uDs|CUkSmj_U7yT1fcx>}bD!GZ)0y z8@TBPO3(T~pbq|i;$ZM5BcB{>=hXEe`M7HCo|!v04_e;#d4fNiw=k{hS%V#|{>*gR z^4EQ?S;>y*fYQBV&*L4JM0Gp2uS47XgG&Z+#YMY%Ufu4tY-4k@wsgbA=Sd!$J&h}g zTPYjrxQ1+9@;PZ&%z)y$_7-y1$qOSQyrx>Q?OXVc>NWiHN{ji)cpauV++FvVXF%VgDL~pDCNS8#XBHWZu*@$^C@+a;IqxO)Q?T&o>F% zWIRK+wda$Wv9U&$c2OfX`A;{VyP#-BLG9ziOnv9qtLk#udC2*F?iZgMIjv3D(dc}; zcL94J+K$*IO!M!#SZ^i%7f?$B1QY-O00;nbUkOv(fZBcyP5=Oq%K!iv02}~qb9Q5F zePeKCT@+=UPSUa6amTi8+qP}n>Daby+qP{xFPg5InyH!o=HI)!*4}HMegE7#Z=H4Q z=$Ytr^_^_3%?)*3_064VjI6Abm7st?Lq8DoNj?$uo!wx7fWe-Dfq?#JrAsegFO;RF zmo20CpZfZ$;&ft*zxnGlu+-{yBS_-J|`D?d1* z>!dR4(_D^wK#*CtD@-vj)wKHgRbcb8nidj_qmoSMk{wBPg2GXupoYCd>I5Ic)&|#w zheb^3n;(&yX~)IQ3x0>nylI}kG``#OG)dGToKX-zpzv)3KQ(=6X!YPMI_vr$WE=4$ 
zVr#7C(({m77&{ht9T9f#2Ul5FIV=T_mtUoD6zi9`fGOv9O5>=5}a>AGWn&pa~|bJ^VqDn-?ELAQ@66aF37wO+u}@ zKusbq5mKE;pvf0CaVLd$|B3_3Tx8hN{h$(`s*j$ zOawQ;+g)(sSwz-BN1O=qy3+bhD`P}=*Vcx{%*tk6#vS&zq(mIe(h|$^A~Q>i`{TtG z9@g{03Z-R6dIr|ty)*rilkQ{Ene=&kjWScss>Rb-#o>no%pzE8>gXoV<)!P+)aJaV z=CZ23o!Lz#jSJb|o&(;kHO`(I8+;Kf1;A3CSlG>;>{=6+Hg{1AXCC=JF{I`tB~8mS z;C38QyH;jaR(^NH@TKkj0Oy9q!D(4O$s)Ccw6uM6?+YC%|It?+xM*8w>hjt*`Bf1vMR%mml?<>M=GA?^&R77 z9~`O_^*Pw8)8OT3Y0mU?S5{QF^$RZQbbysJh_|J+H@`K}X0c7ih=O@lRg))lI@jNQ zb#Cd6PaTRA6t~&7H@4Ur|J3DG6`ZFAdn3!I@CB1yH_V@(xle)5_axk7MZWt9lO7LN zCRyT!7f~vq8T1X?{hXy|@!cb*H`%VVR~x6QQGlaEncF!hjj;EOC$9Vv^FcgcZKY)7I#-e54P{;?|V8wdMFk| zx*MK@Z>Ha`m3MC1W5=s{s>++q5o6DbzTADpRoOGA; zG(9Ypm!_Mwov9;j$ji%~SZ}OKE7oA~+vCJwG|yXxDXo?S(t{~)w&YsPjiHR8Mj2%f zs}8Nkyk6vWWE8fsVx}ag54_&Q-D##(<1$CypcC#{?7G3n+%s=X#slfmY<8CA;34Im zFin0j-S?|-BTRtDlKbecj?*_Zt;)?Y4#+MhVSBZZL0Wc@1-N@ zqy&n)zudKRWu~_9A8;~>!k{;CMaS;@QA@>=!(3puULpQ8qKP@VI+Te6jI&DLb-M5* zp+z4&FCx$6Dz2nZ0+X^JQqn%2MBIQ6?(Mbs4})ntHqCvWlC%i1Id$FYVi>$E1x z)*al{p|zmV?0xP=RolwTZ#7?OSbfo9pQuk>1axMGb73EK*^!E)x$5ru^2bKqb!r|I zM6M(l>5Jd*13&bCEC*2p=; zzRbKkbHi~qa{>M4c!+sQp`zRFrHpY>^8UxFTUSRQn6isG1JXmTz`-@x^;Vo>Rv3g> zel4|TGp`c)vPGQaxhcS@qh6x12BAnZQQ71jy1}WTQ^jqYcqhclI7?jz7}fEx$z&*v z%v33>YhSg$24X#-d58K>EobNZ&9)XvS`W5r}X6OJUi$)O#F?WoR;)xkl0d+ zVT4$I3rrUG$B8ojl;H@C1@90!9%l1;X1>E=7{z*wn$AyL%<)M*S$fMSc>?fSw#9^q zm!VFKv1LF>DR29x>>QKLdsD@Ref5|5xB|Ry&^`%0UHW!%(RNT~n6kP+h>}k1(Yz48 zr=)#$MGAScUQz}m5)8=&#Zvw~cKN<-BGEN;S4@HE_5{~#=$%0H_*A#%Ww&J5AytP) z`drueWyFyUA5DFtb^td;8LGYKR!-?sN!j=5D)AG%?>V4@G7(OmT?QHlpVj_xbcX@{ zQQ>LJ<4HAq>mo>OxxVOhd??^E?9qpq6CvVaFJ^_ONWAnD$AHiH%b4lfhM!Qo=I~zr zJ%+z?s&3f(A&y;U_i zhza@bV+__<`YOiWTPJ!dkduu9YJ4t?Jy{ccX#sb?GWz(ufKYOemeSF16dYh!+o105 zy>0K=?Mi(P7I8`yqhfEOlQAbtFua#(x6CpQ=i8RbVJQq{|lw)SF^@^hV(^_-Ru6 z{SNeVl?#sex)=>bWGb8s3KC!6{_Ef`$Ka4yQO< z_!g-}BKV%4n6()uFvZCezH7T`vs^|{JnP+*?pxQg^-t+UIvp8#MR2c*|Jd5?T$ZN^ zT^U04W);esfWvb=WTiFcI2L_8c@1i0lKDNfzuJfIS|!esF__5xm||)NZV83fIOfy) 
zZyEa-JMk1s^=Bl@3nvB5OKLQ=!)?&_U}jy)%wyf1tFSTLg46T@nwtiui>>vmsP-lQ zSYTUp5KS<&Q?`*011XM@>Vf%kLeerO3fZ!fx?g?|DbnMAwvqY@qamD;1+oqOxk32x z5Ek3~9&-)wf2&JG@_9%)wuw_Lw(u$OPx$(!_hA}nf(QYEu9L{cHy3VFV zE$@p=omBfRK+H=)=Ol0!EpcvC>UY5<;#E(b6K*eCu2+$C-2BlNNIo2i^%^Pv_+JIk zsM923fR`w-_BJgcr>sWq#A&ovnOY)`jbnvDd{%)3Z_T87gjT&i`47Y*%aJu&)#XSD zU1CA@{L16#YhQdTl6^tvV=)rvCZ{!{9?c+pPUNi9f@n^Go4Nn$&!; zNgxy?ql6Y=G3^R*l0l&=>GpwM?LN|P zj92(#9f{7R@oC}>o|2QKN6M}@6pV`B;k_teiDFAlPT*l%i)%I_a>a>ZPJ6cVw1rwj zO3RUKNFb^AQrkpNi2FP8cc^t22$qp;tMPe=rc=wI$#t26dz z=5~6gMyVC5Fv-=yX8YS6zYMDU2&gOhKNK6(&)Ra1Rd7WdkSJL*QsU)U7u=Sv!OY!Y zgcO{VK)xxHDIMv05e=Z8@y;05NL!#BGQ?{4bTEi#fpJYnsq?*y!W=U&5RV8w+Y?DQ zcIhY*V?)g;HTkXfkP;{8Wxca$u7z1y8NUfVJHim`8vmM`NGUugJH%mfor}OgS(P6zUS~pz z&w9pC-YMV*g_5u(tX&h_z75@HBfLT>uwsdLwB}IJD;U!nH58_J$V99-Q5*L&MU2&e zSehn&%RnpQCDcytoV8X@BHPxrWU>VnUAncN4C^+kPB(3*Xtc{vsLC&H+$MDO0)x^2 zYEiOr)V^%S_s3_EB5Iwo`#JPI7qic40?=nEwDD%~e*KM{;53Zsoo z%fh9~Oo?Jjv_|4N7rXZQEb6-MMDQ9!gtz&N_G`oaUF)E5oLZjH(t#=vQnCB@ca&s< zz5a|ve`;nouc-YYkxvkQx->hyOz0t1dSC>hP<#qutuv&J-Z`+CB7j11awB23pZ84& zMs0-J4=Z*hMUp2kGZnO8EsJNU_Wv^>Q5c}W4<0gM=FuzKhpC$ zm2o`+L=0n02jE9}xege|Lu>W}UeaaLdqss8M{EVgG%XID@Z4|)`{4fSGGq^zJu>!( zeT16^wGo<6Q=|<-He$@%sDr=HqE;Flnu0Jqqt|Fn72fT~%AP&pfj0auqbMd|m_QX& z`yVxQMEu|Fq1Q&HQbU6ef-tGIb&b3l_9({_i@m5-8INK%TgiK(vN2hW`&!B0>h=4x zk%RLGp%RYC(jFr=$)(eCjOG{EQ<%uFSlAp2DYdvp(Ux|gHM9jADdo2s*p#*kFu$H8 zse@6F4W8G=kuEwK=ks(zVi8m{#`?QRaFO=2Pwjh6+KvGlbeqO(_%s7%lZ@l(Hb4yQ&v|4i3WZHE+J%w>( z2wcrovwk$pUI1Rv=xxDl^tvRSc8n#iQ9@0lI~XkLEh7m7QHkf!0w%5NYkRc3!ShH9 zdEmGC0I}-qz)Ryx=%(k+>AaYiytvgk-LwGG0V}?k&hwboQsX^rVC@X4}|?-NaojxQgk z9UD2&1#2Zr@gxd#lcarULA#qXu>bwdo#sUqtd%Ln^GAS2n$VEV_;vm!)~L;}?W&QV zSXY=EDDU3ny027L6(~g(Mycejct^v>MY8$GbfS5UR*Vr;bl$ATY!IzqqS@5uH{GMX z{h`*mkC+!|3I0AMr6P+S8}1+-BJDQKqUt;$#8%}h4gVI{${zC8dHC7pR`IpT&{3K2 zZpGrkG?bTry67vo&C{LauJc#bIE7;{HN@6Q3Z<_`v+ib6WacV$Y{hr7XJ-}h}90x^L-F?nvlir&*;yLh5 ziK;yMFhyqe(h`bCQ09zF*QylSKw_OQV=iU-({zUJvq5hA3L~M2KBeXtY4m)Aj`tCl 
z!Ay*NYfyEn(-cESA~q<1;okRFOLywn#4m#*a;UU{i!@rfw1;f&hlC1r>XZ$LklggT zj7q8PfN9)|XCl`A>(45JWlN-x9zSQnU;t9xPu7F{oF1D;67mSDiCaZ{W*VIR;}&kb9R+fYF=tqCczsF^l~eyUc6)25Fj;y0V2a z=k5h{e66QU^3D15r--%S)@xq|4Y>h1TNAme$A z@}a4J`M!VHKADY%8@OaMt446){ix%CY1bI&YwwYBi7b&Mn4^8e9z+aKF?}R?5}@!X zNW!>npBZMU;(p>IpD^?yp)hC^ml|P2E{>$p3bthejAu5ATawH%{rA&5NwXV| z;vuaA95OB1YV``kp-ggS?l+#mqA;U1;yy^@V;S_*F$_SCx>ruyij$XKjWLe>HvZ^vu}U@L9#O`Tc)eq8#^ z934Qq?LDIzAlpAUKEj)u12})ThfE&BRlB)G2e*M939HW})jevx#2^Z#!&@q#K2!z? zW=VP<>Xs_EFO*gHi9~~5gLs?{$&LVbCX9HQMI`0iPQ)!g`t!4V_}-v927}!34pm6N zb>WwiMA6Y7-dx@{Ffi>3Avc(6l}+8zY2;$#$B=Sz)FcAZu1 z%RKI=D&Xrp>Me>(=?+=yXTr+u5Fao{G063W^Og1_(_SI$l{~*>q;M`Ko7K`Zh%D+h zEGYR{9bAf1Wf|c1ha`eq>9s5L$Q2`n$^#pQa9yCt#E0L^0dGr~yH+$m< zq0+5VlPaM;Y7PmgHD&~06efMSQF70<@>D;EDudYJm0inrCi2U5xj1<;lejpqC+1B? z_L@?8cL$8gpF*B3aSdSKEwiIE-v~A#pEVVFZzDm47ou_wsv#b@#1g z*cvA0fGFd#BQJCYIpNYCUO_)uDvfebwk<>p{_bZ1!c9qof2*mM!=ECqfJ0Rsk0E%c zm_s_{YinM;O=^5O4x=fkqBTW9<5kSFnK?XAQ(o&a!_f>#I(!+er90^q8lat#V*Zd$ zvmek5z|SAvOlN{Rho$odq30S!*O%9{T2n~e<}cFCH8^6_sPjFhbF|3HQ$PNkPO zOHnzk=JxR4$=r>xB9Ts*Xpcxa+Tj#u*QB8^)gC_VI$w@D)N%F>!E^M^NiItI^LMMd?o93r$hE|R&zhkqZzLmUS!_N zR?l4Bn<%KDW^N%Z1pn-g>LD(?xuGb>+U5@R&S|L8oYG2nbgpKUFlv+>??! 
zZms8r@}{_F(pX+fXDt1wo?J^+h-E+FuC=|2WUuMH?Mp?i2c{g0@$ePRZ@~nUQad~s zMNQ|M2HCqSwN1<3?GfJ~Qcc%rR2i7qqH-r1=0iV~u1qg(ZjS8cLr)c6>f4jDn&b)^DX7>ElLARuKs?IJ&g`Nh zn`KCNt9Fb*TvJn!Fi3H#0lCoB6qaCpdWlNvq8fwou!w6adr;+M@&XQ>HKbi`m-xl% z-MXAK|C%J9y=CV2@=0a$Yu!gtb5)&`rIbW-S@W-F@E@h=eV^m$hInv}vmGOM3Db;j z&_y>}jjZaQct=@litr~Zm;0Q%WWvpIlBjsx3ACRSoskmaPZHhZp0d#*UKJBnGd#V; z8qo8!Y$2G@8ogH!efYCcJZF+SLu0g<21+T^()e&JNMTjn8qJ^TEGMU52tzXSi z*#JqBVY;SMKO#Ao(T-d-hi_0fBz!@%XL2yonAM0SnT5-jZLrp6S_#C4g<`xASn!q?80V;IXQ342>5S`EOn7KL(SMz)yPD^{6$1nqaRNK} zI1D@q<3-qxN;d8nj>wU$B zKTV%*T@4>ayPKUQW-?Y!-4j40XbOlyubC7IvxTk*G0$b~>pL+bv`V!r2e2s;1@khoO+4>l4P@b&(xxA)w_;y<`&Ag$w=pzdw3Hr&ik$x`*;uKd6E2pwk0UQ-Tt`^bl-m4r zKbGO>Av%%ko}~E!dAWU6&TGM3+z!o}NX+ZkZQ8G+xsoY>Z=F~9sNBt{9#pLKXV?vD zhBI2D>NQA&=nrp3?tC_By*xD=0-CyPiZDo0ND*jrZ%1J&R$Uki1~2oZnypu-_na}Z zck(IgyvK48i4-?-ZJk)Bje)k%S96iLoa*&{Sxn^*CSPP;Zx>J*S+02d+nGlcGEWbD zUKm-|ue5x(yR*=KtPi0Y9rAg zfCGOSGSj8z)=r?C%n;>8G{lbf^kqAcu7(4Q5%%#XZ(7+@)B`@J@RnC09UW09Uh0eS z2(>;GkLCOqabxfziE3&cn*ePJ?g^Zh*}W%L-J!<( z?4ddlrwHOMZQH~Gu|8rX#8U3olI_G2f$K2UgrFMC z`w8_oPiL@9+k|%7*(5J=8k;czW0rUd`2@b`6zGUFLr<8I1!lxUBCTb@C64A_ z>FAAPvwx7m8B}n`nK?{isS>>QX&%NZFQXloH2R-vCD9y0N-)3zfMJx*=(vRZq#w~k zYy-(F5yH49SAOB|;ljt8J=2PB;0?baCNcO5>!|s`^C$oV>&!*div()Bn>=zH9=ct3 zNv|#ZBf8ld=AD()#aZBk%&M%Is^IiYs97z0!9&b?tOR4@GevR=0*K)#8kV0W)o>kE z0T2rG(D1vfy8)zX=DsB>V15a{locv?Bv*v>CV*$q^=ZAw&0%E#@UU!n)E@KG}> z@yiLnsK-jQ{c5kqH#Vmo|D?qB?X9dLIH!AORQCI5=qy~cJ^RQVu2v2?)I=bwB zer$?lmJT`IL)&4sU?$+_(&6{K#uh;?gLjOn119CQ<44@Opi3vh#8iRky4p+eYH)&{ zsObVz>pd~Y#t8TNY{{0c^`+rBgW)q`9+S1ba{nIKF)u$ZenSJR2YRswwV9j*OqGLS zUK93+wBVh)FCxcnjds0gwD{?wVLNhx0IocDP^;TFmrI4m@2n5Sn9MR-m`?DP0r)>c z%vpGdKTa&a>4eObi`o~O+~x#W)d5pG<(>$g23=j?RZL!@=`3cV4j|c%KkLML8701L z&=x=j=q(&G4`hvKqwhHeEG^L2(G=iJ`vq(#k1^`_i-=NlS{v+CKPTN*&L%gc^9a-D{J?$Tj+Kq^+J(H`#_HX3LceB=QP)oLC{y!EsNWz zfG+m2MIw-MM*jtQnX@`n17A4qScY<&;~G{h;oCfOsp9JuVt}Td`&=v2@{k!(%6ZJ3 zGDpXLqHrc3WQZ(;jo306{#ZUH1MTgtTfmkj5=Q6YsS`R{jJSGcHx 
z;X}^eI~ecbnIo6Ce@Uo2fI&9r&^)yy@UKnf8F_X&7w%pepENh`c+Je6Q(}oBmvN@Y z&lNSB^;StL7^;(Q@115)+7U+$eewFb-g$FCyWrcUmhonkRPr*l({gH>Y+UW z!gG8x4d!;dUo%{k&|D7{8pXd0uykLI2?smzfn3=Q6g!fnZ~?DGggoMdw7Ocys^P2RO;6rT zUsBAzm)5Q4Q*OiZ$seJG&ptCBp$FWr?NOp_c6}aGERrj-HW{rtgA4Uv3mc>~ z-{ItRWgZM$EJ2tbD5-8)U@MEA_!kbcr>5=SU${RYZp50?JWj|qF7Z}^WRz*9OronJ zt)q3 zMmNQIj0OXsfqF;ycMtO68|2>5&6;AIQvQ5IgaAN*dI$J-_wwQE<=)WVqB9Ug{qKia zNVHSrpO0hU0BBHeOMjjwK3$Dmc#Jnmas82Lk@Is;CEl)acFnlU`9OI=2`Q#a9np+BMyLZc6#riV-nbS2I)tpR;k&> z?Jfri4&0zeXg4)#Bn;1__+rP5)TN9T)L1g$G^h0#)4+#2qqL_@>Tr@LOd2TF@G=If zjU3f^IwG{Es`aNGUL?@FbpQT0(&acco*og;jE%O;%2J=6&f@DWZM_AmAXrgSJf*pD zURJY4{>FX9ypgVHiIu+o(IoAh&6e}*;-LL7jYhZpSB=rxB)!4S9EnCNeY|>oXQ>e1 z(A8wk?98bNw9$RKTs7ACSHCpS%+1^s9nJhyDw*Xg;UKs`f{~uVmC}#w?%;)8uC*~Q zw{W_4WG+QIx3QW4eFQe&w5_PQ6IS$sa;P#hy&h5=Qhdey6_S;0V$MKM-n34^d|hgu z(0mOxc4Hm8W(z88d{|Qdb_N@IUNIly#^q(L+-#6rcdYXm))cnts zXCXc}{)5~N{qg|?fNEL*;e)Z50pUZiSOL+B&FV7zr+J#wo{9gkcjSHB{3pFp@Z04- z!At+I|IbT82mE*R{ph*h=>KW779Mc(pZ6&M`S{=2_xqgo?EJ4zzj>dhx!EEpb`DHU zIOzC#_1s`E@Sq8UuFtYfWnGcy!xN1C;=q1TUDFwHjEx$ZWTwS|$q_JN{Gt#!EaKx^ z5$Fmo9R-)~ZJG;f>sD=xe8F-LYo!7qtgQxrarHp@k?Y!WT~<}4DXjWg+~IrTHO;=W z<38Do-&5LK4%P`#DblM5@BbD0;}Mh+`Ie%V0=Nda0`YbcWVXkTkGGZqxCW#G=@zRN z3s{yK>=Op+|Ai@0pVpAf!dFW5R{Wo8k$m5TAfLVCV4cvF61|qa7GPXO0X|9p7K8YV z{HNF=(ri!AKQQi}0DRJa2%K|!V*Y_qgMUi?Ta971C+c4qXpB4YYux5Kg{)K^mTK)^;nElUuxxoQBg#R`Y_*L?6s>f_k_CE~P zK%f6vs>p25pZ^=S{_Q`)ZmM(Lp*l?deCv3WZWNQ&EI9+2lx@YTIG)Zhfq#2u&OAJb z>`!l4O|DojnfQA;@D9e913mK1(o4T+99^N`IQ)LuVr5ZsQ3#d$4wR#ieASn>lFWR5OaF+-CTE3QchUgL22K4%~*Gdmu z54hH%)>tp2PY03{#sutHr8W;e{u7S}2A}uWArSVD;w))End6V7{&%lh(Y|HBE|6!a zRaai(i!ixR&<6f*#%U7owyGC}R@FdeKWj)%C=vcH%|1XNEadwSFCxv01)IcU9#3YT zp;^%_^Pq_JHvs>Z$ZeI?2{z1CqhC#X7Omd_bT`0-2h|I*ujO-6d50f|_165#OY)X0 zvzy)Ydo;5veV>vd7ToLTODz|=M4KW!D8b!S!h(-Go_IY49gft?&T1-#Og2kgo!u~^ zB#`H~u}_mzoBTbhuIr)n1Vu=`sJ-|-#shhs8nH_eoo^+V2_VT!!2^z*ZQHB+^89== zwMXFYb-+Ik$q*51K6z6lTp4WOD=2`gRG=)35-O;2G=)osKS?KNK)i^8P~S-ud&xxs 
z03yMK7`gZ=(8m@RSZzdN!5s}8C4d95vuk^_8#NLbh#*q#unw)CWP~o2Etkm3a}p6z zHp)whB$>PYJ53!bkLSk;bHC&6!2sXmUjI`Lg1kYM(F zAo@wzEml_+Go>)hp(qK6>vpW82;TMXSAauUDzB3? z=xNI^ND&pbX$@A%Vy?#r3w?Re9!ZG@g+}BKnKDIPI{BAeQGoVu$?W)~oDq%q54iGQ zX_Do&dgbWJ>{5=U40xG=B{Y<2>z5(%hUAK$4M~-9zd4cM!0draUJd(<646UgLQqIN z1fiG$6r^8%hv(~!U0(hO>K`UxTKSQE_lM88)cJ1UwOL=+he{znNh&FLc1Nx`JD}oJ z0 z!JX*KkClz`IQDE$7!!gWW8^-bx(RJBX1_MH!9c=>uFyz0@U9Xn*!?uq+{^ekB911g z0-<6cFbTFPbOA>rH=1yD;=mX)d*JIUbp}SU}rdC>&KUq@M!2@A}t8og8 zF7b_RvkZzlolhi46Tz?jJ`(0RrQy21S3=z-1VUWr-eF zZ-((6K+N|GoQH5(U>-X6NUZIlG?yIe~1ZXc6;eVNF$@B%`*lt*K zxqnvB1bzv2!VADuza02N&i3ry2obz43z(5|v1Zs<)vbX+0;LB)|AFUmlmq5E$_qCG z={)H(rQtfbkUoExZ0!Or#qLB^ZQd$ccCmv~J8-s+Wzx?_Ad&kCE{~DKZ^@4$0Nscq zQuG_ddC~(H%U)0BlSf6?{Y<#4HwV_4LJs9E&j&VY7~U4Jf{6uBoy6CHhyqC&%){`w zPG1_$`?(9%k37x8jUVmTybeh2+vmUzYFD-VRLIbLQepf>kt7GXGW2QMdm3Ptye;{2 zI0?9}|8uWyAI+;2dOL8JU(Qmlqr1d!DEFztn<1ACS)I?5)PUce0Vz!nvsdL@OTf39 zpQd%j|G6SJdTmw$CHqbpXlEIMyBMnLlkO4E0=){lG_tL98~d}OU`hyhH~z18StlkR zS>_K9OSWct4A2-2%ny@+_3Y5QkAsIeC=zv#cg!n|@C#cvk=xOeUQ-|oLMH7Mrn)AR zT4hB|oB^Tr2svIePF;LwAOkIsqwn|i{tG>5w*$G28p_^^b|kY(Ys!B1jE=3@l6hw< z?v4QMv1)kcTCT8?vaqz->Os8yWK|$Rp!e-FXS%7Dy*pM(KIKj2rL&h zthM#68+k0ZY_`n32ij-WjjZdko3k4fSQ@DR3VWMp+IHUR9xPAnPsB_5NZPU;uAHvy zaqYY>0h>$v9LF3-40qbEOx^Il=)CE=Q#B{P`!`Mv*XA}oo(1phFWA@LcD@H+eOsy+ z#2+$pV9IJVA04&2uOe);se}Canjg`vLSEWjv@VHeV>C&UkwsGq4Nh3T{zj+YogyOb z=-lDi;v=UT9^_p~X=ap7${Q-HI<;MvocbMR9aGtsIR@}+u4~*e2BNEVb9dzjjH05Z zzNT4@(T)`jLvINWE~@^N`wX{Rubl45w1=kmOCJV4j6IRxso%*ht<0{f`hId$LBT1( zyinBrFLd1#0-|*ptBN?kx5;CW*yRx%g=*A(Pa}ZtThWtFBcq2;42}#r@0;AY+h?>& zdP!O#@uI*-Q19E{CA#r%nzHpsZpzUN%3FHgcA z5KoUN+h4S4Z;ourUo)iGd~SMdddyig9IZ-NJy=~>X;Ozrt(C5^Wbdp*%rRcy`Oy-$Ih0R!yFh;q*9pBFN|Iq zL8ysn%BWwPIIgx`59)TT)SVR+N3)tKk;1LRMGaed&4mK8ef@QSWna%uU|HsH>WkX zT9;YZcg=src}IIEc*k}{5>3hPhp`{CpW|TTK;r6hb~|S}WIFybg*?%g88&x3X1{Ts zc2MEK&f?7S)SJ?B(s9^69=D%z;l5fw;yC3vlkhKn+Jh806uD)xJ=pqC+a8eJjeI?; z&6apWzz`YUl0e> z9yY98jkX#k2dyP+YG3>=^R+=tyi5V!y!3ERRZ>9X7qf(B;nlq9d<&9S)~PQ{uhdJf 
z@wy=T!owMd2e?poFps!mRGPRUfDUs~&V-mTYCE>fQP!*I#VB};jscc z{K?UIt4DRX#?386i!_r2&)f7tu_tJHp=auubhi5|9 znvF{#2NxXeq*zzM*xV%@r(zaf3A&4QSHsxaB~yojHeSk#@=3WXb6dK$2@mT2)zQVP zM|ZgG@zk|a%gB}UO54Sj^tDNgx)>@~fa+iKu3*wUb-Kv1d`K57T%i^yLd%d;58@gn zvL#7ryW!s0zzq9ddo?>oI1e0^&8R@UP?66Qa4d<~Jkwc<=$@dbp2*%0NuRULPM!W^`B!>aQgedyMc*7#&C2UO(X1?J!t3 zIMf8uG3EoM(AyOu7mUgU+wC0)5@cLQXpbMK6+iFp^;)SAp%~4bST8SAx9j+zl!W?$ zO@3|RdvcV#;NVk*3-AA#7ZPzHMvET&cp($2R(gPnQpGc?k)``s2g0rpArTjWZrd7HTCS2T{;djbJtFu z5u1BB#nkTlxij!iKaTTgE~a_f%zOVJRyc?}Z)nnAX3fAo*41cuyZhdpna;TFcrfeu zz7$37SZimu-Fz;2@<_ZLw=lZ+vfsq&My0KKJ54N{jnJ~asw=$>-6uLs1fj>$b?n68CwpG=AhX$bw!h8_-INi)d%e0%02Pm<)lA5Q z=p+_}&phsMKYvjAdIjmUybr#zGN{J74gY{yx|OQh;}09v0=R@7`Ei!=IMq;Av^A8O zLNaod6gn}bodUNCu$H4Q6?m@DoQI_6yn2TUqFQpq0ysJiHOV;e(OpaKPd&4z@*2`Q zPW9a!+6zs!vNFTvN!Xe1qCO-@lEaKpz$6K!XxkyH)8n_7`ul6N-?uWOi>jm?gamKH z&>nAtKu7Ksd8)kgTDEvznvR2$=4X5KT?}Z4@vAEmI%PAW2 zt2QR%#fDB#FZGDA@U@X(qC@U_DMFZkPN3K5;D2EVZ-~ytNc}GPv`&YGuwc*@leO?H ziI#NuvVr_X`Q_R!He2Sldj>IJJT6vn!O~E+Y{r*qKi2oY@O7`SUxav5^t1_hdq=Or zR_$rM=)pT%Bw}ne{KUS|UAIchepem}N{f!?AoKZ-!$T-U7thU#| zahj;}pPvRTCNeydJrT9Z_)s2LPZGVEp{+T6Uhmb{wA4-avU1)DA<+yin&P_I`EG#? 
zA7Aork!E}6J6%f(R{j#?-TFQrGuO@W>U!60@ZDzcy~`O9ZeX=K{Z28QW-O8H&dGT0 zjk`PlqgvS|tDCB$jiuphKJWjDA8H+fk2U{Y(oqo;VPUy5`7;YTU(8iJ6O&JX16)X= zT}S{yJhVCu7!lzt5sQycNF3@q6)v}(_ItVF4TuFAW%*Kw&9U#E29#nY)N4l?Ck<-tgrR}7cB&)A03zPeER zh&c3EQ1j2&(aLEkCXb|~41^Sg>(;acW+85DvHkGN;wcbSgLMD`c8$cv zYPBx@Yj2LAbf@zCB`G4+66rfVp@rXa`xpZWlOHVVJ5n=_qLuIac?d$mhDL>z$Fk{A z7mW5yY9(A;9asK}9(?S9*=UM`N1zVp67Q$>AYF&&fJnN6A-HSo+(Y;L^JJl)?l4{% zZGIzG?*c_2Yq(d@sR*tE7EF5pyH2BUFsaee@bFiUX6ckPuA2Ij>5gCP!Fs!8 zqX(UFnytHBjhVv0eWy=hlnBzAe|Uoow}FQ~N_5`#GOcQoSx_Oe*sdlzNo_Ne&Dbr7 zUpj?xVZu03t5RUiNzX@zhGI9#HQW_pigIk9E`gQc+<%c0^8+H`kEt6B*kcAIF>fD0 zf&giVBzTU{r>pEocGE+r~$Q^DkBGNd)p&&6gdml>_JzLWhMUAb)q|2G5y=|NWt#-5TGL>6{>Sj zN4W>@RhD9)buvBH#$~%RirI(!_bACWor^Y4Vfe1rgYBBb$89S-+y!0~fD45b{lG@D z90m*y4Lk=!hO-AtSE(wjJAIWCK*$z#3WGCkxNC>T79d$08<63Bmk&Ba5WZ`k4|@|Hxs4?0|SfvxFU(bM;db6Sc*wXjPTHBUXepq$Iv|E~z4+H(Kny@}fO|H& z)7D;H{feT3A?C?ZtE(w(F6zwcyS3gc;4@HAJ-#@+VKLWJxgo2UxWn^n`#`c{Cs>$n z$_vdaJmE{6lUw@v&!673HDJbMF*>(}#~0Xsx+XHD5xg8Gh*`wgHyan?IBR;w!y`m( z+o$t}__t*$=dk)4X&gnJl`qXT)aR+e^~h3{AVl7_e@)STB8a{}DpbsOqo;J0EonDe zvl4`lj3jYU^Cx_2VrF?(p1xKZS9Au2qEC22?_`R(^;RvpxYQE9B+L@U-W={Bkj-pZ zt=`Rzw`O-Ip0q_9VEo8 z0F{sW5r_UWny&n=$YfilpfPkjKZrU=Rs#=3Dyh>$8nd!2mlnkFIQN-W=SU6z7}2m% znu`8MoxR;;l^?6b!S1nND?5q20*b}5&{L6xr6J;1V@zTjGT&u(2^_H6qM!%8B~JkP#C0h9h8v_~Pt)tol)Hg%K(bsCNj^+kDTSeNUFX(ff3 zFas(dj!?cv7$fj{U88c9^lmwE(zzbbVn_jb*pU@&Z0wp6eOWrCMXZdBTN9D)2Ua+O zU&Tl;2GrJF)71jZ`X5;pkWZVMd&II7OEa_*>Q-7MFs#is-9!SNP27sD;FE?qF!-4E zO)X*Zr{YW8H9DcQhSRrf(1!OKi3C*1>z(f(G2((EcJaKt`KAuv$GvexYF`Sn9RNe$T87z+e(ysf7mH zhnC?#B%pC@iojlbTn0lJ3>g*n?1aZlsVBQkOO{%~QkFZb@i5$bcF1oh=T_|5W!Wo$ z*95o1^8$c=djI7AI%v~@U=BD}<@v-#GA!UzbAewbEkn2(FB(`2NDQtKC(WjomOS9` zY2zNl2&_X!Tc>1tpm+bI-wSlz-S*fE5cSJWHjYwH! 
z{kiSp5xLK`*19eIOP*oEV(Er~{*%JUC!-*pdsY;y8tgNVtJkN93%`uOj#=QDfu18)E`YpZ^u_q81M=6Mt*A=cj`031& z7REAFMC`?%HqEWaJJLop>p#KBX_^uunoxsh#Q8l+{fvi)3#qZ-dZ3YHwzEZI{TFLM zeeF_s?k0=0{VDNS9cSxsZvVFdzd01XQD0sl(GG~hAwC=#IuM|s;vsW-XM}w-Xre*p|~v&C{sV_dtUvVx*PJ1^Pt5KRw&3JTRQI5#XSNIjy# zJQWtF1Na&|f%gw$-X5sjRf5z?-PI(Wn)(bb4Sdp+bL(@f1GO{-cYmil_ZBGjLzUVe z40;P|W-5z3^rSJ9lJ20!$op<6s759QwgA&PRLj21t+x)iI=0Qt?Fhw<$&s0rL8ura zRX~MRd_WYF%Xk)I<=-k)H|Icj+zj(5^2#*%s^EJP({MF&RbK@sq;e{c!v0wn}(pBj=_nPJtJ} zjE&rm4yQIUO7pd+^Kj^W)$G5fiJi$=vDf>h2&f01^VY#GeVPnbGKN!FE~dX0#YWMWTbgz zta6-M*raYv&!kxY-L7b`t!pxXd~qD}QV}(ak}gzL`uJ)V3%Ch_WPpaohsn9^z717@ z*3GD~<%aeP8@u5wVYh9_2H4nP_c=y>o`JA)U;s&t2-F?Q(kVlk&li~noESm># z7!lU>!;6Kq=cZ9Lo-rmS4NG1)2xdM27S|q z*o#E`JuWwIh4(GT!=%CdL<>;pu?16~W+wZKyVX}CU*X~aQ0Pf$hS)xcq!96&U4?)9 zpjz@OY(TpF9uQypKN(` zX0sW*CRFXA=@09Lmqd5>j#1lr>(J6w>wXut-e$;r$-qOq;?cmXkUft@lvxtju=(pz zm6Q4}8tdx-EDT5^9FDSmO41)oi}yH%`)_`i4dQJR zEd9o_gzy*^uQkuC%CjHrM>vWHKLf5AXyjLU&>sEOQ3c0GI@BFZvaltBB0!U2b{iZ> z2^^z?d3`=vVO7t#$SF&U2W>6QtS-IKA6UBDJ6QBT-(5O5_FNiCCf%kiicn`T2@*4t z22M1-X%9ZIS{tFvxi6N&bKfKi@5O7KPm|;8(~06Ui(QsB$oN$!S9GU)c?%*FN%njT zaNfOM2|PSpY3%@1*G^fcnC0=X%g-PU-CxcG+ss=B=~yvNCK%6qFTl!reme5iD{ zuZe3{f4ml&rg9`#5)AX|bnbt8G(#yOeRW)}izAyvE7_BLjiuB|U&H$BKT{;)a6Yyh zlH**oiGUlbXxBpS1l_F`%AxB`fvKhtCZ>s^O>LA@o$=K!K51Z9$Gcxeo;I0sZuY_w zLviYu=6KiryaQCu*a|zkh}*n>vm01mMl%nDL-HhG?)cP81aCK(dBRwmfJM5Ki(Lk% zB#vc0b(uf#!*so7Y;bm-5Nub^ye8_bO|R4@FB5~FQfOBQ^x$Y(&oPC|tpjv%vS>zh5YUC6A$U9djF06%Tal`XSLp35wmE1$_n z-m1Hw9o*6;=Cg)?p+j<5d`N$VoZeM|$@g6|M`474Qzx0{_hWPdr>^epV4-buSib4S@3sW>)%Pu>nARdF8@8+DlkXNSYF-HgICjbHX0xZzm|g6-VRBL5m>+y>f36cwk0j>emVip5APu2ZeRNEVbA^%I0(C4eeBd_dMB8GO{w` zMbp8vbv4pnJbqg^p^TB@zyhd585${Z?5^ZVFUo1vOleB0*HU!%Wl)%s8sDa7@yQK( z(4%Z?+{D>5F$bi8A;urrzGW2>p&M_%d-8biSP{4>aHM4=Q*0;M!Tm3gOaX`XTIml< z!GfD$tTiW@2TM3g-0z=hQ^o|83g&%X+SIl8R4g1uz@UXQriF_yOU6H*hp)HR5r0r$ zQ;@)B3+8=5*fDSASGAN`m>S6-%6u)ZjUb~k#)>#9&^xOKIjU84O`j^d)$ErA_IP5x ziC|}ETq4+G%rX&`&Qn5dkN7;`4MICLRh_}QRQ7)W{&#pewt=OFcnSc3aLNAzUe4Iw 
z#MIc$Mc>lI`TqtlcjX1Hk~DSynkGiCC;%!LoNoZwOY*o~NKh!q6@-x}AjBR@Mj?tA zO+n?RSnpkjeu)c;h)C^rxmpxtB&hKF)4dNwwC5`4W&5Y+f%}Hrd}@03?{PhSX}6EB z7>R%YLKsAnL^uv8^4&q~|K%E7%=#FQ)+qjzeFwN1;yfzI(;J^iB@uyKpGaY&=HwI^ zjxqi2}^xLjaYvlkqD*MN|jK5-vGp>keVjX=)sgX6YS%koHHR156OX)cz)}oMPG}G zX;vM($Ry+fD4;eEtFojBLt^a86GRax1e=hynntB=go_XO?()*lm2jl%E*Oy;S7<6gKLa2F;t1TXi*siGT~9e(8T2L&K1&K=g1p61h<#(C z0@;)99Qx662i`bh;@@y%!>2BNV}>rly+fdeE@3+dL@1qOIw7VyH1T-VtO2S4L^PmI z_Pu;C8iLz0O2!l=b0lHpO@Li0U0nd>sXyI$LiX zoI2qGiY%yn@-3B^N`ZIdqzQ4!qAkIOpWZE`YlQ?*9wH>_cfkr!L_FBD(@|Vn1hRuy+8qo@E#WN# zY-UR&0FaeD2Rt?V6Zu&}EkKb57N9AmEI11MkvR>FolB!z0{0p=SAiOr%tvf6jDm;Q zI+#x@JEV04QxI3U1q95-nuvm|NlC>N)L?co^Ij5W)F#v*YM z7!5kaNNczR2cbf(10u8QaOI1u>ki(>eny4Hf1b0;AVAFdmmoedTb#+UsyP$_G%Xxo zGO3?HVcRdg8>^dv;)bU@2`7e=si8F(I@u+aspokOwjWfR9#gJZjbJEBB9Y=Anra+K zxPV61VHl2JF#Bb_i5);7mhOip$yaplCo=X*6xhwEiSuTJEKnvq58~F@?&-Q**nn;? zkg%Q*&n1T#IRVN9q#-W**9rRy5jN)}Hu8RG#c-c&5OjP255mm&YV2~rjF~_VC^^`& zbi2~~hyD7yreZ&Z5D8myLZTbLUcL6K}{Jsu$~uoI`rvmlMPU6T1%A!vhTmX8?VUrTC@ z?-fSX=6^;`jbxe1Wok~=f`v-i!J>)kq-#`mXE}D^&Y!@24Cs78Gu{!PRb--k!xzeW z5by+^(r*+X6J9xx?fc0UIa1~;aA1Lkq|%4cMas+&>)9vH;I4AC`X{+9 zT9hx`cS~2Kc@J@uOKeWFz$9q_rjzNw&CuX{v1?lOw4efT+r4&uGYcyIgay;8((6{k zTo^?cD(I=F9tTWDxe3sq3dtwY+(4F+^#}$Xgg-Srjj#pu(LHRug9Gy$QXs=8!i5n3 z5?_{XGr|?Y>Xs;<7DUjcFQE`FezvhSwXhCXK`zXrq^8zsf+AAmlY|PklkWss_vu0+ z!$?qr=4hP+WSzgMM0#wWbK5P=-<4Mby^h-4*HZ8&D3IWgdbpF(cnK|Lgwbl}06_gC@dkz9`y z_*F1C6#z$!L+=g?{wcT=;~G%L*vL_dB*O26*R7YhwTMCEvE!Nqb?}$G4VF&%1mYi zH9s3Vsao;}FGSnQu5Npfp@;J5*n(jHdtRwY65?1^G@U*4ou@tlu9Yu;)|weq8VQ46 zq12orE}JBxZ?Yrd4&)3Rqc*(Md|*@hc=I}8Vt)Ydl0*o+NRh{!FI~Y=5^*|hHq6tL zZu-S+sGAJC-Sh8DoVkDyU z5Gh`)4J<4~O(+g*le8yLb2?>mcne7^&d9o7laY&q0g2G}&9D}CwZ6s$53sh&t|4*L z^4D#@>aXcwoDvrx9dY6MKXZ6#8)vsRG~cZmXcllmVF-0AU8K$C_Co_-JAw1offA3U ziP;;0>ldWWyn$F74unzp@}$zcm61KIBJ(W2mKytwxagu%rqFt zFywbPfW%UTT3zRRxbnrS4uEwAg7G4*Dnhg<*@Ntfji}_Uun3E);PkZXO=eup`Uo zf6az<=+RBbG#c6^0Xm~2jvJp`s2YTtVX9%27=iw+)3As7!64vBR%l7;@>`hzoAkJp 
zVb~HCiz+JY6#)$912HA&RuYb4BsMz=U&i6Pwy;S|ySSrS4ZMgWLk+x;BN>{!up{L9 zP4KB6LAXNN9yBY4{C?04nY9K_PAczPi5dx1mFJ*H4v8+RO7K--!CxCIJP8|YL&5xElh5&=n1MGY~J8<4_AClnhX@gIM zR11^`h(P=J82ck3H!ylh-I#iWg1>vW z15>kyMTTSM9Nn2!yZPG1!M(u6KgJ+LX%N*9udnG)95=Tt2`!AEvQoxf%R0P2mk7Cb z@h=fS4G>w-id}^j%HVkya^;I`OzYkfDRTL z@%gX`=}l8Vt9`=KM^MTMi5+v?P8ZFw_JOe^BxFCpO z$vtgi2-?}F33RJ~2_XV`)T^Y|X8K`4k`A$z-M4!T%s1E_YP^02MqU6Z#= z2aHvyJLSYNBPcNhLdtmR_n~9YfGnXwlmQJoX^@rtw5sp9CIPg+YMJhn1mn*|3H_oc zkTH=@S>7gpUi>&NZvpKNJNWkvX$1Vi;b-Fwex`PE=aBr4JyHSk>V@M4NU#oJOqj~w zh?3$X0G>hLG!%a6!`>~UkIzB;22x0GZ*x)pOI?Ti3Ew@2U=SxD5(0t1ml!<^Xy-$M zxCb;lAwiP}WTC=>W%p{C*F3#a_8{pc(2|0#FkU2wNO_pd8@h zLjr~^3UF8mpxYn_4`*RaY#Bg@)$EM$dLRsAv}t~c5Dxg?to~7po2)^ zi8am{fFwv%&nc^zV6?>LU3%jf)#XCo>v1R21G>G0nh|WXkqT#=B^sS?f072|I_<} zjPLT=-@#pUeTvwO#v963^t?p>sBtl}a#3wvgI#^2#ov5gU%JMzPO?_KG2Kv6&uL(6 zVr}}d;MR;gWLLYz)aB~7aTUAqw3=|qdik*d-^kvAZ>n!O=djB~<80yNadbN(IOkYj z)KHXB6iU4Hd%&@fc!P19ahY+)vG;&mJbTfB+?%|xsiLXmfy2Dixt@8BbAxlPW7@Oq z!SiH*4h7vCT^^l3jh!w}2covNmai7D7O|GH7PD4c$GsiT#n#2v&G+DaKy}DAJu4=w zDyt5AG3JVGi*}24tfs$a)wW=Rsm-Y^#;&jP%8C0_?ZR!cX02xADnYwz1G~+R{dI`$ zQx`4HDnINLV+qqVKj}Av3)Q6H!EY57*cq{h-$X8eooK9O?+U@?5lbrPK30^UBg?pg zECK_N|LtCNU`BlZ9@m)sK~z|Py6{WY*kVMgsH+W?m8f(z6$6Q3jGL=L-omRQ{OPR zny__NF5J`q&R)57$V+;kp120`kTwzNGsDl2Pt5xaEK;Er1h*B@~ktpUay=D~3=u5=E&)5wwa70EG`wadrW*AwPw=+L+x_0(x_t2%ru< zI}$7?P@&>7$tR<*RKdndk!xv<0w73{61IdwK>gA_RBlH8&c~e@gT1Jt$oCiGlf$lN zm4nTT8zRhl@rs_Qj>zb2aRFklrzYxsTKzqlI;78~9Y6|PL zwrFvw^gh)-iPMs)eO|RqQ^VrLVTEZNJBWHv%3Eu`P)S8SHSdxmo_ zUA3wCRdD!U79iZWyL_}B#BIc0a$`S zwEF;scKotWYu3#CT+4hCGFk;@j{KfGaH`3WSs#eLza6nL%h6p)tiaI4b${L4I+B$* z9Z3@xGYpJKBnxFso0lno=Xb^@7Z*tu6|q!qSy*dxSqY~|n*fncE?PA9=Lo5IL55gXDT~X~O+mXYF0V^Eragd@n{w@tTz!LJu2m-MZ z5cJQ$o{0z#N12cPbchcRT*D}Os|X6D6i;hL&`uD1tA?@`cI)8mWVO;~(2s1a!wH|eirO|0It(0wX(E!MCb_}tys27Qx6C+V+qe(h6c2IA@>Vm3x^X#tC{=t;JY_UIQH z>7SJ+eg5aacl2TlHF0w(4aJPv%RV?l>vJ32?VeF(Odr2>dFTkKUoq-_kSlh?!o$_@zI3#9l}uSR 
z1&YnphOwj(wJql2qtRJ0M)MA6GS?Y+_^rAA{l4K&QX}}%0z6iF1-Hgn+J&E#RK}M2Rlb$rJ27Cext_L1TE`4s zW_!dkv)K{rMv~iV@blipH5(nUr{-K1LWUAOZKfX9jm$~9<=r_V|DkbGZ?T?hOVuv;ETaB-i6cho_M;88+dGH1Ah%n853B+sRbe$l;p<6jGY>T4+t^q>* zhyWGTz>i1#IN;j+eOv0G{{6)F%CGH9XlA_=P7jsACWvEt z@Z?L_@ZMi$%hoU*#%T_X{!w1PzV1?_xaBx?M$VLY>CvstoQ`K{vdl6ydRE+Ln75at zU(~>_m9UldeT_FxD`}R`FQx|N<(Ub1qhym7$l^}zpB~jwk3+Y%KZ>;EG?0e-?zeL# zy)G~`Id`{Kg=?O(uO@J^qGwQTY@?>Gr91JrRad&PGCC&Q50z(%P`S>;r*!muy*Rjn zc$WBweSA~%Tlx#+xMTLpOGwryqOmF(VLjAgHMQ?Pct7T$YYou+BVP*O5?XdTB`5nBcm=^P;L!P&a zeQqXo+Mdoja$Y$xv_x?g&~Vn(mGB|ndhk$WzkGgGi|IqUdQ%}>^3069LmUj}Va|-V zybRr_1u8FlgNxnOxVP*$G_MUxYWv-LnyQb!X`R(r+j%k@y9(|)^DVEEJYphp)0pYY>mqyD;Ntx7=SuP~c;y(4OIwzTyML_v9po}3tU8;gW<;tR zsk^zt;OMD{taj(5E0XW?46Z2fO>NGr-C8WMF*TC9j#ZBui+&F~PHV8kD-0=h1yx}_ zUMxg4!Wr_9A3MgG|5bhzxfP5eu)78$60qF`UOCrgg=~lol;TxgZ(azJz@2V7vIfU~ zMTs7`t0eLb|IUI$kA;#=jM(@PPPoWvG1L!KpOC&N3efNK6N-?Lb`?P=@#$1?B7?Sm ztH1`o7?Ix;yqRJ{L0e0F19nW4BIM7IZn^$wm%V?d6Zhza9##bRD2Ka`Ec7rl+fJ(z zX&^210MA#raq!p!zSSF2k_OR&GVYQzf@It(=M%}Lgtq#a`ioYCEl`U1;;4SF5=#<2 zZ^lCs5VkKg)r)77TqX#IXedSd(Z7%OP;NFCvQjMe-#bN7*sWSUnSNb2q1Qd?q&ib7 z7d6LX+j5S!bm=)TR_snGvRABImKh(k+Imd&jqtHen=Rf`TwgBWOlX)nM2+rWN`v`@ z+2`<*CS|$%$a$Pr+Hg8OA*T&w)n2kn(K~zGMlT*-B_^2OtC5>KSCr=;Ze<|8HWbCq z5#w*S#}qeo=DZ}BcQhL%Zsy8hyx7MhyawDg-9*u2!oq1DwfM2E8wbA$RJ*XK*>BSjM zYZWC1T9Cr#9S1sdroLTnMq_Az-Mp=zHyV$ZS zS`Azd9@Fn%iWh6mT(*%~weo`})2^P`f;$e{ubK{KQ6oP zL)Zp+QKq7Ls4fDLG1XG}R*yZ!K$5+J=PI<#_EmURPc_5|T+tGDcFP{d_L{LKxO(Qz zwC+pC;MSH0E^`}RJZxD0lMYs@wC8JOU0};tC90>#&p&UvbqfUU^Z09z@CoVZal&Y` zw2#b}>a^V6Gjskf!!oJ|nMGk&!?eJ_a(s8znR~e1<dPk?JRz4 zRaNUl4msG{s&9BQ?lLz}9mW+p6kOf;gXBDq1_!^K73m)Ard>GOte=J@Qeaur_(XNg z6Fi&7=&^KEln>%%s(}b1g%yX}+@lsG6{N{dNEWrK&#e-9W3r=mWE2Zg76@TEd zZ=K%F)%>(#wp!7&Gybfh79T$QfI|19?EBy=A3tiF-?c=0pdV|FudQAB!|9c8MbhQC zVE*C3je^3_!GfPS8YvOaq0CDJ$H@ta-hXL{>}?L>U3F(?EoO z0_$=Chm}2Gr&gbfTPxQ8HG2@E|1-K=G5Hal>r(-{qk%im_Qx^epwt&!V8fk6U%^J+j5o`yH)T%e`3xk!mv!H5p;rIzSsa&6pO$1zQ^$TOJIo1NBX7+k&qFd4_!R?16^4#mB_>QAg>>d77f^+k<27yNyDSkvS;#_l;Dw12)Ao 
zg)r#%Q06&+8Z>v@qJlM*D&TjXc2S?tji&2baXyQ5U)6qE^TR@Q{Ty{v+%mgWo9#|t z>%_D3@fP%N`8gDv3SybnM<17=nT)Lcn>pVcAE6A6ik~#>$|3GjsxxWLs<2#|3qL*l z3YRX`o5sb3GGn9)6Rj)#i;xrWXXWw9!}g=Z&g|Cin8;$YuDOHC?usj=G-_X;V@_ zZDsZmPc&Ff_Dr_QDhIPapeL$LwCtO(0(_TjL|3BHUhwU~fns3KoU`9V<9x-QV>gcT zj!ccuJ_@7D*9Ap=+$nmd++wpusl88`3A}C&z&YrQfcRq4p!~b@s&m zH!4so@YQ&$)wjdpM)t%imm*ldPeh>b)yG~q6FR?pkk9aMzwfq0#lQDU0xNxekCfid zI3S*9Xby63*T`if%Ft`3yMH(@+de+@^YB7xxYsSvbZ&^bRE;I(;dnH!+G3c`RF~=4 zU%Yb=Q?Kqv~`kKR4IM4QGRGV!-n_kPv0cC(*Um?a--XO>=ngA zZ}m2TTVR~GQ-77-w9N8C;=6x01x9{Ry@P1uHB?QOtc81+(GY_6p@%q_ahY!h{kJR{jTW$$6CxsHPXr`suzO_>+`A>c_v!-p6k=M2&#AUSC zjn(g+>9oxtR`bT$FWF0_TlcOWO10%iI96iAbVeuh9ule%Q`h7`2A9L{+Wlz=3dt0;dD-!`0H=!@E zVC}QpS!cV$xOCCNaB;L35?fs&i&tbgQX3@)z4>7<5i;sR&L*~Vd_j_p-}1JA;pLWB zZElr|nRv1N8wmKobJ;K3$ZUGHeC|wGmrH15!}{3sy$f7tNw4S-i@l1uHMM@)`?$)i zZ*C$^J!iY)Np^*T)oN@eLMrMRwPIC*8OEJQ+jI7Fcvnko@~E z0OC_+wS!D68q0g%lQ5!yz7}WblLZwg$%#`8|r2h zrp3tKXyb51VV@(oxt}{Y{AZWD!x6bCU?#j=YpugnmYf6Sj)LW1sPd8HrktfBbP4!Y z-#3iR;<#(RZG;UJ|GEcDaWUD0m6joVKaPc51e8i9E`pt|pR(YOY6@R^TO?UUW zr7K6&Dr5DE%olP)9#5jgcpS}Vae1?Jd4}04eP%;mCvixk@SQbdHz%u~YGG z_D^pu7qtdt)P$tI+S?hll^eKlZ8kC6Y8pKBmL=NH_-mThs3vBcu2vtzS;ydRc3mwtf6(ThWYHgaKeeIxqqSNJ^l7}_ z>cfxS<9Lglk9evEVn&zuysPZIx#`X2`t&?22ww?*>@2oMx-70vn&dzEE%Blpbbf;)H6|Kva;$_W?<^Bb1j)gj^<@Iw9|^vw^32wp6jN^^o1VojQ6wkdZe@Kh7j1CEdEvfuDyHGOT8 zMTBli4u3}u&z#l^u19teR?a6qe=)MvK`T82uk9CV}^~<=O+Qzny<1Y z&UH_3^tw9OFqWadR;%&qDQfo2H&}!d+kK3$(6tnA!7`_NGPY_%MF3VAtBd4IQts=o z743fhOZr?Ty*gx*O54+nlu#O*2Z=Q8qp54H8CONq~p z$7=q?Q1YScB{Xk476X^s38}_u{#iG832v)T$GgjMYOoFIq2YP1t}53|b|dQ8_i_lp zUUbGg_Lx`n0k6Q5Rb+B{tZ8GimPUV`>jCl?gCX%RKMF&|th=Sh^NJ|_o@@U%z|6M& zzJ3%xeMf_P(e7Kbyx5SkG9jO|3qK9SP6z*%m4K^)iyW_G=3`J1&@Y6O-6C|`z{tin zGs$dagP`r8RpJkj<6j?bom&l37z%B?K@7WiM*G-2`2L+6=J2f;rBOj(80~NE}p2%izA(z_bW*X^S zF}~U7ujP~9W|0hclOfFkO08XoUy-?lmB?k*TPyf}nN!!dn_sJY6R2b-tD(ito!)uQ zSWbGe+Vp&h{$Gu~4-J$PoJD!tKeE0h6zPrZJZ@`t`$f!Qb@Hw?bc=Ske%RdaUkR9w zAK0lmm0Y}bix*F@-uu_xDCF5}E~_)yY+982nup)+f9!4hjF8hMm3)U_iygV%yH0Xf 
z&J;i4PksRZ_X$T`-AzDIM?iphR1g68zXZB&#>VzeHkL+zIduPTgN>IPkjdyHIsJE7 zqfs7*KPYkK7M^5PjyCh<9tMPw0qWug3b*BeVCF9AXVD089xe>Eg?v{edWc)e?%UVO zQ@Y5yp&EsEBDBUJam!1UB#}W^Il@CLZ9gysMyUapQ@v?F-ES(Y>dSSpwYfh#hCD_x zgjk^n)+0BfQ6cT(OZrmP!&QReF|9rB#E$AW%v2U?~t$#QGkl<<63A=yi;1ud3CuzigO`v}5gvRm%Kqiq}By??&9JFc|H9z$;E zEVS#Sc5F79rFJZ|+oX0ZFK`#x<#gv;{g%xIKRTA5JZ*ie96|-dpw~^@Ns;$PC+zVtu-s{Bd{u)h6eMQ@FILBl!CK=k(??tG=;xK;2bNE8A?;{l0p zB7(Zt6!B&7la?S127=)z=S9J`t|#yl^32=dG9H6VD>2?knXUJ80;ZOQ+8 zQBzIx4%=!#2=bouJt))!ZluG-JCMSl0OW~?8XUewy#oxa(NhN9s?V6te)J7h5%`UcGycXgQ=ms)VIE&ZYMh^>Uz{cE&=?M)*gqU( z{CnJ}-%sdHc{CK@OdrD-ajX9UOfASN++&=*sNHio2z&Zsf-c5YQ*s{lH(h^3$g1 zM8ciaTLc2nvYl6A^JGCnkY4Uo`Kb(PejuRmqW)wZ;HdGJk_r$8y`pVBf4J=3`^ji!WSJ><7x6fuU`-ow>09mi~rQnvWNR z1I4J{?H3L}8Wx3tLqbxB7jnb~><d zfFKkWlXzi3u)pPjkPl)Q0scRdL18dX6#W69;CO%Ye@%iXAt4|Lg~c>p004}J;{l-% z^yK(H6Z`^#(Qw!$6oA}683u-g zq>vC42IE9A`2RfY`V>1|AXqKgs)y{I9}&!+)$!Jp8{I#UuTPhtdB~==cwX zbxI)kXQ%?*Xo~p-&tApZV=&tUpIIQFprur^=9&fr!uM!@@&kMTV+Ml!e}M`? zF5k>r|Gy8PgMwP0v40v_d~k)+jq;sfc5&#fA<0`fX~1i`Yrq$9RN!G8?B{tTZ|HiS z1q_&!#J3ExgTe<%Vo)H!MA;^u1HrctG6g|^jtFvnzx|UJB}IR*O}GUEg23}W76t?X z_TMfJG~Uww_gvmWjCuM*bOh$8(q>2^(5N^Z5Dc(ig$N17a%j*m2%uPmOEDiGyp`_u zKdrzB`I{8{KgIn>_}}_mP5wV4_E(zuA1RoL0C%eLpm#>lcJ?6ep zj)nUFyUqLt6QM#PXE0zFE-%4Opw7sw3-FyQ7!V|-1p=isz~AP?Knwu|cSwP@HC-sK z3<8REPvuV2L7 zep1H1((7nzY9JN7Bj1i)2nOaxEQ)QzCe{U6b^Xcx*K>@%c(uwLYK*5# z^;NX`x^V?-D)?BYBvI5+tj2Y$^We?s{cAcmYbys;t9?wux>2#RaV%aA^~?zlMP1+5z3z!V-^UEYG_4lL-rdwFTFg`neBJj0 zZr7IQ{obG0jE}mS7Td@keeSc=53>nBX8sJm&*}nr*&WK*9kj7y)j4_1#JqZr9Ho+> zVZ6ehed{Nmxu>4tTdBY7 zW0!aHdwrdUw&S_lW7hW0qOC&xY~DJ<<&N^7r=fCFsVAMDAgIz zvxdZF-4(SNd@WDo>a&>a#CrPfq0U?imlR^^hEu-gA z^L+Q^ViPi}lDLkb^R|9`eT7$6-&1qD0;@RMgwrxM()w3i%zb1OmAGn=jX6)D8 zGnaYYi~jU~=|1avx}LZnVx{kx?__}2j(#qz8Yz5Q#hRCXzr{Hh@u5qP)Y1FnPQ`Y3 zeRR9vijO$EI@yev@BdV}(r_Q2*A#h9m{<1kz0kjSUSc{g`>U~xnRk{{;�DSuH`G z{I&m-*?~`uZuy2;H|xCAO)?q&s##m2b++^Io|boaUc(<#vr*+f(&u}m|Lm!^Ccn+g zte4|F=Y5s&LqF?1C!>DDhu?E(ezEi6};#=$I 
zwO0)u7S~gGQK3oO_JZr`d~M&+Ka1r(BOjOLJfJ)0)BQr%$6;|>GWR@p)AuUBw+U#^ z!)tH*mZRItdfWPOY`;d`(|YqnFPqM|Tl4#4(_{Tx7rxc*^P8Q0`*1Oq+zrcNyK2$T z^ABwgShvqmEHXde&APol`+2Xe_7h>*xVn6pOWr2qe&W{4h5ue*6Wyo7O`c5V+t5LozN_C*e?}kw@pX)N zrEeBI*K1)a=bvnci`uz9?w83eHjlgNk6N3x@28$RKevZj_0}EtI)CiNkJ6@{Os@CF z4YrG~N!TGf@AGT!Xa9=1=|J|c$8>Hx+)jh3q2A{OKlQqgJv?T-&)c!So~Pu+*|U?j z*Bfp0EB7W}ubgA#n^WI6)HFNSGscV9x3m@4kFCd5So)_9qwT@cfjL-~xFd4>`Zf5l zX%3(FXumAJKQ||`=2uHTTU-ZZ*PkEkX7IeXtKM@vo}cy4$A84S|J+{282&k^!*p59 z*4@Et94fIrVduMFhaB7Duw9CU*28|jc7MP5-66eL{Lf5K163Hbi8HCq*Z!Uu{5Dx)37!NZ962?H16atbMZ(>A1go%t)QHV>s;v!m- zsoL6z=a%MGir3scRa;%sWn)UM?aZTT`sb_H^CrvvCg&q8qHDu_mgMgYxO*Re--qu` z4}As4>f+hm^A=}GFwpnK`ITk~G<%vq8C z#n09m`_2CXT0o`09{S0y)D<@rzGZ#n#O&nf@80!v_U=&02S2;<&!4^Yj}E`T^*hfO zf0XskuYbEDC*|ebz)q@d+Q&Lz5ZVBit*vw z&i;Mf3)NqH?Q}`_7XuyEzo{50KH*>W+L~XU`E2i&zISWwL+xvRwBz1(=j1DqmtGuA zzx#!no1fYC!%wUaPW;Jd|2%o(u4gvgV*B99dtZI-s;fJyei^y*jc?awy!zC;*FHVH z>L0)R(Wcw>ztK4Umw)>1OxDdSb0QD^%Xc5WA+`CP`#-pK>fiG2eslJdve6^Ix$MK1 z)}5DD-g-^O)c)Ll=gHZ+lxM%?y6=t?2S1!F`|%?;ee{)qCF_4w9KLsr>tAnwA@AS~ zS@-<#^j%**QgG;wAK3fX{B&XUy3*?US9+&Ez4KS!{i_?xGFz^GqQCRT-+w*lj!4-v zD_p^e4cA_ObNF5R(4HIfI$!(6`X3cPduF8e;kn-jZoTux4_B@Cyfyys@4wx4y7<1~ z@4Q_4aq6d+KXv9Cp4s-0^QDz99&UL33HyPe>rTJZaq^R&-~LSQgY(rh*Id#0y>F_| zt&H4uE-x+RXCuA8IWapi*ZI>+zkRCu*}8Gx=8sRPtIB3>`ZOo#`0MWgMT9 zD^AYe5&Y<2U?}VA9e=j&N=ySO_-8y0<%8v-cU!OT(28=0U>^^7wtAY{JhnQ2O`TUy z2S@Y6&CS*AHcwrf&0XiI_qnifi?6xCQ*ZM%HQC%vo_fFp%(iaQGpXMI)Nn^%^;z3g z(%ewr;Hmc26;^xdTdV8q?qP;ku8h9@B%}8p#UI5fZw!6p?bZOM7MZk`wZ|xPdo&-^ z5Y5px14mk;dp7P`vT@?aD2e8COQ56(+BQbZ+mq~D41JL|@rayUf1ds|QI5;$mao4U zzWHDj%#FLF)N3opTz9fOUj?nSEP6{+uRPHDV*1|;zZ;?%x#pzTZSZ_`Y-egovac3< zVnl>gVuYmGfEm~1^K1D;V>etA9kWKaEBWeWw78=(YtbdKHfoa%Q7LNLkUSsrqy1{4 zEj;?pd@=peFKLSMg*~@ORvt!trqSVrx|Rg21<+*Etp>BewkW+en4>N>VvRnE#zDfRFHXn3(O-2*&AnM~Ys{;i9o0pxRHMii z_01^5b4#p`4N5n|pWc|~jao$G{1$k7^+P46o_p=*Urk$`l6r}N`L_$dKWkj|Usrtn 
z!?n--`u(@lu5w?-W)tK(#J6BhFXCdswkOzu8jfGpdvZ&-Q9}L#@qK-Qu!sYOUD{wKYwx9;D${m+qZKdi!MpG9`CKbk9{&7ue4AW}y;qIL-1sVQ%1_|)yw;2A@NCO!QItxktJLex zDPR0awOdrjD~j5Tlzrb4#jI>qQT=$h)_O_8cU=Fs&-pW!FFbyCy+!T2&v*wHc|8Qb zhxIb7r#j+q9(=?;=L`Awc+*`LH5bZU@b@K>{t>7P!Dj)+3Fv*TMf`7i@LV#LDZ}4O zh4Qxe;G7Ymd<@o+1>+BztY-qoT7bGsC3RUa7OqEDm&?q9^##JbI41TdI!6S$$IRv| z;@6uF?MI9G^&(&KOg6V&QD=5q)HKOB-Mbal(WJ>BRj3GS=))MLpXhmuJQ{t#7Jcm)&kryAEq*HoeT+U{QznXk|DCo=-0|^S}Ex^S>9W z`EmV?7B#b0QEjc3=()5i&@tJfwNDr5h_uV;p6J%vJM_B1E@L-EEg^h|@0P-E&!pqpU74!xE&aKzxDSyzQGh||~ z)(3dLE^N1`HjkptZqeF{au|iVMr-8uS=nJxv$yf%vARKJE|Bq%u9Ni7MEG}D)SM2! zofBm1U(4%Fu!y%yyPNOh-eXZ0pv>QHiOHI!e`Fs|w*&0!oYR;C?asPI`#lVN%(|bK zOFr1DbA4jD%xz077yA~u&L~ItwdJ+(HbSytX|)IW^$;J!KDiw^H}iHe445!{gxAX; zV8K`w@UT&iJ!q%ITYO0F*iY6T$HMPaM{}lk$aT`@S5*5RIaU?y;(27jxYN8Ke z-Hc1qOOXeqb-a$S^OCL-{<3^kcJT9{-jj~gJhc9T?Q*-zs^{CYI7al4?B}YCHCHvu z_1ER(X$-q~d7K4I43+WYvp7lXC7FwOf^}%;GW#YBB?4H?h=5GWcky`E1(-GEON?jJ z{=|63u8r1_x=l`>3+#TdNUn3luemKezewLgCVRcQj>mwWO+fR0xldK;O?0D_m$4hh4S+77&dIP7_3(m;Z`Ret+dgX_WWVHB zv2^zUM)-I0?O1H1@rXD0U6r&W`LH-d<8^@@F6meuf<5{9G*0(j?>3L%2!5 z(LP3lyk2Jq1YeWEFX~hEF!+=n4YQG_!5WRHC=(U-rl3=k1v@U<()1yI-yj`J-6h}4 z2DkHkSpp186ZDVWqsT6U4ZK~jvY@e8y`^UiFxcP9`-t6Mz75$JVUkEQ*>ElFK`9ly zo>t;Mw`&iUNuS`0qyp~)P4Iy= z4z2d{*Ks$z#eb$N#KKtv#ksQ0`gPhX0kO$o(&}8N#v)9yu*l zf^W;(F0I`u=qGDmRE9Aq#hcWb)|J~JpF1SzIN3L?jl~06TTeZ|M-kn$XV2}B>uc1P z3mSa~+vIwZ5Bt40Ag>Em@;FB}LiDDa$bvACur+O`T;~+PnVxN04n&N>7TbB=60fSt zczzIVVcI0dU1N{*vi`s8$-|<3Ckh@dG2vc*e6HU8TQWN1KB&_tcV?m zvxQaWK0H3Ge`C|jXirC8N@IggevPCv=tIbG5d8|&7ny#e4$XnCdW(2FoPoWwdu?Jn zw5>~QhndZC9hwmIkoCt&kH}V28s++3?lfd#lgOkH8UUxcl{7MnVPhs|Ztjhtt z%{9q?E_Co1;YWU&__t$je(jW7!SCebjc?~|Z14dd4@N;R0u8(lu-HaAK(=UKYv3Wy zC({wX;5$3q{G2ovTZX<1`6)KNOy={GPk^>4(`T|zn%j4X_fKdK?RJ~^rT6pvbAo*%&gszhXqlhM#+Ot{z9;1^MVbhk z=wW$N4s*?KU8+jR$O`iA3_uTMizCCN# z^pIa#*lEm<`XbW_(jn46;vL094u!XKjE4gIjB!z>GTTHs51Hhg{rh>|v3f!BVsn8G zq0H-%^Ck!I$rg~$IaR}tMVK4fZLEcIZ*gbTLCpuIc5{`+_i2-}A63VA-zJd5e%`LE 
z&_C~RV!N_>lh4zX!@5HzXYYi;n8QFn#szwjf#+GflGAt63t5asa-;Lh429PfJioM= zr2GTR9~NI? z_XPCwP4vB_{$49@%dF1R++z>%ZHVViGkoac^@Dsyietp~SSOyDP~SX(F6Ra<&$g^T zf5_y_U5}v&TSQu-xfT;Y;2g+q4E;}-qZVYf6fnlO3A`C>l<&!Db-WG{PEMNOi8ElF zO~=&%CdLJm^q7pd*MKDuD9 z#CB557x6)l83OI`{tFwo&|GN;jrdNam5uM%u_>MrX~B5L>XyrO4AxKpw&$u$4g-m1 z)?XsOj^!EQ1Ibp@Rg61ie8X6T?iYyb#`^^<4eXf~J#5@bbg{X5;t%Cv#N1tUE|VA^ z{pkXe7(+aZX3w?yz;B=3%j0r7@O{CX7?*34z-4-5{^XJn+p}lvBp)^oA-=L_I5xeE z@QUsuiL#;lNVI<9`NTHup4`QpU8xPmI*=xZfG?~(@j-iutk?YJSZ>{GV&^`7EH*Zy zv4(}aIh4=B95svO^o4RJqbB=`m>XeZe3I25@YZRP^F@54SX0akQLKp%>TGQ@FE@4{ zrG}JKb%1O#_waf-1-LWQlvpptyC@y59u`>5f)At(RvArxNp)p7Xw1-f8kq&C_fO!^$ zd}b=}EK;3VZ^SbTv-kwzgcM6)tRtI?;-A=k zA?EG`jlt%vVrQoM9xcXXScmyRna;8~Ly->58M6GzD&u7*p0(q{`p;BB^ppQaXFG9m zMJ+#{W4CekAkvLz4?&l97QCDd&7174l-*w}M{a-a3gaG!SeK&T z3yj@eX2z#=kAwX#waIbTQD)qK7x{TD#pZOS*oOr8q3Z( zSbZmc&6gPXEz*eiP49?^b@Yx1VLQ=3VG^(7`zRs5$$gtPXE$n#JpUBCi_P2-QH8#kZzY^c^~!ndgt2iSlIf4l z!|S9Q#w=VLk3$%P%VbM&d5mV}ehgKR9e11DheIq8<8!QAGV%2sWxUOVpt6||jxfR-GFLR$<%+Dn{S7!ZHvJ+OnG9+dApw9J_ z@H!{jZuAa}fYYpROAp1l;`>%qAB)B6cw9|=n8!+%zp?vM%o!%(qP5Amemw{WjF zOTcdR2tTLnofS53Kzx?@Z$xjzWdEddHk)2Y*fDByzemjTk!`z}v!6i|ti(L?poxuR zE~fB^TsCC4>^)?XPsGXdO2z{^gP{0IJiEjAN|sZh^ZUt-@>~z=N045#vLc?cvWlIH zF!?v4oz?%CJea+n@Q?N3$S!2>Hju3l_KC72?4vQncXRNGy$j-%-$il?aV5L1SiGR? 
z6rJ~obf8a#dL+Yl+Baf(%so7RWPAC9a94T}{NVz?r}<4h&gH;fHNJD1c=JeNoSQeX z6T5Ie#l9P970U9NJKC8hXTx|eZPw)e_GmYcJ$37gY2CIvO~AYnK;&E znZ)Px?hB1i`)Mvl%Q5>5{rYzd_Gs5_4;IBei2gpE-V8j-$8*FxH}HH==iQaUSeZI+N@4wf z5Z;)qAMGnEXi=}0)P;Ht`o%h=pZLwLtw`s0j7RXCeVo@(IvdFAYOdmIB{E~!5cD$vdhMtS>@Qfel=R!M0o5b&g^E5wU3~Q${ zj2Q%7vh6jAwSl$}V2M>yX9b-)Yl2NZA!Dz@`Flj`>4TJaUSGrInXi#_EHYx zlTACJj~9554z%V$o3WF6EYfN6Z^$4Y2lV)EU%c*N8?>Ft{^aM&>E~sISZfC!&-ZBM zvV1(rIB&mZPjT1{b9ckoj+612mZkH&>R|l4irBZH*RT~)u4F4nC;m%pQ;+Jt70maC z!n%+y|8#-2C4rCG@Vf)%$v>W$7vWxBm*ei>GB$a=5^s|;y3UZ_BH|1BEkqOkzcmr) zL%bK`gMwb8EzVWYV_@7>N<7B(={!-pEYbf?^*LJRP)8oIRk?ak(I~$Wdb*_ zh;@Vsx9@Ib%L;8 ztV8T4zx@iK4*m98llTgKm}wJ#5OE)M`+6DeeaY{u7(>bSb&RWMt#RjxA-SxuUbYt2 zA0xS60Qw>(@$Td?qb=fw{g`3DVq4aJqyJ{rDMJo%dnMFOp*_*QrTZpgJ8Ou?5Y#Qy zBZ{X*8Ze$d+r#gL;vSBA?+=5_o`g8?X)P~N7}oL<^QX1k#DyPd`7fF`_AcNtiyAq@ z@AFw__}q&Z+Sp;-@RJueM{KToi@Nv}Uru9=1?4)D0%HbXywX8MJ@fTFU5MGO!i2!sre=W7TfgiRKK6wom)?|qGl*lKhVY3=u)s=nP-)zun! zpOx=ab)9?ef6sQ8TGEZOW0ev9hq_uh<}O>UyhdQUzPRez+7H%8yD1wm$Gq3|`U5s< z|F730z_J7Di@$06t!1q$maCLLT)bBHZdU}1F>92uVmUJE@7nT{)+yUG!oK(jvgK=( z>kx`R#adp!dX>`ugr%;>c;*P-pULu#VR|5&at--A;5u`i#b-lNg#S<5sI0?sQ?6_2 zdZqmglRmRPxf1q!-c!oeBAy*dX|_v($;m-mz}EH~KJ zhIluvQR>5Tfb~zc&ul8aSbviHLypUMTE0EFF4Py~{88(bbvsa(LmR~r@|ebhwg_Vv z(}Q7;2M)zh>;8Z`uGA;TsB)df%N0QviqWps*Ecq5`E3AnEw55tFUazZ@mPFMzI%Q7 z!EF6_{q@zC>&bX5kEu*^{HapL-m4lfZP#cEtCs+rLDx7xv2}$-l>_qpW_HPQIrx2i z8vR`!C$_Qv)e*rbl;<7Sm_N+!*eYdP^895RjfFJsc{W^Mj><8E>lWF(!Q@!%Ya`5S z2DF}7dA`y{YmJuGN*Q^*;P4 z_^ES6zNTZFx^3FzdB(Op%%>?EmA;ne8RN<4i(ubTwaMn_st9^hKIcXFzUdUzJSm?` zg7ZLNT_)E{zE)tHe7=hc_$jtJ^nD5HdpPuN`?ap4SsN^0!oI&`wKDE)I3E-tPnlm? 
zJmonNoX1uw?Uwr|_#GjEb%S{m?ARpz?;Y7vvPrTGKDDGJ2J!co)jAE@6!ITk zx3yF&eN6R8#`|~ec-l6F^cTM`9rf8WN%eYM{Y-Kyu#KDHFOX+b_%mZ_@6skE??u$9 z-=vf$p60s*vbv;OwF--#ozru2(nHHO(oL>QLXEP1$#R3G2)upUF;_x1u@>_+j)S*^ zp0yvu=lE^PYiJvIleQucQMVkQt5AQ^b_u(Bnf7|Z3k)yx?4eOMe#^DBulDhZ8oFyDyHB;ZS+fcBNi+_#u|C-_X8?UA09i zPre~PxL(taWye;f-up;5(D*=1$E#vX8(~qsG0U{CMNHe2HneRI_q`mCWIU)-4!Oet zeVFgm*yOv|fKQkY)G}2vd)5Ax+bi1gy1~-EJrqlNmT23o>JRWC#l^Ec!gq?nN5!v6 zc@j3Z2J(cxi^1YozAa=2+i0xG+ZM_VbvvX(%GS_xC+cTf_7~MC$3@mAlO;Xvsu&Hf z3E8K<-%a*)wf}5@YzpjZ-=fST^?Bk_Z5t=k81S05U5jh3tuI%pY!u)3-=?h}<$+e4 zlpBWwGAzbw8HzE}S$(f+v8w+hpq&9|n~s{#;I@SNl{%NGUr{W3Y1E5V{jLm ziyyPm@+c#|kzS>5YCq#K?F{Ke&z%!heTLb!Rj(e59+;LBK)AFNY; z_X=$^cem83uC=Itf=5TK^)Z#nv8KMM=X$Tvb7MhH&{ohVpk7saZZyaNdJiOkdal)T z_kret)`HG}qSxuU`$2O+!`JJ%OwiMy6`tDyMpdhQRP37~nPS3z~4 zGoTx4feTs+`U=!{2XH}0K~eAPxqkx{fT}?kK<1r#E*rE0bQaWYm!6vfdL6VKbPRME z)O$B{0sR$J1Ns4UZ=IgY0og(8L7#)-_UO4sK+l4ff+kEHGqEG}>p2;Nu2`2{>3;5y zzw5V=@~P9a@qTB!lFGG||Q1V2%LC^tyS6XgbjSJWT8*|X*pEx_1uH~PbE zIGjkFL)_Kz!Qyt5c%nU^`ah6&3QU)(j$q5g)nZgn)ejO zDDW(D-a?Eg;Pt-`cryb|NiuNuWSj?1?K2EqtTAv7cP^f7;06s3o-d&GYu_<&9puO9 z3&eA-%D{Qa-*ZGE@uxw(YYmdGl9Fc3f|J|nJR~!WWIEOv#Q7z_OBVBSy#i}E@q8N$ zoTo>ScaPX+twHkf`~~x8Bqj|N$3^g}3_;#7?iV&2oNYN2HF=G}GIfT*;#qlC$K^I0 z3vP3{xty&SrT)0VRR)XgB*Z<>g@imQALuw+3vs8Zs)}0<-?_+dzlrabox>L!`4j#) z`b9|pxdQ%BVZ)+El<2ZE`C^l-q#=uM%S?7vSbY;;5f+`c-cGDu4-IVNV?Y78O63ttEQyLri!=9;d+p$c4+$hKq7H>6WJ5Tt%G5d>+k2+#Y zOL}n`_HLeU3w2y;lxGVLx%`I(`3H;~A4h(G%7?}oY2rcSy?A~01U^ZrI7CUuiLj}2 zu78U2FGkLOTWX=v-j5$=%-~J@GctRCMULigAZ%D{;dPk`SMc4OE)%qif7lUyh(uld zANdoW2}g8Xwr3>NC@g?s&Q#7G%j=vaChkQu-#yti3RTifqZ*ut{SD9~xiK}yK7;R; zI^OhR9^Y*uFr7#J66`n$Haqn(qdfuI`hQVT(Fri!!uOcKKck3Q!<0g5udty$Y7|#G z;_zRl+ozOu_C!~=KU&slrej)JXKy+VFYDAn#y+sDQ%}c!Wt|2(-cZ(Q#L+SN@~02z zxXUYPmqWXoXqRrqZu}VR47B?hys}P{PFHFlSJrtW9q%pcoTM|B+J9fx8O3!gwI_g2 z$KGX~IdqK9^o%DE=VDcXDBZkdD>wlgR=P}Im!Ntmwc7a=w~5|_UmVj zIOO>L?h~ZMsm@GvtKonuEPxhKq8otHmrzuj)SnJ<#YqNBK5S1`Ibg1_9!oNdXVfJf 
zM-6)SUb>sJpkdEbu$}i-IJ`OFO1+c+(DVI89oL}5&*ZbrJ9HCmN${D+dllV~#)90K zW)ftGdYaGY*#YJN~nl zep%uAh42Ru{-LXKeDq6&Ls)xB!S+T5+RN8$?|&}*LVJ^_y_~CB_ME)H@Jj;tKmDYI zzbG7^PdD!+Ur)^On5j;_sodpwXm2sO|EiA?)-kg31*1614c?nU>@Ott9U?EDf6VB2 zSP->;3H=*y|46KaP{oQ#*&4%JEtL-&E8;z=&^*~yI3@ppnNzfHs(jGs>5ozxgv0WU z?9YMyo<3xG$0BO$Sqw{_q<_)~+3!bi4;2A62&HJ=B{Ki~od zf%7`m`5yEHO#oJ|l;_zg8{c*$@X>s>(X$l_CPx?KtLHOT{$Cp97(WG_aOJ|~Pg;IB zIA$T{`}_u8Cu=X= z4_IVIXDgQfIVjM3Hr~o{xiNbVlWwUdl8sr`fC~u=ACse){aM-zjcQB zXMx;5y}ncV=h(Mh@{hYe{DZ)z=zh&NvyMozn1vkbyTw9@Tsu)GJ8&<$W}ut-CGs1=gqJRu*X66bl0}0>>HIm zBfsgAJ)8Soi#aXV8xnq)T15PUPEs69mCl%ga9n$|_A8F_>kp6Q{YU=;#*DC#2e%&Sg zXLniuH;zmC=Sivsin_V#J(lD%vDi1xbkJ-Keq zZS{23-n&i(#{Z*{$A1*rmWkv4+p_ULtODYBhV1KpOmloGYgLc`tzEM0jy_$TkADc* z;yc2|NAddIa#UQ;?)73-T$;i;qW1dmNxY(` zAH9yQ{~(Hjn7Xu>4D4>C-3_!e)2=7&deE*r?MxXld(xt5XUy;qt+ZCzOhd3(ADH2} z>lKAB%F3)NQf>KHl4>iV+RC76Lj$S>$A6sLR*K{Q3fcJIsJ$K+q8@pOvj`Sp zvUOvv{^uKA1B9_|b7 zOYV!p0n|MzbqZX=KZ5L6SE=e#qyDu{BjBY_h%=a}tnP+Uf)>FlB;x%yPKw& zngy})!rKMZuv%D=rD>X(6`5*rfkR##=8{b#%o6br8F}j2~gt2CV zjc+~x*i&-Y{h~^MJ<{ZsGNvzTHj=ADuos#yx0>7+$zOmwBHStz1gt3(3d{wH`@`@Z zT~fmkA-|nM*v=YE5bH~QBHIWMsjFn0q2T2_WP?1!G5lDa;Jt2TMSO0iwR;=R&k}^T6BEu90Cd0EhZb?l~aNPcXq?`?W6Gi&RQz#H2 zKotC2wMsl97DcTTg|i@yHeNY-X{9agABxpOsen?n)v8p`5K9P=7kLv(DfET%ru9TE z2$H@)U(y2J;gLu-6an4ZdKQkC0+rnJ%xuz0vT3_HKQ7swd1mH2GtX~!XJ@uXts+tz zy0Gt}zF-@v4S5@e+GXX4r|ba_u9H+57ktK_Y^5x5T4rBewn~=yNNG^>2t3PB6HSL0 z+u@hUy4@sC7gE&o=&Gj_;Zm-gyN*s18 z$ykxfX~MaLMouH=G+~@Z$t75A71nxNxz!RE=V;nS#RYK*Jf}TQV(T51j#@`Kmr%xqs`-aWYsjHB zf^p!7hh&=>3bxmBT1Y9k*V-(OGhB+ z+b3a>HnY_Nc9n2tI}Bx>wOb$qFta$$ahh}+Z_g})QFc-(cPIXoaLID~JhB^wo0y{HaJqCIhZcWf?O%SdG55ZnZFLYH0=*NghJJ{K z1ZL|otOEqgZP2H-D#uaBd1$T~Y86mhpqDYL7~)6puiRE)pQu(sPe!YwpkwPCXGn;g zn8iYOl1r8CF#H?hXd;*ufElmOf>wkungMacXmu=fO)cqjjBjvQAS?_}>!7c}N=TWG z?x#sbR}2D!B=A@Z)T!1{MNFwH#;eoN7q_;%e6i0+bJN2xzH&iO-)OZQ9ZyA0l>lB3 z9tpX+YEe$wD_R|5uY;0^kDswsL4b-@63$}mM#l1 zWuR%n`Nv>4U|~sAf;iNeg~q6dwu)A#!dWe<1lz?z8qR_a24~^aqo7jBG32s=bSkz_ 
zG`2w&=%bRx$C7fOn$V@CLBt>&hX$H?lHi~X;KbGj@RwjUWZi`7OeJn`l$2fz9dZW! z+zd&>4jL1n9ec5Hw;L7nXrqTmq|22iYR({UM9F(wYjQ5`H@V__TB zEGW4SG>er>;RU$@o?C?}t&kM^0Ef;of-9z2apVFUa?vP=6vuQr+{1_(11g)u;~>-q z8mz)$2Ut__tP%fdg-{$;9p#8zj)tum0AnBw@pf#dpKT3LJBYL3*wskKE@)o+Ee@~g zT|FJiV1~h$!{`+khdR9T8qwjXAZ$^u`P3YF*?64aAKy>r_xwfL(!KfI{bcoR&M1EP zlQ`jS!PgwI{ERz-;8{We*Tx$1=3@GfPA5#qVMdh3H}+7*@(cF`$vk*C{cdc1?O@QL zemObCCoaGwha_@#ag{x+b=2@-;5m+%bjlH{EJfMA&G zHnh79+ueprPG1EVAZUFd{*qD4ZaRnO6`UUT)`54{kr$wuP^l7q`xxFi+yp9#64>rKc_y47@oogZv;l%;LFxPEpcN?|~hT#Hr zy+B<@+m29!UTD?EaN76|+KDXf43>75Cv8-BX=A!e8`~Y)Bu<;rK|7D7UC7YtmtcV& zz^UB@PU|jkdgs6n0-PnRcjC*@UQsyd66QI>xJKcb5N0iO0-kXjE-u${Auvra#K7c3 ziSH7c9=RvTcYg}zCuH!DXU3tf{4^Q28Yd0b*Jj+RyB3t8)LrY5(GRja9U3HqnGuig zM6o3L{sykdz!?W8rUYl=^JEX0irMjmo3xf`on({N8&+Yf7qM;^tks^@y3T-g zJj|c@U{UiCUMw!_pHGTehx@TEb6HdKHo<(9r#Z~U+KdS_#Yf^P#`sSWFG@7li^wJr z4HM5l%F7&{g^Bsqe~D!t?XN9sJj`AF8sn#5H=^}aqive;x^YnC^^8Xn={{P|ZM4Qg zWTR$}=RMv@8zOO+>U82niN*_spyGI5UVJKR^B=)AopAK$dk^h`qRl`botO4eaW zYF3s$rKghaU-s(P?DxCZuU^T{yOYIE~G#bGqUMNVsMj8{CJVs4si088HV=x&&O%{rq^jrxh z%`)0_;_WhP6`0ge6NNa-;niSLL`_DB7c>l>F`7Q4Cb=CZw=yOVr=syH4L|#qPfa%-R)C;$#qHPr%Nh7#jE^u-}1wx zm7gK4{=2;DzstA&*CdsnC9VEm<){0WA11B*h0^N3)2seFee2)mzx+Qz`S*I1{{_Aq zm4xvBJ4Mvt;z{-836aLLL@H0+j!t-v@9^PSdQ}q7QmH&ocE%Is%hPs663=p}JUdG| z;VCWg;YpLmvr;P0U7hi$e0i*wCGo70%Cl^HCp;^+`|!j_5M16*mwMs#*;6V=f|Q>cp8g*csBe;5>KI2p1*d+6YI+(m&Q{fm1lopCp_jt zAD*m>l6ZDY<$16(o^W3t{(>Z)QmH%}3q*LzLSxAM*Ucv{8&!b%FF-!y4MTP-nZN%5 zgu1rD19Lc-!#LIY!W-~huT?CQ^9gd`z#W0OH7})QX-az&^t}usUop!0N>^-iqYyIg z7FWno+(vv0U*--tNCP7Jx&n4X8uAGbzpF29C~Ql{&~@(6`}=i-E{4!Jo6Huf=JWbt z1{m5j8^5xIh&WD$O9z7$?gUK}X%sT8PzWV9ji!N6uRi@F%aVJha(b>c)@Oi@H+;MFW|fmjM_u z4mM=BdrzT~n+MI2@!!bF3QpO`cA2u$nB4+s2lD|o@jGF%%<;w) zg-N3z-~7fJA~9R$#Z|JQC&_rGkg>5N<5~Hn&U|Gb?^oUAzXW3oe)gVCKN*Ett*rvP zO1{KAP@e+|rJs#fRgmvP-`O99S6|bsxu%&a-Fv~Zi~;l`(k(PNet&xu$!P_-&ggG? 
zHu$Q;Xu12!(AMTB_v70gK7&T}GCiwkK1=er!}wgV@gxin=!I ziSJiy?c-HjKdaU@4lns3yjpv%aM7@+YAvt(_T=xRNB5^Fx1iI+O8j zW3UVyYXdr#Jx_3~Xjx_|^gRyb?S>)73E_O`cdYb+cm>H76{o+FG01zfv{Bn)=tDg3 zTrcW*y`1QIAm&Q`*Ui-PCiG%F4-mdWgk%O`4)}J26Z<#2J*0i3 z4|@-x{|?kwU!zUeLv_a*f zbq9@YK29|_(jBC1KDd7|1`Tls$(whIiV72fs6rtG(oKuyZ{Bb^QC#6vTxkbA#l`3= zoPX|M7y1eZeLZuqLtlyxeHr6i`kI)7`g-C(r}~N#^kp7>c6Kq*mznCT>e~v{V{?M5 zwKWIZH~qC*TfZ=-Y7^*?>gxp6mzC;z?}auOB2EEP&8bZ<0n$j@ zr73{4Zl7r#ARR8}li-QRq{XMGSd-Qso0blR@n5_hB-5L-#wI#5Tlx91?t3l%pVC4e zmnPv8i~Dboc;4*hB7?DpzYp5F-o^6H#=uzaXIYXYvz-2vvaJ1nYEREYH66f$+@s;W;Cn=Wc=L`!a@S zctAWS-=sV?tvn8%_YtUOibsESgU7gz@@QC|PuB&)^B2k!&5;lKDNw2O7P_{U5`F#+ zL)2$oVBY-Y8;mz^o+8zoUlUk{vMled4UFXhmgOVKEN9kGmi1pVEWfV_jAhdT_O2kA zB|%{6$Flrw4NjJ&lH5v3V#o$>Ieuo-O6GP`1kV}VeX2vBP^!$Y7^;R(0`cYd(!I4l zPPzb`Kx4lrKPvE?-^=if3y7zAzBkYP(s{P5raTEO&z{wR@J#pN=`WpUpultZONQs3 zfOssgd-GhLESYD~D$4UX%VSyNSSoTY1(F!a#N*R`^0kSyfGAv(8 zX4&^KWtq>iRDT>8%bP6AZpkbU3oK_pXILTwWcg(d!%`%fWvhv@Ok-I-HwDHron^_B z%yPTHV)=|=85$tVu~!+EO_Es*%PGrvmSz3&z*wGPSvE*!`NK$AzTCsG^b3&Xn>2=H ztz?!30?Pv|%OYc7EDy0Pt0l9XUq)Fr?Pgg1vn(){EwdSx6_Qz!1(ty<%j^JI2C*#5 zC9^abD2r|v!*a?H7)#D842wZBi$-9%x|3m<5+KWsm)W_3WR|a%QkJ{`<1(wr07?#livYeg8uq>9$vN?ycOl4VigN*C^_*iRp6fY>O8lLIFhcfeA%qQcf7A3srpU&`pPda`L3_*4Hx};pmXG9 zib7my(f`lu74~d@>lNEuWMsYK>5w4Tdc~e4biG0WQJt<&4BsxYJ~4PZvp#{Fq4^%$ z{jN{^QtZ1v@y}wv>l0Y=2-M-OWYSUr$F{IG!4!&nTOU(l5vFp-BIzVp;W+14)f zaO`|9m4_Y{JoxxiL=Stu{q-);Lyy?pd%DmAw@v*D^xaimZXER8=7evDK;ONbs+}v| zcg7SkJ&Z_O+0Fh--X@}l=xu@?=3$FJu}xGDL$-P9Vc<5udbqT;D?RktCawo2_)<=31qAXNx6Jq#_)^Z^@2`scQggES{VIH9_w>0hPeh-Ed4fLec%xsJC#ugy zd7k>5pXXPf@p)b8b9SD%K8tcY)8|7wdi3?s=d#mt#}R$*nf!YgwCO+2{yIRqKG!Gt z>T_u~+w;}UB6^75%;*8XG2@@!>{kyDZ1&Z|pv`{u(3b0?hx56t9!MYlBbU{~2(&ts z+m#+LQD^rn`KB)PF#5r-pZ3thGavt#xib%o;@H|axS|q7Ma6*8L{wB%HU$J_6U4|W z3Yeg{p`#MkVbeiC9ARh{K?G4;qPQiliQCmA8ZjJAFnP-^l$Gb3=bunbk}dNE!S+q@({C$$wR~@ULGcI65}Cs6P}0l z&g^DszZX~#ESdBgE)YKrXf$~S>; zyqa>K8n3nweLY_JB~#S2_RuuX67EuX2=6 zvQOETqkMh=4;)l3#wR>TXuMLCiSYS!R<1XbPsOj}NInm`We)*-T6%rDS%l9xTf1n` 
zmqw16IScfq?6p}@pf4po4nBtOOU}Xg@k)X3Pu%*)23-B4jONn?w&gEn`1(f~tNu~O zU;ii*t$&p9)IZh>@oBRD=f?~_$9UeL?yt68%2*28bYj2N-mR#A1abL1Be6aIRf^%E zvJ{;U3EQ-|l$VF(QZ5e*N_lzkDdq4mt`y~g^zgk>l!qqxR?ku~9^gSj<5gIR2oEKH zOtN6`P?~jwnxCj%wZk3oki7nUjR+6+osKyI?^HJ@O^5eaMi%h;&C}gD0q@T>6 zXD`ja7bO@T>PmF=K-l6pORzkgE@AR;u!NU~k`gf_;vZaELP{JGa};rR9hUn?ycm72)b1MKqrkWc{NEU;ij#)jx{(>mO@H>mNlt^^Za!K0Acl z_GR#CeqHTI@>v?Vu@mS$Y`p4Ke^^YjJ z{&8Kb{!xgle-xtiNjP{-FXXR(6k_Thh5Yr8LT>$|0OgbHQx6JIK7GmhM}Zih@F2nZ zN4^N3S?`ZEX7KsRq^oqj-pvHOR9`t#^s@+`!N2QY1AS>n?I>8U-{@Wm$Inp>-a~lm zA3k_K-Ns40{>jJHKk{ilePQ#K=Hu%h`KyW}gt!&d7xl_ETR`aw6a zOI=$1s!-rv;r@QzfOq${PuL0oM^^aW9 z`bREL{Ub++hcA3LF?q1*d&7q0;piCEP|&6!o4d?!Mg3zkmj}1;632fHhKI=-gokkQ z{$In(Lw^mIhmIOv9<(_e9&Y3?c!2rN=Ab;>g$EwY5#s?KB*eq>H6lDLK3;U2!NW_Z zJFe94zlMca03H&0Z!Hz!VbQQ(wZOY-A6Hm!IH?}C0C+cbNytN<-+y`Fco?n7kmz4| zHin1Q*)$K~u;G%ju{=a%GkFNe=HD4=f!Nc4iu2Shzy7W2o9nsF@Oo~$$yZ46h|>gje{uH0>%iFRzQ$Twa6Kyu3Q9IlK;4qr8%RthXBF zwF&OkyjqM`co2ivcbL5TWQp+l?eT*y3|?zYzNE&l%B^AG{=9m(&!e|Rcr8$i?Pi_5Vz6{Xdg29>cwCGf}=> z;PvUCOfkOUK@7ehH!_7Rxo zJfsas*blsWX|)U17d7j)?gQSr{d2qs@7)R;9v&q6H#G^vgME^2e1L5^ED6g)&m<-f z&567`)Fq1X@K++9hjB_F9t;c0Y8gCKH{YP{&vsg^+yL4%;K7w)E$3mlHJb-Vg{8B^ z@Bft;9_o~6y@ITND|vZ1uH^EtL&?iSj*`PeqLMK_Ko93DQ69qKUI9um9^gSjJZy*; z;lZkRpT8MAL|q>0MDmdJKm+UTM|O2^65+wb*2W3e+ru}*`z9D#p*fz(!;^Sk9s6@8F*J!`zM@#dxH46Gw`lHA#nxXyAMaaD55z$%#22PAU*VmMtM*{ z9>zwC@c<7J;vpqUgohm?ZqH}%V6;(7%^%8plC1(fR9g04F2X~I$K+h#-MpI{=K$|^ zkGlO7cz3IGG|znWX_j~%G8q05z0}_;`)0kXM|22bt`#+>>NgMv>|l2W>|&zP$Lv9^ z`V+!lTLi9G)nPXLHSwcm@R!Z>GY-Aprc^FYy6nQpD+_M z8;O@7X&-X^Ds)w=nT_OAdEwGl-KVpk%6oPy^%Le4H0jl;yh#V^k`9)U)Q?Wu5l#9b ze^LuwQVSVLZ~IV53zi~DfAQf>YN<%tjVQj{H=9)t!3m*qVrmzc~=YOwb$jfmy!1$Zz}H*1(Mg0llKQN!yv*MKJyPG zcGM+yl#y6XCw4{?uk&Un{*g)yAMQu^y9DYoyU57ggUP=4Nox!Znoa_y&^3WV zhKOmj2z^waf6YTg1V|U5)QM2a5b=d8 zB_bXb@o!hjB2siBQe=p5phY~Ii(daq7opOLP{|PS*(6GY0u^y)l2j4LGW5`eRb&Vm zK?}Jbfe3MsE@X9E3E4K05)y(6IWSSOkes#>(w`P`Wey@_sB|HDZ6&0@g%UCu6;k0M zS;*S964IF#a%wgrq>pqVC2b`{kknjPn;wRscb7DVYCnyS=iHdb1!KCR 
zF=ygpR$^ja6O37JkHpl+#SFy6Otuw@S%${kZ;y%D^cgp%nVnEfPc-H%T+BU~m=1z5 zpV(3BPX)ckcuXj<(j3GcN_d+D+tt8T{)onA`>oVEZl2Ef;(v#$2vLlG7?HKeL@#TR zx!!qiQ@uHtUduaahmqF}BhNrkUZ751P$-dRkdY4bc@R^~HD1qwE=MAORCq}_jCc`3 z7;XUn>%)2Srq^>%=lk(Lfy3tn(xS5#1I>$p<|(Fn5pOhi!D*g(n5)@WP;-SXV$)}~ zn3(#Qn2mxlebAV5a548E;>Iko7mE4J28r1n7jqUS<{`nD+t8TDY%no9V`5ed#`HyF z&d0?(e2^PcD;V>+H4@VZ7jrfyW~XsNF)PWK<|iUy%poAAW@a5+udP8dyD8tRfv^cw zzy-*H2qXHSesKM*)c43I$IfiP54XRK&<8qZYtMT%b!bj(&~6_{FF09CdkePqj9V1eF(4Hgxg?^SKWzC zAL$!ck&W)B#qBK=$g55}cr4|?LFmEOu{aOlLssQ^P+`vUAY=>EgG_(P9+afEKMw}Q z$nv0KH08ky=z-Z7oCi^|J=kH!@?g$prU!rfO7_4wrTuxZBUY9NrdE^(p1a^e16DW> z%w>D<$=fUs7F94kNS!9xgS@2n=fUuJSss*(q&&!n9`qT7^I)Is@!;?PmIu+Bm>&FZ zs$>rg65F2#pC-uiz<31Z!7b=P;Rx(_;40UH6Q(Q=k~T6ui1U%`!Rmze=fT)SSsvsK zr#yIfH(Y4M5_?@F+k?~nSstt`XL|6+Te1gw@$Jup&y!?%U@(mGU>Wovdl=3G_^>H? z9-QyT@?gydrUy&BBzvHWZGRp(q{#AMwFTwDQRqR(q1fvp*&JNy%krS0jOoG8o{~Ly z9^L*tIF%~P1HB=X2hGrf^dZ>KBH7nP*G*U+l$J6*nCBtc1La5U&jZ&LvOG||OL;JL z5AfjCyEqTRWphx|hvmVR^-K?bbd&5s)|9vmxXdJyCy*@K0P+n)yknX){HGNU|*gC5kI z;XJUC{aN(io-7Z(Dq?zYWrAc6Y9rg92iI1~@<2O)^57KoVDZ~H4=QAP@YIOq!TGgJ z52iXv_8@#=`|}`lwJZ-7no=Hg{segN&=fl+%l<5S{uaxFZwr|oeB&V5gWL1lp9eK+ zSsv8(qdW+J9z^uV){A71$<2l=4{jDPJ#Zf{*@GGL+Mfpz*|Izc?@M`54n3&pi}Rpb zHV0kU9^A`kdhn&aWDl-Jv_B6X=E(Bkb|2tDHRB)rDVm?b?BDF}zFxnQ+)id~pJHt{ zv$j2dWNv4(wlA}`d){Mi2eY=zSlczM?WOmb+eWp_?dh!TQr7kz*7iF;F}LTlws(Q; zrG_Ui!rptNw?6gTfQF!azs4YP=CLW=&LO7iGbz4AVk42(Kz`#~zZQPuthKAL`#zYs z2s*pAp_`r_G%D+yp;MSe;@eJPR*8Kzu@*UDmYNhRjTc;ZR682vsM7i;gPwFzd(RHe z4x_V=7t20_%AWa8b8~ZgvLC4;lA~fycL^$X_kt=eCXp(_fC{Y&D0(gB(_XERa;K0j z<+2nh=A@K~l$0yBK+4l3M9ScnNO7g51aPDrk|L$Z7)tppk(A;Ar7S|FtT%3jlye1a zDFb^*kkW;eGMtif+y+RgNkpV{Z;6yav=k?flnzp)tmp})R47R)R!~YHDkZ&VE2JFA zXG@8ZBBjv?O6f&O`Pdpr`9_IId1llKgNAUV%#b1_l9aM0ft1n@N|}I4S<(_Ix>nhc z$CmQ76e)M!f>K_^lTwJWK+3@cM9RIlS^@K094TWAB}fS%rNmNF^q>?AR7$Xz6!Zf& zmtadUlOmzo5K4FyM@mQ=gM7f=Fls@T-?t=4+1VXRIUhqx34v0U$0Abpc5j80+c|70Hd3U#O-ga5q+A{aq&$g1q`cD_ zDSjL&{iH}K>;|PAiYBGFK`9GSDeJnmLdw}}wv<;=`bsBK%1}zm(UCyP57CGegO*6q 
zbz29Hlv*iLQVpP#jUSOxMnWn6PzpH@H?%p?fOw%vYswEW)^-4GJnKT&tK&Kv6BjN_ z>7?}NXnbLcL54egF^ru7UyMy1;fsl*J$x}WoCIIY!^gpw!9LUAi%X?Hd~x$}gD;)T zeL5OP%(+xWK2n6ZLp%)yw=OY6ngO^|QP^E_^0!Frrs<*S37@x7<>YUr40H6iOq}54 zZ>=0mcv};023gPb6Z;b0R+^54w}qyw#@`~x-%{gmRbLJFG=_Uxyxw!=03yVK@VCoyyrUW&U-w0-rJ^==iTT`o_7V=3MLKYi|H5S-^??- z$@4a}A>H{RjTQb9Fz?`Tfqh=bU?= zK8jo!+y{ldJLhrS<2h2o4PhROQ<@p}qxw3)r{9g@&^1MLccFfD* z?#tljfURRWd-6NHWg9E}+0DJpaf*AC^L_ru-N32w>{D*Q^Fkr-SY<}uN@3Ss1zg_y zZZ7ZSY%XsWCpgc0Qbk1(DClW!@{y?12ueAdT!_N^Y2=N}8)W$p~_ zSv)96$GIz6Pja`G8%l*gKf-aJtK_)f;gQwZ%fp9z(E_oLZ*9E%E#UrU6~n#WHz@Am z0=M43uX;IB$;Rzw&8rap#UqyOf{0&be2AlmJAu>7S#ZD2Iq@7Hu&;)ed=~lt z7*7(mwOp6HCN9Xao$KQKl5Z;a7%tY+I_Z|$^&r^wyDN?Szj9E!esc%MeX=}*`^`d* z`yhvJEqhete~|}wa}UltDfaO>p23_p9<<#}A}@7Ly`{Lrfcu9ljQqc1P~4vh+~<@G z?gKnxNjisU-Oa5iFW@Q2wv;F7yv_q0?`Pdo+^v9n+;WCHvV2h7 zhXii#%na_6B7W@``FJhgq_USgflU_nzQD7yW6Ts0zskkF68W)n`VIEgVIfa{I3sV; zBSIeM(%MoY;s#G1wj(0{cRnEYmBk2y?-Q`8$pe2BZ1_jziHZXoZ7kT<@6|4ibmNQ;CY>D+VVgPc~|cHu{h zxN|vVv42*IbAy>jNzdBy8_4@W$U8EvKMsriD$f;p-YfWgNbFbsCr{?=I#%+Ez*#U~ z=>x^w5OWyt7T+w=(6+c}Zz{r(iLcY%lNeT2QJ|IfWQt#`iATRX3vu4+#)z0+r> zWiH^{9cGs|1EH7Ae!yrqnyU1BGAL5bqqL*r{@~NBxxSFp;ThKU=N$jn+R=IgNl_i! 
zwj1iR;Cz00a5zUe|K?*HmDDtg!f?W`(gF_XOBMn2lX*h{nwrhv=-w;_$6ZHY zUL_Z}ce?OE?t0eu`390D77+9mLka{2-?%h@!II3qO|ijOZZ?CH#484ads6)j=4BYv z7UY@@7NwB4&mi(#hRA=ZW<-8jlSO3ZrNME1T)=T{7dy+%8MH3tDCG+*{^;60L*a5| z6Zvr}i^wlhg9{uxT@YEs1xgQbf#(+MIhjudNGQ zarF|$;6GMpF*x!%fE;2o_srvfjw>8c6ZcQK&xlQP>^BW)=f&#)`n~{^I{=XIjLjDS z&}Zh{&ln&n8&G&L1N7S^S%5qj2M4sjlmlwzxkB0@lEe-H=r{{fddujc0KI-C%O(Ef zLY7N>a$#_RW*z{|ErLM@_XVqW1{b)C_36Wh6xeb(OW^EmfpymrxW7aQtl$F22p>Gd z?I`ayqDR^EF!n_;`jm4?~6&i9+` zdq3LNr~N@=|JlCJBqXM#({0_{A^)sx>#a6y_+htxqKet@&c#_a-1^Pn2wTK1+g;2N z?wiXIc8UxmvlsPuM%^^Ri|7A#gr9$X9fTiD9TZ`Y$`FoQl!frs^MfP&k|!~90Z(F5 z!DNAuM-|!3`t%cb-!#HeR}oeUh#cX6pH(tf*t4p%Zx8*+++W?CVuu13t8OawbWcCL zd$oCe+V94Fq{Xx=9YrJC4$D7d|Jw8%|5x>OT6Z$V{U{-!re;KFQyYpx@3x5(dR+)t$KqJk`& zFE#`VHTplvLkTR;7Wm~?Spu(oHMqc&Q$<=31;ZJhU#(UyS8ib;PJeJnf$RU0C2*4= z@ap@E+SZOS%gNa5;@vc&ZB72lo86X@@kg`UY&^8h$tZ1i;Cf*)Kk@{&QvsA`*?|3) z$Fkd=WMqDJ+m>{V&Ti)=;|H+qI)Ljd^NGCIvfIvN;vZ1|^kkG5`w79b5Bq~JXSdx+ z-;20@(3ahHB;yr?{|;R5FCo-7^&HOcAbdpIP(Qm3*Y^>5fmWQKzlGR;6zXrO!0`z` z+8_Jnq-zt}Bj&~VuT8@FiS>vtOvc+AP`}7y*baJ8zHc7dqqYLusu%GMkVo*SuVFi% z_=EB?;tAi4`q_72f8ZJ7pJmuiG~oDOlp>#~k2&ds{pLy9e}ec@FQNTiWPD;I!T%^} z@4)rc?T9~7j_dC!!||4_*!De%?N~Xk=W0Ryf~4Q#C-&Kh?Lax&TPer&BIU?;>=m3J zEk%CS-Pj-R!2Vz<;t!Dd@zu!Z#<{4EO5%m<1)|Rj*dNeHe9&;c=(FfAxF485K853b zPhdN?1?5FrP#=Zt*MB4a86JWCzSYQ&kL>5*Y~s&FMBe_P_h0HoTrd0#t`~R;+lghU zul=XQpToGG?-iUMeVOR92l)$hAb;vEVxJJs5AQ&IqV4D}B@bac@GQ2~r*VGdNtBm> zeCtl;&cSwU715XYYh(r4U$c>)_#EU%nM>@^j{SDxPcHBWYcl1-w)zU3&FRU_+Nn7cO%O4S+PI39`PmCAfM6Ih$pfN z=O>nuc(k1GSBmAHn|cYMdWl zhWJy!C(CcIV1MKV)F%+a^K9**@_jQ2f9-^y7VHnK!2VP<>Wluz_WK?jA1C}N8nzS95_>_sVCRvo*p8BX8eN0@ zC`)m?tB&m7S*Ty4nC!n+?2kT9^3_`88~w?gjD%6Y;7iy}yomE-B;JNKVjqaVEdTF7 z{d^(h^H;67zU4`RcMGl;3!uE%W?V143FikllJ%N!z5KOA|8>OQ8*x0==PVzPd}Sy8 zjQNwDmsa34YuPXo&`yM@imipNASkh;d;Ruw6EnQ#2X~_Oakg(cQPC#{BFl~ zycO}G{%%sgOQfD;>o+35XdgGJCt1Fy_E<&usKNQM#e{DS?dKzS0;Ha_H(`H*#OElj zN7@M9XHh=xU-y9f5+B*m0fIjY{>I{4EwN86u4kW0{BswMk3Eh3`Ltf6_cwJoUadwx z11kDkgyetw3k2Wuh)>l}ew^4Vep@edF7M@hYe 
z=UXd@CsHz>)NkS4*l#EN_-H=bh5F```ymV2pW)|leEeC|FW!dwxVE6YfFH-J&A6V6 z#2eS+xSo9j+9S0N*TeeOnpD={`avJAA0+jcO3o+Y8p3xSnO}?ZqYDYYRU|%6C-Ixy z-=xU>LwE!B$JS$8A^9du>L(Ta*PV22CwxAGd|94G{mbtse3JVXC4lP%HsJUu(Z{k1 z+ZgW{pHq zSiMK=h4!QIpVbrKU+jERh5Dqba6ax|R{xUv+(qJ5nB)V6oX>*muwNzqkN(c$DY^d- zlKbbx4&*}(;{3>VlouxTvF|D5C$bswsldN88QzHPz&55e z_$ayGwoFERfroK?inJ}{zCb1CX_eep+E)|(iGSP4{T|lKtiA~%9(ya!k356&@O(f#MqQ8N}pNUxxS+OVB=16~`-!$b1ry5?;iE z&qL_>jqOL0zwHEHir_*2WAT1D%E$XhhIb3LBYw0`+=u=7D~P=bKdD-j7hR0~t|h2{ zoIKwMlX}ih&QDQNKc+~$wJ#!kR$)6r?w=#H9wGH*{${k7otziKO}M^~#5;waAIN=5 zklati>aiWELB1?C*q>NL_^m?y?IgbV%87pDzA>NF6LGSCUClT?O!ALH@ViL;uM+zN z$oWSh`UdBsyg(bav3_Lr6S*ICk>`uSr%<23R+Oib`Xo-CN9L2~L6HECr_X;Zr2Yz% z`q4$|L;GS}-?xz1PsQ=b56cGyh(Goq;&&C{{AdxGPx57KD~^wl`{g*PmlAaUlKmQ8 zLfXXtEMQ+pGVwI>5#NUM74m!m?PDYN6D%IldUy?vA38rcld)wuK3b1$dp*wgRU=;s z(vFe+U|)##Q_1t>1c{eQ1>#Z3`7cWRF;4Owp4Zv_hWkG^sh4cY{7txiWE1xLHXz^O zrPz=6A8dcmNBJ@0&v8=EM7NRon{mDc__vbt06QNMz5=8jEFtsBeTz!&zoLG`mm>K< zA?Hh9Bg%7;d|+Qm#xEuK=>5f9VjuGSK0uylDicV&CiMW;8#1{+XZ0UBKLpA9St6Ch z-^+-7=aBKm*dHeQJ4NoVV=X8@O5#nJ+z$jEM}7j2VgL8Y^WfA{)IVNJ@REAZSBdiR z{Uz+YN}k8ZiwXbZFy7fGAl~4QNqqc!GN07<_`FXd?|)(S-eZU_NS|+zddWx6BTtj{ z$bA-`4{fBL%p>;`R&rm>&RZ*Re1P01`snkCCD;xx#_@@T$Op}z3b7x)U(At=mlFA9 z*v5Fp^auO8iG7*BHY0xu$wwCQe8fV|514NmUUHvjUyuE9^8Pg!dH$->_*jPe{uxFZ#&+ol4%X6`}G-{H-GI zUkfh7wyzG?SIF~|2stk*fadnw}YJDm_L*KpCIRXyif5E`{X5~1 zsH9$Nm&tx4?|-zfLVn_;p2z!o2dOuiy-B>p=jp7z@t{3pGjRRH53o(2*V;?49agX% zag+QUK>Y%Q53FYyALRM7g}y(Ex%gRkexi#z67D;iCzS3mxzZ`0`}bGjfs`fb+Hb|g>yQ0@2M8Yb(#T_#gwPK91H z*JLuj7__HtRgbCBBz^`{i|PN~WrDBBWKvkKUrLW2!v=U8cFi!!Iy)_-7cZ_g#`Ujz zfX&n9N&4Qj_`*@TG}8>0n8ue-MW;Ufs?>(PMz7Q8|rAc)1hrW@9tv* zLfiWEb$|R;NUCD5Ue*8NZwBbE6-!4=oeOehLt)XLQfwWux2v{W%b7CF@6cwLr_40_ zZQ2ZJ%1p^`)n-_y%(VK=g>`!(M^O#!eY7a{$<1;Fis&=}gxUZ@(CtDH#_e4# z4{9UYKWx4`G;!cJmiARO7;AeqNqgYh>xC(8UZ3%!pllU7C-df%9!c&Udi+ zHeW=tljB+ zjejoO_}<&2!?!al8wO>QE8xd8>)67f68=eYtMOT^qr+z{kI?MxC;b0g*kydH!C|XOdql1>bdKT9UlwKlV#w$N&9CI0;`1`0C% zQy5>HEVxn>lxta@G%Sfzp{r7sy~|h 
zHfvXRXGqt&)vAVlwLl^spyP+p^?>JwP+*8+um0*=Y>K7B{DGMf z;8pcY|H{qwiC*#@lS%*T-To-ZgOGvkP*te7PpdR*v&XcbXdd2^o@q8(Tg+N+C?`~5 zZcjH)Y&(|j8F8q@?4O_wSFe3-v-U|kp6QbOBei?9$IaPY%&+_E+dN~W!V_9<^@juO z&-_Kdn>oDUcg)Aes`~l2vV08pYugV@+M%{(=Kdfq_G6pVbZvhy&nY~iKX6+*J#e~_ zFMI~2GGjpVTN&{(Bc5dmHp^g9fA2I)8@()Lw5OW?pSyRDYqI(uz@Lo)Q>U8>hDk|= zWodD1xUs=(JXjcwa>>q3kT;ezO*$}n z4CLZ4^tvxGjQs7qIA3?lgu%*J4 z2a6$+62I3~;>Ti%50Nw@G>RTG4knWMbl619Y$3(lArn+WVQS%U^6v;dNwHB!4_{x? zn{Ggbcd-tB2qU-rfcem5(KZ#dHpw;i>&R7CL2V{)_owGCQmn3|3&m=-Xq#&lu~UDM zYjkALHdg~#7*m&%2cg@+MZ#~Y2;aya}diZFl_-JC8GSOx~w~7 zKnAmqO$N_Uc~6^-wCstgF=Aru;Hs6GXCU)TL}u0=12v6(z+KX6)aB1ZX-LS7P?tXe z5B7XZW)q$lZwqBPyAw};*S3g80(=ruV+{|8rN_j>tJ2Pt)kF84sY&Q<>~1QJ5RBpN zRV&lZ*n30XT#*4eulomczGX6?nyQrv(_)QZi;}cqWz`9q{Orz!xA*;T3Ws=+co42wPHcQa*8$ZFu z+w`bPrY^e*4S94_e|1@Jc(P~-sMSc_JoU;RGPDa3)d?v|b=lvvMnS2@^Z#|u#fpMB z@buEjmN%Fcn;;%|%GKqsz|%E34osOP%>njFFVIn^FwS#T0Ay;`H7QEVWCi#ZSXs#< zFoF@)FjsDOR)IXmE$# z2!791PJo&jF0Gl^7u}odeZYUE4BT<%+IjjD74#=GB1aMHRu3(cLvZas5IO^iP4iO zZv^yDxc`iEfdsh z*Gk7WP;}~{WXM?f3&4S5tg6sCk`NYa*QZ-1D9DxYMhi?rtszQm z1wX5o*nPG2`j{S72nJ1pwhabyixqlu9Qql1hkjF>tbkTO!Zb%A z_%{3TRxSucLhCe9jE6FS;%N8e<+d8So94h!^&9d7It<~wk7BbJ5%fA2-!A1lK%RU7 z++}SmEsP5EX@B?zIt#3=F+gpZq#)1Fg3{E(U(Yq2pXLUms@uR2dZ=(xODq`=R>%Ow zuCU}PObLQPjfUvzjMtN+>p;cczIFgK7$<8$&DiK~GTKksc@x0tHsFY}QgQdc~`ywj?QPWfIKn zY8-sx5EQaGNznvsYh?!^9t)JIMP*4lOO8NuRxs^nxyGVBt0U{)5xnOw^mv5eDKZSO zYQvD!r~l;fy=Z);C%&K+ggGPEf)@KsqX$9fDex2fj0uH8p-`Y4gYnVpX|LL+AZ85J zItqPahxyi6Ah3stG^`Cpa;#*oU(hKo6tXfrrO+;?Z5K{MC@SZ^j_1EXO2or-c!~23 zS_e+Bt!D>V8JCF@B1Ektq0TUZo*38%d-bIXXxJFPA6UG(Bm&*Uj&qp~F%HI#>6V8T zS(XVfbeg2f@Ju^G!H729B^a*&Jj(=Sx=z%H#TrRlf=}8V=*4mhD&T+7@b&KnIs)yU4g$ZF(xxJw;zJ*Ruw=mldu=f>HGzm3^ zzMz-_V!PtRx-;@D8si(X4|+m6+^r^$Z}y&U-bYJG^af>Ih$j9koN>`A!FM$+ix3Q5 zRxHzCr)V65li~!2BFmr+9^hQ0u%tDUEp1LGm_;M%BFMjVl-tT?we8*T=lBEUHyj!7 zCutxa+Rpf|r<46J2!i*73mD*MG=OF^&VMM=Et7`VhE-(I<89X6$Ik51)8lOh!H?vg zdTe7=7UU28skx_FA7R!+;D+uG!~lhcx$GR% 
zO&|u^C9)4Ihae18?corNQ4HPW3@y9)+B|C=*K7*(HDXSCjbn?%%30pnO?vrvpkD9rq59WWpS8D zU&*{W`uXbpq%DRr5WR7jf~D=)+j-ZTIMd9H>6QdHB-@M@xEg zr&OQSl!@ZLd)OfWi$HY0YWGrJQ}1JGJKj+r#v(zY4|1UT1n1#TpAgFI{eLC;SZQ~s zI7qWyxsPh6TFY`D8xCK67>jrR_2`2fs6N4Y_|r#5nO!Z_Czhs0@d6E$i7aiYdde&6 zWJi4%i+&P)kOS2xI1hjNgwo7^qD`VtD@~MQC*w7UT1(Z&%3{HZj`}bbVw*=FZR~SnZEs zyy6(IEjg6eCO`IJEFO^P^Mb3s2j}5W9~EWx(?y9sl`Ov+S!*_fmBr<(u2%Zdhp{+% z(W6hLE5CyC@TZT08hesdpBPr(SF*OOfwg6=nUvS=Upnf;SWJ-U6XUAy!Fl-8XR1sA z%)YoF(Pu65atRHTZH!kNvvsQ<`!E)tUGV6G9H>6QdHB;uDU$)So2B}wSo^btdEG^< zt|}R?20!JQu^24TN9AgNg7fgFPY=rM)mDish z3~}`r!Fl-8r#CH(J*4{VWBE0b)s2Q6cFHHKtDF4v7mP(Wi9Y*W`4yap^dI=Ir_g_O zyetTxVhc;^Pk}4PoK5h**L~)8F4?!9&tLNLzg_edNRK@jF?-S9=SyGepMC+-V~<0| zd(lVx(vS8}ABXfL7uFTDfUei};C@9;{$Fa78K=_@Eb^a#Z4MSq_! zeW`!?1xSw`5*hDBAMHy&+CP09(xXR1x_Hqa`_wo6FZfSCu>BNUY*PQ1SNeVFKle{x zf%MqI60;ZmeZKUi{^=JWJ+?q(ycd16Fa2o$^l?a!HI#JmqCZyYoBrqhrytmUiY*SQ z|I;h|zVx5_r>{Wz-ZcGQ^!NGFm-?q)@B&*eIKhkhYG3NRys6pu6gE56hNJX-IEzx+ zhC^0EzvZP$!4O#}crUq`juRv>PK{ubFwR90Z1P1+*4U4emO^gNrY`G6 zpH=kvxPZ^S>GM|lj7^fdteie$^J2JXMLRYVhFey&V|7=TPX&59O5V`NJ>SxmOvPrg z701b?Ew(;#jy5>CE7D?vHq;&QJCMM18XqBOWrM)r|R?DYFvg7TgVCDMa3h$t^g zP*%B6PDi>?qDka=3CbK7$~PXAzU2OM5|nW+lr0{V?&JfhF4s`IVv$ytou~HeL7(^1 zXRK=Kve)P{Hd}_p5Rc*H746uJ7-9>@0sAz@UXT0-liX8`zXthN30#F6JSy}d_nnrg z@TvPp^6FY%^j+n>Vy*x3{unLqdP#YYm6Z3> zlJahql=mk*<$VS(@55Z>eSs_MKkQ8t1>OLbcpcgBwvd$!pG-^yiaF#e>sLzA`sm22za+|3xUkOllwkGNk%NCquvWRS_Div#`Clf<7W*8E z@~#x6zm5!+*wH?YqO6pn4A7B5l7eEthULE$WuT52C0zb%9eL;}_5tD_tREMi!qq;? 
zc&Et<^{x$Nc8E@BZCk<%hplcj6u!Z*@*ULs-&im?G2Re72*Vi_=D4^Ie)lc{k3~35q`0vdjjlg%cS9=R%4DM%PZQQ?iJkL{sdbO zV;K(}Ga&h0e?r$kWI4Jd}P6bpz+rftPdK0RY7ym`2GbkV}sMr2pS(# zus&$~)dkH#JF_A5%cQ;`zFr zQ}IW!bf?SnMVYBeo}W-{{98LY>9D(0+^37q>t5;maFCN7adT}BBtg+vcgr7AQcNh0hpOQ|-l;Ssy zYW9tTRT&MoIv2iZcRC+s3sSQn0p`rsgevu8-=iA25f(*PvCgbLYpM@N=V|)Nw*PC`n<(pMw#r>kT%@U!d>AVp{*~f|Bi8bH;K6qC`<6;vq zOxO?hJY|_ATXBJ2p*P$%AHu8*@*%L&bGC0Ww!k#L9@(1v)0U%tmqsAj_~q%$Ptg9Z zxIis7$dYs|B>C~5;Wr&w4vkH=SXW+e=nB7^nwre1ZPm_!Xf^mvnND&M0ryn7%L`wx z@~`*)x<3E-?ANPk_ga6F@Aa;-J>KTCKINj%emS4@F5dA#?}PuG&-$8;$9>k{wE58g z<4r%}A8-8YeB>QE?~^_s`SMn8`b8gjr#H-JJx=mJ!Sqww@_pWGTD|3O{KaQ~qYuA) zAN}+`={1X|sS9*Nt^*-{i^NBy_Lx1X=PkNes_KW-4 z_d+oJRX*#pRJIx)d|&bn(_hEds}(v6=6~Q`2>>lPUjiWc z_^dNNTs`oRn*Vy#}~sG-o%+Y+`mBA z!`Q|GCw)d1dC5dOXooX&B2@&}b%&0F$3%0ADj_r#l~xr@OQy~Nu5_zO(a6Y7{0Qv8 z9m1HH?svRKI}Evbl-}ImJXv8LeFxQS5N_>BiL#k-d22_Vdw?_Gt8Wz7clR5f#XGiV z1g7jCk4cra$6p#x!xp6dkkP`p z(-YOt_yxqXm}Lmp3zs9%^*(8jv8$wM?xD-H>PUQj*5E32S&STak<)OqyS;n5E1P0Gn%nyY!J*fHyY~;m5OV{5 z45uqLpj}w)X%{*+3SB_gF6qM#klDE2LDB-$`%VYr{P8v%@vd1x_aB2MTz^f$ovQCuvn}$8+t_9hs7U_Kaf!vkxjy>3C*PrJrpA{o^F-T z$N7UxV^I;LaqufD7k?m| zG+uPG)1ZdHmF$+NC}S$hq+Gv#lyPO{Vc3CmvXT`CpZvu5Ochv7HU{vZ)I4y}3ysl# zBugi$0qLmAO(4EnQJlL?d|R_G&(XG z{Cuy)ctx+ezu3G<7votX)Bg?pl`6g=(QkaBJAPt^`1CCD2>1g! zv$l;lI<&ihhB}dMyjlJz^vpw6B(zS4reLwkxj5DsDUO{=r{p9vd-g6F`wQBmZ~E7Hj**Zh-;9DODD{L>^A`Ti9?`xIrF6hU6b z4>pTbFb@B14o*6KyUx#~6U)yPBQZ0KSBZ&J>FPb4aMXF_#`K(Oel`RNWmTEB)VTIl zLZFfm;J%3*+&57HTJ00%W?8;CMv)Kw3xr`1hOHW-T$!X?HKxnTq%Nz*bX}R$b=8<| zE0emd8lzg7q*^tm`^u#5tH$(Lnbc#|81>2|wOGaX8{vkx_;dO!dTa+>Ae<#OvZpLu zGmHy}EsErXM#BL0_7GFGT&8U+6ta9VRLKd4jX&XTdC!(1*ZCxP7bzC-!oDt=pa9Y3v6>Z?)c0uKVZ=K?)MaH zZ4+_RcMjRH^AjH1XEvv}7N0x-K&S`8G(}*RrncS{~mKuuLXxvnqM*MBP zVf#rPe)p4Vn{5qj<0~!hbfbIPRx+8(Zs{m{vRK-}_iy(GGT%+Rov48u2GnI;(QHbc z?eBKBpqA}+rR;o&{!^;rt>n?Au09I-r#mqJ*1#`?u`mp!w%^KG? 
zW$jY$4!kh6@)?@{OX+&}a;LiN136ISH4C&P)N0#8a5;P07Si`$X0_&yRvU`DrH#$x zMI5nYuWF>)XEysQ4y&qdsdDNo;I8l(DvJoNqM@*)?IRU!fr^HDWi?jLye0u~LoM1S zF&^BeeLAn*U+R_kiOFefB6~tHB%*^@sbUE=#s{3nV`63#Se?8c&dSy_lq;q+`Li`$ zB_Aw@n*Bvp$ZWHr$Oqv|i=!oNC)xcE+uMPegC7u~P>Q$tN%yOC3eUkdusJ$zV2j5} zYSHfWHpP-csFhIs687M-YxUY{$dM~3J#L+r(x+jmXM68(r%50)sLQUwy5kdVQ?>Jj z-GMb^#%SqO!etqHn3Wh7xf}=CIV|~2tk_RI$Q_3!?l{ChhmONBi}n~_#Xr@Ej)P=< zhmICvB@CK3utgr~wbl*}x2yfsV-&-P0TH{SFQzTi?;Vm7iDTv)$o6 zquW2svCV1o(2uM91M6pnk?s`0Ix|3yM^h~?RSD{{cd&IUb(l-r*a02|x>+!a^J=DV z$7XuEobO9&&N7!aGpVeSI{dcT)wVBiaF3s&ENKnoYFs23@9GDh!?SzuG!6qSCov!qoJoF_8V4j>dOVthxX=7agLq(kQzF`R+F7qq+M!oq8AELMvs%>|rvF+4y+n2i{eTHBO7~~^ zj!`J9mNch69hW%9>BLD|C*2DA<5~%RW}AMB@AqVi4W0$X{s6JTQN7|r zD5&8j{NOZ7iY1c~M|p^Wb}-vp7imdrkr*lxKfKayfV5WBlIM%ru?=b7`p{wF6q>71 z0d(reDNN0+rj}To*AS0n=}9GyZ9kPM1R^-uFp(87v$f?$oGC+j5 z2;GnO#YT~F;1p)#$K_Aqw-~_R6NcZ4ERV!S;kgasxY#KB&w3K_8RM@0vd;dRo=kyP zd%d1)gfPA#+uYl>7!>9bZvlz+)tuc!@M3!eBK73Q&$!^jDEM9Qg6-v;WDsKMDHgi5 zA`I{ifb-nn^e{Yg2s5;;mq*C6pr$RBQjFTs?o@BH+D6c~mH4*ZY8yn~et%<6&@fzSKN&GIT=)gO1`Q= z+)IjRe%W^8ydeo^5$f$2a{zoydoy~2dNPyx2^sV_LoxSP$NAZhW#)kbBKE}Z^^$Zbh#pI)2OVNPbIRR?g7+BYPWp$yNEx-s93N~ zho3>xJMeq<6dh5aphy|pN~kA8rJD7|xC??yi3dYFDxr%2lton1PT+Vr$8i{Xh`ooS zW3j=&dQ`@OkYATsk9z(Wr z==UF)2Tg-zys!vzx<5e7O>)f$2uDIV3Bt(`=0W&6geRcvZ3TE4;269kO+7q^L3k0u zS@4?`;87k|7*n|y%Ur@dSD9P>kEGq0XDGsUV-A$Fq@h^OegoxJYR_1-XLQ8=IwT2su^us7DbUxZ*$Gi>&%%B@|_o67~(nHKwl_blOTKmej5%D0`M5TBTW{-OYm%gFb={JfV+5{t1o`| z0snpR#Q)M4cYA>K#Z{|W&q`;J;;r%ZS$GhLR%uq7Co9d9>Cuo2bzTy}=m|JTT*irU zUIpp1bf)VCdfEr5H8lXys#CF1)XUv!J55e~KM(X9U3A@FQ%KKt9WbMaLS;xhy zz%5a&60$85V^z1yU_fT8qa9N+MB8>?2gux(&FB-@Hq*F&K5BRV*l60Vu)X9#m;ea& zANAzv4}ow6#XA9y_NCAj0ls}9$J>K=pYWGRsK@qAdh#%0Y~ep1M2u(jRZ3!V#tb0$u2$C+8}Fun$N_Q9blzJ%(X=(h4E8>&$f&oe7ZaXn^;J%QdkOehT3b z2&*6*4&iYKp8_xfOaW*GK$_(csvz6|VLgBy;1G{<+aIdTseJJwxB+LS84yDKrhi=- zaF%62{Rr&Ou7LE~aQ&`6-~32y1Zn&b8%0PRrZ8Wm*`di#p!xB+$HuX3o=jNup`T2s z`G;jf_X>9=%&1_Qu(d*x3EDPECah^=nLusn%7n>nEE9VD6Ce}5{>PmO3qN3)Q1b!H 
zgnkt)6G|#*Cge`XOju)uly3lt2iOd7C%}gQ$pD`M6ae%A_z7(L9Y85OBTXp4QvlZi zOafQ{a4(OeE4O0w=s?_cMr!;BYWyB9r!|y0t(hay_%ESxPP>g``;C4A#d=0Nle0R# zIn6;T>CQ+?W(%}|i3)StSxeekJf2IIAXk@vf+X~iX>~bfM}^GxCS}<4fh$*@b~}$l zkrU4i!}}~Z_F%YBPp(Fj&1F4l1>o5aF`71f-lHe?WBmIpekcC6O-~-ZK(j>aI!$CS zi@pP+pVyP)@3Gt%`98}H%lj-hPQ1^m1dQIClfV$S0PMR>uDK1ugAm>Y;V}qPAZ&wh z3&6bqIsgs)hBW!`d@Y1KA#4Uvz_a5v7Dw|Uisr?~r=)pthPFh1xGF+3s|Z!2FfV#Q z`fT4)(;Ni-Y&FzskOgEUrqZ^JMy-OVAGN9@wx64_&+Q(kUbUXt{AI!Ayno#aiEq%8 zi1*wDf6jZ%)Staa3qEC<22Aan8Oe+I{ED79-{Vnl2w@CB3P2G+29I;KJ;U)h@~jLp#vwKM&9v|jauxpmtne>P0DAHY8VFD8${@4w z5l;%GH9CXToJWgeN0C9ITt=a-l*OomadpgFfEcICK!-O6w?xpvc6N6Bfw#mx@T0{5 z?H8bliYF2jPa2g;rV9$=>5pWqcF}z_8D#OhAaD(R+=Gwh^syfruaxVs1^Bv#uQWD$m88D#&v zypf_^1>{#A*eMyH{&pzW5FUh358=-cPKPiYzyQzF0MY;kz&p~+g69zsmO=O%KreVc ze>;oo*jM1z3r~)P(Hs+8Id+~^kqdWV6|tkKnrV^#5FZbFn)?V$H}A0X-*x5KryhiD zf7AYebuz7+19vk2qdbPAjULgHl^7a0kG>$p{vv;wjW4UA{X&d2{KsqIN1K7xh~sAi zbQiCJCN>?#8MB$yn&&sOTGO(b)tYhdNV*Hr`4^E;D|mI~2hR?g8}>)_By_VR3!X#z zSNO{>__9V%QV?S;|8YJ1Xj?+fJL61%Ea-113Qa`opG3-N;0SgGjr`*k`~q zpC;UfMqTdF%9cleZaCGU8$k>U=w%yAF@|nrx$*c$mK#SmvfPM&+trN-V1!@pf_4eO z29OSL3gA9~3jmt{ZUj&RC;+|$K$m^*L! zb;NMDWdKLaxQJ)YJjk3PCTAC*+e)Zi2O?90(=}7^2*za7m^&PP^W*~jY`cwOyz(Z? 
zlhbdqJV|+r<;iPrx$rQ($m4wb^+F=em_amS2Dmb&FUy#uvDmLuTZYkoUBt(K1b4sw=U4_2`q|U_JVqZ?GP{?G0&Q$KD$qLKJfm0 z9R2=*DgKcSj?vf$m|2xj_EIs$bWbvjP~LFm(nlVI-~V8p$WTbO?L~@LhL2Cs$M5mc zc@c%tbSboI;$U#zRv0g>%+G~s-Fc=1H1xoCBRz%0|DLxxEtU!*z*v;UVw%0jx@i( zb2)@^fH;7e0Hb-Fq}`fuLTdO(T0PXR>Ji4O$M|G4{6;j-8EUB_d?fC6tDyz$-bkYc zyza7ly}R8iX<_Zwj`cwHAw}yBX|`>okJItdGYt6ykw0XFqcFeTf))cUw7FxuhPZ*+ zwKU$YX(5BgLSJF`+g#lZ9Bhd+JN|r)Wyjd{EIVFU&$7d@-ql>K84HGp1$bNwV-X0S zf^ay5>ma-X!Y?5#1egpk4nPjSA#E9n=U!)RMI{Swmz=^Bm6wK=JQpIq$KRgPlN+!PG;5pbHd)*> ztab2(4UWY)xi5ZWHabW%K7)tg01W(q-9?|EH1=fKlEs3LW%lk~KRAue?hiR}_#qy= ze-LKtOLK1ct1RctudVank<(*UtSg%Xr%VzvlMCVqf#}!&cfne%O&$ zJNjXRZM^?~#qEcE^$PdHp#KL7+Y4@HZ`nNc=#-6RTGp%W?y>uaHkN4y;$1QB=~&GM z+5%Uz!MvJX3)SozJ=wgDWkvfsmK7PVu&j9Z6;~x%2}YO;updhBLI@8-xD>)>2vDM+*#3PWtrRDI8%P2(-<`GEe89?|9k&{Ik6;bS^Km3^?#I z%YYl#NoI4qK++G~4P$YD(*Sb;E&><;Dc0j>mi9{_3MAlwCz4)7?z27uK( zuH*RJ@sl(Yy3kAraaD{S@Fq>lC0=)f@Q_G#{<-d)y?j#hy$7SoAsm| z!|T}Oz@ZlcOb&F>lPh18WJ6y_`mbqn%{+kL0hR)s1}FpQ1#z1pOb56X;6{Ki0FWjT z!UF&^0iFc-5MUjTlk^S09hH2Tk2Lhg5A!{+@J~jIPkPbSG(v}t$v@2+=!Y=X_7i&Y z%?qyC)x)egzZPQb&3aP!JYesJJi~A&v@n?Vt&AFX=i03d|1>^t)stx${|Sr#2!C4- zE#@(o`bBL#s-8S%a%qT7vtj?TvX#V2Y}jim;+FkUx|o(zP|@N zSIBH#AXxJT*y=7Y#32aphVUn}9fT(#oB?4Nz~}J131A(-mGF)<6##MYya&QR0eZmm z#Yrpzg zcnyb2`l;E_6_5W?mpzHG;bNl7G*U3!NAV=<Hd^I}Xc%`+ST@EtLYCq5Sc^t7U zRK$bVK)1hj7c-Rv$|>&KkfoSv6bWaFPqG~o%y1E=ytDKUfD7W0EOTiqUUwuhAMV1z z_3#nW^ofV?k~h3_E`$8fNq3=S7b@<-b?|s(PkjCPU+I-C{A$B=QgR0#h2x;Nwy0{g zXYnK;_K|RzLOh>fY%3WKWg8Mpb-qUYj-2?Hs@z))6En|(rO{E6Ljf>nA{wT{QG7sz zYG!P~r9*KUI&snv*TTjjqOlG5CA62sI@kOauQ81Xx?j;wYuR^y#g<5eQ#+G0*p zprzZ3zr%Y3N?Ua#0hBhCo)xmyt5?zt&yrk>1ZF606YGEk>Zo|$-d7Bxt4>kG4O9ev zCU>Bsr;3l;@5XerVmi7y7bxgyG#OJoHa=T!U2;9j#fwC)04DoMa~st&TufHs?GzA6 z3qtWRF|Aenu23MM^bWhT+1_`<>~pOrt_}CT-!jYRzRU9v-|=G6*|~x4wT!Q?4Oh*; zy|Jp=l!zhl^afIzQx&zs$Qig=grAW=nV*qwo;)O%Z+sl^k$dA~db-~&(WgZHAj>k9 zWqL$y7)lYpsSQW2wIL{peqJ{%8$^%R8$`C$;SNcCj94p9igKtw?3qw&oGR*L#cFX< zG<~l&Tt}CO|5}SjMzp+5*M&pQ3@C8#D8`*+_A%l&%uOB0CY~%~<**G+m9LV>wr;Q#u 
zk`obsp!}#|gm}g>NuDo;nQEX3-(~!J@TqUxlc8kyQRDrCKkSB%Q8mJxGAKPmN7km& z9V?(XyXz^6)Qxc6fNGi%A=D%)WVJ#iluq*!Wo@D+CKGZ`9&a_L&|A~!jZ$FJtUdR>@Mt)?~|XmXnpe28F_Gnw8L}lw}dugDA>jCePVcj zLSlsPQgL=ii%)z+C-G68#7FzY7sgg@!s4(H(qYbQGN&CY2od@V@kcWq0COY3s#n>%`R+fg27su=Z9jbL?7}fO{EqOAsb{6VNK)74ikH@f` z`FI1B^`iSe+u?dL;MQRG#d+>K#wC>qLy|MYK%c=!1aWBCzz@9vURm?t*)RG1&{ zcUKrZ4qivvle{Yol@}#u;^p1s0G?L>0wcW6Z_08;k`4UUpZ=g$hM29O*AMpAOK<+& zi?=O;x8j{PQDxY^xg4r#vq3AWIVnXe(>Ity<`Re$8fg=@$xti;z1;GQ6&qz-DbKlp9%K~}*Q|ig3w`4AW2F9RZ{ga9?N(aTa9kIj^3BWI!CCWw|HNHSOAH*g`Q=LM}BGPQKGoxPQ9%Q zt}Vr53xQ#4E&beF5fBmZuJY%^T_M(f*F0tS0C0grri5G zuN#gF#2T}0dgZG$oT5rf~HK&ns=aSjAik?mP%Qa$G%To6lZ#BlhVK6ZG zn~+`KUeKjGhp|)PaOlvA!-ZW6F^weRra5gJ=$W8Z6@LP(`hpftdaFx1^o&Z{AOSjF z+-)`V0|(zZ{9`tbPfj>rd|E7Z*h9ZWgTttUM91N$$UbrGC>`oAQmZ72G8|4V&_Fdv zYZ8A>7x#lmb7`G9$Kly}*<{u>(Bd!Y{hh|MqOn1200M0*x%o8hD_jGsKV|*~R#)D_ zZ}h~^1YqZ5fbG6gKArq?vpc$lMQfzd$8N^nElO-KXYL!7)2uF=DWf@qxq-*)mLE`; zO`=D?w8B8fT-s(fk_3m^^enmynFk@WR^nn(I%q2dW7F#~p|0I4j=--Q1r;gKB3QK6 zTCEi~1DH!2#S!9uo!bgVf>)J0pd8+SV?P*=r8SyWk0koyV z9&c+bRAh&oVyZlb3rv81DQGQb?!%3~+9s#5IbDpKg65nw$bA9aAeXE(G;%E_NZXcR zY%NIymSWCXyapN*0>eZy=P#O$USbb1vkeD{is#4R)n!Qvmv|U{ek+pot>_h&N-tXNfRwP!J{UX|1yTA9VK z>vee~CY#DO?Qc()$CVwa+ zpH0J0nz&QOY^2w^@qJN{=6pSQ`YY_*vgz?ao;}obD{K$FkF#wbIjo)(QFg#e?;5;L z{t9Oe{PF-9b@pxXR?=@I-978wU9<9_Wav5W)6#ljYO2mRBIoqvhPBcyV-Z| zz`ma^``dll-#dbhE7CLc(2j|Rvpnac(%V$`6e_;Ga1V)bQXk?QQVb7;%6o7du@k31 zZukUO(qv>=CMna+J)Ds{vGhX63PO_=c!->I4vLLWmoK4LAuQ04wr$==2J!n^(k+vs zYQxsgU^hh3Kxv?XstG5320$Y40`wa5Bu!0ngy6nsrBGG*0+y#WBiQ*RZ7^tqVHZ1P zgg^-++gBi|21k}gr3eGtG!05KYgNGneyDH9z1Q*#Ja1<@%9QbXT8GEjq_qsLHbmi} zXg%nmXt+SNFzR0WocN<-?e~k-Uuy0L)GPAEq4^2JK(296J~mZX=Zg!WQWnee&BoLC zb5E?ByA=7R($j+RTX8Sc&U#b53~Emg=*1FEwHi}vMDaGex~fSbz7T~>S4QC9@8~&6 zs9dtfeTYE8t*)~W{@%)FYv@&&t!sUzYOc@pnyS%})ZtJhoI@ES@AJK)D!)hj#@*_= zFEgq(oSH3#nJtBxErprwA>fRKksfD}5+Rcu`ErajRzktaQMry0i1$9H;40jg8np{g zEvw%V4QWh}8wMwtc4d(0vIWrG+w%psDbCN)0YpBWZG@}L#BJ}2 z%HDW?hh{iGe;s$*=`?s(V=r3u==Rwx^txTSv@2e61WP8rKT;zm((|=k=S5iK+5MR7 
zuon8eAM+8$1@ObSV)>7!?WyZx%%vHgi!mLNi!qg>v`YL+HvoiA0jUXNGy(6&yfU7h zFMluckAs(ZD;A*~r8&erRw0%`fojXD)jF_cCTH8Zo-N`b*PXX}Ny4&rryi}z)lR`= znp&e4$Evq27aovnoU!GOLb;}Ptb*=)?G6ugE33jZOM$74XW4{PEmp&l_yb?i!rvob z9GRam3OhO{c63XiPA%o%g{q*Vb9#4lJ-`neW2(`ZE=Ckr+lOIMs`^#Icg>1VV^t+j zX7WBlWG9vEC6AW6H|@emr~5Fr-8dPvodz%&;64CB2$4ztg$Vc+UJzt*Pk;9`+Rri6 z-9EK||HG<83x1HfqLemm*gk1n@CYBXR%uRCkpXnR7cu4RU=kfs<2iN?ybu8oWmeKb z1TPK`4rPVPbso_2q5M3cqaS#m2P9tP{R2A>=+>e1{C`mM161>)q5R>E5O=+9HI?0K)rfPMbX|DWRT{QoJg^Z%o~&i|j{bN>Gn*ZKd74(I<* zaUJ%ra-aV{rPK5Or}&)zAH~oA*L8aSe-yb_*U9<+QF#8p&U601LUR6pbGz&O|10VC z%*mH{{y(1J&(HtIQ~c@q|Lh$9V|b2#C+Gj$Y1~=7IiObC%I*zl!+QhB-`%{=|L~tN!(V&;8e%|HgLjcfHda|8bxA$9?!c zx83J_VyBNjKIbEk@X^oreCT|i_&)yzPu~b1dz|z+4?WC%essK5hyR?B3&8R`{|8RP zNlo`KOfi!V!xVaQ_thPSVTcc71HIvz_;c~r-^!N@m6-f zu8CgDg*(%CU}PeG-O6ze6sX6}=fBgi-66C2V9t9!)OB8K#m{thT;V&PO&XyRfC`TQ zL;NV!6+ZyQK<}7S?TlQ4&9M@U|qH?Cm0nN@EuT(|lRDe#aufZ^*G!4D4QB1W8tp+@jkXS0G@7-? z*in23#qe~=tRzmP(DSHT#k6DWAh%;34syeDBfnA7!|!B4&U+JU>DC%>eAvXl^!u3# zPagz5;*T<=P{m^~uA}E}1yCx~rBrWo&an>1WR*4whKS{*2cahh+dwsIBtP}Vy6@If zT~>&TS8`hD4v)%X2Ji*gm*j4hHu^%dG;9!c48Y5J40F#kh~*xB-I`(=KX@WI?nymZ5G#x4Un`2B~@IHD#Y4%&U6XUCDC z&uZ)rW%o-=W(|1Y(U2r@FFO+z2O{4=6HhN}SFNYVfwWymQ=pHxil^caeCldxdpew< z_u--5`{~su;J)bCS@W2x#bJVwAs*IeS;pee^z7UOu|$#3Zn)XG7^=Psi}DR}8Wm8>vyfgwi1X`{n_ueeA>*Y1Sr#c42<;KlGe z5Eg&b9wHhK@)O7{+N0t>P#P=6{UE_<-0C!1$@+iU_ZYSLW1QwcT7FJlewJ=MLt$p^ zCX03r9(rj`dlM2~$NIs(rqX>fLm2#(8@pRF-^5xt7OZXW#$OKN@ngn=n9}EIXR`%( zEXULH6ui4xPGxL?rogOS3F`OqRUho7-7TionYC+06#s%*yUuJ}Z_Zhbje;d@?FO_< zdO~Os1khzPkdq+P6pFuUvh86yq6nw$f?P~n&lVP?i_?@+0Z@ch+&0usVyRX9#{2pn zi+1PWJ)rwB{I~b-da^cQjAdzEmcB#tTB9wj^M|Vj>Ca{ z+-XW;S2|ShGmXaDEEv0fiYnUfgjT@!yl_)XG+)GJ*0%8~1RWZPOW%wx}*C$xf(HKxrxwhhWb!Y)Io*cG}?F~r;r*UWU7aLaVyEa)zcsagYE z5=vW)pHL}y0R4&Yp}W*7#WZ5Ni8eE4rO&!O*I17(YDG^tK10k^B%FahWs-`#1@zcz zpMgH*HZcpt4nAc1)j{rC>x2QbIozD0FsCT*&7EFtS_=*{51`Yt|r=xUsw;7-3l>TX{u3~ z$0$v^R8YDT4ncl|ndLb2VF$}cVtWdsLRyvw@2YB5(8~j|#DBhk#!o%?VnU6&3^$^Q 
z-LXGS9J~icb5$1j9A*1op8k1RlJIJKDG91#P0Ozy8;Zd$Pfj+n2ljW7nGe% zBXr_k(vzO_2+e707RjQE0?zWvMsQChNnGfpC&C`btob_{on&bDEV($ZnwbL{@Ri0< zY^8D8OkCZ&b0%GH%uceE7(U)=Sv&|o{u^5k|5+Qpjv^0-zlX0bj4&rF%*jf!NCgU+ z1#=?2DWGOU<&~L}6y_u)oj+ZqI7|uwzcx7vj}fd*j>cmG$pff*Eg2f^nULq}cdmeF zzJ3Qcbaa=@OHfaJIH6l`Ck-J#Q+}8bt7VdAx^2q0zWp!qt98291EuQ8{Zf9lNtD=B zrLpw^+JZ(pQMGnGyZKMgd`@h*vc%EUK{#)S#J0=Uz}l|!&~|aZw1_(1tO%rtY~pgFJA(4XEwd|Jop)rE?|eZ(H7U)#v^Cu!>!#%wzs$M0evazEZ> z9N`>6-b7nxPp_7w4^qx;K&F6NJf+ckvP30GDL=rMjsS4x@`BiV@Wm>pJfXE@0vv)IC8v|B(IWc=H5>c>=au;M<4MW%b6h zb``$i0{(<=jqcL8eI6FXh|0+AP!Nl*q^rwwR2lThNF9F7*q?zbT1h;Xb$Z5l4?1^K zlc*5raVjg8DIt|AW48k0&KC>VMN8OcE*NY_kk&3houPpKeU?JVUe#^oWTj=YrY1yY zs#cn_RHkbL=<*ZxLK6^Tmf`Hre*TGs;MLD%Rzox?$D)|yu%sw6aE-0FUq@cU!&f!- zFg&gv+M+QEl;%uUVyAS4d8GLvg*b_R)Ju-Kwpz>~;_#}3cw?yfjw*XE7>Ce@?$CXX znp&kWD$I{mDWHY2K;L|4O|n8pex*yR$nd^&M9aQI#UFs2!M4}quhG@o`@i_=mFN}U zUI?i-^i*$C+#X*u*YS)@kc~8mfsy$x0phoDNz=ZS%F$qQq1c-Y}Ze z0)C(vaTuMp2Iz){A2u5fE9m(`;<&@O;0kXkqSP8%3q)<3F;2w3I-$mJHMB=i>dm*K zXCT8JKXFj&MPQnRO+0?bH#92jlb}ztUxm?H+~wou_=UhZ8WKlo9mb!K8+2tkXM-^u z-iWKe&L_00%Ljtha@n=6NE*e+`hs~V8Kbc`V691tx>l7K9ELB&j$eDU<2uT?*5b$#UE_Zj}}JwN}P&-)1XyO;l|2KSA{XUJfU&lmpM4EzhfRsKnNEv8PzuXXW{x7NyvpQxl;mni-fP~G*u z!*cw%g>;3{m5Z=^tnbOzo796lL-&m|L(^_fgEqI(oOaBtZNjaHxKOFhlIBRK+T(=} z-gz(z%8(_ku}~-sPEs(+knZg+oeG@zC>rt*ZGVQ2aqQs^`Z7Nkd)nzx&b~Dz z3RML$KY^8(b-@YN-|%)Nv05zEs6VI$S1beu=Im2J?PPN)PC-NtJ_A3AX%T#OV!typ z!v0HzY^C-$Xl-yLNv@_7Iu5V76pa8pk+)H~O8Xl$_T8XZWM$|H`m)u9Q3Z*p#03~z zfu~_?6HAq5BJdj!m~8(A#?*L25B$fEQQ$tT7&)ut!6w7?wc18H zqoi%D7V`+#siQi?)Sde6W+n1ipdd@(-5f{Obr<-jemmvN%}7$*X(-o(rTMIb7Je>cIU1l6|L zN;4#)3&(FenC$hU9kB##LV7+N_?^PWl&b9uq&Vs)j;XkgWVUjC5~a%>D3jo>DA;* zMi5R;;Qb*{okvI-vT ze*iLB4o~D^Mu}g=v#16cbhlDxRz6 zc8Rxsibd*wF1uAyyM^IxQ^sCcp3Pm^!o(F_F&nvkO{38!PWT;)$oQ`K-HF+9C}N#& z%fNgb%eer`Tvgo00Mk0a&(Qb!IK z)Tp;v?fvNoN+qq3*>;VoR3kvyg%};d6QZ)&1ZKf{H2h2j&V7@uXgxgF0nc?N@510_ zrS|y8GH3 zS#EYEZ~&ca@YPH#ZN^*r79&};Xly1b{JI4CMp1oL1qAhd2d^_JRl!(aJ!&*+`Y7Hc 
zepq0&$mEn6x|*DF7(nD`>_g!XenL{x={cN}ZZ1EkF2gRB{rt+m?D{qO^J@3c_lI$8 zU}wA@nu2RHu<0UqhtX|lyhQCl3uovtq7a>MC$u5X5MwIiF~-B%a4Y+1996;|iyM+; z>CU(U-P3Z7=aYI3U5XinH5B_xm$fMA19t>eqZNU+ulrpW*10!)vuhWy0S+Z& z>E(wmS?{;g0RczBPe8NRhp_cFGvJ}1i$btEBMb1@5ocu4dcIU@MN2G{mclEXkqg)k z?$APc_SZMO(YOvfZERFyndf?3}k!?`2%9##|Ju zu>cHia=Cd!wQb<(ne&L#h^v@*rsJfG*1?=VpZl7zzjBTsy=i_%n8R46>IrS)1#rYu{A4LPdTvlRji;bv8L`CK#>)8b>*SDY%?x(0IxmX3OdEtTS|{*s9zV) z+-nXuFIJc*!=S>Pr4qHrpnk#tY{mI`hP)zCOKzPzFK;}aO{Z(3HBV5QGgL+xq#hgSxXY;?Z4HwNhHFR92$Ktj=+O%>h#aj83lR)G z;716;;l~ic*i#(c2AZ3`k(ud(J&gSTPF`X7G#Ji|(w=?fmiT&!SdaPoJEM3!FxAOxL(wo;K;FP#&R0RFDyYkz!RDk+!D8joSV3u6N<)od zVSK$>)__MbWje5-26O~1A#p~jfS*((`?5Ep}U)iH(yQ(X-EIn?ztbn_t%*ES|J z8m|UEJDQj`dF`%*UB>P#zY@*2A_dg`cxyd0>Z3v<;8%3Z+_f@8S}nm}{rZN&5Q68r z)+%E6K@u#q;uH!xvPe$%5}M2Lyc@G+CH{T34FA5e9RGgu2>$(i3H;4p{p_1Gu)IKn zR}3^LUqFMmifHg&0R-!wEMG^*-wJd2+JA9{1w7!|m0BtntE!mTR;V+Vuc5JoY>U^n#6$6}f=-II82YR)C!I|E>&RMctm_iKF@}FS86xdH z;McC*d!`6NRg=-%vch$>FWK@=Zj^(0@_1Q_y^de-h4=^vx3Q zgl?*cItMnbr|Wl}1COx>{?9qE5&MyWJL%s_`gbe+TS5Qs zrhhllzdGbPa4P+~oc=ANe^=7KtLfi0^zT~w_XYa5fc|}x{#{T1uA_e^Zou2r=oGqX zG_BN+Kks{ ztW?k+pJG31l=STx_7@4SW_{|ajHQ9`3K>vta<#<1U zXGjj79jk)Y);vfguED<3h>gFK9ty{Y{nee2(2X3IPnGAFiss+)3yCFtmS2b|2~)q< z@ZCiiWL915zxIDVsU*tXO5n7qhIw%D%lL$V1-<^YL!cN;wAAn}k&AbyAu-^YntY)7u@NKAm_MkFaf zk`%gJ%3;g&gNd36i1{XBUPUomY=f_Ar{T2$c<*lkiUy>x|HCLQL^Fz0(f%pcA;ri~ zf#Oq6QOha53_wwc6zh@Vf1Ki3PGJc^F%l`1JAfjSQ_SKN_XeN{Ly84RaV@79$|-IL zKylCx6pcu6>LR0X_G1+1`}wDM9Vrq&!;`Q$#n+tTs{j-WkYW>3SUAPYoZ^`P6n7$p zW+zbG%PHn^ikW^Wnt?)v6ible22ODsr?@46j3d7TMH5n-zd(!N;j35?TxZMjL-GQU zy#2eY{Jw(3shvmHuVTL-aC47XjS@WjCb4f7u;}m{!D`d4ZMk`ByUD zb9k7`!z><7=3xpC@8aQYJiLX6*YmJH53k^%nup;$boS->&%?iX_#1>^rH`Xs6?G^l z*x%QGqLvY&T$J5k`K|F|rIJcVpy+j*)XPGP(*P z%Q{Bhjgbb7oYOJ#CXC#Nk#~2D)L>-nE{MFTW8@idrz#)8NKMDcLl}7wBhQ@c5cw%a z4ylI7LwM8B4nJiZOYGB#R)%OFoMV~w`4wngV*7-KO}6)Fc+B<|4_~>WBd3GFDHSx$B97=g;48SRd>V%v|b50XD zr?H$<0_PCV!@&VK9Xf$J)nNXgWt?Q3(}hUJ=~N`+VB_I0k^VWYMotyTX*1`vJ(6+y 
zfOC4Ab9jx1&j;W%hU!!coN_s*2RWxa&MAj;$mC&K08XvVs1tIE3!rBR|lN-o?)C0M=(y`MleqG5sX6>4?m6Y&uKQ*2|2CcoSx*I{>M2z$~i3N z;rsxcuB1BE1E0mpx`XP3oU%EmIh<22=aj`cOy*%q08XcW zMVAPNx)81Z;Q*|%K;WHk79Dvhw zs?$E;^f2dS=A2e=PD?q5g*==afKvq32{{eLdzK<2S;uy}A9Qq+PW*y`zXo(U73?ro z#n|fnAY;p3iO91N`FjWJKkp2H`2=FFLCkj@Uj2vzA{S!hPK(-N5Kcr_z_WQ8@HvxMcvfuj`k~iDNQg#Pz1BNq2<2N!+Rho_*8h}F) za&RCA^It5-pVzP?U3klXl5R(e;oks7)?du}c^YPn96vI!!&V|i0a6U46iQRoaGM>F zZSB4I_$L76{-1$z9a7r=bW?tal)HJkw5joCD*kdz?O)~pA)6*-^X8vS<&V@%@H2GgzWV)&WB7iSm zD#6_Llkilz5+#KE$#S7*09|$>W;0^q?Ms-H-+Dk2KIzGla0FjA@t3dhCU7XyEJ5ntx;m(}=k4}W25cyHZ$R!v#4I`iJ z7v*-u=Zrd1OD5S^T*JGMa+nf3LL@60*pLw zXE_|&ofYqYRqeEic=+{>=Bfb7^+;(&%Flt)wt<#$hpk5Czj0iM=xvDp1Qpb5Tc=`z z)~Z-zznTl|`Z{Y8%*>)m5+930^aQE-;9%yHueXgEwVXjul3a{ ztd~0W8tYaHyRz_p9^T`J6LwUg$Y}|38en4_UV4o=5QF?sV8(oR7zeCK;UJ7+`3J04 z&3Mhfwi}S56)A9-jEUUWg{5zI7nbNNy0Z7J{5?ECy0ehekVfESpt4mq-B#ZHMA?R+ z<+Qx$7IyL9<3u8dCCFhIaZ|Q(vA1%yR{9~vq&GI=pc;w){*5Ihgj4>b^k0=XB8BoW zP*fv@)?sRrsmoOKo4unQ#M_?y*y*IRBfY!OKB}ynYq~`4M+-E<6y$ja@|^!08bN8B z06c6Hcwt=NM-?RNhh!^}Y~*hp3S;{ZIIhRYejOu!!N?&;AQFekK9RdIvJ4~fMky=~ zM`}AZu1z@qiBej0Xe5ApzFrSHh*^%RJ?EjIY z@+kKI-io2=2V>-Vj7&Yr%IDiR`fv4Rh}nvmSD$1GJhu)7SJ)=(#P^l9H>)vR%fklS z)9X6=4nP_Rqb-nN5@(yr$uHSJDK?uhm#Cc4e z9YVu#^@3Z@eV;Npk8~oZjmuH24=iVmkDM8MJaTq(InABOxp_T{9}z@OtB;)6y&gG3 zb}%^;cLbL+gUcxhB4@@9Z#icra@KM=TRV~SG?(*I5IJ=|avJt|oiCMWju z;Bvm;a%zIe8Tq-l90!+!@kKuTedRbf{|lTyhA!GoozUL&I*T9nI-|wV)iySFLVF*l zMf^SxMtu<&U*I&4bDAf3_#6*k<>5vizR$yLJp6)(H9XwU!$Um$iH9e7_$Lp~@vxnT zVQ;Xo2M_x|h=&sO=kFLsKzRKd%nq1GEnl#FZu=r|K1=h}6^EetQm#pq{63&faPuAa zC9e-(UOwLfAHEN_dCJQ=&Ub4NzW7gk!@E3Uyqt%1KK%b~_vqcm`Kzp#uXl{qo9|;z zkKR+Q!OPnLo<~P`_^Tw3)>xTN>;HpJTYYpI7A14@C7f^Tf8eXC^5*;N)gHcyRgAB$ z>hk%{_Tl^VKo8#+INweGf$vTqzGH@X_&PXWVb|sJjo9U5hg&>+Q+F}G)BXeBA|JjV zjq>na&-qsT2fn*~_=b%3@NMOMmDQKeH@ey<-z0o>)r{|q|G?Ma!*^Jc%-x1>st#VK zdcMW#5FUYXkn5n@?O%tHZ?WGK-{NIKVC_XJyi*^ZhtquGagA#j=M%Hr)6c;u+`0Ne znOpwU-HdK^0Qrw``Kx^RKh5R7#KYHv$Y1XxUo+n${~(vI`pUojeO&%QAO6R<+-4rO 
z1WW%{-tyNjkfi@Bp8f#x`)p*tU%k`^b+fmZYD{SJn8JKZDCJ z@ZrCJ%U#05}o5@x2FyieX>96&c|K=h|`fGXm1IWMgZT5Sz5C2JAuAYa}gUDa+BY)5m zN&2~bRh@tNE4lnNKKx(ca@X^4Q?T^cdCT9jRFeKWp8f#x4{-TMeE9#$<^ImY)4|g3 zBR}q8N&2~bRlR@t(VN)sv75a4581@z4(H(=n}Vdj-dq0GMxB`KmqsVbCC4=$WL4$Nk5mb+UsBbxXtYMi9Yjf z@o;qz`7vL6%Rji%;{#6hk?Sje+dFK$_Srk0G5X3*Xii9Qw|0Wt>s=3SQzy8>IuEXD ze{kIFcRlkFBRj!0z02Z79zMfqyJ1@QGrJo0cg!C-2bZ0j_A^_3&iQ=xt|w1iA?kAi ze#g0--{0kZ2*>*xLM#*CLx|_X{Rm+f2v76yO+HVv>V(Wa9~ALT@U%s5;r+)JCSwSM zsPphGo^kB7PIye`Jn|(pt2?1tvxV8>g)N@BY-=YpySSXMxvmHJ`++T<@q(fuIFB|? z6Y(D7(f2*}?h3=-W42fEcMOp?)|VNb@Xp{g)81ogbA@AhIN?2yo?ANM^%&=6;d(yD z-(BGbuID@a{hjwbWncMiaNf-v_hxt>83b40_&wfdIHTWZx{TrPt}yO>rpvAT-4$N@ zJ}a+-_`56Y17TMP`|)>IxDZ0zskDf{yTZ31#D{nIyDNMF!rlt>N#k za5jW}A)L$KU11i4(GcFp-(BHH5aJ0~+xfdI{1QUkDZZP(yTW}C4utSq{_YCzhHwys zlli+VTn6D_2u=JQLs$LTcYxXY*n!I*dpizzkG0#yS+0|`e+QY~ z>keMN-W3PE^)Bo0>8qMM(aFJeYW;xsEfpTxnD2wr-dMrnZ>eDExrNhWUo#^Jtt%}n zIQ`?C-W6t7uzqZ21#2hf@^B%9*f%b%@U)A&JJGR%%c$g`mCLB(G8%Z;2qETWQ-!Bp zj5!pXch?U+xDz|U9sVJUM|^CvmIuLi*=PEPOxJuK&g0=C9xmhI<2-zVhtKiwRUU5S z;rl$?#=|c@^yIbbhv51XuD|0$K9|gCF+Wl}p`G{O0qU z!t-Iy^JN~IK4N}|$irto^3*NT39qj>kFO;((T%}r&T|^4L}y(mG&g+A(mC{FX2VhZ zJ?Ud+hw&eKY`U%!UL_xU+7b*N{+Olvan4KR@8ZXvJa%-#^Apa~%Jr-J*kg|&hlA6e z=4rY3vBdxBgeGb9{od?%5Y+6k?&o$09D z?n&n(ozUFAoyFh9X()j-T21%2jV%w#0R?>p8xwu1e#q4w)CvCBO3VlONpW zfP;0jVU~BCg6ocP6%eQL6Gz7gLFk6e4ji`x;*8| zNRwQ%4?qKu3UC&FtAKDVz*)6tn%O*(E2C?iB&-AuZUmn^?ZO{waTvYf#6L zzg(V<)&REflJyJ2y;PpMyukL|8X!(8ujyBome}S?PYcdDTu!o+9j|-3vj9VWhjT28?UK$Su;kEks<>i&~GRisPCtfE1iIXaa9jty@>zHXXad^x0M-+r+dd8#WkSOR_#MI-02NixKK~;VmIK5AIPkY?mVK9 z0B(aopAmQc*DP(m?{0lh0>8PS#{+U<{jWHNgx>%Xj{pro1w1CFUf(FvC;9H0Xd+I>%8Rum5H|w=5 z4q-Xyf^k;RcRi$KO#^;^1^J+><7@mD4LtS&E<9WJgYKA?X2`SSU~hm#fQDbe&L<#$ zAZ!BJaiCiT!U9OcL!b+UR>*VR0hWFZ$P>;3O)cn(`e6QGXk~E-%ONfi?F(RqXT%jy zM+lqvdknl+K)3+Xyx@1x9b`4F0$YiYN09C&khdJtx(4tTe1qRBAl!rBH-RUlX91)o z5z^KK?`t4!75I+!FoT@|W`N#thv0XxZzAMXBG?#hX@xSmCL5ktgRGUHJK#3Jdj&uP z(9m>2S_F{aa16>dq$dt+KLg&IAf1U&1{Xl6!*YOefWM{&;s7dk!|z`~`T#WWEPRFQ 
zfq(|$X2AOz;J@GppvOE1-Z}s)%7EWzU_OFAGvrXV6cC0$2oM9$G4NXxzzi8Y1FQ#_ z0lzolJK|wHgmF+;H2`a%d^SORVlku#>cn~|Pga060CDiHfx04q{u;202J*5Aey_ma zF$8D;hyhpvZNqwiuK)-Oo&u-__!S`fj9f?sm<3P< z@G$@ZP@k0x;{oyjOaNAZzW@gPEf+EX9s#HXI0f7(cMFYG=QZ56#ypz2KNXNrU5(#Pyuip;0kq!Fae+x-~)hP0D6H( zss&gA@GiiQ0FgaIgj|4E0crux1KiXLYyj{gz+Qlm-XVekpaS3oz+Dj`!kYl?0LhUd z!W@7%0S*F$_W_##6aqXCa1dbl6(Pbxfb{?e0lG$o2uT1-0Nw-m72t}#pfA7*fb9T( z01UVi(h9H?;2nVD0MY$IgcN}30Ph2YMFW3;djM7f>;(87p!ZcF!Z?7102=@r0EDYU zgm{1%02Y8906qJM2>Af-12h5*j0q9a0saTD9pDdutF8e(0G0z-0nP&q91tSt0A2?; z4AAvjwsS%bWl3Hibtp+J}`6bcUrMZ$x^JYl}D zKv*ag3nhX6@Fe&oPYF*8&j@RUXNBj4=YqvJ zK5fFkLc8D;7R{P9zqn}b{X>Vx-8^pMMB%PG#?38RFux@4&P45!S$PW#bLY=X)D_J% zmnw3{lGA?i4+`QTTWTqo- zq7MG*xu~IuiI+jir5J$rexo5TF)hz9bM{Qb%m9dE=Fcz6o9V>{;&LZ3kz*&P1(uY0 zDLDQTGPYpmyg7NZGYm5gd6#E`OQU2d8(TE9q-4^3!<4)^b4v_)#g}XD%b{ly$K@5} z%>m54z-3Uq3a zI9<{-SL~vhMRR8-70)p~m^aUGdFi@j;%QR!u`^5lo1EZ;Oh($&q^SkPc{68wS_A)x zP|C+YxS%Kx6~ElFd8xEqlCh|0N?tzHrg^h2v*27JDU&n#{z9lNK_hu%eVNI`X8~(Pb-s>Ev9V(J(oG%zWd#*(H}L=~4(KGSc!MoL{{3Kgj5ekja>W1tA&Lp~?4d zM)bP*bLSbL^+=3Myc}$O#@vUn(MfbsEy$Z`7?W2pbJ5)S#feGt=03>C)ADA|of)77 zW270ET1ipjRR7JIzbJRB=HrVcUO%p8|j9xjQH$(U6zvv}x;#MycIGmS+C?(bwVHu**KXBy&%c}J#6 z;zkVjj^mjKrNWnGvOC6?U#dICms{@L?x>O8F=JVT(T|`MPn55u@t&v-q9%Cad}L{9 z%$N>+Wk%9nN&2{rrFu9D{u1L6>c#rqTlO5b@&qS+sqhb#J0%jCGUnNXCL#E)Zj z)fINc=E)ltKm682SI8QTlyOGZv<4w!B%C1iatYFc5#&yiqE5PW)XA5Qs=H*=q@uZV z=0P`CR5X7U`ogJsGZ!R|gF(pLq7vVtFn;{yQ}`qCRQb!6cB!ON+T`NAAX-l2DECYBfV`t7P$OAeyC4d7qbj|WyCZEkXl>{p+C=jEy^9;pHQ)kYb4SuA{LfVDK znLyC_6hQ7Isp4M5ey0Vz@rOzA0}XwHkoX&@215@tKNEze^8rv0V*f)3PB7FP_5Z@n z_f}=Nlz-F4nJxck7b$Y4c|ri}J=m+n+cz zYs$pLw7Ij2=a0|xkbf%1-{?jjxe|5%>nZJZ~0eSYoV{MFYyTi#?jleNn-jYCFTju!8c@NB+zZkgl zl<11*FIkEo3<5Q&5|Sj)gsBlno6mTcXj)|bwnDUXr_sY)66ssVMj;TU|B4<6+fTjl zpn7;nN&HolOQP{9KAq8YEV!3IahZB`EYz1kG4iqq{8Ye8A-T+i_^FbA8>#43RKB*b7$T^H`h~dpu~>z9;cxVvwA z2g8D&MxgJ^1VZ~+0wV?9hF@Z<94Db=gOf`sO)LXS<`*r>yM!|Bl}JxFAW4o#AF7UV zUebaEMRRA-a)bU=UUA@2#pPOT4EsC;LjFJEJTB8s&;yMQg3ttT3H&2>K4J($ 
z{C_eikn$hy&A;>EK@jEq4HSZ)2OKg4p$X(9by!O0MHSFbx&$(RU3qY_0A5C?M3=8% z08gV+B0s)HaGF4#Q|DBdpQu3oQ|DBdnk?yrQ9v(b8tt)!%?fOFF^=0L92;J@OWe}o~SoeZU?2>1e7~Jz@>}RHp+xYCq*_@TD z*X>|??}V_0p^TJw=biQbnMo;kyC$#Vy}I!6^GX%~TRKWj^za4aPnlXgb8bnV{}r#^ zsq$cZEks46yZ!O>zl1a(&mfCH1JU?jtr-Zr!+OX-hyfQEc0%C2QZNv5pk;gk@%^vH z3xpnM*<3*Uz^l~)lK5M878Kcc{a7Gmzl)2!k!OLCc?HWg%+I~ND(3Gytb{GYN46Zp zKidxDjE*e{BYbe?19`ay=)!ZMM!Kkdx^(opDKf>IomVoecVt@N%_D-_dkJ=%GIvfv@Yw}-_Cw{J^#I74n71ge zC@^cBfWgTBhq`MIuHriHE0uQm!*MpY{76EQZLBmjcAd3D9n#RUj%AQA;7u$rNpQ5- z*v7VuwjeilO4^O%)+D8w+k{D&DUR2%Y%2}M3kV_fKnn;EAYcK3#ADh!9VX+XwBEL~ zgo|C22{!U zl8TIj;Qx#N=7;V-Q|`hDg7Fva!xaabFIbIt8C*g`tPdIz0Ebc6V&L1ew?EPZ`J6R?gx!e)##B{qsM5`@1W18Yf3at zT=%|Lqlc?7FRr=&jk(aQ=@>hm^_j93v_B`tn={aV2D=yCgma9K{}l4=!8ztLw&LD? z%rRP{Yxr%h1icfuwgq$1?_r#uLBAashid_N9)D2Ni~DNyB7VCVA9U5|c8GKI@BcFR zKO<-td%#N%qg{u2-QA!GnRWawZo@bnFFXSso&_EB58p(8-1m1edYfLvTzwdiYaaFQ zuF);fk8$pN1N6>j=>d2Jb>di#>?9+u-yUt#<(JW2SYgaq7VA5Ah~BtxUg{FVzzGw z=He4VKI;gspJFs$_$y%Rb}=r-x${{zt{*z{fQ=YD#u#Vp1phed&ER89@b7`AVi)7T z{f~hG*riBvkuFCd({aY5h5jb`%>Yh=U0J#pfh{)@@>#%+b(W1?yAC$`h~Tq`bNi!= z&kIk${|;yAd5=L4y9GTL`tti2y*02;L^{O181s10d4Qu0z3b4ojy^LT@VQ^~(V({= zNcLI#7vOn|uycq$#u;Ykx`)B%A>j`e@ZlX~{?PR_JlvT_YU z7uvWKLoi2yW8~Qt;JH`ai(u!li`^>#ukI0n0}t4WhuOUb_`bbQ+;gB$Z40|+kRJxZ zcRbAPaCxaW_z`-^lyN;%9WwKkqkA6nzRX>MoCzE<@0A}rz0%IZMC>&vP zMo)tF|7us`oK=HsH4F;|uA7K+jnG|iO5zz~wask&n#Uo}W-)(^dtq;K{!#e7`%w`~ zy=l1qF%e5`%+ZW^SJ*6c9YTM>KBlwbCD6-$fjtfSw8y!xV*aRC%U8=iw)2ZYT`%5CSE&ToDMSRN8{c<@8_aB_}oo_N*>q#0|4M8}nzuCQi4Q zFQizOikU;ul{Z(&U1-GkCQ&~c-$%?soe-YR(svO9G=(lb)NSEbp^J#>nEgpz)S-rR zZWi;pABG-3B4VWleZP&m)sXUw3E%fP&O1;KXxoJ?Ow4T@Vz#)99PagsaRu^oj?+%W z-{4$UhXLt9s$+esr*86m?xC8X= z;MhXl6i|(hxT5XO>bL`b5e;XrFG0;z*eU$fARKX@+Ju-I?3HSA=-=3rk_EXtG6cVc z--hl!$#@$7Ghq3MRFA=SQOaK%5YvK_Qf`3n#=R`2d8j!Z$^Uf3I*0pT{3v8TEAfWd z8Kl+=Htb#)SJW_wUva+}>r(AJoE*Cx{-YfgaY2WD171U41G$|0h0KNxS~kZV@Tm6+ zo+HA`cEb>GoX&r!j{#Kqx;D3bykw+ZFbMH9I6)(VNok)rW z(66^M>6_D0m)X0eK9Iv4OZI8Ry1?Ec)pgLjw};KYf@)D);4b_!#(ss%i#p$iZ*M>? 
zZ*>cuM)0xla8jqusOkN)LVgWCSvbw)zl=Cs_>9o0eJjS_%CQYwn^#t*eFo4e(ez!t6g2vT9Op^B}h! zvGHBdwSHL8HmGh-VZQ6T;OBtAhy&Z`e64yOKBt9Qc{U&~#AgIQ1*&I<7(eGhFB+HX z463U~c`X4s%rlBLDe7QtL|~!-%trf?m^hDG(K{?*l4|ec3=>mn&TXQ8i7ChPHSN2| zjjmLuc+jsK^0hN+4&%7A=7#-kUeB*bUMd_E7}9ZF-ndn*)rOfYtzKv-FmtmNld?A?$8*xpnx76-arKV#c0S zZI4>k35ARX=Jb1$G6u-+{$^<}12%BR7(N*M@Wsnm{}j&&e8%7- z;(2oz_%tQ`7omTP%ltNShqp_NvymfBzQ(`po6yG@A-92X?g$$<9eFxBC}_sC20zR0 zbt2|@H%fa0$jQM?jNc{59mPFTtw;4QUoW~@=ylYTuFSTjy$vH2`& zh2jh`pa1*d@h&!BK>H43+1kg4f7xR~eiN86LN?#qSRa~)ge`RF-rB}wTL-^3w@CXf z;Gw|Rh4bM%+A(P#3Yd!qc^*M*E-2()iuj@J6!ZehmE3axNw0B z?F7SxNwqm-a}*T5 zUx@2|swVNV_OZ8!*Z@^Ps=w$%XByvE?5DjgN&X`E(z*Ooke>_3#JnNtnXe0G(t4^# zTJO_*-5e{3H^xBL4i~>gd&a_c5n=TZvt5k2qRrAC8)S_5{>MesK5m~>OB0WK8IN7? z9p`ST7XK1>xkuCq8f@fZ9TptP^3#u;Y6L5stlj;$wmkuOtF-&`?pBH`^%*6dKFn&8IPWLI+D!{ca`A`guy8C$E!x(*kR<>CW z0i(yVzBP>es{IQ0&}=jp%}0yS^nU?QXy&iqpQgV9+Gs|MBbxEAIHDQ<2D#C!f5*IN z&L!vs&HE3G`A_sgGye-%L9;Kz4rrTbul}Hv3lJ0ZVWxvwsI6?pT5bsOTpN?}KJ1|L zJ+TI?>&?Neem27AgAsvIm-fv$Mtk7v_5o=xgnVW*^P{&>D;3U4``?tqcJjS(@Zk*! 
zTsXk4c{YiQPGH9#7IDM|uB_2yeu+?DMq5OV_7TU-{rp)5Hyqxwx`zTs9Ph+ z|3b=VT($^!w-vFti)#HrA#;)TRQNvHa*C_nQp}>f&SQH$aIWpj+T=WPT6BlBcDoa_ zKgaOD6zkAvAj?BY&jBR91L(DQfbrc(V|GjXLX;a$Cv`hQds_#jJtxR)?&kFvY*Xly z@)Omb-8?@b4>{wKzKNGTOy5JO)tr;!UPv_h*u5fr&pwxxeJSFQ)-B?gM*DOJ86S11 zk&4nY1`~0%!1w#6V=bnCLRy1k?Hd?kf5*WZyx~dU_NZhh#P^8D_)++WIhy4sLj3b3 z%*2!zQ+qmVNk4-^KQ7^LJL79Pc#af)Hy>Db2BiHYvQ1aAZrKRB!MmE+kD&F}|FGwY z8=!aVl$29x|1C=Lbu;Crz0y7za;o3W^zWgj*H1~hfLzf@ri*o`m!j!Hm&M-&{m+P; zUVtvwBG&lFq*z0~e271zgD#^k>6s1qGS4yDOw^sWEH}-2HYxXQ-$4A^F42bl4em20 zYRuwZfzyE20)u=%8#TBW3YlVx4~Ljc7aoJW-O`>FJx?4+&RdW5Y`i(^TT@W~_zQ$S zf`u4!ztD$;{25SP+$HTvq235iGroEtPdF;Y6xuTw;J&$H?Bj~HHP%hxA4<=P{|K~Zb3SoBhTZBBp9{MMCOndTgki!*+;O)_@tN?&V~ByZ zi_s^=d|LPK;B^UPaL$SN;z1st?>if0GvITLlb=&HqX#iKKX+5tiYi)(72E(#$3(xEbh3Ji>UVIxoACdSuDk^i2j|zAk*Uv z_`h*f%J1|{C6(VBDQ+GW_zw^-9KOdtou1oD`$snD7I-Z`3-%ix*w1)=l>Z$k|Aon_woB=&w<8of69D9qjCa%Z$Pchy$(!UwlCGEYDemSNtg5RPP zS6tf9y!95adDo(Di~6L0UjkY_-)HSX>~U|C z_FaAt{NB#S6>mj9`u8pL40MlFlTsYzd7+bH=}^{ojn8AuQEAOfHDJi}AD~7F4@&++ z^6>q)W?JVT%*O0zfd#8iTK9lfcp|BTdc={!NhyxP=Ei>3M@L_iaQfHz|BfjB8_W?y zPw#+V9QD7)5v_n0q3QnsKGCN95VGOEh8Cf3;h#{O|Cn8K(2ST}yKMvPUqm0Y5Uo&T zdM&kotkGM41iSniyrVg2A!r-u<8Dmyn8(M|A0)?9UkCGc`~>s z_?>*&8h*lfHbBq+6|SKfXgZn++I4RL7uVP7CR%u1tsbGdH`bPP?9=tST7By4@IxF6 z_-&wRH-WDkYD@Mtag5MH+_!OW%Bxv=g&UIe!dH{@BDCU-YI-YPg6^)t?+>64TIt`A zeXzEq^UHqrp2qL>``J14oH;Hi`WiWXJN=}50giD>zMjsk4*a^54itK~Qhi3>$jWEv zOun!qdC$BSb6m^rZBUH!__*~y1Fp%2Xcp-m^M>G`VqHkraPI#!+sC^;*{ANOS)Ijo zYMnJJ`uaKgovy;;^+~;~e>qFfxn50g{cBlS+?2dif0msyzAiPV0wZ>gyza}{IqlS( zBMOW}Df{=llI;_uux7oMwRMsEK&-HJP?zMR@pT1O`1nn4WXC(H@wdIInB%%+pV61H zec}|JJcSMHR2=G5=ry`ANyk#egpm8i3mpnOr{FWyorflFT`Zy`QZ0N|&>E+B>_i|QF>q`9S%+d*Se0fBHUpA-nK5i7ZE+Fw!Rn;hH38x{F09hW;4IN?xie`UP`a?6*B8N_MWMTMPAA;B2}*CGIuC!Xr*LsRryB_Zd}D&7Pn>RYgt~+ z6tC+Q{$=FYd*;tIJ@^E8G=L#$IQMZ}|MUF1vhMHl?+uq3SNsL^w6wNV--Ta?oM=~S zF{<SaE#HsHfARsM}yAMT)K|FaVzZhH2R~N7~`*Ge6`}9 zgFX>{n>dDOR$J}WHJX*?+!%@R^pZcJ0v5ny-COSB8*rJvAr)HwBvkhyhZ-X~9 
z4b6WJb9lJ^49?LCxbMFR9?`-V^XqzlOLhI$cPHl~`)SXkb!Yi7zJuc`&_nCrULAdH z2Xu)05t=iAytr2VUF$3VnV-el4UN9oL*Eo!e>>to8hyu*z7yJkb85fI|8^w6Ikn&7 zwEA&Q?aDW@hjC7gzSl}L$8k=r^o>`v3pl4nw29^=oKvH@iKYf#sL{7ziKdQoY9GKk z(KK*Q?OKg}r?NN~@}ga*vG42ZjqnTHze!`?V4Q~ECK{d7m=!o*gLXa6KZw?a^AN2L z=N}?|aIQ6J`i(fJJ|@m*q1}LUnxhrxYtTq`;;H%NyMEmLgShb*M|W@i&@Vr|cgnKg zNa*{S4bbVW>~oN})1PH6YO+^7u`=(4QBvQ#m(VdWHv5-{sryxY8Y4|}Q4{-H(Go+6 zZYkP1U!TeJQ+GNvdzz z*Q%uYPWCM-Y5B^&0!Pc}V^K>>)9m`vyBO|Y+4>xNJ8WaUY1+>`-1@{L3)@yMeqvR7 z>5WhMb7ly%+y@HzGS9B(-5$(zQ}f5VtE2M7cwIER{_0x?tpyX2X>smfDVLK~=6_Bl zR&&PjH+AGpT|xLf*<0pSwom#`*3*Jk!qLJiY*WVGis{Q!%cGO~chwW;Pw1urVYQij zu_{R9+M$ebtC6i`-ei4sF$wX2yG;Fl;|hM9qIGZea_77+J9n;oo>|@A_O!jK4Ozea zxzhWZ@?K@FwAWlksHueCQhcfcAbGee7cnLmU)6y#7h`3ynjbI3+vxIX-&>78r`fLp zw0U^HZxZo7^UBu>m|2k0at==w!(2kAs#lGb;kl_|JW44yH%m^u4wyS(oC<~H>sC>0 zl(CY*Bl_SrbvT;hvA!YeazE`=7Tot!HD&l2= z3)Z}KL!}kjt*RuRfVBaBK(3FdhRVf$bIT+2g3Fe+ho8LDA~qg>0_9~T7rXjsMy5BX{pr#N08;P&FSBOP@?H86 zTlvr2DdN!ske1%xYLR}fJa}5&_-HG_8(rhiTuP6+=Cm$*e052j6rZ?MU%&Y3gsHoe zq?{POVA-mtFg#CTS6QGJd(*2^^LR?f^Q#k-sOAsKVya5X?wpv;()_9RYpkUGe1y*s5loj0TioOENiu_lG*ck z$d|eFk(7yGz4U)tQPs?wFk|A|EamM#NqtEgs%pNJeh5wf$KLtHw3X#|{3JL#-Yy!U zDcUk4c1l}zMJp2B|Nrd^6DI?zGvg*shK|q}2buMf%zIo^$T` zchBd1f9KwdZH#*(_0dT#3Ryq-U>+Zk}s*%|u*2eOxx*z1aDE zj(#qmlWQ!aa{Ro~ar3p6xmJCBj(NIje*>w?Y2h-e&S`FOT)arVF4T|Ca;2?tn!3n; zyQt&8>2v%%=ljL#TG68~O%I;=G+$}jXFoM}KJQwd_&Rs*bIH@KWqtO{rvc0AQPb0vWo6X(G~hWsz2u3;bM>j= z>BNeDyyQvGbL++$`s3M8r&hi0TwF@iR~p<(=W|cHqUl(w>f83tmG`{q%U&&aF7ARy z4$9_p_bq*xSe(ua=t%d>cYTEal-Fp$Y^2^}1oTpdMakaI- z^a`#ERq6ldSYMUP!FgW@;+>ABo-u4*;=1saucKc4xR__33l0)1TF!&zK8C@mw(n_x)=(qy33;KcH$kr0V$nI`;vd^BTuDb!Kb75Ad?C zH+*ecYB|Su!kv|CkIP@zS`*+yYpVg7?!Z_G%STF1` z>_3;XZS zoV-3+L|L#cn6T)CZU3~6xsyjPj!VP*Zr(fY_pgxVBh)Dmn}xN*nqXhS{uOavd7C=}6Vd0Xn0fcoSY@;OXhX`9m*jz~&8wi_5 z8Cxakwo%7*q+2LS_iwaKNY`GH?hTBg7Nk2aNoOGJ0O`CX=^i5N6k(~7uyve|zE98g zOaK0#h5uVD40!I+m&_I~l@Ri!$MwbU-TfelSsGEP;i2T?I8t}^|M74#R~hpUWyjo0 zM04>N{crg5M-;+8zj7PXeKhaBwDSW(mae`V&r`>~GZxDs6m--DGtpQu`7jry*VvHe 
zP9444LSJM_hR+mw?;@mEj>uD zDduvS93jmfEHtvi`9S>P@FVAm7L?yzSA5|mdE*MbWR$Nc=*vPo&<2{CkTr5M7pGVA z9<%G4$Y=N32Uv%r2ks1~@<})f&PB5!RD`^B?Or_3k`gjOZl*FracAvLzJn6`GqE^( z=qR2eiINt%-Afj)g>z4P^(*VH9m@MlSu%TW-%vAm6JNigM94BYBKF+PrWgmw8e$vSs2X zjU=2lOE_R~5K&!ZFg`dGN1uPc(C)(j9~W8~NhOC6|KR4``|pLj@7&@g&gsivcaBJA z+{_}6hn{5V1>b?l5G4#=On)vPNoF2K28Pr9q^=_UfRmh<1SPV|+Yz`^I5p_}NIJu6 zk*aRg8kg1$K;y3haDXG7NK1^kbH#U5P6b}#Um^cJ!f=uq~lK#3o*2R*{N=&XFA?LoG&}*H&GXy3se+gbbR)+4Wc>a&}I<*Y&#MsO@wta ziDr}62ZyddNv5wOhwJPFUQhAY`RmRwD2vZIj`$@h5||uHqbp0Gv(C8`e{t20^f>=E zABoe6%DL10hn+p4cLE(Bk~{Yv+zb`Rzq1mOh@I2VDD$8ssI5a<;R zLJ|md>%uN$IQ3(j1JV9?cN(*9JUU43I7b&FI@5(quWkZz*GW-$k+U-akW4D>PIm7H zZ*|@e7iPY15%*y~r)!+Ko%~uMG%%b^VLC5HV8j<<*mUm(d&%8Qn7)9V&1;Tkco6n} zF8PRY;D*%~jz$HYL+?hPyfZw2#SD3)5a&d6MTc$;K8(k3j_7?k`b#sI9`Dl0C&_MR z&>Wqa^R;SiD6Wv`4Ds#-M}DWGK0%yQPcgsDAkLDT;rqE5nvx=~7h=2JnzEOHJMAsp zK`MX8dNMwQo~H41%BdFn8e;FK&)}xNjB_{BU9x`_`=#TiJBAN-aghE>&RN;e(TY_p z^*o%T%U}LZ*txr(&Ru$RDT|nR=|)u~tEXdZUdZ-Yy3!PXf9-zSQKhrnR=3$CO2`oP z2`-F&lX@KPYv2pe{peb_Fe4W^{PVi*A$KQ}yN!W^`8Whui%ztEia6%$Jfh$_VIj{S z499a%vACy$oZekzZ%IdqZ&jAq9V2g-gt|1t)B6t^dhaQ^dXPSH+&P3=vxkpK%auTq z{hyPQ*md-%><^JgrM^uNCkn1qjNGOw-A_G>cj?w(2A=0%k^XqHa`uF8nK;+Kwf|jV z7a*G4bw+78{e2^@O8B?2FzLcp-xB=oD$A7SxDQS7lv1aoha^dsL48G;Rs8f=He|V)>b^tcSj!h!%S*q%JegS)coLk5TtPOWb7~B1<42%=xm?RHbc{$XO z`WxRbDeD0&huqG};L*YxL_k|_5D_hPT7Co8LfYIi-XX*6_!z=@_!%5NsnJv#JLY#; zUL6<>S6E->SvhIcB305$k5NyjrcNz+x2ynqH%RDUxq;(@Nc)H#N&7F0R?wb#1Qmo- zk$H}S@kv`o$0e=*`)Ijc$R53@2`*eD&w(X<_3u)cl0BG2-(dGWan{(#8xnM4uOXjkE7=C-+G z_S!K!W#{ctd)%J1r|nsL-d?nq?M)kvVm|`cu524S#-4Ftgv?2EisiFtuH)!^^T0eb zJ(kz1w;HS_tJ!L?Br9NbSwXAMidlJU#2U4xtr=^{TDCT=UF*;~vW~4&i}*agdS9zg z@@YQH7xIODDPPt%;hXeL`KEm{zB%8DZ`HT$+xH##PJE}ndVjOO-Ea9j`~iQjKjqK* z^ZrTytbfkGQao?cgP)a$J_~b3J^WKm#^m=_(r~oZ{}P0Hok)o z@qK)Z&-3H_1V72o^UM4Szrk4Iu;;gtRE{V(HinuDSiQD3icpx5% zN8+*QmFlHtsYPm)EGa03q&_Jo<)v|HLYk3gr8#Lu+K@J-U1?9+mky*O$tyR>ZF0M8 z$sKY?4$D1quiPicE^o-2@|L_U@5sCIp1dy~ z$cOTg>`}Z*z0#_*DeVfUNJ^K|qx33$%7`+dOe#~#yt1sUD67hvvaW0>yULz&q#P?A 
z)vMO4t*WGIs-=e29yP0uspIOjI-@SAi|VSnrf#X*>b`oQo~Wm4z1E;LY0X-T)~dB> z?HZ>^nxX8LLbp5^hteIpVQ~{1$|Lp(wFrWeN|u6xAlGfKtI$yMx)VWv>EM& zWdx0o(PzYryfI=-7&FGKF=wn8YsR{MEkHq%c^>%~ZXg4t`TkSTx{Q@>AnUu&$M&qKr3KF;M zJEsz-C zyZBx{#b^03M&lg61R5O@5BVeh7(CVs4ML013L-m%kPsGnge-`h6sCk}VMdq*m-E7c zu;|jcDy#|X!iKO3PPc^}VHdO>2t;fU8^u7qfptVnoi3u^qcpVW(#W7GjDb6!q zx5PbhUpy7-r3O&SNgYx^3QIjwLP|+lkUA<&OY_ol38zQWv2-Fe$SpD_OR@$^DW9YA zq&x*aXKN(%R3?f?X;xYkO|g^?B>+Bqm6S5d*ql~olv!mCgi1ieuKeHDOIMI%lj|YYvPqSc^<4SHS6(bzq&iO6m1Ev^M#g8Lt6G zYz)MX`Nn;-pmmASy5ZaOZ9z46!0jH>PKREqpv3R-d;OH-Mt_s5r6GUVAM+=`@;IY+ z0ZO{+U-R!UB|Y+=v0I&4s-}*LPTMo~67+Kg)UMgv_Kv*^YQb!l5PBQI@kd&rp^kp` za9M85RnRGJnp@x&LG2n-QAa;LjMp|sX&73W0Hc)9X?_NTuJP;8%RPRdX{AT-3Y5`C zsAe-rl?09HX0Ol(?Mw(Mm(~SF=Vpn1o`Oq{=mnb&mtnD|Dwz{tb4r|MsyYWs7nruL ziyN-0o{9vlHbPO`q;{9qUZ$%BW=BBn6tr~-+^#cjx1~erR3b8^ww38?ha8Z*KyFsf zLs=&nwX2|%@_8g5%O@_OR6`q-Mx}|ddZwml)P|I>l3?VHDdV7av8bt=%9gUN>@aQJ zS5A~urCx1N+nBO;r~x&|72bA z!03YRCX5t#rrJAdj2Yt~d(xOPrd_`0jRj-TSTdGDI@RI>roB{kTU_0>%rF>FKye+; zr=h#Epnbtyga5k=-}e-(H-Ph2s|~D6@Oyi~c{RT0tyM-fRo`PqHszV~b%5ruuLsJT zXWs7uXm&`Z1XFgs;I-M`QdHkA#x2$0K9}CC%kL7{UH5M@M^Lm@1v*($e=4L8*rJgS3m2(x9dbgwCb%vjXno?GG z_+9XN!c$V4nP*EWwV{xJ}H-L zAhXopJz;!$B9Wx(}w9U>=~k}feO9U70pVuRYKHmU6@$4I1pZbD6|qwsa7z#}D* zGU(7(P}D|{$1&a<(kNe3+8lh@<)W7EY5UBhrF5OReyvxpFH=_zoKY=J=~uv%8oI+&)bVhsmY%rGRVu0l{(2aDl)9-h(pFvX*U?pnw?^oyWCoa@8)F{sD5H(8 zS67&ayKC-YwMl7fVx)1*$ECXJtTsbd50sTMM`?3(bmf2B`x3aC*0%4xcSWTXnH5n8 zr8UoMZ6#wxl%dp73WXFx2$f_$LI|ME;3EWX046w=3iA-^-qJ6t}w-(5O>Q1&s+R zRc0j?WhCm9Q}((#B>E!$;Ch3R;ebim`$m1Grb(4PI33vv$@H{>P}v8|?$=gvzb>Gu z{S?oe0E(K1rh}s9p%=mN%6h6(QUtK+qG)KOf=k)sW-Exiq3GuWtQ4?WRYgq{kJ~^= zq;?8QyWk$6r8<<#+NpE5GB$509`_4ERSGI?6(qV4U6fql4O2dMP`)apEN0%)gBHNV!%Qc%m6N=j8R`hZ=#O1I=#U6>J(A-x#>c-a8${npU^35;seF&$_TX;?J9Gr zd^Xg+((U?#-;MZ;*<>+AQA!!Rdn>c*C(V2S7&ZsJw1vcKFChw&KPjhM70r~>Yt-lb zD(j~X*{7u&%4w49fGZ^P%GBCkLGf?}wF%l3P*9!Qy;yl#RQ^RrQ&g&?DmtoDQELUm z_DX7HDsenq!EJ(qRoy9)F1N~OM{;UyP>I-Y6KlU8B-T$FD`!^yN+&EUtcT)(qZI#} zQi(a{9`mM(P4ABWnl1+yso2lSg 
zr?Vm-VPA$Qt54@`8Yst24vSy zp0=ES$(tx=xwe2{ox=K8$*o`fY&M{{@EdJqDo=Bxlx#XfL8=a+#{r#k8r4@6_FG)a zTKmP(>aZE7XzsU4TcF_c0r*>$R$n<^s-xsnrV2viw8?;`w5Hk36jp=dVJQ-IB>TafWEzGW@REht*EHZ>y}hGLHd$X z>wM*XU6h=vJ5&0SR%ItFYi_i1>MQ5gpPwv!tHYo4S9j8cR61rCK(H>+&Z)#~rs9&1 zSK{`2J${etD+rdov5Z-;#R-+C$U5}i z0PMaebqQC_wOv4ag8;qJpOKrP_-0*#eNc(rI*M;5=on$TFk4tFYy<~*On45JiHrTk zdE!QJc#p*o;xu6Tb<#BLeXS~6r7~2jRMn6+h!&Xz{CfepgSDV%C8#W%Ff^WdL;3x2NaOK|N7Qk~oi9onQsl*Pr63+-dU@j-p zlN?0OBiE3-$lK%#(vZTa4!}dP)I#bvV4h;?EoDpN^bmR>vx3QCt}!)Oj_t^fW+$;b z*ete~eap7x`fvfhTG3Q<{I%W_zt`~KboJ#AK)MHuXr25LGToY3d>=&`9iT! zCRm7VV8!v`d@)BnBfb!gB~cn7jh7ZmE2JFhnq;b#v|Y8MwUe}Kvq#aJ2E0B3;NBY?;A z2?Me>l}N9m^_UTiE3Yb)d;V0a4xl@cWIovr+|z7oKkQb0x+^__-aucWt1&Fo1Nd?} z^N48(To}a8V)wB3SsplUBxlOwu+l(2p5Fle|0!QvAi?8L6jlm{fpMzC8r>l?Cx8RU z6aNq`Bp+!atnjk*hh(Ak1b@9odtUogt14Hi-ox`SLu`;iL!GCdQ?+3~2hy?hYC4C0PS<8I#*>L{MX+p^$JAzqubL^Y5qA{DR8F^PrNC-|-D$pF0B!#|!&mpKl9)2>N0((OViI4F!&r zPugv?-L(_6bG0Y6McNly-TnNX@U|wfod`@9h>S(9qPNjUXicmW76X-h9y7&T;=kZa zVV#fg7KB8MCAJYK0kuw~51=xid_-DP0n`L=yDtD`F7y;S2~@BKsGcvg1TyA5#)0j` zMnSb+W=%MEZZK%jP3{lQo)3kc%jESyN4$i2;Q0!L#-I+v#r2>B6(SCdA156GE;rY@ zLMlquUI7Os-@jC=PQyDpc(zgCiJl^cz_=43qnt*oW7fcu3$YB~yr#fv;rK>iu=h9( zzG4QE1FTbr>;c{&g}e^V-JTjk&7xANEz~i<+B0e&eTcq5|4FxDL}n-x!z^SrgTKBB znykmxV~sd7t_|nJ^?(&baPv8DegHp#pU>4k6c&XUIpS9(Xw?U=uT}4d#S- zVk3azR{*!?V3#qOOCI5B!i;E15Wpr4$)+Su{zA?oSCA*kBH)jjlr<$%9#kMTmI|T6 z=vj0sy@fso>Eao^k2wVV@eJ0|1Tug->kljv&yMG&aLc%D+#&8VWNkHnf^ECxd za2F;B^My6SLGa*Z;JYovmSRJxsl-V=p`ymZ-lR$)+A!@@?Pl#YZJAbus8t29mPQDQ zbU=QA)HxSi>ki}?vJX9kUITXch}OewVO2)B8QunW!aea3;1yTkTk#zHGJb+MM?50b zq#4-~RG=r>kZMYCR8LsTWMK78z~>jJd(>6>HvN{a#WZCo@KXUyYu1tVWGAz~L47@9 z)tm*_4qT8QxSy%qVs0iskN=H73~Rav2&pAp1yno|Y69Q?BF++5h$qD&V85D>1VnHb zfv_rhMYFW2+AXj<7iH#CsoJY$`cwAk0`e!)1{KkvXbid#-3*F;6VzJ|tM_?LJ@CPx zp!0EWVgNCLm`|)B4uay90bVR%U7yx80QM&iD(}-C?V!(sZ~OqB(FWEfuc;dwz=py8 zB(mE+?@mZHf%j@w_tMozBmLjW={YXAi3~Mq+9RNd~=wQ?v8-PuOirbBy#hzjE znmWLW;_y}A;IHFv@H&J8U}*#~lUPmU62-)yM14{u`;%iq#deVw$R}iVsvW5D5YXau 
zz*P|_m=(>_ePE>vAnl$4cU+xm0cqBonZ~SP4l-Am4@_g0VtcS-*oEw7_II|7t--bA zwA>)r!9m zO0%VOP_r9S9l)KNc93?3cE9$L_K9+zP%YEGs-;q?7n!{`XH@lYBRl=!OR9`Kdc#Jomg)+nJr@fU`@fd2Y@f%1wPye zva=s#l`!BtNJO?xWHXTdJOEknduaWSacDe)%oq*`h&jRqII6Ws45>9JHz z+ek~mt_Oh2od&!9TDixfRuQo0)=IAQ29=wItW@00U0Agd+6MSD4IFA7)bk7A{^sC3 z`+}E=!{%a}v5%O1QsadO;Nw8=)`2U$j+fzvL@fd%rh>ye3V!Y`@s6lRwgKNZfE)=~ zYZ18{DgjaM4u()usAbe<>NxnBSCl#3kml&w^iD`ig>)IXfhM4VZp;v7Jd+46=OAOo zGOPz^{YZ8?yPDn2=CP024{QUj8RyI`h-ltR0qLFg28K6v|G=w-AO`14?J<1?{U z*nU9H156LE3(lIweL#Wa%&Q_y2^&z)UXW?06HCDpXA!puN3tKJ)HrfJxt=^iUM9=Q z8k7~qP)^_%b|@#lhIC!JE!~Osp(E&dz!aHuE`5)t8Gp!-bC{*f7Um>#i>Y8N*a6%` z(A76wFr>8?yqPdrSSA>WEyP5ql%^6d{U#lgNNrDTw)Tcrm7tdYS4oHuasq0=3#$G$ z`VJk6#bd9qTENQ5cnzWz?Cmaa?m3|M`LM^giF?3gFJY(C$W4%>55RLi0ZIA_d5gRY zj{F6gOsxgyy#rj_Vc6|+)Me@>`0vNy3+mFAv<=;cMj<(BWN7Civgsz!6d4j%Km**(7!qyN=z&?qm-@ z4!+BlvM<05t2sl^>3Wsded{qHx$URWRm%=+CP242z z6c30;#1r5#uZXwAyJ9IQO0u+8%8+(Q`=!Hx%5&g3Z%QSA%|zg!H0?&HvVE`*ziZC| zAKlOvYgH*~Rh(MB2WpDcK^h~?k=6);(1?U|1iX485y&JY4w;QCK$e20t_SzM3vy@< z)MFWB&v%FhFl>(2N1K2nw?}bEpMFrE!_ZOac*vmB(O=O;kVDsiVjV_Lft$Sw+4DZy z4jYV($EISlu=!XLRPHm3#Jk|VaBtin9|GFE4&MjLd=k%x1alk5iE+d%$ac$t`8PnS zdjNG{K$?KAH6ojlW5{e^|C?k9`4}AU8xo~lsP0s6Kz|@LlsZUhpaPoGEoleHUp!Ru z1*qb3`Zc&M1I7fDyf0{ZFcZd%g`MBU9B0lmSD8ZQKJ$d>!Y%=Imv{LHdjhzi4L6>f z1KPd<_;Mq+oipKkgEEZb$MezrbpBW1fvZ(^mKM5!yX-Fv6hgomrwa#!T;a5ELAWLq zfmgE^DNzJYbrpMwsywyQ`}YdmPh)|F8t@lJ3yH!C@Ic!@UoQxEgg3y!K4OHJ3|TQ3 zcKoLJMr5Q&X|MEDYOC$99ixraF4At&stR?!PKDG3?(hM=eTe*ld_eTjdT3MB4s`^N zHv)3gQsqAN@90_38xL$4utseh#RKs@;D=s8W(*^q5*oMtIG5v4hzN$V%m`26S`|DCrgM3D*SNsx2tz z9^pJxus*m|FL9{2P|Oq`iS?weQV{sMxl*>I)>;Estkh;gK2obP6u)o^DFptjf!0Pz z$iF>6D?`u(SV1Ow1U-S?K<}WBAql@lHJBk*2b>-atQY~cwFS$A+S1?>?uSo@RD2HC zBkB^Cz=v%JU*N+uNW{6sY2pHLjWB^)A|P9ICHs(ppd`nkn%1CvJ==9pPAw@`<#8l zs<`T$14nTp=fb%HZ^m*nxdd(rm&G09&H!s%2X(2gCoB-E+Mff086HUcBVq>MkyhVS|?zy1dE5tOY zpzGot@gZo^Ytc$-DLF_4u=Fr#6y)$|X*wv=ZYfJTDxH$vNgAz@)?8a(D{4Dw-L+m& zX|uHpv`ZmHtk<5=Ue?~!mS`UAePSgS$dC&c@f^r|@D>1RKH?QtoVUwI`r5 
zO`tA2kfTY8>COZ)K7hfg;Lmn}+pYi%wNu>be8{4C!ds!1Xa`vH6bFkNq%G13;KYZ3 z86&Nkwzbw#E4x(LdCvr&x&gfR3GF%ULpht9YZMJOLd;-2j!0Le2ND8knh7cW7c>#G zrXA_-bR->3ucFt{`+!wS=tq#M)xaqt(}D4V=QWy{#Vlf0G3%HNNIawPG>0I~xf;{0RFD(t10t6W5oU2CL5IO@&%QU7?|1 zB{UOS2(1MN0ToE7eNk`%)$9s+v8T`*+^U}tBrFmB6l0`m;IQVxl~^P#l~%y@SSxLS zs@^8;lJ-GmAC_{V4$esVknlfB_K+VnsyT2yoY0PFSJWNt3H;&>Iu?Kq0-gy)N5b_P zk4}Q*H4Pm4T<~5==zh3T#c-v1f^XT5-NKCV?nE`RBd~`%B*@;RH?U>^Ifx7)LxD@i zkmJcoaE+#sGs(H&xfYS<$l25h>I{`nU81gnS1F?IQV*#z>KXXdH`E78O&icgv?*N+ z)T<%wMq7b@_h$S+s|GP4z_cToG2mJ!F)@&3W-@b``A}&~!NaC9YncLO8he<{1ue=P_aqiXQ%Nq`MLajei6Tv zUjdj}3%s)l62UHhAD;zz_8q?;wCIDN77aurV3=BBU9q8PB{l=U*IINCQIQl`P&OyA zBjlo3QIsZ0e|JArN?*4|Wuj533=ku5B(*@r8X{ImGo%He&;b;UL|8;boWPxQMck2| zNN;2Y;ty)J0!>BNq8rdn=r-`h`_L@#8@ZqjXV84~61a|=Xc2lBeTX&#HE_Z^;$3lf zyeHlp9ETsIi$Qn@)b~hy45X|{cnm%bpNY?fEd4t^1>Evs(1;Vn8E~SPpyqE9MUXij zf**ZGydvHZ9|$#RKpK&zWP8$#>Popo%I-~h0|o-9L6AT~sgcwe(2hw|3^k3KNzJ9^ zQ;VpjR0?&0S_kQD59G7o!HXDyBdx{MWg0S8Of$%-tr-Wv3CXa)Cr;o_yE5)fPpEse0CAL6x1b^UCV9&h1tgLV)wCG-|c%nNG-L%dp6{)_-1?yzBTW_qmXD>UgVwl zj(k^8nV#Tdyg_LKKxdr5b)*Vwg$=?c@E*H_eL@yEkX%reGvGik30H-i;9u?v55b4F z6iMeOoeo}xmND7fcrIAvS)CxRks&*~#>?Z9t z?JjVlS=z(eT;SX@+I;Y%S3ymRw0D7j%d~Hm^8$@>uA_!JXa;-l1paeB@(gK++M*)h zv@hUuDtZs{atlbxD!dzh7V_Z}{0*qQoDosNnHWF}AwpsQj7S?&AiF{8nMN)o>r%hK zj!XsQJ*NoB0}*s0y_(K~eP{`3U@#K}NqG-r!nR?1vlm!n;G#j?4DKNK7@F@4SIZxg z&QkDtrTm|e{n`mbglR$&T(=F7{oFulCW1q|F1`|LOR-SvS3qwBZ7=O)Z2~Bads;4hAnVAO+ae&EZ9x-~X zJ?qJif;6*$JdmNDL(W-yj>9qgJSddP2gB0d?7eene|y2-Xer#KN&@SPq8ZJZQ=qd^7$4 z6dr?hP9jzjSBMfoRWs5aTv!CTfLsH4?>T8m)uCEY0yz8W)BjVcM=H9;8QEmv?xqu?s_Li>RO8UYz;F1iuC zY!+svKd9&%4PM}8nb znqSXv=TGqY{B_74=7NpD3H^jXVZ0EdB*-Je4S^OrgWH}d9u{j#81U`|DH^i4Mm~~L z*&;5;7$hAzi`;}v_ZCs1)ld`g+6_<#)EN}92Pok%^a3O|L#z?D4m*$C2DBOAO~9og z_$Yh=z6o+eE?$UN;C_%C+L6NmW&2?TXUR+C1M&~@J*lA#shY5djj85TYe*3^B~cxr zihBc3hESuZalnt$ff-Y&ZIIwk!rsdF&T7%^=!vkqOX)Rq2JOlCGC|;{HZa?vlJg*^ z8M3w5Mywr+!u9cCL)a;-D>sZA!~M#oE1u~LSIk-QEqR*n!1n;$_UDK2v-k!468<+x 
zBRTwK{vrRI*9e9}O-SL51Z$zCU=O~U5hTF{@|%ayNAQIN7c7hrB7_OTbb$aLy+U-B zZb@a*3)t^gu-i+3#guy*DY|`bf%FBRcmUjPQ}7Z~L67dEwovtxur8o9v3Lf40vO7g z5D6bhwu!_x;w^Zg}7szemZg56C z&U^3^_?^53Aagpf^b3I##{=gWNY5oRtv&eK!P=?d(of3w61QsPV>nea!~+;qjkX1U za~o}lQP?P8oF|wyJ`|qQL%cEU-8v$NCg z!S80#`E(I@xdy;5b_@Z2SKga&CV@#}Qo!$?1{YGus8~bR7TAVh+p}HRU^bkc!zQxH zY&yFY@^k_FQt9=P`(hccz0%`8T

lf&DwmorXLlPj(rC+qLHjUf{dHs>At6K9Nu6 z(;yG+81OKbh&>>_AL5S$Lk@}puFpnJBNq|5cSi1X^9N>( zLgSPGazKs0>8gv6V&ypwdyK$>u}CZqo1^rz-M~t)2A~K8F5uyKG(HD*YA>FR7XaGI zmCiJ|FU_7{i1tK3!k-8yB8g}s8Io-Vv6sju3Y7Va3PMF%lD5F947ikjN*8w|8BHdW zX=Dau`)sm6>Ef;+Rg@)VOW9Kl)t>4{`BUNG2jzK6=TWRGWHCkbdkw36_HFdlgy++jqGKzl`1J` zDxgv1>|Q-&Q?iDav!HZXqH?r$y^$jq0BDH zvw-EGUn<^`w}q-<`1X81(CctzGAUV^4&2LUD|J&2X;CFuDxD;Za)K*&l0-u7$Q>jZ z!d@X;C=d#vb}9swXertP`!ixc$kM~1c9O)s;!&{#Qna~bSy?sxB>&24`BW*VrHj%H zrC+N;YM`}NDn`)CbB*EP29mUCP&KEaZf?kRlc!O+Xyh3pbEE-cjo3ksW`LW6kl{-G z#32b#Lwli$>jUvGtrYLNbsdW45xX2o+$xZZvYKj(ThzU?nX;4oWfr<1b6Yv#s7k#AC z`L`F?rbOu|GSr$Yvud_lM%!MQagpa^W%fx^PS-26szQxwD{y@ss2#VT^x!5TJE4B$ z&Y`N4XJ0z`1Yo9MrH3y9jZ`}2+-+RRh>#C2Hk^v6 zGOJQ``cv0`xAm(z?XPFb9#EyqY^dB-tD-E^vMK0I0J>y@Y0wpQuT*Uhf| zJ6-z!YK~Rz-TyR!69xV(?uS#YU#8A=(>ZycJBI(xgpNIjfbufRq>el(=k|F5_V4x= ze?80dcP82ER8G&@D&5E5&l7lohxAouYQN06M=9OOGrpQK`1?I`70MjKr@u~7r}FPp z*#2aG$P2uwZ{^JV-<{IU{K+JP?yp?@XnOZ&(+wWV44|(v<)E9x*UdWUdZ<7BRSdb8 zI_-yZ4}Wj!*Yc-R5#LV~emx!WWxD_86Ho<}a}&DR;g`w;;-?PnzxNjaem)_UpiCGi zD{~j=%Cyv{S!3N_?8vK}IWGKm8l%0Q%FyupnZ|#7z&}3VA0P0K5BSFi{Nn@u@d5t> ze1OM4KHwi8@Q)Ap#|Ql51OD*=|M-Cau|B|GPi_VOAM68U<(E}oR(x6QWu=!@URHQn z-DPD@QSM6aRMcEna#_V?1((%ZR&H6fWyNl<+(C)}72XPps}DNr1uB^aDp*cfgNDU{ zDwU9RKvTj&DK1hb!1cbs-zmV;rE~*e)+FFa6JWj>z+e?D0_>6joM8ueKhB!~4rc(4 z^Z^x-Q0Wgu0qRix`y3UL0M(EIyQ~V=Qzfe8`?>b8PbqMf4dMDFz}jtLRcWvS75Irc z@cb%p576#(&|zy()O1iqb5J^Yj;)kJKwVNm9SC6jB;fG(j6E=LBrt3qYYn`Y4lGs& z*l9R$&JErg*dR|Z1e8Vr<{pR*02u`m0#%v-H7EBX>8T=B^8F@XsFDn*0c+UJAlRoA z@I>vw(IkU6F$WJ}2ztL2)Y%U7^8w`#$|XOCexM)-{B8y~HIM7Z+X1VT3YPHu!r>~M z1{~V~I->z|B~k-GOgP{mTb`ax&{Jj{5UAEDxEAHgqs~P#ncTo@0)7PjCRETdiT7{tB{H(sUr>2c(zW-#O(#GMfH@zu7cB?F1Sdn|zWflH()|jJy zA5vmXV~|*bk4UWE{S-ZoTBEVRRVwwJXOk9aYId!dB0p*M9r{=m?}xNpGd;E)kRBEm+X=15KXlhrM%_Zk!w8a5&bZGu?JpE0(m z`}y_#tUH8_3J=r0tT<|iw3NTDXVJ9k*MkOGdxi`d0gr0!(ZR_YLr@fHQr{HbV&ks)uhGK0}E3Ycw5|P=HedR zq0_<>M^Y!|g!LQN4>KEf;LZ6vYU1cqv0*PYH>PHbm*P=+LIZK<@6E)7-#VvGGS0eQ 
zW7+dXsaC~p7I(C7bSSjdv5O}g(2JLR%&qV9{F&R~9i!*8{j=iH=GpDCPc1o^XK=5- z?|~Z=FI{Qhqn2Y~vc>(6DFumL{$mDvWtSoe*^ z=z*NkcUs}#&VQ4+1j{L0#7N!`EU?|;|pm2mlbtS#q_P6>p-F#PQ z`pGexMTnRD4Ndjk5m%&ZO6Qc0@g0IABEmUG$8qDvwF~|9Yug2eg*t{03keG!?HD*} zxFb9tsA8yLC||jH2gq+65ZVD@+QHY6fKO{ytMz*$o=DG6A0wLhj(_fNo@R_B7t*Rk9y3p6L=uoKIonk2Cm&CKpL(e6E|SM2O^Au6nK@7oKO zCYFr!2o9~et4XWPv0HlIUD9wt{N`5X{(l7IzFQeFW=X@X-Jeeyo|fo2F5vpxky)kQ zMWy>|z3K2iJ!;xx*YFN2S6Y_cWuKJ?KAm7b$;Lgdcsp6Y{IyTy;NdI&?0GUbe*~p| zvB1K?W@BCVJ--~>^u&I+&!vbhecIhzkV!_i9``=CBAdXTlL%- z!jwF_p0YDFs=PehyIHsNv0b#@E9)(2JkoHyN8X(6!GrW(XCJD=-1Cd(?(AQA=0kQx z!E48LjZEHe&mAky(4KUDGjD!@<=~@1$+M=mh)ukLJzKafblLE?r1jWWC&IR7wf}Im zdijcCr;$USPD!@ghm_unD_oQO=!vx1c<&+q9euXiSJd(diZSjKSW?CXo@X0%Uirj( z?z8dn{^>y{CU0AXWe&xD-Lv|%h0w8Q3&-jG`wpAhIw~-=&&vot^!EMru4ac`m2?hn z(!1hnk8N0!4@C_^N87lUO?cJnE}n3!iP`dQ>i$%h1N(+=NkC$&PXHMhsFMKWfSL*C z6}{5?mic35{I^gmh9Ej>ZT;ozy~4uao1nf!1}m?VWbHI2A~k`guxIJ z0=g^$l=2~p$RGa4MJiVFy&Py{InaO{0OmH9za3?6*=|$8h~Hw(-HH9rclT?y%DHiy zVGn&ge%o7(X{6tE|CAh)CO7F}$7&bUC})l=t+w?zdr4gfb-pmubbQeC$%+29!?&#L zn*1>Mm-Dxmdu}(j&)ITyL)*<0jJ8}^?0wR|k-@{kV;^9>TGn>FztPa++zyvLeg#L{ z>5bVK{NhyT3(hyS-pfw=3z?vd5kX|+x|BdOhYRf&Rutc=ZhFag{MxRq@0%V>u^4wS zQGEKYxUG+QQ};f0t0#;qtj+D|`b$AsS%>*kuTI=PF~0FtY1$+mVj5{s- zE;MtMdL7->S8J8tMSF&uJ_qgvxhca zGaKx6iRxH#IA_#^!cq5aZy)NkH23Kd%f8pA&MkHCimc!G>+RD1tG2wqxpnZJ!%Jc& zmR&A$z2CLn9C~zBg-N4$49=!WX=0-Vi8Z@E9zB&foiyp229nH6yNgzHtnFA$4dZf6gxW;#_R3T8S?^Q_A zxbRV7L1O}SN(8tC)B;#PE71T1AAn(s*!@yP?0P|b?_Ndhod1>BeNno9@%Rx)%qscG zTkFRxL1Ge-nE9VCQae2)W-6ln^y@V0diY;yYfxC=XjoZDXuzoPf#IXu1xJJ;?LR+& z1|gbY)>c(saA$z>0y@K$m!}yIAB|Q;C@%vuNO|cR`DK0DSzG-`eTO`cUz=3uHNH{1 z3k4BFnlG=hxK>f%f~C%jC!HT}l5lv?fOhs$Mb4=F(5ZhO)IKmi$sg*xVa>w$-w zlWUjw4T?*cG`o{W?*fwrQRf@ETD}mRXZOn6`e9hHRJ~p6<@dzKYcKAy8kfl4c^GuU zMU0$a^TJ}%x`c?Sb6=insp-__$Sm^%X&VeomX`&;4Q{t6rH!`DuzpX7Grx^?M?6+8b0` zr*2~>#cD0w2iM5n;r^;<-W1EQx|2GtAA7fbn?Y-G{CW+H&#?-mmc-w__M+l>z0_r` ziq5QElJ~?f(5bk8_2lVNwQ<$*t8E+8w9dhRfL-M`jy2Xlc-!gsns!fa4RS19@<*!g 
z;sVv>)J|F6FP5w|a_wTiG`eY?s&($RWt-mNA~I7rDL}9iREMa8%K8;_F??0 z! Lr?S?ZDBgW;|v;GHv);R8xHXgiCOfY7OtNrz$(EqC1$fsK$V4Tq=%YR4XN-< zzI;>zt%+3qYF)(f%Ma?K%^{U3dg%5S^*4tvP{hKc4)0%Q+i_X+5TRM@8|8q@u z#JS3ko1g@OrPPV8{r&4er8bIg@H0$*qs02LYE_FrynAkMxXs7n&5nK1K?b$VUpyP# zd2vF+MftaKIfq5RrJ~DcUR|C!cF^Q?Zr8``-(T3`R^9p~vlAaCx`xvqa&p9J{s&7& z&6py_1RYJNXQxUy6aQfRo+S&-*Sy@~aAgV0eaTWm~O5M6>F(V^U*l|B1xbe*5|2 z>glP`*xHZabN9YFo2Y#SK6OE(*8Bc!B3A8Pt;VO#G*|1V#Hzc%wRM3nw1;Z0eCRlD zV?Ff`4kzXYr_Q}UqfPVH7n>XSwX9W^(%m9zeZRA$aO}R*CG7A_O*Q|E35R?W8dyNK z2df8LkE1L~wnG`z2hoq#s9&bU6r{ymLSoMUw_wHrX{!sBwqL&9b=08Y0V9H{hD*1Q z(2$5hL1;_FR`=~zUh)f{j<)vl^tA5i*_~r3mrf3ZvvUUr7RO1n1!ALvqUD!|m0zgP z!80NtG~8N#Rnw5bK`F5{-H}-RH%P2rQI(?^>FmjIy>ed@Z53cj-x~C&|CS3;xiUQq zs~yMvDbOjyF7LM<*ltAE>3-sKmTg~d-v*F#_wV#oip9(&?*GYO)}|A zZo!|le0fF_Ueeaq>6S#c2p20W^pUG}Fa;Khn5xpuXy90CVkM>h81T?Xr+EzTfaC2Ky#;Ce) zBL=wkPo3v?u{1TJ#ogy8yU$Y7*Ua#yen|zFE=2uG7Wc~YeClU+C!s8+<7GFy$wl!w z7IpI4>dgwA>SO%YtG6#fkKI-6*uK41QCmBi*Ds#f;dx5&67#abMW2jrW4I#qtJvCw zJ}*{O9LO8!FsyvnZm*iV<7a%B_?0`o6v+9`LK9wem>AWmaV{S0GcB|?(nse`dm!$J zYf9IY&Y#an{+>I9{e#_uf|Z>kqMVVSiZk`8;!Nd3)^1gtsr2`p=}#W}+w&I--N}~; zV*L5E%kVo}D4!SZcvEYV&#L$Z`|8eH-{5Y+kuiNY*!pgcQgyw&daco*A}(GP-lV)N zd9+KrhSyp$q31k2Z(}`=Y^tS-ZoG z-!ysc)lRS1N_&^SwioTAtPd27iJLoc&HR&A)s8qkEgZ8hC-up$^|@?*U~roL$hNKW zZCoeK-IxAo{ehHI6?t|~#YbkHV@%>!FB;nT?$tr2IddnU%6jGGz)iS5)-UUNv$;0* z0~aq4d;|R(9vf+QYf<{vk(xVe``trgtBnK#^!{`xVHDFwIh4@+UwbH_5Fjf5=^vmx zoFWwhBy}GW@`wLndj2b>+F)gxIj+_$&bsuy#gbQVr-UR9i|{hPwANS^JMCRrRIsc4 zfZUwB0Y}E}kp{QzUtZUY-<~@4VIp)kETYs#df2ZgDI>%#iO(Ft@%pVZrwe8$Ow*`lfuJg+e zuoz)?tkIkJ=mWO{-gi3O%F1>~?NN=I&D!D`G}B|?Hop(!nu_Ip$7E#9a&~XuGv6$^ znlxm}tHIfNi(Mu^&2#-Q;_kAO+tQy;Xk(0)U+Z3N{_Yuu>|g)# zMo?Co`mnXp!pJl4GqX>)`2A8ZS#8jxx%(%kg3<%a`>!JT52@+DB$8kHqtB^%4_5mQ z49*u56Z&5{yR5;{|5MWY9GNSLiE_a3|0>e^$zwwlPf?<{yi5o+F$<8Gd0(aR7(}d+ z#?hbtHV@aXx+vbk!_zTn(BObE!z2Di7Weoni+ldvEMEQIjGm@-Y4PEsEb{mJRdjYuL7DtV`{8 z`#k5Ie(7I(*4WeS+dD14;amTs@0(xhlS#IHTqB-3n;+kDXv(oE&*@1%TFd84$9J3G 
zOSOeDtN+Sze9WM$8+HvV`Tgu4Cnu=FToD)B=EAu$y|s(ydAH8o{XxGdd==KM)ra0& z9RJ*X_gDPDzyZsy_ig|1+1aIo6XtvzI&jL(+-^1Ix4graHtaSsaK*%Cp*hKGRts;Z zOjl`J&`ER_~dD#Fxj`8ZVAd%UaoJv%d4xy}P29H)pJOwYVEp`ugRe$${Q2 z%qFyB`;J{b^0#WU%x2cxAF#gGsbvKjQ%{#L`@9CteNOk)@T0Lzc_j*w0$OjeaT{gbHHEMFeu&{9(Vot3$%3u$a9y?VKyK~eTtCxGH zshdQOWb>sQ&1UagwFKAzG2n%&-Q8?GD5)GxYd2|@))vkPHS=- z?b&+E!qW*CQ(~qfG0|z0|HHyn1*irFDvg1vBq`@0(!7D$r(g)*gW!nI=TDH8pFqX| z%Y06u`fAh>D9X6<0c1)yhBq2b%`y$paCPP1wOn-Hu&k~me*;zJ$2IaP%IDJzIo4`Y zj1fcm$MmYFTBm;cw>m!Mo2RT76Qhcmar98(7Sl#g?wl$*Ymv};)UMYP@~4b!jIU@n zKl}9IHtIX?f(4N zpkbSe^RH~)7~V@p{8f4e*RGXkbH$>djy~M>7b`zUCe*d}XvcAe2HoUY*{v&L7%&jBBraSx3wkh_0o{;7C>b3gV+MAkR3=Pg$ zMp=0Ol03hW$3DZ4y}G>%w;Vkyu+h+Z@Aq1*7%+ZLzx)fU13i0{_q}wUN!si&ZqP6f zKL29JH*4+1qmE|gs`SazKkVur;<~8w`AxQL-Bs_NZmd`BVDkdMM-802J+$Lqx1XQT z?ot13Lq6Isx9w!IX@Zme;6=tk&Wp!gj7aU~_{?HwaimK1zW`860|XQR000O8a$gBk zJIfN*r>Ot{kSPHG5C9wic4KmNZggpFWid1`E@W(MRa6N81Bd(&VI}<#VRd*5009K{ z0RR956aWAK?EMK`6Ib^*j88~_h|xp^MMaH@ON&b&>|xiSR?#2=Qbj}-0TC$7xU`~R zpfyfYZEIWGr;FAucDJpMwXL;5ltrzgwN|UGsI;9lsHjv?sd>-2GXnwfdA|SO|NZ@c z|Ihn*(aGHP+;h)8_uO;OT_*1F&6Jd)C>cCnKh|TA2)9S#<}5J zTHacBcZc{qrM&m{8}NSFHFw7r5_WILSMdJ9?atfJlK1_$HyEQQPf@cZ&Qxy5)x~01BQ+liuF z;N?c~Z=O&RLc9ULOMs41(Ik_K&%(d?*ES%QdoPOGr;7phTmlS7!o77d!(~7#)Q@X7DLP&^pM;cHHc;9^> zUCe)|;{W^izwl>x6q>jUey9d?S$v?;cTIqwhKbnxlOSvx{7^0EvS^SWd@&4vcHz%r zKlo`3f}f}m_$dm-2p#;WBOqHD{75vB@UuAzWA#|tJoqsrBB&$`n~&*IFdP0@@TUcT z43A?$__GUtEcnxaKaKbkm5JH#XBYl7;*UBPel)oli$4bZS&Tos@TVmYi(H0Y{ITR? zHvFNMW8NpQH1bmnKbjJB;g6*RiGx3l=vC7Q8Gp)%i@_xL(}+JU_@i2Z0Px3vKZ{o) zl22mcPay#O(cq5(e-`6U8U93lf$f1mi}9xje|8a99sF4ErxAasdQ68us^1WC{L$b~ z6#i_xfZ6az)rdgx$8s4_!=J_`XbLsRHVF}MjYP^>v3XD~6xlE_{!m*~MD7vk2Q8$s zcmuplN(CfVB^E3zC|p^f&RUg`RYd0&7C;*Ln9lem>6u;P<~hV|b8t61xT7;NvPw$S zD{>3-(=nTRqPn0^J$K%`8EOa4;~Gn_c8*r0Dki+ zEre4Kz~lc=EXU@TMCTK!*(EHtI4jH1KP`(+O3!Dyge8b!5+Q6xK`xz}o}c?<)|6s! 
zWu)gPWMyDO=vR?^I|orJIlOnn+ZEpX1b-d8VOXP1!5c+)qwsEqH;V9fAztoDsc?u; zdGdZQ;XU4yho|x6^M%2?H@v3{@pC=-QFI=>ap+hmq|fvuqYjk=Z^S=ecozxbwBTPQ zyw?ctGEW}wX9fReA^jG?zg2j@E`)Cr-tP$So$$t?>|?>dOL*@V-g|}o`vkv5@E;WX zb%Ot>;BOG#r-b(zA^yDZZWP{4!kZJ`&BD7yc()38wF&QbA)Fe_zh%N(KA5MgyYN;I z=IJ?pFi#hMc)P=Umf)Wc@BT<%czYq;;5`iMFZhoN{$|0i7(%It8Uybk2nXJ3Y!`Ts zKzhS_EYeAcKLu|k)^7-pHwSN2DO!YZYA6rSdnnI86Nd752gAEB^2tyhPUcX){0exF zLHyv2I_4?C-wtmdtfv>BeySIbR}Q>KVZGoz9`S-Vj)qO}9*X4;?onzoys<6A@PYiO zU_`)s0=DZT6tx!Ki?FX_9>}x?9FIc04?Gj$nGDYu%ml zVbSV`5Na`#NrM7FGsOj@ITq`uQ;Tu86fgA@~Xhk=%f}GE-%PcBX5-C zVv8^CBnF?#5ufgH3sN1Mm0G+!y&!!ls5Sqs{ko-lXgfN#LpLgQf%hP-ADVixpe(^W zUQ`r{)!C2)>nFyg!z448cEn3a;mDg0N2ojh{Rh;0pxJ+b+oiwKIYc)Ux~(XG)T2h> zaM0<`C`?OVnVw5KgoTAiQ0jRg2h>xuN-~Obiv(oUF^KP!{B%%YJ7a(v>hWFTqKk{u zS3?HDJ3SY`7H8zF?gkFx3zruW9-6Zx52~l01^A0?mR?X;uzGnRQ=$f%XO$EIXJPo1 zq)>u?D9pc5O9m}EYhqD)F{A-POa%GWLm>Pu%__(OxjhlZ8wM=y7BCL!2SxJ!2Z11r zG5Oh4WMtYDbWfTziJF}_YZeKIIaz191VER~aKeeJTH zDxg4tdejtP=|>FS4ZIV(Rux2Pny1P12`$PDl|!66W4V*Jz)!ZV5(Dt&noH9^3U5^`9u_akPsFovJBqRxP21p~79 z_6aHcbKA^K6-Lhc=p0gqsLD+B!j=fv4FXU|EVO^MVaEP(yjfnbXn zak89I!i+;&Uh$w*a3q=>Y$+R^ax;F7=6)?-yBpl#_V=O*nF>h{YXgUg& zFy391TYzdB>PL?(ps1&^0gY087xL{7KSg6f@BNK`3}3*!zK&AugD}-^{Id(f4E3OU zj-yl>@Nc_7sTM<6Bcz)*t}|T}gcU*9HVBKte2}gP!l-)D>iOUQBn}Qq#~*sT1a$x7 zbNuJ{{}BEElRruw8A;ksGpdsw=tk^hg4@XqY13foeDHZ1=Q3XpSB(G?z#`KTF zV*n-F;KipG^zf&qBr5;${Je0lipK#h;LGsMNV>GIXMaj1 zc@V_^@}JB_@;~OE?tevq_bKmAdLv#<;BOYfFkXza+i|snx1l@c!9N3}^BU6`hIF3* z{@^j4urQ6J2Uw2z@UH>V<#dC!3j7rguo@l$E2kT*U65{fH&_S3-{1gi*F#|K?gr~w zq{Z0IFyucK%7(FB;bA(b9^g6VCG%DA&lAF$A42D_v0ZVHNhGR@v0dx41^n9_U}2h` z>Qm7TmKUUJ=mx71{LK!qydDCpp&KlJNar=KTl<4Qcw8qe|A)YG%!hvlNSD(M)++E< zIKVPI1XfNrSVfR-cQ;rE!QbEjtLP!Hc6Wod0n$<9ySAYU{GLJ>iSGdp=E3CxTohn0 zQ&eCRZ$O@Mg`$?jGdE#s!kjF9)|rX6*&lqpt(P^&c_zZn3|*M6@m-j#W{!$AjGxOIyytRgWHTO=r$Q1rJI`BXa@vcsON0xBLyRiXkas@^ z_*9i?CnVaAW19A$!93MlmPrp#geA@YUWSTXhc8I+t@5e zlDFF#6=&#=MatMz1*@0ejv{nbR&Iu0lfI4Bt4#WKv%bwV#hootuusWJ)pM|xiWSB+ 
zo1ktLxWby0$Szaa=WUXuE&@osEy@Zsu_-qoJs?*CH}JmQj>RlfB_(hs;*kRgR<_AB zMFAP*kP7|&o2;(_J;=heb%fZ)Ru94`%T#&x1a2muJC(#d$RtTq{QgsY7x(L_zCS%s z-*&#f3EUcYk^$=U+?&`uNPyRQ0B$Ba}DwF zIUwA-bsQ7)woKC)q2p+<TlurlnUB|reOBLH2 zd(KnXaV$_hzT-@ha}ub;*H{7cxP!;K9ilTgIXm{7?(%`wl8!%J4zfZ06_E{|_9BJ7 zNM$c_=O)>)hKh#?hYtFrC>`iyf5OZDgs1(9@!Ylhe0B!{B_<^&uzzVS&|G|Domh~z z$bmuGzu4<+Ri1>?0&z()Y*oAck4}CC66aBKbC}7LDxQ~xP%{xkBAf3mpBoQYxpPHg z@$%00A^dJ&7kbaH)$anHD67@u5JA=I_wuiO{L6wbvwj!r!PXuuD{v(uAt{eC z8Gd-+#p~qWbBcoCDh>>zbQ$|$^pdZjSWAj9jZq^ZYGcSorH4bCjnIINdeO-%VSDSatqc&nYO%!H@70DlPO}KpbSq#)aOB+Xs-x>46*F zj0YrsN9bZBC!ziLQ06ZqCoArs%-*ZIESFxumr8oUHZ;v>k*hOH|3zulY^}ED0~|w{zMUBwr5$N0rSSvE zdAn9h281`zUBkOSCJ_I0D(SHT)E4%quj;JLvy0TN$c6tH@Ta z8v(vX^c_IoG!=M%KyNd83)SXznv$=~D-_nx1ki2f6>a7j)PIF7?*d!i6}H?Fw%nG4 zfZNS0+W!YQ%eugob%iT)gex=NQ56rksVoGFsmv=N)jFV<%El?bx%eP;FZM)TR%Slv z4@zPq-%#{`jV>EpXBcZF###v<#9U!d5FYsyn5D|+u}&MNIf-e_{(a41N>XY(&wc`fZJ|V#qTSh_ z0TG|W8)NMorrBrDG5#fCZjvm+adWC3;^i3Dr7pK?u`AHZ8MEhLop>*Eo$pe`IptUJU49LJp}Ab(nXQ7K zBsKgj(!fu46#Ntz;Ah2R_*q+YQ@PF+o;i?gT{0wFmj=n!Ern$33L)9Lm5^-Rx}tI` z!n|Eo2BMJJ4An0lZM>>BUQ;_1BLFg5)win}G2O4Xt6;3O(;h$?AQ0Sh( zgbcn2SbLpoHlmP?b;-uMWMh?)`wiJx;;D5V{^x3`} zT&Dr~c^`A*b`?&VX*sYTvcI1>**`JfO0jSMIBl9>`X%{Y6_Js#5B!s^720b zN*Qx-QQ88nrJ9WGY)pG-H62)Cq5lfKOpmK*qAyi(DhTp`pr0Yg1%i&-o?q0V3M}Gv z0shs#C{36HgZ`H+&`i1hA7t@T`O1~bJ&6kTK-Hygv?FvIeH@4abi1QH36xWIr5vG} zC%o%N*D1fNlGXx)>l=`YwJKXf0z{l0edfePt;MHmblr^6M;3^QrXEPEuf}4bD~D(; zf&}$Y?osLvE2lyVT0KpWqc%Y40KkJ0Fk@OvlzCQ2i!!E)xdH(8wg5+rt_7TcJ84S5 z_Z!p2eS-J0L=>gk!P@P^hAC(nX_^#SPOhkjd zZai*R?imcFRi2}V-L4`PybAKKA7BWy|M{27bBy!%N|JZ&FW=h1HTq2Xy%u<{90&B5 zcjMf{NMfiMfIF$&ch1PEq(D~B%5%z!JY;%{B=kpSFWX?AEg`@vk24{oe-Qvq=DPwZ z{4kFIT_R|EiqwUE zJM>4|6B5`ik~R#@h-y)+{HcV+>Q;edZQ*nMCKUJx?-i>#1gmMRQ9*4m@9Y-E6ECd( z!8!}7pxiSANfiyw(BaC;Rah&~ehx@%pKR>MwpE-|R^Srk#wR2dx9CLnHY(B^T_H*6 z0eTR0y*?XVc#m6!g}Dh7fDX$C)i74`TKmRyC+L{tF~}+OAhUmCv=nlAZp^kDs~~{B zVEc#=ZsP&#Hkikmr`eH;Y%Ts0emg)+dfRasC|n&uspt`zY#&eFHo=x11cxqKUhJ%k 
zmaTBk>yz@sBdOJ0(|b9jk5cuN7_BIEQ1O&hYv1d3JA@>$AcP_^N|rY=lZ@fs{V5ei zkIBI|x9|gsN`d*$Ks%#IVgoRYJ!bd7^6aNmQWkYLFyj21O;sf&v!x2w)!25Q9>}hg zv2k+gO|8EtpAi5g%GqghcBO)ZvZ-?RNJrF<4zqP({^|w!0jhgaa*f`O<_mL)N-Ie+ z08X{~Ry0$Xm==@1ja!S7fbrzMM$?ZTro$@sDaEZQSvHeu(l@bsPm`WA>zhnd+Ku-6 zbODb4ac$;lPG;FO)~nnsD`%(6O!{V!M(*rsuIQTG{<&mBoW0Vr%6;8qHtEJVJA2P~ z?>k_t6@8B#C?J8X}-bvNLT4j^phNtiDyS906+O!Gzqw zBDp8?r0Ap4_T$4(eevDqb`XX%m-rG zruk5+!oX!CJ>r@XiPP*1Ik)GksI@SXYC>YlFgGD7`6wXx`224612#^jwSf3LY)>}c zu`4TH=0_2Jyk=7sX2lGaX60rt*5@`1Wgwz;H|U8_1r=*h=x!AEVyDZchjrJB2PGwQ z``#nwJBVF)I||2rHJt!BmB>IS1UTe>q=&p^9eLGa-a(MJH)2&;0+df%L~K^TK52`z z7Q#h<}@c*BCh*c=SC-Hx_4UwxbQkio2P;my>hEDq|%rpZmIXKqzX zm6h|PXpSH~YP#GM*Q7gJ+)FoKrd+Rs2sT+^N^CUV1*()w&@6NwVQA~wYF1;sZ>QzD z>q=vPNN8LoqiDDNJgc6!Nm=H6w9%r_9bR!7D(=osYt!kQl_Qx5I``_Cn5|t_OJS+SA9dzlNbU!OAhdLn|?WC4m+Is>w zDJ$+kfSqYfU=y3>a^rZ0#PExvsDM7&J-+Ds_q4A*8kq6e`0i z_?pBu+0K%bYkjnKR`w4c_YaA4=Y5Gy;KBg50g_MC-Cz5&*7A&$V8_O_2#WzJLVpwL zz%q?_US|DXLxSBS3c3?8(y{W=21qDTRwg4+_4`cv^9F9*yB&1H<7T(?M~~8@p-tn? z6AHEM=%7%b1l!h|Qa(EYTuC6L&L?nn?H%MDC=to)Q*B7#_IKuN?}3=%h-3hgg*{eT zzt+Pn`(Z28F(jCl&oFZG%KCM!ph{J~fdl=0<=WuAIKl$PHZ7}fW`Xa@gPkeLqTEu& zR#mfEoLKMFGi>Q8>p$R*wSM~#1k?Z%YEOb#ivmVuQ}@~A;{1V?{lt^|CqnKp;xl;> zAKS*BjnSQ6dyb&YBWk>_Qku&VvWfVu3=zHiOtK$dhb&fb?0Ph9vUIK78aJxMOcyEa7-7|2S09WW@ z6e4hi0K5;l`v}1Mg1hfSfGZ(R*)#A#1h`5UqoR`_73hR(RRZ*W;O{3u?+@<&4}m@a z;s*2#{cDulvF^GUcM)}W0XWP?Vmt)k1HnD;A;1Sg+@PL;FC@S{bupeIa8CjFU~mr> zfCG@2ArApQ6yk>V4BU>480)2r@e+Z13BZSedzb)xIJk#D1o#Mu8__fHw*h#bI>qO# z^t$wf?yS<-3j~Gqru0X<_Z#gApoqja>0+A1f}4bbe+T#PLcxE4`;Uhd{3pcy*>k~< zkXmuN7)}Jv3Bax3whF-i0{34J0d9jhThG9cqWq0**2Of7z?%i&SHOKm0DcwRS04iW z8pK`e8F(Q9-lB_X5rMY|z^{Y*x&ZtJxNkfJ_)Unr*)#C|1bC}1rj>MpR)EwhK>r*3 ze+$rWf&11&px=hL+dV@^!#W$=ri*E#<9XEE1mJhTeMbO(7uW8pM#b86F*XsnO#t33aHXP? 
zD|-aab7_yjf5$;L_L?r{nh5-w0K7%uN<}AE_6VHk(jI|tAi!_xVs47SZwkO$kue3} z0#o(~oM+M=f%_2Pw{Uz0{12{3JEpS6k@7F;VlIj0UlPi{EU>MjlWlt} zpJ(G9%Wr8%;D6|1{t$uxAprkVU|mHg>-GqoXXPG&zes@prHlDX1pb!*+$ONDqLX!d z1kST^kH7;7@Tnb`~w@2VSEB6TeEUu5o-q6L|5P{zifZr5YSJBD3 zJp$)hxkun@2=H6Fm|KKhZ=nR{p?3gsK#jk!t^(N>*|%rtF~G{R@AM3PIDvjw7jsuc z{jPxeJ%M!O3p=2)yd9-JZ?H-p|&>+*ek74nFIvWaz!9i@8W@a}j`F6bif~ z@T#JdS9>gw=iMF)G~C77{H}}nU98RT0&qc7AixD)?GZT7yFCKGc?W@8bum^ExK#iy zXbJ?lz^gq1=XtkB;4c&4S9CE~MBrBh;DV+=fD63ZBXFK~djuXrfM3_eTqi`oj)GZ$ zE~p9wy1=YGLg$&cN9gC;5c=P`n7>8T{}zA?ngRjNYX&_7=f!T1z}FJscXTm#MBsM> z;DV+=fD63ZBXFK~djvkR4aI4PnxI0#i>iW8o3i31V5P3mtT%4soKFt(G36d#pBBX8 z34Th}f>=DkPsv&c7Pq=$G2tx&CEo~#jkhHdUVB)IWBWLvxN!eaTi*IT*YY(4EsgB1{sXX{E7w9;7RgyAhDhJm?vwk|=Te91C>bd@b9n+5k*2XNW& z1}+_leuPpb$yQ#g&sH*Z@IX=kiOfSnPm2 zr}@%4+xNq9ccS%Ldy7ZisZ@haasogBvWVs1Mk^w% zJ50;7b$SKUVsjyH88me^KwQEm%Cp%xg{{BxE2nH-730L-Km?{64>*})Bs=>ue_}nW znBjXR6o^k{L7h!%JfKploN#5K&V#vOmOW-0s{E>oUoj2MaEdOGn*AT6opD|9DqGXd zLf#|&jm_helG&#eT8r{4r75lrOO&%y#t*b$bUp2Hi2*jhr;Jf}rGRh^Y8SDQ~B zt`b4s`o{?(C1(J;gBDkUI_)gI4^VNx#3eSajZKiV2OOGZ0l~S?3S9buXxk;F5%&o4 zp=S5T*qI9UE?aAgYj?;L-lb(HD^q}whsDlHi86P zK9`=hm0|N_wNu$4`gXd1c8DwEBE2s?W_wL~+WI8+!UNV2Tw1IA6_<~&KG)6xgF=-Z zxvQ}&cZ~xal;f*`u^>Hj6qpO%KU;tWoDr2Dfs;;fW<@lVtv#8oYfx7FNT3e_H<~BlG4_B@ZJv`!-?j*vDjPQq+ycE=`IRCVAo zdXTKr`jWi$`qBaa#uUQIGFAaj*;l-1&&eRU=rEsUX}IqY&Kq2d6Iqv+ib4A$dh}l(<*H9 zwKEDk)6C&n)RYvo$=RB2NmTS;Hp-KgLv9r-iD!%Coohzj>~z&+fkAFhB-=2-Q&y^= z=$W{FBxi$!MX6BZT0qsme65J;S`pQ?JgS4abA7Q;i%KknyYGd5_YVJ6;`bE&_A!WRH+m7% zH*SJwDRe!JTTOVTTT7U3)x()Cm5uVkJ%8O9pOzV0CQhy~cJQMOxlkFAJp zo+@YSRp=C-nJG$p4p-Brlbt%fLGA>n$dwgNXnZlqD^D`=5KpD~CRPQ<`pWuM&c+&r z^oA+5oh@~*s9!zymaB>$xgGh#q;G<9J=i*d)f1c8I$J-!NZ_GcRdU*$o$d+EVynDG z|7q%Lo+dZM+a}*~mCym$&h(EEe%!v&9eP<3=XaW{A{f`X%a}#R2#QX@^!7k*HB2h8 z&1};FT1c8FGa5i{g{)g3D}l)+ktBvB?#fRDh;0~F?+H6u$EnDL_bpdh9<@OEQuP#5 zYNKdEueO0k6otLmiV9J86McO(NRz!txE0T$tsDjng)x*Wm9ckhF6Pw=>63C|wP)Ou z=Ek$B@&xWhve2Z$4YDI~&#S*V7OGI<$^E+>5zLbq>?=Hzx!-$Jl+9C&dzHlTG2iy? 
zkTuqT>P6O`aXnO>kc?~H)9eHZRzCsyxD#6oVFvEwO0@8M+XL;4 zr=4+6=1%t_w)iLm1SE6+GE!6y?t`{7E_6kW@6Dzm6z+VbHJnq7j~Xf zNtKm&J6QQsd?y(Q4$?{bfCD>;vhpylGTsU)@lsZP4^A#+FhzwFsgxDF0U$RMeC&J` zHwE4`dQQSd0W$5l?_m=}^Mx^wVnK|zSV(`dkX}L|OfLjqk%|ECdLno)dJ)&+i4w-k zq;KVZ_eAQnCYXx6xYfN-j8s^dFt)TcBxvn8cA6~5K+f5?$hdlEimI?EE4{F``W9o2 zDhi0Bb6sC|R@12#px!K}9!xKriGwFiv{Jz2fQq(5UyY}F+mRT}Y9wq=LiO>s>uWiEX^&zXgR zMBgU8o^tcu5dG`QP4D8i3;j0brq{{pt|_i1ByOj&;yH?J&_`LNGFoINeIu*?*pztQ zq;F~BOZX$G&oKqle%=&`{&fOjOuvkDK zuLFU~TZ|OsazYY!4jMO^5Jkb=0ESEE-X=X&W#Cf4%WotJP9VJvAeSjC2H}crGB*r> zY3(7a#EivhWTK$GDsI&Cr~{i{}m8^mWLKjf5A0VOpUB=n^@lu}tK}Hsr-Rffv05 zUaVU;TcCXLZ4S3%i9xfX-FQdXF!%W=5cZ|;|!uVVzj?ZjO=lG19?R;*zIsphLIZQP5s#}xX^wM1F@36S|#j4YcT8}fuqS%EtnLKu*yFM%Uo;G&PEq@P;LG4cro zGaNzieZ@KC2`{EEfXiW{h3>J4FA#LqCsop7|Bxr$S4?JOyg>Jbk$H@AkKWrr7{?32 zgE~x_Vy>taL!YpX;Nzj);yc=EF1O+IlnPFc z=Qu4VfayE|u>>yjDZ&*f?2}bE7Mu#1qgWBT?5w*f^}KHC06ZJHb`)C* z?m=?MzHZ>MDtC6ON*0A#;?A(LDsb3(MMF;A97S=BAvupq;Bfm12rKP5z-c5ZmvTYv z5l);7fUCO!eGSbr#?pP1@+FIMkHrAu^*T!G>O9%r zDLBFO0VRh!q2k3lGYsjI;8Ii659P50tv0zU|{#bJWQiW7uDGC|Nhe1hOX z;48Uo6%NJGrs-<7il4OMRxdl>ZBS8jCPNL(+CbZHK@6+Pv6aQ>*H@EvWLr#36E_%u z*`quWX&vKEXB+R^*Dhd7Rl1e3(gfV8sAw_8p_+^vc8qIUWE8E5C+c7^4<<)S6b{_g zXs26bTxA_hj{6`_-9s`VPw}d8BNS)*7f%48g*m*q2#RH_Nd!GPH%!2-dt(Eph02aO zVWyJI33=7iGXW-nieyypaR^}}wC5`AYo9>U{)m&6D59odza{Dg$JT)+W{PWt*gPR66N+{FT1aSlin3JF)@Qd7h>aDryHweBsJs$Wo6wdo70yPRN3^L@c=4t z#%mI0XWF8T)m=spa@LTa-zzBMcSk=|6oXWh(Ed?g{JfTp#+kVDUVlMySxq;)9I$}o zsn)O?9bv?ue8ivnSjwNU)TD>A)BwU#eYgRA2^Vy+0ubW5Cy+Tj68AH*`BA|J zvZ<;{I}Ez(V9*^|9)Vhdvf^)4+apj@P*$J{f2%r5S@9kTB&g8DkWuh(Lq>veEub=h z+t3G_Sw+z?Tq*8FDpHjn!9xO|RhUwn*;vw=!RGlg+c5hIFIH+G2d|qAT(h{fk5SGs zR?8~w>n@N9aD6rB--l=NQg@05-X-awHIlfk{4C8DBVg%Au$=I4Ec+2Gdvli*0+z3H zKPyDq*`9YCZ}L?0yW1;xdq}AQ#>E^q5Pp{k?!Ya^oDi-8e^K=O!yir{TsaaN>qNsk9e21naTblOiqO zWw<2gCqT#=*U9btCQ8shxN)m+rlI&2u}%RMw1hj;n-m}7$t>lL@@~R)DcpYECvx8? 
z?vsuHQn`NI>m324l4CiR4?tU6+!bE*_REuSGz8m=RNN!YD0sQ7jw0i@6o(=sxtZOI z4DDXzBVCKcrjT>)y$Na9m2$3EZ^4?T;O@H-lhmRGX`=m0&_BKaO}*BotW*E!XkR}5 z1rne0(D-5!Pj|-iT@4Q^*V_|dAdG8I0vy>NlTE3u>^)Or8(XAcqvR&08D@`YSHiu# ztp9$t)~8B$V8tjL-UGP-Qeh&}AM{%22C*R8*xyX0pis1-)CuISI|(U<5Q_uw)3+o< zr*~&uE6*|BCZ=_ouC%3OD$FrhD3)oqZM_xiE@ia0qTD6SM9@fmSbe)so36BN<=9(O z+@lybC}}*Tk=XjMQ#`SA%JyPndVtXTAeEH4i7A*XY_-is>~w8{TOMTO=65YIsA~Nl>&;8V^gN zDo@gV+0=FbaX`RCS5@*iTiVXLKXWU_UB*~}kpW(f{C^4(Ai72Yy#2OMVPLR*1n}NP ziRf-5@LmlG<>Rl~o&!QXOIp=iXjM{Kt=&}G#$GU$HknFW#WsaGP@}1|83v~syQ9L!YFC zhE`UhQF3FVoju|c*P@GSRzC9z1XP@)X@xhNPOnW50Cs*SPk-jZ8qhFESjjB3~b zIhfYZCAP8fVq^K1-T=~ut*f#+LAn!V7abD=z|j@;^av4wc!B^79L^znj)`RN!%&P^ z;0W6?g=Nm`QX7@)<)9TnP7g@ekFB*0fqFVK=U6oI+kQsN6N{!s#z|;VUZyIvCqt5C zYm{t~348P5g4)g|Ds%^EEi|>!B2U=psjUwkA~g7XN$BI);1(xn zb7=5OY?UxsTYyDlLq5rIcRjUS_);r!{T!OLmW-wy2{%;_ne$-pscwQ z5esuCrvHOxNTN!)uu<*oX&-|OdXCTa+h>Q?kM8SpLNpd0upRDs&v4F${%BHwDfnbN z%F&#PYl8oCkQ`Y4cr1U*N}Qjg1^8j*JF%j5xNW#kqQd9&TntOv>BP8lZudY-bmq>Icy}Ok-w?egV-Octo`yy;Byq#; zBc9S4A=p}g_4_Y%OSs=rH=9RJn;1Nw4{j5yyx*zIWd}U{r<3}x5Z8+qklNz<0WdrG z2R+0dn{!Gm<@7(Dh5P*nOl>D}J{I%*!sj_JV&*MiCXByj>pdo_G`hc8{IJ=&oRo0pYc`O!As2nFNBLh2WG$3v9h{ z5y#aesfm_RY2DU)P(LYK3+45)2MRfiW2u^08O8UbRQs3>9n%;4m}ehc|3FUHRWUMD zB8^qfMvL=CRcJrP6Z&-XMuQ|YUcxA_qg6RooP_DX8Pd%<^i6Vw(=7gcLjNRIW8jfs zYf#l_d2sy))ah>yz2Q|3CD{}(H`5--ZFo^aNuoDOfJj%_1KroN;^f9=1uH>&)n1~} z6SNo)dZOlwW)Sw6Bk}3}=2*M5sw`N|$TxUS z$CK7K&CwE&QZUU(HVCcKhT<*)Pa~P9$w1?|?uKR|_KdBc z9c1f0oTqtA$3&^^$wE7%bu=N1vkXx`$K{m-R4Knjg$K0*J3SE>W`@Iz$0|#TXC-Wr znmxv@@)Qj&iy(>HEmt}1{4EvrsstxPxGvUe$dl$ta!|1Ytpl_MyN3bIlMCvFOJcYe z(1=}!8%JoDkZ4ckTV1XXIm#77JgG;NCrl!$IXv^jFE2W8M`QPc`X%=-OwvFMY(gSv zBGY3sY_}w?6l06M>^qv$!!`!Z-0BD|M9G%ofr zIH|2RhGR-QBZcNo0Av>JF($2tHBmyd~#F}FJ9ay7-fIzrM(P-6& zlq_&fJXryg+lp9MvsYyi9uv|+7(K6?uAvA!Nn9ieZ zY)fVyVVU+k*PM4CEmU_Gh7u<0hgPH{hdl1aI{PzHW35bAs$f__A7X3pI2G**34@8? 
zv~fIJBqf`}D%T)VuEt|_T@CF6b$f(2wsH|f9e{jj`~|M@=OwTUTlb98CVM6bf%Ep) zoPAh_-S~qp=r?cRcnE?LIhh>9?C7YFWf2S*`QRa08!%TL{-6tHbxVZR$rd}ZBdzGn zYICuB(BKkFZNyw>@CRLaPTaWvn6=0uYb)kz#vgR$Nd+?j$*Slv_d-55kuN!NcWg8K zL06uP`{aISeLc{^bBu@60YM@9a|AdJ{j!{($O)jQbTu1Q?meB4|Wnm!x?m+wZGQ_jTxNmXr z!)dCV`k!XG**(kG`%t*SD3x66A|D0)Zcz|StzqV}mdLAQvM#+jg9ucc%$O~w(nx+{ zHsd$MT!&HXkvtWF#El3dBh5gzuW`DA z@o0RpuYHWIC4g{sesG?GJ03{jjjyE;JJWoGWE+?#=l1Xs%8k8isn(H{Y6BGFVpFm; zMl~tJCWBumT%O?T8ZVRuMa?&sDyX${K@;mJY6KLO$wQn_qX*TpPF%nw2*8I}1K71oBJf@rN7OBu6e6strHh)&U-V^4(Y74#@jj>`7V z#~Y#<#ck)EcyARYq38)yVoZZJI>EqrHUV4i7;?AkMmbE}21S`%L*o?mV1b-A6&7rB zL;a9JBqZ2IV#QI!u-Aps@yPF5H+zxHuHn`o#11P5qNzP+S|kfemDA($|I)#gO^GPX%+nYP~M#MS`@8)nOl zZ4#!Rfg7+GxXgu&JXixqW6K0`0$O_ruZ2rguvOZ6lm7gMxH}u-E}KA$kR_R!J4jF6 ziFK+qGTogLx7!zL^p_Ib&lcPU#&<{nQ7iER zULrliAS=7;L_3$=mEx@26fI+`kjnNxdj;`C_NS;Q($vtxv@3U5OHrU)q>xv4=Hb~! z-|omZo$U>LfQ>uAzy&T68@Cr>PbuVg$dRZp-tD#W5n~8wBf5U;2C(y$LDTn!y6U>S_JW}Y}Vt0;*-U;O8h5%tv*w>*YgXRy86YB#>fg2tn6-4<+7dmsOF@j>`$=n>2Qxo%ek;ZNHCH-~)n=E6Mg6sqVX^^AHG62|!yLNk8 zXQPO4rvfzGt}J1^aOSVv4@`3mx+?-7Z*pOyWiU3_hNB{gaX1-7XxXTU$E7GAkc}3t6olzKuI$MC5;DK z0u0=q1)w2f#uUhCb1`u9{^^F4F`^eefsingqDP4cWbg>|XO+N-bC475E^J@Z)aPUl zlqy>-lhS>l+>KGPFv(i!2C-Wuvv;sT=!v*CI}H`X(Mjwa1-nv)GreP`X&s6Pp8)&E zg+*!A{APyj*x7G4?3A#wpT>he!@B=80bYy?PRajrBAv0kc>~aSJBkM%XOt8P$y&Qf zzYW)4N?W<`Q$D4LdmWxq}Mp9uoNvM&fiE!jG`k zsN zCUaa1XGbmq`o=Y)`3Mlu*YTLqQosceQC4_L2)3TMQyh)Gc~1do_OOBb2M=1+**CJwJ#qB%-C9Ua@^&2FW zsq`prq$_R1wU5$|O{Kd`rFCdxFWqe_-K#rHJFzu{uUv@%pyD-LX*kR<<@fR1!ptn*$MtBDbW1T@C1G{00{-mfTXk^>A=Ru#bJvfn}Hs{<B5|n4n*W5+c z%)Cc#_92_w{Xl+Cz*(16Gj2r|8Fo(?r-T)AH1P_LeqdTCiJcLWse7hF{QnmVb8hJL}TC3lQ*Y9$} z|3;QO3KV(=DYOFc%$s1=mqD6$xQ%TX3BAIM7iwXA5+5J80X@q|^v95QGkQab_f*~l z@Yo^%vB#ijWRDoW2et4;pMoBQSHp8VTE*;bnEl>uzUa*cR3nsaLJxeq4JQvgg!axp zNFYqYb8GrKNKg2+negjTL#_TO_VQYN1NjoByp8Xv2yGs=LpUmLphFwOQ5A%tpVe<8 zTA^8g%D`30NwaOv8*kv+ZV_!*tej69GhIs#r7 zf$SY72ASGAfH)x#dMHdc2cc=yl?A??CL`NsTS$j&WlPUnE1@EDG3OY7{xyCFvho(Q zlC2(H6;9d@urp1^XyIb 
zN2t?aka{q1Xf42a&@##`mhwx8qOCvw&ZFjI7)exVF(q<1+|mQc_{w)4m?y!&YwJgb zSH54F;&2TU`s5_GnmtZN(}@_1#a2W@Bb-^%${ZWe)1d#0U^1DPs*ka*fG9FqB3GD9>Nc&<7LDQnh+jo;+9h*>T zXWqa@T!UR?6{;35N|8JvFpNAaWH4}+5uGv)m@*D+{lnOsTA{hvBb{w}k+kVRXuO-e z+p%3uam`4O4ZwT?&-dcT;5^9#ZBkEaOizQrzK)%k3E}0<<`Eqd3vaLHu3rHc#5NPkp)BY~fA+EV+@=wy9 z#9+4C+Y%N9$P4X!pz(SkTL>9B?7_Tw2KFV7Ju6e0-=vU z-eZs-=nAdDvlbBz^ehCHN8PHn%(uzNX<|}J0D#)ZgOYc2q*L74D_v;ed%Y_yx?l_h zZst2$HSJ0y54MYnlp5`zatLIv>qhp1u0_?fbS(-LcC>s6nNM|Q#+q}!T{D}yWhM)p zEiglgYlRugqBMRBLc&I3vVp0mx2U9)j^gcj9n(w)!(@jUi%hbN&8o6sn6KhSQ3*HV z3YiwwS0`z20Sv+a=}Nb5oKA1y_E;gp70A2m3U83$+fAUqr*gmCXq~UDz+<;Fj8=*8ofe$B)ZZuP z1yU+bGHXe4VjC?fyAws{l-+rpRta_^v#L)PVNK}U{dt9?1v^;FLN)e^R=f$? z)))MlydT*PsYlxMHa35voRyp0*y%F%fax)r2$HZKG_1)W`%k49h+@%-YMpHy`uS2& z8MFWQ(ZJhOrfYFu^5uVcD%?ULAPqOhZtk}&QQzW;SWuX_`BVmKZ!OiJdjO5}22cxKxw?aB=|uya&2 z;|2pZ7589}K74zMjLhS`2;p1ScoNA2La3F@mMfZpN2A!|St;r>w*(Yt|$ro>j5qW|eA#OMVezyx+Q zavEAQ9oJ|)jGUC2*}cgLJ6)J8vXcf%vyJ83ao_NccErBK?E|!IZirYLx7$WI5mGgd z=-S#Z|MeiBp^c8mrrB$wBq1po1?_7xa_52eq;k)hXgu0RQkW9k@_MBi@6A^}^Alk< zJPT=_@}03wrF^CiL-v&QdmX=9QF-NGC;*x^5IMlfh+u01R9d<9oZW7<;8_}d(`G)J ztaV=i|5n~l#K>{*zYc!e_uzP^Goc746ye}r4KrRz+0@QgwN*&P4dsvs81u2ZBL+V~ z!#$5;ImUQdY7HQFw>Iq?-hrfTyYXHcookM^3&N_&z%5kq#twrFx9=ymp)yz`#a$Lg z+Z6gypWo;q@C<~fKRkVnHg{fNsJAQkSd5&WR{7M>3V7u3$b4!TUsEXu`DAKJ#b;>dUT#7bZw2;mLXN+R%#3EJ zRx&GX!+oY8Fg!&&OxmA9hXKYf;{I6z7{0o5PCh@wh~xZ-xkez}O%* z)RKQt!~h=HYn%Qc^2drF>*To+$^a2P55ldFl5;yv2LY_LH!jk1)9E1cl_2pXH0_0i zsr>UkNqC+w9~C$2B|hjB<~`24kD|L8SM0gj#4_@|j+JYNc;7^=;Y&Uxn#9EDpz z9k=$wJwo-&685I?52stz5;~NSr+L;$k(FBrlL!8e2h7FTd7ZJybKFhqk8Q?C$KnmIJ|k*#4k!&@g6t4B%HLWfooPg(BCR&{6koY2z}cCU>E+| zbY8y;7k$ggK9tQ*y+Wy8gXcqdzJaG2o_2UH!Q;7=QUz=!AODm{oSdXmxy;$c75>aK zs1T4=hSw++3-1N+%z|erJX_!?f#(k7TLy6fEev~)faaTTr zU(?}Dr=W(O;PRYqH#}}iY~LxNmm4qMH{MaKoJ5Z|S}A&j&tcjV9u+)tpTo@TJZGC< zp42uePiA{`u5DbN#5Ow5)i%Ot?b{{Y*gQ9o`Mqo|c`i1mJUM5E2FI~S-$DiqU{TKf z@_*MXcwJ}c#ZFDEytzzVjnSTjAz>r6A@LfH3;&>5hw%*aW+o+>_1A!LRHTP;f8wE; 
zxF%%qdRkadiQ}-#M?hcJp5$TGvv*C2r_f5CcyuEb7OmWWW<%m-p3n0Wtx7cfj6f%akno))r0XWdcZ91lvOD~gVQ#%ew!)r9V?zO!u^;{ zPI$WilEm5~1pG(>E=#NzsBTys!vzeOyV5b-9VjaW4+iSDvihT-+@RVAxV(-AxPM

^{75B_@Uo{bxVzW7}&aB^TO59@LBA~5o`O2@HKZHViJ9idM zC+GF%o^IkRA868VFvnSh%0G*CaQ#6pnv}W0z$-}>?(RiW`6wBG3xLgo=O9#F%yA9e zkBxYky@9l!sKI(OYKPX}Yv8PSyd`cgX?<3wJheT%C3J{SL(#Poz7O zE^}{$@|iNCo2gjEp`1a`9c72|;RRU3c!M~_pWN(b}tT@1V$N zERXXaMHWdg`7TUe;!wD46sme>xa_VF4c#ExhIPOUBQP6Bippf4OF|QnspW8?X<>pH z&{aHG*8=LO->>sBmXZwz;`A0zgp=v*iQfb>Hhbg*XS}E&j5rzO8xvD`i`hjAKGzC7 z=}?*XE_9)!%@dOkAW3&&Qi5YHw9&0$LgW4Hj&R*w37k71vHbE-DKe6+jH6lK^YR(q}bT@`l^rLfe0tMQYfJ>RjllTsR|10&__yDPouT*7Wu@1kBf`u|%}=$2 zDL*-iF9`E})=4nA3Q67APxE z%}ckv+$r!S@#_iigVW}n#E;@hJVrKm{IZ6MmKtn-@C~du%o1hBfsD)UnP$_=1 z?SV-y7C+jPTxntRL?UwBb7y%5<*Bv$yf8H9jo-T*Pgb{2r6{}eUeOq%J-Htn4s?sk z8hY{=JFwRdeqX?uxdyHUgl#Xf)J~RW*tj#;oab?Dc5@OW^+q{RL|J(bnv<6V!E$DF z@N@|?oOHVBw7P4M1Df4TD)CLed&A|uGOE% z{wu6$GyyDATqA%ra106tw2J8)Y~#rNGUrKx2I#^KeAZK>>(A;)h$BNIcN$rBzKqpC ziBL2f*8<(R5744l`4O^DCAIk*+)#oi(R%*^K2#uXK+AkjOw1SfE2TTAK@SOw>Tcz; zrSSu0WjMAaJqjiW!z+JbhK!7B00c+Iog&C^pPk2T)z+8q^9Yio_aw;X1%kdWQs2NE zXH~X7d_PnFO!$l6Jjbyy6y<0Y7@y#Gt}G;;>2<$auv?IGF8%quO{Ym-q5~y`Y1TX* z7P2l$x!+Q8jt($#lcQgqE6&zh44e@P%5kUA$FVJ3=xHLph&k;g|ipjl=@K3f{8Uaba)qed!)*4df!JVo#dJ3DiR_Jl&+ur)RTVB5>MlvatZF%T|? 
zaO+ccVrRE~Dh`W`3j13lHymrxz`EI#+InFl394!^!PCnZiCcpl%(rCF3@y8p#sqAf zE&=x>bU$LWIPsHtH0^dw1*9vwwPY$$!TFm&QoE3PW50@%j1NC(E#?O;kTI<|P2v|K3!~f!_0)L4fOOgo!vq&n>GT<4 zme7*6NL*%sc>xg`Yz=`2L;B~pYsd=72g>~iqJ@1ff*orfFII_$UM^_pCHOOmY<4EP z`is0dWD6Mxags;bR)T*6@5dRbZK+w`2tn`gR)A(AQ*kR|3JZ$g8G^UVDZ(=x@=QA6 zTdre|flUjU%W#T02pp?;z^;Pjn=9h|E=0WWhVNTHbzNw&Uc67f&jn+Lpupll8rRMX zs1pvW53b5*$VEfmm3|)~*p{XdP%N6ho5?a1(KGOaRV}R}x*FiyM|6xAWQ`TjDEvw2 z`pW&3pbT@qbbn$QRFGd6{AC1fMyBpy45A0!JlRE?nR&XaHq*oO58gxq$i>Ou@E=7( z_Y5lQLrR5Df$;2vXDvL>2@g*n)&ud3NidOj(ndG!E#>~R$!xWWX=CpLYZ$p`WyKV6 z{?<=);3pAr6vj_5Zj2$TI_HSy?5?c5jx?Fs0279)i!yVJld-yibRp?k<2}iWvAmt# z38r>Q3COhpKN9;8-;{~ilp1!GEH47$@bhG)tz6=1aZEtF3K|n&@&fJ2$oal$8wq1F zB+px9N^IfAofZr*t=k1_CnQDPSxh~q?2cre8cM4?DJsDyj|)ohQKTTq3*2ojFR#b# zc_!SKYFo=wc;5*^VOt@qSJYC(;yDs3O{4KbSS_=~!_)_-hY>ZDgd5T%;2ve01PPh3 z#MI_Q_csPh=|09_3FBT%5w3FK;qfhEj**B9Hvrb8=~SLP$r@o&9w=P6{}4}X^b!{1 zY)*L02}X*8w}j1e=3Bg`Ni5l@HV!()o9XzT_vep9i|LEYMNrUenYhk3e|zVyav~dW z*^i7Dee)#jG49_DcDwZ}9O*S{)G$c${hS`_x>|RTcBw7zxN39D8Hh)kdHIRON_Q~ z_OKW)TjOlJ=epL@c*JfSPc@fRx6d~ z#JXU&k44RL{^&Mk1%5=hEF_BGhhow<^HLOqA(@(TUmp`C2Wi!QC%ekij>)4h89a6j z;MWY19af$YHeg}q2F!fwMEB`w16SJ>{^uupfNztFEX%9s&k*%IN?GaaBr2nh`wtro zbTZdGs&oG#Zt=TSEu+^uSYSx9MO^kDL_m;fPfddZ!UUVqC%R5S!?~A6i955d0VQl7 z@Re{n=BSHbK?dG~D{t~Y#CdAfNnrx)Q2HqDXIsaV_Y9WzWkc8Ul-$2k#|5fHlfvc( zvgK_|N5BUsc$*7X=p+W^H+0`O*MR%x9_z46%;bTDbOdDbf)ncJiNjc59L8qLaH}ee zXiX?#C&6R_Pf(aVZKJ!X%$=s<>&Lp7ij&!pN;@O-v!qqCu1O9TCbP%wrx&Gl9&j_R zmPzA)OU zhu~uNB&6_UU28+< zkX_%p`zxdJBss{rIWq8b2}gwsl<4u$YV1q}Oq^sUiA`#nA~#J@_}n&?sMskAqeUjI zN#i%MXf3wC#cT1P@qCSqZ3c&rq>9HG;^RAY^PFpjM}*4(`rX+_^A*^@s7RuP}b`bg9;!w5cK1KP-C9BVmShW2!F6Fu{TE zr41c(r?4pI{6>l3g=wD5SZA+h58&Z-($wP#)<8y>MVN~upx+0OQLeO&{TVNfEagn4 z&3W<##yc~VoA7|SvZ4{St2@EUO<$9>!c5}TDmP8Gc- zdHaa{bXWxAmZwI6dIVAt&5WzU^m;tSB?UEyC3P!{pqK$&cR5+{@??M;%hx^`a7YI2 ze=L7d8^1c#hU6g8z%!%E*4lk>ZLN)LSy*e}emu-~`Od_EWQRbzbcEQx@QQw6Vaa+Q zwEx*WKwcWrsWm@T=_H`^YqaXch@3AP``^o?v{ll24+>UXBDn?w&XT0`h}eU9*j4JL)9bQadw 
zpHf*%ai6Arn$2dk>ghi)-9`L4Z#!YMK4!e*p?n6vII7%J{=v6oHU15FDfhdWXG;VH zqivdUQw(7aGHq)MUbmc{Yi!f53!n%2)Y7AIXF^|`c=RGux3l==rTx)Zey}@~-xu0u z1`o=5lB|>Z(-$!LFZlBl$@6a4JY=`ej2aj`Kf(-cQEjgQNim<7Zkv$~_u7q5`NWQ+S=Rg<7{lhLe zXw+DTia_Hy0Wkyj1CgXrZWxWFv^gt{LlC0uP}CrP-pJW& zI_8_g)CR+BihBGa3?o4Lq<~qb3q=h@) zU5XPnqH6t8D{RkSF2=X^ag6^)jE@uJPj!iZO^lz&#{&r$Tp<^lKO&@9C8p>NDad$; zo+S`!)wPRx{;(p}jVb&F9l)C`^R#l4>KvDOTDV@Mn<88sSMMRy-n!R?8A*5FdkD8_ zD=T`n3*DaX4FUz�j^PlA9WdSb6s@=9Qq1&qec77Gql<w35iD`tvZI2Ld`(&e= zsM?p^@l#g5cn7DDa;_LnPu6#!jQTy4s@GSPYSh2Ux_s9^C-{@F_Wcn5GKy zTy`i|4%UIry|gNx;M$W(Xrf4L2j22IAPQ1%dL7IOjodJL28vP@nbv#$LTtk2yv+#< z4;e`IZF=&H7rjK|lsE4u+e6MC5Std~mC@MIV`Y7C#Yfl;M%H8v;tIwvax;y}HezG6 zB+Q#0ju+6g~TN<|;Rl3m+H-{glrE`Y7}hdLg18i9aw9 z_2sK)MSTTtJW6bt+p$117gtykTYWyKc3@)=D=vEfEes;tCgt&WAMAiwnqvcFd$26j0i zZv720>4;H;sA#o;H~eYP@CU#E3;Yl@*8o^pKOmo)mkY~`Q9P2wV0>;rM4P)n>$Lq_ zSMsJ#aW^tQ`6l+dQWZt#mED=o#EA@vd%TztT^GTGGC2lI8A?9J?f`>8e80FBW)RN= z`5=;QWKy5T48S#|zGbVN9%n%NmhKSOp1c_y#w0+@JSI+z5eimyt(UD*Aca5%+m^CB zDR|SgA_~gmseJqtVfYGMlwfgl8BTfn|7^G8nq?HO^wGXposk`zp?S5C}%JVUM952E}(=JMPAL6>Cf|(gPxS zeYvcv2R7-h`5wmdb96d;@Aie!_t;wOz@BJxUkZ&~M)pIth#TDd)#|x5BnSr3W+`EI z03HG$ZvQ;`0&ca&LAF9IZocIQ*})KRdkZy|Hg4bd{FNC@8*h!rweJoyupHdlTi{Cr zneVXZ1Z{>R1*+0~wK?^-xa*f}YQ8A6uW*_k_bBm~C<`WDzjF>ttthK;sntW;v8qb< zDGUDu9mauwf@6&PalyFfUy!Q{f~Pnlw673b(-Nq9ta zfq_fJDEtVE-#JzyUauf3FUrasfMuR8=^O-bOKYH$uu5=>EEfz>Rs?}#Iti*GL2}-v z2@ageZ7^`ZT_skwGV7yGI41&@8HvZQ+X#X3a1Mquf@6b(?P-dNdL|Y$wQ$e_$>xkY zQ(QA@GPU|Tetq{Ssbv0lf=XD4!v&EHH}?s<#B6RbX|p-!Ii>u|cnTEY)=dBQG)%Eg!!s^L0_*B;FCJ7BHb zhpk~Y2Yut)~8>exINl94H?|Zsf_&@o& zv)!t{?zyTCW;sT|>)vkx6q_awx8*P+qB?AH+zRBcyoo5_T)rm8PF~tAx*$k90+6@m zI^*ivVAd7JGzDIAe5~VoG~T2u+%kjqSt1s76dh1`iy6w*0f~tv-MmVY%!S&7+ssD5 z6s}ZZo@Qr{NNWs9+>5|fHF#7M;-ob$RL4D358Q`{CSxWL?<`1^`y9xVrRb_kUD&!x z3yoh|`+ojt82NAb+-1*hlm%^Yi8b0J%-?B?7MNxIph%G5U10KdPMuy+;<9muY=b^zdD@D?-?BSV%#$0PVt5~DILpO( zB3iaVG_C!WXdCD(`Blgdg%C^b1l**ki0W7|Vy@u|RgcprvbsGzPtd$WcRQGLyPd28 
zym`f}RPIx&Ygy~W65S}7dxGzOW?7JM8DYoH5nO5J_bevY(Jw6t%Ph>;tqz%s6_<`M=+$cPg#gJ6B!M^bch-B1!2E`CYnop@fLFP9I0SB_z9f0US+GL zNz7sM9GPHhe2?hqM-ANXxD6e5Tcrrg6IDL7co{jw*?nerWtFC>n`AryNH(>s^ghLq!+for7m77~GdVKz^}lhpZ>R7W1)ke54DUEk zM0RsV|6803+MLVoN!E=8f&;m!igU_}&G*G4Fg)?`*xGy8RK@_jd2_7M)N{EI|B*~-#Q7VIL_~!Id1n3e&Ia;&$aS* zsF~w7m`HXr$zvkvp$rq#vLV&Z3=w6n-5@AT#duCA)o$y9c2E++_Qf4@$Z5D~4s`2Q z7#3XlO?Z-ubIh{jyf7r!#odAlukat2)15wvnUF%{E-s|2mw zzjhf9(Uba3)HlpR}1U*~qbF7$a)+eHh+qeeY;^DZ~=iOVcRFhR#x?mx>` zi{`ml<)$EFAD>B%VFfBT9VOQ4Zz5c8K_PBP$$OC?x4PAaa7(| zjyix8K5rl&n@lkza2cG-h~YYjo`u+k z8M$8cqv#q#kSFSPWhjq5lwb{q)XCg}gW&W7XF@X9?>DT}7!v+zGlUNz&ar6QP!ibF zjBQHgGk;6SnZQp^5DVuE@N%rlQ(PdLQL6E@mKzrBj1#V5q-y*`|qm z23$s4EbR*;CauJusrWMr2hcc@>&@F@E?Gye_%8%%m3Kt``MDVBFGgl|L>|V-Qg9`6 zqrqtnLk1W}8JEf^tA-pmgyaF3JWs`oi1Xw&81i)&k2S#7!es$bxGbMS2e5s1k;y%W z--jjNIW~wNEak}cw>5f>g08@D22S-6l!zajEdK)&0?eeGg}@wnASWY@WiZZjkAQzM_-$87or@^4^HP$sZe%j|87RqCZ*r*X{BE+{<~(F0=NeJh{9_O(j-G}=XNx7RC4nUN?BG7<)>hF$Ap3n|N&oqYbBJzU8F;bt(wwhi5c%kuF*G=b^}>A{{!fAkMZNf!H(;cxB)mm%=-vQeqKC5`SGVn za?)w5KSh%|f0BiC2}3eB(Jd))mVd$`N)cYe1NUghT| zEMJ%1d7N<-RIb!&J|3i>w!SPhjZwsWO?Ir2(2Y*)*|6v}^03F1&wYrdC>Vfn2ZxYZ zx!=jy7NFdO+qboL_6B+?>T!*^mcPser}C9(Ni2I(VqhdO=3)uIsH)uWVr+|B=TCbR z5qg10&RBXN5t=G~ntzJoC;Uv2IZ03#t@DXUnXRn&iW2Mtp9-mP!Eq3yA4x8Zvi$(4 zmDl4co{@_du-z%4T_8-2QruH%4#CYb4G;jL%6Q3>_A=g;gDknLqCNKF4T9Do_{G-x zGUR`$u|CS!=t6LnI0*%gBES6zia!7xU^Z(LUX53emMiO7-U+F#$H2K4FCVnlfJ<1% z-ss}E@y&8VTEk1|d{7kn;1wa(S0VA&1eL3#whFokOI(IO+4z%=KMSpKK$Nc$yt&wJ zttLsMl@(uz6xuF2o);ZuRL8#DKFUh$?*fU~8ynrq?r7G`(AuY!-3eG14P-RWajv>3 z?|lU7vVo!;h)C!`4<=OTO@1G<9suM8nuP<+Y{6Pf1zybJOlpt&`XTcR5IYRsfRD|C z++Ahs3*N2dmYsSEKbL;P9Lo<_))y$ULohWgcwGt*Dh9Mtab%V&pP9=a$4xj*slFz6 zw`6}!mS)qm^|l3sk!|z7Cin8heT_)lV!y`gv22lF3tJY$@Po!wO6LdX#sKd1~ZXFW~XV4#QgYc$im`LMQ%>j5HCU@W+ z=-{3p`e3dbRCx|Rc41SoQc%VnHe8sCB>XN3D zg4$bnhlanfpw=qwTu@WOSeO@z_SJaOH>Vc*-?xrM-J^`ld8 zXBnP{{s=o~`)h&GY<%ucao4(#N2H*m{afh4{n#b6A53$N@^ived0T+TPR6;KRl 
zFmNGu5#AWFc(M#T0U5&ebBPt_HjekIB<}nl-K!EG&&J{WUkjyQ#}S&Dr<(J%Cfaue?EB38sDXhq~thV1*ZbBp2TcZH2!jl4Ox-+TP^O7FhsuyifV%K&cZ zk#+vq7Vkje&3xf#5Ve&<#8Biqc>-j17m2pV5z2VM`lwF813BimJC6oEi9G7C^|RAv z8rG>9@)wx%JLOVOZS0F3$I`GN?lM|3hVpagSVICo3UdY&rq$N(f%eI0bR@fDL1|ps zLbR`7c0NxdJTRWhZIpK$k5yI}a798m;TJ52u^}2YJMQQX+^)v4rA=1+!b{$OoKwRn zRT393ez1iZfY&ELn6U<2oIBFxk-R=R)NoApsF+Myd4M!tlWI67?IT>0fM?VKczem$ ze|6Ph8Bv4H7Zunu+yL8uQDE^dXfl(CJ42=sue=F;t%Sey;~=+v8>*~lM3wc<4sv&I z-Z(Nr>I1a1P3Myy;)cun|sF+z)|j-xA;SI z!8p^WC$Is;z!A}l9*K7hie{UgKM7`=U^LqZw^rkN6Z+GDC&=8Q)fE+GVOy_O*w(9g za91xIE$^~_S8IvtX+Q6mc3czXzlRaeMl0|{xn*?-Zf;$TCe$vF*)upyu97_v9@j3d z2FYxSR@95%ac+~YbX#UoI4>TJ-(uTaFhSPNx2H2&I1uZrnjBa^}s>ZlryiKkVd+`?aRY7TMO~ zy8FDjq|>kE*N0K6xS~{CrT2^O-fu5m?HyapuYMjZhq}T|7Zn&ZMdrPB<%r%6t3UJY6)ajex>&^ zpgpzo6YXe7?N2P(Ufn)$l$V*KX`xgc?)zX%-Ffb^EyL@znv%FzUdEMVrri|2kEj!# zJ1i(Q0*;d3gXFi)372m9?_q&W3UnrPGwa<+-Nn@3l5g6$i!=p- zm#>ncD!M` zRcJ{uShc$Dfmqk zP*9i88bjA0cRlDituqRJ{2B~yJyejZh}@%U_08;MO2r?I$i&$XMb>w@O#D&-Hg-II zbSQN;89T*bwn`XDq7uqW<)d*UMgP6$FX47og-HUk!HPG9;z!bJ^#=o+C>0rS%yAZN z6}c(flU&oSz;CDPy-fO3tX^%>pD|{ghdhnMY-h?kZ(80cB4jb^Equw)Z*fB!>S%av zw92G!=AM0>8YS&1knbiW+)6eq1V(4Irp9ZNQaF zF5}mi28G+VAc#!Yde$hW0d zxZ77Kc#d2_KE}Oa9oc!ZsN?&5f^uf+%UyaA2sGUrlmPB$aGHv|$#>Vft-mF4-(WgM zt}F6p`m!rz+=qN({I=LzJP2cz%;u_Vp_S(8GW;?hbCYK^JUb<^ohH<3$C^!-YvUB$ z%ltlhQqOFXi6d(Nkt_D4xO z6n~Ux1DVMMJdfL0&)A|lHMnf8+~1q+_6iH_Yj-|_)3tL`Y-6{htJ1P|*z}N8)eiIs zpS9D zEM64UT7J8L&dLX25s+JuHG>>mYYdo{RlpEZNAX$Bspm~n?4c#H-nG49e4QybB`BCF z0A)-|q+JwP@*;L~G`#KpP>*N?q$?4VPt)idM)+;3qAfrr)E%=3qxRyMZMWbtUHB7PqUd{uM ziU4j^t7)CFPR$29i@|#-O4=~`jP49G9N>J@h>{|%W%L2v0j3Y%UQwn1%mKWrwbWM& z>)+%SDt3C9v`+a|w7c@F6c?k#Q(9Gdjvk7iLM~G1?icrhzA8PYyHo5IW3-Id{aDPA z&(ozo&|mWI=d6MuJx67wRJ>p|@Lh-qcVB&*_7Kj0c+qIe;z01BD+ejTtsz<|ev zf{hN7R+R@E^f+{VjdxpShfMcmT=!x_r&i+^QM=cle26EI`$USvqPxk37Zpi4jlTm? 
zkObWAL?V1pwDMH4lRa8;6d#lXw_%wDBtQ`mjbh6`awsxTgA)dtAN6P$zsZC=_%LIqe)O{_udv}JUh!<1#X4*< zZl%0fse`mDdd>EMh@tbm-9geq0Js#lf4P0e_zT4zW`AVQVmXTrsDv-K<2pZ}ikbjS z@!ZK!+@>XG{_P%~D$+7`4=>)q&jZyY5lj!-wfbf)G+Y&)YC{?%C4WErkEn0=15em> z$7xyRF=i~c1v$4)cbswkdJaU|`a-B%pMKLQs;W`dyUs*%-ae6L>f;5DMcB@Hc7xET zkXV_fF{Bgadn>es`-xz-vYQc4Kj#e+ z#W;;y?|yh>2Pehl?I!&7XvE)P=kYh-4E~0ng5T_CJt!Hz`m_??umd0&2 zL3q8s$+=C6w4ru1lecU~ixej~@*VByuRIT&QR^HmMF}dQwqxXuR*+M*&K&$=4Qid6 z1^HU*+$Om4Wb>DS?pW3?$X4FZQ`|JmxefePjo|kZ{5f06&05fb>Z`3pZzZ2QwvP$p zzh@b53@1W-O*W{jyPYTt-3uK51_v`w`DrR>EbYdY$erbMPbD%gr$g_OWJd9r^gkUivDf zvX$9Gr`AAe;jrMeA|82(PTbaS%#bgyPnsvYn|QJsI+ zALdJUo8c z6O?N06J7D$0yv_Q@D98!*E(^2wgEH-cjFY$UaqcahaSc7d zY|QVZ!Jr`Qg1e3gQso3E+=}{1Dax=?srTdVt#iZ)y^h;|q1b zIB++BnpFmF!wQIl@G>Z87rcu==~?W@KaD|9HpEATKzJy`>i|CZ%jQ8o{GN6aXr8UW z1MrN4#}A%Zc(UPP;34 z`5mtby+u(%9(PZTq!x# zi*loSL;vYR^`(@+-2JHj)Bwtz@}LG%gD6jGFg1i4N_kPksNvKIN==QVMp2_FZ)yxR zmKsNmryijmrF^If)I@0Z$&>~f*pJdu{!{=JNCi>BR0tJH>8LO&oQk9(s3%>mh$G-Yn$?0GZd^uGR@NtyZiRN9o{taLgnwm5xx z){KJeLI_y3YFx%rt>5IA3Y z4?)wj@{34XsNB>n8bY8@tRx@QQA%uXJ}ssrm7AVkkeT08h~I-jT0Ur2VZqY8M;uZV+!;)9aaIAH zmet834~C^>6hZ^7LNL%$#kmC~xfy9CSv1fFf`FegvPw#F3znvd4N;s`R9H->EiK7T z&&_9wkwztSW*2}(s~-r= zBJPZIW@%1W+CCUq0--B1fLhSDafO*oeipQL7D7pWAckkpj_?@;xpZ!NJ}}{wVsK@o z=O+N`08COoNiwT2J#$v>lHzpaTF97@vn;CwT8%Frs3LZOt^wNa0sct!3vyH z7qL7FxGqPG=uG)wAcpmXCWe+^)20`cKtC$vgCL)l5&%%kvzBM%6qA?W16GCC^1>Ba z_=faUN!Qu}+hG=XiEsE%IV4z_QGyZhDi+?D;Q!7)HRx)Ne$?T-hf^vqaDP7d`>8nT zl{myRTR}%1&Fg5BBBPU*q-Ul9r3y2mpoc_7JrLK?cs<1br?|AV0%-7Jn$ArtB!cZB z@Oj}RL{bmpmH|>sjCG77-EDSZW>zA5|Y)gfnikpEl%_6?9ww1VSwWL4)_Jg+U_UW&ZHrH3(O{uI)uFpVHFV8;t;k6!gfL!=MZ)V!nQ%!L5Hw5 z2wMYT(;dP#0A2VQ>;H4;m`tTFfgzX5qdLb;Pb#}OD~kfnbUEqT6lEf!s%Ui@%xVTv z8A9lLD3A(J+9~-FGPh8W_Gv3hieR{>vmrw~1@bZ+il1e3>3l(j zJLQ~|TTCOkz-tJK1A~2 z*|NFmxg}YhIyR*RsF1STm`qUzq_eWpS9Bk%v1zA*fLab|11!E`r=|3si;8b zgf9LJ=|dF&(CZV5ato#uLYq+UIM2&3i5A)#-2gTzJ)g;<-bY`8=zAA^vB)x?Nbn`- z4b(2g!vI=dbY^BTXnHVXmv+-Ds1;56s8n*)TL=HR#*R=K(`T zmmo&yqP}!aEZ}v4%fkfwp8 
z(ZGCJ12biO$6!5RK0FY@r$CzJ6i6gUzZ$Sf2VWL}9S4vy0B#{w0#7!Sp(d$|AvOM{ zLr5;Tpc_(40BcD{*;C;uA$fBlZD*>UV17!Vs(~sDq`|aUz8bL01*lj`G2lg!GDbR- zkpvG&9S7LHP_s#tAJlOYrG*;uHJ${I8uBtEA5Gk=sZe;Y08CZ_jAa0&2;BG=L-m6) zW)MseegTx9??9Cm1batJeT7moAsx@QW#E$l{#no(m>X$U1Z5&MmQp#8lcGL=^8e@b zodhw_1isn?iT2*5HBEHfc(2BiJZ zJc8|+1feC6I-OvQ+{r+0tP7sXhyuRu4zqh}KD;d{;4b9nB*GaFayvzNlb$kk{bu7{3`quT9wva zJKkU8AM79HKi%KpKi_|`e~y2V|0@47{|)||{kQsW^WW*e%YU!G#lOzK!T*eZqd({0 z;@{>^1;_(b0iFTsfbjvEfZ%|rfaw8-fcXK719Ae20#*f-1#Ae|9I!QDTfokMT>*Oo zECF=^4FP8Y8Uwh1mVmYZDo`G%3iJ$A2aXTa1O^921x^n%1kMj!9GDYW6u2s|EO0~M z=D@9i+X8n6?h4!+XbG$fYzRCP*ciwKwgk2XQbF<{Rghf7eq?o0W}{N#QLKb4=mpQoRfpW08P zl)qoFUzlH%-&BESi?#XMRoV*eX6+X3R_*IrcYjZRwf}ga)@J{sK#xZMCjVxCS%5oG zVtl{^LJ0%VATxjl8mtMZ2zWiTsD+u{GA z|L0J04U~G`-{!9l_;)ZxfZ>U|b}i*E^OyT8{8j!)aW9~`H&A^7Ky78Iwo{-s4g@rR3ExHG%s{T=u@F(p>Kxn4E=ZL zH=*B$Hiou@_SKEoMd;#nMY>hGXLRj4*Ra^I1!0@Q-VL*a-3{{%UmwnfdqixC7#g`T za#Q3hk#9x57x_`-uE@QS`y-D6-bPmo%K_wq}5Dl5dvpYrfUKmwm0i9?*}c_|5lQ;rF87 z0YA>~iXR0MPN$u%P0(g*S7@KnKCAs$dso}b-^ah)|5<2{AN-X869XoLtl1j)bKpOL zV?l}(1+5HP8}vrdCqdr@)j*$e3LYIC68v27JHhS2{vj`gd>V2(q&dVRbbP3P=&aD! 
zLVpOgh4$8k>E`Gb>GE_dbnA7`>0Z%&th=JSuKQbeNB56T66PG%E38l0n6O8}CWiTj z1%!o!g@;9l>BDA(#X&t&!ivMn!otEggkKKNjrc91DPnr$?8sG-uSLEKwEaGk+6Mdv z^^^L@lO(KpBUh_5q9s11H{{}BHf{^|ZJq5qx(iPYvV4;U4o4R|ad0i^E_ z0p|m*2B-pO27VfNEbvs|#Go*cAr(O{1nmoI3>p+XH~5L*QsAO*g0BTrA>kp5L!J-W z9+ddS@nx6qK#^w6BpCqm0ZpAY>Y^o!6RL;ZAtx=`Sf7+tLHG2Lw4m`_A^=;yb~w-0xMt+kP?HcVZnvLD}pP6j|Tr7+&g4&$f%I`kh!5RguV^9 z?+&d6DR3?HZm5TDiY`f)MOx|&-FDr(x+>ivUA^uH-3i@Ky0aZ^HYhADEI;gtux(+V zhMf)@9zH2NDm*Ft_3&T90nZ2Q2>3DJN2?}{Kq%vf4 z$eX0MpAESZ;v5uW1;^*KboSOp?h8Txvo}sQFlw{5;iJqZdgv(#;{Mq zj)wgi_IFr&m{WM~@Im3@!_&jJhF6Cl3qKovG5oLaei1_>d?VhC_z+}$MI;+ZHGo`( zx)f>{O@-za%>hk2v|FTa2FSmcp$ER{`?l|Uz90G?@;&Z*$@fp+fqtX?bbfljIex`{ zcl`Qk2WuxoKir`GUi+uEUCa8v?LQ*m^ME4(=K{tBh6E-ACI>DEOoMh^8n`U5Ft8-> zt-uch4+j1bcq4EWNW{58uLT_o8Vn=SOladLgLecs1osJ<7P1g{^^1_|kaHnzA)3(T zp{qmR3vCW<4;`pmpv%^+hSvR9cR_beClBiv78*7kdfJm=&w*U|8ft$g?02ZWOSlT8 z+LrK7!oLf5f)=GCwnn@a@j=Ath|3X?k&i{ri%f~68YyZ5$b47L6itq%NV7t-Mzdb? zk>($b#rKr2%~uN}p26=czc}q8Xu;BeX9BhWm)->V?h^P(;GV$!fp-F>L5d)cphttW zK}(2iUmwJRgxMW5FnDP2M5t9-a8|HSh$bXDWKqcL(30v%T3@@D$F&kZ#VN5lM}jxk}g!;-@mhAjc!T@m(l*vnz> zg?$!Q6;>N|A}l$)Jp7&To#6+=>%yDDIpAYegnNW0A~+%}Vpc>^#F~hTh>xMC9*t;- zI1|wr!9_Gjv;t?VBE2KSBBLS~MlOzA6Im9yA#!u%*2vc*w?*!Z+!uK;@=RnSXcjGz ztKR11ucIJeU%G%AfJ=mpa?2F*gvvzpBs)N;Kc&RydVVMP$OOVglf*2sLleEoe5 zzD2$pd{J@5eF=Lt56(OP-@pIa-{#R2UL{Lu){kE2((BQur$2qWkHpn!+xpQ(5R~sE zk@zY#eO!7ietNnjODc1cI8&O1E^a<95}9Oukds8VZMJ5XW{^XOQqxN+p|;81b}FMZ zzTNW5)K%AAoBlrH*59~jG5c%Zb%~dq_IhY}xUwm+FV-K}{#140 z{AKg~l*_)obMlhJ|KPRo!E+^{Oy4oB;Rl1B^bFr!J2HIphtsx~x_x)9_iHy^+CJ>^ zqc7_x4EcWfxSCUkJ%e6;#eQhe<2SC)s`|3zS)coDe|^eMvK)D3e}nTM3l@HR{;8jS zj+)bNa^vd*{o7BTa4sK^>SE*Dp3={N~4V1a}>(jg9&DD2x zmP`4|x4KQdSg>bTFO31C`^jc#roE%rO!0k0GY%W4_W*TgAosff{#A>O7#2pH}1N8|333sMQ4c@)(UqjLO`n7i9i!aQ<=qhl7HGM*VyF&clq; zOPYC@V1z7AGgC8T+q7-^r>Er5bW!N!$tzc`oU~jlc2Y*+^2tTZatn(}CTA4qPX>H| zi-CuMeUW?9@MoeXXrd-~62xoL#X3tQvP4aSCSLSwoSxSI*8-qgCI6)eTBAV32S_A# znUjX<)|sX2og|dM)6-|3P+j?q{_N3umB**2kF*86^~y(I|7z~#J}Gz48~cvjf7|cz 
z*bB>NFWy$^_W5^%Q+*WyC)3kM|6O@}NzCcglPTX1`1#nm#>=t4q+AF$k2*9faIEw4 z@Beyl@wmjt`t6E2bNZa$p`TxHI$Ysb|Js`2TSMALDt{jyx%aU2a?I2p*Axy-yzs)S zTbiCQ5(aK4t!zg6Y|xDhG)OX$&%i-eD5CoM`~H< zrElK+;U7!encI`!8=`3c;?Rol4Uvaq?reJgj92!-%-1&>$E@G-v)}a>KVSY@{@noe ziofd%cYPQ2&q3FgH!ep%k#}v~>*K%HH2+c2xa0M|u10+7w)gwRUncFE(ALk8S?(5_ z(R3v=<7CK?X>VRlF<)Q(^x~bF^=m(W%Wq$v-*aEReRM#WK7P#PXBH$cGd{8=V|!97 zty_NK&-Z64zyG^wTF&sqwzG3S_Z$AtMbG6WqvNhT`S-ZrwVQt%uKd?yk_Ca&zx_J@ zv(1|Iu1~@+u!J81+|v7QK1s$aX?MwAzTtn1X!X-*c+vVuXZ+m4LP!MsotvGTkxpl+ zqZvAG#-8~j--;Rf=rZf$h7b3;1P_tT_-0+TV)(B?%WC?c>D>~1;8mAh zbs;}V+o6?B6zE<^C<<*X~{Rd0aIjE@}MRPZl@!5B+M!qBB>n zOnKgT_NgzPdV1*Dh|ix*xWxbOGlm(uXhzxeEh;I${; zD4X#2?4;*M28^i=Dac%x@XhPWPX>>!IMVu!^l9_$w3dds-#@$MnSGX#^q4fy@q3Pr z8y`GoYsh1P$Jc!R!lz!N-~S-HIekRl#ql%VNZW8}OzMv#XGY9D`0wOMZ)wYkCl^fq zY4qiyRAqeZ$}iifi~BxyTAy~#;_>BoLr*2n{4->`(mHy^KF_^V*XWz7s*9g&EdJB` z!uPSS9=dkGEBTz!+#EMU^ZrNAU1(nL)@SX%?#jMY^-B3uS59A<`R9yB-XAdjz4uF( zmi@7D$*Qz3CRhBN{QAQ0SB@Wl{mSy{@tY=WiVB)jb+K})>0qx}hfcjaWitKJ?Si&d z>iH7}q%MBxwTL;|ieGkZ81&nlvs+)@wJ&yC{??O?r#C#?TO1Crz;I~Qs9bspqo9XG z2ECAS82@@4j0SgPaCed^hPzP-6hkecrclwE-k4JFbctF_24K(#<-&5!b~QEABmvf zLa&9++A$KPX|!p6eq^v)ewFTQud* zcMxfkR8wx$M2f{ZNd{{F+t`{}m{9^X%UzycygH+(WKs^jTocs+z)9mj+)q8sVeuu6 zET-g>Ws23{E1_tz9FRqpFmUN(l6qKAqwmriPrtjhao*}7lYTryFCF!--Y@sNnDN4^ zF)x>%T&>t#m6bMWLPT42@rmWe`}-p~x5K*cr+u*FX70I+??(l^`$}q7#pY5r){uBc z@xq#uLuPv242xmsHthOm+2sh=Nss*NkMN=Ip8EH&m0LnCSu^XWhOc^Z^vwaK?`@`y z=GG%)onjw7uu=8xJ0CbJ{&gkiZqB5awmlm8=(71UGDh^uEqHwE%T2~s%ch&LKEJi= z8omp-Rxsw%-#;67rQz3`eLs73{3~1I`iA$uAvc{KQSIk>spZhbV+-EgJtM@e#_d4O zC!hZQ#o2Qn8{+lzgZ-WuH)P%Ct>fB$n-Hqb-TFm}DW{FQw_fv-G$;g_3jHwJEhZQR8l-hHLv zYHCLGhAQHMtpQ{+UT;s-)#GS z*;g}PxnyHj4Qc)>=+)JpGwr9p9G%1b@!8#W*4Df1uUy?{?V2mHSK9^BcHq$?e=-(y`4wjNv+zx;a|y5B+Z_#x5U){YsCV$vgynIS^ZYk zcgvUO)z2*%(C10R55DU?p3toKSPs)aP5Ddzrw6Ww&Hx=ImfOldMFAu*axbZ`!f}!c zm~{Gj`}Wm1#(HQbcV?9Nj*_YMmH(PYZAy%r`CVS=jUT7}U0v2^+SxJR4wm@LxZQ85 zyFwG=kX_;HukqXFUe^D?^}y{z$~+&|r@M{Bvh@;bdeZ2v{~Ay!efO=6so^o3uAhDM 
ziNPyA{_S&h_Q8MUS*WOY&a}*{95C5ZlJw+MdC|O;A(>7-v#u|npS@j-q~}=*06R(+tuPvt0!#R@kZqWkMGZo z|KPpqd9JCmVqRGN?&y~?oc@feYlzId@@Z~G#ISGjJtp-2_`)l7Kj~KvefIpQiY=vs zXBh`vSh~P1{LN&qs^6_+&L`8V`Olty@!!k({}#UI@l)f!958eCjsXjlS$o%guy$O_ zW_x|VDQBYg2KL+e)6J&hM=yV$-?z2Tl27hml)Q2L_ZMDVwSLR2>#1DdsotYH{uMC&&eqr=)v4?u~`aWog+lAaUd!If0>e@o7%E@K% zsm;3=CVLKmxjm?Q(DYj05Z}N_0a}>k%gUT2t=r1ayi@*@ru^jpmOe94B&hWXvqPtXRyxr?CU)vXy*?(| zcZ_B<-xa+&!Q#f3i3xQ2@*;IYR`H75jI3?z`#z>wFT0~zFTLnsnu-2w-P~6e-Z-x( z9W%PSg1+W|3n7G>Ne2w;5o1AgXavKI#y7w>$S>HB7-mqU_zB+sN4C>{wfo$7^vKTd z_Awu<4SQ*<;l*9wne4k7D}GR%Jsi0z_47IZ_MB4D_RIQvheQ8~Ej!%z>!+ID`QfVe z`OvHF&&k{KDmB`Vc6nb2k);+DHovC0zIu1ZOo4>jC!HrGV(=PquQ5JnDYo68oHb`-JFs-gwuy%6mZ2+y=J`pUO04Gk$w1dU(u) zFA~4nA(^cIcVN=X2cvo&m^-X)@X6ETXMX1yU_O30{*~RI_e)i-y!hzeUF52l&8KDa| z`fk2C<*7BXLl0?l9ak^IlJQ)oYEJz!YW95uv7;!8Bs z#~n0NG_pSCpqWPe&otAA!0v8;8Nge<6dJbiEXZb0VqFdoO&d?|YJz&ArO5{(XPZ zQ>jlZ3ZH3NF(EVe;dgu5FTL*)kbZFK{nfLk{PE8jj-8bL zli$Q$k}1z`*j||9dgk$9!^$fkdVM)z#g*K5P*d3+2k;x|(i9dDSaElsC{3f0qEfOd z;G>C!E)XHa7+UD%0SU5;iY!uODGEX?Q~?pB2qYqqAQ2EmLJ0{V7+OLLB_Sa%&YO8p z{pSt${Bh@=KR)xlzjOXLbI#03cBNmIzFeuHR_ppwcK|ySk5nnC-+y{|X>=^L$8Rgu zC7nGZJaEuBWg0Qt*{|YPwB5Hd(dT!ou^w!1*^a%`ef8A__V~>FO z4K~HQ9kkh-qR`THK+{sHSU@na-UE^(oVk%VC@wBq2*`}LY|rqadXEGTpK`Cc6vXLn z1;{uW)yqdsFE-7G)ayNAlw$rxNpXWkFLpi1%T?@`a}+$nTUsjm7m5nh%;T|@Mib^X-o6cnwbvC) z#4L7TR#ix}94He-xH}QX?Pq-KV#Hy19j;)9{juORb8dXXvcWl)iN`uKk-a`t7TngH zy$;rZgnUMxIAM27v!HmpCd53-2C}*wsSclfJZY~5i%fEP&06$NW>1&;RT;eJ6_0OF z!kcp2vRvj0-j@*B$kQrqA>lOIAo2cEG|I;*eDPSJS&g@I`D>&(XfDNam>m1Sb=Khc9j_KK!lpC7i#YI5xBfcx z6oxfgs&gwAEux~N`EV7Tn|)zEb%0vK1e4nmIF_Ds*mIP5!RUHzZ&c)E-t*B3IWY9m z!(2lg1M75|Kla3vs`yF66`#)5v3+#4A}1!QXVSuuFy~i{uq$!NikY3rD|m_33$*2g z_-j*Y>oj8+Ru%AIUt0AmueyxHw@#mmwb6L&Ff9A$(k|FDq^f6KNs!W26>RZPW&3nO zF>-7?wBS<^`|a?w3XNijjk@8{84a!TxutM^vO*RV!ij;J`7{WqX>sp-$`?ynN?^?M zB^N&5^OA>m1cuW^{761UabaOX7uYF9M=X#C!om$irr+j~u~H@PMs4n(6zxNL@oMAsagnb>!c^gB6T+7~GKr$m;i z%p0P`iOgS1sTK#B@{?3Cpf0qmu9~ACDPaqmPVc6ZYkqCKC*w>Tx@bp_%sKbPEkH 
zUvD-f?W~6Pq=hQ#%{~h^{?fwD!Cw;DO*MGBo$0Jmc}Dn1r4H^0V|M{fYgl+=gx7~n z>36`PxHh7*cuKFVTERYj7UGg*EwXkt+R+_SRoD(go(ZhoI8~m3U9>m1ShEK!r(BKc zEet09(kU?{A~zG=GgmM1YKN~FIdt1|VajYRecHAem$AF4>CmXe zXLa?-IC`T4o5>rf)|@q|$+l`~i*&7a;3v|ndbLjDDwObZQ&WmLPX(VD zuw5sw0jEe5kh^z2EtlMfbW2C^Q%^pvU&9r|-;y2Eo zPM1esdE+4kT(kl0(h&Ix002oK1SWNb+IYpeg#-f9>O}xqV4tx6KM(Wf*Ebv;2oDK6 zu7B*PkH5d8gBT!ko43a+JW3h>iLQeHK=dp0BdPwWYZPL+3rH&!2V}k@MLR)2KxhO! zXoq5@iRF5ZFQ-=+IsgTSMS7vacA^`$4pgUU0l*!rANAy%Z*(}?2LX==gQ0vv zccOcEP(c>K!g?r)0`lKw4`>wwBH(azh(F4EhXZF!Rv0WU6&6Gr1nl_^-GWF6(U9=K aFjN5I|Egl*UuUDFu+{DYfCZ|A@ax};Z;*Ka literal 0 HcmV?d00001 diff --git a/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip b/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip new file mode 100644 index 0000000000000000000000000000000000000000..b99e5ae5bf020435ea59216e719fa47490f7f454 GIT binary patch literal 621912 zcmV(zK<2+tO9KQH000080GeJ2Q^1OO;}9eQ0Ep2901W^f0Ap`%V{&vcG%zk?Y;09j z2>=80`Ve8&{19Pvcnbgl1oi;{00a~O0087$dt6lYw%;=_I^tu7l$>sz<9MXv)X6wv zh2rrHg83U6g3=UI3qljSm4riK_K>qtoU`3YrkUFH(aRq9q~7ct-wq4`BEHZR6#+AK z6Qraa#YfIv>-XDxX3vAyJ@@`~`8cy*>+xOdw|?ul)^Cr=PZe< z6H~^SUz{~<`rzKZ!y;uFAnDV)7bM}mY<8Kq62>T5jM_Zjejr5r_Yz4jk zw&wtPEa};o9!q;B&|`7W+v#y+7(2S?^%FBQvrw)-_SDfFH*G;TZt}1T>B_wp?sw*% z+CIPGYPB49RKso_gmWW3n@8{9Io#?72Nc>(VQvVePF(-E4*Z!OuP`Zxz-xATur7q_ zmCSLq_lI&7c+(lmwc>!x3+1XFhT~VE+`>Uf7|LaXLetO?ZhSjqk`2#eHL4peTCOOx zZTi9X>3McIPB|&tiMIREn>j9haL%+0dj`jyOV&`C++%QT(E89>;NM_I#=Nz2P2&s+pWV+`9=q!VCX6IzWIrWK?`IuWBjBff1vw#qNgudc>!STZYx>0R= z;{WF#5qn!<=o@W2v|JVZ5xzM}a(}Mnh(q^&RwzgK(kNGz_Yde-;BEKa)eXpj^jk3u z+!1&DK)HVO;o>v&91)6%<8dRsK^)<5>6J(XYt%N3yT(WVj`9SpLnDj|b;O4Xqx6n= zJrPD3i6h>qFd2+mI5mW8E*Xqtjh77JR-+mo(f;F6 ziW18cLaE>+ebaHiQDdKgU$OWV&R^5y-baK4qdTJPdo9=WTRv0INhcFfh8_5KtNG{Z zhd7%aOx!ubX^f|+JNNL#A)s)hh6qz4qtVW1JO-7( z6vh}JV*I@%uK1NGB2-AiX7IjHK|pUf;hpyqzM+9QNGlBscSPQ4aK5y11c&h>loCgp zo;dQ1Hb=JkNg`%#A^FV~4dOYT3=-~76e@{uh=|EfsS;*&5j{)$6L_kIp2iP^Q&;0z 
zFu^81l%xsaYz}**xjZp4ibgm;Ei#%TaEU2~bBvF)5YZM%q<6kUVf^$Wj?6!4DA+|^ z5JJT9kuVl}iD1#YBi;b0MM!Rz&H-Mbm5Qb}h@dloE_cMF(`fC~B9LMqOoXIZGJlWV z1c623_8OLzfFQFwVki>vnX%k93)Qn%n(#HU0V?7colXS!fU&m|AsH?-+ccC4AzX)f z#sF_bz(ToA{2&tjV-+SFt-@SWvM@W!Dx8_>B4WfFw~#n_t(0cOxgBxV8N#mba>?_% zLw`S@^7n9$zilIsfH~Z5#&cKWdFF8k*IADbIO5}RXE2ZVZ18xzCJvwby4=R0%g@z! z!sW&F{w_bWsdFwbtXEwACyKazpZEAx_W2KesLx;CE&KelVn3f}Ln_NNq0iTW&pGPz z>)>`oKN-;r7Lz-o2WT&F+Q$;{p#@+qLGuk0otF)7fq!j6IS~#s|Cqr)f32apXG-zZejA*+&^4ULHM33N9y4`KSM3h3>!jVAhj(cAx2I&g zmj>9q@{ph1Glx*Sw>4|qv-^zK?xCHs8*I)tf!$`veERqakR+q6!f~t6N`y-`;b-Y6 zBzr6WX*%M7W0ri(X1H{Nu4y@|qZ^zx(z6A)puqDyN+-TO5027ff7EbAaIpn1*5O4w z|8^@}xQQ@6GTkcdmog`^2^mGd&0x}AYSmGE_-Hiv!YY(uVkbnfN2U_t^~f~H)6wxy zyDa0LoG=lP=QkJ8si7;3f~7qMj4OzohbV-`WTAx!*J7@~SVY6pEPs z`Km9C{G&vGc#S{+jz?oh$2eoE ziEuz_9)lh~AX!GU1cW1G!4V>C{YCvowd!l2@sXcUie{T0afeh&8y3(|lA75V5HTfM zy7NH|7vpq{$$~rC@X0GLL^EkoP*{MBjzT9p(243OWNQOO5bIg|gX&-Wqp$u4nSRTU zD*dYl{~zg>p5&O{=UkBezUH8Y>W{vNH$q~ZAPlDu9B}q5EL;fR9*?C8Abv7gorvzD*nh+2L5s9%)s+){`(|+sPGsn}cDP-rPP0of<=Q zTNwIx62tOyUJSg|=!RQE&(g-t1ozCKw>U~#a)YVNMwL77qbfIgRYozDAI}R=86*xt zLDL1MQ=Fvt1l)}Kf(P6)otmKUa~N=IX_Tz_OfEHRuLEkjE+M*ZDs&c~*kJfzodtvz zs~|yffQ6B$r z19l56fLQk4c8L|qW~n)Z*1dB!hsZZJ&{n>E`^I&`zKi8S?fb*0owaY*;g0QFc{sRz zLw;85>&5I_@wH;#fp6s91KbbMy$l_*<8eI(m3n8x^ZfDesiib$v>1wY;)GC+?ranX z%atcsnC_6|sZp$#jMpHR*nUFc`&#_|08i@T(`dcrxr5{v}llMPBZ|QljUXxcZ zPxbQ5?vI$={71547`nrtc-`wB#e!GyTTHQwZO_R2IjBo0fkJZ`^jgqk5JyiUj)MY) z=ndgo=?u2!qVBVLwgxE7e!P={@~&sR1%A9Nz(w8b^gfKfsLQBlrqVd@ll^O5Fxg2= z_S*Jw6$j-t(Cr4r4gUJXpgbr4i|XTo>5B&5GHA>+On^-hkcbtrECuEYido9UY^_s? 
zi4Gi#9ziioWX!d7?b^KWR50E<8E;xJUO)VcPPL8y$R;W#yaW8!pcs}j=8sRci@$eJ z-pP!&qJ7>&wZV7?Fy0-0yl^YOfySK+b5nAi_x$&&`~N&8pUb*L-3_deKqUsORga1G zHs4K?C-zf?gBs+*$tTX_IWrOd}VB)*5t2 zXv02vN66ogqHeQXl8L(4Xt$D&c}IFa1O?+fSOaTI(}FnWi8<-_qzOW0jALhuW2q%4CGHa!x9k%5iL$1Ny3EY-=I9 zDU7Yx%O$s4;5UM8^PRcPiiWp3p>cSKA|myC@#v`I$k zUMKAlVm+aniW4is*vKk|SZU=Aq>SkPWr%#^KmG=zrN(`ewdzvU6fbV)Qa{}F=R1M> zL?y$0{dilr((470@y?K=3-TYLCc;6}O9Nw8_ox>aNgZJ>60KBPy zS3O^yCzLh#O}ZVLk9#pv&KNd(x81Iqy_o(2#wPE$!vF9IFaHqAznv{}ZhT!79PW~D zWw`R|q65Cxs)v^g`4GKDak zf1x`lY#UQJpbG8;O~87TbyC57k7}%zBzkMiz%uM+3H2H+CyN?X<>Y1XTowKO1p$G- z9+KPUIeIQv2NVSJ+glAve(Q0W=C@ZqRPtM|PgHqU#uxpaT#p@MBbNH2cf|TN$RpNt zT^_M&x|Hc+jm^qu4({??+|AF=q9)e@EDhvF4KcP@kn_NjtC7 zX`0Hw-B_Yk&rp6wI!~|7{&wp128r=)zW$C`}-let~=rzzb!KniVOFGzLoQysN;`# zlPCNlp-kOz0(Y#=_SO8Nzz$d_hM_S*m@`Hh$x z8>eYGPJO-9dyS?x{?N_$dT4jw>uKHn`2C)jD%Uvg@Lj7XP=rhTIn9+4t$ipF=d{|y z-ZO8x-8P~7wvCumxy4#;{uSt{X7=AmHy2Cv7!$>d(Y*4eJES zJ%U~Ju17WAqVCQEN+bk}Ce4xAFY2iQ!oTq)Opt4wmbs{p+!7KOuEt{|XjQ;}^( zF2^^q>RpZ$F<_TNwlY?>@&Pfx%J%dE7& z#lKPj|Hv-!r(aa~qvi3rC3IU z<>(6kT`dYf(a839iGNW5|C?Rm$G&Z^(iJSM@^%HU$1-WSuHn%LC2A2M-q^q!RE%k(}zB@>*V9=%4D;{YLsRRa0eR%O?I*U0PMBESs#8=zm!0B-Wj!okX8y zN+tVQLv|5!lEblq37&1%UwvA;w963~m!1H(G^i z!Vl-{J!~S;kYU5^ImGS6O4%Cc%sHDJS24R=l4C?=(*s1DWTcfwT#_-T9})W{Yvr0^ zx=yY^gb?BwQ9;}ZbR#g8{@wyZyx@=$;$ge!uPx@jtAu#QQhWyi{kn(b{7y=e<{=rm zRG9_d^)bH~H?d->#Zj6T!=`uXPY0ljiOwBa4lN!6lQJ>+<&fH#2+L)6#W;M@pZ5Cj-{jE@&Um#m=eyp{;@(D;l3 zzm}-!McwZYwf}rm2hS&U@ci=*o}cajU%2wT#9)lq8^TBFFByzh({@r>a!Z zTjw(+bb3NlDKZsAkdDYp5IWHCDvy@5;^7dWBBC$~fIk z2-Z(JiaeEb*IN7RopX}3F!#>*eYQ=`K70RbKh}F~SW1kwF|N{{m;@%UukATFC2}^5 z4MQuGujtKVaztk#8roY@+D5lt4 zsW#7@bdHgC08=61ggk(lM#lrh1A-HnC#YXcu^U79O}sHspdxF%adW8#&whMS% zo24cuB&n2`SW-7@FuKaJNmgWIz$%b3gd*w;6#&mBUzw*J97?~59m*jMvG9rV)!~z* zIm`yJfOra^QEO7K&sYP!U^2TYzazAEXsz!Nk#pMyCaYO~+%rR~1AFIVrQ9dB%YNiZF zNvcKjN3rT4SkxaWA{(r`7$}k{`uAXqdhoi*vwkbXF+Gw`7kaZpm&XbF}PE%tcRU$)4F; zv^X`)BvVcEaO+Lg*iC57lUvSB$Vq!LZJ<{Bv`AYtUugyDBUy2QW!I^~gR+ZunYA?p 
zDPqPPR^0Vh%5dga=Q3SN309AQnUhRE&PfkMRv&wN?+fgQh^Ov;waY_xjsbv3T| z0<`ifR3;R7SH%Yki+x2eAX_25Uh3N%fnPxfx;J~k`Cm)EaVA2eF!>%BLtz=ofX~-b8ZYlMTi<2F zG<1u!gBhY^{=f&IZ$1wFEq;mI!Lo3vqRm<(*x>5g+R zdb*Ul*VBuq8{;}WP5?8Kd1&XZkkHP9Yqii$Ln}${mn?aoO*Vzw@5DXPhkyG|Q92(! z8`_mDr5?a@*&F_-%5P1>$$gU>vNRVzE0nAc{DJD2P)-I?@lUPFGVmaM-<)s%CCabX z9Q&Q8;|DfF=->~L(N_&8@bPO{q3Xr6le7m^+OBXG;Q+Rsn>kuJgoDc%u)^8e7wCBn zwBXLTMj$C;G?nH6o5z-Ggg|fet4S}=Q6Ie$6|B&P&#f*SKVP{J^8BLl^SPglpO1w+ zm$5$Qs@g#_`?HQf8az<5zBt6tRkD3bhS8iDzgY4yY^ z0}?3sb!Cmoq)+gA3VK$YR$%g;wirhPalt(oI9<~zR%UPWBA>s~7r8#%^cYJ*#`t-) z7^5A<>Ouywvq9FB@Imf>F_^y-RE6aJHnJUD%H_aB8#fIB~wJC>Yu)+ zg$Yd-U-XbFdIB@=eKsDxv(@w_+W~C0I!rdgEOdE*Rfx+ztC=iMF7d~`;@8!{M8t9? z&WI%iM(h-V28W{dI$hl#IN=_hw#4kWB z=V3W3$>y|Y(91B(&o#ni@W+-->L1kW!7ttrJYAm9*Orid~tok_x`T{wlDd9O#lV#p>zA zf>XXsTa9xNPEO#;3giVfFiHOKpeyoe>|e9+P)eYc_vs3<02b`zq)7*ew8OJGX>I1{ z^QV=f(LuZXxm2KP7+Yd@tnp-~aShBDJ#CZmfNt%^59pL~?(wiiKA}}aOhkW!jz^HM z09`^?aOA-_*H181+u9gWfho3dDns?nMkA^&0@dpuGZqG*$A{-_m|+5HTYoE0uR8b<2leDS5qy6A(7UQ$|VK zUhrjLjt^ElrctH-MQ9^&2cN@{`)Of6FW_@HW){w&x6;9egHwHec_=%==iSBg1L4p4 zem*YAttq*KMZLS37-Z0vmM!POmISS?--cHn6w=g1zj9Wn;CZO6T=e|`-N~D=9?jFS zrcYu5`hqoQGilO+wjZf#g{ISNLASGKFr0y(KI8|-7hT_o31WEnfe*hR@!`v~y73dT z?XwkBv*t#r64cRq=ymjeEVG!(%?*~Ger%VAn);u|GfkZm%!)oi0-g!hTQuB4_EKPM zrU{vDEdD*AFa3Q+pZ}qqH^Dd$?4@OSbUI&_x*7UOHu8IiOF2q{5?QiCWBIhXHJ1Ot zGGh4>h&R|JT)-u0ew=)|dV*XyEtU}j|Ez-Jn7fB?v==z~&Y-XW{<(G)9lwBh_MpIM z#0RSGWvu|UlzSL&P3DK~P9jQa z18kCQlYOdv!0VdHk{p=YNCHczTjT8@-%y<6mk&5W+iHm*=QIv_T7e+w$Xi6v?Si18 z1^m9MCh5r&^hiJ41U;)?Yn~pgxYXz=JKr=t_X~Q?)DS(BrgF^drVDynGI}xut-k4D zCsil^gbNV`5%Z9f2#f_0m>-kmKeqr`R36ix1M6A{k%V!S{72w~eYM7@g>~%Qg5|m} zRcygFdMr<}=`}EgYW%&mD!h@F;Js`{&o6zaIo|a*%nYaW@3n=@K+{Ah>|lLy>wfOP z%7q=>bEz?>w4~0{qNl&1J!=6x{TA#o)fpf6vQ89&Sj32rl+xeov6P#uF%ZGNyMuYC z!3^|c5d;-L#Pz9UMG!iFw3L!&ja$O~#@hUj6HYY2>DHe%aJuv)Kd>T-Dh_&IRh7mh z)t7;+ZwIoz{DD}Hvn|NdD2{7nX`G#r6SY0{(L7JHy=pPd-GLm__xP{gE9S|+LHGDB9+QLoE8 zzjm1NPu}>6%UwSF?K~yPr8u@*E~;Px4To|029yQX 
zL60?F(j`ncN(;?;Mdvy?HOeA#LUnl3!th*r|6jq=@n696@nRjG?8taFpA5xQ9~sYU z49`l2r<~)Fzd$@?H-+Z`Js$tahLH94!Z;v*abY9JKt$wfL^%39y z`~tl%MSg$Jqc?hg@dZ*kBfRT-G(^sz+r^FXT(CwCu^6iZnDdfSE?=4cv+;eQ|5P&l z=g&g_nG~k~3}n8nl|Bz68%%s&Pmui^>^-TIE1XCYi=uV}i>3s7ZAMp)8jd-sZ zF;_RD7%z`s)u2;k`kf)ceDz}My92C~AfR5DRuRqcZfZ?XQ- zR1E1nDMIJ_4K?eBSRTsp6eN6K;MQ+dwU2`NCv?ARpbEYZT2CSQ-HP0u)THBh((lKa z#XJQT|Dz=7D9yo6kyLsgw%!VO6d0Lj*w1YLZ`~mF|4z35$BX@+*R1~E7!3JN0V^P` zxD>{nM@+NW?{R@b#|mKl$dDw8lDG_bTQ49Jp$ka29+auSn@3m*%V{Rh`M|P_t}tsS zU1v8`2VS4Ra)m|+{QW*A{9nGn@!y)qnmi$z+|Qb97fsGj2;T4VZ>R5GpP)~QNMEaF z=-WP?fiGFl!B6;z!2dFigYT(L!9>x-&6*q&P2z5jz6nI13^)q6BYh@M??~kJHjtM& zpU8vL9Yv&RCpuZRKQzB-y1ssaf!?@|gI>CWKqm^&gEi8Zi6;51iCHx1H9p)w5HT+S zi2RNb8CSd<@DQO}(WpqX?!wcST9ZFgQquAagpM~5`m92PK1MFtY9+_YT(ZErB+LNE z(lZQHKnM1@1y-+W_OznV#=VNkl;m%Stw6=rI3I>QhU&onag5^E$~eVyJ|v3c#&U|& zwRtNMO@^^1-#^crbbG;YUU__aw78#=`M9+i@s*-QgAQ+>RO{$@F;H-lvL7gLnjfpA zTOHfXvX{k^vHP)Kn@lui7&q@jF<&pz1QX)o6Vhv5Fm6lsrV^E;8pF5k=i%nq6{Sr^ zE;_lNKvO4>-?y6MnIl&=2HmWG$V_1R` zqwaT24jVjw7;lI-#v6_$sq>1^hJ}3=4c1q~G;Mv@)Svna#OXY*)*yzG2O4^?B_P~S z2AAM{%DQeufUS1ZD`r03EKeDhxkUAJU|s+=Wk<*nFdg1xnGO^^7>(<~S3}5)PuKAM zx1xgfU+ieU|5iO8CNF{^Q(Y!RZD7$?bln$~Et?Pu+cB~?S4-EvR?lih^|4e}Sfq2$ zTrdVfv!ht5mF055q%R&#SrL0wdl{xzM+%%SoEsr&c@)2gQ>Epth zUf8JiMk&p+`0fk!GBlCi_baAAqpl>cU=Ddidulh`Txlly(*f`)KpS9w1f>5(O5$jA zD>XO`s6f#y5bf_$PBpyYgE!RlfN+B@<`48;&CT*_k&-pY8CK0qNu5Q(~m5>8z;QCF|0?r$YcP`9-t0(C1(bksEg zTif5B)qH>N+DiNT{tI%Nmjkz3WVrCCGYfPQ@)ekS~q#12o@&mZ>{U6yS0vK z!&buOOeUw4{3cbj^T3xQlwUD??+E_ih2fk3oZ$bXBHJq@=Fv|(EJA3mq?m-;D7u;m z+H^^oilI3cl?zHNlvC%PD&|b&vBl)(Gl{ubGm?p?mSXz+TL@m3fae`4yo9Ib)9YQu z>!U_CCC{7n=Ss&k|K}KTd}inzpHo%)dap$*fYb5|3JZxxzw8y(#Rta?nGwR~z3yIBJhc-hyn?=a6+lJ_p1RG{p^f|*_o8UJcauD z97RS+o7&&%NeN-rN?~MXon1s(c+4fNOHS#&h^GA9pZI((93I>ccz(5LE1a{|806*m zH_$ya3+dgB;@ttm4QE0-|Bt*a&i`h1{&U6oH@wiO%7*x8SS}an{XKN232c4(XA*SA z`5@IkvQXq_DX_j^me!g>0+Bc9KbX_KEJFV+gZ4Us*)A0U>bJXynXX3tFz=gs$6IgV*>x>U#EPZ1^c_< zjDCJ8Xo}uifYzbc?5h2hbr`|oC3 
zOpbX0Fd}1FtA~z&I`;`XGtrWAz2QDd$;_5gGY3tyi_|b*rWCMJ84Em8oRTCzjMX*S ztcxEGvn)uEE-@}1ZG&^MW)_b1sixEk6h- zIX867HOuJYwBVQ(;bX3vsr~ykczzBG=67-lvD`BrwQS-hZZ92?WXjN_+0gx?1Fbqu}|Q*PFA?EjfZ z9Q&FW_N}3z{_B6MKk4}NCjDgOcWheI^vO>%@HtCG{tyGd|0$vWX!P|KP3~Y#)`=!3pZ?GJ z%Q{bs=r41qc5^dyZ63@(&wQ1G9#ux5FAUk_ZtVJ)>(Vh73onq{rb@NHIYo@rnYnGy`@v#2`&9dEc%0;iHHZ>U_y3Q- zrpNx9_t&(T(!9Us!e@Ga%}SlW=6{L%6ZodetN|ELngjwAZdfWs3Ir)yEoz3Mwgxq~ zy`eWWfg<}T&>0kQ+)A4i5M(heaQhgSQAcOSZN_ELSri=wT$>iUP?iEJn+oo?Mzo-o zg2MgIInTXmy1+a0{@?%i@%z!-<+;y#p7WgjY%4pbye82G89l$kQ9GI(1s~D0EDcV} zl?hsYXuKHhzb0E%X=IY2g*TkvNPn-6cnpaTwq7TR4cjoFIM_4#vjp?7Ia_q|uyp8U zOyK5WGCWl0-(oVgACd|7ol!eCMQGmru%>PAgpRkrVy@{5zq;jHuor@EbAX$E9)Bc` zznnRowV4U@xIyRGfks#n>bybPX%c)tx9j^v{iHv#8d37*9<)5CRBZ%y0llCT zHTD~RN{ATM`FlC>`}ey2e$m;#XVLh#;cmaZ;-?PLstkQsb=Dpqt7?&j4O1MUfv}t6 z%2FU6Zw)4BvO4VtU8qw;!Vk1Gw7DN9AqxQ7&Gl#z(S(2Yqf5~dxKQz~1v)mpUVU*V zS-sr@&CF3{_nam<;%zCl$`R{fIh<_@Ud5jU=o#{Py+|DP2U0tqI4BR@Baix!Bh}hc z&i5&E)Y$7J_aUbsH^zB0oPs{|0*`?tL{ZlaPMy~CT;u!U!T)r84LknN#`jf8x8u8^ zC1HH6(*Mc$mOb>Jj<0$9|89J8*YWk}etgTmPZ-~M|2N|!z3hK9zFz;s_%7*od`urw z)}rCvvwa|L{;-!H+gB75m6M*>rwlD+ zI6SV)=K{sZWQW?X*kthiS#=#4e&|y4exr}eo~Az9<9)m~-p6y=M_j&Dw?B{)?GLbj zkKRT78`P?#_6B z26opUicLn%&*J_0a+1-Xp?ZJNJ^{rUC~yWu&!#xIcJ|>ryR}cy;p6D<_n2p+$a;!$=RV$sw{QHoJ@V<(E6SnJOuBdeB_xd0RfagQjR; zqLBzpQR5wFQSQ8YM>ooyC+}!i?t~4|Hik+szk|Do)~hdV=^{)`*puDpvpBrLmsQnV z%+o<%bzPS}47r07p)|xk+|?iGxTLcUr1|!42J-&x?E`s|T0dUXotCPb{uUekb^K@j zz4i8P`+NQEXX$TFcl}N8(%;-}`)kCrJkX+%ZL%TqDIaIzA|JLIEn;F04vha?hk_q` z!UaE5UAjF}6{!}VquNce5i--W`*oG#R#JS`NBC><|4$=Ubvv(VS+yzAE{^Q>;t3KL zY_udMNW|KIhxUz?+!6_7gYxM{0`TldKDg6F-NyPQEXC}Po+9V@0 zA-PjLy2{w6Gyg>U^oxncK0QA`GPdbcXUvo%|45&Oad%gLyPg8`*#7y3-th`d7A~@5 zl&A6_C0HaY36!Lw-7#va!sDw=LisiX8?8!qM@x=Uy3;3rkLM$B2dM5cp=95KE0n=y z{M=61`;2ZRY#Un1Wp+h^1Ih^frSyfz1qaOVaiKrXK?JQI74PFX34k{ur~_8l+)qbv z;BoB8Fnx-O5~pZ8#Zm{7PqqFv4 zSPEx=TPRrJ++K$@0DLRf$7HSA=WZ(b8c*Hj=`wzBfy|OW>6)`+;;p*Q;c&?(fUMLz z-GsCG05&|uqZ7|rO;pD-=;w+09Hm0vh0mz>Qqqt}YWF&ZZ_!LR5^-W>tJdHW!zW)q 
zOVbj;wL_@5WjsLzuI;?JH&8aKf85IfBSXFRaRQ7+*`hx%7jWiXi5D5li>!|q`D9`| zz7oO@aJ>LQEvE^B928g6cH6%V2r`iY$+1Myh)|x=1%56x;HL)R$Di&|Z+sAQrbQwS zb9?4hvh!j3sy$ky^U@x5+*;g6J#aCAAG6jaELIO(BTUR$(MoG{xa%6da7$b-^ar#55lJBg`Pp4gsIRN#zF`w&UWQ_1JpJw9( z<)`{1){n6)8uZkwo8_*#|CrC-d`osa{lR*v<(FeIPH%Mf8HHowBzg-T)Efjyk|Od5 zO1kcgHrsqBJ~jF<2Wpit6oeSA8X+rrc3)Jr!3$sXNRoOPbcOXq=|R;9cnte`w1iCT z1Uq`ti)@f7chn#=is(gU5IRs;ec@(v=o9W>dJg-b#P~X#sM|mb47>f2OHsKPUWmrn zVJAFV+eYDSQvC|Qf->taqbceVKe(HDIK{@o4szE9vv3mC3$98sMJA0mF|PLHZL36 z{f12)Cu08kM)fLqD6zb8QYMP~nYcQfSU@OF)RFq6L67}^|0iny48}&a%3sYhk$8HzUGs8 z;}r(hff0?wIb{7rO9Rd-oxglSufz-qdUj)_0H&3yc#gu2QTFEB+eO5#cK$9w9%S|* zfpH2Z!iTIP(q>Qo7HKnodLcKTr#PE(k$Nv#;&&H=FtNVX>j^hbanbmfka8W*ewLRy z8URuVw^>4?*;Ef~0;Kh$pTT4#H({>@nGuVaMKY$e-Dinm>XKM*e?(l8bw~_HM~DFP ze%f!WMq+!tfP)voj1Tj{2KF%-HuN4*t7g^*M_C{8x_6g+4zpQE9uGcP_s4+VVGkgo zVO2Q6^7jwY2o&+e41rz_-W|N;9rLm&G|~k3PdjPCpM>Nl{L} z5GJ`uOay7@G>1L>4?x(kR>I5GNHSd_N0KeFyS`+kmXckB_15F+50ou6@`*$8s2W+p z*mCVg1P^H39yTxT|Cp!iFDd_dZ@>?2TFRI6I&pDuG7tF&#xv=Jw<5MZWD9iA1JV7d%Smw2l$x_A13-bcR0&V>0&YUyv>|7hwar% zR#rlx>yT&i*eQ)S2C(Q!+_(%6a_Am*zjCb{nPgW_y=Ch5bW7|Xq2@P-07hH8nh)!M zdiSNAgwT^{48F`{GUN}4;(V0PDF^M?k;=^}Obw-lmpV-=2B`LZW|Nxlp{P=*Xm^aQ ziY32lR-0PP@JM*1s=TU$O^y8y($Mzz#O43j#bq@POnE_=N~kzN{hqWivM@=C6qzaV8b+v?VwZDrQ1c(fIEoc}Lf^*HaUx-oW-NsB07Frd zg9uzsDCX-j5vU=yb#1rtMfaAF!!RO7^76+U`JxxaEZ)i=DQgX|e%|ud;BNql zO2UR)9BhAZI%@(XGCBN>IXDsjc*Cd6L653HQTP7fRcr5Zgil+7m&=iPG?ehQIFwK} zMz7mTaEB#Vy+`{s7{ek9uI26Gsp1spiZ^0hIyjAPhv(bj_oiz*#UIx3QAc|F${P)| z1g8dHaE0)LHhSodx@}p!I6UleqhV8NfaXbc%QYD21O_W?nCA#hj8Be3OWAirl4&zn z3BudS4#B%0!=PM98H}E~h6X9(4M#<0^}%cOefBh57;}!+T^;6p)eU^?It;^T%gB|D zyo!13RW!oEXpcg9Pk7cm!DKRUawNA!K5;NGw8}rK$`@X;&=eZYMI~~oPw^~-|HZzD z$KgA%Umm>+1Q)Qc`LnL5(MiGB-C^jnMM&hln8yt&XsHVd*KRr_=7p;DPGbwqO4 z!y{z71PNHLy8vWY%!2<#Qnqho@Y2Y{U*YS>DpXLTr?4;bAfCa}I(tMM3=f)MMqKLG z7?x`kTxP3|*G(oZzmTre0pAk^1#IySb;ZSTlhoTgx;>xGE(%JX6xp%OO+>?~es)~F zc}x=8?NP>6fZ9P4+v#Up$xNwU^-~U}{dxei;RG4CBb8IbPhjHOb!%Oh?ibKbmhr87 zAggL^uH#(_#b7RhK2_qO1&ia`pPri-D> 
z_H|Q6c8c$e`H(pd5^*RC{p^@}_XIQ95cH5EOImm`>$lUnGxK2_c}9S(aQNXF_hrsc z)uk-?4&5=n4#n#-!J0g3w-1<*nb0ZJ^6gST_|^AXu)JB9nkdOc_Uk-0Ib2q7++D;)P%N!YeaOLIwHivYkF)wX~F+Sr~oq5}W}b ze0lAD2JC{L@ea$355^BKNho~cF!htdQ<9wgS`)Z zo0(*)3vYpXkSnSS*T=86oA|%FaFc!|hvzReQG#(_c!2}DybD%mAx8Z}M0Z1T*?zOm z{IB8(08{>jZDV1T-e6f((?-{=P9pxHU)+KP>yg>qxxbk2HuTJMJGGdzh6Vg{2E+9CQqwq z5GsZvGCWa3Z6))~BvkZ5smz2~xWVBERtsL?uT7}<&YS=x17{$V4EMt>&+y|u@x$)U zMmX_sX%5`FV2DPKo;^Z6((fv^ng8dnjs1|pB^Ax%4NebpFtJ8))ryg(&>iX!fW=ks z{gKYVL0AQ>$ou3*t)2&;#N2Y^F~0XE+R={~>CoO783K6#AG4;*?xs+G+3m51tXWkt zpfycC#hOkvg?DM@6@_@P?tK-i^2@91uqyTWm$5z!@7I&+6Np)l*VoofeOSr;P|1T( zC)FLO(1I6Z*iUrfRxbyDWpyngo@|!7ki&--p!&h~w`oZxQF#aV2>jp%5$h_9-2s=z z_Q1WSU5L9HJ;d>Ek+D%eL|EH+0AcMu0&bLUeSeW2hJhCLhHM`z_2^MF#W}E}i2hor zj{A}J(y=S~URtQGyt1QxRa~?Zv6bJgb0jCTsy6dbJEw5Pg+EluT9p(L)8z0ej}-A) zWnn_p=WdEjN%p~&81bEy-3Q@ewaNkPQw@kLcj_y`as)1nR@s9JW&T7z#?J+P8; zDi8z?G;HQf7ZT?kc>+$0*sfWsPovMSHXP$A#4QZaz8>56QvgC!#oz+_m%TLI*D}bTAg^!yn2N=PgBMT*pLVQ!})b z!Pn~ou>@Hd4YfbilUjqKN+W2zVF)(*^C<@1h!p1N_opP}Ydv@VuVy{}mlHRhX_TJ- z3&s%r%ofl8H41xO7sI(TFKM5<2qqDsbYM=x0FCMcqc}@-7KX>q`ukaf53oZ3Lk<+e zKCF^qNo8%DJgQoDSBC}{LDtd6bajGNMRWefTk>qFqxVwmL`Dq^a#;IP(QwU^t6E7nX9%tKmC`msX z2x!3*a5n`3NAB@OCYpV$8pUCK{HzH9;|R3o@v~jfZU7@zd}y>TN0MvQy(Y3E+|+I} z*Db&O-;U>rUySiAFn2p1W5Fn{et$W$y6byCK)cUf6$JFhQ$8fFXFRU%yOjAQWvVM4 z|H_5Bj!r&C20>-gbxQlmPx+)A{=6-#RVjYPAGt;W4C7aptjDHf{W#Wn{&z^y00g*e zLPuft3y}_*#ih-%x|Y99g}1?@emoc_z{aWfvqoRE+2V)&1FVR<-X}=)as=@f=^~e> zFGTXK*bntBL9Y?N@N9kQV9y({aQ!%M_W_VIT8bRdtu%_%%|NMoDr*yB6FGhCHXtZU zUHZXjz3o;$H|xzp1tT0sc`p$m<>@E{ z&{Bp|SMy*d|K5T+3!Z&bffm^loHYnR)Txs;Uz_#iE#b4}4ysyg#(Apb67Q*jxc9_b-2wtXBLkj{mvdagEX77=hjf6RV_LD!e z6+ZL}6Iv+8gE#97fcmnrz!+l%{>-le?yqDvs4XDme}&D%e90`i4GR}*DbsyPrag-mhS*pOM`^3HP1` zG6ltjr5RlQ2CPjHH|R!5T%4p<)k`|YcK`^90EOnR2D*cUVn8HcBMDM8t05u+mY9OG zvCR~;%a(edyT0ToOoI$-ZqQfV%Ws@mu^)jF(2~1~T$=stcw|bAAZ?>_8LQoo)uLYg z094EF%8={wGwATGI2!cIkpNH@4Q6##KZF31%NPUB{EmD!zmwgKiw9F>3}7mJ^18BK zK7_2F(8H^uHT0G)aB8QwJlr0Gq?6n)&#%Z%*et0;g96Dpm5X$ZZ86KwZq4xH6yb>2 
zNg9gJT^(RY^wAvEK}My~Y?a;3FdEomt^f>Ukk1&#Lw#WwEwWY%9DHklEhZ;b;CasY z+^wbLus5Mh^0Vi38OJW2ipfM11yqr4B-CH>L11_tQE!DDOZ3L7gyLXj*ie)ph4rMh2lyNXM zE1g)w92_kdVV&~1ca(a7hU#yOOwQcxWB!i-w6mlipa_JbUmWJc*|BpE<_uR0GoZMKz%be(V!Hl&5D*hj=`;VrY`2x9J>8*0=o z7sl;lJM3@TC!NlW64)F|?I@F4l;KyF0vc;JqUE#-SyHw} zZidDNxy&vVK_Qx;e zS5XZ*YDK*2!|C3Whc-QR;=6h#3E$qYe1KZ~?pER6yEVk6ejS&#N80IxOOangjk_Pf zJvA%7CLx z!%e)G50TZY4$Vjq>$)Mq{^a&90!{9fEAs3#Y+HbR>g%&Bz@_-ij+5Zq`?ZgK4-*vr z@{All)7`l}!>*s(k?}3atihCB?Q?$@a?nYNzDw%YJ%BVBZJ2ayFhY$RUcF6ovA_6b z@H;tt-F0O*;#_bjyD=TOs2f}WcPMnCh-p5-yUU;XldNR>Gmpyp%)*xS3HdwxiVL66 zmQ#Q>vtA5jd3Fa`6>;`m=Y-D(fY7ZOx_O)Xo6;*_!{gd7%|J!GD!c1Sj>eou@9=0t z0HBEvFyJ%l)JINJ0!)6A#}r zsySpd<3e)&D2b7&{J1teK*HCWLh1Ak2B^Z%aK;DlFW1nQ{gC7T4$h3STAV-_)7I`8 zl&<`2=lSrwW9BH4eJrzM9PscYP*UYSJJefx&_>z&C4vWlx=z?laG}Cj2auiW6+N(q zGHBO&2K(xk-7$oYI&Sr?Hdp~VE;OLy;a;7fkal!WD=9?bH6|WNu&Sk(`P?5Pe&j?gNu~rFa>8}GV(fJ^ zzSM{dm=$~17?zIGh-K<~{(>Zv+Tcv^Cr|B25CNPnMFv4urofsV1d>R0?-SO1i45FP zQt#}d)y0*3(uy<>5XV3wIVJX!>~10MSQK)9WIF-nqU{}EU4W#_PhcRvOjwUrzaX{5 zX2Y0Uu0yC#-UGdEIc zF^c&@50?r@-C3b}^iUGp_k{3V&e*}$TH_UjCwQbN4~ z6^DfKtyH1U831loX1M^x*8^eb0n&CN-~}0wW-fqYgGYFz4oGIRg?yYZ9(2k$af#%;UmLDL7A@ud zs|_16p?rr)KWP;^=WY?XJZo7WwB>V-L|O$tk@+XtY6g8`;pa5#^X7HL!)z#(AW~Fo8cEcWe^B=$R{Ciu+^XLX?HD7Y%b7$Pnqy+5 z&>oqXt(NRZm5;cITj~PFZd3b0wc@+nsd}zm1okuRHihB@h|ALHh?zViVPL2<3?DUz z8aXM4D2(|XcFZ$@n8LMKLzB1$8+9RoTf%y1Rc(^!6`vCuRRpJ1t{9|3p+uO_ku1on7wuAS!uh&HE zYlD)|SNjDC>y@onGwP~@{6i;fkLxwSuE#~g2nQ;LlHiM0JJQ^(v=H$~FFjTs=0CZz z3xeYPyE>tNFTDv>U!2&#g(DOC=h6EYzOs{jL%Rb34@!RhTvCgn#mJ&`WziTQAF|;R zc%4OZomhydOIb7x?qrI>)-SjxgO=Ux9M*c{oO*Jy+)@ux8^|etH`8m)o;gMg%M5o=hGWs^oWHk{D-F^ z7ExY1LS>z*lb2NqT^3}kp3BT8)`lkQn7)x`d(cwuz5Qf(CYpnN&BWOj`dk302t$SR? 
ztbQ_*y3K<*H`{@QNRU7K9BOM1JP`^~`FpiT$M;V4L;gO4zpux;j=HMwQY#cVx#Cg| z|HxYfMgoP0WHsCJpegRt(s8|^<=C$~+T)^6je5N; z(H=K`mT~tFv@KBI2Lha%|F&?VSREc8j-l%B5c2_8uwL#0>+;?>2N(1)nNasW)y|^P zHfy-edXHUxTxNr+F?rMVsbLjh>v}udxSx<&QutE=XwUjO9J@(bV%01y;pB8v=RD6` z-;lr`d4QIjmDTDQdz6$6Cw<|aniRRyEaj9smdk$DCu>j6%)%8{1XvVPm4uv9WYAc7 z{x+}j*mEZI0+86e;+BB0VSIy7{s=q~Eg$&8Rd)Bx!sR85u;DhlP(g{V=1(rveU?7;37%TqN#lUP@z2jM%-VzaKt?9KMeRE% z$+S5Y7vg|^)EDb$e++5Q{ECZ)q7*w~POhGSAhsA6TCI%JQg)rQgH(~M+y&lc_eS4?r+ z&BQ!}x@>PJ(91OXJw{slXfb*B7 z1kb0e26_H@gv$ZJIkY-dA^-{lzGwNjF<7>=pOH+?XWwIfR(0uXc)$rmAG@!;XN1gV*!}Z+0JHOr9L_O?ex+5RfCtVOpL_rEZ!ys6Ayk9y>~8v-8-`%# zW5__J9gMxdeLK+quUJf3dz!CXY3gLJ+D;6(-X*kQ*{P*$zc>j71t7?F1+8*KZjzM( z53+ezBU7e6{{!x&X%`hP-*^z!@<`V5?a4_tP^-}BT0Xl*KUuW5= zZY1Mws6}~vy}rd0VW^Ycna`zOrZ-DH3;mAj;XxO*l=CmvT` zhfg)fdkEdjpzba6BvgwDwEK70O9LkS>?5CYgTtqkruviud!jrIJtrAup@im81nWVh z|M`z*i~-QZS>#8DAhP6TLf?kpdLK{R6(lFWA0@-#x5#b@T^JZq#p~C$^YLBnDm`P- zxnsG>)QrTvFY+j#Pu)V+ z9=DoB0aKXzR{dqL^&N`wG|4d3Jp)B>!F4Lh9^=k_e4sHdm{Cx`$D)nk5Fgv zTGW4P&7|k3&`@aQEpHXdcb}o$S=4nulxeOy6OV$G)hWIh6VtUSMNF@W-)Ct25vTI- zeSN3*K6;eVfi61;c+j_b^r#EUzj)$!qc>}hD7;}EAa8yRrweVvv+8|XwO@F-!52Pd zFZt9rzYkjHFSGkF+rTmZ{5@HFCbHyxluFExG@aR>d>)SSy-_~DG#*OV=TzZ0J<7Q-T)#{VPvr`av{X-?KJ|^jwnyKOME#dE> zbImw+p&xmLQ_fN0qJ#?J2)D}09D8_z)uE%$+dKG|kP5StO) z?f(6>x+6J}%;uBm7ow7d7nEEgD^HLRJu;y`7S>B0YSWQ}F1CkGrx8FN3Z5s2N18CW zfE@8TIX}%FHrnjO~$k6r&ifQp_6=y83raQ@L#J&+gWJS@7}unT{O@UGi&|x ztBCTCBp*eb;_N?G=AUSzKs66OEiuMMIIfYbc=p$Uk0@*!p zR?y2$4PM+ta=%a>en+yt9<6y9mKbrH<6cC6%a91~$p8Bq{DTLsQQzyGFw~*ocT~B` zEB8a?&C@i>?zpcjkHaSh^=HRvYF6}8UlNGGK98;4LPzO}ekdGRwHf;nOnS{sEJtNx zAP3wA5)-Xnq}QeZtkk14FfhN#H)@Zhh*tmF=P`YmFrj{#CFq#f;NgLeB+p)e%jAy?_GweI%0tAiT?%oJ`QnpRE^pX@-> z{uY=8H6_E$tx{qNmqoBCR&5)rUMpIe*5Fx=9V37IM{!vw^7MR46tDwih52%X^ zv0r3!V-Yj{tG-=!j=t6Qg1#-iNbeg>h%qsdT%0DnZ2q>l$&`5J&6W|{^v!tzJ>U1L z?*#GFTVzw`&>CHBq>J7~(7~_s?4xRdpPbH#Pn1_d4#f{DpCdkJ+I&3-L@)@{2ojsDP9`&AU!_;ALQM@r3AoVE&??S)-?{F$*R* zT^+?EQw@Fxj3gsQvLSRHW5hO})Pnu^rCYYHw6tjL+1q4g3hIei-@(sYOwCV~lJBT8 
zWg);}ar|5&iqa3gk@bM%4WVA@m6Y(Xz~h@g#p;{yvy04ZiMpHa8RSFJHsD2Nd_61- zU_)zcs7#?bFdM6rOCZ=jG>SS1oBY3Ll4|7x$=Y8TSfDqG}-eTI?eZBf5ZzMNC^6}Pf|krajx%d`|sx(rfE9#-Ft*fqk)l@i-G*g z%EkGRvZ`PMN{aW01P{FUAU73n{iw86ysE@<)8PIKxX@mnX{UKQ--oKn_9r((m-0(cuxP}HVR2PPtCI3b$o z1m=xT&i@GnRu9u5ki?|_U%;SeSobiPF{~2|_BnRWDZZ{NC}wm8MQ2A)Y+r{V-2mjJ zJ^p`Kga3a(=Ag88$fO191e=Mvf_h-fkFBM=z(zEIr9~(94?VQ{^Yed!#cM<^H(z$I zm=zR(G&AUwd`_qAQtx|)=#(^gmq4eSM;=6&5IwXd){D)vM?belf3`wvNEKP#`$?er z6bUqsb`fZHIm4EEPz;A$Hu>9!e}hthr}3PS*jhXe7{ww$k>Q0112=q*dfC6N=QyHK zo&N>W)stK(iTFEw7V|O-z~uR4BiX6!RZtdBGvmo5m$<(>H&cpIL#GbYvZs$s!G?Z9u$@s3IS;jr&?Kig<)J zlkR%%ffDhaY4HjhW;wj&4Z$U3?sDzmB$KeQN{-BQx$}Z6el16qImlorg}e>nB8`{8 zw_X5KS-om7EqXQukSJXex}1s~J&(4cjF}lR1QII$A}LqJG)=7>4wKr8ntSg&OqWt? zI9hcVYpKinACn_*(!@vp__)4IRi*ellRNn)k=JCtF2`8I1tD!+1R?wdeEe)#w_#Pt z-k*!LRv|uOo{Spga*3{Z?+KtQCfvs9iibGQjQL>`^nj`APLh5VIMl6h@n)gk+@^2L zaYCi}XPep7flt=0Vx4|n3VKM)87|ykMeI_UJ*&ZoqR?6rg${(iB#1(r|4a>x`-Cin z??>p0$?RH`E4#4dR-_G2i|*m~jPS7;_CSIpbTLXo>$oJeZTYuo?l{Sz?s|fVeYcEW z^FaK+_oUgRTdaF1fF`=fmr#`DR+I|tRA=7@z|Yx!Wvd?vj)O zE3=61=*kOVpV)o#0jF~dVm7FrGnWX6WL9$c&MwcigbBTgKy$JYu0Py(-&LUQLN#)@t z&|(r6X(85J+)<{mR*efBi~l2m!d)5YV}@`$=o2#_*q#!bZ1rZ=OQZ7bY;v-gSubWb zK)) zVOC8ba=jzq{$%-ke&rSz(&7x()105YYGo=Ad93Hf`Dtj)PBH~9Vvv-{R{#8bU@dzF zc+{_9*a*7q8jUDsJh;$%GQ7hq&z}h|ck>Aw(?pCZmn~fPi7s1MsM^0q*}@Lh1ivt1 zjgJ=Ba7kyA!0jF*3Oa@;>U-*O3&pd%fjrIj2>rd)er5V``29>$rk{e}JqzICjQFMp zIxjeECnqPF)=l3kvy_eSDIDcL!Vz8tq?dO;7OblCerqy85$_gB3ACyAzeAX$Xihx# zS2d=dz?>)CEMxk5EYOw|OlKa984*ll1;x51F?!E%^qBQPD$44fmZ89Jn5zu;K@X*yGe-r0g;UvuI{@J0TT!;FN#<_PXGLazEO?- zwS($K)Xm=>m@fmD{~2)kjXqeBfJFvQ8dhYA&PfBmjBzZ6Bs@&uq^p9N0ia3G!bvYb z1v~EYRD;X!>0{LZ!n)lcQr?^dY#xIhe9|TdWjjYqDLJh^ak)GMl>9dF=tjn|G1b8-@e|XSSSr|0l&N-%zSPCbyOYo9*5TdxoDTc}!`RrebGrJRlD%90&$xC?+|(_* z-PCQRql-ORyZzC@pY;U?yqoJmdvx6d&zJZW)&Fv0{c?N#Bf8d)wzF#1h1|L@nHvkr z%?l6+$XMbDCtjsI|AP?^H}AMPv5w}vpG}?eBW9?dk3N_EpO9$J*FL^erxL~ohv{A| z&EQvW3SsGbK!c5rjEB5KAiB*kg3cuqWZI4{q$B2lMYe9|eK 
z-TVEF3AuX&%~1Hb@nGb!gc<4>Uxl4i2O7iN=Vx$&cxL$%rE^g8-Tk_c??00}8tvy% z^3=wYR1d{%{?N~mBQ5kJ@qBeZBWk9uop2=@UA50g<;l*rZt?lO>tq7_BrM(GzFpv_ z`8KZ0>n=}|Q|7dTEuW&-n#IbNI?=&drh}sJ#z_pd_162E<_j-K#XvVslTVpomn}aV z=3{qWiGkdE_*kt^K|?j*VZ^!#J~UWdQ=`H@<5sFP;UVet^5%B$yeTpPGc=nWh2}d&O)wEcJm@3B&r9UH8E#UBpVhfy~no4a&PPL(Z!P9T5C2@L>Y3s64cZ_tdH z4h9VTg@=Rih7KT>dgu+Ap>=S)=AeSxCRXP~s^U7LYhWa)|Fh`g5JXc9G z#w5p(#ogGIQ0K2*$T_5vUM4-EQaqc|_ZgkCDV@c31Oqz2h<>~UYZ`$jF=bcCER(D` zQJhkP2ee5`(Rx#gi~jlx9_;WN92EN)&666**b>GK1o99bkwY)El?u`6{{;mL>&{rj zlcCcX@pz=;3>!t<)7?&Kc`smvTkFsuF;!<3BOjT&i1a()UmkZvh>GL1Dx~4;k-}@$ znICnqLyzy@%acsq_^Wi7KXSwbR5;#>$ z`ByKR78P?Bpgmk4J)U9i?z53V9pdHq*)~FWCxO94(y@V7X?8DKr9Qe#R%??{*IDC7 zCCUj-#%D(#L5|E8!9vbLSK!&ANW>I*YWrrP`|X1-0sx1@4eP7gk^_w6M3=q%g$ z)MV<2Az>=0^E}SxQ})gXl*j&7zkJkW3b$GASt>KzF=ioJ?2gDR%nUc|0NjU0YAI1P zL{)5i@HMkF>ZROkZJXdSmStEwezRM*zKIQDwkP40yC!s=9(Oez8E)Hy{L>)tgtl`1 zx-VQW!E1^I&W&d7dx2bOMt+l|=g-Oi#@rrLUUZR@HToV-k!7u#atky81$p#90;F*@ zI1)Lc2eXM5YLw;76YLL^d+2-h6`;Gva&jCvjT2NJW4%aBc8KAVNkJzHw6dxs42kX) zHpmX-T>7Ajc%&S@z!V$GH_jf6h|(LSsXFLo^-MD96?YF03Fj2WeL0>88`&H-ez$6= z%M={1PI!ctXL$+&1FZLCin*GuRyP7GKMZ)`3^j{9_jV%ioB-mmJ}|UOoj(n!>Q=-W zz&x$XACIvrY(AwQ_7mw3=qK88cZn&42i^aK-(kpBoB{6n-{Kdds7Da9eOlD-t`F%+ z05i0di+k`*=skl|9ZRC@sm4L@AJ^A6Uf%&5ukWYR`qDWQVzKB^kH}3Tibldo2wL@Y zg`THB_}ukn9nmLD!Wvrw`s^5=rK^uLE!nE2g&5bA@7?x5E5Sh`+d`XLv}_UGTZ9TO z4PNSmJ#lL)EakG_qg!IB6PL3$%Bs#Zz#0xxOMgK+q+ZkGp{~CkkApEsjfRNv*URI3?!{I zY+rPK)wL$aEx3VV11iNSP`U2);)s1k;1l0e_FrE5e+K0MJVvF-S*X^{lqlCn~YGxue+RVYb zxc_#<_QG~NQ*wC9g#O%zyLtq2*Ldu><*;TBHOD;a^(`ioZl*_K^g2W;P6{f93Z#0f zbqE)9h|E+Jiw8U6KyCWDF@pqr6-(xt_CyD z>6Dj&7A9A!FQ3#j%oYkfo5>%st%6(qR4Bk`BG@-z5!5Y{u|l&U&*_xww(I%nNSr4) zD}og8hUcG~fqUJ}hhIlR1+`(8$rLM)DSD+GnP*48MNAt+tO`XBO!_cEZiAK|8RrJ7?ClUr{PKLtxoho z$M|c;AR<#7azw6IZ=X(mw;&{ivQhZ7%r1{CrMQk4%Ixy?7GcX?XEG4MRVZU;r;%tq zC@WcdK3u)uMIF2$J5H#_2&qeYkjU*Ajq6En-~T#~vGERSS^UcL+(y^-c>%`U-6|{I zg?@|}n1u|_{b(bsc%i3r9PmGt4=0eVJ_#q{gOO6RZ|zlb_>5Ut^8uP$#q%i$Ua6+}}mKl?#epts?hQ%za0!C-PH 
zaqgg2j!ZWZgd9FYx{p3S(jx(rhj0`iX)erp^3PhGkNp^^_##vlv-(+x5BbgMm^m2G zK_u;GIdVfA!MzR;_d|u@IxX}y5(Wc&kx4D>lz$~!QM158Icta7V-H^QbzpoQ7+(j* z*CBEA$1Mgga~_X#5079K&u4elQ}5|>D{`=d|C)qOFZOAV1to%bTz!53{sM#Vb< zR`QXHXe9^Ndh<vyLT|Wy^0IJ0$;1|z_Iz@t1!Ag>Ln+slFOmY z<+L?+sUvRUN&dc8pPz>Lw6<{9jEs3E3S;{qG2fWzFhwwYTMwaWb&*U#1dM6dKOFJ@K44k6a^2UmBnXHg#@|re#%~ZEiZ!N4k!%SVV5^8$Ubd zMM>@0s%G@%3ZKzJ$*Oi5ZT*^e!vhMFp9L#Y@=0Q& z4y?fu~%%?VgJiJ}$qyQG?^-EjFgdjIF--2Dv|KsmOs*VnEkOKp{Z0`=8-UF9WV^cfCtMZ6YkKgZ@IAMff~nrhuVVvWHLEj?_gCq!vAKh=ssYRQ z(2bYTJ*9EV6 zGrpCk(1S5>fae1JUMI3$z#hoJ9_$j`yD%}*oH{u?Kh-3yTQ()uOa1me#7Xds<)xBi z7&$nG8?|N(!v-r8EW>J;oKevbx=p=w4X4d6LfQ;ILt$Dl`!(a2nJak!d=829(=`?=?*wHi2{)e!uyiT}bNBsZ%FG@0x#us;W%y zd+`5nS$pIYp_lPkLl9Z}Ee|loo=CUTiMfR!I6)T)#SW|g<|*xb2kCe?tQH?MW6%w_ z`jegy&v-NAAO(F-sU4|5SacB;II~Z`zk+|7^1M-?kQeyaC@@GbP;L~soELc8DDcC9 z4*G=dVuKdh&K?_d{Q*+7oVIm}4H~DfBDPO$-Q+MjS9>kiv$vPc+p8FP_V#Wa`CqoT z>KyIW@8{!lo@0E@|FS*DIog}W+Y6nez1jb5d(T+T-k!RT+WYt%?M46F_FU&^ZwYTN zFyid({c6O2*9e<&&D;Cn%-QF2 z^O^s0K3AQiz53nM-qFElAK&i5=NKQnA2OuVdu2*$NBQ#1u6_FUm-ar1$RTy?)~|TC z-c3z1Da%szo+YHJ?6BUc=V-rOA74Td2yEfh?gN#d3;usO2mCitN4=?MpTCPc_pihJ zCF(2p5yutJpJ-QIP&<#7;xzGSDJ}2CHG5r?C2mK5($pG1#%i-A4dL;O8boEH-KR`U zT>uxUaN+PN*E``i1OB_$x3Zc(Xh>!ZPTy#QWwBQDqJ{ZWs1|Uh#7G0g8$fpM-e^nBPpT;rTlOy zD`-0dvw`&9&u4*YbWo6(D3J(SPHkQ|FVQC}Ec(%)DRi66mbz3YMM5h;-BC!ESe_(T zXxvNRDGHB7RlCznlE!Pal#iRag-fKsp`M;apE=Q|D?)*a`1NKe9_z2(Pgb=2QQ0M( zb)ig@JNff^Y7%9-wo%!7&MdUwWha@>k5;hxeoQd6fq0*lXeTibwO&r40xgIomJeM& z#AbS^+p90aZXXJ*&{8fmy6r~**suO!DsaCuJqVo9&w25hGiRv-!+7{oNgGT_tG0ONHTQamjC~fI zXNVGt&L?)}=r-fQs2#-cklcY{&ZaDizw6olZasfDJ^n7X-P|6ZjKUA1C|id1r)D#S zvO9c>3ADoVvlH_L8M<HW_8(XnCc*6`?G$6g`YKXNx-wnQkt?F)X`TVh=(arJBIOU{)B=>@W`vG_7(>L z1*0fqvg&Y=(-baBhyNMye~hWl<04bcB+Li(aGQzOBCn2O2s0H^S$QfcQ1cvHVtg+h zpgY;$c~6+TvA=t`k&xbzfgMq4r*C_8`|V8r?S1F|c1LS>-wx&9wsrcpbB?VCTg_xd zLoxAB5?V<@%NF&}Hu`?y0Ftkv1Ok_h(sLNdw7nljJ7)Dc!Yg_n*Ma};U=!0hzSY~B z5#P#b<{10hH#=kO|32oF6puOeDi)-gQ{9}uY_-X3II5$fWZ(v~`ueB1_tw%2ZpA^iD|b)j 
zn6Xi{Pw#jlB*4Q`4$i*pxc*4{(u|?^{J%3o?+s;r-Eh+yO$s`AxE{)Ip~V3swJsD>hkRBvcZyz9=scLe(2@4}>( z^5D9Mcm%3rTFU*OnoY`@Rd8E=NT?_`Vf>$~50lP7OUe0^6lz*Z20TGQCXapl2CgA( zg#g=09;t?z>Rv4+1-^kkPE&s@Mtw#G{yVS7A@6AH0NHXHBXO&o{;f!khj0 zX6-nW@dkHJ5m$zkrux`2CyD!Ow3PRb09M{eb(2buv-U6P(m zbj4Pf0WIZ$vy`vnkcC^Yu+>r2a~FA9WCYU z!yIfm-BLqabn)=mw;Tdy#Atxu*BT&k$Cfx{@)Cbn8hXd|&uve&Y9QI((14P_OF#`;K-gg>j-;C~qJnk%ZRaWbROXm^+cd{0yeZ zUWok&__dT5_`CR#v4k6@_K?>uv&Tw}bF*c=VYzr7;CDT`jbXc(_sbdBax`Q3^po~$ zbX(xp?v~JO#JKIVsXB4idA5t$Chmgx@lvy41`#xv0|5AM`WR-8_w}P4FyuYs!Q`mE zJF?Xlj!d6?*Lar|jr%4h*y$5OKaQ!x)?#bQYt%pJnljf(rcKLSq|S}!Hq5RcO>wNHII*<8)+Q@78 z@2J7zXlMzN0#0K=V8Cl>E!6dY7OW{0<+?mO zZ3|p5G63VNHNLm80G}k&irn^&@!&7-#aAo0h2y!mjdvC8?=Ix;_Qv1cith~n50Nh+ z@KcqYRo$L(L zmU!YK9vPZ^O%{ICj)3F-Dag29-M&u2DT!-zMPxh)`j)B8$q0A;1 z$5CvmmXb&Pf@cv*oDSv9GlJlHovH^dJP4T0<&q0#j1O)@~)H^T{9)-`hgfLoI zHq6vRiA=a0{tZ^JyWNBrw%J73uq zKcX!9dlbg;lvc_zr|51C^#GQ1ZQeMUeV`6LrD+wd!QuFLh|H#@dJ$#Gq*!sTG|y#*m62;JR;hqq%5-;;~QqD0l&1#cyQ#+1p3^Ny_S-RG|FC)GeCEi z-GAX0r06JBVdEw_5nCf&iu0|0^v{Ik*~ITOJDQOP-QHEU@pxReu}YV1{PxAO$Tk4YOh!00!&@TVh~^+Dm;dLew$)LyDE4bTE6{)4(V%Bk+%Lgb){ zOy51&iVNEHxz}I|!QORYTy&mM>H+-PH-Ac+Z@$kdGBGtC=qiJv-K6P(m(pTGkDtz;GCSQbPrRop=qA0F1TZdvPn7Uk?Dsgik5*+tnlVH9k;4EDP|4+a!loam> zB`^wpzK6QLR{}p^{1CDnRJ@wH_&Yu=EC1Q0!zNwlu<7rBu(I&#UvR~GKs7y~4jAWN z1+Op?q*q`Qtcte}Z`?aro%~n_qr-RxuDSE(be6w#a3v%9HzKykC-%!jTjf#5@TCjs z%3u8*L6t}LQD3PrbN;_xJt}g;Qz+7p7dc1Df`oigGkDYhl*$8NjI%m9o5DEin0w0m z$aFnD%VHAWM%H`PNo&b4*0&NLly46X0M1JUd}0Rn>s70|m<*c;Hr~Ryt6XQs@Fg{H zvgS{-)RZ>_N4jf5z2Si*Y`A4w&?2&rE3QV1Q9o;Z4Fsl%(F5iw3wRQ0!OS` zEfU!a0j+B%+wnLaBV(V_G~Go_$G=suS38!Ou^uO~QLW~> zCZ7GJdHFkB*G2ooN%)i?7Fmh#%}xwatfds}GMjjKVL4Jt5o305(knstjOi{;zLf7a zn;xw5;&Ehpvln%|UVXZiT>k~Nb}pLP^$6KK8_h8bS=McANO`(hR7pg;{g&Be^jsF`TjCx`d=Ponx0YxQiRD1TPQw`H~h3{!Hhx(CFZ~77S zPsu>S2=LNpa)>D3@M%6|4i2~0Vq>Sk7gBjkO%4y95!6G@b6eCHgKR%BU z`$qHUuklU26Sw{Z0FGOBk#LQ^iVaa+Ttq4(S*|}-_kYY`{vXftK7I5Wg*ENyR@Wu| zCCNRfx5uX|L@=Om{;7<7F7W9Yq0BH$ix^oB}k`p|LBxqBL8Cpu+PCDw#G16`L 
zo+D)vOMy<9p%PhtNK}lEnEZOH`4rUHPW>{ zcT)%^xEj?(3O?`)(l%d$NrN7Wy1jj*C%@)76m$&DRw02Em*0}srQKeH`DA_qF9e=v z=l0J0ZpXX`!)Y*FRRKs}(68C1?Fh-1@GinlZGY7X`2>>*9`D}nQ*P5^-Y?Vn93GLO znH>!8M6P(;2KlP0)z1a!!7pMMNPPk8m zeAq$g8FsL}cYqdkR{f_(61x}FyEi%By%2S;rPu%1Ins|Las3EC!&Ys+P{%f*Y5EGU z+GHkYQ7OEc0K4pb1Ha+whNDI7!S_j~dM191@R+s>2sH;uvxa_nI)>3@rg|>Z+tZz# ze$%t05QaNIxkUehYCkD`R;`p?NQZwRT+JfndCSj$0GH!q)`C=1fv0wrPBqD$s3y&m zfwe`SK}O*<-tX`HY`!el(I`xWqu%0ruGY{%AM4>GFH|5J)3yg z?DVl*ry`4k=vWdf>*Ot+$g?3{ZjjJEpE*D1YuWO|a1gdxCG??m6(A@ES70P?^i z`wThBYYklBH}n}o$O(RDMtCfsmzqyM-6?_tmv1p~3>}pR%J&3a82o<>iWTRmJD(=K z3pW~|>}guc#0Im@I(lws{wEZGjg}Ku&G1nuvt8X6906-g*Y0}MRg!ZuJqX#`kBfKb#a z#K#s>jK$Hhc6V8rkeqrF{&Be2s~)F9!H;Xh7wXC>S2uAs9BRo^n)aqer$+8UlY&Tr zBLUxD{}e7&N4l`=AMLcGs-WKQS<0hP8;%POI_qOGS0^CsWEeKB2G>)jJcmKCxcmnN zbdi)l|1B>6c`+`R0Xn0hsG(t6?)LdCy(fTLnQquW1&84dF? zp>HWY3)@6cpAS%dF&E9|XW;fsqkyivxH#?KTjPR9)o}pn^rPq@zNQlzQ&1W~xtMQn z@4v^09cwy?J8Si z=7g;zMmBI}MJp|(L%jvcvaM_LieeLx&Lm}x4kaIL!FY$jP*>VU`z)rbt4jtm+hDj_ z&-JpKdtf(uze4bZ5(xFY-|N&WK*K^+{es_R?NNt(g%tl=NZbRKwynLbD0Wx)Oj6Jm zK4S?@#?N&|6*fMr&cc|;Ras3K`#5U~a;p^R87>vlKkg-oD;mu)F%5pwDfSL-4fUoq zG`0Z&)&mhVV0y&e1GaFmDfU!SjDRKl3`MWGPO)h_=cQ-$Bx?PJJ9Po<+?BaYD90GP znY)4m+`EJdbS8WcLAd!Tg4F-Bq5QD|c0_;{<+k=k$*;k!`)Iy}WFSt~cMV)p`=0V$ zxP5m-CR@1}r1)R$E3^DTOejSw9{OYaEcG!|Jau=LqWw?+&c+T0YGXAWsC1tC*avi+ z_M=G_;W+(gH9t&Ja%J2h!da-)rzvw{4DBb#bx&n&F;4tnfp}j==VY&dG!wUAI zzBW9r8xOUc4wcFs(i3$nziQ{tNqrPq)S^E8GSL9?iGzWmRsK;memCqR3|k9?DQxRy z(-22$yQD-kc_@mO2^UZ3V3}~<1l=;hOPI29w`%_~YgKq8=+odoVEK*jyKoxq@6z>;s3t-@F;c6Pdo2ZxV z;E3-b@m(a}jMffmvCoOmxpf;I=1DIkalU8uv)tY7i25FXv+jBQ%`MM%(&Z+^h|yLf z!uDpha+(ol<77yT%XAjW>~SO+0d^ykiqwNYB9yIp2A)Je2X-!bKRIFo0=yreVY(6| zKgJzkB!J(H;N^~%CXEmithRvYhTT5B|9(ChN$`{ZI zfblpHub$nbSVNeqzJPI(ahN}58M?AZFBP()KjXn5FqoZ! 
zY;siTFDX8&)E;rPQ=FCXNEFCoY*PMJ_%{xdy8I=QWFyBubJZqN*n}pb5MuJfxC-}k z_G9)EisYbYE#C6B*~AJYnLG0O(F_CsC`bYAtm{d!n)9u)GS8v*dYH5Og%q48A?^xG zqVV`UOgC^a!Daa8P7DQL8CzB3$1Gx-jFe^)n`qB!kf14t@P?G^kl$=iZB`{~QNQqV zH6R@;rZ$`CL30Iiei{_xl`l z?LlVx6?1$4g5$AU7vY})M&I~XSdz;p$WGzqfXxb-mF7@WtJWOxA~P+2@|qLJWd}5jQm<;RZxpx zQA~E^E&n(Oew2sO;O}u+xCCDO(P_syio#Wf9YbRO5e*J%SC50gW=UD?z@kF`)pk4O zc_SS=j=EU5xLEbG2I1l*``I>0xY$!4{`5Ndl{a~__P}r9dT2YdT8@5dgKk&*EDhLm zxNL&oJ;#N)O*vBr1qVpV6o+5w?^mYSl_`VZE>&dv!=GO#M?bguEC-~Vyg@-2(^QAZ zs{P79zcSS>DS1$EYHG~F@*HfbLzvlGp@nQPaWThn!O;uasKd6Pjrs)=PMH_(&^$z# zsl|Fh5%+OXnA>(-xbdVUWb8UF33D6p&o=mTW8WLTHk;H+AYtHL6TB9t^)33E6xy&5 zJC6&O)M44CveAf-ehcNWvMTs9t6E+AXDFD_0xh2qrd@F{e0E%zeZ>&`Jfl{fPj7?i zMMg*phgZnRYl1&hUlB5vrx!vcv!ifZPz6+*VbkvsE+#$V0pa5103|H|p6UpwDZI=9 zwY7!pFn;JH#fY_+yIRO?65S1Dy;)SujKXbxlY4ilkLcc8X4SUE zlC)~ceNYtg_e$<+5z4aabHtOygf_TWM0QMMZDh8j#|W_i?$>B~)KVBIdl)w$N z`<2(QGFAuMOnDgF2bhzT*D`ceQqQ7ZeWzmIa>F;*)x|J9a5n zo=uepGb_X6Xb?Nu!&n)#_h=NO)Kpn^ucKF#GXM&+78F=_4E+bB%PicixJg}Ht_~-q zFmq41EopfsA~^Uz#g1ozR?oNk>;}Sh`so08Z8zdqA)^{L&vT#g%@c&*#TKXr9w+)- z46C`yLUvx0kZP%D2xg#W1EqXnr^&F(U2X3TU`5^>aFX`YQpz@%Ddw*FCA@gkr4ysc zYfxG`y7(%K$&@JjPB_QqNZ$*fCtAus&=-c)!wER{Gu?0CA$$}`x%x9*qtgO>?N*;b%L7{`{)eD!z&93ke zS8T6eNg++q0zTz0uf(nWor%`|2WseqzTmOAPv4{D9kA#u9h_%LwLn{xSEh#~Q z-B;h6u7IU@ugnm+QvmvUgpve0>7>kd<3OAKg%EHFG!t&SJ9HTnw3K&@g|0$CMDrLe z<;8!IPsp!zyBNb`A?4V=x}TSet7%?J9yR9Wm51XNsRt(0yE`8?-aYa#t>yD)sq5n? 
zmA^1v*G&(1@QvwUlZvThPXT1hhjCnc{%Jt=db)q}pN9Q=a1P9O)*hVgf4+jRF8UXr zf)Gx@n=czvAe28uJF1D6rxwfK^9s%dcaqb!TQzZjE;43j-#@z_jlf4U>k(r#fA~F( z{%>a~f4GX<{l?2L_(NrQzne{2RSS@Cr_5JqrwE{LB-xPdV#sq7jr5xv-Gq4Z) zd-qFF^Dgx(xqbb4zqHYR{5zi)MgM*1?>es2QfB-e0bPq%e%VfO=MH=Bl_n#UsRDOnTiYc2?O%7=i9ScJI+ zOvK17N|nPGn%Jp2(V?sO>O?2LsuR=k=Y;L_36vJ_;7UXX(!12Nd5OO zJVcqH8ASaj3im}98U)73{`Q~R+aHhZzx9jJ{^>_Lw6FHHcGGw#yY6EfyR+-Q z?mn{X9`}K{gI#yx{EFF%==mR(@qaVEZ+~nb-;I~T_zwMSjBnT@UFP@vbIx!3Sh4wS z>OcBm>2`QTe~sr8{S9M$k2Xg8n{yYqmMmR|g{Xs`kRh_tPBh-?!6BWM-$f zU4cwWo`cnkQ8h_ytulx1#(QzMiZ{W&a2-`G(eEvpbuk-ZbSWYxiSDY!*K0t4^gXc% zR_tZlrXP~fP6detw)Joy>W#+~rEBUByFMLr&|}oW3^LQnbF4Q9b;p>JKTyci zSVFXm!*_O{U&HXH_ObmGv((SFQLM#@&d>mv^^QwxU;Z1MN$GOf~9 zwC|f4q%mty-&txL+6mSM&bzwWB+gF7gK*nZWOJa~g_(ZRwD!2Elc@tjyaSi%9ax#z zf&9b{poz-(rDye#N7?1c-RcH&c{Jo>xsKgxc{J!@zjnZNxW*p-{1n`0zzr-M6d6im zMgPa%aTlDIULZlYCERFb`Hns{N`8V>MGe5)lxGv*4IsD~JpsDRH_9Kil+#d_kT;O` z5!-ur8O~+>+;zdxI^9)shWi9>SY{2Wx{NY`uC~OEbzaZJ{20fnokyQVHFw^4)`kh~ zZCYM-ELFsWSma6hEbi)M_;hCZQ#U2~Ni-?wO@HM~osrOpYaNJwR4f9~p`ufLmvuQ9jX`jwf<0)m1lI3^k1E(zH=M8sw@NW zk}+^aG%wv!i*K%u%(w`v_!J$|F4d>)IXo7WxoADcW;)cLPz*s8p=B?HJh}HY>OGDI zQ=wUc6BI3DwmAzr_bBr+c=>kEBH_`h1kM1C;G6aub~GjbqBA+O(jbVGHQn=yUFW1C zJ|~o%45sA*RQ7y9XE!BXxipCk;ReU2Cw2jv=U4y2M+n2i;S5AP%}2KWmvfKI)vZq* z^#9>K#(@6vPa4p#6=Oh~Ds&uyI5r%$GBZF|AA~M|tQ)Lx;blCgj!O?<-&cLz%0(HG zUhN_bP7B`VGMPx3@2($OHToIAauwR=CdUeS75-x>zGeeW-4^^Noct*lIO34MJYCVE&?cpL8aDW1@e>#dR|xc!k|dKcK~%gL~XDn3d<#;TPH>Wroqm31+S$N zk-i`NPRE;cIvT_M{v8}{KKBI$0&U6^Du!pEh#L)aWC?`n6gp; z#N$8|N?wYIesay|K_ohbiq(j0yQ^{rR2f?!^eynPJcOE0f59;UFp*6P9Ld)~Hw!$g zKDF?4G~(6uAzMqhz>_4eotnY(w&Y@eaSeN8{kiQ>eDW#Xb*oW*=@-MrG9FLOcKtU= zrkd_;L@?J=gE*=RH2}x`?3fst-P>Iqbp7z4s6{1r-@)){$&jpuibKLvQFU{RrbY6A zmcjc;HEh4gdW@=b*J9%Mvt|M^;$6t$BJ@-^aKg5;nN!Ddou`S&X3u^iqCG0VLnVj_ zN#~Il0O|Zpi$OYnw$5ZEZg$3FSTtaqdgEr@FNCBDJH&(N(*C6XwEe$~g!Y~B_J7-5 z`*ThvwEtjt?Z@*0_!I0oipZvdM;Sk1dt23egbD4uNAOxw?osQ!JXVnR7o$f4_UQia zjUF|vMP|agUv^uBax>Z7-m_m`yDS5P@-5P#Ei%)}>icZt@b=Ss3lEC};5OB>>NAUY 
z^*$ZYkSB+#))r)-!EwJh7&rf(qfvmsrOr@6x8S|_3H0y+L?D*e0d<*=)FoVo2E}Px zpewKEiMV?)uNtnmDqb}U@rY)L4>h*3gnPL5oc=3P7d>uF$1;ho_SQlM9z>!H+;m1<+)y zDg5af1+FmX+{r`_poQE_t)e zjm92hZ=d|3o8CUhd)v=&9dor&a;=1(cT2Am&Gs%bGH_Ct-_F?q5BZvK{wRoDW&2}E zS%>1nLiO_>Nr5$1zkR07AY#)t>pW6Hx)u$eObYeUN!KAzHFR&ZUMwESC7lOyF^U}r znxZ?8tM-4f-jPB(o&3SxaN@7G1iv4vo0rhP?tDuOR|+627G?M&qwVv}CZuRi$_ku7 zKu;^PGcW+GW&5;J*A-}xh@ zaOnQvW5e++R(rW#?Om>5Iy{CNJ$~h;bUD_S#|?QAs>I?@<9GB;&Yh{jp(3k;37U{D zTIy(qB83+sHP!Sx9&TZ1?7nVEsO69Iy;#_u$^!k8mt`9Uo;P-}r_X?S1Y&79PryC3 z4HIyC)+YsRDx-zvbSve^^*Mlk9rCzgPY+nOn!*p74R61C^)a=BfUaYlNNzc5TU$o~ z46`(4sa;u`jN%)V-zZBnl%->U5_hS#Z_&|NSvCv)7s3C9@V^-TFOikER?)_I3+;H6 zw|HuywTj~lrUy5e)k`xujc)E^GKH_n9cnRIV2sLJh|psfzG-q`G)gJr zvx=KcE;K1?dKkME#`m}buB|nhvl`U0FOVn6D`o`_BWR#p7bOB*@I-OQ>u$yG@(iix ztVHvjbypD98@AM7I`Wowh|{Ud%-(sfJDFLZr6e<}uj(i>>#%;zm;EMAUh^9Z<8uEF z`6@AgY%QPf)|^FYWd&>}#t5Pigv@VtA>D-Dz-|6W-{G&G(frIpnp*YITab>i$dN%F zIrkqezbN0<82$S@HI2Ua;P=z%`+*p_)%f;&{1!f%rP>#g z;V7T$hG4&}J+KO^C_dcpc_0!k#rZH@Em03qO_LX5$zkfppOJ?jf`J;PHxE#Vpx5y_ z@&LiWlfT02$*#`%HRk|a#bs*6$J=8=zy`uBP9zZKYvX16{cJZoD7u>jiWDQF3GM)y zt)?hE%&yF~HlvfdFtY)c4)y!Qm8$ZwL8d^&W5@I} zY+`!mI;3~n4I-k)h5osAUpQtCHTqa3ek8FPk?mIZ!FE8H0DS&~ZoFnAdkD)MbbNbx_~Q z($RiahID*3mwV>S6_7XC&nPga}OsOL5yWB{mDO)0xWm zql0#VVn<^lEK(~gHW@02-S8e_11SH-O&y!ahG&0Gtm8bH^*xw*b+VEb-J z!P!0ZAQ^=`3pb|l=*Lqm3ASVHF)a)~5~rWt?aVp~JQ9-e3v+Hw6V`l=<362rNREEp zBRA*9>+HfBbibd-r0cTf$T*;1j(Q_Eo4q*;l7%%-lGwS;$0jTh89+#KJ)Umi&6sA# zR8}mrDNfn4&(CtR#qjtRTWBu)oUn$xB9l&J0O>YX%$8a_p-gYZ(a?oHCbc*F@|!~0=7;c9?h9=2_^{)wn<@w%a3_ufVr6o#bL|1DWZ1^`#P|jl>*iZB2|O6sI%5Rutt54(oM7f9nXi@L%T;`uj&XSa1Yw zq{2-yHSv{Ecj&j=xIsZ(J;2&9G|f3*4p%wCP7}q>l_MTmM&IM_V#w<#{t?+s8e#WG ztW_9+H*1eNq68Jx4N{JOguQIW0KP|=^1 zpiZRz{jA|lG@J%n;ovl)1Qz0u2fQf|eyt}KwE4XwnAK{|ecfyEAAOp($R{O+7C&tf z7ZNrvHkms3lsMZ1EF42|5#W|=Iqv@hnCrpKs2El^7U{%1YzW-!exx-qi5CwlW2bNv z`#A-A)oWadWgM0=r@Qr7aS>a=01MSs$b(th+;Cq})$V=0cxDGL%Ka}HVJ~g{sbuJD z*yfe}Efh9)^k!u2)xmoIX6bQ<0Um#fvl_8$)jwZP(8==b7@?61_Y3U7?ATHM{l0mf 
zzkjIx`xY&a(<;}Kv6lc%*V$J0r~IP2x(7|2j`-a zCqI>9W?N~y=g(3X-f1?md1;XeqI}|WDjKe`%R(-FGG{LC>7%%2K#Hd#p&PUK*bb;B zjjHJ(E0c@Vbf_bI%>&S^l{;v%D;V4e_YET-swyx0Qp&Lf;gOg9B&;dMBzgTZcLWy# z&nZ-(Q;(Ew^Wqnjo70QhuDNj5X!LWa zy;0pTl-!?+pl&t+Fjk*bKbzo05^6Yp56?vQ0a4tRnX@FzQ6)E7nP~SbNo)eLDmXaH z5!uorxkr~kAB20&7{(>*X!EVQ!+sEGlX3kx5jGC`WCDt72hdk)>2N;-&SK+ktlekX z4dmDF?7`#(mK{K%)t7wbgLc{Zv7Ry;_C;R{brKOUKn>@ud{YynmmXupz&-53=bxt{4>y+Hrn6dNsUWj^?Yfoqz^y6@{#T2|z1Q5=- zE3Is*mo8Gv@?uRYXTX4dlyDy(}$eI&~c z$vru>v|sE{z*3dRq?%+!IzQF2!#z2zFldX3!=w}`Xf*7dJ<&> zH_Gr4UQA_SoNqh=0KpVE$d(O|w;n?Ru4EI>-=X`kUAYeSFg#G`JK6m-$6>I6T-@|9 z^iPA+b#XmuXZlPIZ+@ImB0C0qkm%*rylb;u>|vDYvTDY-v3VHH??p?!d$*7m%?TqD zQ_?s5775aXHB->H5#EqWHDx1w$iAh|j!2o$5(o8ghiWinozuO|%O<7y*|H2FZ=2|z zMC`#rQFwG~R;w2SGyRN0n+8#MW2?8~3&C5z&YxZ+RW_G51aA@9n2l0ou32JRCH5iW zB)dChQTl8u+N> zeW?-%3ekN$n4HI~kBN$PW1ceRF_Dem2$&G4E7^i1>|OS6?|_&fFbTHvI&y6>`={1fLvs}8iaAL@^eT|3n)$Hga*g=ukUX>d)V@in1*->Abs8~tfVZ!#yMzE05K zlGs1earN0Zj7Q7^2=$KD05oT0wPNOEwssu>OL7k}ywqwcbBRnGBj$)1Wf%G*<>~8m z4B?a`kEE0QhwTzKn$`3Ak!~?b+kyE%eMnCJ+gq-Mo~<~_qbYf__F%5lHB>o?3A~YC zn=wxV`$2LaDxEwzG9Ebe!CqF&Y6Fp#b{|WQGSQloYlRi86=CKWAle^F4oU70gCq4i zVBvktdMv=KyM2mvx6ymW+7d5NjTPZgg1rGQpsA8IF^k9!O6+HEg%&K#Y7MZj~a2+jh zo4&yNaDgN33w*MDfh+U{Zi^Q<`vQNSrQw#n4(GEK34;p0nB!rQaR!Ge?#Q_d z_+K6-I3E_E!OsTO``AP{{&LfER^v`Dy9pqZL|$x%sBQHtgX#k;7YbaTCbCsnLd?uf z2P(wfB;+-T%Jmruk43f)F?VLQm^m@cT`%O-i^{|?2?a8v@J`8f5f?TYc-|o4rPIn? 
z>43rT6#ccx>Y<|sIhJ#)z3f^_+NDb(EmK@9Hx;7;*CnCeMH7=ntuEHzotqv?u1o4c zk219UbZWw9FR2!_D#@LnR`NoAWZXr{T^S6}eru>#&Ryxk8f2F+=2N&QU6?}@yZCRw z;jy3C$Ls@|uOs?=v2A{4h3S$Vi2C_-m5$IiGTW)u0+7^WfDDnzx-An%KSL%C}7rKv6<;W*j&OJ!dBAbt?XBSMBDUCYm4`5Dcho#`BLsSE5iAGid#(hmI7l2pG^>=X_4e32I&+4f_uAE3kFoQovi zp`U7;#`3#e;hdyUPl{O;Ob+Mt2)X(7I896qk2LX{9-7~s5wpTy;GkwYaaJn*>4mao zuMavt;2$WKt<{27ZENg_L{@5fy2o#DvZB6MoEQl|m6Q=3&nJ(>%F~~r6+VTsROA<+ zXttNNpa2&J;Ae%-fMu^AYH|6MM+iN8*~D>P_9<+e46i#1D|JN7jCwPF0Tw|L^4j3J z%bR(Ep8Y7fp{DQPv6^+%oB0#k82MOk;#n_8#?^d+HHm~JKEW?X#`vjhh)yri1miQ zV`5hLi&B~NY*;0zL3O=_Gs+Kqsd6VqCvp*u(kNS|I@Ref;o{Y6l9^9_aFEf29~V;F z+I)Z@@d+XwC^|>SbIs6SsR9vmYE>?}W$1RZiSn=+x+>(|n^1Ca>rRxltWQQ~N;q<0 z=kr_?k$Eos$^)zp?h@2Myn8otxGhLhJ^UfE>yyU-+kY1-0+x%b&m>{=uZwU8ErP8V zUOC8A)*HoWyVNgXf2byGh^f?S%XLGp>rkHbITR5oDs&loz$*6>$<4%gm9IqK9`4q%sP2N-&sn(8ftb944!#*Oy91ES z@=MaC%;lGi2~NPD4CcRN9Fs1|z)AIFM%@A6Dgqgj`yLnU=zCnEWtX3wc6-MNdEPOW z`e1*NZ4onTsB@BKzt{4iX!*pg_vYRbI_;KoOeG_DSK-mY2O(N&R=v$K7%`RcIrYOgo9Gv1f8niGucek|lqUJ!=Mb=A5&R5REqoI2#49+p6Rb=X-BF31 z^eb;uJ))4;oVk^q&S%!Ql?hgn4V4tWm~ecQ;%vh?jE69rW=@8qq{w}c)d+dja2RE| z4+?n)W0y(pU8OdtburK^(dH?XfURBvm~rXrFR^+rJJsw%nbOA?-yDb^AKOt2$i<|- zh1NPSavaRXshK|`PK(jvl+&by0Sl5CL1NEQzW}Zz_uG6TiGg9MNd3LkzOIwlpterL z%yJ)lpOlc6DX_Faw`Asy3El#KGXQ}D^3IHk?%Xjj8M$M;Zot`a;S${wGvEpQi3~DX zb_cVKq1To6zlhy82<92MIE)#2d+qf-B%52#8X5C-$-cV5dG7+uA+B;JR0@iV># zY{nR13f~r4@*}_)K9Z*-zb%=|#>6b%v8~d&GAE$R@Ro%Mi0_6(+}0(&mMg%D9a(U; zv`FkT*v39K!5O*B9AGN|Tz5#7yMk74rR)UaWQ-VY1UTF$x&yE=Um$R5VNox$u0@tN zFK4+cJtVTZR<V`Y|y>HJ|GM)MCRO^)35AenTbVGjz2rOY`C-zcw8^5<{S;2 zg6dDL0QFkG=9dXi*@ZQ9^s-}84nI_dHUA*`|D>n9Rj5$fVb#Y5^#sPr(GIGZF%)1_ z62^HkO_Y=)2^nz4=1d&3*a1(|-I6dS=z@D!*(H9K2hg_-rqBqAgPj?40s%})FO~o~ zEEF|6%G3-d>fgLS_m*lrJkDU^!JuM0G=v;j-WaXG8pB_r{yden?bFKjAY@;B%}*zj zOqS(+@Tirw(XW)Cs9r%5nUa8H_YVoAZa`a1EoD)Ox!nMniD?+aL1wnULLD&31#-j| zJDJo$zi^0Nwd)rKU)Mpuu;lk~{lW%b$=A#_m0$G*I$&Hc*jKh(cu;4%?<5&!@i>vh z3*Vk#cNvd6i*j{!-KE?`@^);dNcGlen()T$>Aw~A)Fx#`Y=c72FjHc(J!=mt>H(}~L64KKmn|r0k{a#qtwMPinUiYKy}RTKU`k?d 
zg{#g0&rvoce9bd$NhY&Ufn)*N-&w*J0W}jV(o*C_IKOum@qQp{wBP6ep=W=^po@bS zOU!?<-tsWm5f>XxL(4k>JJ$r0d9x}uyA@kVQU>D?h*BVb_YN6GC?4h-QcJDg6Xo!s>0~JHYDp z2jZ}5J7K`;%dK%(U55@Vwx?PVR*$rHgw^)>XwUcE{P`sv`~{j{{-uMDRda#X;Xb}T zhu`7rL;Lp%Xvw;~A1tj4uGGmJvx@`^C*cNgWi{=ryll1)7H*Fm8B-s= z#{^u^pYY4tZ0i96I^^pPWmbM5OFZmCoR1iT&x{bajeV zl6V`=(%)k^7AbR|*S!}|zE_ygm|2NLkeE3uU9#+gpEMqXW`BtQW!M2IvRAU~6ElI9 z|1BCyz{r*~zk#2)??~gclf-_)g(--PzmVmpyYt5gO92(?!gF+BxEI??7we+w zR)54=FNU+Np?P9>Nw#V6EOFHm(G;43^Z~pt<9pBF!^MD$2gH_*=|oeZkjE5E3$Os} z{(bH9?B{gT3m0K`P&bWSeH)yYTBBB5aVS)d#L^OND|D$MH^KO!gNU;mJ?Y7u*~w+u zD6o;)xc#eEj5LQv$!wB?Jw(!NiUMR=jRY8pu+(>+GMSVK=~|6@UUEr&HwLKZ*aL82 z--LqC1DhR0RBt>9|AA-xM$HmY+o{E^ARMs8Zb0t0$<>`o zmaODuE6W_f&S194olxUGG@(ddVEI6fh?+dAKJ=rou^L5=ThPseTO_h&_DvLJ<;2&L zvMiYl6uI`53>6Q1c;t{?vXX76UU#uPJ34(9xL{>HsK`}cD1>ga6H*Sq_!`omLN5wE zf6 z^AP5CfqBU&C@5HhwleBZlj40r6Uov1HN84G$s8|D9i*pQCdKnwb;@huT-QZ9=47@W zElH@hhcl~>I(9IgSU-M`{`e_7@TX!=Xn^{+{b)7tF@O99`~W2-+=m_$n>7vQB}}Z* zHCy0~m;EHMYu=!%FZ~u^8IPhBSFz8n31Ot7xtF8>L%Ab!XYhK`9%J9LsusBOhnHRf zL|7Z_h{JNYCP~Hssg}biqDkF{=E`o9@MzTn$#O>OtuBz|w3 z^X{J{W%Qo!l1yx?a_NWg+l(TIaGOP_I1- zgmaoB>`X96=q-=xJClFdZ4VUNY5!vD;B=?QL>{lx`RuJcLLRQom;t!IdLPV_RCynXD`6))8T*LNPKkv{bD9ZM z`omm1O>&C2NN25ZO&;>g#qMn^Dom)>szksnZKXDeX)(dQO#;eDm~cwe>b#aVi7^rq zz;(UohI1t^szKqBUi{{yR%i6VTmD7pD)%;F!nQ08)wThrNBDi6H>=g_-X{vbuZ<0( zs(F3X00y~hbDxA-0M~oR9!25HW~`|)N`4(X)P=(k40fpHR$U`er@uH1^U7M>b=)o* zo3^1&qYO%jo6q2m$gJf>#n;X1^khJuE$$PgGju7lmnf@P{gcHMdcRmP4f0r*#9a3BEq-^U7_WI+894dxRmXHl9?)`qtO zHY3w4zY^>ZoO-Z-y0GSNbZVZ#J2+AQNTk)Y{@LsV9J#G|;qleh{B^-3h8Bwbg?Tx8wv!kpmJXGZ&D@mP5tSi80-c2X?Q;_oZTaoEGaIyxi~{I z&rg@kMd>kTUaeH^F^ig*s!7I2cDPN0+i{vF9jMvm(6M`OLGhhdBe`z?QuYe{z)Zyh z(~0Lq2J(k4-G#(MV&oaRsxj-R`W~S-G8B=Kfb2&$A7p=GIK!0|lc{l!tSUd-DfFM< zz;H7kz=adlP09q9diNR3;zV)NCH5L_pWyO#+m#~2c4Z+jTg7C%QVdrM(RQUMZo6{# z&1Aa*Ovs185~nYeP#OYXE+mD<@_%WX62|aYZZc{pb1?FkdK(_`=lCtRdx`awl-KAH z?c|C*!Tj)BJbbL({V={LyKZdNx-gHz1*E1jPSv&=)-sNEYZ;+JJ42o{E|kX4xuGWf 
z7P%6ZMH8(pEVMUCYV2+*ec=^z?742^h&j=B=~&LK$-B6Bwo)DxPd%rN%EvRi^m5_)s`czl6F{di4K zQJF=_PuI4(?@Si%y;L(?X1fHEYxn<+llbX4?|x?84|~!Pev^-$^tYI!nv?S%b&1W< zo@0m5JDhLfU7Ccd-Q8Zc71sH7$^8xDA*lBExZo36+CeGjHfv}oa9b0Zb=yS6dQf-5 zn5d-Hqwm7|`n|}SM6F6@$xV`SYli+%za5isTPr%v_}w4TY6b}356CVlLM+s=c&d{Ilf!nwSB7 z@w#)IP*6m)h9pJ8y-U#If_+RKyOL5H{E6Fw z{N#-+GmGqJ5vUaR$%*e^jkc7NCZL7%IM0az^N0R$+ zaJW9FGP9n7l{-&XtmoZ+wyQ>se2nom_UKGaX2Rj9mAPPJ8J{CZ;0~q0{vxaKvL8g2yvnFRtC7QXRQ-haRz4WD1Yp`%nU|fp^@rWHm=QfN=(* zeNMzzENa!Vl3W8-FLIIzh&Q?9d0;%f=&QppF3k2o zkNSIHUwOH`PNVAYgM>yMdg)1#8JeH$cmJBb4X#_@)BZju&N>C>e!`21Ca03(s4>7` zAOFh7W;j?Sog7h#{fQRZ@+VU1xf_8ltPyi2Sj&9ahe_B6>;b%Z#ViTA2LRmlzD(%Y zm)U3O%Os~S9OZopQ(x*xEx#A~;%DdkF!0nm4qdNs(s!rC*3oLY8wkdt09>Lw&suh= zFcS`#2I@$DLtZ4gA9etv((c0WyrOVpo)cRvKwxNz&%ht4)hX1f1u&ttY?hz>o0Hy= zWp-_+%z6TXaCrGLNSU(L%hw{<$H&dALKLaC&Q2aDaNt{jp5?D3=7G{%)jlYQZKM3< zm-G1B9>b5;g_#I>niPt}jj^#^XGxf1$Xi9b7MCBReenQsbsIW~gvOwBxA{`CK}eaq z&InF3>psd__2qa&(mwP$ijJ4^+I>cmD|nIWc#*biJ4Ykj-k!H99>3)zPxQCQ4nr3z zh_#E@mL5sMBu)0n1oK+k?QoNI2ykKs9FCY3>u;kIQ#kcn8W&;b9!WCXIo>*)WKvt5 zxYPT-jP7&pHd&AwWu@<5;7%Df7>dd{^A@6S>13BW?R8*K^T$x$2fY7r6J;_+Ba11s z&8F}+%v_8@08IFjK=4*>iLPGDtjLq*W;%qBXf(GW7g?#}RL?Fq1Ar<5}ztt^zsk{{z| z+fF$A+~SzvY;V>fDQC9547$BEyC9NwhB3Bmh;7>;VeONnooZ7+F6#IWZx(n=Wj~L!1M0(IQh>WB0RT45;6j1UMl*cv9LD+F0lvga4qC^& zW4}Ny8&e8nmuHdR(Slc@1#jd|OtXuAmOE7cqoiQ>3U6TK2d^NVW);}|9NLi~t7N`& z5d4R;eGO-!szSYGKCMz4$b$`-(S`PZO@XEc&gw+*s~AD7D>vTfV5UDG=(Pkl{*)* zNgafnccE~TeEwEng7V2V$l;@;T^C)_s|EZO=gz;R*l7QzHO1+lGTn?a1k5C|Q+=df z(|qyG-o2>ytbSWrzyy0^(G0{;TIrpryMV8f!$!~8>}0R|U>UH!Qz(nYfLO1Ua?-*fyI8xH zFM#D#48}C5-fIEGlWQ;eSNONdK%njn4w%5K&x-6rmi%z8lKiZsSRW>P;6NIu%Slnk zV|;crEW}JH3~+sogsyfHFm&kJLBpC-Bq_g6hX25g{MueN0_aSnF(b(yZq+)`B`HHB zWp=vczTRH)DKTFna1k}~>Tj2Dukqv0=)UIV=|vrJL@0q~ z(XlJ_YGh2mGX(2OuCtATw+L{tcbRP{T*vy*d9T>|?I$_@@)y(_pV2h6=LmGmYug8P zEq_=Y?J%2S_k~BR`;tsLMe^OvMApo_+8|M;Ul|VwzK6ea$KU19yYBWfP@}_Y!rc28 ztr2$v0gLiBe)<594x6?&kGU{Lv@4<&#l%1@B6SU9w_PV|Z>$Fj-e;%GAv=BSpckmo 
z$P(aNTiExa`{z=*N8DBP*osZ=CoA;W5L6)I!Hv*Ss(L6BL zStM$J9(Z9(V!Oc^(54t!3WGex2brS}5_$7ltp zfZs9Z+%&)YVZ+rn*X3tTc!uu6W`Q=r_K-Z`fz%2B0{|JJWwip`Sd>$mX4v;*7hBnP za`ej{GGggPc45s%dPCX5D9lVjTr4ZPB(~5gu_b9}O>?)*R=V(F5nD?~>smTmC&?_| z2?(wg@L5K-92ry}zI&Xh>?+xPCe&MU-#tbY9;=EuCHL=Kp+0zD)(iNfn1Gj|{`k18 z4@F+ur4~MbzIbmLzIbSCjsC2m2lyasvCD4g^1auiDmaK{2{=ACjPG9OStAGNbadkK zMQ%?&00;}o2R}P5Y&-@$LH_$__u^g5qZx>7>ss5m_mjv|s46&GQttU03f(yb&w20n zawKqA-F-V|--dF@?-SSXjX&p_ioHoDp`r-=b!{8=(&IkrkyEHT{Oev_XLuvLU4SO+ zcO)SQz(oZj7a7+s1`oU7GLJ+&~ems zANI4I6|KR`k$3lJ{s3Tf9%>P-=TTAbt_6yBpR5GnH%V3|w)rgY2Qqhr_KR#6e6`Zg z_Ojhl#0N~o0oW|1^U)y%!2mc=88tP>S+#7b5;jUT_60dTh4KMO*w4Mdj(_O2eC)IQ zA_38{-@Pq(F|g%HY#W+pZIl}A3q&OsNFQsXPf1!JY^|~`Q1Genp|w$D4FSvh&{|13 z1twIl{RpK1wv~I5ObbMJac^NQGb6pf&ns-)C1U6n$$dr$A2yL4!fx5JC)NiF%Dqb+ z3q;F~1#bUdd&zh7gu9{$8>4c>>Vb~?m{cXpBdvS=eFp7a;Dy52?&bjE-3PGkNpF(^ z_t5SnGEW;!@{-AR^;##U_kJG%G|R~|eY>3m2DA{p7AP(rq{Ihjtz!E#d@m{2v|%;u z39{;iV+uj7gGomu<)wQlaGaoCa}V*o+b&Jw_huL8j1^vBqngGGyvPe|RF7QF6LfA= z?=9kaxQ#cp@iz{QA@%xO+IX~YahS@TV4Y~T;RfL6XuRk6SR8!1jJH6aQoctzk z!Vl!pTH6y%WRiA~?%`|OpF-?BMV*kgCje#F1ykfcC$*G8`Faq|dM(8%TFYY>Be}N zY8MHr>fsn!?TPeNbg*Nf!`o7QJkTN8OQw8!w^@&G!|tYqP0cg{-b}hwCxbH7@8=NU zJ#mTgZtSHz4&vEjQ5cM-I_G0e!>C7*ImJSMi=Q?6*;b){o&!xgcEW{Ib$2aHzk_xlS#(#5l(8lHI3+)8)uKuC?P0)?tI> zw8mR(_zYTHYqXfamJ>scVsovwb8Bh!visdGhuA~s2!zp%4{JQw_(_V;@zY-cRv+L8 z)&!_!LbgxwWWzO7Ho*ng@QnBcAVFkzOXxh=?Qw-{BCCm6(b`j1=&hg4a01Q_@6e>2 zd1+-LU}<8DwgIt{UY5yO0(;p7sCS82tNhG|;en#oP)|UgQKvb>VAeiCm%p@<_t+XD z1=bKLU@_k{h~1T}?NIJY4xcsy)33}ML(;IXo(FCiANA%Uv1=!oDLMiriS~4-4#T`D za-4o{mr=y$r8!X^@!?;H+uo^eEhm*YPWT{kgP8~m5|COCla5bXU?e7{Bt!|sYZ|!JLd)KLWJ{B&k(!(44#%V$m((rGl znJ^An_&2kl`6+UpxDZ*XI`MX%iJ};*mK0yGPFzClD&|N5@Y&ra^+5yjXj_tTVKpx7 z#)Shf0HPGJkU|go=GV(ShEsxUr8m%A{`q-@+~5nNu&_4ubKw3Wfg$RKRAW}4-99|< zl9G)`W`4m!S;?Cf4{4~^_BMXN3TDB(BDAk#jatg%xs=7tp{3lPi`Hp{yH5s_S>80Q zYWK;|DJ5^34kR@kV&bv$Q%TQC{+5QVfLPhA=gx>MvS(?^wX(7Tn2cb0F=pzMm8DP| zqd>ZprQ?*P*{nynYD84ELFb3CeG(e0PqH&>Z^hbMj$Cgg&JN%tFN0fGvI30bZpTRS 
zMMutHy!cS|=0YT1UL2edEhrT&|GjJ{MMD~hqptkq3_-twd7J@HDNrLHj_v`gtL{;M z45Nw1Zj9LO3sGoASbBR_71{=33?0f?f=NvPuV6+K5x^pYAJ3*NF`M$2kBa^(FPr6M zi&>^oQTU_eW&4;1V|IH)=CLYyBZn_5eakMAUBl^YV{BQ^<1;+e(e3(>2 z7lA?VYJ=1}e?&|b!vL`1BCE-VKmdOnrtoiEm?1YC)5FS`$4^!~_Bs!xQt?Hm?w47> zuFlIbnI1$B6Axj}20t#sbh{7Jln*{bm;yscWuFH$Muf7HQ?h$;vxj8bHCR}E_ZKJ# zubuP;hQHEy^Blbizd(D{kHYv`T~IfmFEp%+0(E<8o}hj1S>@`wo2Qmi>){&6cU@eT z8XSaar%B;6RE?xi2vq=_$Yg-&KY8LQz1Q&}zt^G9n>-uz{ zUf2GCdR@B)#_QTLFkTm5h{b#%CIb@>r>BpN=^k^Dg<@CF&f;Lu=8tcgl#)+A-lSNxZA?iuDL_-4u%nk(O4;N{gFCx3tzj}E-#tJP| zg$7XxNpD^oz~d@LY)=-`#;-Br%tmZ);S)r?W+tXm9`10P1w$UMUXR|PQ#{entbRa1 zKDO!^6Sq*2-HoAvKDLhE*8s85;ukhpcgqom8(Cd~@zwR9yG59rBvMeM4%B$2c|udo zBL{R+QO5i8^Ib-NTC2^$-f!YLq#o~&4->=O#>+Q$Dvt-NTHU35Lhc<(dxyD!TWie0 zzHhdexi<0s{&Cyqy7sfGOYB5g%h|KCT0LWrl6?Q9FT7KeB6pgloKnYf+0Xi9?a7&0 zxZ;Wci~7RZrjS#L3>quX-{w^wd&Z2W$`YGb+!7ErjBnuS7)8qmzHpV@J+pB6P7j7L z`3xSCwWNrtUp5ptEQMyOF0wW2feNdrT5&ZK{1NXfJ|w0;R06N?gb{&FY%XYG`jz-B_j`3}G0RpEaVbgo`f0!NhTU%*ejxEV7DD$}=9czSOO zrV~*AJ>5hxy^Hx+O`HMMBYOk$Id4w80YW$NpP5fqn{&`3zPt*(U=}-LKz>bX(ev?M zsV?tlrnqt6$jUtD+IdSldj`bM+Tu=eJ|x!L&rXV~KgBumMF>%dupe04iT!?9w4tqX z_^USbNVEd#a%w5VbGQS_2s!e&ld=*X3IcuuO03Su4Ybp2CR1#PkM#@~?m)-^X(`W< z;r9NC%zrJAXAC{sCn`5y>tqj%fy;^p9<*h^TI>K|>%F{BCl1I%8|2k>gu%j}u97Xg z)P9tcgK}dSy1!1rLe6r|x;P?Wcez;pIJe53VIl!D+lraPTtO>SV_#zM2$D#w+=nFP z#-JgW^ikWvhY1hFNQuruzP*O9Kc?Ro%mYBw% zV`d%F#&?_N%1JJ<&HCv;GK5x7p*3V}F&9HW|5Rn-9s<*o%qGvejijw2dAG85yZ-8W zi`n$x+Km+LD>Ac4jzpY__T)Xq6m30$AxrJ50H3S3Ro%W=FBS z)U2K_Vo}5dfMs$DoS_~v92Y3(`130iobga-zwTxvbO1u0AV-c#;}?}Mv*ZkFe>b%goCt0-9B^3=@T0VdVAEjceT(Huaw0waRA#(K*0 zs{_nh1s@b+f3ybJWuxkSFgbc7@8pp8fx6R%CNZH4(8sZL`MrD!B4_jBwMM2Ox8CcpKUuP0a|OGKbQL;c7&sua?qu74e#Obh8{&fM3HvoQ{U@ylR=mEmns2|Y}Mo2K6oeY^4k-4dEAn?GmeZbm4RT) zxhiDMs*3#z310Oc4eJ%q;RO0CIx)!@0FYHyR@yP}E|ytjqG0y5T^y<8@KPC_km5PP zK2M^P%gC?9#B6aB0*cNRW+jQEs(vMA!*jaKN}`;iloHCra;=Iu9-|T=ADO^c8!R=D zb2ux?LwtTt`jHDoU=_1q>7xX^0e0+HVk(q`XY>Gmfl>7_O0?-jvqVKqLrcc+U#htK 
zoS0F!dVl<61x9cN9>0;rWYP{?xH`cO=V2eEAFZG=$sQgBD0aS`1g05kh8@qp9&tkR zVv$X-qwW4Itc=1=%B!O?Obp6&RP!%nCp$~Y_rTmPL=OI`CPbhVR|}VSpdRKQk7VHS z)s^(R3xnP%uM(%`fRZpI=m)$AIX+m#&q- zmPnPzTFQOn=tK?O>@9B%7Dl?4v*DwBDJ$*vc>hee`kRbmC)K>~5$TGtaa!ySb|H;*kcFYCPe?KNmU<^~? zaO9PY$ZIL!B0EQ3rvDgu5s#}i$n)daYfwE}S&m^7tt`o1zj79^>LLJO#FNd* z1??ikRuNEM-?$ZMhE~-n{y>~R!IH3DLmA;l#E-Kx(8PKecVhwM_!=2e0~E4-sxSc# zn53*j1}f_=wz=W&t=86vR9S;Xf&CR69LhCA^b;I4g_`08`?p6MR2!p`EV(7>Ozzv`uSRk#$sje44sJe^v%};P4 z(6H15rCJ3wO3^Bam7=y5G@%K+Knh{6B2p}65ro<%P-JTj71C#g zrBrDFfdVc?bgawhs{t&iY)aq%oOAC>UeW}0zVrS4f4_V_x%a(y_wC$s&OP@W;yxyz zIPRMPz#&YzdG6&miT))fvQfToAodVEv<{>g7;1+#DayYNq%A9(Oed9-l8H*ksDoRl)YpBeE|ZxJ_dUo2Kn1O zk|Qu7{!SEd-tq{24H5QlQQHe{F}+0TQN$1Ar-^vp&bdot(jo$@w1M7PW&pRd(ge+n zVD+*|cRkBM9n4R{$k*%S9Y`)kY|3cHuRbpWS45u>;R5q6H#58`KawY?)-9v$$!q36 zN$DiWe?LV=p)TP!ieTz7g|d*dEjO^DtNg_w0c}7f%NBT0tvqS(*Q@#K9Js}twAR7H zZd4qir2g$n9J>a#xof7gLl7eQ3$gPN{bRFPMmNXFh)w`hyd%6WETl?#k8$OVOocHU z=MoZC{Oe`-3}Al^b*q)oW5tTMhzsN)$7oY9i{6VN(g>I6%Mg8&T@;pmTo7vHNqs`7 z%jJl%T#$?u?H14Z73_`~kh6G6%&k`}=3ucxT$$ihX|!4HQ%Q>C5KNEsix&AqA#?&u z{OxnSg=WE9m=6C8_zw^oEtzD30zio9zt=@=*eg#QPGbC+MKq<0)1N ztkWwP=fxEv*vZVaK6r~N=018V9iHw_QRT#Xnz-HB2vN+?k@(Y#!$;ya>8}XWQFm7L z54hP^eQUf|m%tQ&U1mZ zhIXP=d?z|XTE%xF)Tg|dI>}m1BxCkLfp{nxbY&1tBtdnf7WokBTZ9loENk(ZzQf~bUQ)CsFpUc~){Ge@LpQ!fn#PQoFDqFR7mT@GpVqa;NV-tOIyVkN0~u?2Q?1dkS0j4tAQ|KgME{K43PvB|w#HAQGAg z2hT#HvTH=#2(y>tc&>424wE2}d?TiS?`Zf%et#KDew?MlGg(=nbgK*rm}52`cG-O8 zCfEsmtISASZu*0P`}Dcxzmh4W15=;CBoaB=+*auQ)eXSunFA3T0+%5R?~p zN6N5qmB`aT^cgI=i$xbBqF;m2y=w~KBlP2YH?wkQGjAS>k@*+se6N(S4{qj_GjA#L zDiT=#GOvbxtYzLh=DozcIu_+M<^|ZtR+8Xd(~dPreLkJ+sG&r$lKKRDG11E!_F|@& zyYR)gL?rPOc`(ca-w2a`Vj{j>l8^SIMx^;Cq6~w44h8mNcMQ7aipPH)^F?)x>JrO!Z!xp5cWZLcw!Gzur0= zr+u@M>Plxa)Tka;-u{}R00FwRpRIZa(lq|m6LwO`p{$UvJVb-i!pL*?QY8M@wD7Dm z8&MbmqT%5u`mB`gkQydi0IGvW$v`5grcZ$yn#-smphK0^Qm92uXLDE<`T1)}0_aFL zw!vBC%&xG(DXG@V23bKHU}MpW7!;h#GoU7 zzDpNjb1*7xO3buVSlZ0fr`??uEA53WZI9EZeJEzyZ;~mkJSI+Bd9)}nq?S&ssyUsVw|>DIKK&0R3ieeK 
zVLx0D8Bxct2%|mX&EbfwTPCn>8K9o#_UIBL)jfT+N41}--Sqq~nr?dHmrysor0J$e z^Vn?*e?d}0Gv6F(@~yvU=gkCo(}tVmVG-ejf1$gTUybKyuaUH#Utd7h!F2NRuC(uo z35-#`-p(?SfBu=iXY=oms^35VnZZ2V*Pd7$+WX{eI&&p)JDE;Xq&2vu$}eGjZWObY zJ>&i>KS&(KZm&fjh%A{o$m(6MxO6djlcd$h<6?w92Xg%OqRYHKmMqw_;lWUSEu;V?a2`A_Kg1N549({JmjCrXGWCw=g*z(p>yX!>bdjzp>yZS15SJBy!*d%==}H*q4S}$ zJ3&2kwj2@u&mB77&?aaPoePI_I&|Lhdxy?61{xmPMl-hJ&ci%oly)KxF(8V;L^K7K zB4rc(>_yMv6R^MN*%Q==zjZ!`C3}5{G2ezOm@Hp;x{AIV@I)baJ|0lvDuaSX7)bIH z5-+AF#8j^r{i6-CYbl9ab=FN7XD_o*sr!(kScb|njdwjv2eh(Ny4E4>q%(1V&<+&f zE^0?DoUrq>OOCs3u>95>f<$NpjBQVc<@8$xpWEBL)67ZNG_MvRisxlyzlnn?H;J{zALRwe8$Y!SSLpChXB zlW~Qml~pV~Trsi`c{M6EGOLXCCOLyuZSfzhT8JxY(bS6DYy_Mqp&KO5ZE#=vst$LP zQINP=zaD6USCed#7m1DQHkbC2^pC-(bpW9@CgaTSHH27xXdRJye!!)KO#UrV<;HkB zBJEMC&E99JJ|d-@dMZJXm(FLh#Lz^um{XALo?5YisZHt%7+bsCYb?9uZ~C!AX;-}S z61c3zcDAK&?2Q~)I(m5ZOkyMwAZU@7p6pO!oSyP}T|yQ8EkUj5XQ!~Dc>cBtiVmQT z`3it|_bp|&8@{<#2y3P81izvG-bf&Jt9&_{a(JFz9>;Ya?8W%I$fwU#QoHv?INuwQ zi|;$}N@@Mc;yU8VBVmD*$i?EEPup8iRm@g823&#H=PbyauqdfL&d~|MvqWh(qijj* zn*7ZyRVPx+^GnGp!km+8uw~fH+P=-X&1f=gu{mo$qFiGD<+>pT@-*f=M7b0|xqP>n zSKZP{@T1c>^Ro6um(@>{?l5O5T&{$WX6YI^{eYm#l`Ju@S`sV!58JL}`ie;}G4Z7J z*5U*~X&Q>-<+)g1^evE!DSvws=2f40K+5%d6dr~4%6?~KH0(X5qZsvU`1SWa87U=R z`DCP&_>Qk5c8r17grvkLKdDNITc1Qt$v%C~lL^Fv$rZgrS3b#R%@qxTa7_RB3=$cy z@R()8LPe2pm@UWavD`VCetWigM@sQ>ke2&K-k&$drw9hI!Ms=)HAaCPIIhyjUs z8lQ^Nh`-zfxGeE<;HCKoJhH;8nbHVbZu`>RsJLVKm53sb05HWQ9x2(siXd60jwO5a z2cjkjO|~uF3m9Q7;XgkR+LtLWW)InN346%UT6LFyW>(#4Yg~bQE?ITgt5)4@kfuf& zW0sfP1_Oi#6VjR*TGU)6+xO>$MD8L5KT59>V%V(rZ%j*I2N(b(e)^JO!{2A44P|yB znI$k(?9(rikBeWHWz!cxMfSJC=5^HbNsP%Bx>r?ZV%7k>MKVl^^{liL4u75u5*)xV(>DDWJ#lQdic(c!;E&c6V76b1_$oV|2P|uDPGv3~BK~rv9vtY<}=Pb4v@7+x-U3MupsY^a`MqVa7pduj9g&1UNK0#kr zKTF=;y(g`?HE+Z7GPk2>HLi1qw3axK#Mgv+Y-fB;M6@(~O;PI8010XtU(-?4Aogt{ z(9ZapmrB(*A-&rG`@D;Fr7;ijQg^Z~ciTckFwye5&L(YxieS}9r^r6zJu~peL!(HB z(eAVv@41TKWi{XxObM3~bV(W_9A-}Q%kR((p6z1=AwW|6sfTR3U;O`duwrBtZ_O zg}{VnA36e%P(J@xwi)HTMD@%H;LJ-B{Z^A&Pk^7t^a+Iw?6^%*O6ghvM~7#kkX-;E 
zR~B^|8l%o=`{#jXH-W!WRp=--MJg<~AERaXEyQ{7Fpz0{oJj~#?IVZ~0VZ^gq_25| zFrgIKayJrrn4BdDoe+)c3kcEp>7=E~w{CE&?sZl_AjSQ(v41qc+W#PjKLfbE}ZfFdp zl$r1@|9b|_05*5@^2174Kcu%pl~c;t>Uqk{N4d) z-+K2z6x4CRQu4d-D?;|(`^CPWsfZP1d(^~EnE@%>6XKp<#d2y5=X3?+WM(ffb ze>kJP)%u?vWZ3(vF!uiWgB*Lug)dLaM5fJ~cvun~fLqPsiy6rQo0pO*r_j+pJZ(t{ z-DH5h6LBK%^@0cWsPUqAAwe2B7NXMcLX)mD?r$bL=~7Ap)I_elRXv(^UoPg9WP8#r z*|ZWpy^+Eh{Tk;b2zSm#gMDIUJ#x?FS3f4O2b;Dt-4aZcMj9Aa)u$;MKvOwL*6y=QE!N!TWyNS! z3lAJUQ>@a+2{vi7b?K#QRUAQUob59+LrJ`ody-RETBNbnFCW{)XgH>v=Nqd~qKCH%VB-CY< zB(@{(!5#Sl+L1*^k%T*PJ`=C_11F7;0C!|ZxGDlxg*0{TUHUNFN})zpU)L!4IrGHO zc{@qtTkMhvO&&5iHlVwmD37_%3TxMWBe~tI-jh0G6;qn=seE+@+jS7>pi}W=r{CV{ zv$xu%4@HuR|5)_dWobl`&;BjS#iP)>{ObfV>+7l0wY`TZid16ALL(x2XW|_hId|BW zPqCwotKkOe6KSXS>kGyDHoeqiwegwVW!IqAT`YqPbA9ZPdL&E9DEtP6#VF}MD#`OO zvyhZqW?Q;0OUYScTaFT;q`#MOod-(ROu(}*^%l1Zn7g<5TYPZDWl9;&0WZb3cNMtdL9 zC^tLNMso)+1B)bPjFGf_|DhA@Q6-Vb*-WO`j1g`G&u55!8yXv+PC8 zHkGl88KVv#yQ6qmr(^fx|IV>Ht7jV4>jtV93gU&Hdxp>2ml@hnDX!%WofS>>zHhMS zF1NwQ+79|i2s&ELqAOtRGT73vh_v-)vcDrMNwiUzR6L8VIi^F0nJGI?QdWn7Y&8pV z+FBql6K#I+LI+Ir4X6pkaqKH@@*|}G(%XW9 z!;+E7q`K$#-{Bq&|D4<%p4&wKy?7u2%LK)U5Ui5CM1efBSy2Ft7NQ~hAM_|;@BQk+ zz5nhypeA@V$tU7{d%*9#n@zzp@k9Mr5NlEfeh?GMYWv=2JCq<{ZucESco5>=WHP<$ zjCTLZ&uI4oPLc2SN7C*+_eRj}rI&0NyC{Y-X`F2``_v(LKVAf@!CR~=)RT+kH{_9=EErjzu)-Gg_? 
zFZOGR{O~;{wi>I6_gh)d5Q_7Gb>6_AzO8l`VMR1+=FI zyIZR1icD2{s1G~q$EI@Googz*8c#7+?r~3}HdNHnncQRp-g1IXRQQkQ-E#jM6{Y_wlpDz1QkI+*Xv14DgZ8&8t570g<$g~O*>EEr z)d#df1?k!n0LR%!+8sStsFG$C6;!i#l0%|w7Q)ka$v4T00=bv)Q?V=yWZp=touQCx zLfGJ5c2R3_G7b{penxN6V8L6IEd=$Pat?PIS7#8?*IH>N`lccDo*wzGzf@xOv!Zzw z>jlO=Kx}Ib5igIB%t_TAPg2(YAnZps8N}SZo^GP|x#J{-cMjcB@wrS+X%$74#v2?y zmjSDs+?bcfI(?@+eMa;}B|gp(`eB%!*Gvfz?h{IY^dF*?3Ttj)c^|uEEy?$dlAY2v zsafuMER6qq(wu;Q;SO(;wqQT+kdGW?TQ%#c`iQHnnHpZ8bYr$t`phcTWDwllO`3E^ zX8VWf9*K8o*kPtQHJ$>+Y zZN;aWMR<Zvxe~NJ)8o|p#CVu#O$zZxVa={Kp+!0YC zRnEp)5aWi4aiHVvbUuj;7#oXSm=$EZVR(Q%|;3=uk>eRz} z&xnm?0B!QapF|#$420e=0hoQEg!O>g=}%b>&vbHW^AB^$?Es**cuigSF@$CHTc$38 z%JQ(VX~KqcD~#YssaHU{i=QR+vHC4+Y#kT@Lwf|+NmH=ZDg~nNZ7|scVxQ;sbeH-| zFXY=@{UNpi!nAc9YgISDF3=X~EPGbKyZJ(=)DHU;0Qhrl(l%UbR;h=O=j)M~@3#Os z3dq0@{f{{OJsyTE=nykOe*NG4q`mZGl!_`d_;O>)2sCKNS6j~<A`bv{xqf z6ss*&RXp~MTF+ZP<~kJGs}%PIctfNSZ?{{eUS0En2DbW4SV`2)==r|?mr%)fZBhfT zq=_my+9dDz2rIeJ6t9XON|nf+-$>l~`rLZsgPIEnQYF%kT#v=X@sUd3z7i`BY#``! z81O}#@2}62EY$Qq@VCi#lp$EZ8#%l3-_`(1ayMorSL5Swn>-G+&INqKEn*Q&s;P_Y ze%Odm!}76j^8HKG6^GH%Xoo@!j>Hcf#$k;_4}OGm&ECl9MPHiehsEXBFZ!3Fs8evt zr^tM6urlVz?fYQDBs!$uMBnlZv1l|*rw`cSH09SpD8lgU&LQ8jkW6RMaGNT5H#%E> z>l-}H)_ls8*AW+h>)sKS)XQ<@`0hr=WrgTUvP!?%e8r1o@4aZo`MA&iie38ImNV0` ze2}B)Ut;|?u-A;v5EHjclWg)OD4~I`-g>}=wp)QBI_nf!aI)ntv@C@Z?hP}_0l#=x zmE^X+M6C2UU_tEw6p)aEy~ZY@-f;yX`*FPaij8=PX4KR#9Vo=JlNJ9lMDf`d04cRX^iNy>$S2Y5-{GF% zw=d$~5FwTKAXd5y3S<^{t$V++NsZV#QL=wJylamNz(6xZX+mNeyxgTU`j#zHO9Hqz zkLp4dsairT(5de^CFn`%_7xj(Ol9PcL&tqjm(0k*24P+AiI`BvL;r&tNqs~yi})j{ zkjY2Q@%BfcqPsBm?Ke;{82f0x&tu4k^*u`RJnr*61*D~v>`RKz_^{h!A)wHBmUMQgyxNu|Ec^! 
zGjYFc(y~{KRgI_ctnph~d!m1IAxy$+ke%!wT@H`^>G9qgqN`-jNeV#H{Y(g`|2|3j zk`~f8QPb@~EWvj-@)ejeK%hwJ&FM?mITCk}!H5H*^e&sl!A7XVtG!LIf1P5?_nq=%ClK@?Hh=<}iNb{dp@2-n zBac1mi7MoOg6dbuMLS2UE4pGW;h7Up;TA#wtXls>Wr8pysT8)*dn<{X*5lPAnrm_w zw18FlKElMX1of1uP}K)d3Y~;6^N-0pQiuV3_E%iR-^4GY?x(aX#A|N|(+FWZ#K<dEj0XyY46IpQYGyE9UWTOCO1FzR%hYvL@8JxbP!%AL~ zhWyZKYKw8V%%Td8KhLfM?NCzBJ|VF4GQg0NM1Mi8cSSG3V?{RSwf<2h^2r?`+=L8g zUO@62bg_5I8zdz0+Z7}vu_e!?LtG?y8{WvAO7t=~>HJWIt`6+?ST-VcAwL_HYrpUf zl_t3_%Qt->6Ta{kl?ae|0%e}U&glTVw7ySwW)NS#{<9DnRuFxm<--rHY!fzaYCXR! z1zEh(W>M+)uansOJw<7^(`SW#-KxEJq@gK7=btcRPs;6^VK>(i(o^4VGuC@aKs~V@ zajNH0mdpK}%{KyYx5@n65mI?c{0?P@9$kWKfhVe)zT4*^zEtPNk5jQuW zvhl3KnVwI95!Kd~VB@+PD+O~uei zXqE9?24YNz0 zIUYgsQCm)#&3NzJ1Y*yj{1plWmEd|Fki@u;J4C++NVMT4`S^(xJB(~ol(u^jT<_|? z8p+ej6V1tjuyC$-1Krp5+}c=y56F=a{Wa`@qLBja75#rp@cP4`l7ZYEy%-8C3N(^8a;J zuWQ#--7ZxHsOoM1;FZjdsAQp9$-;0Y#s7^;O3Y5Avbk!{@{4(`oPK6K5$z>!`(9CC zH^h2RBBN6QZRkti^EO>a2K}}mpVF2KQ|+F$r_j2>F0z0bW&W`3$aoaQ3DL_IF(>81 zJCXOb&ohy@AX&;P{m?s!CwCb@%Ddi!6}vP&L&K8$^2`uRuGgI^OHRKJFX-hqh(;Ug z|J@F6zM;Q>E2)RN%PrCa2rR86qa|&VHe0(lwHwz<_|biq)Fd@)KICrp96YueT6Q5j z3_cIwaI8b(&EKdNeIOte?fLNR#0lPNde=g!xtDgwVkil+L{~ie23Z)E?fvD zNQJcK19)3y5XzdWdDT*0Z~iO@XgMS;R=a-qnzc(KMAe!9pxu96qSWS)+NG4javaW+ zS^MSBh9gnc;`C28;jtSYNN&yVyDdL=pL>%0&i)QX>SFVc(i5KGr`w#v4QvmjQ#NUb zG&~8O2E%s~@#@fWupIa|fnX!H7)A=o;Eq@ z1T^_Ay#17TBKL&HB6;z~Q&nJjrj+uRMKEON4`)M`r4AWgpZ#(>8?y7UkN$1|Xy0X( zsCi8JKL2_`CWuL_ zq@IgaVkiFWxZgb@#KqnlYTG~CNeqOwZM-?eNdM*_afg-EJ@}yIsCzb^#%^YWoh8)v zV2xJya0Jyo*fr`NY_vE1j=Bd&Cv^`BHr$26Z!WIxVMvs^2N$V(_%{^q9}!mfU|hMA zi3KRB#)AS=`7jApJ{a$PiMHA@HkZCWz&oJrfWXy081H?6lIDph_f^ZJQ~Q3L`>}g@ zR)8)SlzZHi<{y+8bwG^IAhXS3Ed6RsFRiAeJ92lp;ib$RCDA&rUWkxrMd4cgzK@Vw zbrXzwf_&A_ioz91Tn9y2!-^z0jiA+26P_Y}Nd3l{e`Gv5so$`}juoMQ!)aXKE2jR9 zSCPu~57UmwW9o=33-S?}T!?BmmWGsYObF6?P*SG^)zMf!i+VCjM0lL4jKe|E(}Mt9 zK%>8L2cjF<`~p}=y}N&JxOev+3H9#hKL}B3I>ILvo#j*L;>F<+xaoDtQur~u$qA^0 zUi4ibgr+1oQF0S+k0s#MW`H?(X`jz^6FpKtQL+E1@v&lC%U*4s?de38cBh 
zhn8D`L`Q)np#DUD`jgw@NfmP~209){qGMqng?Nk77%dk`x{Vx(c$!TbHbAVbC!3G! zb#4DR#Z}OR5#Wdw%9KGk0^@*+f96Ou`5_jbKN;^?QM| zfjj*vD<&b*UWho&?6(A=W|NuDgSNBlkp13Im$?JlMK>-GepZR!xzR?wi< zC2RjN{YW)VZz*LH#F2A`uqn|Ur$o|bE+5Ge5=5LUXH!SGO@5aSZ*}0Sr_EZYvsP=> zEya_!Cw$m-JL1DX$RFm0gx1a@-ex_bl8q?nx?LWDFH%!58TM!W&sy(`hdK zKDQx4mp%T}`5wZrT*y4RwZLN#OSfp`ad|H+OA!JiYMg zAAt=1scY2#b1qv$EBCv*YuGYai){I^!^|$rPPWdo<=tDM#oooeVBG3GmrM8Kxivd! zF1I<*_5&)6?9rs&dvkwd_0`-=hh!fzI|Noqux`YD!%dSZX}6_kkk&R^dOAvIF-9Wx zgq$1_1JTSqj(54NIOwDrRXUt?P#(V%mUJRfSgaR`%ZM^OyEvh>Q_ACnRvss`=0QhK zZXrUex0ZXAFKv+*J_5!Wr+o?Kh9$W7t@~3?}+*JMYr+WG9kD(_O?j4 z_v9-PBixa|;NIo8F}Qcx`Mu!DlPBlj4_NZ094yQpr#&?u{iX&o$fgZAvr_c8A*eBa zL%RaZdC&We`rUIPAmi`IxMl6nmk#ISDbv?v3*KhD7p;%j*#(+>DJhX|I(#5#ZaQp5 zIkJBWnwt*0LpL3a{#D36VYeOfr5del<%DgjIN7*Jak5^UwYMGWNt`UfDfMbXgJsuQ zrMoZ;U~>Qakx5|n-9;W#&;<~@fAvf3GkLmm`tP>tdZb%*XY(49v9c!PJ*dMKFS$41 z?H%Jiy-``+D%9tZbx>;V{|M^x@U22uT2)63i2iFf&^L6o`AVw-I;P#`eOlpf;yrR{ z^aSbSy(e=g0gm~kKo(RAb0^JSdVc>&fME`woiEt}rxB+c?IKya4^&yYK25mBCrg0S zHTNH%;mgaA^3tJ7qXzi$21HaQFB9g_Kq)UHC(rD;M9LeGlb7zfz(&TIi4@u?TYpj% zj(u;JKYAToh%Hb)$d!%vQ) zu0nS-Lkqy?v5g&JUJuDBrk?W+2{jO0e(oUPq$9n@6Fer#TS1$Wr^~8;;!_kmNXrWP zAto#6(l3$kbCBsWzIbCuFgl8$i=-V~65Y*!MS$blMTd~wGU#ZqKbN0+Syq&8Ez&Bk z5W8M2pvu9zvU!J?%4V=e8*`c?Rx zTW@?Q065?zHH~qlmmsre1(uQeIiSJB2_XncV1?_e5s6(Y6B{}`l7pGn*MH{~|;-qUwC8fWgd;b#>8Y&H&~Hs860oA<^% zHmMRd+1PReWr=|G*!l+my`E8)wqXa9W@IwFfom<)T4OLhn9&<0=y6Z|SV z%V+p#3n-i#L)I=Wg2rlj4&ecvAtk!cW6VR;noXY)>y-dI91F%EqF!GYeg1_AnOwRD zP*YTsegV#+wR>njp2=ptKQEzbt8q+=)DYBfU~@w=-XXl&ds6Y7(^xrx?PY>IrAbkI zUgQr3yZG+I&yqwgeamff8`x7YAuAAZCk<)13z|?ZX(mmiK{SOqY# zjg`{rUKEL+?pq189i9(_=lyJ1tACN#Vz1A=4RAA&km1aHM9BfK9HsR-F+GNd=+M-G$Fu!id`xm zgoFD{JuKYN^)?nMKz(r*zfFYVWZM)ab>nB+v2Odz`yEPby286S31{c>(ULCfATq4! 
zR<1$4LjY*+xl9d6_g5I1RJGp|l#-PX=(2#kbdPsd5*;v%F*u6?D3T=ieou%=WrRo^ zJl$uPnSk(E@p+WAK_`8qpKZ**3Ox3*!1PU=MVT8-3C5nVOh=lGJw+25!!^TNwwPmR zIAL=Wx(4gHPyU6*P@_PFWK+kR@_l(}`My!+@g&af&r3tm27jLH@a4&JAe1OGk|t$3 zp4PVsF?2jB?+{5cqswXaLwRDRZD45&PMX4+LOZBkTPP5*DgE^WK^AB?U-jqyE= zCIQkKO(JyYNbFGT%X;Y3VdiN4IVk%s)q78NU33OglttU5q}4<}NZaHqiEG@~_@EZe~ebTw!)vjFriZM$bkExrL8 zNj8WG^m*w88VwnR3`#)SR-LB4&wNL9x~E>P(+h7wogmgGZLF!R=37FsR=&k*uED`H zA^;`%{7Teew$28%oE|!XmRrgV^h7I8p9jyh=umDA=EY5 z@-WtcF9uXPdk&KBbT=ZI1-0`apaC}tQ8titNT;3frOCO)u*Efdx579k=ZFSRdXAXn z0kD+pn~<0!dK=*Xt6}Mn!QMHpB;znw63o}25#wMBjU!wJpDRhT6L07y(6#d6^{}Fq zZYW-sRL}1+Ti5HnC-f^^?%n{u03hE0qzsU{;9P=EPwxqxXGG;mVyhV+t`C;_hbcpG zIW3$WydDWUwxa0MqKU!&ReP+)2kV2Qylq`zv!NWo=>gfela#L-?`)!c_-JkXmU{c*_tep9-GED!*eM4)G!bi9#1rqT5#QVp;sr5PpX~mT*+E+c zNulezcPFJ~sKtAZ!HFtDz4aEk_;O^7`{vK~y}XVl_RAkKuZ4N7$SrwUCigh|B#>;w zEhhg&9qzx0zoHl)Qr`ZF3Hag2N&K*ZJ|yCYujoU7KIrj7GktiUK6Jqk@6m@Q`am?( z%O~-QIcnBjeK&HKE8oRH>|D<|N@~*{oW4zR;U$Vw_x>kRllGu>7=h#o8|6b%0AZp)@RTOZ;9%vQPiV>{G z+C^!%PiI3RMyu2S???;Yi%qQB<2i>VvH9-)h;p?miS=1~f_a#~H8DWxZBjFfMUc6F zlhyc2lQnU>?_Nw|>tF8|6{#LRV(i@d((aH;hc$8YvBSZ0Am7(0QWItkAFaoZK(_2F zSyCIjNKq`=FJnKsuiBVxKz$&8XV@QI2sqn`28$Y`hw1nY`mOFXE2CN1eo<5Zx% z-(z69W~H%a(#R*1(&|wea^v6V04YuGwurR*S{T!L>E#$6pKp{YRenUx`9XR#zz96M zbR1gSO=iY2Az z`)8yZ9C!kpSMXB6G2v-=0SogDcd-Xags*q-7hg$% z!{^?eW>svvO`?B{MecEBJe_5gB+d%tBr9edWHZi1a#HX#tbj*=N0<6YMU91`Dp-iC zz^YUrbaD7<@Jpl}9g@xC%t9;Y69PcH(SlK2NJ#_`h@rmjN@Dr_CjFR?iqS%{sVUf_e?8v+K6cpZ?~!Ot zT#Yjyk{zJfn;|cBCADFfI+s&*IfWZn(<|!6)v;cPV>3H6ab@iV*U4B#KdK0Th0mt%xh^x_85P+O5q}whg6R_2)G}>}k7)saC^^VsGwyhSW{Mnljww|6* z*>=1(B&QNVQ(8m1?Fw;Q`l^-#Xh5HkS!vU%w)?h~pObBuZ~VGL`9m7)I-gO%xH%aG)p5a zA_AGjc4>tT)|ZVm-m$wNnM&$b63dpiSnkjP08-Fsx6?100HQRr<>mC{&xU`Y3WMyk zS(wjW$R1#MIeiWny*PbhLB7w~j4-GfFSW=%!d38M$IU2gPIY&L0_ z1@|}v#!i2a01#~XlFgLeo@cDLHIbE$eo`f_zpOnumAHjFC2N}T!OhkjSFeTL9Fka~ zq#CvfA$2_QY{goofR%VKN37Ltj{5Jq&^9|Hfa;N89b{(X#@558TiP+irrY=}E_is( 
z_S}HAgD;=sKqr1;)7?3sVrj=P!QD;%cXvV9VuNQ?00MCXzi_l+4!)!P>C))!z{JpPI*n5n0p*xU9P1dB;Vn3`0zVs zap+P~S+Bo_v@<7fDipYMSu6#`A3q`dj=gt_YVZBKzBJ{Rnbm7Au$$cqvqK8tLWCVG zIa@4Z+ZhU3T4h8j8q1xdxq6~{&jx{Ouzbh2VvA-#=XyPu&ss-kL!lea2g7;_S>ru zLxfBlnZ&b6zv9Zh)+SZT#O5XBU%C(1tK_2%y??VUx7lVK-kiA2p4e>71vKE1M$~ja zAeaOh4S=1#KkQ&9dmruFPJB^&mEtz6wxwSLdwA07o6|2s8)Mz!tEUTmH3z%GH#2_g ziMEQe_Q+3-1A5Gso)PTob*5)17z`O_!_GQdT7$TU^a`RPxrZ0&jjtRErugkj|HH;t z+KsQYB_0A;m0fkvoi6PN*`$x8BrbZ4^{6YTClt<7Jr9bn{*Dgt;KLBxpzpx5A+yUj zBYiL)mm>jXOHT(R+9~~nsYIK!-=6zLX;=HoZ;e$ZI+&m*8?PkTq7GOEgxUt3x{B zThGNvf!T6f+{v~a=pE0QRytd(N^8SK3c0cHzsYP8_v2(={DUXG;}Ki z?o@mA*EruqYMWC!YJ(0-M+=`Q`zWmNGNH z#Uwy&(v0*WfQzX&A9t}$dGHom0#@mLd*XMZ&-sC1^)@P=PsmQH-9K5Ub_k7v4c7)% zYzSsI*alE*fK9Tc+w6&-*`x_D060@=QrQyawoM&CrVrVcEw)_jrQJAw&`xT!UHX{X8+v3P@1jn>r+6}@_8siyA*aB12_xt!lOT7kPh$p9n2t#$7jZ82TzMw@G zqzN6Uw83ZV$gbMsxt?m3_=z%XfKrKo2QUOHU2TY9X%vQ#qIeNv1@#!% zP4Obmq1ZZx#FuM|t(P_p@8GlD;n!4Ekjl6c#!K+FCbGl&VV?rLT_RHVh~95H0H-&L z-Ubt0w!-bCqP#@#%)+SWVAN3%=@ekOhLAxlvyysl11^mzP@_D+V;WF@r@I>_bmMhK z+J8U$1VMaqNXGrU%>qe2rEsx^cTXq^9r(qRD~+)4iRU%|(R9Pob;OZ?@jz_GDV1ge z;&PN=hGaWBIJXj)zNfoV`H}kRvPxX=R_~0Xg9U5F(xWgIdih#}a$9UiEr8@sBU)I* z8o$#Kt}#b6Cp9rV0Twj?j$G%ROX(0C&Ks`!r6B;Dpjv;)u(htEPNh?6~s_yK^3 zq(&klqK}36^*?B@ian_RIsF5O`blW)Y3bInvtBpN;wvRl8ylHpND#gl&r1 ztAPUh!5QWqn6V)%&x3|!9rX`4tS4!Di)Oii2#Yo*k_A zY=1G=P95vQj@_enY!B+#E~2l;F=Y#$OToJO*rBop@Di+jrGm*UnbdliT)qJD?zhXe z=My%Ghqt-M-NX?6>-|^>ZWl^;5MQ21C%#wME>wg~D7AC$S;)j9)b|G$q44xL*x&+o zRB49VJCN5qBHOmj>P|uC8NMnbGlFL-0X7fVC!20q#)#DrDTp5{6=)%(+`t|NzZg6OfsKnxmC<%0JjfN#?cf;{df zAiWxgo6#7PcV@b<5sf&>&G#|Zq_b$aIc$w--Cv@zf17Mv!7XF`Sh(nmTX!m8QNkocZ#YDE1n!2j>@dDjAzzZ`bS!0iA;oN zv|(c`Evhk=J~bL+EXEr2$r#H&;u~XWTchet4V_(k>s|6@tge5ISzgwvkacxAum-am zlcqVK|Hqo}{P+{LHN=o@jjL7L8s?B~jYm*F@@dU$?ELST*BG)UhIx(Drvz?ZH}|~K5$Vos^3w)*#?^C*|xdMPkNx6&h zRh_)%bCSn(;5tdcNj~xc%&HlY0hIr60rXC!3&-5Z>L;7%J{-$uEa~UKK&#)SlLyXc zXV_v$u~Ia{B!b43&sfJvVboF!Tgg^Y%FPPcv-UWp3+>XV!qDA)Sguxofey7Xq5eyU 
zG=BClDXC^y-=zC(iF@t2yGy@CRSWXf2eI;#O{mMamGsC+O%U-e<1wRD?O$FNL&kR5r2m0@Eh_sa`C8OniVf~6=B!1bA$K>cQg6@QzSPeU zDg)_2N^nYh$ojZb+Uh+H)EGOwhE22rh3RZL!;(CDG^^eqRkF$lU~HFJB_FGGGuHY( zs`ZVIA|I>uquN@lI?(eYb)e^y4s;){wWBCNwGQb(?_rgO8FKDIRL*^-m6dH(0zkYh z^=6^&gUvDT5TXpabhsZ=f{Qv=(Wv?*4yjo_Q(@zOu5~@b5QrE2_AI826ssY5u@Kav zxqKN@D9bcxQsbtHOr5Or537?EnL61>4Vpcg$i76V4stG4z|)r%j=D3KdR3#pQ4Hxi zUGV#`9txFj(>0{wdEJ{xbLH7KB@n&Z#F4OF@`#fissM0fOZ5+u74W5f z(9cHyb^^Y+ViB4-&NGR9cO6Y?(z(w=+1pgH{;MQ0aa5Z8&~Hpb>5~+Dl!4YL#dfYr zYO)>;=-dmP@ZI5U?Qr(p<$!cfNap}FSVCQNVwzLxwcFu6sdJyJ`N&Y@RY|)Yxtrav z)#y-6&1$T-q$62!R=#9#_yHI6r|SOFp~$bjr6~RpI$xsHUCK+!ZECw7h8r)i>HTAL zzU~et`}cct!XX^V5@J}R8WO~fz%e2GGc#FDhR?_;3ka3G7YAA!fQo?*&xbJY-%)3! z;Qf0#VS0Z-Y^U*Vlw8GM3_UD(Ou_Cq&@~$QIW&Xh8*bwGQGrHOCq903=+y{B#~csQ zS@?Pce)Q%aOm@TT(fE-$9)8s8_5T1r`tr4y_|b27Yw)8V@BSnB(N>71@=j?0L)N~T zOq73=nVpB%!5fpOrt2-8&w~#GdN0ab*K@(wL87#rn!biL{f5r1I2Z4wRVmQ)xK?Ar zzcz{6cli%(&L5;w|JQ8Jqr`QD&G{vST%cSBU3Yy1WyT`S&fiK0VDK=fPKY!+zp{?x zg`>>QUsor@FgyP(^sfBro1y!xJ?f(GvqqYomxci}sVDdkFcze}`|N2tF;xO1b9k>w*zC^r3%YJi$#W~@>4O*2LEwb`6HszZTIcZ%#V5EMsR zJI9-w;O^$lP4e`&0>ozc%y?1>C6Bxl7c;;{SYas@t1m@#t1*$ym^{@)g{Ds4oCJ1O z0~zKddD5UsnBi6@>7g4-r0Y%;DD*8csrhJdj`;UjQS=Un(E%_t%?2PJHvIkpJKl`I zZAkuyzk^hxtfU)mOfV`#-UWR|BAa&M2nuZ;G4{+i)#;E35xk3g6_=fh*-2v|xBksl zdzQQ9V$27K`x_v5McIsee@R*v;QW)4tWuZU=H(d<=@xT{SCF|FC`#1ovs0dWf|MHvb{=iU&8feWXL08)k7KHtE~ja-8-`v*FqHMLTq%uFmabh&BPm-1G`{l4K@0R z6Xp3hiirBhkY*U~;+$qw!! zi@WEv#m$dboJOv|!FRCjIL=w}mE{cm8G>edNhc3dEYN+<^vpUPCd)9IAjsG=8MJqGH=FYYAVky@%z^_2@`+k6?ksh+om~6@Tml>ILVMsi1 z>#uzFCDP=p(?NfYGV$AZ(c>MfOO|2_j!;*WV^>&lb2%aZayB*NO!daS4Jox}6vJ19!?gDqX3pBPQEEU-IG=YE+JL$g0TD@C- zdIg(6LvcDJsS^lWk5eed1Ug$jYcWegGYBR6gWDr#P=sB;m~$v!YPVFoaSol}L;p3j z&a%2hokQ=XQnSyqRkIGsM-uxZZ2lFj`4eMKq&&)vXyUz=)owPC%A)e3uB5!aikeMt zM|7pT^K9b!Vi11sbDxXA`)ag=OZ<0!WP5^yo*gE+=`38$c*cs`#Oief4jcw~-X_#c z!1e^yUSU_7m^#J}eDlTVxp~EjKR!1@`#~mN4TeP;vJZ)dT#cmX?o;`D!tOoxqYd5M zeMm@Si*q zKAB7};yexBm3h>bUf3&CFLxh6ND851^)mG9r|CL8UX|^Xx#xO}+s%Ce4s2nkT7Jv! 
z%$qxBm0)BSGR26hzMINCcgh0aQ;m8=9oOU z(fmQ#>%nA78HfZ(QC}!~fX+Ayh>jxmU7DWh9`-894KH!2XaTO(;DD?>a?5NsUCjJj zFPdd%m#T|2k!=fkrlX0(C(JfJ7zovg%^lln(d_hDP&hB*puO?{Q9#xE2*w}IQf$@7 zWRZ=!0qC+eEKxp(=+I^$XVMr4m{kA4cslzB9F@mOeVkt2`U)|K(@?p3|Ka9$*y-!D z=|oozl@kG*y!$Jv>1Z=$ld}=kfEZV9M$EL=3iU?#unpr@x(dOMxT@{BgLAq+?BEnM%MEd4?&RE=XLaEUmK6@jL$ z`WK*zEUVWsjWTCArROn~6IO-Gj@CXdf)2A}Z$#~^9+P~{Bm!8;D}lWF!i@qFw(5hH z)i{T}za|*hRFhUD{imM;@sP9@k8VY2WqLaY-UVne&*elREV=_;M}nrK4K``G$%;0q zq)HrBR6#E0?pT-~dhZ4AA1_RGNQ`?UZEhb4ghRcU+l+Pi6xlas-rx7IZeg9ihR<5o3N6Ts?%78Ls+0Y3)PA2*3 zr7=aF7-r zWV6HWQz+-Pbto3&8#sXWQ;ys(tN=jhhT|D>;1<&IgvV;F0v)Rnz|bDsca|R{nI4>< zX^}!bB>-shaZ^Bv9(&e7YxOVk{^JB}l7Sq#7(j&l;x8Q5j7SryJ7U6`sV~y`2!k~v z%m`*kwk-n8j4{(y!<^?eV5ZNL8Zcwa$>;=UoJ=BOzzH^=@kUH=49p^(+mSDwa{B8S zpiwEID^*H}B(5)+7ovp5A3#7ulu*X)4A3+YpixjXc#J{5{|N@aFKP1-s397uK<;}u z290ESLx+GTK&v)+O9et1*ezhMU(dh{Y{z6&=qZ%g$$p-|%$f48u{1TiuG~)wFx-g_ z>5DC}_;d`)yuwbE@f1_)u@)wZEgwb1JMycJ8dsubP*^tJ)?PSo5r1ThC|y_mAhl%m z1ipsU^^-5{$minzyW}2vwg$WFXm;9YIcL*y{)0IT)Uh14BJ*1M+3NjH=bi{4rRj7a zB_fA)2`q=Zh>HNr7tjY|KAl)l_~$*Sdu9UGY!Te&*#TWo1xnqCf(;a*texRd*k=OF zSI8RWgIt8C!w_zNoQ-i+0GB&9(^$0!7q`-YiUm^DF>AXA&yg@C*-*+iFh0QKh(+FA zUE~m+pSuOu;4y0&E_UOCfYBM{tEUhP1(zaWBHs5)BG+Lj8b+hL7izMa0QzBD^|R?X zKS)sv>ct`+RX)n4BG|8>SX+SFYxb2l$<2q^Iv{(4fV$v*3R5^)-rr7&0cN+OxDm=6 zb8Yy&NSMrSV&Yy-XiU_WlLT9HtcB#lw&(ZW>Oejd z<1soDx66wjMF@j!YP(e7S~Q^H=SUKg{V?9;O5o9BCwvbePaxPwl1RgR_%pI4f_h4( z;d>P#q8ifL5Y|ttDV`f?c=pPN9#l6s&u+-1mpfmSf|!ZD{>KDM#rKfMu5eBJ zXlv>@kJs{r2Sc^o9ZUfbjKbg_%VYkjD72HEUbj)K`}5GbYS#iNvb^6dUqss_lMxL_W~r93*BVuLvseD%flwdp zU~CF2@5vm@Z1~ z1Q(vMXBR+dN1K8J>!}&UCEBUv{gyXUdzm|K$l+S^wVL#jo|~c$zJe%*t#ZC z78SuNWO^3V&0#h{4PxIhpn}2*#sB8mdzUaD4_dqFPxX7n_YwWRjQ9J9tUdC_b3#3D zhYmt^P^j0HfZE@WI$<HU~@ZymYK9$bq^ zd}r+!y_?hQRa=cKkzZp)^#L=j-V-`w)k=6jmOy4Y7a(`EvL5MJdE;lKarI`WG}$0E z*re@tX;>ked$QKcdej3lriyHrqNUf7K+f*AD)mxQP2Z&Z?72;)-=LYdY#6WLxjgT) zGDd8hnS7wpP{jSn@b(moL~%JF2yozjg4G6yI#Ow2Bgx7n-qEKj#e^^aPQofL~c z^Zu&1VVBGUAfD8c50&jl!aLiRe%@I)L?iL-%>BT9CS^_7%rJrW#T=(J&EPN7L4aD+ 
zMyo&RDqa?Bx-V#p+Qf=l_E7M%I$iL-$h(grY=clqY=Bd0vok$>S$kCT3b^&=-_1$n z^@xe&Ho7M`q={+LHfAltdxH28HDu9O5&b1;Knt@&K~Z}s*n29G!o)}UB6KKt{w?fK z&~t5s2?$%H2?(r3V^zomL^(47(Kea1Lh!T1U%#T2W>uFO^5I6CnBt_+El5j9RIk&0 zrhPbF{Uc5C@_8MKH#Y&TREp9DWa4{n5OX)-gBuOP1cx3S;Y#w{z_cxmpu38LLc;Ia zN^08!3~nRW-ToXbo?dG5?pCbNPI(R2RT<-!DnDu*5TvL5m5K; zD1$s5;~?vLJSi0ZOEC;ef1T%>(A`0e-XDiS9#p`%#{mCKN_R@7(eT61Cs5Ze{#IbB z+@?iCkLe%&7YVymxX+SOx}1|B$W{OD;I^(k2KoJE+?po0OG+n0csjEGepRsf_Nz$J zIl-d}HfMOkx@98tp4~|nAz|KMf?Hrwi}$$V?koC7>6on2PWf_$%^)cHY|*d(z7eAh zS&K=6Ge`^_Czm)`Wy;fP_9ct{5eDAd>bVYf%d&G}D=?sRwl6_G3lb@b*ysEtAwvBT z-Pg2H+GF~aU$7oQ);@H~;Z*39l5aJg^2*Z4PAO1QKfF%}UFy`xEckCaPWr04%MVOK z@*gERi<&T%QqLN_OLnkxFuHKDu_#hR;6u?r)+lg&9QC#_&2D>Jm8QhzW6gb=_nw5p5_kKDDN*&ik&1m7 z`@QHm)~`GC`vSZ)Rn%t#29;5bcHsrW=7|F7j`HUjI@z8#g~iv&`jY-F96O(`(Q05a z8~V-c1In|@qZ!|ORZsl1ig@w9_lWJybd-0Nz|o%bm~jzpAQ3{}SW{OVVpO~@sTEal>~QE zy8;s_r+l{i5_&f?Q++VG^GyOXg0V}!|7u9ng?mbtIe zVZchGNps&~lg+o-+-Ob5dc$i5ycW=LQK^@%SKrO_(Rk5w`rbm(Mw{=gCgy#_KH_$F zO#ks`d;vr2>}Nsi-v3H%T^F@=lBJNeexaEYsP^-xP@y0+*J7~wmbl~`OatZgr1fBRl+BKITqiBtj|YgD^# zgyER}rhVbA-_{el-qEUdy;L62^}e|--`okl7nypYxvY<`{hIgjy(os`dmKAW?y{tF zcicMt_qrqcc5*CxI-0quZjQWTNGp;lcE@~O@kcu5n0|CixG&x{L0=60QtgWsi+;Z^ z`hUgy;;wtbeQ|Pe=f0R!_y_xfx*FiwbSe}Wd z5z5TTx*mutkqCh3U1Ad4XUS(~LK}1POm2eCt=-kFcFSY5)$Y1o-D=;tU8Da~t9lM; z=A&Yc54+LbO{&BJTc>kh1GL3e7ho+c#s{~EIqzY(2ZdQ_c%gYfO4FK^NB1Na6AEx1 z5T&Qco-Powz3FfpBI@NHQ$zBe+hyfu^rvNr-fw<`NZwz2F#YIz=W!+4bf1tSjWLPG z>Z$$bF=WeXKx8W=Bq+V28n0L$^`wb@COE2*s%^V?1@-|pe#4{G8};+l-gspm^+wyY zd8gAK>DP~;+kE?Aph6exqldMZk60=?cS@**=TqgQ?$5`_UpgUuM!Vc>l3>-Z58#|g zMxl+AO{7E&b%0F?;V_+aQ-<+HNL?0C>w0vv&fSeE-n@SbtBOWffD`3T_Jz%D+Ozgp zjc+8KDQ?u5+;k|LoyJ$TI=m+hrC)V2xw%cfkB2uzYpE@oDjAZPFI34%`T+2v*nw(Y`=qm-0LEo7DZ?m0{z!gsxVHAIkxCyBmW41}u=H~0pu zuG1;QlTM?CrxSl>F-qP_cbJ3OIF9{_X)kbJsqDmC%YgNJ3r#}Bd+bO;T7}EPtye7R z!7fS#-t16DnX8Vjc;CAOpyxR_=u#roYCMsgJv6 z?&|$2ktA|@F!+%C%qb-ZYx6{+=PV)vch!HLRDUu_{%&H25=?=B1W$6ivgUn0(Xx88 zMXKcs#o-0he6v*}nkU=J7&lby|jS=-=oaNbj5O$ 
zz!)iXJh@#)5`4>Wlh9U$=+lXWh3APyqm%qr#fL*O+#pR@{hK6_mV|C%QN8q_m(&qj zp%OcMUi>bfTgpz`VF>bQ0IsZfhx#6EQ~0s>6>B>5fK)}#Gz6meRugrD*bS9EV?I?| zjcd^QG>SA_fMc^FO>JH`);d_sdK3ok$#FoZ)CgP7B;vfHDrI@QKHMc|gwmj3cDO$( z7}9tgvXYJm@Ap3Qk-$30o#dUFBw*>?d3N7+3&GKtH|$|pvT2?;{V8eZ(1`{jKxVv= zNIK~JsfTfEege4zOCjFEba~|+io&($i_&WB5LD3M*6W~)y67rd-@KpQz~LUh27rJ+ z>*z84@Lgeg`i*43gRcIhgWuu@($PJit4GLFF`Di2%E=fZ&myraV!6}OJSn+r5QgH- zuEyX(jI?43TR~mLoDI}@g}eurpJbBAu8jyE__fQ$94dL)nBDN`gJwl?q&jbXN+~EjSzfQEA94FB{jP3SP!t zizJvz%MsSL!f$qRw~#<3rFcWDjI!lU&%&zWNGukI8}^(R>3KCDN7X-R>9 zg!25ah^~wv>nU_CVpKtHqvspwULLuGWMQ}Px0%9eXC<|LF18(3D8iqkE|`6a9%nVJ z_~EeK5d>eASI~AR-aK)tS};V3aJsfcb{_@z;;X{QheK+yRG(y8%VmbRJkEg2eotGc z`;Ia0vr3B#V2iSmu63)wBuUy;h4wq!bnfoCJKf31nw=Ls!@8j>c1{=T2J8^hgT6km zz$OLUNw8qITLv9e`^=L8jkUb4b4q2{UWc?3+uKELFY!UQ%aeXX;iwO+tlmh1AjO%u zs;uC2`nF=}kg`^{5dYy0nB*k}GJaP184xfl_j}Ipu5by} z`i7?R$_)s;t`q%F6)NhDBf9yw>D=EGLPbW7=}?#oxdUU+uF>@fLQ&R)*j8Jp$XmYP zD$?STWo>W$7GbYJT~9O8Ellr({Ki>0dZd%01<{A|yxS009&7NPBwQIF>@dKU$FD%D zw1Lp+^BFpwN9eREq=X7$U9lp}o?`p8lmN(#$@n0*GSzlNQrLbNU>E5y{zFl^V1-_; zqie=_>7K4Ypk>PEE)K1}57C|inHQahc7vw6M4zWX^eroN`aH8mNxyR&pypX0h(0^4 zgp*F`Fe;PVV_cc6!^2onx}VL>J)R?Yv*j>hfGO+AI9t9TUNq4Fe-lJ3Y8U-UzYsw7 zizYF*UhKU`?7Q2z5+w+cH3pSmbhYRoW)yQbFX|?4XlHCrBcKB9a^^UQ)W22qBg?vK zzj5Uokg759L!du{fLm-+QU@->zHVV+wgS=sq)i3mRuhpr)gXF{O@f27WxN5TV~IJ0 z62o};JZU?!^rZLL*0pnK8|q~v@JloNiMwz{_(!#K5~G)rx_&9bIi#^+WaZu!I1TeN z#jG9jg=4Wg(3=wUrUbnyL2vqtEmIpre}0<$8#5Aj-!!Cq;C;h1M($Kw%@Dzw8M?1d zs2E!PMOG%PNPt_5(>>i0zqV$sg54}5D}Yi}fJ41KoNpTv;Q@p$wLE%Ck@C`W8<+nW zaVdb4mBxoO7RsV{y`&Xe%(*cQcZR`D;ho`G0d|JIyVRYbcoyyqBLIt-mO(P!MeG92 zMj&%Im+DN(VyeM`!S7Y5aHhpZ>upgQN-|jMk0)7?aY^QSrJeHSlj->9Y>6~1R|S@z zLAq^w63|chr!Lk>P^bD!)(K?A&U2ZFB3!H{SMr)%8V!u)g(p;74%&R>{l|!XVMDP@ zsiaeNn@Q$&5>>Z8R9#|nTus&Wyr}L``O+$mDdpMdCc=`)+|J^cOs=5Zj8zNBv=Yye zv`2npVzNLJA!JaX%|)E{O3YjmCjedtgUPrAD7dGw-LVnzANi_$oROpuGMg!c?1~$4 z!Vh@8Hl)kYdmtk=LI3idDw8DYe5T?~V&)>BW1C!Z0WJaJYQl;D^tn|aqZZ@M2=clT zcWB0)ICT_+xjO+j0{C|)+iIgo5>|iJEQZz0bV;2f`fn_xytm6QEM=tST_JGB`$Nk! 
zV#B)#F9|T@w7pWCfZJ2Vy$@~N1)btnM8;*k3@L&}soA?N0j>Vw7x&rg65I#ntV0?~ z_k~d^*p9PbyAZ7Uhjt939PL?0<;IyfuyfN;6ezF2d%UZspERn#28lA9fc9nPHhI41 zb{8JP<&9C?hk|>>++RE+NyA2gXRG&klBXwODLXlZ^uE}kRE}a?Hb3J7=bY;v#Sr+x z3QWv09G3Z_bZ#4-;}K%Ovco!TZ!4u4sZ#u&Zfu?RqiM*1W|iXkmsQ%bDsMtnU~xaJ zu?1@^0#QSH`5jPS(YqwW;Qo=$EK(3@y&0u6rvTnFH^U*7WpcS4}`Z#qG)+SZ{;Vi@H2Z{=un8Q>VNG2vrrI5E@FtRu{B zScb3)l|W??iZQg))z+@CM18juigbUR`2BCEEHSxN`*_>sz1^`sWLAMR48h7) zS2c$Y#ju`hP&J(GXkJF5u?qF`No4d)BAxSzO;uZ$pG`~!W@l_XTmN^FoWf2AP=`ut z<|26U&NM**MP_@|x5j&!_}EVQUcil=*g7U|%gUJkJ7s(^j+K#vC@)m)daLrG!`q>A zKMO>A#k-tty1NreAF5C7LSCOX0#oD7ReP2bA(YjP5%$3}l&B?syQKCxSNB8=((}1U ze(nSuDeTOuJ#GVD`&ggGjGXykh+wJ+s0w+zJncs)=3z~X@bYUP%F7CXkGD(D+l6Ap zXmCylUfHP|(xMYJk0tPZJJW_x`94lDCf9V!lm>Q!{Z1-v*LM~$Hf4rvC_pnra}DP+ zQ|3>y@S6%$+vf6QWC)X#cqI$Fo`)64A5P}hNJ`{l7IrZYE0OCaYwUGu=D+qDP%A9~ zM8)KxhZ%Dn1|K;LvR|F*fCT`e!}$F`B4_Pi{X{x58Hy7>+2$|O;p9IfH2JNG?*|j4 zhRRu*6#zT5Z#iHtLArTJn!jMz7>0VdyeAR>AI=>;+kJsuLWxjlMIRtfb+Q^;;G-upM2v)b%}F|y+;gR*KE!?J2Xc1ICu3PpC!#iH~G zQ%y3=AQp`z&dsnSE3s`TQZQM1a1H0#Fm;!-_6s`{4joV{1;V2-qO^)J;sfqPUqO=Z zDHb{OacOpe$-T(7`dJzxYCe-r_f!ED;1Q19G##0n{j0i7_aQV ztk$gBE;e0ZdNA3ht7YqUmjebuNtH`cMg24rzrlOeam3x8pvtv1lH~hRv@7~4$_ai8 z(73Bjv8eRFsu z&-Zm~8=GV|&c@gz8{5vt8{4++iM6q9+qP{@Z0nuR_xC*Sf7Low_tvR9)7`hvHKS~j zEmk_p#Q_w;3B{#=$C9Qqh;=3d0y3AH?I}zyhF=2pz@uZvO#AST)rg#)TqO#E-GUZktEks3#g*}o>HTJp+s@7qb?bLaLKTX0eOF5=gWqk0lRL1gboy0HOcCPU8L|k~i zATF=e1?qsLdOga7;{M1RV#_hQTg3K1?NXLpx||N}ntx+>spP_*I~Vr*tND#@|L*=N zMk=AYImB#_we!Cc6y&k-P4qaJ1TX81uj*c@ftEfnJG`$ZbO1JCU4(Juo0N;X2hVux z)<61omqJmzM26}8wekW9-n7|y7upxmt7Q*RwW3`X@XiBGqZZ<9t$U~S*=TcLwxh$d zO58z3>36W!p;laP_e3n*ZNgPG@4F}(fc8~|)WB1fB+8c+yhiz|XTa-$!xj~;VSty+ z3>j%}@`w$M(HgVFD!+@+6?dtcPI|%PSh-=1nx9=^dLRdsvb9o=)IVj&rpp5EX|7)D z8r8-2ZbB@xl6lcpd9XF)=u`=m1eOUu(yDy`nk=C>D!&p-vLJ$C?Hsn=Lrp6;nH?Q1 zzg6i#douB)y{xm8SjW%%iz54}$}LK3bhOw{_!;RQs7`nt zteC#2Fb1cgEOvFgnK&h5ty~BLi3<{{I&m?zsmwJG}D-kuu%z!?5loiV`7UJme=Nmf-N3i3xv*04(|6@`QhFtOz2gF(U*HWiH{;u(QRAnK 
zWgSuhAo(v}J~}>jI>ObObJv*ugiS3!v~vC}Sg{(!coD(5){Z+gWy9}M`Fp0|@TDD> z173x!*(oaidcX`V1@R4iA-{FK_&?AVdef*)b9fMCfJlWcwx44|8MEUiHsn~!&CuDIE&TKnGRQPD7hu}2ggrY$&Uzf1@Rk}u40}z>G9cxBF(jw;PPHySrOPzj@uVmu!<%)WrjhpO<#BH}kVVE71;>8leV1Ot?Y_fejAcbrd(e)axIiukaltQt~ zFPI#CnK=$9>GxOHEG#w$pQx`$Awip|yQx1)9HsN@duR36*ruXugyfeaiO7wc?WMw= zc+a_FpO)_lNE9lE-G5sgw|yIN&M2y^n z)wc#S#}EgT?vhLWanC zxQZ_({UZD^=JFEkdNRTJ8}QCOmM4{N{ktEgrClp7)-0aYhwMXH^PR^_9MOLxY*?Q- zN;rzi(z0sQJ5vdj$!%rd$uI3-1exvwTrmp{(FKP>g@c!kx9_Mk0wr<`r}nd*ocSh| z`r?mLy^X#m*@Nk)78RYR@_h_r!PVxjCli`>wgr#8a1RXL!k{`u_h<01U3h5O!=7Qc za^*V0mqcS$>R5T>!FrfQ>#pZR7`&fCm*~}9(VT`uW!v_Z3xEFBLu+X)wwnQg*k~a2 z8;w-`aby$4DS&`W!-2;Cnr~+lP^`<*)*+Zlg^k-4o;h#&JbVU5EGs--R?Iuy8>sw+ zZAlzl_1j`NLAb-mWc&dxk|EwDTkl8$7hUL@%|zb}3jGQ|q)AuLw_nz`Jk4lU{qh<$Q1L z@}vSPbJ6+sOrPn&0ad;oQA)=SL4wq~NxuZowP&(7rez^2x)2U+T^*gA3q*$7s+{l%-%sga@lHiO| zZ9&S8Tyk=10ZE{Tc@>Rx94z2U0jzm)AOzeh9Y#{W(dhdU?XUbIg$(7#V{WVweKcYN z9IgTy%1}0r$sb9*UhgKFklYBT?(tC}@7^UI?DxA0KA(nhzP|-j=sLY&jLAx zew_n-)EXu%PUlj6OSctLoaA2HZs#s-v^8BhV6-J)?|f;JSt%Cd-}$66_DuUj&(nUw z(|&&7yQFCaX}AiF@dGYb?~jY*6T0c*Zb%)`EBnEDVd&Iy%H8CO0)PO%63t>TdO>3C z&S;5Pc^kCL6|sP?tWYh+bb&9WV8JKd99R^aP>vr+5rjH29@=rd827u?!wXUNS$kdA-$dZK#=-6VBHr`0b~Mfp|CyC` zJ=1%?OeouT8h#}_+>J15RenLj`Qh?M*4DOH!e8tS99*=*LR}um!imaEKT|q!s*zMv z!jyo2r2F1Pw$?z|2*cA9!QIM_uZLiI2oh2KI*t5(z}A-a#gkIDQ&@77D;zI_RtU+m z5`kPz{fI3pe+%yW6{pTLCuzGo^wbanNbb~2Doj(usH*k}Z)B`z$Vk9;GWG9Dddz4K zP{!aoMb8)d73@lB?*$zk`Qv)psca+uNdVkSq8Amc10l_*DBj%)j10 zAZ>7lJltCDu~L34UKOThUFD)d+ah}m*^11|+!m)GI5|kWc3{TN-JEo9gns9x@FRAR z+j(i$e6(t^kugahL;MA`-1U-F%RepXqL%N&-X0g$$Pku8a@{dvEVP*;mu;5CsdGR5 zS+4HxH%fZ)_MtM26uep;jIdP~vw*aU+xzA?#|Mm2j`eKh!|g(qWZ8PaaNv4K>0Rfl zw?EW{!U(=QwrzL`r3V%74mP&_f-&6uz{}D2x2?(u-}hjJff%V%CiQ>4^~~xR zUXLV!n>XlJG`)y)k;7U}Gu-M%W14P>YH`l#@0PQ6>ST}3hi@xKG>Qnw`Nu`Bz-W4> z0G|$mk>vgv=o(C6T%-QwGfkXKvJ=M8VrNR%h{`tq-LMZI<<&ZZSs}6J!h(jHUp~kS zH5bZ6e%->^7|)g;iP0%u6pO5fT5j7SnEQu5oDm<7dQj}J!ko5bh2{;Evl4GZ{|GBX 
z2zTts?pwbvKN)s3L{nqae${7>E&6Bipqh+PBFe@@LWS7pO|6kQWeJY}@;ll6?ShN~b?2G1sS-4UufSMlNpf9WEl$|6grQG%i5f86&&9qP@n=TcZ zb(&4E+e!li%IH~Fj*r(LmYh20WFsLw*e{$2F&^2a`gD@vP}t-1rLXw65Rb8=cy#mg z`Di_6K3cM2+eHQ^kn`MzC$)2-7yVCSKZa-)Wgx;COqs^YQ3l<;a|shoNgm?$!CfQj z5L}NUOs^ZsFql-VyrO=UZ)MgM#%ICDvPtl{NSMk5t7`r#*Z7Hq@&`!#=xv zd;TFHSws3r^r~p(y~-;XJDp^Hfts)x^3La8oivLEWpdGW*gSZ;u&xp7CDzFjAX4A) zd8*XE0V*n0uk7DNrUnq*F$w2$gNMPDuIC9yYa&HhP~wQ#Afho~52WlLp}sSAF#_1nKU4jo3{ zuHW@^eele9BNw0%j=8dzutt$fxxy@zp{*ctZcIQhG4P8$3R)&C94F_yW#u$%^#2mym8+iKgIV)^bRP`R_@W0)n{Iyh*Ar7mL}$K z5T&^lWWE*h??UT9h>`FWSnsT%m8T_#KQ`%%8Zr(nr$c9iZQpaMxNS`JN%GcAzkmM1 zaHX=9g_DfbL`8q((>4YGym}FOKKIlqIAlli=!n0e zsyxiQQaqRFFw~9?fCW2N zvM4jQdc&4YC=FFb)sYnQiFvQK`b&oy9CC47;ZVl*GUy(o)tB-6$4(^Bw=A;wQ2AWzTC;S97tk_zx3;b0gu<*t=f?OaSQyc7DsCj z)?tb@)dNcII=1J&&P$}TgaITXV?=9F>apW)gPhnNA}P*piZ6jKwp~;XM10FylqK!0 z>5|ph^r9>Bb^3i~4AC`LL%v-$O%tk53AGIsdH9(G*M2#aRrLiE`$kM}aETJ5m^k7S ze-BRVgSr*!oRnqYjaPN!LbcGMncbvC)$z}$-Nx~xm0fZhddrHIqo0z-x>*w3Jai)u zRrIgHj>mVZ9K9;>HxAubXRpOQ0 z#2AE{Eb|42?Gx0$9NZ5JD&A$z2GdJgzbWd_?lfejU0rX(RA)}BJ9=+eEfSq1(D4s* z!+VBxN7l89idt5(l7s@zx*TdQ)N-p1!Fma3r)W|mI5jZ-2iH@cJnvujFC+sV3~=V3 z-ve_3dn31x&^v(PN>%4#jql80M>~gjEh9q7Z+Zj*vC0lRDq5j`sta^tev^lenVgr& zT%h38iR#MvA#vq-&^u2;mN%-r7?WS*9YU4yFP#4H%CPK$8!7E1{(P8N{f}%7RHoq=-T2KuR)8|V98B#+*ctDB~ zOQ6HR`;M+_Znl2&U0V|mu&(Pe*bYZ@@?D=ZeG|ZUK0B~fv#{``!-6V@5e3q|J55pW zd$=@pUy;Uo#6T?%tch?%cS2~PIM$D0w zF}%+)qZ|{(c=Jm^xx#IoXx8?6&}r7@!G4u|yh z0X>Xx>k-w{XV3nYLri7#=6CL4dhwJ5!Jc->v;5{QQCQ_T3z~p)J`xg%kYR$guCe1| z=*-JuOh09M({nQKoKx>dmxPEq$I{%aC{fBOPUy~q!@904lIuQbnPP05-g!)w;U8|_ z8y%FB#rJY*Zjf^{80?{p16`t@zWKvR-SfJRM4LOe58=&?K9Kr>chka)Pl&n(i6@ak z`52(-&>(bS?pdN~?RQ*69i$0aMe~kK*}YPdY8lo~awr;G%AD6=2tttSPiY6#c#v9U zlxEumTKAiNX^gL>mejGUu5UNL+^!#{_!i7p_m%$Tw>0%kI88$Ug!4+*s?`(rQi;&? 
z56Q5#gjR0s<5TfUX+7$Mn4bAf(4|aL>rlN2Yw1^elj;by1^EgOw$$LGtm2*r-m;YpV*TqPvh?1#0r(y{GcQmH+c9oDVz_%;g?|uQ zP?YW?2Pnox%_TvKl+o@I7YSG+g<6UT11ZO1ud^J3bMRV7>*K59L)0S#Hh}Si(YRs6 z44c5VycNN9;sx*n`44CR9%v$88nUh@?US;D{SawR`3<^l&a@!@wxGVOmY}md#BEtx zQ`h--L+}DlmP7r>1?4{v%Ik^PR`lgpxC{nYTactITuK0ySRzSChv%s}G}gt{0y6d~ zAvl}T9bHG)>%ymE`Ub0LTKs?Ow52M!BDRWo>l&m_2r@e? z3Nqwhzm-pPqX)7vz%gu^g9h{`H0@Id;1|fIq!)=xFJXD}3iybrKDTbLy5*@qW$8XQ zIB&A`Z&3f{4RwFpFL&JpYm{h(`$@x}igah!K4$hg&cv0k-w{`ZcPVG`7X_~38%`6v zq>@AtoKL8$2Z?^dNI&RBE#06Z6-K{J^GkEx#`l`foXx3_0lgUu1(010T-@U2~R42{#_}y6-w8htq9M3`RA8&}iR&A#dp5wjAz`BRIUQ|0NOR%Ob+Psgo z9@twO7GG4Eg9(46?+MQ&_5Q!pxgU&H}F#$XJaTzg)sVk{Y!i!;Xi$GsZ^``z;V_n)PG^+sfc zm6x_hFRqOO7o{;f2xzFKdiWaUEy-ldjpMkzAnjYcJ}ynB19(V#B8)P^it_d#l_Sxt zli@kqM8ncg|8VOUaiHo6xuw#jeLjh^z8!17Ni8sS#nseQx*y-+D@g{PXDg#jJwBvv zC%o0&JDJjYRC+#@m|fU`jW?=e^Q8dARCML@TF%Cob6&7Mp;8;|5^K+LFyf0L_O zlmsR55E5E$dV$nm4;BAIh7eD1&Bqyq_?Ye=Z!~QX8|hdPXyRs?p?!@*IoA%#cFA7X z4n-p4-%qD!XWe0Lk!g|Od+ifVa8Kq$c_vZl4_6C<>+!)3j<^6jSN6n3Kl1TKZ*Hv8Oon_4*3tQZA537wi;Oi|fF4x( zTx!1$U1VLo>$*&zkIcy3*~~2Sgn!p`TTOfa)-FlsPH(VzPf?fr?!D#S@>`l8h6hoP4I))%UeG87epvC!R4T1Nwrx-q;d&4x& zq^gcVRkwP!mw~mC@44g`JD5+}%IJz8&K4%%;OvEX(?VG2p6iL?XKrkT+7oAyYctqz zCHLUi4b%%-i1lx=K1=PTER`Y{X4=ELr)#KY9bR3%L@2p1Z>h2mO+f_;)}#fJ>Rr^Y z0NB=JQ6~fYbx)*e(4TQWSJ^PulFKio<2lq|i59x)gr{_xMZK)D@&+}@OlRavF8x+T zGx;&dtJ5GcZ;BT6j`-J}Ur0^HncdGe1BnC@-7%H2vSS&3mU8F> zMk1@qFHOo+z{6eDY#qozTsbXbMdQS;^hK8ck(IT%6Jyv(a=`r~SV~22a0~q`J3C!A z2fv0?%O8EUfXS!puE&1y_mx-WUBcS9jn+T2oU!E}6JIaNe&YJURGp1PUSJda2@b_W zQF&?GGSZ;!B`x|NgY~*c>eA}JvSB1^^u7IR6DTqDfG4zTPQ-CYD7*Dvwdf5&aX~rJ zT)Lt&UY#oLz#1ZPFuytd;)YN#r+VQvlS;na0!rP<*KcLcjZrWhITwyZQ=5^2gm&VE~9ExATB*f+_62@X>%7fEnWg-bnwlYmkt>2khN%&lkPKmu@gq-BUu_Ga!bX+8)pdX=zWu z{x*MtZ*(us8w|BB+?bt;wEjZ_JuMjc&3z}B=$(!ez0$wbl2Z}4HpEnh6B__+nJTH{ z`@B2x50xyrM)`GPm(X_X>9FwdO)PD=zoQ<{Y-{rXd9q#RSt+URiavA4<@oJxjx$=M zz;lf9Iiu0H=n9^NILdNXE}y1(R;QZZ9hX?0aoSnxwRo#9eEC2CLYc>9B^N4#&-+1Z z!3uR}iRNy*kD?XFL@ze{j=Hp05JFb90#a^Canr2MfTC6qsnC}D34#Fx-Dv%BibA3E 
zD9-23*Yj=!47>b<$XuvFAl@7ed>q&9e>xrc?Wbl+$&IZtuKn}IuL1{)z|d&$5Q)>c zA-d4v{dE&;g&=Qga9DmXVaQGCN*WaKYiBW0i*jp_0?|$GFVsGj z(yLWs*>!>_1=rud!eK<(T3e$|LM8FRvGbfGqwOlald#SjCXjNR!E2D;&yiAP_`HuO ziAqtH9hIt`70#2g*fUgL?;RJDJ{NwU%OJjrNxkTk0cN2j&Muw?#U2#WUOn?Ha{GL{ z+QX)=b*q(VW`7sL`F6KP*v_d@)yaCWrh#n(?94HbI_ML7MB#N*Fp>7)FA*tHMOEC{ z5_dGE6iO*u;;E>SC|Xi5CQv9^{4iP;pdj4>Xoca+sCyf8p=c6!gnsoJ^9T;x|LV0R zWD-nK2*|$bfeK}D50G{XAj*Z}R}6?@U4(k0>E*Tv=;GDf^C>e2{mQ1^OBF^S6B@yXk1QwzdS;NeKsBxC%oqU4OB0uo5+6+rO{!>cL;V5;3h z9bZGy4=+G%cZS1jR{z1AD1ef+2+-l_bEf=mV7go- zd72b39`4UK0K=P;R)9JjrwNU}eI#sU`{jrafx4+`P(5wvl z__U?4dSIf3Kb}RIoVG}TaS${$H`ssJUP5{4<6Zf^qeLgJJd6Q~c<$kBiQ4#{WN&YRube1>`6T5>Tp zNMJ>bSmi3b0Nr^9xz#bDoQRdopwHFJ90|y6%Y{1iolHx|ALk}^@X~l6&Ek|5|2#sg zQ8}b&_2il@O0(#^OJM!;=QrS5s(5=MZ3t0~Wy{iLjOdW7*AV0EOqMlfkv2kn`TLUX zuSR5+pqFVke!D6Um)^_bS?Dl(qS+^7?!zlyFbVx?C#pU17mc?}KZ4@lZyd*jp%?Tn zQr@q-!4K~noyMlq4XQr$4aaXJuwx3fHj)F?#Hot0I4VWL_(d@6fVyVFNx#SlQJPX+ zO2x&zA0^_NHMj~FdG_98{#<4YKUm}w{8I*FZhn7{(KAmt9M19Xbv(QvlcT9QyZ;@? zM}>?4m2kMg3M7xY=8{NgR1p(}qlGL{7we=7ao@atrQul_5-}YMT#>Hx!smfPHJckh z?!kSLv6HKY?ekAAT0>v7POxl`-+5Z4?uXCsgnohJmQdVAOfmGND!`SH<1ss*(6S&* z6Z**dA)hO=L79_;+R;aDw=O1_^B(S}M#nAKiI_{NWhe zVnJ(e$h=0NdPd^!`ch;xVg|I#B?eNM)}>U%%JaJ+eJT)T;nP(|y)KVYz=y9QW z5_b+*C;et<|L#^~4b1YJtuW7mpZZLXw$=2^)v#x-#V2kL`y|E&scTx3%Girv+PMB> zs|#gq?rKP9n}}7vnm|tM8`#W1e~-8Fsq?wrN@JkwFScP~Z1^V%y;%Rl3E{efu-p0NtgsLm2e~ zl8JGlIe{+b450K-z(ZOH%{Q**YHA!VwyyK%v@&aZ=iMFgq|%VS6LMHHk}J#J1WFC3|IsgCX%U(pWd)jn6o|^Ed)g~)uZ50j-fZ71CZIVPkX>tCl<;fC zoQ<*kvBgn=j|nofKF)WO9;y9s*oUmX(y^Ddt9Pk5P{fL5O<{|g`rE2Rs4P~l`h0W!giej0T>iQggGKce4?o%h!XtWB+4*b8aTZ$q11b!+- z$|pR0=cdu)+yvp{xYB(F3Q84Xx7#00{}>Z+>w(lK4$O#OCm3+Hrx9r=elm&?eJpUW zcQZ6|OM9(?ax6?X01hp?VzP(~=|!jfN3yvy*-*N0IhsSAk%_W%)qAy$0rX?N0FP+l zPUsRFMDi~NZhh?qvzLOj#VWtPy#%tv5Oq(B)-L*(_yBsZ+XX5BfBD>#cJeXv9B|Ob{SnFU3YFNMZ53 zLCO;%Pp<-t9w55V2rE63ns6FWabmWEM*fJZ+%fDNd7f8bn=LrvS4^mbpFi}Zq?B?m zti#R{5P2^5?>(hVaPz#vAWrip$IJJjrBU_%(mC2{SBTZTusB8B4GS6o^e)#u1)@yFH 
zlf~UY(E-z&RYJca;BvR$Ul#9XT=-;0cO5FGR<>-PLBx^qEI8d7gy>ghkciDnNp~E- zbja)}Td=q0YJjRkoht zid@Vtu6MO%;j^yfVKMco?N3Rxx#~i`m5i6$DS4(Wc)D&8u^m&s)8_^*`ve+%BLL=M zZr}?ewv)qMMusHkxMp=-4%pTdO+?NOMz54ZSwv4Sq&(C~dQhF?tJ)?^9nEX)GN&SK z*X`;yd_fW`1oRa9qGwfz6HZ)O*(S6qetqvarP=XuTl4dfv8GmA-IL7En9~Yv z@KEZYF-Ehv$>fKv6zM%370?4x%9Vx#6DtG&J}^rcxdMUB+lwvyC0+tw(kD0_3V+%$B9=;wyb*PJar{prQ(nR2s$l{Q27`G10DIQ;0^tQwGkq@Dv3)8hwUrO*6w#5FRIr3 zQRg^rzy|!m6nV1%=C!IU2wrSHS6yC_K*Qh4R0{6s1cXiBa&AWGPpJG!) zzG>Is>@SQ@xrj}xcZS+ye?y^Z17mM!3kaVZlMAXp>&s0zd~uzK z>I?i<7-3iG+4N_qvjD_M8Z7En*3L?<)Fhg%y$KZc8l0E;cV!-&gNO#-^_BFJAQ@F5 zZi`v(q(%57tHuvr{%_}%91WZ7?6;j`PH9QmXPjA2rrBhMq( zq6v*x_|}o6b>i0xkJc>->sff%tAVIuPS6^Qt^W=c!lviNgR^L!j;Db$Nc^%$G`>w6 zhkh8uE32v30-X9+TheJ*8-tzmVZBv3wt&lw#dX7zhZTq@dsKw+~`V|(h)+G++ zyG$8wm%jHY7IUd_X%Q15sr)nD(pFB>p0XPXr!&-~G_B=qe13lxQH84rXu7e|$SX=qyE!WzSG&Z`?+v_e3MoH?k=Qw zn-HZ+A;qr>`o4xtg&k2w7#u3DF{GSCs1ozE5T=XD2QL#*)ccuY<80M&IFLEvrJS~g zl;Vkc7JGCWX)#Z&1ZQ)nU8C!HmtO%o%=Ih5DQ=aH1fXrYMRwIG&l}Kdee6{gI-;7NiZX0Q z7y89Dtyg+`bK<2xX=BP&W}R5j@E_7{?*&s$eZa#VlnzE~NmCRKda+u~F(ZDAf4ygL z+{Zt|V`y9+=5@o$H_FH^`b4#*M=ys}lkTwj{Ip&4u^5zpsN7v#dQ5T=ZLI$sNi_A@ zHW%%ELi!tYn>>e7M21PJK;2-FrXj#u^NFB}b^_^3swz{}h>bAo%wPlwP5egK?l4Ek z4%31?W-{{6?k=Wlp3cu7C>AcV9r`8Q^(VXY;`iVQ~yKyG=N!u*&9ekCfKSE@ntLtK;skLBbC4ezl!+ z*)^D9s-N^FOgb{)l>>V`QNt=!mQOzv|J-aX7F|-Tk|R2dm;S6YCkMZ0ttY!(yz$t~ zi4J=()9RL4cbp)X_3`(5?9OkW?2KxQ#+WpjX8SGOfL}5%2+9HlKAymCiI)?F8w z{KdKavBA&aMKkoN_&vq9TqIY+_WW|jYCp|-yAzLiVp?ax<|f!0*KF@rAFp2e4Za$+ zDV+NRv-dRyB{-{8vsydfY-_LymaGqPT~(|jrMdOjRni07-Fb!5JEmlaD|bnBFag{; z2C5l9Ix*GMlFyjfK?PsQM-Xntx(CoLYTLw=pQl#PfWS_zp@I1>?l zVrd1*rRPw7F?49xcU^YqOj!M_Yb9aE?o3C{UDlrribHzWWTl7i_Iz}|STI!kSQ+Dg zQ8|_~I*?;SgqnbcHn{qmZyxo&`2#Unb++#SHo#4i%>Q&asS@|qP zWkkp;r9``BtftP`oJt}6bwhaO9>XH9^Kqc8^|`4&-Sdmo1*zUp4>-~GyK#o7KnSA9 z)lHBRuZBzrI?@iDi}x$)_h?jRUiw6#`z#siNiMPGr-@6S2*|uYy9-G8ZIwWyns6kr zao5jZHj4XHb+muganefSvL=>p>Xd|z0^7Hy8_3=-K7mmzpr<`C$M;*_bj2#gqmqI9 zSR?!!(y(&cCxxa&!}Y2p5&oHz$=mSaS@gbd2r)dtq<6cXZA$QyF`;B3b9oMs0B>|d 
z>@&9=Q)Y;?E_`+3D=+j;XWr|Kyfg=M>w23-x!)*+bhl%8tvF@Mma4^nznXq@pnX1~ zQUuW~dN#BoN_q*Qo+k>%$TzrATPqyGQ+t_}F6Q7E&9!LO7k@ps;_3G-T? zYDWod&lGcl=%W>5h`ryjAJ+Zlmet-?;6{f;uQr}HpQXLz{8804MvR*s{&zudk$$ zvAP@8PGVu@ck~rW)|G3js*qcc=9W>jB2jEdZ* zH{I%TX`0xodHlv_yj;1J%L6E)&%QfPyNyn1%5I+2ciBguyc>*%pY$6fLm&Pd9QI$w zKPGGsKXER*$3D94e1AP1+kfF)MsAQyeDH0^?|m+}PHYc-IB)o-zPUPf2fYPvkW7A@ zwPyEz=xkiI0ABd*zQ`_Z_dcaJe52kLH@Xwvn(ch?o*0k2!rz7+-9V63PiHON$#3JW zzO+x;8zg-nPM1$RpVO-o+x;J4Pun}6T$dzmAupTHoVtA=L<^es}wum?@!hn*#jSG|8*5(ulr%FJGcevKX5~L7zD?E@M*fiKk&g2 znCeUUG~db}^~T}o`}@hV^^5Px_%b_gaQFvpoe%AcC-M`>7nGO210N(UU#w4_NG6}F z8zdv%98XmC+bo=Q9WRQ9&9`=GZ+F!IB>ui`63~ZIn7$-^$D))^jJsVucdZF;Pj+8d z8=%gcE!=su*M7wv{+;R%b>xqJW6e8VB|F`8DDrD}8mWF`^M600*8ND6q0^y!g1*#R zc-nq}-jd<8?pRk?lt3<1-3Xs*g#B?FJnoOwcFAD3wFWT-yWacaqXtopR*tn8Rt8=J zml1^F^z*tACi`^8T&x(I=+CeC@Apu{PyoFlDqJoK;GY~wUWgZMDEVMelk9JlJP=DK z8d@7t4xS4exR9e9DM!Qw?(}cgUOQ58rc@Y&fXPB31;QFm47{0v&q;763dvIrmE-10 zYkfw}FG;jC*-#!8-%~^B>`zpjt1SgA`Qc)@a za5837C#jiI!D1YuW$KCJv#3Azpi3N?0@su0RjER#& z0B0^<|6)dQ^i)A(Qw~jZ8zoAuKuX93nM_2~8$Brmx_&iq8%LiTWY@4Lf&$Jy2goq{ z&G2ezruoAExsWAAe`>sjBYD5=FW`6nd=vm)Wzi&WU#J-7KmTH{;5khssS^;CTZx>jCR2O2qa-u4@}GBkv{rJ9fw3i z`=2L-S5Tb(ot)@ToazS=j!-iQ79ZJ9;2V!vS}-4X8?1F3d}Iw3@kPsu7UO63nbyDn z6L}&#{Q)PoaGEpbqEvTRsk|UcE%3x(3l+bJM!jS3<<5U1@KwSNzst)!5`=UCzz;gY zr(mGW37J!xnOL~`*75NFH?_>2fj8GEzR-NoAfVy{UZl$5*F{6fS$pf=xK+SMlpG}& zj?-V@!cMO|h%&&2t6?`?kX?Ur{zpAZrHnUvRX7lFEdGz>^WC);z{LO93*zOrz7J?J z=cD9uqwsSKZTi)mf=FuCXa7eWM>JSe5Eo@iEJ{DXU~-0<5S-|!o74m4j$)pJsE{Us zaG|xOzy+dS|5te07gkls%!qvTMngQiZ)Xk=XATIKF>=Tt&cPAkL1$t(K>!=_5=!;J zxEg^_GtBT@a00~;nVO&_!Fpc9;dZ25+A17QDgSy)`65A6S#j7=nn z3P1!Qf$R_;UVwHK1Wj-oLrXQ4A^r{=1&q?1pPZW41-RT1{&R#}xMC<973U-{HI~9$ za5z}bQ-0!9CMp1MLWd+kRgVi=J}8+H^}YWY`b$_{L9c~~X!oy72*8e*vl6K8k?+Vy zDO`LhWH&JaugbL@gR6Po(&R5jGFY|wq6 zXPcwU3c-z@>4FaVZABl%Gvxp|dak3m6-yvoz8%zT>3<#nXDU2rE$VG8Og@N?X&~Hr zxoShEEp?d?(6TzRS7+d2aG=!N1)JN^V@tJ@E0?0>7;qpK@SA9*V37}D)@*7-{=x_&)=R1kI 
zvJ*tLK8w>tb$%cOL9knIZxjNlg(9K2-#0c3+fT6LarUS>Ld!U+#HqpKaSAK2;Oi0=M+wGnkeU-n=bhh=y&(W0`0XM+f&6O@*0n28&x z+=7*fy^@;rj7%hrZQ{!@q!qjvTcZIq+6t`xkA}B~tw8k|5I>wj|jCm=?~PL z&OjqRz~Lt}_c@J~D!mK*m?_MRxi^gCCn2RL@whj7dC2QeZN`0Ci(;!!sv%+65-PI^ zu?euuBiYmsZV-!7w+4-XSnT={sLX*Tbl)-`<(b$y{)vb^Y2bgrgrU7)9#3a> z>&<#O+Pf>-hkH&1yzqFqXFLz@dv!a%BHi83&7DL}-bLc39b->tABz3K{%(B!EaY;2 zMQ`7QdoU1Aa_rgbY`-`Bq7ysb1!*nh^Mdw>A_8&8I|1vI{jR#msK=*3e*o*|k<+2h z?an8F7>}BdxeKZD?XLYaQdr|Pg7f4{+4lmOPB&+t)GOx;Z!>$`3)dG`DQ5B5Be^Q= zNt(5q|NDAH)*#P^!lyUE-MpK-KtP?BDHDl;n`*U6#~0=GwS!JJO$Yzg&JE>_lMQi9 z%CgSG@hjB;YTEF6v){5ObTur=a?i|&yW;TMAWMcft{Xp;o43?+M@i&A5(+P$g|a(C zxN3f1`Tdv0kI*|KZDQ%GSH|fCuHlno$v5ZP<58=dn8}34A8~g^zZk32s!4JZR^Ozy zt`zT};FOo9Ic}aQ-ZQ5$cYV#XQlEKB5BDtzZurwtFHSx7uU0Mikv_WfNn}{R)cJ$< zo(|3uoAZwdva*hq>(s+im(!YWUvJ*&usgmHO6%V0Ucx&5;a`wB*zM5y#c}tT1do3A zy^Z8=^KcrfqdRZPt;6{@!0<_E&(*y!rSA6RvdE)jxU=ewSHlDMsW1E2Y2I#J!l7yH z`!@5}Cyc*3H73j7Z@N4CFJBydIJP?C`-Zb0$9oqg z`}^iugWcQszlkY2x-Hy%Ag-#DnZ3M^-(BBwy~WObN_O6)8c1w)zGi(CK-s_~6kqzj zsbY8DlyAPx{6XQhZ@W*g`9?Y!pQ&UbQ;~`tI$mCgU;Up7zL+?vyvBn^TE*>=Cb-i~ zDTxf!KN)D#S$&tl?)IRG?=G@fn%#T*_U7h(vqmh=m^&V&xNXeagbtt7M~qK69;I*A zM^3(PdYyGZi#o#IzSDbc!ann49F3XIx;?WkKSO8Dkv^!@U{BM!2;UsJJ04B{O2L`_ zr+mbqZPnH2b`&sY;MVQkD}f|SLdm3jl=q;{Jv#Nhz*|O}D@MK^oV=u202k)V(DBH5q zWXFcw*)f!WuSNGX-;82ZI}hmx*?#wqSufokC-va5X}s|pdAaV=uAY8bL$_AN6TqDl z$lc!K!}g!B)y1dj?%t60r=A(bvh@Gy%r_kVAP46At~$}baC^JK{>yixqr zcI@!U?71=e#rI_C*7oRjc;iHgVk-MB8-tV2xGTN?H;O4VKt7F=4}0fLh(5F)96*vJ z|DJtQDU)%@H+469;1(9V%$Tq|J^254`U;@9x*pKB6!+q~1&Uj7XN%Kf1&TYA;_eQ` z9SRg@u_7%}q_`})xVyvRzAP@gkMDnP-prY~Gk0=xlALpslT31Qw6}FNynR)7e*X`C zF95&)_vb3RcW*kVc@;A2j~u>|0}poP_^4+3AYL^#k7#Pt=Fi~T1$Ozv`Hvn^;biL_ zU`@di_pu5QcxfwHs`)>qmy&AS?0MSy;&8QJ*~adXR;mXeHOTOTrp>udy=h?P-Zcz#iVGwtjz=d6T%zw?8|@u=-oU zvGp;0IZ*hlCR?)AaXOc|bw$E|m@x=+x4MY0cE6n^L`o|GU7Hj`?+`~$pawPXn9O&! 
zw#n$&RCexHPj?>rtS_wURCYq0bwCzJmgD0#VmFgiH#_!*{?aCv$o%#77-a7KL~mks z;3`}x1{ee(K{h>Y+^MNPneU4Yt1|k+DeomUFGuz8N<|GfK`xGeZ=lab ze;xm}=eM;OD7gb1=Wh1IdGDs})(zc9n!b%xv76|fL&Mcn9xftov{@iNL69Q!ED))A zpwK$8Uyd!*-)jVsZ#xz3@aN?! z3Qgalu&kk9m2CIdU(6As`}y{(vPeLX+(Y;kHP?l^2SEPX3LaVW&{!gQeX|Z9T^2Fi z=?H(|I-QqIv+58T6JC4VTgntnJM%)nLr}^olwH=|cX%FxhJkQ}pIj%T9piwl6JhZ3 z6^hAt`RVBk2{jh&!o+K9+S<4Ka^RU}j_}n$zU)=-g z&i1%kq5_S{_WX7)7oZydJuCmSqj`G7%z-&)nS5KliExQJVbw?Bbv~>9{|xPJg)j{2 zKb|uRnFQZAhdq!O`E)*e3(#b)ixGBhuOX4-BSF&{xe9|#;G`p7Wa*Ln-Mr7v-M2(9l$;W;`8FUr{%hLKEseGDm1<~*Gb*4o_Rc$J zeA$fvJ+q4v?86UUbJ!#E)Xft#RG6rG$_@pD6UGU<^ z!xn$_O?CR3_lzv zi3!&*lWc;AahN5&|D5qs4K96`!0lQ0Mj@5!JXP0fEQc>Bf$9@2A<~a_nae0H?Qgz& z{ob`ftkTK5;+V}b)VkdlPc&H18?ZV5(z6@I=iRMGBvSAE3_Xb0+Y^i=48i=lIo_Oa z&2@aN8`>94JHnXd9}_{%Qad!dP|hY7lZNTt|!mD>bv%V$5*(?XW>q;JN%+J z0U_+;-{1%_;is0H%d0!5yUbtyIo>{~k4773oR&GDu60fYddT=C1Bm(6@OT<{xfFV< zfepCf_7Odu_t@KUTle6tZD(W^Zzp&VA59y2xFQEU8xE)I@~hY|Y|b_#0l(X(_3_Oi z%7n9XPvX=^E9ij#< z&==U9TYe!|nx>(_Yol7eT=bF;O3LBPr_KIKQaLmzl;Cb!uDyE9^vY=XImWdPej+_> zbX#|(fKNAp1_P3xqh0s~Bt^R;3f=F5e0}mh*${W#kAwJtTe&Xkkhi+$EBe}BL?gqx zBheY1!vYUa56E-D{V~gbIgm#d8@HUy$Q2ZR5|Qh zQd0x*p!*ZIm<=dLe*91bii)iN^q|{rV)n)hT>-26HW4pX{EuZ)VuskeqpbS&Ae}h( zxia$x6_O?q|Iw`@tM_ob={xbgGsYD!qi$HN(w*qR_(9Qd=6Y5aba^;7R$TY7X^R(^ z*_cK_YlZ9H=!s657bjZJ4WYU+ z(gJsqB3|A$77C8M!w3yReMIrDRK`C$uLeK#mi?{6ZtLlMg?RY|Hw)+aU2xEgt3*ly zBx)!7R{Mq*3Lt)?$tJsM0b08I> z3{2#)2Es}RbK%14d2=3(GZ{+P>!%&8z*V& zn0&CLQ@dGT;#NT{JXj7&AQuA{N2awncF(;>&vJqk!mDFHSsjc*Zp0;x@!|LDkb^e} z3HTUL2#8|q8`8!2%Ji?UHuvW3uuNMHx_PP2`25Jt6JB8Nbag8q@47ZZ;`}XO9GwAP zxp}AW?YDgAwx~NhWz!M4EX&m?VgYL|dc|Nr)qw2)# z-nkD!l>R_eI?vm!wwsS^FY;KZP3m06@?Q2YDKU2*iOvVk7hAZ^txeXnySUF@+<2{g z^?8~lOK^BX6wywzakaZ{E&`qp@!JnHGJlB3t|c6k$b z59dD9Ry@>y{3#-d)R12N?TWf@SGZQNIJr^u1y&ASu%#6-X5Ci4N5l85JCt8yxkfk} zF$;yR42PA!P(M#cCoXFBHiVsEuk^+JwXA1(kc3^=wxKqrZ%iV-->L5RpL>yt^p57k zk>;|Zdcm_NT)A$4AV$jNGv{M>;T>K>XLxEgrEaexTW>v&ZNz%n!zbP&es-Fo)n^yT zZQ{({5tfWIgl;@ovk*xBZjRjYVl&_~dcI!>;>3sO_l;2^i;S5`dcKguzeenv^*%J- 
zEoM(ji}~kB25nuCkK{l9H4?!ZpCZ|(nLy{&%-&s!8<-H|-ewIrg*vQ$CyP#nGb|lcVn|(un|1!pJAun!RB6<}An?Ry$O*l$%in>1jUvWCGUBTV z6&`o+p0~$m_?Oa*2HIUa=)48ZOaM)oj-lUuAcl3fK~=m_q=(DB+3NGB{jhP>UNJS7 zVF*DiQyW1ZbXIDVPaF8Pmo;?mH&X={n{h6~^`m|+O{Wo~Ftghm*qHW;ry0$(YxicR zNX_z>H>D=wIk!7j_)2fs-nPBnyWFXs4V&*{1;Asp0{d)+$lk^~8_};xi8A^CUa%^%!|V5+M&!)n8`^*|xdyMM4H z5LKr?#jZVS7}+*L^+90MLwB7V##5bfG9KTqiqj1+VWwa(d&2+N+Zyb0^zqz;=rkrK z)~Bp9>PqrmU~_m<32FDRnkH9Qkcu~-I3SVmnKn#h(H7d)%(OVaq~Xw9m|S|LgW6Kr zyMotSF)kMx(CWFc>iB0Iy}wNcCvJCQlH?=K!8Sv`l8$;dpJgvJKO=6@<#fk?RR+G6 z>Dam|1&q&To$Gr$#{7p|AdgYqC41{q$1g|JG%tdx+icR(ZrQ?>36nG`+q36jSu-D# zJ7jO4M++V^OZ$8=*>H6z|I3jxuOX+&uttjj&RpoT1c3x;jf_CgAbs=g^10`{6VgBS z0>Bl}o1)a&;2rXoed}jlH58|Sdp)Y@_7QQ}^j+D4v9bwPE z4)Tn{$>%g>MqNW-aNI4wp+Kp3d~q8u~%y? zKf+!`Wm7P73j{_pwb8S2?GpY8MD>9UbB6Gb5_Ft;4W$WeLW;J2_OdqK@$aC$jki|n zZLM+{?)A_v!>kV3!_DZ=Q}keI1O?`rQPj+3l0TDff9x?wS>$*qDprO0P~=e?7b`K| zD{SM8AhqqD;Vld*fz{0GIpp});8F3Lsi?zd9t)2sazUP}zoCn?MI%v9_!s^DQxPCZ zW4hjKTo(?Wt#aTo2utNw-y5$tD$%F!@MiIOt)TS=37Y6bZnDB0E%pw697I3D?4#@` zUGA@&_&4h|o0;VOpU>xhwMAQVDpAkz5|_gd(DMJ-{yxsj^YMLDwD%s1qu={pj))%4 ztpk|M2p?&nH34+K==Idhhg*Bt;eaK5FY87IcX z=n#l?LQ8ETv8Pf$NSeDFX8|UGS?5dYH3xbl>`YI5hx~9tXPI81Z1YqA55TAGS{OD1 z*vg91DG^ZvRO20rQf=v)_fGd$PlV0O_EpACuh-?mma}~rX7Ia(a!L$wM5-{=h9iBu zfmuNXqkN=>cJXXQTm8ZhUjqFf^QDPMtMLdg`BOvgD&td1DLdtexD@(ST3J0i(Js{Z z66sW~_hsAcXd0D?i(G8Fb$Z-r9!D+S$nc*Z@{~gEW@;~a4%2D0JWo_=U_C#{J-)yu zJyjM!Rm-Uh-X-J=xiMDHdjnyu$A)Ow^g5TKNel&g_#bY~$UT{X%#iiH(WSek{X~YP zre==?QQJS<2dG`zm5K|jPUXhEbKy}6=Q0LKS&DZVC@o2QS z)m{CLRpRW#!TE|!^@dL$+q(EQ|B*dgJ)nCS@;}`)dQRQmKig*B@T|-lr77V}vTZT} zOBZzzn?m=6Gi_Jf4?t0~A+) z`u~RtNEdilLT?QWOIN$iy|(MlX6|(a?vm6!m9hfe(CUNC>DN4f4tI-00w*By>?+7J z#X(y({eY<%Y45w<|B>ZmZr{?(qnpeO#JO?ZjIKiV&Xdf*Kx=2eYo#5{wWn-X|dHPXF}( zF*5i+M&R2Ednp+@Bi9N$oMaFYBwJMCk)>UC0;oh{H}eDXgCUQqv8|uBUANt)h^X50 za$KrOJzmr}EF_eW?F5g&5B!0;@SxHSD)!q2Vrn` z!o2!Th}Qfqj$tNLPLFDul6hAf&110V?>B6c8of2?*BpQj64(NpJ3@U`R(?6YF+zRS zRz$@%-_M`RC^JFvZ(Knd# 
zJk4C(NetnQcg~fD7-OK{d)x>>VzNnZa|230a#yyE2vC<``+sMGoIcbGyW(t(!j3XU zu=@escF%0E6H=*yyR zI;?k2m{JAxD;MZ{T^*$UkT}@r|5lfcNz$jcjr}$E8|jtfrp62(J6j7HyQ9_|cgovq z935#-USm15dLDk|CiZbbQ6SjjtD$@OUb7grUENu9HPzN5F8in!K$q$q$U7@BkT?q} z(|*%4VB`1XcMuus);&nYJL3n;De>lo{Fc}pw3$ylFE|V=c2SjdX-p3!KT#TiMNK)x zQ|DD{tgFE^gudGe<%rV}uH6((6MM-mtfSKt--MiFcmz=B$-72dd6zrM#;Cx86@iuj zagmc<;Jgx0)FmObtsr@Ak8X@uEusD_GA-Ss4f&!|lf`-X~ zE{%!zj3wS5>I0q(e#89A0oP2=fY$Yiuh|8Xp6@Z@7lt^oWc$Ud842| zJb_z|fGZ_s*o)GDr^^2(e_lI~e}U@%Fp79C*K=#fuTKent>3zeDa6M&MrN4LE&Vc{ zPZIlD41#6K55dTj-)oA0x0vmC0^%oAmWc#zfPEUn<PJ{~2mITz;KK^W=bBO`ze7@Z58VCMQW~cCNJnADW|NfMD zSXkT>Gi`s3+cC01$aOKEd16XXh38o_=Hwy1{SWvWy*Xaoz`=Eg?GUZE{0J@EJlyz@ z^7&nXVDqQdGe{aCnFDo9R)0F-M}fXZ*NQB+XW{QyNgY!wc4cY;}Lo(+A za&7{JNuDssby@=Yh5POk;kAg847VI#!|-jccO6>E!_$bLthDdfTtQlw)e}la7RvHS z8Hk?=5)7*VI#3Iqh@13Nmt<`@CRAEnZ`Zc28A4~DaQ3L9Z}=pMM(my_VaQ$?k%rR% zHLlj@67QXR{(iL;!n5a%)?}a#R1pZ|zKV^9sh#slE{{G#6DdRY7Fra%)^$uoD>=H(Pxrru~lFO)CP8bu8X4<3F-G;L_>2G z$h|)^oe#=i<~>|g1z}(0jEX8@ZqFvr8~m;ld1L3UhT95aA89zy$v>cGpcNf_c;e3X z{e2%(AT6d)BLw& z>U{^jK%D=epomiYX_1kn!%ECj-hb)`{AJhd5ZS=}C60alhuGI2=I!>bzV}pCF?x3C zz5%MII9ptDTxt*Yn&R_QbLIOc5$P>9IYc?zR{r0zYO8n>D^n2Bk#6>tCxezkiCrHL z@B4Eo;X)#V5Z`I8NI@IHlaZ6>ReGbeEZz6Gov}3QourV`Kka=6&rm#mWqfq*rSRMN z`J&mR0h|$OtQKt&ieUDp5V7+=@_{oxU$&Wk>+FqZjaYCdV`YRLcmurDlSPY?V%+x= z>vSe)LG~B+ABd0UBR91q%)|b7I{q`bE&{n+gX0 zlG5`;^~v5gz5~2c$F?%wrS#U?rhO$E^F_*GD9}~xTXkKljD@_Wh-gpa1U-@=K96N^^TLT#Qp<+^q-Z`#loc*?z`iV%FL0lwRG$WCr?C$3Pq~Gehnpt2YoKm{K*g_ zda-YRJHOS=e~YeAE&&J!pL;E?18O5!>2Y~ptCNS^{3Q$*js+Hsn&o{a%zaOgyb#CZ zwrE#&Px$bS76Pa{lQc`H!6vJrOS^;gV9Tode=;_d3m=zl;QortCkWaQHGcT+!a!M* z5lj>->&X2B9nf}^$VT3jAtHm-!eLfP7%Qh6G4d;?RrxF1H&X_z^KIAb*CpnHRrzu) z1F;LCMNPN?s61bWH>kr3sC-R@CA-_wj$cyuu&zjtY8_LEC(`bR(vimxW<>W5baM+w zw`9_DPU5R8%5Z&}a2D{xLwWxi?u_fD?tp9eebl-s!R}RvV-H{>R}_Rdb#Tl0``j(9 zR$~-LFO9W<-1l?x!@`%81r#OpsPMn9^={uR-qMOJ#2Qlf#yDJ3=nX)42GZ%TCW$L2 zNbzLf62fa}B!v10j;q#9UC= z83mPozC?=|!CK#|+q11Dg`?GDqqfk#rZBPn8;0kCXyfa1$*o)NHJN6U1)_{t_hYc- 
z*WH-oevdB;-1{)zy~Sn!i`6#~L{c87Ll_v`79~&J(d9^-e=XG?S{rGdD0FTz zA`cfoo!*bfufZ7RIeYIGZBh1P{LVVcVIhR6v&Q5-YG#JN>^n>+s6?@;S>KCjQpN6_p=p)+Za+1Hu@GUr_AE&gfj0Hgwb%>LTMbR253ADl-X zkK91v00$aEIQPkXR5#=}`iz9Ro^ycF=C(MvwwNMJc6}1quVX~#HljJaKxjMVDSjeYC|R4 zrNg!R+tR?~0GfFBMzt;aPB-ki$lEbTCQ>wuxX|w}6QN=^HAffgW3)ovR|LUjD$~D7 ze5E7XTAK0M>95#RM|>rv0(Zn$RMuM`a7er3|J+hJg<12y;Tn;n={!%G_NO}4Bayj(V=o08>C%EKbP$dpyDK->XlJMN}vn@Y`D z2s2TyqVRiBZBG8p21tE8S=n7XAbfBM@&(^jZP}Vp*0>F|C(- zE!nM_>hKYnlz@9n!59^gCt0Y+zEq$qoR-1eK~efkB(jkBaDF6*Cmy5kjm2F= z)Kb8V))g&0(p7p(ZKE6U?-sLppocI=9@DYlo6|+1r1S(iUk!EULUfOC!gSe8WP^2r z_I`}B@P;hCp*K2Ui->pw)?sM!#Ge@^s|@k=k9JB5?GcV2ZVK-t+?en_>!r>o%#%?Q zCW!oaEA+z!fOu+7ETmj2V86E@CpANzGv~u{ra_=IIhj*jZxn=Nefc03RwP7E5Sw}X z0y`;0*}e;px*yZ4Fcga-S0o zJtE^_Z)X0=do57G*j+=q{Q0kT%DM|$pKxNnM+F@k!<)%lYKjH($K32ks^;|qg1wEE zUg5vCe9bQ8WD~&HyI!%fw(uGYfm1qH_W41<`}hNIDtHar@;S|gGi??3;+bw_MRkT! zvBMrYFsO3yms5tp%=la}0}~yQhbJ#ShLmQ#JQBR>m2>K)4XelR;?z9>sSnBRQEz8t zUo#b8=r|Ku2ANmvH$=|kkVh#B^N?D(U^cn{@XsvO3fO36WWowbmWs#@skg6CTj<|B zIAeHiiWU!Yj7WW!B8bj`LkpAm!+-HUqPA&^ zj2S|4d`T`^Q0FI4yj=4!uGA*+zle_-nqTPU#j@(`aD^!2pVcPZ^hRqcnE=|-OnD7fnxtwHd%H=SOm zF)&L{5+Qax%42$;t$XA;LkkXgBc^4f*U4J_V>GYj?pnFrh{!;#G^44A^-j3=L~ooY>%|Fzc_5^aEu5OzZRPd#Y|MRaJ9QM zhj9Iq@yMJtOwah)5-0Vl#9W`<=(+Z5(RwP&#q5Q#W08`TEFEcd2J$;s`_f@8KBw`F zrG$R{^bV?f|FUi#r~m%loT}2POa6DIekE+SP%B%%e&PNWVRgC^aQ2Pp z@qXa!2GlHelf%dNoXjA?cXJw{vB&DZ<{y5lTQ;#6`~Di--G{Zsa!I6E9l^0t3I^Dc z++)0-VRfha3lRQ`(}HS@BFLq)FTW83CvL%OqSDo<{*3K>p1q-mnk@!!t;AZz7k}Z8 zwX%{wG0C()hbIZ%4yHk|1n+lq zNq&94YN_-fnfb8+NlO>1{)T9yw8D!NbWdow3W^s>;)~My9@5~w5KPH9?$2u>K4>ei zj?qNt3UI+*!ABvv&q=rBP8-^$fTS8)ix>PabzT~TtfQ0~S4>cv0?+Zz@!#GVU@eAd zZ?u9vz?$GzFf+JJ>HS!`d0enTh*(IhAWJ=76ZQh?$bPp!8rN%aumt$!M*4?k%fCUU z3tsQ#!Hyg18>}1H;M-2D*&s51@L9-CcD|4)(3yOo{D_LoCCR`DKQB)l?tsprMw{>B;2^ySZ&rK_lt8`mIH`{4B8 z#*i8@7l8E((O@o9hR=_h=O|Vvb-{OF^Nm*wv>R zGEU&eV87E4<6!i&@ROI-FOEEAoNmmx&eW~h&I!&b{~iX;pe_W@f*V5;+Oa%drN44| z74)j%B(&pM(4ZH-FKUB*z^)r 
z2Hcz*N_)Wm3r95Hkj_`g*I331BIo!U7eJPJbZ;th7~UM&a|88oS7aFMRZGZr(fK6U z5?p^{h6Hn%(tgISczq0}3^s9QS*t!rniC>Ts_}h#2<#6g*8GVkor2Jx;@%#EzRj16 z0~=is9_!i2SSLJgV`%#mTVRzHsAEXmuy&|r?8K>&)7Z**q zZQ;}HNT&+WQd`By?5P5)5%I}b8kghYTS%#^$1cV4SoPfr zYhy8$U+vp~2MwO<6fKyt-cOaC+FTaw+GO6zh&O4cAFtBWVry#3wt|#YlWn(erdEX_09JZ}XB(%AaYNc{TFHfV#Yl0jx#K0eCX9|skb^XNwe9A#*?q^1df z-QxMLVFd`q@bvW21pn~2rnduE`6nI?rMM!`4Q|y|9KBO*3{xRj-f8AsL4nCjebyQqO6%{AScMw7XC0%H}QcM&d;W;Jr({)D<}NKl#9@J z9Oh@!`i9XRXU2w$vig^y#I|I)a+>Ca@=m?tVsv1pb&~{2fKutELVZVj#dQ0iLyi)k zERx}1U;CpFG1+c2zlol7E{`?yQN5LQ{dAh!!0DME8>7?Yy#~a6kD+G$_s#0}8qmPg zm<)%LWMNFrX^Vh$MRhN_$Di&LN&lQvQy-v$D{xqeJxGquLz0BmN6Hi89_ie=iCU8R zD!aF3TY$tWgMWGJ0q{3REy0dUaw;t>FBEe$h%1J&AWY9Br&YbPnr#R5esB=9} ze1y~FlCVjsNsG~fETgEMqJX+>W~PSHXdWGI$)UK2P(}yQSKRd4<7i^T4xiTg%A>nA zcgD~Mb=x;Lo-Q;GN#Y|nQ zNG3*N1nlnAt;Xk@0kSE*E0{v#ASL@ah9gZXpmf}l_k@Sc=?cBGEttHBSjn*6^Ebyv?7O>qmur?y zimOpnVwr+V&z)i__b=m=MOq0oGqJx83!I5sO_fe)N|RTLr>k9(UtBmC+m+@ZWuAfI<~x8U|hCEz8-uq1lJlXmeF-)Od? z+c{&^LWwUs)#gvF7U%AIlz`#`1^^U467)<<@DE=CB_EWm zW~tlZe4e%NW!xJcm0$MRi` ze8-?-lXg)b%xy8UwX>wvN!5WpN|alo09jlUO5^_v7e*pk@PieCGy8$6F`e5ZOhdI#90HV4!h- zZ6h05kL>sP{_ImMeX6rIrHXhRH={fGPkh@+r_;2MN*ZM^au~nz-~eAw<1cVy7bFe$R~zMMHOX06MO_ld!l3U_|InmXN4TFC^N(#*T@3m1 zl$wp&$^?Ks6fj=&?9Ea-0X{;9U9H70)0Wexv$ZPw{u*(deR{Y2vMwskGrexMnwWO` z!T#zXe&Jxf=w6vh z+M#Q8$|+~)#)_SyC~|7wOSN>eW7^t%@g1cDOQHK>y4+h<{fJ^3Mb!x0}fE#?7Qu9E!)AGB?Grp=qaZ#?e9aA&M87TtMs^>v!w zOsq8*xanOzQc-2AoM;=}tn_)%q!wD-dCDZDqr{*j|qBiq~opN3r zi`^O1u4RC6EcO*OVPa&pM{CA6z>moAN3{*c8OA8201|ZuSISSh8?tAKS1X9@#&1!9Aq|e z%`oeeqG_EPKpD>_mLg9PV4+5+QL}hM1l*%L*ulLk2KSfzoi=}dQ2bE^CwsV9}DLv^E zH|Y0^TQOvw>>X(5#<*f0`1MTG{99*$G~klimM@r~FK@^b3mFd9Vtvd>G2Kig1)a1g zj5P;v?7fN;-*+8J)%>XmJr>U+_4x0h6&)kS4qP+q!*Gg^!|fGC{xgdXEm^cWY%=qCTq}-EIX12gk4q~4 zXM!Cj zRGdm=t{%|JIP&gv19r><#^em#+Oq=rAHs~x$9B!0M;!c9ogXeVa_W|b`6~W^W;JDA zVk%`i$tX9xfdWgT`~}SJjL@J)eQD3$}qapPwq!K++#A z(RziC|4c0iUH-Z%x!%97_{T%zK%F31SZPo%xVw{0CxpT~*~lqfLW*2Pr^$^ke)IR$ 
zH#c9$m)eY$tdgdp^Xtr)ed9PuFpJRo?9}#4@{(^YygxNm3L4fu0Es}jas)o$(_$J$ zK#yPKRk1t%RR)!A59O+A67g!}_FJ8l>me$vv^sPir($j49rk#(2g_6Fd6Z<#6FwYC zGPw6W=u7ojC#stn$qCV;*>!qnBsA#VFTdLgFgyUEGw#-x=g2GZo5e!K*B*BrTT-H- z%Ay0F+?2un?_ajXJf<}))kbl|iFGxKpiy?tO91|aF&OW+&oL1dC6 zaRMI0RY=~hZM~@7)GJ(n*&$T#gmWz^b4ljEMh~Ro9ZikC;vqV+_{J)a0_T%i(8qdi zvohqSgn}?9YTVR+C*(%;gENudP*eeD;?wB>NnKvhli*vg~Zp zRX?y!@m8*lahGt}j4%jNSIDDy#`=Zb&oWjpU8l=DUTq&le zSRC4QuFWa9m}RLMQ9od1Q@zX{E*@dX8jZe9w*kstP&D#X;<}mePp0T6K26|dMX?;( zYJn_l$RTD3x9accUnNQ<-;TABnI$&ojDM1A!!+fxEIL{IMbVzcjY{CQ-A!t`-Y#xc zZ$-QG2(_a9$+AbD-IQmSZ0{F59xS?O1-=>8Hl9BEq+k@~v{_Vllh4KX!1Zt8eeVaE zTaj4nhxf?L+=hNrlc)d(+PQe~3MwDt3Z8CY@id0h+Y-VHxfSua+IDHDSv11ZQaV)t z+G^?40OtYS-hA600gupJl>JD-F()PHJP5xQDKmBX_Mt{0-zg<`$Sh#;X+gfE6K(Jc zzZRDyp67F@;LVYBDzEzJyMRPIzdq6QaF2wW{Rnj#!q8oMG@58~{*-#sVC{8=(9V0Pt`Kj}gGqbQ29y%Xmu6!lLEh zf|YD|V(KJ&Qu(uLH4CDbUW|2o%!d3;h?qSidWn)ww2sAQef)i;p&Vp&|;Cpc$j2S{V=WG#B}rI9or$eWC`! zEeVTCqintFXD5fa*2>_?&FrNm+p{zNRaVcS56-;OJ{$*{>P156zS)Necd&S?m2?KjntXqaA zm&mvKyh+=DZGmo#7FLF&ca7$)KdU-q7QV|br|97gN$AZ=xyTP&W%>AE;qv&0_QD;G zL*V@p_um?NlspPaiI8hU&dPnYs2krXXC7fIGt+Gzb6H&f)3w|xkv6`Y8GG@nE! zw{<%xuEiZn>`>`0`44H0sS5;Q%+we$N1hUX;wyJgkxmhf|FTd>-$5P{AkTD7aq#6Y zqcw=m9mpYl6OlZ4&dF3k^pU?74^cUy$nfxYKEbRgPoX%z3KHIoGelEbCekwAkHAC- z0(^(6F_$5x6}QLlYy*z>G?Y^{h=il(w!3cTrI?mz=o4=+={tI;F{Y}d)-~r|_l36g zi~pRCOlxCq7aQfPp!+8pe)H8L#fHP)#eTG`V`VKTufnk=rGnJg7hUiKyXHVLKJaz? 
zRm3Db50W0{Q($iV_L5DLrBz1~P@ZDvgv)>Wk?el?348zS#SchOm#1&P!QW{SMP(DV zUx)6DTaOK7Uvbaq)t?WisA2=*G~3;Hu6FgeVhmp$X2&}F5PRvac&gxV8xo_kuY=E3 z0U@diz9=VIab*Q}6PGM6z-fMI(Cd%y6J z&2nzwO1GCpZ=w%lXl|>i%x+7nJwR?2LU-)4UC9_3kOTLv;vI~fbZ+|fWd$<|nqm>ZEE8K5$J}r@WG{Ikle=ZOAVW6-E zFucI9kZ>&J`RcrXlKVvPq+PLnUF!R z0?}(!1UL)$;vSo#ahQKieT)iF?KT8rI*70m9BD=N`Wh5FM7z)?xTwO<0(ifVeQUg(&i&S}~dPxUVT8kTH$33N7Z}Mg>L+v~hA;M_hOWYeZ7)2OK zFV8=np}{GSlkjUQh|!{4+%SVnR(yv1%{U4CsV-^cE)9qvLT6>-PGj4#?I#})iYrdi z%rQT~DM|AYOEN^wm|iNF*SIE>*ja6A%O`tohIY;F=_{o*|EI)NVBKCFm`G7a{g@_+ zZ01B`0DM+hHfif|e|BJ(t9u_z$>wwc=FKAxjn?ryAQMD%kUpW~z3&@NrV{{qi7+Xqw60#DsP9 zykYdT*TwizIGwB<@cpXs<$Y*lZh`d!$}z6#%{?MN%SPvI4b~D>rl$zKtIW@C2i7s@ z)p8z+Nw~yQGFTJE1#uOY^DqxJ-y>RD={&)7$s0JM&L*9BEjKu~2AJ40urckR*=jM7 zo*EAL)QZKS>G-vM(Wc0?{9|sx3cbbNXVBZNPG%8=4DaV?wt|D_J4Gy-oG`x<7hjiAgIg+ijR4hwcN> zZ`SX}?^;2rJhyyJ@3~9eQu%~WfBJQsWtR~LD=zKkJZLI@kzoN~l>mXIOTXHkFJ8Wvs6R>MU4C`O-B?b`$*LciWl zSHQV_jyb+C^>ZyPI`T||!oOZm+goTNF{rY`d+UENu?v;|V0M>W&Md|KSh`Vena8vX zo8;fI6>tvJEzJekLM5o7xumXwtoP%!cyE2aTSoKQl6;FiK`e&Jb;FwNd7yHfJC>?>MVaNLhSq9=)O?#*B4WR%1*c6#Qe$QMm^ zOj@s7SI3nX_SyJLlGRuJ9{`m=YQOWdxZwBr3Z6y$JOPO^7W+EHVTMeSGq52=KAV{R ztn^z;ap8Ja^II#P^*_(#?@4@N%IL}!d9shH6a^k?oKfl2>?LeIY<-sfKa$&7R@wu9 z6sxOO`6G{QF4_%S$pud?c+dy!Y+2`d0WfvZ)u7h)tEnOT+&=ElCoH#)^7x%Gj=Q}9 zACUf>?8_KjOny%5%Y^5I59aT8sB@L~#CvJN&WDBU7Z&NfD{#>wd2Jdme?LT>t-K(3 z^M_KtAwTpyS?Y4LF5_lgJS1h@`J7*u{CyK=N=6rd=fs(kJe%HBloy6jm8#^4b-|E= zwYO4xZluM$?;UQlCkW3ZyPcr=? 
z>Qg3jUrtS#F#4Sg@s8IBd3KgE-n2)5Kg$l58upECxbQK5@*OP-Sxna`{DR{~aYiK0 zBINs^F?{4sZo_>@9yz2xACc!mi{SIOi{~T3@#)J<|0a?@i8lWeZC3MXiV}YAOo!S3 zAxS^X`;r&Ywg#n9k!{j+kTKLiL*;p4G1H%g#3JZdUC8vAO7M388Zv@nB;o&TtRjneKHYZwtD& zkBYZGXDdOi4L?gw|263HkWBbB+gb6C6}i5cRw`xcIU&z(XMm)k0*Nw7 zp3kR42g9}J%zNNhqpZuGU(55d^(?=hJW<|7-m25w%(5*?hCCw|{-4`-07>_+^|RY< zY*XecH$l!M6@Dd;GK$m1et&nAcLn>n&7Uyuv$D;h&%uKAp5Fu7aY&SNLTt6HhtGeJ z%;I}#Kjtx8g#Cme%-?GJF!}DXzW?O**!A~-Lg(VuEcZu9ZtLN99uNJJk59VU(x$N2Fx0`vMLGn_Mz8-3RL7e@;7Q}iHde*_uaQS;ZOZLG=fj8dMp4kTXG4}=8$8oq& z?dYA1E25mgit`_)Uy9`X7xZ>?YyN3DR<4bzh=p@hK3fxSTo^KwD6=teo|bp2kwzNK z27VWV$FA6{zxR+q1$F6EfO-qOf%8c1jCHY~tyc8sC&@I-YpTxRcRLyb?{?_@J@ZM0 z9Ke^dze9y?(zSEeEup&E3c81ZN$+d9VTh52RBG?I8WaPGoU}jj3}FP51e!Q>%9$u>LK8^_vM)Q&wB!Ku>K6b<*-7(0}e~t>CY=P z=lQ_r;ByV;;pg7p#{FA>cO;Du*AD<+$j`sT^|L|u`SWu@_lJ%$(J1I^l$TpQqCs!B z;nxcN3CQ@fmdo#bRSW&-dSdWe(umxwlG&?zj15Y$HgABkBpRNHd6S)lzCJ|RhW*6P zT(q$rO2bM`)lw84JVVQSeiQjSuw2rI^4u4(OuZQWcVhWFbo{6P{wsesZ}Rw18vf2@ zK}958-^r2k=0~A>e0(IG-tJ9N+Qk~)9;3~kDDw9g#+H9HiVQ9>zhtE1mtyOsCrUlX zqx55E6uKD`Vz;OBrRe?L9IGvrk@7p^$bUJ8{Jt3Sr$v#!Db9FJ`)zdj)xV7{za@(N zOX7^zwixnv$B@50iu}EC^m9Ch{8)2D=PUkQbiVn&i_W)gew2BzHFmo;$B@4>hWzR% z@;!0n_eGJ-yTE${uuJRqsX_!kx#G2 zm*KORFqMUmeZN518?820~K4Ev9wpUyb) zx5tpbKZgA7DDwBlkx#G3u>aSi%Re4PKQHDgG37VJklz_Yz9owNFUpH8zdMHfmt)A^ z97TS59Qg%*h++SKh%Ub<%KU0+i7me?ihM5Fj`CPOTKqX9{j}%%k@e_**G>DVf%E0> z8%5kb-k{+)>e=|VkjMHeg<8uN0O2Gl_-R#(tHAB{Fh0=cv*`OiB%w4=U`U3R8W}p|KFR`WrEZ>8? 
z8`z9`dyyBTr`5oAp!+M1#|=~rbOGCeJ|NwI_5fW#56}l#6n?mFpxr75 z5cX*WfCtzN^a4HX6O#;74S0afKp&89GKjv>LewFC1OWFKu%Yxv(N5@zzksjIz`_xqUt zkEkD?(?=;}NktowSM%pbPzTV{hWeMFet`30U^CFi*XLq9Z-*S9V3d)%FM=-HVXxOg zW(TexflQ#_E!Y@jb)AWE+y;8+%fn@;d-FWVyG83`7s|Ga!*tY(y!&qLxAxO_AqV>I z1>YT@0ebF3o%iG3o#;Q%3l#6g=Y609itj>yfxZX9!#V#Z{W^?Kr$XIm%l7rC6ZJ4y zS6tS;6t)3y-U}4epwF0l`ftw`fOa?9*l-DS{cZ4XL|*~R<I?!NU^`#`0_|I75V}ua0h|S0pO1Qx(-5=^`m!uX z8v)CCXm34i`eW4jH_#qKzkr6HLmtp~DL!vNo%2vP=(<*bXC>r(6Mf;|X^naiYzy{z>;4q;4(?~Ob zZvzhj9|1+58_5dX2J`~SUl?gN-~(<14ghZf^rewb0~&xI1HS>XzCst7l9l_rSpIv10MtD7*%ouHv+!^dVw#1GgOtVz-_== zz=R~w0Y3tM3A_bln^Yu}9Hx>5*Z@2Y%Z|ULYw8x(1#HMrNzj z3_Jmxm4iBgH-JeaRk{r50m^eB2RICj$%77o$N9Slqg0v;TmgIloH|;i)xgcb3qWSR zN=tyNfyaP<0;i8r=|W&9@H#MJtV&k_p8>TesdNxXEl_CK;d|l0egUBz?4Gh9Jm>H4){ATdV)%GfYreDz+1q`i7L$kE&}cV-T?AX zQE3J6E#OArEg-uH;|jC^j{+Y96HZm>o4^l&M}fZrDU-kpYytKGLr+s_DR9kc;@d%p z`HZmaBr?$uGSg7lehQ`1FiNB0h(j_clSWV$Wm67~M9h~*qi8hc(-;~{Cm}W)hj3>+ z719KnNT*N{ol2AFG@4AO(-is!Vwe&trKwa#XHYqvNz>>onvR(@gU+UNXeL>xf+}ei z&89gtm*&xYs-gw7kQUKmT0-YiH7%tYs--$wM$73us;3pSlDtkgg@YNRHz zlY^RRHMNkFE~GWoika&oH+kqH@=`nbsDswhI_jkLbTNI4E}=_l1AQCu)n)V@`d`{e zm(vyWUD`xf()Z{px|*(`@6!)xGhItt=!dkGeni*76KtdF=?1!yZlas%7P=L&*=-1k zZl^owPP&VJLObbh`YHX4cG1u29=exy(|xpu?x(%9j~<{0X+IsHhv;F18;{U0=$F(@ zkJ4lCE|1d_^dvn+Pt!B>EImhu=rBD`zos5~fsW8`=tX*oeoMcjm+2LHm3~jJ(d+aF zdV_lDO?r#oMmY2?{gM7eN9jF!AD-$1`j9@NzaVb>D}79Vqhs_5eM*0)ni$uPuVHVic+8&V9ZhGB*@!*D~oA;XYq7-7gVWE*k}BMrHR zJi{o%XhXhXjA5+dBtwB=oZ)1{taF#ls;T3j9<;Z%(#)B~GYhS?9;fR(o6~10EH1X# z9X^}0wcRqy?P}`qdL6E&^%l$0ITaO_>hg;E`jwWIkz5s)1lekvS{(L{R)@FR-RcBI zBz47{%H_o+mPE=U;plK! z8*S|l>)NvDoK;$dmKeNFpTleOxxEPpVP(Xwyrk3;sKf3QJz8t?I&F=uj)eNeKMWaf z;O0~;*QxwfEpfYQ9B7Nz=JHjxx|`NGU8@%%>%YQa&g$w#i80kUY<7qDJTEIPQI0{i z&d)YL^2B@6;qp1#99DPZg^s3pYM*ZzAcL&HKMDF7(g2-*P-~mq9*5n^3bPDwoGKu( zq;zUjNhcr%zNKG|SqpS(pSPpQ*Wq>8>ul|7X1QBip)hCMiOq@#@db76H4ay;&*pOk z&8vTxI60zq82%WeDcnPqb%`d~wi=3#p;djj=Xu>79*gc-6XEdoORcrD(Pp>$Y~Iz5 zXe8BhDrZ~zC(|S~Y2L=#?r3eccDNda8O4*CAVa8hNyoXZmB-AUP#p=e$(rD=oK3b? 
zv`ZW6ScG;*v#p~Q?blUqb#yx0ebG%!Z_ogkv|1c%;|-s)&Ev(S^;-MYc7K#%@&fI% z`Fysfmc(Qyz!kF|o+<;cCni_Sdc2Ni2NOC5T2(woS(|IEtU6D1WL70cA1b>I z*5mZFI`oENkxZzL#P~w%a<6rG+dCTDF(n$YmU%e#i9RDD>r0$Bw9bIOJt7{ou5m<* z2_q{^kg=cId@Wu?E!K9AqsiF}H;{0D2P>(c_Mo_z5`?(;Fi2((0l8W!EJg=PGsK~O~N49 z``_p3lmp-6NzMV&$KG`XnOBr8(XznZL^l8>aYOTx#9e*K0G(1K)8OhFfEyCLWeFcQ zveznx+bS$vDz7Yq6QG?aMrV(AaG6*G5|I$vf`0v_#ujoCYMq;Kx9jVpF~n=8&e8vo zpr0~6TB)0qXkG|e ztya;j7Pr@tKt+6`rY>PZYqi(C)(PA0mzZ0Yh**v@5K(KZN- zqlsobQd`V0TZZ)g4UM36w)2u+=e0FOY=T!*EMSs3uKpR_$PK3CbpYpIfyOFG&b9o}=Bmpi?_4qGdXy$PE%x3|6Ds&xtH)-J~$ zJz|YnQ8BaPyt2T}7^VaPx)bcDzL^zKtT{x)aCuqo9H5a4)(`rUBXT!Y|2)(OSaH3ON;fTr;$b#2k!4ULQ<|p<3j0Icl93$E3F|adL5QQ?DXRZ=y$j6pOyjs6RSPJhvl)j2Hs$P4Zt#hmjOa*77T;n zwy)(y%yJ?JNm5dKyK}Yc#02pi(;tvqE7$uR*m?6x$LWKgcXBf~3l(B49NrjPT(RU= z&Mp>jVCYO7!N(2}=7Yg{-bzW|?DV!-o20S^bljF=Jf?2ID1f>@L z>Jg1Wqo3_)_j%pxEwIk@uBJrvImfTh*>25OCUhuK)el>&qlo|??}haK^>gwdx)^jk zp=)OzuZeB^PkAv}`VZnXdLDl<{8g@IcVsYGxy-7)O=qpEshBm#TD4^Exqh)FmS8zL zUyMqt{Ipspt6T`39kU%CUke+(t!d4?4x85=oqZM`7zLUgdrz0J3jNIqP$qKX6S^Ih zipQEezY)BtIcOc zgWG-9bxvOkKlwzg{r^>Gn|rOp>gi~28I*ebQJ7zz$L+B?o2?$Sn)ic)5Ncf#Me5)i zSQ$kW82M?`pr?s&r=eoGzuvWu!A%Q{QgMVhUV;WKO&|2Ic|0|=mWVnMon~R~tbLu$ zGpOMSeyKB#xB>W=X!3k5Jh2>|O;|2Ph&k{chdF|H5))k$>U9%p4;+OJBL3E0Ua*t_ z^VTB3Lzi6cj`r5|V#DXK_Y+$#4Y=DSj&;ESe8SapEwrg|u;)L4{eM&e{cop3c~7k70qy-Gxdz(U zf$DZ}Ck2vukS7J=v~!@Fp=eC~Z-r_)Too?6ARmM`;fFF|?t^DxN4rIzrQzFieR>Ah z`~P2?KInrwvG~_Zt`0u0ji~7BZutLebH!P8!W*{m6K`v+V>KV!TAePtqtohc9`Mq+ zP(-y-mXD`~_JA$%*B`3-V~+bOgye2r((6u=}LTR%Ox{Df4o_)3jp=GytX9V z@A!=I`W(cWzzH~JFrR?;^VJlc*6m4VnTZoS80DX~}= ztPC)EJlG9(xT3u6v_fo4^@jp;&MTA0dcmSBnxOS3a6B)S7%b##lrTWf0WV`x!ZOTS zZ|izARWpQGhn|91d!l0v%(A$LI-0-YMJ0i*76-Pzgp&AnO`9DN|{fQdAiJx&Cv2*nLjD>Ju<&V=9^^RD)S3uK40c# zGS8Q}D)YDgN6Vj;`A(T%C-W<0?vZ(e%$Lag9GMr%JWJ+ZPS^7HW&WbfACmblncpDu z@5%gPnO`9DN|{fRdAiJxpC$Do^XFy0TjtwjzCq@VGOw2T44D_oJXPk$rfK;RnRm#GVhf66*Au@^PMu^EA#J5`MvVyWd5|w-_h%ne7WVJ^-PlY z=gK@+=Jv35riOjKTA69B=N}u;H-reNv2L{c?E2>A`-Yi;XYF<3JGNDJi>KA*gM)92 
zwe8`EePfb!v9HtWbUFEq!`kljbeBoEVlE@4A8m`E(uU7p6e9D6aOxU|c&MXyfQc5@E}vDc{}2-XtkzkbopZgmHphIioAH*x zG|B4e@H*IlXgnyO11o~KHYfHy4jLy*{7;I+vvdAY(eQFPtYjdrM2wifb?~Ir=4iuS zfM_x}VYRl7)h)gNJLVcwG_h&%L$~`t# zs84*ISjcm^y$)Z8*TpSv_BtFydDJHC6DvJAIz5zQXursdx>i#_(>rNhQ@aZau(pel zvNd@zb=-|&j?)mW)lN6RBhkhIXY)Fb*Xi;#6OD+t*Dmj|sCCrb+}hFJBJ|3m>~gGY zp_zmTw8h%&Y~_Y?m!RDye!XiojKS)%d3na!ysOs|O(ijpo#KpDzvXbPb$Z<{J_jFDjB@ZvT(GTjJ*+f3GjMHn zkj>*fy{-N9bxzmm;5^-{{jkvKttF?IoURR77)6W2<_W$Xe!tD;Yl(LsCgXIij!%Ih z2`1<;ipQ_o!^vv5XO0VdwAw|&>ehzb!*B)N^abCW%VVjPSE)-o9NzWf9V)HlpQJgT zz;`W(#w+0q0pd%V)#`4SPJl`bvpYPk{M!q9iIaI2VTR3N$BA_+sn(%=^^52)Nqpcs z*X^xE|63jBil1(p(@7@<)g#}?qQZ!}t5ACz`lB1mEV&=5)wV;B(=GcTs+-e!9?=hz zYFix+4_%v7>s;-!wQ5z$XEk(lQtdjYuc@WZt@rMjt_AtIom;PcsP;PDsBt}|8s|ln z?MvQX5^)4b5Avs4{nI}F#NVo*5kiWyX^l7@EQdF}NNCQzn)?)ZDci-1 zu1)iEpGS<5xT1?`Rtqme;pJltvmLFDNct|WX|bD6<+M?QrXEz}cXul6b}tVAZBexM z2}DA$uVXf-5_kn;l;CeO#nP zy1laVMLcryt6OYjuP4cu3FY{|ofLSv(8rylUcE$&d^Kw*s_r~N)-7h`d^W;^s(k)h z>u9ri_%RjHhtd-KMZ(&j57Q#11YHss50Bg3su=*y;nJX4`kZkQqNYX8Mz0N8WLbxT zOYr$5_1q4hr^7ef(b%!NR(LR!PQ@tkDQ`8FFH}nDfw{poL6iFo+-Y4-i^DYw>j=Ms zR3jETP#h1LbAd+wRreNQH*^8*v_nxjxpQ_4}etLt;7P;MPIy`gb$|k%w z(I5GaG@E5EEDWgYag47P(%Aj+(Oj_%6a6puE#~bVNp*U(pgUN)##q(vx9z#m7ur>= zy}CwM;GP)i)ef&-R;#TU)ef(4Nv~lfb=i;!> zmaxwaVV^x=pG(8qb@=>(Bh4><^;PMPH%`C)G@_1ef&UAj_k&N7zEL0xj)(T|=+sd8 zQ)X5y7n>Ka*S6k@z|5*`ShS5u9Qwd!C#KxMyIt|={J%sM{kNxL^0o3J7re9TM?PZ7 z4vrRsniG}2pWjf5tH)?R4%T14WB#gG48DGU$0&$C8vl$|TU(>uW-U3#GP7jn%%Heg zdn#g?Sty^dX+QP19G}H1tF=^H8ocYdysA!Xwsm3Hvx$&TAar}}4lmlO>(--t3}!72 zqu1Yy4SCT$@+;_k&K}&?ud%=4>^gRL<35%dtRnh(*7NnK=wG9s{}^)^V1MN#oLQ({ z^xt3knC`!-zYg*B9BZS^$Kv#+@RRWWgnri#*j@+n;7WTjD{Q8T2Vj;M^;MT^U&)U< zJ6FbSulCNfw?jJx3apAzk3i$diTy)-(dosBVgl=Y4^gFZAc4$Wf%iYfmoY5bTO_g6k6UuWp~Cp2nmZU+dj|O3kOcH^ z!b5{{*&7{fzzqB=<9C2DAf< zr=S12?PNdWJ@AGOZa$x&J_a$rPqfBC`Ky0n|4y{V_;KCW=C1~4#R<>H6PFvm{r$vW ziB}dUG8@1U#);Vm8ea{xRto!zQJ-d(n!W13&-3-V`V-ne1?i*z&3{P02j%ZhIDQ$d zKI2!M`1}&@epwgd%K!bK!orGj@vR{78yhh#GP=Bf>oHV<^F-@fxY5Vpo9A^n?CZ7f 
zz6{9q#E^F`cSwlSTJtZ6?@oBn`!5UQ;_We_?pO8ij-g#mPLGqLc0O2yo@*RU4*u$m zv(3{wfEKQZe8<1UGJtP(lvt{&=Tt^arvwfnN%mPI+nEfdGMVe|4t#yi}iS7={a2z5O1hAPPQ#XMh5SU>;q$M%sYGi8xR zGWh=gH$0{(i}IMJj0Zm<-F$8O`Hy{U_g}7`fB!!H1aum=9>2E#fA#xse~sXT4KRj& zDqL>Y`Zjk*`?+4{YJNYezk!W)5TV+3(fJp%w@!>bX1gm|TjO%a(ht-|E{m(Cq*m1zIs0$TY3M7Rr9xSPj!9e zzt>y!-$y+44|M*1U5)Cg;D;{FN&nTTzEl5lXNC3E==Ub8(XLbeVrw<~SFK-St*V~N ze>u3q^8ft_o8Nv8qGsoJ->!eHpjzv*W0rgcUGxlx8m}TbljaY zGKbnuSN6U1=8ykXyS_+UYWDtd^3NsJ@HzR{pYE#Wy#MLrN7a9A?bO#-W1s)2yncDy zDQ;WU`18I>h4u%HGXC6>n!aar8gkDA)vckzzxP+gj~i6g{z`shrPA`fPyJg8r!!tU z6E$!DU&gH(HB~tdRV;=oujBN~s^shTt7w1Ut|wJc-SzT^&ugb#z7<|y?Qhln{kodH ze^>BLH>a^)^xvue`RP!D`H|~S@x7AOnxE=D&wu+ztW_&U;+2J)?6lFI1I4jg-#F|}#KIJ`5~m<-ySimjU%vx(S;Ka-05QTFRM z%%hh}?88H(*~DQ?yh6s!I#S~tEYjHegT}}3hB%3>kCIY}$6cSl5;5msByYP99XX2&JB#y-p zserX%^<68mb=9@f{z6=Q9c^Qs1n|`)iSzKT=EekQPa3XnF6D#x3uzI3wEyt52dRL2 zdGKY8{dmds^zCKVFb=q0#>9Ho4U84*r#8Mp3b1AtVb2>SPQz8CGTV3T-GX*=Z3w%z zq;0gpgO9eX))FUS zdTXr*^E3`%sm5W9Ya?+IrfckNW54H&zR<4#mT4TuxVBmz(>3;Dp2h(z)i{iCH)(lH z*Vv06k+v{CA&kCR`-L4e_F%Tgx#(&q<*ma>MYbMXuW<-(=8QbboZ8cNiN*meb!&St z?iOtirfclQJdFc*MB}6Q;jPA`QBMdvwwF2b;QaPdW>E)Y>UY$=i6cA8S!(@2^Fz2# z^TSxHlb+A`3@MsDm5VzyHl5iMouy0&J9l9p(uOX0f@Ij6);GJdUhN!V$J=yWVUKRc zB$4mIxZ5d1dFxZR>v-bzJ*d;3Q5YaaQ@&A8&Tmo*`PTX#_7VFl2D3;b8Gq|n8kb;3 zFV5Ikqaq&H_ynHY$C#P)#f4X-Fh|55T$3Vw3F5CQaxc@@mWv zm!w9vkGNp4UG`1xQN|6;s5 zjd7rDBk>1~kKyDY66fF*LygJd+9Vt?RM#pl&^Um*HFgc7{lm2V`1&wew*l;)&c33} z9xT^5j28^oF~n{fd+-5`eYk44^ec$9?$)uwmUrvD@Zu3NR8m2%_DUTao;_% zZo?S3mvJMu*1AvIkB@8Y!<#dB-{6_<#t|8EFE4hwpJzAMdT{6{)|DN9yeL!RSnQlB z<-6bt(k7k@LENYLVVvvL@xgolm-WDyd-3WABu>P09;6$rB^N&OpsaZxE`Lz!3F1DD z&1jY-$<4KCxI*LPhZuv0*;mwK{rkgG&oum1V>8B>_%TvX63!kYZS&(TS;hqI+~Hx3 zlg4u0SRE6*?GYJg4_^JK#7TI@IEh_2YMhqGjpOv0f=Q3boLUb)CSxAPmmils7{WT^ zu#7Vf}Dc(_tOELXv&bLL3Cr%`VsXrUzpONz|37;f+DKiZl zzVV@4=0v(Bi0wzLz*xiF2{t9L{aZxN)kV_i3AtOy)v^<@2sUoP>3k(SLg_ zmXq?iHjH~$@NQ)5$K3_2Asb`!w{`zwipJJ4Z%f~^aNSDI$)&6-oVrTt&&7{c>3a(v z*Vq&q)2vY9B%EBR(|q|MeB 
zKh-^sp3VBsfD<=M`D|?TnUs&iv7br%XXA)1ynE5Mk$Bct=7zX79@r}RGMU+Xv?{~6S>Elt% zDB)d%GS-bHI?gz4m-Z1GeOpNkZqv9J6AtM+4>k<*UP1XrI58~k$;J1>dXC}u8k@ts7aZ2lfH>u_huq7B&y&K8j~^fTfwSGt5jOjgcM|GJ!mh`tlY9^U z{U_Ea`MG%EFRV-ASRC<-od3DFO=I&bd*@e)y|_Z-5{&;~>^DP9#pkF&g`E zpT;i7G5sBheYip6Ft$8H;xt^KaR?hmN$kcc8VB%4jbo!7Ga_2zT->IyInyzn&(wU} zOS11guCpA|oD|s$IOQy@6OWMW`YEmDnEPu>oQ;p1?U*#i+=qXw<3!%8W3gKusi!+m zt|R4h@aDRX8Ew}Oj;kwWtOXidk7{gfS5K~W<8YGwvo9~^X}-1TxlZI+kc8vUm3$xm zr11$HcAnJf#o&38A410^u?t(dq>KljB-yiK8m_AE7(Z<;#QpW9j5(h+pD+ClV%-Lk z@4{P1g_QSTn3TUfDq?($lu5$7HTL3sjRUy#0>>0lz8J?}=tRzzT=X||BJ1Cp+{lTX zMQ+@!am+=I`GRCW1DcB+^DfE0AC_S9CDIo+Ue(w!Tc|$~S2mWu7vizT(gth7rKA;6 z5nGe&yLWf=U8-Y?uU$s}DHFg8V`&?4ES5HLOu6kZW?wGr%DV7!soxsX*g7kY_HpfO zJQ64MAH|7HoyZxLjc3HuuL8yvd&Enb5^S0vaRRPOkbV{6psOU#z%Q*+qJRL)~={$Dh?CGX!80U1; zeTVOKlXV`#LpG+JVLYR|v3Fm2?hprx)kCS>x`CQ!HL)LQ`ukER2 zFhgUj&m&`K?bS=-6nvnU^nEnW>LqQSjcYW&1nc*f{8(($Tk=!!hu%_8av#U6?xXF; zjwup*Fg-=edvQ*Rp2K)T^R27Tyv&u)$Iq2aWR z`)1*HB>TQ}3^VR_%p$I}b|2vw4{Op|PRgRZwZ%x8_hhV};Y2<^#^7|4e|1#E7e>jw zW@2+MW4k8G+<YBv0 zPe`3XjG4sPu8fNK&?M=57Tz(%iF^i6#XEB(KNSa0Wh@xOG#vPpT$_rMpJr^SGY1bo z!x>8)#^bqi7M;L$&+5AYZhDS+r;K&R3#?u03}F44a)!iWomrf3w7(uMCY9JS`2LH` zi@g@_d&w~Y+K_?Kb0|;Wts_bHz1=$W701L;z5qjh=AC-1PtWCiV?1-QS02wiu1&#R z3pf|3vlO?z&REfgV*K|%rOsn`-Xhk`O8aLNoCBoM#AP^mv16uDJ_{Ev;oJ^JMI8So z<7Vd$YX^8{kZ(P6DQDAKVmz@_>bEX`i?O0i0XE5Jyw=!1yKu}~`LaiXctGPYUb0O0 z7p5$eakg$>F7H}9uweoFmwUzH=mMEjYx>)?jeA*BRywAPds(-WqRHQZ16Dba&&X-` z`6_wt6yvT!#(_FZvBPSaYwJ8x9{GXQb}i_0ZH;4M*D~g|4fs-!nEGeqq;=AsAl7+D z+FuVlXl$MEt`qs3n~lrf)p5YaMN-B(rbzO$@N>;C#s=&4S%aw>TaUfRc}sh&?LT0g ziPN#w$Ml7^wZ*KDwNA{}*c#T@n)r#t)-D=b+lFLLt%E+5b2trspGq04U*knMakCTo z|Gc?av{~PK@#oF@O#I9-*LF_soS@e=F^H zhrN$KZ%~nP7h}UO<=RHLhGgDlPI2=N#)Wtbw)#rSx5ab5raeXMJB;1QSQA@&?$pl$ zn5nTB7w^>fL)^1d-t)__VTqJ!gk4Ld4c+lxjWckD#(q3+mwsNr{9W=aw#I!UZA-#F z-$?lsJf!(yoKULo##pAYwZ|T5n+G@Vk^Qv=)As6K!%z3o=5?G8Wvs_0=x=B%p(JE_yXMwQN$bC#RV3D5OK2@=w*Z=b&&22d;-FN{%-|3c0o1%g8}JX zNt;^0VWucDljvKZ*p%#tR5R#H5aM|tLH}-KyV$_oZe*T(U{g1;p3OkJ?xfBHn)V?3 
z-wc@5gUp`|)HNsXivbs!lj%I*Zga9M4!i~u1#8ZaKobixT|-JIm`&*_pot~fc7hiv zT?Jfag=+TF*}#+5Q2$E0h723>zHY$BAewUua7s_I>{MVg14Rx&eu8~_qnhu=5XAbx zI9Aa$tOQY>hq1Orksf`)Zy+598uTOE-WhnWAKB*wz52uWLLN47fgSWO*yjONAjyzU zupg7G4-k z{SSN!(gOKgf$tq*+~K|-fYUJYz9B$u7U^TUzhFvq!7k7iu%iU( zx|01y@EnMFHC@A>hM<~tpcQC0l&k|2SmK6i;!!CO9Y(fM3%F?*nI{L>Z#b%1i<*IT zBS@bi*lQ%JSsxg{L8HiYdtfuj1h_9j0}t}Poe6r7{onx{8Av#7e%ZgKfx=$ly3t~{m8Orz@s3<=YwUyapR#3)W;Wid^}mt zlfa4ok9+(7nz@`#7ED%LsjL1Js$|6AZ$57dDR=0#vmGemZDqz?7q|N~P&!^TgV9k8e&o2Slu_$78nXci$I8+lW?SVCMq@SyR z$KuI8KM9=6CHs~GJO@I2CQI<(0`P5UqY^kSk-V2L&@qXM9l&%D-X-Gu8_?&;w@2?riuMEbA_xIUE}mmJ{AG%AJyuYpuU{vUyF z(#U@60ER6leIOjTZ82$oH?WG*tAQ^my&Z@xp<*I1d?~8=JeT0~bXbd^&%=TGE1``r zjwZnTm1Ny;pxY|4-#mb0vcTtwu?7aOhBial9AL?6vTmh7|7^0)Cji%FlVi6YD9onD zZViguT|=hd0|u{!@q@7o0UEA@xD2`puonmebOtbT9c2etOX+pMzUxVS1TYRH74ma| zDi9^;1Wh(TdqDR9&e%ZKVLgz$5!F23AXvGPv{Mb#*@SAo^JxO~-bBp>;K5DgJURqa zZzAmwbjl%h7vL(87Pwb7@E8d93fKg8-3;RdIs@pxnas}yZrn_*+rSf4x)S&krMCg+ zY=JybmIJH>F$290*l#QNIp`QLU@O`GY+%DyD((PJY$J6Q5ZO-JBv`tg^xHC^OD?(Q zdjL7PWS&&uc@P!YR{@Rk$a-QxHi-RIx`t|y5YP$Q<-@!Hod=x01Nsj1aA4sMvTP~P zYA4hK(yf8EyCBAcjsY3FshA7o>?ZRQETwdU?cL6Fu`l_HTK>=x#U=m1Y zNaq0$QRzycjgZ<00`Ce*oB0JWjv{K!0qzu0a~4=h=_+6sDfB1UGy`stl6JNM7Z!pY z;$FatLds8oh>VQI1Z#^?#250k0e>tZ=Pbbqa&H%eh1 zm6CqW1#T@R`{59?m9@er48tP z2-a;#C#ZiI)*r}k4qORx57G&WkC1DK6j*kY)Rn-xqvX8^S{#FU4P_a?1;@zsY8$ZO z7&SM6=Eq5$U>b<#`}4pWO0NU*PQdtq9fH4r5T8SJ0R2yrb)Ep+Md`bNEtF1ha5;4b z0e%8mNwnn@icC61=3xWVPmy)b23`W;Kpujh zfC(Tf(7C{CARVAL0~zP3vjcDdUqGP0?0K@RFHi`AU5D`n+E$ak=K=fy#0}EJf$OTN zvjgyF5D!Ri0}i-A+OY?QQ#!#EN>2siluqz4rI!KCYsm2;=vzb82RM_`35sgS`$~b% z7pe6VcnQQ8>OioKN+)P>iPWuuL6jZ>Ot?hap6Q#EUv76Ld3XG-n(p#|S1PO<-D&S2J4(Mfda9@z661s-(DgEdj znB#XyJ@zh&thr0-+V@bT&plEnIGWOffqOvgVGm63M@m-%8TUz@;ABcq0low2te|VS zsvhbM{zLFUJ(-7K6QvWhXdv}qU~&U_UxHgGo#07ICwQCE2^y(M-4f_W>2bgflwJ;e zMCklF|vTdq~>f1uUb|3D#12Bk&WY z6SR6n>Q%rdN`DVDe@yBGr#&WZ5>!$;LH!nD?$I@zO6deMTF5*E<&<6pY-%BG5=5R* z?E>0Rx;rq4(g`L}dLHl)r8fXSQ98kaPf4BN8j!v)PYLotIG_`}OywciOz8waQaZsd 
zKao1YaUfHPIsSh?|HB^|VuBF=Ga2E1pHBP^A@&F9dRhoV{EA7qQVYrdgCWSLyTEG@ z5{S%#*RjYH(EN}|$V9mIgI6EWiJx+<=bs3BHi!{ig`|ApXvz-p-`COaYL8-x_q7AOGe%>b@r{)v z%@xt>L=6|hYZT-}U?hmTPoi?hP<8*(U-5sd4O06IVxs)MgN-?RM)I;hkWlKY(;Zly2D@~R1q}kFO zX}%PfN~NXJGO1EpEmcYDq-tr4v`yL}MGAEbO$yBm8HLP3tkA8{qtLgIT^LdrUdSor z7N!>R3bPAy3iAu`LTO=XVOgQFu)0uHSXZbnY$?lNvbc;-i%!?RB%p$DFt;nOu zw}@R7QWRdqElMro6=fIY6y+D;Mbe_uqOu}oQFW23sIEv|)Kb(|)KP@UbY&(oa~VU% zlwmSAnTO0*#+HT1!ety8SC%T{$+BfRvV0jXlgdhEWiq9#TBefK$<(qIS(~gwh7{`- zn-rTDGm4qTSg~8NN3m})yEvpcyqHtWElw@w6=xUc6z3P?#nR%^;<938adok(xUN`T z+)~_D+)<2_=$4q2n3ph0m?c<=TZu=BZwb33q$IqAQ^GAtE#Z}9m*kY>m*6GRlG2j0 z5@ktsiK?WoL|xKS(pJ(@g2;8{CUSE*L(Y_AayPk$+*i()hseX_9648>D(A_wV13S1#olq$*;N=3CorKnS=6)lQ3MTY`0f%7F1$Z%cU1UJVSI1|TkH{1jF#o2fW z9*%QxE}n|>@N7H>&&P3GikISLxDv0%Rd^k)##`_Sg!fca-a)KdUpxTu&?0^n8NtBw+2HkD@c10?YzFu-7fP$ZQ{BKPao7i`glz?C zkxGn9OkkAqAxgJN%^?bx!s^0=XsRqShxLR9BVq!5>jC{#SHggn<-mMH*jk!%lP}ys z3D!LXCa`}}2|ZxVWxzVB5@Fyw;jmX=!yW*GQ8$MejYITplc|f@(7#+cmzcBRWPNk+ zQoe~0gOwWrpO%8>GQc;h!TT`qGAZml7~n@Z>>Yf;mJ(Jz4~X!;ELZe3ruOHGx@*!;X*vwa+!C2pw z)nXW}f6wh99(}Zj@s-)nZjAA{wsAmV(mJ*KG&%M|NET;@lmXvm^G22Z`v&&CNeQ0DPc}B!z&??n?U~IQ5F;H zPrOez?e*>ZQPGURn7Q#_mBIEJ!*Ie_EUae_L!cAO)rsZIav2&5mn@bO=0sg$$^Vl2 zQSa0LpZ$Y0XF_z~o6vdcic|c$6Q5q^ z2M-M#T6y-xdC?)y*`4R7Y_(gKyE;;hZ<%g-ueWyq&&y})g){smXA?r^hd3F}Kk@eZ z1DeaZ=OZnq(1xWL^=lt_Zs=n3k%!Lr9+^`#j=xO5{I0>ym)rR^O#^es+FMq{*;V~m zV?H!D?{j63(3dYJoIaSee$c0mr}A|U%8PlYRN9Yb%sg>#N$o9&XGGmCV`ve7?~LB2=xIk1|J6ZCA@bQ@iG`2Iv8_vfcLS8 zFKwpLw5DN!*yJzQ7@9TqFK+-C5T+t;hE z&i1?+c71w<>8&ew)lEKkrZ-9mpbk$G(T(7-*t)D$~f6*5Ce6h(g z+kll#2V8r!y$Ma76T9cr zp1)$i(nx+#d-CwOhQ|_rK)vM)SBN+kj__U)w!&&Z+NdylXsd z#t`2V$6^&ZSZ1fi5C&$GAwWOEDCfE+UeUi_`7^TrB}=OlhLKC_fM4Gy{>CxG3;dh> zW82Y;F)7KMgv6NSw68$`UWa0CPME7Z=1v5GA(~4TMqK{egH$H^`*45~;Q)nj0CP^( z_Ab%H+M%Q_z9`cqz~%VMBO$%_d0Gve|0I-MRHox*spWS({cPu+_lM4}>RM;e=5}U> zPHDA!EzOMOd1H-XTGZ<0+rsZsks|3AfaK zGG}3n)0F;Q9Used*;fyG9}TNJ=Ri-9ab8`Fdo^q(zk9pSakX1iaeS0(su+(n9(==N 
zYe!RkC&SvAX(GP?j}1@Zri)H(8~OZw)1Xk3UI9T&!Qw=9*I`HfeyDr)%xm+C+e;2C z$+EgVYXADMmNk=BTE65vPH)Z`K3MJ?Qh9XLC#M?+=_B_aP;7Bqeti$m{`JJ5&Anaw zopq0oN)J4~+qlHScICzP@! z7P+;pq;O7aM6Y>`Ous#|vLEydyV2W!)Rc3FgGcwJw_RI2!?D)3i5q4-*=Nzg4y5s9 zF`7B+uCm*~a;qPw`9F5&8$Yr2J853#y>x8z>9dK8)rpV$HdOfRsC<6LI{5C24Xpuw zm_)X*p>@VS#fST)a~_<|%UJU4<}?4tegh^@15A5 zTYAz5kIlcXzL~w=;7d4s2I24tW}>4^%Qbu=_RA{R8{SSjIl0@(itvJS<}yma+M3Cppluj1}1EFSkQ!-JSm`wnimHCPB+$ z;vy2$BDqNpoa8vnC?Hz%@dfdbjswHy zkD~9~msz5G_0%*=hZ}XtbNlQv$nDY?xn+lE?y~D?opVk{&vLLI)p0iQTHK0Hr$)Ev z*9@;1S15cHb2qZ0kEMh={{&_o}S4PBn}3UFMd8ofB)$B(n|$lr}^2tf)@l$^c}18ys&HG41J#O z{Eun-PM62U&ATuq$<%N$`!XxD+gvQOTO=I+u#D~h?!fh@C%|tGmUxDoSb!);)}ymJ ze|t!Un*4fYz%s(VeYYFt_-jEeRv#@!&9n1ym3Kx7Ek*2@c6)s3%1za(^|RWOKAksn z43>5sg?WBcyfe!MbHdGdU4Q>sAm5T_{%3RgyGX2+NkbNYX7u&<>$~~j@y<^c-JH_c z&wyT#753u2WUwxJr7><~jMc`vidWYZsmqJ(m?y7mcTB9$<+bRV-R^k0=6cD8ZKIxd zUi)Is8}5SlpSmyEeC!NAJAJ4sW$loLWpmFx3E$iLDJuR+C$!Ia{)D-=i)tMkb}D*W z&AxJgV?2ggptH?MxTV|7H|M+PL`-}iG`X7}Cv))J)1H?zU+A^#q$`i^u%Rnw?)fDv z>h!Y0+N-_qpf|4i$Ih#H95#QOUO?KdErw9fL1}X+qP&Gilz?w;u4u2=Hdg^vdb_ zblM+0oDkfY^E@|iQ;51eXnPOe{v%|TF-1XLj%CF@TGu=0NtdIjPWTPMdi#2FQ#jkh zRu6_A&i5GR;NEaS?*WX}9I4we0}zGPOLsho(|=p@F}z(B^> zz=^|#V4hxs$2z+?4|W>j?8@qg*^<-H`d5?0Uzi>om>dzuWdueiE{utc#xsq)u}rPk zSSJ1cH;-jP?T^KKyIf3QY9?jq_nY&j|B_CiT1Gdu`NJAh%Lb--D8qvF!-eGzqcw!s zk2_&5EEaY7ziy2F@_A2b`Aq9z!xB4PdArT$thGHiR9d9u>n54yakf^qHdy+-I&|r7 z&AHSx3mSjafC`^=wT=_F4ZX6z`-P~&-g-_|pKqp*(_Lver|;7PC*n+prY(E2OMQ9m ztKsid+uq+D5dRUUm%Yu1UF?k>IeF<9Gp2t|h!wr@b?0|2R$I=cvro5{x|rE-9Lrfd zeoU43sV>RcS90&%mOS5M+N1SA{q3hOlbRlmOxx=F_T z>#9a;pQne1tUFo~pKL00S#q!OlKI0g8(b^sksjY{XQPt(jec#tY0Gkz&Z^t)E+drn zthvA0mDWZMo4J;i^U7<pDN6KX%BR?(ho`))mCqj!LRZ%gXv3-%6|hbZfOHV3><2pep*;M&6jbJpqY z7(H-6sV|nPvj7&q>0cg3=w%GlJd8m9V;@Fn7C(3$N<5Ty9^&dsEPk%!r3-QSZ(f@J zYNaZ)F+91b%i3X#)(@t6uivG|Y@44v)udLWk7TZT|7}MtX^ZREVtWS(KFq+Gy8r;u=;mm%Y_oL2Tsj+nc7R>su*SGP{sa{8P&>{^6rJhRI3Z5HoDL&OK4 z({?s}=XOoB?7dduAGL-(dw96M~>ABv^pR4>o 
z#y{LyvtRmh@j!i6+ntFzn~!Acy0)ZRmp<8XdBkH!@8gp8dr{>Dw9^c|t*MtkoK#-$ z4*Q||ZkqO_J^^1=sz=Zx5B}j%{12C=f67t3_LrcGn_2`jXLGKN+?F%r)|H*+=l-XQ z-q)itwJ;4C?Bx8<&0DE#y; zIL-n)c&2jBg?SCHAGf8JpO^0K*Yx(}v-wklbq7t(^zQopSm63g?crV5F1+O7F=p4j znLR$weEUNW*ZjUg{>jfhO{x_Y=~d}3hb{{pZT)gb+W5^=5QUp@kJpZA8PT^356^Et zf902&#Ylob=H0i?h^x=&qTEf>2b?_eQLB-=&uP5f$7u@3Pe&eZbUrbA*3P@Z9#39e z*)b<){pWeJ)9+V~H`v_&fqSdP_yv)Bmh_H0yIUw2@h*K??!25-aoqLB-EVC^xYccI zuM+9!W)+n0wUldlt|&Xq+ttU7ak$^ZsMa^_70V;1_cLDX;2ykCu%Jk1t?`=f$0Ht#SAQy@s&l{64x7cS_kk6UtvSA`OpOoeH)NdlNk_wWUMq zD|=I)xIAKh!lJ^AixRzJ_YdW%~>7<{prE3NBo=7!#n*dOL8Zu*)3m#))Jn z|AxpVuXn6y^`*V(NoWU#rKIA1^%$cyCw{HtETpo)&J7 zC$ENRJ3KeU*{*k<>)Bb?9$fC)Q}_6tA$M=g+EkyM?bnr`iC)Jt(czji_y_*~&SQnZ zRv((zNk@B-5goOpcSA1mwDZfWtW$O)xqa%pXPpU+wsgQVR$v*tg603_nDxyZ)z(H( zZKOGW`39_yx$)PtAPaM44GsO8;7TQ6i(rBIdhpbuu^eG_(x(g%tGPbBpr{Tk$)B-K z8oi4ynPG>@kjOCPo8l<(;NLb?WAC{!<8g$(@zGe4W#l%tlTU#F(zpi;VFyDyzG$nMSlNzU|M(atD zaYH_L||C2xI7 z^U!lyAyM7WB7#J#2Rp`A?pt_bTLLSSZU|>cJ&jjy{r8UOpB`bqeXf*A8;@Cj`(VlN zH_sP-_v}cPw{&c>mEK$9Wh;7(T>i3Y$WLdcX78M@Q~ygvLDjm1eOSi3Zw#Q0tc<5v z#$zm_8OtcwVr=~&?Vo(^Kyw&&Hq%mZtAF|y`}(aetNgS3B)*N?pI(Y()uwK zHnw2GQnPE1^AqmR>Jc9|RbervqQUfXcK#W`aM_TpB~wDpo*ryKTKC@>U!7HH(Ar`A z3tzeRtxtRX8sXVfmbGm*U1I|~JwEH$CRsQp26=s~uXclT){8p}-S(iJadxdm?9S}# z&b>OeKbf~`ZBEx?^Xa!AMLn}ik6G*0-FwhFRne}LeS;RRx_^9qW7M;8w#!&rdj=+? 
z9r|Ff>CKioHx!Qpx__Lx=1pSYdUManV&BT#&H_ht|HBtr!QzWGa4|G6E@W(MRa6N81M~V2 zVb%N)VRd*5009K{0RR956aWAK$}!od#%0KnetmVb4HHiOz=;ZIc_`0 z{@mOZ{24f|->{wixYtt;4Bu`D92h=(>9Up1+7*qdh74f>Alw9wZrF( zldaD?=;wjw?euf+^V#%s|MQR2&$j1>(9auw$3FMb>zT`HmLlEWM5rj`xH}$B<`xuw zQmx+W9EAN3+L9dhEzt7mBaQo^l0bKU9oaS4Sc7VY+)GF_C4C& zx%~bbHY8$2l@sjIuB3M){%`$}(77BUn*Z#yaD?}{@`z}!9FE^-y9(g`Uuw+o-Fze5 z3W(-m&SZ{=nNLAuDP;gm3b30j19=XBgSz|8I6uh9GLuqZu`GfJm*H&~_jNq?1|X9pTGQ2N7E3s;&ShE|i|o0K;e?`B@iS9-h&J>0XPhjRxQ zB`4cg`^?y24KyagF=;bxZ@zesnNkss04EFanV2~T>E+C#^imlI&IKL!P!?kTh+y7P zgk9`^#1Gzn(hVOq!~_po-BjqT3i$p+5xX4c!j_?^Ox!53s7b=OOak< z1*O+9QI&6>wWoYy<}ZQQj>GjFr{li?`48Vl`QM@P^AoFCng5xkH^TGh*z*qc`En~| z?-y|0QLPN6=&Pe5dfkT4-Rd=?X9g&x3P#Vb>URjz6U_b+ihoH7#sA378vfy%qw~jA z(&zm7(gXB6t7G-?X1_Rn%qUUy@ea1%IK~j8kDn;*6>Iamk-fvvVi5F`ewjmCr>opA z?x)O;`CiKZOfTi%80DY4xmWwCLyB(3Z<{)PMm)buG=9It_SI7;2QRAa-^*m}akf8w zlgjK^Y1&yaWdK`+jNgfj-+M|mek)3Q<(H1Xd5KZgKP9>_Gag^BhA*~v zGQ`%Z?Z;=v;M;MdhA;G!rZAR&ueQFNLGcCqg{K`GL3X|UVh@h-!>c{^XT4nUHspG$#7LRYKhA%R4m@(0&wqKtXgKwe6 z@@S-fmd(bm)YiA8DfaqODcWmEjJ-aIJt`PK$JHJ~;`#YhpJ2?)gku&d zV-D+m%(tuJ#xzF9l&Yd*x?{(CrYg~R`vxMS+8}HU1aMQ+3#cKu#-O!Yp&M$lNdGku zSp+npy*kEx3u!nREDW{Eoz^C=CDd)G8&3pae;*M*Z285(?-YW%!>6UW7 ztdoRBz_7Pt++pzJeS~ek1-fqA&1TyRtP5{d~b8rLpm2;!m{}eKWu4Y3pb7vS^2#2JWByE%eRP2&wCO_eWF55iS zV1&<+ZeOAd?n{)*1rSMg?ZI(v%D6M=xC;;Hr%t@7iSurtRDUttD4djM?x1 zR{2HqqB&8Sf0d=m>`=x&V(1~Wo?a}e{yI_>JxDc1N#&`enE#kN&okRI$1~T1)M^95 znSgLHzY9_#V^cuL`D@THtb z@VQLs&G6Oa=j&je*V*uvgRggOrxZ#Z;>v#f>n=27;Z1t8=(2U%W&TYU<6_7+xLx1} z*`0o|9AM5?zZtOK0N8J^2`d~Pew9O9*RSY; zb=n0$_e-`JvkSnzg@QW^l7n%VeV}sy7jF{dJ^;4_ZU^`}`1ha%xveI`a@SIv@U?)j z8xdxHO+Ev-?(##tAfJY7zYq=x?GQec3Qc}|#(dIpoa{lDgU(P-7iaJb#|U*!byGc| zH4ZK~*{GbV#;;bRF1P5`>D>ta)z%KJVX^VyQEO8>_RB7OxZK?%{$v9_p55 zJ)SU3p~3n@xLL(_xWXbXz`%~F6INqvg%JVVg3wC8M!Jf3z z0eL#9hg*)p#xoL?%{-(ZK$8D=!Z$Es4G8nqL@ei|2fjps?SD+e%}cm`1``3R4KJKK zH=>WqGFWZIX3g^3tTwk30#zqm=fQOzT)W}g4c8TLT>;nCa9s_bwbm?OR;SgL0Ua~4 zh~Tr?Fxj#BggH*nrb`Hhwc+RJwjL}nSsO&kR=K+;DyxVvhk)r@>z}4=N-J=stN$r> 
zU6a9C>l^SkERzADys9KL)xvF9bxV!UVk$Q_`C;BdXlx}#t*g_>j!v2WSxM-FPKcDG z%iowdGVTK+E@bN73g{w#gkMB_LTN!jEO!fYTpnSsY=3DC>Qw^M8DD^h3lFuy0AE7wkc0|3>yu&4BuQv3_5b?=Fq*YA)>x^2 zF>qjE!=`ha6-8slJ|#oZa4Af&XP$#6xZd!KEdH9$_&KnEl-o)h+BSeDj)G&5?K^W( zp3X>W=(246iNOeKa?PJHE-Y>6+K}ZJjshUE{fS%}U_{b9wAl$*svEk3snEvS@G`Cv zMP1hVzrbC1Zm|CJW%veP9x4+=8l{MH9J0MJ$IPL|o}ryL6owvjB&`^;@xg20dFeI5 zVH?*NxL~&DQR@`Tiqt8@V10Ob5;~f2(#8U=?t|@kMAC74660(N_O|}m3zsSdYyD)r z0xH(}aq#WYbdAu+D51!)&>gNE&L;a!XSk7{Q&7IWm^6Onr z$gf5-EL|9up**F+!GKU|p+YF2Q>kQoDix?3TX$jXBD_nC?L?do0=-2Bx08`?S?jl9 zdst#njO&S50%GifiLyO9Q9znwmlSC*<^$I@vc*WH8ANbi{Aer+bSy@oW)SELY`PG6 zD^($+5T5}>Jj!tY}=8eNa}Xn zA))J$P@adgyA(wh?{u>W58}H;s=tqo?Fhfd>6r`ezrtU0+%I$rX9L1#IL#o*J3^<~ zf^iI;pA zkZ;hqV{~svXm08r7CL=76gD=TwECLj11Vv>YYi+H6`sY-ZZ+emW`2WT1j}z7q#6&p z1N@x?{K)4isvofccpw&d4s<0x;S369R|P}87d4AjhV@Cpu?vfq0?$HuHJIz*b{~Xb zIiWM)|Y!+T?Yr^MZpsb7n+$30+=m!T=RDx6^D)xa!eGCI4EQ~S}8Yw`LDr5MAYPyFj+z&UkA08oeM#bFY2WGkE~LR_d8rt@ZyvA&T~{o~-(!_(?qALX1lS zUj{v*Zt3-F6S^gA7~=Zocp>AM+2!ja@UW)~$P|O%`RxpAG=u_})%ckQfky@fpPEM` zlkJXZ^4<*;V7`bmbn~?zet=0m}@Clj)Yr6RNr&v(tMnN zZxb$IQUw#>2+m)j7Lual@J)~+$po@mVJvu!y8fc~Z=vq&)y7b>Q)upZ4>;NM6@svG z1Q=soKMa&Yqi~wmWk>!&glUj)?CUaB8S2cDLpWWb(O}`LCK(v60qSn+)08hZ#|k*13l>((~w-v*MVv)0fV1GH~w4_@VNT<==yp^mVUZ6|59-Kp{+hE-aU4m)W ztpV2qO{XpC%JKo(_mUpdB|2Z65Qff;MbR$X#StoskB?yMbPB@g6DBG+KFe^7Adcv> zFm&t*=3`ZlM!4+Y7>>#of-BE0Aqjs=9z z1NcRX+)0W~1=kXhd4vG^bRW9yX1w(?+Tkco*wNj?t<8#C%R#u@%(gO$F}A52u67}X zj9Q(M$OqY0H5kHwM*4@kjgaq-`-&7@Al8!0#CVE~dq037reC-xXFCA;kY89?Kw!D(Ep}vPh6R7oi|2hd*c6_Y})>zlf1zeborrj;UW>v;{J2d5$hvI zs6-DqXo%DXtN*@L8Ds~?{x(L&(_F5Zymgk0+Z8D4T>as6(KHeFJ0N_pmcUY5X24X& z#&IjsB;gxb#T{_?l$|!?I{;JC_cNaKkjEheAYVr;`pm@lP|OvFgg;C z(IJXa2N~BCDC(vd6%yeDDLUK}qaS`9gVCf<8AcQ!ar_O4P&q_M!a;vXwyl^=gkcau zNR<~&5J&I!<2K^~$qQG0ah}1m$26YPV1wn8!bZj&C!&|ZmWiqo_DRJZFw6+q{uw$3 zBD^bcF$6z{sF)Gc(5*mps}S94642{g({2^h)8{Fsa)7_fhFJ280UHRmzpR^rvcLdagL$1aq5kpJw6DmuL#Dje*xue+5XZH3gb~}2Smm?8vKm}br)-!kU>>x 
z5H4(%uAhkbVMfYg2`6ssx8nvPk=7O)xy?kFyvc5%c?I+?UWDIa;VNd0+|q1lL<);b z;eOp9!8{6lz8vPwL(%s?MR@ag>_A)Y#g;kyneE7DI~5Z zH?DPY!2!}MA7dJ@#<4OJH+a<+H)ED$Cc-}=Q_=o`Y=!jraE$7^<)D{Y3Qq;zCyho5 zLOQ{HDqM2YZ2&i$npF(xIrLo2}0^@URA;h4=_09%U~HRBk->Rp7`h!uMI*QLL(l*NUKsr z^A!5Tl&#zmD?9nw*1uhx>7p&ewJirzGjeY z|HL(Rng#SF6MkKFK%8X>@Ut8N(Q6CvUS~iogB-D}031agWnY{H?9T%By#e68${%ua zh#!3TLnGiHd>x_cLfIb&7B+bx5~Uy-;!npF{U9KbVqGicDBz}`k)t_`*T#~VGUw-* z#e5ET@&tDdo?B25Dl`xjeQ0M2d?i+L{6{fUm}ozT^%2zI+LJks4RdVew+}0#tD^(jBD zHB$dt{kj^>ug#C;N6YBq>w$3RLO9;yv9PS){NN3=ZE1cuz+*&F0ygMz2qjM5<9xKl z)#!1NP!r_gGjnjm1D4Bqc>&(wYxG#8cE;no7&Kh|1(|Ss%$OCCnR48Th@_2r`bbcblUKm4oEsb6q?us>fo$!%! z7w@I#?(hj=ftNITD{x6|^j72c>hOTjLJt6?uVW;M7B&zuvvQ)DllCt&#vFJi#1|O3 zc_OuE@Bn8$sZ!jXJPa7sG~U;Z2(?OcM3 zEfm?(Fcy;VrI4GJZ;`48aA1Zh0TG^xbDo2Hhrh5sRQjAWcqGPzYaF%}Mj~M8>9rG3 zW2MhUOE347z5og=f6khfBn2Yq+vZ51+TuJGc!e(6R!o90^)QilfdQ%s$`B&>l?0`U zgu*mIS(PH&ufi8Rq}eA;$PbX|m!}qv0z8$V_e;;8f`K;eMoE_}Sk$bKxlTZ@ zjk$_l!Q7+;inKno%p(0B<1f*?ol21cZq;f-q7W0Qn&bcjz+4N=)S?;V1F!v zUX2C+Sl0-Gj*bhwwEkH!^{dD6h*Zpc|2i|4J}Ee2@hSqC&iD)G#+J*{Y7^u;S1XNu zCRaD7<7jV6Nc#^Rl=i|2iD}QCpwTXzp!XNxKfun>%_RySX?!&)?l~1XzzR+9d3^tP zeH|X(i@v^+D9@H$rZ3Ta&14Ww)frQvPIDB8JP&x(fKn$s+vQ}%sS+!CGrvDcCw?C0 zAENmKde4v`(*SnkupKo%5w^1!pX9nYY~$pV#`PlS0M_4mohs)I*YzbQi(#DX6jLyJ zBFaq1T1HltiB-4s$fFgHJNzx)(^lL)r`vmnA&_a6#>3R!Y_%Dc+Fenpu5)6I%@$T; zGaHhBoWGOl+@)1=x(SSv7bkJ-1fGL0SxAq+OXaB@ug)`83kfs#{7G~XPqPD}(C+H^ z>87{kRNM(kmhD;ewS$pbS+%Z>X#QGR+(J9h8*stv0)oduL{F{1kgJ0>kFmI!Z=`2D zfIGyNyk*>5@TG#cZCtk=x{%Nttg{_mZGRhf5b%y{w}U+S?LM(22_L$O^4SuVaQqi#|Qu$m?9WHtYGc%YQ{?vs`uHPPy`nDv^~gO^XFVoCSj z#l;q=K_1>oYAQ_Hy6#KsRPO1%vIisdy!&<0wKI1ltEJ7?znI9;+-%8s>~%Xsr_;-;}3# zzT?3(M(#3cffBG5sH!5Z`}}*1wGPH^ud*`evpMpc~j>JeI0P(%4%b@#_(^ zm!?InLO&P6%#TV--{$C`%KUjN1u50!;P4G&;sA)ok0aipy{eR6zhb?r@E%d_J;Lr0 zyjP&yTgmPf=to?ljVQ2FNw`5-|&r(@EA(;6aM~G!Bi4zPP(gbT-^m^1HO~D zz9PnxbR%zvZ4o&zyoCe?nhR2sRLE zvg!Qh{M_FBCcclP%%Oi*$2ne=UXAB#GOo4bxv3`o*-X?vjtifMJ~njiN(xRQ;@a+j 
zm{|zRX+TKX>BcR<0&q!?^aEsrNYj~QaE=ySa<4mzu(w18V>~j7vH-2pSRdbEQxwlQ z6X|380%iOim)Q9DN`|!kZ94uSB3Owy&=3FKv2g5FjThgE@~}w91ML?+l$eLO@oC)F$cI?sOnA=R^gkwpFK%d`~3 zsJujzrz}=!y>PA5=eO|x-bKcLHS)jiNKgLFD*t!C&G`2LH(iW>To%-ope#vU2|SGB zOC$9hS1TM>9qgOqZ{v5lp5J}#eek>UNR;0_IX(FOl|kkAF$3jydP06*IPjhLO*B8U zyBW)9hmxZ`IzM!_nRz5?n@uC4wn_WA`rx5Ayz-U7-&6)KKtjKIOTo)V?io~|0`*us z8`~S57n1ov%o2Ma7hPgUjL_p$py0G|{}s)jMEG3nVl2`lgzm$7|DjEu^KV7*`StK9 zKD3Xim$9$VfBZs$h#;*shxPuWO|PS63JnB~Y*S{U1T*tkiCmsG=mf0J5Pbq5ZYSUKumYzEvaqkyW``O)QO0cu2o8R-4 zm^=l~N-_O3z~J~kp!>K0VMwa z-bd3$cf0{L&UV>Yjb?$LW(T4t&o7o)uw1iTEOU5hg}PsyWs^RD+@K-6Hf1~hu35xv z-R7g*7c_rQdXE7;>$ZT)-4Nc1B=544z1D3_u_WpHDG*O;?}Xuw%c0{M7F8s)b}1&J z!CN=3eSizj1_KK1l_AxDHFWehCN9)1TkF{-MA=fh&k~^3bKeT~z49;| zypaf{v@F+3gbI3Akx9ggyhsZ8+m3A2wa)<0{a3_?=Afc^9$UJxeuD7B=n9_3TE5dM z%k(7)Jc!$z=pe3l{=>2SA7=bp>+w}(kipu3wRGYwZs}Z`CdXk#jvw|R$M9_RsOgb^ z#mX`48kXf#)_*knTTfei2wWlk^HZI`;ob$la(!(;eXSL5aY&}N9{LLOA!ngO)z`Bh z$I3Z@)}k0&yety%(#1Q0_6B`=h1&Zr@~lI7R-rtTT6@Uj??awtgH(AAeH1Itw!u`M zP#Huby$Un^lPebH|Ir>7wtOdheC#UK9xwbeR<0im*4g9P%I{;3v(JBTd)yK$Z&;OA zROPLw^5#j)_h|Bl6?y0NA@8s4W~C%6B~IQ!dUT&wpLq4|@Jg1W{;Yok5@|}*A zuTzz8rz+oTRK9BICx6%EV~xVrdb(8fF5_PYG8t>7ksrp&I6yDs(Sr$3xZTn>GrskN zy9XGx(hnm#FiwBh`Fh28tl#~e@pxr`ipPL6v3P{Cb$IMK&xtbplVdk!1l+tcWdpx+0O(~t7(kDu}vSxsr461`%FbXDw~mt z22x=sl+ek0bK>eMI?~0YnD|@&KDlgvutPIvEw;#yo(*mc#Uj43@XjL_2cPIjvkW|~hV%&$(!_Sf)DhS*<4qWoU$ujg0r6y&#)LR%1Gc~6C4BQ?8X_OIZk44hOJ z;5+_uE8&x3{XagpsLw?LNkN*vL8r{}9CoJ2@{&EC2f)%QEdjpL0YQFFfcHc1;~og= ziEt?(dTbCt26&IN(X&3jCK1v!TC3PgZDNBQEc4@xDaW4|!2_Uj(QAV*PK?Pd^mWi* zAj|^$T?ClbUMF4@&>u;_8VLb^zJ+I15`=$)O<1$3BL#P0mSRz7Q2j|DtdhV2Jc4y3 zjH1V_Py;ogc)X_o#Ebp{SwNf*a?eM(ZScSeIDq_so>&RJE8QSHfleT=CxArq+x&bg zV9G;=k?!ZyJiMi+zALnfr1R6iz4R@n>PXGMl>Ya&^>i#TSykM4#cjP*K;1F@+e?oU zlt0DQw}x(WvtNS<-8K+^55s^pUo9SIGEHJ{%fbKU;r|M_{B~)hnd4X?Y|`Ii3Sm=r z#1z81_4TW|@sjAV@LqcGfaz81rzWm6|73kQm>l32EQLgwU$^wT7SWdTUG|+JDc6X`dkfgT!6zispSNL$ROdgzqhWd-TJLh(h}8?<1Jrb}hChjx1IW|#9{s?d+%w+q$Pk)Z%jTSXZ&TL`le9H6b 
z?D@J@_4)ZdI)BdmtAO+Uc=YVx;8|1j?BL6-?CgMgU04rZJKNiOfV(}~Mh_&d0wfyt zndkKzJu~pQW#5SpmZBoGL4X&rX?dmY;&Lrd3t2@ud ztXDa>UbTIwtXIcq&xh!)^y?YA^(rUvc^=TqjHOqA^wz5M8X3I;q&F~AM^Bf+>(;jq zB5~`R@>_)!mG#PB5j*4n!vcG!GjjuS%c_p2)@A}eN3aimOZ9?F5>V#>T|gR=u?go_S0jB zBylgwyD<7%R-Up?A9Dl?&hL;+hiL*2={p73jJ_*S%VP+GYg zUlqHNiwwZ=^5}T)N_OlKt@}$4jv6|ENIH`WBLuUr@m9~+U1M8A;A8A2z*Dg~qm54*%x0<<# zy=xat&7mDuc20#KR^KspP)=vu1qVlu&_!AlbVLfBpi`3k{eOu268NU7?Ej=m+mr$? zERiB0RiYLPQmsl$KoXkJ7kGgpRB%BVk&2^^2#KIjD5f2nJ|oV!ppFhY>Imu#xQqoH zp{39TkOD4jDx1oy5hGSbuu8z~VFb)%1U%B7fQO?9NMHoiZBhx?yHO#aDOHY) zmlWJD1jEQscYeptvRUjCsfC=e=2+2tp7^t6ya9DeiV4AZm(UE5R2pIsmW;@cEWaG%-NhmG`1~5HK({xOOZnkqfJIY!-sW6UVi7x$|X%Dlq ztHilv%y4z?Lk71Q#mp|bw$2^{pKCMn6!!uRNj!%rih*yIA#gP;2!Vl4@nh-9N~V~@ z*g<%DG&ajS4YhL9kgFTuSs)|J?wf^5WVk7abB({fvH=~bOz_QuxiS(%ER=PU5pPcp z+QZ*y7aul2DT27rDDC|~Es04B-N^F9X8=lJm7ix+BnIG9H=yt7ra*sQoSzfuZ6~+s zPk<>HmFTa3Ig0&))p-t!4(PV6;zXnOU@u=@Pw6Q6Gv1G`${{}{HS|fO1Z4Lmpe3Mh zj3VZ(juh{=U0W6TJG;vJNYAkNT%xU~IjLn&AN4n>qr+Zd@+z#ZR>7xA!CHy{6med6 zzuBnxpJyS#vv~CO{5P4^Ing>qKn3yL=4l!My`Nws+rw$u>2}#@$I4qCH{$~1zJ>QY zfJfB|nH$N&7ZuXeq({2zG~U)Ww*|eyhLly=ID0Ms9Iu7#Vlp(o2Oj-dKKHD}krMQ= z1TIPL-7J#Kw-{XJP$a$Wd{+yQVpN!+ckR;@qLBQ0OU)~&84`IfHwnIECxYJJY1zw_ z?$u&wg>hPZjMOB#30&zWxX68wTI9&>`tT6Htz`H}Zpjb~iF9ZrkCrx2f|$U4&`@Mc z?OFtqx;G)A$l>eSgqc4|&Xmka7I)f#U`NTBD(EsmbR-;sni7UAdR{29`EJ0n{-g{N*1O(F`Qx19_eC~- zWdXh^<5v)0Q^zll;tawa(04yk+Nt2c7ap!Pmo9k-$dUCWQNz3)h=;|Anv0L zy;$WGcR9taO=Zt2S|=v2a+Uiq(wU8`te1wtt)j9T?wP7s2i~sCdWv5lSn36Cd_A#i z_zqL8TG{`^_bJc9le$^JpTi?Fph{$z?caz0fT~x>>vpv^x@{c zHE}_jG%*)DpDX_psa9Yih7~y}U~!85so_#K-JjLMGg zp-b+^n_Ojrf3e0a%djSP0^SwqzebZm?6XK$56?Bs?TU9C6~}FhC5`4z9k4JYWsbpC zvBNHI4Z!-i1FmTda4og^yCjF$ShA%mFaWPHQv|c6s))J*dO)z; zZl1=K&4qPY$OW00d%8xf2}OLKdYdBaeIL1As{9Xxd*8e?!8a{kSug<(^As0I zb-2NHiOnqTH%tr{D&CTv(2u1yl!c)70G_Ri8+(7Mn+QF0qyC;J#C?0l08IK1i5tZmNS`VzC@K z)++|VGL(7@`CB@!5*cJCf)QeJSTBb3KT&o`EOroA347ubE=x6R{=_7%@-vuPI*g$u zT4C`O_USdHAlH&nIq=ml6pNyeVPCXmQEYjtqxF5U7wdChaDChd8*yb9>zfkZ!-X!& 
zo{IuJ(~8oY_djHWO$0_n+5N68m+N@B25Sm1L#{aX>|7<44}wFJHcO|8Uw$7dzmkQLB6q-6>1YMNC9vIz_rZyj{$SDp`)-fR~JFS7m%`Hr?D_I>CsC`O#O!oJR#!%x@nI<%)H6KguA8yg|ry8axJjZh}X*T3Zv+3MuFDs4Rr_Yop?GiKkgVlmEb2 zdYcy~7@WBi44zJ6YOcYfU#$%!Tw4|6xPBXHR(pyQj7^u3nK3kntK8+AkQUT8rA3Y( zXx~61YvD#-tBoG-s=SWpr2AN4$uS{4=FOi*TLOC@F=`_GZA4p6IG$ntoLi)=|3W?6 zj-a!WfsxJJ4vBPhd?aT6P*K%`6PG_bT~X>RsnLuDZH91$NT*h zly=z;08r2hBGbN+t^#j8si-ONLKa>qSMCDeLC%LqFJJD`v%sX0{CS$KqM5n;O4APm zw%&URH1Fae2JgF*msWSht->N+Q=UJ?(@o4bdJp#S*6V%w#sT>~J^ieuEpBoiD9_ZW z+aR}USh|9#<3B#F*93a5KyCcK)8y0}z>i36yrhZL#=AF?Ha_KPi>V04a${CSts@zE^ z71adh}T_wwbD{NxDboLdE2H#2L zePfNPZ)|xgn>5ngZ*cxZ7~-Qp8o8TeeooJ(weUKVQsT5+OP5EGp2AlX)))pUaP$IPLdD?LO*GcS$z z=cyjyA`4FZ6q!ycuHbm2z&ygyN*j+g`dGR3AbLJkLWc)eZ`W%qb)2J(baH~d{CR-bS?WPDKlE8@Jw-9C%s~ zJsc1337O6AOM*7suKuuTS4_AY(5&Cm>;ZwukzAYike1JN^b%`52`JLIMxsRX#EbfH zv(PMzvWffkJML6h&?xh;i1T0M+SFE1UX!Ot$cz&jozXKM}ga=!U)< zn`SMCovoI)Y;?mw@v`2@E8wRaoY2`mk@ZtI%;1xklexuP{Sc4c1f69F_@FtFLMN`r zw7ywmuWxMWh*V?j6K0j4}9yXfwvLw+%Zdwu!B4nYYSGsnd(9 zc|7yF9ScLm;G4&`%e04~5L;mf!MGo~2+bC3+y|PXvYs}N!6{e4RnHGjv8pDF|78IG z+t!Z%wR_(k@BJ*eR2UOMStsEo>X-3NOJgX`hbp- z6bDUDE|X1nGCI?giEZ~=wYUuJ*iFk2<%@_`ku&)-b%!c};_@xl+fMJa4cK8HP!IR) zsaxzmrz(}w`kq-&QyNY2kn0!NLq>EsAMJQNX{4D;A5-i;P1yZNdxz8sMl^XSJ-|du zW`6(||yRmQpJZzSVDO-+UaA+fe; zex&_@AU2@O{3CW?9O5XW5C8VG!$^Y*J$<&G;;1zE)e|SI4|gV`(tTsnoT4+`Nj8k5 zGC-JsQRmEn-x-uB&o?FmerGu`aE&+$#>p69`_3FE2C)%G0c2xf32<7SX!__IV~yfp zYJ5(0|5ScooP7xp^~<)5(MJjy@H zZ`@x5&t?jF|IAlH0!hoOsz5THwk!xCt%QR>&?o>Agp7(T^>g~#{8kxHrN_b(-FP~9 z(sCapG-W3QqsO2aVE~w#`X@vRAy+EWha!u#4@;vK;GGYww6|EwCC?Q?kCO^)EBs?Ky-Hv5unJ~iGr>nkm}Oa zA+F470a^yD+Zcb(6iNQ8UK1R& zcWlyYn!aztAoU5JiII)x*5?GQp`W~1JQ5n%pU*tQ>st9Td0~HEZsgW?=7Q2-0a}p3 zjU;?7aESY$7?!ibd?nbKO!#!b5_2HhJxTXGq1TWAHy#DJkN>QX^6Qxcvwc1q`A?0o zucLSw1l~rV2Ieb>PrY#TXpPZ>vO6YaQfFy9c~1~EF6O8Q6K%TDsoNn~HqJ>xaUpP( zGP_{^1C@xaFqkc^xOLW1R63@a9kq!yAck@g{+-@+>4db1^|k%}16KO8HR}E&m|M1D zACuKkI$FT;$6}0f+?F@ikaaBb{hEB4^Y>L5Dgg4agDj{Uis 
z_}l*l_)1M=F4-`8848nY;_NS{0e=}*6SlvCYg1&?m9d@&w(Xk`9?vO4q-6aY#yB$% zCuqcen?Fj{_{VB}UH<)%!a=2X@kTqR?0B*Bonoti8>|y9)SSW&>l853yIxsRzGPlCf;bvD;AnLl_5>4ho(&UnMl&I2G;l!M1{wHS;0i%}qJ(y$ zbxK!lmrX0HFq4lY=s@`V(rKyl*qxT+b3enVR(+J`1#xre`A6vaJ(!Q+96|T_%(H<5 zvCr>sR#dzMAgWve!-c;mp^O1_14?GFQ-wXt!mD@-Ow0h!xth>$LEJ3e@&aO%U%+R6 zIWU9Ffv0Z$ zgz_^lVB^W!t_jic$sO7I?U>eBjZl93x40wJ3cg#?;Xe=m3*i40xbwGf@Qy7l6ro(J zL?}ZRPamQ7NbmPLcl@M+6FQisU8b&0hV?G7wh}&kiIee{xD$s?4^#<2+2f$x z0zph14!3yGGy*?h#`Qq&0cfeEBD_U?+t=)u8{(CZI`+}tJf6SedcW5d)6@f z8Wx1*0a`516>@JdhTLl_i&CLHm~|{LscDMjjFsMga;} z@{p(rGxD?yOP7E6MbI$ZYkIc;o}VC|5qUBq@eEIx(H_sE(RglK#?ojgcs^gj@N6CL z*8`9XjV_qNzB|&!`OV0C$N7gpK#LT@dApKVW98GxA10Y+x-dcaHtBiz4b& zcMj@V{GI5Sm^4dMph1P_ou&P8mQHz0uWE4W^3swK1T|cvY&!h`=@{Di!0dJ5`2hOB zaQ`p zU}E@N(|Fs091}JnFp}hrMYh~Q6Fe=vWkTBAf#?!r2)e}h`*sK67rB{J+=&eB79ga^ zHz+&Mg*;=V!k=l(DABg-u$Qbz03Y|Nx?RQwJ!u%WEV zLwbr#v~lwuY!i2Qw`fa_{F!s?us}1>o1jrPJ@lSlb9zsohSPiH1~lMPJljThz)evD{;lJP95DfT`*!w9}xp8H76-FPP+~9Nc z&))BI93^W-kY((CYceK#gBKDU*$sG{O6l`R`&V&_>*hFat@s5#Wcbf<-CULMzYhNI zmQ9COAzsatL#8k9FFa1!NX_per6g88Q!AUkdlk1A}ZY}HmbEb{5IGfKNVG6 zB^CGCJ4$g@?wLB-^aK`H+T!VCEtNf)*wq>E&3&op_$1en>M@CqBe{+~o-U%}Xs)Bb zC-FlRq!i7i=(-ZS`8F?$NK6?5S$O?$QxH541MI&1Q)r|q<4iWCts?bk68hW+I2dEY zN%dTPFr7~gjia9*(}(4h44=EfSnZ2KrO#uw6Ab(iJdxPU{ihzIP+PBU!MWnyDrZ;0 ztC(=yY{fqX_-7h#`NiFjH0dheFY3QeqMS4B^(rc`}o{4RJOX%YTGDy3w_C{8xr2Cojrbk8u=!=??(C(a~6|J&vQ z^g>wt{c6(UB_3U>w6g4FEgA4)Mse{3#%G|o+$ct&c%B^Gt$zvp%kH0(z~{m}cTWeb zHh$A<`}E4~Q9m@4)^FR;j`!dhyMLC!;k(X^>7|u8;D*`5f~!~ugMss_QH=cK zd?nzYA3?-Umlo-9sv*d#C2=Ufw3yiz0oSWFTC!GCO zDK=c<`7rxhvcZC}h`f7>1cS#n<>0!DwB0pz);*N1)1L>OF!s$5 zA05h~6S7dhvT4@a>?oQrnFBwpm0EyB2P~d0W#J`YtUyFRB!XO3r-Ymetak?KVOH^I zn}gh4+*fBkNW2TVszMH9UH@Jg8rS;lsz5HkM&%3Sea7o*9K_A0!Af2&p)CvixLY=T z_!hp{DDII>&%xJ7Dx%g`lnpW;4{@lFa3GteucStg5OFY+?RQ@!ZZvCKVEJEs=no*z zF0Pg_xo>LA#$dPdiw{%3?o<0UaD!~>{1*1$^>dBH& z#C4$X^D~`K(AG%mHdmR#u2uy6;rS6On$aoFPiHFi_Up~XD{=0X(t2~n%INh5rRMNF 
z7*@;$WM!OcjW5WlvgiuSK(Q7Xi)=c%0#|`9%Rg0EO<5I{B2?UQJHOY@(%0c3Zz#M>rf!9X4$xvJ6VkV< z<&C29)b~JQ!1thdC>#sKIkZYKoQ+hKhw-l)R)+Z3;WuLPuXg%L@r2;Neng=xiyOu7 zyx-{U;x60b!^}uv3)HN&iLGXuUHujqEJ_M45)!UNj>I}kWFQb z`y@h-2VTd$*NScD(%BLw11EI|lU-#U#pKginCys+iCM*D|B7%-&aUVHCYnVdOkP|O z7n6rp#K1)T*R=I)q`z8Y8~qODE$B2i+$b(O=Zd>lA3NvD^2g|?zc04$9JzqP=7&G= zN+_%rCWe-D0sE9f;l{IdB#^T(SZM2oSz$&zhG~p+Y@+xR-`%Cwq*j1aCn~?p=FR#T z`DIQmCR$r}Mh4Vqk{@nwKTGkA=@4osJ0aVJBCS|2T}w&}xHXeAYMCsAlBkj&2OTt_ zyU%)6dxm9;6FGKnpRVGeI^M0pwUxk1A;UA;-XRTY8U4@pBXn%>HOO?-il+W=y zgBFVLtLca=$IU0S_;+dY2^lst-k*)OHR#p@0~vmFf*%R$4+H$rvmYtZ_RappwemU2 zM|(*C;(Yh{cn`pR=eh3N#f@C$M%(Ef_5rpudn&A&QT7aBwr0xj80UX59>R_QGinnw z%&kfAoB9;Mdg~i&fO&2cce=#VjjJbM0*x(rikl`_YTP|wZeyM?G8(OibbH@ynOP&O66f+cpjdNH1Un&tvM zyxhS1y7a>fKK&M2Tw>*dqgCw$W#Q`1PamZR87z$l@d@ZP2F<(|(R$W{pFT#PyP&tx zg8>+N7vM8}c*|DzMbhWAHK4Np_1ibgRinlh6sx+&)nP+m967Sc6{ zQR?gZb~#+6?LNw|p2CZ%(z_ap@zf)J5%sUd$RFfS>{gN-vTqa<6X0iA)jYji6-cDr zurKlWqTmxqr+4s|a6#+I286#@o~&U}q}h3JmrsHhcCria1}~6aDQf@1?5&8O(Q%a* z;L><dfz9kkU_tkFj{1g+7924aa* z8`@cF-MUM$2>65z@^7Ca&i2+qm9u@cAXdDmSt`v10QG`SJX#MF2bt(AunIj{vcQ}{ z*2v1SEi}RL>Uo>slvpDyidQ?hHN$M4x6{-#&b{V|ypP?#08$fXP}8zx}Qqy!RJwu8H?eJt&AZ*}J4ZK&`dh-ET+02pde;fvm){ zf>hX7K547iF4g{CLz`Td(XdLIT`J>#sS4xCTt!aKHnz)s=9QocNOxueViP}S{vU9s z&ha)LcDTe^!RJ{mh!04-=-F{?L%C~(BfAAp0Wo>D(|gv)J*H2hrpcU{#FSBFs#Pa-rq5KrvFI~*qlqxDFr?48nT#g%x9LJm7Gu34cp44}=&?m6%TphTfqnZ+R48d6qK((9@Ld$h<%O{oN*rBYUgSxrTv%?o{OlR z?GOKf6Ec-~D2*L%bFkJu+?ijiVgE`3gPd5ea?mYUn&%!F(>-V^WqI%vZqn?Cys?*5;8rvqoCj~GL^Kr3BhlQXl9klDyR)-!lS zdR;F>!Mg>}`6LpZsI$_!N1~2MBhb0-kqC6O_cL_t-1@uCvs_{w9`ZwFLluxP1H|k} zyx1SvT-w8WjZ55Y6F+qVe8z{7=ND@85W@}~i|UJ@vdd)CniuF;bhlLJ2`_L1{9=U# z(T#sRnzLHTQ*z$(=Ll@s@iW7Ac;xZ({}tP0Y0q5Be$j%^8l=Dfo)n&d?EU6>uFTVj zsEaiV*7)s$?;Zf!Jk+WCf;+L(jE3J%f6tNdFqNL=)_156-v4K)57L=0E-;lQmN|`= z&a6#9;olA> zil8tCb|O&1ZFqrPDj!Lk!?6XmZ!0c{GcjUy$s=ZsyN5J*I>t1amo}#h3Nq>wqj4RE z{ry*g;b=QU0o}a@Tq}X`=ds4pVG5x3#$7l%QrQkW?KnizO|{APSS_Es`HRz4lVqwuLHJl?M)JVrdl 
z@F;tl;Zgjwf`|Dj6_2Z^{t-Q%>IfcPe%Dbv4kX9MW688gJZhE^9$!4A;_*Ud3_Lzs z33#ac_iBA8Q+$m5{VUP;)%{s--0ofiQywJr1!T)J7DSWT>|2Ixn)!754Q)g|qc^JY zQ6-;|=!9afr@8iR zard$8@aR|D0@tp|r}HFhTc97i1(%Bg?Dp>hJ=WZ#+-7VM^)lsJr?|1!rqx8_tHcio zt{49M%_l-O3ZB{OegdTMCOmHS6;|>-*QyG~gXI)D)iakDix;6}WVBTz4a!_`){{)f z`Z&sLqAhJLHKf{7jhiLYyeeSAX81A(t$_Vw6L?GG+@7|Q;g@82y4Wi0muLq~S#bb1 zPkYHm^(o(b2IXhNx9n`s=)*noc;(YIqa^X=Gx zCoUU!uS{E`;zp1RVQZxM11cp)Y|sR{(bkCKQSFwF{{$mmpy!Ks8wD;8zKO2oAXJa@ z>xYtLhjz!MP*>bm-1D^Bigzf7Wpn#R*q@Hmh)gOt2@oTDaq>^>h2_CTryA8I>1lL? zKFC{6yRQgdPlNsMQ+iDW$bSw-JvwG=DDRvF&p}Ulmh%4M190Os-Y7m0xS_OaVGhr& zugAR@##5}HGmy_b;i%ZgB0-?|3_wUFx8m+NUt5v`yXEn%70JUFc{ahGpwTH7XP`qz zC*IQba3Y`ChG9i*q9g6{DvG{nOcwkOsI_o8jK5W9$dL zL3_pB<}9dLI%T22fDs+Q88!4WJ5o`KL=h|r9uC|Q)m!+G^!+f6CiGfZ1>vtRp}GS7 zVioY#Ff`qPcd!&2wZ8_qhp;FK`!u#i>tMdAM^@<-)zpFF$ebY?*6HKqo0{=Q zSze>p{HATQE2t zX6V^+N09O=h4R94P%i&NrF_N$;D$sxAhJ5xf-WX%L8`r?Rtd3i|5Y%QV>IZ~Ql&w> z66zC%6OtSqGAP#i))Y)illaVi>iWiE5Hz2F=>l2>@i44&z?(gD1z6k!U-2?=blN&v z$TnIwdU^@u0fik58zw0OCR!6t6z1}PCxybL0J z@>b>!<$EH&JJbVDwCCQpV5zls^R;*eW{q3hWR{a4TS1yyJjh~@dwB;t~(e1J$y9)I-x4^p< z;|-+^o;*eEl^w12vgaylFJn;c^{5|B6RRw>OckXQe97f^8Y;F{}<0ZW5N2p8^yQbEJ4~WyB5+Bi44I#Qr{50SJlt2C4yy2O(h<2ENM@bVuwclG#K| zo^2kEf391j(E*G7$c&73@kq^<7c6$pF4=)U*?Ire;GzxRd{f`3(6!YMFXonP#1N0U zj^mzWEby8@uW%9Jh^WW#QP%(|g3qyGJ}WzNnjByTE)M(vpEUtbt3> zXIPJ?^=Nkv8-NPE)>f`JyL3lwy4^l_Pmzh17?~x{=20cXvlelszYqzI4P?u+d9-GD zR?`>CC0Iz8;6iHSo~lB8bJW)~T`t;zY~lG*h%h8rC*PH{JhagN{1vP*XwhWKvmVt8dq1i=_G<`R`$6DAvp9^_qugS^bg`d5E# z6|5LQu&tu597D$|{3f6NuR&%t6s@9u6YxQdnh{1>5L% z!3^4uGbIgXO3s}IuB@C?bQ~?h9|kyggXs^R3L$Pe*y+>b(?vGr*owSG!1Lv$}|+M_j7FD z=&|h7WE)v0-EY2*Iv$3TXQYB?R#?AxQHYA__Mgh45CB$UpcPp5%(Q3zU=8b_;T>vV zsC%AQ%GOuZ1d_05>K%pp@-eJ;1Q~k=sK=>7bH*-0UpdO;NBYvTw}#F1gLYjj^8vyvRGztLZn^D95sypHj`#&RVwft0k5e)D0RgnwG=|Fc8Jp?|$ zJkd7JQ3orwl~$zRh-iLkB%+LYDp1_uWQhKP zkWEV#1EHzBs2OH(8%oaPdJMHX*!FD=94M*QmZ!o~o?f-OYe*)v>wY2JX*>ox(FfhI zR>07(mbP%EFb&l~`3}$e!>w0E!njPxo|_Tf8TWX@J45gd2?>Oq-Q%IKQ5R6y#o>j4 
zFf~LK*HXE%&vY#4^9&HG!r70Io1w~U8RzSab&o0K+bTArXgP#LOIyWHHt|aZjaTy- z8nc1|^4JS#z%7qOp~1L?oocCvb)9}H;__YCoJ_`#umXF>niFD2fyEK*2pff-+_~r*nn;l(i90&wP0ABP*%M_y zYgA51&9z=YbJ0&EqPa16;U%}k6}cCBe=!u}GGFp$Bp*Crt2q5zXW67h`e<0fc-|;G zgQc6S@H}LA*Ln~jk6sXGEO(R-G6!{J*M;fG&STYn7HyU!3lBX*!D2YnZoMop?N&O| zZXIl|-8!#6BJs8`eZ--VKH?_w=$<^ZceRKH2-9^CCO3*YVA7qZ>{Qt63r zP}R^%;&`;G;gW^%RSgLXaT}k^`wJVLWLA|Sn|>%J`={7d?krZT6sH9h3=!3>DyHgC z4hq`&_$RXbTu&A26}`f7!Jqgp8ReUvSP`aIc&Hdp$A2il-Fz(`PeZrfH;U8?IGpW4 z)~Sxct4+#u`TwA9)Gh56;aL0h(1_f)fC|SoI1$QT ztwsL$6624yj?*}QL5N0N%Yiy1bC@I1GZ82p97bV#G2z@4rmIDRNSUlOzsOeS&}7YX zdN(Hk&{^Us%=7~PuTvmzwKDeclJfKa<&BiN%I{dk^1U+cP90P3oMf7uRixahYtv^neWF|m zrhrwIJK4%x8m=@wLbubMbUR<8W-z1oYTcf`^E6Z$KYvq|f00tA&ywv6N&008y2%5SSZ;oM_ zn>^n8gVxSOVrjP9XWcPNRlXN(BQ97JOt!RW6w(#pc zq{C^HMq05iU`n9@Ab_-l9R!ws?d}YSnf32kyouyqU`eLpAAU+~(^puoLfa$6ZoA zMK*am@WyxtwHLfAye6WOP781mx?c_5oj0N|x%Rh;iTly{Y3FQd!wbxpf3T;bxK zbN=zUR*1{!=i4kT#W)(r{xxzm{&q-C%svMs2Zu69E&@nk@^htE>w~?#iQOyf(@xpZ z0UYMfjxU6CicofRps=fF#}h(QxUvO0)^D@e@c%6kZGR}nbhTF;QGodH29zyG6wsa@ zimYhrXFU`N#n1?O{Z^zsGeo;GakCWKv(86*ceQkDMJ*CPlu7I_Gl@Mu$nD5%m=y`~ z&@hp`5Q!WdBJ#!yB=P~JY&^;#p`9>L;Jy8NHqP(}-kLM_T+^)`(LHCyz&6N9f^+6V zLH+2#Fi;VTh)MI-b!^g%Cv63;nZfl^dBVq@DEsO+G|Bd0j3z~Kkhy$Z4)O<7Qy>RP zc_D&>)ckfdR{bF+8cm#xB2UiksFFZoP%nrNykRS!iZMX0L+dcKcq}{`uAFG>fO2Bm z{ivMy>rmzr%ybj_;pf*)oJ&8U*aOZO6Q-XyBluizkb2@f-dDJc7d?y6pTP8)l1Ck* zS7x9dfeb_6d_b>x(vjUF;1&a9R!=$VxOBm|wG|Jb(@Hs(kcU2%X#Zg#br4exc%CEG zz{KZJJ+VbL-TDBk1-=n{4=hy%&7^CCYJo}Tvb4~!8Wgo+6r`Qx&{Z@Xr;RAI?#r{{ytQL+6d(A){5s9m=*xQ}Awts`riHkqY`@3k&ohX7 zd=4>SQtBz&@4nb(`O0mg=qtAM7r|f49)Ytr;{XzTj#ah|OsP-|ThbY;G^$d;%6(8< zj#`Cg;Dm1=_!QX|)RoDTeP`NO*!d~&90aI_D12bbZW@*%jUE#gZi?j>>LhZsm zDK_y_+nS5jCBr_7mW&j5BWl&KxR%eE4hzPbr)a?t#2HIbw{6cU^m+8dvna~SGGNgC z1mcoC*Xg7A_0)3L?>UCT?aAQX$#9Y~EiNQ)%MjU#*6paEKAg)0)KoE>?&&skmRm2{FrF9vq4ooVFDt-%R08te%6=f(#DbDaLkAlB6eEo)uk!>!Jc>nbkqi|-~C#CGZ5 zLr|^mIEqvw#j^)H{RP_d*yZNo+tT78EMyZC4vuD@&n)-+{tO1ypTSmP#6eN=v*6a3 
zcMEi`)jEsnG&fuaL?qb6hQ)_){aAd4){I2kI}Zl>!4HgelH{o1wE@mnbx3Dx*=Z{| zlx{0&m|`zC)LOL~3>q9BOXLg&+DgFzlyiZ5mCI%@7jfLDyr+fTdPJ!F$XK4!K;)Ti z3kI&5LZ^6pWmC~zENT{IdS1C6qc%)8;-7TW%%nS57fRpq;6knddLK@jXV2x1eudpMSMz}thQ z111IZJCAz@20EW*;C-GqSfFoqn&h3X`Z$aj%{(=50uBs5Vg&hidK@2z{ORPVQHOta z;}UX!Icn4)1ew&Xclk%UV7xi^IyW{$ZF zeK(o|H zyPe{qSne13LH7&2S@1)@_zPiO{umt*mXq(r5{>G6VO3lsJWHWkXUyD;CW$F9m~M~i0P>u8C?)86u{^x|@VC2ARe`>--oGXVmr2i3#qk0g5&n?kd9ggq z^CDPmiy`QFF*hSbfLggYbpv@`d@T7>!aOhJaDR!T=i$q5-GO|$X&`xCoQAP0E1nnd zV&Q@ClIqS=((kCGjm^ky+&#d9S%%!^@b@ zZhqSArr=11`BOsv5STNob2D=Ifx$Q&MR&$Ow;rRsCHSsuW-&L?7bR%ILcxSZ!;(Dj zLkq(Erj_`w%;|x}n>P)-Em`BqhAn4(iR!lS&#}N992DIRB%6`!BBuUPvY*^INtOfm zB1(^&S3p_P*<$kT9G}K40ILt_WYb>quE{rID{kV&JJSM}^D=CLM`MFw+YEPn@Ch$; z2}Z|+a^Av(aAyuL=Yk}o&)yXn=xrP6zBF4F8;Nava&TvBS*<$-2D)sz5t<;^dF8}~hv0X1gKQdf z8%f7gcnX)}H+``PLx1BMT(%1qJ^utl$uOv)ez?D5u1Lyjr zThMK{#bd!C;NqpW`RKKK{qZTxYqxO<^V+SOqIm6oj$iOW&jK8(*Vn@Dwdh+a&B|mF zhRZ=*xK5UVDRJHMjhvA;a^1$KBcm2D{t>1FlCpl4xTgxT` zSNbf)R~%70IT`S1;mRIHNiwXypB|<9{yv%2htFYs>nAJq`6ny&-OuVPhTm&(t18`d z6?uRX3LH8j`Mac1<^)4P$BVTgVn+Rf18T(B$&oFc&uYF-saa5JwyQPwQENW=DAk-A ztohi$b~RrVv*vmVl79Uwg^``FWi?k6vYL0@%xd0pvr_Xzg-XrysOIZ`SEzRWoPiOY z-(Ttc)>7(x2hLw|vl)**rJ+7=GDM-i=nt;qlfkFYVQqb#Wg@TqxsO`x&vtG{L z5Z=p8X9(Dng^|Es#tPrBH1AX)D?CuB6uwby-t(;RcMDnJ-$oSvI4gX{q{zZI&!fV} zO=5*Vs}%l(TKM&ol){Iz!Y`l13V*&Tyzn7sZ~{2WWUjPqAg;nM( zV3iG2D!WvzEUQ4NtZ^PymR_I^ku6uY8zQeRVnf6QLnOe5S94-6RdaYet7hXQR?WIe zN;M7Rm1<_PYSxZtZMu9|c$;o(#ehiLftgXbkdLKDM<-|^E}SXUMqX&;wHkZMbXXn= zrw}`K`U|IlaGoLka~zA+D6(|~Ly$)MW5$n#xxidNY_S>L1Yt)28x44{Y`=RfEW=g@ z_d!<&Mj2(60(nz{mI7m~p{<#{yi?#JUewuG7IHeAT|60;5mh$ar0Hstv`!hezsI$n z*EnNRXvI)hbd<(4YypXpuIOC!yUhhou@&e%K@Dhe-=a1kpBeyj>U_js+m856H-{6S ztVMl+=Pk~d+yTm?XjS6{WpM*%?Sb8xO_bD_u>YKd30kZyTh4!M^__cS|$HaNsg(xePbPJqrtLlBJ~I z>M$*r#h_*3P3>q&I|a0SRl;cDZflUTfRs}mA*FqOjWZ;pVSbJLGzv(wW9~J)cTt*= z_v^cnXeHKRf(uwD`xJmT3sPh(ZfR84vm1T4mo?7MZoxMMI>KadXOZD8Ym<^&w(dj+-M4(f!;I2*< zf#=U@c*{vGo*^zR<(TneLU4t8$NO_7~D 
znRV`^aM_wuyhH6+n{T)|Fqm~?4Rs^*uJqnh32;Ym#;|vMuA|DGm|MKYoy1&gD->#ewWogMt!@N!HzFJ2cPiHDbALQK3)6m$eH?{nwJ>pjys z@tStY`SA+s@00`?PoAX&8MAbxzN;OiCdimZ0(n`LnxWyPdi;jzu6gmJ9SN8{;|%Yc zWX?b@UCc>RXE$e|sVId@mNtVNRkFZr)wri%8i4EUm};3UA~D3!0TkN&G)cN8^SO3& zj{6$#0`qkm&s8GSu=r;z$bDtmF86>_+^tpBhZAJeL>ER zKH04bJ|~0tT*2_^ABImqz^6B1GgQD;xmz-DL;{5mp;FT0Jg8*$Jtr#vX@2-3Y?eW) zc-&kb$q?}cp(zFy4{27rcu3*-U1+w?f^Oli3MN;eq%O^BQjy}Bf>;bsIHti& zhuQAL*}fw<+b6q7Gm5DT?B)#2$%Be#m$(h{nE4C$;ChsKK$-44B4Z-=am7)=Ccur7 z;yoJol`zGt*c7j))C#L%m+QpMw_^{?@OpQWx425GR_?hdU=;>1h{=m;ZkA z8Q_j<*x^t46)I?_VPhxwTuVq0MdO~O47>-`kSMqjI-QrMvBKmpHBvtHhYWa>AFjt; z<7DR`%cO*Na0@FjH-Y`=dmjof1;8zJ27&7ekfHVpaC-!=Vlb0BMW0UCE=Cg`XjEFu7nsv+lrsnR4@;aFSb3XN^SUf!esOQB z)}3lA@6QHUz+fvM8PLIRsJ$`V;HK8GwZGV;LKVU zHnX;~oeG)Nlz2*&!6}y(R&B}n_{F<2`Yna>wS97NB_6WhogtgYznPyBv%8kMO zghPX&8bsG=oHECH>T%Uv6LA~@vj&h{O3A{YoBXcb((C038W9aXcEj9U=I)U@b*ZOo zSp)Y>m90G4()jS>fl>glX@MM_03v+9XpFO8%)0Fy`^AYY9FsCdzqu@!(gv4j?gP7d z0_9wmYVGEN%WZ2i>A8J2aYC9?JR@%L9!j@yjw){5;Y8T)PQWAOn@}{F;N(VCwQR&w zd(8k|t`fJ@YVMCXo7cblV4#f0FgEio&!*AmRpX0c_+_hUAt%h?18PK!HQte6M zy^XT}&aRlbd|^7C8tcWzKu=E>%=PJ?q76*TZU`in$#z%=z09<9|IH380Uy63R{7rwh5z*p@;`PkjK`J&aEn$AC06{2f^lP1KjKC#wC^mkZqT*kMg_o) znrS`Vu5hD9#*IExxY4Tp$c;85H+oXxMlW_DZnQ$>Mp69v^1lBy{#;b2ao)f6YZ*%B7d>YsaNaJIChIQTQbG`TvTK=R)wwP5Q0)%-!1#pSx4z z;1j)HL1VRg2G*Y5;rkVrC}G@2OfKYlqrx?}+_l7Z+D2OxPqs}l9D0KnFS2tVIA8-a zipL0f8RY_*`n*BTh1+G5ZiHUb^a{D^+0<|c%frJR93HOjY6iVV$*~={58Ut043Bv5 z(eR+5N)IKk2^n_QgZz)Te8rV$>a1>U?B&IkbX-A#usx@Ek-IP3Ex3nc(lWzjYcefh;xd_Tv3&*i6dH(C!uDc&s00j#nI3xE^@db;gkE=gi!x$VLP*0? 
zzk^DiEZy%ON?Q)(leJMiG9qgW^2k5tQt1BeQneo0&zaoW3Pkpo)zinHEt#{n> zn~XT}_{-xXP}sgbm&J9taJfh&E6CMA0871`Y7>BWnaGZY)5$ysP=wDAVLCXkuEU)&mQ zm((NTIob3gJSEq7e@R@pn^D~37CcnnjBwD|V!U|60D)6%wsGs51Zv3{!Lo67F|5~V zxDC&&;w{Z{hdcfHEajY$_ntATXN^W7bE8uR*`sqdrm*M?lDYU9*=B5&n!3^1W1t_M zHwu_;=W}cM{j{o5)l%+@!Sl!aF$sX1;^k-EStH#e1hSk2 zCRXPCiO**zYib1Gy=fTVCVe6scsXcD0Q`L(n_AK*m;e#_B?F}o8jb?47{Fj)jI>-2 zGoV2(tKxXfW17R3j9}h2W}GoN3H3*q@ti3POwVOwf(0Wx*G{a!U+uQsu&gZ34$Id@ z;~x*Iw;(Od4Y0ZTGZvGk0b$WYzFT){c#Gc zm$t#!qJF!s#l#Gugb9DX0S88x5Am|E%h&LMhos%Ymz)Q0&@&6eEJ?UIm`x$ZY!0Uv zZR8bt=N95iccuk$$+{4Y4Vi&q$g;BY4H2_dQHQe?tND6ne7F&9*JV?;Y`v1yY(t@H zU>T$>$eXfOEEMwrZI7>qONsZ#)_14H_iV>e*GXBmDYW7E!uvKa7-Rld-1pQ!tkr znHZ>s`b|G&Dl>wO&j~yZXag4T>b3gtBzU1Qk%MERx1Onxa9Ctx*gZ06lpan;p*@!V zCDb11e^$n4Iz6TD#gWAlL*>+24O+FgeG`p9I@*E2*EHY6bXa4o<@3$FuUq*90}O_0 zxFuZ&4L~!peUZ7N6naZW=&r$h8{%G156t(7B3EK_RmeXo6SE#Ck@h(sZtKx z@4_^yKA6tyw9)5X=~zx@z+G>4`sYi)gD{gT6e?skIIE>}uAA2B%agLJoW6+%`OGt1 zH=EJr%L6Vn!RVio!24{lwVTwdIlI9HgKLoBAJuI4-;&@IH;(02>9xGB+KGNC2Ag-9 zUgPpxfsUqhr#GPWe8KxVIlWD=7;l9CaG^57$0Yn8qc_dz8yn%30jm(Ka2ttQ@eH2S z0g+X9|C|I|j2I%jAKH8SjdEK>CSa3F|bB% z#_znYR>*7`=QroNGLJj`gX#pa&3XC&2zEHfL`N%MeF%yNirqh^vGK>1Vn|b$W#gRQ zPTaC@K^lHEvIkwwyruf#F1&8ik7-W1-f7u2`!k>vDCV9(31y68pN^}F?lX9Q4D{ty z*q2bwzy!>M2SdYmD~J&{8PK^8UH$C!)byP=BPKfh`Ws;jZ1c7z7H9ASJK>A$EqEMX zWN+9&4#4g&Yw&y;xWwkaS?fSOfgMA}ZFmr*1$cn znC6ix$Wn%T9__eJI5S((oiejpz!TTKPJib#oY%UIyjCZ`md1aZ!FxBzWKQfDBOPjoq@P_+mlZ%_hV9-Mkn8*jsJDXW=OvELfw6H9~XJ)co#=*AGh8$fjjO z^eldNhz~IS6qY#i!*RIA^wSSYRxXSg&sM-tera{R+Z*;;DZMu0V?6NpC;pLGT)z$Y z>-2uUPXj-oUiz0oUB^z33pxr!XYs+f-&6IPT!T9eHX^{;O)sGDj%B0gd$@Juy_irL z*65nB=P~C6Jl_MJM|S_%ROR{Xsv6O%Z>PVe-0_qj9y*e?q>k(c}3{#J6wJYer$-LRiS;ExcFKdcH+7&f99*l?|U6AdFR0 z*-!_T-2=;hZXgM1Q1)fGhn2TDsdQ+{I4C_>xDyLM>LgK)$_H6-#z1|fg+BH^Tpz4n zSAJg}`i=R7ku6pOqI20h3< zQx$w9syx{=C-MUf%J){wv1#AOu}xM@Vhf6{03sL zx{9C}@lGx+v5DUm=`|kMl_b7Xq^FeY*b~qc=#Ax9274{kURdLkW3*SvS*EN?AnPao z&Jc8n%o0d9Mae@?X3Nsm$;eq-`Lbi4zPL?an1sto60>EY)4JE7!vgn&r3bcjMMSns 
z6WOCI5jvigPBBPa7YvV7MPBzcl;;WiyR^!4MOXnpcVUJ*_?Gl#XH4x&LIY<%gXN9| zlF)z<<~eE#er?r}*-E)4WbPbpLDVicD6=))LiCaI$R24$BDOM_8NE8ORokzkc$Lyu zNi6Pq76#skx!JadY+>ig<3}R<0W$dXh8RpxV@v_-mT9fJ%gK zl&rXpt({_uWT%i(U7E_P>Q$*Mz;xvc3~EjlSB4H-r@|6`xq7rdo(IPM|$m7Y!6)di=ODE z?2z6~1=0S;iu?+5i5zjCG3zzqqN_5VQ)H9wa=qsCo<3ElcX$u#Lql4;$&g)@JHfy` zUB&o;e0jGkBlo*gXk5vrflxNa=Z2T3gvN(_`K9KleAVhu$t7{V5#u6k0WQ_e%+xg3 zC}w4ivgxY<$ZXvGamSMcWoP+xbp2*?7_oswC9zGcEuO;D#l-#k&@2pJ`qHJQPQeMW zRoaH#4?+xG*X4|lHC+cY*WDZA1>Y6T z!2Wm{F5QVk@1iIaaBtnqn0_bpfa|iz^=EL@?-5bIUr)zMQ5S}+PJwuyE(Ect6~vR= zx@H(^CqQEM43bT~p^V(oJd!xhKs*#U!KN$pXg_@^49n5HJX&Zvg|cc{i^nXRq)X`q zuJk49htNw);iV?Vk%nm&+~rB~K5Eb`yfv)9Oeso#IW0*GI(v$3;%Mry;yJSEPpnw? z7#=QC#}l)iVw4(@_w8Uc)ySrdD~Nes7Wlhtx?Oq66r=+c4N3Hb`x0@S;J0agqXlT{ z!pzf2$aXw~eB%T(-RbG{-Uus;&AZT`aVL{~Nuc{09e$+3ykkvA2B?fA-zl9&ozGU} zwh{TvX+6fZ`p@anYjya2Nqjm1BSJ;pR{L*8dSrioe2Mbg>*QtGDMa#j<^{PQn0xLm zf*TgTaqxO>KJQ^^jv{yDk^jhiUfn}eWrQ8Di!EkF1~_qPm<$m12$1!6bCkHW4QEmW z9#QKLFgo$%x&l>fctTpB4haOjf0RT>Z}IK*CkBK0$8aN_{b&H~=+9nZ_&djw6i z;E}zn>6>4}j9P;F0~r7PVW{T`zC1om;=%rs-opZ0b;>ea9VMUfFtz$=!nhY>A2@JH zM3vmM7AT`<>y)?>n+4WSDHkaIvg=_OqMgT%o6B=mMrN6mxp%H@Bs z%WFH)uf|Jp67R(^EvY;I*>-o~6nxkMNsrdeGM1 zUpk)u8>!nze2nMu;&&j%OIN_l-VIo3PtR4ne>qhpTF`@0T9*Ys{NuHzzMmriPu$S^|S@2n3;>t_Hl&(Q}E%TS8 zIPzMqHcVcl2!$%|8~=Sf84OBGgPnsQ?&G@Y&4Sq3%w&EWNdJzqszkoFf zW!9fQl(}VRlWt3OQ$M---f9X2uB#HOh0J=}I{f9ZY+P^|d;0W2n^={(ai^r)SS|Gv z>+h}7+%RJkJTB-O9F{yoaSuo4M3AoPWoM>N(A7J18w1I%%v48aoZwIEd>s zCDm75I_|DEf`xRw8+o0ws)WV|O@ELfVV=P(yjpr!MjcuezviP+kACyn(;Gf_o<7J| zf7k2YEhW!B2V=OGJ+s<&U)9|{mQN{a(Pw&DqtiDh7zpUzGr=B2$5E{B1fHB?JOy7~ zJ^bI}tjjw{=g82>PI0HxKek@F=^9ynlHvhj(nv1Dj$6J(Xg5R*l6CxcX=@7E4Pmrs zG#t8~4#cs+=eHc1bun5F{qq|g%EaO@oS@9%Krk2JFfx3nixCMZY&~t#x(DJPGydu2 zJ)?C)srz6^Z(EDQRKoRQTXutlP1{C{RYIm7=rcoNn9X9`&dK`F`CZdfRDAGXlaKw5lZ)LojOAiC^$q1>&+QeJi#=l)!a$ZhDQ=3@WrFw#PvzNjO)*cTsx_H-y0`<4B1rD&gv{g=a}6bK$I z4nbuo$`6uNf3SH~olQhDVn^Av~_RC<>3XT*9OGMGTK76T{<>Nx?&N zk&4F?xrE1YQ%CT4=x|5zn0r}lJOYOz@wl=#;bFa4#iK1J1|FAYDtLs;6;d|F)yu|H 
z$=pH;!yx!48|>wGF5vz70`JS8BKY%-@O!r4ADC=yl=df z_f0Nv`O*YmZ@Vwe<-6F0UoPf-%S)I=)3a#NL~O{H@8-oi!FT7(NRb3Zd}xi&29MaF zOq&%by0VaMkj3g6GUcE-gkjq7m=ANvIDjJ#h6gVyWUIGtEG)ufft;~C{4anzU!xXM z9nb!DQMlt7jOzrF1!R*CZmcs>m0P8Q2XvZPC($aMgbAL~zxH zDgMYz)Kj_dD0yVI#ytg^z5(6TQNN5k$Yf|}k)@R@WgS0uCN0russgikq8mLurzjf? z#LOz2KI=)_iwv)%bzkXiP4L`On>SmtS{sbQ_xNE(-8>*|+MP9c7lmB1lg!~WTiyKy zQ7lnp!5l%XQ!u|Dy&f#Bo|KaLvo+k)Re^t8TNPMI{a+wM5fgIo(9v7f#`_W?_A7Yb zpx*cM3B0dU)cq-R-x_&8{G5VK2gK@$~@QZ9(+MQYr)3i2k0a`u4 z^cxGv+y>M2HkhtsVY-fm={gan>qMBY6Jfeew8At7MwnNC5f}gloSr<)Pxx@cZW3p# zjVAD`O7&gJd!isc9lFdn)*bIW1lr3_H!`y(1Mk;QXg6P`u>00{_X`FZX;VKPq{=E@ ztBq+dxEHr-b*B-G9l`zR@ynv!eWLU?(+kpPp?XcA=L+b` zehouODY|bcS=j6e+hOdsGEvx1^5iGd^aPT@GLESSpu+;HkfhiHxdnv zeFi>rC$Z$6KlAVd__5E3a~4nBFXbw?1LH2CskD~ESzX7KVvsJbvaRV)8ak`sihQ1A zoXI$~(V9o_yHeo#NP$ka{Eq%;LWe&x;72cF1Mmez;{!$-|3;kksW|ILah0y*fL{ zo<0Z~G7|vNt7$8;w81vg-U^fi9#+sp<;6WkmecOLgxtKDo~~SFBkVW2BA~UN&jnOO z`6ojEbaUGTsG!I<%qik&x4SRz^D-B4nDEmLPK3ELtkxnm3VVtcQ@qWzA%Sul6w$g% z96vNat+D54$bM5%+^4gU@ej?iiA@xnh_phvS)Oh-*w}bdX$|n`dG8MiDyQR0@k82t zMwUyf+;cI8SEBbU?1Xr6Dc-T8+34r8X-5}2cD`A;Okn|J(+ljX6@$@WJupz0vX(ES z@v02e{NO;%r-6#{oq&p)=>~WvH2{RA%Jd6W=V$lyV#{`fOU|O zI!h4$QbN;qqMX2y3 z!{3=C5BEjNS}U8fQ?cQXBzo&*q9)FXgfbo$n9~Z8S+5K+>vfXKtjE*x6}0;u3~aT) zHQg}A-+2sg**T}5&2N)!*sO_RLIYQ+X&O%U-J;SXo=(Jknyw&s?>v%}al~W-uV8hG zDrct2O#=r8jl!8(G2Zzw`})czeJ8!rFYOG1I*Im%Ab?2cw~49(|A!{%+(*Lj%tr3Z6G&31U>jj+tN+i+qyI`G2CsYdSMZF+8 zM!GE3E^ZXYnt+OV&fyy3%B&WkWlp(jjK61!^x+Ym+8<2;JV=M7r!cbF-1@XYe&{DJ z7LSCc_2)Cs@VZuX3hB?wjokXqTyS~pj=mFcBMF}i9O5dc_6j6^$d-=6{BSR}Asx^~ zeVvTDsLMXlL4RWK+3~obE!iEhprtv*`KW`M8m9kOY-s?v$AF%7Y-H>}7Mw@&>{!LX zJpm2eE4y=*834@>awU^bFp4S2t<-LwnVmsPQk36=8j2AL zIFA=cs|*1)siV;m0M_Y+*YK7zym)};Tzj2puQPoef0u{}St_UuD5BLAR1g1A|_`JhgN4^exZ;y7!lJxX8- z7%sgSYd)D2)+Qsg=Oc0Rk(8|Cll$3pX z?>x3om(**pPnU3?{&hS1yXv&C_YRGU_a59dXR`gh{mygm@1M*CmLPVyyi+o|rCyW* zAHKx?_)FZ0^S1{|j8OJCC>P|EM13dJjF}Sg1BPr5T&SR>Ae48*+ux+n?H^Lu?atx1 zF?B$&elhWdgGs=c=jr9DKq8&K`VyDGB~+d%?ez}!zy+=6I@p6gP9-+D6Q#R;hnui2 
z-r$7^?82kUg(S)PyHK0s=u?jkVER-~r;tANj3!E-T0emFsfRR7pZdIp=~MrtQS_B`f7uqPPu;B}ed>x7RiFCL0WtKcJ5BNR%U@o` z@OU64gvVMrDH4x0mk}PXz;(pK2cL+?VtfwqD911KyJ;aQbG^~*Qc z$I>sy(v_Ar#n6>bKdbv+=t}$QW9v#I^q2RXiPT>X>DqDqJ zj_fBM{8gv!C*E(3v7Z?Kt1jk#V(72BxciAdzv^P`C(b9Y4jL9uUOk)}PhM@#?U=k; za`v3^>dM^TCa*pe_dU4jPG|h$=}G62SBH?iI<+MU<<%>6B(F}!U$N!Y;VmeFm`317 z40*L5yM2Xn`&{zs51+M@SNDGwC9gJorpl|Eeo9ir(|2job@?!O;T3k_UFAZO^yJN9 z@@jlLu@e-K6Qcq+VMjj0Dc(Xs(hD$vqMfTuCZF*BYLxVg!lR_)ZoSCvO9-T4m~^f( z6es<~Gdc~tT*WrvHcJ(Sdgr}$2~JC!XL*N1y$gQZMHC;e-rcn**9bhtTLnc_c}}8Y zVrT0)VlqGfSyCuQ9hxZk(L6_wgyZ>RWE$13{KU+mNr`P~oKx-S;jprONUG_M2 z+bg=2?(#J8KF<#Le~_P<{xod?ZyTy_HH3_#)?^z@FSTESYI{k&(P{a-Hv0sRp zw~M;L{qe9C*XYhIz=CYUlHI9JSjZ~$LrbeX%^0haN_?ui+_jSD@@hww3{Tg%F*MVT z(gpymb_};(pDyT{ny>k+#79f)B?t*iR ztJKde0+Hlc``I-*r*nGD$#u6fx#(Rei7OolSBlVAIz4QD9sQ#gn}dTvR;Sohd(L=+ z$EnP*VeQkV8rU2&2IttJQv^!XSl0G}C~ZrdnmP$sa~cw!5ohOt$!I*O%M>r}J&l)>!kc??h$3?Oqo--?$&*&$m}k zoMXQI@x=dNzS)ldujboa?|)*xJ=FYvnQz(6|A+b3a{L13Tf_1H^?d8~So`@_vm^F= zTe>DOIG@T$e7h45iF;B)g z6^3$j!gX1t`z}s^eVfCIZ$O8n;T!PgdEUBSc?;gSn1zQ>8XVDH*m^(@%1-eNMP`C# zV|g(7PC${(6-KDDxxDb8!=Dd38?ih>^2LLq6Cws+aJdX4mgmbtl1!;Q==xR;Jwn3?3< zToa{lDWC(oJ#4E#VRV9q+B$lW(%9j_#tPKfNwVq3Km@57Na_@o)O(tU)C?pw4+BsF zof*>R7HsEeY-i-lIrwrP_2sMB%Q^V+2g=JiQaQft@8ZFTKhWFKycV@}zR~H}LZxAX zzjJrzn#%`VR{HgeC%0zr!aZ_7?gM=%=eqZBpdwP)RlK~>#jV=twCtRH#3`ajb|B4V zsdlF!D=Ny>&c1SPf7s0@IAK%ATef(Pz$=z5+%r|)1{t2?9M#N$uEtw02YvK1cRIzr z&dfb%d$98tj8|dVx2Ut=*Z0Bp-L^@m3DYqI^PSF23r5ZLes6{S5+)7sO-sjkw=f8K zxz3(r<~*lii>h^sU-05-K|JBUTJW<}7%T`PMhq9svz+4RQq4sPux(%1g@2BhYhcab zGn+lT;On{srs$MS)kkq9Y)15_2)?^6z8o?;B^wk z6#%0^T)+AZ-8!r=&8~2Yx8*p+TBmrQ6-~vI7_l|d<0F}8Q>^{H+S+t}11`7lZ;^wm zJVWte_wttA+>$mj!h0wUw~w$UcpH*2pK~|Lf?UV%{EpAuC|H^wPT}jSv4p@iWBmGS zDSK)?C4*ffE9LV3@mjdZE%`gPUe~}cK0{^%ous!m<9KTh&~lh8%>yA@wrMtb-RcEO}ys0+gSAbhIA_%!k#hS0A;ff%Gd z9EE^BlgI8&bNS5|VdPPQ{*$@gD8@()gPylYVbJq_{XkTD6ZCt+p&!ejuZx5}SvtD0 zO%8OrJwaN&S?9cZi{j)lG|ItxAYU1Fr`|U_P zdVEcI^!-7_<2zjpJi4^R$D_B7;gK7LNAa;JJbp|dJZ2nYcx*nv@c8I}f`@QS#pCx> 
z{|HNm@Nm7|Q9J^1@Thw$5)a)G!sGs9Djxe1V&HN1$M|@pBrrU7ejmc4-}g~?e61xs z62E77{Oc=*$3MPO@c2HU;&Ch0KVpBNBY2E>tD|@Ks5*};63*nLYT@)Uh;32@{SQEqJnJ*X~{x1|f zK5bI*7)JGvsA=j59{pB!6p!Oq#KvRUiby;@IY4-P-=yO4CQ=@a$Hyn)hjol-1B-Q9eG@L2p!2#=4yj>5zJGvV>CuNfYW-3*TryA?cM z{9481yPpXU@#~J@@$su2#lss1kNj67@$l{=Jl_9W#pBkWW8hKzeSAE8XBZy$ejUQ& zUq_nj^Odp-#UuN*f@9${aYj+bM_J* zFCJ0x7;`2D9uEZKI@;!wiqhKVx|G`%J;(@xv+}TUrT^ z`G-4#$3I@_C>}lH;BnSjA&lYYaT5eit8)drvbwx*ZOa#|}l|VLwfH zxDGKqOrJ75x_qk2V~12c)}1Ci1{~@L9xuJz;r)v_HXglRj>Kc)ZoQ1$9@n)H9{z(2kHk+H9%pwcc(@O$c)Zy{cuYUo5j_0MJBr7g zICvy4kHjPIbHd~4gDM^ux5U6>gcKhS=TFLddmv06I}nA3`6nii9bkC;w3Fd+Y^N%Z z9Z>Oj;U^}K9q0%i#eeN69s}awar&i5JhDGy>+Jy*k4`_uz+>>y_;}?0$nbE6;o<)( zTL1AQ;otp@EH45hx8vWbrg@r0de#nFGlG|2U<{A76&>cxijI{^JzkQMO&te>5ukk46QL`?m-6A5{N{+qQQ||M6l+ z@mLZEkKKQX(tmuypxB~{_%yP|JbhRKens< z% zqnYp+ykF6Oe5~j{K34GPxj(4?p!!GX_IF7C@j{3BM;tsVpO4ai>|p$3e^CF?90QMI zhtHw^IIi%IFg*PGqV*rg8UNU)=s&h8`j2fY|JWDQe>5}xv9Ck=kLNpzhb|5tZ#);J z|7c|VV_#7JaXbbdUmiM#{^JLQf9wt6@#UUq{l^c4$L2kX{$s16|JbVF@%Eme{)6ft z@s~Xv(tkYH;r-lYar7U{qVylz8UNTD)PMXCOaE~Y@Srmcf8r}fy~a*XHd@7qH{9OX zpQ!C}?)!Vs^M39*-w)b%^WsKc+)EGoyIhaqW_+m_Y;CMjao|L~!h?1Y_PI>jwTr{3Z-zZ&D$U*z=n9OCqM9xeFwQ`w1rY7-AFHHyC31b%dD)Ej@h6%wQ7BVTD87IwQ7C4YpK2`r5N=c|MU6lThckAJ{W6dti&6`OH5Zvd?{9m5#?1H zqRYFUmG{HZ80EQQl@}b3AL1)C9$PkQ-JQsvO}hBc5ymFb^*@*zjo-gEtN6XOS;g;_ z%|z3!;WT9_G_CzpOw=OzTd=-G$af@Kk1ADErr6ptRpPBrc&;kAry!Yc|-Xif_z7&4??!wde zxi>|eyg%?NoxE2u4;Ra`7-vGy3_^S?K}WrG@5|Vbxj@{5X|cj4*GSynHaQ%x!uBa= ztC%C#j8~J?{MjzC$t51-eT9|0&$WuHbUawZGNKgEMjStt4wwe4n72fqm_n4J>H=R+DZZm%vkW{W*z0gE8GD88~HlN?qs3| zetX-J=l0`$op9l>{Q$1+N`!`Xfq|{bwOhW2A_b& zWNr@kJ`HP1ce;VOcMq_Z!xU)SgvF^)!0b4-iQJW11xpLYkn!}xR)y3y71eg~r- zqpoZy8s+Oe3mz3XyJsmh{{Y-NjR9GT4+MsmRxQlsx%KsE{E}G%Lwe3tLgonvSJ}u+ zIGqYAMYn()>Ce}eQ>2?L7U|{yM!Kc7RWKA7~sbLT% zasNgR^Z1hg48PJ)X*Vv3;8(AT=j2z2R2u4n7Jt8kv8Ib(P*_u`nK7&Hc=1H`eu|V| z*1(nS!Hc=QxiaAA8vodIY3k$5S{;ohaasVcd)oXmz522A{Y_fUlWLZ7-a9Xil5%S> z+JRp%=SV-UY(tMLjmCQq+WsdF*HDlZyqO^#O-)e7K)#U$2f4AOCc)j+d*m#8yXf6E 
z`AL+7NAu#-<~39Wpr1qNKdumQ-q>jM>kzl3KkAgEm+&UF9MGFl5rG~;#JGHW+zvl? zfw)~Ro&&e{C!8C%f3Jwdt@IaN2)9+8&V$?4@BAlldvYz}w!Jh4Zi9X2z%3%LyBZhq zJMxnAz0WF#ZNn@+_ZrWA{&7a>S0B^x0J7|6mHS3sbQpN%B~?KA<-73yf~)`lV7dlR zrpZ%d*&A*0dL)gaVfqt$wUH76^^Ux+zqO?nHiYcSeyiK$HAxg`B0L=4ed$hdPKKJ6 z;kVFky)fE6dsN!@|2>lS{b%CQ{<~HGCE82NFNpTFyH(ozN726f*LbwQ{!gG?_g|pB ze#r&Ve)e;f_UW%j(*Bp$c(gbDLwS11YGxLxZ@FQ{FoK2h6s%im(R>t5nSskM1_O{iVMGS;s5Uv@A%G;&3Yvs$a+m-ucjQ%?KENfTkkMY{o z>D=x54ZPvH$1aFB+`Ch4S>y6Z-f%iD(S;xuQI^(o@rG@`k+-=QUl9E(cc}DpQS?_f z$D{w5H~x$Czh81e^fxuC^pF2*B>l%v#iReTzyBBM|8da;(LXRq|I(Kt>0fm`9{r|{ z(0|@JEvj?+F`Cm^(y&GC=5*mXD1UIfO8I9m#-7t3pQJe*n$MG-hWQ+$LH5^`2E~}g z7v7H93ooc0Z+xt_qf4B2l>HFZjTzdF1WoPEV!Wd2DYm0o$x~J_VzyBq4vIu?RAZg*WOqE*0J_R=95s{jJf`@kNMsv zx~SbNsoev)GGq~cSDLhbzOv0ORUWEx&!IpFapa#ZLHXzTHu)ps8f=TLr8$=@_~#m( z-YYb+DdTO1sGpTApz(ndjSrTr)M=bSKq{gXJbFdt2wpvs-pE5l9(p0?`(}2xn^%*x3F`d=g=91H z%{SkC^UZH(z9|vvF-rz7W>$BOuI2^nJhM<*X?B(_5r*XKAl&@@g7>W*@pykvG6CVS z=)>2lRP|2~jE6bP@l^hV4VVg%B*)YGvnyYh=N zIlW`y>jDg=D)>)f#M;$z?Z>NWULUAfudMj!YouawSvJ<5Lu(gk zHp=(V4(UzWypU#gmz*-YQ@da&;?7RR|B-C*WBu8y(0nnyL7lgxP_6nKy;7(KGrCNH zU%T$qR_(JI#Y{Q3hGxo91!u|``Wj}SsVM^kk(1=4J0@yf`$?9Z0%NN60dPgt9#t>1 zd$w9EnI+eHS*X~E#xc1C7{_>BKMu14j*Og1DMZ&jHV`$0W>QLL$;jaxV?Z_Z z@65J4!GRc0mE~(KtuU!145dsS2J=Z68mDM-N#1rLqkInM7^TzaTXaR0Z8-HPT^^1I zJ$_i|yAkgQPO+b}V0s|TOXEt>l51HVYLn(e+&edd)tqtSeMfHNMlt!6i7Bofytq(b z$yEIhjv@G_YM+TQ)(%m%*VosIAK}AA%rXx?J;acrXR5Bp6jVn0cT80$_#08qopC59 zV|{exlyUjfG8p?iD*B$lRPb?mXYu-#T_{vh)j1Tj%%;<-e4WU9v)v})6VTX3@6}IU z_Cxtb_Xu@~<_zBPg#Bq2pyR2KL*ljSv9AP5EDy#_YOrzHdVU2g> zV9*FK=BVCasG2cr3CuQ29OSgofh>I#9T!@**a6_-Flm-I_qjq_}=KAuO?>INcOT2vwHibNM^-3Dz&kW zGRC$JO|al(C1cba#}Rft=U50MtgAG_+QA6hP)|k}>qslnoM$!X@ezGft?QR{VXO=N zChMvt=0!C0?5(nKeRJ9G=U}4G!|_bqY5f?ItW4CzYwJRisc9fcrAgc-X2?D=$Dh3j z9xW?4Y@O%N{&lVRwj5eTU$~ZNPMhhrX$B%6Xc|wtAwS2`jrM2wmbH^xmJ6lRq8b_p zlYuW2q#?5dM9*kmyyWqjs*doq{TyRw)EHlUjxB#Kk0xI`f*k{j`%2}fxOV%} zuymIP+9@{VP=C^ps-b@DKWGRUUpUanh~ot4QgEw}cEOo)J9QJMIn6QB*S=Kpy;#(f 
zv?x0|VD?>n6g44*Ft}7i({y)z=B%$ROh1Lp zR?NQ0GRBW!3M%9rQ)Sh%ajN#ywF-eH^|G!#4eFh93svtK93DiM{ZDX)nYu#DohJAF z5ZzX+H5DAS+~Cj7S_86X@`7rh)}mc}G2g27X!N{)zwhYc_tcI|c*@$*qv=N?OHT7G z(Lb?MUk3Rf`J%`Lb^`MdQ}t1bY^x{x(SCU+MG7k;CKcxLdj@yP=|6{dirLv$8CbTo zjDbQ;aTNM|XrU1K^=*j!(iwXOhzmwn3gMYpaxwK{JtZ&JGg|-TDXRe&wihSJz9d>@ zT|cwJKST|Eq^BBk$bt9BI2vF#Rk)jC@f7GOVGsO< z!F?3}oJRLi=;=NRq#faQ8%E_L#<^F5k4#)G5sG{FHo`{?t0VJ~uB$`&h<0^MeB_%| zQtdsKhvy@^RyD>)UR)K1j}#mwe5CtP1s|EeO6DWIdnJO8^jLL8_(;vl1oDycl~;g| zv|jo9;UiD1h?|cvHztOUq_6lb`N)k&e%E~Df1gb*AGvHvG#@cM8xtQnbi*H(kIY>v z@{za3N9QB;-4o77R&;MRA6fSASB{U=9!?-1`EqG2e8i%QnU6S^#>_|VUn=vFubxgg zA5kq$1Rr_q`h@Y3N1jP8A7P$pjE}r^UBdWC*>%n2Bc;^|<|CUwOD-ST{B$%va@V!7 z@{!X*viQh>+~)9+gYG2okp-cJCWDW(cgMy@-ngbYeB{liegi&||5P&g$hX3k;v*S#?>)`rBjkXJklMX8n2!|Eo9ub#Xoysel}IwCBPeO059I2Q2K+GDAzlVL z);sKNq#wudHzPvm$3RD9`f;@b$pRiz8@-X-L4f$9B`G*kwZ0{CwFhU(*dw4uJPQFe zV$daDQRu!~O=^6DoHan&QD=6iE+ZQc>Hb5BcDUe)65+f97Oo=nFmx872O9ct+3^k* zjjCVtaN)Kk7$=?%iYDJui-Y-+^BaWrgxAqRW z#zp3?I_2DzF3nwa^4!&-d@4~5g!_hk2=Xh_{2VM-bacJE&(a0_Q{6tC(6V4cgNc_6 z690SPvJpmAD_|65QD0E-R83jb?UETg$fKcBM3tkDln8@#89^q%0Qf3`*sv+S)byB@8k5)Ig1Z1GM+yg_@qP~(ZB1N7KU^8KicSsk@vYj@VDJ|uW2&F6#7j3-M!`#x zTg##Si+rgJ)}ad<_!8+v5h@LivkCvPND~GpVeJ$rP1O}4K>C_=?WfqL{5I4~J|+NZ zp)OBGM@6v*7I}-pML^O73~Nk(9lNEe(a+}<4bczf8hc5X5b+<&qFpkKU?UKp5+fWd z;ArhNj1wnhacuZ&@MTK|6+ri9sQQ7e-Ww9JQnd;FuT+J+UBk)gKtSXc#-j3ecak z=`d+ZO!wr1XfYk<@`+07+!{RJIaJeK>ga);KDb3)76b=&1A^8D_aT32Xv?2yTl8XE z^wHX~<1w*<)FPJ$9o7K4ICz;Z4kCw)A)?NHEI4Ew5U@HJji9AL&_Qtu+D0xOp)x5R zix$=V_Eppt_aHko%xjEWhX}sy(Xcv}z0b6U0T!=z$3LpnE{v`L0i)|uCLLXO5PpI) zaRWag4;-Nn*pD`$S;yu_XjY8w=3xXT=x!d_rF1vDo34%F^TTVSzO^5tDZ_XKBcKHW zMwYOStI-AHi`MCwK67-fl96nTfstIclKfY!Mup)RMI`Ht6p<8BM1sI03?hlbM^Z;M zn~w~hCxeQDmm1+Co##d7BbVof;Uo2P8{;GU=gQS)m51jeug;CkN1mD+z(;mcKC)B6 zM;@FT%tvxsHkXfdn44rivc;MhJ~GRiR6f$$+C)CGJiW<$2{wqOzWX(g-_(;KJ zRYX2=4gIcZeB`=^lE6nke(;L$k+KJ4=Oe%TpQ<^0L`N-E(k&krM zEBVM1d9m;jvv9=Z9G7KsK9pl}mgkzB!?X>o9aMS(Z#ytpn*B(#7Kl*uw;N?1-eg2~E+znFECAhsz|xmr8SxbYV^Q^I`L 
z)9jpynUFy|vzyaR;@zXj9V;L^@6TQcmDAL+n)=h~n=UA-8+o^>8-fPa&ez*y& zw|+TkuPCyyagkwlz-+r)ly|m2y8}EvFwHtul+_rwte;I$)(5qyd|IhKFa=i{`j9VR z$>P)4m=>b=lg^>|7WhPsmw~WMWtvIwV0s>E-s21=;h`dvP|jnD9%+p1PWbr9nIeym z2@}JQkDn(;9v@#!{td^+_{qQh_&EGPyyIj20|^)(4?GZUeE5HgG(NgFE^_erI5(-8 z<73ODCXA1wpPD{CBJuTO^nB(kg$llYG4y<9B`+NCo^|1}@G!A=wkOQe+em(``iU@H zjQzhe=a<%6wPm=Byv3I-GjQnMT6h94mAC~L4v;tl3@ix!IknUr)ex>-EUj zKlEoWhhHE#GKiZxjj-mCk@o$K#;qd!$qjk`IWb^GXLj}o4DHFj{s`T$e4BQL1NQ`Mz60&j$R`Py!8G6whcuomkri&Bzg@JcTl|I-Yivu z+j~hRSb<=i*Im_GrFusTV!Q8cL2l@~T?NZ3Ybqzy`#RpPf^V!^b|+qE<@Zn-41w1n$T#t$U^*KGIRkT&DJmFFtcl##($?l>syka9blG2rYt#Jc6ox*JOIE*Vz!r2mc z*)@1jTt2>c(@8x2{0Ri;E2T4hNXO0O43c3?ql7psm=Jdxhurm(&<^XUSgCv-cKiG1 zbfsb{*jqjk)+oggm6Zo-qp^h`$ zrC#(mSS|YAK`~z-wm~i9DvwGJCILZ*V65gGS%1Z4B>s4g!Q>vJ#yfTj{w`tlxfe{m zJj!L|l6weNKj3};S`15#btpMXJqIR;5o&U6%ikj|N=^7{?~-frV8dQ%Rr*7Bx+=}3 ztI}h^2OFZq6QlEDh$lAN!p0M;$3%-K=8aLr6Fcq>6Hny+nZy&9*NE|iaZF%5@pFB% zHynV!^1S#1p#E zVy|30vElCDQ9SYGKNA>Fe0*1;;)(vN|B&&-M6B?jwc2T z7vqUJoth9&Tr9zOVnC;Wc;ba_amEwE54`P%*$_%(+$@C>r#WHPB;IX>x!yS4*dJ#7 z(%J0emyT=jHJ9-NUigw_*4A>)lFh8GR>M23+gaPCDcf5RID*;zIcE5U65%vCXbWGp z#DDkqd%fRKhfIXpiCxbY_!;-d68kC^F;D*(*w>Na>(f#(Z*VJfLi=*2@0<3a4Brolc-NkrJ^n_Af0Iw`c0_$eJz5}DG8 zihScJRpj?7F`#5g%6g*H4rS;A?67s22N)%ZGq>nv6XIuNw(ymN2T)4c}lIKniDQ~XF&n| z_f^%vY{wY!_SJ)YzOlxVW2@MtvbHU#*u1b6S8!?}RA&6ZY4B-m`&xkW6-rK{Pvvu{ zPbJqX*!Ij0zE?ANeRd4iXSsAg^_CkXrXzD8OkePJ1GejjkZh0O?wmc8c z@^vF$511AB_06QTqAaomTmhx=_8PMg=Jm>BSQCVQ1`-28=yn1AXd$r6B3Ji9ly`v_ zz!XMT$y=m{nsD{k!|a@v#n%qa!HAG^^dNb61P{iP@Tbru43*OQ1+*-~^e^_VJ-n$Z z+vm|oS}7+jO)0M-NDD13SSXK{7HMcgPav`QDAa)piuQ656>Vax@~9@2CTF64bH^E+ z>lN=OYDGkRLD3m#Dbn%~5D*G=EI25s!QuIp0@`!e+WVa31QKYVF#d7*zLMmev)5XC zt+m%)>v#6qhi>b)+Z8=JuDqX{UjP>3SiWPSCJ3z$U!IIbVnz>z2miB#_ zl%7Sd#*s-$8O=IlxDENHZvN*Sbsah3=KCvjNLdn?Q!0k|+&14;&k^9QsaoD?G-~+LE zN6G@ZGWW=Gx{;u(LT<=1sFkJA+z3+G{=xtqO#5uR3ZcvzwB37{*&m9 zYr7_s{HX?M11oPdi7eGQ3=reYjuQF?XI*uX$`ek%GV9F{>FUjJT~DkGqOdYB=G0lw z7=63vRX|Bl_XijWP!pJ-N5w{NirSbnC9zD4sk6~BNp0jd8*R_xi(8GHLSNlOrLUf) 
zl6!g@oLh|CcK16&FmY~ym{V)Cbz+$SN^Gdi&4M!FCt4KL+P6UjYuJEd$$Dh9Z50mF z>l;v$^PtgYeOCW0#*Q}^iHeFv73Bm~k*`8}-+&o+VAaq>t=d2m?|GhZ^1D=*ZG~!~ zfmRDm#++SceS~VEiOAdnGH<5E0=}Z#;A|vstu@!WKS`!t<{P~RhJs;_D6UW~B#PBS zkY!iCYT=~bUv{V#8az=~NKU-Xc2Mv((H|K5YA78Ll!7d`m965*Ko=r_1z*5!N?K^b zRp)dcLCbKe`U_Y9)Pc%Dl}6=ZPXcb4nB`%BZY^m;FZUlB`m9+=Sx~rDwJ>&sj-0WT z7s1Z>ZYE>eVX_F=OL}~KqE!o5l10G2qiWoe?|%7`FIc|bvyL#ub5D&jLMdj1&?l^Y z=|hW(JSZya#Ik@{HXw$CZ?`svM$=|@={T}tnm890F09;?RDpvruP0q?U(ptGiRY0A z^TKLe-q{sCpY@=8YI`VUZBLWQPB4JtfZr~u5b&yvu31S?s*()>AqI+}&uY4g17ErS z<0Sv3+CB$1$PCqsRl<1QV#@iaWIVGu4LF{+csI-;)vqh&L^U1LELW&za|(sw%sL9x z_)cxxQu-d|NIIPLFq~ucPI2#_{kId1UgpkSUv#dW6kySLB=$q+Y(!@^;+*!CyLP{* zN&8owJoQ^W_1i_l^yNPRB=VXqI()ypZw|6}X;d}fqJDJD&q)9Q{ofsm{^#q0>)$&J zA!w9DgG|$yD%`VCctJg7GBs2^b>s!mQxx4}5S1kz_J7zXljW3DGA^jY#17+Dq@nlo zg}g5G6P&Fm8p2|=`_g1cR&b`61zz>%7X!@{gM-c#z=wKQFN1Tdka}%*TeQT#2s34K z+uy+UWc1?l6i*atcLUw@Q=e3vxtGnJrnW}mxwam^@H}Uz%xPv^a`>9ofS|u6shDn1 z#d3FntKXGaHh{YNWa#r&*0zMYdPHesm?}f&>81M!T&s~RL)_(V8`#Nb?lq=;YpwU? 
zOl>PKVS*-TK5nm9;mgT!!6RKowbUzd{oh2$KTMc^w5cGqhD>-gZtRB-+hiM>m>1uX z(Hw;4E40nUJH2h9Y}>-bHms|J#9I|U3)GAowxWe+gks@0?+k8XZ?v3Eu?AVLlt$0r0D z5Ppt{izZoYKvap1K88kM6VRh48lRklzg(6pQnR*;s;9=XuJ|Gao#Hzr3oH8sz?_(6 zjS)~+ZH>T{PljeD*3iMI&2A>vtb$z9VQ@ojaEjCh4ns3h3UBM=+bS;va*8}&(`SO$ zz_;19sjlr5(B_9+Z<`*rZR&1x3NH5sokERHanjEz_!mT{P@z+t_j8Kx;iS|f=wln9 zF67^zbJ;@vD5HN`LTjnx+%w)QbfizA zRn;=ORdHT>ULq8ERpjSB446lE3IWqv7W(XTEW2y7lNoP#)Pgql;FvJfmgayE$s zSgpck{-gm^fhuO%t3LU3ARl>T>j@oJ8D>#DDZmuuF^tLC5^oem^K;0+qFMToH-`Eh z1}B0_G7*Nh_aJ-|XPwt~cxRn(l6@|sc_rQ77fmTPf^QsFEIo8=&LPI$BqjxHpne5Y z`(s&jgL>SgH$dVEkswEDUC7!R6e87-_rQ;qu(%>4WVwUb>vy($Gr~Un6&YdRUZX2B z>*}EMBx_&{_2j-YT-!GDqU8BA=Q!^k{FnF83#uEit6osOn6|LjSsvFpTg8s+LhX30 zk8!X}c+EC%znlJr{k8a#r0tb?w?7}k?Mx5?oUuc;pmr4g_B z96omPnosZp!kvoj?a$`yW?r0x4&^F=D7RQm+^z%fEa9sM(7@oO-ZSvc6~?lVaM@dA zk{YP9$?Kh)M1KpZ*!gu#@L{f#`rO>>PA2|3CRi1cC;FUO)fP<S`Biqn30|5b3>e`N;8 zY4>J^jMJ{jyaAl{p-d5{t+^vOPMez<1g9OGDZyz!eum(*E1r?yw2I7Ijnhv1@|NSY z14aeJX(L8;9j84!vYT;Qu}oJ)ZL5I7TnQw zoVKO@|8ZKb$z5??A>gzd2i;7ZRtp1PPzjqf-dw5t#IPJY7B{=PfC%Xfu-5LFx z!D)5T*TiY(|Lxj1?Te|QS`-i$S;lHdA+kxZ?JE(ggYls^C+E@?`AgE**cc!n1Zd2f-Gj^4CgWkH%;eHo;R7032kC4bit>K83-a;=TSr)V>?DX}uCQulef<%IQMuUzuC|$z zISO*2{R|*<9}+6UrUOaSKG-xKn=+(ne6{lH`tPCXUQXdC$jie$Dr=L{DxL-s@Z02q zs>;aCOfhngX5j7{56pg`;p#ecKHUW!YWj9nhZ7fHro$<}hN{C84-p-Xocr6;p(QD? z2zBUMkhFn){zM%a9N{O=hYNQ{#*vrr9pW{|$eMTvFy=^iidE3CVK>#VxmDD#;UveV zILzmXV#`3WWuVxy6v!DGNzpt)tDmov3?grhv zV%o+lW$vAL&%u$e^&V}(qwDC=6ns(NqgxH@N0jOv)=egdwGlTwL);r0m>bE|q30UO zLt-xTCifeYLGty6O{I?fL}->CxS6!Y0$+>1o>7oc?`BdBk&sZgGO6a?&~3v>?DkO3 zaNc3K1DphU2_Y2&YHT`>0o6PXL*cpO3K{MwQ%9-_v_+X;|O*7elM4)PoXRpeyWl#~FxbcT-z5rUz~&ZE-~JZP6DWg)As0 zF?=e7^@2I{YhTd!MAWzaN2>3&0rb67@-OdRjQJ*RGiVm8WgQqomk57ad>eWO`sFa5 zcR%tGlrs4#7{q6W*xs22?URGGC$<;o9bWSRF)h^0jcV3~iA_!i<{}K_LmDM+I*C)$ z4xCW3$b`c3$!FqW=Dq6s5KsHONpnv0rbsh)!Hj$L7i}_eZJ^dCaBplA+3CMzY34S( z!`DOSj7U$b`PKEL+yX)Aw2spylIP9oW90)_*If!WXGeLwt}=IQhBZd#)Q^>?7uU41 zjQ+Hd(~(`kSjXabrMrN|^``d|SGYTVK`%3`0AH%&EyKMRsd>%M38;rAJX2@s#d0#? 
zwdXQkvlmXmaR)esqv#-%v#qq8#a#n0MV1dB{td#%FzYw7uEjVMOV7Sv>bS3@(GyP| za>Xsc_kjeB?&&y0amYgo)v7WzNAX~GKls?xHT&2iHeZLTeZiH8hICYZA2}b-1`fi4=&N534 zD{LKkjGdFABfuq4Q^BOE1$nG5BV+gZ(NL-K{(7aGrvVv=b6N>K_javJ=9zt&5%41S zS~%ltCgmV)?aC7<>kjvPP1MpE{f zEk?)e6rT`Y{@mhCLira7BD}IuMs5ig@nrML47z#caq>*>fuLz~avY}3bN>N(vZLfY zI5Uqo=c`MgT~RAZ#K%Yy@y2UVI~a@9G)N7nAaf)}!e@`tB%JI+_xu(N1c6_r+Lie6 z&B=dz{MZ{O4!A^y4;DW@hzlA&{utMN@gpInOYvi1T=4jz^mgah34!Cs(bypIqbc^v z`0;6Mm*R&jHq`i0@XoD^AL}zhh#$`W!Q#il{#}V5Q!;|bj}iTEAbyVPRc!IfV8&kCzS6*>r#@`kJzuT=jm};7{+_#UA%S_p02E$=%Z!Y|7bcBqrbA)@~ zlU>d_!lU5?x$rf-kQ;LmPHdQiK)|0!51JuVyorNonr)5>t_;5$S0*`>(5A+%IsPk8 z(&&6OO{1r3;dqmK!aO0p&d(5z=ZWcc{xmEm^NSH7HDhVGkKBV^gE)1&r-bDORG?^8 zE#4l?8BTieU^MN)NTCO*Vh`A8p$7-yLTI0bzqwuRJ!d;1Ff`yQ4&bo;gHB%8-l$^i z%N1zV0T1>@HtkGR1;Q*6ey7q2KV5IiuRe;jSbr3-)7SS9c1czR&D!s>}3VK_%Fxp%NGc33Q?aYc+xdC(}sh-_%e$uO7uq z?EL-vM6;a6U$gxvaf?9wL?k7CEK(poQzZUJN_?t7{E5+0;#HLR*vp7-97c!_6o|hp z5ML$|j}VEEp~TzcDe+NbrNmR-$D7lc+uY@51Sy-yJhJb|*Bv<4s~Q>m1q>zVvHwMf z`ucc(6ON-|ycti$PzhqlL@}1d3u3GpMod@^zc*;ngzp{lGvOe?gd5Gip+5h9xiBl_ ztNq9B!d1Spn-uS_nU$$Tq!>XYYXlYPu?Rt=bG-zS`VAwRIo6A6CNW8>nVzf2*wr$2 zvON$t;LBcQ>{{nZd1X;v@Aaa*q6A)Yk=H9CuMblRugYHH;91pt<>0w!qJzf_gU7>4 z`Rqxd@j>A8zu}b6YvE#i5cw2PK8Zq~9!{3_Y5aSnPhpJxH6`iOg*YJ|^;h_hi#e3v zp*SJ_3H(w;esytT{7WJH*2anb+IuiSJTe6e3ae{~V=s>LH{TCK3F}gU^|~<1dQF(X z+AOl3MOjbqvYruuH4~8a$QlxlqT~FXc|K)*HkPu^5m;x6tPjTutd9;Q&b%FdZ>T`l z-yZNevnzc2R9q9>Vo9>iRiQI)&DYC(F==HRf=#aQ9z-;y&#C|WT_V+@SbwSZ4u;Wxh4yhE(NRPAs5~hJb`$vf)Zb= z5QrCw#2=uT z{u1F&3(0Ct=3X#|CFO`PyOyYpwxLj3C5KTQ z&Sj`EAp~bc2(DU=S5XL#*E|cqJTTDCLpPw(Yx|f+^nN}4E%@kQB5Rl+Ypx*cXi-*A zj3DdxgNX%OVyFfA45CTwdNJ`=zuis7_@>MvD93K-nJ|c2lgkSK&|Ksl31EQ+7 z{ycb$iJnnIF%?Niq0l~(EcmdR38v1}jA4peYS$i;l_pmQHLrBBfpj=cYGs!1GRxG; zKI?)#5HWcu0%|5k4=c+-vJf9Ut}=J6z4w_1oS8wO@4NT=L!8%HYwh1&d+qf*XYaN9 z5RIc}!Ta4>gudqy(K`tAaWs|&&KoXH%_a7VsbaS6%)*`7b_~h3GwT%moTz2e>% z-)w5dcL-mS;yW^`uN&i6hc8oorI(q$x^8SVETH;Mb`vN0xfN|E`7?J&P4dP~WFWrZ z&2u1T4<@Q#>qb>iWU9xCsxw5@mj@Hovz@9N%G;_wEvfqOMlzGX)$Hjf*HG0$Gex=j 
zpH%hHe=^l`n;FVW1`|Jd6y8(5v$$N6@08xritkMQ#W&ws7EUZ!siKX4kXbNJv|yo% zHQo_U8b4D-qfnX0^jg1xAUj^=iEK0#K86Xu+)RaEXlBBPio)Nb!uv4c%ROemRaAIt zlc$H448`NGt{CP_XM*x<_{uWcw4Z@cXpRa@}04_yN3EAqc^^jx!yH)PX&3x{_#L!(xD(~ zQg#zHDWi#*^l=a~DS8k==ItN~dt8`1GU4l6@ty1-3BJQqT~9Jy{h6*_qON37*Peky zmo`WoJf9wHJ9u)&c;h=K2M|4SrstzZs^`5%rpF%0^h~09ssd@7a7~x1P4}*A#dmfG zdf_{1R3Gvw691UKcu`-bsPFUuqAx8_Y}Zc*B;rwmR8aVQEphDI13kO%;&nvzaHhJb zfvV1LV5)nFs%KHvR{})UPe`cVD5+Yvmc*k20iMphh^pQdKvmzzR40n6*90)t=dL5p zoD(22i{DD!`OZTLzW7e#ca&MIOK8P+wyz;pjcH}oss6;OVa%#eE>WxAzr?Hx5v`g+ zt-7oft(sUW$#*{cZwbEhD6{IB1b=*I0<|YYN$nZN>=`85^SF}PbFe?@pb1KLKf%Kn z^WPx7f2Y#3_Zz~9@F*s{pu9-jS5fCIM9#e(0!Q7wHK($S1vG>&2pwv7e+kej9lb9V~QpD&WU*6eCOHjz;~`) zDaChwUrqSVopMjU6G=^sl2a2K&r=iY&Wrsinz*4K;X4kQ$amf-lH@zTj&8+wMmH$h z@}1W&DBSo?4!o1#JJmAJ9{gKhV(XVOYO9gink3q~UB+y!?@M}ct&H~IV2_(hzr058 zPqq>sPK6I35Ly5-G>*__m6TV-P??m@^@tq&;ziPfSrrtAy38D54WA=S_j@ozZoEX&#+{mFTJ!%R%U81m^CoZ_ zQ@w&LRRRDajkbURJU$XFvENNHPh1s=&zWZ7u?S|3mM)RB8}60k%HJ<}D{e-g3Chxd zTrG?sOZ_4Uox;=Rc%cU9llci=Sfs*drpdf;k2ZEf7NhNpD>m=fx>jta zCRxcA{118hhIZO5@!Wp}FxWI&FX6ii@wxv@lTc?QMGLT4f>GFlC;s6f zddXl!HlE0rid*2Jy`SJAcf2+IBz#^+9$jj_$q&A{ZfNx~m^AYhC!{u%Xx!Fpl8G5T(` z!BGIMG-`(6;?Qz?>{6Mm<~^K5{)Dr+)4^r^*ON zXmCC0x|6(Z8oeABwe!3HQ91ewJxpG&k3FlmrOUHQpNrwS{Jnfa@w`Yrx)I!^jLaLy z$=Po&pr%B5Qd|_z-Csp6O)q2Tzhfit5O~~*!8U|h^^JV7UdA?s*$Fli@F(_ZfR|Eo<|<+3u&JedHesS($ytr(~y+ zitDAfb81r<*UQwH%FPM(8QzF)kCMn2B*{;^N!dnmF@TVxm5$5I4FCM|5O6Mvn1XKC>ypd|PId#yldk zDcXE}X48CgaAwm&^YGdj+?8rbIJ6+ZU^B^+5{~nI_SO#ImHNioQ1}~H%Ndk=^|*h< z(yY{T`n?9_VR#uZcdG&XOk5Ari0eTbaSm(NgA0b~mYgDMLM)fH=0x3+%eeXj4+1Vo zAC2*qoYD!-TEXQ`C95^|pzdCYMv!`pQ?-rCEU`UxE)`3SjFnPBx><2jG-8~@hpt%S_f`Cr(;Me1aCZTiOZd! 
z8RCrI0pM`X9Rn)19V^HgB;Xf6ou%=eE~&Z~wej&P+)k1>IbPM?Sz_JU*0Y3DS(0GX zXpPZh)vlJ4lSPI&!B4ZNhahZjP(e>1jNo6HHRUZ3jiV2GeBTO1_}vP7RHMTo_Mm$U zWz05%Fjj>two1+S@VNMkoi8#OASJCdv~DH9DW|POF}r{B+C;2`Lrvp_p1g3knvdQG z>4?q<@Yqb7YE;;Q_>@NOg|#@F*&sw(GGw?CNN=a31%9@(=0_}8Vbe#!ms;{=luZ~Hdut4gtRoo zUTy5UwFYCC`C|p192BR^4<@-ox)ypfBdS9I`;;9e4xthoK^8{d47e&~y8AjR-jA5| zL`?NAB4#oK@#(ZkQU;IHYvj$yMsP*cOk5Eal}A@Z?WEh9(pqn8@{>!gEc)Nfr8d_% zv!)R}ow=0F_33at(l)qf+wNqV@OC_J`R4#Rc zvI}ylZ-1B0r9S?-OLD2%KmS|jQoqWtI+xld|I>1*hrLLEEDwjI*1IDhX?Om5k zy-p;4y{(hE)Hot$`8MfXsyfmmm-_YtKe?2T`+zWS*J0I8dLwWeFFfQ5BIH)uQ1Mn; znyCK}1;;EGM`B-sRN6?YN6p=G|VrDj6)s#USJc;3e}k z@djY$EZ+M=CoHD>7kS|XbR;BwTX5mGa34rrOJ7BhEsMNkm;mqH_bY-=AJ&tKW(KuE zxGM~uAfa+WEEdvI8EqlGOxAnVCZTYy(j+Kis|~g>JQh~yX26@y2XIGCqIDo-9EG)g zlX8-<0OtV%e4d26DC{j4TUaF;%a<{K*(d5Hd%nre47OvkF!n55#|uruvDEk|a|k-9 zO{;*{=@3+``6I^Z6sj=MPbQ9Jr_=ap)HDO9{6rb^Isc&%zC9wtB zrQE6l%K-=c&lh52rnK5H(O!9@<+Aq6i$DRwxCqQpmAH?WFHQ`j2`c9I_77Wdox;Ah zyHVSDeS@aO+x3Dj%HtC*R~NR=ew%!H(4E}_r+U&hTTUn|dPl*-M zswKazAb~CL&fSbrWw9q1CrtEEY#~e2NPef|LX~kd+Fh}KNkx#pId~MEIly9)$+1SyWSKdMw-Jw6tRXA|UOPfx{DwT}5W*}G zL_By+xg+%ZDoCz_t%Lb^7=Y#wNRm|bI<8lMrWWNXV1fu{v&IAsjj^Lij3u><{MOvC zpTIyndBtN{%Fh2uh?+i8HDYWqm)sMHA0UjxAl2m^kJMR(oQrCLKK^NxLd!uNA4$ksH(9|C1c6@mX2I>i;V z1O2C}5dE#25&dFPB?XZ0v6Ud7>4y9ik3CY7{`>cTbgln7-gf!#W3$@jzh%dz{5SmT zKhuAsjw6DpZ?*B?;&-mvf5&{%QU5)%+0%bVJ>4b#d$jT2+<&(nL-g&NJp9+R#h?E& z{M`-{UI_4iu}g%%&IIWggTILvrp*Ls)1Vg?k{n0iAvH3U(1JNCM`$3pzzQKT4D)_0 zm=Hy$y+gQE!j?jwO|O~na0r!FeI8ajrp5Ro>GhDW>?fNM$lh4INo5-^CpW1+K)+4N z%h6W?9S$iD!UQ-~XAQ5NG-jlhdlkz`RPwfIhj~ta!e}$)B?*P-3plsodPDeW5Xq?c z_-O^^{;}2Z)AG`LC+uC&Q=8>5_epK2!e7*0;}qP}ARI&AxipguHYGTYt8*y45aG z4Yu)i{0vtx!27L)a%kbNQIl++O71znNY64A=o5b6D)4hGpkxKOiqE41S-wmod6t*{ z{q7^r{-x2$t$$}o_wTS{-uky#?B7q#(7#ora~;^Z8fWL)zw+$dNwjkh!^hgVj7qU% zb=EPO+L5#y_mOTyiuhPxIebGl?9#Je1LHID%t7=L(p?qyd!BIbu3$!{i^Sg2zU8mC zyyEr#&W_=A@u*+Ca^`u%EAOZzUT+?44_>hY{NvRx)hAx+H~r(~yf;qni|^2^z+>K* z{FX+RQU=cdkinKco!lWJh3R>OZheV@&U?UTzNm!PCO{M7d6Y@0BlA@;r0=o=YZ6G} 
zY~K>;aAYplXl3TfMw?M{OOjB4zly^w4QsY1g_$0yA>-D8A~L9a~#ifZ+@JC(_K#2 ziWHqEGoy_(1>iZiky71FN=kK=G{$;vqV|o?#1Fa%pX}ixJ|+o#eqO}zsdwTtoZ>Us z4WB`PPk(~V>2ie(e+m(ifWo^_IrW$yRH`@lM5U|a;X0r`^_==e%`(q`h!bE!jFhJb_r1G49Gt+;1~MoFw$YP8*sE&I!AkOC3y;? zFCj?@^iTxW#V`rvX#_2)YF|5t4fjeM?8BXd{a}T2uzOa8_#gNpGowm|3$9EBbcmPm zSYLH|e>ngW3<|4YgqN6uEExqd^EJi6WQdbK`wIIk=26yXoSfal!hRD|gd5i@T&H5U zH$Pavy<_q4mvX=HaM3f~;^EeE$#}T3+%p~;B;(;LeGn|)@i6!ipYc$(uEX=k>}?&x z>ytx%@w#`mH@p@elEmwgL+!y!+0Q>-$N%CJugW!7iC>y#&Zk#$ zsw!nQB>x{`U6GX4MnV5BD9^R`>Vri&(TvT#@t%vRm`^ZO%p1TC{J^?(xo}wpDMylP z9mum1HTbMRI8H>D`60DR zXgkBf4XEQ&ssHP`-QlDMdKeu0!K?-x0p+U+{mPTM-v##=1YY9B?eBDqe`S~YX0haaFs`^g zeBx)7e?C$CpwIcB;MES#pCdPS46pPezj!_5HQ$IUlEmx!qW0jmtA~HQR!{bc*NRs< z%fBwY*+u+9AMJ~OadWPoe_0Cs@UKOa{4kBbzT9d4_4TGM!e`ixzWCQ;v#*YSJ+ap- z|9X6)A5^BS{LklKcW&$$|5{q;pMO35lr;akuh0ws8dd0xf1T{!KK`|Hf-nBHZFvXy z*Psm@i-$Az`sWi*dW(lqdnMyxuf6Tz6McgG^NFS7ea6GgmpVLuoL$#3yha!J#p}A6 z-tcPNBZ=3Kd)kASrJH}eCOzO2uVh>2_*dTAuH#?7<%|65*E7EPS7E+X`o1H--TZ4w zzIQUTGEgK#Ljya=zaCJw2~s^Y~ZRXUM+_U+~7ilD_cEziyb} zn}5l_km|tN&wcQ(7g}T;;a`*P_rt%&=J?}Z&U!CutoIU@;u$*9Xt5Wg@RLzEXB1{9 z8w9zGY_*g1#sDmNY(?Br==jkXkE-6p5i>2fDSf`erD4rJ3kLWI@w^7QtE&t%)XbQj;1~ zlwPZ)J~FmirJ6`PRn!JX;xS6^XtfW~s#USJTCL(^syvj?mw+9`$IK_Tx1Y(49k9Kl zpd;t5wa>{RAqkKK>-7)#@;LkKz1QQne>-RGwW_sNHM~u4aLmHMRIjnksrBZPALmxi;z!iikrFQUS10rb+g7Qb@2?K}t9Gt2J0{Y(1-ulE%kbY+vqNH> zjIXlHO<{CiqQv_)n75bJWs)TEXj8EmARptpE6#$?)vEyf=Z|_dI4qPXpx<~3=t=m- zaS@|>tkR<#`0Cg&mjv*7*JUH{vIB4#>0J@;IRY3Oj1s$rl?r7EqiG<+B7(-@H*irC z`O#D(QFHVsW;UN*`&(jWCyANOpmV#22}V^c$Ym-1d(G?!r*rGcM-t<+=B98uuV0ui zRiPQFteWN)T@8JyMOWLyh!JsL@OqLv{u1U-F!SLXXMf?l9-iMl?88>SfyKFNxPz}W zB<xG592sh3~K}`a)F?Q#wJ5qF1}#@;(%8gZm@ z5D`eUr$8dHyLJC2m#XkmiX*S}`1*fg?D><>M0>`=dm-2Xb6Ot9)#r7oxx2*K6PBPH z8Q$Hjko6EQxS|oaAOh)#7>M;?txbIH z+o^}{7rgCixXXAOeOnR~-e%1X4sYvjNq$MZef6_|@b;NocNK5HKX6a*_T@eQ`?Pd$!*4^~P+mf5^cw2b59lU*26$;+E;@A1u zb;hEXx5iwZfVo$oUp=~vFTj2LQy1VqKyuoU^M#MQ!)>=u3EU#s_H7Ne|J;XBX*le! 
z`55uM8H9X>| z(Ji`;F4)WSEncd&>NuTX zgkReq$^`d`GXfCm!5UH?%Ji(XV-l0QBDY#0~6t z#}PDy=R~j)r4J1;UK-j97lQ}P5G>$nGFd!c@sLaZ?bPv`FZ33XP~wclzV?OXr^*Bg z_f`@K=WZiW^rj5q<68<|@Lvn|mn!t`RmRkZ{WX8wa)iUj6v6rF;wi%Xvbd-!xO~L$ z?Ago_T+^unDuSW6l`cvbdQ=BI-yDzrp;yW&Yw4d%??GXDHI(3QXjcI_*{?tL>tx<4 zytXGq6@Ijb@6D$QpB=@kFo~F$=sQpGALl(17Ptyi_Oz)&A2`yLOwlwOUEU;9bsg;3 zO&D4`{xeN4R2pgRKQQS-AGP=o+&04Wg7Wil#FZ&ZtC}ND%&)+r>lVRE#Tomfk9)Yc zy?iRVG_2(SLmmiZFPbWXj$RO4^T<$*{AyPl5IsM|U4cZ36;WqU57X12CxGkXdXTi2k zUVk3i*6#Xq?8#6m#F4E5P>9uALsa3ot!+?ep#sN z1b!K|$%|ip@n|_ZQ;rG?uw?jb%}EOQ?D%Mb4?ZJdfL$Z$yeo)dFUU-4!S;u_ z*?v#7{p9G@rhofgFVhcaED!URA1NZ(X&%A8c^47Pv_!bSlH5Nz+BiVCd3*bR_s$|# zl{=--74P8I)CkYXrnH`H(KE#8!8j#sWFA1;g}`UqGoaaCeuvD~DBOQ!fV185j$48l zoauqE{1{t`TR6PZ9XMF{jW4^}GwtP2$~9bU|Xnon^n*Hj4H;WW7K8 zwQclgzaMW5$bQc;A=_`t#y~XMb7M&Mi|0gwwqHkT==Q7I5XfvF-Vl=g*1a8s{f4Io zWxqc0UEUhs4O`m4_vnDv53l<#-WKfrz5Gw`Jq_z1ndfh5+3VB*e^9TjT*Mvv2$lL*DC&(RVpw|Q<$O&CCFc3DMja&Seh{8BxTxiDMdA&`s%G}I zmK`J4!7U!bf;8qg_@py^=wfxaAz5OQDT>4R432D#CZ%$bk_bV#DjYT@J@kxKU2jk& z%V6O!mq@hMbg3q#%=9Dnu8AvUYA~?4+aejmm1$C{7oHGXKd80#7{|obFcwo4{9R^j z>Z`oO;k{Ld0nA;p`!kmG%b3KCPvB;M@MX;2W*kuE?50|!UTg37?H#UwNj6KAYDE7kN3HZan zV;sd^`-;kDjt?D%1!c3rg79lq?aZDoM?l>=ue$)qzm#CM$+wKlPlDE=kt~BbMKv#vk8_;qxb=nR(@+_GOO^ygf?i zx$Kcs*TmbuH&Pu4Gu@p%H zT5C5<0b`zd9u~S@E~!R3J|qcY)h<6+>Fx(-*9g#?X+LTdpqE4FrGZLd-X$(-{~EF6 zj63v_@7`WRl2`AK=l9QCrNsaPhMzy8iJK=9HTwY9$1cX(z*q>@(rUA2Sg8^^I2+AD3i=gpWTM0>#JfXMEw~*%~i=eCD7pe2lGj$H%b~ z0>sCI77-tB<@v_P|FZbU$HkV`_&CdQxA5^|VMp4M>%<}JImVcR{g53rtp2XYf^YvlG!j3AS?+S&4vTq*W z%+Ye#V9%_^D-?X9MHDGryiUkmSw(g?fT>^(SkdBJ{#dYm+yb793;P)qR^=q|6jn>( zN8j>M&%8(SF$E%@WzPmLpSby6^4T@fzkC?>q}vH;>zq$PRzxz7WgP~2Ma>ZLOv7Gx zEw4?Cd3G(?aawd&)m9~~wW#Z1{jFukMjxT#DeBQu05Q|Dlu}by#$3f&U+Cq0jbU?Q zurxsgTjlI)>X}z!l3FvuoSBJVIHgB0w3(TCVI&m;lGD+#74#(Oi~9&NN-=(RaWc9r zd=7l=O$o)Si{D8siz|Dna)XJY4qFa3_PyySz15Q~E44oyUdC)>!q012b-k9=!KyVe zL6cIG-4BodY0QlzU1lwy($$IxIpw4^aWxn$jqsWHL^A;Jv(e;z`b|6n`}ZGV>jvKe z=+t9NUF}bFZzG@pTZQ{|SbOX>S_Iu{a=K8sN?j{1YPOavqAnbX(E>5Bs%>NytFk}# 
zc{72JdiRW+wH-i?=r<8LChJ@a7(tF>03>wmXZ#NYD^6~qJTr5)qwhhP_fFrhe!}bf z#ZNr+UAFwL>3hr{czrAPXh-=RZZ)O+Tre~9YNW@ zV1h?&(;P3<{z>NxwOh%@^!z=Qk0}uO6r~0)pY%U^$>)sLr+ixH8$fbi$qS2_>XvT+ zz|F2@zXM?bJKqUg7I0PFO65%@Ni<*@s>BK%;Losa{FRD6q+Z6bqZzEO6>H$b#nd}V z5%1GnR923Ja-1V9Tj>fu-0c1X@c0~8a)RU{QZIm%n7(9;bN>nW_c`KEzhJQDJ;SiJHdd8yK>20|BqpBC#gJJ#CNJB-TJtQ;C?ifPkrt{tGDCR15U}bjvNs z-mj&#Zm0cJK54Eo8)AL+LNo)H72*$QxtEM12;7RiyrLD-E(WBna7WtXf0AHH;nVge zHG=I)*LnZx&k&odr2OYw^YqWN8TSJpr&Ymw=524!Owx3ij~|?JKk)JT(ZS2-lEF(p z(M-^MJa6*7<7YM7d45*9-GiS!l6}|s*|tZ$__+4FcKMl785%#+914=3*~;s?W~+z36HIqa-v|D~i;q8F*RHEcs!nT4vkeuNI$w z2|Ax`qmS#d>Er5Uxh1AZFj=Ql23I@b)sM$uN_-*)e1c)KHx!`LjK#v8HW2P*^6bne zRBpo+=_-h&7VLKcf_^oFOEeRH$@y1U+V}=SiDR4A8leZ-+emHWboj1}udRgs0jlE@ z!+l|D=6I*UdOw%F9!}Ugu*4mF1(&#=lW;&bm((*xdX-xeWlk3>`@1h8E%gnf@$4QZ zN@dK~6l9^Z0}x_Wz{DFWOV)X!FuiUO;$AAL*D&a$+)e@c%u&3-( z&4>o$fOOK5LCoBOcm0 zq``SNj9m#mK5h8=^<5(Z(X&90K8VA*=qd`z({BRs)4jXUaz;+~9xrFinQ{uxP=iAsN5H>P zE-?->e}4xS20PjIJjQQ(s!6Tha|@$=a9Oy9_`&i)A0@w zEiGX9gFba{0k##31+Y~Zmw^2~j8(9)mG3gxz4`k@b5SqP;Y(nHS(bn$2xU#hdbwPb zW~M7t1HtoT%v8sFLp;Vx`sW5IXS#hVwm*U`?M=h@)J~T0qg!Z-1Skphc5lz%>+N!b znr67Q2Nt6Opfz7hq8~G21w-vX1s!|ceiGIc`a~rjzy6RCl7X$m7ftYj#jOj*s0?oS zQzV_63vYoHK}!r&S=^JJ(ldQ8AxK(GIWCtjz>KVTXQjE4BiH;6KJLlDMfSNe(pZ{E zT1+QN$}`CS=PI1>bnI!nMkWl!*aLbu^*#dcDW~)ZvL|~FIcW|JoZM;QOa2oN_m8&aJ74^rFPDLu?*LkHy@=DOhir<5+%M1oc?!rvDLWvS!;$a@p;sH&u4xYwjZ5^l3J z1O*8aH4?yRL<0?)3*B&Ax-}}iL>588mKjE6pc_yLiFN|%Tv~A(opEM#)ERNya05hz zERuvZ0bE#AqJrM0MPkq_K)7F3oqK!9g28#``~TD0wK<4mBZHtf#f!j zK({?nFu==P177LeRG~f5#A<9l;J&b| zd-QDvF9&~h-URpP0bJ2VTKb>{np@@_1to7l#BTv5c0)P*-)4tj__neEF1Ev)-fsT| zZV-`&Va(kCUEse(2XAU@Sj^XIUJMrNbOwRjPzeCGyUQhxfRqP0w_RW6wk4Lq?_PNA z=H(r3eHpjm0JousJ8?ghLRk2PTfYqo*17#Xpz;>;qPN}t>8!R7Qdw{8UwQ{sgRlv0 z5G7RK9?5Off8{R!nsX=Cz>^Am;_rctp@;mQAe7&O;88%h%b~w)iO?ZX48Shg5NR0^ z4W||pl&Hp;#mr+ec+e|SpuqUJ@VOJ56V2EeG3TaGT6~1;i@Yl~x{+yz?s8jqc{7?O z#Btt~EZIH!L4(%}e|6p-?$K7SF|bq{GHyn8ry&)5TT3TUXT*sW&HJrY z`KfOGPa8Sl7kBx0IF|Oi`NWfM|6=6lU@;U1%AO9~D+3X}<@k??vpz&g;I(u 
z%R$#fn3c+leaColyF0N09>ciY7lPI!|IdhX+pRw?iub1F6lHk^yR})~3*6epIuI|~ z#RhLr;zKMJdb!y>iwJ;zcy#b{| z>yVw-tfhr-`^!tr{KjWwS!Un{tW+z0wix&k8d19z*LoWs(0K;qeE0lVBR>8K?m3tv zuOu*V+uv)EEzjPr(^P9NMkN@uOv)7u^I50|oza#Y#blCShH6pk2n=8P*R(n^^=9Dd zn{1v-U)+F4aBiO+s~~Y3ZjRg7PyqN0t#e!JylG854e>JfXr6{RPeWWwqtSCEn~_jx zzd}<|AEjU-6*NLYPqMrDAjo=5n&c}xOR^@wN6Q?LMjG&|-gNs>Pz?#u#lAs?3aOq!THbDw7t+7+mYwapg7^K7_W3vv zKUCDihc%dn1=T}UJDm0}T&hIv5S|-{yd0$+l5z-7ldpg>((Q5dsqrHE;C8YB^XRXt z9FC1UhjMtrNS}A69Bz~~s{W=^^tU~rARUW`Ij0Wvw>s|yP2~k%E^BXCWBXf2jZM|j z-v1NLKpETN{|=Dy+Y}uwg=q!l1s2kQNL3;oP0%U&Svlxu&lpwxtQF^yC;77HNIxq_ z{jC1Eh<;WF`dJ0(X8?3hz6m70Sy8_ZGWAOxgUi#*F!FU&D>e^Wij!&9Hp-d+Pd<11 zzA1o_&yO~7MJs7uuW*BH*$e8@@SA>;aY~2!{H6~I*%#aK-def%awuVf|9f!e((hIE zqC_eT$`k5i+uhcEi$4DV_Xyn9gG(>r*{rD{%?q?0oBngue?Y(4bBsQ-9$a!5F=A>B zpSTaT88higsApD2t&5q}SYK>Sh?E(^-=QOACSFlyE^3;fEAaNrk^_@Ks#v)qj4(z0 z7n~WZztlwOFW0r#Uy%K-zlQV|9qBKBd6Ae`(O-Ho{pD62(_c0svGOklC5GFun|rfn zV*z$M^<3h1&=s)TgI{V$yFF$L+>Uz8HwFA=;to_T`Jq4B^apw0X^C6?DlQKfAL9Hk zQ2UyRso{gu4Fw#xAq%K@0H)fC;*c&Dyjm&fC98@9E>#>Pc;=qc_3PGO7dCPr2eNWTqk{ z1uwxX|AHv>AQRPtY?Q${*q4H`;-%;YY*TDaL?zfcN>K@H7?MK@>jV}p4PhZf`u7D3wHK+@#27BYw`twj;r8jUR zN4)?=K@%8+!tSdqqlPI4JwZHXBM?YNcvh#Ug*ZI7!WAlFe`#3VA z=lv9#_n}60t{=zd`fI{-z4dhLTtD#r0-Wo8(h8XGb?yB)^6D+XQmJSqcN z?#inaW#A+#10OSG0A?iP&(9$M&;_tdPd|sc0A79i910AxWx(_Jn+FO&SQp@Cpeg|0 zCcj37ZZoo9RTUV??hc?PU_?b=0#pTx0FU(}*;V=Y40j+#`>}TmMo+uvAA9!Jdb&g3I9!D&hQ#;R5VI z3@!haOFRpf7Wc?(%p(xE`wTY?8eleTpd4%gS1x>M-!Ivr@~piu2)NbPC{T*}VyOQu zQ**g3c0mN9nO%7qHNMuL3-Y*;yJ2`Pz}?>}agQLE$>k1fb$${8Ud4^9df=E>;sX<9 zYfZjEE)T`=*4>KEs9wF7XcS1}=iU+u{4c;0tNJiL7TRVBAS^y-^s4x?+yB2?jk_%J<|3QFXKh z9@N^cXSt&1nC6Hb=gJl4R$^gj#=upAxKCOM-)u@^%car?hIKAp?`bBakwMH!8wf0* zU(PVJ=RJ_%jYoRS2^p#KFbj_YfV_AZm`9MS9KMeYy!9(?btP~~XaK1DE|$4%kHo7~9czy5`^DGpg*t`vOL2J4s0 z>y#E&DUJLC+ci^L%GSIc=-zd}ITyDr9p&{pi65qJ3#HDe>~Ic~#ezi<26AjUu3i}P zMySgW7$n6x_Y{|oNsa;MyP&-wEwz|-51FJ)`WQw66au}ctiZ+x^0X+_ycX!A(pJW9 zW$+qBQm7Bz;U$*l0#~DoXg$q6l7ScgNrvFXbP@jnP9nTm=C^OOi6`yFGWQ5`$GNKv 
zon11%7f*uZ5`1S3`8SLY^v%2>U>GR`l2-B7&+~^frKk$%HUu!Ow&_pW{964!L4Ohk zST948;43%ar_1+*l5Kw5du5x^GO~$HgdKhf^5M_4x-xYgws9Ra_FXBnMIBYRne8(Z0)uJx<@3(@o{E>k2!@rQt_NzlWq zs^v#T`$nfeq_0E)Vaqs)*`PB{^5`3+aam7rtK%sUUN!AFfo2moi0uTG(PWEVn2uej zX023CJ)UdL(=nIgvCKV49n2N2C*2yX*Vh%1WQNhlJwv0f7vATeWj3!xv8RxZrEkuDIbNts=|b z0I)AVP4$d|ua!)4#q7(zZ;1Qw_PRDqaks-Ie#}6<4Wdbqw>qrHmw%wFy;XO){MKRZ z7uzp3ZhT!lG@owDX z{24K*$&!(O#%h(-j~-fO_2X1z^`n=UIRY{)BpD#nQ#7Jzn+{-fUlTEfYx)e-`i^NdR_SX zHq`gFbLo3RS0KPGPvHuS%gh#7Dwd>5#$VXjHREf{iw!y=fS-vfSLQuHJ)bykE%qj)igt^xz693DN;&)b(ReH7b!qj58820X=?*X%W; z9g{%iaXm-bEQfI&?i!dy(RmM-pVC>PA2h~nVmT#7mMztGu<`bzQ8Lswe7vju z&jZ?X?SG!6{KuW{e+H+0D`UK+;EAL3`>X5nlP>+$b@?-oopW9OJr6XgZjoYx-}D68 z;0y1IU6*gT_uT99^ZwXjU4F!UU9QVt^G9`EzGY3P>+%oXt5&SPm#xcR`&i7n{Mxjb zb@@N9iC&l2PdeAS{9E@#*5&W$*Nt`g-~x&DNiF-0T91C~adkbqf9!fRDh0S6z3_2$ zJ-TUV^m_F1$a?haYogbqKVPG+M;EVQ>(NJ_I@fyiv^8u!diI*=^=SX29oD1o?zkSE zXDNS-ef<{ur8VgH9%F0JA3YYkHmyEqzujD$zPc@TZCcsi7sxJ!{sfsf$^5nmqIdKg>0gdIhayDC-jg^nmD-eEWlMqN2sO0V2F>T&SZu(w$ zMGbuB5kz+SUcp+v{1Y_RpJoNCrLk1Yatup5vx=DnU=LKb!w+DLn)z5PerE<##|#>a zQHBD>U4&dM;0J}1YpKG#eT;H58?fsWiDHCj9d#4oX=26l$`QAE?1d0}SuOo#u!37- zd)(U75ApkOO)P#h75x4hApB;<;CDqQ_$^oy#&6EK@SCjScj}_9@axjPprbAgxb=y4m?M!}^u?pOolcRdrOeO9=t_2e$Tv23Wgwu)_OoK(apy4$RKo z>WOoRLQ2R0e^Z@eRn}f<)IZx~!9NM+oaBGqsnZlb_&7}NUn?S9akV&ZG-2g=!Yez?lPl%KQyaGav^vd5*wt9z? 
zOIRFp3A@n@eV7AX(raR>n3L|Gm@ZrXI3XP9#UcbIo6uc!#2Cb)R2uqMfVNX3x>$?%y0Y~{0h$qeq&$!fJ)wQCr%mT^!Pw@nFQY!O)HywZ za~M7R&ImomIor|W<2gi+OPq`zPmN>r2#izck>phAF^1|NcY1Pn=rO2Acj*!8f*wyN z#L{D54$YN^T$Hmg)#c_-tf=Z7EW_3c3zunX|Jzkl~=y8V-p@&6iM~|YJM31&f zj2=@*GkUm2EA%)rNu|fBnM99$le$BXFOA)$$A&KGG0hlDk0-_wJ)WGT(&P6tJE2G5 z#;)n{=NSq;CPmR>QagHhW-xk8Wc09SGkT26R_QTOrN^-uj2;uaLyujC?$X281wAGi zV(H2Yv6(PM|bJM{QS-(7ml?SdZH>SO7#cnr~FwOysh4bwZJ$30G<2kGOu-l6(*+x!3| zU=&s%2rZgEfVH0oO_)!`f}kl@fO<5rNtUHP`()WaA-#PVVdZRux(o==ZUJZv@E_rY zR`6X4=(Ng~MWazU+b_^|xU~V#zVwtWN3OxTFTxF6Xv}AL2Z(3W`eXEZUps&Ffbofy zSo8qaAUZH!CVe^?wCu;&RjO=RbqyI6IT%yXukS4UaRlD8?chyj-x}I|8;*yL-+W$# z-?YLvXe!`W7OMLus5_BefnKX+SDCV`->J{rGV70Is+e zE&`KNrC;KhmQ5#fLDiln1fEpR-RzQzcFFP@j0w)-b1q9UxT;GjKaepf7j^P8V7z!EzjC1wHeq(fF=@tlX*9EI0K}f?ssJPU=}5t+No?6FIBpGXq!xK z?7q1~q|YBBCPXc2(%kt6j}EtL>VV+IhQX6;QjXCfGEQ zc6~UQwQFrbSMA!Iey(=y>`tGpRonI)wJlHjPTQevi_X!u3&U-DXmICkd!MyU@Xa-Y zc$=Goe^TMeU(;%6d$0_26lm?rA7NI`-$Ao-Cr#aV88p>+uBJxM-!Q)DD6rb*gE$8q z_rJ!@#$mf(p>|)+72}W!xKgAaHOe$lq!cRiEEdEGUA22?uG((@MQt)|S1MD$5NdC{ zGqBX`0A)e82v;ha0)y;CWx;{U!iJGLjU%iqyaIR1ERoZ+HQexnmnsd9c2$F+naP&P zZ;#agSbhz&!@nTK;a{BU@J}{H_I268NlLY`p90^!rYyd*V8#yrgX^M$8B3W;Fymb5 zAAQ=Qf*JRxA{f+RYi8GZHeSiZX1|*z!GqE`LHsZcr(R6kmW+Nz;!A1ua6k4E?_4KL zev~Ol)CqEx5*7jDub30*4C|aKTUw5$v}Xy-nNI&abU$3KL=VOtE+u+!U<$fIZU@{2 z*)lv+r%b*2;A%i=Y${juEmVO=m$_myPKR`SnR%~AXG{H*-Lq_|$f4YNU2vE3sh3wy zJzv@Rsps|4r=Hhdq0^woYb=?Nz|K5ZO)zP>^^+4E;-r)&gCKB4EY5XecXH(6Dr{5* z*|KGlQh|~qZz$%-1BL~a8^+T$Kex$9Y`ZMWK|fWa;AQR zoOiZ@U(R{s_wq$@|*oy2J*DSZCvIY=OIkjm$Cp#+EEO_zsq^%3NU6 zZ();%Ms&?4wlv}s&%EEvBSS}Y#v?KX^Trqpjf_ji0hcI`+J2%(#e=j;tNOx4_5{;; zHR`qDo%5I>O=Ys3jLCdZaBPM*4`Bzq2TJVwe_K1P!#itdXvWwP-f6M0&!3MEXLQQP z3cYqUFnS$AE})5ks8;h^c)^>|o@};~1*T5O=7fS{Grewr&FkBK8_7%=5t7AYU3}?Q zXcLK$9a7T?#u@0ejsvDxUJ+Lnmx;c9tlgio0>vl7?LP13t{m3Q=FWnG0imw2HDPH1 zP&fa9k~ksYY8I?N34w`PtdW~gg@nM&6TgKPXD&Y%Esp)jXmMef7DB+j-x+X~@f&-f zXyxoxLSSkQh8F<5_E%{%(wz%XWFe)*7U>lLkhQI-EWm3ydkuapjJDT!QhfPk8gFlF 
z{>RIcZPpXZp)z}&KO1D?HXYb~qN|1CKQ?Pg>mlXe_wk$#K3whcC)C44Px%%^<3Ek6 z&@(&zls}=x2@0Tp3e1=OscQxCJ(l>&leW;Fb;Jg9YqnE-uY{!S?J!VgV63O-qhmeo zOV?>^+5bd=>=_JgeMTB}3_Y93eikt5(`1VJG~OH zDnv*9;dfY5$-K0y@lU4He^&$i*L=VNGt4sCO73c4@u5ltHB-Sg@QDiw!GBPJW~JCm z?$QJ%G}^>^sC_KvAaIBibCeKPG0i%284F?cjI_Q-8^hQBOoy~$pIz3qy0hsrRCnSj z7M}4AFQqZL5q}6dV6i`A0pDGPnw-+*X_>ZC55_nlZ&-UF%Ua@VF>t=*-k>hoj3ubC zJOO6^*fg$aTQdA+a772>;dhCr_t<4z(NZ(ICjxnb7aBnKoR^lTQCxzQR)H3D>1DV< zD?!8Ovo*GK%=$`{PlQ)Ix9JBncKTTAd5p+3`mTDeDBXa}qku^T`FE*KQ({ZwaR?+C z$dRQ^@a0PsK(MZGbOGo4MUTnU?pkLD{^Okfd)C1J8XojA7;5%14D7THHMeDgI0f^d z)k;reU~qBPI+P&Kkv`~5TgZ!T_R{-I_M+q7n;hp%+Y5uYcUX*Ef2odrPePh6nFPYb z;k%42hrq4LRw~)K^>-)m;wSvZMC!3^(p(vb6)25D;0BObWde@OjO~K2wFl?JBVM4( z2NnYXOOv!tC^yF;-e7iEZ%A2usQ}~H;U8~yh!qas1Gpvxs;jLe$y8-CX%%4@>T55p zQsx8kQ&3E$q&OC8MDWRG7I~`D2_3j>eBs#C0y$_Ai;9vd zX*u{6o?uy4I2JV7qRa4zH;`*uC*d!W^%sZCVR~_wAY0V)zB52Bd&%7iArq3z zUJ4W9IE@8i7gT zQ}TlCZ$!%hTItdsRX8BO20LEx=dW>eFFSNcaqUWr64!`3h->WVFneVOQDPHQfQ^ej z={=m{*C8WArT7SqL8o}n8eTcYgUxXG+K{Cy`NFXVU^17!K9p<&#=jcqz?Rlx8Gm`& z)f#UuK;w$uLnEhG;VI}xblP`i)O&izfE?4;;7x_1UUT&0G3Mooh4*VUD>eyodFVZ= z4Bsuf$`P2V#l>l5?QZ~E`>{dB^E7;hL3y3~Zw*I>Z(I5%qM+ zSbrYGO&ZdHxDjayU7E*d&raZqFMwwsuqp00(&5F_D7_r3DXokdsE;0PlkL`{T;NT% zI8gkwMrqFZ@XEN<4$a|9W}7Hys}RUbjA5D@X@09#!x(12kTDYjsh#ezcnGk|-|hiY z_tm3-og-yWJu6e=ZTfG6r=g>0IF&d-9`qm@DZ>jFAAHNq8m|T4IYo&^;_ZFf5|NdUfVtNVH_8L?StL*DE09EI) z0n|1IRSn5?WmHJ6SQ!_yqr$Da6`)Hu0caduJN^^q|JR@h^C!fNu}-^n=^)lqJt8_8 zU~h`XzTsD~|10Q-(qrFgG3aR>*UR3(nO*f!Ab~QLOSZ)9M5#Y|W0RM0d{F-R^>wXpfc!b{<;78Oi?DhMbWEtj$%;3T%q*%DG)-ld67{JIC zA6<2EiAK?`nsQQ1-erp7F#D@EIqOhSnfG@*<&0ODDSEXfk~2PLjvO!`dXCgP{DZ^n z!bc@4;Ff8@qwN}QRS|JA;k4sRoUgohoG(VJ zQqBt_u(@NNfDK<(l7iAleKu=d@E}vuLBnoEK~y8?(Ox;g6@3v_LRG0Wrc1>lB9xBw zN7K^PnNZ?4F;i)p3Cx2{s#qGM_j(=rgGv>)ZjSLj;W54#DrC^wX^$0A+T-*uP|Ign zp%$4a(;0DUG}wu1yNLVI#)PE`M;sQLp}D0?TKyZO?uezPb| z>eoR%uZkjY>p`ZT^D?UE>T-=!yo;^Wj4xb%Nl7YKy!=O5#+dY17ag*RU-O*pV_PXB zaByWKd}`U+N2ggMTQ;+w4e+y5wya}!udtsN*u!Vp-IMGmz#gvbLvgxY8VW-LW;eGo 
zjgF|#YJfg45z9A}f-J_gnu1&j<0Je6uE6NC?&pdssW%keaxiOI@xOnIn*CYdPSorI zlfw7Ye{*lF=-aM0a!RY&v<7NR8l7!*X*z41C&eLdb@=kZj59byyT;+W%dDvav6TYG zd8WqGi+^i`Q>@O~i~6?aP#YztFhwV(Xy}H6job?zY{q@5g<-H2zA@vf>}~oP)StlLRHKY@T|CVA6a%LojKfY&mX5ov6W{ z)#UUqHN*c@_@55{X$y3rGcZZO-E$#Z>hGcB4`w>;fbLR%-!03w(wl7dqHP{y>BQWS zzI5Wv+~zW0%L<;GROO$P&266eKiuXUZ?VC}twO-^@ojh@%aSl%r-6^WLwH|{Zs~=- zR_)S$z81sMUgHBve85nmFE6jxIrJ4FFq0}ojepWu_Yl&5Gk*3o<(g`Vxn`_Je6Jp1JIyi{y4GDmb@fu}dPu43ezmTa3zWJpV|7Vz&rW$>d|0bdn{Q^# zPregVr29=F7mVSM5M7VJ>Ure?R?h&Xo?dD_Yt(wSTtW2|UclOvv^%;@Z@)=xGH^xD z>8aQq7Z_OwUyPAlqUzf}lIr7?`d&$7_5CAJsV`lv?PnH>ot`%LqRQj8|c$#fLnX+WZS( zBUNym{ji7eCm7&IWKrMKeHkL!y4=02MD+sMhVBJ)Z zpdb{dA~b>!8r0OEab)SvXpT&Mop|8u{;@o88w0+*KLb8Y0e-Ox{H^{9@Vemy{2vPN zaWO-<@-+g!x*hN{8B9JXz;~G#@U13QKB&MKGvH1IxTZE5xSIjLwjJ>281PgD_(BEv zd=+@S3cMc!-rSD?e0bbI`GOSq4>Q>;$t8ZweBd6ghPmwi zmzWP+EbHF-DysaWu7)aHAShK7R}Ui;2Pr7dif1TJRZ%pkDC)5-;;5f3fFg!jl+GT| zGJiL_gedX~igWW62WC_^M#>hkhvK{j#jFjudec+3oQ)?hcR-N#5>LP_qT9y8dj#KR zLcP!u+kN$y6Ry9rFkIh?W4OK&r{J1vQE;6$jK0sYu?soLvhdet*)qpOP6D7tA^YKOD*Tl#4in}sqR?2G z%uys=wp@+i@F{yD$d-%Y$)ClBtj3H+P}_2B;*aP!C^$ziFfYBx4ii_#1hYdfvz0p2 zhOkk%e|MW4{0G~5%>;`>PHk6$IGSR3J<`N4z`L%r{L+F?sHc#Q@A&nGOd`O^>eJ z{414*^jP`jk;=_V<*87)y|e;d1H`>NEfKThlzl3tKZhLke4$c;X9N zmw7G?WnxMohjrIdgCpS5+N_@meab@@3VORHWP!h_Au06o2(vzI&cd*ITr zXsttBnhx{ViXI@Dml%P8PzX2L#ex+6q3c zyK!wJ)mBUGDJs? zDaq)V;)CnpJLJ+a*$lm&Q(Wd9i6YkbAnvw0MM|7EKY|BUF8%8iMhx2<{3FFZY;js^ zxK(Cad0Ig-^cIw%&e3MiWgtjltfD9dA!-|KUU6Y(LUf=FS7wH^6Qz9(5m9PVRvY6Y zqZdjv>M`%g`kJh5SgRdt#i~Y|FlEZ5ywRra$?RA4om6BK@eo?ZWOJnPm=I@3nEcD6 z$>>NEO)TgOBqYpTCBdC%K|*HK#=LJwNAh8s-Wtw67uz_C(m02BqX~o^%u(YpI}0_? 
zOgUllfS#7<)GTTPafy-|h;Vl4I%!=DTF#7Wr+$JNQyvCfYlA%z^U+f^+)6x5;Z&D$ zoVb+31_>0~82lb%fCcPkc$rQw0e~wNUeyG5!mCJI**6i+b?ujW>0%jI;rih*3?KDH zu7t@K7>ldlQB$0H$>D1=5YuxHn0a4<$q@=ydnd7^qqlM3J zX1c^ZKnXLtFU@gbIFy4*RzsLm_Mvic*NqyDSd&#&yp4OR%(opQs|ght0<2@cSqd;& zMj&*QZEHMGVtfiN2Ql$e;$BUgi)FkP|8BxJ7idLl9q6 zlH|TbS$2`7=SPwRmln}657Hb_DUojY6wP2})I45E4*wLTS@3~Eqw&~;fCnR2E>D|- ziW6MRmN(CksUoKMQVDzxr6tv#)@sCf|6pM>(A1Lzvy7<`Lpi2}5FSh5$i^RfyCTz# zBBrC=w_jcJO+17A>!{YUWW*qvk?&sLp;c3Bsi_)cQ~XBf$Ba%@Ht?XTU^fs2>z&AW z_W4fHyHF7Gmw`S~A4N3#97eFIWzq$o5WT#00*iHcdj;D7WU;ky(JTQjbnJ+Sb*Xu2 zKRO>LBXq1Z=>B8Vyjwz)s@wFPb;scjQbEA`dkd3aV|J=9khdc+sJl`C4qsOTxcKQnwm zo#%h>Y9WRrEY8w(1mh4vPPpdKz5?=;db*53#Z~}@%ud?VN;-doa;S<3G%%Jxm}@L} z@GNf18vlL0P7|D{RVjew)na3@*gI$O!kf?q%Dbf4cY{0O)e%rlf<8E&Ha(5; z*WsbQIM!$O1|kCyg)v$kg34AY9Mz2o>DwyO$bz4Q$(z@Jlk}1fd$*JR55<^@!sFn6kX_Je~?l<}pb)jTm zi*^MbDw*TUF(oMl27RcUvP;w|QzRM)ALFvuLS@-2+aW%f(|*}2>O8SY)uk}!ZhM*Q zQe^JeP;V{{na%|6>Lyj-PCRgi9W8Bl*5da~+Ahyp+}Nbm#Mm|Cno#kQKENI-F0*mA zkb^Zg^8;Kh^ycA~cMx9UjyErK^ib()$NR`HTIGb&qy&}#w`vj|&bx`uaB^Gv3gyRi zO%+;i9^YOP(lUcMYYy^ZCQec5FurpImR&s}9mDG&_y5A=3XWv{BjPno!D6$95Q_I& z!B=Tk7-VL9>8{3A-YK01K2L~~6Y-nirL+dnruB{#0uwqJZgz23Jjjpyi@yEk;`nJMfMIjNs-&m^6* zA_dbVe^Uq(lGq{iT~YUc;kuxV$b~a<2ib!{+$jzvx^>iOlX8+ zriSxwP2rE-oMX*6V7yPWi8UuXw1n=jqDi*X$mt%r5o zehkOYg4g9$fG+4rjuZ#xC^$%K&zPD++I>TtEY^S)*aH_I=le{$5M3?Y(+c`IRaS|i zF(|TsOi+IoKg8tlFEBg&i(_&^}F(@dx>78OOAh`%WSX?KdTD_a;cdxM% zX&6xMu3A*^8nO;yRJdV+*pCXKinr>Au!`54sd#}~wWQ$PLvKE0Q9%Ne!2a}fv_D;- zi((J$lHxX_4*hN#2D3?s+cJfl9^)_Fb{ z9t*ea?A+MN4|Z+r+yU==FHsGu;mPU?vkP~bVtnG>F&dvun{Yg=PJ0H&gLbXXd#M^u zPC?bzV$V1F%I1Z7_*&*IHyWR8k!3&Mmnk+tjcT<6z;<4Mk@+)~4w&6;ec72&A&A=@ zVqI|4KQTbR>aPjJzc~}Pr-9KXn1$*IDYQGSr?aDN?G~LoSwEf1Tois19{Qo4t)L$K zSY1JtZ6z4=$8UemDSjjPT`xPudw^Z@3!+x{o{sK-!4fkzAC(Sl$Eq8U{+EYK0R7wW zAemT$^zS8znoZ1;K?C+5y@&&Ge!>|rK0x6KEqCJ&jTd0}3MgT}O3rE$MlUsc2DV#4 zZOzttP5cI}lEwWiT%lde$1Iz2>-(lCM-!nIX=fQ8Xmy=Z53~mCCxna|Jk+{4ftFWi zn-^^fe|^0(kaVq(@fC)R%CCk0hlOhU_ekrUl6>5H18h!nOZx#y4m-t4hqy<&3o6Jd 
zSw5&1&$`;sQDdK!u=R|*Mi`xc*b5TP{ylvMv=|7$4lAJv=*)9%sr^KWHmBr%ZK#i} z)OCUY5VfR{)o0|=StkI6H{t&l_+Jf^AENN4wBmatyZt%la%z9H1czC`{w6Kh*h+w> zOYKWbY|3&Cx?<$Q#4@2vtj8TBzw6OP&OEDtOW%RKNV!g~s9X}y zO&j#!2+A-~&OL2QyCtg$L;ef!C}a^HI$P35qgmX$k-L4+N9gD6tLlL{BNiWZJGd1G zfZd+W&}lZMGsgH_djIt{**UiXq*zb3ypHzuV_G$S7qR`SXtqy3s>q!=(%03|at9+p zu=pFsqg~6L;1MKSJf2mpWtRQt@C174W{3C{d8B(c!2fFCky_?q;l;#r8;Id7fJ`+o zT-ILc@JCvB=NYsi)Qds`C@iXYfpb20sDU%)K((ba z({8qvR^k#Cd?Foq1Zd&+J`Mk0hW~Fud93?ssqfJC;|S^ z<;f4sGy|6jln$m&dNhCwWE0J?p$(Mn#@)2pEl#bMEw6r~N=TU6^@lc$$l3=Y^+E;OZz!3(sUX=mk2?!Y#MD98%Z8eg5 zM^qv4R| zCM$uHb8(}24sJeUzK5KsNjxeZuh`lk8{hrVpz#hC0)vMBV4y$`&-SFH_bJEW)>oM> zSoeFSPU8B;?l=2;QNA)S2%*rIfY?CaDN}xbVQz&GNGb$qRUv>DNUD)$EtA{tKjZhl zAyD@L+42T?en)Ex%KI9*fh#x01G2sr?fr=F;t<34P%`2B(ht%2M!v_?Si)Q_5Hb<- zUkUOh4}XaYt0}*)g4-|^H!b3(q`hs|J|L9%fkLQL$5_RSSj8D#RIJiFkh~2(2+mtd z>5MQ3Wjr9U^$S4an|QIpuZ0pXBglZ^aX_OyI7TVS{uOVd@=rl|OzV}r9E$qtUJIRg9Md!vf|B2yYI590N{&18sc>@K32JiO)R^SBjcRh-?+RwQS@PP2?X%-1 zBjjo~W}6IpzwbOfZW0(SpSK8p!{0k!c04Q0sf?T$&2i#{^Y zK}pT{m_B)6j5j-ym$py1Zp`sk-SWWq-lI48=E*G@YOXtyCHGF+jhl)zQ?lgV{$w~y z?su3a_i;^3mfQ=Ze>@(QCHJq7Q=b?B=b83ArA9;Z1DAT!x`PhOlWS7*#X2%(!hGyl$j4xpdbUPLq4BR!NgPlD6z8nSz(lMO2#H zm((=5xc~1A+_kQKn%v1zX>zMEP3{OKZz1K$ZDe_Jol2hEMm0~aiSp#ufu`rT{jyHY zlS}Bil|;FJA){sLT5RQ7Xzufpb&Wc#0iY)Th_|%F>x+1 z3x+AIm0pb`${mOa9Jj7hWy99BQJHd&i1wG%Ot~h?l>1U_rd&g8rra<7$#h1_>X?qb zn2x;|t6B;xp>1nl%@W+^vD@obD;YkiJWI3J z>ssGg-HN$Dm{AKZ0>YTQae{w=Idn4`eee~`NrAA`ua!=UDn#7>6%pRdYXQn)v+t~a z#Xtm;b`jARIAK9b2$M&8sJ<+gP*l~raV4=`eDX-X+{UPUxjEr{xtA~7&!x9YhCo!# z-0M{L9rEMBjH;(KYDU$T2YP$W?si#oQP(h2;?zFewB(k7O4?kUomkpjREY)*Wh)a) z7h`{wS{8!(>f%Vk zT&i{`m%d%fWj*1-+;~RcK+0c;c@OdHT1=W-D~(uvj-uarm;O`>qT{Ft1C znORNb$^us5HW%0o@mXcTc|ggW?5n&+xBtq}2?`9!k6vVAQSW&11_SlfDUH%)7&V35 zQmi|;;v*XRmJ9TOvZcDT`mtr+WNv+L>K)%VgG0UHVZ+#ZuJ~)D`}*F#Z#Wit&>>ek z=9UT8>cw~SrTVn8v4vdmN3<8cK2fajeSLj-eVn+5m&+3CpeX7_b%BR9>9RadhA!k* zK1LduzZc94!50X24;He7Td((h9WRti`hDMwFPdUb@_-d9YrXLs&w`{3-A}lOjLM|a 
z!zET$+tSkUZc_p+NITO~ZMc+l!A`BFxXhCwR)%b`1*r`4Q{e+QgH3p12Nw0F$d*s* zw3=$oFl>E|97^KW>i|iulE#EG1d%gvWI-Nmm78~IBM0(fcHHzl*yr%vfzJhTyh#vy z!|kxWSj;JYajOyl{LgQMy_j<;GY(*tRp#(dsBu(Js7D(guJ_{t5g0YJe3a@6z{|j` z8>K;8aD7z7EZWhI#F^b#s;H>Nr3%fnxTCb5?sxQjSgy1tT zw#C|Sz8|ukxEN#4+J^qY>9|&v3d6IHjPXT|@1T#f!y<;{@*LUn1N4TtUz$o08p|l+ z1WYmCl$`;qO>xCe#iyBWYN+%HIV)O{UuRhjGNu0iCMrkP2PhBvL0+tpOfR5^nK#jI zg%<62&7cV-U|0cTDqISFjF_xwk{y=^qV{WWkH9xCjd^sZBHXbrw8`N_JieiL=FfXs zWb42d$N`OGf*H_UPm$+#NPl?+m$DyB4ozU$C6uyNdJQd*$lTKY0-hhhsSGNSeh=lV z3|39354zT;;#wpKgU_WNZ&3GQSdRWLgjY>scY;*=-FPKbUJ1W@UNva`BRluZeI4xF z{QEk!bGP3Yv2!=y*O{Gr^)hDXE`RmEYv&60{r}m{z2V+pv2!hVN7=b+fA@dO&VBoy z^S5(X+!JNz&T>cW+@I$EMmu--J^#?Z$ljrp@Q1V(m?BIph(`7c;WK`UeUVLR9v$LOJRO;o-cHo8!23r` z=nSX#U8?cbH>*iwwsOU4sJ?8>Yn20o8{K>K$|Nlqs3$GyFvszMS$c7wb8Z9Huu!^( zPKkg;A?^bu&f9~VwpY3ho`q)Ffcj z;J>xZWnklTD*tiBe=c&zh;ZN43BSU9>j}=(4bFh;P3SKhZbf5ffd#8M37z#puNcO2 z)k=0R875nzy6P%;6uQkO+TW30^n`m#U_Et$^;B)Rr(9bk?_!nb4pTq9_onoFkIHk0 zl|uFRv0nN>pZ_LRNDTKCNcCWY%5W83pnoKpq2FRAUOODcYsH=p_%HH=@&9&l7x*8J z#{a!VD*m-9{{LRA;$N#2N>%YEE<^mKA%MI%6F+mITm~Lc@r|09|A2Ut>57 zA2t0|;V}9D8LSt6F1_%(2!BC2`^&&y9OZv~gxG7XragOghzI2RVIi=P_iy1flKEm0 zX-!!&dPSg3ZqGVSZqN2CnP?m6Tb6Y^FlhvEn6=IpSgPf2s^f=@=7-#GDAkY6^d{NG zvkv`NcdRL|?%hG&j1v0a@F#0>SY zX-5`XGkMN_+@b%HTc0Rfen($=>o7?|sx7ckD_eGdgsUpIY6a0$svn~jI6LS%2a9EI z@4_*e-ejm>?g%AY(P`LuriQT$OjCQ$NLQ{?fTDL z`rS@z?efi~`r-DqhrPddir?nEmw`Q%SX!PlZ0p)KuL3M%LQLac7ypHtX2g0oj^<6v|-INKLE z9i;R4=E5?5=vF034YE|R44t<_@b|#m`N+ECS2N9>l5zSzUf51qZ?8Wwe;(C8Hx)oT z0}D+8daAc_>l2)uz0oDM@?t|MPLQj)^_*-;WtcXU4h~HCbZwi*@J>cZ4=-I}txc>_ zob;bHVx)!5U{w@2tk-#s4*$rI(P16wH5702_A5M{;Wb!yFHJ5yJ;Hm%L#=>o;gZW_ zOWOyyu6Z%)_r{&AK5KhZyGPLuv+S)15F|49C>^^sBkMcr9}oYrqyFLk zV~qYW^yv=zN9xmE=^v#ty3#+sC^xvd2fW{Lk;^^sEoU7aNJ$vH3~5BF2aGw2^(ehuRyTYEbWmq=1cD|hjr(YOJvLP zjhzj^oMB%!9TF}qbLc+~QAiySQDeRoU5z%5ndz6qmZ$%H3J$X_y-&4LkB9?JeQPh#BRb z>DE}4CcBCqchC?owg?!P`6mqaW8i&5Gyt|hNx#6S4)H5wP&RX!H?Z!ql@7(sb;zSe z{#|0yNktXgMYCQoFg$iF!%4JO^NCp{N!>{DFb+R^x#w 
zmSf`Gv_ks*kPeJEts(CqnjW0~YqH8h%eR?kHVy`7!3Esr)l|K>HKW`rE$WfAx4GF0qS14aZ29)x&IYD$mkg8!b!b0dcm`C4 ztyI6@zX&(CqGtXM=S4cor>qFDg>A^6$3Sm50* zSwI-_8g)fZ$!p3x+z z3OA%Kn}pmjR8ZLg<6mWz3&0Uv)?T5xS+Euns-D5JW#`+Sv4z0+0zYJ&A)Jdfy0B^f zu_kQO%u#KcCfkDgc(7wY2Pf0U0i1!RO<>|sFd%H!dK-6BJwN11e#jpUrIcYS6%3kW z8;JLCPdNjlZq)05m~{?uH@~qTE>Up-t;R9;2ket+ctj#FB14bCX)YNHKX&-qY5k?3 zk0WENTs6MbkV@u;L#_(x@_R(&>Imqkx&?~Zg7F5LBtTkvLL4`Mv zeY6)Xqu5g8xy`n|*$cKq*=^n=Uf$}E%fKXB6-pj2Cao&}+Mur)?@wCA%jFKa!e!mJ z{6m=Jaw8TJ_I$)bLNOadPDRA{dRmeQQPO*@xR12v} z8-IQl(j3&?QVV|FLJH?kkS&kBq1E{HU`M}!p9!GROg7;O&mOX6338OOb06!n-qjvl z4jrF#fSDnl;xqPVv^qoyk3vGbdc%BARV*~gmVt1Ev;0DnNCEzbiIvvV0_9iaoYl^` zWzvs+qQ+$FY0o6zHaRd+Ydy_5%R^%@hL26G#hDN$uT2yZkuB-$mb`^$&;_B7Y0NGauTP^E%GX4WW3vwTYQR@p11UG#OF^>sL#A(s4W5n)w`+Z1HqxHdr9p z6t_~`VZ&pwF0tA-FD+fONrP%H^M(+2NuT?$b)HONZi}_5;POts!h`4RBXJjIRZW#% z^qrwxTytn&XF29srCgF8%x(xxCv~d04lGH<}n8Ip-y4Gxr$&GwS&px9m2O&=q$tlF|6GQDF2Fy@xH~o% z^V>on;=uGMzXO%ukI%4Qr}BH`H2Yny{2sw_zMzT4#dF}FvH0f-{Btq>v0%_M%n0z7 zhWR`BmESv*-*TCO8m;{9Im>=;Q+~fDRD3@OEZ1iTeJj@OzAgice};k##H8B8z-cu7K+PYoz>8+7+`%;qv3pU4AJmpSnsd-}~I< zcd+t#;qu=++*x_GcqT=Cvs@l-;X56Q$HJe>KA01R|8Ph60b{RLptE}KM$-q)9bjX; z&fO%-8jah!-2-;?vJ{wUFeY+ec(9lY@LSxzmH|tzar??sLz!wAX86bkUy`R`4tYs} zwvZy-0#h!x0bUlDxjCCNEr;7s9^-oWEA>yM`n>}P1Ev1wsD7*47fcO}>bm|-8uEh! zBz6`23KEg{bhM}L^e;^1mGBT~`@w|n$biYbRoHWu^r);u!X6n@9u$L0C|g|rWV%tv zAczjsj=VN&vsleJtAx2V%q#)hp$QpHjnoQYqO^ps4&)M z)L*W<@R0A@v-j;Y>c( zA@LkOE14LXw~!V;esX0rIK@*=P}BCDb?lJ^-)=cerYN*Q)LNkBx4y*G8f^JJCVV)+ zWo=%{-4rlfLrL*=N#;U45Z)A8?htEbi|!?s;sg|xJ8jrNJ6J(mNGCy|$%~&;kdIx` zfE6(C@!YyMT`7hyFnbpH1MQMp^3?(ZXq`ROn_HhW*vkp7O%;`)T=9@L+1$qg~Yfo%21rhhb+1W`zCi>-WdP&Ua?c%=ylH zzMnJmJ$nxNr!Y03=LnVVsFR}PHw=Bj7u=q)=*%<iRQ9!PB0%{pD4tT;c27bs6cZR~Ca6ymJXqB&d5J2cR3w^B(s7=4Q2A)QG=nBxj z*)r7b-@@u^jm|)WPoQOysof7orQ{o!lFsrCA9t2<7S-Ru|CXb5zM-&*`;LBgQ(L~_ zznaA58y>pE$u~GPaq$rHePDWH_X2zD&H{n zA~)YK`Vu$au>Biuz9C=1%{TmeY`c6zpJGnF!J}9x-|%FJP`+VqX7}}X_@t=g{tir! 
zF7|ggbJNkPzeC(aPJe;miL}4K{#Y^n9m*$g`a9&uTK9Lj8Ohh*;d;23{tlk*F;ndhST5Sg9+CC9eg7>{T)VxTlII)gtqJN@K6D#zk^!= ze}9L4j6i>f-t(mOcPKC5z8f@!;=y#((BA=mdI8e}Tv8gJYcPfnrSArzpM$La;WXXv zzS>8X#$reON>o;RD zWO6g_Ws+ZnfUUh~lHb{Yb|=go z{lPu72Gt%l8`FGFrj;MlK1}@4f{f;l4lx_Si-X&Bbf{-_bg($kdn0k6;nbm_sQfz@ z(gItodpmgXyvy_Ed6zeFj+ovK()4*}UT6CJH>a-X^Uk}ZKfvp)9DTlblB;PgC)@P- z@JVs{tUtlg=hBlLeQs^w*XO_qqV@S-N0oF6Ep&!5fW*Jm*~aC}evx(?^Sok$KG8?LS+ z88s3Z%~^0a^wme}Mz+ECqT>BX{6m_C58%!%KPD@~SIelVN8htUA;i8Ys}DiJu33Zc z+99&~@h-?bOp8MBcsG+g6%Lb)qp3@)D@3QBcUrFFYyX8~%MT>^jqYN65L|xw$u^U& zISfquXo{RzRbbsrvgGKNcceQngDtob`5tIDA*Gc|7O`#i`l--Q~kqo+%)C@Y04&vXm6l$ps zYLE^#@Z`w6%sDb|bB@dy=GZ<@mZl?p?tY`HbQ_7F+Sq&Nb@V{>liDc%5`B@?_c=r| zNk`KHJY;1<5KKm#1W}<#SZwe3Lo%TX2<&2iAC%b91Y+(y<4254sjG z#{ul(WFouBRXz=-kI5eHmR13Hle8IHK16%)C&Xk+arU1ju+dmxwUPw(wn=_-jYVKt zd9r#|p3EY!r`DJS_G6^2I&Pv>%&OP`i^t#B~!9*JL4a{fy=^jJ*S|IXal+@@g{b znNdFe8ocf>$^Tf66~erX4&xfI*#&KRm6*p+^AIq9M$9izbAK=&B<5k%+zZS##QY*P z->bvs3S#!B=Idb2B<2y+d>PCM#5|Ik&w@FanEj~vD42bSc@#D81M}n7=e-5Y(4W{S zA4|=v!TbX;zf8^LVE&SrgQz(N%)iNu8tH8W~m?u*6OJGjK zX1(hRb$GBV`6RN|Bo8H9PorD^1-2eW%t_ST8_a!)SwqeDH(|2_F;A!F8(?lRf;oel zuYkFcm={p4f1m}w2I~>l7}LO#rL33aXgR@Zd6(7l3ajOH#`eo9 ztM&bscD#O_)$%h|%kLW4%Ldm&H7?L81CPU|#Xkoz4g7&_f)>sh+VB6n)p88Gd=kG5 zuB%@`-f1dt$m`ua9lsiM)gc3tf!gEkQ~YTEI_vVRmY*@exC*W+e4eI(jn(WN1onr4 zxTaU^bi}z@C$8vVXQrBn~#`!A)Ic(2y9*iNFVeVWA zMZ$RJtU`CiMcMNx`qx*f5B5+#8~nI-DD1@`52^dH-HHzV>oR;oh>?;L_-dfG|8d3b z2AAxI7~@kx2OQBO23mAN3*!$!5;PJ2lPNEd5tC8XarK%f9mZa5fH6i%->>G&MUej6 zI@`&C+V-cDF(xd1_!)OR5ULpjm}2mvpGlwV*)a)!? 
zguMsuS&@LS3HlgwkcPgEM0GqZrD#NG^ky#=hKQ1y)X-d^fuLsPb!7A;_9;hLtscf2Z8;$fIw~v3xSI1-JuooN=m}Y(-Pyt#R^J7>0W!r1;j!`1z%iv zu|k-I{uMTj3x8iCA}*MAv!Hl*4?xj>4<8irgB&PoasY~y=`1L+lDa`F9G6fg?n`Cj zWI1J`=q)=l5m3&@#IxnXO!O$XDHA)(L@?31i^at4UBE<-H~E-w+|Oa6DjS%HN@6iF zZEBaa;+Brm;Bi8tR@Cb#4Og1&rxnqt{iEa4ioQBw7XB=?sa9+#71oO89V`TH?EnN^ zcJd)`{~Znj6^C<&8x+A}U77M>~LiwiwVgju+; z*rsuzda;PO@YOaJ6#uggpt!M(4~mw(94N9f0g4x=u%HM_=mxF$x|lN2D3ytI#gvH& zJM7Fv?_xeC?i2|#(NttpCJKr~F!9kA789qp0257H_?Y;54~L0qr2p}=2`nZ?#&=08 zP8Cub{&iTQRu~E?4F|rnpH?ua{Zq)N6|DurEPPpDQ?1A>5Y~#n*Rc>dRR;)MsN+MR z@l6f_Q|1B!UhymhhR1b>RvcbLNqFgq#JEthh>|d5vpwSiV&Omed~xA&zAy{N^KBXz zQu9T`g&nmlDE8F?6vu1%pg6pf14UQ{K+!Xf1;vxG-Jlh_@+cGAr84nK9%bThb#`Xr z+gv^-&g2R+@pi6FnTXC6!NjVKEGB9;0uyg<{?u2x~?4dKLof$pGAY*7G5-eJcln z;c0-tuhA?7oTIu!D^_SJ36H!lF)qYuDGB%2+cPd87Cz19iwpa+g;`jeZPU2mpDiLT zELz8cV#zvyV(mIUC{}FdK=EWMKyf*e1;urBH)uuS0?I_bR3;P)C=-9YVP_@|X7Mpm znm^hz^Jm$c&5`ILsG?@H7P zzxkAg#Bc1U72c@*lgXzQt1^XISeR*3t?_|WI*7P zaV!MRGu@#TGhd}7eD|)zxG?xtN%Pb<7o`zM1>E7CKBSs0&TQ?0l*TUaanm$MKEfsV4S6PNQL5cxU>f%6G~zy>7? 
zf!)E~p%nqMDGBfGmKYcA&7vf1UvAI1fLNG6i!UxrnI+7^%d>167p|m>hzrjeSx^jv z&a$pA8~LCJc#Q+av3P)@BA5lmYeC(h6$8^L69e{0WTGXFGO>A?otc=L#>Yf>nlKYS zX*Olzvza28kQrD^JZS(Xd<=X{3|!4&Vs9KUu^@=WMCsTrX+__el!j5;Bx*%dDy6~Y zwEeWg6SaR*`LrT1RhWgRQ*Ejh4JpD}(M!)lpsyYf@YeGoFkmGIf!(oyK;~E$0>xvx zLo0fxP!g)PNsJ5UW>6CH_4bSlh=tG@d~sp;3}F`f%&=)(I51sAT)4A@1%(qCl&nt$ z9~8Z-IZ(V715ivE!-68?rEbuQ-;*g5x1}<1IGHk$Q)y==MkMnw(LY(32~(0ynb?#h zf{CkTEGDj#5ynhqd`$dq0HI3%91)R4Z0b71oL?r7Q$mO96qqrF;n7HgFIqjsOId{wxGyM|X!-e49u~ zm{uz>E^JGrBt+`$85a->{Sx`&f^(uU3qMY=X zB4a#2F?=)&im{`*K`TC=Oqn<>m5CLTDHG#L?aYMhWIiTtBnUI{Rf0{K$W9Q!M12v9 ziDN~;#8*XpOnhF!VPcXBnCL%>#l(=2UDAq=G?a#azAjNKbQ(&-p2POj3V+o8iRaUb ztMS4toQk)pR?LbQ)`|lKECfC(00hn!@F8%zoP$7YI3VCTl7+w^-|oKgIFIh4XR3EPNPe)3}frCn7G?=Ch!9GasP%FrN>KgE|frV?zN7 zXI~Z+j}7Yvt*DzwnW&M<#LS75iN6-unTd6NLsLWv@uqp=-*qXzKKus|Rfk6sD;F=E$fj?FH{b>w~+cpJA@*z#^C#SnIy`He<|9{ltE~+6r&l+OBY8%oVi$Zo5+b8k@@(;%kU(xybgX z3f)sFnJy~l+f3m7>=BGEok3MfN0lR!2=+xSnW`vFRhXtSTvHjTVGL2xj3Er^d^q|< zMw^G>W?)>rX)wnh9nXedA2q(0FNXRxD)F$~*tP!XbVqHqxOU`L;###n28Dwd6eMCa z=#z|ArDsyN1`)S9nrr)grd4g5ac$N5Sod}{RvFGUugq!h$^n_3xw3i-zbmElvxV}r zr9*yd%Ej=rsa!NaZ@qA@U8N(+JLPAy7=GeVxkCKBz~Se75uRlrN+YWeL&^}2KB5`w zl%LI%pUr~&gqI2N6IUjhpBbatyV9wwQ-12YiJ#$hmP!t->zJQ1QT&vN=4U8l#ZRY| zUbIOJKVOGS;b(1u&G`98t;MyCwZi=91o_d4=I6UGD}HpHxze|`8~7Q43OY51R8lwH z3!}oY$uYV<1Zx$ZZO~Y1S22gLb}B;{(%2Ap>uWk{D%@ zwwkI8jm@e|PAi5_?{ItMQ?vxA;S*E9L7W-=crWxp?d9>PqT}(2Rm{d~LOk2}vLN8f zdtzq1;k<4V;^Uk`3LmR-{*U;m zDiq=)s8EQHNZ!jJz{ka)2jk=C0wF$j6bSKgu%Ht@64yQeA2(7he0X9ig4mE4T@R%a zA?5P%cB<9c+~jK;v}8!pkww`L{A|MUcU$A&Q}B3?R5J=Op6C;VJ`8Rdpfz8o&^lVR z2+pMN)iJcJA=`uH_V3pSXox8PFVE}9{|R|Q{*THNMiMFzCVgZKzZ~+wufbNiL(>`0vH}*-hJi;IO3skn;3mkvf3E$ zfnDIQHio0teK>0RNBJ`Ps367|k0NP2s#CZcxl5xqM!Bo?QK-K0UMk%H2mEGD=jEV6 z|5WHdwiv>K)CT-d!=#`>!@N|^XB9qY8H2kDkC7gZ{DjtqYxH=fKe`s4Wbj@y~)DV*HcfZ!7<3lu+Ry#sL0l z^9+OQ7p_t2i9e8Q$d>+OOPEA7sLd&4^Jv)I>=jhuyagTl;|;DqQryJ&L-peSPk-!c z?+>`2ORiPJU3pO-gQpygoN9i`m5jTQGq`)7>mS1G%bf2ypL2e5&hLAeNu~Ms 
z6Rj_+)F4>ux2IU8cIilc-IlTH8zfw^RejgNl83TOUOQN7eLtuB=fN3NeYf|OQr}=$ zYR*Yksl5bBW&Y_5SG4-sK3e_KAT0>)Pti&m2qovprLA$Lk0x_4kP;9M6lsl9vrWE0 z5FTI|0WTkyG{!9A@g_0N(DhT|0Z$Ku-|Ppj5AXpPx?xm3Ru4jY3UtOT$fVO6W(R2v zOGC7VYH$%3*F%z+v793aDOuGADtTQ@N%2s&lKi2R63nI&Y3*|?!@7OCJVCZkLoifT zK3qiAc#f(ew5o*zAE|x3UgfvX%Sg#|F(tc)u$62dLMf>Vef0Ku>lN$vxtT(?&)MEk z)q!Cms(Nu$b){8Jv8hU9P*>>;Cy;jyu4~8(P3%$yUxu$G8WXA#!3$Tw3-#oOv`-Qr zswZAUbMORxs%M=ReF@FdcY)7L(I!QmVL#gRs^V;y$6Vp~tg9#0dVPZ=5RANDg z7IRZm$fw(*2R28smkKt@i?dPQ_<3*vIWY|`BH;oO7MZS>T4p@8b~LT4#xVYhj(NXP z{C%*Vx(qiM#GF)S-9e)*0lWnFXQb)oc_h1@1iyu#MU*-OEtJ$?jpdUR0bW;w$upS3 zCq$vQ6W|&Ptgo~Na6y3^Lf~};{($Q%wBDKsmRR6ja`vwaRngP`DG>lFaosVd!rGoP zeXK zi8soDFu_LY@IJ{!Vf0R@>;=qDs9=SJeUih&=$%k$A3JiNq;XAXGr0vy-u-5`s>uD4 zL)iNzSE<)(jTk zS2-e?wOuME8D1dWbnpjnyVRtayzNqFdZ6u6pSiHMOMQ)5Zs{e;8)gG|;kL+}vxRd$bLn>O@t@yukG+269uMBjJ)U@g zd;DA;_xR1@+~bY^<{tlC%B`OvUxN>Pbr97MR6JI|c!Ch+Yh2-cc923KIt%1Omk{|- zP3H5^YH5IPg@}*3djkRPpA>M^`K6;#r;5b&2hr3B4!<_=Gx@_ND(tg*jX{l)ARm^% zv#C>}qu%M9sNCmb@gdGV0uR^(&YfsDqRwo_Gkny>6)xm?&7Yz^KPf&*xvx-@8HFwD z3y#vjFYg549$LfW@$lkqOi+M%Tw=D1B~WYVtTvq0qFseScz_z6w@{tgf@io`CWH9s zp~=OGfA4By^5BETj<5|asMnNv2|oJwi1`)pDL8nDS>v+M5Cfsg;xF{7Ci6`XEW%osxj!0MF*q?{jW;E?GmlGt zzj#lGjR*3#$GU`J+CJTk=uTyv-dWawDNeyiZCPZVc7$-!V zkHivtB^bq&B@+kMX^#u30N3m7? zV7_D~_59~08Ya5x>_*p(A#`;0pT~->yUFO<3W31r+9)Zy=D1jV>F62*&JA8VqCSkn zXVWIdG-vpt=h>3yH)o@BPKrkn_HhjvVUuvsL8%Dq0=En&>Rlv;(5g}l6>0et3?8*I zr&vRUv7y zk;uf=tVA>>HYD=m>bhiZTLh*FQR9-y2(iQcfukN$r>F#@0 z@ziy#9f85zvJnZp5pOT701NW(Q#-@B*pRKX@Z6uPfvTI z(Xjq56;Hougz=Q!AKtHnRRT={`b0})xt<#MXNfPYS=xrLjhYjxq5eS1oeY>!bihnw z2h5x(e-dmw_F|{)35?N_WfCf-42EUQvkN6m>GX#6aYZe=< zdO)#PDFb$j>26ya{^5}fFPPPdH(gkYa@eWRtQJd}-t2*h9|v&|QlOx#vx*V)$XEt? 
zTr7f~5^Dv0t|@sE8~PGcnLC31_eKKx64UN_Zv2zQ|8<)GIZ6C`F#O-9`A2a02h#kz zTk#J>%|Um7)zfW4P6UOkvQzn#AFO1<) zemRj#`HmZ+{A)c$`Q&YivJhW`Fy39S$-^5RK;wPmP2u4k7$7#h+x!(&GwY09qwS4h z0M!xxO-1|Mhf zeeE8b@ASp?`M%_1lkX2>7{2?*Aik%@@c0(~h08Y;LwtK$D83P9hA(eD3ultbuqBt2 zss=&Kofp!WuSmhXPDx`vl>vp-^Dw`r6vONMPTWyGeY#QUtK;K_dQ^CT zi_$r`Q%&#PVdH*bp?%!TyAinI-h$m|g*;oOR;rfPYYo-JLX-r*Hqe?HCsyd?hK4F9Df{Jll_d&}Yvzzd9X61=Ckv%s%v zV1r*U-#+-ayGjIqa3ll1a3lg>Hj)Sa!gwzD7upc`!L=0l=<6hS`Md+u2CGjW`XZi6rd?L(42?Pn+i(Xo$q{%_UtLWSSD8XzB#4htX4PP%|5pxv z$vlH+;&{erE!qI>0c+?Deou)GddE$n7|F z24BiTFM%^Dvpz&k;ZEitqG6sUK2f>vxF$MiSs%DX0-QPkW>~Z=PcS}vyajX{g&HP6 zVYXz1Cu1~r%{<*n{|l;;CEOAcS*|0KBRaz`p}#`n#G3{}nEDrmS%u>`B|<+ZS_SI6 z%-c|qS?H3KRijL-x48Y*iP5PAz2kN3oXQ2JcW6y3Z!x_?Wt#mW**jF?z2h|x&^xqf z%lHa-$yd)|r(1G|GF{?#v~ADr!ceO&5oIbJ%kB~y({s(JON3SvT|#3TdlhyGf%%+H zWIosD1$pzi9q#nNzxImoeC{K6v4OwXUDkX~CpMo0-Qh8JXU*rfS=i(L{TJ+y`wg9K z&gcG!V9?%-KxkVdc%!{}GukPP=h)|a53|YlFM}AqQwAZvvj_3`rVZ!vt*J+Rca&3n50u&DE0*ux@-z*& zUJ76pMgwk8wF@Rh-7v9)s0GKQ`W?y~`-VrlUO3s3g=0>F?^45tU-Ptm_{|S7TkzGP z4E*F!1pnDk9{kxux$tXmBltVZDEPl!WUskp@lT`qUy#Hf)ra6es9kV?yFJ822Dqgi z;|y?Rjs(LaT`%l-{Jq^-l!LFaDL+iJPuZ{CCgs`v8OksBN0e9e=TXjx;8IrHLX-6o&43l_;2l3%nXW@;lV&m=i zlzqIBtv2z#6~f@n4ncS~h4Aox9>&Gn--7Vgo~Q7(l+x>I$$TqPsSpC++i{(PESDmc9(%@A@LXxqW$j_YLOqO)w+A z?&m1JJxk>CZGS=@Uwt#p_ld`Z`KC9E@zpguiZ2+y_ zto&y%k^cQz47xilw&VM$EVScH}K4wwF zgNK_;eET#x2Y0IJj?a?VhVix zX{-Gv^7&7r`CpX8|1QJ7j|l&}qWtg5<4+{O#lZKt>n!jaF0sM?b(($fhC33$pA2Ha zUkF0rYlC>;YaZi*Us8?0k2y_&pIn3q-?QBw??K7(c>mN$^M13pFz-(q#dv2lI*fM+ z9x%AkS-iiwz~;Rw**@>h4H9`@4rF-W2t>T^2l9B|?Z@T)-c`gqsfgnJ)JcN3mEZ9s zwb7L0cod?viiLJOMopz14>LwO9;lTL!p)$7N?UqjOfB3mDd2qML|a1G&c|0^v98Q% zws=9$W9L86LAw4RvK`=Qgl!K|%L8pH)sMII2TiNYxk%)=W9!kmswlq$nJgnMgColc z$pWqi{D?A#kfGkIFo)Z)J#NXiJ*JvIILEd;k_l03fVn>62_x(o9yxGBbW=S@fP1y_*g zZaRx^#c4L*!%6n}mYQwy4OTLIRZ7G+TFK)(Hi*kNzYOtRb&TS>Dc{EWTg?BMD37nF zndV#KE6n$XNsMoiN#^=HTa0fCnE#uc#kakP&9`!*eZF^UZStMq&G6ND6Mz5Y&Eq>W zkjwYlMa1{pe2Q=G5gU9(^WSQj{zokhSm7(;f7FU4NG&)1JO0NFr}-bJIR1xPg8y;d 
zw*TS9@;|%?|HF&#fB0MZALj}G;|T44{MKRrBTlCOah>6>6!AZ!v8o(`ycx|;D5x(^gpiB zd=tDx{Ew@m{>N2E`X5zJ^FMy)_#YD__#aia{SP;m|KUdXA8vgAqnnlgQB3$B2WbBz zw*&r1v`qh_lIHt@r-=ViDe8YzI@166!)gAR=&_CLnT^gqfO{%#`t%SHK@JJkOucbflknB#wpmEeC| zmg;}t7~_AyO<6{_TKL1t|M0N#KTZ<<$6nh1__?G0$7q@UM;XoA;40#Ol!^KuWe)W} z%ADqZ9OC#Nqb2wsm!$e1?F{c5?TGjNb{_A$ovr+jLc;&}nf5>Skp74C{O2f{`Ok|q z=RYIo2#29p_{84 z-PYU#Qw%*|dBU^&fu8uV;a` z4Lyq)J$e;ekM;f7%M6W~Hx!^lz{k(7Y}OgQ%km(6_x*E0cQBc@U>F!xJv0WT#^|rr zjKn{ieZEp8kTcL|7F(~cu??~$k15c`TEb{32uKJ+$v4;iE6~}_ zn7;WEN$}Au>b`;~|CDL#W>kVjOGQGX+rRFh;jS=|FA+~bP}G38wl%=c4DY9CIyjK( zVASb9+JLGe(0CH{nt$i&y|;<4_tuwuy&eiNy?@rzdb{%UUij=$^`7DCO%l)>%hx;J zQ&evpqjyfIwcbJU^wx9qnqWN1Z$)~SkSE;m1+8}prPsKLr`NB6=nwU@UKjxtb?|wU zf6{t)4xsc>r(eGQ5%sqG+{V#+eIs9Q#TPuiA9PlT>8-p?>uu|At+!y^BkJ98f~$9o zfZm~ey?*YZdWSQ5bv(U+qI!$?n$g`Zq1Z_B;!<>gxVX%R7Z(GFGx1FY3aGWmd@44* zzmbSdFTv}|EsS`U+{(G|=>iiphG}Rw;vjmyGoTQDx)4tBe9=?tT+loQ59k8k*zZG% ze*ROkByUtQY?8-6wRvHZSf9mv+L-zr*dSD&J)bl6Nrt&>oAsG-T%SOs#s6JOWF!ecPw?3H#BK28Vz^YH)C%pRn@JR>i!=BHKB&)P?%|leB zG+3pexCBaNz%4_To}ph9(iWQDXq`~7_Q#?&A@;1=;)2p&+Kk0#AqYGB87MI$4P zRNVHleZ>;=_H1cm^v++;*E{z!Mz5|5OYdY_Z!E1hlF(aM&DX1QjNaU4mfm02@%8Rr z&*%;A%+k9tkJ4+*qx5FxA-xj>^lmXel3r?lo4cCm4?$3`$2#u(*64rUgY1(nM4+>b z0&<4>vSe2sK@vAzVY{e zQS$dvBk*UH@>e-clfNy~MgETJB>WX^CvgQ8{08(&va z-}o9Io&Pa5KAMgHqTcbn8UH_j&z^rBd;WUIzr^@2X5(kG@k#ZLpU?Oo&c?sM#@B6< zjZZx6aZZ+0$VQ(B7s@|dr^r8V@%vmgRsQ*;w4yjvyN25Yy}Lq)1d34WWebG-@Ah)N z-xvJ)%j8{vJ(1Cci*}Pb-6_1y5Ct8nAG7ji1CHEOT|Q*52{W~*ve!i0tMn$PEhfs; z-WJoED|ihVyDsIP)DSoku^FI1eNHp?5BFa48N2w54svV3n16{Yh!$(&UoywUgmC$x z_&1XB`0>*4SI6x__$$_ZerX*&hk8UB{)*Vn!e5FWSl%)mC(^DN4%NP!L5G`GtY&yA z8UsbAS{X}2DDU1@TuG&seqZc(>6-pVtRnw`k37zgrf) zqwL?ewpWJ=L9o%n7wzNPM(Ma&pg<6*W~#6psf;L2?~6*3y81kZ46(S|dV_tHCgd ztx>?_Pe00`w|WhR$*?&TdJ|?zpjUiOgx(kDFwL*dDQH%Cmo9cLA+l>I5MEV8lhJm;WA}7dS}Q!%lr|p z)P#nM-+<3De}p#~6#Y~M*ZeB9G5sP8+vFU*BZ4{35}#T6eFX9=7XH+RNa5_s{qw~- zpV@^a2S-_Ss3$_Fte2b(oqi0gk4cK_H`WAb8v*70V-2_gpQQ+|Bo^ps(%i43Np6Xb 
zb~KrB$>(#J;}W2e$?rO0Z3wKZvUYnNOLU5Md%%Zh0s1GU`(7^pCj0JHZPL-cdpXu1 zTb#5SOuFu6NxJUw!uvtnnRu_H@1FGc4Q^%9RlD!rotyB=x64-8KQ0^6aIE~O1jxdH zqd<1q^!ZbwZVxzx6)O9b&i}IMTj!;^ZE;^&h83!8D!puau+YE1h2Q=$1Tev0welw@ z?)JM!*8=V;ZGhL~cr}-8_mucU*h{B5U429d*7QO~0jojcB{XHM<^B-{8^XBLGEEzT z7e8kZC5+8EQ^%(6RPVq`8i8 zNx8qIV`IA$B4|3Ez@SMwp#V+S8aZfoe?;p46;jYF6AZUd8P(nT!Ocx06g_WeH3%HYJ=#mg~q9{dcw=~^^9-sE2Q z<=$Z~d^?P?<9$j&4%U{|lvoScUZP+6bQB?7ebMhBl9s*626#!{yV(jrrZDpx1u0XZ zniYaHNhR6Q!nd#t>IeA9Dy!?GJ25CnpU0!-jlJ9rw zwMUniVccmOBnkegJ<5)yHc3`?10m7Vi~r3cfYHofc8)_UmvbyKOf`WG(@X0Z7LWZb zVsSGzOg?T(0HZ)$zp?g8)=Fsd)(jvi+NFqfV3!V{(c`13PUf~UmBG6-V*~XH+T`rX z`7O(d9PSE)xs466QRSIPTuTytqLm`U&BRQ-E;UmoVyC*jYcL}_RnPin%yK?y<-_6x zQ+2-|mJ-&=9ULQaa4dp@gKKIqKn*)20_vSZ7*PBn1)v~<)Cd>XKmY+fo7V< zl~&-g@UjkkD-MWq+;9NPan}Kb^(sFhw_dgdSdMoVQ0vv+?%xh1@c!trb+_6cy|9|W z;;%p5oIQ$MeQWH|N2?lWkDUE)1ABB~Wf1o0v9BfZkFQDm<7>t8VLc{aK90^M@sEjg z`B*!_&mKwrr6KzI{iXWs*Q8Hw2m3XD1;g$fZ==|+)D=x?zjQDEZ|qn0@?h=Pr_~|= zw^m~S?yFV+u=0o;fPH3T0Jfb#0oZALquVd2^Cbv-wddyT*WZ>knf=l)yH)mU-p5+@ z>xW8Fju$Jj97C%V_DgqIZoeka#B%ICp4zX0;}rJG#-G6vRTCG5G&Enp@hS8XJ$^{9 z!n6~;oXM5&7uac{pUdIrY4}^lO!Rqkufk;aFStYYFSt22+W&q;4&iYKwlU|+P@gZe zspd;ppRaal6U}E)pKn4_%@?meUsO}g7pp#BQBf1k7o$F3LQ~B*NPWKYCH2mi4fBC% ztPHqj(%Qc<*P9_>6n{L zwEt(^WFv0S{8nebD64Z+4EnajmXnjcUVM#}N3Q6P7BtO4@|4J<%P^op>tW5~_mTDd z%wl$#W_Dg3xlD5!y4>G|zXHqKmc{tAE%CHUxsgSSN$tfGXbO|F9avJ_V?JpkZe7pi zA3<)8sard)pjr>U<~NWalr!*}4|xa$>+!E5QWDEuZP=ZfG(@q-gIxYfv_5hrcUbvU zoMFHfKxeM50}Nb&0XxMUT7x;Lva1uV441#3d`|Bc!%f)<43y|Sd&$7ur^uk31ch0O z@ek})EFy<1%^a!Cy`dK8eHE#O5U%98=8T>EGmHz!eguO=>RP6=x|UtVV+ILzWSIsD z)Wcl6yv-2t^&yT_gYa%LUc>JuKMi>>o-ue1fu}BNJfYX;da*fqd9l9eJJ5GrC#*bd z4r1|3^+jL(P^asDf-6WQ41*Y}a*PdRdDDkva)V=l!Bc}>1w6k8x61QXhyp*XQxWRh zXt?WOVb8M@t~@%ryJa)jzOZyKgdbOq>L?pO1O4kdrJ(RwPh(JGpW5Pv6{9@wXPeGjQft$}0FjM`yB9G1}^XMe& zMWg6z=Y>pP@65ZVs;|u#YU?Z9X9%dTcZCZ28X3y;b<88zvA)(aedS68$z=PY7c3wO zTZ2;pBJepm+LVMdP>x*yXXRYMC91$3BnsRnM6SRdi2|R+{+izNy`aD;g1-GlRe@zY zh-M2UyQ}alXKJKWWhjrSk&MZ2=3^%F|0AlkDWHzP##k~Z(vj)&C3C9jc0yu$M 
z-Y%ov76Z#Mof;sV^CZgN!%x{e%PGu~XuFg~%Dkpm_nNs3fg4OyBcn zs_T2(x!U^v`pF7#Oj1Vvm^te zoLd-bwmu*UDpYLx?d&nkRUT47QtNv7IA|dh-QV^dT&2d(w^+k?O^{L#Wk`X$eMT{nD;}a2l(jTAQ-Zj(*&nE)F z^B+@!jYU7OQH1ED8!<$m+NgkNk1ypAtsIXbS~!?Objceshz5nvmD6tnd~Pjd_$+N4 zd=4tSCHM>}Yz%xJG5$~RX`UP`J|})A;&aMp7@tc%Q{eN%t#W+EkHh%JmW5;W0>evSkrq5?>t{~25rxuGMUs#MqzP4B)^3|K=B7giHEb^Z7Aiza30a`(T77*ab1_+S&^O5v44GEBQ!d{_LI7Qo$dx!Mg-`mDt zkXax}yF!w-hDg#%B-v9b$%&32B)Mmu#Q#}G{GWA7|EI*y{~1I4pMlK(c~#B-X+(K$ zn#AO};nvCXsY$nyJU4t0ggk#;Bg!*mEtcmW)+*%5ZIa7#;oDfAnXgiLjvSznC${ef zavvcbI}OMr2wC9v%o95IJsawLd(CmNcp5$pb8ocq6|f16ciRl`Teu#Ae%*mR)Y$@d zlc=@-O`;tIFa#I8NQc!e6Fsq!-LJ4;h-dNy>zmF;dG(2hEdzHJ9DZN0_3^&waRvBI z#rvNhdH6k&+pFmF>AmvLY1OjNmZWO5y%&>6?5|pM@bLCm({+$ea_x&cpDQ)P^?ZgS zIm5!IraN9l|4U0pxJV~nlk>TB$psOhXAMAaEuzPjre<)Z&u3&g3CS#{Hch!dmtb<8 zFuJZn#6tCsS5`i|<8|K~fQ3r@@=fVx*Dj;$xRtN+ML(SnXBlw!tF!d(SEqb!iE%Il zXVDlzU>3w%!T`}e%lYFCGyc{Lz9#aQjrikA34b5iRr%xX+WZZ-HxhpjiTn+D zUxPorI)C1Y+WZ}usL3DoheF``6TO<26V|01Zot{v;`@L1T9WqVv~h<(V>0;#Hm`nt zi2Q<-r_;A}GJNwUL|rF|rzrAOB=?Q(r}-)Yb5(Tcai)>1A(QJEtle#PvXqr(_WeHZ zpa$L$_%u(rApo=6ulr1#`MSWC_m5;I$>NKBWdAgifXi63x9Gy@KHr74kSeZoTy3s~)Uu$%9_uhrLG$Fz`S^3v@@tw|;#Bx(%dKg^oG;`*i zW+zDj@!SLt*^W*`qU1=ryj7&oL`MGyQ+v>e#UpZKQF?d-Cd;fPaoW|S`OP@ zLojUr(3iqCu8$&pP$7>}Re9)-s>oxkzdQm6gEn;qVX&sIgWkctVg+FctHs_X5XkXY zqypJ;w0?nnJvNX)?p`7h$OB8TKoXZI1d^~yE|6`5u|V?rP=U<+t3n{6e!;`ysj;4! 
zooHc(yfOS9&?=4V5QCywNV1wQy6sq{y4?UhBc)e0CLldw%@9bCK^M6wI*BnJEd?+K zqwR|xt)v({y;vo^hXc{G$)4Ay`Mfdoc~6cG_`Jc=>q_Z48;4#3rPnPmJs2#JuS1Jo zI;H1%yTN$jN(sR%_^3qSTqVUDp~n!I8i<~N7fM1CfD+nh{m=EI*Z_L5D)g3X(^HeL z`tvdbzUWWw#^^BwMl=pRhQQLCK=M_=pE^B;z&DiiB>B6T;wnZl#gTjvegj@3B+=3w zULjAo9R4A_Z?v$c z&CHdw0AriN@2JQ5F*_m!=jR}?x4(^K6*IU3;GrTS!~?The7i(;`iH2n@#RR!p{-5n zg8ZeDG+*H}XL~@7#}-MB$Bt2*WptqFyV-SJL2BeksD@JuIUYs$iS>|VAr#pk7uVJ3 z$boE*p>gr9$vBbP0l6IfWD>)a6yL8VPxZh^md+uYA2NzOYp=s`{{RDsDb>O!OJ7^E z_}bF^URxU^bo|=(kvYmnh_yyqqPK>~kp~2lX4s7fBx!VX@%AA&^0E@CR{SP!P?V2I z7z#xU33)1EaaMjeJt|X8Qn?BJ0%ZmT%Sv&|HLNIGs$uf~n7$3U5q*0mo9SDpzUoZ+Fmsy9?nJ+74K9_e_Yt$fReG3CRrRL)2%#HDAkLt5v z+6BbD=@Kgrh@UcI4Z5g&uuVgN zn1vN=370F7qfc=9W1*L)|2~B2PnlAG42`Eh*WRpOf941VtzO;ff(;~k$&CD>H%a8r zoOG#iRLAzFDE5X}I7NL!uvlCPu5SqSYVq+wIet)~+F_0%%D>PdUtvFYy+x(H=5A@J z!{(4;i^#{y;vTY?>YTysc=+?xG?<#B6-9j*{TyTI7B#A!vHM70ZVUufeLC_5gT+^6 zaT>Ui5Pa+a0YXSuetfV{fz`xcw;u=-o&~-_vI}|oaKRiia0M>Ocm19QEe(}}#2^Sh zBG20mZ&Oxwu;mo7>|o3k90}<#Mdx3a@1eQ!NXUi^90?hnOd}yq6Pk*~Xg3%Bm$+*W zZ>q}nNz?R6Nr48+YbcUJpgbxSNhyV-P2~hqT9ijEuR@`KaDC9^0s^IQ8Xz2^4vIQH z$LA;Ge5iA0cslc?1ZklvQ1r9FWd`Mr9*m#2aA$yma_(AtpObS=($dPyf0C1Pve()B zw|;9s*Irwmqdn*&GQadi%&s&(uavo%pF?F15M?grM}f>X_^|tS@FA&vK5SeR;)CuF zAwIa<;6u8^hdvcK%dfk;EBL@_z>Xl_8Zz5XEDz~0aFOh|eRnwV=Zf|@afOpP5xte@ z&$u;$6J58fIq}+bqQ7#R#0mF!1t;F-WKMj@5t+4I1SkIXoSG98sZ4`S;za-Pt;DbT z_4rY-Q131eORo()L|(IboD_!irLnEXkC{$;bj__r*1WM_Qs43rk;c3w z`6am~F`i3R52)Fsb#jLsq&cw)#&72%Mf2nK>PQdokfESOTE;d+gsJS%%5_%T)L1oN zrUn^Uk{ERgry>6)S%;r)B04;?NfYigbm|$k(@@2O1l){V$!RDzN1?;pgzsPH$@sqA zaeOz zulhBl)cify1VI7%jU2$oGTiF8w%7BmbEKoCX}2HR-=(S0^-qJYzrz2U zRrtTK1pU|9h!)_Lt?E|AJem4`TclP+xL>_L;a2fyXNU0D{Qj2lA2qwf_($C~{Qa{! 
zf`8No8UM~32>$&xMBv|tQ{(@hli>d!qb2;;jk>+~>t@OLzj5pQpV?vjzqxwb_&>8l z{I6ET`GvB&&wmE2?gQxy>ys8S2O%-3#G37kv?c9R4S9Eplm`j=8w+N5Z2M4QEU{zC$4 zGeM=zudkEiA5fdUP@BC#n}fswPG;Nc^pu3u`R0qn`YbAa&XV*gc|+N`qoS|2l`a?M zm!J;Or%2~SPt@C@#UE$1rp3xsl|kLRR@P$fS_1mywP6NjAZarm!bpZ}aH$G?8zrtI^rsdvCWCr|Bweg1Zpti@MX5zu$8im=c3H>mA%w4K2I z({RZ?I}cMRGIy1%NC%Z!8ZI-FpRqx0pXc(4%=d;%_PKB9uVtSp4}}!k<;7dI&$5`_JLFfe&w?o-b>=;PQ}(HwatG{lXK@GY^MYH};?HgZdguR)u+PMGYWsY_ zN?@OrA=&4ngB6N=)h#P>2bHk#$;k`{R%tiT{Un{KVrR&`Glw3g7KXDwOu%nm+C zmY(rJ+rbFUPK_p~4wq5$Nn%iGJY3_3t#$iDZV;J*QJAFBAS{Wk|LJUFh$lni&?`>Gx6xnCFgJMB(lHPu)8+myfMsy zlhYJ>{CK9eE!CN=sqAYTQ!Kc9w7y&%z)ERXBd-52Lb`rZ_%UG4rwP!qp}1WWBlqKqlP2Ayt#S#W_&s7mxJ+LwB@F#PBHj-0_tZLy6=KNHg%cITJiuG-L!TD$iKGE{iy9B~z?4=pHP8I3iYt_9C1 zC@+Ef@f6zSMQB(tpG75*Kjhx#N$^z!uK>y3NyQOI#f9WQ)FJuX*5S@zEJqQPgTA7| z+ul3m_5vH-RI+4{MrQRdVmOR8-Zk(>W#QI913_LkInKd75GQU)#tWCp;_yz6!yMS= z1aAKQ?^d-_)(+xDd4NbOCCDCspP~5 z@>PP_O|_FJ76(_93WDIWmV%pEGZWDmHX zbB4IN84n=wZtckK&7lRMfq}zAr?tbn>+Y!FszcO;S4)#E7L5P46aOK@bNyTJC5(j{C4o|lHVG><%?x}A6rcDtzH~~@7gES_{L`ud_V6i;d{3C?ZJ0h zUI^bMn>vo~sJuIlZ=5m;r^U$vB2I64Q#FP zAhCT{&X;pSJV>ny@u0E|9+)H^^loh|N`r5}2w6+6Bp1^e!Vx99kAZjKNFP#Q4h)qQobS5;3-RQxIeST$vbkbBWBRxe>(p za$Mu`}fZu0!(ufhi|JH&_0cZd(>?7PT^{g(FmAk3EekT{3vuFsqZKJ;9q z=EFN?qPs`BNqkt-MZt&nXUlxpJ)6j^pB=%6Pb$=Wm_lWC?;8%@S*N{X zL_69>n)$?S&>~r2B=3&(JrvEI3OdmtF;8qYrRFl^kjOy$KU$-qAuG@s@subKRrfJ< zMx-~ys1{U=?r7q}au5rgv9%94U{4IFCTpF1*FJJ%Y3Bd6O>8X9Z8w&2qotlfn8TvI z<^aLAXqIBew{#W(cj+umxX0q+g=&w*e+?wy?(HIZEH)SjxaU8Jr*mfP4`#k7QKRwr zUHum_Ty%OSDJ~Ni)H6Btu9Pf)Gh_HD$!ynY+&_-=lel;vYJ-=hwB2ahSiS-T27B_O zi>?V=J%7Y}@?6m}B#g^Me9zJxTni8eyHe-*WeJ!ccX#%h*4+hIkk!a= zp8?*2@u^W*-l?V%(3Lm|#w|0EDH1D{f z{B{k8cVtx<9@7^*jd`vY{?D*ZKL!54c=e_5g+EK_&vN)Wq#p*KS}7*nUOi-YnP%IAGXq zOJRIdQW@PbxQtGR3ZWUi+~m|9XUglH1CBe&0~5IvsGahYF@_bFA9u;F)frrghF$Q@o^5vO>YZ8e zn!_{CKcNZr#VRTc{c1EC^3ETUodCQOZK*Kpw9_}$l|SA8|Ba@((D$;YSrBZ8ba>YdRWqdJ&<@#y$)Uc!LVixt8=avVNoHzmlPfOQ3W8f3 zU}_I1pPwIx*%9&e=^M7vcHgVLbU^D$*q4f2n@D_QB@1AfjDBoU!I!jYen; 
zesu`Pd+;6$4M!%mLr|^I;7j6u-eU4DDD_p+AmP6(OwQk3JslYW3Vxgl7K}qdE+Yhj zXM!4Z3oUkVGtCaJGE>|QIak3g?j(|_fj>iQ8r=HJuYhaJ@OQN640}y*#qc9GUKc*) zkLO(zK0QC;Dv3MV>a;Aa2pHk6A48#@ zQO*-&TDP=t`Y*>qYvNy*L=#(S+&H1$tN$1t@V&2y$fU8krEnw4J(?c@CF8HgfXF2v zm`%?cire743=whSN81CZu*J)Y(m$r0^+%Q8yggvL-Lw z0V34=34JrTL)=#Y8UK6aY3R`l%156(a7}!~3R?I5Aao>Y;PKfGc!t5Ax7YHA$lh%I z3i@JKXX!tJaqBS9xZu*Wg3fIXi5;$ZBSs(A=Zaa5_mybT6DXJ>{Z}wLhDkXO#z5Z` z7q7{@$4Z>{ApJPidY0Hdxq^L@3t%MBbNC<{i&TfRHxWlBJbj(m_xVLJp>))6UfvWSzEi4Yu{okGd8vv#q}{Vd0~0JJ-lHXC1cr^0ftt^@cZX>n|>ObfLo= zZ7N7S0&=3c7Oc~}Q%#-R#SEL&BOaQz^ycw`A=e+H)ws;LWie42SO46{@vE2q!Eu@WyBz1Z=j#{nVn^X;#hJD|q81Q@+ys;E+?8kE?ynPqYXT2*| z!DpY!N)WF-#?*e3#v~qNXm~`R{yl*)0rw0}e?R=2t&gkIr^5yAL-Ohez?JP%AkAfA z>(jul;isNFNPsIClIfs_-(=5e!Xwoa;vG2CG2fR-(UP`AuX0GA0!DNpV ze-KY_Xk>ha&9*(qfF6ARXyWkGEEq4Cu$p<5oD1gL-h}tLhJq}=U_5G~{2t4&3rrSx z>N`uGl0RY4VR8Tyu+!(`qv_PY8D)|+fULuUaU&7!sgI^Ziekp=!Pj6$$W7ZGIzI6O zl|2|lir=D>Q-BGSfk_t|p=yNhsajIDAtsz0(=sHef)^u8`3jm1&js+B!I!oS5Pfn0 zhXJJi?F3!5I3J_q|8||80(4AI2C|6R?10=}V(R?^Ncjchz5)2Qx5>}?eeuy6KWQ@= zA${`zTGlx1B#4ODnCQ@jc!7O37%KQN7N{|IkO2_R27C4WL2;OC3oWnyBMa(VpO8)Z z#j8i_$Um)cz0QKHod$RCwRQTU(UiY+`iI4_sUKUCDVVY@S{tRh&n2zbqhM!r znsgI}he->hn~>1Y0^kBd0{xQ!_8X>)GsY(cmq;P>OayF;KuNsHs>C&`MO$s^CO6tK&$n$|4S$ zTI->W8FLjKd1yXTOa zitO!K)Xu++eQN*NkB!3B-rI}sus1C*W67Fh{hr{))zD>04c+h7(|piv>KKH-9>!sB zokhjBQch0|^|p?#6vGyKWextB?3HctpRJ;rhEt?cH$|7*Z8QtjS!zGh{W)#_?2%dO zI)4r=_x!UbQY1E_U>M)Cc=Lx)JW|k$@1%!OSlwIh`5t#{IptbAF4miJjB=gs&CcdL z`s)_nAsyRL_=3YE2aEAIhDNwgtly+Fu|M9|EH#%=a{WxXoZ53evwP%zEt_@Al<^(RP3m+U`(R1?884B`Ty@j zT58DaXMd)GR=al=mGnY-tE5+7dA&}<2K!8&MItf?}c}G@3K^T3cZct6SuTC9WyS;YdmhS zJ=UwRc_%iwXR_5k&z@)RJ-y04@6IfH@4HBsmf9QN;VqXj$ShaUbR#Uh` zjjFBNQMDzFD)w#qsM->v`e_+|UsU#khfc&IPh$)`i!189>jQjkTxaM1-FPM4XztPu zetRN@^-x_{)TesKlpOvrbH|iC_1G|b-$nTO@;+0&OH7{LEqy$bv*;qOWlEO4X<-h{ z)o zizWhB$ZAOabQ~k_Fw&n)Oy9Rd_0{jBX)>y5I&y-i>F5cb8z=g$f(FOyruW2o+Wu`k-I(`9F3nkL2?R?TGDB*Tj zAZ#q@=<(!0+fci=Uxz-A4Lohe%L`>k*!*^57OD@~k?}Wgkz<<15m~@0R;J5_`g2s;?&h z1zm|eOMF?<7Wnp?%q>u# 
zl>R{?omzw^ZjrBMbjC8a8@tdalk@UG?l06Q^>($8aE}ssYi`OwPD(Uhn`d zR3(;M6_y*^gEl@tsSp-mWOzm)S#DKWZhZbRAh8i44rs z$G#x%m&U$c`|i{vYrJ|kOzBF@?%o4kiN!s?K%e#9wo(M(#?@MgHrRq(LXi+1%o<&rH^LqvA*4`pKkhK))sY_vzt0iTFrl0r%4;s zH+{Exr*!`PzC4G=n6Q(u1D?FJAJzG_kCpao9W)Y1%7-3;0zs(SSTOz-#9FUc6SOQ&_j zxRSA4i8Sx8?OlnJ%^SD=(&bjYHca1LX&rK(Y=_n#>S3DLd6-q>IBdC9Kl|BKLt(mC z<-3hl*#<+fa8;Y9ZBEjio_mGGu(&_npf97lS(n{2i|!QboZ9ODgGFV>ra#VUp*_`t zM=3Q{yn5PFM@MTa8msUOVLxkdA6Ob(W5?FaGPgfg_`sO!%P z?{Pa94-&Gw@X-vlheR9uyAIX=lr~W6c~m^IxwraRLyA8~xg+Icx?)N8oBJm0FYn9t zU(oG0^o`rs;`Y~0VEYR>Z~kvdv`shnV7~?St91J}A->Lka2OwVlYxal#qW!hwGwg4=sR#R0bJ2g@{_C*6?i}6z z!87RS!>8h@eqI~@W^VTQgGNKF-=b61CmMZX{dVD{D^KJ(M_Ma(A*k4n`_J7pvuPn;!zp{Tf=gnVQg#Pzr|8>}V<IS0`zuQ_=ijQvzeJ6H3Hm?3r}2lq zak~8h{n7uCT`|K+{7ov)VU3=y)M!y!Dmqeh%9%_&-M>IM@>0Z%SMg5{HMVE-q z7PX0%ix!A>%6NVux>NL3(KVuO(VInQitdwnD3`cVw0*eJ_e3{|t`=P`I#+a>=mb%V zXrAbiGMNX_eWKe%pA~&T)FoOkdcJ6d=wQ)2(N3AqcG2CUJ4H8(J}liS82JCc0U4z33WIpXmLf*NWOj$BLRoCy70SIki2mwD|Qy zRQ%8&rC$z8O>anxYXepMGts*xeU<1PqASzN*@ntKk>j1xzI#Pm2c+iTK3LY19N#56 zSdO<#e3G=kTy(dj9~+ojUxgeWE&7hM<4w_k9DjarYPnXa_i@oxVz*xU^Qs&-NxVVI zUnM$Cbb{zWY1d&Xw^y`Q%2$X^5Z#+Lj?1JTx9F>qzCqM4+A8U*;{6e|iTx`?U82iH z?-PAM^ik0#MAwT3M7N5*BN|E@_o&!CA(|!qy*oaxGM>+h-X_PlNqn~SXQ3RwB0k=6 z6?=U|2g>myY2!9Y;-f_^qQ#=MqEARW`$&9q+InbDi~Gt9?p{e6v}Uu0(=`6)DpL7E zCZ&E+s~oo#OFa_ziJIiNr9_TP+$kCr4NJO9)Fv7dHIzy{q7Ko3sG*MFX|Sxh?>ljPt+y=xp!lC~q9Qzj`?+b!COep`7Q1vz8l6HbaBg{WQa2+E2;XAwl z9VI;e1L4`jgsn#iJC6}|pCD+Rgu`8UhqG4}t(M*dOE%%GQwX-cgk_@%e;q@3A1EFN zoJ*(x=AH*kLO$Ty3sKBQIQ?QmY%*bg4dKH}2|K0|UYkz%a~Fk=>8 z`@EVk_8LObb%cMlVW0x1s_UD!2_5+<^)f zqk^TV;7(L<7b;kRV)vj}BZ@T<#xxVguOWi|eZzY6>N(uK3$i|tNux<$9+|voi%LosTAT0e2;qT+J z)j~9>#mydw-4XjBc2Mkf*aflQVW-5NhTRVP8g@kNUD(yIe_`iifQMoC!h47}6nhzV zCG2O|xftMS*nO~XVTZ!rg^aySu&-bT!QO&h z0Q(1a3hW_x+p|w#N5EczT>$+JK^7p)p`oY+AO}zRfB;|{jw}RY6fl$%tbh}60a3s( zg5W!Y5CDQ_pcq2Vk+`WDg<_+T02t08m;eW0u_E6XlpTvKfbV=nfglh9qJVoM%1%Ok zfE$PcrVDTk@BslJ1XwOaOMx(8xdVwrOrGW1WOyiZ9H^4d*i(w`z1uV1BX21>j06!1_fISOcO?MCT#_umLV000cQ< 
z9=ZTn0XN_S{6G*00a3s-ANl5^xeHJq-~&Q{VIhVXaNLL;)qq+6!%fHx1aHEaB4oW8 z)_@S>cW5(UxCIRYe1Pfqhyu<<7%;#M_y9i;0D?f66Y#;R<90N1F=h*J0&c*v1jhgy z-~e2J8}I=EPFRX#cv--`93}n$Yk+A5Y5`n;8wdemz)Nn6G1F!%#zy-Jg-@O7l>>x;Fc1X{526@gc@V>i{~lv`2;&U+A3`y}@i1Bn zxB)*90D^#F6&m>ndWm0^3<17Bq0-eTix)5afMYFc0m4sUqW+91;0MBh!H*J8;uzq3 z3O0Z!V0aom0)p$1fVB*%fDee4p+Uou4{!mNa)Jq8p=~`A2|y5t0+tGd@NSUd zH|P;y16+U)2oQ`Rz%UYx1pGh{2#iAY_`Y@DSx7w_b^#yY2LeC{2m_AMhytc_(3x{k z7Vra>F=zqc1YCf74C(_yfMF~e4p;#n5ClR%6fjl75a0%afMFbp0am~V1c4|gjK|;* z3_*n469~3*(fa@A?o5E>sLngylE4B4Xt;#|(U@IUc!L=(BNiKtkdRg~iv}bk0ZuW! zGrhZm=4j^F0|aUbU4W2AT$b#VnF`=w17=*tM%X4a_hNGlSR4in1B9R?jx!Yq7D>gM z@89qLW@pBd7My}qWvlx4_5Z!^eeZj(kLe2mlmTNpf&#%a;0zQyFf1T=Cej7mGZ7vL zpM}`~j2Vz2L>3qoAUG341;S^;4=4bUS?~jjfHw;}A=XN>;XDVE1BB+nc`nNE)mLC1 zvY&^90AoJ<0J95gK)DMo&O@d^Z~>w$@b44RIpBp6>U?aJ^U^ z1i-u=g9ak_|ACi*&<)51r~rWtNC+rxL~l1EjoUFVcVLu&dnbnVcaV1>(;omo1nx$~ zFR^bti~Qa~8dc=?0rL9@E&haxiQ}c>tnt{$f)ZfN5QJt4+_|V>cyJ15=52@JQvMY2%MFuy#{5g1m)FmS%Y|M5oH~kT??#7 ziw*Dtsv8k`6UuHBgl~ciVB8`I-ij#S2EK!!x54jDbaWqF@Sjpg01t2}JE z$)WQ+{+s%KL05Sz4KmjNL<_e5sB`Q{7Sb)0vE0G!Q`TOfh*ci{p=zK2>LMMMS)cftiIejnN2E#U+AAUqHTT%ZJ0fWW;N zAmH4KX~&xJBlrQ%eF$1YfFGk7-~nYIbU%Uukq6-ncpjoWj%Gj+C;=W&1}Z=mFrGjL zfC&VFFc1M8zy(Tx2b6)T-(sIV#2Ijb0Kq^$zamfp%t?X>;O&cw{RGBj)B>Rc;BufK zaFCz`1P=xd5tM=Op}=8+D&R~(yu%@Z$`?^~1j>#?8DLCBJRe6RyblvW0T(C(<}qjs zctGGwhys*>@Uf@>LM=!Q2)3dO2p@;e0jCXJPD5nCI3A+}N`MChzl<`#11f;=SLpHt zggO!Z0!5$%ct9Db09C*Uq6J_AK_CQ#0T(Eq4Chnfd>U$jFc1M8zy%6G5hwv3PzEYM z^>noDKwF>y6oC@p0cD^9Q~~2mqz0Hk5D0x8n1QmHaG8Y`vk_np(202S5&2xKorjLX zNb&;oy9iM(1ePH3#pvh~^!wLvz7(*~Hi~98WG~j@2(T0Zl4zR-E=PbY0^|^&AI^io z6$r2l&8|i>7tL0p#VWK|vxl5`fZ$p@VFbeK1Qo!!7Ef(A2&#a$aS!~v7kCi631vX> zM#!7c0&s7}paJ6+%nRTG9$?&xG9U~@00(e^0#F1zpyE4kf&4DkzK3A}#%;(R2mxUr z0yuyRlz=K={w*qi@EsWM_t6Y+fzn-w_XFT=gue$a_rm2qGy^;zKSq8(f#3b804hNE zr>F&jKSTCF-~l)Tp4QbHaG(jzyYc+p~Wi*^;;%KUx5x$p{|+4nT_o5#?Y6I0VfOMF60582X(8d=c_U1el7omObS-I&M$g z#0df?;IVm7P@F9&0UltUBM1T^pa_%z4>0GT76<`Fpagh;F&B{m2Pgp^PzHjXXaWWgz$s)B<6k1bE-zdjbBrKT+>3BUAZ)?Kow> 
znOd4k52Q?cFlsxwcsfP@9rO=0`FC5f2H(N^ckO2-66vU=96Nui{7QVUTBNNU#rJf_ zDmzTS=J&9HCcn;~eKJ3TFY`KouKc|Vcamo&V-1-pd%#TCsoq>4{Ij`CJk@LF(rGi1 zPW2k)U9>O$LX|sBT13(rH!PiSFk_Ycv>>Ft)z~lwMk=V zGU<$zPk(6FhX6Adb3%G#VzwQ{;tRv* zu@YHZ^2ufLcKz|xVJtNAMyh6GhGh(i>N3)Psj|9sk@(m(o2`{E78R@6R72e&qfflG z&D7P*5f!glUAjT#D2q!?>w&h4}M1dS2+H{@}2j;*AqMc{Tt@*cW_bd_w3ScD{FUj$S+NI zJZENY|F>zhyvK?sMk_|k?Q~C_f9F;eSMHWnUQO2r=DJ3!k0-4wn^=AexAU#k*Ed?D z={iHlAAXR(n;dVrm*bApZw=?Kx&Iq!H!}XswCq289k)08H`g=H2F6{`>w&k? z7S_9ET^380OuMYhVzpzvTh_Z}y}QiI8Q%T=6xe&5 zn)khwbtWns{(`cRNy-NIQP$j3+3E!P(+0*XTiBQNWAqz2l~PUuJoM<*klk*yr%88pJv2UONw)2G2eHk9llfApY0~Fzp3;){s0vp-&-Qy z;Vd_qH$LIS@61swKZxZ=wJ94Ci*wCPu{g=h5Zi<8Ar>D_9WGWDCohud}{+uFq zeck@oL+f0p&MW$STcFk_+#>bm)LW@1QkSWZq25ltzwYRIrT3-`Q1?+!qc*9hQ(s2i zN!?2wpI0~Q)JITdNcLG)OS(4I)Cb`sE4Vq*7;NKOTC?Xh0cGY(s9ZFwf?c3My-GELewj% zmr(0t*P&iby^eZ6Js#>c)FtY*)E@Ozoj-L8b(MOO&bOfSm#Bl(%c(o5uceMqU$66} zzK+_ZK8(6RJw@kBeKhqjbw=k)y^4AV^*ZXkHYshzVi`M|HG8aNJTYXZ(sGBGcbs%4 zr!FmZeLQ96ah<9)9Ibx2)wH@3I0x$M(r7xBvZJ}$b*Yu<%_r?t&P>F!xzWeOqvh$m zEIno{owVX9K4(^a`0b&4;tAV~+1Y3&?&Q*$T6t2OtX_2n*4NWM_9dXVw_T~9_ zMnzXlzUrAE`V#rx8-ner)_nP?w3t^XH~$;OJy4F?iVrfYX5@kGMz zwGw9cP|h~9GC(^OMI#xzBeUvc;&M}B#?#m_a+!3(OgnbQ8hwEhPvvAFc}H&`eP68i zk6Qowe%ZGDDVB)((Y>nAnwD9cJoz0{`-Yi~U*V4-W#vc*ERiouKKZ%@6NByZ9ohRJi zApui1nc8ogzC?al+DKd=}(Q`ch@!+_r{uT8VF7`vhCy)>(6j@D4Vk} zF}0%~UHN;C$|D_(<}>y6`BZJcF>`WyuzLOTrPSvWwX9|rhI$QF{rHNzc_ zJ4-H}w55$)&)9=Gm6Ct@F;EdR{VM+tr;VhQQfX&#^!hVi%YP^JjqPw$Wb1TxDA}D( z#G`(NnXvoq1h$(T-TC?x*FYwX$6|Umb3;6@=kXsgqBU$5Vz z(m%xd!WjOgU-NmwiF^)mQlpK~x~vaA!{>L@p_3Y|b5dh`eO;}uPxW<{zCO~|Ir@4- zUuWRD9M`tEKEpMYzBbXuO;{|Nhy%%^4lLG~Y+%Xpo9J~fZqpZ?w~ z#^d<)^WosLN(Y&b!EMk-zrgvU<+w(3%WS{#|7QM5FK|Cx{MpW*b5Y~`xl5GY(KLTM zI3I<@jq?|Lk$#sn&R_YreExhB=fCMWUgf>c zhS=_X=C7ZFvOM%S?Ms~Z8+IP2G{`)*)BiDL%WPNKMLYc-sKoyCe%JT%?%P$aQvStd z%I+A$zxXt_(=(i(*OY&G{r_%WN*rgA)7vF?uRQmt`+om+5d7Lhu11Q z<@GU+`)!m=zt(QEHDWG!CZdo-ikgmtqkd)AQyJ^* zQ|1~?e-j&}+i1%(l*K=rFi!44CK{8Cqm3EHa%03888HS*qI1?vvlwiQw!me`GbPXg%ah#w8h-I4u2LfngC(#De 
zNUWDed>&nX#|rd#`0qyW?{;GxzsBe)qd~?wy%CGwynD#dFT@)BMxU!@t`8*6}~= zS+474C|_swP~F!*x>391%0JYZdZs-pmiqaq9n-#P&#<%4kV^t>u-#i{#DqzciwEgF zdK-z&T?%Otqyek*LXsBB5tY3S7w&ACH_uPd#@7<(a!rmqmG1yp-KLcC@sDAkwd3<`abKR2i8`sCzCJ(H><;%aH z;&A?F;_2PByMDWGaL(nqp<~T;(Y~^4Dt__a-pR@zESpKoRr0;xf9LOJ78~v2D(8+4 z8*C?>y~g-x`}P!Wf9zbZ=f}hPI@_tQt)CP&-p34mbG+}LK#%-Dmrojcj2^TnUdq2-y{!YfaL_BYzGW3cYMyJ~CKJ!A3H zNBS*3{{ve)I=i|94+cZwo^20Bwi{El^RAp-Hgm@G>7}L9a8dPTD!b&=$3*t(ub+3# z+^es;viyoUNZD$&&Vr>(UuM#UV^2RT0ieh-rC2GMs!l7NK4WItEd7^|rt`-A7xO;h z^LqU0yqh^k)aLClpBqT!$(zr!xrX`qgMxg|1%Cdpkxza#B`4opg4z}>vXRz)75&}V z*4@$J4Qtmgv$gneDlC9A0hD!jSJ>ppML+ZNV>wzdv0&XVTX=G)goZQg<%=)#Pn`E+W>77lv-9kf(bKvtej-nOklZF7MR zQ5t7LwIELHRrBltk*zXoo&Hd#H{9lf&HKP~yY2pJSv&$>a8;naDr)a~sXgQ;4KHKw z5o+&Ixjk?DgeI8)(kU67GVy|SM%ZvN-ab{BbbF&(^YuA8Z^(AvrUl;n(Q7Bp=R(zW zw?pd)@#>FGE9(A&n(ZMk&h>|SHgEQ~`SI}3XXk8TI)l4S3x;jsTw?w%z`VB2{w_aG z0n>S>PH&qJxK<0|7t{ipHw-@S?b2}C>V~=-AEBPh05Lhz7JgP;a!WH}Vt=!~Gw6-7b;tqrH*+4?u9-opQgYy3z3;_M6%LRnSWP z(54>N8iTL}BU`inSjURUD1I-*aRq(eLc6!im}qYdLW|W_Y=bIX5m|5UH*1~VfUi5K ztv7k(mgEF1APi&^mzvi_G&+Lgu(6WeOi}EY>gJ~33h=L zR6=7<5PI^kVWc~FPmlhpAR8xRGI+JWYpWh8Mi0z+w`i-{&HNUB+t$YJp04ncTDrf| z-=VGQ(w~Nb3T)Wk1f6$>rdMRM5GSU+W_n7Ak#j+p;}2=|AymPPoOaUP)lT2dSmzD; zN1y0e@7>BpFdrHwUkQ12Xj)(#+SqL3V}6_0tY57^i-1dg8+~@j$XpR=gZlAr(@0Bj zy9lDo8aU1IhQ@<@9w3%eKTN~u4pJ)3J)1iH;qm$T?a0U`jnMhRfZj}BbAhUXxUZ(K z$70RE{_4iNy@64*i2mUcxTv28x}wFS=YKY1O+Vp!$#_2SulTtAvT>a&&^MR=$y=+0;2Q5~ZGg1%bA{|YqvT-p*=hW~%ohr=ew2&hvy}L} zs_QF-xIWP_2kZ3zaB(bBdwHh&*XiXPFxH;?^!0J`LE4^rD@X1BZF$Uy|DZef`Ow5V z^LjyFr%c}M{BnKMg|!u1q>}#MDELhPseF>b(|NLJ!(6a^;o1qQKTnSJI{-SioeF+i zKqeu81SUPCoXgzWPm9*Y>Ucn+BD}-SRTU?9vQ5Eu-h~s z=0VHP;C*?r8BzHM#{0zq1BVa01IiD0QsS2mGcSzcRj}=X6oC|fL9B6)Pue2fe^G3uQ@3%0t`#i%dI!?fg10ZE@TG3zGVWJ~#mO znqHK*!zhFI!?vhTH*NCTVc_%S7 zl}kDJ?c4Q|xI7-%7Jpe0mdUiqi)%{4!vQ)jwEMFBIP)=h47VrXV%p{% zXRepUd`2La8Ndf5e6Wlo3~qfz8CNH!EnYomB>tno*#q_*ugLvJ7(Dh0KMqkrJM+9E z_HSPblSa?7Na;K8VB^JPF%{ax#6GmyRs2NK(wp2|CvO 
z3Vw@6%ByPs81CLbIbKcyFYA6WuL$V$1Kzt|&MV2_*-7$>2zd2NcqKr``Kp570+RA_ zGkEmX$?-}EcsXAc^BM-7LBJCdUT8DR;1#bG#tCin+WVk{kM$On_`tsNHF=zR89V{o zg>o`|@Z#iqP0Y&yI^=Z)zj-9(Rq^^5-t_w9csT^T$m?QW9?)O$MnJDWq(7=D+*;v0Uj6dGUJtDaO;7|9fwgt zyiy!q_}&D{CPBvoejEqn=YtUjj~^)Px(jXdjziA@ah%j!;ra*glz@-fFQ^B{aQnf+ zVuZGNe6lh=i&fGK_T2}?cJUmhkHHfM`E3Yo@y-u>4~qR~pskUVg5T&7`o}U=q{eUu zY>POUHhJe=BqioyhuB2{_oU?Y5@+yKs<3*o3+g2*;S&R$ibD#1BS^}}euy~_I8+#) zn1D~kAu*pU==1~Ldq~bF$>75U^2rML^htS*E0-zHG+;dod z9K;!X0Jigeo3?nzK~&;D2|DT#1>eb(`nMfn%6)|IKPm999udbg!eG%nIqPzMJUvIm zdAm9msPqRE(23LMS%KZlzJSy;?5cqc=75g8-@&lfj zz<-v()nkI|eo^^3{zvaO)z_?PH_F3=PObtxZfz?L~Xt~ zhS}bf-~VOp=Un$4lN@KD69BtSZ_4W>%HVO>PI2VX7We*to~+f$KYp?T4ro(-iy3F$ z6qg5Wqn(Pk#@oudegm!;7pFkZJYVvFy%@;&ICAEKa{UaReygx{gSL6?Cia%(I0hYC zTETZ{g?7=VBh8G1X?eRro4j^oON)7AL8l+^-n5tpo&RL;be?Qh&<^@Je$C_9Sr6qu zuHZYvLcb{II6j7ZU^{wz;`%__y!x;m&(&e$W%|S8SQf7s*d7EsiR1F)EX&~P+se3@ znYOt7-|_MO&32pt9XHsszdb?!$KZW$7Z$gSAZ~7sFY~-;kj369?7XO|f$t|N@#B1F z4D%J}$0hJn^^UlHdqHOa@Yp+iyC$DBgO3!*r&qvdK*A>jI!*5?_%6DXkB7md?-qD} zQNX9^T`?bLBeZ|O6BF>sGI+&_!uX(V?)@JLA0O!Wz`pZ@oKG)<_ZP^=C*b2dA?Y7L zX9V!HgbyOt_r`E1ZWq{wOdq`a1tSt(NzjRbAK!cOIQ27l`n|$9B?WPcy(i{lyB+%f zlL~$tK+4B)lIj0X7RCo{bNm03lH(q95`affiuvGo^-?T8sE{g1h4x3RFH?%;m&o&- zxSe0mu0ei3iMc)on=Cgr9)sXJ3Hp5;U#2X^W01kE?|-v+3<~0r92D1=b%jbi;Lq{C z{5Xp+ct30xde6!9!Hd7=eKD^9=!^iKmheKG)>F*&?y15!p>1Bf9+B|DHiYfPbBf<) z%q0!B6Ce}i$g#Xr44y5JcUr(ZaZ1eFzEUO54;1_cgp`k)!FxX_@V?bbJ|E`?Vm>j@ zNdw+L0iR(8w|-dQeJcTyNz5jVy(m#Ms#Tf;^$sy%qKf_%2 zohh(?5b&uuBj%F^oqoW3&xrY8Kc8gq5!kkUH2HoWeewGF{(Rp}{CaNsNIdR&7;LiK z*mzWd?*Y({aeSGw7>_iAtC?>W5A?;0$G}J8`tpE|8~oWb^5ZPP;PK4l#yyW9{_c#J zR}yrxfTtw9=={&eW4Qg}0?#j-1oJ-t%CLsGQAu*mUoiDhYRG5 zwz>EJKNjefRE>_n2-HVX#aqxBz&;nP(Nj^`(WGm>E!zj^u_Bp zvU$Eeck=5w_9_3Ez}$WYn=H4Sd2{eh&MEjU2B9p*qvG5c?mG9);t>_ZgParBR}ysM zfJe^BkFx;=A3is^el#hFe_X=Lb{8D~LkfNaLdc8Wr)2QRP=Wo?UHo?K7!vcrHiYdZ z&f$}D{S3CVAd?dC#)Urre4ZJn&-3Sn&=&8$XjZ~Avsfhou-9~6?mx=l@$>w-_KaY@ zKX6`h{DV#w@RYxgh5eXYk|&`SU~x0gtGJhjocctQQsh 
z_Jx#3)x|Nq>7twm+T`)DUL4QEMvU_qIJWu#4_xHiL=O1>NfIo6Ng4O-P}QZvo+aWf z$ibX3VtM!FaWdcku;$6-zyHCO&$=4gKXSVyFJFwoGq9aMN1|RXY|ezYAO~LgB6*z5 zd8+*K<G<@11TH*m9GmX|NU;7QodpUWJ+T-e+uZ}AAq=g#9~&Zp;>FMmFrJx=<; zw&ODx|2`9QLU|X1d-7z_hPmX7bBtUCwNpfX4tXQ6{j<*%8smHZgpdayeft_hmO=Up zNH0J-0qL=8p`UGmzRB zDde9a9fUM}G3Y|-hV=WJ6|!@=LM}NKa%a6lc0sZ=DCEx}eF^D6qe2ctIu7Xsq>mvD zL;4ERe?Y=YvXv|NPBckC1M*Y@9|-i2T_Ho?B4kbT?ag~``f*F~y*pO?^OlJ@vqHds7W`J@$LcHF>A#4{&V{ynm15*|(vQw3fX@;_M)NA|cFOU^E6ltI{Ws?G zr<(iQ^^Xg<;MxUFIII2b-mtfFF>NR76#Bo3s_pi+&0FBSANH~3R5z|^sdP8iqs$=W z#fwJs=)cO1xE0!_jl9*E7tPIEi+O$S9DDfsIEm-vt)f2qbMvT=L~b6{9mvUZQQc&2 z9+gYw^W@}h zp!|GnUW+al$jQ^?B9K>g8{|cFj`5pHh^@rjucY6KNG>iRzEee{TGM5!tSYG}8f_0X zw2zpr(tg3nqu)++Lmty^YW2=TB@fIR&09kA>?-kLS#|UDo#;V}u@7Owcba2YjqcNL zY^0%|ia~!;y~+5O9RcvQ6!rs#kpx`h(z1qvZI$+gG)|Bgoidt7dFR~skSudcETp8y zV!wD3)H}q?`4jj7j&qBPNO1!pX8zpLVp3XLLP~2&0%jim&oA6g!gkXk_(`Mgj0AL* zRtQgnr5x-`DW%x)oH;YQT!aPX zrl6>(Xlh7kWL_3;sHvfOL9+k_gF;*pO}iMDnU)n=mZlb#mYEfrmDvqVOUn#R%Ptz( z&B}`Gs;RxdGc&s^>@F-U{onWH^ZCx4opa9foXc~bbI#1{G8EZ|jlp3eDB)&Ju0OG8 zBS(mnO&1}F`Boh61FB|=wtk{@rls`Iyh$>P=9mba^K2A(wz$jFvxitkM0M~V7jadn z6iNYAewC&Q#l|z`1l>MGdrMLN6jc;VQLuw3*g+KRAZmG7WmLCjLrVEcb!ZGp1I??d2oU=Ga3XKSK3rcG1PI*ZM>vwplE3laO0sAx_>r;*=i7tLl{ ziSZOdfm_WsA+yC_iuV6I<6{u${Fks>Ax9ILsV<^q{t|^a`7JAw#1F&a%D=?;5oy(2 zif8fKc&IrJMS2^OWzH69uceS$<7KIkwE~A7ASkdD%d^a0<9lJjF-;tLi^CjoSR@V) z;lMVF|K%9cLe+f>WC_0_WF583`cmk6Bgom;{w~UJMqCfo$9N%2(9X-z#!l&?|37) z$J833V*?J)iZW4qu2oL8nn7K8*^Q z6dg#>*`X9YHiV*K1uIcJ?#wg!l1XwPsU49WSMFmjQZWG3yim`|JBZ*T~RYYZ2h4AIo zvKHHAEfcb)7_uTAvNnqNEjYY##@-ewG8<#GMGV>^8g0=EZ4v2ci_b-#Z*cfgGt5b#0`3VkRXgaTlPPUFQlEOv*2BLohC_GW?-->F#tgls^ z+b;4P+9n1E7EK|w)?Z2I44>B*0d)>_XW=euv~-dG8j;`Xw(aMhGyK5<1o*XdE&M@k zJ!|ZGHi$II)SB7nF+YqY^eNU-QAi(^hHUzX`w4xFVSHgu3r#fUK>OMaHWn2n>KaU#X>Im7*17#xo;TWO-FeXmto9mlJ1)oX z#9^<9_YWM2d5>SqlC|}>*51Kr?;r}^iuIq<$%u*j%5}9G}~&6q?-v zcF>$cPX}myEjECe;NF(aFM5GzPq7470jelH9ej10(;5ER_7Bo5ACX$8}i(5>kd@MVs*}QHaY5F*^ zkpCfloKy<=-{Wu=sQ%lwV4P@EOm}hc9|7l;%ZI2>vRdOWQc~gnaA5u1uCJ026`z8` 
zP?08ZG2~|Q0vg0>?Q1g;t&OET1b7#0Oi|WQg+iwU*_verrUfL}?KS{*>kl2U-!ZWF zFf(kzkHVlg40^*7Ln?*5GNJbs9Lj}WZBEK$t7H}uYJ#1G!_Hu9Vi#bH!Ag<-dmR1} z`u!6|h@T}8xz;gt5_MrIjAM@V$JoXCzp%!BOq&B^8S;0=p&w{rwnWC7&B;R|FH@+i zHea(+a}1*nW=nEtndU1(R0_emKV(cZTVQ5@CL>$On2y7IpaxC8%xL{Fq8w#vAKph@ z%S5|~@t@1!Pq9aZ9Y(?qF|TRtssbwgDym1a7%O`rc9R$@2NC!lm^FV` zqV@9>&0A|eyCn1n_J-Qq{HNN>oUERs-`SHC)eh}_qPA4S|I4cOzs}=3Bak4ghM%?Z z6Ko4}jA07}9>N%|Lr$)Lk`vSTteP`{%~*;SYrq;ksh6V)Nq2~RXAlVBVmydusp=)#X_5$*1umrCEYHHz)X@R# zf^;tK4K$ zGnE8x)MYNjYwL;y2;2*t))nQv2j{<&Av>&w>=G~f_^`bm#}GKD*xSju9*NooEu6wE zJ7rcyBh)nW-D*4k%S`G>X6R@Q9pP?uB&)`FOFoukzs*(PcAN8oE!jM=eoinY;EwqEm3PQLk^YRo-~3F(H^=?l7=j4{^} zfe|KAHb}(V6o^nnn7$p`CqQ5~!_JqS}acj0I&5Cd<}9tzS1Ix#Pafi};`} zr%|=;#q2j5{dq*lj;t{cpA;}U5{cWc6K%2NA`xAww(gS9Hk6?$5?0nsLL{wj%wD)HSA z5bdY?ssu@_8OB=BhQU}jG_MM&3@#Tlen=SBEMp_FW{IFM`+h4^Z@Vy; zt2gYw5JNetgP-0HZr14Qk@R+<|5*fH@pt&j?6vS=yc^?~Nb6b~eC==es-F;00&1-d zgp>+RR!QgyR*tp7A~gwXgOzlFb=F>GsCWJgu+M)8WCqyhREbqP!}7pIm{i+_@G0uV z04CY}Ye&dz?N;VWi21=jr>?^XZO&Y&&6zP~OlLgV#aVplDbryU2NW^9<6s?ED3a)7gynfm<#`|f6m)$FV8^)ssd}xd7?b7b6zuMOi1ng zXPVS&och}HyqD0-u%*ioNN-ka-qUTV#=Q3|V}8sr@r_gMdbE2I(eUA|us02N+>;PG z9<=LNi@^G3_W7%r4`%EqR&zdit(oEP_K0*tjlF9u_O30-x!flay-Uda6#*$otieAJ zYw%MzTq_P{vFfw(TZQ}xGsblg#&w_=*OehQOQz5jk8JThaOe{x#^8xQ#u}bofIY|g zQE1Kan8vZxy~g1XBKMW&H{*omtjHk2L%q4-F%)@x=`(g*Q zYu2pw=umT^B{#Guvs9U~7kn<}qjg)zYmzZc*u7A+H$%u@*P-Qkc0M zYo`=hUEq+{3-Y?Vl{Xe5t{3fo4FbzSc%TrnK-O01nObYLRjQ@0lMPQ%sD0x4r|@eS z@563akICkA|5JgqOE+up?wuVDv7`Zj^o25YZ2*3?&)b&s4(KuAx`URRs_eP+ z`@GHI^8>MN53p(RW1Ql(d;Y-`j(%}|rXvN*6r2kio?_f9#-70+dju%;Ljk|r7VDWxbZ-{&&& zgATEW(QPgSHfMp&1yP&Anl{J7U_Z@p7!DHb>+b4R_DZw4?2YP=nApwcnQSeqy~N%k zx44#4npv!&eP~Ctwe{OHPVF+jyY@bg{=B9c#z~-lU#n2J-D0782@c|3HV?YCpE0j$ z&wx(XoKZFZA7;#pVm^HxAJko#NcZUL3J-JX$uMILFXqx;MIJGU?sQu7c%Db&!m<1m z{=6FkOT)!H+M;i*c~toHoi*oC?GpQF7?O4`?5oeCt8wnNyRSZv{?yk#kES@yqdQqI zKYbp(JFv`rFKfLxHm%+Cz}9}j<`JzE&EqYTLSvX^gKXZ%!Y4(>uy6zqvgoIoxi1U4 z)z7?2X+4se70&9NN6&>D^QaPmO>;!8c~8HoWS%JI(T!@BnD?|jO0-zxxQyGO$bLx3 
z8jV1OZmg3dYcRJ=! ztv!u76sr!*p$pa5WpkSU9yxGCXx~u(WxUp&og$6-%MSrIjqCoh1x0Li9e3I@syH+J zNfd>juD+Kb$~+>{u*-1>WJT7vr&Y=&#>QKqH*B_fyo zJdYCNABU(YagV&uoibUqTRLnU)8>#`_b}% z*M0{|i_aDH)#g?E^IgB{=Zdb+adn^G#*f-eX;l?f8Bq~d-rN?N8IooZ&lXx@ew$MB zK9P9lP?vr&FY3=(n^9DDAVrN0peXcNRAE@9{`@YOVzPrMW^6Nx!Q2y5Xgt3&p8aTf zzeXPT<|y<{6xup!Ojwovd@l%VlV-4ggvV5`P4x1#pcWqNuJ*eX+gqyo`x4@P2&(xm z#bJBg674Hp`nsu5UpG~?H16G;L4eKF`d5ner`RGa0kbjZkL47bBvWiv>q`B(k0_T} zdnThj|7xY`&(|+QfIZ5bWshiO$Q~!+25YjN$6CIKpN+#UB0jKcgm`wA&Wz_vfzSh8 zg}R=VBJH|X#`-Sky#ZpWTFch8_y6oV_aVS-Oq(0EcXf<+|D5!n5^4Xy;UA$tXn_?L zeoE~5Le(12skP@{(Ma1ChfdKV?F5n54>gKvZldPczdJ_1{rxv2OCEkigAMyJuM_?mxQMQ)bumIyfN2 zK-`{cad|v&Jj6|8hCEMl{#QM)q9RNbaiaSE zTYyo&qe9L%IGl~qZ1gj*;WVzAJXm5ifqXvamWNSTZ?-l+B4RKNp)G!OAd|7fZ!PR!RJV=&S#YM)v*4jLpOx@I>NaiMigIP6TuGtYb9-(7)5gf(69NAr zG>tvPhp5|)`}g`Yb?yGINOuhmwl?;%JLy?$4osk2)&*Hd0CNAH;L0|}bK`Xg@O*ki ztQ$Po^6oZ<%^XDFL>t{^K565y8HH;@?3}}vJ;(`+HROmJ!Chi?Ic;Mda%SpdBCEc! zHP$XC3xPsIPF}SfKV8l_Z^l-sUDmS*ykW?Cv07G&J|3dH8C$39vd$pj-&U6;wROnq zpv$`87+X=8w+@9>HLnb*u#Yb7{(oDiv6X@kLD$ntthJV!mxyuloV#(7+twH-vk_P% zbi#mdZR;2m3&fz<#H+MHQET2;NyK zs1U5}v4_@ob9$R+3OUnpxC^XoC*}q5u0(n7qRyPk6EvZfxzFhngKEBC7c9Oxv`z^u zz}9B2xhGTivA}Jd-1&-fA^&|Gz5=|x@N+`GRg-VUW^U+xnxlhWPR$NsCuO{s9uLvUJ#iQYCbbvi+F4KXeEZZky1m0G z9gzbUL+V0x7k|nBQIx-xnOb4Z)#^}0Raj;7a{Im2g+lf+95#TL9J13i*=bI)pKb4G z_A_!|DP-TLzQhmtZ-;Dcf7t?S#pV=wsQMmErI7s-4sx7IadGzcvCDRDpU^n1eOkpi z+UF;G`;1hA9)Y&`>Mry3YBLtJgGY(|H|i2yeg8(QOIn;!munCxh||YdPMo9Mqm`ib zTDed2boB@S9b!Kc6)yG^VHAF-dOfSX|6`Z+Gy*#eSzD@Q?UVgiLe^n5go!3_Yaf+% z89yO#!I1GsoTHEOnE&07F<$-Dzf9X3Y475MQj4)6SdUr6`dDkn_zvR!Hx2_kh_T?* z_u5Mbf%8R2^BrnG+RUSZ6bd>~(~fcKk8^ZMNAdh0hi5w4+c`X4b95wMu#T;bZQC|Bw(X6v!N$%e8{4*RCmU=s@x-=m zO_G=2dGEY`>Q;Z}_H=ihs=76euhf&WD%$Rj&|Cxp)HX)C5y`1JJIC-t@<^inRo5#F$RYb6-hyh~R9?Q(CAY)d+zvYF@&(>vON3reO;Z+b_(J zZHBtgou-fO6ur~}CulXgs={Q4i@J`WE588Q4Gh3}Iu?i>A@IKl5{A}p1rWIp|wR+O#(;e^^>bS#kNq5((pRxPB}2=0Bd=D6|@GrI7nRm9P@&sNBI zo(wu8Ils~bipJg>Z%>Q1TnoYW87|4-_j2Z!MycKt-t|l2Ev1og>zC%6u!b-;HJ@&W 
z<}{}$T4VS%768p*y#3PC6lzrSKj-Iys`rd_h{Y?WhIJ<3;WPiePA8ssVW*9E>|#Ng zGLOXce~(m__G2Fymy!aEmnW~U#?d^xvo5!^-CdtO#xA$a(^pq6w>GLUk9b>O)$P8P8hL6Ff4F)H zlQgSiEs?S;XQEVJcWNGozsy%Lu6N7-Ab|B>eVq?}u&Lj}D-d!J1`3gG+7XrKQ6(vq zg1-ttEnJCd_hpgaMhF47h_E_qlf=J;xDj2E`wpEn-T4_MNYC@|60bQ2C4^B3LKWlZ z)k%6Ls&_y8wy1X-%PZuRON@(06YJk8_z764zqt;{-be3 zmYcMGXQ}?GoQOWUMjG#I>rH#_li5=Hh2y@}64}2jyQ(7U~E&5Y;Z zeM7P5{8qU$frXKmUA07JNLMT+T=>@qgR6SogM{GOq=MU)({Ot)TSKhBYf*TvyCTiU zQ;IA9EQ+NgaAjQmpj7ZLs^=QaFTSy~_B!oXD)%LZ0Y$HX0J&eXR!i!qV#;1Z-s~I4 z8C|nY#MQ?erxD7P`qO4S-ct(&{|?2JWz{wuj1Rvn-^}Gc5V{vhN7E?tpT95)2|f4a zUfyYT(rMcez1z4_7*_>Mcb>8M%a7|SGj3-YJS=}2$$s#uXAfFcu7b$?S7n{iy@?6o z^`}+o9=$8@Ct|32hH+@OpU{|L4qEJ=%Z6)8HmHV+(q(85kNG|-OoMlkxZPN6CFceKYni&)95;Vg;5(?VjAw&p$0 zLfw=7xrlRnAN~Y<`(%MR!Zg5O(G!ogWCJkl?+~tGyAQe(OxZje#5N&Y$qO9Av|4A- zJ1g16c}IC>*Zj1`2pxB3mi85g%@+HM9s%ShM@c5PKMQ>otFsJLTCvYC`^gNgnXd!7 zog&PrCPnV;m1NZ;KeHGOLTyF<*0)e&tSrOz>QeLUr$-Z}DRE;hEt)g?iy;B^-K+=S zA-0{90U$v9fX<>zZV*wg!@E?9PH6zXq1`^RBKs$y0{83{;Rc`ZZ^Ki5cRp}5DdJUw zjPV!PE`tdcZ(4y-Mv~nrCaTPEhMAk(Fq8a^5J@&e_S69{joU08^ue#}T5X6^TQmt~ zi`YsO{Z6sz;bUVJ1b`@CIao?Qf(pbr)KG9J)K5g6b)^M5n34#Yv`{ypZ?Hg7>iQrv z6$m*oJX%;RAv-|%&{3uQ&l(=#Cv0=96ER&UHtqysjYsS~Sbk<{UD5LcE=$Tk(|%$B zLnWjeqWgHodE?=M1@S!HKXQTJ+PM*4fhvn^?cCTdBOz|b=}>;!5>H&O9gL6`WYhDR z>~b#7@BG>H%nx=?b!f6YF>sDa_Bn?2`Lv=xp23x%P1-BhC zh>>Jvm`5_lQBu0(s--&dDC}bF^$>`=099=9k73&B@1e?(os8l_Vt)H z{5duJK*I`I`;RchnQv;CK;`lAnnRiF^Sf_sQpLH=bC;qF4>Wrq*0LxbEzVMDt_{SS>KoE#%d#+05-xg^RGH*n8fcPWy(D1kg8(WOds0^CRSIj{lkzwWvVKB;TY=LC%Pie%H;QT& zE7ukY<4aV9|7*#M$evI}_zYnn8sQAG8zPi0!Cd0}hD3&tVP`G$05;;thDe6oK~A~r4gjw5Jz=degpd%G zoHflX0QwrwZrI6$t{G2M(kZ1-#J~O1{?EAFZ=`Vgt)8-9OaHJ=7a7zd9IqJhMsg!x#< zdExO_rlXlq0)9{~#1aNDtDcY+B6014&m%;!mXZCB!8MWkCm0r zS+zg%oq7=N$ju+gH!s1@zcVz?F)pw3C{XNR18KSH*CRXE=@+4B2uC7@Q9TS!4pA=VRs-+2mvLGpZ%?()oWiZAVQ%Fg@#wD zz<=36+mOzkF!qvKg2S_DU_?!q#FN#N#pIZ&z*G+5dJ-;)u6kEj<%Zg;AOX0=59hY= zs++heE_H{5;+pdf>Y;mIpog3Wxm4ik{Y6Q-|M13NqXR$x{QB1dO#A3+PEAYruza`o 
z(-fiU9&5S(C8;L>f30~C#uyauAU)cG@rHdL(kMhbz82qvV@I#@;K-#l+dFHmpB_h3x4f@46IuRTPki-QcZ#j;%&}X~;^kc3ah9vkq%u5s40mm}JhsTJ$G$0o$->+f< zy*5l3+ytE*^#XY$lK^w)RXZB)3V)kIo(-c#-u=z0iT0n5_1t^;68OEV6xUR551~74 z#~{|Y*kwL?UX_Wqj2RdT8a@O)NdzyNAeV zBjE){>J1T#iv&-g&MHSm-}C-st`wd#y(FE4uaYs2S{}xYlb}RCa;65Z6;R1!(c3Iv zcbvij9@zlFXTQ)TGpTkz|NVr;BF0q_nRZiENLYc4Ln#U;!tR&iM*`h((r} zvV3$(jY8jih#`OYj(kELrFoJ(cmVVk=|U8DYOvmJdy@bK4 zlgyjI{`*({EAS22^Tp=>rXA@nt!MXKo|pdpcf6%Z2f+hZ`73Pabdl0$p#u7&I{Uf8 zK<*B)%h!8-SLRv}GwPkWzefmoe8TlDIY>J+3%vPu;^@cv-ebUDO49z zcl{h2SfsPM3Va29X%`o$POg71^?xdIs#8rZ%3janUFXTDlesA*Ha8aI?bR%chRT0c z6HYZ6A=8Uke@LioGz3GZwdL?r;tKmnFrHGKiT-$}nL8*MsyM2wBU=9JAk<;`joKqK zFTy5v|T<}b%)pq|ILMaF?7na0V5FBv<8>bKN{^0>W^VyGq06!Y-4bvQO2i3XUQq)!l(3Isq5x*P)gTmmS1D4t=TKeWs;OO?JwT7~pb19otx+JYR9OdoV1 zyDa?3hkbK?L01fkMWs_0)C{Yl72k?rrP6@E6y!i9EE4HT2Y|JQ)6Peh%Ny$5v$+Sa z7pl36k?Slp*qE96TKj_=9bf$-3&IhVOpwzKJR0CJ*|$%*gbfJUbk*>N@P!@OEC(Wpx@vY3cf)-^>E3C0 zqlRu3110Nm?mKl$1o|Ck-mz00Vd{cn+bTH`yP%I zf&-!B;q6Vho{5JxYSr*PF?}JD1vDCieXm(Pp?#693*-z3SnJ`lA(z%8UV{&#Gyu?V zqE%h_B!plQQJS5Igs_WL^g>|%U=){HCv46z20o|&l)q_}$*=&iE+5TIWMMGbr?0oe zm*F-cx4>WgQ#u0~M2U5vh#^@*;s&nrb>UB8V1gB#33QNKgPg$iOVv1!p}&U+3@~it zp!E^HgD(aSl3s&1Q>3FBL1^ZWha3l}AUp=mk$OV|!3o+4s$rdix5G66VEW)bIwfo- zkk1&!wUQnEbTb}U=g5Mn{~h(=Tgqynycqr{V|#isD6puWsh$8zFzhyH!Gz@*aVX_)?f_vu%sIqXYUGvdK-j>K zYYrJtgA(z2WDtB}p2~@q3d;V2HNiD2+;U(mWVmP)jV3;-R5#Io>FYM+hXT8;3=(bpOwpOWjeh>7XsqTUYuG$o-=NM2*00 z_LT2IK3}I+FIBQ*0>6x?pg-GcB655UVUI@$Zn~y{533b8nEw)GEmDv4H9d_KXC3h| zBt7jLtUuVvn(Fs!I_sB6xukyEYra1#L5)HGM5(ObD&^GsF}9UeuJO-N3_kc@3C^Kz z3Fwhw@V2S1NtYo%pmMf`-0LB_QFZw25ikR3`c=`oF)rX7p;>~azdma0CPiL{yM!d@ zKkNTwN}&!c>%Stk1Nu=?;9)KUkyR0K*PetA`|nC;b^@U(ws{+zH z6I)!UpO9S>n_LxkladI6#|Hg*(&$8Bg!TcukoH%B%Y`O^Jn?&GVg+AESq#3|W<7QW z<3`?ut`QEjS3wbHFv;7-LUTqlhqi})8)yq8QELcSs~|~-i|miurkQz^Nck#$mywl% zAH!9dSs^e3(d&_+hz8tQF{)um0=GoxrH{4a!5tyvf?S-56<%@uQ6vd9ZD`X`QbR6C z-!Ue5wr7=h*2I8T*H|VZe?Ww;sI=f$As_!VRw4L_|9#oUSQLjB*@a&76)E}^{TDQ| 
zP|yb?YKYJ_&de@@0jzciqRGZmIzg`(L7?T9+0+}#2cn=SI`>15oU7O~YB^8(HHEK; zjvvCu3r#b;Cs;RBEX3UOb={XGV3k6Ojc2sCHJyp?>w<-X{78?rqdipMP=bc$>Rs!J z2q7H&B}jr5w<%>lhz`KlM6r$^VL`Z_2!t@eJbFW<)DV&Wg@A$Sz}5hBVMvi+hUu?= zst3Bue9tiT0UDif(NIfYafR(n2$e077fjGEu?=ValeiZW1g>Qu3hKl4*`5p-?t-V5 zgXze}^aYy=QA&((`EnBmw0?zTsF9GDd`v>vhQ^4-evW(ugTIQL2>Zdmx7n_R+I84w z8L!!SQ7jfD4g0N~aXj%rFwEQ;*8HBtAO!DBMK=mhXag`U5ez0UC-C7dNr{1;ZO9ox zumi}dfuA$NGm`pZavT_bP!Pci^WO|mKcL!Rv&w%DOKJxmd_^BlGZLFBOgo|hwrwHF zz}}=HXYKEZi7!re*tx+3=D(8vd4Y5xlc)$QFT@8JlX*UYMbKOS-L|?jO0#1OZ#?`Z zjB>C?zbx9!h-64V(^uL+{(v59IA*EG^F`fV&9nwfhlm_t2wW{7&WGxT`2g# z>dLSdNM9#6iOoHPxd8DGYO|dBC8JN$tzB97MVBJxhM-@!5FJxfh zm^I#R*rdky=Q~HOJ~1yG@Kd};)LB4%Tp)RXh|^c(V4FUFuE zXJpL4789Csh#l=hUlS%?}^I*>*fgbf9c0y?H}|QFELVmAwhcz8t{&IPKBfJ z)T=N+pz7DH$HEAHW%k)M(kEMt7v=AtA*xIH4*e2KW`Y*!$F9yk)UQLpP2v66OYMfVap9wvI|Lw?2$9qBWmeU<9g(y`Xg=j_4Eu#v^$-?5H2^1Jse& zsob;ZHS}>WK+<)98SNep1lbO=4)rUrDgVm$S+^dW5Uej4+ioV-guETT8vJ0}Zo3V; z6~+kqTU;T?gyi@d*B*BQ`mEn)8x>7IvICkB?B;C7f14uyns>eFp;thF$x(S}MDJl3&Y@mv=2D=rspL48TPm~Rvb`|^{;=+2v^sMSU?HK(e)DA=4 z6Vm{B=>yCDw2PnKj(XDXcU4GuRp4)O>pxE-cq=?|2aZ7m&EpsNSRvXMC<|+fu^l0i zUMI{sOIN?kdN@x(OS6os=Noz(L?hM|xAWEy&-zHaNVKof$@G9Bs;uo!e``Y{@BJq}+zOE%P;!PS7co(SZVNH&zo+nJ_C)hV(#!pu4i*<|p8ue%gA??y4LkE< z^-NU{4}wVa4?+&q9?*66!dfNnMwkj24;bc!sS9+?|FoH4*mf}aK<*sy%ZF%wCi(!c z2B!(Ug&pB+$FqkG2pke&V+qtWfqlkw`$T*Imlj*~8s|@YpxuA|*8Gm+4=V`034_f(M_CW9 z5j0N4F$tsJkD3q3p$?;i92w5{k&nBI}WA2hqIbxrFD>5H<9$#D>pHV{rK z_ghdCbS1&@j+Ce{3W**3_LZ&iR8V@qQs7;F$%+Dzm@+H20pe{xk~5+XL~2lqiI69L za*%ev(lv@Fi7%Wmv~jTdHtIDC>q`b5(ok?MDPD0PnF%E>YAy82wzml+Pw>mOfBhGI zdfI@}HS;qP2x=YN2HOPx*YB~-bB!7gIe`hU z7s5LOZ2-Xxsl^Hv4P`!H!HTsGhA<#l54Mh+8nW=U3-&o!ml!lJj6YNWB=H~qK;~`q z8B`?Ly#Y*TNLq-t;4ijpK?vfnLb+`K+!=}xaw{-zz&J27V+^$!yhoHC7ZIV~(1eQ? 
zvTHz9g$f8;u$fPijubgyZbI`6mklj8P%qDlX@FcBVpdOR4}BHP5@ahTpf}|6iq4i8 zk@S2NfqSNzeqCQB7+%UR4!xs_yz zxnutGhr46%&Y)Wxeyka!##sgWVqFc%CL5Fu8CM(P4N0CkCO3x3vbFMb?3oKUSf*kD zn{OLZQ8w@!icd3YooSavZxx}bvo79&#KlA}+qC(UYR-%HqV{?AFyqU6lJU^v>$Gm=GDfd)4 z`E|#3uVK9= zDA{CF+lyr?EfOaZ^Y&SwWrEZ==wuL58P)z^(sQNh$(X9OW0Fb9sjA6Wq+l`)%hZ%` zt5wfy+0wM6?G~e`NmnFcvbd*q%HYTvmN=?;oS4yxosv<>rOC*uDOaSqRU|VSQa_|J zr@hMfmH4X#jM10KKo$o9Np0#mCFN4W1cuHylNn2fgEEU#oj&=8 ztQ64RCf2QpQ#+GpgK~p$gM5R2GR-k5vIH7<0VD#7pD5WRxTQ7g=#g(QZBQ_!T&7$m zU8czuTLN89z)!?%e&5I6*Eg%_Q3;XPq%O+Xl}MNTD^|0~xo^J@Z8lmZ6`~WOa!Afi z<(8e6$ttlbIVwIXVYi8D*7O!zrS75aq3mIFNHa?KB~vF?CtD})B;zD^093L`yH9P_ zU!~|_>Y=zud6d19yOMn?c`AVf!m9mtQ{kudqa{j>l_8NOk(Vw}E>!t-Wj@WW<)hg$ewMPPWTa=jumxcEL+El~4+UrZ{#~!KnB8Jx%9t%xFM~a- z*QmvqIb6S3jT~j^pewMLJKTU$O>s7~t82CR+i<9}(Xm>fE1jyIm@&cdZ=#O!Vw52u zamBKlrz`ux)K6DrF;7<)fuJ9zQOur|;IG<>3U@TJm3I9%drE@gS_>O{PJ+Q&GxHT* z?p#Ha6MOQkq1qowD_rM#sRQh~*jax|8|_mEOB>E~n8(eRnh11+v!)Q1k$#wS=#pg( zM=slQhi{A?HhP^?|KjNSy#Y9ETGL_E?{~EFsQ-Pg)cR**7`ut;-0>HxKH$>wso_l* zUVkLD33fTe9@^)R(1cc_=J~hQ%om7;StEx=;KzZDF7p|$>83+0BGbDP!(p+W8@ zs}8bjCK89gG-91 zl((^W%c^X0)Oc7x$o{}B8lNUHJ!(?;xb`T;e!?y7iyWUKF;iSZUE)8$-u}%k6rW)( zt#V>rD$V%P=n??Wr8JvLIiU=&;u0&U*sC;^a<_kWK(O!Y)oDOF9q(|f(4mq`g)`Q4 zz;_#SYv7gZo8;T%8|7OiXxyvFoQgBrHtsQscpz{fb!&KQ<`v=F<{Q_o(W~97+-uC7 zkTy1c0J{%+fORWzJ9sPSl>++Ht!G40mqL<4k|clNe!F*zaqI0B0xAY2cdPpAZBmL* zIHl&LvWq}GoiJ-=ALw~7FvOfAg>OST^vOc=Bae)KFTm4%{uQ*UE zD7IUBlctZMkLD_ED-Cs&WEAUG>?Iyl;Hx7{9zYquNRlFwAd(`IB$5_4_V0jYU+zWz zB@~nbY5~Q7YCt)@Y9B@)Vjrr)RG*ZejGyG%V|{?KgRT9o1Cf1^gTDQ~1CCqW7m}B3 zU!z{q&!`CF-RSXb*WXp^LzC-qZ+BqCqggkk#1l+{q2afO-O#9eJA%Q|x3rsyZM(CA zVQq&Fo{Uc8Yd4RAu^!t*UNpqh0(ZFy^WV)!Jwz@2oQSikk73VNG*4QWv}Pu zpd0mS{O#sr9iaXUCLE8t8}fpd3kbh8_2umgj@zbqq4lRy-Ya^+^~WL^|95la3!!|3 z@j~g(l{ZnhvviA;JJ$4!DV$<;M<^U){Lj^kxG#6B|L(=jsNc$&Aa4xuh0$op>W&Ze zT{vxekLM=qg$bl3oVPU+^#b89&=5JQ%mVajN%@*k= z*=Kl`0GakC*6qe!FZd!GkCPn4-Kh1(icIBgpS{3*PzFRg?JM6Yg7A7XM8@;>rtjE$ ze>)wpTswfIKF|X)oQ~?Qi9nhk_yIXzJiPk{zBF6ITf1E^#2?%N;a3N5&s`v)-n_nP 
z)N52vXpvFW8`Kw5|ImOTkwfVh8j$)2-e)$+-oF=|?!}(`Rhn{?;AI za~ufsGvae1U`G?A?VtHM5^xl7E%IUva`F$$JNUeI1qt@`e-1Y)tEH1O7^q@uveEq* zR=&}ONER*0Kre=3z^0Of*UV&KN@k@ph)^Laaf$R|^llmX>CG-S1y)x8OGPKbR10W859iUhixRf=@bja&gGpb%{U8<(iKhZzYJuy%L*eyG518?&i z#l2*_baX4K)reKK80?Z;07U?Exr#<5F9R=e-8y@fT#Z~cZhB>gtYoX?Nk9|86>uk) z(I~Z2*`05%ZlqzPW~9~1P)93}tevuyyp+O|%#-3`S=K1;CBIU0uDYqUsrpF&mh6%8 zl>82O1AreumWcC}6RHVopfE@$$0f(56ai=eWdJL{5rFNW{r0z7g>OZ+O0Qb4R$oF(hgqeb?%vwq7gVAmT%I>qhu=|7$Z|MM27dGaK zB#o!4oR!AzHF;%@{Wj7Cp(*kQrXFm}83_*;PZeuXIV(e*b*g*3yOwI}!l^GIH!l^H zkhwTu8+?!B`$T%CHmgHeOZWnR1&TujadXKcFm;u3m7UsCC5^2hRe0a-&tItW>~u{X z6Q`6}8CxuPj8nOIZqI(w>4p<7Gqp2}Y&~`7%A@7X;0vw?#*Xg@e@3jPZ+`8lH~u(K zoE*(!t8yH~Tl^#Volk9K&6Ssb-n{peUh}McZb&e>YD&;Od0buCEEO3(1nHz{1i5aOHY58v|@~sdPwBNM8|VgZW_5;L=K3LOvh=^QH`&{qM&| z-8`3(eoxHBUGdGW8wa(Wwl6Sb}@8 zJ@)TP8JfwOp+q%J4vVYlKMYJCxXbK-^WPb6(&xBrMl_9VJ0KMKRi=~Kh^*Qodr}wU zs~v%{c0h3-h8i;6jpCdcPcGsn{t~84tMQ*5Pdi4{^1$W+$S{lO%ACT-_gMbWl0{YO zv@Qbo(Tapt{zj8WH)E&hg_@oU%yzl^oy$xQsYlJAD#KRq_a);JUAC3=( zJ8J~!T6A?G8GH2qRa^L)Ec*y;_&#o$%J~%v< zEb!O=+`ngpmK{E`7}}`QS5Q5OxR)xH4#dcibmx>gf|PsQl5CtQfIK96+OD894pDXb z26>{en0zj$k!qy#cbDwl2M5EE`05Hisg^xkd+`k#wnoGgycdqrP1bQPrFrMh$;jnu zUkW?9mV3!APvx=mCoV!J%^DNyha`EP1}+LG(T5xt??_gFAhr-IjaB8pR_7RHg6u>` z$=EJ>ZTl|65+!G!mF1mzM_r$PHdzqL33HNp^VUtKQ?p(xT)VawH8zM!`k=yUT^{NQ~&eT&xz_c(TlbMuZL zuU}8jd7r#a@2QAy=Q)c>D4(MEsW|fDRv}WqIY&R9C8<|b?IKJcXs{W7cS}Q66I3o-&+2Ok#x#7vMIw= zn7Lj0N8d}hdV2$K=kR_;4*XYA@wLr(wf?pJQ$xZ+-JX2|(Z15(u`njKYNGV~q*Q0D z3uoEF&+v%7J$xL0X-spmVdP@R@rJ)q%WZPl#yvFa!3veP{^dM;ZAnVO|o;uF#uH zvn8{fLjoD-uhAbW=ViY@qi{r%T)Yvp# zWrlBd&)5i`#ZnsK-yHO=u7=&(AxA6HHh9BQ)V2zPL-e)^>iMivtB@@rH#k|m-KJ2D zJSwCI-M_3~!;|h`EMa-vlcrErVfl8T6?`ng&tAB(>mCty->Y~ek~NN+v?Nx)cJ2sK zC&#}h{1{mZWrONGIol)MD_4azn;uwa5AMkE+XG8nqQS7nOJMNCzJ`#Lp1As7psDr$ zgCjq_!0Z2Ju6)h>Lu0&z_N~|BYdorftMfC2g!aC>&NoV%7tPsx6g@ZxvfSY{E1!!`w5sS?HuCppL@_eKOm1nzMi-C%8p+Dq_lYE z#bg)6ZEA~>K~oh@igQ9pK$#W+e6)xm*#z6aPQ@WFV=E; zPDUMnkB8ak$%i9& 
zta}}zGA@c-E=T9C4UDlJ{LW=cp+7V3AuDKQr$W}_uU*V*%gK&{N@gj7*nx?*C+{*L);ep!oeMK`J^=haS;qJn3IrG-n7iaN5VBkN1 zXxY|F@usnfz(x^RrFfs|$`quySf_Xs{2wyc#C(;&N50;jV;|@0C${`e{p`Zz&3#Eb zk;=!WM})2`yC?6Bk$5evukzM zo6jRHPhUsA6!JtP)+F-;UV2I%Y98+cOcj$zmGx@$-;>4j?8549kS8Db;rsUJWDlmh27r%@ao~h+vQ> z77oL!PJgYpwF+b8ExS4WDS zaiL!fm-$R|Fk||0%%UD*xM}%EzQ)Fo0dQGgS3YzhGU0Tj`#c-MBUo*t%o*D}joq`e z#yVFZX~yYcDN?zHW^;w`9RBc7A;&N|DQJXS&&mv-()`H#lgwPtf6-m*Vr5pvbV>+# z&SGKf57q17ozNnyg-n+jREmU-gnz2kj*21o{?{){e|>D4VKgkE9S^S1VHhX>TNamc zG>lIe9`|zaETj#&>L$`p5&bvfI~5l667M8h^I)Nr@?S~rhM&nWhsw>deU!6$dqEb3 z?kW$cKNpzu;(D$cd~|Dt(DppV`*;LvF63Ck$Sh-tm>uiH)_|G0`tQ$c*BW; z>$Tq0w)p!Nw?>T0X;Gg;@plqI>o4>WgP^>b?(sTC^QEt8g}M`Q2Dj!@zse!<4ekpP zY1UuriBSL5knqhao6zgW{!!O?@bIcBVXNxP%kI?g-nefz5AgkdX5X|qTGB7q%W~8; zC@i_D2fuocFI{H(Te}x})vmdh#V{r+j05rxYJItY3ggp`nI|%g4MW5~;y-Q;C5kEa zeCGAcOYpY%+|(sWe1mYL?z;^fM8Emn5g8>n)?(TD9b~_Kig`@Gf9CfceFV(z%w^f0 z693o|iXY+q%=fRGYTsC*m}UER@Gip6hlJIayq|thb1DB6FsbaR+4wU?bs>wn|e5h`dM6h%GN_egSX2u2U^#) z@%FX`*;h=@2cNZ@82$k?m5YOZ`W_)fgSS%d5n@*$taWtUrFrcjeL51s&P;~Vq%nEGP3_nmFh8?;~6;^L@9 zlrfm8M$(tw7qhf-l{``v#yW;lH-9T*{#qzpmvU~7x-u&pPP;b`aeY9>u>`tbRS&mu^l>p_+HgXRpQ&EyP`)DQ!UcU^{6(Ka#wV+ns4GdXEIe_U0b0)ZNzkTVQKp9Pj&JD=56+LHnH`ic+AYu&&bW4*MqfFzg9 zh|_LU^KQb@_tqAj#g1>;rk~+Gu0e5?ylt7;`&uazxMHUu>| z%^DwleABMnU0JSc=U=@*eLr4&skf|-CIpW%&N4Wxrn1)!vMnr(9hsEyTiF3s@oAEv z$qy4v_lU{i{JZAiD8!;DiGhyB;pdRa=G2vonpBcDiD``dq*L2+Ag2nAWveZ3LCRST z(GTeP?a89FYmSW}&lv9A;lUzlZXE=}5~k3{;@a@VU1WlEw2EJD1Ja?6oQO5Q4N@-s zwy!5E#q7tY%Q8k#djPF#iWzL(CDuS47gjy4yg#c(V0u^R-J9(whfgrLVy z2IK$p?LZ;;nEZ}-l}1h3(@|LdoprVOSof^?M22ZLsiNdI|9WO)~U2Yg7Blb zf*Mn|g|)Q#hm@@51P)$Iq+Fc+u!d4v@;q}2!MKZ4u*89!g1~-{XGQBtwV>JxwoDAC zp+KvG&4Tu^eo4MQ+p!tCPpRi!4qaDB;@sD?(pE(?diXkr`t+@gRZMOZy`JpakfR1r*DxBFmG`xf_NfctU!~1%1s#wy}SEdouRdS zNpd&)AG_a87P}bBSx(CFL1IMEUroP|7~n!PIXKDYvBwo8!li_9uNF=8f3Il4!^(Q=uRo#rYy{WZir;&MLPJC(1;&4tF~bDf((kHlS1 z1mc=>q&b=o7N`VD*qOgu$~}u1#WeFL+m8p7qNJRy-jY;{4z`4Ed7&&wr^s{xaRb&? 
z>i0VmC0~rjyK_go+~>T}HP3q&1VDNh?9d@VmGHI-#om-N#mfE4h{1+%lz;D(9&C>N{Q=ZWOsbsSzhRX&TrjuO{NrjO}r ztcjuB&+XTN=E+MCXdz~5+W6oYM7S86t)n8y;U!ev-PdM*9#E|pJ+l01|C_b79noWm zZknnmN`9WAJyBZxn^P4_wc$xvn!aY0_LP?qmkdZe-Hnluo3GN)H3_0zLh&KnWjN|r zPt7Aw>@tekGkLCC^ZI>xDj5YzoB?@~C#S%8Lrn4yjfgLyw~}~&YHwdiY@meUQ{?~G z6nE>817+~JHP2-}y{}bUm~hk{`j>dUTdMNXa|p#Mj*|wPuS~>uzsVbds;C3I#1hW! z+Tjy=-`QnQF?O+SZH@I~OY1A=G(fH`_G)+7yH`Fn-l?ZzQhSZ4P(V`tuti14)(8FetCKdPWV zz-qTr!kP@Jl?rZGgc&r1@8-Rv^srCtEeWmptNuuss8?J25mKI3ln&_nYndW^@xI%; zgY8Db3p$J;?e~8}yBHTjyTYzY(Fc{5;G?$ip347$(~{4 z#%XiVG>OXBGa+*BwZjj0VXS$hEZ~guAr*lHNEIw%#}4A97yBWryu12Os|_$!F(rwR z$L9>p_6sdQEjbJTpdJ$zmLi<~B#r^PY#`l|te z^=^cFY}4g4;m^zz6d`z~A?2cT&je^~wg(Wut-asTs9wxu-JfFy{0w7n)4jXZlY^Xf z@P2uHFblgo1wQ)n46g2${_D-~`Q!)XjM>Y~A_2?NH7C7Xu6@PJyUC?z?U6Unq(9z> zAP1>=!wn~v(oLYh58K1yL)H+Xm!bkf_Cu$hO&MI1ks$}K-4CwC#fTJAxd9UUR@EbH zTxqsyhaW$qPcw80i}1P0WBoUP!DADX0DD!-Upy?dJ9U*utr3W>TSHPEPP@Y$PG**u zS?#<}iUx^q)V5E<9S$W=t}~TAc_BY87MwWo;(j)^GR5v#vX&{#_aPl7N4z3af2mF^ ze3peD|DhnlX6yHH6QvAmPrL~_24v=t4$Hlxd|d(qn5Lg|LXm!{z+UXb+?-dOq4g&9 zzW4nv)q(cue7;Zk3(ZS=tPLoc5E4LUN$aqx9N^-BMSkz*d`+&hdOSR8`c+otpsA-< z>11<>^u3yKvH`fnq3su(u=dbDw7g!0Ks|D3?I2UHWB8-gasS1u0<9DK$gO6kh4-l{ zjPt=&ok1;T@&qBl*8XzoH;R<*!twax>i)Yik=yy?$roX()1`JKvtIS%OZm5okwZ6k z87}U&vtSWx^Y`S}j`2_m7u9pOhbgz(&XXmDPI;z8n1GnyFyII@@iUl>(aG>HGzR#TE4y{^=-sbB#zU>uC;s33u|^vW2CL@7N2O zl8pv(r6l9FY#IKHRRu=d2z z@jvaPX7+yZ_;_-aIp@Yy^svi~8DP4`JY~9ueN^ut@3PE&Ayq@jHJvqnF_1Nm{I_b0 zoPGIhWWN3W@pXY^VZC0lPsMex)qq+3-H4gTp5y$$Wl%80h*>&%`~b#uX56whDz(DT z<_fpgxV6Kgz#sf5@pp$Y;I+q?S=V|Q_AoI4M#xktBmHfXsziHY#LBGEXFQ?Xg?t#W z-rlIAn<0N7^moxZgvFYj#IRH;5-*g9Z9((9t6{`INWnJ}T#m&Y++}euMoc*O^19Dc z1v0(xS?by57$Y}v8XbKjN1`yRnR#8wkQsjU5Gp|tYYG_rGw1qN=VkVrFjF^-JVHH@ z|5H|#c4N#bDSMf49d3AH9AR2QtyK0fd?->@^7vW`7-R6%1y=uuq^}H%<9EBpU4OU~ zcP;Mju0;yPT^D!P;w)aQIF#bWiY`(dN^xIcaa-Ks+5h`~nawpHvYE`Clbn;uxld;@ zR%h+Pe7&RFaKg&|Q3gdUrjxOcZK8R~&bsKSPUFQLh$ zbN(^~YvcM?bJ*s!azt|S!>-9P`_#4(LzzA)OJgwxBC=yesQ@KK6E_(8rZpBfF@?+Q 
zcAn`k9*|#=sQT+mwz{vH(GQ7rU0IFTd_C4}b+vx!h}ZuZ(;8rq5n0ZSrf_)z7)2V;}f<*;dg2I!Dp_Lw_ts)ZQ99=U(!; zM<~JVbh|bOU+j`HHWm?HZ zcll%UW6v4o!jmuOWHoaZ|0^nH1SuYyzIo1!&M2HLcH-?9r9T-C!octXm>Xpf%Jt2E zXP76*{Oj1NhSL>_zKDCJ>32T0k$>xCxOsLC=XtD#dq=hoDFO_0rc*j53%7}%H|@Oi z+F$3}k~pS%RGn=cQw)%%)?F_4+KNwP+JrpUX0j}$PIub2oaMETpluJ4$?%MCAv!ZF zR}(ca0RH@-?z4kc;4t>)otMJc`MKM3Pl}D7bNj0)W^-4=T4+6*@z%U+-%*V@~`t%8q5)xMQaR}^;Hrs!af7d86$in8t=I#x6LtGg# z-RmAGIL!?6n+Awx+JKqRK=<;zn5B>=H7Qvdnu~hXx~NPO)LCEy1=A(x$6Yu(dzC;D zHE#LZ|B|0z2{%_9cP!PX3306ho3*u%#&51%>V>0fYdUu(xkCT^<_mUTH}ybi2+U`n zK0R}h+t_ZiKx{5s-fO0494Z?3>|&d6t-V=XUSO5`#yT~Cz{%HY?DpSEz;}!C04e@B zj?-%EO2KCh_-~z|YiQR0m`4V4Z^u?Tta%sm_l|Qay71)|o=XtytPimDdRFQpr$E{^ zH}_JT3dd?bD4$O^8=+GP5ya2!B;TlP)FR2n+!)a&hD) zXFXnYzDV(VZ8opFr0dr0+}E4wV*eaBFy!n?l#?fYw3HObJKypxIQbY{`;+({@+}~$D#I92kh1=k(8gDTxP1) zI`@Y=Ik`h^^{M;z?J4fQzG9zNUtWCq8I17W9iq=(62r*ln1Ae*!M!2HYU)#Xy|1=& z-AIj=*ynj{QM_ZtzEug|xCop_C1*1Fen!oZUW%hBehQ5f2=blG;Cwo5Sbuyn`>5ejW@w9sJ*VS6zgKlAxOC%r`Nk|bxli2 z=*LEn5${I$$GKP3mc~~L;ZpvGvNvt*gXw{;cw#ToxLIEc%=U#;r^;EIqG|a(2WA5e z{glvHBGs~Lxsu9C9j3R^oyw1jDJnU{K5fCE&`*7Ul8QjF$AXC4yy|w#o*p__2E$3s zv`hpty8K~w_JhdRpIfVMvm}iY&wO+4u3=2bF5Jj@MV7+YJT-2F3a2Vx_8lsxcn2kF zP6(!cttfZf4-LrHG_ngPRD`c;gFg1!?r`j^DdRYk7Aa}@IWOt9M<8^iYE2{)mR~}C z*%>Jj6Wa|LzS|uUq;xtg4|h5(@0bd>sMM2JhDg5@wn!IlE`{P4^RjfM=(hf($NyfOAaaas7NgKPQvf3mg^q zv%g-gw3{jJ>t^swO`Gz2`^V83<=&9Bdv7VR22E=-h=PJ=#nkYG6 z`E+;Ynei2=KSq7~qa!rAZIA!k>E9NEjNw#un?TyP(vn4;Zlt|0nFSnQI0enLl{K^K ziA}g!85n<%>14&XgGkV-KjcjtqU=$r3AAMTGLm{-D}_|nYIRf_>XxcvH67OaI{j4p z`#6Z>{;8Jx5SJd}9Y{r-bt13${_^MJ@18xX*x56#N99)sukjy~ygtU2Cd?M&2@~55 z0u&)AqP^L?+iFwvk|!>qfrSkU6b0rNdb7V#SjK$JN>G3Vv6@B3liW-Xt6{MR=9 zCYH#wxN;HquZ_E~opzTv#i~N3qOgLn%oG(^;zpqc=juppwvv5LZ<44>(X$iHOHsq1 z<`a68piwvRxo}3_r!?k>JC&zS_RBS=-7Ae5X^OA zdwYbll{{l>{~`2=-br!`nl=G|^8fIW%nCdSR%{<-HvZHZ!$xrnfM8HNP&`h#yAF43 z%bYoQ{mv`vsrr2EnOEI-Yv9^hgOPtRVfDSju}S2+fmNBwZDHmimVT~-GT$1nwq+^p zYtoE*VkJYb)8~cfEY{roNzNOE-wIbm+b;9YyzA`_+6bf` 
zPep}(V{#yt8+|8`7gLF|ofTql!a9$^wp!~Z5{!3!)+D`*Dr=JPU>j0a`OHTPgSm9G8#tT^ks#95?E{RLw_FNBY1 zv4*8x*OHl$zm~hzCN~9ZF0J#m%15LMd!RK4QV>JMB+Eitvk^O^d_t1 z$F%7i;d*m^S|%$aXB!h|%wuN2JR0qm=jBdw$y5?;w_FreEY7|;57%uy#7ZHEV(3-L z*kteft#j5hPzHn0HL5>=@zJ(tK zKk6FS5%*KvnhVg)V_gh;d6=RjDPA&+0AzI~s_Lg_``+z;@?{8J@z<5*&QZrqPwGki zSn=|zd*eT#{qI1vj+foBPVSrixBUIQoBuKvzY*5z@@lAmKGFF(Q=XH{zf@{uh&P8+ z(TOuWy)rK!gHVFQSh{2~8!4Cpw`wK?43r@#%9$(7RDs*vQ`}Hg;jW&(Q)cW|0)7Il zw*Ycsv_ZTk7G4o%mtL)@i;bA1?&PUGQBID!1Y8w7-tchJurj$|wkBSiHsE!7v50D@ z>tx*jBubG9;IO>QY0D%J5SQ%IT12JbTmCSr8Z{;Uan?eL`YubzD%rEMZLD=i6YA) zP)_tWG&#vZB$!Po-oZL|;F;RV1DoqlY#f2#)B?J(n@!oKk8pi;u+3=Z1VEhSi)Hd4 z4m0E_@MRZW9cADl41{egs+0CL*LsI|*UVHu6ul;zR? zK<;&7!{ql;bs0$$ens*qR0h9gxxV?D!^P41#m2NJ5oS7Wj|*FnefUD^GVng!OY>&< z7{zPzN&9v0z?g2E)6nXw){bG6E3W)5SM^chSQ|-B^VV<#0QXhvv z$4=PQw~@$B5(}!%xfR6oYUUvFqf-aJQ?;;DhqzORw9~KuYGmqgS{K!5Rr9bScsmV3 zHX3KQ<{`;3-F0u${nQKhg(U%VBsCN^e%~GMP%H?qq$=-@7MvPLabge_(5Po6bjy4* zk0Z35Ngd^zo};E1MMJN}e8ioXwK%UHuX=JH3spitsn&9@|Chj0^Y*3gwj{hy##clI z^`=f!gx|&2_&ew7&ev%2Oov~JZQnaxH8fNi#+B$3bhjH)ALzaaaH;~R`c_-izqTqw zQlf-#m}3}eurznnwVH_u;7n*z7P34U6c^OyuC+>k!GhTZ%Ia5}SdD)~FZ%SQL$Y;T z-+x{KpsJ$p^uxeJIIcz!emY}gbfsm~o%X4O5ro2GNWN;7Kp2Pn5CVTgcAcg!HEmeI zuk7`Uk1HuKjo-ZGpV5Cf8HU5%s^-Eq@&v)Z{L#%!@jk_>q);z$RxrG8(*bpjS%Cj3 zuwxoyG@qpWK`^^LO5#4fx3VaW*R1=NSbAw4bJu=#*ktwGj4+!6CW$t8{S+a58*kZH zx%(#p{sEimVFRtNbpg5bu-dXu1G?7REj`Z^{Y>_PG!Tpd=M?lHI%2E5id0M#kh2N`vNi@!{S}yrM8e9T>nF9(L4MS9ssr}O`PHTDw6%! 
z$4n457M$|<(MzHw!+3(jdfGzMD#aG3P=0+ZICf>ibi&2H-sX4S@MKh8kcia#(E4<6 zY=BTpegJaIx6Z_glH9NZo-L8|2fls3JU`wBt3^H%o*6r0o)I1?(TItN&cx2-&aa(C zWOj?ZGuB1p1w)i0>l1sAI9{k;VqWamJo|-{t&_F68tS-nQ{t9afDbugBOf&SMn2wa zf^T@tE#KlhvmQTQyV>DhEACfJB2SV{qECthOY+dJ6amIBxId`%`Droqits%8tBqgy zrIu_p@HTAkQsnaNuV(t6?{*Y6zMHjO1n&PX%NTUeAG-beYHq1%_3rm@PubR&+ZmO5 zA;M(5RdX7~bj|ECf}0m|`o3Ha@jo%T=$XV0-u$dN!*^;v+GisChOtk#zcOjPT#qZf zscDBiB9#>&=w+`e>Dyf7w$B$F%-53BnUd=M_uruR^YMe~GpV7~UFg`M?!lv3(Kzu3N1vxK{= z>=L=o=AsIrTxCy2JU>rsVu{FpxZVXp53)9G@z*0}!2(pC24UKptEM!AV=TLhA7vwvZ< z6OqyjK2rP=n$kYH`NwvVV5MrWUQlklKx6>WTPIA|(%fZ&XEow+U%47yz+^wrD&7-q z!5>q;y3}OuLq`z$nSiI1XyBuXyTl=L?}_~5O}P zM%e-C3b9IY%1adT?q6cCgAx#3Jb|S>{KrxHq)XNSYDV&hB+M9+V~Vu_E23Ysh0C1? zj!q*0l6zB0V^hycW>Vcb*1iC?k&E(`IQ+^b>L1K?@k>8(nD)wZ+s%4Nps_W&@(Sww z0;A8=w4(BQ8Qose#Wdd_nNf$jWH<`Lt)(hlB*a3a-IoFht);B0N|osD8NpkBQ)iz< zXUjL6cG;K#9Ul(<+xqVpi+((POkkpNi&m)-G!yY%QeB&?gkx<%znVb$iw^AxC&VDr z12c55rq7C(`;wjdK5>v1^C5@W7wdgdHq0cO%&ArA$6_OnqoGn6Ue)NjTe=?pfpo$z z)gunp8ZSbpskoohI%w;YzWfSumW188Tyyma=0r|R@x>D_M_7sQ_zs!kiZosGbP8Pt zbmxP6yE?m3(NcrIt9@$m_eSDUG^!-tRK z6CZLV#m|Yinc2=8<3K!E=hY2eWS`P5+XS!_9yVBLHF`YwwSOMaxwLJa6w@|as*%+F zojQ)ULo)w-&E%uF|0&a`#b4;{d02VLI^84?K1+X&EQf4G(l^V3|=Haqw(2{{kY&=bu+>#6^PdGB23ZR z>g#Z5;rgl6tS}!_O-eVBP_B|7v9@Bes#kg6G4W?nbIDk}Wn|hHjf_6Bjqf><-g~zS z-*n9FF?^U(+!R~oR2w&_(UDE|OjC2S2qe)~&zQYT*Z6Q$YCx!DlEqoiP!yl{0JS2HHNhBJX% zT;OBK@lDM-cO_*i*o_i7pOk@7Ucbj!a85?SomjT+j_DxngMxmc^v5}%lf&_4v@VYb z$@!(pGy(@XMrH?g_=%RFtBqqwK4EG3WPbv@4TU_#wAq%&FC?6`Z<>v1A8 zd?yj+N9_cm6q@X3?B^Df(D?_j&6nc68oiFJEK>Doq$N)9UO>CVyvsnZ#Wxfie*r1_ zeAZb1K5qM_3~O?_o|#qDaXV>_3x?E})lbK%^*}GZPtA7Ry$8Z?mMy4ytZC7~sVYd&{X9rDk2EB# zlPNzB$%=lR1rC40^uX5F8)d$$4LC{svtM>rb|0cUs>GSuYwmGAIzq(XdEfgt|0@3| zmpYRk!F5P*TIUDD+gpzwr+A*)j#vt>z-5*ye*(NbdZ4cTK(Gf>)HyuE&|vy;`#v81%PWTH zfJP|YnN=@Jw)^y_cJNt^Uaagd(W!j`bC&89?;|#G$_m1M3yy`bDwnhCV9TV5znLM} zwfh5~l;%$+HKt=`ALgCNoDc^5A=l)lY(>6C>az)2!|`KV+BQVzXP7@$k4;{i^xv`R zd=g9o&Dp)0dFPjjwSSG7%wPWqz8yp`%lwSKH4T=8 
zhGviBnlUmHK~1T7x%#)nKJ-FXugmH|K!S<-G+$A7s1&y&hN$KFBfx%qsAZXtF4fV% z&nIG9zrnwu-%0U)ah?~PTb#t9CZfhs&q^xd6TBAhnc2HhvlmbK)O(XL#_i}L-zwY7 zA#fyMdvX69y(*e|QDv4BN>uXqM;m}FRp~DRT?c!x5|#0G>dTMi338O|3a;U=l- z{!vvk*Wuyo9L^$h9xs$HvtHK?f(C!j@UH6POzYq3l7WS5d8@@IUUu)=39kySHzq+q znmuva3#}Mo?a@av5fb_#td6;u?hE`5Zh{>e+7YxXP3d^P|88vm+X8t&wj^Iv0^Jy& z3(()L+-=PK%=2Hmq>kF@_c|)g`zB*`z0bbB`<7FN`O&Z3$CU3HJe{5_{i}U_8{Sjs zb6ah4-%3I?D-I;#tv{RI_UGNIB64KQI5U3M_v|bV&>2ezG%w4^_Nb|i!WyjL*cM#K zAzy@cr^h#_^+X(KCA2v8K!e4YmfR)iUC$SmkUxsZN zYKVFqfBY{+yPZ>++VVej@2B^>AjB^ar1BgJNQDHu-HXnlo@x{I!1iy zT30DO4{qvXC{9!nbA?kJlbTf>dam5#+HmE7y zW60|=G#B(~_#<`poLnX(+wr@U$76Ti2bGiMx`C`U&792%@jrX{i))$#!eR4?p{gSl zVUmydIFBhAFX7Qrn|0;4@aWcJ1~z+ot_DwoSCP5;L$TX?qBSV4bmME&d^%=6|J2)z z+fA*HaOF3yV{^2=W#vTl`a;TQ)D6Eq0oH<3vP7*z#MdCn|GYDL<||8P44mfv+NXcs zc4*l9Zgc6!?-z5QAY<$U|8nv%BT$AE9^q>`Wc6~OZDfueRme? zimO(`gj>jbwv4?Qt;UUgh3vQXDo?}{*WjFS{;+1~r3(A2zSG|RUTtfYXW~nId)3uE zxy3>e@03jKU8W<>=f&r%@_n1xB93bwl?F|(W+yE&L*>M~Oph4;2Kx%%6XTkIo)7ir zd7CoE3k~WQ-#JcX97S&@c;A6HDW##d!E#PIeZ0N2tp(IVxqZA^VWRo6DYE9|h}tGk zF6(fIM_(JdKICz}zt5bTm*6FCpT9Ip`}aSC$!YH+5dnqNMAZesERZnb_>aq8ERSk; zEgS(*OTs_ciysuY z#RDl0b;hO>=hV))In{P-$%so_7i}q>L2a>#BYGmr?HE` z2AI$YMdCKSGePf9`v}AVbUU(Cs70uTH!=~qi1qo(w!f%eaMm@Wj}XJ)mkkLh|ND`Mm)+}@P0<*!*${W!_||B3Pmmvq2tS~3xM63 z=_LnF;oU{xZ;3s*m;De{XhfdnfodSbZE>_+HexGA)0&?%U~Qfw5@z(KeH+?J<)D5w zEfOn=RoM?l-m!S@GTGwW>a=&Vz7aD6L=#sCQnC4*6As_-_U4rD!gs%z9)K$5bS`X- zqqd6RgKWqmP^Zdy$x(Z;h2Am{F1a55aRA#R`b5>@Mc%pa#QIqezH7bwl(w}Hq_i1Z zIKW7MZ$QX8nW67Q4Zad@wnR%RObw1Ia%%+O+?*hOAApipGc1WOX+m*+e-8HuU+v=3EqZNY%DWGkG9%?G$GBWcS`guXt!ThxFx z+&6eP&Z}-fI$kZYPA*(W;hqdMDN)5YqK+dD#B6kq7#6~Fj}Ddso23U}YS!RcQC^K8 zzmcAELI&~tr7Q5PEeC}gyMfQxfFQ=!E_lG6m6Mnc_(LM=mk=;>Vy6U`3>j#zEF}ii zH?W}kYB4J3NhgLVt0W(2alMSuJ8T&tX3q0L&t`Y~MpaHpD_?0iUs}K6LlQ{tK-#%`;JRybs6i z_bKbSiJDCwY6dSh97>Y3-3>+XHEg_l+Dcqo`7;*!?8iskj{zwko0DsO70s)<8wRTJ` zp_R^KMO1*eOj~l$s#`Bggc$6lYIUXz+(f#Yjoh9EX5QZZL=8^Z3x#G=eAG;XrDPuP 
zf%yPjm>~vfkHq%#n9Fqb_n{SB5>Z`5_?&KGzi}6+nQ^;Ca|!|bY-W!gNDBg~h*rRw##ex4KMR7?)*6~v-fAKD+5CLe@7Bx1M`))u|Y{W9>D z*5zTI_bqs5LSDvEuR?l3+`p0Af)RO7$&i>g#$h$ansjbNyte-fU?kaO8d5A+LKNKi z<cIT6x4|!1jl9*V)C&?+?8Y<*PW%Ilmi(<9XJY@ykolD3ekqWvQmzaX_^e}iS7H{? zzB#k24sE}`zQueAe^ubV38M%>7zl#-g|X4xN9IU{i{f_TtUtj$-baOngzrkmJAx%YtdG|Aji zAE_lN6Zq7>p#{jj_>LP2m-H1;$@-femuT$>!@xNvL65`_(T`9 zL0h|*lR;OKa6UL5jMR^4OR}RO;2!D;zvw^lo2j;8Q4eWdpw43GGF&i(U_4F|8Z+}Z z?x4i9e^ip~{ygOB)jEz%%`L_x@gwk>k|8IOdtmHN08aFZ(n`pM1&N{nr-{UJ0K5gZ z8+@aM{zZsmi7`dWPNH2w6bu2;iqgh`7OYjyQ9ht*uggNq5kKr`j&cExWC+`Fy4@&Y z;q)9Q3=2R*LeZfc+2#>!E-}JDZDJn_$VXroXz>zfJAx90TM7sf)u!CTqo7gK%$43w zWJqON!8YTYrQK8@yA9{oDED$j6jJy6dyCn9VH8&#F3|%n-AyxONeqR8cRx7w6vEX< zVh&2dxn;gJbB5n4prr;<&e^|id?`R{yX=<1)?wh5y`!DWvP`D>0;FS0`WM59LKnIPv9XNp$j zfiZ&IQBL>1zJhx| zU)Nlw(Ny{(T=?%C{z9}61|cTK=cq!3+jvoETd4b5C0A@n?vZPl?^KQ6Y$?eKuSjxw zBDkOiEnABIGu`Z6B*0rdsc*$YkFg6W7Ai3F6K;|RaD(noPx ztg_u66=X!*LRsoSoI~&bVV!d548GXvJrtU39LxpnXi?@u;I@Y?X2|PtJf1=9GrrZY zT0;KYBJ=GXppj|X=jZ<7qCs1Xb6ev{-}tMQ%wj5Nw3jV)L@->>fo`rSG|x)~W)^UM zO<9U+83>XpQnEw==}jwbWO;yyI}T{K(A~c6YCn68)sduebb@97k)cS1XIx|L&>hCY zet_5OEF_0VDCS;ztEeCXRm$UzVeCJu%Bhf@PMq5!X96RM)R$+F(7s1#WNMD+JdZu~ zA*)xl^{^;(z?1D>6flMmCwg}s9>?KbZQUo^7)G13OBYxDc_;)KX2dIpHTHzE_{ozj zLaG+>glN`AY8wXkMESRMNXA+aKt0g`P4EGDx-8=Eb5`)*MNosX=+3DCE<}4JkZ30$ zqtK`8&i>=Q%wiw#U(a!^F;qkCP@KqvrL;mf=o7i9A$Lbv?2WST5|HEZ>@&s|YW?r% ztB+X3U&e0$Dw+%=&Vr(0xyY_$$5pZ};q>}>r zi+_pEH=X+eEJsUHRM`4Q21lufHn1ea829QR?D#-NKLwTS^$+yMp6(Q5z+F-0?1Fif zW=JL%knzA&q1k3QM z!iU3s6I3ruWSfC8OJtU;@=BbNQe4F`cZMPUSEZcrvzKua}?qfvSD4Razj zbmyJ$J)lguN2G4ZzS>h8b-^!XdXUzJTVK!_Uk!BW-1o+dqPK0yc`^&Xn#GCk7a+eS z@6keM-P60t^v~%#kqd&Q#)iSvu&KQG$S1xXg9iu6{Y`L8!eS1Qp*@^LQn{Jp1tOEc zE+{(GLZNbw+C@dzNg;l32|NgYOckELY=p3L;D&3t%NEMz-9_O_sCrUAfUT=@9JoWk zQK>ChZ;*hb8XQ?uXrXzUI1&|9boB#77W#`8_ys)tE>A@jdqUrFOb;9cCl;5uQZkJE zIkfIes~V57*ys>%!n9<-)Ep%+!BKpR(khVir1qaa6>mMYi@I-F$L#fSD>{^WQ!WaW z90Qu{L%#O>Wi<}AEJ7>sBLX}*=23fRnh(bb_`rf{&mgpk5O6)$KM^#|7tbO9y?bK+ 
zn&H9Mg|r~hQa#hV4B)X9VvZWxAg0xWdW6c|!KKxc3W(}Gxt@QH`S_4bp?r>9Y@AWG z>30*=5F7)62OK5CW)7EUr=mdIf7qcxoWo~`VYdQ6RM1x448d!4ToiaRe+)(FVPLAf ziaL0g7()t)>IoZ|*x%w_as3;Vum&SSeZP};a3ZPQXefrx0<5k+5H2Bey)upYfD^jL zVnqSZuK{%7fdX*Tq473C{`VQx%&^vK6*C%(=P+|cWZd8SfLl23qYV4PgJm{OP`D47 zSFkr1v|!k!Wkk%q_jNEdlX+j-9$$EOgdq-u8ala$**n&}9Q$#t11B6G7Tj`gn73fO zUt-Xd;{C6;u*w}9()uui_j4dyxaSWS;4Qq z-~e}BeUgVvUa*;ygIFB3QmJF%V6t+Iv28^yxb&2dt@J zj1@ljt_q8G!3he`>PEnEPZh*9OwL#?Y`BDdsu8B!uw80z>e56iyQUBNQn35#sKT|l;2KhNK!7MBntDcCL4e* z6$VWh%x5|4FUVC!sZik7hZFs?b%Y|HaJ@4``zUu%oqR4WbTz8L3KQbii0dY1Gq!|F zR~Q9u*)4GTYzEJc(#QN5MMYHU~GFA)X!=V?e3YUt_a9l?T zeFs#DO0Hv{f4C_3z!5%I1~M{#P=LW8I)mD@2n_uatd+A(Mbcp~$N(F(EdV*C5_M2U ziqDT-IuifArp!NXQ1tBQ{;wk<3nzy7%j2G;!4LjbqJNf=o)QX(1!8BJA~H9mN`PUhG;cldg3AV2QPSVtBmDGr-?q><4S-71RxhE4RIMu zl~mA2^6|J(=-}shz5+wjNdFwVvouD(z(S`!QMAR!LML;!M~;Hkut?SZn!7wpXchbw zHQbhq&ySEYG|^#iVV`M?V8Ns(nhCT5VxNQ$N~H~mePkK7sZS_wPIAWmv69eYSADM1 z4^OLOc0&GS6Fy)Gj{8Wla*mN=@B_{j_1Y03wt3~<0@~aDL?$z)L0M=$QviF1vp`JF z%bUI6Nmwvb zN@#f`m=BB{kRXCBg+vu*Xa6F;p}3)K$Mq*JrB4xoaTn7IXQF26nb(R?&3nJPtl{(q z5v(vhZco|dcMP*?cEwx`A;0~MJEcS-S>GpY0c?sw)7J`1$*-6ik2L3XMlAtjU#cm1 zu{vIhA#9{PRnqjuT)^LjARA=smSMFlw4>M)Do zYoouYOu=vIzbXa^j$-ytn9&9>K2(^)7IGg&rQdjhcF~+!??_yUM$bIb@ z5Kb^(gKlmec*ZaNogI7wLSkF_z~8J%M;BI zX8=1hT|@s@)O1rMvp=369ws}2XZ=J^;V&ModW{e}NnUQi!A*}?1^0M|5hjQ*G2r5F z@OH#03NuwUw5}3K zU$9{8!fy#D3}8io&C??Az88N%WZ zM@^6X_x`56EaF8lk)t}^hGZF~U8hY0YMYJPPtTNLio^!ftEymp0aKHv6VG%S)_zRk z8Nx&OgTh>HCh5Cy?gHhvSMa3Om0y$=VMZ9W|BebwXflspV;hXZ)Qa6`-i&N0R)357 z{!Vy>8}+P7`@Zb>xD(o;rzK+wEuXtyXez+$PK0Sa&sBO~j#0yAV1U0#_Bd)ipUnDb zH8~&5gZgNUHeHy_OGi|DwsQ9F@W$7V!cS}K*#h9$vGxWxHBf%4rVbI5^h3*#Vgv2w_9&6g7M@8NkcYJ z98JdViNgL_TTBgxfNFg0e(rYh~|#=YyDG& zE))-cKeU|S5|x*!VMVz0rabfv@Ac_x6}zr{C^dVQ+Rn6Uk` zPylE(*hgbk6+p7?c4OED>R(be!-nyst685$f+wM*1L8j8l{b%T9_H+KJcy-4nxm!vUgJ1d}{T{}K~=IF)!T)o%-d zu0JdLMy-M8FWDZpL+0&K(8PD(hotZWke{f(OEO?_t;RbO zzerfT4L(|lLuCKHejLu~PS|sU&T2R~SCyJY6uZ$$?C6Z9mPibkix+W+{vz{1yMprv 
z6A-rJiQlUvoS+P90sA0cTEL=ERw;$RC(ucsQea^JJs46DwG8AAHc`fj>az;K;|Ta> zgZn@TYdpjr#v!&WI!U~Pi(y?%+#xf+O$E5ay&CSiBF89tq8sA^45Y%eyjQ7xaU|+s zvR7-KE2Nhj+v$vANMiF@ycaoapl?pU3O-EFjE12VJb1Bn(Lr$=zDvr@&^&;-b-2*x zP!&;OU5NWcG%)%`$q}Jf+#V?vM-l!G=l{|Gt2zv>9Ub-_R@{~A5n;ZL%MpTIQ7&jv z?ytm4%^4OH{tFi#8aW5ZxG#ufG2jUR0i?5E3)ZQRwJhesomBs@dvD0c&xNDmb6$DO z^~-R7A_~2x58On+Sa+DL#0$X~71RwTJo*_^A=vj#KY>~~?!*6|+DOf?f^0Snn+Px_ z5W(d_Fe2VyA%Z120&5F6pMfMYr`Z&>_kn4N9U9Ya8Q0w#m9e!giX!-i9KhCa1b*$B zjiXoRUcUD;MNMx9uEkyrqmItNL+kEUs1Uf{>?P}*5rjh0xZ5TIkyBpdA+n8o*04p6 zSkT}5@hgOUNk$Q@-fQkVRPz_V-uOZ+F;K(BASt3J?x7E1^_vfvY#L!<3;BG4IZQ{J zCR8%^1+||$Md7VP9TRkgh%w}U)6)a4K_&hxDh&`TcnZeQ$6@<^FujX0lxNNmMnd|( z;5;M2oc}F~RuuFRM)d-OMQ~Sef8){`H+SRl4*2&2<&06NMX~nK#h&{kFJX^6jt?96 z87UliT`*0y9oAdaq6siGUPKozRwwD$&P8a#3|PjLHw)t{oot~fF{A7k%!w{g!Wi;#K?&q|48pxK*bJd&(hEID$k(FjL?&e3 zDKgoN3Lj>M$+e6VRDcf?Cm>WUhC&J$dEG>zGEefirwi+MPS^{gsAdNHf$fS}N7|8h zTCLjPIi%2G2!sqS10<&jMO_pP?cu~~a%mr^4_Lg9d2@|z73$KQD?Ai}$MrCWG)I9# zg$6X0>Vp9w^ZqeOFXm+*lmV46Xe%o+9b5p3k#N>D<0Df+w^kUYX(-bbuH|vR$DB4! 
z8jMoTjmA1P+LJHHjTsu9`@zz3f$9_Qh`!4#F#^k%YFsbY~)G~%xxS> z48((k%>y$u7Eo^ zWt1&`B@66Bs<#Zl^nXxr5b7`&rZV-%e|!e58TtOk9MXr$4+0!&W+TmzU5~%sVlsxNbHudS*8C_Wy4ru28R!UcFBh%k|>0F^$f6l|M}QxqlIT zLxoH*%jMp~dcYmVw@ zMY=N=8C!ALy@B8)J)=ZdW zae&=j%aU2pSx>KXT~GD|VuiGdHa{V59nyMfi4n_xfRwn>??}>mb)^xsVo+b*kl$=e z@KaHWEMa^ke}~|EV&R{>X*5V36n^+W!)b1ZyCY;`ZR%LyL?(!$YqK|4f;IUcDua=x zF=zyi*Vf&@g+IwDupPomo+OHRi3|~zv-G9Q%0EqO!TnP}tRFW9hMT52;atmjJw*Ba z2fo5||Ia8-Kpw^USF8&NqwnW3c{jR+%MT~hgySGlexl%dcwjj^6;HStU3s89UPlAs zK2%qw6he)=pZeh!yGOWz2$zmPJ}?OB@cpWx?&y0Af~OJ!CyNs6JsbXH#d=H@9uWZP zh%op5;-XgIFt1`qi*$(5ctQ8PQFlN9Ob*XN`~6eJX)j{tEMjxmM#Dp2Ypg;V4_HP= zg#`YNgL3lzAXoc%hI(BCW!{oaCCfqmlcWNQ_GmvTN%-zXKxjjc)MDY4JACHDts(7p z`=Np45?U=#TqS1P<}dXf`=p~4(el>kz3(K*Lr73snBg(ei&LmX-`A~y3-CSk`7sTS zxgQe?83|qo4wDj@3qcEB0gnZZNE`Vp!nbg)QT&g{3JCHvge*uzkxnu_-(~(XIN*Du z1tPs7D901*#Kg)*rm;lFq{xz#RHe~j5yoQF49L@JD3k1?<;j-dsp?0WA)?A1KmfUv z$W@VgQ;s{o0HVTpBqPIK*qumR>6l(0I=M`>xMCrkPO&0ss>-FqA&g6?*^%ebP$xNv&)cEnp(9Qp+QIN> z?J!%Xar>`5UTjEWCzOW)qyqg<4qZkzv;RM(uZcr?vgt^wvW)sFrO`|!YMu=0D(Oh+ zXp9&tG3B3ixgFE^RX#I(08**+Apv*F{cB1x*)%f$m3up;WU_Js>2yoX6vdVs>!Lra!FPE(hX#7`BhEGN|eo} z%C1_gcA`e1ny+f2N=v3`$3j0E&ugTZjyx`Ft8ZMI#8funX`^nOj+KsXi(wpt1>A-B zca{{;FppbymaQwCvsR5F?v;4{=$6@Ja8H->{_YhnrWGBDM5oVyUqb86fNDvnfEI~{ zkE|P^Pk*b7_%Hew%?D-Nf2qD0QcozcrdI|W-8n_t|xCzelC32N1+zk7|?I@90daAxsv>_u?Q`r+x6;kdN@iW2<_ zsQpMd;0UxvFgie*C~BB@LI3jb-gd^4$5??Qn$d>Ui0G%2fkbV@!jO}tPh+6bD@S~? 
z_3lcn3&L84h9zpV+={+0UH+eGOTHBamuOBK_GO||Ctr!Kh>amPOTWgTHiDcqYYSJ z5f|YKxe9K@AUo^Hjj63J0RYUm;Z)3wj2k0iO2-F@KH~NTH^Rw@MGA z^e#&4RNAD{A_E6jp_KEhln{)An9BI6@>SJ{(u(OU0$3Xr$XWqp!vje|fsH7DG&o=d zp#bv=fN(rOb0Uxg0*Ism1hxoZfTwzZ8W*FN5VYx$^G=Ar~AtA3+cv2?g@>ATGe0w6KUk4|K54E*zy))f@t_ zS_QCCuqVLYU|>TPU?1?nCK>?di2&c#LIS{QB0wVy(3S&`MF422mf)qKAeQ{;N#g&% z{vUkhC?5iXMnf>eGF4r3Lbhl28y%De8ev#Qfm99~#`4ojPbh2QE+Gm)7X-6<<6dJ8>3y#2#de`TnhkAW^PzUOflI?)7 zC~Y88CP^FCRL3mFs_UZ{`;qkFVaf(hUn?LinHn9xG9@9NgolSJZ))gkER2kZONfsk znZYK?J6ie%KGESx3CRgjDTLVxNr?$bs^=Avi10XNKSqD_;Qok6f?xE?c)*oFnLUd@ zhDjvYY@`nOB$MpPBwLc5!xGRWk;pK4pb4k^TjU$)$NWJ*%s^QT-YBsD7zRog=o+Pk zW}r|IzkHTQdgY4cLgu_kzc))(wN#yAHJ$j>R}|pj=TKYIdye;m>xv;Use7!}7iIe&(=wk9DkCXlQfbK-5|Bb43Q7tTftQBzZgj(zQX)*-{t`G<4n z&P?$f)_SeVd)mb>_p(eMPbhGoWLymZ3(*$M(wepnpUf=*_e8J zn;MloU-FoAq9*x~o56|Rh-dAzzMR6FYOt2#v@My4?rem>&@Q80{*1{O5l1!aj&EY*7u6&a zD5;T2DG7vy(cuZC5%6#&shYkH*%l^~946XLv4!nPUnH_rN<<&-86VL`QM=9%k92Ub zD323fNJa037byuwV|`$6*bAA1xM$C%rlcfJwXsP{OS6vqI<0khLYz%vOmsqGvQ2nW ztPPL{pcr5nz*mXh5aneu>@XR2vDfMG8Db zDY!lg^%;u>`R=cc8N)!MAUkw+(`tOTJVid?Jl;Td>tetCp+d=z4|Zq{w~jt~@Z`l$ zZ1NJ1=D7Kx$Y!mwN~2)X5c|fkFw4HpS68^+3T_Ol(!YJ>Udv;TyFty)J1lDFO&O;a zSJlZ6wOTxPn8LN`);)6V?L72_P2}^%>&%Lr`ptFQ%w|?xz&v(!yS^@A)Z*s6efdvT zQ>bx6%gwB04Ef@=LZiHFnH4_t=djxMB`K+eMvD0_*T-`6{nEnjy<1(`71Y{weAwIB zAB5{R|Ky!GyQIXVyUnS$C;UYQe!Zp7rpJfvNA|o~LW_z$_-Vm~+H3JsP_OdzCtFGk ze2y(YRr-8V?2?-)a)0Xwc_-~@Hk@hfyN3v^>I}bWaU$bTXb(@;qoqZ+dp9Fyee> z*&*`DRpgz=*q8LDyDu1Pvt?;O%*OHS!dd>WQ>Mi=w+p;=tNNbIp_(o3zvEj*Hv8CW z7?*76)1A?0)n?0lXr{Y=E^6r%&l5k!%9$`jBLjqi6<d zN`~Ptu66t`{RW;=( zuj3nPhL}BYh^fae7nvmL_h6RzVnOsk0&m{kM{8=vNMue z^rjy3THe&%J$v`YJ8KWG%^r1UM%nh@j;+3%j9;>Bf}Sv^O_tdP)*hSjiG2MC#<}dU zJkK$+@gRLt-+cew=JsQ2oZ=%k_#Ho>TWVyv>Ei3-nCu;ILV6k&R&CGUair@2$Et}DxvnOF)$QC(r9SG5wo4u<4B8XJ7w+2@(&4y;d+RBl-SS6cmSofRnSA#L&<5yuWY4TJwi8=en;cD|+I|b)R z*DSZ%!Arh`IS`3pU&IIT&h{FJLLgfGHVUa?Tw+o}L~8h#NCfN_SS>KtZ;>bjwhbYZ zRl#ogpkTKUroi(D1v}S23wFOmx52dKe 
z!;z|zwzAA`O)&kv=)3Y|Hm|5XI`+9Yjh3S@KI0g8lNCImQv%Qrp`xAAkJXS)c1+fu0G<^*Sc$iNrJ(8cfp!AmkE)an&5>i zvTIDkrxa#4-+k5pas;c`s`WCju;F=d_^iiEH4bcqcQu zHP1RXl=%E%q)k`h&#YwyP0%ftM`h5fLY}6#Cw?D&bOSWLwroFdrhA&1eO)7)eJCTt z;+^N-(USLbEa`nE{Z%o?ybB-wlu9%1>U7wbZs`5_))7l;>Qni<58Hmyrguh9lYfA_ zvGaB|H>bvh?{>a+(0{&f6+OU$MH@yo&v-FQt8C_b;U#YHX;#kufYtu9Ea&+lKe zREzEz^D+I<>B_jcRp%Eb>+57tE|VArD`AE~I7t8CtiAtq;QGT1a8P22WXY5cXaLBX z7}AizNh*-!mzFkZC_K2=0JiyMAeLl-CA^g-$FI5TvDb^#U%AArEiBDrc>C0kld>O- z9bdSey2}@K9W;Ch$qptX!|8gz{rM2fm~Qw-HvL^B#xhV4=_#{YUT{&5GVDOF`MD>v zC79Xnk+qj?$E93b`u3*jm3xjSi0<<7n!+hZmTQk6GB17F$Gn*t$39`(dfr{d_kD8N z#CvaZSEPZruiSiTd*`j)DI>!l9=0STbWX1@H_x+Zh`65~(m!dLng`-E*=lEn)_Orl zsm3DHNuO=;_v?{u{*gV}I<&3Caap$nHb+-YjxAbEE8kGBZnEB|<208?y*+kJTIQ>q zd5gwyKMt+yO~`fJ<~#RL(38`j8)-WF)QCDok%b`swdQ`i&4CRmBWn_*X4bU@hsKHc z6RbR21D=e}dt{mHy1_#;KkcD;>KeamI9Axtd9!RxZ|Q7FtO|}}G4y(2hFxTJCYO7mg+oChV4kC zf#!dmj{dUVQ+(ZWBQwM34I*B1r6VKoLVW9IQX zC)VE05k^up5^LB9bv_z>+<_MYnG|sZ!x$&ZEyReSiLxDNuTMd zA5jl*`-;i;Kj*vnQ)H`8W>{XcpSbL4@XGfux~Far9BO_8+gI~))|{;x<-sd@cnA0D z9*o@3Vwd6aVg=E9BcHohCZE-F>~#1BXEw{&{>Tx-nVu|Sx}Tj`o(c4#7b!kg^OH&>?@ zZ?fh*TmZd2VyadS@A!Ja9R+q|C)V{#9FR%hJWCoq?`7R7d1ah z;H-W~rvA=%)wB4u+a{k#=^XFiJfb`I>A3qxchivM=9Q1MXhf>u@tB{7UVa)h=gLQ8 zoMW-=yeU-d8<}&1DfVQpQ$zbZ*0Jft8(PB;1}EL^ymdNriRn$-Ra@xePPA`WS@Np! 
z&|@phUE>bf<>w3D6)!Vdy8U8hYLDAc+YrO8nm5b-(&D{;v zXl6}N6(i99vBe0L^8;50Wl`F8ioLzk`PqMI+9{j=uxtJ+PbD(dIhi&rcPgRlgMMM( zyA9F#F)54iH+fnRWApp&byV+3A+rdKrecdiSA zgUVnx-QLigO^ap^x$RToh&$r?V2|`xG8{0D!Ex7V=#uKtkg>~HqVuNM{K36C=33W zHrlx-AXQqK>+0jO;F|6M^%*NS^hH%;3fwYZGbnsYvL>Fq1UhG+jzo!+-pdB8DEnM}6*S2?{u;0;o6M3sUo zmkE-YtUNet*RQhhEZBKa7Eb!Z*(ly#UsCYd6hE7Y$f&T?*p$DKgHwK$gZus699-jR z&H|lBbuC3RvsPoA4{})M2}ge7U+N>Bx~ld_^yytdoDqEFOl{QpRn2|vJ+#Vm!jiF% z-=6G_SrmYqxPalN_x?w}?U!DM>gBGvbvEf1OTfR3`nwJ53sx~{{J6%}18+?~I5y?KuH$JHWOFM^jAdBYjTPne{T7ad^093Z zU2k4jWrhch)y=SW3RuHlEmqIf-8$lUm|)n&;-;Z}`db+esSSJyYLA&Y^Ym3q2)o9wudm#U!MOvMvfVrA{Zh>Mk}hen`4( z`nqB>%8dSg)Yb`$oDMI&Td%WtU&fZ?X}G6FlRI~`*jnq2@2j?1WM%%QUON9Sv7$_L zui&u2m3{uI(JY^q?6(t&Im4ph>nBpk-p%1_`}XD9j5=p74=R`4cbi^FS$&(;SiAUf z^uvJD*B-*vA1^)id9PCy?ECDXnUSN-$*X~C)-QAriv8UenpP$^k7U}L;f~*(a_@S` zu7@c(UV1DBx)Eler>S!A53K*rw1RD^1(v6wt~PNf25pQnfc(971$;J-TvR!7N;hkL z*@J^+;S-Rojc^v7oB0pNtU)%arUs$apeIGKeAvQJcOVzECEMCL+S@JpcEfJq2Al>Q z%(ui7iz3+o@1!-rK&@iMD9fLczUn8ew8%a9h>id4w+pGB@l)<*JPGRh zX^HL6@5HD2CfYrZ5trXN0A*I)y`Ou6u)n13{-iO;Ls@aJg4r-KiKlS)TkXI6&6-m~ z&t>lQrs#HU#4gaBTO4w6ea%jXrqq(#4@_?E3AeB>D6p~Ho>n)?<$0-b(79L{iGk4p zIi#iv)qDO~js9T@JGidIK+T1X2N#xfepO%iP3>qLrTD?sV-JV8v{)#dHdDnVm5j-E zPHIg}m_;_)xla2nob_%H1GEh(s}s&@hqIo*S(R8qX4|H&{2PTkTQZ+UVCHcnY>&t6 z?lNDVCSRWSqF3+wBRH$*Kfn}zf8)U7bErWLo;@+`_5NOk!`~JK>^deNIzk%2p7vq8 zd+vhZMc>6aWzj3jQQ^>5h+lCY{kF-C`C!#6az}1MP{1sSG0*d`bjG_Un@X;i+D&_I z^6ZVFjvmxn_BvAUsez@zkggu*8Ou{E4vwvfz3segNa(7w$DgN*dcCg*UY@$4Hi;4C z<+vJdo5ZJ2)tzZEMBKURT4cGI>8^6oo$|70eKzs4b{N`3$bTw6?LWO?dvU`u#o62| z+m06Gji)wiB+c{gJ0drFx|h1Aqi^T(Z2=|Y1G~+4e2TlA`(T|>Qj#<|ebI$}HLE+j zr@3+)FBo3C*d#+$tA$LwFRNHZv7{!fME1>C#+_KAlYS=4>hku-4@H&LYC3u)hTCVz z%KRHXYI@6aWAK2mqR12~#cRAKE)E004+x000gE8~|-| zc4KfcG%#N=E@W(MRa6N81M~V2Vb%N)VRd*5009K{0RR956aWAK>^g09Q^%Df+s~H2 zA_Eq2`4mKCoo#T1Wxx&}mSUSer!o;>h)Yal*?zXw$dV#Gft{r%rot`aty(urHqfMJ z*)%=Po|7NmY(ES+n>e~Rvi}J6$3-E=-Phb6j!lx*Htn0{?)3ko%>U$La`06JV9=`f}3Olbb_#;~1>T2sk 
zxibyHUPnm#ce9DJ>Pm}>npC>qs6Rz!<^8lsdl2+0PJM;N4OfEn{| z?$v-@TdZm$72H~9MSC?NHW2(Y7`_LQKF|Xv2DDlP{g8VIIdBK|PRL%IA2T71vrNrV zpjT=V5GMF(W@IO2qR=A(e6C1QXf7K^Vlsd>W&U=j=ma(NOeUGHJP8SgU@2p4G%Yhedv18Zf(6ZI(`Uz zaC{qNDm`?$R9>H4NA5iZ&)`EavQ`v7Zy6GhC$X01%b zLSA|Us}4oVyS&tFG@&SmD(ZR`ikgd}&W(SAf^%qio=ct(MaKv?&w!)8#D_J^lSH!WKY6z_g@gJLLX|xQq1Oq6~w&hvCEIc);qz;Z2)w?j8YtQOkGC@Obz`}-4nyK zS^|yeIQg(>EaJm^jRxxuHsp_O(nsdgaz+jz`=h%3BdAOmR6ls}K&{bY`IRQL!3jGw z(e=L!`6+HoHI05mex1V>;{EbFxlp?eYVvBx_=tY)5(I^r0Oaxlv}5ot`4Pqr2GGB~ zn9^Tcj4P-Bg?_$~ZC94^_`$RC`=E_+%->pScUF1WRL^Sf!h-{{6iX}GbmE+i; zUivjJW!j`d4+yiyUyXAS5Jhb?kztOq*^)$KB`@tY@@QS_L$PBysQ(z`188S;{!NyH zHzclMBP#q@A!}@bCMHKLJ`(0CfFOIb6w4Sw8}37;gKOmw4t)s_WIb3Sw4xmF1ctE` zIxt_b9Q=)31*K~j7rq&KM@+<#= z{>t6YIu>tJdE)wa7lHxfA{UBh1}#%!%MyAN?}CvO@l)^CB4R@gG@OAuG{e+ zfa;0+Id0UE!Y9C5x(A|sIG>Mf)AA8stDBP{Z^%Vg<(^xF>nC$+f$|||5#oXP;taCg)3UeBBKvm>)5-4FCIu#t@r#8_Mrc?&-mOjE_afYr zNR@5?0~irlvdp=h9Z7= z%)p!L@G0WW!~C&0O(5wGsFqf+4b99o|{bS)t5EQ&p&f>tJX5v%zjM2&3=EyI=9yRe5`F^OwRyxSgS>@4* z9QhxhGN_?Vu?+--4H_B{o?!A+Sx;pnHi(Vp%HL;|9JwC@(dC5pasV*0XIkj=dUa#` z5sf9vqP>9|9IV@MIm*#UF+cQ~COC&~C%kkXd@Zhj{B|5~qp?Y{8SRP-x^YVv>HZ8$ z`d=~dV84Gwg@{F9?30x&PL5x~{rP<+os(bXl<=vH^R!YQ$Nt&c(*2Tq3>x>Jei64) zU2W_D=Y_iLhZDGbgqyNJvHaJpk_&S7L2=^4gVPb5$?DrsfoE{(Qm`|0L$l{w@}(@Q zVQK%HK_h6n1O=~R%AwYGVtpAKMH%#A?$CL;edx%ck@zRN$GCq(2Mcr80ATeRtHuGm z!kva(LoE&}9OE#++$7ZZLYET8|&A-Dwat171-7d`}3rG zwVA`Vby2^T51lBTcnjtaf)=$?=usp2HkR_hFS7w=P&X}!F&iDcB*!u_R>GL`_rD2e z!SCs>tzqlv!I)~ti8UBjFNsuC8ZRaD; z8!fnd?YoSVEL}PV4n^YnjAFI|q_gMmI)-mo@<~{Epm(HU(mFJX5q*0mF4l(0xSlIU zC;YLDYcKM|#Vo9ytusgC`hS|oV&Hl{)NZTV5BFm?s0c+`hYr&-Ulf*4{{htVrHtto5GGq7?AzhB)B>Xj^fhT zcM@&3E0z>57XM%=w%%|60G^jKas1hFy^KObF)hFDE&q7pQ8}?~8*AH~()RgT+D>m7 zMdqmawCbts!FRhL-FMA}-tDL$H7wyzx1l zu?%~R{4ovD&~q=Z`^j*ixK`qxg?DgkXhc&R=8iydA_s5-P#D64U2TN_Y~AGEJ5g8Y zy;j0gOqSZW?)W>ofxNo>VQ9On0uR7XtzxI@+Hm1vDC&-XP^=66kRd{VcoA<|!i)DZ z+QL9DjSv&zXu3RhPb~O=#BB!d(1`XFw*d<1-(hYYl&sfc z2C*_UVmQSap~_Lh@>TQ(EOWH>j*GSHE(S}eB;(lnv0!1SFcYYj@ZuXL7VAp!a4gaZ 
zD#ByHIeOrRf;n2n{Q;ODY{0A8J#)~y#WbH!^Aegbpt+Ie8);rj^LuG-qxtRx{^c}p zruk-?TWDTR^G2F4nS=Y>pe6=7B))Zabm*4ur|K zok=n=NwkIe^u9`%lc?^{urv(#Kl8Q6mq|&#tz~dW9fOq$T%pK|kdK&c;9;Bf_xD$u9E$&_{_H4JW6V1WX6C~$dAGJl0qmlgPP z1wO989tCzN(4xT66gi_s?EY7UlS>&aG9<(QZu%MCuYP+WtKfaC39}Z=`Y;>7Y_N!F zLdY0qlk`p*mT!guGZSWZ%zT*jVRn>O@ox7hX7pB@;5lI6cGlH%tEcX_DI8GXm^xks zn${%qwJ2~fg>OjVGb#1HRQoB=vX(*6Ey;dE3LcbX`Y3R_=V6a`kH;YNvwj(iVVjcG)AiET$@DS%bXlEu3uY;NYC=7`5@>y^ zZ7 zdTx}RUb1#}J;r9xV{8^ZEL&mD*V4Ug#zgnP z1o}V7F}dLL2%L z=m#oq?OTE9JIMuN%!&@6VCf&0==keW*tE``n0)GkBLqlv;>{I35 zD*A1dhF%Q17;`b&{sI`!!t)e_@$WB!y&ay>C4~G7JWDYKz)t!yewYH@Ll7Wd2V4Q5 z=Yz1v0dCz^yKSTS<@%Q|KHm^}_Sf=lmfibs{O$KNJ58&a9ecd~hnvmKf#&8`m&fUM z34vy3cl(+OzaS!pRhpZ-1H0RNt13*6Rb>=Y=4)?Ni~;kjQ@;HgyJ-j=10B%CRh-de zn$m~idZxZ5m$~rUt&y%Eh#RC7c}%9Zj!u|2sN=`MvYM@p#-^Q@*}r9OXNZglnXH1IakbhwqnY>#|OvtIg}dPv02ph@0X%Tl z)o(Njx0C93F%j3u_{NfOj86t!56(po)3IeE{(nKO8|hyO)c+Qg35*B-V}Lp;^H)xN z!OHwv!I`O_Ta)IQer?C|du8Snj_roH4`5rxOt_X0sC6(C#!d{%NI4Xjy2$KGop7v9dHAgQ#h*k7mf9rEDgy5-8I=6B)dGrq{+)7c@^0t zuRPl~N3SJ%F_-AubBG>r`tmtrElb%xP&$v0C+D#>iRmVk@e)j}EAs4fs5~vn+nVje zws@Ya2EK>jS*h?=KzTPjo0R%vP(GMI$GVh2&s~sB7F6iS0{{H6V&A+Ll#AmXE@tls zI}Hp{9kyoJHM(q~gE8n}47&1sA06{|fL@qI3M+C+VR^Pa7jbaLRAqg#fW2l67#P%K zXyBy*V6m1Ix5LwzLyAFOae0wXqvyar$BAAuM6!{G_6JV|M|5-ZeTnmtI4_r#|A+K# zymemC#)knPO`+i-vq_0b>OPGAnfhN7DmM+^P#Qi}4{i0WG>j9a)#-In({pM1X-?D6 zU?n@(zodL5&QYEos2_r75c`Kc-;fY5d`ir~7qs(0pMXbeA>>YYHo+sn({DkkFh8Je z7U4)XY1p=-#?H3(cDI{2tZrAQM`(9+3ZkQ3=x_$zqQfr)yg`4PFinLw8=$%cQAk4! 
zq;0z0wZeo}k<9=oY%Y{@!UM>><`^o3_H*^+DV@oPfItxIVy*Cg47X z_w^wR$_e)cb8BIrG_n0mO@6%BNco>pV*2u4guyR*FCt}BJDT9-*+E~jEdjCJV)>@E zscGkwk5UhX_~mfaI08PQ&DG&*b96a9?QQ`g=`z4WBgfSHyra$Q>jiyp!M6(m(eLer zb>OJ!>1pipd-v2$UjnR9_{CnI;Ba|5yehxL?FvAY-$=mEzzNYJTeoj+tZ%4G)U6HF z2M_KdUm@LeB)Hxjs`DzSH3THPL{ac}leN=jhX6Z5hhIR~oo+)B)dU>B%hBO-1ET{w zD}lbeo(@-M&@VXP@CKuBIQ^Zw2{AK?E)ekO6iM*xcKN-YZUIj!6-gXkkI>^19ip?< zEl_(2JwjVhOd-sN{;*1j)8|^*9ay=?knwdvtO@%4 zFb2Ed+a?48_3gx%fMwzUNj^sGw%&j!bT>lh*1#qg^ads3%dDRo`ZgO(DGyD^8(CXL zca760f;DU4G>89Xd<$_lulM1gZ&T3IhVD`45&hs(?;~Z0%P$6HOQ6#RDBgV4|?x$Pe2pzTv0wuw$5At7#?AkIjRfHY-{UjHV+iF&D0zfre!bDhmx z&aC~voh;?-zNAEfT?$lV-1%LG3mvb=M5EV{>RO!7)kzPay zO^^>A|iMd3t+>7G!Y9(S5UxzPJ)W<-utir-T%Jz*2`Mw z?3p=x_UvhU&$rJ>DL9`t@iNkI+=7LV!4Fega7}5@WB7Y`I?M360_jC3DWRcUtQglqtdr zVF~i4;BExc@UIRtJO|4YLvVqdQ{-p*8LGqa6x4UJ;n*A!$Ri@c5&no^#AYDP9}$S~ z0g{6ezK9UObCb>kfip9NP@_{lI_z5yj0+N>4|swQp$G!tj|JKgK#qQVM_2$!UO;XL zA_82#pbadYI~;JscLFH!2Prsc>Ia@7zqD-tt_XTvf57{d>t`~{=~A&^R3ZTn%nP@V z1={%|Jn5~31HG_dtl$8cCqPMG+i?I_IFR&JJ`VIw9)Sn_lm{pRn(wtdxUisZB)uMy zo{mMR0~!Uii3TzPfRs>#7hQ%Hf*Z79O4kOK9}IK|Tre_0fETV8N^cFKYbKBI20Vu?wU`&~xxe!&*jxyEotj zc8#vT)jX&Fy#AlZEch??!wwj7B*L1mp*I*iFVN;6uMc=G9YARW;3m+ug;!=Is15g{ z909*a;QB%4_p|uxIRyTVzT+I|tK!F%4*yjRL)ScXB-K^g&}# zNG1fdk(q53GZKR&tDuk=iY2rj5?oNi4Y8t;2nvRVN=87q@9JW(PXtR`Z3wcjZ=|*J zT{SPrQOJqdW15G5Fg-X^KIeBBMRYfpixK^ieC{d``*!nOca;tR1&N- zvKJORuPl-ssFa9fhgg}>_84Y01QKdaXb(gT)2pTk&Tac zP)M*hP8?bam$2~*SonK|heU+<60!Or;h`bn^shv4vd{|nIhtQ$;d5^vtS!J_Kq}T+ zUl)sqa5zX@kP}cmP6>}w#I03v0a+Xl58>yt5bL^k=$Ue zk!&b35{W22t!orp=eemgW4({gN;>CFFhnSn*JzR~{cSEySXL!1Jf@Z15l)9t;;wO5BVg>8`PkzCRfnpaiggfvfG zTc(+D+?blcR`-Pc;Mf7GSf5WjbvubYYb@^`j z%Pl#VTA5zBZmfN}_5MR0D_(`}eExw?l#VQuxuqN4zTB|-j3PGuu4CTbA|VY8xYZ4FqaOyEl*HGVtWE^JBkGj#a1+V`?I> z){#Ep#1O2FzgGxu3A7kyW#Z?=D?)gj%Br=>ijWeHp^+4G|Fi+nt%yHqf(UWI zit{6ppD-xk&@!cTB&Y0r=U&#u^8RNIUu>D9 z;a!Znf-F5K+u5q>gxzo)O6>%K)a3R%o_Y`5+MVn9AKre_-DmXJxl41eWYc=(6-+_( zuZulo?9F&9^*SCr!8bk3MBPZiUq2Wxo~t&sjJsc4``it5pPs?p_z+S1uFSlw=V8`< 
zL0qTBWy;AF_We1+nW^P6JRa`|O&<%0kvYPZmSYKlMOn7dgeM=u>IR*A1~2eV>(7+N zZ+~MRs$WpBbf{l#bi`{ojweCNBBifdNpR%7ON?*e;kgYrn%aVuk>i>CYoy8qEY5Da zbn@-$K$rW(3J3Yl%!^7fE2C$cp4Z(+T2VAt68!ttSGSl6%WOF1uljuS3FQ-;!IDhTFqT;B);-mJKIr$C>ldrOa7$M2y-;v#_VQH6dxc^Vj+yGFD9xSP zH}s~{_jN4wz3QExwoRIx^$M34pZxWl4*YhEDuXZ&N0cXP5|6hAXPl1qz0+MPC%KS#X)fPA9qq@)Q|1} z1-k6LHPOT5hrK81lY7VA4T24~$muF9h$%lyV$g2vT1QkP5^UFcTzJ`wD# z6jMy`;$CxCXa7{+vqhZuH^v?{T{*yciNYU!DNA$sW8W$l9tjHvSz26pH=p`h(@h;i zL;Cx+J=$8mHC6PHR@EN2m%FS|M8>EJ&d)Q}ttnS@YC5Ymhre5c)~u?o$W%>iKb*XJ z($ZnyG9~FowP5cg+Y9;JCxxX_S|%=_Q}@1iA8ECz-;=fD;^k#TX?G#nGdE>qRi$&) z%#`oMS7nwjl`1*rJ4le&(j#kn*nMZOwAI-$_-e446 zGll56cmhRse{xx%rcU&pz4?F|mHU;H=|!P)2Jwc^8ydsoy2A&Schwu^H4R@`>iA^a z-a!jfsHE(-u0hu$6*HZczP$}Oq^&~_hRg>{SC;V07MCP!PJWT*8RK59kn+$me`9^L ztnBDeP@`=6>U13ytA?KK2D`4Zu5Y?`R9}I3;C=8^4AyBiznjN_gIZRKDUU063O*~a zoXD=cXhaFjZSQ`ta}WDxcQ^#x;VX!TnRVU^3Lr6H7a|t;zn?lI;V%^yqc9xeYykg6 z0<5Wz(1qCHn%o#zOe)0v#r88nXh6PkbKz^$QMe5K^NX1*dtR4Z!?j86S_`=_+#K*k zkQK#(VxDR`?}dQ3`q2x~eOzdGh4J3Cqkqnu<}?8DF-5DL8N_O21y71-S?S2MKgwA4CVkM1(w6>}TG0 z^H}Q9+-|#A5&63v#Lbe2*t2^8Et@8HOcZx=CNI8Be^SSJher~m^cm%`F-${KR}VwmJ4t)^^#!q z4|d7Te!gwua{9Q@s%JCnTI-aCgQd&+D`bXRJIA>y@?>*zEx0t<$5?hfkZ8mU^^P>H zx$RnT%2bW5ne9sR$@2c{M^6NHZZLFG#fQm=BvnnwOg&qzj`h#2cHZR|98z+Y*r>zA zT#S^H)go*2TllhHuCbWxNl#iDB9LHM64kFG=Tq3|X5*RKDCVV{liKxod}?e7^`K18 zt)n@uZ{56f`&<|0@6ck7W^QAyij-JZvS6w{;H-I0 z@0-XNk-^t0d9gy~pB~gm`9;2{_%O5k4SVuy|8*5J&=6+*Z(UuHL0XEZ@yp_db=j)4KbGDsa%-UOJmeVWU}<7_S?~IxC|9;*lYrUSBMo&yL0hidMDTOQ zS>M8u1vW!u0WSdmAku+<+Hw670WJWR6jC{CfaVu6D;meK5TpW4zUJ6*T+qU&0+7Pj ziWrKDsd<=s^y zR9t$_aZILP>+^JREm)!vMb;^vDI@KAydzo!c_hZOB5b30$neu!ohPI_Rf}vmxm`Fc zSw7u(#YtX!EPJEJ%DAS)Ce=xG&(PDqNnH70Fpx%amKcq6=Up^X(Xiy+h;!K34tCz! 
z_=4)Qcy04_wlY3!?1Q^01S*+t6e9BteHmOd8i^zGb%L@--^3>PZh)d@6cYVoY=Vrm zXJ-FAnLJ1gg^V-+W2*xmXn@r`FFM7U4UPOsa3b;|sYbNsjh(UAao6xe{b%P+y?r~e z?ucRZS=+>uc3jdZW{hR5w!5RbFF=SHi8qDM7k8X-JB(4VQiu34dy_ z#+FD33dP#`gh%;%`B2DQ1`ruD36asA3&JwaM`jY;SHB*ke+`6m{{Ce0Wyu z*p;YtCCg{;u6l&>=2=s)ZTem6B93Ld55C#@(HE2BiVSleqrF?MqI#BRbC>?AdkW~q zf&r2mqS`Od7`AfxuV$&4v97$9ht?c^p>5CmRBrs`o>x2A!)aw69NbeqYU7EMrdrDa3Us>E87Y|Kq%))dTv%`@Q=KoH=8RPm}*@QD$k$3}4U6RFFB6Db^AHCu2Z)%t5E(qG8_KJLQLhoxgPd$#+xJ$H3_?e^%E+3YgN=1#fP0H)2rN|`4Ce175dhszTuGgaSg+peM|GxWM~`D^>ZU8fa3A{r*=gyNR=Y6T>A%j@Et z#skDWKN>5N?_Og{cwbA+a#edwdVI9a;oi}q*W)fbgPK{tNjt^x<86_u6f#!rcnT?=Rb zVblDCol}f3Jd}pVwr$(CZQHhO^Bdc?ZQHhOduIRJ&0g$Xo3y7*nkMbR`lJ)~5{YlembA z!bR+uo&3?}0&uDef^8S!aVuXxbK+@bE1OmT9$&uLXp2;;Ol7NpY7p78uwUniy8T>m)J(PIdL4uE~f_tHW-$)k=&vX-sT>x2ezz zK)m=7rubD}dZrINeB0w}F77L~eSWrHnx1s<_4vx4U-_$A_yS5f`;QBQ)qU7EdHy5B z7O;=P1rpu`*$+dBOjP0@JSh3^6bL2m%hY=Hv=j!S*50;?;Scdkzkh#q&%j};FKPh2 zXT2k^sjY)sJqsmll}ATH7ftD@^!=3Nb`iI9*Vq{!Cpe@alUlZSt<6OiE@pUHmzwt9 zZB{P_iow}!;h{nF@#Y>-sP6z+87kQYy%`Z75y z`F}Dx?H2U7Q_-wj=E3C(CurF!5u_hHTy9+ZA1zyVcEf~1(}ck%iCFGo^WsfQIqG^m z%+VK+yg}?q87*}R>D)_ySw=NwUjyWETy1oO(WTtm{q2x$FLWS`OT2dPFLQae~oK?qi zP1MuQ7i#wqS6j~Ia#Fc9#;>4vYDf=fy1HBCPD>~w^t9Cud^*1?0>$E7*M+UTlYI03 zn8(6e&F4=xvgm2}oL0Lud$?%msw*8H;*gWR8OTSpE*==FrWe0hBc%>$(DA7GJFaqn zW7?OJFTNl7!r%vL#R?9jdC@Crf7iJ2T&|KEjo!nZWsy-EdBEscyi*)G^3ksgTHYu1IJ> zE6+U;Y{E-iw+hhp;u(wiUfzDSJ(+0C+&AW&xHv!n6d7q=BJv^h4HvCvGQy+*rT<@wAaf1%gGB4 z)^oG+BU3uC(n2nibBK~+8RlZE9c`nyT}jZK^C>n?-By|uv8J4R=GhBrM%(*fN^RRp za&@AKUGmSHOb?can7SPlm%(lX;dr%+cidgbw$FZWB)Se&0+Dsm9RnV)uCc-+|1Ok+ ztom!cRm@IB%_CJTj`w=@JyQU-iXYvt`GgXB+E+8u(lqRo2#&&AtEs7Jd+OwRo~cfc z_*#Z^()?KLv-i##?-3cDp`E_n=~9hgUX#PJreNM_zn3exKWb>x-j&^|ve(WSV%blA z|1HU1X!m^VN_jOm%bf+4pxeMya<3h|HEsT@#G2bpZ$*DVw|mfG>V{u&&)uT-)+lD( zT5(iPnEQ7BKPl+wKvAqc+<*WPD?tA{1)b^tOF=)@f>%*lS?yzHfShy)SAVAv6~i@& z=!+!bdz@I-Fn6M$@|wm#hjrgsXTpZ=m|?c21P<2=Xtb=@;(f9HGK@wGjjf!zTR zcLa3c)WvBBKIVtH-QvIF5Gs}u?vw{|hutM?xXaGw{i__^0vZ~fOOyQqesiq}jZR0k 
z`0H2O>M$X6qYT|L3XZ?@q++&GA4Cf@}SU4*&(LT&nG-#sy{-e z)s!f$?9oQiQ5`UFfgF8GM;+vp#v`BYosTacHziM#&0J-HHfHOe^l;23=(r$`@F^ok zM0~{daNj06A+VYMT-HR6#vsHco=(kvpffvDX|r`hdI+NyRz{dOpStG~APcTySndG`E5 zug|T}*HHHt>k9mG7wXqEQ!XuMiIz0Q0k_2_``R2!_k?%yao}OZ47``sN)48O|D5;f zQty}SDwE33TY~l?gG^pbQI4id`O8bFB(K%SsEhn#I{vNxT-3v#;`pJ=|2v^1Z{3%h zP!W8u9-kZ3%pi~RUO+-43!}f!E>Oy!bt>9d8L#(K&EF%-d!i3jYwXe$G~_i(+%G5% z5~^8FFgPSA!XaapB#_jf5a8wB{>c-AP8AsX+8{-wlAt8502edP{@8$7D0v>3*qj-n zM$wclrl$|BPJD@gUUfVyFE=zDtFQ&ks<$_JHIjod-`yKKXGu5Lo>kVb(+lk7Y}NJN zO*OC}`o5JTFmJj~>#Eybf@b%aKeq^{zQn`FxLK?2G^HvH(ZPR3Wj02Y`FlY zWduE^Xjos~Re-R^qOe~FJymC9u>Y-$lsb1&7k1)3u@-8o7z3Z`Z0u7%EsX~wZOk;{ zw3qW|M9g3@?FY|Ldw5raqICeKFsqs)CE}tQt%TVbJ5pdbmWfyUqmA14 ziYeeBzU;LooVJB)t|El1xfI;0pFMdN!v8NS5FX&^lStPu%9pFIuMGCrjAd|-7{Fp( zBB`FEkY$pDr<4(rJj^N>wv#p>*(NA0`5e`-xlj%sNLb|?4fdhv8UcZJ>tF;q5tmIA z-Y7gQgpXT=bh{jPl}u*~&J%JOu1vXh32ue*502Xbik|!XPSzmYQ<>K-B7m+iun$JV zbTxY?Od1p;WDWn+I8Aa&CQMn&&8;6Ph>SAH#&|;|&YQM<%73ChxR%x+;~e}jB|C5} zFElE}$Pb1>tsgjc4L_h&r{%^?Wx@$1!YiL}x_aqI;HB(np}6cL`}~b;k#qz!C@#>+ z*+t954X4t3zjKilH~l|_GUNvf>n;UDXwPQ!{L=ma+0FsBvw-x<0qyw!hBB$$16SDZI4yA%2O(DoFc#t+{aVJWmY%-oSl;|~bzp=u z=P3Cb=maq3hGJSKw;+0{L(;+yjwesO0b0B0^>}`M;GpG5*{8-naDXMi*;9%c4AITFN=}muC9Q~BbI#+UTEJqy?+kZtUiD7 zjKE*Nay8dq_++p%Z}MY$ZFl}i+%9`-Bl+MCH-ZWZoaWqgzL^LB|g}%SRPm z(!Fqga=JLzTX5{qE)&anEehi13cR?G_c|O>`npJbD+S($-tYE@8M@1|9z)hdWeiEj zxB}_h048b2{$~bom5Rt3cbd%*;|(TO}GvR$UBg9 z%S|XS?r+6 z05j8KtzG?98DPl$!42+U2SOWSWKGa-#15PkG!!=wwX`TD(JD}G+g_;k?)v?*T7e+hg8Iesra_{=-!9a8DW*~%<03}(_a`ylAQ(>d5|&>FL<-E zY?GcIikReeV+l!ZY)aA{mZKB6H6O^K;lFj6y-0)VYm0nr79bt{{2b}h0C z66{LfY42+QAYwaXpf8PN1vCoZSaj&QZVMwsl5~X_p=r7UlE~1bpHnlaWAjy^3V_$# zg2Ts^>@^f|?^22bxhM91&%-^FSwue;B>7ad-@HDrrY-ha;mff)pQhtU z`x1Wsqba^6`#ln7-Rpwuq9OtU_XUxQf5zGLIr!P&(x!e#z89YdkqW(^g>AL6#y>7M zhexPS@1d{I-;@x*vgysTd1K^iH1PUPP2w!=9&TI^ap%m+nc^kgu#HmKW3NN9s~2j1 zr-}&b@P{}Qai=dli$ZB346(uFz)kv|!~I;uuie+Y(TK)B_ZM{wU|H3Qb#4@VX=J~q z!EF&tHFrGUHVkj589!T_zpan!%Z5p|nalvVG4Nqi2qd@O+NW>%t9zvbGP=SBd9r!m 
z_V%iEA|D&e@og7#On1Gk)FpP>=fvKbs`Q$$z2h3+t>`v+vwcK_7NmGKz%U7h_hy;mj%`km||BSIt5Gk!+mr-D( z9gL5>H!5$VpVGeR6!-mF?bN)d&%wS%B8Rp!iKlw9I1yAkT8M+-UOq=9F~WOCYvXci zWCIZClTIMzMn69hdnzU4NeCx)ckJR_ggRmYX2K|0En9wM7wN^fG^%9$*v9b1E=%SK!dXU)6bUi)4J971b4Zuj1~?puZ+0E$ZkZP|_|gps?`h%q1a}WwqZ_ z-6=Up(;;s$iE^4{Mb7WT)o})Vg{y7z%Me`ch!&U?ghA-2`p8 zwH7L)Msd>+9U5kvpKzr+Nz_~ymO{6b86;Ht1d-UJzpYbuSP6{Gt|(SlP8R^#HTlB5 z|A>2xWPk=+r&1kGKg_PL3EH|XKg2+m>BYlHT$}(|7zcHRf_4EJ7#Bi-k;iz<4cJ*3 z19xyoecV)dNT0p?-2nJFYvT)Gxi$*GK!_B@ih>3%ZoL%;RO+>c2Auvau#=SNMj%zCzd zt_>srE%_uMy)b%!G=h%7g;@ke_aSJxHTj42v170xQ!uKdRSjsEM0!R{s?O6d=R^eO z4KoOc+$`p_mKg%f9(BW@xOC#yCDhLkxJl6PcGr&O4%7(kGR1U6$829?Xg)FM=X{X_&}`R8 zxsL(rnG511+yWFdUQW#b0WLeJ*U*c#k?somwq+;D4thP)yi{DXG>5ZA2P$`VR$@0 za}?!M{m(KQUv}m`#ssVpoKZ{s0+5)<&@iT}=BbN!LrQY>ys4wK3faKzWKh{IigLgbjgC-y<20adc?{?50amnK zQm8feG%1uzknXtdu5K_G|FKEOyl{$G4^%HQ*FI4RrKe-%+u-FAy66GG={#7k*lhU3$D`Y#`85Y%xCQ+ zlh2w+Gy%<3#tJ1qczx$q89opY34>@(*BUJIo3!E-f%TaszUlQ#?FQ0EwAI8vB#RBv zrMC2TTXEH{fBC4a;M>nB5fZU1Y28pfEzA<;E}%RkQ&!^I05X<@QgS|xL4Gg)us>>- zvF%_>(CV{6>8LMFIKySrm1ekuI+W1W7UWb9RYsZRlZE@7QR4=W zt=82tH;EDAnsMN#0*oaYN}Z6I2JfpvXF8cuzlNsojZEHssLlSe(|@)CA$yr*0RJn( zB_L}8giLZMbh;DZc>K_S&r+^C{D1;Y86u{4d(1vVY3c&@&o$*k9I; zxU4@AmAbqzWcjHHqa(!0GU>AJ#s)R_+NpV|c84JUyYB`Zn`7iMCHZwbAu#GkccPZ`s{2?W9LO6`wcS?i2BPRv<0rFMUT_IfhD;5QEXw4-$r0P>o2{QR8%E z(}|&>b#iW+Syzmhh24=0J2B$poey^&PyE)mwA*b=Bb=u`bj3(Qi!cXDlxsb87tX(# z_q!-fJM&#(-j>E6`z`M8zlnDIP}Vb)&hM+VzL&=){`t9Nz32I{E?00u3zi$jq>K~MH!aWgP%{txCj~3jH+)J#@;HiKr<@_zjhn5TX`~@

!VgoR6t`h2D1YKVZlq3n}>o&1KO>$48!K{psog|8mf zy8^Nra~Pi>Pll}Cq$2y|q;OR}3hcvYd1>1p&l9!l+e^}6G zjWrHDeLocm=Hj6E(-Fj3FC7jQVF#-IZvE<#Sj+RWKyW$7TV9377&0c!+CIbUr4Tv&=J%yI zh7h8s=%_|228LORs;Gc@k`icLOKD9g4T|{?7b)~kW9GAL?>-w{3+kZ0Ipd^lpN=i> z*^W2fv+e(Oinsd*xlFM#!GQcII$gbLxMpO+p=*9{qlwoU(!e9aNmGAkV`d>^R;O1` zy)7asqtKg~cIQehe|C~$cS2m(@VIVWdxAwq`X9EZSW+4y%-`7s8SzV&7xJq`6y^)8zxOd6`?bA% zx)f`0(V@+vzBeR^O=NU6zrTk9)$`SWeUI&TJXAxn&QRmD`;VQd=}6L|NwfE@(4KF_J&cz#VZz=A6*3zz~_cCBf2- zqbH22aJ|Y0m70H&3$K#m_ym1>a>}#AKFRuHnoP}arlvc4YB68``h<+;4>}7Y%D;@c zhTb6WLbc(a;a#w@G3P+aXB2pig3{oZe@Qca{6D6g-5G}+=j}l2x+A;xL1*4+>R$4k z(YNBIt==;(UUXu>=D)Q4-@K@vu`$n1jd8H;WT5&$lBNSy3KM)UCH*Q#KXj%Hygz#^ z{dYt}n?+lzy}vKD&YRYB&^`;N)(bGrF)!atHHSshke@0v9DJ(SNT!yh2JNNG468n^ zIfHn^I=w3mPC4#ZCC9L|zp+*`PRig~ za#tKyULc~6F z)MGb0K<9t;0^1t(B)WvtY;3|6;+W|q;TuhuA^%{uD}@=`&F-kfxIN4>y9O zD~d0Gp1O`9Rt$fKrKOvjjag?GPkH$I&BUo&HBsv|>fRi^Sa#sn8FSaguQk!ab@T0R z(CJy5+8)06D5pJmb)32fu6cKatWCT0S5?P5>k)iHvqnOPDPVm*xry5?X6N)6*6LO{85rMV zge;4d#t%b6Au`D<0u(a2HI?$MOc%k^E6lQ1t`4q=lmN#@V!XKYP0j4r4KKyRoBpTnWR@nC_7^}o(dNJ?uBJ)0&3&j09<-8W3 zhzSt?M(LJRB4cX_hX>)4lD#i;gi7%_Nb0{>y&n`*;4RxhmxI3l4BvukU8V*pjTs!& z-9VG=waN0kyuTn;wmjs&cmFts6_dIAkcHy|a|{JS2pc{+uzVdvaRx~JvO!9oXx*7% ze$a(nE%=@ZFY;G6tq8F?xCIzit4sOq931sUBVsOQ4=>kMtoIoIt3|^Jbu5%YlJ|i72p1k#v?uAt_8_{?4x zxt781)zt)plK_LBpx3VY3@@u?VVfL9EKuP`NFoWKFfw8ifhBDl zFo8nEfAxkt&t(H#Iv5*9Kg2+>W!%^`zM3t&QEmB{;w4h4FHuObvX%8Skgj5y%OogJ z*VvJP(pe*pCA4hF&6>crg=4(cxj59TyXSnw@H}dqvujSDnKNkVC@gs_0OTGvd>F$G zWDF7XZ!X#5U8=A5iv2af-WuCU8$*j?=H4FdnwzBF>CIp62Jr4WuC3wD*jx4Cb!=|4 zc(>OpoZcK4l}Wrl<*m-I4r2g~;r`r%H-^Hp=r@KE$Zg?}u%eCp1@*M^s>X7Kvu zuIoc-*0`_tTT`{K3q7-E+jy^acQL%# zM;BJ`Y3d!DL&=?4M(|}TJ67Ht&jjm3AQ=|$Bkwj6U{9;uM%WW^9>uk3QVmq}!$5y#FScuO6(Q!oi~K zi<$q7+I?(M2e7Et)Di2F{b7jAR}%eh`^lE;M{==4X~9&aNaa)raJ3}Ik8jB=&JW=W0{wFa~4Ml#9uId)ydv1vM1sI;wpzk-13et@n zDe!&{1$@wi#r^6{rCSeQX%YH!6^^^U=e!>Gvy-;d(8s^gvLV*60k(UMT$;r_i0`1(Y7Gi?3%pXCw$mgt|3I*iw>$*#~J@Dmfdox_ZPFO?5) zR*W*2Nzw75_EId%W~F;(#Q^3{Hd}gz)>Qfq2%2gHQ39AU$g#GB=m119HX31+z(p52 
zp!XKe2Xb>7p_3@6vE}foP^i=-g0(<3E%PD*m@joucSeR|RJ*i(KV%Wyy9U_a9^h#&of9M2uVRnP}W z1Yj5$Q^srS|Dtmp_s?-Ih<|JF|CohC_>u+vEs3>Tz*CtK(`{Ul0JF1{J9m>6o;bAq zU@2e|+RwCSxcKI_?TA9tk>>+_x>Z(SKOpGu6!_?{MAz9ca}2$0`>1VN)XdjwH8^eP zbsDC^d%J}{+O6!*t<%D8IrcZ_^PSGZz>=TBW>s7y`S3RP|HYTS+&&`OAe|K9Cnl%+ z;!cd&73uO3#TNSTG}Y05Na)TT7Yz&^8%LDjezdciG}1#PUruQ%8F^6&G=0{+;RzxbWCgOld`!c7#JDBf;77i78_>{z_5{(~9={CE!Cu;yT2}RA$5WmTY2{k@3?pEJ87r$`cyclpXfV{TvxQ*7`s)D$8G5Tw{+-+xEZ+F z*jjV6resINM#-ao{D!)@AA0)DUjoBcH#7sbBDrIyFvy&%eR9NECpH4pkK`H*NrLs? z2a(} z;-lfY5odUq8knnT+##zdN3HHF5tEeCp_aICgZ&;+%^%2;>Dz0D336-s$#=<~nvvPE zOtWQ(G?l7v#KElfV3W7ONgZdD^iZ_mMT25)_wS{V)syg;$T-)zTElVKAtAMgYu15T zlObJ$ZbtHqJ0KXQrZ3$)0jslM!GO??Yh(NeJm!^?aC&5GN%9)k{fQn3K|bd zNA39&PaNH9MsWAb(Lp>TLMY1{6O+3dJC=t}Dl{k!Cvb`3CP)h3g!fU8@UyN%JaL(f zW|-c)5c23o`Gu2?M5`@**Hsbi35&pK14d@C9oc6(m;%p0m!2*r(>NH-?}#0R30?8d zn?U=Rs)AfDT^GgJHzW439V&%l1*#Xg4BC?B0_!VnXK?SXFGnfEWPXg@NiyfH!!i9R zv09kDRZ)G$RaX9?RbmCGx(bi|oo&~xJm8wHtv!UAt}Q*($qx|deK*_U`nS#Nj=iYX zxcI2ry1RTwQjz2SY2UyuJS;=^^~AB~Se6_u&AAGWh2M1(a_Q-K5xahOQLimLuuE^v zKU|dDT)r=VY8M_Qxg{8heM6gk=ArgUja9iTskRxbBBH|KzQM^X!$8Vp^ZXmZ)q`-1 z;<*NWc>{xZSt1(S><7CxO$fIYy#7sHsWere2lE+kGoxml2JoTIqXU+KDT^4sd|}i)$f=Z28}}!lj3vb) zIVTQ9=xPvt&n=Sq?KOA_4~PZe`p_tL6v04?!+-{Q4_B0M}c_MsFbYI>#^K?-3f3!L|1eEXH z2;FY7F`E+{na$Ppsy&9x5wG3nm4Kf{l*y0u5YxOQ0pIDdmUD_cbV9;ZclW0awoYxW zjg;G7va=oC^(IyK;O(xeIYxhGL81nbz~uXt2D})n%62Sxs(_1Gy@AqIk*bFz626)G z>+&?6h)esslXM+4_kn;-R*YZ-|LNdW%-5Fznz&a&csJl(1OIxjEQ<52p zBXe6R*BWqNw4ja*eGKXoah#zk1$n~)7m)%o($nEd>A=OmAZlAdsg|_57O1WB?o8jY zMw^0i*zX@QR3RB+M=ljucXP@MoN4u9Q@waH$I_O|90rCDwn}Ws1?Sw5GD_fC(S8a0 z4ISK!HmKDLRc8T(O7@9TN=|x*n zLSGsNN(#leqYB(H$Zbmcs>=h>Hdbc;>?I+}RECkJ)|g6}hS&lXG*%}a$byJ6*6{tz z?cTvv3HOpJM<|1dRA~TR37bWm0KSm-FVlM?*;T?6)h<4gWF{E=k?Eff>2@2%Ul}ggGCX$;$QaI}yd$kCXdS__QcusYI?rocX}iFl z^xKr<%ajmp4Xn<4s~vD$@nplrLUaAQTViU!+%3MIs#_U8s5g$`A{h?U&x(qgomIWE zOiR|EC|`4-A1n?+|H1g-y0tv14rFjV>fq`{kmdLY5!_M^#9h8#*}d?H_V*-o}MqLswHYOHuDqVIg3$c||f!JMv4 
zS?uo3iYhR~YL3udhUrpUtdu}94hVgNIdU}cVtgMBMSE7`ED;De6jePu$|zLbQ!JC+ z4Lr#u(hhPsagYMLs}~BUOIB{Bod0(LV5)+=QoNw#-$$nfplP`&O*c>D!_r}HZkU*w z57HC8aBOcVWk7i*WrP7Yb%~kpjC;8S$*8ftQio>Pi8=#6 z^Y`-$&RL(V3nhLXuK=ds1&R=+iQ7LM&$MmCP|~IrdRll@KUsV;T|tcA(Rk26;!B47 zlJce1MpMRxTd`9HU%T^3F@w0^!NERFkHgKBs{PI_TMtL*jjiI4tNqFf zy)Ahcu+37Artyf2#KEBkV@Y0r-&Oc9sj~xpW1LNzWWC|!-(0bH>0-A`gubgpjC^wY zOpC;=GUZ{=Q5)ukw2>T#^YPCNhwnuxW8qu9egx{{)t5y5^6SPNDkz$5_J>B26A@u< zSdMAEqZ9zv(_Izo1Nx@KPHJw-=3I+|HNxv3Zk#RfXaHyogU!L`B@Bxw;TC>*;V3Nw zRn~VSh&k=>BaN7vZEs|IM&cH|fcRhWvTQzZoXO@V#ycV*J0dA-lO2(hanF^fTx7hd$mCu|9c#%@!SX+v3+d(5#J)< z+S9v6qxY^)>ZZBvoqcFzQl{2hZL7K6q;rW*8*z(EqZD)RpE_i`F#ZPrZyvShZTB^@ zaCo=PJE6N?E8~|}_jN|FT{q2n1TuYA(tzj2w+1+UbfoK{sr*RiePaQC*JC61m}_iW zY@}&-%!x1}{nPk2t>*@^de_`kujR-T+vzru9-EcynWtu6Hk)V02lVI^7-=w%oo65P zf3~Y%JvCqL$EU*VKUBMEo)p5nX^OA9ucxZBCZ|Xkb@xpli0Gv!dg17)XS+eWr|Gu_ zru61)My76UlG0?yUG5uEW~ac+-TJ3mo%CMcut%qyxCdm`+4SXyMb_=w`Iw=(7kSOx z_PwTUV9m{%>@k?HUf=zd?bsNORvnvWE^eHfOBY~`Sfg3%=@}69GSQiYfQ3qyAw+yb8K@> zz7lef{6lK3i}PrJ_JN?$%RFS z+e$wmK=8#03b-LNYW(Ddh;+cM<_d6pfOSUp6j_!AK$nQ z$aMY0WQ@Sj*dF$N8I>Lh(lanak~2aU`WHdrLonmp6D^1pPDS$$;WD=&Sr1Kr{x>=) z@@+)M-R^x=%)-NNls_Uvu*DGUps=G`UuI+j()LSjA1lmCQH7hqg&i4t zx=&)_%##wf{(#8g5ySD^5U=sW~0d`wkp*|3Zw>sC_}( z_yM>6>f%u%0ZYM%p4z!3uzoF%yx{gLYpOQSiI zEnl-|D)-LY#4|}JRvTKmMA32e7esYh+?YsBcu1hvFfKvwY5YjBW*&|gX3qk>!9Cr)}dI%rsvXYEph3CD8X5~ zuUnQVCnz+}QeW4U)9LfbVI1w;I6sGpC8?fSyYSDk)zGy~v&L`QDlmEz%Q#Wsq+8GR z9HmqPZB#B)Z|Rp+FTU=ntNm17{ZrL&&1v+qR)^JW*{&PE90t@>GX?KXO|eT;Z=Bo` zZ^ZK6_m@-)^VOS}oNDJqST8m~ThHl{3xsr+CjCc`NQyRz>@ah$>y$hzfxai2X25=) zZFT~Of!6uA!#rOj*s4LjoJto0EmgXNo6QPyok+;Nj(2EZ+%#ZaYUI2ctaIMxh$Jg=k&Dtod2CC!PyFS zVio#GR;+U%xIsp-1#*(m!IK|cPCXb>qDfZb6UGqldhe7g>1+)yPP~fm?%|WneCu#Q zF*n;Xks2IrXK=<<%o!ZL9#*(t$mB;$r(F($4y#E4j&8YV1x<&=8Kp08+sdo5U9h%J zYm_94+}&k&eY<@Wv;(`<(|^mZ9f*z}_iUmz*U?C#-LJ2dWO$*EaoZ1@eZv;hZsbm#xQCnFv<_yMAVplH@?H3jG$xeWn%VgJ z4OXMX%#I{C%5^RZ-QI$BkAO6nTz$zwuFb?;vgs?}H>s$s zYno;oEe-Q8+oi8FN^^P}mBmCay_Ou&$pR|{zA6Pd;mH~Ai?G5hQt}#-)ge`KfYhXi 
z;J=J(zzp%$@qZIF;7saaoPE`fxdIZ9!EU))<$~=V1w?G+^8Ow7;4z6P|m`rbE zNd?~FF*>BvUV{v|-e<>*E^^0w3@&&u+~T{2*Df1sz}z)#-hnxIp6P}L<>daQGYO>Y zn7@fW$PUZC7{wy#Ls8xkKWSjlHp$=n1dRm$6xeY6yB=*fG#TI6nF zUIt|>YueY<*oVWj?S#3!-5lbhvrN|q^7z~E+`6X}>xwf84a)C=w(A!xAq`3Okqu1_ zM?;3@W;WDA(;$7ljE44D)eqFL%C)%4iscHX$)K^%N{SLbX%dnlj2qpMW|IdRgfPfa zIf9XqV2W0cdWJCneLA8fRZHCWl$Uv(~nDNOkIjujX-1=U{`yMh0J#nYE7dfEjb%R|56|67DQ~xwmv&MKs%=M zL;djRIxD2*)gR*2wxqE+B{13R{B{ScRKr~u+Zbw>!GIIe2S-zgSLSi8ildsgfb*ij z)a;Vo+u$Xiz>=OFglxJ4J6*QEkt$dHhHE_;`0cI7sCuL536zPED7Frlvjce3wH_>a zjiMtIM-(t+rjNhI9Nrt!*pdO6)@V;sj}XsIi=f*<_S8ijsMM|EM;0r}lFwD7;N(*1 zq8$I^uc554{bUF_DcY&S%k0=S;xIOoF@xgIl^Vp5od8OYNykAB1!NS@oAZ+Ll16Dz zpv`=n{!t5S@5@h5`Qu?~n)>rCVzp>Yt`nfm7Y7Dgd?22ZB1Vr4gEYxy%^L1WQz(T) z9q2jT`Ba;q@sG{+^+vHCr(56MTprg*e8K#>qpg%gEJQHlJueEnxHmn+UzA8G<;=;C}Q#5$nr3Bpsy8&~?O^u~RCu@Jo!8u=+9SYr*ZHZ= zWfC~oyL`mzbcdO_VGq5<#p&-f-+QiG>v=wTxztPgy4cO2Vc$ve;c=OZccJ(0srF8b zJx*gZV64;s29A0gY5Zf3Qs)Rnp+}u*O>^V=VZt`f<)d*-4hTagJ09{nU7N631l^9v zNAWP=JlM6bIHAK-K*p1QmF1pJ8WPQ?Zav{K_36NntqLiy(UQbQS+YuW$XfVq6|0eQ z?*2+H2^q4W=O8w&Vsu!Zy*R6ndr)HaR+kem1KI8Yxt@8=D>#IO`fg2@t`wgz)0XXX z}v?jq+%gI5Y}P19r;IvQEtO^%U0<9iX31&k2@foa)9vmJQypZjs}8UeOYWyFIrkKz z)|c0kGFVL6sjZhSz@eh??HWI*{|g^@MBPq1l~|Pu?mGkR0kEK1DUf?jk+NGvw0H@s z;iP+V{mk`Qk9*Kv+|0u0(T@AbIu=+Z53TunV-TOb9F=ZsWRymg1Evt)s>foB%8SmxxMkfLB;UwZbns_S?FvJeJvL zS)9?@S{g^Ib5aw>s(lj?ZmFDO?Haiu4(jdR)n~AYT-u0d@N&;{eLwEV?73!W2AS7RzRWg5 zlFZG9mFSZ$CFN0AoyVz9)-%D8&JSPi1GqNy?0*19K)AnC(p}t>wQW}O@_4CyDBre> zw&z|hH#f)McV%CH+jNnqqQLxY7|&M#&iN`48iGGcRqX1GU94V)z@AOVF#M$P62dAv z16M_$Q))O?CW8Mi(vmCKmFLN0TS}adiV;aA3Xe&{=+|tbgOiP8QlI(MpuS`|O>3|& zppgm;9%E0xLaZk!kqaQzor%_dvr6p6nedHMW{yF&#javMc3q6-B+I^p(zib0(7)f0 z`Tx@jng8}y!*K_JQx%h3?2kJ1`{TsqMjF@~j`)W0K=%cP5P>gEMA#C=+ z^Y|ieXbz^SAnOxAvhxajg!$Y%R8%HL zU6&56R0-T&?FqPRuHX(YlG{vV9{wwY_Z!t|0u}J_jkKJ$#5E;{>{!j_Y{FIB&2!M{`*1jxl7z=qJHikj z`dM&zf+o44F5qO8#RnHNVy=W)dN&5#Lic^tVIl+LC}_soS#53llr9%DK>JcKS}7a~ zn;8aIYjJdAk$gtpG|;%FtjG5pn&|U6! 
z78)el_m}ZfdlU0x-N0ngrS$_lHR{jMUF6y${_wW&>0ji`t2c3dSSg$=qf3)Cu0>Ey4~w5-z-4!xFZH~>W}4nQ^e@|UIe z;mf)CZhZM6iyyqWN3DGMaS5N^-Q)gzNfdmE$*jSbrVAc|FEQvRZR{cQ#gHX@-g+o} z>AR$UzTldo)5hp)uw0Y(V<73Qh*kz`ln-zL>rfv4ga%tp27)YoqA0JKxL6Ja9Ysf8%wMO*t_Llmgp0+t zPH@GcqI6kiWHJosy3=@Lp@{{`*f z!7z8an<$@KA|2*Ni|v!JcNAM{?Hd3LX(g3eVICC^hl5%`C*4lX$$(Q)1dQj9NT0ca zi0LQ?x(EipkqOtmfF^xFq%{gY5v@wX{rz}SM^{)Ea^Ct@R1V(VvK&wZH?_jM(W~Ew zy!x{T0h&rztGM3!!{cJB!3`H*X4iv@v1=F??`FB;qJMTBxR{#lii;O}GcHzVd5w#e zb6s%pd4@YKaNpL6i%VS@7o+DXxVSuzadD$-owx{{r{H4yJb{bCt_m(TcdZi_n-pB2 z|ApJBw>@wX{88<=xH`W+T%;|2FkDPA0WPkr^29}G;}~T?jrkI-sBV@jkt>j%^safL zwDxs@h5j(liwP;tLM*^*usjl17mB5g1cz|{@l>r=0Aj@|21Gn4RSH%G)b`9`Fr*B1 z0m8dHDBC+Y^nZGTQLrpiF{_^wEQmyl0tm=VknU{}!HeS!mL+0nmIp{26bBaJP2&X* z!u=D72@?kZ6Fyz(x)0+m4?d1?<3Zs9Z+Ku7JQzA(=0W!bweTQ$fx?65dNL0d&99#a za}PcY55^mT2WM6g56E6@EMBQexK=^y2ci7bQf|0-X20ZTa3&qd<%)Nts>E1eDV0W^ z0HhAa7bzA>hRM;OD1L&R@{i_; zCrogEGBlFv>HDoN_4hsQq(|RZM{?hvuBY#}=hol%D_M2-9Xf5mP6z5@e!ivacO5(K zr;BmWLY{Aue>@z#D+S7r@LaMVK0+^cVQoKE1$GCLghtCH|Eus2dh7=Nq_|upKSjVH z5DEL;)b+~;;s!U`0fH_r696ycrGuSoB{?8eSW%f!9bLF^YXUT4QfLJ9EHn<=e z%eiQ-ft$+O2-8Z5tvmGbKpez41e z@!<}qReLalt=eN$7vs7}ZOwrOzlVzVnGrqwwcB87Y{ody*fMxh?XaovI&#LnhMb#C z?=YM#OSeMl9aoL7tq^ft8ZE!Q9JIy?uT!nTath^~2V;-@bsWo{nT?AY*!oT3t%C)H ztI^3j#YU?gd@KwryX%bAWNN2KqMe~+SsVSw<2|Gzu7)j5)}7;+<%n|tJ2eRgusw@m z0K++RvD>-5lV~c6kq1)NVjkyHuTg+(ua9_PGM>NnE7E~gpa(w8!=PTX@OMXlS|c!q zRN>EnZ-G7E;?I6lDZwe)>(8c;t7JavUX$Ctl74vN;_|{ncrkQ+v6J3Ya3iO`qz?!~;13#i= zGnOy)cX!%;@hQF65N>dsLb!q9js$K{K|6#kA$ z7o_!_c%S^^4~Ys4SQq7O?RC@D!f7i`^ECC`bO(Doc^Yz#QZX2@$oRJ;YmOTeb;q@j z5C@E{TTD}{oM#vhc-$zCSHZ>jX&pRW9N_p6xaKnO+*#*2lt(xm-Yiu=7pQg9u%!8v zUva@WF$~plFm%z{#{rnx31&wH@Xx0sH0|?hg+u>)1lx9~4=Cw((%D)}4*{D~$2Mn3 zY&sT;!7&l5Lq&@PXZ9kC)ijua{(|FG@@mOAV7ZhUf-BeR>^6lJ)psh<-N8cDahHN3 zIrQlf^7hnfVS?^YcV2Hf=FksQ`eiJ!jdsw`6EX;HuCVi?(OURbHX&ZEvj5H6Lk+#7 zT?gta^;24uA}@zur{(Y<>vW#^9_&cwE!r+6cTPL>KZSFT%uir%UWVRSjyv=h!iD8a z!Zp6qpqWT!-${z1FTZCGbZJ zN<{+jg-H|~d07p14oY!j}d 
z0Y*rmiFme??he9VK6wb$bqik8TjjS_#BcRZ-zV|yP*TT zw8CeE`?tcu6dKd-oy?W-ps^~xyA;|w4lTf+MY?`pN>NRfy90`_4>)XGo5p=G0Bz9+ z8*k$sqn*4%Qs*aBDkiAV!@+v%G$%a_$DyKbMbhjRbag~fVJj2i^3n{g zxhut!B2~FG_&)6|>U$(^p{i4wM4a|4%;;zCi;=Ri5QEZW??r1<9o+#F^TPj?Zo!Ly zBHYQp7L%KenLZ##ATWJ`rh3XCa>np;po7jI;TyJv=4jLxwqWF)v8iK;oqe)I*c&Q_ z8=;nq{wD~z>!8pSr=WV6U}}J+95qqYqp3M^%)k#OBM(8Q8%$cNe$3!cI0+}94=9D} zH94Z_`vG=!jFd-qfW1EkBl9qkUKN*z+h?vKgXge*B7|S=%7T~v&<4o4o0Uj zR>Nof)M!NWaW*ZCS^#*e2CJ*UpK7=k(5j(IOp=)*b-Weem;T7+jUisc`|x59pE+Tq)2yFtvo!Vo+lbV}Zwo-jGxrrfI_oVJ#1 zYSd=QrasG2Y|8Qo^}tVCJ+`T?!ltfl{v~WGFvlaC8nIQjsbO24YzlN(Yc}<#&CS@< z``a4Xl-DNrHkB6XVpHq3IoVY9CfTOOMYh$Zex^Albt*QTOe-Hs*6~~1*iwv$1Y4Q(4eK9)h%cq8afQ&c7g*VU&@eeLb2Vn7 zSmMD9%V`#-${xpm#sz^c+#(mA2y<8=-Sr6O#nPS~ENcipx>G7vxn1qNSL#j5q3}uX zCN)lNJS!rO|Q`ez9@FRmxt_=Xd1K40ev zZmx#8!Ofm9ft$&z?h!W=9&9siKJ;peo8(RRh8w}*sp}*TugVl0&O82tG1T!xH%c9k zw2U+^*2DU56kOee&f!zr{6TOI-J_#?j~AToks$6-_)N3+0DsoPJ#G$#SkZyUo%S<= zfk$HCuuMpMJLg)E2hbklvWp}lZRJ|R{soi>kgM|XXnL^W@`KszPSe8H)fb_b*NKcF z=E3mdko2_Q@GIF0Xl59@?Ue?H!v>q|0FzpI`+pZLxb-`vT+3`7goJdM{RlO_j8-gs zCoM(W&-!7>-em34joyO$OsDwOF}Dh!QYrF8s@a1LL{nG-G2cE;6M7ya{QL{2Aa27| z*6&9l@L*3kajG~1=2yBF#>F9MmW9%OI3DmA2b7YVKwH{VKU{WMcL?qC(lcT&PfcPO z=IGwEP}6@Wb<0Uvw<&t<3I5iOnr#MZwkW_(mq_ZO-1Tx7CAPzivMVwC>L@dU9uyJM z&>GtFX}Eb9yg17(t5;lJHWug=#<`zusF8Pi+HbqB9c3AV|G^!vt1VrzrJ-$c20sU? zFGKGaWLV7U{8*hh4x14jhyTSNjj@2eF-$3W3a_1S4(5*sKCN&#qyv~|5JR;d@$s;x z3M;Kf?OAABtP*N+sRh?dj<^K#8F6{fN|*gwoS4sGV{Y46Dry^cNt0vurSzwIO-`9F z>D9BET&U}+*;-CrzMtJ0>MgdG3azmOQ6E#Wr)K7 zst2A(>kU5PH$x;W6tdhQ*aqeH`&g#@@lgp#P5_!Wg!O0M!}FOf9%QL4~osEfRZ^?)*WUoZv03PG z@^PO=J`e>##eeGfb1S{QcECSm2=tMXoh9D9}!_~>U#Aa6mJAO6>Bi8{HwLf?pq zUsVR;w(0khxmT8fArfSt}Z)VSwTd;k9&A zS)V+89sGtavl(rYULmwewGaf6$CgXlq_4SmTwZ*pi_tOACB=TSF4?8%lC6p^c?WgL zA0%C}V1UpiU>=qSP-^G~$mz=PpjK8HETqYDKZpt?45PTfjG7p$L+ z-5I}|7Abc&?`ChLL-Z}OJ`Ls-bSvQtctCtBpKVSjuLpIp^^il1%V6_u-inkc6jGw! 
zQ=UkP_}y-mMEpvjB2v@bsfZcDii(H{mQ}>K;FeTGpI})o zL%1X!-s|ry9*pMMc*gPmin)B+UlI?qRLu-1)mc!gimcHUVyZ0rZZ zftwzQ23Wjb4t6E$g$29W#Px3F!lyzm?B|WcM)xDR@YAB^PzE1TL+p-$2>O@594F_muxpE`v*X4 z>BWlrq8+ej@c$Yp)Ng~F3Qe-=6Ih%0c3)b~RJ)AEewuAiOaehP=Umy62EFb4D;=sE_J&W-dD=S~d~6X?bU90J;h zL555ELbT0tm}+0Vkdis7`d~vq?i4%Ohklf>(PoQ~E~a>z9w0h)879jW&?-z?bqU=U z6B0z7l^jByb$qGZSpm|@xzV^>61!^cGO4TBIA6J&tP4apP46SRi5lCbn|{Z?Wc#a2 zHwE>fZaULj>86({nIW>%GSD7{>~?SJs)VPht1MG!!6c?lVTuUlN^V?ELtH$nNj|0= z!jW+*1YBn?_mX^^)Y$$Dl*X=~PmR5!x6;@PdN*n8DA$sddSxxzc1gNnnmPn;Ynm6) zi(dg5_U!;__v9sVyMHJq?2dC+a|x`u7E|j|_|sX2@MoI)lpMd@pF3Wc5X& zCC2}q!41zDX|-PF9>_l*^rGm8s+eEN=1=P%N0f+QhSd&|JnV>NC^+&Xl3ug2_53t8 zsu#6BUT6t_V=E#j3Ob_>zlf0x@POB!#AeiN+bcrgRyIc5TDuQ(t=&iRKE;oQpn|^ z;AiOKKla4Dn|xwB*gnNRnmylBGG!~<+EZdu*v6%rnR_ajnAuZ~7@2_f3dE;9>?-FyHL;t5{l1-8zEF2KusL4=pVi)6fP^_I4e7kH_`Uq1Xv zXS|dvc==Ng#Ea_Bi{%Lg!pk|rOLucOWR( zbC`b*SqSB`U(r=ehj0UoXu^iY%FQk^6giwEXGaft8UAYLJL#NB~LHAXzD<#HQrcwy?t5X=o=6>t^s_^*_J_m~$m1 z07-5?0*TK886-9_CbO+6gPyy#8Zr}cuKys1%~!NvT6^<-I=pk}Vcbm!#tSCsn_NtE z*!md_$oaTE#5@39fk}&VD472@m*g$f)i1EE6C0Ktg)PMp2qU$mW>tyv5kY%S0qOFW*zp{hLzB{@3EfnOLg zSvp#iSWJ9mIL0sCz~7&Wp%UpOX$h1ETgZyKqI;{Rk(B;>c*t2Pa<(-^J__Jk(pX2& zP<|!umxgqQR(}u;LLWmb8be^hTuMP$-q*C^37ZemD!S${2<6jyH?K{4K zx9`Xc*wl~DA*8}uX+z-n6fu5F>moY@wy=xhC}ww|x(vJtE>h``v;5yBT8?#0Bmtj? 
zUvLWk+<-sD@CSyaCo!|5lGauuncRdwd`xD7eTc&F+YCN}XqVyknof@Wo9nTNN!CJ# z!;uG9>bwD$ZU=~8DHS<+h^o#Q;3x#lgZOO{@pa&)6dH9x6GABnK2i<63_Xe74AzVr z>ba!Y@g%bHMOa9zqq1QTrNQcMunC>2lxmjY8}TXksimUMhmQ$Qhu?VvAcz&t#1~R~0BvyKexoRa5fB(VN6Ozo>pq_qZfJFsO&?@hYX!WQ=$QW%TfGuT7B;qA6@oApopT#CIz?$=r2R-!k7l+DY);{Mqt3 z-gEAr?+)Km`0lFO4d4CQaF2Yq=$68Fuyhp+?`pCezME*c8@`KBD||OdE%**9B1(K$ zKi!G%j?HQV-<|E)lRlQrjo2YM*@4D)p_-?eZHNN{k*_rPq=-a|~ zhvJ*?-8&uJ@?EDSneU!TbmqJGB*AxC9TdK^bWr$iX@`G5zT1`L5#PN!rxm_qNiFeR zmth%$wd%e0OS=%y;@kh3`%$HRijiNiFi-=SjqO(~^nr`Xo2ccP}S3 z!*~Cc)QIoK&uoG3wkNsr-785R^W6`kskH}YIq_Xi(*H5vCB54$-_;L-%JEM%;=6qJ zd?(dB2PhKXl_1|KwToOc!!1icu|t^fH@d1HNjuzzCbLep(nb0R6M2q6Zrl~h(Xe}7E*lSpcLG; zzd{E(n%fI{vD!P+O^Ck1lwr5|5r;LozeU=Xc7tH~?G(oOsGW#@y_GC8j(W!wVjNsW z$h})4uI1xwn0NvSW~@XomUarkaKpZTh7-2{jbJ@^Aeci)CHR9Md<@TNz_L$|`JlTm zg#|_E?$5^xxWb!rfbBqNZ4{!vK0X){%lQ~NNQzerEQ%GT8Eob1*A$1lE?b?UK{?-4i%KcAdpNwnBE z`!N4q^|NSl`=B!kERsp!P5P1ixp|AoBoIn7R$?uJ2EMEKRM)N@8-~c*pX!Ju*W5t+ceML zQ^(g9mpAPla(O45T;2(CdG`(AV47}ns!{Q8>hoUmA)i+r*Q(Epy4JGKo9ZL`ydeqy za-a8=w_>mP-ood-|Cr+QUW)P1=Vf`j_&n`24}9L(_%`}HXm5{w-nE&s&-->J`n=Ma zZhYRU8I673cM~~G|J%#0&r?ln;`6q9DQ&pHOYwQBUc%=+AJ>x4>pJD`eV+f!U&iNs zIm1(*w_uvH&wH61nflXdvd;^NC!hEFWb%1`izAm7H@O*~*OfpK_}Y}FKF=|!kG4jY;^|`n>6HHtX}c1#&Ru7$l$fxU#~9iOf0LJW#43UE8=0CAz;SAhMCV zU6Zqj7W7qUa#HA3r6%WbdUeR+kJGfF1z23ta-ZZ4a8}ULftI4w;&>S6rqj%+ST_~+ z0Y@Z%amUNc*2_oiP#9$HhoA1bLkk;osa%-q$w@GS?v2G}AQ}KOWJ9lDFn~6yHB_*4 zJV%d_hW3+b`CvX3t>pKcZjJ|cR7k5Si|nuCeWLErjo|@d6F{nEgNKSQJ7^OdQN_}r z{&*6NJ8es;m{x-(*M_d`isafVZe#Cb6-xH~ZIR*qmJZK5iOwx`5;?bM@{#%6X?z@y z9;On|_Ua>O%$h{)?s#+tC20l`gfUpz^S8+}(I&_kN6?XZ9&w5y@Jl`!PhlL)w*W7} z4L2d4+H&+GI{Yt|8-l{*M+$f4Kx>wSy)YaLii44Dgm6o_G;@uH_pke|}_oq^!`Ix_KFYQfWX>K&9?hR_EnKfjp2n*-_xJJ`B@WM*Yt3VCV601 zEG5Z6%6k;8k*wDJFLPHO-}G_i?HCzz{PL3lfn_5f!6w=UCvL&S#4)wN4l+qAh@%u!I3rYWC=0BtTlAx)bDwmHiZ+mdf^9Ke7vhan;Sv?9b9C)noD z&UPE0C3$``^P73|=DqLCym=4&>w_H|jL3f6G-JaJ=4~x#^T&r(HUe~j z6@v4uXg(sncL@f%NxuEEOSNXJAQZ$#eEVsfD!Kzx(32yie`NuN#uIy}--13k7)m{a 
zW`6b-fT@a&L(O+I+t;Hf4Lj@~+mH(}uKO>dxy1x*soi6N4gZ0-R~;ECqxyd>c^J9! z>F;RC;AP$abzBKLHQAFJ$>Yg`J@26C2)sI#OZ(hAvDm3LG7Wq)m$uJgPpygk(A#xU zQ~2(^Zp{Y`yWiPN|*2ws{PY%-xTZ)5%shw|GkD z$W4%^)4#RR#Vtz1`2gjmpJU6&So$Ogwh{}qrVFIo?O|R64ZH{^AlOo{tYIx=ukrl? zoenr5Zb75C>Pdd}>=a+mwD{YAyc6#rFDe=N7y)lPx&s6W7{IMYHZESogTM*6<;tkl z44dM)z)u$;XE$w1I?&B{PP!#haX}8MU_TVChL626n*NNU8O4Gn*lZ8}$Pi~k7-ZUY zW7wb(YcX6VM$Ty*||rDw5fXqllGO7i&X!V0`FZDJ&Cvrp+l z3@);4L#*Poql~msONsUU;>H(6b4v0kMPskQ|sdo!+1 zaO8RHbXYqv7!SkCw3MDm53S#qO|5r;Gc7wvdD~fB+Q-Uy>_AcQ;|7H<;Z{0`t?sfJ zop>)S3(|2@-H_fPISUDm+>P60m)OK1;B)BS)8(u|0-r8 z6c?%e;f=7?ff}&Ut^pf<8EmlMorE*;!N?G%3IL9Wy2-Tv@MHyW>=;NFU%Cl!^b+9s z1_1}Um$#6WVU-qPK*Cm{o0{+nRC7^Wc|BQ~-qKzQ2gF`}y@M`;A++y6Xx}#n+>jZP zi=%u&fgKi#xUrXT2OCWw=V2I0xLW`o{cRU5JTL`}6&{%Oa9k+9kIcLjl6p01vM;?Fa)NZ|RikL3GUn(#>twLXWwcRv_u6#FkL(0Xlw{ z6y3tH2gEc1Y1BhKa()ZgBRA}upqVvbR6&*oVu@hkp>utA2H}rVjz7NT_+y_Ryx*EW z$l>?{IZ4JJ7D5#O8*J4Ue)*{>AKuK`wwLe+u3^-S02Z7%?_@>F7rSaX_y9TQXhHBn zH`stt!hw%q?BTt3?9r30#U7369DCIGFONNLm>7&bywV$f8G9fUS?5>4M~$?`FOOQa z^o;)+!N->D%YcthvcrLo<(ZcOAG<=phiCUSfR9T}R|FruLEyta{_4R8nS_AFdw1DP z4e~&MVc3#EMjSN+J-(Ci2O12-QdZi0J%a!w1ba9TdpsC~J%q+g6?+^+{E>&y!g&;@ z446e&#UjJr+Yl(gC!1Y_Wq-)!Qt9}bNgNT|)mF}jq{XA+pZjIZITL|9Z?l|~+ z1^UYPW6dPqVd+bW@P}=DSo~3+8OhyNIEDPoApC*SyHkrl3MUf&7(G$NAN8`z0I?=) z$Rzl%EmOfCjavMXb8G_n*`g9M{%Di|OwL`rg4LOXKmLavqoo7GAA>Upf1FbANBaZ? ze<0W^&gA%G9jCcR#vfI3fBWE%qu(R`K)AIhbz5V^*z2USuOcGGSCr4l3RqW9O=f>)_LpIG z>)sm61mEf?_Uj}3H8t)sbzeLiuI_~X>VD=%x$fs~ykgz2hOPV46aCaZCrz&Vk+i;b zgZ1H(3_AYNFT8^dsU8H5B@TYYuwT>T*qB?uiSl$kumUnh45Y!w^q8QuNcK0_Zvl}9 zA$!Qghb7tvYYHgySgBT%0&jy?Ix9WcwnMkbZF@{udE@oIw7uBi_X zeGh$5Q+N|KDbR9gzTD5Qn|VJ)@FgFi^fg22Ylfy+P#^vw{V)3`^@ z0mD-nWJSpX)xM24V;!c4ek9vXU~)9xZia(@w1OIDXO_=HgEb>h(!FzVHZj50j2k(tc_(CN0NiISfP5$qRUI zz{kSgbV^^F9-Lk%r;iIxm+Sd5S?M}bXmPqsA$(oBQk_lrr*xvsR!XZ4P5)g;dI3)# z6p~&!j?#_el=P-x0$Z>#_JjT?v8at-t@eb!U0vup1ZZGZd10zKP~g5hnIQpJWr6F! 
z>cZ4lKG7b_vXYS|yCqy;o(~fSwj0d50U$4YN{GeO4F_ge3sr!*vxs@8sgeW%e+`kH- zi_#+S6TMSl^_8;Ir0?Md&@m;2VcatOo9w6rAl8nH-GST$Sji6s~?A zv)1N64xNNX!cxlNSrJ$~!9un-us*g|1UxPeEqBpC2<*Hc~>QTHwzM|ok8rmWvT(uZcrKlbk4iFqt`nV0(sU3yxna_f^nALONg@M>%f_;#C zwpiUW--RXULm$HDJ)eTefF)>bJ{%%~Plni;aCVZv1zgNLD-Fv7>*05vPoACFI;m@Z0P-7^a!!h)YWMepcf;xufr?&~D} zWI|%dle=%=Pkx=OeF7=A6sjM$KPvc3JqbMj?lH1P#Pp5y%0{R&fG=?fkIPl^^mkJz zz3c`#U2lpaS4<7mWyc=Wc#JY_ zGqVbu>ZcHV(!De`$fE%1cA{(=l*`Z_@x8l~ZGA zpz6hC{La`J3)!D!zJ$GWWE#hi8zO)-AMc}L?y-?_m~Ml8tb%D0<8hA#_WKl2^GSRk zP(vO|9ERVHR?)@~lafui(6eravO;^e_qzY(A^<;wD^&(CFziCMIO_G|wJYLN|AeUeJ5TN1;Q3etVe zw8@+{I5Cp!!wXXB9Of9=03Slk3rEVjfh0$ST9OS8Sw?-v<;+0t=MDv3Z!#B9AiQ=Z z$P&mQJi*1BSAHs2^HB=Tlcgk99Xjd{n_nq%X`9YP)p}*Qm7UC|0}3k(-Osb>@=V~h zNqTj+ywT5ISmD2&y}ud`AKquB%HpMEL61Sp=r*!JSpp`*NmZhZE1WMz2+am z^Y@Mp(rX@rs@HCeBfeTESIN`&(|i(MR=7%Ux+y-qUW+nP*EbF$<{Fa_q}Mj5X!Y7S z&}U%jy_7qdS<1b{tS9d?Yw?GaG$vSm&1?=;U+JcP)z?94Q0AP`s`~2vjEXfSsp@M( zlA^xm4ezVI&JXXSzFrtXU2Qg~s;O|Ks=i(rsj4sUNHxz#abeZh6XZ9evs;{Y|`&D1_N&V(@#H#|iy@>?!=0sH>*Ci?f`47Vs^>v$u zNU}_x%)79_&_^bZA!3=v8&sKjZlo$>hs3Ee_8WsLV;>!<<`2ZF`Cm7x`D-=#7mZN! 
zzid=x^2WIEGWqqmQ1x|J!e!N0l~JYEwp~Q6XG|)!UNb4wnx^kxeGQ@cVJPoQD%)@( zl}*zXuZC2)8d7Z^p>}<}CjSKusWLRA@(x$?zcNhCzc)@LRq?3sq=Ixsea+wws&s2i z{i!c2i3=enAy|FsiSGn!f<}F1$7|Hr^|BmfxwggkqrQ3w?g^D6x%#61u=y3Qs;`~K zVDpLT=FHd71^>rv-QD1`+6!o><9dwIGEmP}xUfvb z%W&%R^4_RLg^=m}5j>vZx{4LMOH6Rht4J5aM2GZQ16~QL(H4p<$trMt4*vbD(m5Y2 zsFi1xm)M(lEW>*D^Z+hbxyOqpR=a3!ay2ZkSbV)w#&_&%O%1*=rvd?rH+?ypr0pS^ z{K0q?G$}TQ(&YIOS3#3khlim_7wl>Mt^V>f`9k`?(&TriwKO^DN18zF6!f$vF-3YW zxn>m3Go~-igjN(l(9mJtOzOD)rW(FGk%;)t&q5 z*X1O9s@ZS3m<@|L(w2z$4Td6czP<?<;2oGiN=oEpXbQlGk5SMiUnObWjD9UxTxO@?52i@=EI9DwDD{@^8M0GFH z9g2gfntP3|s1K7#iD}6Wf_iq@)b7R@m82%r`9?kiRkgu}|^K46^x)MD8y^*M`2wc*t}rT>0+qgpP(3;_ALK8F7jCXrU%0fI zZdd{gWiS%MTKDTjc0R;_6IMS;;e(3{U5!?^;b0V#K<_6cW$oZ^yL=d09f_%P#^ZAQc z!{@!8U_Nh;?Zf9=qd1>$Z;|zvEjJ=;!rKOe=}6&_8~)w+i&!!%ojy<8H$EoiQDI29k0e0q-S^ zL|*O5<$Vienkvw;(Sj%U4lurpjpXKU6}+IcnF_%I`1N5lKU#U@bXG1#oUMBfhGZQa zjK{*!zOX4uR>sRK#1;1i2o6x_Y@{1>Vst7an-vgE`gZgS1#rC$K++d0>PrT&4tdB5 z%#z()I(DjiXaoQebBiUwFvls>%!GgN-uSV+?5c$a8EF(-D~gTILc@|bZAKgf!o%#6PZ0;2wfIWcGj0Ygke0n`aDOd7*c@bj#B_N+%Tid5X+e;$l zwx9SE+g|f2wS6!7oGZgf@E>v^RyICN+rzi35Zbjafww(;|6bS+aV@Fsz~3E%;GI6U zp2~B>V=u#%J^d8`wV?~c8ccXtQ>nEgSrg=HTV`$(D?KRSYz^6R1i0bMQ!fskx;x9 z`3f%47X9B?(Y z3bsq~m)DESH${;8twglZJ%rqh7gy8H96zi|B5lLn@;}OIhLCQx0mFM<)F5EvU!*JN zZ>#`U<2SP!v`)$^6j$Tp1aeZo*c+jQc0=jkhHjINu%Aa^&tomq+WsN%!Zqlh>gb9# zxIkXxe@vE?2TG^oCwaHLV{CCqSo zuy(l`UJKsq;bSlhN?c&HbG4$<$FM|aa7b3QMG)Pqp?9lxMIhZ5&na~00vxn5#4<~7 zbzAGCF9i$~fR-7oo8XxL5k}SYp^*qglF3*^`fq_d=NIice;R;<;X6jA`*oaRIW{6C zQLOfAD7IfTpAS#5nk!H&kD!Wf`yegFay6DIj8~Zijj^s?{J-oydt8)N+5<8~$~fa? 
z(o8q{0>kw~Ef=db68b=g0%Z%986~9l%e8WoRT!-=%|uXqoyvZ(NxA8!YnRP0t-NFx zxhR9ETtr1t!y9;pK>|#z?LY*iHA^0mB~=l|c?m zRZ=M%gwE4RuvVl68d2VZte@_xuWQIo)j7A_xXy%5=HHjHE@^{17 z^Urj={77pRziu_xJM_r~gymsBXT4MY%2aq|w|x@rkTQ?G{Gw!)RpuiL9eIF_(7pfU zr9!VXA3kK=_!?MScG(6&|A}7wjYlj`dGVJXBla{MA78a#N~FbS2+@GcNmgu0UAf}`vNKjR;!t!R@a1|6N9_S_0`zGmS1>Y#J%5ArZUVi1* zGriP%b*Gn6c|K<78O0@uMK$O3UZRTFu#3s1$Bv9ZJTX31n!+EvFNW0-o9^=6JTA*hyVk3BujAFXR z+9KMCZCJqx#<0^AJ+z0 z6#`(a$0%nMSh>_6T*dzEGtm?1R+|NxoHNQCvPx0C*2|0D{s1Q~L-s7l5}Ty&aA5uf z3NPg+;Cen*jAezWAz@_;`BB^+1e_7h&xCXCfsy)pi;)IE51$5ygWWG)ZJ%Ti53^a9 zc>A5Ao$__4*JH+yB=xtjKfm%NsYB;o#s9-Tq7+RSi=Ai4yp-mh{V3L#0N1TWDram@ zUbOULK(sVBxQp`^mZ88{9)6%LFu7`B(3!TanZNTwU)11e)^)r63EV!|SLt(ivo!dI z_w{M8^9vUZdU;dZO9R|!@HU?wXt0z|6tjMDJsSM`7anNvVL^j)E_$Frlc2!`y___7 z`ht=MpZvl_gEM={H28g|iw1{u5)B4_p`^h=L4)528XW3Sd7we_6?Yn((dkZu9V(dy zfo_Nfm&+p|4bB7_+_+iLV4O^YSce{ovE~Bruwt_tF%BZSQ;)vPJEU%=i~7q%j6*Kp zLShU`>`HAf{>b0?7k?r~-)^^u!tHtfP9I*`$0E3%RxKolnidecP(r83Ly)|A5YR&I zCftZ>6T>!h_O}J~lI!O1uRfS^JyyYQ|6Y`4Ndx$UrLI|Hp<#`Pe9Tn4~-`1`YWtr}TX&L@G zp0^lm>?27fh50_z4|ZT~NU%R)s*e5I#e0Rh=MjfWj>XW#Indk*nz2ICjHxeTJ`|Ha zj3!LB+$=^Zk?|dkMD5_Z<3vY=-Boa=H$~>$?Dw=|~&bgl&10J8bjcSHM(vrxFd8S%F`RFav|6UdcD8 zhOvA*6g44QY6Cnh`YVjz%G_hnMpYOru{A6w5`ofdY|ws;w^#CZ2U{T8L!sRgyOYg= z1~%?0F9mi#ZyJ{1jOFWdh=)B!e6WB!fFYl)HCep zU|uwe1D|>oM@iVpFFs-yCL<*LSJ1CHrXKM8WJ-5D^fN3_Q>0r3)O zV86d2<7r?5&*@7QcveqEJSS`uPyH~}h3EHIWIVr`Bjf2U<9X*&1)lFrRp8mjiD%dd z&v@of?v7{3Id?pFKSub3opZ-?qiBEboPc;WG_V<+GM*{t1fKJ<6nM6n5YNP|;;Bm} z7oNZEl=0m6XBp2S=LDYjXDRUf%%s5c%UBuD>|oD$9*ymeXH=a#o+ly*pM*MhJa>!s z1$6@Aub_ctU6%2jUnlTXib4+(UpRRSs^I|yR^QT&OJWq)BoLT|#K4@Tn>5%b!t5)Fo(Gmro|A7$d zmwqLl8g6vq`9g<`XXk7g&&O*8p3f{%;CVbzf#-PV_dGw`GoH75z%%`q-oqk3JsQ?UC4;eCHyiq?udYq_oc8DMEsPBL)J@obN+V`Ot zTvz&cpiZS*|LSi={|fj1jqRTMf7-QwFq`d!h4(qYm!XOI*rx#0-3CK8?31K&R_?%V z^vu0iyFAnXde0a!{L+ItF$K(tDTaIVF(eaginG9`cqi>P!%XM-Myd~QxoTEx6Z7og ztQG20i%u2hp)6?~`~nqJDHi;_2M&Md`7`!G%(xCWFHNZb^Fu$)^IbII#zKS@CTRZH z14{d%MU!}(ugrB<#XR0D{3gnM7?#`M0shFdGW@GQ^Z1H 
z_vX;rJ`kPMFEq#rTW{lob+!nL7Hq}eEo9&S4LMM*gp4(gn^@6FhhsTptGT+U*-N{% zJ&^yiR~y+*>65`XGzqe1wowk_A?bs|wOg$jOf|IAxbn8uH9#*&P^;DS?X?~UH$N%?fH z*PP*7H1ln6e4(?KW(Z~$%V$H^z{%~Uy|V5eVF3j`hiA+*u_reOuiG(Xc|_-#b_hEB z>?1-tu+eFWosqdGy*Etwx13Y$Ml3t-orc!S;WnWgyA5JdRrc{1%^oXZp>U%dx2k@S z!^+-H#K_zhEUKqdzgSNyJvRRXoYW$h{|h?3TnA1c0q3jdyTduK8=MQ76HYh?_XOvg zd^^Y*-Qg^>2{_CD+7q1kjZ>rw05lE{UE1J^Xz2($H-Ug$Qv+&7PKRKR0L};oCEPO7 z8v=RqB`Mfor6vd>rjuKmFA((#Y=s2TBEWbk4Ef&xYM26~II=C@csxJ7O$LMugxqg1 zoOtoq;2GP>g>sL0)d;+-Km1F0tycGp*9yK~& zDd?&h>3_YTf4h^eX2W&-38bK+c_z=K6VKOsHpqldlP!>vjz6>P9ElT_^FBEnwen78 za9P)EA?gQCZlC81>R&rJ7OJBfX!&#_uxr8FxlZVR>fPa8fBf3;u0IPl3ZommN9qK; zweJzUlbrCXdw@5`?g?Jmrja@r)~Gmz9YY)3AyrH5<15T)M$8m?_nGg3PG9{;7>!JJ zd)egoWG@kRH~uUK(=NyR2dPpjV~Ko8k*pT~yG9n`c7k_x#BVuaGXP0w{(qQeF31-$*cQ=ITh5P;$d{*TmKJV5m@L8Da#AixwcYL;;x)FR1 z_4A0&uVj2i-Z(x^do|({{(z&anVKWkoO$47UHI}2sUSCe(K#>a$k^(U)nL8_pV&|X z?s2W)6q>UN;X)&h53GPdKFc_bnSH_+wR%D3Q?g4{ab!zb^%az{e3>dl-(S%9vWHh8 z>`h`#4}IQ7YkCSqW&jp|&rC!G*Y*Gzp<9K|{IgDccE9TZpY3OE3ZLiSb;l={-5@?3 zmS;FQ1Va;@;e`n>bkTz`iM&y>4tFW0-{~YTF8rbg^M8J)6KrmZ z0E1!#uKwhuYOL1AAEMxn-rD&6P_V-68=X<_mA)Z%giiJm1AQXA^&aptm!f@Qq@w@_ z2VplCX%UWI9EJ#K0Cbs#;5`9*|1*zS{Ci&j`FsK5imUnbOf47W3o2XCfC-yV3cDyQEdgX1r7draCEO49ywTE8xhzUP z6+pvHu_%6)g2m6`FlBQ%OCE@JItHsn6Y(w#d=KAI+F>DjbRcbdL(^-pW1B0jD}3W3 zwk`+Py2!wXC_K<)WeanJwI;U?284tmx}d*Skwpd~lULS(2`0V;w#{|up{3likm=SVgXNWNbbteqGPn8b%6J16ZDGVTM@M5#>i`|x(c;yD#^5HJjws^w)hIF z*M^BocR}iod3_uO5#!6Xi<6+RSV{Ew;f zj^X6-ASRRHKkki|rUl$^y)BSckgpVIoZ=JopQ8!7pM0&U*$jGw^R9FF&&6-!`uE6h z|HFkVyQ9D0H}!GP{5JOOZv58wZNYCN&bat3A zW&fHtM7J?;CE=AWG>6trNShd%TSJT*^cJOe(1o2lLgd?ulhZ_>s#F_a>f){+k-Lgm z%o`**fV&#uMDj#AZoj}Pz4j>Vwhwb+C@~MA(EU*VYTOou`c#u^!3BH22&&n2p@ni* zC%4YfjtWaEqcWQO?j5$lmV_-?^IGU9qC^EvrSHWm9Nbh?QbTE2EPv)Gi&>^a@L_v!{X01acK}gzccGfr4Y0C)7%v8q(|SQ(^?9 zRE!j|JF%xhQ94eUnF?`So=3WWs9*zHxBwEB{DIUwEQix1bT=t6mZZQ`@XU1)6MFDb zg$Mke2?nV_cya@n?_siu(dA^ZYjUq}>GY+IhY5JF61%oT{;VX3B4!&gK)6`MVrV9d zlrbAH&sB21Z6cL?vBp@UbOuYpT9&ud`GEZpo(3Q;5~{cGFX1>u=Z7lv8io8g=@sOz 
z3$*k60a0Yyyms4w2Kv z?ET$f<&6Cp%#UyGrjG}*rxd{0T1FFKrIJF)dF*5ci55&0lZrmosFaIC17bKjV~#Pr zz6-wQ(=Hu1e&a6O>axKCS1Gu4JAR~o3v`CB9rWilgT%kr4%u3d5z7VIC2vyHC4VH7 z9*NaqM{zqjy0)Mx`}_oLnz zgiX8$+fqu!uV6L3@;WD&KTv_=`vE3Zxv>6y86{+8fgMb%VXqya^|cI`ZSHy7OCq?z zDu@#%kUiVsa6rW3+q9ig_pVSWA{MVD2o!WedTFo%r!r6~b6Np&J&}tx>mM|GnxYC( zra1_}E8Yw1N4THC5*L8+FJB{)>P@`ec2GR>O@;W5cR2JvX48)AkL6Bd&|NdYqAp@^ z&+)$Y+a3DR*}@cokq}TPK$EnWwuafSg0a}Bj@d`1y%fGevomG*;v$Fs#4Vd-q;4djnTji6N5?<5*&hGPZ_~A^bk#15=o88=uty&W8EyzU}l5v|MyIT7(Nu z3m1kkRL#S?*OdU^aJ;YMTR3w9!1fw_@M7&!62ASSf@45LaJIrBqZ(u~1$Gyef#hR#7~$>Mi)-p_2Q1%sEL;l2X0PAM~6#Gr##gX1?>A$;@vo zNe5|fiV{P-VM}ZgP?V&|$FvsJOc6i^9U!80DC7r3f`z2bEyJvIm*gI$`Y^)IeO4r= z^s2(9!<6?>8^5WBrPR(3QX47zmiE(fc&Sol@bkFii?`^;D~F$>?HawV5GQ(-hDEra zOIK!cP?;Job51MsOt>FBFL z4zncNF~@n#;Y8z0_>>~aAJH~spY25ic-MJfaT|dd1)n(Iga2o`zL*lw^kqUVXM1xI5K-?egB5 zRO7^zP;?-bL>pJ9yp%?wMU*XhDa$7HR%MgTD_v-}Dpwn2>p!invdxVk+vcgkvaKj5 z*(!T!vX!~Q$hIq0$R_V`x}|uh+s^Z4a+slwIz~b3UdIz>+zo~9lpXKyl<@w}oaMvh zGZdpfsew%cuvW{VSv^n-*x^^g=vLS3V~i z{SI^rz`Hzx&f^=mM63Y4Bi43Wn2=2UA5S^sIb|F~`ilF`aHFU)=^bJb^06Ru zI|JX|$0(86Z64$q9;GfWJBbn)>o#RwrTC@xR^x3@-P|sCV(hs(-i6gLkH$qIYStbU z^_Q~WA?n>=XP57hMHHXC{T>{DJ(<4+m@%DsT)O{A*Jy*+O3k7f=i=6p)E0T={fO|L z|CVr^@Vb(wu}=7_;B@w+m{29}XT_R!3a7dUIy1(H>V)TA+Ca`${yy)TDE7+0)hOeT zr|1>-U$vX%K3XaQWpylRkLN+Xv!i%Yblkuh#((6ayKQYRV{El$pYX<=Ve(<8TrWKH zT4my|v?7Gs=x^S`o>e`$Z%bJYGyvsv?G{_Ctj1Ve2pd<*&i8Jym$ZK&R5K@&&ksfy z--N!spdpLDTI?zjuK(M7`*_OrBd;Cq3mj0u2=_-rDHq}WwfUUy+hmM ze@D%jTQHvIf@8ezxiy@uj*OG=AHZ%K(DJZ~mV@@FXcYTt)!6I1nwd zF)IVA2rbhr(?9zm-E#UQ#^OG1c_N9j-=N~ms9mq3$I>&%4bDQBUO?pXrUNUY8jMoqvz0!|n5ov&7wfsTgxd@Q=}(Ua z0BQdzd)f-5&r}m06>C5WF1dO2d`NTv{9I@aNyk%Sonuop1i%*XglNKV0 zPoT*cf}$co|2cuk*$7mcdjudSFx62owX~t4GbkyZq4O32os64J8g42AH(La5p4V`b z*0nF~p`V0FoBOMR(6iUO0zI)^GkR9|(ev*C=sEEn#+DVcb?CWR1Sho3s718=MyxO? 
z0*mfC77j|X1JJYa-C*=of}p73Jq)}d!oZ5Z@a1bQlML(d*CG|*vsK)$R zCYA!jKzgiKJcauq#I9z{9SW zIJw9bcVr_|BZG!dAB7At1zZk zs;83^Cv0V|o8ICoB<4EtJWF9-Ua6^0)hISDbg(&bM2) zs_lzj+iz2quN~5VmSavNR&nDv5FC9u?{hY*`E|}_`|K^wK9_A_9zgGOZ#bRlzgsBf ze=CF1G0?nu1510z`ytD4?LXcoss3eIc2KN@H&HLn_xhy}Eez`ytW;JI8RJy8RbnwK z+WaG>=Ns3)rL`~-aymcpNELShiP<+6?Fh>AKITaiA|+I+9gGw-ts+n{^m`PAr8aL> zCCszAxl!Y0l%w9C#CqqCHKGMBJ0om1h^1>ro3(o<&{n*e#n$}^7R^C*=GnGv4l>^p z6`E2{sR0I-vDAr|wyJDu-_3vJR%{z8jCQswC8tN&9K30FJp)Z=QkrZ{>%Q{l(eueeJjZrc`&tB2kW!qr8axplo(8HB4d zD*U+Gktvzq_JczRu0B>323I?555?7IcZR{$PMbAc{hp0r{&^Gg^*uKc<09?_=Bu`Z z;_4ToXWUR37FRb25l?Z0H>SST23);r`(<&pd6R~#rcKQC@{L?Y3v->gnyWVV-5iRm zd$x%Fyd^BIez!3gSFaTv;JYn;T%EWz7*}tqjDV}_s`R)zr^b(~9ctKKDLeGITD>u- zV{$(Y!qp*fGF#)fwTi35Ih*ZqCYX2~cSrMQZwBM)*Fq`JZ4Sj%(GUH&I;cvItIMk+ z;40QHSjpeO;M0{AoY;d~n)#&-jG4}jp}1Pg!-Bbm#V}ecLvi(6F@_%86ojj`4dHQh z+eSaG-nJzSuD-NQkE>s7_v7lO?O||r%MupNyPg4Y%=#en-M%3dSNpKkiLY%6#nsMi z4BJ^@;}?UmmvD7dMG&suxWtdEE}@Wu^?odUa)bZ*myO|Z_3kMuu6ip2a5cYL!`1#% z+JdWYXM#yvyFV+*7hIDQaxl_w@j%IA{8tICx<hfN76<>>aLeiapA_*+e4lAxLY#+tT*&wWWOO!Gn zU)`Ce=IDMt$-Q8L)01m*x^v^4o>Z&ToeHz!>?EfpGS*BjtZmg@!sh8~AMVu`vh@x!bYwF_z-U z#FKFZ^Jtf3L;11d!5~GUW|qmCSze%B7*NbkmU(z8S?0^xs%4USHvd2zsrv&-Zd7~} z(nC|HNvF=Ii(oF!8a<$F#Ypf_Uop{>Tm?l2SHVmU=YmW2BpvJ98p!uWkQG>$ph-0p z{~0KJvX(do?HSL_qQ@NIPNIv~Iv6P!fpN}SuEmob2XBklvb1ulT`I^yP8U&1+Mn!d zTAC-NB@GflIhk?NxzD6)j!8X_DARAHCPfBFR#}Y5qn(;P^7t1&kGXyx?ZjhYEc1A5 zO(2h-uF>=O$(pwDxW7+$9pSK@o=y-dTmOzbbUR7FH|0<6El&dEB{5&tuK1w(*z} zfyYQToWDxr?N&W+^XYdHV>gvGW8$h18{V+$57`h(R!kM>KLVG!EuEqz&v27+cs$iP zcA8qJdzxD3a2QFl<0UY>S&~6F_Bfv{wFpO>%p}N>1b<;bOYJLXCc9H%crzY;MJ4j- zD=Lv-#YEz*YgpcHXWnkqc>8W@YrMTar4`;LX-b{B>i6+h`fxbjo*l>iZK_IkM60}Y zY>l@|FR4VXd^tRCd?y9Ep+jFc7@#Refgj*WiBZpOyGO)I7;k?e?iH!HrrCK@qzL6bYiar>R=_bDLg5}8p8Bi1oEJMO9L{IY~l(?(l zHPqJ$kc;kIiN_0w&JLs9ubt+ZXtS67HB}%xEo7%96w&4%TBwbAgI0l2vhn%hCA%XZ znFC&%Z^MH}4Vpmx^67UMqR6|za7)plXT>f*OQ+d*SQTWE>p;fq+0#xn`F9oZd0SQ9 z993TO-{jqr$DH^Op8wq?xjTY0Xp%gV37VCXN`c;t$q#XBP`k9QthXp!*(wuIpe? 
z7#)BwJU`BgrR5W|;2U&A^T{a$hv@VY9iUjBmXH)hdyJ6a_Y}*_xFC=q$0^bA^YoN( z{A}jpYun0C_T~5)&jfQr`O(ig45~5#hlZK}QcS^+Qayl z&Tl*zR(=YRW}I56YvVj76eq+54h)}2@)^n9Ns>ED1t|c$&NvCCeqbCc8^`3cT{98& z*+nWC8ar@dcWViNzO#D{=D9|?^FZ;{%J{wp+Fu4EIA*Y0R_<+%v2TyEFJA<}QqSlw z<1v2Kr%>hSE|s4IS3=IV`fQUQKq}esKHL5Ll;pGJ;FGW;h}6B1n~^VZ?>G^M+0o>b z8-$;8Gks3$(1gjt&uIcbr^Ba^&>}MZ#M zVAhU#KIQ%D4n@#8+D0TtPj8kc5TpuIU;Ij>Tfc6>^ZIAKe2We~pr6|g^ zWcKt2h1thWK;;ewMN((`FSaKrI-AtH59j<~cd&Cb?Go_WZif#wR$9F{pq$9?t3Ied zm*JckX5&H!A5wiwJ8&w0Buy;A$29md8IxSKi>`|X`sCeB2ZUU>VbZVz^n5sg`Z?>_Y92)U_*!MPRBgM zR=2BZ!kzAb>k`juC9f<{OJkk-9 zy*;T;jWgK^$13bWva^$f`LELOqvKedCQ@dc*FC_ZxN1$_T;vGSt|y_Q>IAJU0-&2T z0{idh=$rsRT_xEhxQGJe^Vv4tuQH;^rL1xo4CT8@+Dmdrr9(HGqjrhR-qibIX(Z3tXUiB( z_3u-@I)-=xqlgoHbzp#XmI4ekkI}>fHem#e^5n8`N5@d%E1W>m8Pn)^A}bF@7zsxI z9P$YAB)J_Or75&LmL{&qOIq&4-AH(q9&j*Ik`IHJFf^8r!3YMixHV;A!c2q1l73u> zg6HgZCai`hnG-l$fGAKW?qxZ4?wF(Jk&Yv*9Y09#w+q_KD2%{*o1L`X5hb%LFWQLZ zk+`NBN#hsuD9>KpMS|+ZGah*T0-Yrk_bvQhJKS@wbvwL!pRmKYqmk@TkQd1gS$Y2< zJ3uGruUfvF~Mqkzr=#4r-JLrpc_PVcz{+i|5=gac#N?ANO z+EDVdx8!QNp)o5yz#lrAAL0)+kw26rd??8Cvd9A!HDn2Q3xPiafM57lG&(%U`8qTO zq&f0wAwa>rpJtJRcN&Kr&ClVLeegkGk)M81emF`0w%R_ii!%@A#gnd*7~mzcJST?)U9H#_a%}FekvN+6A9&B8*%Z z^UGW_f%M;~)ZtkL^4-KwOm#u!=QZJkqEXrTo6pAu7mI>VgcJ-ar(ikgk9Jz02PU%I zX(q*=bR8t6dn6^@?KFJ}?D=f(k5G9l6;<4L8Qxj~>%h4**3sfmdV37j(Smh+OLa`t z>X@m@e`okQw7b-x@ywe5#NHi4Lm>X1ctR@op&ve6=ZDwjM)1Se=W2dAL+Wo%{vGxI>2Xc|i1nY&(aAqDK>pDBA8t$iKxXsff%w$@ z9<_g}y`^Vzs~BMXn2x?(z@7<>Aq#rpclTh*aA5fqKY?#`f}R?Ew(4PO1${*Yy9cy} zW5#YH1L%WGtk^SD3UPwM;t&c4gZ^oA`;#Npx&6<(ql!D!nDBR%lmUi-#jX12M5laA z@(hh~{9GGf+^#;Vm*lI3XDdGUgy+J&QGIR(PNLJn8tu}SN8xw*XteUjim#ui^w zcF_=TFce>N=aG`w3PVPrZB9#IksJG6xps!i_6fit7)>g+kV&g{ah?pelFz?qSz?a)vVNyMN#Ry*KRYj!ocMkh~_wvmcG!eBh^g zHn(F)qB~Lpa#ek!WDw%<1|>_DN)JjU(h`Zi8E_@}X282KL4Pwq^U^La{eT2Z_LHD@ zSIF~34Tj89e>tHQRY)|&(9mhtJ>U4V*iF!QxK5&y`AvpLxvhp zfP_l|tX%d&6dm!1>kb*$LGc1z+PtL^pCP@TathO)O7;%df3_6P(d z#^v1+kl+rfinGHvC6R8{a=(Oe3>_w64@Z)Sg`V2EH90mYsEBU=ZW69I4>=u}Q*k~v 
zb9@O_9bohjYoOo)~oDl+|+hm9Jsw*4wmO9QHVpQ%l8v{ z3JV^Ef;($*3lmz>!7P+EG*415On)75wmvl;t&>0|JBhW91R~5rxiL%2ZGtuSwz_`? zZYKXFSkoQ2rd>6Ds94gP+EVQREmg6W9-pC8f3HUDZ>#^zUSR#VJb}NEgoHy{Aw423 zmmbCwu0SL05#4v_8Xcg$##Y{Z{xkGpS4}CaVRkX{^J_@XsU%p~*b@euA0xlHL9aO- zO6Y1QKjHxciiq-o)bZC3yYUXZuRsU1;3nPyrg+VoC06f%kMJU!TFs38ms2~jGHzqm zJMm(X-ifchshxNQEz0yXdxkUk$3!=P+&)yq0~uzqehhVoqAWb?xOy*?*@@likb^Us z$Pn+>lA^x*g$Gm`lIObPoyvUVW1a2r%g&{c%3}S_Xt`e(@mXOvup>5CJ%Vn1XR=FR zb-uu3(V#>rE79V?_Y^)O;SpoK$Q?myfMpl~0>0zPZjC#!pxDbEMrhG!kPmqqc%Fsj zRi@`H<$3wgsxP~-&^&$bv=}J%S)uHj1+Wa~Wru{C!ZGoy;Bk|b*m&O~z2>wZ{>!Z^ znI#{^`EQn&Ug7+=R-tXkevv#3#C<0VHzM%_+TkB}J$4%|*(GB~bKN>W`tw0#ot?1Y zcc=5CKNR)Dk6td)S!WdfwE~$r&sk>_I72kSI$JsGG|XYFGb+SPjCEpPTGjF=Ip>7p z73Uf4j3ziI?4Ol|jB|2K*B0uS{C_* z|HaVI?!yHY<@ZBTUJMrUTOK6!oxl*ADrW&hY(cRQ8RCfzQ;GGOC)BK;LS&B|Mb*Wl z5#=JCAzlvMxV5Xw52VxID$~loIAT#cN8ANS{tt)^$QgDqZum@s8@hC6Rj~uWs;>;u znPJM{>0E7BoEgSjQ;*Kz3_+XV(R+u`SoE7d$ zVTFlie`H9o!j1V!1meKy@><3UkEu9730A13GO&vSFtCG#$iQB;GFI3DYgE%&Ayfo0 zDy60904@1gORKDLR+xpm{E2CNmtW?)d~Xj{m}2`kumGwpV@!hI(e55&9^$(P{j`KH z-ZCxsL1;hFsrkq1u3BO|N*9{~M>3erag8f!HBY%GzKq@FPlB5)z#$l@E%7tY$m)Pf ztduG8y0g9o7{%og_uc4$eg+%@`gIFS;iEEF1D3nMg@)PiTm$Gc?wa8-P!)|5@CJ4% z3CNFc%!O&D5eZQ=HV!q&S|bTuGWdTZnuhA?(jo3d1{4TRCJf9vZfB4lX}B)VKD=b8 z6L7o2kZ$)q6i0dy%#9Bou*!O#b?+C+>k$Uq8<@tYPph!KCx zSjyURT@SPeJ)6MaD85zhgg zQ=6uwr8R+&qy8bDmWWSS$^s{77h7a94+yc=Db7eaXtGsi)BJD-Ov*gSh@u@E$nJQ& zAw}(+iAYBqGwedljLH+pf=BRFH_`=Ey*IDx)_ANo<LHOi^TgZB_k0LpZ1P#n&^I6Pv1oy(0N&(*;H^CQ{|fJo$pgT9{nU4ahq5PIaw$hjpz$E13iH`6m60lgIO*V` z!kcrLGkzDo-_AU;W2ugC>%`H@F)ue&HOMu`ID7AdG^+{h$D4 z>dC&eWcK9qzprF(J#oe20GFpT&m53RD)~-$GEtrm<5xqWaiaI5U$iyx1l5iOlb89 zEaAe-*lY-OW$0O^XDc9&;!SNpSZdgVkEgDp@6)CJ_fhb>VOc*)9RgRt$ z+GBh^ZvBDA$Nyc%=YK8VcYMNM^ctVvPrBat;C-hLF!hWnXJx;Gv3iL+!w@-xwF(#;9Q0oFge6f4dM>$ZysfbPb$ckRS$~zY&OZ!UNO;AKu}y?l zT2LX#pv|#S{t?v?e`dJU5L;<)iewhW)8FOdhxQ>2Qx=*04g}xm9M#cQcWnf%uTyU|){}oF#?uP*TvAYE4nHtM zcD9t?Lc*R_Inc?BfvuotUTn-T2{BQ&w%`e@u(wsM>eMthUC2&kg-bd>D^l~@*mb0> 
zAmU%bn%-9Cf48`4GkWuB$wf8uR>b`THXkqLX=MryN?K7qc{p1kPr1&ALEw+dOzTsZ zwXri_C6}0VelM5KTTwdK#iITs^phxhsgYYz*K3R$tB&Ey$$yQd>bm#=6lu5zuSE8Xy@}S?N#FQ?YKDLeCJLW%zUrC_&w+Q!jEvi zPfSRg?{gCenD03K+oRLJlL`9AV={pBoyI$_(Q~SVGub_XXf^EO&M=@$#cJMoBuB^&ugNBJd;ys zAhP#<3ofRlQ)y!=mCg{<5#zoumA?3CFDgB1Lf=%n$M{Tc`9a6W|6RuCvoYUye4?>l ztM!3RiPJHfVp>f$Xj0HeMah#%mU!+w@bQBXg8sw z-Qqq^6^PZujmYspE6;A3zQatVznUQkZ9Ca{vXbYSrLOL7-BMTZs>YQa?=Bv-J|z+T z_1KI=^w&*#^w&}z{k4GkEh_fIzbJ`%nz_LS{s#FsTmppQsLN4NMeG)r8d)8(5k;XGmgpVw_- z-e;iC&vOKht9M`0G~M9k;YPUqK>3-=o&5SHI_8^HHfh0DjlEh{kS|ut8Ut*bbL&@ICWTA;S**YsM{G0o7Sty}W)1>fDM zpEDb4nU?#I?RuuKwRm(@oqbC(+|Hu@`cuX;*dFOG+|FoA9+z}+g+4uw#wLX>vwn7I zxzq1r`WI+YKP_fAep#3Ur(>GFk`M0IwAUaF*5_A{7CMc1`jY;|-Dz>N@c>@7 zs5e4h&5BYIs3L;@Cc0Gb9Rm`YhVF`X-!S&qv(tF|K|-_B@dm|lU5esrlj&tiJlLjC z3Dn6h#aaJU1@K5S@J-noARldQw*i+y>e97i?Eo?Ux8a9B%QPx_dziyKlH zS>o2QA#N73B33_*+wPj?&$36_@I6p&Fk1}t6lmoTIh-SSVNjDE6v z>@v7(X2UWqlW4T9z`$)|wH8M7@`?4B+;N(WYbUo~A3cbvR>s|EHPRJ94`QcGB zC1Cl0j`awlfw7~77A0t7Mz|oF_DX}5Fe&(iCPvni01zpc57aU&`jB0R6CvQ`l&S`SayS zvx_y~P@G!-By`PcIF$~fWn=Cz_D!>EMoOZ)h1NH)rP6b4Cs|+QA``2a+#o2S9mR?stQa0;hqkQV@H|88b%ic6EA(TT)+yG%GPCCg<(puq z<)GCMHk-_LnnJU@;b|sFpAL&-y6il(aV-3it-!tZ)b1R>LrvGF|47>00>RK=6{lI0tA@Z~i2H$7(Oze0&mYuoFOC~f9LE$$@)DDmHhW>y;JG^Wp@-Qf zf>H5J;kC9F!{d-sD}O7FBwS8SP)*6Hjni65+Prrv9OWtp2Ls#uJ3^R$q>=`q%z5o`5=Q zqJChw28oDJI{4Y-D_$5Le**wmK&QXU8HqVL-rz*ZdKIuO4%%3$saR?@c_-+DHz24%b zrP&faqmQoFXy(H`bD%-?()mEn9T?tXjVjL5qINzME)$(v+=G+2N%IWLArLw2(FqsB9Xt18Z`pC^08x!{#8hQ#lZ2xY~X6M1~gi#SaQg(0qV&8sH zW_S34`_%y3%6}qso&!Uis!#f6!gnzs3A0~0l{jG_Vnp<|$#`Y#A@v1R1)%`3p1Tvs z{~`xjeTWzgF-tni5s|CKzq`U{?)pUk)M_Yc**iJSyJJpU@sf4z_VHF#kjpvJwrz9W5L)8I|+qjvhhZ0?D~cZR=WQ1InJ!*>n}{_;V? 
zuNoBm#|I66^`PK;2MymoDEJcw4PP7-{4RHI_@xX!UZulZDd>U3v>x8)G;kG(Gei*fz?pQ)y%ltMY-U{IY9N|uuJ%(R$NDT+c` zr-k;J6iH=lnG=a@hhxi9S+eh%X=t-0TVx4g62~aAp5}jl@8_8oobx;9{lEYJb-nLp zuKIlL`?>e+``pjHe$R6pXBLjL5h)dAzfqo>D^HUAkMe2N*J=HG`Lz6d)|KVcKF0d( z={FY;r7_3P`ENu zMG%j;4Dl?)m575vb%$MAuls9>gKzt{-oiv^S7l$G9 znQ(^fEo1?%i>ZQ>Ki}Zvn0zYW)cfRa_-rD4G!v`+dko^+gyQQE@^yUCQ^u4a9A7fJ zkz{OnWMpS{jO#a1Z9-99>4lu5m}<2}vc3&Rxpm08EKS6jyuc3V6$UJq>(Ai7z#s- zSTObjONm8Hg+bRIsJC0LSS;4%N+5J{ng_xcexfI6DGVeAFeWZwkV6X)pu$+^CmKy* ztmqE-2ICWqH@mk#lY=~6)|W=Dt%4!z&RmfJtq30Ik^Or{<2jo0SQ}$^>opg^O7S_sLRm)s0Q84Ax>o?HwEXrSg z=lG%sHM+s-NAv}D4Sk_Fi$1z$+N;ST6$*vOQOzi{xf;jQv6q(aC!f&^pCi<^KM^)K%C@(Bz0wQv7=2+{3t!L$rU2bh?Gk9U<+JRCDvj2_keybA z=~Xd)<}9kjE3RM=0DbG}=18#CkWQ`P!*eWnQcJnUw-L6jl*^{6!M;=7MKBX3d!zz0 zDa%l2gE&qX+HQj*+I1Jhc@m|JRY-$Rsx(u@J5?vjCTJmDJ`BQlRnfJtqt^$*QqcW0|Vxn9EI=$J<*XXVpPt+4L3on6ulpC;`J2?Lvg9IYw_P%9@!zbXdOGps%#6FnH8W z8S@5p%Nj5xhoF*R2#VaPbm{B!1Dr)4@S$uVf*G!eHGFh-YeZEl5RC*#i$S) zddU2d9#zpXTKKWraB+i%%o1q|>DVc_K@H88jvHEzSSc&9U&FNEX#zOvD(g>bG-N%H zqTyvb_c9G=YbtG?w&g=)`vuX?c(4lcqh40Y#4WJ9@H0K33oa%k<+!3!g~7b8kX|n% zACW7J;dc~yIIF@nLDryfMc5PQhUn})LCT-{2u(QCxGY%Y^hUW{TWtR zi0&kW2|v;kXUXa{a12cs*CZIBSgM1^r^%#4tUH6gfK0_u6ozG}ZVV$24x12`DVlUs z88?rEh-5rC4!zJ+%t-qjQ3>N9LORZI6or!vXt-MVAP^K2B#I;_+@_(PxizC)r$v#F z#0}Q@f)q&5odi;?NVoQIM0LpjL?;!n#x_jwavfZ(dz`J$&GaK)JVvMg5^#Arj6zNj#m zsSPKBgkx>lDNQw+Sb5^j3~lAU30Kr^P}lDy>X{+CuG(aCJ_+xY%L7rWHbScMuuy=$ zSOy~QP5Vv| zpTqcKIt5-x=misWNWNNhBBi!97heqknHmm)Aiz~r2H9c`)9C17ENDz&E8z^-JF0@5 zDN2P3mH7wpT)@l@OLVa%>JDvG91J6f=jq|w9}d|7d$Vk$2?DN=XNW>`GgKI~XhGy0 zQTNSJPhlWKI+2)b0qPs1I@66s>d%1Q5vA4*^{i@HTuVpz*W+4Bl$YEip9Ua}Wm{0F z3QABZY|AIRa$N5FN~B$$Pu6`uDV%eQ#5$@C3$$=M&eTak`g&MMhUy{QpGC@Be^^v2 zWE#ny;S&#;Dy`?YMS&fmu^(J?N3LJ-=4S3SBH$I7{!+{U*+kW>M)F0WCc1ce;7L`G z9_Nv7aA~reoPDAauCq+W$M3tz6$Y6gH$|Jt3+{5N)o^{r>ur)X$x<7uEVay9Y>F+l zl^jZ3rc4dj5hGLZ6BI0H931TR;xaXAxgYbH@p~JRj(IktJ9`-&>Ay-8E8tYc~XXpGk&BC=4n{ajcC*5i^sk zDm#b!5SxrD8F&#CVP|D=mJdCezw~M5M 
z+rgw;qp#d9`Wv@xuTVWpv1PgLb_bEf(Xz%tbdjAdj~4JE+39b_^P&?RL{%cWWjQ<3 zp6=MGOm{mwy)h!1=bWKtMArrKigQKo{YkeUr=9;m1x90NvF}VeBkQtknr^#L<|~|E^J{L-64bV?ig$kN6%g& z#R=#@I%IM`+sNmeKwMGi4kG9+sVT$OJ@rJBujSZDg4~|O5^2Y7Qu;V#=xjww-BZu? zb%m?Q2O{*~G%R<{=)H(E>&2go@Mk_Bc~7t8N*=SgohtEt^dRS0{iS212~$OkDaf|4 zj9JQXL<*?i84UZ~Fd;2v39N)=tWt&<(lW5YAf!UCV%18A0o4vR)d_~!C!^Z2wFx>x zoleM<$mmVqO}1ND<|ZNs3ia^JBktHsVJ_5~@pc!AV~)601SyPrG9j0>F}7&n9LK1I zpXg)Ug`e!fn2&KU#{C!zF&@%r^Jf>vJs9ILF2Wd%(Fdao#-A~E#z?~$)Gpiy*w0#w z+cD;n;bPM)j0-UeFou5P|9p(2Fq$&q_5Ag>XvgnSMeTQt+nXEDgQkf}Q_q_e*)J3Z zx=iIQuiO(f>PL#qORYA|vCXF%-?eEDHZzw)SGs=AL3nR3hwWnZCVQ|Nj?${{Nvf?t z-I-K_;Hg(3)v!FQ{_fs3`z{4->b33Ek4ZHw<3*Aj?&U-}4=`~hU2{QAfVzF$zlkrQ zOuxv{Ls(0PUFt%fM+;ZvhPuHLmh99n^0Zs@-Vl+Y2_=U0X1f)Nb}p?*m-<4U6}U9# zrb{4EvXu6Y%}K|s`s7_59CKTP<&utt5)WBJ!B;Np+4&t@5v{I>HdjZ#aTS~oVwO}N zXs1prXj9v@Wo7+oQXK$lenGp;Bk+3roUOP=M6T{st!Q@`qwuj>C%inAT}Vg$DST5( z`@AN|x`6}fNd^Mv!_{l;!fIGLR^PI-T@tC|sWQcok!i1#32V!O#jvqknF~8X(*hj_ z)DKiu*4sAE%-eZp`KnzmiOCA2ir>Lx^;?~bqXZ|%)z99E)$l^B_uttrKUiN^CRY2( zM4gT1W+f|m*%}ztGSc$eT7V$0ExT!CP`D0%$ZKm2rul8^+Pn^xfJG)>N62GACGe4* z$^9mumb>BaIlub4CfJDA_89f8n_$82S5YS|TQ({!<5nZ0i}2MLFigvG&D5Fk3Ko?( z#)`aA-_b2VCU`^oM71dF!m>Qw3N=fy?iz_m;@44Lk^S}tHr_rt0lL#Yad(YcC465M zciq1|FX$-Nj;xJF^6Igif>T%xRgGL;^7pJ`6^!VIaGb4*qma0 ziW@2W)fAUde1hU)Dm+)p{$A?&rmymY`Bk_rUzyhy(BNp-Ru+Gvllre%`D1k@?a8}P z_9?E_qx2NBC=Q^QMlqp$PPwB|oTjw*U;GU>Mi2XA=Yt0O0_u0Zsrs0nqsc z!AJlAXau+n`N4#>{!MuZk_!+Fh8;IcUn4l$NadCunb|mJadzXp z#07y%1!pnNZ(LZo6>;Ori#09f}ST!2adIY8k9 z2<;)Arcww{3&5&H-~kW|kOfc(PzgYLguomi7N8J7j-Ne-X8?r&ascxu@C=|3Kn`GD z2hRWs0rZ}N7GNttB>?Ri*aN5q$a;=I3?K(!$sm3JX#j-)wE%kcU;vN?PzWG?0qF(s zcnP@yuodc8Z4)H_BV>u9eY;F^Qz6^$VNx!jyu$1qJv%xkF`C6o3g*QMB4VN``yc+o zevObjbBCOV9! z=ly+mS%2}|aLV_ek71Gii>H$2C~GiV`E=B`GUdL|=llLy`F{U-8kJ7esxW(aQug+d zdY?(8K9Jzv2Tr@! 
z(S57`lgaSqe-VBy6%J~u9oz@p|NHRGsc=yv{@d_9yvXp=+U3uBis9QKBqkv!l85>O zsIst>nsz_NFU@^d>!=Z=PHs2J=1g+Hv{|G+&4W;@_F?W_T14vge_Pq~leG)>fxb2_E`n{>bp4&7;3nv^ zj{UKiE!(#9uq3q^^s#Y<8`wQfWc`m8>Tw_|cKx}H-SgP;^x@lBpihf9yNNy3^<6^j zmjR%s?O_VpzvoKqJzp4bl<9Ho_tPx58%cJ`^h`MaK5>gjt(`JG3x&ZgJr-NGrYG_K z;pad1olhv!v)rTUsbcVHLYW@D`!0oVGaCtIdIA#uw9%N^zcoEi!fti5%5rH<&kK($ zoNYC;Thnvp^?H|({6%bKdamgF@@m++HEd;i)CVNKG$>lnR;FiYh5Oa@{YC5!g@n@4 zk!e(U8B%OT@l2}RmXKU{nBpfSn{+43?ZAZA@PNPkdInhh>+tu$rZ%o4P=2N*Bx>*} zK8(2f_ zrB8DlHYU#tpA=c^&@a$VO5_c6Y*?`4_eFOEj@_H?JYBj@;uscTTQ#J-v(w_;OP6>) zo9?7Dq;K@gU-F%DE%$5foAQxj=ke^Fs{EjS8^^DGXt^;-+A#jtGkatg{l;;1UpzQe ztiOu8?cB9Kbnz3ef7cCObirunkQF_*1m^te+_FAa)$jfzXLW~gvn@|Xxp41rUx>9< zxO{mNzS(+Moy%$0SgY4!n+Z+t_FL`|uAZ`0RIA zn{$gCR_uB2`umZk#s`uox!oIIIiPRCKDXd8J=_Mq)^MLzS8dt3D%gE5t?0AONwK@B zbD^RCxt}Mh5*tUZHkmt7CGc#o>7lnKzSuHgRk7;`kI37X{G`XSJ>F@`f0-(J5?I3q4&Hjrb#}PInT?F_VTCG>0>WS58_QFm6rGVe`p4ZWilk^H)uN z5SY7Fyt;9+(Ar|(1Pw1Q*{!GxpPnD}inzCDu8{*h5Wih@^b zvH~~x+zuVL;OslP@A;;p^1CWwzWwF7XAj@L?7P-h@HR8Ya@xhf{AF8JHcmS_x2i@u zmp(ndch$HhlCbIH1m+X8y(_2h3S4REcx=>+N!v08dCIrUD6Fsr(bQ-D zHilQ%mxOu-+{=-=uQfj%KzNyKoFXs|93Z--a@25D;LGu=JfyYly4*p&I>lMmp_;gX%NENQ2m?LxP>9Z#^0OpX4>nJQ;c%E z`{vVms!y-3lT?)QhAd;qT4*Cfxvz57pB&j9I&8>?_rfcBVRhHyEGEub5LTl(JlLqZ zA*_49coDn6H@vuK?(0$aOT)Y0$x93N9~Cir7-uUa&N+K(l|8Rb0u?D$new zhd({+b+$+)`ggr^YNx&V(GDrECh7lF7d>E0rPDyBcg$^#!eze=Es61F+OQX|8Xem( z^x77&bzW@F61Tnf!+XV-{FofGlBOPG1=djvjmzqVf>;~9IakB_C!9RLuvUYclR&ff8GPpg zbIzrpiwo{*q|9-8UtYVd@0&S0ZoVFqKZZZ|JN|S` z?{|}mfBn?uYFllZ?5@nOSixQ=x3j*ge|LC)x!uw3)lJq+x!qyzISX|ax!oD&=Dlg? 
z{q=a=`>rg+N30E1n27rZ+Mo`<-0oJ6dK$dn`kaRDo!qWeA7(S;b`w=#iI&`sLrcT& zKMmabliSh37{3oyNKT{Pk1UFr6swS&M!i2-6f-GSp_u;N_w4_E4vyjb$41b;`pNB{ zo^6O4va7<*j_$YPrsf*Ek(KOaYL=7iju`BG^>h})Zt{eCSv{GB z=V(@RjDQuyW5x4g<9U2uw15}FiilbI*+CIKzBX3wb zF1usr{pXNiq<#!@jS!+s5Cr69D*RPVjVw4j4;khn5-k}416~fzo;7S$^U$w zv*-5%{AK-L6z?yd=dvSD!#+H0l+D?I-qO1pYT_KHo7nq&x;KI@=qsI!OPj@Od)>eY z3oitf5ENd7fESM+^UYXc5p&>Goe+zPGjZtm>JwF$B4&52J-DoSQBgc(^4)s)H}346 zhGSR%+`dJ-P=3^(|JU`$bl#dFpEZ4o=SObrw`s*_jxI|3tmp+4U8%X4dQ-)e;bY>fG@TbKs zek=B8xX!Q}S4Frjx?e3;-7U6o(BKcgCLlEW+~RiOSc{eM5WJ|kaADjnV(JH9e>3ws zB3sZkb=%$>#QM5ky<9B{hzoaX5`wIPiGmZ;>P{uTY_(roHqg?|vX00x{4M6zab(|O zgl&;u0Wq~^*y!vX!L8|Jykh;H<~~InKs$Il(l$)o(v)J#dpBJy+jgVzzUwIQdRys; z>gP>G+iq*M%+pv{MA|<_+;X3FD{E>g@!)lDv+)6Ei16T9^XfS3h(pc8Cee0HY4zXz z=8E@z8;D}(;K#x%hwD>i>zHi($=*A+*>FXHxcHKj#r{8zdT~zlD%$Yk&%2U}XFj{f2TW$WDORua zc0Vi5p0w&pQBqgpbKSHJ*WMhr51Pk+;1uOvSgNJ18`hk_3nu8>|qn=Lu&D>XViT>Qz*>1EX zAV~KKt@4xjUhhqDi9)|u+*PV?)lEkvZ4_f;8O2j3H($l+)mgIFCUi{~< zPvT1nh54Fi*0$CU{fWEBY__T;RNPqE+2wDE2CW@ILrmpF?Wh-3S-MrkUcl9@^jO!oB>7V~w-z{M+>}&JJ#TC5RcQ z!;X3M(@6c2MXml@tSBF0IO0gLU070mm1aQk`4O9EWw;q}-u?X1!RzdKNu_`9(d+9x z*?LzuUXSKq62I9N+Ta;wL1?YGCKKj;;K;qCFaA8cnv?l>_mG?W`f~2^XW#b7-O9dT z>oVa$l_C56O-XZNV@7LuZeN(jWA~4h?l`h(Oiyi9d#xuY?5!W#7CYS2S-oSqQ*rd3 zZPPtw)Ux}!W~Qte8!U-QxiReVy&J?AhlbqDSPSvsy@sDI4O?x$;A5}ro1PE0e^&9- zd41*)&he31K9;s4IF@ljhRM8r>}GXI*#iNK9kn;E#vw`68vg(E_FZBjulN*Q%rWa4 z)7by{YL4hv!KSf~y^DzvQy1FSeqyr^ezNl)`Kx5QMhkm}b|?0(4~uWQSMkLwwzn{w zlHPCzJBGhssj-4HP`dnKpSm8LiN8FV-*x0>_KimiZ&W$`#J<>5z9?i@TX|{MH0Aq! 
zd}4pVYx9!qqt~T{yud_piup!_}CUiRjs5tol|xOPp_ z;+cKmt+g#XXV)jEhfb?o+nxN%3qjrUu?}7tqg7H{Nn*K>iGeNi(H^X6sL$|G3tDMDjI5$K}_JqRwm2yHHc@Jo#h# zea95bH%ZoC*d;DUihCw6+wG)NHs6~!U$1oN!X4Lr_a%*2^L=fbTll2(estk{_v0Si zxaOZ%#T%U~yWJ^l>x!#p&W~?DVNW*RTyCz$y6SAI>vG-g!Rwst^ok|U`SUM4e(PfB zkYe4Z@2rbe8Jd$T20fY(ThqOG*@3B-f^0;ci~3!2%{W!J^2l4~%Of9T$i1`AyIp#( zae9ds`^q&_!{K?caNqGfqwnZ@$TUvCyhg{`j=D$Cncx8F6=y zijv1%GrJkFWWb=ao>9s4)cJdfs``TInu=2CB=vHp%2;pib@`*Dd~b2I_l=j2HPOvJ==Y_&pm}YR{e%6H zzUyk(w`W$?h#CT_Wxg z+de@fI%S(Ywte&1d_zI>x%6eh;di!POuo^tPOmH@bZh3Rkn$2Oe+?V2r?&mzvEo=(Q21 zJil&hd`G-^ed$wAojX;t#x+{kNUy~;*iPeHF1P3ssW<&GKjc;6%by}rUO3P^qXyr2 zS{FU*&8sCu{*h3N?C5}3ca0(fEMNcXa;H<0=FJ#+-uTi!;}hlkHVwHWI9MKSXF<~ z$AZ)%N$h*Gd47e7-a6xMev+=5@HwF%HMxI{qkeMBFJG$MxA@r4t(xDnMgPS5=kxzm zoA=Dx<)g`hDOPvpXa8E&GVM;vjpdOGQai|z$gseusKCFT&lG%X{zIPpkS9On$q#w* zU3v2D@(h}Dw15{M9T>@Kw*bO1K0YSij1|P=3s`gTydpn_6&e`NN(|&HM;yPuuHwJA zuH!r7r{w$!VI4So<15=?#G@IVT)djU6s3keRy(n^xhQb$uIwRSju(C1UyJd){lMw* z5MsaK?ZFj!UPMJ|`G-2@Y+~t^EdraeIO6m}RpET&Fv8)HDDT4mY41D0n##6+cS3;B zyA%OIQLq335wURs0-~ZKAfkd0AQWja1Vu$fEQ5}Yhy|<@L?wU)J7WVocEpAqJ31EZ z=vdy`IXf62ICKB+-tWHmzL$@Ee(S8g_S$Q&-4CZE59OA71l_GqH{sqtw4Z-#*fA;# zM(5=vNfmXKyBE3GO{W^1Z6Cg6KXM;<-!GbV;GnxKpVxYpzdg*CRE$rv)J?bbr$#@} zDqLE2|IpRh3z&O$U#hs2aH8Px*PRtlPQID%*FC1fwqt+KGu=KPlBGX?bkW2o&iNH? zb86zgjB=v#PQ^^x(czCnvXWdnIUMzNJ5>?C`{vLCdCba@HEJQgFFRBoymNEeB4?$_ zjnxy<5+-oDCGIy<4@CNLlaASKSobTJJHC7Oz1K3$xR)c!v-%#pM78vfy;y11q0k=O zUTSX-d&LBBRi~cq=+`%fTQgVt?4*bZ+@!-MS-o$DaW%)j@8@0HpX+{QsgDaI9S2pSU|MmBOXZoi8k{>nFl2hYjlljpK@AD$^Q7Ory_~bNx zba;%AC;e}U;DBl!M9=1BkMq3S}NZvjh60f#m7UjRTO4MTSZQ>q93P-O-te@i>xNci{i)t z-SmGT>DY}2!lsN6!jb-C>HFrnJDNYApgWG{>yqaa+{iiF-dv6yO7rW;efJ!4j*b(# z>jX)B<@fL3bz9b3bbPA?YY5T9ZINFC;WrR{CSl~h|FY(Cm4q*rJwHLtt&WlBEr?w? 
zE$pf!&udi5{PB#$6BL{y&mo*4=}(bzUm$)vPvojdJ|_uQ)7V)+*N5(S;-{U8^YK4H z@~NRwOwJ<+<`6#L8}jmpc%rYeg}>awNj{_=BglDW3xBaA2wzr@%Pz{*?jNoZ_uz(T zV*gbu@o#>AlP4?l@{E5Y_gm!<6ca4#-+S)7r)5k3Am^3+wYGGu=(z3#Ij)>TuM}J=A-v0QLy(4Dix(66kd%Rhn>uxhh z_-6ls0(akc`U+<1KY9GuWSK{t8W8LZbhfCdM_pi+??7a7D#9sHmuBBDZb*OYN zahNmY)%+@V2X4-piF3}lZ{du8%PpvOcdb5k@AdY}?&9|=>zCcX?Vi+bbN8Bqb?)Kw zHPwdYK5);R@^t9v&`0iLPW$@2N_y;m+ug1gN9&1uO~IiDF~(2aQ}R!VtGw&oAD88M z&)@po{m)V7g02|6aCg!1-pB6z!riA`YV4pvuiO{jd_8U9^*8SGUHesN&Hvz@vIn^v zsyDiCxiDe!kd1#~zewk*x&c+~e`7JI^O^HTDUDTXx-ZYivi+-znp)K3BN?IM6S`Ev z!PV)(F8b77hfmM!XBtpl`v}MW>R?EzyQ~}9wVwqw|1s10$uSFR&!hE@RdX!KJZMFg zhRiS+Zf{MsPmQ1De9MM9yK>4*$yqz9qL*fEBBv{Lf6LZS>NC1iqei}CubE~~y?>(V zxN{tbqCQU;cyc_4s;j+rKjbKfy7%apcJ~fBQHSOoZkKr2l^RffBDj7=A1bMbUti6L zzLe$rtJOC=`ctn9RqBt|LH-U zt6F@w{YEdU!-4p@*Im4+s{FdS+J6kC1X?<|{0}22tHYPZ)(ZTo^3ypP-ZT80?ps6= zzk7t<4xl#eo@|}KY2EG2ig8q}$t3z`Q7+pebUhPhQRg1_iTr$j7Paq=|Hz!-SrntTyK3S7 zxs>yaTH~YEzfc!*wW6L}`GpEF**J5;*adVuII@81YBgl|@MQ~0yU3yHkYC=>;w4nX zilRBsCg(QYADaBIacGC>*1m|CO!`|h{1N~yvgyt7q* zlu}RDRGI8OT}r7h?^G4?rj&|Y(0~4^g`25Q%k?g4=xw1&$4ys?_;m{o5Aq6gIw?0O$8I|Ga}2}uFD>2x)RSlRE)FQnnsVk zRHM_m_+A&vDRV3BSDwTFpy=`R4=VX&=@QlTe^A>dL_F3XcaYj)8DvMNHimHA;_!8%u{FG1KJS15j>SxIKt6>5#GhxS>Et5ij9cO3_htJD~K&g4<&uTtgf zV}8>*c#Zn9dsjf$YuBjs4;7no=G9XD$o-L{2s)>B3~jgg24(Jg$1d#gO={%p@tJChN?Psde@*K?Wx6Nj%fj^g)R{i-ALhI)20IFZGm(y;sm*)=#O~VL2OO&Hg0M zM?R;d!&m-uYWJb&s@JhEDBGj?<M8xb#e6P7)bQ{v z$>$xVJTd8)!zbQR=cI3Q0qi$3W}WzhG=7!KM~ZVQZ|UCgAK{!ERU7${3MgB#@BY@0 zBz+?_W73XGuT&aI`9D#@o6A&P2Yn{-U#NsvyOuPL`$8q!9(iea_ABM&)V1q8?{Cz! 
zfB}E<#NViEe~eF=68)Wek@3Xs(%kP&&trNFi&bgAmOPi4!{82lbNBM0r3@~&eV&8S z0418wRpJ(QkI&KlOwJLD`{#bg1^VY$-1yOkzp7U#bF-)`=Pxi+xRzs-G?rDUaL3H0 zst?{);a(WHq_+N{DoMxY&J6BX*MY<4(&bR&9?ENf*730h_vI4j!j*%xxQ~|#Pv5$z z#Z^n$mNu$Ln@%T@p$>OVaQ?8sVjZGSm;1-Pd%r6e>T+YgW!k0K7!bZ8*P-l};otTe z(dXp^Q9Bw9+i|@edW>5P$mlon+8?%rFMTdG^AB%R6Jnn!;hS^)g}dIq*DNhj zcI@rD>({~L|Ki7TxLRFw?Jv*GS(W`RmPPE|uJwdb3LoT{z2~8{NpipXI!T3Gu^@ao^hR)#<)j&HgG!yn>>GD`I@_O zf{w6rkN4cfl`##vtN!Ayf423@P$dm6CF_<&GVyE&>r}<%XDo+3M$BDVc zb1GJhS-BkkDq;upS#kT?`U>MG{%VI`6;+rldgL1Og;rg4|B7B!eB9lNDN#J~5|9#dy@eqD@%p@sz=)q63&nxQM4_XY&EoLK3rv(? zNQJoa7IB7%zM@ba%|z89&EIF_rpU;D!!J(ER9Jq0CGvg^g1X8yPLm0wd9=P9@*n)2 zCG};(qS1-)Lx_EG1O)`e#fr<-)=yrxQOrb=VNCo^n4ij5%Eec=ki|M=Kl-h~^vfG0 zE#&0$t%fvK!yr2~ur(9P67rMsV7Yv`_{xBP=7+L2{E!*Y+8!&C4=Q3J-9RRO*ONk>Sf3gM zPGIZ!OyISwBT3*tvsK{_Y?1j=-hSN{dCSVF`wz+~`}u?JM$l8Opd7Lvj?oR0Yxh96 zZ2K}@ipGb=z}ETW_d!YP9nz$OKL4P${j3flM=rh|=zqJ9i7jdh>`xDB+kaLYn8*#* zO1h(d?mvII__$GR%m*73`_FAu+xa&Z(rdMxpX85ySS=Ud0CZb6$aE^256XXHhcC=! z++Zetms^{5;Fu7(ctNmY8+_ZC7r8BTvn7h{tW;nF)`$I2D;Jj!bRz`c(TZ*6NEDCf zR&B^j|Hmg6*Vw|25Q*Y>PVf_b*>Z9DqZR9GXz3?uWTdFCV9CvzHW>4 zKGvZq-};~ATMua>K#yC<&+|=}i_dN$Ti-^$5g~2o>kD&HEyS1oBwySv8sy76hS_G@ zl<8E|?rMKxhXC4i1n6}e^K(1W<>K@I#ty7gksT3Z+O{JH+DtvfSGHjXmX*$bW97zs zp#QF~$aE{3XX}4rPXyFyI_UQw+qOOUdkHym@s-#fIp5T`kybtr*?+^Ejq76-w`-G@ z_A6-DCN1pO9jAEAk^QQ1{ez5~0y+G{e#4)!NSBK*2U<~C>)O!K@`v+OA#}6$(IUNk zzryjd^`D}CB`deAJnT@#<@P6fWOAG~lRdtemiEvw%Ux%hmb6_w$i)-TReQT=km+AfdG&PoM# z%GaAruD%U9*{=cmyxVvtzK2Gkjk58?UoJj<{68B{8rm37+{U*qpX^r$Yp5!S7yrZ_ z+>Ywx;&sEB&Bpw{YezT_MeV3cfn6qYX>hz-_|N_64|3Tc6QMv3|77h*EEivg?N?BS zf7*_4o{HL0c6i(6k=eg4$OGGX2JoZn}ouV4%cj45_|aKwk#^isw35@NwQVP$oD;jMG9q)^P>S0|LDQ zmy9W&a(CZ4k5xXqa)#d+%)sT0?ioNS44HAD_zog|y2vJ4`c^fLKKP`bL3l~h*G89t zJ+25f@oYF8sf|-vGG4m$t<>Y1WTr&Z8Nl2PjnW}LnkVAf^pds>CD8k_O_rE-YX zj3xO`&y-P%FySqe`90fqf?<=s&L{1=R)ic>ASm; zpMmV$Mu^j#g|>%3@j`f$QW8Z;*qQj6e1=jJf9lk!(UGyZEkzSsgrX>$!5&ik>YDM! 
z(teTW1<`U1&3Lq2Bk(HCfR|3&SUwZhFf7@*O}ey=%z^zh43rLhTBdeBj9;j!o}ICb zcmY$|p9rrf=9Mo1azKs0Xx1L2_Zi~vS!`8n;<-xe0bkF>X=(M+Yq9tvZVtpv_?lFu zQqy^=v@b*d4uOHnAu?5Zj>?$mixHd)h>~g;H>G7w6-L=`q?h3%fS04x#FN_nPp`c* z^p%l*q_#jSLepP^1LrITVnq`41k_SOT5&AIS`W0Q64H!QLaZ#H^^}la9Fvw+fw9$@ z$ziCokh+TsQg>DnNYjIy9*H{-V@ez!`piT)OoqVJ-UBQqb{<1V1?fbqBOMntq!XYa zpwHQGu8MS=;oOjo45KxWVURj91R28sT>-;f6PZWrA@d+zWbUGa%!MWm?dlAx^(!F` z=NZ7{=&@Od?aUG=MbP=z^F;IjvJ_=%RnO5L~W+byei=#4@$z|h@E+A7mKL{XM zL>e5J9MnvI0Ifd<>t6{6F_ux!01LS)9MqrLt+}3Zh;};;hbjoQ5Lj$o=$XuFoJ=nh z583NjLChi2dRAxRdhs`HuZG&kv5|4KF*0^BLdFbzq$@xO>tn})J|j3dK%gLKs+UW! zkFW)m5pgqc!5pO3#$u_s#!7R+lf!K#?MUR#G-Q2K*~ILo?xRWa6RP;@N!tV)sRyV? z%a#bK?9$*c4}ywRKi9oUueu*{U=Cr~so-yUAw5kN(sX7ER3lWR<(1&H+u?8s0$ckT zOigyKhF{>wnu=C2W$n;kPlrXfB~o8?INeh?yo12fwt*B5BjKPj0}Ww@u#lcyyT$Ul zaipD^;s)-9EWxP^Q*~r2QbVTDE=_|}ktxWVIvdsL>9UZni#pO3YD&}k;k2XR5P{Qb zulXkPSRL4BqK-_Uotp%yArtV4iL+sYejT(|BQ<0sQbtD6EMydt9M821t{D2?wkuBsyWb%9<(h_O;q#yD_ znEjiTFFR4L-z>*+p3TcIi{G92qn#c6q0gaZ`4i>p=Mk3cLhFkp<(I`PC;jMmT1irU zc%cle2XyENVclTMMz-ln$O6_crY_pZ zG*{oBVWNUeU?Hd*r;IdqsB#$AD##jS%s_^n%i=KX)scNZ6PaZzA$^e31UdE&7RS_G zAGwz+BR7c(>Y1;K9J1M{Yq}ch9H)+~8OF%0&O~4w(T*PflF7Ur2E>eHGX4Rw4D|8! 
zOuVNm&@j*!H__}_Os1(Cn!;vg%jRnExv85jg5gYYkGiRV5zHD0Cb<#wgvYDto_{9+Xq8qIRbDSp9&D9hz zD1AiLDxp4AEaU?Ia0GvJ0e_f*KlH&L+Tag$@CT!ZCh7q`v;iL)f)910bwGzU(%k_% zhU%lCpu-z>8-gDIL(1MrD**uY!`QR8ed>8-+{MP;kCF z8lJ6z+|o6XQ=AsE6Lm&*(Kg7=r4zDam?MjZ_I0M!#+61eIWW8pz*Z%&m4*5dJFLMD zGqA%T4zJxzkPg_whIJI93mbKTG8;je^;~q3-VQAR!-kD)z@O&nc+Jd0TDfW*h6W31 zfWK9Qux{?Fi~5574q$&ru-^>qHw622qhW0ZI&^EmXJd@em?|a;EC+u`!1wvepi>2O zszP3D$g3UFgSDKxKrMoeZH{S=Vj5u?Qm=%@*0NAgl`^AD2B9euHnLD+y)ufdRY9TPw?OdQ5b&D|_^lK8%@X`(41P0k zv4nKyNSfyaV>AKMMMJu=kS++)4TW@`kgf-$YY*w#K{_2srvYiCb=Oe?b%e5;LfH+W z>^e|(4Jfx2xEA;h{Hn48eAZrFu6;tga?zsOC*CW-@I;RTWIv$`z63o@ z{thrk0pJ%u@QV-lr7!r!3H)LQe(3;yF$2FC#NqVqpMSk^c#H|H?RjDJ#at+i?qKP`^Ya#P&ZDgFT1NwDA ze`nC&k=8%n6pe>;BOu*CNY@+EIYT;oNM{G>I<%Fps~YMGl7K;N{1zG(q{)6~TbnKIfTeJnp(AC0bsr4RU}AM}r|&_8UTe{_KUp%48- z8~TSjj8_b67$+r4P|vX5&Q?a6FcxU;U<(-C)lhe^-yG~Wmbbq%Zlg>n6FoMoz_;~$-dl)jhw)TX6ZOB@nLrr)E)YQ zg{&_)Xf_*n44^zZE_z6(hV9SjrHy)(Gf@vHuPv0<63VNet^$5i1wZK_X?!gm-$_yY zSW`3>+EP$Gw53{QL{+IkUsDDDvBB?Z&_>iDPZ)pVU`%kafHrMLx9Lu*()kD5Z6v$) zV6KMDq0GimW}S3Ac9M3|4aQ@TQy!i zkziv8*cbpd`hksuz{Xx+qdnNz8Emu!8_lAvks0(AGq6*qPD`MP>ncDE1(YK+I2+ch z;McCQu`1LQg+l)ehW_Ud{ci~LKhJ;C|9p&*4~*Mfm>*rC4>~~~>;Zky7W!aE=!0g^ z2MyxzwGvbCS3BBYFgLqEnXRGB=1^v1QjgO1>Imaci~fbzGYm^LWC?u3mi|mMLWwXY zhQOFO5XQuQFeW;~nAjc0L>m|rjbOYtf-$jao^ez`j^O`}vh|a{KJth5FcjK@GqeX= zXb)D<9?YYSV0~u@>pKHv{>DfkT_2Ef%e-m6)S>4~WmuBpy)H)+aiC0gP$qLIlM$3j z589?)jh4XF64n%82TP#DVS*j`e=^Y7$xO5l)-!fjfOrogJ zZz#U-pqYp`%#JK)mGSIkbtsEA3)(KUB_Y$FVa=AV3k{%Ln$QO|bD?i_Q-^V!3F8v1 zDdpCNjP5KLClFH6hyEnhL0^Aj^wvPVLEah4&=ty{FW(k>YeJk7#Ic}_DMK4mfi?zX z&>NP&o|ZDwa+3DA@viqG5XqwRF@Tq z*mZ@2qf{2>$%NfkGoN~4nPF39KIO>#&T!L&HN4_>-m+}jSjUp7&3u6?2bZm->@F;O zaB4F@$YkaBfVV&-;vTl!9qAdWAVb)D8G;@|XI-hDFIbQAG+6vk!}Z9)^~g#_Ud$H$ zw3$ZyX%7Ue8+Zy|pHqRpp)R{dC*33ZO~co};1D^j`8C!Yb_BDA<&IpLNKa1{d|=#c zy(%p~PP+sS1rSuUr+$Sk?J78o#|M?M2&OBRsV%HOz~@>G8g**bs+GzDRsK+eAblDb`?4w zJ$+b%L7nPDor0$X>zTOT*`E)!6E?bYUi%){K`m*`|8LFp~>$lgjoKLI1(}3eYO>frEI7N#4 znfVNm?L4DdKa*t=;ttN2T= 
zO}OlXHAgwZZGry(3|^xuBOQGW>Dx{4(9ry1>;9F%fk#DBr|GSIHvm&zp zr{n+kEnL_9KWO;>+0_c<{MWVqPg|n`xwGQdax%Mf6xfA1k~VC{&s4P3U$4LpUj=e! z3tG$d*h2G6X0^^sdOr{FGa*PIR1=%3fo@bF-|{EBKT6&X{Qqz6s@8TytZy#Ih45~5 zYdL{}a`jhGE_|<>{C#ft|MemT^&q=9j7#jr|4a7zy|vu)%`}gjrPxoIz?VR%hR~Sx zulot#k0f6%?5FY-t?dvOw)WEj1^#R-Z0)BY1?9r`WVe(<3jC=_+6i}d8|4SS1VZ)f z|5-cn-_W}J8Tqa4sN2xG{2>b3iSNeN<$9=~{IY&%GDmSgY-{Y;1?)WS2 z_#^K48}9fM?)VGt_yg|vz8L)ZcKr2r{PA}D?RNa>cKqe`4p0_12mufhAk2lZ48jHo z2OwO5@B)I$FHB?!p*Mu#5Mm%qhp-&NW(db1+<@>J0t@!?CJ?$qpdbW5h=DK}!h8s; zA#8eO@ zCelLMNC(DkJ*1BekRdWc?T|6F0aIj#+9Pvhfh2 zqhW!;1O4fnjiM71QFwT&C^|efDJGnknwk<7p2&+$;|qoSHX~wM$M9qM$z$>MUGS8Gez#AUme(rAukfg7beAv_^mn3gr*2mL_J$E zo3?>rG@v?ze54_ApKX@6-nW%~hd^x#wz~ca~OpbR5c#V*@ zQ7-Kr;Bl28tsD=nfV9A?l}l>{JS)(L?=O@6Bza6S3n~72uAt+_6BH6$NKj1h0Kpps zUlL^I(P&A~jo@&C(FA7@T$b0|mh>F@d;`Jz1X)XHra*Ax7(w7HzkvgYU633?LrCwQ62`I7T=g3bi{wy;aMoU}KB9D;@f-w^p) zf+q-;6Ffrl^CR+`34cusf3RER`>jQM5~3(hxL->&qRB&_;-J zY8@9c7X8Yk!b2i?(c#ipRM2r_JVGGmHyo45PmUGEg-c(e+7#*U?dgTJF2)imX`=8H z7)5!>vHVsN9h9lGY%QJ(S9T>RdTME(!M!-wM7G+gQ!UKC%{OkglBS@=*~?owRt z))K>HW!C}U4^ED69y<`S_Xw66o+RKpIdj8@V_jG1QpF~wMA9!J0r{UMgA95rQemDEw|+`7(M*YsT#-64(nka6mB@-a~4NCu%7j2p$0;5DsouT4&q>f?b@ska`RF zJCV;5a^q7{!y|dAa_$WAl@{@+5*5oA;Rf7N1UH1>CY{)Go3Uq`i;a?bbt%~C5gm=0 z%_RbHoAwBHlG-yF*Ix7Z!7>fnvU+J9k2@7Q#-dXBB7u+}6`u-yLDm(ZwL)VGgvmc% zzzrY5b;5_SXdT$&#gBsJ1GjZsz*b8u&Id=)nf1g*ykTG)ev(RR$ps|}+GmUEa1Q1) zIm-i~CqyPWElGgihOAgjw&RfQH;^j|7o@bWL22TMY0!f6@}U|lYl z0<&Z*el#DtSjrUGC4~=6PY;R{rcCyhMdI-;9J76dyd=IaFF87qFLZ&i871+PqT+-I z^^;Ob0)+ZYDV)auOeLjE;v;uVK|SCrCZGq_UQiz~@loM%v?>I9Dq=Y}Cmat0GJC=k z<5MAB@W6mk!-ED7^KL4)JSW_V=Ji34b`mc!J~~{$!={2G!jn^^`jS(Gd{LSZ$^t%t z0X8`Wp}hze(@9R~42lr~g{o0vG{)N~F(uYX0K32_X%EI%1VDn1&;kTjMWqUoVaF)7 zI6ejMbdtbg?%JlT~R5?lMo6*JP{A_%|KE7c(5`DMWv*oTzrf}Xa}|d z9YQgQDJep93dJO)M8oO}TY$bHLCR#L&q$3=MuzwZ^6eO@6NTUrV=2ZU69!fee#NFH z^C!o_zBP$h?TY;oFT%|M_5MMJ^AUx@Q~7b>G4YAmMc8>J2)~{@CO#G#Q+P5DW}wO8 zJYg)@-;+)m4}P866va=T6faCk#;^6pFHn}3NJ-|W$BV*6yvRg8ZV%x?ek@cXkT(T(ypzM}MJ7G7{6zl80<}%OO|!?} 
z$YFg^yi}2#m_j+yq2<@ViskTUVW49YLEOu^()aeaArEtq5XRTZ%@qH{UocB2!hjSG z>$5~ItQ4fl=xHfFc>+I51WJBp1Xq4q74hN||2E=B{tE2*cUtf__4vaaMRK%Pq${6) zN&i3cXLL#uFFskmivCA#{1^Q9FXX7$AFUNAmV>z*7CQ{{{d3 z&n);!xqqtgr~ZOA2_A+`0M`qwjf&$5!$m?~yeL(+gl_V?qV&x-4?oK%d~ix?By943 zKHkaff#n0AmlT;6^S4(0U3(zqqZR%O+J3Gt<=?C?{(qqGZ~O7lk>1wZKgIdWZy|ruE=PqAgjFz}vZMKlu$TE4>H~I=uq}iQB+w~M z%`iM(gumf~3-YrHYL-u%`kU=g70Wg4=2}_(fALQjq4p4T`AlR5*pJX$z;T3*0GvkX zOu+4gt^~Y9=sLi+ghqM@jn*S+0rwKR60n}o4S**4G>zX`5DK9`&=@xvAQX|xL=wO^ zhBW;Sa9TTr>_kkI0jOj`(=5PmX0Q_n8Pp!3&lWWO1#p5jLfMm;C=u|64NczyH1162 zYXZoHa4e1~#T3ksXCeV$DUJhrGvG@I0-zfJ&AT8Z1{z}kgleFJ04G5}`1wP?0tg(S zHvpa>{3^iLg#Qk(y)BdrXmh}Ug!TeVAao+&S_nBnivhnusKhs4+94DL;T_P?fH@HC z5|~mv0l^pOD!^BEw4Me)3plYWtzQJVn()^Eo+JEfz>kFA2-u+;OVd{s3c)1_>JIQCk-v;@P_U=vG4dg>W0)uoFwFs> zX#CtT;5HdHRd(w0zpnor##;D{9X@O<|>i34c(wN8)aEcp3 zZqOz(04048+JM_O;OV{yJp`GvfFJwO{6;|D0E89-Khzze5H6G*_!#?pB2+G9O3~2^ z>InE8z>YpN-v+Q|5JCr1puB)tgK2sW;L_m;)dIZ?urQFOHvs-V3Zb*0M+~@nG)=Do zJTM0Q2{guA<7gV=Z(#^&Lchxa^bDtIFF@NU=o$ZPiK9$5ujOz`b7z<%p!x<8<}0O}ERVq|TkX^eX}L0y2&<$z(u2ql8w zA^@+KLcat0A>gCUP&esJR1f%l3qm$Pqpi^Iw$Zc+p#64)E<@Yk0DAAFX{$Wr7HAj1sSwzpCj+n?LO$e$@lV2k z4rp3Q^UVQgRFXLYP;eB+Mkou$nxi!TGT^ZjG#}&VDzG2uFM!TxXxarZ{|qgU@e83b z&N_$CGTfd4*IkDG4|a+HJ71;wc7W$@L7N1AHDJ~q&KdF%78kCqh9$YXbiDnx?-1zWjjDbI{oUc>gb&eh7H=8$wpl?`i>0GZ<3(EMPx% z2Fk?!6mY*j1Nj1d0Ps2lJjdMv{6y$4fTjjCZ4Q`5=ybq~guV=DZ%EV8rVLcqo`D*m zUtrv0&OmOHpysohz)cQppi9An*hGBWuSCi7QoF8G>!3~BLj({omT>0 zOo&)pzju!q}+fj&bJ;5r4|)0d{*1~AY8cd#Lu zDaHE~12saMc?dX`%RrSthXU$*GLS#iqaok|Pny3F(9?_NdjTHz0e?ciRe%A5X*vk- zg&zY2L0@YCj2KSaEC5s+!9WY~GroWu2`vHKF@l!a1$cEfXB16i>^q9q*&i^J&=_L~9S4|AXpDIfaG%NtEGK-7Ckej_u%7TS zz9IBGK+|Aa#vHIqFwM6E97g#5fI6c=FSKV}zzc+~2K*8N?EvZ;qsv&D#wdYM1vJJ! 
z8Oy3+S_eXd3i4Je~u( z;DE24M1X209CT0ifOr1`>g7hJd>vh=IoU&^qNo zxvGTAvC^U`L{JRodX!Y7TPQLHUeonCM#xHY!WG`^>~JE8G?@Iwi0wH4}aD_yP#zJeFsUq6a2K3*Z{bm&}D!(chY4M?1H(h9Qu0(j3Iw8kl8*4O2mB=@GS%` z^o4hTFZaV(40HqFtqKOJ1$m54l`sy1{VsryAy~n_pdN79VVcIc3j$tiVZ2OejMI=WIYQUhGQf?5mH>7*%0L;|=YXpTy#`S281xfdFMz*6sKfmc@Ef5uk3-)-PSXOw zAtxBf1ni6eTmqpI$|455Na)LeBTmx!`U6H*!FU3G5CCSKqUjvK>1UuVLD@3_lg`n! z08sZlj5$DKw7Wpda{zs-Y1$ufdJXsr>j6AmL+5)8F!2(kg)s`F&1L9^U?;}&5Z2)9 z27o138PYu$x(4>t!dw9SIe^~RK_~D-0p~+tL;GI?c#6=s0M%~LGzW0bP4FRpMGMAT zP*1py08MVg`UGezz|h-ty)*!ByhGPx8DPv^n#T977u==iP1*hHn8v7AN7J(V*`;$U zU<#q}{q2kEXg}lo+=~f~?{_~%Xnf!MJ3{08-#gtSeH?JYJ)#FtLiqT;_-BNV?~k{; zPvik7+$ZvY62iy#%|9c2eE+=N10oMN;Q^5cln_3?ul^a~Eb`~vz9$kbze33UK8#ko_9g&oh})bbC$nG5Qf2ps)(e9Z=0_U_krP)8X1LNH09-yMWsy0;M4Y#8yY_m07shF_>RlF;~J%3leMAC^2! zX#ASMU}so#zF|cv{3dg&X&I% zCH%&{U=)bL;pmM5fEtWOpy6;n7>+(by%7tZ!Y|e>hP?l|e;pvkL#p3OPD`rv40{?|ISG!@()Ymm zNbMg%B;$$wGM^>?<8lNcGlbueh%Jl;-$g;5KW`tnE`x!a3er5OwYX)bL26u%tq4D_ zr`<|t%UW!^4w3Y|bTFDoTSe=Zj*yMCmjp?@mkc@+o3&B|eYH&XYIC_CjE8XaFAOP+ zD2yu<6lNA?7v>b^7m5odg=K~1g_VU>h1G?%g_%X!ML9)fMdd}6MO8)BMYTnZMM%Py za3pRLuEba3FA0%ENa7>{Nv0%Qk|W8Nh$Rw9nWS7&DXEfFOKK%`l6pylq)~#3*~PlW zCdHg$w_DK0B6FRm=EDy}ZBEv_rBFK#Gq zEJh{l65SG$5~~us5>AO*34Q@Kes|o@QnyUQE%7b!F9|7$D2XdcFUc&)F3Bm$FAu-8-@}NxWu@h%>6>#l=WiBow%TI1#cd0Bi|-cyE%?32(idxs*iP%bP zC+3LV#9Xnj*k2qX7K^LJjbf_;-vU8_xS+ZqVtwWM`t?>&6ODyd8~iuaZSdWgxv_F% z{YKqQ5u3_3p(1~%4_pHw;O_>BNpb7i{kvawC)@;JZq4eHrlQtup6|SmS`1aDIIB|( zup*fZhLa9QOGPbWo-c#1#9}g(5hqkdy}Js7#mMT-WU$1;Ier{tIgSoTO$onib3HQ? 
zahzJDWm%borMP}zmaSa+@s=QBRqm$W_GT45DAZFsJlgLDca0-ooTW93lcoHflcn@T zti)t6nFc+f+24OXJ(sCHnEpU2<33hPjAdwmNi&=@IBF_NqgW~i%u#_(1{^(n!ZuJJ z%}b4oPmUF(Bs=MHv@uiFKsA6Los^Os?PSR@#~gJ7qhaw;!j#mM7?IV$6rmtRD7|aL z$&S+n$153hkdKe%TLs3)CWBTh|A8J>&KxHvj-{~{ptDmiXQ!S{F1^RXsgsj4$C;dR zME?={NW4-T@tiD1M|nqq|CO>BI#AaPb!HZWfi~^-@R@Qja=awRk01DP?1Z|CLj|?_ zzrD{N-8-=N@uRO#ZrtS=shNpa=XV2+^`2@nVAo0O0XZdu3Z|FAU-O^sU61bfivIMJCF6bn>0K^JXd}DLDMiN z&0aM;UZ?kSFGPA?4XYVbVQ}sI&AJCZH^$r@u&Cp4Ki4kGNfpmFMc9rSqF3fwd-bOC z@oTxv({r3pt;(>>@6%|l^T?7aKdtn@)9X@3O8ZfFb5|~VIMF{YNqe`Y?bfVqqaNj% z=FZz{s~howcl@t)qO?5Ivf*#0CvI34IGK0z^TdPC$J~3qPw&&fFOrP8e-0H4T({2r z#iKs28=_uK)t%mH*qjGDdKou-9Gf1Kxc1w~)5k9+yE5M98rXL#H5#^e{Glx`dnAs% zBH9-0a69)vuk@~yzZ`#f@CsvuctBTP`~$xoXNH*Cj@%XR_VD#h@vef5h6ceH>mia! zzSNj??Q+{sWKZ$0UbrKUuk^99!qDw;SZ3e*`_`TNTG?3p(Q%WR=9eAECk@D^PJ4b@ zyrkAV=2-Na1+zP6ExYFYdg=D0Rf(T_SxtI>Dy8fo_v`X$|cQZ!Vj;{C%xah9VRuaDU7Z29$`Nm6R3 zVK1h>w|&$z=Z>Y$>LHAAuD<*KNZgjg$x@vP(?BFW1*r41b84jXl~T)*e^&N?WN3Bf zaOk16>yP+=loSwx_KhF*;EDKF9%-Vu6k)t*O4B3&$KD(_XHKs^oIZFGaFw1qaq#JX zeUi#zww?}{csgLhbO3#>wBfT**W6)CZE{JL?l6~qZ}x;(|L)noTf)<^{w3uqZf2~( z`(_^1w7lIr;e>vzMuXdtl`3UbeXcMJojfni*P0Uj%Zz0aHi_HT4PNs!ZhXz%)qy+I zdmP<%y}0|4XZuopp;e`_x4QE+rFU% z?cVw9t8p}Oix*diE z+pV7}tkdtickuYy7cT}bnSFiQj%oASUw7ZWFsy$5h&g6&3LM8g%;{&pwP(ojz3$(f zFYQzsuzkn2T(=oDYcqSiA0E8Kx>x6;eUhVR2JTy& zUk+4Si#ms!*zGxMYvoDP--Qcz1+4nn#U9ukP9&l{; zXv#*Z;o{VBj#oN85QOQB^qIV~5#2kmnVA)Sv(jki!SL!S)p zT4Z3isc3p^=HmsC>ESyZ=Uf}TCbVL*o!#pfNk{D#_gKvBJ>u}axnA>+srenhym6qT zX!*zF#&oNY9tL3%%U8LN=sD*`S+?<=b;IASC_CUIPRy^VyPCaFLpB}0fa&llM^{CS z%z{P?7W6_SH~%S|j7*yn7&2L!mg=zoNrS!SK;*&Ez$tZDSj{|+PSe=0%uxbpHq8m` z`!bz!2L55FJpA6XqLWS+JI#0DjHRbJn29(e#KXix=M5&a5X@GsXCdhvClIDYr$x~- z5%gQ=wa{6cW}0~!vZn6vD_;ZHKO?I9?o9uqfbU)EA;>`LT8*jy$mB*R2 zj5BLVlam~jIJ0JRC|Pz)M!TMWH@8NoM5RK>;*)s7DN%w{hd5CZhufrp$#JoCwz81h ze1}Un(}~ip>J&Ijh3z0uy0K4^ZiYq3!OB9>>>K-L-p2g8pebe!muf|^9an3t(7P9v zyV7&T^qMJ}Ifwb-4n5o(j|wj)&Hi?Xs#ibVuVPSf;oJC|Q579~ZOjYf&&iqotB?Pv 
zTFu;y8ndD1Z~J@x8c<#KHQ|A~szcY+j|a5hczL(QvH|2vj$9qkcmD$cZgm zAMLn)(n;6~f+_NZyVKEw& zJBPi$w|J&`iqUlMqDhaq-S~w^!vZ4b9kqyZ&6{`k#@og>?Fv@e-aEH3uli+Jl*fZ{ zs%w66SDCDGQDu8t2g5@=-tLB5C)%?P-Ss%BZTIpH-|>0ghl0=*wdiVr&%rTo^ERpt z_0?UO*`XSBJ-&VQM#_7#Wv>%8>(~E2b!x}YzRNq5{xzsm=KFPx6$yKX=H34@E#2(- zv)(JGm<;`Xb!Vrzw8z^%fBE%Kjm&59{kDDKykPnLcK2>tQq+Wpxnk%JZmc+fJj4DQ*zRj-=u;iKh89qv^nyzMjXc< z30Y1ZSyoFkkF8(xIKxP^;pmoo&no(--(P+qsPc8E%Di`*224I3%JGy-uIc2$aTXh9 z>bJfoRA82A(pH~tF%z@07|2t-n_1=|`y(dnBHvsc<8jcDvqPz(wA;y*C;iu3Ub^1j zVOqeg3%*YUDe=0i+p*3U+^HO--+t`*Sr11!4djZJg!Nj*P5Mf8;?hAH1xpqI+7B@%;=b#s?JFxskXc zLv~r-Fj;*2%!Ad5GsitXbNNBg_HS!CABbGgpHq;f|C*Df|DwscN=gi;EdATSyWZ^D zg#Kw@onSJQT3?&UVvJJJkX5EGgC)*lc!6)Z5YZo6Gf_QfjJgs-!Nm!uuIc9ui>AM{ z8a?r_>aG1@3*O#d^QyDXIq%<=F1Xy?fz4EjxRg_*Gt9&Q_V%#UgQ|!9J;%wx#SPYX ztV|~3op{#e4YMwCW}W|!^qGHQQ`v=MLyKAj#qkpPsa8ROfmYsu!~6R5?(Jjm-Lsdg zeJ@WBZ>P?jPINak|Dh5;oM;~?;w1^J0{Oy8@lkwnmbMosi}jw9rF2{Fvds9kT~CJp zqFnGy+9}7kT%2e8N1Q-Pret8D&|)%daHJPyoM8NLaq0uLC0&m@b6lL9$m#!cb@aE_ zdsbcQG&^Ui=|^$Nr1SMtL%yz@Uvc!L&hyQC*PU|xI_1uV@F!#a`V*ABV;M_3B06g(2G3ms#(RzVR3|Uv+m_WW&a_%XQZB zXV$q)<-UrvbC|tp!~E5z=C@3>O=~wlyZ3q5 z73o;H6)C>BIz(i*AGq;2SKdELc;zT&0HQE$4+#cs76gmyF2`pcL(Cu~@!m#my} z?`z1GdwXA>?uJ$zI%zxVsIRE%pt+Jx;E8QRi=#)p+PZ_{-hDsTVVl;GO>5SczDWPP zF=lv7(Eyd-x2k*|G2vV|8>gg2gDJv?Kq#g(2De#z{z z|H;hQb#H5ae_-2bahKm+mMts#yeiam+`=;lYu+X&uC`C>F?`P#^W#r`p*D@>?Rs?m z=*EO!>~cF@ZZzs-okQ(?V>*3-+1bOJRBEmot_ahpsT~vltHaU3*OPXa49Xp{1RY%v z!^u*a2!r1k*~JL8S>5P=o%|nrF+w`{!Lc{KDBaVwS1&yH^`cK*@acbfX#TsAs@Ov7 zz+}AzeXX8^VULHLndTcVqALyuEbu)l|;tE?I@%TGh|q51MtZ z!Y0BwB*N!S$e?-7b1X%?*d^h!g0^2+>X&=?*ru?HJcDGr6K0?0W$wSr`{Hxh*1{%M zUub5%VB65>`TmjH!@f@LFrZ;{+UA1`Jcn^dUesBm;vPHmeN3g&3a=Tjs)v3}ezfZJ zcFCKm-PD~LZVXphvL~C}t3KVl?CHvL{hv5`?JIhBEBfFD#$hY9rRnFs9H>0y6*j)z z8iw+Sj>BXl)no9;{hw~d6%I}RmaX`e55Z>^*RKzajJr5sSjT6WJv(p=B8u(-f_w-{`KPB2@g-6 z|8ROLN*T)Wvgz3W!V9I1D;AIGdSK62);+=R&O>a!j@st1kU{BA*gc8t4Gr8R2h z)L&Bju^;E#KU-AS(?P%M`%1?kTkDTin}^@9E8kvxbH$D#&-JG+n0gGWoA;^PszSXO 
z&ZYe#=b?9_roCUez_I;F>uqCpZ@uN!KhJ;SwSt=CqaMWH8GZQT9Zu!fvyX@UrBx9& z;^|sTQ#Z#07ebUBUTKN_d);`YW@~1d)u_JTc~d%C#wav9sia3KuNWEfzYHl8eolOv2y>O=t^ z&DHUXfGb>vb7Sn z`ffYi$eHz7P65-=Y1T8&tS6jV4>_|AvaDu2n)7_w<-FhOW;~8o^4k#Ib6>)e=hovV zZyTTc>b3r*`!@Wkci56X zrSB6KDs^5wH#@?p*u@4V^nL6ZdQ6g#TU*i&y}P4jTl+-$H=FL!)5>4h1nygQ=iY&o z-t%000&WrVRP3|mo4+o_F<_%yi)f~X~3_m9&c^` zVPo#o&c++Eo!6#1@-}gwW$b>P@=<%o5hJ}f->Yn0V{j(X(vGvSZQC2$*w{8Uwryi$ z+Z)@qZQHiqZ|nPaZ*JAhpPpx)#+-BNOwDw6)4}_B6twp9$8V3Z7o$A`SB4~Cs#@*O zMm&b3%}2xA4@u!l8@xlFvTXMvqOtvl-7Xyu8;?8F-FSW*XjoNg58pAz8O0nBNryj@ z^4o)uAyRKLtZyT}54Twnz^w?!zQFRoK*3OffPj7grRzc|6~Qb=04W0jg{=btVFKX- zSv$HI*)jcO)H86hwKg}>b2Tt`qBpj(Qc;Ei0=<7jG+6vVG;nr<0Rjel0tN#5f2Ip> z7%%0)N5BP5E0mR>A(W@EK)i9cFx6kfuYwF`BR&ju~pp?y~j{pQb<2OpDJ0Nu7;iYSWRmD3D}e(m^53i^ZFhci2TVJ(Ux*PxmLIweUVdDbmyia?tS zVHV0)F(dPwGe0z_IeB43{fTDD0#lz*6%Fe_GSSQHN0);#!RbY&&AYtQR=Fmj2u@xl z<))+A>?BV!*%#m|>`~A3Vmu(BconjLxkymZ$dXVP7O9qM6{L$OBol;md=9=urUXT} z7-<-(fxp0PiBzbsYzN+U$OniGNuL)hK^^=qv<^YPDoov#8|tpn#1TD2GH@npONU?y zoW5ijX15eUeeI)nOlEbJ63zzjCA*?==X$ zBydON6rBlGqL~h?>&csg@1TkA-nIgR2gXc_km$IP`)jE_TfTO!M^bly2&J~m0WWc+ zWEP#wFyZl<ablw?V-E+550L2rQCKw)1G^we;Fw1>l(SqfIh=Ql>p zaG5Hs&TS9M5LT9H|}1K}oA#LBNV!PQU0Qp}$k@DU5E12Y!w zmcY~`V`dw7NQjeEMu_$Mfl8vBYKZmK7y&3x`HJ};c^s#m#YspC zYylh)wWsKUnGY^Cae$B(2{Ve@vJKJKn)H7i`+>=+68=@V zMKW>OT;j{+4?TfokJus`(+j)1z0sNZEo`aZKsg+FrKGXKM$vK#N}lO7psO}T>iRj2k6CNlMv{)cuQim11Y6%35wk>wGF=3vtm$L!1! z-^?~!s%+ncLo^?UgDv#smk!Ghb1PQ!kyBzckX~%Lu>vcVW z==mL6B+F~o73pwMt8z3C@vzmjUXfb!7&^mG&`Lk}iB}HH;wty1*@2N4iYhEd2%`t? 
zdS0>a%k6C8?x)MroKw%o^X0P%L``RZ)n2sHMYCMAm%+TSMXBH%P|ZW=PAEec7yVSq zc0mlX7ZrvdGZ4_(qk`$e(hOCTkeq?XBq!TLf&S-bo=*OM&!%gl($!8v77)%m~RL?QGfGcaCNmTn`$K1od99j-YY*J|tszqAG**c%3VFL*nZ$|L) zKRoSher{3r^auTx5}nHHaPssy>b+4#EOh0-tcR#^F9lpz33>*YIyr@}7zGoC6&T04 zFMgG)<+{fBxr0C!jm3nbzZtQ`7=hei+|HXn<|bJ(;p7+wLsOs!`g&zNY^9~e z0H+2M6ZEumzWGTf3n^;j&pDQ}n|$riayYF`R<&*ud}_uwJaw=ll=s}|=A7#PJeyD40BoUH@*TMIC{* z2F6}?KL{k&75cmlm|=|NsUUnO|7FJ`?DxBqWOL`irnM0xxrePLB3SATft^ETD+I^7 zlg93c--e-t-SJ6)-{}P&#lJsNm;-ohIDtbUC-nzL`gvB_{b$@}J+rm4>D=lq= zS@YWsCAT1EL+H=f7}v?eo4m}MCR?>qTpziQ#4_;|ZgETWZB`&K@Lv(CDi+xs_ZiG( z@+nFtH{8PLLFyIswQ5GFDr;wHM$P&uqv6rA5}>IRg{M$ctsqXAsWX1m&F#^>Amcobt36(U=9fd&8EXkaw z{ovr}jW7R^uW#dp$|)?0uoWcFdc?wtoQ6Ik%rJDrk%CQc+ zr>&m@!HJ&Qz~+M$O=p-C0M!IX3_1)F<#Ys{ooSd?l0C#VBOmiy@LISE|HeOVeGjU& z%$;qbL6Sa0sVyPmEKzkH{^c6G`?&dY4*ZxYBS1~D;4Jn2WXfELn(8nr9)?2|43}mn zJr9iB)v#iPi`;n+K&sVF+05x=8je8%If$6q|3I=MMYL4z)53Fpk}1;GaI6S}}H{|lGD=1}2_9dhzpULF>XJH^6g{JGuyqx-a&1$>E< zlM#N?F8Iw}%_7>K%qKR(4!oit>KFl5py1i|V+&4gT1cIlSB519w?EB{^~bC1%ZLQ_ z_2kPY)6d06kdTm=w}=PYmbXIoEZvAzXBPmY?n%Q#vC~1(JRJejvOxfi!_ITdMnI** zhYMhHFFSN@^TiCcUgof8b>oJxbFp(L#K<|ojos#fw+%39be2=iyZ7sEgeJ|pA+G2$jJv~3YPw2mR^vVQ~=iMd5{s8cKmePMn6;91E! zR?oun<1ImT*2{FyHfsJR}5#xOpqI+ z%IXETVn3@&?CA!cY+B{M`aqw{mO+s)$7O%tytZk5byggVmh{Iey!I5xwXb_zyfU7W zx(pap3p}V{0=KKj%0WIIO0DolIHRpRv($Fj=3+!Jl}7b^niv2PLz@r^K!4wBvk(PJFK!^UJG;sz?H8VmYe9Q4+nt1j=w zyUa9duL?gS%HpALuMuNHre|5V=*3Gu%k?eqf5mTvqOTAoH#1Da=&-?bA`-ih2mSJ4 z!A-$t6{NPs?;kVKPs;|n5ve6g{OSEBej$dC8Ooy^`!TWf#-1Cx9ix8Tf*Ycw%ZB(x ziSH(#Cv-b96k~;pgpE=#Nqdh$8&#X|s}V&a3r_LcIscQOfvMH}7#wBb4XSXswBSo# z9R6ovHQ9x(8!ko}yl4Q`?r=lM*zuZW3CjoYD=51d;93qCN+*v4tMq(2buwBe=!%o) z2JuRu!DCD-LmOb1bB6zWgl;~SsGB<{_N}>}J--z@HXSD!1PhH)Pd5CIq@HdtY>=dm^vf?>{hRI-HLAxbK6)ocE2Xt$9|a zpqgOl!I>@e!J45x+^S@~LEZV?Lm~q}4cV zP-ay*j2Jbak~ffZDGw9agZ$HTs?jCaag&M1VoovhF1bMGeY4_oviG@=X}z5SfLPzz z4y101AA;pe=TvTq;C~80Ynrs|Qr-ACm8! 
zD65h$n^U0{hFvtu9IH@_Rw?6~Iw{c{F7sgKND?ht!xL7SDqB;b?v#2=z%2KYeWXb` z8z1LbjKD01%Ctz5dNfO-<`1FvSK2I8bS_ukH!SsN$6O>$vdEEgHWlJu)XE$yRJ2wt z+ltIv8!Y2X^HYu%4=g6{&Qt3N-~EJYG4UmI8kB3#*LwPF$9C!lJB-@ow?}9R0ovXB z9YxX}d;>}qbWuOkdMWRw1C1cH;GLPhmIw@dsNccs1-i$=vBips@Ng6YDRB`3y&c9C z4`o@j9s)}-QZjJSPV3qukqP=ND>>Y677vWX31nY5_za#L0*IG({Z|W6l*38sSf;e!A-MW)~gymhcqLD z6)OTo-DwAc(UV53FR0R-c_09URi6V9h5EMarO8FzCy$ImR#i5WZSdxUg>hdnlMsSo zEE^e@YK;BIt}oecG}h+$8?@|hw(wU5W%pLQFhnXOC&}FmzdfjHvLDJlY)_x{Fu&+) zEM|<)#UDB5{Nwl)p@azb!re*gec|1l`FT)Ym=ja1_$>r7UiUx18O}GiBLuC&K16M% zZU@yoH33U{A{&o~E_*g(jqSp%5tg>t6Pb*s0jnN%2d)jhUQKL>GKB6A33` zF+&mp>KLyesSqjcj$5q-UUGA*wFFL+Nv2I9vs$TN$@XMXY#k=lURDUc&p^+R&l+L z$Zxb_d5X|UTm*^WoW%k2CIRb%X`#~rqF%{5Y@ZLGn9uiF23PDvXFixTDRt_rIVMv- zcB+_Rk}*;?pkrRxyz?LPR+MMZT(ZMNcvgF>b_U&7)f6cjLe(hp13O}6)w9B6fAE3B z&$qKOpa%WuciQ8HRl5^)gs8{@*Fm!M=7Mt%+>52(u@ARud*+>}S&D4rMO#>xrc>Ye zsb{QGpJySwOla(b${9|~0B+nr+XF7o8r?i`Oj{T(o~|1R6S7>v6v*N2s9wb7!sKvP z?5SDIfZ#}fM47@?!9IbFl5T=CVC0%OTum7(?hpAeqIR_Vx+%XY<&GBCQ|*~UG-gXS z$L4qKYtnS!`V1YH_U~N+N54KukZvNjDNb0cC0(-hX9!CWVxs;6qE&Z$P;Dcu;rnJg z_4+h}P*ct)%(Y%X7M!@wk+|k5CeV}JI~T!Het_Kzqb&6uuR$#na%S)pVawt5NyT0= zfO4vjRhwzuPOKqqb)P@m!up<~Y*JH&S2A?+<;dCha7E}pd2}?4bSp^Q&eSpsq zK94s=1Ur-5h61jDi2~2#C}g`QY_ES>A3qzWp8+F8@2s7v|C#_`+|N+d z-xH$eIn8U=CpI>pb#e)AIG`D^2c+F2i_8VPBH20fw|JOKgT3E4(VgR7f577~7*q6_ zXj5G*w7y4`(E3C~aF^T|jQQLfGh)f9ZdW9-JGed3bRgQSC15DwC5D3M)g7BQz>_jrgjE_Y73KimNhJ6xWoPao3$qOd z{Wh%iHl?D~4=w%e1#e3^Da&O{hb{I4As1S@FJ|iLzR`!RYHQS90O$1PW)Y`FFr!3f zWCy~zRUD{uU}uL!N!re@XqR2ut2LCZpCQBU9`tCh;^e5ux_M*mO6+osE;oB+HUdT@ zDnto6WQH+wtvWBuq9xffELN)9J;Soa0C&+2e*Q+EP&jHaZipg58gw5;i(?Ws+H?Yr zrIrN9po<~FWdXPNh(8+Ch|p7xT8a*^O5)o;(hkLiC4yD#xn(+!^&&`=bT**=kjv;6 z@Y+k~amIfXqSp&mN-@nK@+st76uoL18Z|7!xy;7x>CxPpm%(LI3&|eRLlJp@;I;Dt z>}F@~Hnn~FARjUGJGHdmy6D5Owj&oYCsHqL9O@+JGVQ_j-sW>IZFT5ZprqlarAKX- zIL1favTO$Z(3XsW%)0x$ixn3mp5+a<{?&+&oNVi0#}@pXA|(04pV-r{=9WqE7*WVC z5Yjlf=*oOfKx|1Hw3x51-K}2S@UM_B8)`+O9G#xyKh$eA-9?)h~mcAHcy~3`_wfmiqiV~2qGh<$%X!wZyo{t 
zTE{zz;Ic2I%%}lTH_Gs8W;krM9=<#z-ZY+ivWuzwC(g*T6bpdX+~M((; zX$W~I-&NXaDxcVzXFxRFk9b#^-(U8yWm^Z(NS}&X>%$lkl0yE5nC`7)_+d{O83l;K zuf@OizjOBip9ChTox+nD*~<1sil0Z9h*9sB?1$6EBtvK(wf@ks`%KA)^(P#P#BUp= zpuvA_^akm4--ZN=qB3Cp6`@`do3A$SDT6pCOA*#DerDK?EnHnAfQz>o6+pC@AopBw zz;Zs7i|QYIK4ta6d%!+6J?~BbqY}Jr9Qf9m0&E@H|B$PI@TVC9252aBMJyr2=BnRP zmcMpe!NQ-EY*&_rCy52>7NZ=7?q~{lt}hMHcRMcO?1v}FJrW$1UMT3^PBYG%K^aHN z(TrQN7KfX)dQKv)`8_`ec?T_s1+((0pZImxI{6nai3V;{}F8nPr{l9hU1GQE* z4`)gAbkJ^Ffnx!q)Z^nJ1ZQniCJl_%P{|>(Z%9ZEh`PU9B5y;+Ko6RLSbN%2(QstN z5#b)DSPN*w4Uqr`*-^LMI309(H>*ZF7)CK4T>_4KRTSi=6-3svdu0Y7iZ^6=2@AuA zpArj}vY0bl*~V*Sf^7N|AFensQO^}<8gx{dBT+LJu2Y~IR4F%lqUcC-K8lI50Qoia zRd>D@enXTIF9enWp?arkv6o zjv*!MX4bt;5Baq0-9f`E@?ly)-eki38H#@6_{k^OTlFntz0zma=>a~^FR<>YKyA6r zg{3L0hFuAcfY~vF8F%&mS^W;T?#CNoXBg2&Bgn)!IB}4nkpAAn8{n47O*_gD3uMwwwqopkeMB zx#@hx59-KFFC0ck(XHrI^6shbnrDDy|3da=LeoHq($gWX3`ty(*)J@_I$ob8wYk7s8xnsjuwof)_S!aD62n|0Pz@ z5z6~6`VOdOaTcjAiaYhv_@DmdvNzlue#u139}-{dgIuw8;<4n2%bydexQ@-$;wXL# zBRYwy%R-(v)F&p~=E@1roKda_#J77WE^n{{a0?}Mu+!gDF4aL6USRcFlQqMA#6ade z$?a$Q;`U=Gy)c26F93t0;u-fqm`n$54{kKl>sa6~3!I=dW*!*T*z`xOE}3e0l*1LX z=(Ml6%*++A@9JM_5Q=nDtaWdw?+o4Lx+`UGB)z5rnMgnw z_fCV!E#eAiB;EiBsH7=0&d4Baa!PkBq(AoVaK$9uhoSvMAds)bb>{5 z--$Giso%P|mtqQKYA{i1={cZ=d|hoOFteI(>iFMVhvbICJLA zry*etuFMNT@o`J!?$OiscjQDg&FO^Matu^kMP(2H6YIpILL!p&sEB47&6!1 zGWTsg#mPs01Mo}v8{#h(v6Fxl_08V^Y>t@QcmK{RC9C%Zdie7JGcgxfnG1|Yy-j>= z$_#vncSgv%!Z)lS1}MwOnTa8=sYl&^xQJeKRds*cR6+-L`> z>#kz%RPCHhn8v9Y@YUzK%j2jC)uwRu?rk9>d@kS~NmI+FQ|83mKTA( zD-FDx;oA#3&nADNmghQ;=U485@q0%?x6)fMOkl5LcNjz38~B#9Wq*A3r3}qlhjxXj zh_eg$q7`SX#X4b`_hjX)V%Pe__YDoQ($=A5q_>&Vk@Yd4`JLjmD6A}?4naSGHRs0i zbq;z*Vyx2tyI=KHe~MN6ZZ zz}m&q6X42a$Har$wJs9P4l-SmnhO5aw z3B(|~{xEuwBiA>(f)5+^sEd=nhF-fRkn{6keL%ho{OWv1$_A}u&mEuJmb#|?-16q% zUOf#2b9B>%iG?6RbixYdS?mOoLkAWk?Ng?=ZW97&y0$%yKYWkMPH!H^wcc%4^`!Wq zyDPv|W1m5?xK>W-ZrE;-K9yXCVMV`?`+-Y&ptk!wxo zCP%lkhYZm3CYOA8jkg zMNB?cZ$VRqW`2n1_JnZ0u$^{wIv5gLW(Dw%vMcU9Z`Xo&X9;2(;sX8a{W(73*IeXL 
zCM>DE9pN>fKXm!ogw6?~miD_t;N0*jma38D0nn&}nb*h=-n-f7+r}P$#yr4`d0(-? z4wQRtiun>_Xz;Oo`CL9|Hs{haB3?I$Be#*#_cGi|=w) zTqVCwkaQs~0_cwB4>M*phj%m~tFZ4+Q;eL_AeT*gse9Q%)H(N?^Zd2~x7YSxpoJP! z4Nt@=&5tTYXfSU(y{T&hZ&T=;cnop(%%!%z&}?pxmPhXmF3=N0v;b2aoHo|6mwKG; zEBF`N8Q^ch;3z9whg7vj7jH|HVfzvsx- z8+gsRspW~(^K{#q8224GTpPJRcKc$R?%r|7Eor{}V9wSXzX#{3-hD;S?(Vq<<+jXQ8HqYLoN?w$k`d=3z9 z5J2kq4p-AFo;$HzrPJ=eOO356V%s0fY|+l~rF=i!MAwc&Z#?FC`!*oq0AvzLGjIEA6uBf~$t znF$CryPb&+ZA>v-CGF^dmBbwZ?~d4v(41jML_O-wGeO72UEU|d$F0vQziKL zc)8TrUyIKuyms9qxYCC+XB!O}YZ)?aR25ZEjK#i7`z^mWp6j_feHDSq=;XiP?nz&5 z<#nv=i7K9uI}2~bdK%H)%<>vq32Nv!LkkQ6=XKz^IRxd(+GL28W#!sUnSJlCi%ag7 zhKEDvKJNFH0gJkN8h*_AkNUX*W98*ijCM0HjZ7gs%ZWH~CY;J%5J>>gOq6ZWu zV(CMEdg|kC+#nSI&sD}9!B+etyI8~q(S~A&J?ZklzIe%bscnSkt>!v*_;mY1nl32? z6HGDru>z=@hO)U1+IKt4~ zO9>XYvx4q<#oJ|j8aAZ8-!ch2P;ivC-S1be|8^*E)m}Dx$-Phd)iUH89cB`29E@o$ z`08|=Y%rJdEG|zEj2Y|(S4Xr7K?Mi#(iqp( z=MO&{fQ4*RJ{l)0@LN9!!c|XXn_wsGId3(; zEj46jq`jnb!Zadd4VVy{pa=NEW$j$MK5!~30%IhoRvhFZ6L$Y8G(TTSzo%*v zW8{Nv-RYw-4FM(CPi9x$t&V5DE#Lbuly?}-3HGnkdOE%8-mmHw5BoHuv%Pqg#xAr- z7L0cc23k?dlyZYrf*{n+vBg@pS^$&L-}o)IY3?XWFQo z(S9kH0HxjJ@pv;RdWe!e8hkuI{#~fEXnRl>Xrhty)oAM%Jp3fqY)Xgt%{qI|dO;`g z;VrF1efueXswUr^IsRc2(XY^jN?dqo7p!Hb)=)KJ05_ht~3Ir+0;-6 z5)e4$UUStqP}TJd`gj?I>yhhUs%tNoK+|K&rh*n)saXH6CJv*>_;ttqR0J8Ow5C)m zZI^(1+q&psJshQ$$YK3(G8$7`6tGFu6tIbRjf9-Wq;Z4i>igshs}uF9uh}vxLqV^r zYh&E+9N7Yo3Vptbapqd-QtX-M`C8a(42JN?WF1LwW4f8%6@~4hV8>U@zUp3Sj%LpH z>Qy`^oQiNeUq+Mg9G&~;(6Z~*K3B6OMV-&CJEvh~QmWuOS>cALvCr$OGOmIcOtZaA zinxUck$BBS0mYL3SM6V$I*dM%xwUf{yScuBg@dP(8$UZi$N$!Nk^O5jI44FCKMM?l zrDH%|P*M^%i%|5GwQNl#(iZyX>R93#U5C#pTr8f0H-=O;^6&gC8xh)|aE3jpb9#~N z@cK*^5CxNBjJ3;piJgk3whLJ*07xE0vITE{gD4zBZu0n8N0!M9yYdy99T&88i{!Ct|g$I`W^Y(~q<@xZ7@ z=)=;JeC^k7Tbq~YEdTQ?$cHMesyjrx0FbGS^YDB!>J`W*U^91qd_Jn&scRNV!nR`z z`qgA!=G=w^f~D{?Dk?QwLQI-`(;CRL@R zbARc_DWf2L4s7UYTvVR~s{Tba?9eJ{{|S*A4#pqzA#mfSTUlnjIkA0$k;|cjirNwqo|k@n0D5UyqdcH zY?fsmH!@2DF+KO>0W&r5X)1Qf`<4R_@~|3Gt|!W?as6P0GG8J 
zdy`=IwHMp~ZCp0WY6}5tVB9UsTlH%T4X*5PiQlJ6sCvgV6>uAYAlHa_ZQTq^J;KR8c>e_zkkmF64~t#L91@wC^k zc%Pq{OvJck*Xe(*gHpka*~h{>&)Lzyxz5>3eXr8dcfO{UZ`wQzjW>*0FJZhH#>pE(e0L#AW1Hya}?1Va} z7N@v#<;`4(L#QI`0W_x+ryJ-5s|X_EbQ)(WsQF_Qp$uzIRw9%LFA&w0yCOX9NT3Z* z@yDF0gmoNWh!ez|o%%LK<-dMMsi1#xc4Js$284P32q2F##*4&DLi@Xi!BQOi8z}CE z=#z31SY?Tc{h39i(GeJmw=dM+3fK^WmC_MqM{jZbh2FLyszhC0E+HAtFR!4e+QqDX zdY}A~lJeStMz$#UgX~xG(wh{u<%fS=xTA5Ma(@-fK7`Rk)!Ztyt-6TvG3;y3a__CPy0P@JbQ3cFF$-I3J4&{| zLQUifO@%!K`m&Jn9tVP(>c5qh`RqiQ`I$C6Nr~^uKQ^bhF_=&xrc_RKF6j{VAkY)Q z?g*VKY{S;jw&KZVpsWCB+=277kM5mJ@qVrXSu2F_*JipB3rcn>TaoP6Q|)%WF>yu+eKVP%67 zg(^_HmD*zq9!_TC8L7VA#Lxzyn#a#t9>+sY)mgq?X`aFk8z39Rrb{!Q`d4RhqJdPN~|LIUVJ7rp$I} z7cfgZ+I+1RX9r~oX}b^#w4sMYO&)f_5r~R@+Vn)RpIAM26AW`v0BD;*8#X#yYNJb=}$s#<%l6c`MKDzJtHu!hivu zF6T7wGpnj9FV|KdK0z4=H|dfxJrtL|FYnlDtGDojGG}i24DMQU2t%>(t|xr)!>11_ z1#Y(q+V${4)$Ah1AmGr2Dln|Xmx8vS2~hDW^}A@Vh1C!tfbo@aFvnCv)q%k>OBN7?m8Z7GxWjbQVyW_36YlY9x&}qOWfdQT8RQVBLF?Pg0U#w%mK3=S&&ptw|qbBCO zTejc}m%?J;7cPbQ01a-*?*W+e)~|Ecv+4hY%1=L5FPHKUhhyI&4@+X-LJw16-wJ^) zcME>IKUy0g7X9X!S&GD9pIL}hZI_vk#IP~^*e&-B`t105{f#wX;4$Urf9prwdkg(0 ztnokpANrY>^_KqI(!SW%FaB>*A(Z8A$!|s>*nJYrd?N_d20(s`%FM*0?Kjpwxt1^f z*>42G+JNsK+$dE6h)xn$X!Z*XF z|Apq;L+E2@`Re{21%}qI?(Yl*+5qwIPS8{Vs=qz58_R#v?^w)b0Mj>f8|*%{z7udP z1MI&+t@hq*=e~J{q79(-mUR1U~&$hIo};auL7Wd*LC=2yZB8PTNQxq8|frCtW8{3q?5?cVKY@LBt7 zAP$uKx$66F(X}}78AHV>$K;_4JjWz+QnU4Lk4v%ku_oh*3GZ=XWEc1`e_p}Ijm{?- z>y`@HMKC|LG3DEdmOIBZ0pIq0En>Wx!8QMoo}|X`^zG&{!OcDW2my}lhzp0Uf5!>W zO||0F*D(8((cugHNH_^5Zx_rQq`5tnCq*Gu((Fz<(Q$E|gO~V$XPlIw$(w zw!j#*$#o1*$aNg8$#vWW=IGR-t0aV!#us6icwJ(r+?|euUUJ>DWV&^f8N|U~!rk|{ z+sRIZySwPP94z4xC=LMnU5EVz?)~e2iwvRfjBHqKRA-K2OWdfRJ7l3*iXR;zzatV+ z@L><+-Rr45#Nl8Z0~P|$ZU@MncM75*WO@oAL+Z#uudB`%?%XN!Q`b1j2ky3K>zks` z7K71sx7UxMy7pjU?1*Mc~L zKvI9$9ATR=kgOdq*n|6eOc|yF{-fhm{;6qp;V*Lj;SWrH0PbCf)lzpVWSx6^+^-MZ zd)NB3&8^Um0}B^j=vQ-zjByP<^3Wus?CELIk>&e)h!Nkk{zHE+_CogY)#*d^DucsR zJ!a51;4v1$ZTL}*-n-apvV&ddr*SR}?cs}SzYhor=tK-*pCQQ+aXTAFBtS$3O 
zkJ;I0^z{|XW5~pP>~qX~9$??~#2VMJZgY-|{R{}N%})^K*u);DoQYr-_SQceK+859 zQvr@29abOjt|QkV@gQo0f~?&!_r1?j=a9Eu6MkXt56w|Gu2spv%~Fb@9kbgE0kvZ0 zxW-(CR_W6sykVhHK@J~ug%*n9V}3qx5=szi7l)(Z4=Lq0?C~FQ$wD|TQkS3>JVRg9 zbDIZ>J4H=dkfU<7Ndh?U>yl>o8oA0`exAM=N`m`T#@Hu(!Nvhc6Ua)0tH3ygEkL|_!c`$3@H^em zZWTcE&UP_5t)y^kc1loxeBl~&(TwOmDwzfrXP0n~6WN~FhnD)1V!qNBn8)C(wIG>u z7nA=SDmRx1Wczj#6bnR0-Zc-~-OkfoASHglbEwX2b7_h=xBXF4b;wfYUixTuq9HEu zb0tb@1F_PEf84&-I)Q}AT|ihcQ3icDBg|E3&I0p&COV|e4)lEA@P@Nw!K;KR70=?8 z(ud0PPyZ@OHy4yuBE>F-QN z*ZO7f%l?<+_)zjx^1pv=_M3)b2pD`iUT>pgUHP^iTTBw7o8~*r_lu`wbTVk=R7FdRwlvtbXPVHq)aO7Q;gNr}> z%0y-=9v~>L8I|#18E$k&STXCf{(&d=_4GQ50pR?+f9bz_G`jM7!QX6dKIaR2zf09| za}207@xJ34_D~wA-MUFZ57aGLjvWb_Ih@~r<|&l3&*{PlUh?$)E9^9=9Y8DkI|`odPdnnhUe9QpCwnXB&5 zwMxTMSJz>^$u!n=t4y@nwfk*sUx&fxc{&=VI^A{g{BoE(+so%Dz7VZ9N4DkI>^a4? znaAlqES7$PM*W??t$nPtbg5|)jE;zGQhu(+>~~@us#u$Xlu#lvCYPCjh{$P`Ah-xm zA!d-@4{FSy9|019#1b&XJI#}$@$&o z5jXbCcP;$$?JM6mcp?6Ha8!>Nf+^e-9qB1}QQ{0#xSb<48}`9f2FOQ*2#!@0Er7(> z@BV~SSQ2yfZ%VnGs53v+(?U?Eqrn9#@_qy)A8Fo znV7>#N%!8nQjASKj*piarH2+WQqFlM?M~mU$OXpfy6c}6@FF#?m7!ok{uf_*)B zP#`Tmod-LiETGcuOVYqbq(q?C;o*`7jDCUg{@7DsW-D@{SVLmU#7gjZ{zwCX=20{8 zqDdhO5SfS;8j}K>A2eg#BAcT2+AZ|k&-t}Ly6*c`(8@)>Kf37srq&lK?Ezl~pYdbU zg{elWJPPQ|REdkbd49%l1u#{#1eUZ9Xe`fXn|YJsc5IUNy!xrat}*`Z0y}Rc(;fiQ zU=onU^+H$ysI!29u!Bbd_Y;@^VOQR!fclN!y+#r-_VD!$oD#)V+fj}v`M8*3!b20q z2Z%j;&Q2v6jRoh-Fei7Osi#cIF?6y!g=Y$WJX~(P#az-*(o7n-@^XK0MvOtATORP8 z+IV_yEjlvDmUq=%HCYv2d-8EGS8?idR=9GJZ2-xvCZ!mP9#DxfMU3wVaxh6Yqy>nbNy0uN&~9g zq#BJ}vj7N}`SzF5`KgqJdPLCgAYHEUI(wyy0S zq+fZbj7%jvGj!ACNzb!)bB6j2o6f@&21&1_Z!D$bu2nom{1pZx4EA3wt(JxIVZ#2a z&_{9aH%?6H#rDoZ&xcK^MtI{?~eXhNd-v!76tBUo}qp?_au>%t3m#~9TY3LVw8y}?!`nO zRDB9|6#(Rx1b!gz*TD0yAsBF47BMm*D!@LjPtX?uQXT|XK#=2K0aPc0{)}Xuj})K( zBkUZaGXd5`8z&vxwv$dewr$(CZQHhOTOHfB?fh|GpYz`2-N~KQs#W6}Rqb!@I2?9O zor;Z;P4um|O6$Se+ZtHgEwTio5UbVDwyQ(jL>HT)%QEeFuXqW?d-Yz7L4W1z4t}&V zcV61z*x4FxIj+;5C>el#s{>kyxzyVwazZ2F!$a?%Nmnk_1Hg>$t-L7YIccfXIIOO6 
z)o#O$A0_-For}hey`Cz&LVt6i1{d+E`{IxSXFk*|Kgmi_VmI%1YH}q&Sij(3&SKez zyXWT%b@+|pkXW@@yVEpKEwEec4E4r*^J#Xvf=sj*z7{&x)T>4r%k1kW&=>VmF(CL@ zc#x=pNg`^6OLDV3*D~GEM8zXfTiwkz_^dKlgShRFUsQLPQM-c;{4@h?w*nqbOZ}OY zRVz%6Xp95Z>19^?@6Xn$z_vVSsEK>CvCFY;s0wLnqf4F+x;&Ks62Ff{ExY6UAb?y zp5Y_HPEl@i;ebE8_C=;O`RM34(G@F>xXa0Swd(tM$_kOdmU;{XBIXE zhMoh32XjhDN(~gC)>Wj1?^8~)f8JrB>1R6Hphlubs>UwZ;&PSx?&3eyFDC_Aeo0?s zj(P$O3MTJt(6dNM=rLL56QA^Na`&V(M|#S1Yl#BJs?ymf2<+J*2lu`FQ-eRbpqT{s z@-dLF2m1KHNK&^UVC7$VWub|Ui1iASge`@|fFuQ_u-OZ;%>7N-UQj?8?@lSwAJLWk ziBXnrej{GS%%JiKy58%?gH-np;D!tP^Xg*6#jbqE3KD95;fJ^~-c+}h71a%~k%D}o zKWWZsxFw9C$2W@upMzM{T=1F_~YdVt|(!RsplSs&bx zM+^a3ljeUKu~P<1Jk-@M&o2%@YOha3xCtw^ z;dT5*qc;$eFf!z|Y16e0^USYoHY+)6>8n~F3s z5UBQ;FKIL!bJhw|( z>*P|U=N3rSh<6CP?xU%}M0!{ks9e3>cWZ4tpxU4!Z1_@TReSP`VH%Yi5QRM;>%-M) zBdPn*RQj{$D42K*3=cKE!k3DKq!&XG)0PJ-m9Dy!T34><;Kd-A0gPdnDPOXbkz6}B z3~ij5Izxs8$ay^7^*wwdDhQ?YpEL|SPsOalNDfCOoMvbeUXz>?l;0hHZ?(16X}B+r zK=ko>A0f5ujkd`mH=XA}%N9SQ^ia=P*K5l%kcttMOh@lskRn^wOwlW^+?^)dUiLbY z@ll4G9hy_Conz4LgfDkLi>`8=JvAWo0#`RNH)aG(oz>Mt_~u8abNG~d zqofBBba8WEUEai(uccU(mhEC1%qSt&6Aa`1@eBT!<(zyjTdmH5F>IXfnR<$~{8Pa@ z{=y}TCcKLPFqKNf=}%j(bQ2GZRWAmdZBP*@SvYnKTGxsNrN#A6Ri%`&sr-WOvHeMIqUZJUB1AN+C~c&+K( z^*POFT{w9Zc$sG!KvV*RnoAUT1L*VxwgP+1dwa5r$BYd^{8bPrrjnj$fl)d{g72*6 znOexhrwTnCSz@6rL!Vh_MHmHkBvmmE2Q7&g)UhqwTP!yzm4ft0V@hvx1M?`|fB4#3G$EDg7nK? 
zgX7;SFN{@b7kBIv%0Fe8p>z!yXBln1c7)1btu#z zIi`GO-8By|D-|*31|xc6-%K0xgAm9T!6?`A^udxeTuqXfF6x&?aP(qa293i%0ss-x zGaw{-yMv)N29EZ2hhBr!&;F3`KK2o&KmzyLsc=YZUu|c5Ba)AdK*1UDfXirT$oz(r zu)z9lu}p$3?&}Z8!fE{3Alo0}GaSM60yIGxQP}k@!ppkjws86f)q$|D&Eac|kgJ^~ zTzeb@X5f`Izu7qAKM<@3-_iulJALj)_l3dfizN#L-U~rfA>^*e^2UM`$B*3(|G1ew zrFqJ!BG?%VB2Hp|hweVQ37~6bU|?(E&rj`iCjo>_Y3{CJHqT?n-c8pU2C))W0_q2u zi%v0k@Dm50&s+799vRl+>${s~4=klL@+>$69!!H9PUSC}%W-l(rn^;QKLCV6#u*yr zY6gImg(+GTf06@)2o zNr49mBCGVqP4#^S1*YeH9L${X8YDRS#!tshI_?Gmrr{P{Hq#dg5FmYd&?9RRrP>{7f*X|cBXv%eJAeQjO8V~)>0N@W` z3|}xZ3pPedJ2fal?rzD#_J6?UVHZImI#c1Feh7bAPbgl#q0?k&ryl*r!g$rpFrz7hU<4 zdgy=$w>^n(yKdN9=-I}$u2nWmgQ2m$o9Vr+c;>0g7y&ruBw=t1e8?e7ol{>an-(G6 zuESiYrt12jlhZ@BoAf zH>#;9s+o!SS=@(K0yXT%{^v)fn0i0I60l29<)6M?ET^{(#g}ss`Mcnnu9$Ez=Yt93 zAP~L9#wMfLZ_iC^T;u2!q^JQ1cibg7h9%{YgX8NM(W}1^S-wo8IVyrlVRN<2OD(oq z(}nWwgodF1lklbzLgjR-68%MkBoA+5P}XT&kOmuVIHG|s#XO3o!aKp6w&Kko!6l8S zCrlsb^7Jgc)8Ip9+7uYRSIK#&daL2sd=v(Pf~KbJvjY`l%EylNZ{9Rv6;B|;2{CvjxE(RhhI82vi;ytk{n=3q~o`w5)UP^)-T zFb~K6rs?-mii;IiX%0j~AN*~C1WV|xga$u};UC67Us~nvw-imWuWo9x+0N&R(h}@D z0Pd)_{U4UIYsKT+00~i8vbS4kPrn5=@qv3T)a-ic_$mD)AnQ*#T3i^)8-x8ZKD=q5 zny;9^DlAMXc217Xk4}Sc2uQZqh#;IF9e7zzG$V1A71qRp=;uJH6J5H!m+Ei%CucN| zAMkti@t4pJE*SZ*{N=atjQ!>F5dToI)yLOSzSY@|%=DxP%!8%#-Yw=>uTDcTj7xyB z$g%iK77Aox?$>QyWp_W90@2&$6NgAx2XoI)x5?9 zRq+ILpBkdL7T8*Bk2Z4REHQY6A-;3=uGR(26a8^H6jFM-8@J(02npJ0@KVi>gD*Ud zuO&?lq?(-BNUl>M5)4rUj|2oQBPRE;D@bZsb{-sbv53&JgyPjiw3X~;kewPmKF!XT z6|SkB<;6a*O$BLRrC!_Cnj!+X$q%g2iESsunQ#Vcj9{6gs#pZ>Ql@EG&ReH&Yic3u zlZEhN_MAD(q~Hi3z{X*gCupD>jTFRUWu!_Sx5t2^up8-nll{u_*9^ZL9z+PfE0peZ zbc(*$UmaTwr5DjJ`_2G0Jps3>^VjpXlW85hfYO>WrB1za?3(A@Q^&?JiKV`JLiD_{ zK^$>}sU(nlMKiyx{vaaUdKAx+{W_Y93xmX=yMr^2{-N8W8t% zi(V%IiR9ws?B?X<8_rD1MqK-pf#3>uQ68&VZ`DLdYufo()sl&d@M`mCO<_UGbLcAfLtEqYXBhrssRUY!F`2mGA@TC)J+0@lab3$Gt9 zUr$auPZJv~#WfhM<&a$IB%g8Id`;UVon}~gjs{+f{sB>YJ&XaxAUHH!_i!F?d2h&Y zsj6iVf1)xeml&ZFScOgspeRmhd-%o zzwaoKvDG%V~pvWzA73mt|DWVJ4hJWdt@UO!AsT1cNd1BO 
zkvffwEaPerG+uc`YS{K6`Eyn;MC`FUzK#BD{y_f!M$uQ_)SYt5e@0Qc|6vq0vNJX@ za&^|TFn0Q1jiQ_|_DZWMfb&xVQ3zVbz6T(zsac=aY<)pWrqppKE2B|9_0Ac${TEY37{FavU&W_eDx{w)BSlofm;Pr-0tM_N&F0)uf`Uq9?1Zm01@ z=J6znI@;?)NnH)iu2x*bn!*IJab#hZ^6wt*Lp zy_xmlT^c`UEpCkaxMV!gS9R5$R-@#Xu~vA00%Kmq6{-D(Mlnk7^ar2#b*+~=R9?*$ zq5W7JSl_%`YB=xuW{$? z@rcNQ#)AbiL8c-o@G16Y-vis_akceK%{|bGKqzhXz}Z<~Cm>jXng?2faW&x)tEU7> zQjry(v5c5tpWn7fLFC5f%iD?&F)awLv3DDB&R{|VZ42MP@PgsncEaT>@4GVM=G2sTFD_qO zx0aCn$|Rz-XCl4-h9nuKR+$QNewY{$h@F=;6`s!I48705JcN#s$<7SUYVV~cQMfPZ z4UR=)F8hJ%e_QaCpwXNn>4Jq6H#hBm?g7!6`p2)Sn8~1a8CzwpwX9BHEb)VOmv`$G zcAIfk*)k(L=I?szBZ4je1iGBx%@Kty+xkUDS-&qM6J~Ph`QKt4R>||DDDU_qJ7&Be z3sWu!hHw0n3~V}TVFxa;04eJ+xzcq@`iRP{dz8zWKCkwRt4NEMc%{|XH=K^Z_f?+= z7Ox}x9n4y0?H4u?k3K?Kr=||N;I%BxRw(R#{R53k^Fu`aj!gEP$FCu9wVXZ=RaqV< z>fGw9P|*ZkNUZfJVp699rgsPKCMHLo|79frKzd?+H{oIzswe2J#MFC5#_ncF`L!t^ zY%gjQq%(3TUy!?Ck-JZlr3(^B->v(F<@$wRF0Yf%p7>^zk8(EBqhEC$_;okCB->vL zOXtn=fL_)My(Q^foQ8EzxrEE%D61?RrNi}_Nbe=8#t|KtuHKXbAK#;nrNSFXbkU8- zYFBR@aN+Z-^*ib|&>oPgZXCU-LP|aI=d1_HR+jg$9=iGQ{I?VsIi0udM?&! 
zgoY_l1v5Dg$#0)E-da{K#i$4B`0gl)(4l~6-8ZsT`yoApAtiixg++!axwVANUt8fSt@i) zodh`C5;LAD+uq__etW>_;}jWaM2lk~R+M1n|FY^41I_HLn4Y?b5Vy?XjBDV<25L( zmQ|8ij>+UugoyUr;V-2vZt|kdS5e`cScTD?)OjC|f`i&^hQ zFq8BoB<6bjCSPK{tI$4}n1})ddYTNbjyC>PUUzH7hqO3c(VR8Q81%17jQE7fq@(3) zegzuxWZ7sRm|BxY79kk_Ds~M*2>Qy?80X~-oQ{zYv-vsH6gA;v*k~i@m$r#cA*zN{ ztAzQ$$sp-d&fw8U(N@+fXcgkJfTmIOdKSO|Gry9a;YQ4bT=a%w_46a7=G(!L{~FCa z^`l&joWK3)x)M9xbnF??;cv>S4~O~oS@xR%XO@0wiF`2-=c9#vO6(_>D~848-U_T) z(Fv>l(lHg=Rh|nG&=$t3$LD&q>T3RBbW>bSohTEMOA*#)1FJtDF?~{^IFLpJ=>V)W z!?w=oDqwvF7Mcy9Zu3uL-s;DtL;XPs5_(kvBy6vQ$$v_Xr<4`sh4umW$zgTcE%HvK zEZ~4)9+ZIZl2|zR85ph)MdvZqc%)itjmPe{ia_Uvj~Q46KYaP5HJ+lLVy_V?L$%Hp z_rog3(1&F#G91Xj@rhL7FaGh7Z~a0{4_>PyI7~iupo%#ctq@cSl>Y1J!$S_!S&yKMbf~IOYQ#QPJq5VXWQzy~q((ZoJf12#7>DO|ba39_O8-V`c4f-3wKu|@^(SWn|OZRPL} z*+I1PHfSwX%`W&HaH(b|7>-&zt?SH3@L;5l56cx7K9wEIXOE?_fYDKAaPtN^-PK#lVSr$q-D%)fB${8`$=Ero=AerqZDys1X|!6W{3;6 z9ymsLdd9>HdL|AsmkU;mfB+Y3d%rO<-$OH}YQ9~t2X7JHX5;%_Q!MYOFab2?IyF4B zT!ijw=;U0&3tRqn?zS+UV8=Yz=5G&;@S+oi(9juDuzjb43LW5i)6ITI&QRKKs%#R% zgB;5<>tlf~JZ>M&q9VH9Xrf#+C%dAiOL(M4QyI z7KW`Z*s*@GF51z3rOw@|ow-4NF@1|XaV>^ht^cBj_M`|CF)H#H2lnKRgUjm@$W+ej zfK_|5boFj`Zcn};aMMXPaA9YQn5u%~=NM#th=3l!?ZGxfk1w^C#LihfyG34e;|(8{ zOfK|S+@_^2d9K{2UL<9dQ66+{B_V)4XX9hchXwk2Y_IZ8`2IXU(}a+#Gq@vxE46q+ z!~Sf6fMjvk4jRJ#0j}|mgRuHzuE*8VGuq7M(FYM$wmlzb`M$w&Hm{!wacaq^5JX`0 zptT~syvUo!L-*HURuJN&Y=DoYCL45>Z_WGd&x`63?@!a4(IWD~ft|d2igmEJFNZPA zVchC|)1YI+5 z8M31#4!5@;z{>iCpoTHdNfP^SC&C+s9sB0%EhezPzH9-6(r*ZM} zm&s!1m46wLE%%x|sfd?QbL)G&GuTyoiu7D{Afqi#QAjDIUx=#gTjRf7-oG=Ew}-NSJ04l9 z#LR790^S0{>cxghTT3uEBt--hd=UC?SLg=uJVuT6&c$8gHczOm+Y9zQm)TsNTnJ}@ zUFe#a{Hs=U40)~hvN_8Nx}YQrmFU|~)PD_Z5KSB~Ay=Fxitm?fI*XSY;-i-chjEb% zKfU1=o}I1+`s-Nnh~dNX$-xDSmU|F9=Pq$ zvbz0?&2>8|W(1KmQmLu&(ya3*J|asvhEVx8wy*F)diMo?_F>;9`Rff-rx^KD%;uJl zI!;_gycdI$HxkZDql2BomqDH^73p574Oq99kun&@Z^#$WoKBG&hVrHtAe?L^1EpCK zR(ME|vaBlS2K(qh+$=eBv!z{2yokU$a>kxbjDC>VpGS~xgLY5{6bJP_Z8ZZyvbjp_ zEdCvQMnZX5;LQ?utflJT2+cMlJz1v;7&o#nYH(-DyO!e7sNTsxW~C$u^hSMv4;U8W 
zFh+Wi$kz$%hBB;jMq8{7(kW_gEUm}IC>&%GQ!tQ8Xv~JrJY8j&lj z`$?+FkZl!vXFW5VWQ*5v!-ZSDHTO{W+iFP$Hxm-BGr zfAsW@{UsZRni%xGJ%ersrFvW!uUf%E3aqN6uFz9c+1?CQ7$J&yr9`-&VnVehxgxhG z8(~d86Tft2{nQFH5HW=X)+(r1)JBDyoL%n#@zs@GtG_5KXf3^PX?cHxRhQ_^)g#bX zr!tsBWCtC#OVQ$CQg&r&o^VIOP_{(gG4#+J*)fNY{>uTp`%+Wr($&M~a!bHNhw&<@ zr)HruHQR;bQeCV%dk9CcqVgpF=por1C$M(!Mo>*kDqI=+23Bfd`&hAlp1|7WZA-C$ z;E(^M`!-&pHYPm8z&|CP$6@l8B!i>kmOctMS>IUFPM97dd5 zO(U2#eIV3ZNP(a&l{2{Uohs2)>?_V8jyTQz$1NOWYp9Ry`XvDE?P$+7T%@|<9^$yZ zs$>HaF9aRoB1#Cpf@o`r6nrvB5em;^L;cza^p?ufNeWoGAq5_Z2+62~bho(^r`f{i z39?O+zL}=>*-8X$Gg8vJgO>bzNAVN>VM6CKWaKA$_@Iyb@9c!$3CqZDi0?$RLxu%C z!T~IE2W&2!13cFJeJF&%3=_;dAt1Bt@J*gIE$tNr{n$X9s( z0_RdOh*wZJKHxhWI5BsXAwCH67Dn8C2)I7HJvYK1SF)hfKb(UGaANJyGhAr_W_0~! zf4UL$IkJ6Z>x|`dN6(=8s`e>>bb~;>h_ktPA<_}t0MWD%CUAT}j3_fIE41&S{GLRo zX7d10PuXU(-{Gg-A~aath5JzVxMA#q+;Pv<#`cg$ZED{wkba&AXL|hoXxEG9?6)B} zb4hmtx%E!(96TNLIg5wS`Ju|l6y8zd;QGVG(bhqZzrEW)V$ZRL?07|du4ey*UL4vr z7K2X3w*PEfJyR-37$cMF9+1f7PfKZrgJt>@C%gtwBs@UXL&6w?hG?DbDoW~y9g7p) zH75c&0RPz5kARa!T+oBd*v?&25RJvmLCAFk^~dO8$ankim$)%#z;zzja1n-;S7W$S zwA&UxhAGn^=>7slcrg6nRzD?!Yfd3t8xE@5BHRz)WLy8`En5yi>=tI}7x5^(#vT*M zmut=d<(VPxm;+d|S3qS_G!i`$;*>fv4Z9_60^J0oy9UI12}t?3SHway5o}PFq8IuS ztSvK*Yt{5eIRbBF+ACQ#?QaUMk#EA2!M{@tlozl**#KGd*Rdp+24Yh?^6_W`|u@jsB6K{L^C&g=SlrhCDRy|YQi0$SrwNW<$Ij1RA@$4C~-TAm$8{jSh zv4CySZ-7Q$Fp3-P`ikFGazAqBEQ)=c7|9S)JK!(Pg?IX;1^a>5iT|J6YE|_q^@+04`0@ZlGDbd}JDT(~`)igbfF?gQ3S4F905ny`;2) zk`K626{~npVV^NfdS?g_p8^=V_)d+%XzF!&O~#HrVh1!mGQ4vWX&C5t9fGp4cO@Pb zKu%7!TaJhO)A?@l|NxZ%z$zRja$jx@)s^Syz;IeZG8Vp$#XAE*~7R+z&~C zHC=fY?H>&!WQxaOEdW?Pz!e}=*5|#eXmeGq){p3Y=f}heXV(y0INHTHBE3R5iq>S3X(BP1g-mKp(L8}T;i}+@h{y*Zk3ajuI}7;O0RX!D zZu7iuE_>^~IArH$|>nt!U3Ub!oDL;&h;$q6+ zB6phJtxg$*J1tZrwqYF^?bGH86_fsXw-&ZXKP4U7oEwLbT73Rkz^FBKhzb9Duw%cd zAEc|>Tsulk>|?ua&`RgqlE=5d?;n{MErdaSz8**58sl5zQtsnge->R%S8!y^@%ZFw zIE}o4NIRbju2|!t>Yc>0jTpSydOo6%h3xI%zlmj{#uZRygMhVaRGsaTj)mNO!Xn5; zg7rh<8;dc{AmC&@Rw=JA^bV@h^y#R+M2=VGu}~*Tp!EcW#-=`k0CxQMk0kGpu_a|o 
z#Ew(%wV8>;s0vMemz$l93<26Mf#y2kHl%@hZa!liT1~xz;ANnMcp$U`kSqsAS+Ejq z$InkH0yifFfDRT96&Rd+9NS0x=(NR>`_GcjEfbs$nVkgO3*11ci>r)hoXaZ$K~uT$ zYpO!W3)oJJ$Nwo3N7u1S`;GFT7OdrUbOc6S&$bCtpau-12(H^sqz`n4uT13jFDGeLQf8XU>8Utx-X5ConS6f00&|^&A3OlIEtN+0)9-&{}%XZ zmPLrrUaB9L@k;|DUqRE41E{^)zYh}xaxRg41~$MuOlW}ncy$JrgrCTGVXhpXFZA3{ zyTP0W)JnnaBT`z`aX;u=YKayZAHZa}!B$&z)eKoLI`g+Fd_mJyjI>6Y0-LVg`DIw) z*>+TRlk@Q(EhaS@pu5#qBUl-y|R#t{_Vpm_rE# z_5Ct}Lxl?KpqO&2Ug*dDW?XUc9;OSkgV=4A0TOLt%2r${Q@L>~r$bt{mFUvCPu+mD*KFjJ|Sgw!pel-&2Br8v817CP%oC!pFb9d8XY17#) zj4pk)x@fq?)|0fI#`xJ^QwM<|)uDhm?rvt16O+{tn%2ZKx$=ZxI|R);+?t!8)1HFj#voiTF5o#k#ZbSlLdkChPnjgKHygPHw~UVxvrMYU>%EU1%Q& za7hs0)l7IlRQnblMVR7(TzjhDf)$%3v))%H?|HOsW$4dOi5?}ckH00o096G{snEk4+JoYPwjTbsO4<-4Pf1%*m3AcV!;4g>=8 zRMS2s*B$W1dApzgNtlZS@voWd{7D0t^787F&Gkwk|G_?;C_@i-0^9@oL#%`T!H=Lv z3jWa``TQWj>(l?>N3?(NBQgJf;78>D2S3`|F6RYE9mvK3=sWEEy5 zxRSI|P#`6|ia)FUb|#`fmDO?Bxm3xnNaH>C zEq2<=^uQ9VZg{mB2q=`lIAD`#wVA;^O0uOOmpA=LCmn^Ukk(DM+Ht$GNc0Cs@AR`B z_2oT_@9|RTvXtLBZf%Q_w#P>jV)&M1aWFaJA=60{60hC)cIXTfKFJSLOEz5d>n~-*d)}WaJ%cM5MLg`bSowTYRGQt@i=q z;vd#h(Ok_g@U3hnTD+E+pu*i3EngJaCKc?V!uf^gI>3A#rBYumrI&reIdws=%b}(; z8;9a(h|&R2Ru|T#6cNyS5mC1gL`u{54cJ7@1iZ$F>xL>l)yE&w_=1`=l|oyk^wf8x z6}%+s1d{Z|K9z0=9Mc#Jv9uL{mGyeAJZR3Wra~M-uRkRPda_ujWEyA|!Nq(R1brWR zL0&W~WtkB;XaI70|Cp3R9$}2YORt-u)dAAl;n+wI|g-^;FVu;g*>8mEN*kE&(qb&+~w>~-5gpl2yqX3eOc`=wxMUTsW zOpGd+d&uU!cFeHp$WxZgQF;_PA-jqg7CwWAED+%-fFYnS2q?#(1A=tLTS0%m5u?}I zaO(N@o4KLlhaeL1z}Gvm`GqVc5GY(wz~}5neI0=yd_SR2K8Fzm6CAJ+)W>2%c~AI6 zdr_kkAg)^V;f0C&)6K>IgfDyq{9pQ!_4mx`?kE{zQ;2Z>ZlEp<=OrsL2e6_n zuwVkLP^Lk)Y;VLYj#oOzU)r{@=rW`I4rYTqm8bcaK@Kj2b{;~79^>UdWP_tnnyTSv z1CnfJYZYxvKn*pNLPwJ&B~S>Qk2&mgcwbge(|Q+j8QsS@w}7+EKl^2)`J=MyuS#Kg zWO-Q(erVdp&?Y^Z#nY4F*YWHI(a!OCk$E)8jPIVli!X?Hg#gQVY^g>X#jO; zRvXeVEB7kx_ZIJCZ7xV5_+hQ# z-Zy|lOyv=(2ciO!Ng#An0gjAM$Ns|HF%&()iha!|icw`@EgLr8bgU?gqmgk^5rijS z9qrYOEDFA7YSz~8!*B(hhB9fsGHE2?`^3UO@oC3I*gC%fwIL|q_WalP^a&Y&+C-)C z$#r-M9^>uxjh5EDB&P~EXDme$53aEnwlv*Im3ioc1dH=-Umlu2%K9n*YPlgbm(5) 
z%e4L(^h-xriT$3ig!P8}?~r?S-;4dvWw7>;4WtKA;=pHNL`p;P&0G@H>`8i(f5vqB zXWFX$j~L+g#ev!LobiZ0H-^TU%#_i;j@%SpunVGB+d#*_YA^b8Hqy;jfS>WEEfN-h zBFB)&pS*M!vPcrIXT9Iq#9=A;$Ka1Riib3k*89fpwOSMwWVV_qP%P~tF=Q~N(q(or z*hGCPq7_(}l&Y#P9Dg1L(;DHrB&)cCk_``#qdL9(t{i4gfSh(f{%pxD9>7fwm-B=j zkQLDya-WP)yIX~*(|*o7nOtEMw(RvFh!SkV9Ies`S7h|)DPh38B)N&e`qc%F%^tUs z)d#;nz9?)(>wxJpr-SUqIMAh?E?RwDN#VHZ6)xr{IjR>SQwl>gEP@$9VJ1<0MWI13 zBWzuVooR=4uSp+5r7o4aHD{f;bC%`M_y4uciTZF))MOcABB^{6yiC!&7b}#9yFX4m zb%{(vC&xTr2{(!CjC91ALtB5a23h3h*U#M9(Z+-gBxQUKn%()9%+j|6C1~OQFZ&1< zP6j?bJ7nzN9|0Cl-riQZ7bTR>@^khcP4{2+QIOn+YJH%v`C%|-$LBXSg@#)X#{t!Z zpH{GsIZwGn`3{2Q{wMWizb+Z8Y5ZnT1-|xo4_M#qb&{x{2>9^*o77-~XNh-9 zy)E3Zk@ts;uJ^pV{&wVFU4MW0R)RH6!MGB z+xOOjvOzK@b|mk0Fxc|i1{nBS>U2BDN5j>d@vP0Dj?tVwzA_st$^vR^LNr9hcHZA6 z0deNku&sTr&W`ovF;J;Zt$!4{I0_!K_m+9-H+#v=_<-Fk545wdyT)c-6C#9=1Xt8= zcnm+GEqG4jIoL-&)JFg_PfnGGilIWh!PFCtiI6-y9JVh>d??zo_F^wr;$}Ld!zP6d zfay0}1Toqes^(H8vJ=iR~NO73BJj3CS2>p??2zv8DAD z(QjQ<=v)GK&-{i&CQ*GQ906O?VplFE#l-f?J`xg@Z!fI#yr(a%?GO*C`$BNUO=^tg z+XaC++lLZvw8wM~#XM%PjbGcjtuDq~F-wl5Q=-jikJIH%ss@ODNN+Y8z{=h+<6uVC4ZRE~nxMz^LQ0+IBi-JLCfzuf~*kB9YV_ug+7 zQ_fc^l~z^E-_FKo)Q(uKp)lIcA1~;3SF)i^7wUah;#i6~apl&Bm%T@f087JWlEw{o zkyDS8a7=ZY3{2dlgVMvvz!^G8Cr zsp2Hg=@Q5BnXAz~+Y91oy9P3KE7Vl($UW+3ETxeJ7Y$hNs2lN`EzoYO zAr>9@Kv1Z@`gcuVRHE{SZN4QCUv+3CtU(0sDI^gKij7Q6#d{P@D#_$%Kl^i^Y6+G8 zS+(Th_n!j26)q=7CCgB8G`Z|Bp|#pzSo>ES&goqN=}JzAnD6ru9}~8aSEuUJA^YOG zEV8jxTeB4zMniM{HdOf%e7CqhBZ5IMF$N{vDjlAF2iG@1{hkE-q3AlJqhxrWO`Sb6 zlsIPB;5a#yOIY~0@9NzsJB3do2m8UHrQD-w=B9QD=&^MQ3Co~oF4qtSH$9{2pB%5V z7&z^J({brsKGFYH{V~IH?|SL(ET}w}dUu9CHbRoJeXb{+R%*h7*zF!&*pRfN4~`^` zcj^&Ey$2Ll7?THfNRlh%;bEsORZr-7pDJd7TI?0c!uq!AD(zODyBoW425+hN-^%WoZVP<~QDR*0~SAISGbqOoFVMwX{yWZ!KJF@cR2j{t|{3fQKh$bfS)fKpt z_XMYCe?SgSL5D)h{0au-_ohaRi$Hm;|6la+FTY(>pjCRhRA+l<0celCUrS@L!8`1^&}u`L5Wn1+8x1|)d887F+*jMc(Q=F8@)+7e65lX^GQGgzd(uOX$#q({ z^_*^TyvNvJN*F$;8`T{TchswupB2iQd99Ym-%cxR9WeR`_^{`9tL0Go?9>e+b#@Vw z91;mO(|r%$3zoz0iVqzIJl19kp6a$PA9&QzwH%Qo6gXNka7FB&7gNv@R>6+msDnqe 
zOM7Woat7C+=ffl^)$`hK+@cP+eWq@xoq3JCG-v5^n#WFJ&ED{u_RCH~QNPL|aUG~Y zl+SjMQSkFN-iR6l97s)M4+a=&IR{W^*j!-xx&eG4FHgUy``b>Hc2)n)N?F_zZ9V&q z^r(BF52AF8V3AlB$eZqd1kW(BJZbwAFNdMt$E87(kfy&dEK6xZJ^Fb#ZkDSRH#X-p zRM6^ud0xV}vJ4HB*A-f@VQ%}YEwd*N0G%8uUU*2%jD8NcqKVx8K_JU|?HRwZZpFQK zx7)sZ;F15iK8GA)>EskU9EZhs^%&2@U_C-^au!C(*wL^wsI7faV&Ya->1p(-%ck!e z6}kLvf|8#D1YFrZKCs&K+^rU)e+-QnN9*y|Cn) z;YS`9&Ih+g;w1Yul-@EVHabk^v|#%YFck;MkBF(?8bN^@ zm<(2gav%m-Y#vMaB>+lIi2zQ*cznWBG_4x4R7O+`-N=Mvl}C{GNMPs|de zVESnC(;X&^^dImMUoMptF@KrRn~l&`wvtKV*y?+lLHIj=tI4gv&B81zs_yM{)?C8P zq7mCCYTP3Hlu`=UEV6WK`Xh0*abVuf8>B(+N5^_U5i_|IZ4sv>lg*zlU9lq}ZLKWh zXwIXoi%ckXfT3OIyC6Zxru<31Ep9plgp9Zk%q2E93&e4D1IqRHRp9U z_bg9wEvFM1xDdhq3N`i2_upleiza2DqY$u zkS6oj+nxtjMvnM_+8YJ_r1Y-~d2E4(MGx1&cKB z*Kh^Od(hj1_Um@oJ6IVj^tZ_Ucj zJy-V6|7fXOk9;`_qyYnk+yVOvp9#cLv5@tb1!7FfGPdq zGfi~fZaVK(5HY&9!hOK|bWeo<<#l;Y-B!yBuLs4L_qWI6{Fk;DkFQrv3xFd`>vn6p z!~wS~uunh!5Z>v^Y@hz1Y_I)_MtAx|cijCkEe!32%}+zK!!oGA@@vCuC2iMEUANbC zTC;8cCMERw0DGvC1>({M1`bfC)4;?>L$gTPl+JrUD@9|~)hOh5Jh5tDS=hPHD+1vX5yOI;w)qK!f^KbTT$qezA8k-dy#~eR=)_r$XIPF!}Q<3AL^MnJ$@I@;BHm)?zpDzX(%ML?OyR_IOu{ z_)hH64*Vj&S$gUMTSs(9ltGHYX>lbd+G+*5mvZ*O@-|`BTWZob1p+mHs&~>^p5G+3#%oEtJe+gr8bQ#9c`#fdPVH-*M^hb{JCaw5| z^=kTb=BQ~~{x8zBc@RnLR5(myIyx8yHh~KaY_Bbiz+!#c<#usPu?@0obEl<0cb|E? zzl}EIpa*E)#0p_eKf8Md1Qsrq+(Drox^*mBxE{AE)6jrT=70S}gF@5)TKg1Q}BsmDg8V@UZou)Q!%3TUf2bzx!aCt$g&2IE!ga)VH}!&2%dcH zO)9$H);TzF0d7H0*@XdBT+5Gb9+jmi&Q_xHQa5A?qsG_Ob)yT|&Yf?F*WWJ5RiIm8 z^AH_1vU!Q7_8;MyX0H7E?l^0Xy|SLGfVO0q@uP=8(_Zx5x6qiFnq+bQj+YvRKcFZ! 
zIkheCDaryZ(zAJkH9g`PYu2n>Q0a!@DY~rf+2h3&ZNxLc+=+Sl`&)71lt}v$!@Q#S z7%ur#R1^O%)6FVG1=R1=Uoy5FjEF*6e<;NXpcW|eC9%1IvfNZN+cgwVzg5Q3W>q3C zNn3JRSqwvpXNqNN+DU`^)M>@B1f8fx65v<~VG8r-)pUr(UBNKC1quDSWDfC1Ap}33 zV4`)-Sz~lAP>b8n8H3{?L_sfx!f9<|HR87%*P4amciu=iW9N+7EX!}va~h?@f=5sa zE5|U}A#933Lh|bhUmrRS*j`M-O)F-}pTV3?xP_OoX~b1qPo?6ELLHMIMVdN49ituP zOVDr&$5!!!@^BgAF{TigIRJgdN1rNt6M@c5*}tc>k;XQ5anS}HE~7l<%@_w@BfDe* zi-qZ?`{#W4#{()+A#V(pR%)m(ew8D!5#9`lpg*X4C3Ats-UjG!9g2utZdT{ABPFTw zVXq8d1}Yy{r^2ISZXiBQBMejFncePu>Dzt9jbHh=s9Aamd_PPkj`h*&`l@z+4!N6| z%hgj9&(@UB?jejnWw;vn*h-hq!dyGNTSx5vWq;G4?4z#u{8D6kOV{geYS${^?OGyH zmAZfgGy!!9E3V#}0kf(quFjRN_~i6p1uY6TZ9~jfi`BgyDBSZ(^vLR}kXPKMsBIHf zq&2A=(6zCegen?iv62dGt||G1!DR*sM6ojNGG9KKZT?7k%PQ{DI=%N4w~l3#O6b5& z2gm?PiocL^`-uALPrP>fRJixV{@m8)m|9W{8oZI6o|1-Ge3 zTd&1kFC_&ouuJN{hVo%NnK;eVHu!p#oZ}h+@Tb0AjRD&QY6sg+CuT- zuEDLi1&R|akdP;z|M&Ntlh^LfIlHs7vw6?nJF`2N>Fj>7P>?%f-vu<*Yy)}y)B9?* z=jFH$^PU00J7r`m|G{S|9W`K3Y^oInOBc=_w0{)vU|WqiN*k?7m69q2xG&Oj2Fq4k_<0SNy5F9UT5F zp|$?7!J1x{oNk<_W*HpzXJcrz{gu|bjOw1uA`K5I%v3M1jWe)MF-!SW+R%o@>_Pr~ z&ZI{G_?+kNcyRxnC!7^KCda$uLc^__#q^$mkUVn7F}aYAg-S9)eg&TC6zttirW8pA1> zEsxAHfif-_PUIp21AfWd1BJf*I#hI}!JuN&hf*f^znmJiS|h&F9#e`j3@<~2Cp~-Q zVKkZ4nnCiibhIGgjq7C4^QLZ4P+MAjo83>C7y=O?uJ)%3%tQ>?w!}z`O!v%u)>1jX zM&D}MIrT+cz5Yk2hxDtZh}+{qeUE&mk}VOs;NmKU1RJGp+TZ|O{yZM~1APxVkCdFG zg|jD?JulwunFqg=#11l|Dl;4YVG$-;khZDmD>ZKB=ZBLa0@Nk7^dh(=!3l3KS{J2i zP3ZGf=6%5|{gEuW)0N^Po7~?j>+tpw9X?U>EtslnD1EDJm9nR}$rz!rv`LC1SNs6oDpFz`14foCALX#A~K>IK!)Ew9z1=GFx%s&~T zt@a>8aFg`~SpfdoCHoZ#wR?aJ{N2cqwseq*q9eU!GxIEKPg6lhwYA7& z!?OCtYqo~wEA{1dD(Jwh6Uq=(ipp&*7bo8-h^+^?R-@mA8eA)L?b+(PZ{pl*IvGp3UN*> zHa6F;aQbW~pI-z#WgD_RW5h^a)90s1mJwzMONf3F@DwE!HngqzH2FlB0rR~@+85=m zQiA~Vdaf{Kf_AO>*`qnqMk)#sbNV3zCU!PeK}plxr|GmTrn4h1cNnK0N1oF% zYIBSwD}Z92{45t+VAk6m4qGv}NY&5v07|_cFt)Ec7FHcNHT(9LH80hq291w{obHdUks@E) zLX#5AMDE<-Yg@p1Fp)lVw;}1yHfn{RLnViggv;E zSGJSkGbW;HD&@c`TC!Glsn}`TkQW_}(Qzwdxh+@` zv%{wTSpo=Js}2&LIo5kA#fv!OQ6Epo+ynCDkXHk2vXhJ3Z-nsQU@1~hd-oNSRNrdr 
zSP%P~Y+Ff;+{;+A;rP5J9q$>km?NGaQB0jJ@+aa<{}m$4UE8ZVW$7PJ=-A->jH}aLbZG8nop6P!rW{p!lIEh)Q`1HQT3Mq8*?9@eWcF4M6mCIilTk$3Z&n3J*iCNF< zX~yf9^!u?@^vB2V9^bc_d(L)O=IUzl@f)@|riX=Gw$$}NamBG*h5%SO{Sm;VneLCP zB!7MEY;om?fBIt2I9;{2eerzbW6$I39!zYV_$;@4fc-1^hT71+f@h5q^Zv!UOF4fe ztT!{tYPKzaI0OUV{JpgC4mbYCE2mclo0SFAZZT<4=?lv-wo4Lw?PdvuMLHY1b!|5@%|p z%dI!|6abal3}5rOj$o_@;eP$j6w{8DsgO82=bG}F6JWzT@;kn!HSthOs-oS|iPL8s zU*6J9lTDvETUqD%8*n*H$9@Va)9&ODWIMbqrz7SFElyAZw>WRgTSN4j6J>8_Smi$H zn%ue1nq@!m}hcB5jNXbo$85@kgU#e$Ych3hTQUT`G0d*p6QYu&&ont%Yvj0;qDe zCRs}9+l#V6G;xs+g(jESRAbYLYogQ}))Ajg+F$U5TG!T>u{5rK!u{A|DSzoFde)z<*X_DAekesH zorU*=^32UC=Zj|MMSjZqBqp+^K*J><&rvy8T0@xh=BZUQHN6qtaA}>>({I(O9o`Ih zcE?Hyhc{J;| zGt9_zC1N6L2J8mjsbmCqk$=QKab6!oRNam~I}s(`)9X=b-@q*@VY$GM2^?Mo&aY#N zpJlrVk_h{}fa*3{F28wWqSNaoeo81Xc2vZY9pQJZ|55aXTEk;~@;=z;8^}ZB$9F&7 zCSEbAST4DFJpOX_Z^;$e0zdXC*M4Z=ZPPHZ|Lmc~#7G!uaa4_R=)EN$UR)v}TP$OK zF}B$oHk+NE6l$~~*P$h+oU)S}UGp7&$*0&}n#PW4I_zD^|2ATg_XAy|D*Z&-CsG$| z$-jqoT;UM6S+ErM-nQQ%EPW;Z2G$te0v znI1e7Aw6^?V4x_9%+h{xrLpvhM^-tN>iWY}^u*)RSyYjFLL7N>xwmq9j4YKT+~-aB5aO7G z2C_XQqpL9knPDR;I#hSY3pbtP8`VCAN949}-2PL+faq%q%F-tp#t^|(SQ!@2^JNBKGA`yb-cp5UAgUIw<~LW+ zz`OP*;bkGLJ3sY$C;LB1)48WO&3Ec? 
zfyZMGjm?Kl;ZKQUzh>$=E*JS^$6SteCfLk!J=54$cHPgl_`aKdY9iJS=}h*ZH?*ZAEw?Q% ztSBVFPsPTd`B|B(HsU^>mr-Es`NX9?c6x%8>7bse6469CPf{Ai!8=m=LesEcG%vZ> z&5g!+-zRvuh-MBb5j6eP8&b-|Sf-tm{>P~%Mb`jXvlb$S=5P?gHfbc;OZnt++coxS zi(0#{l?RHatDi7z?M)JLL|)vPl{*UhsPldh8g^(cX;A5FeD-NGPEz2fgpRL%U~*H0 zgZ0s4F9+UBYAN~+M~D#F(=z8DCXDbkQLG~pAZMm6RZDfr2cv@CGMOGa-loKF>1~6W zEEC)|?yNOjhC#x=4V~CU0^I1{=SxPelg1ymV7$`uc}wkH`|+qBaV}_b6kwrW6Ye)H zCbrf_k-Q+^?nJM8)E`xZY5Mre2fz9`2Xlktmfc$e;GDJ~J~N~iBfPL+nzGkwyj3|@ zKh|>VN-(Jd_>20`U{{~W!N*b{?O8zs!fKp0vV?U;54Rp6@#rjvR%6>it0&YTz8;4mF`(E;OjsDBWkCnkn{N0s%*gm5C zQ1=}|9s#{7w`9c%iN7qDim>5>p&V0^M2B21^7F-?zmdm&=U5>ZE(y|YC0e!v+M^)h9+_G%EG ztPAw^$nf}qdc910c107I&{R20`#yQjY+&xUY;yJ+G10O-%F~Z}5$O&O0TxDnN#4~$ zW7+Ohi=qmi7W)z12Fk#I@pla@l#3F7dr%^O%OZF5y&`?N3K=2%Jy-vHUjP0iq3DY8 zmLS8k%dXL=Q6m@4eYVAB_qsJglS!OqX2f1av^C+M7C)BQ_FHUbJO0$5#EAHxSyfV$ z{W#mggG(wcaB0?GO*kuK~&r8gQb9`orWaiGWb=;_vdpq*8Jb4wJc zTPnq{xI}RhZ=#~BIL&TIDFxE9r!5|84$Tzsz-^u3Q+C6E2RG-nQSX9U_X7XQS?*_U ztdV@oF_UdnC~?W2#9rfI(r|c-aAevQ-a6B7z z>&>xVBYpdsy&9FQ#3 z|Jatc&_Q~ddEI6_@>3Ud6ewv|9LHofFk`Nj-Dk<) z1GU&Vy?AD<;Qxd?w9)&?E1b_X_np;AyzBu2#u2@$Iuha;_N^YAy-yKtYd~pZ@7vK| zFJm~GMk_@I45U6<^fDWxN3j~!Fjj>yYkzop8S`p}wNn?wss$L?^K45hob%S-^}|%g z&R=vEeJSe@X5i@YksRV+Ft=LAlqOIM*mvOCZdE*|dKjeK5SC`NH13`iG$Mx(he55I zw581&%p%IlC<-n{15%(CHr|~VS5CAASt0t}4FG*|ZNVOxe5SUvfI}m`b_zv-iM_N` z7*uv60MjhWaqu;cG%Cb|Tw9_C)@uR|C{1=JFYqy@K3}a#;7HrleHio+Yj_th$h}1_ zOm(41F0Ji&-=DrLEIn^bb&eR}*^Xro2nkr?D3Bp-#)nw5NzZ0n91s*l2k*o>(-t&l zQJ?3j828&U1^79lFVILS{``|(Z+dk?G)n+3e9*^u>^uWi`Nh%r+bf2G&A{~A*r7pm zKJ)5okIBLeL_M?9;srd1d@xYPd9|8hzrM;juiEViigVN3>1}wpLH%`MPYcYr<;i&f zFw{K;u6Y;VVD`hUE7}QcRjk|7|4-a&!~HsR<+1|(CITOTHU^`yDUa07->=Hl`X`ME zV>jknf49#j;Zg|cl#w(DZ_>AIA^V?;b~Sgc&`X9-2$!HlbV&0zFY4y9wN7h3zJK!e zQ(Pa_gk=3c3FYD0*?Dfq{+Djd0wW@D55Zp_=96IcMT&7W>*MG9i;riSJBDYIBQ=IzfcdR>ErRszA%NFcLl6e@j#}KurSSX%J z^q6K>Nx652r=cJ$+R8R5ZZ3Rm%#>tZHlZn#ODT&MX((!Z0h>;D36|q4gLys!m z0p`kh461ppM!ZUqM~dB!_3|5R(M9VJVPYK(8&0LlHQa_CtKa~>X`+4wd`!%|scFe` 
zKwA}AbmB`wk5Q@3Vd$9@Jvu|-bR_}TQNW8*P<5n&*_LrDlDYbFV zlf&Rd&Y;TGHwZ;F-LLT2S3^WN##_B7n(if(+fm=VK2xsFdxvFGCV0VDsGr3yAH3Pr zLtw|ZKMeq4Bph)x%@AuO|2k6io3LH{b^NWjmf%<_%>u8%736m^QoFX)NZGzVQY(KP z1j7pIxW`zNuqW%XU8!8Ua0xPAq6%0YqYIn5{W@9h1Kwdlko&v^kC{Gvys1s$Lw^v8 z{ixmT;lg9jO3xMh_gFM%>ACGPTxBKh`Ia~SH|4luk+jSI7WHulw5qCHBtHuB^^5t2 zYMx!?vAPfNr~Os*ape&s_r~UvH~aN1hIB^nH}!$ktWWw?aiNMZLt@ZJzcfEprzfXD z!v98$3H^Y2jPjJc&tzwDD2XRfgr-|;ihrJd=)eghQs!N5KBjelhuexYVjiPX?*ICs zl;)L`pgw6@1>~M(df@?QVshj+7Q=Hz?6W%sAJ4Z$uNo7l%=ED}R}{Z3 za5@cY6gx)~()B5RYhsiohh6Wiu6;F6=5DZXe`au(^BLd#kV_ZIq(~CmrVT$X%QyQb z^$^?1$g!q837Tn#fMy<~Q6oFNQbYPdtC+07J;aubAIV&R9geaz(;W$!7j>gZ+s8$1 z>z~SeFU_WFfA%R|8m9f4be`Q^Q6bZ{*NY(#wpWqLs zV|!zBI{5iiGdzk{%txKtB~<^M3)6QFqi)>n{_pwA00X|1v4^%}xE%ivmW?HITd8pE zn!!&VCt>SRbm_BAM+bLwvyl_d^b20EOVxwYc9hs!S0gqQIs8SQ09at~&;77s3XTfH zH)lN%!&*yq@RCL>zBs@DRO19IDRc}OAk8a zvGj#S8?-I+E~HO5d}s@ZonT=3-tX{E(az1G3Crk5B-N*k`kcsLrhMc>CjX4F1L~J! zq-k?yvN>!U3eXuX2U(r+)SIgON|=}%5Eg5&hk81IP^6Jm(i44PWLx5 zwSHep6UsR|q7Q~%NckCp2qNa&{a2X}cr)A0aVDnWM zzt{&y7bkLCn;KIHeg9(LI9pvp97trVyOkO$kjP?MQxaBMfE`|wjy*xj8c^o@f}hx# z=BJ?F(lXakN(9#6?50tUhBq*TGh z9rZ#*Xo@BUfvco)5UY&hY-{?9`jRW+bYlEo$TM+(83O7^v;%EP{1`%VI|2z;9=(IV zKd*r=7nF2EG7}Twttsa|Y&U>&AN$0LpvBYS&^KZD6AK|hK8AXUhutFP*W66rqmXDW z0OJn?kOEHILXRirb-QWfyWG8YB$Xjev}%!yYUU?gMIL)bv=a@~-Jr*LdvsQolP_O{VU1t9b3*u@ zyjfFR`{xB+?fS9sL!43d(JlRz;T7J8J$`pM{_@59>3%Mmj`fFt$WmlujU+{%`)Df7 z>B$QeqA|r8H#X%#uL?=wsQ5AT4t=pWP{k&Yb>h)>Y!a?xY|^NRD>G^YhysH&4&NR}sykf2R zGVp`d^}w?`aW>;W$lTw+W>AB7`EB4@f~WNJnGwFWrdKZuYF8^6>DW!LZ{{i&o5L0R z`0`j6Z6o{mq%A6on~P2GwwD!_>~)=<76H6Jdy3=3@;VX)qeNEzE)lh`;c(@3e5u=b z0>8%LTEN%P7+kfXe*HtV>_$>lilCP&DKlj%UEDCQ!9_0uKwnDpd_97>UatMl{Y7(c zBg1mH>bUlr$`X1PI0RhG1vQc8P0%u znJ+%s^A3Lpo=`Z-fIom@wr{R%nnjzS+kg%ZZ5GbW>r}elBkgs6k+?83ga!n1O^q{MviaW--yI!EkmY(RmYAJ z0<(o=mZkD8mNSLgS4sF!s_+l&-JVia8)Dy8=tqOb^sq? 
zwuceNb*%^ZUKipwc*(^9U~GUb&AI~71~s`w3(b31+b9wpJI>*FKD0{ybqAEm9=HQM zdmuK>GY9`t*b{4;oF6Y>*9NozE0wPou-l57ZD@W){q(hErQ4?(&96#}72=<@o+&SW zdAfYiCZHhJCgmu)4Nd)%0LHr%4mq88*oWdNV!ea6MmrU90;1z_E4f0}xzRJs1a_DR z?Lp^13=|;HuY|q+z$S13`)IihE&1~t07Cv?r}(-Z|0x#iU#5=u!-Mp@gf-1;UixeR za(f^#Sr;($8#U6bm$3HsNq?LWBUqjCY8cp?=?lL_b4LM2+ixk?#g0PtbVAKps!xZ- z4XPngWlar$`LbH1=LjS^jg?p`h1Gd#ofUN#XqJ`$7H1#1gM(Gk7iX#}OdKSwS}H%F znugmEJRYDnS7>-BO#D8Rf3`60N(r%=m37yOCvf2--pLj6$ZNO_J*l7obb*83z+2C_ z0dQKgiMzlnfdQdc0At`|!0Utx(l$oPWy}!E%XD!7C}V3MD)8{zIwXC?Da5D5^9Nk> z=OSX?(K%$c;vFp4pKijFBxu6Zk7v@8eHuRDbh6!C44?S>DFmj;c#BXfgylA`!E&ch z4iTkg=rleddqXbM?eYHG^oHD>U_{PZ-bp;{+q~{wJ7!2e-dQv_Mb;AaYKZh9kX3i) zv5tc6q)zn;65^qqu?*3S6@Gv#eW*Hy<|*df0CG5IA7f)ZP6VpVI9yQD4H^$%v9aHv zxYktUpOKMcXkFfT$YVde35?RrT!u7^2tNe!E=MEtJkTkMhVhX7+}xkw|5U@sp?c$^ zo@NIixD8r?P&>Lnn4LSm@h(H2r?EY7v2JKRI0u(p!|40Rj?3JegUX!%#? zYKRL!9hbefbO4kMjvYJ7qs3Atixvvjwmsb^t3So^NU`$2v2<5ZnjBmUI+oc&#v##q zFjR6Rq+pNrAutUsyGiYgW9YczNJu6-S}lQ(?*JzpP2ZLwC1qua6erMj=Pt#bY^i`7 zV00!BC4J%!c*FV`i275P1IuMra{eS^ouRadR6^agHs}sQ{ORlwFRb0D0U*be#M(OI z`e{S=6eVNT%b(QS2WpxbiM1_?%LjCsOEF;Bwml+`Twn*#@%k=_XW!DC!Mb))HhXV0x0x+j2h08fpxs@4xS@`*nj6 zOpa~ZDVUT<5yvDE_ez4do&f**>ug$ke)#J}UXd}r+0k(}ANdL4EDfI?KA)o_%undcLj4|x!|U4=FQu^WIibj3K;Qi2 zck{l|&kT)?s$(Ft-^sxoiY&5S4DR`mvvb^m%Rp979bWYF2iHCp@)c;E2^sir(RCSm z1j~6wWf&oZdzVLn1x!jNKZlQv7B`I}b7?kV7ifBDlrQ0rZVz>yPMt5nS&(%in4gb> zeJPo&wyEF0wx?ipUGix@NFK5Up$4n?P5>pPPVA~q74z@nfm(uA8&79OFI!C|IEO`E zP|Zo0(}K_F$-rkcWMGi^(g4&AOK*w<4K5bvK?4h*FJjc+A|-6WSDx_w!n=N`&~!I6 z6;01{<{g5S;?A6s0N6=?s3M!3Q;`C>s+N@foWDeJ>*)ukZ;YRt^XdwBb=!pH0Lj5S z6G#q!O)U5yfv1(Ojx+{~nS%B}OY*3@KY~v$0v)m~fDBYol}C|+1{N~C7AJiJFm2p^C?jzUr0nSe9LP*@At`|7%?u&=i z?mMT}KqozXMRhJ{C!N~wt9Kz&Hlxsk>*V5naLCVLD2)i%M|uwmek&($s~{);<88)8 zs}=sXPaRo5)PvdYVwnS*>j1#4BA+1!$C53JmP~^<0t|*7gBD(Z%`9>#$6$eh_m#QDrAm?@dXF*0tTQ>deJ&UD z(enVIk5|rLN1|g<`x1R-cAAS;UYZMdvUU4i9%VwMtFd5ZZDNm(Metb#XuE1mfXXmS z42^4fz#|v-g`_n0MXN=OuWa-M?!W}#{}%S%kFliV=wxUsoI}cjpFhn4}e^_DLI=_YgC>$oYm@w&Ik2FA=fbw 
z@V^>ZpwNG+b%gyVU~W&H%)8a5;MKOcT@He z|6Z~oR(z5&rPoUy4U#?+E`&X* z$PXdq<)ec@#6XYbXIv_?&$!CH&B(#e4?-ag*WFO(Aa$U}OKhMAIW}+s8~xWK8sym( zk#S9eY_)}X6+j+(OAl2wudQ~3dvRr-Gg4hh_K!o8eE&sVw04tL9)&R)EII@1fs>aC z!1dGUyFcp?ep%3Fbfs$^V`c3kqOi@<)4!eO9^$U!`_ePKlBX&$;BlJqVtF_D4$<7@ zE1QM)N_K-#5y=1acP~^GBL?yViwtZ<{IlEfy^aBSBt|!s5$g-Y5epj_!g2(ATiQkL zbn1!y?SVct34IHjPoN?bSv#=47S}cF4*&K?cLFen^CExN|LP8CD;@+Abku53RaAHO zyy;?ljmmaUh4i<4d}}IW@wqrNA9RI-!-r<#!f+5e(isD>MYl}R_bUTX6RYAv9W-)Q zI#a{OA9}5kUjTYj6zEEoeB6dBgEnD&Xox}Tz=BgeV8J#Xkl_rfBD+ER> zb@-E_1+EmShUS6T#l;*9GW|4j-~w-weG(S}m2}^qT4_1c!hPGwNu9R+b0A~3>u<;d zb381Kyb?R*Ws8*uYYHsl**ntGyubTL`S1s?J{Z_WK`VaOI<6;{{bdT6KOA+LxCOe+v{2qzjJG)+KIe=r#M~!okEAj>yRuD4xr=mhJqcu!5kuAk)SkiGR6G zJ4kYoQ2K>lTim!@yg&V1r{uk!xrIWq|Hv7U)VVpPoiEE^-%9IP0!_7@NC z8%KJ1|w!5eukYklfixekly(gqv2pw z4+)mX3kiOYMgywSmBd&vRflfbLXhWu>Vl;0XK^0F6`#AsN^&4a;E5$T|+En zP<>V_>d^ia6vTB53fg)sdyd{*UPN8>BDDopQCFBNe|}#7b1|`6qMCO2ps$!K{w?kS zOt*@f+k7ljDn%_ny#?fMJvxfGA|a(MsAZ~K0Q`S9)wJt7PS>L)QAmd-`ljYQ`pcH2 zAdJvSd7=P{JCVkObc?ZbWHny8M~kzh6ZmdP!DD=^K{>g=Q; z4AE7e5nj=yat-KeY?0k)wnQ~(EYz{cj3c#4eNeLsHmF&wTY!ugYId#jvFyz$sBH(m z;^b^m(4)F*#C`bh*W7}t5V{4t-hV6`{D7JrLtmQkEuiDY3JOv8whL|U!>>y)K>j<( zAZMNQ*iXS*@gSw88PqE8d+?d<>Efq9EXN>jldiN^mw!7;9J=-vs-_OE7wgQ8FGBwI zmtON&#UpMq2p>*DsM|wI{0AjqGhdmHXQ(~TTC2FOVwq>kLhIFahD+SAB|#x@LK(@R zk=)5zixKIeS_|i|ncnyC6v7XXwT{=5Jzv)us}kbVUJov_{To4wHe*DVG^E%&}_ne;2YmN)5NyH*QB0>)m8;iti@Y(ZZEZR6VmfBFEmLEm|ckd>2aa)AZ`Eiv9Z(5Ea>XX!>o5mzWcq( zs5`caEZCR;k)_CaNb3c1F27#@5U`K@MB7yyDYxV&-Td4lx$DDf|l&wd2r@_V{PNb5WSms298-qdrg z!oq;$rz~}as1Gf*3s$4Lr2PAQehzHGIQ-4ZE409cn{CIx38DhgyT*#J~61Fc&0YLQ<-2E9svo=Bj zekc!xKq^BaBQDC9l4Z$nWoH}q@2cYnBd@v5cD^a5eS3ek53c*$?Fcsfew2gChD#qT z;$F61*p}CFBp0^1FjP=qz{QX^g69DLgLZMj zkl%&b*XzD61rPyYf#vJ<@v$J_B&6+k@}anQm)3*kYTaxIA-JbM4Eb^W_1oH^Hl?w@eZ z34wzp!(RbC25G6b8aQ0}FlEo|UTdm*6GhYO#T}WfO2j}qoeAL@kZ(IaAr|t|5U6H; zX&vk9>s~PQ?{YV^!io^Snq;oJ+h2d3;WHlVei>PeO`)s1?QeZr0UNQaAu#Dp%vt@{hI(Z%NBCD6VR21gaW0&o&G*QB>rDh1D(aIE4p>`BXPMP 
zviS>f4#g_jX9P6d{h|NJm6P6od9q>;hbvQZIu#L6i4GNy3vlG3OCZGD^}?loOu$xC zMZSm~ZD^u**v|eH9EkJPh#6Ii?B^^IO4dc~9{-4Eerr8a>X5WNy zr++bJqLUw1Cssp8YjE@{aPD99Z z%7_Hm`5ukIsS3JtPUnB_Hfl)^59X2UXZ5M*C_3u&V}m_vsP+2;#_nhm6qetB_8RLc z%4-MMbS!+-GcE$v2)1EM6#dXGHoptWPHaPCD20KAyLw^Uz5m&-Cky_M;gwa>fbuq% zQFKS4LEIf87+uhPy(8edY0_ODnm_fcgkY92GH^H=>EJR3H;omqY#$I8>&ae8=>-GtI#pUCt-vg(%^}YRZt)VS;FxDW#IiV>Y_raK=9EBtdK^9i zSvcGmKJ$W=(>lg^DvKPEuzp}$JiH$p`1gvos)T7F+Q^pn=##(JSpWUjZ-Z)!GH+hb zs$VllpJw+yba2!17?SZc#5bB?9esM=YEr7N_p7#`_t%H&K{}(dzd$wig%=|xo&+EA zC-Aj*sAt_z&S0>>HL#6leuN$?*-|`#9*;U_! znfTNUd=m;k{AO5?Lf{XNFlafZl*l8c*WzB6W?y$}4Pas2p~bmqQZh+wj)+~<;9u{M zt_X+>l(2L|28`?brW>w*Q$m|Zw@&okI9d)1A5#yGvJb{C$EVZz-hr~>3wYyX@xRk& zA4qPAw)MAq*ne+?q)QqZ<+5p@nE-H@XiSsOI9DqWsmXivwm$UMtNr1 z8gRn86G*t7*vN23LwV*>tMLcfs&`=o-3~~8BVOn?DL8F4^0gv=$-H94IX~XjG03s} zCBGO_;8aG~Qut=_puqCqjL4;lb{k#T?sU8RM>X6)$sLOH0qr5X`nwIG3SJ|IKGV6V zSQIYdC8ynGfEIspQd*!Bc3?s?UvjPnL!h$#<&u{HcXFVzH^V@Y*q-kA?zAC4snv+I zL6~&Za^{c)d$NqO-R0eOi~)mw@}@N5o8=5;zI5dnbB5Ejz*U(9m`XDr>DO=jsevZ! 
z$)?dImo$9CI~p;Xfuc5-PFz_-eF}PktI`oLl_ow?;`E^z1>r!AjB(pSrsU%!vAwOn z-7V&YKy#@WnBY>T@{{zT<8a}?gp~0<&S6_uyxdEChs%Hrmh_><7ZuV;dz;dj zZexAjyF#k`$yuzh80y_$!b_#nA9AGCNQZa5Z@w5Xw3BX1i#F}0j_(TT@h79piA{8w zy|`W~ouI+6*kFG-Jd!zTYauqNU~k57sKF4tcNv2|n{vv~oS)d9P|C=Td8P)#-}>T9 zle9>5ppRy*WE%~JijRTVYWvfQDML>Dq_xdbnT#|=8!@QlCRkGZ_IvvCFnk)KLjHj` zLJS^G?j^1u+E;Yx+173}^mNx{v79#)Zn@;|N@8M6Vq%Ej5eqHSBi(QwGga3NI&wd_l=N=Rw<-QTPAG{$68?SF~a0}u1ed3E4NrDf1)KnB5 z!W4*ou=*>C!c5p=5~~NrhICpIS_{IidI;qIxV@f4P7S2UGS4v;Yj;b@$Z!&NnLZ2V zC&BCU%vU}8=6q5I{?m0j^B86W+7)+yPzAVuoNPLypXmg@s2b^{900LpD+P9?Qo(RT z{NL#QL65}sFjk>b5+p#(eJuol3>C%LNAdy6x*F-%U5QM!#J%EUe%Ori3GeV3cdi|I zH;O71O>kyo3t!Jfo4DnEbMze$xjm0I#^k}peHVP8{}^xu+VEIJ0cnwp`99Jtcu2tc zu`>+6XySXJ=^d{1vrdc{3Sqh%)bxOL(|G^aoA(VGLE3OUMA28%{c^xXd&;8Mst)3Y zt&;aW{AbIU8vR;LN=3jBs7cj7Sjm<78u_30Z!WkdDpCh_8u08hI~2&27{vbWlv z#QLa2;OI#%PqS6rVvIJGGsAMiv-^^#aOpXURC2waGth1cC=owBH_D?`Obgr=*Og9VL?tbN{U=WERa{$ zEU0~iy`%csjsz1o+_bu+v}zQU+w=qwwet71An~M z`+brm?1#!;n%2$Bv9Ep%Y)DuHV2%EMDD$PQ0{#2S^`QJ``vBY?efS86|8zj*Nspg^ zEutlnuwviDd+0FrXHmcQ{3oL(L~pW%?;VHbjZlh} zS>N6NzOcL1mht;U1wF+uLYV`)kHFfbPPf`y=u|cd=R?^en=)!#TJ9E-jrHySqv@;T zq6)gdDFvh@1!<*2N+g$%7Le|cPHAaYTDqiTrMr>t?xj<@yJ3Nicc0($y!+35&dg`N zbI!TnnLGFHnR8*Y2$Uw^3iakKIA~ws7=+aV4ER^Q19<-XbZ~?J_Zs9>gZN@~3;wmL z53dKE0G`v~^>Igl=TFE2DW(0laEDsc&I0d=;5W&=!243DHK-ABy7vq=&OuliB z<6+j-v53?8V-VSy{T?6{32Jc*zBkZ^YxLfMUKb%sQq2&oTMz;f;rsuHznx*lpND{o zX>biG2-rImajNzRrg20p_dS5bL=jh&=>XvSXD|&lVmW^vP)d(f6lV!I$oDklFntX& zt3r_Q!Q?tfo>h5?L z0D|BVc)h_8P<-kbgwulr<*r3Uo8PK7>x}`$H=n?mUl3~C1MnNv|Dp20ihrE|WQ5@w z{kI^7Uf?7fv{-%*AhUw3cmOfvAo#1)5DiJs2SY}}@Oq_kU{H|V9>ByD@uu$(Rf-U&yAZGnQnElw$YSmal>G&!s?G2P644vb4um`8!r|{fs>{P@1$u!j^JOY zHPs@pk>~%s3jP?2sL3}+EDzp-91RhyQ&%9gA_NH`+)aH8@PZ9laSuZ4L?8pFivuRk zH&jRA8h>^G27jNJYdk8U2mJS-cd`iFw%k2{K?}S-`3P`axCCr;g@#j8KdXw*0ov3m&}s+5KCZ80tbU6JF;l>?I00m)!SjtHVb;X|1$96YO128&fFx85k_7S)Fi|MN zLGuwTi=0lw51<%P1P^kKZpRM8>m`w>Hg{lw4J2y#67VJ8(?PyH5;gDyK$Qkh8o331 ziA7kr!kCke0O?4_A~C-dAuL|Q-Q>3b=}7D6?m>5c2tDu(==eC}7M#wz3|u&YfPE_A 
z*n&tp#hM}%OmE}1%2E&&J5ONY|IT~8p)1-S?x%ng_3y7vUu{~zese?je$ z3s%YrATJj2>kEPi8?jt{NvpO8$ZJIW@d0K;~g76RP|LVSdq` zzprK%_tV?dG&OTC1I@DNepTc3#kJVZQKVk2a5_QPoOlL_sVnCpm;R58J#)lC{5!t2 z+uSz2zd&c2&=~@XD}cqo#g-;CRqyJeejDiI$^>6ykcDz7zqva2L<$sRhp$#ZWQ(`M zYqu|~1^|tN>5x=Uo^8ZvB%)R0%DQg5iMQ?v_*Vl4jszr<0#-{Qg(7dR+;p#eKanCV zwV~`4kd12BGK6Q%AgzfuLgfD-mHt0uwHXp}rWC$0`3v!e9e$!R02+xjnTSLNCbp)$bDY0(cFi`m|YJemUdt$6GlmK(&eT1h_g8To43pejI*m$FxyeZvS(uwMlnkp^Te$RZo1YF{l_zpr60w~QEj*~09x*~vQ!#7y3!DSB|vz2#AKt3mtmA1@l4(c$N3>5g}Hiw9HuXG-{nSWG7dv8?s zP?Gof=syKL(_$CFZ626BmE`8A_NCpNSsx|`5HBbGip{ATjd7od*m+NyRP9{`ejRf{ z`;&6sgDit{Q+8fTae_R8^o?DD^<{tj)W?7Lsjq^UZ(nMbS$T8P8$o+)2I>6QHP)_1-E@s*IN4xu8&ds!K7vG`24Q;h1 zr@OJ4xu)ZI*g&7yRD=lM!RT(eRlvQbl`6J3I@(i$}6P2lw?JI}1HdF%%U(@n? zj7G-q-9K1xm+C}L>IT@B0^Aao_1#e#@y-G(TRCeAeNk3R!@3ui6PnmjyU8|4m$*J% z-!~_5tx~^ORZ8H^Bz)x{W*At>L9!&}gJNJJXzE+fQ}g)oWuuzo&xtA2rNP5`E%qfi z5p%t=AWG&4!BUF*%SI-=z_V(a?jH-JOYoBt+m;=}!(DWTVv*xI0lQsOsw4J#&Ks0Q zbmML^Dtrgqv!7?5Icf&xJPiq+&<#MJOo@+C3~Iy;*k7zNmX~Kc=8@MZpPf)_w4$zV za&_--U@Z-P8f-!a$)wESFa3K9vpW7Wwjk5fSlE@-<2}SV+DbOG3;mMF!@sK z!^=iNVf4(U6_*@YwAHiwRtFps2e%mBOar7bLp)*7Da@rO{+ErG+-L?}KHUqA>@}p} zCTjE}3D6#ead0gD9qM9$7dG#@&tc2~B(1fsU@!rq9KUPQ^axtF>Q^ z8g)8IVfT6C_u0%fe=o2c+^Ba}SpY-cD^yF--?|$YF_+31emef?i$S|xE+ci|Zb(^f zT=cv(0GOU^qB$g-cmHS*sxf*%Yt$C|dA6z&d&7^iL>7J+M=u_E!un}%+Bo*=OlHV~ z9&72N09s>QPRpo6utQQB=Yoro4GgU&+c^tI(0Gnkn#AFvF*LyKMPnFepyTdF74PaE zidGM44(gKo`p&z?brf`-5exWro=5(hV zeNv^WTWY^@Qphz3>8DeDK43#L|6C8lM>VQ0B-(fpcQD@^!jt3Go+MjNG43O67UN=Q zVvZIXnPy|LqAls*WiLlPU~-73y<*q`j^ZL)0H0{l8iv(7*2Yn9@Xicv9$JSw;#$n-3NeAmEA2q(y z%<8cJvnmtSy2kNv5ugo?e|I+bOt#{g_EDLy}yiwHFZvQ$KA;7Jd5_E^zrR4 zu|mXcr!|NC6SZkn<*kJ#carzxhfMS(Xf#$OXo&qZy+!rp_jr8S0C5{Tap5-mYT*L) z>4rDCYd<~a%yrTQYG`W2anx!*7U^+n6%iF7IS}p<~1yM=KD2)6Dhu8q&=p5h5o^L;F;E4Y5Q` z9-?Li*X~D%SuDH|kG*Nfu4(mnxv=A?Sy5@T5S|3`oFFdDXi}kyvx9%pyBSQ%-H-U{ zrqQnH1~8Z){yOoZ^e{(#`r?>G%$9-cDNNzeq(ijIpAK|z@rEwWR*XKb%U6=7T1gr$ zVTks+RFo#|_N_h5@e9}^1()e;PPe!C7=bRgPr2QCpq0n8>DiShryjF-;?0vi%dS9+ 
z(5^=VbB%ao*>$T0&cM^tXYrG5>%SJTyqK&-k)GJAda)Y#D!Q&n)7QN_MVbM9-axS3 zW4Tp@K8@QymK~oElylEmjjnzwXXfWJ=hf+E)j@mjFUMYCL;oV(R?Oi6<|bxNM(6`7hAv1t+Ms zYdY)L%U9@e{Mv3c*iJ_w*!Zt=YhCQEbRnu!Pj_kt)&*j)mExt0sQx31M;CHmbPC~@ z?APCYf7_C_@`5(8ZQhIJO~(f@4C<}3IV*@NR$qZSwMq&+XzB|gX5M?IIEoV?`i@>V zQ;?kTQ9e;`Wj+f$Zwx|^DwcGGB4lqkVaNh4_2UAaV&Hk!)6AJEQp}kprj~7ux|Zmt z4T^#7W90f(Qy#vOtnLlk3j~NJ$4{>Xj7aLlDruX46+g@K#eg>wCqP0f87r#*Ub z)?$+gioF)~@X3lu{NhfNX2r=z^@=1}KsCH!>DjGhO^%tVck}CefkWT?Ka$~6!Q$ua z0yaPA{xn_Q^h)m?gevKDtO`^6+$;afT}Zg}CaN?vKA}6POX#~X_I>HmMULiiaZq|Z zy@aR9g)V#L_NeTb?Jk_9da)4Ny5WyiK@5^wBUsd_-M3J$;FG=9(-EKlmOMD^9@S*| z*XFPAK8#aGhwVU&lNwRRv_MWdaz`34>g}W?#6xQnL90|n(4whaq3RR!4@gPM&6<++ za43D=Rma)_AOJ*GGq!X>7+MgK+7{3Ti9M*O(w@(rU&H&#P*?HMx+&1^gD*&!hPVY0 zzh~jsHDJ-coAOR|0C}*GLrKPM&#z?f_{FtXWW?PTS=`kwbUtAh?iQYO0yAdb8G1}u zt&U~%Ry2qoS`8^P(O12`Q!pBNymr36OP-L=DXT8S&Ut0$|6Rlts1sw+sgaR%lC;kJ z%$WM?!cjr*%Xbrj@ZEa#D|QUt_2Z&lKNpefJJyS&M@|#T`_v)zYY5xSFf6%P9ri|| zvvYgQ&P-hJ=ZXo+8*az9E#ej!e{+t*3jia2GU`{ZJ0p)}!1$Zp`;sp6VehWa-7C2t z#e?K#;-l*4s`DG5aaDc75tI3_ilJjni%xlQ{s4}5JLG~)2aLt)k3Lbyak`0umUtd; z5c31@&d9DkP4W!*(p!`@X=JAb%B+Q`-F-rJN`xgU#dNgWR1q90$EdCi?@Zw352BkK`)H{t&#|vZ9fxbp-B}9>(Sj7Ob(EP-K(}nk zAmiQpf$Wli>!K=5rfW#~OzG8!>@sd1_@AqQj|0#60>)n$Z|Z$0N5}!8yR119mCR4{ zqlrq7W#VrO?0>pxbcNlNTOSWU)s$7L6owx;;UWugr z1Ma;_m&Y!%;&Ew~o=ZAhy+52)i#zkskD-)V|P>LlFIe-_W@xSBg^wYZRlfTQ z+Jf=rZlr@ZK1Id<`L!JgHpI`+GT$>Sg}dn@9KM|y$jw@gcYg4t07S8LSgOw463&KS zDf6!_j$Z>Lw(>v2@83s>MFSpc9<$@Z_o4yg#suS5d&_={W2c$htkH1BXaMzZz`#T8 zqt(X=laEP79k|H95A_3nLzwp*#68|kj0Z#rOy58JlezIFuYa7Byn4{Jg)fLzAPj6* z45Y6}>#u}vNeYYaJ}5s>=fTXG&OF|a-K#%Ede+~6&v(9&jjRTPH|;3^7llXb`YZ5r zQhu?1Xz963gSZ`FIq}NB0^z9!A869>>XzA>>vCaki~H(>PN3nlJx+VJrnr|rh;9mf zvTcQ-4w?v7a-UmV_v|BnW~Xp}oWB3mG~-^8u>ZjJn|5u?2EMIFV*CXvRC9n|b2WM@ z!hCaK)@B>v^z+ISvoh{j^{1dlz(YmRquho09g+W;A{if|)Z=Ag(~#8F-!Pku(WX5n ze;cbC87Nj^lLMd+d}?sR*x={Dce0_((G|W=Ycv2cPX6ZQ(gu6PkQu5EPK3eDzn= z3c}3eZFv*ujk$M0c*4zGUp!W%?$ZV`y#+`eoiII8j|VKJMyr}W!LapB;-qH%%WZtO 
z$Bl8uzd<5d+L=My_Sv6j2Ygxm13enrn<9#$(#767^T9u{0%)2FzRn}CcWcp&4l^!`Dag&)Cy9L)R>@EYvKJQ48f;$Fim z`VnM##}5=)O#K&7QDnRRCZOg;5Vd&A%uZMlD@aZ&4^lxEN;7*4_;vE zQ_ygjs z;1S)xHlURT9S#m|0A|JTufcezX$RPC{5v>k7W8EvH`>w`0HZtgmKDH*sizJH|Bz>c zTH6ZI>5Hm@2jRjE+dr^x0lyXm`xj`l*!5uJImhNFIFptYq7kZr;sJ?8=#IZluE-RD z+))H-zNFAM%x9{6QJ4cMGthx?+9*C?k7&%aU~*%%|LdfV&R-vIayODO{+LQbF*yj&0eQHlnxv)BMT;m$;%JtO+)_N(M(kMTyjcopi z1hRxrgtyrWPcH2EG^s%jnMUJKR%vG@p_OV30P1nTOCKbc*5!}zyjVLa>JexT9c9+o zsyk&+vmxIvm9ApfA|C*T#YQ@U-{KW7|UfIlERn zFQC$$Ie#O1OA)IH(UrL{TF-hYDT&F?fyEMkG$?28-m}%z^|~I3_~^q$?PCiEsHp8m z_tDOw!jQO@Fw8#h9$`8#yH9j(8s6>p>w?}vh>-{YSDIpSri_O}Q?eZG77 z_hL1s0;;751T~`>pKEAyC>9Lt$2C*Kf2r365TDmn_TJ1-`!_a4zYLbFf8Zt+NhD4I zHs=axf{4PuLhZh&0W>wEtPso?;TxrPtF<14mq0yZf602 zt~=)iqa$FM0|I4wO`o+u)2CGIdqzikp!0CmX6&a5EABBmf1-G%bJ9$&t>-L7wHL-` zuWA@klu~RGUp&F8eok0^&AT_!J)bxEC%-{`kw{o^kpI4^l|efq!+2r$qn8JYc$A4A zK=G_?!5kLs3Uak?tzs*&$2k{*%l%8iAMd9g^_tiA>MS`iEd0dg)|9uubHBq15p#=Y zWYs?!l0N$(>UKIbG3e@h-GYmysH|*KRha5)gfdXIy99wBU8Y9nJ-t?Cy1Ep zfVU=M+tRpN0n{c!X57`_&F8a3gDp3D@y}Mt6{Rz$0OmJFdPZrJJ2a2=C(LZ)Cwl!P#0^g3iy}J{aN3bJ+dn+xrD%2TDjk@+LD@!9G#l@kB+ zrdcxd(?1VvD;j%COrWJpX2fp&Jg@k^=Xw@`IV~; zM+xS46iHd8Csu0A{QaZYZGzbd0;t8y@eD8<-#Dx+S4Gjg7*4!wkF8o9*z_(?=5!bP zJ+@x+sfMB@;O1;<2~k%>#<fc;p z_NjVvsStIuW2n#>Cs?Z1a##K!eGWi(of2Qn_%ojrCYG_v(B%{e6p~1wRXA#;sK5rv z_LS9!3`OU@KOz@4McL5#&VN{g7P4Gt@{nc_Ykq|KLR^s*d6$m6k%D10CqQn)iehjn zgF~71L&!uykYoL)*d&g>;>O~l*lOw3v)prh}6D6J|(n_TI8t(HpIKlN9#uXulj}nz|9Cy0WQ#dat>6 zU)qN^&j^>d`O-o6#Ouj9tQ1SYy!K{G8bAMUJ;^TEM8okcD+(xFz`vcWg2el zkH4hoxgYa*IuTs`{62Tc+yg&7 zvza!&K7s4LJIlE#YrM}Cn%O@ux)VaYe#mw5%r;A;aVe2{_)6_l03EHCgT6cf!d`a~ zSahE)Pd8Fma|E1Q3pS8n`jf#Bw6HJj(ip-6x!n!li4WSk;kOQ#4s)uXnjp?r(7|f> z8@ZB+)VqGLNTK{Ldtvpcr&~ofm8p1b<9*+*y2)`s`K!NuwL~Pz=DY-RklHCL^FKEZ z+cX*^wlb7z@WXsE&{K}ph5_139SnzFCJz~nMR}V%W}oh~j8hE)md;3Yxi;kd zO-?j=K3_?+nI0Mj<|gUKfY|-ZmQ&C3;3;pFCn4JCDAv00YcW0+ z!y0Yk{I{0-+H$`~i5@nn?$M5bCHFVrsd-z23U}$@MIYt5`S0(le3vihOfgx;@RcB0 zVI-B>s(8K?=*CLtrC((-Fyce!B?9r;P~tf=FCV2y4>iN7a6fiE{Wwy}!=BQgeH-#4 
zrIRii1yehb83hD>nc#5z^fqaV8o`>x*uyIBk;e@~Q+LSWYGVI$&u?T)^s?3G0K3(_ z?$ z=e@6!tyYo`?d5xZytw59?60^I-0v76Gh|Cf6X#3a<(;!t zvpFp9vaB8^UyQiz9E^TxLUAhJ&#Qpb8|nN&j;g3PfkIUVa5{YY=X1;0&adO>DLCKA z-V@t(MAHAPWg~A5z|uU6{0i@S6E_MjeZxZG=BJ)V9%q}O>Z{7VjlL}VHg`7QZX*

rHSHs0yi|x(!=lP<2e$}K+v44?&hqnDjJ95T|LBrPjz%c&FDK93}dfI!$(N%c|$Pw@UXg&l%n)D8oXi9xk2ddHu*ro zz&$b2Jaj=V*op_Xcm|kmSOb&p=;PiB@JZchX`CA8=kCL?bE(TpO`x{el{(5!5da)1 z#j@+=WKh8U4iIlmr+S6EchTqXr8z`Zz^ME4L@Rc#ItO1%rabk* zXAF$E5S?E4GjwV{CVm}(GnX;Ni?7OY&6b6aWwx1+db*|S(@}HM)YwNlpiQHl-4v@~ zdB*uHtDl5`&=z>^-{t7PuQ7J|Rn~fC8Em&4Qw{1JMEIZ;0!47KJ}>cc)$-v7I1?s? zGz@Ko_Q=M86{|S<-vkPIiC?2LpSUh~4fo_U^I!B%E8`aH)Dx!DHlShBfL*~!Q~6w9 zq18ZOer|L@NP52P=!`7A&`_XbnW27MUAg-2@B%3+nC~odvQ@e5yLDXN8uyu6)AzV@ z3b?qh<_zr($)BWG_xZP9Lu)%t)4WP-5yu~`_ek$X*!U$S#YqK9-DBQzsOJ%D#u1pX}fq2F%K%UnK5cZ3980IOU3xf+{m#m*zBHq7H3i!0<=YN zo%Wg~;iAOl^;>7ArfWh_wb|;Nv*p&WCRAd6}0(*2!HlKFAz9^qPlny(wjv*X@#Rox$T{KKQ0hR1z<` zo+)mbY=$oz8BUv@43YBV&`X8MPHhepzup_G5{QEA~Sir59ov`|A z!#J_zv>bl<-QyZ{U*Trlql}_IynREZrmX?Ct;Qn4WCa_b-ccT&c49ENfz(GydMBp32nv7Z% zkdcoeV=)XN=%FNzJyZPO28RPl1y;pjk*p3Rbt6H6YN*6^~@C70cVwC;G{h&CoXPxqlU|9FqSlz=)4Ub^i?(Qvs(f= zZK;m52i`m^{!W0lO6zBb?z^BrJx5a|f_}quvl%chCH`ArGdD8CF-}x<&nauB^1SlyoA~)0WmJ#2}{E8P>?lh*gD)&Bxkrv(u zZ+1LA|M0Z1YUc_R$bLVg9V!&j0_Z^$)u zT7GIqF{$3GYJi(7Quk_U_qQ9fMfb_brLTS_%7|^$VVi9)!)w&utEE5f`(G&NyHs~0 z9`3{T>|&f(_sn8fc*3sJT61knE?c>147q~ixXG~hbTcw?Kc>#*;7y(i&+?#lKXz;0 zNl(BX5~5#p#Cn2JxCB-9(|z4Y>}D-)zv8&7-jvgU<}c*g=8iU7=HIdrsQBD}#a3C$ zEq1nL?y@oLbI>xC)oy^x*`MEq$kv)4WD^R4=W-l2&ceUDm0sj0Vv0Kwe)e=wAPxGZ zU)(JSeL>>*^GnS82$gmV!sf$7*WaVTar5M_0VE(W6TBdJP4Uhbdr#H%mv~sG!>XxN zIvcG4k7z2))k*t#uzDeZr>o@1iMvp`E)J^7^sY@Pr$yUZN4-m%d4IWhqp*H(=zJz! 
zgp~Ql#wa3)ow2v18^>cab&OC5V=XP9_nmf=9!2VOFDUZlCnfFi4F04#gNOMg>*-*| zZ#GJ8(W@Y@-}_y!ow$=+Y+o3at1?-=&+*lNI`~cz+@<@TG)m+Otb;oakX!VF6@B^e z`rrAtZ%_ma?wW=yNB$)zKvv&{pGC3m#5|#F6rUN0WJ=9)?Tqvrg63YC?P5qyDgM>> zwWm#KH8N2yt6GVa5Xj0DZ+CS3ySBEv)^^|6)UmpDR(AVhNx;-m#*bv9q&bI>)`k?V zWcjgE$a-@3-7mNVc*L(d{jt;A@5bLPmXccJvba|>tI%2|FOh>yFR84ad`opJ!Ss&x z4)XdH_p?il?J;_=^2Y$GO#VIBncrb^5p?Gy-n%McI7A1NjFprLAnLp%t9GKJ)3Z!0+|y?^T8y`;TdUQcXEFQ^yxdS*2~NEs?Y8%DgKP=)h?c6w}o3iKPV@t zvD=<7-eQ-ZBpOn%e$y35>7@qF#U$bG*r2_UJ6f5rW$U2T*8N-ZDq+e02YQ8HBC0TB zx0Y{@yx7?rc&9$Ya{!}<Al*DMT{zzMX$v2BXprP&# z(b^1s>|sifWaT?OHb=E5iShAEfKxY&az(V?Gku0!0+6^M77p$(vqsM&=Sp;M9!%@zhMIP+@sR9Idyj_e7|veLK?= z)QyJ`kK2&7@7y~2Fl^tA3!bG&kmy&Q>3P7rM8DgF)ECUDkQG}L%w7E4@Mp>;R}m#f zF+>ScDyXFt=V1eRY`Kqcf?^X&1AcsZVzTb-UL$IBXy1(HI;F23z}bv1Zbqlzq1CYTFS5J$6?9}C)Ft=Sm~bBPnfaqwI)S?C@h}%87~_z!{L`WXsYzs0 zWO-u6_M6*rrScNFP8Y3BX!pWbb$^kHXc9iNW>>ei=HX3pW=zi z*lz~*wnTol1?G1lAHY;CZ=cnl=9Q!2BS4;~Mv*)dNx`}B!sii|jvu|z3iO|*zKayT zt)OWiPWzl~_JwJtEZwmKJCmXA2NkYK0+EaHlX-?P_X|k4>qwA>XtZIWE^TFMFZ*^V zf5qY~+t`L6Z;EfmcO&i=@%r7T@wSM5P;#pcA+WTJC&xV*W=kUC78oG3ha+}8nE=iSW~@;O(F5+IdEuiB z29^n{csP0i!Fh;mdFUQf43vCsvNE3HO0W3hO91*Yz;e1;ff3j;YL+s(eO$VlWHJpD zKBN{dCj^qEN`_G#|5=o#jx!c>3BS&VYmjmCkok<7)sF^I0bhFbNu_i(sVo46Sw-Ey z(068hRQdNtR`lQlR*1R8} z9^rIsI z45*VGMGv6=9*{~NC})ehT(w68)WIg(WLFWtr2LrXLG{d?SM^&iCm6O z;fk8(FL&12qh=?1gWG4{b$_b5U`|GayC*UnrYsdz&_2fE6G=M&QBXz6#2#t1efX~a1IK1m3KpVkz>0%tU zeMQhYJP+F1u)&RuyHWJ#GXdfQh5HaiIDT)*1+O8YZv#xix{y-C$s5tmh`tTM$oVd0 z+hU#P@fK z`B9;i(!-;#r%YW1wo^{LjZ@miYb=UeRV=W7W~y~0FetdGyP(^e12y=EpJBz39R}Wl zIP@L_Kyc*_2hKiFzEMK_0)MDmA|q|;!}_Mg{vA_EFhw%t{oVULas{E*|CMgt5t z_U4J<@;);rv;H=~Q7*~>zr4#^=lNubO#xBoF%(ir_nyQi8jTk-a@VIsclm(^VuatE zCq~$;1xD@aMn!mW5;mt`2Be{XZ>Pk9q7pdl{sdw(+?X9^s?^|{+X{<6^jM>d2hUR~ zJxBpOda-Oc{AodwCk|jT&)yP?=VWNhz3dSqpJf>SH{0<6i66qojj7{h4w+mp&@L8T zx-~v+PDrvK(lKeeQB|?#P{DRDy;)^L|Isd8=e)ta2EE<&4^%u0%4{VgcRD zI~rMHOwRW#F82^%nb?Yjub*!1km7>NY-B+sT(?J4G^spcCjE6$VwVMjLK!j@BiSfy 
zYd|Rw`T7+i1iv)kdfCf;uq6lS^9`B;?{A5G3AilRN8d8{f~>(Wx059xZtZ4H_u(6^ zLWBWS1YJ|K@Yxgz$X!-8nez|IzU3qANfm(lnR~87F#)g0*{>p~s|>k-mZuw?95MGd zi^#{NUR{>LFWJ$3`}s0aAMP>NZ;;nW{-s|Y zi1u{XW z*INaaurE@V>Eev?akl8I$3`lVj$W4D83wO;< zm(iZ7arU}0p!BpgVWPYr(FI3^t?@1p=Y3-ys8a>I-7+YfgyecXY)7;xB&r2yuDg5%}M2vet@kk){vs>NM+Q<(+efT;?=cxUw#xv33{3 zD;m#Xmwc(_>QQ2K^^+uGjd>`gP>D72U0pZy9cLzRme~ipzJ+k?@SVY2Bjabn+=@Xm z_6F+Ep>F~l=GV(6bZS;IMRfRKkf5z(VQO06xAi~fQ8Ln=f-9}Pvy0^vJ1|yX&wY}| zv)UM-w`%*==~MNap4$NDx}P$&5Wkga(YDl04_~xtA+eq@WNy^b*6Sf^1)XcGw&Fgr ztA!(Gik?ep_Ifa8){w09cGalhwB60~+K5XDrzCXD^-i^VJ#(whOR4k8yK2l8c4Lcp z7AZv@`x)g*JMet>J1SQ~<9z;vEl)zmug}+Le{rM1kMeh>jklYU=_49TS9r<=zB*j^ z_t5mUYS3>4`e^vn^)=mI_$O$nt=c@w? zW1y6)^9wSmTTOM9ywu_z{z4_Nw$AoTn>Xk64TawOf<;c|ADyFbnLZ8Ol747ya(k0* zKjW4ymS3?g-t#7hd##Ih5}8vzT)vO?!)HbP;`}X)m1uyo%AWKH=8my&-3Vm?%@cRln zF@e|4$8Ug(|MVY3vNMKWY@WnlzHs>Uiiq1lt~KP=Zbw)*Z$DCL!h14=qFlZxX5}lr z|HXA@n%~yzdjTBAa<^Fn<4yx-Ez}oabfL){0;#1hJ~5_QyQGcJB(0NvVvp0!owQ#t z=>JN&h%7krmwwHQH@5Vvl$PUT%owJV_CaHcnV`7m6;Wdmn#Br~$gyE5`FoexS&qyG z6mR?qe+a5{oh`<^ALivojU&%#LXQ^5Yd&fhG^y{(=dk=%(l3OAwqV=;ryyT0iz;-T z4bH;({z7QdMuh(%NFi3OTsITPOuVt>L*nUOy=|tP#6*9`43sTA4nfpzz7@Lo+F+SlOaDVQoN?Os1eLiS7wXe5inNMu(dZtVyrN1gFZzasEZ?he_& zOgNnX=Q({&YeMN`L<|x}>~n5ToGUL1T-?Ae;%rVXivdb~7v7vOl?nfkj!lVib0&;V zX2nJA6DFN^*_7S_(456#s(jmx1A*&@@AGqV=)m(d#IrUY%rWD^K^f>cduv@HyY7?c{>i>0Z2glX!hTb_%Jv*I-A~KC*v$>dn;v4M^ya^)7ykYpqzxZNc{JyTf^FB$ z`Ok6(Hg-Ea9=FTkw^9Okh?cX3vzr+$gl8ojvP0}UyL7-b;hTrVrwlLN+ z%dHNG>EKL#Kr^K2{5DTW>EkEpZTuxiEbc-+h$ISH?c6RfS@xy0p+Iw6nPae%ey~%| zqIn|nA$=h`G#v|>E_x6ZZ`z*fmoFD4Yl(bbI+yy);$_zuab8P}gqumAmP4-hK^rc@ zA00nSX~oSLQiPcFMG|6v4a;n_q{2{XLLB2v%SE`2}ilq z9p?jT^68f#mR6fOuh7=dzc41Og_khW(OM>16`JSY&4g#68yoNb#b)lyzSYQzvK#4L zwH{{hQoK%~C;O%O>H<5~p+5dQg_QA`|FH1xJ|PyhiPW!)c;{xrqe)>WNaYH{g*|@*0sM2ozQRJ=#~%jUwho(g|Dg( zULfjTzTjrI$-xfzy}(%LZ_WSPJvxyyM@`Q#nEH)#aWI98U@k)_xM2NuIcT=wV}{SH zwUW6VU99GnSIC`3Ps(SYE0)1dW9#;su+gDD!)=4`RO(EhxKLZl<-hw9#Rj*ZQnv*- zPCmc9>g zAMMkhs`}AU>+q_aNjAW^|(KDxToYF===xQ8eck&m` 
z4t@zw%?^c>EXn=<$o4-IsM#3O)S^!7~W=sVdJ5^{>c}7`-g& zVU38V7oiXF4`iRMSiBgGp5Ob_nB{7aTe^4~{EhWI0scXq=I>sG5ovY37+Pue zBo>T|p&Vj!Y~f5+pd)!dd!lf7`a7v&n`-3KBKhk=XO^?oUtE9bgFXq4-@Itc`xWCE zVM zRAuI$Yv#{jj;DHJX~IESS3UsS+pBvgD zf|WjojOtH2Gs@m$aNj-pEdhy>svQx6-AkbU8p z_!_3n?$IA`88JwtlwGqKYZ<#_fu?Fi5Lmk7j{m|aqao>d2Bng#!`V>IG1YE1Yv+$L zJ2xTh@jKht#5`nXTTOnvEWaU7Y$R^YirkeB@b0FFR$zd&`cCNJ`f1CYgOI8_Zpu$)txQTV8Q z7F!_5kr7EQOC5C~AD}Me2I@jS1}DjmOs=n(DGN&9u8STo!5bmUYj$Ea#`4c;#ub z+9E8;Xjj;%T;I`p6uFMV+o#05R4FYCFEAOX+z#broPHpn?p2Ue;8>GP{#slXZzX!o ziH+%|P~v2=ENoGTzfXzh3*tPyz{`Y~++`;#j_TBVsMKWq`SQ=2s^&oV?DXW268Vk+ zBqx`S3Qo%uoYu`F=*5II#krV@GS8|Rp;&%Mg@6-@MB@o*+z%d?E?h&?nbcqzzx*yf zw{MAnnSGX&R!rM!?SWT+Z71DB_2WF%j~1#QS5^JkPTEt##>_qPZ{Dsnu&HWWC+Pr! zkTL{{3PuDCG6kyM0>Z68FbG8iMO3(sE%-zRp<0x3d4fQiR;Ds2qn1e~TgH|_3uP#j zG87609#}=Vi4|@E;Q|$M_wXgzXG#L%`$4|8`Sx)3Ip5lA?X}N_4JMfa;&b?SR@M)i zGqIlJk4PyCT*>#6YpqSbO|@CwAJA{55pA~xkRu*Z# z!29n1z7xEaA9bvF9>3#xoWk$;GEkN>4>XDNFg}Yv%vRphi6GGP8JtPht~8PAjL+ko zSk8GHoC5MlcSwp&8qm{8S|K&L?ytrk&K|6Yma@w$B}(Ft_b>GECV#P$e{px5%KOWXv2w=8d5=$v^Hn*ulzu}; zD~=cBqcxfpOdd%J+3n7kIrJc`0#~k$zb=+dkoyn(1hua8T!wiiY+IaF zXUxb`nNF#_zOhxX*W~wCw+X+yJKXem6#8GQcXiSE77c{Mft!|4@k`DNu3e%NUfOh5 zR!Zyh%@1#S-`?y_L@=S)I3bV4%?W0m{}{lUJE@CDo{^W2v1jBNdM=;qNdvk<^e{dl zkF`4QsLp@PpZ`WTJjOc1cBBxUmoqc7pQbvA&Uu#eD9v*6$I>i{_~#PVWPR4;cD=O8 za;MOi_x#@y*8fEz>o{&_<~*|qy-AY*kV@?K=tV4I{KBt`OA z_`dfD7uut&PbqoC=itXM&p9I4e*YLWU^irs`z#lx*<%`ZgpP)b=KhJ%ZJGT)J&Dns z_uBCl#ONkOH6jsxf>r0A$z0x^9z4o=`X265ywyKLlUlQn^7?7((X_W7QzO>ZI|)mGs4oCY)}xrs-o$J+r88H`q|Uef z=Xdb8V0qW+kly-0sWG=9RrHl7Y*Rev9nLC#ljO#s<(9JN2i!OD!7u;|RwD5LQa4%1Uk%=JQ^1nB9;X0jhiZhHZZdEf*d4Qg( zIN>Z^^tusYoI=;3j1#tyPb!>*Fsy}rj+NF|=toPoD%PDHjZuX*%3opb?=eN8u~D=! 
zo5j7&AmjA*n0_nfoc>{SRgjS8$VqmOX%9#pDCviI3nRXxml5MUrUf8AK#3a(>3Ml= zZ<6O9q}Aj*_q)Wi8t;xYPRQ5c;xce?eb{x4@Ks=D6^x_nF~t8v9l7M|%f=|1InmfE zBD$uY{8Gzi{)E&wpLLIDX?vwPW}w-fF7Ix#$Mo;Ryp~T%Emue_7E(!J4SHg7Et`c_ z(w`J^4z##?>!rPj8&2jw6z;})NL0MtWiD?bVGZ_}(!jF#Sl7)lUdL7F&bZ0!x(0%- zH}KoxEZF{2RP_Q!O}73x{zjB>N+o+H_OMZ^Zk#X`!!?KvUT3~sI;ssDyeFmIc979< zj>}>V{eFlyWF-{${c61BjJF@WD9ZbVS(JmW%$htez_xjox(2X4WrdDNk12w5v;1+% zg-vDkOc2_O9cVGm%y%>*zDS17+j0eOxq|$=PD(9erFx;1f!+sOg(I@OaJ;+LsIK%9 z8EZUK#z8)S&B7)2CM+EGrZKZO;*K8p_YV@ zcl09USUH)P9TUi-1#-z_gj`+L57?b^`#JwbgD}2TjikUyJ?xH^JsWPX-O0kB{F6N) zYhiafve8-0qjIxkgt*-qgN7>f{*DdlMB4dKtqiw8c9hvEl<8fYl@sPFHtt(RRmw@bej^16rX{f|luQk|QnfaoF4juaO!$5#(v znbxCR932<t`>_Pg@1?pDJhta~qHfzGeedYt#rjE`Mj^lBVGGE!F=t?NH6h(+x$85M5H+M?6}oqtsJJxR#TC1h^dA{aTjp_jPYLJyPYc8;AI z4LcX#j@ua_#XY>1X_4>_9AWNW z5lSzu8(JF?$=!lPZ+k_5*J|%mwf4rMy*5I7XTp^Z-?0MDt0iymbSJI$s;jkE6YU+2 z@V7Tq-X1a7iD)l7g09(ECZn}~chqWa{%)1l7VYM>j1*ea$y*~9|2lfrH$rmnMre*@ zYqi$eqBWDy+Vn6?8AH8K(p5Z3W7(7Rwy!r5Z*~wmR2&X(mU^)yW+Y+ZD>DChOE! zyCj{8Wu2P8i`O)jHAJ`Vhq-UbeQ+}MTt(-Hhr{!wE-xR3&6>kt*`+cJ&Cz13aQ`qk zWllStzQ&`)nm!(`@MI^gE6mxca)n!V!t90lukb;6dt?TzgZ9>h$=bWwQL8<>T6;av z-cX^vmFmv>A`C^3FqvuwX%1;gzRHl6=JR^0hxvyz);kSvVbgFsJ*MIH68GO=5Z(XZ z*_2BEKLg>30gv|vuT~U|BDUY=~jL@k!qK7A1!FM_3 zLsUY9x6oD-gTx4Y^}N>L&Do*S+T0zymh*aFYwYju?_i6cd|FQ;(>nXl7WlM&&^N80 zMqjhUX`TM}7p{?--KeG3jk@T@Fn>4Lixt7cm)BSqlHXDdUpn$Hd#Z*n=6o@H5vTQ! z+VE+0C@>ND4`4rFFJQMed?BS1@M&Nx;A6l>zB@}8`p>vIY_u+K*H{92HBY$&)G|-H1k}_D|HrE8r})-? 
zu05MdqS+J7U194#+xcUWDP8|*V}PGBpQ(GKHy$fwvHoJjW7VPVkLX8l`tbzQ`4yRK zOZ}6&XlOHCWGR*th)Z!Lj}yM8glsv1_yENqOs0gXasrvOvq2a}3B%+BGS5y0p&upm zViqfM)M(o8zW#-)r+oz_k-EK;$l-Q#*Do3LLmTgBvp`q z{^gb*zvq1VHe>lBYO4Cst7%5BW|~Ui@*Vi3GdlMN>_ARMnBoM7hC}Y|at=3KgTSHd zn(n4_J3)afzXey=7T`MIN?;~%E^r3W2^%hLi9>6ZZ4lR5+cukEP zH%UZ3XPc_XW4$6@97yD~HAH?>MdYow2_iqCBJ!Hs1d->ci2TA)QEwi^{_<;>75?2IB zahs$P_oq!{60g50pu`m+$Gi6<|5=EbtWY2(S>C z58MJ=2V4ov1kMG{06Kx=fu90LHu3)-%>CI}%9DFX;pON!W3%BdaxwH&f*6m{a@mo> zUK3?f-?sse4wHJcSL$}J)Ymhqr>aZ6v4+&E`Ih0MeY5#nXsDPf!L64Oh}7dY_>-RQ z0y%+5{b3LmQNl7gfk^%KdQO-`2@~Z6BK6ZC45b9V92#kykOpBS(vLWvZ}t_A5@+=J zC?AQuY*eDL)$UIslEUIP>ElxOxa-Q>Bo^R3lrcy>`6%BcZwJ9G_2i>`ll?Eda0uk`6T z@)wnO3~s1%y3X@p@wr)@a4+dJtx6 z#p`8RoG|7t;`PNSYuAd`$Ai#XD_(yagn9;zc>P%rZUuiaofr3bCn{ zJvaoyb9Z46GElaTmOU5=0v!WYd(dZ5EqckLlN}P&)k88gPjjfH$K#UIzXO z{26!>SPa|;+yUGSTnk(Q{08_Ha5^v*_*rzYnEqMmT>sOzFs4$8|II7$OeXQsUWp%O z577!pycQ%rhJG*`Yi07dp(62QG?Y!sK%Yldg#IFE)3s)ND+r^tW_;tfK@@&*`Q0mg z?eZWBpSC=R!iO&pqVN}%2UmFW<@a9U$Crhm@U_e23Qs#HD7C<^~zS%?bny6nyr zUUyk=g;y*MuJHX!l?u0*xWdf9XkZkuDo_W!UKjp9&;=|5mI4m}3xIjR9AFl3IdD;3 zEq~ss<6OQB0+?re3DcUye#MSeLm1d$hI%0<5HCqd-(z6nL-lQTmU`5T#c zCi2#q!9`v@Gq}kA^^H>GU1}l!1&jeU2R;gX5LgFT9T*0zjKcaKupD?6cnWv~SP0Aq zZi!+dXY%eV57#}$W^}cL2t37-*xfj^f~&kfu4nCE9ks9d8p{J(QF}TFSz2@W5D;c- z&EY*j7^^jhKLJ9*UCiMZ7xA)fwC3<45TdoB_EjKwwB~Rp2*JPSpKWHib z6A+eaDgIRu(zO&H3&NJ$ zpa&ed-}UC|oDfZd-xS!@b9I;I@bXX5@=a6{bB92n?@7>2;?dGv-W8p+T525Tqljjz zZVU*4Qg!{oxtFQBaMT$vRrlj;oh((CTOy?DEVJ$;RX2UM;PnSe)x9@c@cKhY)wP=~ zc>NlwI@4@H{~D>fOS1(1BUKk40sR9z0-pl51U3aW1e$;~fDyo3;m|+u0`MI0G_V9% z1l&dOj(q+{XZ=6<{F`S6@%g`;8N}yLo*BgFe`RJ6pa1EZcklDx%n0K1pU4Q}^XFs) z@%d+G1o!zzX54$9|KW@feEyp=|C_zu9~v=9FV#tmL?y7+gv{_GWXeSMMK&g`)SKVd{E+(sznNtZxV2 zzG=LnmgGBk9XWyM{aX;~Qo?0}j6n208H7qIsKass(fc3}ex`&hIf3ZC+cd%EOfjhI z{bOH2tGENN8p^V?e_wg!MDOmYoWRolZDo~Ey%&J+4kgr*6NuiIfk01ta98N1O+fGC z!D&S~`}NTKE9@>=^w4{yPJEkbsa|}WX%4N|G0-ZxM6UbCQ8#^8**#p1$EEL?0{4*e zxL0)0b*#wa=}8_k2l1(y|8DdC5B0QgfydPS3%@&Jccxg&!|cvVB632z=A|9*m>PpN 
zlhKkG?F!G>AM}`hOJ_%z5Z=$^Cwqt6NrrX$?)3NKboPGo7M*O^xS{EZE*oi=XX@Cc zx!5@gjO!9#UNVDQP92FM)YkN*!}l1qH3KDlZfh#gV|w&P`7@I7;)doR2sFv%edEX~ zZuncn2+1|t(oI%mKJipK(Wtvahf7uAl>IXK24D?knZz+J57-evuhx>fn1ls4j z*U9UwulH}^eMV!u?|i2;UvSX%@m|5XIPVB$pKpJsG+&^(&$qQRV7@@H&)1Wx$QLN~ z`JPHux0d^e>k^96NoaEVn1h5;+DbG!m92c89<0v-Vt0`q}efa`!OftkR$z!^X% z#UOcq={tFDM_`SIw6!OcoZvWzLgUiN;SGmE2XlS83#Y2?rlgcl6HH@@5>R04>r zlR58h692gp|6aW1{2$cA53(mdF37(mx3!W3qog~$Gp=-u2LF0yr zq2vTdO_FL{Gbh+%Qb3I>a)J*|QfOQeK>TN-LgR{1;mAa###>+I8haf0Ft9$*2&@A1 z{Lb+|!2eSBCV)*9`ycP=5(v|7>M)_6BGi+FdW=we3$?pYGllv~l&e1ob)Qf_73zmV-7VB@LfsraQd_;< z`n?l{Xa3asJ+-agtCM;>-TJ*v>$pX1AI_dMS^QR^f$hWDm?m#uCN!{pIJcz9+tY;x zwhzaZCU2i1G@O=LLumhVIs6Cr5pwu0_Yrb7pg2& zlTb^9Y7}arPz^%O7ph*UTA@lpZTp(5QK3eJ8vgpwpIyIq_8RV;M^f|nC^e5i%JTS{ z^!@R^o#gSuY4Z3xS8-kp`Ihu;X*xKsDp%hf>8SjRVUQ0LXAa4`#-`mdPVWp zisGQzH#tL&4P0=UIB({|KxXp!Fh0$>&t@bZ(Ea3g_m+0|_d!7X+yZ(Kynuh;+_Oq6 zRrQq>%`@~MmX~n_`@(h)*|R_^sYkbtx(wgyYF@WhJq@inGCdZHwLi-hYY@!S)Pr29 zbgI}8w&Kj9?`8Ha?1xt2BaA&|WvA?$rL`z#Dywc_&)VeDJvz@9EKl@c-#;g-BwOwN zs?%k~Vqi};;)$}wTGXK1m@!7Tu}?G-TH9c708*s%0a;N*BmyhlHHD3b9?) 
zt)k|0T}_S1aEi>1Rj22R<9bUd{jb97W_CWvp(^eUoU)=6%C+CP^!RSo2>#tMdf7Te z_84&P9S$a>XRk8@Z^rV@bK!zgzLp>IzK!btY6%CQX)UE6w;WA@HY|wjm2jlLnkC7b zg5vNxb~rB%i)FZ1AtWk@y!=hvt(6ho=-pTaL z;@P4t-cJE;QFdBOqdLz-R^qYnFv3HFVIFrUS(Ue}Ma{a6`mwr=*;;c^$kk2u1T!mY zgRXrP;+e56IH1W=`qqlqYWM4EG$csbxhWgLeBcwgsR8Hg%Dz0~jr2fgYQP8lgX3Y@ z9vqz5{gs}SmRhhPuL2(cf6uC9+Q`~b%n=shh#^+xeQQxjw^2)u$kCgNwyKWUN{-m- z>PZ^&6_a+1H~UU=tWoxCCI`L0;yHM_oP3C5Xx_(3!K}I(csV&SQc=9yqV&Z{3ZsOF z$Fr}q4`CkON{I^|?q)6u!mbA4;h?MM7$y5gb8Iu|&-a?O2uWM%ffdi8to+zQk@$_S z?oA2j$Y&RDD=B&ZopO%FH;%wA#lJji!#O_jx+qd5U=XS4#X$mbrjlIKMXQ*By)89Z1Utd_;GdBcSd&CG(qM~KpHEX8{qT9#nt(vMbbVrQ$t zqKKZI+c&4cuDp?SbTC$Uph6IdR4s2(&#yxKwpqO7R&qg|T&td2Mk#zxfRc4Tr+``&?i6h{6 z!98vcFSvcH^mKOvyX#6<%h?shv@%)7nEG~h9N+|YUPpI+GU1axJepcUkEz~n!a1eA zbhS^il5r}PJrj)N3j=%Xsw9zB=W*f6BrT*I=SE#Y!Ir^KgTv_uyH>*g7s1<_yJs`) z=0FBNBI@7lNW6h_AInq^;!_K`_=$7zGe5k3*4l_%dNEhe4Ihe$6ZZ1HcsPTdzh;cA zn}#DhRWs9tnX`qNfk|ebjhnGeOGzs%eX{eW%y##xSyELWWQ{yFiFxgER$=6}c8TS+ zt&sSEQ`DYb-l6*I?uwuH(;Ru~s5Q-of<#GGau(t8Q6S1E37}TdM)-1dQVy#934xaKQy z7M`J;5?mp9i8`tEhW6%x2E9}Ekgxi(Q`i%w z2lBs@l^&^u6h%`ZFCh#h`e|qT1ekbVNbj%vo%cq1zwCG3m(cr(9pA^@&yJRgJWoZz z{>F@|+?N~@pU)ps#^b|?Q*%2UK?(B)^fTGv$Ku>c9A-nMmoM*p>X*C-L6Ph+EM4eo z-ezK(Bd;Zn=$r-p4rxny0hNt(X^>F+;K;Ohq4Nu>PL#IV;HqPBj!Q4rGDAO!3gk7E z-TW=JyuF4Kd=PoPupj_BsLYv58sMsgkqIzT8>^nmYN+C^vX`QnA1kKnqPQBk95UD& zc6ti)k6I0A*-I9ai_|X8Ye>WtTE|_=lazz`aqpq{iM?0el_5^-{c2KDB$&i9T)Wj< zQ7C(-hh?uVVtz48I)P_IoXw-Z81+g;8)c_2rXxGqqAV!4u==LLAbTqFr7abD%5N2V z*hg6s!YI30eKrI8@;R0&_-y%+wrt+5d4tS0kp`5`X3y3bKkgNsATx@$hn6sHwaf~| zmaiTnlP|?ji^*Ju6&OCv{WIXCim2?ls!gI$^pWcn&9yVoMfm6v)m_IQd^WWGy z)-P$=>eJPJ5lX?=aK@?9WTRYl;4_leqYmxHsMUA*0gKMG1(w}(4k9T+*^}vLSx-8| zt5xTqK;KI3doxap`bJlS5Am_ogIAy zW)HXdvJcDD6N_!8sv>8yYkw^7Hi|2%FQ`?k>@mMcVx-d51&$S)sWsWRT%ojYQ{JS! 
z%U0tYw}cglF&tmHpX9{yhAlxlyNtSH-(|SIY1OcH9SZ(8M#E=2Bd?5xcc^p2vzRGO zA7^grdC7_Lx8mZY@>iQ${!YF)UHMD(y9Fu!;>F_dqtqk6_*s>j+WROuH`AN=k?AY( zB-p5a6W6AMsM$K=7}xQNW?CITLv(?nvDNW2L}w|Qv2@uwV%%oTww#V{rUxZ?=iB~sh!m+hslnvDI#5gZ?@YQ0&&XULv zR;2~!nxTt7a}gUku_$j^lnRM=@yiyWi{GRz{>I|jL1k4Pq5PyN31R|>1r?uY^(iTSMzv=DDv)OBN*JL$g&4& zW4byF1lyFp)XC<%A>h{{(b3k`)UiO>Vl4{lHfk}HBo%GLP*RW&C2e(8Ar;7yl?r&d zsipu`Ll>CwX$IG$Jj;}Q*RrP;Wp^y^&4q~K+Mw!+>a)?xkS*Sve_52C5Ac%5eSGu>hMaD8tE($wCw^RS1G?+iZxmknU8lg zX>Fy4SG>@wRb^9}KGmlDWKj}lK5nk0aA)-Zb;7??HMVMAtWv!i(5-gro~^!c(m4en}+;QO?&=+X+L3VjSiC#K_gql%zbss=UVrjr3yBXjk%} z(Qa3kITlohu3oJ`dN?WrwPv$Ot)EaryuzJGt=&sl7Gk4T%pWZnkB4hnv)A(=O9|wb zUyHx7@?aj763jz>H<`g^o`Z&Q zyiV%IMi*G668Z2f(PO4$*&r{l800Kjk3mlAqddMKxsenv-+vvSTE4U1=HJdrF5lzz zo9K_o%9**h!_+Ub=Oh@W{@HxquHDQUyVKC~qZ0PD`C=*svnABd$e^M^;>SSK+e_!e zlD=#hHbPAaDm=R^h(GdC<4qYuN$-aX@GMFRZlwcV2@2O!cul848O*o@i zIZgdP7PHk$;7H~Qvg*uKtNO*Q3e01gPqQj6Q!CZaKQgM+W&hJ_yLAG!9wgql5~i}5)?r7hy1OVJ2yjvP1~i^YeO zc3y_P0n{^jA8jZf<#{h95z{8L`_o_k(hX-2>#k;A3i%6?t z@pPTizTGGz_a9_qvS@97ENAlK%cm*lmlzlQejK~Zmz&HbY$ig<9epz&yv<>?754wZ ziQahznYY7?TgDYiXkges!%<1Fci#6=wIQXK7Uy?7zslKSbDR~H*d@<>K zBxUjjIiC%@lI1tfK}O%B%+OLX)Yr3*%2v-W8Lq2*v-`D9#jIOu`c2lK2(0~STR(td1-7sk(@9#o=A37bVwve-`FmAqvQ|& zma@VOcG_-JMUWFnZ_-1cnZOU z5S4Jg+{Kgzl<9?~y{cBQ777dN@7d|eDcg~#@jsgjUX1v*mBjMypDjtEYu6k(`ZqSl zYv8YLot<>BaH6G_KZiYTWR(o|?@@$OknExqr%Y?6V$s^HjlA_Mm(=DDf9i1&c96l!QJ^fGS&A%p83l0??(w5JU-dcdX?0!>#I@lv?gz zvw7`7?YP>#*y+$;Rw&tzC^O)U_M{5sx!KsZ)L{yXrPrxydxi>;57+=HD@iic0X$jR z7I}aj_uPgMSR@Wq8YT`@3eG}nYXF;tJ|3+e2b+p+n8g#-1+3UXe^wmrxu+$wHP_S9wW<$pbwGDOgH;))VWgmSkmq2ZwXUk8 zJuSIa-{M^h8G^3Ns*j{>qFEc^OcC>eK0&Q|(_B9^U(+K1o}5rS)GCPMI-Ldj)zn^RB~!dyQY#D5aPCWaIzN_|%WYgh)kHq4CdMUI6J1}DS+N<3 zys(lLmH9e-*B|+bTPSFnBj+C!*`$Kn4wZUxr!JOv-;DS$O?;Tm$j|@Y(!s&;bs=%P z#;=UGCf+(s4p39(~&Qg+5msZ5p5RuLuh0Ga3gdEEoOd`^|;+$Er_Tho{ zh-W1Axk6TDt81;InOVgL|N1H#dleu2>!D~Y>@uh7EUV9KL@y34&nwoAys2p4O|BJ+ z#w<)-q-aJ8Q*#teF*oI!Y{R&p*~oIoi7dOCjf$L8yOn 
zwsp?Bk*zP~eB>IWXbQaC4yU5&@8xz>Dw@7t?&#Tw+t$<8jrcSJB!>kC0>&#B#A-5ZxLig8^zC)UuKMeSRqp;67B z$<1HirA9Wtzccdb=0j{S&vhOWzORfN@*z1yu5G3w;=o>94rmrrbFsX4rm>7`mX+f% zG9|g zY^sRVyt?I`ri!n9+l*bP$Tb|uY?|v&g_|kKcF*?>~x6e&!@>-S6 z)EpUmJ+&!vkwjtxuSl6OW&P3hZAtx6x^Ma|`Ci*H=%?iS{FXs&srQ#3PBo8@>!iGo zrONAfIQ9L^-%{_J+R6LsQ0o1SzoyDN(C+=l_V3%-r~5Vae*K|T{Y}57%Dc7Q`$6s8 z_sEs0_v@42e_g`oO!;a+K4Um`vKj}Xy!H%Sf<10j-3=I(DHf^laTE+>MaBiM%~Q~vAk35EIsEG2cx=Y`~R{&lTV(sH_pc&c9ZE|zA+i3HSewU zm>B$dk{AF=9FZ%-Mnaik8{Y=&rzGe#(17qTw= z%*p%_8>=z%?d4^YI~%SUJo!%#*G$~NhHDmS#Bj|bb-3o=_TL|_*}%HuII1~9CT6GZ{o3^6(c~9-*O0i_VyuevrXWXPg#lo9Q?U%Qi%;!@eWT>kb zD8xRb;B4KsBQ@-krfZy-&1J&_Y*QaGnsWQrxl34WnnDuunkFtr3WOq-b|NO$*{0=5w`KPVqz=o z%1fwQR@Kd-yc^kTU>(Y9xx3K1O}JO2sRwQ7%r}a~?_v!sWzK8yTy-yVAosBe;VR>W zp@S=DrE_I`HOI)#TzTe^U3u8bTuC{c%@Ar>%CP$86i}p|NA0|oB^Kqg_-MEo3oo%? zq7i9i8p3)Sn~R|)!n0OPe=tcrU$F5*w&?mO_Xs}XrNuT#DRRdN>iBt5p10_k?_|#c zeZqxvPyi?s*wn7xT`E=e;1Xt?0MB(Suv6PR@$r#Mt68DhQ$`CNZ4uvu5)=$9OW)KR z(`hAhkfj?jvCxfa{$QLs-!7IZqtNR(UdHTOsxDqsmnmD6mc+!YZ&fwxS`8=b_fuFT~kl3qi|x90OjFUp&gF)SEl+!=Y|SIoI$Qneea9;S|CvlQg2S;)N( zI4!;|bzWlav|S&cMcBvOV5gEorY_jy`ds}b5=`tw;%2k%@a=9JHluNb+r2O??@x`L!@k&!P9*D^QXip9|0vQNM zJ%dP(nyfBLN>&=U2>MCyzo6H#pX-p-d&z6EuVOumLz}Y0rZiblQ*Mv+ z#gersr4da_9h-dKtE+vL`H(F2*{+Y=%nqOBet3vY7gtbbn*J4w+rU_{(}|gIHOXGr zcd1L?$XOnll};7UP*~wh7VopoGjl&CM;(xryt~%GpLutaLXqgrl)WpovS+j2>OF;= z{IOgbs_H`hZ_E&SG|Na=erql{;;qPYUD%4Rk5h*L^JFmzqGJItwwzKq4-cw!u{Nk; z#Hlk}`>j6PMv@X^OMlGd4Fm$8jV5>9N`Ln@RXh@8of2#YA4{5VC4b*bHg1eK4j|NU zN$m`T!T#!DY-L_~$tCNnbXKL}RuR5`#~6cRyOrE$Q(k8|ovJh^vUZd`=f(Ubx5i8P zY2%m&_F~*Fu^4bPOi&eS>n9iQh=lU_M5Z25w(fN#$&{JQ4t>Ggu~#8cQA71Kwn$QQ zJiE_50ZM6uuI3>YgH(7_=gTAVVRsX5OAorfZecy@(!*8LWZyhCn26aO*;_^xRaqOU zVQNs9Zm;SJ_o4--Oo-6aNYa{e^1xMNMLuMS_^@ju*?!+@IAU@e#x#$UJ@eZzK8Q8L zeElQKo4Ko@iTWn8H}_Ip`f{&e#^xmFSL)5p!MaT`k0n+J9mUtKt4{c(g4ZO}mXGVx ztNN0PjNNg4?fVCE>13lz6S@0vti$x=4%RPcos8U@ugsK~9g5~*x}-uhhn)QcT}Cp0 zRgEUCN2S_9Gk2FLPs$5Sie~DS7*C`W@(Yy~Rb!EMi@!nY9dYVp*8wcyFrZ0DO@`Vs 
z(FX31rh$RaSw&uBi+|`|*%QlK0xgZLJu!EVXx$Yt{K{XOkeEDud19)Qc(+-d3-6?T zOIScq_U6ugH|*E6GaAS>o-6XYoE+qK|--RxU-?2UWv_bS;j3 z$O+XiSIYm8ttZPEE7l!$AILnK$>7 z>h}9RPAE&+?}4AX{?B}BEST5>QoNTh{u``lW;*yN*EmIEckofJQHsXm;Gw znl;|J)U3f8x5(*zM4P1??Qg8UPPA<1LIRdoL>l)FTdeA(S)<&a$ZhO8@V zvy`^!*5Az^ua$i{Z?Kg?>khKKaJuX>4?-1k&6OE^rfz+2wur5&r`2a4Wbu3&ws^<1 zwH_VITX7Di>e1bgEa;71VW>5$qImZNU-o4--x-@NN>_`wi^VrXZ&Ai+Ext*5i+8M+ zEr==IRmGN*QtN>9qx8M1FT-o8y_VrTszpl0xCmy zM;5WwjY|utqp`)xHW{&!L|M(NKJ&cB2tMe#Bhs%h7XLO5o1%%AiN15uI*)AD$6`(6 zPUXwzH8q3PY5IMGd3vYxSH%mKp7AWVWi($qe@$HXlZ7I8%qxLaC2Y)w?W?7-m2a)7 z)gv#^f0I_^o_w}s)iWR3=2MB-RA%f7XEc?SU|6pyOOo8@Sd}s@Uk&?w7Hcv#@m1Bl zL7uc8m-X~K4JDCxV7Ba3z6ooBK`rAAi88*+==fq^H>(o1N4oZk&mtuc0Hu>(bwv3O zpBX ztVq{X30aDoDgW^bsKA16wCMS~`vio1KASN2mKCzHl70CvmiKijuX|Yi>dT#8jfc9C zd|#JZ=eIALs9W;&icHC-d~f4hXHduT-N$F|U4b^_$5ln0RI{E@YBl>Ys@eVztYH0{ zC!r*&SyAt%t!+`$qQ*_CZ6}D@cBD>p;3lAKVwu91vjs&8wHM7~1fezh!n$B?$86NTU!T}EZ>EZTj}_x22dzazsm{u>mj0mg zu&(B&82_C>eLda!7FIl+@h-1q4zqwCB>N^(C9_j^4ZiCU8JI(%{SE4vrPN^=BzwLJ z%ieKqt^M$DKbl25BN;hxUK_r}dI)>&hguZfcwe8Nx!B){&!Z8apXIRHZnVXFj>Sur zk$0TFwI}s=OW0zw6R?_9E^TxD)H)EG)0-_tIw~)^$_*`9mJwZL-MBXqhTSa6ITS)a z%1^Z41}}DpP|fO@oc*M1q}$Zu91 zKSuXLc80Y$YI!kfAo+Va)*zQ|Tk)xlZy@)vvAj1UmuDlCup5Aiq#W7Xx4dqdmVeY_ z(?b61B9ExhyLwVlf%)E8-sfjh7+8IkS~j+6B}bR=QvdqyEX2j4z^U~9V(o?=b+Ud- z7S*|S^!)U)Pc~S+gV9~=hSrmOy=7EeLDvR~w1pNZv{0;Aaf-W3OL2z+MT5ILgajzo z;_gx$ik0FT3dJS3y9Eyxf`-fc{wf)yPM)IoP5_e)1vcnXm5ObA_;_0`%CglKWN~H(3Vxna!CzxU)zv*4qnI74Zuj zwX`~c%{`8Z76aY6yMp68$CgRQgyR_#mB({KiB#J1@hRrNTsGyhC6KV>ghJ7FQ<+t2 zPj~WNGs~)|Ag#O75)9jtXWJEFB=bD`6VD(NYAuS3?uK`DT_5XcNN>mI+ctC=>1j!t%(8E<8%yI0lAKafrwy&N zv!*!gWAx1agl@`aiOkfm7Pr|Bz0jcq6a(|1>O)F)RJFtbHsATxRd2_17NQ&k68~0@ zG~)Rzrql!PzHP2}BPie_;DENqJCNhh>cFs%<-)DL0PAnsRn0oc$Hz#bfIc$Th=$e{ zy}5+T_AdoZ(=ykar1pC`KV{2lwf1*qLsMpUijH4RtF0)%)fUpJe!AJYFymK#W_nQe zo9AglGua-IjRnNZ?2}b;mci$x7tzJ|bJWDZmUs{ZGTamx6 zY(7q8Y5KRm)lAji%chN6IM-_GF7u!uFWqqK?R+$;xBLO1r*np zD~%N+NoT=8qzFH>Xl|?Tk2=rt@8@o+} 
zQ!F?Zik*n(MkMNLGCiO1RIZ#lFmRf>7fdg_ta;xrQn=chNu*$-&()e@h9U63|W@71as zgyz0cVHvO{>_S_0O$!dNs@#d(RUKWlpNBS4W%8?fSBvN`m`D^WTAx0~ZGO-t*0v9T z+Kf;(WKwI^e6BPHxt;rAznN81pATJR^fOoJ>2DEmf^c&yTg`?2&!1-}vl| z{Enn8?M`9x_;D(S_ol-7Tj~~8^d04*utqY|Cni}#NfWrE9Ybdp(*|7o(yqQ`I+>pd z=0#Dx$vgFU=L>U2D(U2>Q4-`gh7b9wE}o{5cyp>e<@j_=UtWUNE>t_5|BQH8TIt7{ zA%DVS_=^uCRic8JusSBcawuc};b{1kC|Qtn$0|+q@J8r~2|fcf5i)^lo^}F?Igd08~v;&cP8EQ_&?@znt)H42K+My%UULpy=SVz+0e^{ zDPt{5&FX=$)aZR&_T&LpJmT%ak3V~1j5p=`<6l(Ee`lUe9wj>j-@<((D=Dm27Ha6% zD^Cd4J2pn!JfR6qjXG31?TER!)zi*;>8nCJf0KFP#`~oA_rx4i+3MZ;25z>;&779< z9PLYEKL9>I-VwW7s^_|tcQDkxeJ$-DFin`YqqqP_j%Dom6}9F0fgO|Q%guKrOy6_& zH||W$)Aw>U%bg+;iikIYv_NpQw&(Z1CfZvg9bdW?_SnyOWb)>I27i{hH!mk)|Dq+a zV^k7h+V}cxivN%Fj-II~Hpg-CNk(hRLWw%!mweozK2F1zw6Pp(Pr| z^UF6VVz&5R50OQ^v9ts*cUr-5x~13Y^-ge7yk7I?8;$Al6xs%;Qfj3&slJlD z!tPj$;;*i$TfI9E!)#->58o3Wef)1{G;VByi~PTFYv<+UZkB_KOe|@9c{g^U2DeeB-62h-R@V|(`B5D-uZORBb^3QS>2T{CB8Ao zbkZ#eKbnP433dr<&NH3pOTKa#ARTQU#95H@(^paq^%5Ls6vIva3V%y1IoJwQ?9rRg zFtAZSu+Bvc+n*o}=0Vd0zYBcwoSJv6mHoAFGDT!F@(vA@GU5x@yyzk9be55fscC;c zW@Vk^hi&_^zco1ZSGm6mQ)!sL`q?1sK9ZMKu5TP?@R8)R{=K1+;!J}l-JwOi{Yo*_ z;pXCw|16*VxYe?(JN099GGH{HxTrbuaLCmFB)n+hoQt8fSM}51(m$rY&Q!*GzsQpR zi&C(IRm(4gd4!~$^lz6lLk$|E(DLIXj@G!0fdt!*tCg-WeA-m+o(Rb^_t=c~s;aY_ zws@FT#AOP3OvxmnZ8w(oJb$Z^a6FP(!+DO%x4c!}jAixYENeNN&m8fFVe~*AC!(Q| z#Umg?CJ$)q?UY)mJcH{5AGGjdhyY$m!NCw{lyC^DFRHJ@9YG3?E!H^1|1&YkI-TeNWg^oy-B zrFg>zZ18@FNB2DJZfcgUivmUIrf6arjF&U+e*TWM8zdZZ$(H#TUW4%(&h}+pqcvAk z+XBbl!)8UW++FFT8e8?6#i2p5?K8G;3d4~sRrRS_giDTt`q3!rFqrtIse#c19pmduKVk^ zpQ5+%guiDW^g3k%_EXqT>j$QuNFWNmR?k1OqD4oO+$wNQ!sv|=>jqmBnZYbU&Y7w# zJKQ=!`Qe+>Vv+b%H6XJb8et>zoIkQFaFf*!6BnH%8AJl_evXe9FpiK+@-PVHDV(43 z~_AC2VZLLuKP)(Z6a8#$ZGPw`CiNhsVPI?xwRnDY1 zCt}io`t#ST4Jq`BBj6=~j-yxBuqG;F1NI}R?*_ZWG^WxZWj^DRzhbb6;~@@%j@|8; z%Davv{*q3Dp+&3C*#NPc;c4jLp*-MpnM_&gnh(Ba@W~&!6?vFIvDi7JNY;fLzkFw& zE4kn^J4WD}YvwCAq^@%J*5~`aF@Sw2VnPq2VWcP+;GO@GQ2bow$dL{$mZ*vG$)_&J{OSR)RYo5Zn&Rt>d z<@rAxWSZLR^D!Lhn$zE{Me`^Mo>SaKpXINgVO;m%vPf^x8RPR+9BNhB9Oq+99@>sF 
zBki1i49!$sRK#!yB@!Ixti`=UmSPbA|>G!+>2X@6ePt{&Jy1IlEqdBz-&+U?ZQDXQ>KXW?N z!Gxnsif@SYR1X%2>esHOf*QmOUJ7E>inGMenltO1)^ME{6EFn!-#R3tiMYV1Vu^s> zAtPUw+QOrYoJ*444!!jjQ>!VMB09jSZB*8xofepg#5Sf`{tb^G4@H0aNsY0kfsFIi zS90C}$9|3Aa;$AnK!=kKFgsIo!_8}ofg@CA(*Vg^6Nz4N z;%=~Hq!Vtrq)U2Y$=m$S zAuf{j%LrUJG&1+{j33@7k~Bspq( zX;WJE?!qyEmqFq)6-H-YtquZ>Ql3U{CQJC~u}qrthG-Tq19IYv%J=htL7&xx>_+Sw zh!gn@p!UtTt*RYI;=!{!u!LZ+3FQ*8!0*Py8)qGWp4Sr?8|$0-r#MKs>2ZaSfQ`hj z5;4{<#s(=yGVRTcbId4U+Ey8%^k#sGuR-4_{?AVpQ>VXZQI0*p%Fp+vrHjRH&%KJY zg`H@0WakuDjpx1de3wf!r*>MkB6`hs*!6t&BlO?!429K{UD!wAExF=;yyxj1tFcp* zpCiF3aaBFL4O(P=lU96Q(H8Sr1NFaVhR4`ziH~OD(QG)#0&RvsdOl{n)MuxcQOPR zr_PvB8_QCy#90VVg5L>7@N`&yHfklv&zU;t>oHq-Vczn~3^i5Ey=iTANI6nB+4ybG zy)bda>}&ur?3gxXUk00HYje_Hm0(PMQ9zs7CSZ-F_uZvZi|#<8njD+bID?jdrI3J! zTNsal-M;Ba76B$Z28lhH@gJfMEoyW=I?QOk(gGiKB7k1RdWge}Xo?f;`?Iz6m)vma z>9^OF90Qa-6K+uc=c3g$Oow+TZ@r?OgX`J4&$r^fL$!v5akK~>1bJ^!ZmNSvr?V6r zs}C33%R(t2*+MQ_E!bGK{_aK!SGdP!&a0rBQcD!SZ9+8#S^6Pn_|NMoj|{x{rIy0F zQJzK2(3(23exhp&P=ZMc$#JDh?)+B2zoV`3Oc`hO;a$-?H`EP1?DrRD zwwNAsm=Jj6Y!<@E*!Qb~*!5o2Gc^{pbu`AW0JhKoI^#ux3*jjULOqAIii8x_Yku5D zw%mTRFb3hG9rZT z4|cP@U1V36F(nf&Y4me+Sb{wZ;hvi0uAiJr9aFarG6Z@a=e-nqpBK3&Eb2d}t7j(B ze>v5z;V0(Al|8X1o+vpzNT3&2eyE)(9JY4=d?3@{ium>LbKAvGzk-1!XkhR{hApT= zGjWTYSP{@p6$xA0Xq@9$dYiZs;fd>IZsGYG&1!Gg+MI=WUQZxh$Gy~Or19`RP5;4c z#B1o9ntm15c(|I_&(ivk-9&Y%K^~s;34HOF{c3qo)M*No{J{-T|GvSlTyvf1-Nv_z zs~0`#W1^lSx$s3Mpd;fAZ~n7Cs=U?FIXXmDta`uB5?r)ZwVs+i%MP7Mq{R%d%5;2j zO6FY*N)edAhqhhEzmjy{3w4Jo!HAfIL!YSS_Pj2<9ilH$w$4k}X!A6Bj_~{1l1ZKv zW&wBjRTF>H%)X4&AmrVvc{eeWKr8nIchcYK)ATU%$ddWc+%WV|tDjYl@9UiQ&m#{Nku9AJ*(ch!%X1>wr~C-g z2LbxEA?nm5`{f_+)k_^ej*ty&2dm3HpXQii_~?m)AkKRh(jr~D56}5J1$2cS(|$%= z!2l}-<1lDp6a**Jgjb8|{Vjha;Ws!? zc0)x?f5$q-8S0mbKbSDy5*%X23Hy&vYNuW1ci$ zXzpJ)HIbe3D^wV|p}42iPt^TU*QT`Tv3(wjc8dR_xKbK5-IVP|BgW4V_4&C-#N zBgDtHgviww{7a)oS|K2k7l^1t%~iOA_d{izsB}o5W*#xNHs@4kNQ;(A>IOJT9biL} z6H`F%Et_EXC3oPnmMA}J(jDZLM;>yc8^F#a)&D)Dg9#ne#h_CxWHB-5L&zT+aF%!) 
z*Bwz;;yE?|t(}v8>Dh2|2-n$*F4u$Sxninwu?^eVX_oWf@fTC!G<6_3=IbE!dqUGm zuvoW`-$1pG7dF()C*=bb>j2~DO_O|;t2R9YZ?T~sUdkLq7q7o`Qz2)LsVscF1Unav zR4bVSBc)xnv`$m}f_d!4MYqtwX{WzQmt_c5Dxr?i2ht8kEj(x9ezw15O@|B$>BYuk zU8O#I(v9G+pJU6p0@sI?B?&MtK+m6bqEBI(ekJ;=?nLy<-0>Tx3nrKbjWm{V1ijM- zw+d~uCJtjamaZv?dF?QkwbMf^3tb3}Y3>CD{w+=0g`vjLJ`5iYJ zV<&o3kRJ_pNFWzYvb4MK*U7J6o?dL`k#VU8KJRo2ecovsT=RVlmz41xB^qPhm~@~y z&uhH391LHrclcbY!ELrKyfnftqSP7a4^QLT&~D6r#7^&b6vJb zsTU@HpYtK42wZ*$#S3(j5};+x4O2g(&+60&5Rb~MamKm`2=@3v#kKC!ML_TrBW;2I z2bMWTm$%ePHUa(|Jgi-c)TGa9|5Tiwj-Z|Tr5_q6T6cFGRtTD8%iL>5!K1)ez%yyJ zKYu5=(9_gupP}8?{=&2@xTS6ap)&?Dj~aOVFz*^2oq{|ed6>5j#KS2|f z*d=;KU*{NHEls4uI2U9?_{kJcT6}HpnJ&6UXEe|J3GpZNcb&1_B@9`Gc8`tuW7TI& z!eru4ogYZEc14gMNH7SXQ*}dNYIK5e-=XcsG6)3`n25c+31l{b zH7PV~jCVA+pU?-M`39nMVK8>S{GUFEjg6@x)is~!*NH}oPH2Kf5lHyx#6PI8(LT^T z2?=7RQTZzNFKE5OMcf1}D)9L)%$J?do6sLKF+T~UA;9R8B8p>_rur9DEcI{b<3Ay; z)SppGy*kl_&;{49_Jhy}o_v%d{r1~-e(#?Tq>$|2Q2M_iy?;XV6cPFVAc8UB z(2x1QbHteN{b!AZ!vBVv{|V7&nf#lF*gtu^eu+Da`411VmH4OgJYQ{^`KKBBKh5?& z{*#C%!#}NT-~AKXqy1-v6aNQf_D`$V63WH@?9QHlKK;U?e?v+%_*`hRoloY_vDTg} z1yYw{VMq~RqnpyakRSR#phsQ(hn4C-*-!i*G~WE1Ve-GB_y3>&B4D&oe`-12DIIcY{EXTg7Z!(hOn(4It zPvgJV$GPG8KY1J|r65$!r`nxdCV#oDK0K}ZuTE5jTKz{}nAWf`W;X}bQf{3Cah(3X zA$Wozxc&lr2_tpo1t*$mr^j0{O!-DSf*=}`rr@<_U4eV5yO@QoKk%_J!dmn2u`!E* zKk(y@LnUaiN%#L-zWqIFrRwFSxga$*!Ja0S z+CrbG{YLdDdl;rtH=-X&xX@pA;!RTy6udlE!I@GSpO2`uCz`#)_jFZl}@I|)9$ zcakEpF8`k=CQC2OrEIWXuV=bE z`;Q_&$~z*Mmm6LFzr(^@_}Wu~C;75b^CW~9k329Q=Px01i@ZDo2x~L<5f?+e@RZkW zPxAx$Fm3`L=L>1FAE{0H-di!LLrubbK3ZYBq!R9N?zCN=k*CABLx zmG&F@($G+v@9_AXSrz9Uap5RE0a zM{4E1T^X>t3I6EVK_6N6Ig@LeWlE)dQW9c<3JtqfO634>LF`V?Ea|Ns{Xkx}Wqg32 z3B-y+MQ`o zgus%x7|RO_?q%%>5LtWkC%1`v%@)9iV+_uM+ovA&2_DL{_sR6OuN3qZbh&Dxs!R$Z zJ^plzE{%<&BscwDcvnSLE^Yuo(_!~?7&m10qufhKJ_&;60tScYy&df?iwM|t?ZRGP zGgpat>tDawLnL+e6g8Y0H7|n?X`AP|NM1drfartoM}sULX!p9B#P3#;-2(E|jg;%< zdnSH0Jx8o0djzC-$PNOR+-o;C+D7j@|7o`{AR?gJ46 zpA{K~3M0Sh_f%(1Bu7p}`$>i!r`EP!7jhciyp_8l>DwGw?jK5~n3mo!HhOA8fHZ55 
z_{@0>i~yE2!QUNIZgjLZ%|-&FoAAHg`uM#;-v4a}EWsjxiA}Q+z}6->|sfAEa7tId!Q4H(5-*?YR`Z*`68XeU9re>xaWz~Z2;Z>#x@j4ip8 z*dLoX!j$uoKy=*Pb*IxYir=7&j7CRei7Vv&QyhGs;|zb~MXFlqW6tFz z>$$O4x+BfEOC{FG+cohah1wMe>f-HJHQ^lzzo85y%x!xrQ}`E!*#Cuc&+9 zto#5>1-@mdUgAQI8rbv7Le8cuqT5T-Wk*T1I!sYO@2m&;@f`RnWVgKC6erf6J zT(Q@zAm$UDRI>A&J+H};SxVq?MO7`l>`>(5PbCt5HtLhTfYvJg{uj0 zoA>+{;*S)ER2jHUwvv!j8){!(yfN3E_jbU&-)vU7E`qX=-klVw9YTF=Vy{l%l)ZSO_XI_jnZ(Ig$!er)HAOxc5GPv$$5S5}jRHR8w#wbK3eV&CDg}FZzW$m;rIQRAFQ~k zp}>H&VL81uKRcqg6|N_vfke=cn$)KqcrQ}QUBc$*15m=qyH^j4`+4I5963yl=D~*;>dC@ z#?+30>HFkOzdL%r|*JvfA?#F}dKIrJ%fV({mi$W$YXQvHsl zUmn-LBm0j?D?6m3KHE$6vYD4;3?S=iuOwWC=o;62+`{!N>-hTztie9+kjq~(;8{V6AcTqSxT(G3la_)1tmJ=SO%r#eHob8 z=iwP|j*8joo47X@ul;9#|22L4=XRm!D_-K?lQeEP zvVFM2;?I5;@Nl>W#dS%u2uir0s(_c{(<(gi_@H+be<`4`(wx20RffMY@RlPC}Cep6{ZEF09jR_mS!9H!TnevO>hiIQV z?gK!0BUWXg?}nAyqN|eWk@goPfN+`=h6-)%)4A*lC+7PZ^fyXWJ7c-0@oWRTo4g_W zqQh9K1#+_!t@exU-6)-5-7sosLf2`!3yM{t(Pjg3GWoW4vZ^nRYU@1KFKOBniT9(}Y{c#3Vsgm-1I;vGV0PT#cTtxbZw+IS*6okgq>Ggm zRZfdiNze9(@2D4|@dGm%{yFJx)hLWcDNU z$vNO226-^Pe>WXE*@OvQR`Bv-WSs}Nh%PnnV&5qCKa3PYb7gZ;GZ*WI%cIe%6RGL4 zxp~>cQcO3BBF{<0>H|_FR!o?-hH}0679||&#U-_R z%@f8|4SevsBnmEVi* zogBT13eL$^R*CoYOK{Z!&WlesC(LzT9!eceUaN`hp^O#^c^+rSYMj-M@^ZeR8w*la zb#L?%*30>>xg`(0hX~qi?G;8i8C@wm5;uYmD^=+lTfTJ#->H8zeTx#%;<2(;SXg4) z6QB%bQdN4Z1_fFx_$kJM@dik<7`BlE30noVg))i13tOmp803gU(FX+8DbhOb@7<0i z?`zHs4Ht8VQi$UhkK$vzZbVri^^!&VDyr0oSk3pM@05P3(Q!84l>sFTeiryoEPa>I z&dI>fmm9P83E7uP@2Y2Lm+(AFco=XfyK#82sd`a%7L}Bey{dlw*hM$pnQJv=*9!b_ z6tICvoI}ZvFZ$Ys2IRXvs-!Y|$+9#VV*k$8Cy$I^)Y7%oVq&h7f|LH}u)n1dGODMAx*C>|25Cg#Fbu@xAld zcB6R`@)Z4-o|j57O#F9NAn%YfojTnm+Y9jp1yO-{|6PtjtfbUn+hI}agF=6sBT_X+ z*UA*Z8$QeI_c)UE_nHZ^fUAZ>AOGU1^L7zvfSu2@r&+cBO6gIJ6q6-4*k@CIFd#2{ zGS!|Tc(MLASDxBUk^i6W-DkP0f57*i#h}vmtVCuh zcM*I{qlpc9LHWkE;+wSco|FdG)2J6BvuW6{McdV1> zC3$vWUT0?x@^EN(w5#`CFlgVKHtw^Qvp>2=rq`;D&evW(Z^-j+zr95br1RQWw<8zq ztLrZ}vxvAK%y)U<`-q^ycdcoYN#=d^DeWvg9`kuOLVsqQFI;EWcOSgl5s4Qpqhk!t 
zh(+^N7Ptq$q-y91RY}smMov;PMQx=!>o7N~c`Wmt%Pc)>7z(!x=dNX^bu;y174MbL zqA2*e_F>Ky147)`*jD9`1%4Uj+DFX?wp6CM57-4cgYuhNLi4mXg^UK5hZds^Yr%IK z{>heuMM<(zW5T`50cIi|{xL}ZpYHp|dyp>t>np9pgB0mAPxF#3#epmUpUutunRF2T z&|=EWa5s>@CL+qci~oMxV`Lhd$buRn|1z3GY0R&MXgUG#-0yUL?`HF3Q?CC!Q9e7IPEv9!ab-d6S4vvolkt9xFhCDTk|q4owb#|P|Mo+%maY_OHBw)iFH+TYb>0dRIaUFX;&h{|do5t-B9%tKgrBYjHh}te znRuq{-RPKxBGy>d=`l+2hosxB4^p?Z3An*jLLt90j>d@RtJyUpAEC*oHwZTaEa$nl-_P>n3ce32=3fS3P zm~*^qV-uV-H|l-QBr4VZmfQJIN_nntEPf@dr}Gq)v|v){F8lQcm0+mFe&-*v18~f^ z;~AJBJikT=WB@#PueZ~&M_)GZTIa}Di&$Tr$J={k88HvLq%s_I&THs#TJOhuA<(@2 zXHSLa;uKacwT!&4br;lt?GtB8@2sMQJj1OdKj{IT-iX5R%N)+dU53RJ-0&#>tU6!A z-6WTp+YpZ_N6AW!6YOCQ9d~mVs9O(|Ja(hu)VZ80B;k+H%ZCwyBcRXX8b9`+LnB(j)eOGbU3!%ILQl)D1iyNrj(*L;<9lXWm@bMeMMxh4smyCS+aa>`=;B;-=J z$T)RY=nyxb{3b+FQb|T~ZSUXrjJ>L?3%BhfSL4C}a0Q%fh%b;3Dc(+jK3Lz{0NA^- zFyRu|n+3Kmv$OrEOXdT6BFPD`3cgnER_ZmPNU8w5%wl1b*pA1abRK;d2(ZTHg;js2 zp1-E=hOz+kS9eWzZ>;tXR5s%_YK<8oONrJ;XXArHQD@uo+1rQqnZE*bspqmj)?BF0Mx7UWKzohj(;_mPqLIJ5haVu)#r*GTT zX};{rDQU^Awl26~BiiZ8rf!0r>s^Ijd{{ZP@qjPaN+>^ytxLaQFV&&QGs1!a5Wlb* zQ4Ie}-2lCFXy?^gn_itTA(wnW8Ot?aI?t4CuCm2FX_9L7ecAxV6 zL9n1%A=|!=xk$Hi_Cjyg0`>xUdKRHSTS$!n^18P5Qa6C-Rg_$775jQdyzbdrldJ}# zSBHieM*k9eK@6aMTV&viEJgYC5{Z%ok>lEfqN4}o`nEDSe4{QYjKj`o@~$#6PYD$W z5Ihu}P{|xPIl_CWTvCg>lgIys$bG-e~&9 zcZ~a4!_g25%m;}{HlP~t>e<~zR-wKmLU`b$kq>&$dO-bQ?%H^Dt(WApB=A1dHrySo zc2AP_cdoD1_uh{8Vl(DYnYBB1l%?!$%3ZMzKkr7*iMmVT!8{sCp>-_PpjqYl`=EJp zXsF=R?Xz)CpP^YWN?Y2x8m@PnWgDg{o|~uS#vVD@TKF65^-P6UgvJfH=zM8i7IfY7 z{%TWt`P77IfAM?=4XSih>@zl>YXSdydwNuWNIUUaaT+@ho%%;F)!{Nb4Xqtqkcn+zJO`}YDgAULzqnLE`h6kw zbbh;bt&X_v4k$uRcDy`-TH&)f+n4N!1+@TVp~0md`{y)C76+O!?oVX;c{~ai!682m zK$v3GmB%>nl=BZW;N-Ny1M(4={Ae_JOKBHp?^qcu&!f!19n~h24pXH5+W}S1drS!i z;4Fsu4XwBrTors}RQ!b1 zOc-@#UnmiH-;d%n$y=1dOY)w?c*Q8(X7n~7mbzsB{Dx&j7>sBR6dUA3O*=x(NDa!+ zM>9)d#1Gn=@iUxLUS$U)JoBzB=c8Qb!fxSk_ZWaa^vM2_3dRS=2ipc-XT2$r<@0!V zkTD~(NiOm}btBCSkMi1jfn*q0o#P0Ie4O~6N6q-lD6^K}fHE8LDrfR#_d^0JP8oX!iiXXT0=bRNUNsf`Cqw~Q>~Z5$&>n+p$l 
z=drky;~BS-giDukujj*Q6LUvNo)@j`U(oZYAT(6O=*2{X{ zQkL-1Wn#W<^?aX3Fr%6rIPTCqDX+lOJpGxzAfu%G-2|>=K0hpA*dH+1sOFoKd9?t4 zFu7`=k%-3KKv5^NR~p>D+IHlJ*092zvGryz+P>VZo$(Z`Zr-~l)ENAYz5H;7D`_1- zXtYM)d{5W%D=cQP9tyGDJ#-gKp;Y4bcsBxsOkH)?gH3xZo6+~>F5CLYdXC2q16aLU z&vkODgdlB_Hf?bLMXG&v>)%Y2xRl}#Mf*WFcd3)q6U_?}VblyP(_^j2O%Z2{Fv)c5 z;jDvKb>KJVyJvu_!`tc4PE6fuIRoo=9PPTQjI;iVzKqJffreu5#YR702wf8HQDkQT z#N}WV)_fg_wQY#RLuV}MlHdm{OCNbRr4yPi#vt+YoU?W z)Iy5HVNv;!bFAd!%Q@HJB50oQ4J!U@*JSj~fIboBt-1%8p>Rr99I`RIx-C?j(W;%8 z>s$2~oSRTjm2N%gvX6CS{dC7K)*u5APA1}aECT{yBbRU(5}YH}NoM9;kHy(E4l?fS zW7g&1x%e!1Q%LAgE$N~EG4Y76)izmOR`H6RB!aT_{G5p#aB*iE=KC@N6%eu@QT;&u z;ZnQKXEAr{p=lFif^#CAIT6yDkXmrFf?+i1ytznt;X_>wLIsz_PC6~XA(p$E8Azn- z0l&tE99(j$LT}+VaK_I-Q(DfA0l1u(SKwybkz~2cJV0gR+MnKAcH>VWsConoukifd zThMkNe)PcaaS1GAlbjSllT5HL35$814cH$an}@%u*#emC?ur}oG#>`{0h{_)KE+;A zNv*%eGZ+sQdo&fmSswwR8)-#WZ)qVrsWGtalMt?YAc0 zZOPJX)80S4+lm%imro@WZy&Qfp3gA-u0@=;BL+|p)VM~U&Z&Nv?lhBnQss7J&L%dU zNt&N!AT!Fm681SZlhe$XTbZx?WF;mZVi`9k1*R*VFVACVZs+y+;WK%zube$#jkC=* zsP3(j$0%R8u4N8cGRyZlwmaA#5P;$O)$P4tU&{F!3Xob?yMJDnw6*-BpuNSHryaU7 zAozZ8)q2$}aYwu(qn^ecB{;QAon*6I6E94-rVp*T)Hchg#hikV`yRXXL*v9%c&}Ue zp|^EEVNU$F>`S-aM+jYeiPu6^ZA)T@X<|xzHgM>81*c{DJ+b2Dt#jJVI1uphg(RY# zlOw=38cN$8n@|=T1GC!9I2W}Y5vjH-(U*OBx0HX6(%orq6drlt+y_dX-%Xz0Tn>qy zA!)T~r_o!uw*bMm<$6rR&zQmv8Hl0xWYQQI71rza3a7q2X;*JNoVy%B6Ir7!UESCf(e#aUv@XTNL?>bU*%*C-BjKZ|zq zGS8^8>G+u=Qb*&d%A31E%=odx#|!?B2?z zNK|qf$ceS=U%cYGYwIC1AScIXzTmJri)9S20<1g$7s!!X$$kPHFyBwaI(Lw9vhJB&q{+oU_p(GgBYX)x{k3va8SJw}R}1LR%QwYdHkS(|Bn$)S z7QJ3LLpO0Kk<{y(-!4PIVxK6@d-l9*@o!`(L|zMhhqC78u3$2Awa{Ge#K5i8+Bm&LvK6e4?n^#-hkjDOgoJm$q%`wEm5$0V-^=NUWu5g8!nfto z&)rNMP4-fKPqQu#D?smec1&hL7t_S#UOR%DR9Z2N=29J`_*) zGW>~lh`ez4r=Z5i%lf1paZ*@g7ocb`J`L$!;l z{h;;3vVB5jt%2ea=X!xIE|E$0Rm!1XaoCSB*0sy-DSbxqMOi$l#_roS?>}X14ecW( z96n*NePaxEcq~(&yi#xK7nM%!7qYi1ooa98nkTnTkXHN?%{Nqj>U@LzQ=RbkPt;E; zKee9zM-W9 z%iC-IU_M9Wh^K;@d1U@XKEGVY>U7kd?5vufv$N!HQVv_+4gFDdmuM-;Ij5Xo##WZo zk(|`>RXKGk^Q6Egd_M=i>>Jh|+@{hVTy%A5AFk)ziqbm^K4Wt3TZW$Ti*&L=1Xk&7 
zru0v-v)u8B{l2)T{i1Cn;s_>x+c~?g{**dy@QeEMz>6v0Nk{c8Fm%Z$`Ee9hXuE%+ zS%<$!>Bkh>xDLK{t#{iJYj&M{>~pE8a|HG2TkRcJHzwWvYd^`0xz?jz_2;9kWI6xf(;9u+ppPGu4qemfPWo)kdugB2 ztjDd<$H_GA#hUVRQkwerdDK6&vH!Q2CVw|mACjjWgL1s3nff@(Nw4PhRF*0E=a~G? za)_?T&`DcD$ysk{Uv%|}JNhZRVb|!RUB6w#-*rC1((C$ljGeqr6W?B-*E6y|35Flh zn)-7t->+1kE+)Uz4;9d_3E1JBhR%L3BF0K9vCMTXy_<4f?o># zmDJFYe$u$?VCX^@?s|JG$G#lr^=Esxrwik?1-c*A&=Efx*9nG>{HaV{{?sWS@(10T za!DVke)$3DNIwINZ6jK#pPlFQ|IXQFo34$IlL{y&jCv=Ta=JD4vTdWAwo4PIw0P7f z>#$vNEOGmWa(t1k;JjYvaX&l5DI;qoKz0->COeA-GJVbg?)U?MwFAci1 z^uul5?Iyd^G5{Tot0Y55Df5k8Kfb9yi%dNV=kEIQNv>_@dfN@Mmz@k9o#R)}E2!uA z^P2IJ_V7h58_LIm97p+$OrBjND|6g3_P=o#%@?R#DXZ9dy!|4(8$RE=-^iY}YUu29 zr`ys2GoTeOVx9$HJx)#ki={!#_ zM~YWlF7(!0YF}~yI^ zsz;Be9x_RpW*y_EQDawR|EuNL_QD!p;Z1sbp{|2-_JZ^%v(l|c_I$+Z&km*>Cmqd; zvkV*TBBuZ}8IoccQf8xAv>tdtniLZ@COKpli7T^p`_NxBTPlXS!5)j-MA7uXLBy ziT?!F}+54KV(EHZRRlGiB&^I}>t&5SdlTP-H?1vmfNA(dqSszK8ceJM)*X`})wCX)q zeNd+4A7}ElJ*Ro@$V)zsK2ddPpd|D6IdnOQJjKh)^QE5W&jEBQw9y{^JVdWjm-tog z@0k}g`>>bl<(PfeFS6jz(~CA|Jbp%+p7*2m$Ka2cD@YIcAXBvnu{4>pYc13^5{;G7U{oPf*A)%4Ad9KjW zpF1lXvI}MBpA_AeW0jJVF00yw7x;61>Uc^iW0vfW%3rlRuDAzv%0b3`%y^T(as8Q7 ze4HE$bsXJg`f{bNNWF+N_L=OUI$u!jAZMegoFrq<%s#rRyiVqOs~oZ|{7kbRc8z|% zl6L5nYySTt{B9cYnI><&LYLIg1r)jt4V_TvvJ4%~$JF&_kB_9+&OAFPWl}6BfSm@Z@rdD(#gI^-dL{l(oPm`>c5{y{=f}8 zcfP(JwE2Hh2SdNmJnyOWh3sS9JKSkJ#-Vdj)G2g5UnfZ`bvNAMXs>l%e?O&s^}IgS zv+Q|4MrR~H`QDV!P37{I_C0u=W*x$^CwZp+sTVc-3QB)V8H{G=q6%H^7yS9o+)aFY zC#b$=zl`dq8Cebhf$MrH-{iF%gQylaJBubFMU9gL%_9DeKDR?b8I#;uFx6k4th^=3Ifu@rT@a~SIS!wS=X z9*K$424E9#C2$My6!0SOF;F=Yasx&Hqk+!@lYj$(>A+FIx;=!rXAk**(}Z{zI1NVl zzkqdcyrbZBjsz9}Cjwsw76WPECLjwu3%mfl47>)6LuDgzAua;02DSpX0BaTtaW1eC zxER<1TnStcWPxXa7l4<6*MMV|2vN06h+~08KpZ#|_;9%pp8~$q(KcWhPzQ_y;=l@E z6|f%o2JpW?-wGk7o*~2`z+u3AV6QV_n}Nx|LBKR%CNKv$Yo&aj5b6qiAGi=mXva-T^*7OP(P?yMYUUO~6&a;2Bu8Sc<;8VcZ3?B~s9{3aR z7VsYMA@B)Mc?tSt3v>>c2`mHF0vmuITq?xdz$cf=fdyRwZn#W{Zv(diUBHikdx3|6 zzRRH}S3pmI!+`m~UROd-fXTo?z%*bcFb9}>l@Qke-vT;;9|6;@L4IHs&;lF{90!~P 
zw5K6M;CH|ufIkCITnpU?o(Fyj{0ev#cpZ3SE82D)$^)Xn(ZH|1iTuFtfj;0(;2q#& zpz#LC5V!!?1Y89yz7hHctOV8p8-R_#CBT*r=o|2Ahy34>;00{hhWx-q+oUamz5&+) z+kl>%p>N-Uz5#m!2LQfXpl?78um>;(*b~?nnDlLoC1B&X<$LVG3mDUh{J>s7Bd|X( z6_^1eZbe@Jj{#2tF95fGSBTqzJAwOw?Z6|z)4(&|gWleT@_-PqKd}A#$PYXL^Z?HR zF9ELrfghqTfKz}az$#!?7xWgG2OJNa2rL4Y0gvAf9>DX!2Y_)0Y!a~HNB9kR9>@W| z1(x3>#A;v-&<30jTm)PWbl(j*-6KRTFcKIKyaRj$d;(Ntz!#_iMgte#3)ur#0uKUD z0?z=G?uR^qjX(;x0vPikb2S5xs8F=Ff?-hk;7`C?zTDoqH?++emmO`x6VPNdxRlo&o{)m zz!uDd)^tKzK<}yPx11VtJ<%am`)rMGijUk=`eh&N!cm;R^ z_%rYq;8S4ewT7qz&GtH)3wR&PWY}RqxP`cM9@@e$DO>^#2bCS|&n42F{RxDk-e(}UFnfGop%yro* z;qYn85r;S8yPU^OIefvwiLHmvk|bf=3-;feXU>b)t|>uFZYVq)Xvm2%R(K^ks?D&RV_RZ2tERk#CUqmakZfxIOha zbY*XRUn}JSf7jkdu^pUMqjKDxf=RD(fl_7 z!QYnhty-~q*1~xcPdycM_3Ay$i$b^;bl$_9B8+>R4*|brCH@aI_(i?X))Dz?Ir03s z(te0_?;v?Ek9!VXYBujp@QXu++4rWIZ3_EEANWqOMBI0+Uj!H&u*sw5SmwMQuca89?dh9 zn>2N1-2-ObWxHqJX|;PkE{wTGoh3i{TU`FO0v}2FTg@m_5Mxk|L$;Ex9Y05|$H#3z zHxLMvh-b>rG(3a9>Q2dGWb`_}Y3W}#Y7@pW{$``8Dr!Wlg0jENK8dNk|46*w0V?kT z6EOS8#q0^mY%Z8hu9|5wQ+YJrW%2kDE{?Q#9DJ9P$8V@YldTG=JXT5`=ib%N!)jy7 zYGXqHW1`8QtcaKN>06Ti_PZ>Z^wXzJ`b@HRxm8??F+;Xr&Ht**|0jGDfttJR{GC>Q z*#vR7vk4y<7SE=tqb#0E8^U*6ayS?l$K73$!yFfr*ZMoABunQQNrJTTTzp({x76h# z>2loE<+yPe#z@0{L&eyodx^0NLtMmVoZ!G2-GP=HPukacZw2 zPUxu+M|JzfoUTd{?Whu8X&)jEXbXsa6GKI{Z?y0oJw_Z|#P5BT;!qf>={-Y4b9X>Y z>>4UQ-%%~bwGR`c+iFB@VmRo>3*XFQ>N-?3WN{mau z9{B2nbNqDP1Df0M@#A}BTVt}Vrp_!iKdYL>IAa`aq;F=Wm1ByDe9uu24% z4iQ1n1)FM2x`mSN>-bn|^Gy2>F%m*UznSQ-l6(&_Hc7fs0rZO>_O@1x*cwOabKjpa#cvKQ1!-M@9PV&z-*gnj5B1N&3d2?&;`?{$~N4GHVpTR;TWgG7nJ(^ zHe_wQ3m*>y{*nna`@A!dG9vz&Vlw7BW2<2^YG5yhiLp(46zcLLa>KKww*8Un^Ny^; z{iXVRRH~1hE3ByutTwLli+RRmPd=0HxA;uQ#ZeZYS@+v~R!BaVIQi6q&u}qzft$~I z;&Tx$wpx5HEAf%@mN`}6v$zs`X1Mg{PRh3f7tdPxp19wZ>mp;G;Z^^)DE~*esD7Z7 z-}iuB{{v*XM^+9fhvo+?KC^JKz~XacKcDGR4krvO&yB?AGF)u4_*~o1=Qzn{Rf&&N zuOE28((6ZYQ57~?#Z1Hb`^)tEYx4JL{l70L{rUyeYZWc7ah?xQ$Gbzs;nP-|f31q(UHDdhwVPgE!YB7G{P%$3kczjbsVQhYMwr*?y 
zJ{|f19?*q+T{39OYO~~X6+W)pF6;RRlYp50tutg1^Z$RHPpl=$QM(2I?)uMjsFj0@X)T1u-O`{4U^TDJK^OkW7#w16_ zN4aA7;6vcwfRE5aQa4ILNzjTVHd%j{knI|Zeuo`hFf6J1%2vr|F+NV0d}_zwma)|! z8TQ-vl%%^1A8AQfeI>$?xN#EnP})}5kI}Fnqu18vYm&oC{!h~Hz(+4oZTi-v*-?ci zd7U+tDEg$lj*a`oAFoP7Hx~Nm!aYqkW>n7>^QwbJ&@UP$1jK}`;|lfpF}YFMk;%Gv zu+)A`W+UVAF&~I`liprr%m{DxiBx!_{I=LwD+`D>B~aa@S@5kT#a;~U^;eyMd`87c z`An`{e;K*bII8MiqjtK#dZxciG#kO%keq)uSn;|U|0BzOTk-KjAQ=eM9(B1D=`1!@ zmi|*kT7VeRMwv?w*vQL>wJfbF# z==S~_wU3G04dcY>`oIihZ-0HjWoKrCMdN&YEPR;8O!EJDI~Ta9%Dj*N=FDLjhKn2mJKQj1vtT&T2|CsTb3p&nO$_TtgU_5mP*YuWxQW2 zw42>_al6??qkFm4?(cuj8JIy-ct0;YpU?L>&pGG+-2TsV{?Gq;&I~9~Cyw=s4Tdi} zlM$N_1@vC)=)ESLAAvMqZ#8XdyqcC3r>2?gs@<*o`>QBg?UXerBh6A*n{|x|4cVpl z8D3u9d@bC(sO>_`Fg&U(RdcQDY`v*LYp_4vy1`hF{=|qHOYa#W{9(JSR^vnFi3gH442-|+3lYDd@bDEeqo3raqTItJsK2W zZElZOs_pS1VL|di@zs_f`S2ilYmj_Ue04;Sye&vRvd#S0y22DCF?NKK7(;ACB{^ge z89)-YDk;Nx>`zija;}n?PBKY$0cURGSr9v!+3v^BrfLS>F8bLN&{0G7vw`u}sOhM2 zbNhuMio~@#H69I$uQs>GE7kV+kgy>6p!jM_kbHQMyfsKZD84!(NZuACANfjKTy&I5 z9i>u7snk&_b(BgSrBX+!)KMyR)So&^rH)dmqts+V9i>u7sfLa^`u)o`8NSgIjI7jdUNir<=&SNjlw+vC)r-;QY#XA?HMA6rA+coJEmv zX4&A(OleJSNo-E=$9o+e<~wcv-^@N7#o&s~;xl~SW-YX}smn^QW1Omtund=a20cSQ zihJfTQ2HBo(i&nX>m}di44#pEZMjXHWU^%johE^0JeDfj{A`c%>?Wh1?qcMDCcU2)G`029T+1a~s--B7ldQ4UHyQrL7Ri4&%9KPn zB2`2#R=ws7MSI5H6Pb?~92c3OwelHCj!cFoC1ozBxLjp2eS~avyk&%%uB0PZ3)7Sk z8$Zy`yoNFu>6gA9Jm2VRuh!RIwLhGSBgTT5gW)YBzAa#|*MR7L%i$WyNIMvUx0s zvZKF}XvwkKqclA{*RF>Tr4C0K^x4AsBjAT?d#66#waC@C2{f-JFL0K69T&Q}xRGHx zyq0w9V$|Dmqwcj7(_q=Hp}zL$Er#EC86!%jVv4gME-MOgh4kD!Hn&4Nk3n3uey**5 zB+Xq6=5Gnin|@0nUXE0f(V6^TM1C!Uty{!@5cyWU8D)i-peXfRDd~*Xcn5B^9&Wj& z3J>!hC&UW+v)?Lyj91o^0eFn(_Kv7QM=kC&?jf|@uB}G9$1tJ{Q%v+YlFRr=`J$1) z_9s;La+^A|#gt+}3T0xd!@ZV_Ft@{Dfuq{iqRkFG*X*C!v(2yT$)xGc)}}Yj zP8EjQDeDed%Q8sY5wMjlxw^&FMqT`D6wn~2s&i4KrgE-Lw&-i(pm8u(@|eJ2y5tel zZ$YSyw786pbVQgCVWg>*H1ildqNR~Fx|YVHrSZ@l7-{-g(5KMeqP-=`(EZGpB<&6c zpX?C3_EZ6Bgjt?bHC=H0^kJV;CtCe?OZo#0juD%ti&|Y5 zhXYNiU8AqH{;XtJCaE3ln01%5e^+3B`*lHRKj>>t+KMFFiX_^KB$mzfr#2VoOWJV^ 
zigw8wzTyg+MJ3N{?8WtFJ>${mVCdf0Y>E!Mi@`JGRbsbl3Ei2->?bD;nRN`k56dN& z9=}0RxLjMu1ZteNz?L^ImEm!*@<&=FJ}*4BUb(|^uPxU)Tg|YP#pFiKRt^8lcuv3F zW%wSZ-Nt-H-y?!?MbA>cFCoKId!JTlaR=Ma42pTq7uj6Hr?SyTMzr~aZr&Prr3i_P zO;+Bu$~5{C+CEG)5{sgcm=%e{LVX`b+y9X?3mDYz4(x$6D1Jp>TT#!npW3@oZT~}L zUu3Xjcfbz#vlp~1{(J3#WlLkd1C96MDOvwnf1p1otM<(4W?~qJ;%s69D+dfL> zqnGmu%KY{kg)Tu($|TMfd$kuLv0D_iRvE0;SyovGS@zrR*IeijpLR5}@r>Ieb0CuX zioJ0l%6MuG<@-a&oMyVc*5S0871rDiwr2gFfd40DW+_P> z_R=-~PwI47?$3rJ+Jb2IQ*>rT3uj$zF8W#W2-|ChV{bs0wb|dZ7M6OIUJtZAwGSgW z?yVg6;T-qco?5HEAJIN9=I@p9&tTGCv7PhT-Ja~!g)~9w3@pp&aol0SiSH8UQrvSp z1EnlPm-TY}A-P^~Vjc8-z1Gg!yI;~BkaRblP}UK-cU108guW+eJ7G`IkrIWJsdl6k z_2T%6K}vOEYeGwWbF5z3Ee}#8W6*|_ z%=SD?oYBLwOdbVi*!9$F~?ejvW7rR?giw zDp*SvE!2N`5s}D#VP7O>^+Muey?+9-A5t!ImV!+S^`j}=jPMa=H!Tz#DOTz~n!0Bn z6l%I}OIJ$SI+9XnlwrId4&488j)FNi@ma3Zk0!+gdIxAY@dtv=gZBz0jm$f*^8PV0%aGa5xaAhs%{cGHc=9dn_vO5A(ESJwgkr$RqPW(NM)VlSM8Asj@y{G*@iN*SF?*hNG zv35*s$R$$F-UT`LVm0LVM<^}oP19!u?uoPb|F9QH}YN3d!T-&D>v{F{ns7n zS^fWi<{Lb&{GFte@|JaTd^!(wo{w|s>GKi&J@rP<=}+=+eXVEvr?I|kD6L0K33t+@+!LH^!CPqjrpqA$-|J*O}E_&YooG8YV9 z5c*BtV7=Nq8K1%R^}qd}>EGs)^lwWS`Zpw>w@zbEIY8hwZt&2cH+oe|HZ* zkKQFGj<1SSj4#jYr|yrIPV7!_dyX9J+4;}a1$!=^Wt}`%x${HYE%iOrgZ`9!0_%lg zubo=2=w#m1p64FaXIDO7<^mIvYgk5=Z{QG8fTi|Y%swu=P#SM<|B)E|52Wk<~;gzT#sj*#W{5u z^CX^!c#kyq<~V$q`4K1a^O}2?2@NYLM?48kvx0HrWSM^E*TOn@N$a_+=U~o{%-`L+ zy$pW=>(2T+M=7wgJ}sCWxKbqEFN8?k$9S@Ph#hFH_yFH;~c_tPqOV#lOm>{#C7sE z){}a!J%@QypI+A6zeK<0-J>4`|7b+LLq$6A(cmwFL*A@#;{&*iKS%W53N@}zHBUSmDmmdd=?A0GCP z97RU%2`=9Ar^@eWSSAtK2I3~Q#(5+0h#cGO%x7J%Wm{^v()O`Tw%gDC%wSvXY@3^H z^RR6$w$079x!5)*+u~x|Tx^?*ZFBRymilPEf%PV>ml5i=L114wMaQgv^M2-;rJ#l7 zw0IS`s#)d*?9=m!v{mZLws_cHFXc023{&@ho;zjyNn1I->R6WCYwgFiK?-Gg%t$7E zsTZDSB(aS=Z{T??&l`E3Av#TG-mDXtcZ-ku=dq4;trfko-mb^l4^J?@ibn#*!E1iv zdW>;WOI*C}mvfPm`>W*HQqV{+mPfhI_CUrL$CGE9{@Y9spOF^o)+J--Lixg-{(H?9 zUiVACGwvnLjF-_iEFv$EcS)a*&B!NJWC3}Od`$X(V#cLp5!p^YB+esdj3edbKJpUz zhV=c^jESU%EFlMp@|hVIl5(lrL7`&lIO{1 zWWX^q+@ylsN7j+|NZ&8b$R)GL$>+rR9qUQ{KsJzHNZ$99BhQed 
z?S{v^L}C-NCSDB*pHiWF}anjAzzTRe==XPggAa?oIF8V zNWw47pKKy;kzYyrue?u=kd%M1o@5qTNp_L1Ndi<{LaNCg@(VfBq+&d&ArFxz@&O4~ zRGdr3lNz#&Y$AtAyjjHvatm2Qej$TZ6~*KpvWa{{&Iwa-4Ov8XkRQnqi;BtQL9&mi z;VRr@5!paKC;hD|t|E108~K@3N2vHK`G}0PsaQ>-B2`Qvv&cGfg!GGIoync#dGa+W zidM0NY$wM_W{isK$tv@9kU8W@@)|*J%8^ELoTMeFm`NTZhlsV0is7V+tRSzE z_(T=s$j#(WMm6NCwXP`gML>kV*0Gy41I0u7pE(X&mrQRt0LIEzuXpG@_`#r{D9LD2PxN#W@aXE@G0TXcrig6{b!hhpx zT!Trt79NyfGNxcEN-+)7QHF9{hwD*+N>pJ6ytsj5w+24U#EqDR*_eZya5LuO7Tk*4 zP>b7f2mS|l;x7CFcjF$+!@al<^RWQ;V_eF@gSDrAv}yn@Fl+L5;kEon&8J4 zY{fQg#}4eoF6`!0wio-bA1~t-yozQVz(Kr**YO74#9KIoxA6|%#d~-kf5YF=g2VU+ zKEQ|g2p{7U9Komf44>l*9K|twiB^1tukj7O#dr7~Kj26FgyZ-pe&&<>EB=MxUwG$_ z9gB-{^UG_zGipkQyGG5PQtI`U&!}>_D@uIRW>iloS7he@Dc)72-ylVRC zq>AQvOFNgjC?jvml+v1-{L-rO(y5&ldzmKea?4?&QQT0Mt2=3pf_zLod8V(_HLlcG zB0teRndFEWGb&0;LNnscYsc%Qj=Xf-DGD3iC8186GqS9tYI^C^%Y7xj((bCFi)r-+ zj;tuDsTn`RS6Di|yvA2r-F?k>D+j&M{L+fj>8#yJ@0Zt1nNd|$I>p!h>U1%!UeKtj znUyEKciN8acCQwclvm8G?skWsKAB$Qi>u40jxDX4?kjT@msD1`CX8`iQC>B5#;h7+ z*p9yCNPscyPQH#kr~>Oq7pM!C(fKDZWKO?z&`Y@ zBU6K!p(XQbNgFqfQrWIh=k6WLxLLIHf@Uvh_J&TE%>6$a?;d|G6^nn zLLLfzc|rdI4N5X6voy6lrsdTQ-L&OVLoYJ=IW4QJK_QOOE<|=-%5yOw@CoO7anD!F zc(e=O;o23WE+euIz@o$RDzRsRuGyy5zja2wtQIPocn!AK727n{m|li8k7%TfO(w6} z2OgBOT28GL%DiM+VGHF7rdmF9&^J5hntbrEZxUxptfX(%sqR$YlGEL(z6F(I-MJ&a zsc|+_5ivX3o9p8?)0^vOcf2?0=Qrt0jrFD2h0#YxmlFrDi{SaLHHuK^Bt>EfKFe@c z7;IY18iE(;&PO%Vi8+;+*5;uJ=EofNV`Ga44;}uj%pr>?M<*&Q{)P}S5)e!eOE4Ek zpv=2b=cA*Zh>n_!j5=D-*9?*l3k5SnYwBrDwd4uX*XV^(pEW4Q6W2r}py`EyYvCB3 z+C@BNYaybpa$j&^M_}V38%i0`klnb$?s-TmN<>Q`)z;)N>zQ7rzQl}&)Kg?RgOp5R zIhxoTOVa2U#z`7*3nt14JTj1UK7B1To-B0G#n#Dk0yR7~>dCwZ<(0!UB8{6_jXWeL z$DBi+G}JQ%ZA#6QNue{YX7U=PbS@xhGf3+?jcY#3mMVqX4rNK_CylbbHLV(1QiyUr zO`fZ$gd)OqgEH?C-a{_FgYZ!tbW;TO*I`7$UO1E>@Mb8CDneToMjp_UaFPRFG z;i*I=WmLLr*_@~pjY>Dv_BJ%5ykJ)Bj5eCh3Ij8#upBIQMaVMKqPeaa!i+<*8BJ*9 zza@ui*37Kau8_&ljco`0+>dJIMb%!@%A>ZeZUtlrmhSu34qeKb2wiW6rWf)HYb*j6 za{61*Iy1Zmagoj#dRbQsK_shClaDGzI{1xnioVScb2vVBONJg@%&9`9lozJnh=tv- 
z6&=z3Xr`=hIC}tNp_Io6#qyk)trWB|k}u=~E2&v|PS4n8$;_1378iY(!iu6FE3k~{L-pxPC|-p6qK_t^sOZQ0Q0j@kWg`?3eKf+# zp${v#TSy(jMac9a|O$Dpnpi2nRV|=SdJX0&xQ( zK_I@!Yh1{^b=*WkFN#Lxx2u6v0;`TfGqiL9n~y?sWJnT`mqsEQdP5>Fjzl!%mPFB< z-bwmyFI>$nAzaba_3nGTKsQtr^^C6c9E;lWgttMYMJDxT+boO~XgTP4=wFyrEi9BM z^9K&_1z7nvL5mEbdrvdOvSybk=g_OB)n)!tx=T=6(aZUP7gRm64b3jgQ7@Stp7c0U z+O^zj^hl5}ssK*&C@U=^jV1yzNfTL47!@Y=R#EDVnfK1j8~xRO{~ zBHajre$*(Qa1pZ>D3WclqbQk$4K0e1d-m!|9G*?}gmPr67^9X|&@+x5(P~Yz!&;Z& zbvHT2UUm^q|C`G%YIadKh{<9`#DrQ>C<|vHbH)jf!tXqss$|iGDMsikJf?d{_K=5V z?yj+WC<42UFd|{&5lRr)gM?8978YXk2%Sk7`G?bdcsLyPa){7}gb65YNx}$%yOS`g z;F7EF3q%iHNYxdAP+SUx69(2Q1IeyM%)okOAepdY38xCK!v@n`%jSZsvB7lFx97gi z3M^+pAdWZ*2DIE)k~3%iPjW z{^UNg-{-eWqMtToc@J>coebUs_z2)KU>@)bfX@J41Z)6a0elPa$AG)y4DJR@0FD7J z0$PAy1biCs8-OqH`F#x50KW{{QiACz0#lG z+3S7AQ}ud4KK|yX*vu<{F9N=U_1@*r*w;loe;M!;;KP7>0dD}j3c5FZ^8Fz0eSY>H zoGaiJ!29p<;(ha8>=*DYz-IxU0xST|13qxCcbzw|ZVK=c;Ku>40^SU|_wQ!V!1$YY zd)NDuJ?#1S0B?l;<99Lmb9Z_1f9%u8>!5oF@O{9i03QVX7~mqH0aydv3HXPAR{%#K z&s99X3)uF_l>)yk;Nd;q`B!|;?_%9$=%WC*7cd6+tAMX?yTtRufG_&=d3Gd4OTcax zlcYZ#!5!*CYK4BUL8c0olD?r4?9t4*if;GM{F#qtA+M5Fg8ms_F9jd}<84zUq^o5l z(G^Mh)^_&NE;E|V+8X^gP5*kwtf^*a^)t3gi&%oQyn{x6npGs}p>31&wtk#+ka|Wf zERY20NdNMVSxw#0`o@bpdhZQJz0IzPH@Mb*nh19O!^Stm^izWOABNcUg2 zpp_j|NxC477f3t7KJk=x5sn!%t2te}W=rQ>JA!(pKfi9#-V&<(_+)B+a`w={2eO3% zJ$FSJ-`t$CN!f%iNDmX+mlTV%YGgMB=`Mm%Drc24zw(v#U_|4lLJMZn9B)aV>YK@! 
z))w(`GETLD4BoY|86#)XNU4ymI$emUTNx)(oFUm=x{a)oVTEUpgLJ!SRLQV62GUNt zKZSPRdQQ(MdA3Drk96Cl)iKYzEdCLxs1->WLY#%~+UXpKn0${)fwafO0>@KJ^NTZc zlLrnblmkqc?lCrU6I(?$i9E$35o@D#-Xb^p2!2CyhWnQ&FO2zK$P=(R#O}#s%dI3 zhIT$ra5$eesb@EjN=iw~D>=PDg{KPI?Ow}JiOZeGYKN#y+^G4KW^CxBLoGT!aoB@l z8rr5#I#PPYLSmex*to~wGgQCMArtG>HnmKp>?68^$U@G72ujAJo5_>&3-txchi*f9 zsQ0^~5$PQzORA;sFP8S7*Ny#Th5gO}?k^nJe_%f|1K+&OuE#?#@+4fir~5h3IOM^2 zcKf5sYQgyfxQa$YmrTUKtDdn5S`5!EERQFs6;=GDw@;SfeRQh=$iNu;(l#-0PJ27fxIU9x`Einwww^oSF#8NK@+hu}UsS^nWv-V-uuFoh+WwvgC~DeH4|Hgv=BTNxF0UMptCf^dq7@ zbT6=Ij5r!V`LRBEX33fE?5@ZDdM|x zgN|tSr^u}PLY)3BqA7F6^}L-tyX|HsOa4bfJdN$wxpap>`-dAH~l zKuCp7XO?xlTu}>423d{NS>L^G$zaK^ce6L;1Y$}03avB(r|+b74DNfl+dh9?_XTj* zlYeE$ObHLBJw;59$eJVt91YfABXdp#wMJc^jV+9vtZ4RDl6Wqy0EulnHz9;$yr1Jt zR@&~q%E|Q=+S|U;K!2kjGfQ8RGMZk<`6Do?So59}`v0H*>p%SOg8ZA)@xRUe-3K2WeEx^~ zfAiDquQa4LDxZl1@hSuSZ;PCMb9o0hl<0alqs;2Cux$=+nQ1eXK!#Klp^y&v@6la*WZne_PDIT4yl!oR>~2FqpWV@ozoI)`=_ZxjDyR z>^Sy|eRqn`3+8v<$>^*F_H4}p?*!;<2J4RjCZSK@wV~hGe=t2J!LPyjV!opRnvfUz z&%Xn@l2~qe8T^*Lav%9D_RHnsd`{w=L2m)JKh5xE=%)ex@)GE|eHdQ(k~p6#_)6G+ z4X|+z^6Cs$p_k?eqw5&a>lTe_rWP0X+H8w|!Tdik|*?n%g>fIV4QH^w3NlMdTg{Ry!?YFzG|S0AtOc*^}9cyY`( z)-eu$ZsWY`n3n)M@Sm0*P2Q^~p%*JaFE;d2^6BM2kq7eF7vyPS-6rg)h4@#89wgw! 
zcpQVi5&`k30X^0YZ#=rZlgZs&5c99C!Y}gS{M(bTUqfv7HN>yPS+Ct+_z0t`<-~Hg zkryS+D|h@3*zcP7oSTrlb0++pH)Cv_v>EN3YboGEU;cdE=5h0s*Kd+HgDx$mvpCt@dkj3M*&rjRfU!v%xYk{tTd4HU(od!S6>p$|t;K$EXj0I1*2Z&o9p7InO(o zzW{rUWyE&Zf?V+@!|kpH{_Rx&v_MP0ah#B$PkGV1&wZbR-9I5dj|R?Be$1=ai?@I- zE3SJIdH8?c>*opJw|292c76iz+j=Z!c)>{Jd#;pN*!~W%xcpvQw z>c0(f{Z_jL@#BN-@C)%iWd8hwcOU70M%-H!>pzA#Ev@_X{}BBCak1Z5kzebY*zc3@ zqa*Jz{ubBM06X6~#rW2rjE2AaAE6hVpTF*CLtjA>ZeKO)p zK%HYjk7-|>lbk@D;Pt9cpB=>O_*t<&c^uvOBh)>o#CBPOT~>MB8(^2!gE(KlZ=XNc z3s{fmMP47yL2oaJ^RL`M9&Y&j@@ICyKgIh`^7>=I{$E2}ka#`rKR2F_O5SsegRaf% z6QBPiVL!5OzZb8f9-e{ze0ib8>v>=OW`SRPPRy?kJE-~U%sBimc20aQP4KIp_R_T; z!MVUs2K5IW^tDoe9Vbzbb&h-Oc=a~KWiuL|)(tp!OMG4p=)HE*7k}olpQM;h#`?8+ zFWofGs{?!T_p33UKVgSK{c81-sFwodZXqwk%0B+vAh#yAmpG3%zPJ+qH`p1k-+ca& z=Ia^Z{tjMg`BXoF^MM`o^t;&fj<0{d=bzq}r?9)QtakTke#Lbtz``QWr>+twI;C%O??~&nWgX(S>b%s?C?=K0vmj~2~RoF-JW%0Rn z{tElLBEC1X?tq-=yBjOf zIPbKWUx(|{mzQPS?{#>cJ;0t5JnjVCN7i5`X`EY7d{5qobN{y3|Kcw}?=N}vyooxl zenm`|{vOWfd%k$I3w^-FfIi?Gu>0Q<`$OwCCifBCvq^<${?K|0{a+~>{w36fjiOk; zE%0p?!u4C*4?Fq;@qSx>gz*==`%V5n^oe`x={WM)m&E$6{UYqY>RsmokN5l@%IDW@ z*l%J$KFy=QsPg*}U!9bOKev2!QtNui9grW>yiWDy$M}tiBd5grw?Nk%aNpj#2X?v| zF>a!6O5_9l3iUzN4(D4(ziB-w)>o3(5vRQRx{5y8`g`#?CthW+{;Kz!FXP@my&~3g z4d-89@ap*z&eb|Cma~fZ(X_pCuKqO6)eeYrTpzysjLQ=IpTA%6^#cv~X9xS>b^V3+ zaX%ZBSGXSeeON#~e*0&kr#bOD7V3kv8oo{)aWe_M_4Qw{A35*c|4!(w`KmY`q+iE* zzwSNXD(=&3cVa!LVZMRHzDUJ&=?^{oKI~c`2`TGU`{V}hzGv0l^jefCpPV8?o@M8;Jf4hWx zhvte{uL;}_Bz^btmv2QLDvABH2|JDP^9+bDGVaMbb~O9BjQ6hbaJq zu!SwOu%MKPMJAiK&HI8-LuqA&f{}lP)r%NWarIV=%JuH$a=Ae&{(ooBGo?G3*_lm2 z{d{abJ^4O+a^BB#p7SziJF$SV9Z6Dsox10AuHRGt)EW1)LjA%l?AS2Xmz5jWH$?VG z(%A=@>{w`%aecjX?i6cEudj!mmouB=+w%#sdmSeGo(S!Gy3+Fr{h7{twC7a1c~jNw zAN8ZzyqTc;70yz7_A5#2es_y;d6Qw%&sLot(!Rs5t$QraYX-IFiJG43?24l8lXUl5 z0%TX>K4X1T$gan=`TNLvp2o4ZKG3h*HG8AGx8$XJ7_!a&mT8^jEi*2klkBmiv*%82 z+-m14x^+i_!ux4`apXP#-N$0ueFckl_tUvU!sI;MN$ud*oyRk>gUUf;`{a6=o)^8G zt`|w|9EI$YMCVdYlldf0dK%NsCxdjpq?nu+Nz^}4i~aN{JrD3wKj_yXaq54jyXO@r 
zy-S+3o0s%W*0o!Z#=m!+aegtaf7YhwH$vwnA(Q71NzyN+Ha)*6&3kU``ILn|j1(TtYFii;T+UUzDEzP%i1UcAjCi&ZYeZn@9G7C=vZcg{TspCDd=k#Jqnl zIu}uSiSDatJ|ubr9p{wpYdqPtXZl-6Z# z?H-YSpQh~tm!|h?KAks)Y(97K(erBmYGeIL(l~bO?&TEHe3H1%*zP2?dq_IFqtd$C z`4wY(Drx8G)jE5s?dvxf=jYe%cPviNPoesSOwM~`?H<{+na7z*&%`Vg5J@i~UW^xZIq^&D9WX@lr{h4GkUwF0W{JMS{rFKl3 z+=qx#e=EBEsNly-I5L#!U3R zna+D$l#YJA`c;^9?wVj{w16D;p9tbA7S&}Y%|T5ag+Pps%D?{_qymjT)EL?olei+qPL{$ zmv+x0Nza{SIv`%BO` ziI?m2i=I0$v-{)Pb3TiE9|_V^SFN$0%Gz@$-F^E)IuB3SoL}>4-bj)?9dqABJEzs| z>6qO&B>T(!Cg%`qXkD^4T`vmher2@GxV`*jmz}FKw^xYHW8)RZdZB9fGBgl@`#}Hl z^nRDtuzv#Zt6gYw6Y3-q3_i6VPiP}8q zE~NG{IoC{R&zt?}`9go zQ9DT$#`a0o_6fRsdOliLdQHwzo!WgE?f!#Ff6H{pZj=3YLfaRwPj9c} z8DziBo)c)#KeT&t+I*{h??>w|-8{)O`>5N0IO$%X+vL2~uif*a@*MeI25jk&oP>pJhUH@tBmb^LR+uN>Gr;W?7jP1<9IT~i|OuJ zG|~NsWQo0T>!W?JSJQI~{Rz;#r?i^vYv}n}v?F~Tp?!}mME4M5zoT{F4aRz*kUlHE zbiGh$-KCm*&qCGuTl?OQcJ4{va}8ZNjrB81<2CcFD1HAp+y7_8e=W@eJ1D(-D1V~7 ziONBAZYKRAhKNyOkkVt6UX0R-6PElkJ z&oPaZUW~%Kn(UR&xi@q9)Ge9Irx4}uW>>!CMq0=9Q@T+qCsF(Vh1AE>^;~YS7ccQx zdc4tkd-ZVd&8{Av27BpxvrIQ$pSk}+6kfLIzvesa>4EfA=5z{fx0gy~h_Gz=t`C(7yejT-`ukxPEj{f?9&NJMKFKV{LBQrb?9$cCe~Z0#Rjt+&7UwHR z*45eWo4!Wp{|6|)=c#>(Zt_=N5Oi)G-+5(noblvw#*5!Y@n(^~c78X5&cTWLb205a zZ)7{^<0bSwhSEtE(>eY*^nEm9j2I^-h>9+KJx6xZa}$ZmMfBfG=X#W{ZyWW~CAxFq zAU$86Wn50}-0@h;8KL%!&dA)J`t#iQg;dVFNq_0HljypK>N&@e_VwZu%_K=NP5yTtc6hQoj;I%cvc-^r^j@weno8m7C%b{p9Z?j`{x-weX=_!*+~P zywLrWk4*I<`sMMRw@VLwxaq}qQTvrsI}@Y+^zq}WqIP&Nb9nD{)KBY(rBptm>%kAV z{DJ%Jl_y?FjEVOKNc34_KIQGD&l)FC-hTS5@g&Myrq3EbPI)Wz zS>wr+cZfb~JcaUB>9fX9P~O2`iO9a)C>Q?&b#k=<0oAmU&^!>*tzH#<* zBRlib^r5j?F!)Tmp3F$Sz4e+B@k-~^E4)8=6KRI<3vOsE7jOI?`vOz*e8#@nSo^z4 zc+;0<>r*f*r;YMtdOM5*zVM`-7LSrqUZIujlX3 zs&A-kUbl$Djtp0=v4s-bdOP4yO$wg;(r8`o4W9vZf+EU#{?6!8s~ zs+s!lN`sQx)%TPQ8|QdET0b*crVr0P8yH-x^68Q=*@ld3e5UDp#xZ2CJxi{s*-%rv zxyDX@3vA3ghW%}GAX$yatB%1=rnQ^SP}|d;^eh`XV|`84(tdqx8b+Jz z0aG>4RQ{6jj>mB<1TEL`OFxYHI(tKHuf7>~F*ef6I+=|1Pn#D_7yQ=yytMXTHLm%~ z(#%-IFB#YTXLbl}T;7$o7N7R_+lKmQxly*+pJi9s+@I@J^jJ53_WIj$8DLdEPnPE( 
z3+=8~vfHNE8}|ka$4vUQ(6&82YEH9Nzw$LzWk;|2ZTFYuv6pTA%J!hbUimC{)>fI= z+YcJ8JZ+WF?y|#PISe-7R{F1>7H#EYpokguaO`nrruJEByYsR=U&>^|vbE30wvD$= zKieasW61AleK1+)*gD57NnbJ=6m-tq5*zFDwDm{EdXMv5%wS<^Z=5f(c`k2on0t)# z|1lqQXPckZ5`6snHnx4TcrasS?~k?Ene2zbajMPry?w)DIUY>cnAx^mbjmuHCymDRHS|~w7S?!wZ zFOYcA)>mP>V%6wJo>Uv9yRc#7-17QLY1Q(@(vqsRb+z@i>l!kgJEWh7nb&KH_(KF=KErf;DIB$gT}-=?R^i znwM|o(Iv5}q4D@QIt590-e|j>_=Hqjv;G4G&)Coz_4ioX8$st)*OqUntSG?_>6#KK zSAC$oT9Q)gV^t>W_TgpAY7C|R=^NFo97g%4sY5FJ?DXjv+w-UvwC+*s_Tw5_nrf?Y z>W4{sGye|yW@yQbhwBkzmRakJ%VU)Hg|_Dz`}LVQwmx&DG4oWG_T;0Rb4>JsTAF5& z%vxZ$j;O6^xTd~xY31f?cq$t*SuUgaN3AZ_@E-r*cAItk*dB?Q&6=sof9U!#j{ecl zFE;ZUtA~FV{Tuhn#E$tn#`(ptKQLs+njtG=ZMg0@qEjDMc}?fTIr>$GO>=3r-Zkr} zf3!{hdy6fUluosXXg^LVLTg`z73z#nteIIBp*9pV#pBB{1UuLKU;n| z!=#*+m*-95S$-M8tejR~&M?W(@=FY6<+OWA-z;Cdm+ejBXS(EXSY8{K>rMM3)1`Wo z@?~}Zf3}S;g&g&h;rZ8*;}=h5xxApJp>B((yr!c1s3n+1JB>Hqf||yScG@MwwX96_ z!~C|SwxSW*4TCB=#amaqmi)$+@fi0nX12U&?0RT?`*P|pD!;A;Eb2e=qp80x(bT+S zC#Ui5KbKTC1ZpdmLt&8ts;*py zSsatR=^rxWm&(K{qIvu*)+b|h+F*T>^}02S^{8R~*&WbiXvd=z#5A_4-Os2@R{C=6 z_0O^!EXs4leq8iX*bn_U{`=b#qw}Ytzr*>HLH~_oKW;Q78n0uqSgI+n&ZL$f{eH}7 z&-)MEkB$F4V~$*2*TBoXBGA}Sk-9sSw(YWcUhl1}JCf4Ea^B8c=9RAvRGQcq!+bOA z<9MDQWqDTgkJ5nbpC5f#=QCElpY^_Su9__T{oro$SHYi=e*%nsMwB55E-YZIfu{pbJ&iF1vKwrH3qg*8XM9%3 zZm@;qD0s%_$RAYU4)DA{=M*7xDkO6zN#?9Nov{(#R^S_OC-=f1y!7*oxgk5hz~4Q_ zm=`kV$6P2UWEUv8gufS*N#?xb494P+CD4C{@aHT#6MfbPf6#v>`GZAMg})b+r&2oL z71Jmk&_9jBgEP(|f6#vx`GZAYWX#9Y2mN0pf3WCm;qL|IvnhRWej)T2{(ev?6#ksP za~K=q{-AOW`GeP-%h;5D_=D|me#kL!@I16Xm%+q&6n;8mufq-V@Lyu=0k{O@B>2b- zv^iuIeDZw8-i9o@p-XO|4{Ro5)iZ@GgA*?h@+5HfMbM=I)DfI^DPzTuIjb(mxPUB! 
z-?&_aSHUD)2>zTq=b+yqhrk!-h%j;R^tnPV0&C~e*Z~ia%*p17aghMun@8oG&)65{ z3%L-KNam~|Sq6h7bM7H|5X>uK>=l&932uXXla~#2EW$XA=JIz+vj^ZVgRFw*Uxjvo z>;|79IR-9UEMzbEYmyUS?Gho&;NM7QOBuUwsgOh97nTXx1rCrL1!sGOEP*eR90$v; z7P24wFOrksZOetMfTykyvJ-riWEDKWl*$i2LvkG4S%yC2b=nuK01Z8@O^kV@;4l z;5h-2rW@P~7li*X`0*-{=E-0fE(HH5SiOPz7kqt#C_smPPs!_@XS* zOoF|2B5oA?e7%S(fv+}-@CoqxO(J{;) zAEI^xKi^F42EO_*wHtW-x2WB~#g9Qg0rp2l zIzyo2G0dHa>jYnY9C`>j0d9Jd$`78jhsqDW`CTeMc-Cp**|66Mre>e6Wt>2Jm%~6X2p> z33)NNhvY%<%-4lH4ZN4+5cn684}(>|M%f@&gFh$vmtaXk$cw=rz#W1-1YYzT;a?24 zlN2G97Nkf(uNB=>?d-w<*U z_!P^FJ$OFW5$M6wLdakey%!$uf9= z8hA~^}pKb_JC+enUrd7r2B!3vUP@BqniaM~9reQ+zuA@C5%oHec- z_BM|T=AV(n3isrusFKWCe5Q~&<0NwirV5#pO%pO_Gs&FpvxLk!L^9{{FN%Ci!8gAs z{5iLtEo9Cqg+k_hf@IFc=Lnhe5Xqc3pDSd}N#_ZfGYZG|Xq>;Aj`;9TfH!|h_@_8S z$ed4*%;`N}$eeGI%o%hGnRD7qA#=t^=3IS&kU2+5=KRKmLgsWA37PXC$((_Ugv^tN0coH&YUa!i@=HV za@a3kN@xAsixN%cXYJV+*`{AY`Obk3_ zvyh$OS4sARcfw7F90v=VsNcbLaC}`I0JoF81MDL?22S0QBi3(Vc#H5K2CutOq~izk zZpvXrNT0I_Za!qrkKHWdI>Grji|{hoe6vVL1(R^q2+wZGVW-{(9p&W*e|J0D1acCb zx)oz-5M>5$-AZx6z1ygq;DkGc>;fMkSp{FYQ{9fC%7Oe`o#l29i()?L{PLp z=Q-O^x2JOX`{>xeZ5LrygI~M{HUj>I;Qo6=UtG2$ht=*7VM5@)?-2Q(yc2D|Q}hLA z)lLzn8tfvu7yRB%k#7u??iF!4UH4(EA}`JeTq)!zczY;^?SLEtx80BN$z|~DucHpU z%-~&Lr}BV%;fnSmeX!>nIn2jpuw_>cQy@pdoQJ4=z;cph@HLVX;H+jLOW-b&Rq$Ps z**A08%5MtU4?ab544nKh^%1y*WCeVcF=iW!KX-$fs-Gh z^uaA8E8wdnC%{?Xru4yGB&*=NB(q16{-cyW_!P-8aI#A2gIh>ez*k95fU{aCeQ+1a zD)=tRES$qGfJ>mwC9sZU1$><382A>+tQFx~DLhz5vI0I%atwTnWY&i8Z4@4?BUu3- zCpiYbMKWtg_;v~p){(4$eQ@k)=mz+i4xy7Sa2A{k{u2077wi(`ICxKA4)gDW4FR8k z<9mP3Q8>Q$=e)EZbM1bN7w}fNef#;oDTkdE75eW7--YA*clH>@793ymH+={5$P=&$ z2%isL{{)RAuwW15?;+p4upv*2ayr3X`;iX(o5AVN2wQX+=!2_(zaKm;hWQw>3!L!% z95x8q3EmEO2yzI_e-`8Y8NP4IVH=(m43X+Ol;1!3IadbolZ#v>R%h`b_9I6|M^+47U=i!RO~V=-k!8&Y9<6N%&6(?}EGR zd91a-H{oQ+ob~e^Dcx4UN9K#TDmZb0D4Pq6EJS}Gdk&t zvpGKKI%F@HfUDsC;O?(DSTkhKE3Os&B7yI%cCa}74}))iRn(nx%5_NRhq)=vC)o>D zTqp8Xz(+||!OP1~e}s|1C*b&6D+V4a6Ja=8{P0JZDEO{lw1czU!QLnr@(B3Q8pJ`E 
zX7HT~=*f#1@8HXo4(5g&2hUsQU;)T(@E($PfKS71g}e_ueZ5H21zxaTl%WWm89-Yg zOc8ibK!n)=o>ql&!`}tg!My^x0qnURdJef4oU#$&|6gv3+ct_YL9lJ3NS{-!#rQ`U z&P!#K4RQi3xK&{X{xZ0`p2i(`m}J)AV84Ld z$I}PD(TH+Fj)E`26&yr(@V!QnX5JvPG0_HTZjyM?mF9 zjOBy5DYo4x{5gL_avVJACbHGw)SE<_ZcrhabJxunBR_&(fm3dAu+@+`m)r`w0oe=g zyj6_H5P0BLN*@g0hOvY&ZQ!RB2OEK04Eo?wdoD2N4vZi87l3QO2HWux=2OtQ73~F? z^TMs7JU+0V0H5oQql)SYNUq%#Tp`ki8YMZx#sisA3N3$_T( z1z7?Y!WBa1TnAT;bn3tdNZtd!0yor)@dXwHh5s~gKFMBiBgv0|F_MQtwq3+M87v~1 zb2Z5d7$KSS`RyVu=Wj_~bT{nz-9mmC{5@O~_I;<`gSy@${4WE)LUJXzjpX~m$4KUU ziDb?{k<6LDL&(#?5|TNqNIre1gI%&y$Sc8mlJ5hbB$@N4B>x6{pJdL@-7Dl(U?W@s zZ0Sz0i~N5C{*mN&z)#*sd4csLbKVcP7<$gRpZqzegwXEr=k$bxKc^qA0Q$zcnfy6J zBy+Zr%=sGJFv7F@F-P1_^AxxVF7m_NW4q{Z{Kv*mPU?Lo1&k?re|{ImZQws4k1-D< zzw-pP+n|ef=N94jO16TP;r9Zz98w8e#+Kr934Skx%zyUh{nz_Ez0*@~Q}H!woK5NQ zcf>e!X&-j*7%Nxkyj%E{9JQq#DK_%btm}w7loT)u-ZC zA?ydCjMMCgTZxoiY*nfb%ULPpwW&NRAyvb7O)9@Sl#sFO*;z;BwGyrlseO>gOw@1& zy8v~a!7fBiMa^fxxmY3PF_$%_;#a_bCc7Aao0)PvRBlv9*clyWBJH6N|Jm~my9f%uCNc6}-)Z$BBe;4QPB1&}IZT_~6Rww{5%%t1=k z;0LMHr#|sk=cU|)-@ImAFM0WwQOGJPzt(m&w(C*KK8tj`sS;M8?bagC%(^0awF;jb zQ+c{lTH1)%ydM3jw*ADIH1)k{-j}JXA&9>+r59thp0R1E5mTCKy&9xhou;u2<2IU? 
z5>y8t?~JwOw>jGi+FWh!w&FIajYVPCc^?i02L=aX149Gxf#HF~z{o&yfWWG?DM^)C6 z-{Nd3XmPa^Qo5cNZ;P+R-x6q%Tbf#wmS9V$rMX3IiL^vp22;7lkK{Yr!ovAsXSg8j z3Kxdm;o`6q_JqA*U)UcGgynEkSP2Kip>T6p4M)P!@L)I=9ty|9!{J1DB%BP7hFNQV ztFyJB)zwTWG=m0CTm-d10$zctV*w>GsZt-;n%YjdmG8flHT4z|Wxhg##U!>x(d z5wzN9>j$k~NbT-H3;NK80kmQh+A)ZhY;IHAB52RSwpiOxTfA+!Ezvg8mTVhsW9|9v z&h~FxA&`a1)ia%WSg(i!XwbvAdZ zosrIH=U`{7bEq@kIoz4(9O+DUj&`z0e#99mh`1t!5qG3GB1JqAZ^RezM*529X_QZOIdg499J&B%?o@CEx z59`hEb@mqYx_S$H-Mz)VQm?1i+w1G~_Xc|9-lkrqH`p8MZSGZjBfZhy!QNQ!P;b0< zxHr)|(wpoZ?PY!Wea^muK3891pS!QPPwMmZdHZ~Q{=Pt;+}G5n^acAuea(GpU!*VE zH`o{J8|sVq4fiGbM*5O{qkXJDzu(zk(C_Lm?05GU_e=eres908-`^kTm;0OgmHuFV zsK2>i?T_?F`v?1D{X_ln{^9;a|44tbf3%+sqz!kW0$Le;Gvw*<#6!EsCQFJ%e5$L+vz zJ8;|%{L9+`|8YBT+zuSK1OM`NKt653p2?QO?IWAK`p_^U=-6b{r$R z0Ap9d7#$54VkF8Kck$MIjHxD!osqUej0qK*?}BzWLz9yoPH3zQz08N+g`iUf&>jW) z;Y8o7=wmnfFVW*hFT{Hb(WWZe&V@FK^}A5J5NgBJywt-}C(7Q8vbte)RFuVqk_2HX zBv^nrM!6RwIN9dHn2e$X#TZ-h4j1ws#3*n>(^Y7*1dUB}xuAavw2VQY{Lq~^@(&^Z z0`#kb9!&O0=#gZY0@jy5L1`y3l@p^x@d2&&PkPeu7i+%i&<|JEk_CIN{toJ$L@!DLEf=sCP_V z2d_1boSd1bx=%TAg8xoWPGxSMBj*I>UVY+8=bf07mvhIL9XWaGQuku_XO8kY)jc6M z2kIVnD9k-m7dOxKhp#_YJ9qsF&-Gui_SL<C-BnL0ov(lY>D`aL_@}#Lv(-CJ zx&8llcimAC5FBP1MPA{Hp&)Mgle z7gsN5thX1E5uhhNq<~WxIy!iIdpRJv0Z!r*YB=leS33}@-hMvb-#!tS%7 zz4Z=`2qRZ#FK|?ZzN#_;1t5_C_kKF?7llNlkdjC#X-lw(M4|xHP7%QVE%NQu!~UQi zz(e@!*XtZkSHq!W@V4!|$CDTmWNV=_M z;Zf0T6+2q@fQy0=>GAd>gj5Uo8(uY?XjP4ij~^C(_h6AX2R#D zQNUmT7qJ07={`~xCOVqf{E*K_o5nmmGpWT!y=p6{0DWRL6R8%Uc~u=yMG6Cg#Lc0B zBYxhT-7k^)BlH6t{jlB$Lsxrm@|#5bM2VtVj(a&w z>XY^2Z|s+r=Bx26c@hA<1Q-(=@Q~^N+JF{8lb{}_>Vn1k$Vo^91qF$Fet)gFy|<@? 
zkGre4kH3VypN9k}52(eU4uk4T)LvWSMGTM@17yU(dI0l%&JYNx319>md@lpgIQ2h$ z0XQxHKY0Zfpd|_phd{oPLIILr88ZwIg^)-=;|6>g=gP6=B99qa%U*V@p)Y5Y-Wy7y zVi9+(&AZ>RRfw|G81~dXO*lhc*>J!bNsI2mVE9+gJhxMMY29Pd2=9M3I5MvB+G1Ef ziLdSCkwavjjWgFy3!0o@s!$ntIf!cOPlY~-MLo_A=gyQ}<7JrSR=oQJHm;)98}7|v zGMt)~HsP!9;z?h_Em(#xH<`>hkQ!Ge$awk_rfnl18<26JLU%F3<7%2w5N7bRZ^Nv` z=xiO+y6R?0_?e%yeN^-FIp-#27v}9hhB8L*>%@*%q4&?PS_V6LfEt;aO;Dphh-nO+*Euh^*%aSIB2tNqM;9>N01lBxQ?H!>Ns&g(BQVK%*4VV z;dVjz{Jf6^?}?JYN6U@Xv^KCRagCjJ zv#KpnK0)z`%KC+r0Zyk@hupI<0{FCk)WXF|&uov+Xhh)3WABOvr7x`%^LgXSzHT3* za)s^zv+rU@uH?>qP$;9m+jzR#v_f=^N#6lStzkbgCuiRy%dVOK!6IoPB<^&H!{f-x z%c%QqsPlIUJK@LF4Fn|Ot<2nGgv0F%Oqa1oJ%^`?wHX>$CNy2RP1auNRie1RjIw$9 z^Xtrot_V&_Ccouo$TlY6%D3ifNyk4yMlTq_Cy zzPVQ6pX&|1y}?GHe!Duk+GDVe2;~5*i?^REHe?qBz^gPMg96a9fGiONj%*i^0I~Rw zgA^Y6TR4Cc;Q$K40cdlj^Pl|~ImK@cc-_D=>PXcs)|&HPR^bqFpSIM$ahF_%om8tX zs)d$&NZP%fd4OhK=3y3j#gDRm5Ei6LZz5fYLwsb~>4P5S`C7TtE;c>GIYw2~qAlgG zt{*N7r7G`Fw|H`zoow1E@IA`#5VOQoDW(4NYPCDo1Fhn)fKr#G4$mdIQw95$HR?uW z9Bz6!po6aw>>0#*l`gK0zonq-I~7u-B|JsrS&69=A~;tvuU=F#+pdUTp=#)*3U#gRuGTP%DW({&62`1_ONS$l`!}I@+rcK*>IRM%CfZZ71q{>tTKC!Rs)eghG`IMLj`&R-9B$OeYWw(*(`+y% zX;w!IC@wueJZp8id~>M6X{w~0Y-AF9Ta4Rkb+i7vIqZu7mN<$(sEOi7?A{! 
z*paA`C<8Rao(!bKVd4OWUEfazz`&nhz?`3$92A+XdXI%ncXX!s5we>eV1?f`PiZ49ZuKJ*sk9aP6(A9KtoT$|BLl%e(jvwnd4I z{2D|=NCB@1*Kwk0s@7QsG$ z3p5%mmNJT{w2w1pMe}r#2)9+{6e+3)aiiON3JWiXhVp$reu<}aLz6#xC4a5a{f>6V z*iQk$?6Wh{Ss`rNUtd=9y9B%||GfFbPc+dpu1Cu^fjQEX=ZA*_Jnd8DyYozS^%|qi z_zHrVP`sfZm8mNgH%dCMS~nLYK<2bQhA&cXhj`;-2F?)<>m%YPq)X0{&2cb z{ijGgt22OSwFl`RfV=ch1J@riz&*s0fGZ<9pdCe~f+1=5l2lNVpG!1IdSGuYDGxfBxPw!?d+AFM9>*}k6tK3Ef%FVMVSAj`0 zwA>L}*Vk$#iOR*IXN)LPVeJ$a2^6`{I-^%?n8uIQER`&=qoy;_nM{5BP8Teo!!H9# z@ON1+p{V4P^Fs8z&J^I87XUo--0tGSU=Sppc?f*=>KC~Q<|p8zb|?h)+uQ^mVnR;y zeKQ#$qy#)f4P09#u;Lh~%{xQKT2R9vzmuG}$3;#McFxPGImM-W{Y6sjRO_1#|1A5( z+_B(?LO)IWLCH_|UKks+WP^j;9t`!stq1u%fE1UJL|X!+(NM@T0r%o6t{cET`&(@0 z|3E4a0SCV|PEV9YO-qTV(OBz%3@NI8tO;^fd88ra{fFd@gH5q zjIbC_AB2&kU!beKBLPpZ2H;6o06c7HPg(}EQYdLd5IN|&ZIqZ_2j}5`%OdQ|3#1P|g6d;8}?iBy$_UKRZJsAmkCCaz-zH&&-#IvGEBv`CF zLiLlcdV8oj6b0dB5SbOc!R!l%bmw@+=U=~g^Y})G`ue3oc3Hv!*&^>}KGKSpm*412 zu)I&Dn^-sUi-4{iIH6h^AvdXLP8;3oFfOD^dL)3~=8fGE{jZ0eG=Tc?A~;TjySG-I z>hLuUD~1m``*Nf>4PvZ&AIOq(`LZyg!p}P8wsfh4@MzCW50yvUx)ARrV@Sd9xF+;z zkJNCuQyC>Ke3hzwi;EIjbSIuy>qM`3bq?xa!&2B#u1~moTccj5JNFm!bSWQ2VS@0h zeL`;%-G;kIR`k5LW`ns~s?$lXi@{(#;xCTd?q>JSlPZPlOTU4qC=m6RTErRB5Zse~; zrQrU7y9X*OF=7O^b0f&@h}3dVL`qDqp4bzSD*UyG^bf}Vb>0R3E#pNCQ152BD1Q?U zf_lSvlELb_zSE<$UKHo}I)kL8YL^X3t`_Bu4ktEqYhKF-8qEBG;#~?%&R+czLQmQ) z08l`$zw4tf%!zg`826M$sEB5s?})M(|v1UaN_9dQ;2NRmv{YBXo-RMLp-0+Lj;SB@TA;vY@1!12x8@mjGrUn(bZc0 z&s!69PLasUkCS}ePU|b@eYm0@3A9oK8Zb=|J}GRm;{^Iyz9!^c`XX{v*CemyP_D`c zfkuBw)uz8%+>AiIV3EuC}|v{(IZ@Qz2u}}$h52+{p9{fhF$@3 z-07694xjD;cyeFR`B{8VMo{5Iwv!Ri|Jr24w(|q8(!`{+Ay9v)&`5fh%=8iRH>?@)s!JpZf3wO~LwQXye#5@eVqu_MdYM#|d zkQB+I%)x$%>lqdNd^iTMs07p)7EUL`8mrRw>)e&0tX3Jic=M${V0H+9MV+o|WAo@nmXq>ox0c4BShooqatnx_%h5snIORb1zFnpDmmnVbd9#3<4QEw!lnlBVa>*5(qgI@nVCia+AG@@_cui(MsVrFx z6`@AXzt$yBsZF3nzYpfDn9h27Y)V3{u6X&4L&H@_GlJ@3@YBuvO^?;AZT96t$n^Mh zzI&>7;EIg>TPps$uIWEQ#rr;)c3gO0c*@SDTRtt>s{dIwTkHSV>FrYG9mjM;3?=!W za(aI-Hb}t<+Z3EQO_0psQUTnBU$Af-Aisx&BmeMi`r2CGD7dP=k%WVz6DGg|`xiL4 
z{x3MV(O>1@6z>uY=*HSdG8J*YF!{VI1-#zXKV9owK|Xj+Hgc zm8tpQhDRm&0^{rV=iH6WC=VOp)tEQ#8J+7~KFxeKuv1A%Ip@u({a;V5+w4bY9yHa) zepF%nvAi*=J!(-p!cviQF)QRmiXln448#5vi4dIQtLruH6OW#KdJ;T+YGy@dcnsJN_IV4?2~^0N$y`|2>oOdZ(+^qSexZCy``yFNz5%#R-R zr5d)zX7W*DURUUIdp9hMk_=BFDXw0WTXob7e!o_7yma-gUnIueJLo#DqnPTZZ2fF| z#{mAe-&3yTyJsNW(Hk6zBF3^+RmnKX+%<-S$L&M!dQ@Z^~amK zuZ8YbULQ=aDpo0c{QQ8j&Pd$4NcI&bC!n_;i_#vp4_nDPE5Y%Ix7?zp?2X#741M4J zf}S>$ao4wI&E0Q-rZ1iEbT;T3t@Wn!xDUuk+<$IPCjOC*ppSn2kxG!WuR0RVOXG6BtA$ z3nfqkl*Dc@iULHz_j^bj;a4KVa5xeS>$=7XTZX%3ooOM=HaCjbRT{$%T0v#31H_Ix z=i_a0ldJ16q+4f=Q~rp~akr~6s31D3b*@*eLh@1Yb!g5mkdPlcDciZLU% zA>p-*KR%_+Hn;(8Vv3r)?42jj7;mP*X}p#EDoT}3P^hf7tspjt7ls!9uxz`A#KY)7 z4oS7G)ffNS9sNTJyEm_dhnxV|_a>I;evx1JP41|3rlPaE*w-Yu{pO=d9e$@{omVTb z^QFcoeXaWHa?Pg}z@wn zU7`on&MwD~HVu*`s>#DeE1bPV4|}11xOvQj_nMIvEd%9aa7@S$$fF)q$R3^32KP*6Bxj!UZExqC^Ic6{D|J~^L@%aMIi*;W zIwqapn(gzKR4??}H}IoT-a@+?Pkg=Vh6>-%Ep368 zMJVu;A}&RJdP7_a`bvvR{^!ixn~j2g|J&#H|9^gk+*$9;nKNh3oH=u5uJ!hh4)4>cVzyMA85x2 zT}(vpAJF*qb zWDUc7JzK%tkC9syOc!=Uxq``!0XJ8{)Qtppn*#qcCF2y#!fW(}8`3irK@nO77J1db z34Bo%5AJha5VF6#jFz58NOeP!L%c1!oNk4&RWAst*$fr1A^osC& z?g`~IBWWYFm5Bw{ctt*a-h88BAz<@ojKCfM+%q{>=9^GaSz!Y|!QED=-uXS{D_U7* zBj}_LEbrDU@?~EQU;lsoJ3{`7i`ii{B+NO!OW~ZBi>@a;;`4%kS2tm#y zG+-su@eRR}tcRKkm;mHbfkQ(J^#29H4~1#4>?DL;Yi@4PssB8{tC8|;l^ZPQ4=*Xa zJhP)qTam@?IIJPlK=N%KEv~<4}&KVnk;{B%e^c zwO%n*6>lF0g_wOU5NH3vDzumzJWBg8tcj)Ru{52QKgjK9^EGXSOcDGPPEc@JkmC+c zFle|eW4y+00k0OlTKjbHCZRXUJ^@;#Gy4wOKQueqhe?(2Ps#5Of@BRQ{{YF6UIzyK zKOr`nV_P`(AeYrZYriUf7vIVW-JEd2EVTUe{GEAM(xcE|aV7Tp-sb}}u&p z>}W_UhD2i>7v03}jxKg;nzjnj@NTvW(|bAbJ-rIjdo=}>8&o8|23fNs;9X;#J|a;6 zE;%2(9V^!=RY-!nNU)BE1QNg`seY+y65NGBC|%XW3DFz?$?l4-!<+~&U`4mKKTu)x zPuXnQ!ibKa*v(Pk-Mml%G?jI{viB{n*a&mhLG~U4u-dJT>Br~5%*WpI6$~izmM)w$ z*7QRUlfr~%5SHVQEwS|{`zWtZWBKAGIscdCLJ7xgPv-j=;oLhv0X2;oKW`{>- zPCaN&J#JMt_%r;Sq2)qx?0q*$y%p#$C^H&<^L6mJT@R0C`TKv{90k95I(Xcshets^ z#Mf8T+z)S~?vrlnKG%-!Z=0Rf)O~0hb)Rrk_gPHmwlp9#+?Gak%v(h2P!JteKA+F( 
z;-CR%+pWTBbAwksOL+Q8Jt3n{48E_h(f3D#eB}e*=KG^0AH?bWaXNpT-XEv;$L0Iu zBwv{&x*8)aQPniGjfS?-keh~xvr^H%|MF&Lf7jZk;;lX6>s!Frw}`KA5ntc_5qy0s z;p zZ`cSB+tvVOZ?F)`uK&g7gYgo9<7H(;2V_9Zu=hUTr)(A|TN_N-m|@LbGQW?8-+V1R zZr8zMSz0lSIQY#2{=fM)9XtxsAbz$Uv*lp6e9Tr331&U!%fWp4n6Di0UQN*7SWRFR zsY5EPd<1X~pq%ZXI9DOg>-;!J@Bv3S)B#5{w3UXo($F><+D1b=XlMrwxoOBvL#;H_ zN<-~5M4XihWGGCNADtkx3W&Ta8j)A(|BtT`VOaxg&;lMO`XK%^cc z(=bv8F=-H^!x$~Ajg7azjwQmJw3$t5p!KTwfkE}fD-dD?4`;6q$jR~=1FMZTu<k#M8;aR{x75dL zwVo9WVUhCto$8OwK&JM`02GU|0YwYMs3o;QM&fi!R@~5;3gE8!1WyWVutZ4U-3GrM z6jyHewZgBx7#7ykF2u#vg<)IOko0{Ffn3i@#!Lj;1ar zT23cnjOWm81d*8&)QdVH$(8s6Fc&VXksn#a393^_$&6>!!;~?V0 zu}%2i5PVNK&bi|LNnUN>Q~a-zd=?>FYsu2X%kb2!iTH3V=a|aaBk+A9A3K*MYZiml zs}nu}!V$MaF~UhsIK~M}KnQm1)8%*8exnXg;qd)hoRw14X`g`Oe?(TXz`tFFE)Ul*&z&R zidk<Y*S5fWYz*P=ebq(rxd{IU2QCz7z)%>-QilbIQgh$MlRD$e&%? zk*v-WQRT7V6aHr2-HE8{G-gH~mW6y*0IUcF!X`*#VhPk8L)4arIDuBn{x31CoP zk-S%DtkQ0D!i_(ovU~6zeZJ3&2x0!`ji*OiPDHU`x zCp1dl$ylZW=RLB%Vfn_ea-*?R&A>#wAF%JXaLVHr<$IQ;|s@Q?@D6ptN`*JgvXmqx_z`EBZGej8sn`ejbv+5AqO<{yFI9#;FPVv;Jn*# z)M8Hy*wX^^w7dr1jpG$Y4N{EUg~zcAM`34k(>vg~b|mRXLbP^3v%N70-9soe@x_Kr zEkCN*l&RqdlhrY_&AS_V0HzI0LqbKeg%u!_F7peJ|8EA;LY~3yKD2QHj93G^8?SoNf*4PTQX!ynWs==jKZ0~LVrr3~m z(4Gv4@cE#9EnXFNfJBp1Z7qrRAE6J-E|tCs8PmZ)ilH)+q$KEh+G$@y2O-Ryu#a=* z!iXt>0aKpDIjeN+?y2eD(@|5L^&=fMACpnjc~jV^X~W5tUEK`hrl;XDK4`euVYt|P z94;-~agWud;-GOUY|Wo=Nn^;$u&)juGo}DdO${0|atGS4F=mc?IORcUvPD3)2=C*FW{*J{Ki~E+ex}j!!|sOhqrvgByOG^Z#t-nfeeTHOtaH4I-90a& zIO_)(KoP+M=wtvVd4T~G=N~{-Gz?t?h|q_Rpqm0XNDm!Bad!=7>*nChDL3~ZVv$b) z(#ha=W1GA+Qj6J|5s(pEE&5yilz$kl#9_oIC*fGq1qo;z@a*MvIA=P(sq9w4pc{|MA4gZjT0JKi(m0rIrf{|#6-;LFoN^w_spb^%Ivh&5z zqXBt>6EdONEjQ1fF{7OyzvX7@TyRHixtX9KP5dewg`q0Qc85m%m;`cDe2J>>LDmot zK-f{0#%$7v?||3Gr4exiu#EyqQ+WH*9K#uG;)B_wi8^Q(50z&VjHV&t09o09I7roZ zd3BtlFgXR}t+;2h6bwoO=fnz`tcG!QMK@-9SKliHdJyM9 zx2g7^-3z4E#|LWyH<01x7K-van#kng{mGZO!b?C6lM@V!47VDv+taMVd9%>Q2^TDa zC*5brtWxo;)mEhvN1p^~+m#UM?ZpYFjDn9eY9kbDs`c^lV&qAxO|{KXaWFwCK?K!j 
ztkbeJw?mf9^%}b-GmCXuF_5((8nS6PC6v_BsjTMH44u8S{BP?_yK0Ynt}r<8Z)RlXK(LxU*i3c3Z*G{&MYiSp@am< zldZOy37l|LjQ!5%GdMOSXxQ2}uyZnRVr%|Eda1YBb*n}^@+&YCXD`pg5a&z02}{e| zl*F@U+nq_`loKfB$11&QXr1r{RBoSS&fJ*7kH$KNL!JaI1OXErSP(L@Cam^Rug~W- z_!7VO021*H3RUeHz}(s61aoHF%p@6e*sT7B`d-PZ&bC1ZFP`=_2;CsF>x5=6@QdwG z$W~wCv*=I;mV>-27kO7c(!L?jaHnBu(;x3+4e4Wzn0>s5KCbrn@yO$HA8SJUcq!@Q zSbJ3FdKK%MOG2s*QNoN^sgq}@f_wQ?roWfRd3xyOCBDRcBJ}bM)z!NB1@YS5JnG!F zyZJwtg1dRK7}Cu-t$)0mpS~2_&98fCH}?{+?B+|y{)gS%@Ga@)z2E*$H(xQ9Z$%bl z7P>9M`Sde}%%qLbjtkyMv#lJuZIY+}%5;bdeww38VK3%fi=idAq=+ZFeLjoOk(rmo z$BI@2)|Lm5wi(_=)?pP6aJa5KD*ht{7|Hei*PaS~tzY1^zze1LLZmP8JS;=E(q$<$ zBLkbEN9JU>v*}8H6xN1ZmDC7>P#%ko(EZk9BjSRgsXKbrMw}fJ(uj|{f?k_Gjw757BHau2gzgakC7LJH-?8k-v z{mHFl!Qa6NZzZ>2ZXl1|76hS{6Fw9ZFC(elpNvAMaGOTR6#x7oLV8QARXA!9YLa)5 z^+7}Z{mET~)IX}TB1mD^(1WH48Z6B_+)LSvdEzwYb6QSf8X~Ya6i9GyF0Ddmm}6V|^OK*1iBTQofsB!bz(8`%*nu76&9f?1<_F=HXW9$u@A*6>`6hzuZ^Ka*Q~;#l4;Sf z;Jisp%EVSyWr%DhD%4Jal+QWxGZ^tPXiJUT-rp=76W5PKKU;gN5;^9OuRw~h0PzDd zX2Q5NxjE;?J#rHdM958iHzKTwuRw$;{d;3(SuA_+zX0b#E$JoKqCJ4x6wYOBvj|6# z2k1<~0kO6T78+Vp+fVFfQwy#&VCAsMon`7^9aRd1+S+CmidrqeHLEzm)Pf?SEk|nr zs&&Oyn+1c$;!@QcSgndN+boRMY2ASZu<@pChO7hZgCj_P?`FeVpr4k$?F zYk~3#Yu%go8GQRqSwHg2IAPIN&q`R+)Ym>v=oAJFp-xyjEvl|FCg;`zd4LPkhUswx zhoi_Qo9-@XGtT*c>OQP5&^ByTL#}gV^&cId(RV>F|w#yh#Up= z4x2t4x|O{L%|M_-b$oA8fz7hb)j@OY!-ZMN4PwN01zYn28M~*DkzV|O($N_teklV# zRD9${vVzcw!i^L$C7hE3$@;B!AT&~Vy!bpB)~%dv7L?!WRhos5pt0ilVYEaiBEe;? 
zgK^0TA0rQ14E(9yMUm4YA?GAA5XV6u@(vDph93GP11$#N4QzS}M(V_W0_Ax#9h+mB zHR;vwVHu45Mv6bgG~~&!3eAW(v+!>t6qbUBC#d7pFua@3;`@ueZou_h z)n&Fe4Op14iF5oI;Tr496n%qXm108mj-%5z2ifl zj~{Ec-KxQ%#O{iAoMmUT@7E}_2?{2={(glHTrRa$$o}=D=#5BsXLYfz4}w_b0Y9sJ zH=I?z5@40sCE~)#epXq7{LMI7VwG8wLs(@=iul(9e}qd;!BQ@#2e{;gbcsuP(nGoA zT|)v~a?wyqi#oz3FMcL*$tnwSNtd<5EPO2fhYhsQ<>!=+a86l)NgF#D_TH(OR7*Bc zU5h#-Ho2WpA)l->3eDm(9}qUV9ob}E5Sw(fj^8APCYZKboExig{sMM5E2)^He6TL7 znSC%F%LWz*9q`~7LMk)0P*D7vewPk;Is@?XRul; z2gS8qfI#jA@QG}^wM|s*zY1gY?h7zRqcBYi9Cp)G@uPhR?F?86w{tLF%Q>6IqC5*A zw7@76r)UUQOrspp;5!B{Z9Z%i033u;{96JLAMh%K&zM!1ypq4olTR3A7ag{^(@_O7 z3NlCNzaaj&P2!J;Wr;u5QU2&MwOF9QG{PGR#>H`1kT5hlG4OUjx2jcIj0HL2D*2FHQz- zR+43Pv-k9YaS6*LJ)aMRhursty@XchiOCZseme*}7H0{tS~IZZBS>*;CRM-JMHt=< z6d{Dv3X$RMrws2D@<~7-MMpW%r+6|gh~M=(L-}1f^1Itw2*0cAncqG29pQI%;+JuR z->r`({H{)1HXivMl|OYTe{#+oZLkdLDro@5ha%{fOc-u3+n+}XO*qf57ENg=g%y=? z!p7wQm6qY{=?8s@&zwf0YZFH!u$j49KF_e_CZgE76+EcvG778K@>U>-&4gSQ%>^cp z^ps&nsRSBtq+%xW`Pkh^j{GnbHIs;W!#;#2SlYzmBnjyakdR))`GmdqMw~E7Ij%)7 z2E-kZS*%}(5AP)d>I)ppyMxB^#mdJ(-1)*NZ2Z9Dw9Wz$cl)KC=Br6NrzB~o*^sr5 zeeiiQ4V#*&wA0Q$_-7(r-=g7!rN_7iua5H_=3IlHg5ML4^(uxRZ?UaViQ_;}H7ob? zgG{yG!yG@P0q6KRDwyMuX;%Yw?N)4Pr&4b>N~p64rp0E8LEyZ(3_NlaWQ@UpYR1Ov z>F{#E&|+})Cn0tB1)mQKGx(aJxwDK}8!xi=urP={kK!bg_(~hmUtJ-?w*W0<&`Gd- zj64aZ$du6yLwgV50q0;|Boe1^o-iVm=&RToJJK*a0jcH%6vK_ed)vtrgS2xy($2C| zly=UImuSZ*k|t&DO2rn{N{W*@^^G9Yi48S;re>umY!d#ibwc`9QQI z3l{k016An#OuyQIT%Tz92`K>E|AgRV7P9?n38)>&_TSMEnsH+oFuF&a$_65Yr8I6d z#5ueQp2bKp33EWue%`UMmEluF@B2tGi&}yC^8;&701cfcG1k^#8k*Z_I-<9m!?8%n z2e24ILDh&Iy`MqLROIK-rt`?5q3M8$vHNjTBPU1ziIN16s9*ua?92h?sZaW2*#`ek zHprsmT-hqjE<_ugTrzO%d|!jvsn-B|&d;)LXYVN?t6ysha>?{2!ay>@xuh@g?T-jE zy<(q=>PXVhSaEfIjuW~wZ&R@~DD!c)yHw(u_o&#bJj&LfuwB~;-D6v;5|_S5)LXq! 
zy>(dlDdRRRTeBBULn5pB=uQ)(Wouud`Qycjm>0^o&4S_Hfl8~Nq_`H&W zlbNT2%&HzTW1IzY;zo}}2}Cnf3v(DuZrSXv1Sa*6J(BaeYfsp}uRX}C(%s(79T7Dq zhF5n+YWQTL;RCON9{^qzdR2UHLrpJURTIgp-$9*@(BxIqol&ZkS&2iAN2_8~5XkDK zKZrjKTk~IvKaF_fy8-;Q2k-}Ly?3v~-v@}lSt>sQ-;fd5@s5lWjUOj8E)0Ph$lTOJ zW{mrvAaFWGU@w41!;c{tM8AR`L{Nx+70)6L)itwJ|05(`4Tm|zV(GkDXrZVSy`TDg zVq*$b!k}Hx+hc(X?;Q;fN0pwjXV6_FJ}r~AYaN@DSbJ}7%jSGyNga{dpHC8$bbUDj zVk)9LXCy1w+ItlQ#l>t*Em;<*KuE8^ZP|B|>xiXMZ6VE};}>T3w_gvktBymd=dkDl zdUkaAHr$XkL$xu@h&JCJLXt&j5?_UdwkH)8)~tPqQJ9-yaCG}N#2T~as5bWEgt;l+ zk9~=#_dh~ZydR^|s^m?0=OO_jma%8cz~W-KBEhj<#{h%tdYep=WFf2zZVr3Tdt@ym zFO88>q)oNefC^@hqdiZoKTazRg#O5kwO6wQ%Bz?8Uf*0 zeGRFij;ICTUC|BVnx9hgky1*KQp%B1s(?~*fKnt~E^es;GfrAersyr-RgWtK}wcjhqjseg|B6jCm7|WlaJYZW4QqlC}4}jGw7X?iMnG)O13})N7qIn)) zr`k%k_Ay$jj;(nWrGm^Ddmc=%I>az6_jhWW7NplXDc(1HiT52N^x6zXLj~es zpfS%;($glG*Dbi-L#ih6C8iyd$&f0EKa|L@8Dd)K`;-h10~xmY z$q=Q)|NbD6p~Xcxj9IutXBJkZn1xEH?<<@_$v!jPk4<;8i`{jGB%%T_42EAa8*fOn zI1PHE)68XN$MPo5Hb2&^Y_+)5UGL)N(7fN%SdfM+&^VRw)QTf}L1QAEu%>CEJFZ8L zIPQ)`i(j^;fy^m#@lX7*;?0o7;8cL4{X;noNq;Qc;NQsxStQA29!M@`rvY9vCwrBS z;f!RH=eJ4VghtL4r-0r=+Yo0A=UM>t#cA012vIo%XGwJe?xg{WWUO$G0xibJl6V7T zSD(jCBSah)9#-}X$9gLJ#lgfpMaDCfu~90+(V+1R7!ojd47O)*sTY$ixNgWOe1ciU zAE2PlCIuh2H;FK(V`AMN|0LLag|Px@d&n*b@V#zRx(_9NopJcPWV^yL9F>R=KpGa` z8IGh~LbQr{@wMStJcx$Q8p7^4VQmiQd@FemmA*Nui52qngwvz2YZ2BGnKlO}vWGtc zc3FyLILmcn{%~q_Aub*+*-Z%ZQ*eCei~re)wYHN1Wy#!}%MWg*dnq_(#4#j4SI#-< zur%x;_V_rvS&}(Xa|p%KHmVGk__DXE*_f;&Z+mnw^X2Oq+|og~9kkgDKO9%|N5zAm z;_Gw>TU?yZDCm=f9F?H)##vmqKyYS~aH~q_?Mv)`2-zo~4l$l$5pYo`)n|c8CC4I= zh4&Hh)}3Sv+CFK^O@u>T=h&2_vM0j~@7LS)=WrwML#P{;lDy^fXTm4|4jy`A zU4FH3czzB1>fm=j{I+9wG5MpW5zIvIHnE@CJrhNi2HXSibYCcnagwe0YD!e zGJQl2lj*~fte1)76cR^{nAH+Q9Dw+ZfcPc*%DLJzd?Z=Mdwcm3dmWTjq*8uZ2%{b} z&AY)*M$ZkUWF$TT9+)4nK~Am;sj7=oRR^J}p|me6Kf!X0$@*)P)V^jUsX~dQ0u>{1 zab^(SVs;=Yef|5Sbk8Tjr1bM?K<i;@5^FMQz!) 
zo$7-EB$e*AxQ0Bo%jbjHh%h$8V2jWoPe^!Z&R7p?@EnD)Ze9}35{Ax31wSUhvj<2# zdqCJ~b1+$$b~Z-vy@d$Q(WsIZsyc{M{gDEa$x=1K;+n03P>P|xF*(iZMkreOLq(ZC`@#7=#?|s_z+yzLeA80W^T;nstQc&1jPCw4%&Ho57)D z-bP|D>_HMR2#Tx*`%(k3^1BsY)frOV5?y4cm@^G2_PAyx(lE`LPIHbk2xiFH;^>Z8 zt1~;+r7`va;(LRTAjr9ABCgQ)P~{;_UV-_^Aaq%U2GNEF{B5;vyGE>rmz`BrxbBc` z`J3=Cs}7YqNyLahbT&#e31`i?1pCAybc^=a{9`DajiYmq?(qtG;w;83~xnp8HKq9ujdSxN+z8YT0%Yi}+dz3OeY7y^jLeVE{6LW(K)2rP8euKx_#whO+LySk32iNJAK>4V zn+sl{yh@xkz|V%ntO2y6*TXte&)GnTQZAN6Pmo39cKUn)Nf1Q&5K+)6*v*0{^_)OsIG3EmD4);{ipjy4T+T&&!1gU5ngEIJ133fcA4)&hwQNNkRC+D*FCX?+j`QVCJs6^IGEOKD1u-Zy38&9c#O##@7+08Z7 z9E84dSWH*qDqL$x?Vzu@jC6|!CX=HU7tL%C{!Nxk;={NLiaT#c2h8ydF|RK>;Mcu^ z=W|$iZaQCk(8!uvut)W#+JpQi*USj#EIfJb=u)idZ?+xelyfzH3Ksw7_W8V1a8`qr zv=$hLJr0_FKWVZ`>y0tWA!Vcd?%VjT*sp=o{oa?LSCWv#O8`ekLhxiIj#D7#`TJ#* zfBO5${QCZqEiPT3q1y4?*AYW+Q6XZ>z78ft{phzX^RMNpbPg-{E;Fi8E{K1A5~Vx~ z3aZUiKO(e=pKrq^NMZ5UCxDx@Z5n9=PKG&g^_lP_&eMkjnb;?hzyjfBcnL?$%H#~0 z0TPXrB$NB$N7ALA*asd&imTjCj0ntvDa~Tbk2O2? zsVvGb#U+7gw7@kAlcP|7SVhT1qgd@gYPKT0>y29&#N@-R`?EN*%|i!yNHyqBss@W-;C|d#|D-V zZJXQ!O}@l;J_xqg4e19wrO=%@c2iDzr&#wQF}zT5$^(>*4ifEwD9S|(UktEfSV?iN z#0PpS7%@{zwy}o$_H&K{30c=~*g44^t_LJo-=Xk+Ye5NN$j7gtdi}UXI3(>;_Au$n z88s~j48IWu$943l8f!u()In^yh56o#zd?7b{PmtmJeU-Lb$ zHq~_62O@{bM-Fow`EPSp*2YLO!o3G@Ia6g8R3K^b$r@IxWaAlDI|n!w+RoJ^Qw5m@ zButv9z(nkcayH(q4IvC-$QH1fB$Gd}3x(+)XaB_-YgUeiy$=o0Et&Jc=O9WFklgTe z?L3V+wUNte-FO45Rj~2E^(ZgH?IBB6>zYN#REbf2iWq&F3f?KcCN6W9X6@JBDXccq zAe;_+&zz~(tQj2i;_=_`MGL%mn9IBcUKEbtd!eZ-b5+%;rt zmMKu-hC7G!oSeS%fS#?fC<4YIBNei7$N)u{aR{;JK|>XgF>j0ao4%Y!4rh+1rY~sq zBJTJ*#H{!`vg0mkyKEs*1^W`8-AkwG6qKaOAxr9pLzf2YQtwq6!x2ikFq%>DzEJH28^y1w8KHt`@v@-_zj$t*xqErM1?*k~7z5N`+ zplvh?Q!tzT@FJ1Ys>nDY6PTpZ98C;;bisx`eJTE8#Q|CgfBe71IBih>Z-@TgqhmrY zaS1*jgFVa_lHNVTFOg~*NopDrT$7g86eYgF%006HyFk48S?HW6NhGJ^oO89Jiw064 zkR(>K)JDS_%{gXkeeg)~i7Qw#hl@6rPU{<^#9Z{|r%}BXRnGf0S3&SP)@gm@kT6@L zKp{c_wb!1pZ=n0N0D&|O1Q{qx#ID{_`Z^s^x+ja@1Y&U4gYP5$I}n3PWBX{a zQHmjlpl&!!x?d~43bDAN=5*q-hd?@cx_3z3^F*O{P~F+$hAZm+(-n2+hSePl-Jub) 
zdrQJjo>siEH^n3blEkqzkP@`2x`k8h9rw^b}UXi z8Juml7A8#vXN%QXln0?b0CxkI1*;0X#tHg-Nqe1d{!y)R%^rTrPqW`EoabK0Xl?~B?D0CpQ5U%zmRZHWdjzEGpFDo>kW zDzI6FCbLazRvz~?5ClPcn%EkmOaVS?$U0Qf&bd}-AzS)Evn|8yng^M?IpM#UKax|n za;X>CT@mcA!IM&(a0|E*5b*dwlUBw zO@hjPa<)yS!AhWHv+GXip--T6=%K*_4NyLwu{J>kUOcGdYmlq>!21*@GaAew|W zgU|-D*)6sWX|aYk71@F@4KRBdwWL6CxQF)&uCQ$Vg=VwzV?*jbL)QK^W;_asW^-nc zhqD^^F&1nu?y9G@(t~s=ts%8VB1gB~3)KTf_CcEAginmZpHxQSTMKLaL@Fp(Dk$|2 z6~xvO?F!ivTA0jj=xecM$0BXdfu$a$?FLHQZlvvewssd8N6n_xeWtAa?0tX3rA!To z6wffYB?;u31O=liGZtyl#O^w1g2m@fh4)5|5^ow1?*SxU1G&W!MD>PjVd2DthI0{} zC#t_xKwnKvP#A<3*4SdKKL~~MT+IpI2m-V*b4KijzMO3VfH+4xTTt~u5IN;>diJXk zk2OVdnK%Y#aKcYyweO7qIMf_#JO^<3n$3=8RTivA*?THsUdO_v6I{XpZ#-A~Js%B& zmsjHmLbXmnvL$*_d7o(9E_rhZm7z`k_^kyi1q~O!J~8wL`d7!=)a& z_{kvmNZ@-dmeli^L+wL3*CWyz;ixb1E26<~K^+OkfsA0AnSh3OsV(5sW(yXLsIXbc!dm;+9Iljch zZ&2eJLXnX`t_d7s1WzJ1@MD1h^e}OPkolI}la0TTDhHjFBRmlls(62vGMc5NU5+>) zWqIbGa*Ty$G5NFnJ%Rw<s(yLbH)_Be!TYJcQSl++tV8?&&e0gfxeQUT9Iw|* zjn&$tf%VlKv|pgN1GXrdLis(v%B2pJ^xhDohmN9)y%#DtNjMNDZ~C1%1{lWj!SR!S4h|V8nbhS*6ged z8KfW`DAGHLMdq|B5ZW$$ zt41IKH>_)YZhgk&Na#IiR}yrgk5jf(XX@;+`nrjn?;wXaGXBW9N)(*Sisw6D^LKAH z$b1%86$lK+*s(_3t!gf2H#5XEq}`Wz>NQ;DbXeIpZ$kdr;q3z>+Ke_#pZgNOd=(e= z7YPllAw|O4ZWZwyK#l{P<_=hgt+r9LQim%cwo|FGtC4M}KVbAvDE5*S2{~EgLi-O~y^-w} ztUH4vS9H3vmA*vHt90+Z9I2KV69H$$^Y@FDWK81@!a=@QrXh{}o4hio=lg@xm3R|_ zf7wpcBQQO#M&7|ge80t8CRKSdV# zdJT4EFJEHrt9bb1(ksM%J!rkyExt}N=4!Mdt z=C2I1cN>f61Jy(+80}Nv{wwD7Mv9~0rAdCg+Zxd390o?8Uy%}GTYxhxf0^256C4Ud zbdcg50$G0i1rTx=GChSEeTj1+pD#}FD_MVju|py6h55^T{}+FmgI*5eHUWK3AIQ~; zxw2Xi795(SSiv98OwP>+woI`vB=Q&ycp7 zKgWXNgt(g^FElNNj0b2*F_r-AU^LNE9|)Qf{PgChPseoMMFby>NK<`@|J;t1e}56R z@FJc?!efP9zQi*x5{-)ks%4E`XeDz6{Q@B^!kvd#@hpw{<%`7lk{<=r+tsSs}g zCTHJZfWY`%@uDd^=)sVW((!n?@DnZL<5^g$gl zM&W=jag0>WTz@qoV>I{=8ddN@s66)e^PyoyNV=ddg+U|VOHKLNw?5jBQ01#HAZ_02 ze}%g*F7qNn(GWxr=?6JyjB^g&H^dJIAQ1G%w+ocNvm8p&ILo!(VSDl3HgU0(BhgUb8|aSe8?|*H?*LR#VHUL5 z4_Cz1uH(3puDdutY=sD09zd{IhHV6~pA^sNY2E3l#gle&Tf`e+tb0dL=-!rc#1r5i 
z+$#e|_F9}uWQEZ^Lg$}ffH1@9g4`Z;$x)zT4D}u4Hu=-_5jAeUjNEN`NI&%eKMvLn zFfc!d3bU)HX&8I#MfFDVEdd3?-+`?L-(>Sc-5pKgc*nKf1?O!V zWFmn1nXUy|!21_J0V-GDj0yl^RD&|spWlXQz{%9#qN(;oRCg;lcIHoJpboFnBH#sm z+{LzmXF5+-f2AZb7H1^P-F11I9&jwJu?6**4o2SF$}pv$LGk?ti*Ul?9Dv)b4d-s= zo-hJ%0EMw`2_&UA;^dLqXokhko)0CCW4_qVHJnq{H@e#+T(R1SgU0I`4At&5Q=P&B z>$Tbw_EuBP2|f~DG8|Bh#|@zy%))U~Y6JEIFJVWp0D$R7#MlQ2ch}$16a?qyAA{jM zfpAW?IOCysqjMg#@U*dRVe;hvDO3o_9+wP>FK+e)+$2IS)(^nZU%-@x zNX~J#3;3~l8}c-~<8=%qcy>)kUJJt}YVCE7Okh#QEXC%2W&mA4qQ4vNVl`;!`du(+ zoY@+0KgTi+18eyRuSW7IImZYFuP&5%s{&VS zDy~%g!MsrJt6_Nqe6@NVjuPapn{UTO)ux4x4jdOBUjnv~K@GEAacEm5 zuAfG^M}JO0F>Ctz`5R~ z-U`um3pB85ZV2AC3SM($Mp07WZ@_Dfd4C&z`vKZ;|5Vxr;5ID4F3ibWuGtD_ z3=H;@VasPIF-cIr`~#XSHePJC;%T|W!_P9zb_E}q&1zMR-odj4RU_wcM{rJ6a<*V> z)HuWlU>h1ph1FHVPwZw~T_n*tW>M%7<92S#cWApTsBeTi($Az4*8&Y7R$-iQ-f4Oq z-s8l3Y`Ekz3y}zZU&F;&8vMQtAJuHCB0I~<;Y+Kw`0)Emc6Ruk8gBT|12Z)dp5DC~ zW{dAKMvof41Wo7EmVe>BL(SBjj&7#sh^fGeJZIxD^KY0rmUH$p2(vVvLvgreaB(lW zh2uT^c9CJrHw-K&*_sU+6zGK}qwuX^%hya2CjE*F81|6{L+x*n8&L1Gma~tzr39y` z#K;=Y8AtwTbgp`YA1)l*a+VncYtt=EhtKzt4}h@8Zs}(F;yJ1QSkm5adsp5i#HD=C zFb$rFn7Co!Iqtc5yt6?;w7I-NY)L@gSeRT+)THI$t73NQ@LctzIK*WWQR9*{(fA9s zJjoFd7pN!lvf_PEKy{Tyu}Q=3%GDsY(+`?zzh`TA(oM@MdT>8cwI0ARsw;#^EHNP zHYVpr_$?{{*fdf!$w$gh6aRKehBtm7uG(Mp!n5JHc(D~Izffps5LLu`TkT={qARg4 z?vC-p=SO}sun5BroeXh~_+>YRSdZFu09t$jB7~(So_nC{(RJePxYO!(T3f)`;k51n zXPeWyofy258=9QfR;RU%bk`${p91YGgyvC9B*RR{&uH1F?o`kk6UHE0k=mMrmrU(c zKrq*7Y7x6e%V|#24jQ?~8#$d(Lji;30!{{H+D0>Nm(zC3X}CBsCYO_BV{(-|Dr)h* zPCr~YdMgF>E$8s|Dz~FOAKR1zzg+n|!?b*UEVQp0g^wJkwGPkG(>{4`0IewvplHP% zZSQDuERLAN;*mKC>(*-VVaQs&u}-lmlHFyhbDG+OxHBFc%1BspkFqsyGC06gK?7@@ z*guK(A?+S0P*iszL1|Dgi3X`oVrUbIyHReo<{qN^ILMb@RNHZ55=h)$2m{f%v>G9C zF0B)vjiRgvNsd7G)_3%g6N+42>Bs3WEg z2dTG)0P1Ze;~1G0J>2vQ*63WiHFIedTN9%|^%dTeBWld_AQkOvxzj? 
zlS4wm>H}b1RBsb$quAP)k@20$T>1!G^S&a$^=fA-g8H`?#-JGlbY})JKysBi>#ELNzPm%+5duu@QAOrS=bA-v!VM;TRqW$dpaKS zuu;*pQSZLgL!&Io`BJOq{S*Wfw#xlq1Zq(`X%PS~p80POoB9WU?41x){HmUdml;(j zyo~%2NM7Af&Pq6sloeZ#>_^JqFSxQysLV0_^KW3yX73}YPkcje(J;c4t3L1z-FC|t z^P}kel0(8d;LBxprXb;*=oL;lU;m8#G1lKa(f4yub%&sCnL!N>VFVykEs`?50GR~! z6uJ5au`fAKf3dZuiBAyM8tE1S$$B?r^^Ov45JyMKfQ($trUX23M+VDTGFV#hGyrYN zkV!#rPa_o_{g1yQja0`^w+H3wN9*|F_CH$3ZzFy`r=VU8)i#I`SR-2#50i}*ufx+^ zl1^j+^EW#q!N(b#vp~c&5p!w107Q6Kge-^?J+!S--UmmQ0-}&SZUv_k60q`Uf4P@{~x4CO~O0>r1ol#GgXu`~$k+ zIUmN`e=f*VFx6AM1YwV$qFmuutg#^okT(hdkX0%aqJl)I512k-f)t2Q(BOCsHbDc0 zCXjwFwhcP5(i79tGHUplNn3 z06(%}I#Y%65mDgAu5+WA(j`5t14!FjKuEyupYuHJe*B)NcVYU$EtkmQnnxPa$$Ej%t=o?+ zV3F#v&r@MI!#gVdVC@N#70nHUvdzW)Ly~O+-3h)*)AoKB-n5h-durOnGlSftaGa@$ z%-+o3aZ&x3c1gVP;}LD5uTh!xipt*kp{L5A$F!Am<>_q2wF~X?h8J4gW1)nh!g3C| zsdJ_VFHp%>(9OFo`*e!X+r^F?IZ|_ipA%S5QyqaUH|+TWR~Sg6ah}9 zS0gJk!}S2DQ(N!_)_S@+gRyH-$pB4s6BeZK7T~DWQ9BoM6aUW9BJY!zAP21e2}vi` zl18d~L1EPydOOvmsxN*{C1Maam3XqiIYZ-(b=Wkr6-orv73h0Df0TpgUQTn)u_(?l z&QguO{zItaxz9-*z=;&-xt*<-)Yv*v8srEiQ$8pY7R8+0di0`9n zda{hKIw(Qxe;vga>REq;K0Tnjzozg)#?0nOx@dry+|y8rnwC=uEAjqd}X&PqCrn ztJiB3o5uhaa6~7bf1YY>qM$im9OCT|0dGR5pgvo=+oegm=mqb1Ar={uE^iShd`B%t z)G41yq>{_lFf#J@5}aCF#i_W}XKhPA0dSCuYT=E}PZ=gd%huqPtV zZG}N-Y8PA1kp&Uwj6tQ{42@?Ctp#c*`V`?Z!RkcmoTKrsz*rOuo$5KH?nPmBH~f{< zh08K82=waz{*8oD3ib|$mnFF1{TM8nNSJEPGZZp}ibC||w|+ewZpP*;4UE5&XRa83 z`oGihw~~&nrL!4k=N;64+$#^iS+j$NUoVK>xs>`}_Y$gIe<9W`a}dz;NH?iZd_n=8 zN&#)6L0+{(;oVER2lDRR2q`apg6D|OQEvK2+7Y(@M)i9@&GHpR>cyYnGT%p3ljn^Y z8Eg%Hky`8+O|JHaMMoWeF`BJ;5}v5r)8Izj#%)ID#_jxd%_j1-B*00|qolugpbiE4 z99-)*?Drkw4ceFCYuxcCmjy>|H2!2f2yQxp5B)v2mz2 z!bJ=&K&(5&;UX?!mLjc_!8To#Rxs9WViX{}dgFG*rpGLqH}lIRK=~+=SvcmMx-%uX zc=1hgcYd?K?ET-$%OhI$*>9z#pwNO#MDc}hrQu}VhU3Y!9TK-ePPaH_B8(Yp3*HjQ z?)AhtZ@~{cc<=%xOXgI5IiSY=433u;D#X*f1OY?tohgW)Rs@THG}I3g)sap5mMl+O zC79gef4>13>5&%-nYf%nxy^fBDE7n?T|rqpNO4(Gix7kGD)X(xp^u7q;7Yz)2 z8lN_85}!Ej277)m#d>Ne3+)iUnkEeo=!JCT))_K?ajF0LgrD8Dhq5~-;KtjVTHoQD 
z1AsQ*J&8D*pS;?8^S(h)2`>~(Z7`5RIaJU+a8+81vpflJ#0Z)V29kR+@tl#7-IZpl z>11oBt8r0Y)CwroD;z~_cqG^2RCj!gi-e;F$K_o1zC<#jena82d(PT5{5Xea2EWwd zQSiC=Sb&dd_?UtZ9X z=&iLFa}4dnjyuj-`BKL@tOg!~@xkIF8Xs!MC54@LoK<>8MFbW2_!AO_N46t|yLwR! zn<$1mq=~3Q#&C!D!*u2`h{;kSI`a4%%r3nTSl#;{Nfhx=U%KK! zg9B@;b5R=}HmprVyDul}T&(fn)MR3JWx$IHc(L{)MD;O)!)Imh^P%8g(?n1`i@gt~ zGKL>ws`2qv4o@=TR~L+5vV`A5S3Zd@XsYK05<^>=ScWlwjvy`m@d+H&wEd*vRwS(s z+UNtsLK<058$s4L?MP-V9z2K-@7kV5CaV7Q-{jraW)rT{7*96dg6-jY?t1(xn_-WJ z0uqgYL@-*b1V(npjDxgyuun`YlK~C)yv)jHz2}^`A3j#}|bFRj7ju?cV zuonxb9bZLsHYoUPqbpkt&=o!|(dydIe7@Q1E#{D{w``G}}+JH&?x zO+yaU^qabzrZ&!z#qbk>3OdNZbrf}>j$`Hpc^Ypg!}<31z?31*J1g_wNaVk>G~S{3 z8o+b&6aInKL9U}bCFNd$?M!}p z?FoL8<5C1a+Hpz64|H7W#rJkx($I|**J8DI-d-Xu&)!QU_8EJzB=0mbKY4H1i%9lP za9rxmCp#`_`6S1sK71^IU^ne0Qgz?Gq+4S5B6sns_iE(HvDcU60`Yd|Mv>KOirKOT zJdS>d9!xi|o8034aiLuG0_$BWI>J4iNhdbTmW}yD~fmNhiD<6d0E_ z1?IbQEuQT&;uZXvG&!7+#4JUz4(_ZqBtwe-?Di*U)gbSun*bxV?U8A)u-RPoL+RAYaIeOlr~ zTX}fovhQp5^uwMy521XhF9&|}Tsf&z58)>pt&OQ2!U2bDzm>oE7?mIxl%=J3k{;-A~)y}>;QbP1JmjLCZOz0U|Cx5%mv zap7l3={gV)@dkOkiMrflC0Dmt+dTNO6|x!lEKtS)WAZ$O=SDf|0UDK~@Qfr1yOT!) 
zAmMv^~3`?z!syP=!cN6+#4ey43;|96mi)XGISYX2)qANg0ROuhc45KtHc{5 z5K8=gd=(JhI)%6etnM|?b<5vAl-IM5en?2oeq9eVmL}SIGt!PET0j36#-l680Pp9) zTW=#B!P@-&G}d}3^*lM~==p$HVX1G1^o%4vlJlu2gV%9+7<=D0&$4j!&Uoe#^Mv&0B!d915kk0BSeAK9eUhHuisD@))NV)Uq^gk*tm zb+?{RPCw(!(;S$EJad?5DGW|&VSyd=0U#OpCA>Z!vI|F#i;lCgWFSm-bjRB7NIwIJ zwSPdNIB+4XN0y!rZEFg?Glph<0y19{WazsHs+*UjnA8WZ*+=CchxgIvZBicz@@XWg zTm>yxlIH*X;5Exlx}w~Bdo0(OQJ9>=X9p2gwp?)NK4?|&zD)17D~J{gnnw$wD;@Q3 z5Bz>X`%hg_(EE%`_`viTYO}=FAZIfOd79eK?F+1#qxoBQ>X3owl2L_Jm@jUDbuL~y zEpOB0ihn*Ou{o*MH1V}lm|U7Ysct#@+G0Q&dgoFAcR@N1C}(w|Cm7#_vA~YR3UI zSZj}~yq?|L5$9A_9*}HLc8JfzRLt%jpARuWGo~FxZrp%LEY;BIh#weUD}WR!Pw7x|Bh-poTa07D#TwVLYCo> z{>grVj`S1a&wX$m0)DhvvThT1nDBJcc8g%&X07eyd$W7LiK^5X-lZmmcqgrJ+0gJK zYjAw4Ad0^ji!sqwr(rPFFCxs&;Tt@(;dJ3GXWapTmnaR)4eaqw49FW-;KfQ<*# z%lFe2-403jwoTl15~Ynai_^YMDAzdc+o8k>`=vdXhiHjzv4(ENq5&A`#676@zL#qG zB!wR;`j9@{#C%59Hm07WOCT?9SASf4%{SV~8{H?!8?BPPikJu`0Av6o0Eb|jK$M6@ zOhvMLe~5y}x6u?5KV_?qIS)ZdVrx!AXYVDug%{Y`BgE=oy?7t#B7Pt$oO`2Aj%bh_ zBN^{fkbN>Bxi&Eyo_sRizc8%!ynPTX@!X7Ycu|Xluy*lnpij@;m#I1u$J};^@D9de zut=C=dyz8)(a_)`0UNCJ#WCcwSWThVnpdV+1zSMz7`h%wvN+3S{ad~%y%UcL2yu@z zF^sdbleSkBk*PX*_U+lXHoVc(ChIbrV$)&5YABE?!LD$;{1Jy&+?w5YX9 zscv!}!cxqpF|a}hO8yEyEayp%;#C~@m?%wci^w{@Jmo-EK7 zwFcyE7B??|Odh=nB@=rKKOOKXHezv$-1?eMejwBsLta}01$!SR)viGAyO0uvDIPmv z3)WWe^UznXG~!rohKFM~gX>4!LZvp?LFZC% zOM!MrIVU8>G~#eRNYtV4N0@5d_AJ~lx|is`d;9g@i&~M$5jR8wF1CE=*H+W3P9Fp; zdT+Nlt!|^!+G42gT5K;6)bBOmz7%`0)zPl7mvFvD&YAcG#F2D@EpetoQiS|H$D@7=#uhvGPnD z-GHjIh4gC{4i7BR9+N$_)WeStDAaJKHM$JY&55G`51#v&AT^%yUc77^P&AOBs7*~# z)G8?xTPb?(Vx+bZD(n!SI1<DPkNOc&DL;Y`U zK%ohYWeK_e9QnSKy3paChdYsK02dqyIgH2yYJ885_#9v@lw zxC0*}@R8uSq~KZS9BIol#^a_y%>l{FJIb#%o9v)UE=k)u#36wk>j-F%p>JP0NEjD> z2M`8>PyHx(T!v%YyY1@g?kV&!Gob4G;hrm0eI0bl&N&>c(P|gnAIiED)M$yQ(F)!g zAlj4d&{JsfPi_1II|WzhopOjalw5Wd_tX1mUD`%eqlDdj`99f2k2eX828U>%A9pbj z6~$BqJ!i(&;72p7Ghw>wi@MwQ4T>6&RU?T2dyuW|kD7umL&KMKCFW?b|M4SM zRQ-;#cr0ZU10mL%Y}j$9|E2B0Fa7df&`XR(XyB#yzJvsCTJ^>@#pdg*GGeS9q?-4P^n}#=WjWG%l2A7_k{pgK|w9+7-?%iG 
zTy^aG3Ltogcs~qmgWY(s%Hmyjr9loW+El~U>X_c@EPneKauy?+s}BL z>VX#G&fbG}OmM8}XlEOpJ#oJxhIHorw2|pYMB!GtGrE;ZCorE@>IjyOgy~5`bfnJ$ z;_o#%anm zQPr7wZ0(aO9QzXujuxNehZ`wMp5Kf5sIm4@P$K_2q;jr#3daxAVRCZfDO2H)Yv!+x zE~SB;)!|J5!LM$i3ZsuZE-TpwIbvYi!PeZYB1>SS@C`10EzVV^0IDy5`87M29y2(; zj$mtY2n*5!&^xx^{?Sp&Qb(@G>FP&mM;jWkC=xg;Hs&I7)=05)KjEy+l(T*-7LyMn zKn^HeF#YH>9RsTR>}`~bQg{o#Cr%m2Fu3?#MR0=nK^uUmPvp5S5~IZ-EmFa*MPnc$ zQjBd0Eb^_*-ee2;;v&54T69rHu=s5=ss5s{)Q1p2-33rLw9*UnayIT6&ZDr<%>k71 zYlkZdlK_A&Li)tF$wEa?XMo4io)1;{0;eI^nn%eB!g`Uyzlg$*@V!cm6il%EH=Pnp ztkd*`;eFKT=sORn=#|Za`s+P}^No>JzGx{V3hh4MmH6mx3&lrVvmYN9NqxT_2*Jl4 z08}It5{Vh05SF~vUqow(9~JBaWdI7h%k-lMuj0paEDZ35rAK;-e)IXnh<40;Ov(2l zTB%snN*P9e9@7T+6Vxdou=B~{`fq}}K2lt9AZUtdhMxBh5f^`R^&+jA*YgIn)(A9Q zRJQqBNt@m8fNZWosbErgB{I8xb~m6hja(7ZL9UUoI8%(y5)FP(`@iIjnYF)I*PpgQ zWP#d3t45sHo(jeJ6Z_@LDa!w~U$#SO7PsJ+DC7F>MnMwDWLiF-gex;xe=IshGUSsC z7k1&HmcG)V7V8+kuQ(-Ad^=oCMV(v89-AaK@%%jk5Nn=jcIXOj}ef+jtIP_ z!m`zW*+pAJ%;up>CW-H+2J*D;$Nke|cspGN`8=w#7n-aHrv5}a9)??Rlg0W}spfx^ znvVw8>}@hS%TPP2^lBtugvEJ1@=40|g0(pMP%TdApJSay!9MD~+>)##N%dhjAd~x5 z1haOZ*Php{-YN5dU5%AIzj80AiL0fq?Ow~l4wQJ&gsMzO-LKFeh;V92O4HmH``;EAW1zwy&lHNW$DJ_kPHo^-Fq1bK1UIxRsGW6adc;}559^gc}%!$sJWL!deYdMml)!M`Q~quj~_vvgUPcWd1}cs+CbK_ zWh$ajxfN*t+$2s3gF$^;Fh~ktkch!?uU2P?H z;Jx6}W9{ONZc<3QQCKQ^@P1Qhy4x!J3{7vTfpP~aqz&=ugJ3*NgH)okf#fzEg(r^( zn;f)?-+tgXIdF@gld|nFZf^Kx4a1N#OsF`snuT8E6NShhw&cOQev>^jV`&TfAlcMk z08?k&{5N4bIr}T6>L|{cc0jMu;aswh@>jRGeJ?E?UfgVfV(p1LZ$jWP5uq2S9LHm1 z^8BR00=I@1cvDD$i=_gswLV^Hs2j!!H$1-sg_Y6v-WKN+s?hM@#x|$?2?*i79Yj(= z>;O|JO+(RBYXo_~;#{=D=v=x7^4NA@lTYwdt2g;3+9&Va6pTRe(N?K5Wb|!+KhPU; zDTuK|zpY~Z2ei{6#~zEbztNc&tYh(;6IumZMnJRYnu%s!r`{m6kwZqR!ssl}c=rL( zWwJF7p#hA;qe1fXDD1@s*KG<)elGP{!oSaDV(mbQ0rI>5Z8Y^^Yu5c1M21Bu+yH0> zcCRF8dt*}(6Bl4tWT6C!_}Jf9)Y$I0_y^1Pos?xh9=wR5POrQ*D-RFflK||G9;H)@fIL1 z&wNzdvhUBwVcpx4jKt`?LH|1GmbdM=R#{Gzrq@9}(lPfFHCHc$Ac7x24VmDPL|6qr z)KIzINGneC)~25@qLhsai1UfW+Kb)mi48L0YK6|KkCsI71FxZOlOI~$oyKdiZYp4m z@!*A3$bVfq3hz!p(O)5>Cl>`+s0d4!TIo_Z6)@7B4Xxsew~4_R)W~}8cXZEVAH+=} 
zwr0_9*cuKzKJ-9WSY*!(*)vu4Oq4w-vS*m=>4zS~^*o=9MwdFffRJb673BH-E@{pt zqtn;zSC-&e@(@iJ*#KV2P)2CqNwe4s_LvzhbTP*_v*Jtb4pH!SR6X zX_b?ji3e73m2%Rb(9_+Et$hJ^ecq3zI@Cq4B5iT(ga5|;kTDLAfggdrIKn)>6I z8;bnqWnlSnw9TKqOf3-Xry-j?8l@kndOx1U_S?jHhFl5^&qPmcSAfbsz4IzmcHdj1 z^bIeQ`5MdbFI$5Xf)Xddpzz!d1;Zd??|+P_59hUVj!SCR;lz;g1f8_h+il1IaHj|6 zFMkE2X$5#+B@sdJWA%w~FC)EyWbv(}22Ul3o${N6;R(M{4A+bz*0I^h{ZGGyCZ)ef z$IIpYu_=29?RSWMw0N#!oNVttXOB!Ztz!STr93~dA$i6G<@rf2=TK-KWl)}Dd+5no z$hGx9fiu#O>$BZ~TrcgBqAL;7us3K1>KHncw8_4B=;@v8-15 zUR)8#C?07ew%w8$iB_~!-=ElT1+|Tp_E`EuP+apS!k>uuHyX&*ikuKhOn4CozD5C0 zavx^*iU3{W%ohoo+MT8@@el~o-tqO>ifPGwW{sPzy`Bi3>gQJDs4#T}3{;zOmwxlV ze7+u(!knWq0t){AA2?c~9o_TU2al0C2)|ea9RGD>xurN_X5gc@qq{d-^Bkl}`y|;4 z3)TZIcdEDlgD9FhjFSDd5mz_j%#R7Fdd`v^*la~$H_fSDgC`jQWs@MvbEl7Hz;odw zH5XFF>%D-@8Fa!4!)8lq2sRHwP)ylDusP%(h)p!e@w`T6m<7}T1KV&5iU#8y-AbIo zXOMj#i&N?O;&PyRr~1#R4i{egOBKs_F#&p%c}_jLsI$QR_zFyRtz zgjJGF88=%~N3@5z_T_Yd{DP+rGoz83>?u(BPsL=LPbF{j5f~)9^Cm#5XBH)|Sta4y zfgOR2Pwt@Y(kkdyZhx|s8(&=&al1VR*1}4t_@3vfN#smOOSUh8w1xItrL^K8pm1Nj zAFT}WL+@V&IG2+sM6YK(L{(?pX2%;WX}67n0>tN0Tm87aCHL_WpAXMm=|O-nNc<)A zcw*Rs@1r+Dx86)U>DC)aYF-_dq=S;QQhz~7XaMVdOR&m2a(HkNZvq^!BGg=lj>C&@ z_B;-+au$ws>UpcJ8x~}mrKl8d=9KRAGiF$F3m1rKr=Y(6?+iKK)KO0knc_w0F2kW| z$~8AucjxgFEy5uvUB}TYN=!ntxbp-`_C5tY2Xd5sq{%2WiLb)k=Sfw_oEUiPq1Syr zbLLQfAeT9ekHWSq@R}>~H7Q8G9x($cv7EGrH*}|bH0`Lk=36N3Gu&bpHUc|PNB@Z; z+__XV)%rF~mv5AN61a&t_6NGmOoE89CCI?=`cB|{Xqm*3Q@>3z!$1291GdB6pLk z=Rb?AInD8dWB`%0}|Xlu$~R5@;pi?FaF@Nqn0W zkJc09R3Kqqbo~vbUpzJPB0nB-KSXj%hZjQ52JfKkZVBY}ti^G?agSoNl^8|gorKUp z%!G~HIaYiCKiHWj_hjxi=*e%rv?uYRy$^~=PmYx?+CwN_eMz<@kZj1j{X({4{W>>E zH_#Mpry|*?$U0p02Y-{gPH&H=o$?yQh%K*>?{$2C$``mi@QS@S7}=a_`*vjLTfWmr zphBe6;u^m)9~DUx4fUNy=UwEQQ9+&iH_1+HzY>)yH!OiL9|xX=f5RDivI@^}PT|*E zoU68xJ=kW`cDewurU6#B6;so8^(*q>O_ce>HLuB-h(t`x(xAD|6g;HK%eOp%FIT+o zgP=2?xNSR~zUT%R{njb~<<4h_T_aoj9XWhP#O-IEq2h!mnXTCoA+7l)63;-k#x5`Z zhLISUq=u5CU>elgSoLBuIk@>~8VUt3MIg`n96?HonYN}lp{5PEU?tJGXrN11(&nENU=G6^@IjmatA 
zK7>Vke?lE@1lei*oIqg2y$`0GA83+mFz89taTPh6%4foqsoOTo_7^g(xMiKPPP88q>szRF+*_a03|(2x4v7% z{c>N^Qw9)oAH?W)DLgBXt=tW>@sjGhBW}iHR-`~!BskSSK8*$9IetcrJSUg1Ln^`a zN+^8L==YzNcOnVnQojyiPa*-+g>s?z+ed(me2ddw4S-e~o%TA^eV%$()_o%U)uQ)h zY31^0DU=CxbO}csdHnu zOHCnr?k(bmm;I*zh`}*wOWc=F(aRie1F*J=B6NoL7F+?!*go+TX%^0W;;uo2l2E+y z_MyuW9Z=ujNc1B5)!Y^_;pI?-88g-bji-`(Yt@dYa4e_sebA!joNiYu|EP}>?9 zTk6M9)m3}Y9y^?vcPa3)6%Ii`@lHPWPNyKE1a}m*FvPA6XE<^A_JF1yK)ERl6btz{ zN{@EW(pPPW{pS<(pt)V^SWZ^YN`whJHT}f2o9%kK0kfK%Tt+()v`xKY`))iAo+say zz_AcfPJI1EvQ;D;6Zwzmwp|Y0w$q~x2<~6nr;(fPjj4^stcHymgV2IEBkL@#xI6Pv zEIV#VJ!BCYXVj|{hC0UVbDJIK6!s4M{D-o!32!$J>_3{bS}Qg622EWAD&|6xLiR&Z zOPD0>hD!UKNxsCRGwFI=X!L*UB=~zQg>A6S848 zKJH7bCZ&ab1;8TA(_lk04O%06i$QC`+epa`#fL1ZCk*wfXoIhz4)?oESuGVm;x*68 z#=u<~c%80VGhfn@38(SPeq^(<(LfVzU|DZeiUNGf1mVUfF`!kV*wlSkSvS(H?4I4$77PV&5Np*ft;^}P_t7_&(Erl zpgs-AcMQ!w{jop1TN<+W9H)AW814Z%+6X!FQn^`vOUL3|{1U;>M!eO}p2RuSt$$${ zmC>cgX2Dt=56JaYSLj{T>Zkr9$w)WblZj|c&Fi0zz;DqY6LG4m{zATz^5|E1@iJkz z;-Y>Gb9LD%)Q$;fI1ru;Zz+t!@v?Yq-l)Hz7-erZ){VLzyqyY=p8lgC28{~Sr0+qF zJVmYtauj{RkSl`5Veg^!gfkBBOo1Xk0Z04~>Glj_w6XD&)AWT0XQb-PWLS*ZS68ot zmFp1p{dp0NbICl6nu+{Gv?a~NN3!FRlD{6Z+Gj%9;PN*?;rBmEX0ZO^^(s7OYxtLf zb2Pw`H?9&AyvH%%jjmp&yw$!R{3=Me9TGf)>1N^>aaH!3i+)Xr-6K)s;NsgPM_^BM;4frt=xV~~CHq6~l#pDFenE{I|4QK0z*Esivnd4&^ zuW?^Lf-Do>d>XM^^R+#vdgC$0=GkPs&@$ZKjFuXR@15gB&A)=B@DuQ+SkNAj!ZUC8 z-`XTwc#M9AEI+8Z$@#zI=Lo;Yvv=z5MZ{`L8L-;AC|Pac2e?cZMdRPeRwK->afV0G z0J3`H7mCdW)1H&G&5f5i(~SMZUz#I+frn zr?&#VmDAH3jd=X8{!1VPBK4+4;62ESWVUuR{kqk|knT7Hz>cr}GcL{0irs~}Qqv^7 zd1O6C;>~SA7iCnreeO)d}Dj?@>o(b>R`*XuL&!w)gip3p+zXpJGy7G53VLVwnxZv>qhp$ zOGMR+dm^YeYCr|)4{2=8E5xJVr{W{U@k0dP&v9A7*2WW4&0}I7>Ll-y0?%|qpim0D z9SclrF$Aio>g`i-jR9hobOV=jE1Ajq%xC1Y7UZrWc#U4hv=m6hGYB&891SeIoobNn zqWJ6%y4xsjKPXv|V*}NG{D|>V?k+x8p_PC|2W6wa= zf8vcUo+GQ zvebIKj}w(5?1S|Yw2rS)Uv~}yJJ*LGto#k&g-2lo?_S6-?`Ro%b4y_Ndl@P3_7-^y zsa-67M%tm7Wy!R++Y@(=3n_5e8)&SI7T~TpFOZvf_a~B72puZp&?@ABT$tf;pU?5b zOnwtmPk(sO?|ETp1tFu;(r%x4?B@UoU;Xu(AUyqg0E7|*;o|9Qfbjlf64F)$)v)E- 
zHB1evp%>Qh=(TECBG(WTRKu{p;(B%?`aQFWl4CyIq`sN%oxmfC^mB`hfe(Mew}I({ zxI7xqWtfZVn{cN_k9XkuSo}yF zk0;J=z!UNS`$*xiVdODiqE-3~MPHZ;h#-DU{N_=h9=r?8F!BqFOYOJ?1&qU3GW|r* zWtcHAw``lV>rw31`TPyY57MwVM?i1#aluPiTy))Y3li_1fjIT_hwyv|!g!#@0$1z>>#<*Md95BTvd3n`C3y^CaS`ndpSmkBIeS7^4P@= z67SEX9jGPSlH{bqs7HWc?*xqYr5f5y>(vmed@K~Pl}`jA_N2HKg5E?)$GV8;}uO6(8P zPZ)5ID9);EGQe8YsV>0`^~^ZKv;pkBAEGfJtQy$bcF1f-Yj^c5%mDM6qx%N-!OKMa zKY{+5$#_m2X0XrbrYu{FH}m6_Th$pk7S`yduWS6TTQ`sA2SaBVU533c0T=b*#^!7o zrrj2{7VXvO3#XjGfT<16E&K>W>S44yiQvC_u}VAJl_$>WBFBBO9?(~__))Mb(p_x z56N;9X2!bAz825BB)@^JAxg5;!&G1=p8KhX-;9q6M|Yh41F8KKuxV)P^? zOn?o8Yqq-Kti~WT#2ngkkg3Dl*Vw)1;tV62SmSYn<7_WJ$pAAV9}7SWZ9TIZYC@cnclzX~4Ii4D(ET0syraKl1{R zy?-Ahyh;+XXacj6B)otLAXYP-r%6{0a$jsywvr=Zh-<(a3)8&;-VhF9`|l$s$Kn9_ zGP=8yHr^^UlB#1`4QQ#K1OiEyhL3S{Y1!KIK&{>=f_)`mpLEY?(mjK*d(bErji?Tx z<3VM){R5q4v|%7Th2JOZ-GEjV4!dy^Rkje`aC zQ}~+gsQjHSU*dDbR=kbeenCGBbr>1wTr6vBP!V@eCZhWt_tKJVAB$ffC%ezm7h3yU zYzJVZSd=^Sw&?l@MBY~u`b%Hga~7X zhVSx5)J50hSGsBr+CQm_mUMo4v;*hRK07b?PS(HIGT{a@TEdtWp63}^%XcUFs7v5Z z*4~d0tX72rGOY>)#rF+>{ zAY?qXdSOoRsa46`UD6}Ltg)bm+y?=RQt?cy<1 z6ZfT&kq&7axQ{PT{GT2s5P9#x<;8Zz=CtZeJs-J4;hBk;Y4!FLzKpK;#*(( z|3OZJ6LL6bABeu$gE^tgD2zqgD4k?VqAX|+NVQN^>c9>Bn#h!wZwzC zbw+@FiIK|s3zbKwh@0^SLj<3bz^4a+a-NkpHu__NSookM4lbHSz^{H#!u2uHx|Q0$ zR)=8t0|){XQ2pjbq{d2Wo0CPMj|f0NMuvWjXF6``!y6-UFvZcc(@;Z<)SJh|M`>Nl zq`D^4x)wmtiz>oHvv5;@o+c!PTO-AnA0X6tk?cqNq`fWLzw!kNNp$m`>XKnaP@aT$ z2ZrmBQLnc_UK~uZ2yvfo#??=&zW#zmI1f{|!hfm!Q2cx?&L(l&H{+Zi_a->vTP|1; z%p=?RK0|fp&Gtpa{PL9eikEIEqv}?D?`C>005Wh9g=kVvA-Bo zPu4xB=(^{kxc+{kevYJTpNryS5cV$cAC~;o8JgWfv-kHN!w;h-LAUDdeW3Tv!WYog zy&F+<^mzx+E$UY`l0g?Kep4rRI2u+=<%AomH&T`S1hh;TE9)GegIICmL8@cui=UDT z)5e>fLk-SN8lrTtPQfMEV@V-rzOP*M2KM6wLu|+v9WlBLiU~cxm>})g#gZ*J5Uz&c zvCN?3i$VK!f&ql7?ppV{G5({B@WOZaxdvgb26ysk%qI||Y{Fx7J$B#Q;{AVDE;8j{rO#*b(Bp}6mJF!YEl2229Pw!Tzcjh4Lj^O)WR3Bb1iNdIA z*}0xv#)!A>y|JE*-)-xuoTnE%-wJ&_^l&9qb+i5B>Z%C6y_MWz(;jsGw8a!?BfDOT=}i*E#I{gGCEB}|4YE;thER2 zgVWteVKgbk0?Ke_Q~E*i{*6B0R?a!(>$|bPYmymir1lvG9izU#8+%}}`zLm@`ULo) 
z6$lT0&yRCX--?->)3>1GB2nrh$&V&I^S}=!;?m8jUMZEYq2*(~1M1tcc*m$b>MTLW zh-&kXsf>_J-fD`~#tpXOoUJejDutu#vOUHkM3{v`Vm%`dWl5$+$~1!* z(9nw^h8wmSgUKa?f>EOid?(m8e=rngbgCi(;}lkjxK~d~5bp_lJ!^wTwMpfKXYhar zAUtTFjcy4}2n%SN@uGBozQuN1l0}%Wnd4IR#r`rF1w(?gb)PoJ6?=HLEg{~lJi%ps zQlT->y?YVwQR!=NnSExL>TtGT)JjkTv3dXNOYAcmzw6w?-CT}!S_YvPPOvrP8wfVD z*5bP1;RNjSyEW;Z7UeO=A##ihras46b~gKd%|va2g2}GGf1(atOKOWrxb0WNNv{bw z`3kd;tu+eTYveWvhF?#L$_^k>Wh19-=Cax$oODF zoRva2pf7RcNGP-yoa3r7DBtD;;k%rJukqnQRaaams^&@vw@^>Z%w59u0UZ6W|qXrtz@W z9u2-&@Wt{#wvDE&)`}wr;h-t&xcw%~17*a*3-)B7Bl|e-5Z6q_n+k(-7H7owpq*Cb zK7+3n0EM@F2aG}k0DI4^(2PCwhW8~ngmFuI1@NTG;4SSuBI{b5oX+CZgRGmQMFZ*u zR0Dq6k>3K{%e6zAC~mK!(_^j>L-$Nwj>EvIbEs#0zGEUQYhxy>7as_@VQ z`GIj?;+ORex7H7^xaKM91{#F(FyNonA_<8W!oxe8RD3dw$mpJjW2-^Ph7s({R&_Sv zVP9aWjz*O;ThrO34GPAZZ4p*iwhCC6F$1XHp3~VB!OsD(Q?61pmkPZ*A6UsblXH&N zRC|Kg2@%3<73Z3-$vVJC6J~d)!t0GPK)P4y%+_``Dfm$Us)m;LNEoP%1|bJHhrziZ z0kNXBq&DNHqhR`WBZ73uHBw*_57_8@LY6Uy#oS9iA6;7GAxxCK9(BtTL48EBqKCf1 zDS#tpTbIOjWWb&x$96iz7=K|yXkqqV2ABpxju$wUJUt#2-@TpMtOa)-&SQp7MuEa8 zT1fD520;&vi6xihreQHu;T5w4*R_6KrWP4i68T0r<#;epRmEC_vxMcHw+bB=0JEYS zxn4pt^t`iQt&iu;Kv2G8&_^F|uEAT)$~;Zh3e~3nWUbI_ZZ``o`G71;2EN4Bp*UI0 z(54@>D2>TEMqy4Yuzj3{`kP%jv79Z!oYlx{Vddm#OfX~}toR-tl~0=mRWcB;(`>WU z0nd(Aa3I)|PiUh=zmlB%(2-%gB=1f-#-QoP%}xa(f$wb)xCEl7h*g-KQD&i`>38`& zT^d+~FM;KoN#~k{Zj;bu6e?rIySnfQ-3g%sKl@kn9mzOL12c=ob_dL3Ek;2!2p_v{ zC!-GL*1GI0K1sDH5*^K^SmC751l;Ni+=RkCc9*K!>`HK$m94ljdIDzaBywfkVd!*N zObXemOvR>=LbfK;pxQjhn3cJqpD}BlYO~gqb=V-dO;8(aYJhrSF60~y38t(A75gC# z?~8>FY(zH=vjlOwO8ll|ios=4uv*~qMix)$>^GuvkktZZ;CTi-)L_+8&KSfMeTm&e zh}hHve;_8VPO>EUgdJNh0`@SNmhOW|FokpFw6Q5<1RaKjMR}V^=t%DrZ$hJdA;yFs zQ#{DlyifRYqA?cfdPb6P%$U`&VF=eC>cF!(q4uCb=mJTbHFjf}Qh5ttc5nwcM7%Vj z<0TG3cn{}XpmEMf`a`(}|4uIF3|K5>t1z<=%{gaioO6;*Sq*G0UIgGf0IlQcweWgk zO^mz&KfkFZmsyjg%YJE903zJ_6u`8d!KUOO;t}UbbfEw|hN#=l!YZSks#k0)Wc3Pb z5f-h5wamdAv1lN|(_~b$T29NxL&xfMY`j?u!8A7Bpo6d;!kC-`VLgQNA)Lc%>rlN2 zQTY%B^HB9x8Z2kyokV-<+(NH$Q@74Vh93h}KgbEDx#0edvzhY1eG$To!QIX7GHu4| 
zw?L%ROOix(ip6Pt3^=;QX?lZxu-3Wg1GCVFt~r}c$8_pH5AY@K9Z0kpEH2PHHAL^( zF6lkPagg_?a2$-f;r}HZyqklohfoBNh6$!JL_`1qT#{AL>&(j88mlrJ#Cy|nw8-lT zF`U!7o8Xn81xh<5p(zAEPYwv+=i|s={8$772;hnFQLrX=8Z;K+U|@C&rC&>`8>WI^ zt&r=aXmA>m{%FR*zmxH#=(tJ$QaJ*#2GCFPFfaQm!NOv01ncD5o2h22Q+l)Fa*~moF^M5XVY| ziaG+$;tY#$%px2RqUopWm0W(hyUr*b>cFALkqg^}55-?*P~j<;iV+||mB@DgJ;0Ig z$o~}9J>8D{c5pfz`Cr3$^f>ZG#*zOWgFqh8w=oE!_?MMI@HtmX@KvKnn43W+29y~N zzyN1JNCOZ#VHU^^8nNbQm_Ll{KP2V(_G{O6Z23)_?>OYtP{vacm*O_(pZy>|>|9dnp(`WqN+E2!K)y``W#)lwaMJu3I_iHlV$(jP?=y9)aA=@lb}UmbgGM%qahZ=EddX?d)esX zegP?EG{OqC8-+7iyIeQB_p7MNXj4oBygL?8U8~pI7*e{?8CPXP%Y|(RCG2f_-Jw}nS4Jsz@R*M8>yr-w+aT3iw=Qs zP0oGOy9^{nxqg930Ej*@IKJw_$*hxs1WlBafSHO|yjr zSQX6B3LLP88JbR`Ldo(m>@IK}?Hv|nizOYZIe3TXS>_sYtIjIS1As1AQHe`s{-ZF~ z4Z=XK;VYF{`H96fLnSWrp>@0}xwx~9Ks@Ng#ZQ#2wF^{yJPb|zm`^Vt~4L6iQS#si*vY>%t|+^(}oH;v6ie8714kY25qc1uB^q;&GRne89mwlb-ux z{1do!lGSAAIY&_vZV4s%5?7OtiJ73=Oj=)J34~x#J`w$e)bFzDXm|;RO*&%z-_Wk( z(oOi&a_J4++dlFKC5?nYdY~$fH8!AKM?R%{j=P9=`JBrml&M=9n8(r&`VyO1VvpfV zY-CXb{wfK!?2S?}bj{ECMs3A7_OvP7raP)LN{Cku%NKgLFLf)uGnhSp5ZI%2iW>Mxd2zS6ArNIrU}#|k%Y zl%2)#+^kYWgv1MR@p%MHl}?US;C;q-N|AV2I}X1bspNc(*7OrUnc1O-@V>ZO zWVe>gc(%5VpuUlV`I{5&g6>_HVi5`exKa=#?t*!G9ml>tFZaxsr%RU-!QQ#2bf4 zGfXuu#*;1ZNWn*%!PiKZ=VXh(A{>$qTjR@7;zc}4*3@(Ro$(y`1P@fd#7Cj~}8z&=VGw2WB7gOJPV zXrZ|pvDY#pf~NQqC&x0()@G9qVMG21Q!Z|&=@(JfhEU~NT#z646s~`Cm*6E}L{s#f3nvVe(oapCbL>mJE;w-RvU`LPIzJ zJ)~njoU6VZhizM_&*v`-csIW7iOGYpOwrF#TFQYRC*GGwI+2TN`*$?cd+-H#>rUCDi2a-Wpkfl|GrCHF?j|Ei!z;OMl3O8_+a-l{Qh1!?PL*6va_38KzU0sWZ)b{#y1f*i^WoC%Kt4j%7X z;PKX6c>MWRc>D_=(ICq*bMxVm3aco_@dG?g;UnKi>(|A@Ll2LL9Q2i=uY)}Lz+>A0 zcmQY&vjZNAc6?|DL)=z;xQF0l7(Dbkd;=d^gmddiOu$DgKA2>9l;finA07D6j>3}h zp~pw75n+La5u=@jtzHO^ZSaWLfe$x6TJfPi<4!Cp55djHc0fZT+VP=X zhK1rI4IUBs_}Ge%R(y2WFj$Cn;G-2EOc54^j~sk#E5>(MpbsBA@ZrWsD?Zxsf#)+K zs&Tu2D?Ya2qZJ=ov@O#g!UTNmz(+ei(hg(7H}J^8M?OBb;-eiO49?A4+c!N$3gsiMX(g`3-)* zO71TM`V5GHpGtBWW&+I*9YpDIA$1q6Ua`7t?P^_7m940pFD+Xwm0xw0{L2aoL-KZA zC12I`J;!%lCEj4O6;)R1)|8g5EWn~ccv)Sho4;_OSyx!V7Yvu%ZQ%K$Rpoq9p{_v3 
zuPZMqE!342>sH%Wt}N#(bme8GtNEe|$ej&vRusWQin>~Pn6Gf%>Vj3Jwme%wB`+sb zB~m!|NN$(p7BJ;ZDKnl~#Z1KI`+Wr|do?^)GU<@U^jI$a!v*oom6e8-E6Z%bv?ImOtSBl94$ms$ zZ!K79FAC4MAUIwj#hX``@}&hUOYbh4Q2{<%!O8_iHtaZ470I`42*sC1Dr<)nj+DZC z!qQu$aFi5olfuzbSPTnyN#R&2tQi{meu5P4C53fi>1knM{m@YOxS^r-TWedZ1e zrI!*ZoFIj(!_v1(;XYD$yA&QQg6mFBkqopu2JoJ5?6lP_74G+clWGSqc@fVgqXLu-l{P0lzQ$0Ks-&>_{Zz;Sj z3?BFJ(Dt`W;ZZWaM$mAw4DX20^js;NDuwGtgqFX3L@2&mrSJ$T+$Dumq;See8oo|0 zUkVSB!c`+f`Ei{Tj+fgvGL(OY^UED!`CG!u7e|KD7n2-{ADt8)aFz1I`CB-D3LoF$ z{2-h^h10)Ysy|T*=jAOcs4Oa!(Fb8$VUcYmxa9?vFk1%VEBQhQuk?rclG4gRL|$Go zNF~AHH89VXRmd?=?n*h3mseDPw--z0t}QL*^CmK(;Yt25$V)d)2DhT9lCLPW@p8O< zHB9TvR~HphJW8q95Z&scwKCkg!lIQ$d=WDp{5RbM>!E2dq`*HNB8J27j-@nTS$R=K z0lmWT9e&gYHJ^V)Dd+^dUC}1@BH^A>s_~oxD<_ngsEE*%hpTA~VSQwEwhRH{v zvaDE+X7WLHDp;wj6Y6IDhf$jKB8sy@<6^d*x!H^h__=0jgjL?cTse~3*kBP z3yX>i>??Vlt!%ZNptG$ktF%`T*z%DU3a;_KRDSU5r98#?n2`E=O!lW!@UK=1<}0n> z?SP5G(#rCc1-7D9MXPz;s)BO4okb#-N|umIrg z6{|@%<(oJrAI1OBe)UsDXt?Ksk^W`6mgrv&GWlhzR+s9KhAK;uD)IwJqB+Ci3s1je zX-JOzRRyaHmcugV_xd+1-&OlLxPQVh7gFAp_`gPOq+hyt(v|ub1Qohs$R@W_PA}k# zs!I7__=0i;)A2Q{yMkV?ox3MI{!aNJ184C6`irD(pFJg9t5B+8{08sWdSUH1F3*5UCI^dhKh=Ub<&Gc6jut>RM<+^ zg%_=xQMRg_(B0f+e}bm#EYPl?dh-ibm#tp6s?1)ggZ?S1EC*`Dv>CTfCGDCzmFC0M zHmt~t#+MgVKt4bgU0vp1C1H8d>LQpg#v>cWi2SgKF*2T^dK!0SL6|;jWifN}&3QA> zKViZI=9Wbk3yCje!s;i5gh3ZnuMP2ZjdX}DDJ#4pgQQVl)|8bNUZuUv#xXkh$#e%I(WmmfHNu%pH7*ef5f^Q{`|`6<}($Zo~{2 zE@L5=tpp}Bh_5UutKh*?x|%2Nt}KJO4P5)`N+NuqA8{`iyoTt7fHQ}M<$6GhnmSeL zv^*#>Gz{`Y(aM#j<&~wCFdWArK}mn(f)f~&amuKA3;ecnJ5#W#oEay<&B$;w1a1N| z#ucwDE5NwIGC&4Ew5+U5lFcAj2CawqC*DZi^hwku@)yQW%8=oiW~U0do1`$-n*j|# zcjAqCGJ7H6^hts6T8w~Tft@eIFbbPEGEy*NEY`pF#>vo*MYqhkW$xlz7#Yiigs|OV z2+g<4ia}eYeGK*sD&-K*NI38wVLt#zne3C`qUyF{U?(K5XtjNnj>JPf;2Dj-(fEGp z)Tsr?(5BJ%FkD``8pTqK8G8rAtS<(THlkZbX$BsxX;jY6`GyH`x4tI1hotz4G+(=v zF1y;-B|ZIN>b6Sx@}+b}r0zDUyd83RlW0D@luvf6rF?oRU#paFt6ZN{UaOR^UCQ@I z{uDtTh5rwOAraw!^o?t$UnBYdl|M#z^Ub>8wpNV|U9ZVu{HcP%5n<`_`W#5SN4(1G zbKDC5gJ}PMxn3g*=jijXukcI<+v 
z*c%i~?8wkG)EQwPs-`Fy)hL>l@ka;2|L`v(U(q9;x5m@>ZIY|)Beh%3U^i%k^LO+O z(+~XL{I^fS`=0nvy~pEHc)R58kX*Mdv|YjeZTSi&Wr70tc&=I>;ZZ^9IbnIR4Ec{* z5t7R}Xt`IZw+i0b2I*DTsFxV!1*NxwKfG?aEIBr~{%zN&UsnKp<qq{WkrqhL z1^?Axkjs>LYzQ9Gdd`orj|Zh~56j0D^h{UPX;*Jo3B1#Va9*WO{6iSE69eff;191` zE~_WJc3q?XI(VlF%FCH}?fSO{rEkAV{dHHVziQ&u>)(#;hxFFpsb4!OsQr_Au7CU0 z+JBAuTe1C6Ue2W7X@5}q_N&z2dbRdXx_bRx*nUWF{hj)?=|SyJ@45c2tF`|c^(QTZ z_Ct9&=~u6x$aq2N+rsi<8S-C9yMyv2F;tfO6ynfS^M06Db>L1O#4xkqw-SE$45FqX zXc_X|GKP7|#xS3kFpRns(%`oZeuv=qZ}_1#1KKgH`+;FTgkRgQ4D%WM#{9-GuRuLx zK<=6bzh_i{EBL(uzkkB-1Na?;-^cL#3V#2E-*50sR4bUl@T-ngFpt1*JN$lxU%W=a zjDp`R_&oqWG%P9q3hKuzSApTQLLInIvK7kc0~7!J1;z!V$`^V5a#PZrS0*&XL%AiA(a5azzo3F*LAb$r@XQeKlXY zs%YZmv{`*2^Cl_L+0jvYB`)t){-Ct5Hk@e4pw0NDpWS0e6=LPAAk4VcNZ?hg{B7Nr>-c7 zuae3%0Np6$a5C9=-;u3gCTk$Bi01ivwgN9amD@+u|4Zg5h)gEMrDI(AoDe)rCOEY! zm~%>h+>Im-=7$`7R{|ji!d-G0nuYv6C|pVC9>+o?#4W2NVWu4L83W;RsSuECeg-FR zzR|GIfE}P9a@mB6$_iU@8rj0ghq%X;{)>$#jw1Ri7t#_)X;~Dz8a=4 zuVUCjB@?qSf{Da98LnOtOs`21X~dl$kroxFVB!EyVjcwLn@3VxF{D>?6_H9NGFzDw zkp}*u(ag}vy_unt26iO1_iIgX_bHFh*XEG=3!@ko2!Nd*Q$Ap*h8a3PBu^TakG%6b z3pF%b(@R0zALlU4IdJdqC9MxBd^w5)4UNpd#CAltYg&7` zRprY32!FZ%HBo*w2KWZL7=9Q(*Q2K84oHn*29)$>5*0(4-W{>+(XASHuX1(13fiT; zOv_j}m)gI*0kELU-@-7*!QC&xc2b6Kenidy#IYig0ciXs$*}a{DBs)%b)$>n&*1ly z1dC#DEHk(;jv2fV+{w|*;7Lgx{n`^+`?$5`iUHBg0G?$A6!vBYER16YOpav+Je=6B z7!=J6D(uG$TA0WTnw-E4x{%P(r(Mn;1^G3QKN9lyVg^h~Y>y)G@NOZr8|oM^Ke3~4 zdwi=5<6WZ}=9w|H&(SRKxiQq0!@u|bt-z>i$4c`NxX!WEmBUeq8YYp4UtuJZ2rwm1 zif@-`?8~v#01^G@UMug5*HJq~^rJgq48Woo#l&^Qv`4i@y4B^Xd}Ufh&Lh7OmdXsc z4)Cd%*kd{lD8;~NW+32i;Ld&>3GIDaweH^Karv=1icvAlsL3OlQHmrcl)B|Q$Mjfc z3f4I!lIi_rbcd#0rpf?Kom~WUcq?A{L;LBs-un*IJay--j;l3U5 z?QCnDJEmO5^_o0tgP8_%Ai5Rsdo(B1#CC`u63q-*7{v^MaXzGDQ2T(^{_e!`gnY#y z4Kt`?KzskzM0elvKKb!E+B8KEeM;$G3v|x{-S=h&R|L^L;UO&WklqaYaEug=gfO_1 zAWYtc-@=$H$BfjT_@pQ%i66=&6%JvNCJ$zk6a$&~_CP!T3X@)?2j-zjX&m-ZGkuoP z{*Qq^8<0rxi2aI+QAKYR(;MpVJt?|89(muse$3FqMBsr5%uqW1axyM~JT)EW?cw-< zUqtleyBMZjb#KJ?Xmc@Od<-*;AHWPNOk##j?#~QU^kL|m41@JlV?D!SnPK}9AzuRI 
z>kIk%FvCue_T?x>#4sa(5sfIcG9xBim=Ow&A#|IA@b_Z+e9ba_0B3!6#^lhsA)1M< zfVlzr#nK>2o5+AFmjU{XGuGbTXsu=-nRM8tvAUN9Om! zItgsCOx(i&8{&eGf%l`CJ{5oqlGdAv+bO5TL0T-+=V3@26UU6<)0r`a6PYoS(?CAG zjv3rBusx}@pF07|Mcn-iIQtrK2Ja1m_Xa(jAeDnKY6*-aUod_jrfnkgB!4BXP#g^g z91Q{-0j?AYAvmIQUT>K5;$Y5;WfC3++U~Dm`a_-l3xPL4z5P#UrGAcOq92BSMwozR z`ar++xd7v&eRmE5?Y@;Y=WokTHllYma56igHz^wg*rO$FuT z;qn>&y=A1X&T!>Ajp%(KKEH>wDoNKRugh>9mTbHtn_njuoUgivd@>x{d#DTbFD($? zaTQqbuce22x^l0~*VRM5^4qQsPeyjnc)1!Z@_Xf1d9N`n5s1KhJ>m6U3usJX-XlaXcPBz|#kK_y7+P;28p>AGD*#Qv`U30PW}ThbHuR zo&b*%;AsMKejyp!l1z_9;Hd~aV1VZf@OS|pMZi-Gc!&Y5?9s>`&oZFxJ)S(kg9mu5 z0Z$y@fdf2mfX5ARt&2aj>PMq~Ja~ZT4)9F~>eUn%@H!S8AKy#c>{@cS5k9{BwVzl2!|W+MD@;P)r^ZGztu@OueQ)$n^9e%s;ahTmuKy8ypPvw|4|zgh5`2fwB8tAJk} z{GNl~Uif_mzn|b21?$oQ@S6a?Y4Do~za{W1gWp>CZGvAd{O*U}Bk+3)elNrCP58YH zKR5gi!|w$AKABBdauG1vRE(PG1&d7$6U9U`F$~N~3`jDtHi!9)=?m-LM5Z6pA82#{ zEYAlqgP9@BP-YmTV}>&$n2|s-qnOdm7+C+0Wv*jVnQ_c`W&(3Plg3PBCNb&EWab9u zMrI1gC7H}rCX1QIOlNLlZf5k1fyriOFh<72%w%qzF@Mo53(Zy&swxUsu4M9NR1_8P zMKjTKz`VM+3?izk7M4_$tu^@%Q{>GoU0rBcUASoV%F@*sX(f~%hBlhGXm!O^lZ}#5 z#FZ(e_C>4LLihL~pH;-?tSsP*%PLm+U!WbEcZ+3Kp0TvD98oz*3eP8&Xc(Hma7IBT z^x~~(Sb{+#HITws5~inGz>5|`RxAXKJna<~MXUL|z|or?;_^^ARaC`Oqk@Xk)s@gY zl|}rYR+aYD)_wRmBj_6EA16U)I>F;mhyF1L>5w# zl;QzLt4db`!OLN23II@C1~0EFFQ~XOUV1-oWhp^JXktl0WyuwhtI7C)s#X@QD&i~F z<*h0xT@6WV%2pKRK~tA6Cmj`*PE7)X((S8_x6qO#pQ(!YZ3Xt_C7}qvGO`lwaczJt zAO~w%p?zf$5J(Z0viOP=O1}R1YaO7GvRKLr3N59}DhiO$;l<@ed|o;92wE8ih9bt= zR~J>46SEv9ue36#Tc|ZFlSizLARE#!<1d6kQs|*bjIBf@7ObvZNvwC#)CI|CRnaP2 zNd)dj_ML0l5Vq25#e`xMW@YI~N^{`YxX>i(~C z-gY5VunaA+mNNlflf)F0BW(kiRR#3So0Cvq`MSJH2IhR56#EaPvDHYxEafVtgfFdv z@v^3}9GE0u3@^-Mfbv0NdjQKIxeLlm$FHg!4`XON4A1e>Zp8SN6UR>+PpA2?93@2s z<-y6}>C}e&+BpEYYt#T7e@zj%K{>uw&F5Fr*@{V5m{!}$3X9~4|4*v<1*Mfm0hxn2 zi@9@b3(JskaYY4%kZX<7jJW|%`%gFS#*qI^#~BvJi;>dz=rhtYv%Kd98%t%@`dNQT6uY}lmJPM=S%a-hz$M*>Zek3Ami2Y z;Ti9s;(E+S#K2pCZC<4vXlg4Z_n2NDl%dD;;0msRf0Ath4`ZX!pSj2K3QDgDk6&gE zRQJFC{y+Yw)k0rusZBsWxb^VUf`2QxyWp1uemA(0@gU=YAMXdrfL|K;Q6FZ2-(>Kk 
zUThwwO;He?)aR%x13&7fcK2bJT=2Jo+YY~cj0d-90L;VSuLgH3{K~qlF(4mjDv0~fu^`)n z|1EIWT*ojeQx!}VxLLp>7lMBpxN+m)UGQtcJuCUY2KR3h@jLd=c5olOkzuxhe=E5E z$bj~Pe>=F3XEDsr;79kPX@D=Ns}0FdJlf@VA0Hdj^FWb<_Dq z>PP+aPRWm#t@JTb{NSA{(X#;m847as%2G4X29zVtW|*=$G=Dj`+vd>x=#H}jiXlI` zY!2WG-v##{@GAlTc5weP58#7x9tT%9pT>U*?#l}p=2s}^Rd6d8f}9WWsFO`tL~)L~ z**K7V60#ITCwpWutrvB(ow?MHI&RbxBW&oFOMY};mHg;Vz60ula?oA5gkio0Kf1Md zLiy7aM6WwO52O}|NB! z=%7)zn+tw)UoWKXLS64+$&dQo3z8pox8sUvIjFatTSVhg-@CDhwznGG*QEFz;O>+B z=zc8u+rT|9`F{p?bTRcCi}4%;Eg!G5aZB;&_FhiowcuX2oW_p>_j~x2U|YbQUP58e zgL_{I%~J>N%aZ?9aDOQU+JpSRf@}B_Ez<~Yz2v_i+!K=j-{6i~LCZv)_d_daU8v{& zWd$uW7QcDClIBPEe#wvS<&`u)vkKObt7tie;Hp>CKGJ~u*VQ!7^WZKhqyB~9ZZ4C? zLK$A0L*rH8z6`%l!T&0_xcQ6z?cg52OM)5P-y}Z+lI++D>K_O0BFUc%?#C4ro;GlW zN(p9gyDBBR1sBgtV|*dF@A5PrU5%Y#-nv;q+}9jH(>Fn1gUi$aZDD)CeGq=)bf7D6 zN8Af*c*viOA56Xva0L0$9au;631J;2*w5Y{|#>6 zKLg(Ze*(C#JOy+H{#U_$?`hzB;CF-j{NJEGh+A-f*aq=X{enSc2JU_EtARXq z;0}62Y9qKWzd`%)Rd9RnpnSdt+%aT7q<(bQ9E156;Hd&P>IC2u^2dU!KLztF#G|{t4R{Oq zkAQpor?jpm;J*GD<)u5ot^J(VRR`{GpHrMLUjUxJpmp)!9&QIZhdf8X<<8PP=+6BL zcs;Z|2izOKfw>v-;1yMM-_W}7rmBy>q3!w_+|F-lzu>;sEciVFeS!O1_jmv&;I9Gq z6#S0h82>-Ydk(Osnq||dQjH+p07?_#By^D?O^QeasfzR_gpL$NNK`~@h)5AjLhsEk zASi+r1r)(XM-Wj!KtVy@o&**3Z})rm|Ni^#%lFNh-959jv*pa}?AfycSbmt17sgh@ z44#_+jvk(WUI4y1PtyUg&TB@xF2K?w;F$qs!SC`k-!k;=8h{T0@Y4md0o(&X7liu( zRvTrE4RwGW#u)m43&3$>44#PqPmVEYU@SGxkVgjKaRAMrEP;2Ry(buD!T9PV=ogS) z2XOrqXlD?<0dU`YMi_n%IXuna55J4ln_<+&0AQOL27fz%x6OzB0Nyjhpoj6v`7r!m zQWf5t0_pcKx^|f5jZ5taAW! 
ze+Fd$8hC%x@z0F8L^;6s0px>p9PkcF0E$7F3jZlR@C=(l7{>5BPP+aC*b)isIY{3G za07sT5JsSoh}QsQ;PHkA?F&E`wmSgl05DKvye}nUz*vKAD8Q}&#z5X!fa3s6!~F;_ zybo|pje+4kfX?a+d}BTx#uM}DFoyRd!s#%E_aegSFfN-9!Xu`p=h zJq6Hw7{){v1`WL5D0n^{#_(PTXI%!)oDctB`H5kH&RGJ+G{8pyu!Ha@z}*0x0n7nt z0B`|-E`Wysr~o1W-~hbj1F8f-3xIk6=K;_F90CvtzykpPQZUW{AOTG9BM<`sS^(4l zC;)IAK(GJ;;S0bKfCYfH0F(f%0I*a5-aEpOw^Im#cm{w4_yg?#_XC&&fC2GG0CNC5 z1TYR@9{7DZ2#>*s)qrp_z%s&r*c|`Qe~i870*virKcvGW8v2cNHVm*A2*$qZWHjQJ zUoslr0&at_LO6k|A;J=bOb`}`4Ipj;u0{w8I$aHB^fSy)lzx(tUD#KwGtUS9?ho}7 z?@1J#!wL}!CSmaTcV2*-55f;jp-n){69Ip~+)Sr|5Xt~F=va?_3f_7IKSTR~%@SFhB#-!sX+Dbnxkaa4ErvNP1pZg!qCoZ01h}{LUK>YNmi7fI2E5 zlt2v`HC6zC19`*f`9kU8NW^M@w*yJS0S{lmB@n?l)=v+?3(7E|OTsuE&;{iB-YVPa z(l3-0KVOPFpkpkt#)24Z9Xvp9z`5Zz3j}4tt>K06M!+9E6oB&odiyGXJ8S9uaSOPF z&|`2*!;)?XS9b*bF}^9i4i@v2KJ#_>+HU^;Qe8G7gy3B~bSd2dts9{I<1qsFrwvFA z0n{#ZiQ&;126Dr7p`QQ>#`VuUf9}oS?@QoA)1NvU`pEcow8Kjs7`o)a^f4U*?z{Xj z2H7y>Uyl-G{CL2;89dyHv@{x(mQO3EA+lMsp=|YR-E8M<@9ed6?%c6lL>^BbE)UAn%_HVH=LP1)<|XD)^UCw;@|yFy z^TzTJ`8@f!d?;TxpP28QADADTpO{b0FVC;bZ_e+|AInD+@D$(*paR_jVu5o(U_op_ zVga?Fyr8b2xuCmXtN>BSQ-~{s3Uv#Kh0cY6g|UT+h1A0G!n(rd!tTPcLPQZy5v~X- z(k&tuITr;M#TF$NQH#op>WZ3+x{Jn&5XC&jxMHYSx0qP$TpU;&Tbx)-EiNyvD{e0C zE*>jJl<<_`N}v+m5@LySNnlBA3H|g|9OL_uX&@gfkWc`~CPl{vbd+?L43&(Pz~8{Y+s0U_L}~<; zO1(v$rs8PEG;dlw=!s_9G)*A$SB?7b{l&xk+fXP3M3D?*W|NCEj{CrgWI<6WlAa*R z6@^6NxgkzwHs?5Fqz4*q4b)k|7Gy7l?u$q@qzs z6bh>ZhJ5dMbP|foWI6^G8rci8ofj6l2#B-`zX)PuMq6W;u_$XRJQm`GBdl0aJL&`MeC-I_zEv}=y-4L7 zqgsNN$kkz7D<-vWs6`2Aox3Tnl~}lr7R_GsY|)8#$7!N{aw&%LLbW96#z%Jq)KXHv zG%R&^H@^Pb*^q-PKTi*r9#E*en|igG>E-4vRnK>{wd-2&Dt4d54t}9_BpZJg+4y#7 zqx=OW+>ukB$E3YQ>zKCh4r@Qa;}1yG8L! 
z#(TU*{E7^8EV4wZ^xjvt9fD3&y)v&>NR`V%fq{z5q9AvH7KwtEfwmM~ff0cCzl~;g zG#@i72s1IUpwW;hOu&WV$1LIHT(swOG!Q3*~GO5x&ifF@e@)8$$Yc zSx6f0oJCmN_s+4PpXLN__`wmwJc2_5aF#x90eF6B2~5g_LU1iacxWmP3e1<#O3 z5|$`-iLt-?0Fmh-rEsa5n#6fh{+%$?acC1vunc1knL#GhbyUMReeclFz}1S1;o;#5 zq_4#)xCM|D1ATo00z(wtg8dYMJYW=qF${(;JbInr%PL516{M~J?n5qL>x@KVtRX9C z2cs_1250JC_KF%oM?8m0tBZ@|)ZPcKdXF7j?Q5~^ z_n&q589lY{(mEv3uwGS)iBvnB>nv?;%3H42@%R~`p*;z8XAj}_iJf9;8q?z317bRt z?x6eh3?A+b5Vr10N>A<&B6^d!&WlNxQp&6cQU#OZN~L+6-@7z?IvpC8DpT@LyNyEav}-4O0tO0{NmQDC2!^Z9NI$5Y!qH3RjWkEhR-(i*R&xms9LfvK7H#e z@0toqZ#?It-ar0~dXBbpY%I`T+%$i?v5x&|{v_cb)<|OW!Afrr^oP3ICF(C7V^{ZH zIo&i@H{J0;F;|FVwz6TnR=my~y_q9NJ48HhxSveey@Hb5P8dH{K|100Q3bbs>UKbR zjqcnHma$WPYlF6p#+;P742`_p)17s4_^oy+`=wgvvo_`O)4W7?GP{vm|C`lrtr|k> zPQSH3G#(k}obP^nSH&4Z^)|xci<$SZYYaE8P~5lK)_1q;PB)s(WT+;oYcS7@yLPI7 zowu0v^b?B;g4kS-04YS${7ux9^ng-grxfFS1MtihjeQK}5m6}V@GqqBm@8j-cfn5ljGnNZ z?<)sl;U#8uA&kkDn0gMe7izwZi#ryLsb5cLF2AMGhFpTzd$^x7(tY2qWas66Wv5L} zzVhDM+Ld8d$u3`C_M~8CX%t&ody4%XXCbCnp4*2AmQsrq2a8yV56&7~bnLjHfDSA2 zp14bzSiObDKWTKOTiw0b-(4jlm+Hp7>Y?tj>Ap@D&bBR)IVQ4$oL8yX@Tp$|@D&6c$X zllN8EiHELm5|FuYPg+KOMVf}G>VusXNu?r^c?F&$F3Yy{$e5gRitk|A6)d+9KHeO@D;?v%r^vx$Sazv+G^2o2B=W-fTu zY7{Q_bQ`=B*I0R=-*yx{V7A4sZ{1V^|{a z+=8bTJgna=krSlkL?Fv9nQe6sa0>x7^C7teN4f=uD0qjGAl+{aP>`}10Vle^`F5f^ z(|&X}H4?-^fGg-ickW4aXE+q0fD`>K>+AI{E+?&fQ>2i>!;VldiHt=lyghD7>3S*A zt&tpw*F2mQ!snS=t%OQWs-wrA#OMP?MC%TYq@Aro zW+D@7^bS}ym(Th3X|pKEX1vrA&UtiRG(1_O_m%r?1FeWC$q8(9Zer-}Lz8!meRj_vhK))(aO$Q&E^`FQU4XC17*^0 znqN$Cm8Hw1rkQhTEqceg|M9YVfaq6PoFck zv`|dNr1W&B{A!#}TBZGdZ~uV2i=p+pOw74RIT>w=4%XarQQcYdsh%S-A_08ShI!iu zbmcs<>K!dz=fEpN1nQF5#lNlx(S%|Je(#z7$r58INQ95y%iHfnZzVzg z<0j5{wM%nPiE)ogybaa)gPveS+6zXyy~d?3T9yuBELG!{E#b#MI-xNdKYs9YhS@m@ z_jHF8UR}SjfzjnA_v~-sto8S5zE*sI#BFIG`ZBypN(FaIqP<#X@%4k;>cg{rs-&5# zx5gUsFFevjKT<0jR=JlylyH1o)@AEVNV8hOU9JM?*r3s@G&}vc2lq2o?{8D+zLuey zwB>EWiS0zOnmwyN)>$do>8Be#T6$}oVu6m4?Q&JX5*wPloAx(0wwvjEe$iz;_+nSc zeJP*eQI~c~@ImEOX3H)#x9r#9x!C9znWrkq!5w$0FW_{2jLoOJ;}?@@l*Qu^W$~MD 
zjf+Mj@s!0cK-!ZZ)+QF;0ZEN0B>LC22@2AhdC}L-%QN#YIF(Y+at5jBCT|x%j}Y7@D=VC# z)rQroke>c3LnU>kRRmQf75oZFlJOXdd}j%7@?B*W>Ou;{S$PC+_i^)}Qn(Bt3T6tT zpkFLlmT{23>~ElaH-LV~Fw)`Y$@$K|r4i<9hQ^Biro{7V1EMd=;DP>7#%qAyQibQ^ z1V|Z=pO606_0gZM_mtOc5wYbgk!CHO6#WvHAaoqxzk>g^dx1C`q4CS(n02gsI6apSS5}d*YLU^eGP;`*CNw`b zCx3b->pw?DUz#ENMH!$jRNs5QM8+)9P8i)Y#qn`SIO#?V@!Cka@)G&OhTaLLYa0!& z@`lFWPkH(zZ}b#)=}2Ygli_zEeFIvN$BYMWwz&l#ZMt0beCXBj$yk<%L#At+9xgf= zSb{CdP;|ThaGew)L*hcdQ0AA5$qrrNX50vQtiY)29Uqh9HO1RzB@1Nx21FWrM2v>d zs~)&m;vb65Qr_{r=bpg8*9opLXpyB0rnBxLD|DtrjwJ1BX5RZmLwQYIC*JFaS!tWw z>MaTQ#0mW!JB@@Jl)N4GlB^*c#;nu=GKb8lCe(G`)<*u?tQ7Pg=shr4;Ta=DUmL;G zXQU1bW~A`qs_B9msrFx+k^T-I=!j|&tWH-KX ztBT{5%(D_4rqU8zt%4`t&35;3_F8VWP3n}Y-RHbi=H`jF_UV@E^6@d}6?SxgNGad(fZKqpfr~}GDNgxQ1F}sq>)uQ*Q%t`_j`rL0 zmv!Trpc|&*_oq8wZ)4ARehsUgZMx>H?uzS2I2&_TYP=Mf@oGAJqCH}WGkK?m`T>{g+b?Q+uG~DvXC;lnFIy`tbX@d48}{%{7s$Q> zGTybn<<0~*5G|#6n%X zjyjPyRXjMhKIz(xT*np{tiMd7&`exxRhP@G(KTt&xq&s|ZT}E~WQ7ynzc&rMuv8HGwlj zPJ+lse(=J{kk$e(9RIte5zR~(F1S9?N^$=GR{nwyPW-_KxB9C-ILpiUjhwxW-Dx`H zAhg!0ESh-0+1I)ErtnuEF!dIE{%A`Qgw9@X@VvdPYie*TqUL7)=@or5)o*+^*|M(O zNHJLa>9Wx<8 zNYkU1h%$BVrBjNLWRE8W=Y9Kc-hY253K3ui87!Ar^WY6SC*_E}Z1sgXOi$n$f~oYJ zb(!Ml3j>Fhs$88;JhRn(HGV(cGx6Y;ZLTpd8cY`*mFm?P5i|{QJGDccRDUunbIr$? 
z=#*`V5xJz+o|7}8v+5nmlZ`p-DRJ2~r-e!}db=;3kIj%!$DLm>;6CzUvUZo7{R-|V z1r6KnnL&lj3Ecbnuejv#-aXO5weCc0LvvGJ^Jw($u^!DJwl2reGznIhr{zSkfSU0h zMAsmmCHvUw4<73xhNkn4i#~J)?{e`C2rnSt&0{Ops2XX!+d(-O+$1`AX)jVN_LJ~_ zxlJ0Co1fm|v`&xO7oy4fGHuoHp>8FG#j;a%ikqaxKin$b@KokfMZvR_$~?Wy+YbcS zns>*|$eqaI^@JW)g%ZrV+;&W*CnyTv6fd(sU;5l&O)4>{oz~i5-RIM3d#$Ats++s_ z()<%=t)s=OQ(}VZiq#M7m=s1isYI2hqioV5ZD)6>h_POIs`~7q(~-{5c$3963aS;N zpfu?|_$=+e)2+yqWCzt_VP;y%g+>XX`4IPFnbK++MZ!)ZQY1S0yCHIH z_O5@}W-WL|nV1kLCPaVQu0xQ70QWav5D%%~)f~PhsLUro;XuKBb38Ffyduy}?DGs@ zZO#rKbcA9CD)|eR}Ibj0T1=&&X!pXM<4A_%JQQ09@I2)RUhNFEABjcz) ziHs(b5r?*!t>-)IN-B*!(y4U#)&-H8*&PuXD_bS~hfda)ZP1n)8R_Moy7N&kjZ0=Y z%;7Mhn~IL`V9Q6WQ~kn6J1E%bF|nQ~Tgy8fc)Vs=Q&w{cQd~j(Qpbzp@SC2+S33&( zVzN%p2)YfW9t`VOyAfyS&R>tn3>NN9H*7ewy(&2XPeF46ACiqes*nA%J^H&Y?80>= z3epr3TDY*p`NR6cFKb6xT&Xv+Y1HgIW8b!%>gm-2A5Vd9PW#MDPr$BBmT7r~L;! z3cuZOpz`H|yLRa-uh}6s`eF+;Cv_@Y#X)nxJ=JH5u$6xayO;b=b`D@rv<&y_;i)?mJme%C4QOa`5zJHQTCurpl~hW_iKx z*q*~T97QzU1`Cc49{ae)X=w8L%bj0}tTSpIqhF=Sn9B!ckgk-r=`g)7$ML_{k+!^9 z#MShQaPcV3A%2;*=Y=5KV1(6i&lme%hH2!cJY}EU>hsJOn%yV(`o!vYwVbI!$i=}q zg0oh<(uu>DCi&FXjCN%lur{eYAvQt78I~5j-h1?nN4YI?ZhUTbl_gW`)>KQo)?31< z{Yt*?H5Ko2u*NxAdBz~jG+)L%+9#H$J>?$}G2J@0LRGrOba+mrZddWq9qVo9_u>2x zP)h>@6aWAK2mqR12~z{SwnxxS004lI000sI8~}D>a&>NWX>DaOG%#N=E@W(MRa6N8 z1M~V2Vb%N)VRd*5009K{0RR956aWAK?Y#+HQ^&VBezO2!O;E6apixnA!?36zE(s)f zqd_BXxDpIOs4OYm2<|{YjaOP~ZCz?x+)?{=t6J+)K~!)@t(B^+xYes@MXQLdn%|jw zZy-c%`~Ls;{k->kzn70UXU@!-Gc#w-oH=La5@RM5V>}GQ`0$v`7`6{%K2q!-d^i~9 z)_%Vmw%hSchkYE?nGU0qQ?kVR%#3N7329<&LV9`zA)cxeXByJQDd}SQh*9FSj6_|a ztE-bg%R(N%ZUi>yjIZUnJ^ZvE7hvo#20BJh@pY!lPWuMY`Dgv?Xn4ZUfreFnJ~TYz z$Dv`cF9Xjpc#1Ze=KYS3T7hAS3+%D@@ZU6+yn3vo*qQ6*hxG@fFO#Q{!Iy|m=5jDL z&BZW#08SVK=}#FW%yyeFkSnUiRI-3KaVD9YieXCtx};%P4$3THC#4v}UZM#~2bxX* zcVO56=s>_!3>*CyQ`q01ov>cZI$^=I3+`iMacftiU3L;|Ec zGCVOfk%L;&N#S20LxzbRP-ie&R@l@x=*j~13?V(nVz$l1xA4o%%G6?Pc?k=X-$tiP 
z&CoJ3Lb9UUO?`td^4*g7f9rFa{v;&{+ZWI{ZhR1cn^gW1+%(!hOm33bpo;SuVEw#Jf(w(iK|-^^w+9&v=|q{B&mX9?XQ(+UJ4a8af`d<$kceCG-oXSy+*@WK#^FVq=hBR3m_DwE)aWkf$*tL7J8Alz}@NRXT| zfa(t1>cn;J8ZZ&z4P*g0DL`CZagu|RQjytHGnkeFj#|XcH&07PL1SW^{E3>?5%o|V zSLcX_`w%c~u~7Ab;Ym>>2P%jsR)mz*HJrw^T%1>KoW;lHv{7k?@^NjfKuNkds$R(E%ZG z0cOuRU}H{G_W?KNNU(VV>N+INxCAFB`Ab5|P;cVt{-btq69ED<&GGk=jggI&p=Kp9 zBHEIda*2xGK40_s9f3{w_)IMmtWeh36T9xP{O2B7{oq6fdf_2U1WMUY#8bkh~6!8-kVLa|4JjsErOlVW|;`vYJ za`FYNF)>+L2del5Fex^Beue50lPlhs=dnn&l;jH) z`8edNEIf`s7Y6dA&Ot`%VLpPX+po-KBgG*G7#lf;B)n)G3}tu#C4+1gSsxznMYvGc zV5#8Rc<5@4>^ro7#E{2v@**1KGjAAV@1|-_*3~vNS8J}Y4^?n5d!CMfkz>>8R{o`nwI$2KoL1!K!#IyyP=*A7sZfgRb+ub z+8$xyE?(l_NUO=EMJj(a@K=FEaU@$Q2ssQ8iOQ7f4`x>GFQ!7eAkx+TC5-9RFcTu1 z6hCD$T7S|pqpXHf^tFEuGI@Cq7SY>=-G&qZAuw!(UPbPyr~ChhirgTei+@6fHb+UG zRg&jbCS8e=e0*H)uK_Vo6p5DpJRBn%i8hqIA0T0`g@wJq4It;?#z)*bU#i}h&Lvt= zze0)(qpky}#6S#YquDH6f_w&8AOcKPWXHc}HWS^H+Oe>z#|ngdos2hxLxtZLa2iK~ zcd&Q~M-U4=>QPNbzPw1)aYM=?8ihrN!AZ%BK!!?KcpUm}obAOI79K?MjcT&R!i8ID z(ae@$!a^h^D%20*Ibuv$Pyie#e}t29VyZ2j7#4;G$x)HX0aRgtr3FJf#NFU!#W~xV z;aboSarwoU<{E|(gQyF>NRT;U)Cov{lHlYi+^7k|C~r>;D=0I(rzFo&ujsU}P|QQ? zzXv#ar&XYiIVv(*fEUHoE6E?J4bIIYEaF;5Xyn?AFcTHct;d85`lF&k6bax(W9mUe z0)SPi3KR_VM=U2Rsa0Umg(5}i@51?tMM=V}8bOk9URi{qOlW+?LEWBPN-By~rn$LL zJ6R?a4e?6CfkNo6SQJqy{3rzRE~msW0Uvdx)g)R3Z7pKE4Q&XhOmM+Tg;)`CTW(79 zPXtD(g=NA5q}I@Mm{1gh1G}mxfW$q46eV4JaNmneJj*S4UEOA9~jpQ-6XHRhg1u`=1M8*lDD3GlFvuA%S6rK8!$U zRZH{_1Id99B%u6Yu)xPT#fT#gEx>QQElUL~a{s=xOBSQYLrzlZTXk-{VAtJZ})~KMlwmMEg^rv}UTvb1HyfNiWGk8nEk}MeRtC^08{t(+{buu#m$= z9YU%SgCPU3-T|#_ST+kBpoFdfiw#2@Q9T7&36#auv3B&zDn^Ss*J@Fh3XR+8Nr4qz zdjM+1vn+O)WHWoUKZ#hBGCN6Y&PEt4T=dFQg7+JTn$C-x{`QHF4k z=sv2Wb8`kcnaOj5JYyY)5TZ;9Lll4-9vkOYV?+Q|hx|(Im}*$$gb(;mZq!e7P7LIf zBLklcQZ|feP@I+;l(td?104qqo&~7p(5n%aUM`Z3v}qyYMp%fL)k^ehL@N=74wCrv zrrE3{)m9sgBfHT!T5mK;lNuI@)a##-`Th+Vkcjc3NIoJbon$85A6BNuT&^@p1+WY? 
z5izC0{AAio0Cy8s!*zdpHH6u&2D7JK2)$@%e+4%sgVUw=LS8!KcZFPyL0-sxoO}^J zo-Zsw`#P@ufKScnfOMfs#XBo3xC)uz7z%N1HlLc@fp$0Tk-K?{{5@X8%qJDUeyxdQU4@QBC6u<&?35cA(cjPg`KsWg^} zR6J0cXc5>dFk+Fyy&hPPlLiOeTn1syL-3#Y5YdG1;1(z_!mhzVK={)hHzYZPHzfHe z$tT=o1@;8Lz=5zkfc!+VqRws~hgM$N(Mv^3jzEzva->*q@AN;EpFmjfjFF#!>ML%R zUv0De1eBlnP5B*0@*Bc$65w`=0PEXZWFlyi36p0dKmia}yoDI${YL~ikP)Cg^hQ8* zr6q{c9HK2PLX_qcLL@`Gf+76>kPunFKNb1?gumA~+Vti<$ffK3q$KYt$*LH#QAIwX z3hT{gYF}$qaFF(eeFr;vZn$1*=)w3pL??MT&maP4M`#e@d9yXh(IMRA z;m(9(gM(OD_>es|_GNsjy%h*5uWufslEb)ADAEJA#a7 zOu@mS3fBvWG1L@4#H1Q!;94A9tMi0ZxR(iEROA^jSg^-e;%wN1d?~7eaw+ zV_lTIYKS|QTth+l$iIs?=z)w9n0W9lqCy~WspFc!C#hhLT~41i&T@fKoe&n3)3(CG zzSlovuLKgAi333Nk0@nc|BTG{Z^(dz-H}GZj-)inppw$Rj%VxuLN0HXDUv051TS)( z+Y1)TxH8=2j2Ddp{lW!8cP`b#1zA74;$Fy4QwoV>l*l26 za1Z$>HMC!aL-0BGJrLhfJzjW~ za6rNjsIxbFCUv)jy(~RAy(qSo)XlDx`-|yDwz-0TsF7mS378bH8E8Nz0XjQDaxfmG zyMqz!kn4YzD!7l>K(M7M!XdML)k~~9+z0g78Z9{P0Alm+QH_n5o-k*$aj_P4R!vm;=&|qO1;lwm=2N?Cz1%*F3 zV(NDn`b;!{a$~w)OLv{ZTr9^MNMZAiB4q*N#CUC>S0QDpBEPmZW;0kPTG3^`Dze;S zpm0(qkS_*bTg{O#8tH|shOFTThv?e8hP~M}>}}&!`qB<)!x1jwMsjiE5kBjXiXo5x zPZ;>qE-6=4G?WimfNb#*e@To9M?3{Y!~l^&q|`bpi)4J{swz#mq+$TeFbUO>k!t*% z^<;GXiwlZexTrsW*+xs+unNZRs7Oa8StB!wvG0&3$o8T6myw)^a>Ha9eWE`Ws)}Vj z%2=g{3{Zy40t~K|Jfy!2We7vrQ${L*vdZ{^n=MuvXG$=GH+9Ad)Ilmm#Ps&X)I*^S zYs{K$m5gkRAV^5eQQRCs_{Rws_|wN}^wk$|rB0(5 z-9(=r!{i7>St6nPZrU>u20-7Eg!2oS<5;H3De80jybgTydZt>DoH-%_%XL;PBK zA82=t`h$n20ZQc%z)CSi$cZFj#_^_@9umKR<2&zh;R(4=QH5qxssKNKUQsmHGz2e_ znT?H{8Sg2zCve^ffova-G2YZS5c}8lLnc&%lOj-5gDVVu2|Z-KsLok#!FWe3b-zP( zsqVK?sCVDEz%{}6?NBCM@wjwQTMEFrc&fd_kjUq#-d?}E_zt|HhR(hwdj{O}|CJgF6$$Z932 zkf^j87p3tiU&Xsgt!~Z+57tFahB_$<8|ID1wa4)yelD7UI4ZjplFwx5Xrofg_s4mS zl$x#w>V$4ov@v`jE=FsFS4ElX(XL={lbO$=R)F37Lv=jaw;c2F2m>Zu@&Rr9C;6*o ziz`vcq(LE5G94{hMhemUkWRGMVjDZr-s?jYbatDk4r!pO+>t9e0s_M0#yK25Mzp2& zG-u)3NgO`49%b+AZ6jx>%>LD{lfu`lxZ$RkP!M9j4yGbLH3A}_$KD#&uB`6fg@*OaD- zUNNR&VlV)rmiIPdw5LjgA1Zms^LfozoPg%}LmUm^sG z+kDzcIhpN#jX|_^4iq>LSa2CziJ2GDuL?@*{6LA8Ljn}9$mMf*VA2=*0q(jPatp5- 
zuJ7PjCdxW~L+_)&_Sg!l7XTq1QAI$2@-O-d5L#)9K}pv<6>MHF%tR;%+~7uSLHn?4 zs*2XPkPFmJ9umM+!V@_4r#aOiU4fa>Fw7VoL9%L%(Xui)m^~-?p@UjhJ}uU^*f`J) zn41j_A!YYI#?T$$VRTLE`eGP-H&WeEd&ZNn&!IO|l99&Sfr+Y{Lb_fC3M!T{@gw%5 zH?N9@zA}4O)7AG6Yg!?lz)sS9AngvTqmsu2C%+&*uB7WnKnf!h@aO>2?718|$}&*CRuj z>2sk#Q=LDefJHE12=bG=)*1)NZ<0N zeIRy?BQ_(U+IP%3pWQ%)(;&o22AqHg@N%(K9%RL6D9y-w999gpE!32ABzy%ki;Z3NP~)Z4zwS1%_@ox zVqLNzHi}k|<6p`y)+s>Arz^=wk(?C$1vL|M^XkdtO5PEiR|&Q^z9~}BP$9BHU6fjGOi?5s0zG*c zgqH?&Lj{)@h8KARyn{8k9&$1krsKZCG)$xbpF#}Y^-zdYVg(V$fwj3@5pgoJuFg>g z<#jx8&(Wc0=VrftnN3%Z$VfFvUuGKRfy51wDjmIefwzq$FGi9NpznJYbNxA%{an_7 zagZN_&ix5j2o0dJi-1OD#uH`~&Pu2uk6;2=8;qQt^GXv3RV3VGBn}n0PzO9VNA|q= zU3YTy*#%mY!ftwS;O`++5@M#`Ehtjg2m|O|o`Tq0U!x%FA$me@!pKQyP-_T}g$3W! zOV@MtQKa=zUgSA>MEx8;=Bm4XzDMB+8TkBUy-@xF@nrdp5`e-s&w@KEHTM=*1L$5L z%IhCd`Th-+DT+RhN5rHP4k3$an7;xxn?fZRcJpx<9SYn)ou$n2!zpSs2VMNbj;kt~ zghG^KN^+t|NoI+a?G(I+DA zG7V(Gy38$RPzZcXbW~)@qYq$;>{d|dl0@+lPAWzK{0i49;#DTab``0}0DO~huVOyH zN+pSV#gJ05k~dTk!;2IE-89I*3-*Y<_78wWfXWoJ9F`JpiuqOvYu+^Gu##*A+%Bt{ zPF?Uy#X01Sn>~M}BmEd5I-SV0qfaG}I3N&Hkuorno@qxQC^pFiD)NL?RNP|j*r+%L zsOSq7uYXGD`!|FaDdQ{w7DHdk$i&QRI|9ocjj+HA-5-*X5d2wRFTF*r(B%%(vwueR z*Gk$%CT5PBA?2?${Y@9%c5n4pWPi6#-hOYTG*TR4;RA3Nto#$)!>d&XBvN{f@U+5+$2=ItMkb`p( zMGTM3o*C}wg!Ix=3r8y_fQ}0e(DkJnt9Xgp#N;Knhhed;dq37Wm^g0lF1*p0a>=C&37&^?9vhUgey&h5l^y^6w zKQQ6I8S3(NSVhP_fpG#0G#!jn7A)3VO@3s^Rgh?3<9&371Ybhq9~^^{TJhLy7VfR7 z8%)iDRNYf53P3>_(c5gNgZ-O+OQEhKX%{C&-ts&zb5%@XnaNHGsPNNrDvhQVep=By zvv2rfO0toT&QXJQR0t29hE~#V4BPlv_eOyMZoUsKveVKB4xnBfbV=hxUno*enDu4l zy^nazIUme(3@HP9eNsuDr(7);0+!fVI!Zx$!PX`--fBdKrUB!t$n!*NFam-Yt(yP5 zM#VdiYpYb9&MUPQLrD+tgrYz2?=N6UVz5hdKIrq6#~tin0Rju_E+x+ z#gORl5uVU5y4-_q;pi>j7c$ZZFTcfC^1w5V;!{)6g@l6dpVUx8_qgqa=T~^ug`@a{ zJ4`YCcJ+^Ff`j;f_pZ%5|DGelwZlLRGl=l=a#7OraT@yvIXy=>|D@0e1cqvZ5%C!nsPgR+jf9C0(a9@%FR9RRUb=la2B;lMI`4n_nF)Yx+|qQgKv0zP&-Uc59L8^ziF zID{n}`f3yxt}D;-kdt;#(d+ugDvsP_cW)qiwH)$lA+ZnreU-cj{}a56E~u~2x}3a< zTDP<<+<(h4%Squ}4t4(pRVwW|4PY)_LG#dDZexHS&lWgBQ)ozji>a!PV$WLaEf`mlwfIazF^}6e80m 
zYKm{fV{q~TW1{TTi10x0+UvbmqQ5tYs4x(?QRjtT)$}rZ#xm~;f+{M&9ajMcPG%5F z<4n3JM$F(+XZkH-$N=rA72_ca^oa0~a{N0p{Q*v$010Vs3Kz=Dag*Jd{!CwS{5gom zZf}1k&YN6u5B(*H^?MEIHO{Y`*aMI+olx|>h(@dSJMytIOLUm(#Jp(0{QZ&fLD+Zi z(ML8aCpuDhpaae`-S8sU#V||MW{f%L2!qByK!09Rgwn<`ScrC z!jZK{hF^uQ4xv;+UMHb9e`3+4V5QN_owF0#lcJj*bLU=2(4C*7 zqRubq??379pB-)9=diUG^d-Pwm3Qt4tfiyI(1%OpeeeTe4q_y^bK@`~4M$f&BuqjK zlqmx8!svGZ#-g+_$BS=J(YZNd`h7DW7U_TxR~A8UNf|m*j~}paX+jLjFpU#X*B;Ot zXn@&s3JjyTQXxh?LK+N)ellzb(lP8+7H%rarQ;Fwx_lu;E6b-y

$oY%XS0^|aL} z$QYc`JvW<;bH$jUKTdvuh}l=TdeDEE(AoMA6THydVbswR^eYU`%*)e}fE80=Ovfdt z<5Uj@0RiCwRAe34`Xc64h87)$!QH1$pJ4hWnQWbVAL>~C{;Lk1JxMGibk}ia0@U!} z)c19$u_DIx#~~n4;Akqy@nO`uI@a^}{yyr%KBf;aX5?XT80UiaD{!)~guU<;tH>he z!nTmP$Nh8>4IHVI%VsmBMl6jQJyxk`zJW)lj%Zlef8S5T2MoN)!m}*g!NO%M%w=IJ z3u9S0fQ4OH=)%I+dIsKR;RP0!v2Yg)OIf&zg-cj?KZAjPu+W9gk7J>jg<&kjS(vMD zIld!o{lzR?%fhcSTGG8tXJ9Kf|7m*5a*D-kZSw6|dS4rU8h7UJ#4S)fgn?2UNP!EF zE%Sh{DW33kqYHeM^n|aBaAusSTnb-YwL8NO%IUn$@Kubys?pz(szcyQA_c2W|D}2f z{iVR(_>{2yF~2syg6Aa*|MYI;2=xpgmS?6=ud}VqHk&=*_GD{bg2`Ozt>L{HS$xcE4Dgd@)0xLI` zR~d^tuy~bCc@2v@vN*-!PAuMNg9|1yxC@JWO>9};pT#Y|cObDTm)hX!i7opVH?d`V z$t>>7;yE_;^I6>TDPVESPZJc|@L$2=mO86hoXg@RHu!oQ`cj+nuWj%hHu%1YE#-aG zro76A|7n}@Y8(6q8~Pd>{F)73%i=s1zim@a+29Xs@Oqo}pW2i++LXVvDaR(YEay*Z zSuU`_U2JfXO??j=+{*^{wZX+UxW5e^V1xIZ)KXp&8$8s8KFkImV1r91+333su9?(Q zKa(f5)R!C<_h9j2oAMGCZ^h#KSlsf1CN(VX$Ks7Pc6DJ|tKXK_1={A}noQ(E?~U`k8-Q#_@myh~Wz zjm3A^^rw1C%kkHC&Z zEZ*J1pT*m<_##b9d%S|h-7VwOw6veL_Oi@|zQ%@sqo$>Pi4t1MN5bOX@8EB1-)!xZ z?fSO02e$UfR{zy(`<^TwA3rrAOP6R7AK=T5^DIss)LU*Ck9%Y2urml0?J@VJ-3CCR|Ts_6-G?smmfVQ?!I7 z-;fU5`m}T%^W!#_QZx{8x-Q$&Z*ih7RY&MB^ur~C27$eS5WNGW2SNrqxSBYbq07+g zG83S`DBWuiqdz@BDSLsFg8USupIzv(?pSY((cb~+8OZ!x&me%v%?#*OrLez@Q7H=fE=s9?gs-B4KK(t}EMr&5WrwI1*x4Ohe4n z>oSro*_Z}=r-W2-7Lfo>j(D0OAv2LK(;%^=Piv-|f#w^e0{I3sL0v8R^mH-ef(o`2 znnYbvf+3XPdBXP z{%QGG{2g?n_Lky*`=^nO-%Df++>ysXb$)YyEx(b6_HvZ)Pl9K3%QAG?R09}|sMPxyG{zFp37BboeU7M8Hk)k0v9iQZD}-}Xt>G41D$Z0Xl( zSX|G-92Ty~d;7TlzkaNFCFg=afZUVBcM!AmQmb;c4K3nf`7k#o%x_5s*w9(GD}_2G ziS!xLJ3sWLgH?HT;+t(-cv*N`w_oxX?H5BGv5sB?@BAS8u;wEi4A-?RM?5Sa>vqLf zbY3(c4pyLJeiD(N#KZ}S;GAc>GlzeR`(W5kcmn%km6g(H;xrAfbJ$Rg8(;lg04i$63 zp%q1eOhM2Z;9UpO}q6-=XwRqmLCuLzti6wB-3qBu#x{@^YGf*D^2#{f?x6A5;(4I2~UFN+jB(Y4qBn zaOa9UDIViv@KfQujqvi!a!S_7#Z3+_+**|1jW-$!NhW@Mxi zX-H*|9UQ=I%AYlBR^n7>7Sn*8D~3QfFo)q;Kb`rBO9uC+5A-a*`x&TXXwdI}BA)Dc z=&(Yc#^?^4hfaF2MQJU@9g6bM*|e0`l*jyXX)dFOmfzz<>qBk9VOX^8H{)5T$_~iW zgCPo>nw7;UH}m_Pmfz|`zrU%W<&c%B^_S40kOuNfcujf1mNmoSb1;77UqH<2vFBp; 
z!CVQ)iH|vj+G9?^_IgKG4(6JVVRg|A%}?&knK{w}1UU{o%psDe=1L&l&IxN5>V~xo z_Nn)(ZBy-0)=KZL5ve&sdrX+0^Gi`zKO1w?`VLbPMB}7SACmWkLp%sB0a}LfO#asGui?30J(Ss zLta}atZiZ&tnFw|tZk?V*7k`85d>6=vu>*ha zSkL;_weHoHeoknOVI~ho#^^et$b*3v+|g5jc@pp>I$)l_(=*t;)-oq|J($}Ilt$3I zJM^Cugt^u`*E&`^l-cR|8lHr!{y_$M1w?6a#)y+>`UYEhY+-c{qG_U;P( z;&gp$J!Eh=Z|MUww?dd(H%z$HS>HmxKyR3G2Pg*^4CS<(;Ti4xrkv3{-sWRMf`>UL zaxsTcu-2ZA(54-<$;UiL^PpWW=2;74^yOi09AAtJiwX4){mmh#vkTTa*$(9F4ssTO z{DmNEH_UsNXDu^_?l6ZUm_s4vu}h?P^x|S(7d#{uIgSWtu6-(T2GY8n&+0apgV7MP zj0v7lJ51OL#^lP!Ttgi(*I;|KWnZJ|966XHm{*Q~j|npQdY*<$UwDYw!z_`_@8Y5l z(uDxXSHGnU*$(uv>~F{>75l$Qm;4sG2XCRXeEPEf;qg?a-#qpy1$;L=WvR^77PMQ+1i>OlG)hNW2^Z z?F2%b9L(aebTEqy)()$?y)GFb2iOyet@3cl6F^>-RbDdWNg*%UDz66eYSoO5wtQNl z9Yp~@aX-Rbz2vFjdg~n3nh%j>Ch=mcuTCz#_zx|ul@$hT z*D|rmf5kle{CxhoqZVEay1vN=6jTxp`+hP%5HhDh<5~NrqvPl8(3nL{bo>BR(qRsqPrh}OpXFJIiD&a038(MYS>WS%OJHHZ@)d09cssC zoYdP_SK80+w>)_Z?45I5=-M?@gWPJ-mx)_|$^>6o5R@^$I4B&h*+><8+1fwHua)2i zd_Rvkv8O64KRN%h%R;NX8B7NZEm#evizHY(J*JMgAMvD;J@{1)8RZRSB}po7Q6FNv z^4h8TZAa><299i(4==U=t+h?{L(TbXJkwe5%x~{0dh;pKFQk%HQGDi?cb|vkn~Pj% zPolF*yS$%FEyi^Tt8$x{$y+wfxrk~ZAuPSdg=&uX&uB2!u5_k)6rY8DB#$aXruyzx z#hw-f&w{|MqcQHi(@ zlxr7l`$Jyi%VmK@0qagKA~Kg_qCm7a7=;C8E^xBcU2D$ej{`;>Cs^EtLJ66io^*^{ zfA&?6=*&3h0QEmQ@5i=CZXA*GscWx`u#Zr$`G_#6eN$o9x!d}9k)gm^fIJSr?0WI! 
zrAl(-yAW1_)Hhj%Pzm}Pi55-9AgOW@|D>hwi2ot2=+GI*BviL^GY;+-lY zH0s^4kFM}Yj7kq4T#{E}+hTo7M-brj*QOVP#0JDk+;(MlzrO(!2dUjtDRC(L^ED7O zJHprS#Xt0&@tr|Q7F#hZuA5;j4m6%BUL$Ia&+d$r+c9ysctiqEy>HP!{usLos98#S zYWr;ZN3GO?!qxi{>k_xSE~Q9Da7R4_Z=5ae#dZdT!ZUW9?+m)Wqm=p5_H7w=_g&be zX_$DX#r%RZ&XLnSer9gLS)m9AGngz`ACpZP|0G+@gG6-{1j018z&Ab%RSiDXps=j6 zKbL+2E3d4BN zA8Cj5K|+uYNNDg5ap^#C%dZ$E|YfHbd5fy~z%4F-|f5F?KONF_0LC7^v1x%95y{ zsW?%EllvvZc=UMA-b8VUCA#j0X=5`|RdOq;36m^84Ds__BkF1FXq;EiuuHpmx+Ys{8;`pSG zVS=ivNnl+41tykFHm>W}u1|w5d{tF#O|rW2<;qzKrDCPY@3Ypv9CryV?f%+|cewZj zmyicAD$IT6ZVW8IY4h{2%@>bRJOgfmhXU2AA2NA3M6#d@zalcT26GeWAYEQW&KY@SZvU!z zk+OG<4;7e1<|OU<%nHk)SzY zhmvf(rQU8oEGFlFJZ`E=|De+2w-T$OZj+y^(9Y(iF;Mm@j8r!c=*H~DIkW25{-#eb zUJK-_w4V5Wr|0LzBO4YvF8A#Vb8T9<_7IMYFwE)eV1JB?y2N~OU%R61X@+eknlt1L;`~iWR&6H{=`K+ zy{6on#x&Fl5=xfe(R`@nya0W0EAG-X-d3Pp;CB+gU^MN^9-42s`9?o57;KgCeM-e$ z{SAmr_{i~n2p};WNA~t(%}?( z8rg+WkA!luXZu zrwnN*Vx~UEa!2Kr#mkR|cxoNCk`h+SSdMk~7bf0M`xfEOWSlZbn%gr#ojdDO>!rGX zyrakGpp%w?{4vKxT+9_Mm8*dNf{GfT`YDq$Aw=%8`eEOdC}j|eEbfl zf8LC9pWC5TM*Pdvg{*5c(SSh``@s1Ankcb0ZQSZ(xJ1cpMz5`uWauRVC-{IhSNB*P>D1CFGz^H@zTV`$6zm+6sHKJ*mGs) zrh}NUt?!yrviBQjeaX#vzK2J>bU-5hbpk`qp@i+l@&4b%?qg@BE1xr>SaX3gl)nNe zwrSl%ZdZBt*!})1r)8}tV-5r2&>&3TxaPNwRxJwyK-)R%yeAI7TjRxo64Cgn-3ng6 z^^y8Jib|f8ALOElv;7^zx0@T#x%WA1dk13`QK=%P{^i9NhveoSTH!WIsjah(!dbC_ z9{v5_(LoCC(^&+*Z)B3}h0m59fQG4n{a-BYbia6Jr98@&%P6PCACUmxhVO6@u3On; zox5Pn!otItE27J}#2nOh{v70KXB&kov$9wsQsFDdN& ztXNl#)vt8kikjfnK6N4W!umq{Rf*sM%Lw^r*ZQCcl2fE5uw;S_o={d*6$B zttX$weP#F9`pwf84JGBIL(Z4b+!r45n?K|a+arI>brcMYaC&!mDw*ny2Q|11e)#nM zp~4O1QEzHH99o01^3mLkW~^2gd$Z&=#lqVL_Sd^p>1XGss>j0nn%|-N4CyPi@9}e? 
zb)~V&`+Zs$)t0=HgXGmvz9moyt;RSWUv$T@lx?4X{b+PnRv+{1dT&BlHD_ZYd=MEALwySpHDBz@iSA+UtKFz z3@439X$iGV+PhksTHRTNV~(`<*qpC9vt29|s4_2~9C)xBUvP25P+l>c!T3gaD1*;i zxH^3s&5iqqf#@9tY|nebH%sKCv@b-7}MXqmng(^Vyt?g+pct1S*ibViOn&k^KFi0bn%Sv?kyisx@3MPZxAOWNq@(c8 z6P=cjLym&tN$I)IS+I%?)11dHj;O>DC94;dg>ebZe*5_j#m2p%pn0Z&`e+@g^W}kC z7o5j-ftN}K|#X~eLqWO``wH!$Y{Hg?eQm3V~q4uj%ax8yt5N-FZgjA zjOf~h-bbOtihLN4u^i?&`d-d)ZDKJU$y0~$Z4U*QUwFjCQS!&#GR{|_29IYBW1qqj zxlrX`tP#S&_w?df6?t4a%IA0Rp*?1CW1htnis=7@!7o5jU!;(c=^>~229=8w*pMcH ztt@X`!4tO-;ec+VV1IH0zYb}y1#cP44P1J=Qy1^*H4~=r2O{7-Eq6RHJhCN}T4q8x zsblSA>0-A}6avB}hJ-ob6qi+_m8jw2IpwRFW7EDVMC}*Nj!6RZ-VeeNKfOm_Gq*R5k?zA3yhYnI46ng{^@G!b7Tc>^5(c>Vbh z7bjP~)+=S1sY???{+YdZ5%JasBdMA9$~X1v3(dD5H9U%B2i}bzFGG;7I_T|=(;$`< zif5OUWst?`P_F0p()LM`juC^oqlbMbd13x{;mw{h+|wUT|3}cLQ(W3Fh-(_>$S8=Clq(=BPNKt$NG08SR}G zl)f{wksDNcg08t05U767QhA9`790mR^Y;mMXZQ6U?i2G>X1UQg|J*Xe#PqDpAP-#a zwNlmv9eN6Qo+XouojSS!xGs_jAIH5>ODfx)sr$)}4Jm>jN_$$0JMI4V&H5=l(cWkt zFkc>^`MnY`qPPmKsSwVO;5K;_ilfHz4;?ZhU0QWN4VMG(*X zLAdeR=`DTKP!B_4gUH_c%iNCWm+hq9_LXRh=udA=$_76P*DdfOTltDZJJtW+I zDv-DFE7vDoO24WhDM+?n0Dh*OG(ei#G`zPsHT_9gaF)EG9FGy0B(8c>L?2sebh=`X z#(5C0)gHrpXx^P%V!o_FFVW4-ni5V&Td{stCr3nMU@I}r_rH)`!|UDG$q}$tV^Bg* zfkR=;gk#=sHd(v1sE*o(mUGHub7BpsgvN-)wwBR}bh}>keSmJKx#}qfuFOes!6T=olF-G|=15(GQXOoO%+a&x6t24JZ@v~s zV2+~)T!C(}u=}T?`F(5On(xLX1E9PfxTAMb_x<~q^2XT55~a|rjUl)-toPZ$P#dA# zn11o25~APi5)vrnoZ0gIz`F}mMy65aY&M@%l_S_J$inGKyu{$cfuaNNd|m~`B?19l zwghAxW8HZ@j4hC%p3ufwDqE7=o^)=}%V8f7&w>a6ZkZ93yAe6ndj8znH_mEApDimr z1cBWeUs;|`9u8<_SJ(O-GoPAMzIqztZs=|Zx}RyrghhuWWGD$WU@JZ?GekJs3fSH% z@Cg@Cmn?o_BAK`0v<#Q~E*>Ht__es(`tzX1UDYZz`PSe3PaTWYO1Oqq&;IZS(}U2q zspEg%rN(p=ZhX?(jg-9vxvS409Z7piz+)Btbs77BC&0}K2gai$lrx>%;}!sp)>VHY1ulHqt_Mk zled)OzzLLDM|>H^HR-Pj7Hn54oa?$9HahWAS-Uw#D2rxvfgkgMH%XazY9>h;kb7DL zy4sb`vn2+t5fd>m(yxb>5fdhAkA(Udr%%>(yYQL-Sn!Xd-jeO0QR~iUM`%IjFbmSM zZ}Yab1>vNitU-f`7ytBW>R2Mm7nt>H>OfMfdBJXRlczN1S5ZnD<%kY{6pqYQ6v8l*Y{u%)2-pEYcPkI}9Yg-;naE_{6m6(#W#|Q* z;uo`)t(7nduVyqKNn+5dno9S^1LHS;v$_CWxA2AYCSBva{hFU*=Y0e 
zi}hI!Sacz&j;tzcJ-`ItgCG5XOcoIoigb5Gf+dR@4PKDHEFSXW%mvofR*rY-YWM$y zHX>I@vP#Gle@R)tT$f7bBALV_sCP;Vio z!{AD<)xiYY3aoY85$ph#UO;lj!!~Lk4#;F>Mc6YXcas%Z9vywET|^{w`YF^Mv7bd! ziWa_^TgFYzkq+h*cq6&eZlg9gA8jDJ9F6X=10+z&zLbHkNcoVN6^G-CxNp2psI*!B z5}SjD>Wh>#N>sBXx?wbKA7OjQO}1(NdTRe!R`JxWsgpg9sBsJ-jB~$oWk{rMqqJFsAWRS^)Vo}~*I4;aM zolSQ!sWmzcoXlI`-~Q<(e2^2L-Q z<+`TTwhD<_edsMCD1}3Fs%Qk&dAb4d-~SM3S%qiW3f?u#GGDkmam^S0FoCO)XhC$a zfP>$YjffV=n`c9Sjyv{6(kC;FNkJYo-0x9lcx9<#V{{KggMQVg3kIlbTI>)5&VB`^jNPtAwAXBXc1n#W6zRHg2 z*U9Jyyx{8<^-Q|-LK@b7d+)YZ2I6Z+es5EgqDU+LDPhomvDv*Kwa-o3zBivEG0Kso zw;ZV>Lx@g0|JNaCE_)5&uZn}uB@^u3nJ5&9?c16n45Pj3VQ*_;Di&b3{rmU?Gnq8W zx+Yyr{tYsCjP>ZxLgxIG5-U>oSWKxOK;d6?a%8gtxHzy1MBhC zZvk(ahjq%*t>Df0BYpC!iOFZvw}Krnxds=19=P>juL5Y~??(8l5FouaZs4ytVyPc{rg-!5&h3G-;~TR~U= zRyo^?ggIUr{sT7P0pCoLA+->`&|&7r5f%)Bz@_vpe2D)ynYx=?6qewc1;}IJl$NRo_KeFZ#W&dHrV#Dc~_yg z9g!(Uf7{Rek!n1WeYyCpNYZwH&YP0#{DIToC+a&M)s&v!I&e8M?+Y#J3p^M9ZHH@# z4!~RyxIpz`h=z)p>gKsM?q3`B-TxkKK_0eVKIfvie`(@q|D}ohFGbvcBfsg}!*I># z$ZwaZnEr+|I=_&+H`w;=KO~B-@=hy=QBi#`r=nu{8!;9`^M8r|M`ri`my+W`v@}wI zn(D*Yh5ur*UZedtQ}UmT{5#`BG?n3bPBhhd%Kt3pbzQoDGp&6~|D3M(-*ktZ|DfA~?Cfp9K9#uZirbymC2BL`AbUHKby)8^9OOoaa=>feoq|5@hSA`E|{{=cRKE#3dla)$Gr O=^_=C420qQ?0*0Sg%sle literal 0 HcmV?d00001 From 576355f16aef9ab192446008d695e2479daa7b83 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Fri, 17 Sep 2021 11:42:27 +0200 Subject: [PATCH 06/56] Add 'ssl.ca.pem' property (#2380) --- CHANGELOG.md | 7 ++++ CONFIGURATION.md | 1 + src-cpp/rdkafkacpp.h | 11 +++-- src/rdkafka.h | 13 +++--- src/rdkafka_conf.c | 9 ++++- src/rdkafka_conf.h | 3 +- src/rdkafka_ssl.c | 84 +++++++++++++++++++++++++++++---------- tests/0097-ssl_verify.cpp | 52 ++++++++++++++++++++++-- tests/test.c | 2 + 9 files changed, 146 insertions(+), 36 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a89dfc589..938775f7ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +# librdkafka v1.9.0 + +## Enhancements + + * Added `ssl.ca.pem` to add CA certificate by 
PEM string. (#2380) + + # librdkafka v1.8.0 librdkafka v1.8.0 is a security release: diff --git a/CONFIGURATION.md b/CONFIGURATION.md index 7fbef2de3e..aea225340a 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -68,6 +68,7 @@ ssl.certificate.location | * | | ssl.certificate.pem | * | | | low | Client's public key string (PEM format) used for authentication.
*Type: string* ssl_certificate | * | | | low | Client's public key as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* ssl.ca.location | * | | | low | File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. It is recommended to install openssl using Homebrew, to provide CA certificates. On Linux install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe` a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`).
*Type: string* +ssl.ca.pem | * | | | low | CA certificate string (PEM format) for verifying the broker's key.
*Type: string* ssl_ca | * | | | low | CA certificate as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* ssl.ca.certificate.stores | * | | Root | low | Comma-separated list of Windows Certificate stores to load CA certificates from. Certificates will be loaded in the same order as stores are specified. If no certificates can be loaded from any of the specified stores an error is logged and the OpenSSL library's default CA location is used instead. Store names are typically one or more of: MY, Root, Trust, CA.
*Type: string* ssl.crl.location | * | | | low | Path to CRL for verifying broker's certificate validity.
*Type: string* diff --git a/src-cpp/rdkafkacpp.h b/src-cpp/rdkafkacpp.h index 25a2ab29a8..6eb1391dcb 100644 --- a/src-cpp/rdkafkacpp.h +++ b/src-cpp/rdkafkacpp.h @@ -1305,6 +1305,9 @@ class RD_EXPORT Conf { * * @remark Private and public keys in PEM format may also be set with the * `ssl.key.pem` and `ssl.certificate.pem` configuration properties. + * + * @remark CA certificate in PEM format may also be set with the + * `ssl.ca.pem` configuration property. */ virtual Conf::ConfResult set_ssl_cert (RdKafka::CertificateType cert_type, RdKafka::CertificateEncoding cert_enc, @@ -1419,14 +1422,14 @@ class RD_EXPORT Conf { */ virtual struct rd_kafka_topic_conf_s *c_ptr_topic () = 0; - /** + /** * @brief Set callback_data for ssl engine. * - * @remark The \c ssl.engine.location configuration must be set for this + * @remark The \c ssl.engine.location configuration must be set for this * to have affect. * - * @remark The memory pointed to by \p value must remain valid for the - * lifetime of the configuration object and any Kafka clients that + * @remark The memory pointed to by \p value must remain valid for the + * lifetime of the configuration object and any Kafka clients that * use it. * * @returns CONF_OK on success, else CONF_INVALID. diff --git a/src/rdkafka.h b/src/rdkafka.h index f4fcfd185d..f302628aa3 100644 --- a/src/rdkafka.h +++ b/src/rdkafka.h @@ -2302,6 +2302,9 @@ typedef enum rd_kafka_cert_enc_t { * * @remark Private and public keys in PEM format may also be set with the * `ssl.key.pem` and `ssl.certificate.pem` configuration properties. + * + * @remark CA certificate in PEM format may also be set with the + * `ssl.ca.pem` configuration property. */ RD_EXPORT rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, @@ -2315,18 +2318,18 @@ rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, * @brief Set callback_data for OpenSSL engine. * * @param conf Configuration object. 
- * @param callback_data passed to engine callbacks, + * @param callback_data passed to engine callbacks, * e.g. \c ENGINE_load_ssl_client_cert. * - * @remark The \c ssl.engine.location configuration must be set for this + * @remark The \c ssl.engine.location configuration must be set for this * to have affect. * - * @remark The memory pointed to by \p value must remain valid for the - * lifetime of the configuration object and any Kafka clients that + * @remark The memory pointed to by \p value must remain valid for the + * lifetime of the configuration object and any Kafka clients that * use it. */ RD_EXPORT -void rd_kafka_conf_set_engine_callback_data (rd_kafka_conf_t *conf, +void rd_kafka_conf_set_engine_callback_data (rd_kafka_conf_t *conf, void *callback_data); diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index 5ff59dc414..a8aa5af54c 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -817,6 +817,11 @@ static const struct rd_kafka_property rd_kafka_properties[] = { "path will be used (see `OPENSSLDIR` in `openssl version -a`).", _UNSUPPORTED_SSL }, + { _RK_GLOBAL|_RK_SENSITIVE, "ssl.ca.pem", _RK_C_STR, + _RK(ssl.ca_pem), + "CA certificate string (PEM format) for verifying the broker's key.", + _UNSUPPORTED_SSL + }, { _RK_GLOBAL, "ssl_ca", _RK_C_INTERNAL, _RK(ssl.ca), "CA certificate as set by rd_kafka_conf_set_ssl_cert()", @@ -3703,8 +3708,8 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, if (conf->ssl.keystore_location && !conf->ssl.keystore_password) return "`ssl.keystore.password` is mandatory when " "`ssl.keystore.location` is set"; - if (conf->ssl.ca && conf->ssl.ca_location) - return "`ssl.ca.location`, and memory-based " + if (conf->ssl.ca && (conf->ssl.ca_location || conf->ssl.ca_pem)) + return "`ssl.ca.location` or `ssl.ca.pem`, and memory-based " "set_ssl_cert(CERT_CA) are mutually exclusive."; #ifdef __APPLE__ else /* Default ssl.ca.location to 'probe' on OSX */ diff --git a/src/rdkafka_conf.h b/src/rdkafka_conf.h 
index c1afbab470..ac08651d83 100644 --- a/src/rdkafka_conf.h +++ b/src/rdkafka_conf.h @@ -159,7 +159,7 @@ typedef enum { /* Increase in steps of 64 as needed. * This must be larger than sizeof(rd_kafka_[topic_]conf_t) */ -#define RD_KAFKA_CONF_PROPS_IDX_MAX (64*28) +#define RD_KAFKA_CONF_PROPS_IDX_MAX (64*29) /** * @struct rd_kafka_anyconf_t @@ -238,6 +238,7 @@ struct rd_kafka_conf_s { char *cert_pem; rd_kafka_cert_t *cert; char *ca_location; + char *ca_pem; rd_kafka_cert_t *ca; /** CSV list of Windows certificate stores */ char *ca_cert_stores; diff --git a/src/rdkafka_ssl.c b/src/rdkafka_ssl.c index 2a83894471..58a702a862 100644 --- a/src/rdkafka_ssl.c +++ b/src/rdkafka_ssl.c @@ -955,6 +955,7 @@ static int rd_kafka_ssl_probe_and_set_default_ca_location (rd_kafka_t *rk, */ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, char *errstr, size_t errstr_size) { + rd_bool_t ca_probe = rd_true; rd_bool_t check_pkey = rd_false; int r; @@ -972,31 +973,74 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, /* OpenSSL takes ownership of the store */ rk->rk_conf.ssl.ca->store = NULL; - } else if (rk->rk_conf.ssl.ca_location && - strcmp(rk->rk_conf.ssl.ca_location, "probe")) { - /* CA certificate location, either file or directory. */ - int is_dir = rd_kafka_path_is_dir(rk->rk_conf.ssl.ca_location); + ca_probe = rd_false; - rd_kafka_dbg(rk, SECURITY, "SSL", - "Loading CA certificate(s) from %s %s", - is_dir ? "directory" : "file", - rk->rk_conf.ssl.ca_location); + } else { - r = SSL_CTX_load_verify_locations(ctx, - !is_dir ? - rk->rk_conf.ssl. - ca_location : NULL, - is_dir ? - rk->rk_conf.ssl. - ca_location : NULL); + if (rk->rk_conf.ssl.ca_location && + strcmp(rk->rk_conf.ssl.ca_location, "probe")) { + /* CA certificate location, either file or directory. */ + int is_dir = rd_kafka_path_is_dir( + rk->rk_conf.ssl.ca_location); + + rd_kafka_dbg(rk, SECURITY, "SSL", + "Loading CA certificate(s) from %s %s", + is_dir ? 
"directory" : "file", + rk->rk_conf.ssl.ca_location); + + r = SSL_CTX_load_verify_locations(ctx, + !is_dir ? + rk->rk_conf.ssl. + ca_location : NULL, + is_dir ? + rk->rk_conf.ssl. + ca_location : NULL); - if (r != 1) { - rd_snprintf(errstr, errstr_size, - "ssl.ca.location failed: "); - return -1; + if (r != 1) { + rd_snprintf(errstr, errstr_size, + "ssl.ca.location failed: "); + return -1; + } + + ca_probe = rd_false; } - } else { + if (rk->rk_conf.ssl.ca_pem) { + /* CA as PEM string */ + X509 *x509; + X509_STORE *store; + + /* Get the OpenSSL trust store */ + store = SSL_CTX_get_cert_store(ctx); + rd_assert(store != NULL); + + rd_kafka_dbg(rk, SECURITY, "SSL", + "Loading CA certificate from string"); + + x509 = rd_kafka_ssl_X509_from_string( + rk, rk->rk_conf.ssl.ca_pem); + if (!x509) { + rd_snprintf(errstr, errstr_size, + "ssl.ca.pem failed: " + "not in PEM format?: "); + return -1; + } + + if (!X509_STORE_add_cert(store, x509)) { + rd_snprintf(errstr, errstr_size, + "failed to add ssl.ca.pem to " + "CA cert store: "); + X509_free(x509); + return -1; + } + + X509_free(x509); + + ca_probe = rd_false; + } + } + + if (ca_probe) { #ifdef _WIN32 /* Attempt to load CA root certificates from the * configured Windows certificate stores. 
*/ diff --git a/tests/0097-ssl_verify.cpp b/tests/0097-ssl_verify.cpp index b25f6a97b4..9b77b4a98e 100644 --- a/tests/0097-ssl_verify.cpp +++ b/tests/0097-ssl_verify.cpp @@ -132,7 +132,7 @@ static void conf_location_to_pem (RdKafka::Conf *conf, std::string errstr; if (conf->set(loc_prop, "", errstr) != RdKafka::Conf::CONF_OK) - Test::Fail("Failed to reset " + loc_prop); + Test::Fail("Failed to reset " + loc_prop + ": " + errstr); /* Read file */ std::ifstream ifs(loc.c_str()); @@ -143,7 +143,7 @@ static void conf_location_to_pem (RdKafka::Conf *conf, " from disk and changed to in-memory " + pem_prop + "\n"); if (conf->set(pem_prop, pem, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail("Failed to set " + pem_prop); + Test::Fail("Failed to set " + pem_prop + ": " + errstr); } /** @@ -257,7 +257,9 @@ static void do_test_verify (const int line, bool verify_ok, conf_location_to_setter(conf, "ssl.certificate.location", RdKafka::CERT_PUBLIC_KEY, pub_enc); - if (load_ca == USE_SETTER) + if (load_ca == USE_CONF) + conf_location_to_pem(conf, "ssl.ca.location", "ssl.ca.pem"); + else if (load_ca == USE_SETTER) conf_location_to_setter(conf, "ssl.ca.location", RdKafka::CERT_CA, ca_enc); @@ -376,8 +378,8 @@ extern "C" { return 0; } - do_test_bad_calls(); + do_test_bad_calls(); do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM, @@ -393,6 +395,10 @@ extern "C" { USE_CONF, RdKafka::CERT_ENC_PEM, USE_CONF, RdKafka::CERT_ENC_PEM, USE_LOCATION, RdKafka::CERT_ENC_PEM); + do_test_verify(__LINE__, true, + USE_CONF, RdKafka::CERT_ENC_PEM, + USE_CONF, RdKafka::CERT_ENC_PEM, + USE_CONF, RdKafka::CERT_ENC_PEM); do_test_verify(__LINE__, true, USE_SETTER, RdKafka::CERT_ENC_PEM, USE_SETTER, RdKafka::CERT_ENC_PEM, @@ -408,4 +414,42 @@ extern "C" { return 0; } + + + int main_0097_ssl_verify_local (int argc, char **argv) { + if (!test_check_builtin("ssl")) { + Test::Skip("Test requires SSL support\n"); + return 0; + } + + + /* Check that creating a client with an invalid PEM string 
fails. */ + const std::string props[] = { "ssl.ca.pem", "ssl.key.pem", + "ssl.certificate.pem", "" }; + + for (int i = 0 ; props[i] != "" ; i++) { + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + + std::string errstr; + + if (conf->set("security.protocol", "SSL", errstr)) + Test::Fail(errstr); + conf->set("debug", "security", errstr); + if (conf->set(props[i], "this is \n not a \t PEM!", errstr)) + Test::Fail("Setting " + props[i] + " to junk should work, " + "expecting failure on client creation"); + + RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); + delete conf; + if (producer) + Test::Fail("Expected producer creation to fail with " + props[i] + + " set to junk"); + else + Test::Say("Failed to create producer with junk " + props[i] + + " (as expected): " + errstr + "\n"); + } + + return 0; + } + } diff --git a/tests/test.c b/tests/test.c index b7965637a4..0f51568b9d 100644 --- a/tests/test.c +++ b/tests/test.c @@ -208,6 +208,7 @@ _TEST_DECL(0093_holb_consumer); _TEST_DECL(0094_idempotence_msg_timeout); _TEST_DECL(0095_all_brokers_down); _TEST_DECL(0097_ssl_verify); +_TEST_DECL(0097_ssl_verify_local); _TEST_DECL(0098_consumer_txn); _TEST_DECL(0099_commit_metadata); _TEST_DECL(0100_thread_interceptors); @@ -409,6 +410,7 @@ struct test tests[] = { #endif _TEST(0095_all_brokers_down, TEST_F_LOCAL), _TEST(0097_ssl_verify, 0), + _TEST(0097_ssl_verify_local, TEST_F_LOCAL), _TEST(0098_consumer_txn, 0, TEST_BRKVER(0,11,0,0)), _TEST(0099_commit_metadata, 0), _TEST(0100_thread_interceptors, TEST_F_LOCAL), From 28f3163635c38b72b1da041f07fbee0501bae719 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 22 Sep 2021 11:08:21 +0200 Subject: [PATCH 07/56] Improve nuget release script - Verify artifact file contents and architectures. - Verify that artifact attributes match. - Get README, CONFIG,.. etc, from artifacts instead of local source tree (which may not match the released version). 
--- packaging/nuget/packaging.py | 61 ++++++++++++++++++++++++++++---- packaging/nuget/requirements.txt | 5 +-- 2 files changed, 58 insertions(+), 8 deletions(-) diff --git a/packaging/nuget/packaging.py b/packaging/nuget/packaging.py index b130ba77df..9a51392b36 100755 --- a/packaging/nuget/packaging.py +++ b/packaging/nuget/packaging.py @@ -17,6 +17,7 @@ from collections import defaultdict import boto3 from zfile import zfile +import magic if sys.version_info[0] < 3: from urllib import unquote @@ -31,6 +32,38 @@ 'i386': 'x86', 'win32': 'x86'}} +# Filemagic arch mapping. +# key is (plat, arch, file_extension), value is a compiled filemagic regex. +# This is used to verify that an artifact has the expected file type. +magic_patterns = { + ('win', 'x64', '.dll'): re.compile('PE32.*DLL.* x86-64, for MS Windows'), + ('win', 'x86', '.dll'): re.compile('PE32.*DLL.* Intel 80386, for MS Windows'), + ('win', 'x64', '.lib'): re.compile('current ar archive'), + ('win', 'x86', '.lib'): re.compile('current ar archive'), + ('linux', 'x64', '.so'): re.compile('ELF 64.* x86-64'), + ('linux', 'arm64', '.so'): re.compile('ELF 64.* ARM aarch64'), + ('osx', 'x64', '.dylib'): re.compile('Mach-O 64.* x86_64') } + +magic = magic.Magic() + +def magic_mismatch(path, a): + """ Verify that the filemagic for \p path matches for artifact \p a. + Returns True if the magic file info does NOT match. + Returns False if no matching is needed or the magic matches. """ + k = (a.info.get('plat', None), a.info.get('arch', None), + os.path.splitext(path)[1]) + pattern = magic_patterns.get(k, None) + if pattern is None: + return False + + minfo = magic.id_filename(path) + if not pattern.match(minfo): + print(f"Warning: {path} magic \"{minfo}\" does not match expected {pattern} for key {k}") + return True + + return False + + # Collects CI artifacts from S3 storage, downloading them # to a local directory, or collecting already downloaded artifacts from # local directory. 
@@ -315,8 +348,6 @@ def build (self, buildtype): destpath=os.path.join('build', 'native')) self.copy_template('librdkafka.redist.props', destpath='build') - for f in ['../../README.md', '../../CONFIGURATION.md', '../../LICENSES.txt']: - shutil.copy(f, self.stpath) # Generate template tokens for artifacts for a in self.arts.artifacts: @@ -334,6 +365,12 @@ def build (self, buildtype): [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafkacpp.h', 'build/native/include/librdkafka/rdkafkacpp.h'], [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafka_mock.h', 'build/native/include/librdkafka/rdkafka_mock.h'], + [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './share/doc/librdkafka/README.md', 'README.md'], + [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './share/doc/librdkafka/CONFIGURATION.md', 'CONFIGURATION.md'], + # The above x64-linux gcc job generates a bad LICENSES.txt file, + # so we use the one from the osx job instead. + [{'arch': 'x64', 'plat': 'osx', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './share/doc/librdkafka/LICENSES.txt', 'LICENSES.txt'], + # Travis OSX build [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/librdkafka.dylib', 'runtimes/osx-x64/native/librdkafka.dylib'], # Travis Manylinux build @@ -396,9 +433,14 @@ def build (self, buildtype): found = False # Try all matching artifacts until we find the wanted file (member) for a in self.arts.artifacts: + attr_match = True for attr in attributes: if a.info.get(attr, None) != attributes[attr]: - continue + attr_match = False + break + + if not attr_match: + continue if not fnmatch(a.fname, fname_glob): continue @@ -414,6 +456,11 @@ def build (self, buildtype): except Exception as e: raise Exception('file not found in archive %s: %s. 
Files in archive are: %s' % (a.lpath, e, zfile.ZFile(a.lpath).getnames())) + # Check that the file type matches. + if magic_mismatch(outf, a): + os.unlink(outf) + continue + found = True break @@ -436,6 +483,8 @@ def verify (self, path): """ Verify package """ expect = [ "librdkafka.redist.nuspec", + "README.md", + "CONFIGURATION.md", "LICENSES.txt", "build/librdkafka.redist.props", "build/native/librdkafka.redist.targets", @@ -482,9 +531,9 @@ def verify (self, path): if len(missing) > 0: print('Missing files in package %s:\n%s' % (path, '\n'.join(missing))) return False - else: - print('OK - %d expected files found' % len(expect)) - return True + + print('OK - %d expected files found' % len(expect)) + return True class StaticPackage (Package): diff --git a/packaging/nuget/requirements.txt b/packaging/nuget/requirements.txt index c892afd11b..0fa2fd19ca 100644 --- a/packaging/nuget/requirements.txt +++ b/packaging/nuget/requirements.txt @@ -1,2 +1,3 @@ -boto3 -rpmfile +boto3==1.18.45 +rpmfile==1.0.8 +filemagic==1.6 From c7cebbe813fc077019e69002cd000b6c2ea37c5a Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 22 Sep 2021 14:51:50 +0200 Subject: [PATCH 08/56] Bump to version 1.8.2 (Skipping 1.8.1 due to dotnet release with that number) --- CHANGELOG.md | 14 +++++++++++++- src-cpp/rdkafkacpp.h | 2 +- src/rdkafka.h | 2 +- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 938775f7ee..16c27de8c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,21 @@ -# librdkafka v1.9.0 +# librdkafka v1.8.2 + +librdkafka v1.8.2 is a maintenance release. + +## Fixes + + * The `librdkafka.redist` 1.8.0 package had two flaws: + - the linux-arm64 .so build was a linux-x64 build. + - the included MSVC 140 runtimes for x64 were infact x86. + The release script has been updated to verify the architectures of + provided artifacts to avoid this happening in the future. 
## Enhancements * Added `ssl.ca.pem` to add CA certificate by PEM string. (#2380) +*Note: there was no v1.8.1 librdkafka release* + # librdkafka v1.8.0 diff --git a/src-cpp/rdkafkacpp.h b/src-cpp/rdkafkacpp.h index 6eb1391dcb..daed1cbf5b 100644 --- a/src-cpp/rdkafkacpp.h +++ b/src-cpp/rdkafkacpp.h @@ -111,7 +111,7 @@ namespace RdKafka { * @remark This value should only be used during compile time, * for runtime checks of version use RdKafka::version() */ -#define RD_KAFKA_VERSION 0x010800ff +#define RD_KAFKA_VERSION 0x010802ff /** * @brief Returns the librdkafka version as integer. diff --git a/src/rdkafka.h b/src/rdkafka.h index f302628aa3..b85ba9099b 100644 --- a/src/rdkafka.h +++ b/src/rdkafka.h @@ -158,7 +158,7 @@ typedef SSIZE_T ssize_t; * @remark This value should only be used during compile time, * for runtime checks of version use rd_kafka_version() */ -#define RD_KAFKA_VERSION 0x010800ff +#define RD_KAFKA_VERSION 0x010802ff /** * @brief Returns the librdkafka version as integer. 
From e6742cd7cdf33bc8eaab5fe02242d8624be8b005 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 28 Sep 2021 14:04:16 +0200 Subject: [PATCH 09/56] mklove: fix static bundle .a generation on osx --- mklove/Makefile.base | 8 ++++++-- mklove/modules/configure.cc | 5 +++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/mklove/Makefile.base b/mklove/Makefile.base index 0f8259b286..2ce15d717a 100755 --- a/mklove/Makefile.base +++ b/mklove/Makefile.base @@ -134,7 +134,7 @@ $(LIBNAME)-static.a: $(LIBNAME).a @printf "$(MKL_YELLOW)Creating self-contained static library $@$(MKL_CLR_RESET)\n" ifeq ($(HAS_LIBTOOL_STATIC),y) $(LIBTOOL) -static -o $@ - $(LIBNAME).a $(MKL_STATIC_LIBS) -else # HAS_LIBTOOL_STATIC +else ifeq ($(HAS_GNU_AR),y) (_tmp=$$(mktemp arstaticXXXXXX) ; \ echo "CREATE $@" > $$_tmp ; \ for _f in $(LIBNAME).a $(MKL_STATIC_LIBS) ; do \ @@ -145,7 +145,11 @@ else # HAS_LIBTOOL_STATIC cat $$_tmp ; \ ar -M < $$_tmp || exit 1 ; \ rm $$_tmp) -endif # HAS_LIBTOOL_STATIC +else + for _f in $(LIBNAME).a $(MKL_STATIC_LIBS) ; do \ + ar -r $@ $$_f ; \ + done +endif cp $@ $(LIBNAME)-static-dbg.a # The self-contained static library is always stripped, regardless # of --enable-strip, since otherwise it would become too big. 
diff --git a/mklove/modules/configure.cc b/mklove/modules/configure.cc index 2d564616a3..cf39cd6d1a 100644 --- a/mklove/modules/configure.cc +++ b/mklove/modules/configure.cc @@ -158,6 +158,11 @@ function checks { mkl_mkvar_set staticlinking HAS_LIBTOOL_STATIC y fi fi + + # Check for GNU ar (which has the -M option) + mkl_meta_set "gnuar" "name" "GNU ar" + mkl_command_check "gnuar" "HAS_GNU_AR" disable \ + "ar -V 2>/dev/null | grep -q GNU" } From 7b54ade2e6eef0b270867f4f7ce339893240b3f8 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 28 Sep 2021 14:05:17 +0200 Subject: [PATCH 10/56] mklove: portable checksum checking for downloads --- mklove/modules/configure.base | 16 ++++++++++++++-- mklove/modules/configure.libzstd | 2 +- mklove/modules/configure.zlib | 2 +- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/mklove/modules/configure.base b/mklove/modules/configure.base index 0298acb97b..efb8796438 100644 --- a/mklove/modules/configure.base +++ b/mklove/modules/configure.base @@ -2358,11 +2358,11 @@ function mkl_toggle_option_lib { # # Arguments: # url Archive URL -# checksum_type The ${checksum_type}sum tool will be used to verify the checksum. E.g., "sha256". +# shabits The SHA algorithm bit count used to verify the checksum. E.g., "256". # checksum Expected checksum of archive (use "" to not perform check) function mkl_download_archive { local url="$1" - local checksum_tool="${2}sum" + local shabits="$2" local exp_checksum="$3" local tmpfile=$(mktemp _mkltmpXXXXXX) @@ -2375,6 +2375,18 @@ function mkl_download_archive { if [[ -n $exp_checksum ]]; then # Verify checksum + + local checksum_tool="" + + # OSX has shasum by default, on Linux it is typically in + # some Perl package that may or may not be installed. 
+ if $(which shasum >/dev/null 2>&1); then + checksum_tool="shasum -b -a ${shabits}" + else + # shaXsum is available in Linux coreutils + checksum_tool="sha${shabits}sum" + fi + local checksum=$($checksum_tool "$tmpfile" | cut -d' ' -f1) if [[ $? -ne 0 ]]; then rm -f "$tmpfile" diff --git a/mklove/modules/configure.libzstd b/mklove/modules/configure.libzstd index a1c7b67304..8cb3a02baa 100644 --- a/mklove/modules/configure.libzstd +++ b/mklove/modules/configure.libzstd @@ -49,7 +49,7 @@ function install_source { if [[ ! -f Makefile ]]; then mkl_download_archive \ "https://github.com/facebook/zstd/releases/download/v${ver}/zstd-${ver}.tar.gz" \ - "sha256" \ + "256" \ $checksum || return 1 fi diff --git a/mklove/modules/configure.zlib b/mklove/modules/configure.zlib index 9f9f4c178f..ba770488c3 100644 --- a/mklove/modules/configure.zlib +++ b/mklove/modules/configure.zlib @@ -49,7 +49,7 @@ function install_source { if [[ ! -f Makefile ]]; then mkl_download_archive \ "https://zlib.net/zlib-${ver}.tar.gz" \ - "sha256" \ + "256" \ "$checksum" || return 1 fi From 9c97720a083dca2c2eaf56a275596dc559bde157 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 28 Sep 2021 14:05:56 +0200 Subject: [PATCH 11/56] mklove: allow --source-deps-only OpenSSL builds on OSX --- mklove/modules/configure.libssl | 72 ++++++++++++++++----------------- 1 file changed, 34 insertions(+), 38 deletions(-) diff --git a/mklove/modules/configure.libssl b/mklove/modules/configure.libssl index 458642f151..8a11e7b4c9 100644 --- a/mklove/modules/configure.libssl +++ b/mklove/modules/configure.libssl @@ -26,7 +26,7 @@ function manual_checks { *) mkl_err "mklove internal error: invalid value for ENABLE_SSL: $ENABLE_SSL"; exit 1 ;; esac - if [[ $MKL_DISTRO == "osx" ]]; then + if [[ $MKL_SOURCE_DEPS_ONLY != y && $MKL_DISTRO == "osx" ]]; then # Add brew's OpenSSL pkg-config path on OSX # to avoid picking up the outdated system-provided openssl/libcrypto. 
mkl_env_append PKG_CONFIG_PATH "/usr/local/opt/openssl/lib/pkgconfig" ":" @@ -71,45 +71,41 @@ function manual_checks { } -# No source installer on osx: rely on openssl from homebrew -if [[ $MKL_DISTRO != osx ]]; then - # Install libcrypto/libssl from source tarball on linux. # # Param 1: name (libcrypto) # Param 2: install-dir-prefix (e.g., DESTDIR) # Param 2: version (optional) - function libcrypto_install_source { - local name=$1 - local destdir=$2 - local ver=1.1.1l - local checksum="0b7a3e5e59c34827fe0c3a74b7ec8baef302b98fa80088d7f9153aa16fa76bd1" - local url=https://www.openssl.org/source/openssl-${ver}.tar.gz - - local conf_args="--openssldir=/usr/lib/ssl no-shared no-zlib no-deprecated" - if [[ $ver == 1.0.* ]]; then - extra_conf_args="${extra_conf_args} no-krb5" - fi - - echo "### Installing $name $ver from source ($url) to $destdir" - if [[ ! -f config ]]; then - echo "### Downloading" - mkl_download_archive "$url" "sha256" "$checksum" || return 1 - fi - - echo "### Configuring" - ./config --prefix="/usr" $conf_args || return $? - - echo "### Building" - make - - echo "### Installing to $destdir" - if [[ $ver == 1.0.* ]]; then - make INSTALL_PREFIX="$destdir" install_sw - else - make DESTDIR="$destdir" install - fi - - return $? - } -fi +function libcrypto_install_source { + local name=$1 + local destdir=$2 + local ver=1.1.1l + local checksum="0b7a3e5e59c34827fe0c3a74b7ec8baef302b98fa80088d7f9153aa16fa76bd1" + local url=https://www.openssl.org/source/openssl-${ver}.tar.gz + + local conf_args="--openssldir=/usr/lib/ssl no-shared no-zlib no-deprecated" + if [[ $ver == 1.0.* ]]; then + extra_conf_args="${extra_conf_args} no-krb5" + fi + + echo "### Installing $name $ver from source ($url) to $destdir" + if [[ ! -f config ]]; then + echo "### Downloading" + mkl_download_archive "$url" "256" "$checksum" || return 1 + fi + + echo "### Configuring" + ./config --prefix="/usr" $conf_args || return $? 
+ + echo "### Building" + make + + echo "### Installing to $destdir" + if [[ $ver == 1.0.* ]]; then + make INSTALL_PREFIX="$destdir" install_sw + else + make DESTDIR="$destdir" install + fi + + return $? +} From 7bc7d6088fc6cd8c0e32b7fe5c8c9d9f613f1529 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 28 Sep 2021 11:57:03 +0200 Subject: [PATCH 12/56] Don't build ancient OSX Sierra artifacts --- .travis.yml | 7 ++----- CHANGELOG.md | 4 +++- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index db2226bf89..58bbb4eebc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -57,13 +57,10 @@ matrix: - name: "OSX clang: +static" os: osx - # Use an older image to disable syslog and for broader compatibility - # with old and new osx versions. - osx_image: xcode9.2 compiler: clang - env: LINKAGE=static HOMEBREW_NO_AUTO_UPDATE=1 + env: LINKAGE=static before_script: - - ./configure --install-deps --disable-lz4-ext --prefix="$PWD/dest" --enable-static --disable-syslog --enable-strip + - ./configure --install-deps --disable-lz4-ext --prefix="$PWD/dest" --enable-static --enable-strip - name: "Windows MinGW-w64 Dynamic" if: tag IS PRESENT diff --git a/CHANGELOG.md b/CHANGELOG.md index 16c27de8c9..9cf9192cae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,9 +6,11 @@ librdkafka v1.8.2 is a maintenance release. * The `librdkafka.redist` 1.8.0 package had two flaws: - the linux-arm64 .so build was a linux-x64 build. - - the included MSVC 140 runtimes for x64 were infact x86. + - the included Windows MSVC 140 runtimes for x64 were infact x86. The release script has been updated to verify the architectures of provided artifacts to avoid this happening in the future. + * Prebuilt binaries for Mac OSX Sierra (10.12) and older are no longer provided. + This affects [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go). 
## Enhancements From c33cdc54fdcb101323bf89e9502769ef3ed32bea Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 28 Sep 2021 12:00:04 +0200 Subject: [PATCH 13/56] Travis: reduce build minutes (tagged jobs) --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 58bbb4eebc..16b2d14ade 100644 --- a/.travis.yml +++ b/.travis.yml @@ -56,6 +56,7 @@ matrix: - ./configure --install-deps --disable-lz4-ext --prefix="$PWD/dest" --enable-werror --enable-strip - name: "OSX clang: +static" + if: tag IS PRESENT os: osx compiler: clang env: LINKAGE=static @@ -107,6 +108,7 @@ matrix: - ./configure --disable-gssapi --install-deps --source-deps-only --enable-static --disable-lz4-ext --prefix="$PWD/dest" --enable-strip - name: "Linux GCC s390x: +devel" + if: tag IS PRESENT os: linux arch: s390x dist: bionic From 806f8f85a44826135f77df76a89fea5eb3274355 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 28 Sep 2021 11:57:40 +0200 Subject: [PATCH 14/56] Travis: use --source-deps-only for dependencies instead of using homebrew Homebrew is fantastically slow to update to Travis-CI, and it is burning build credits like crazy. 
--- .travis.yml | 12 ++++++------ CHANGELOG.md | 3 +++ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index 16b2d14ade..d7aadeda6d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,7 +32,7 @@ matrix: compiler: gcc env: ADDITIONAL_BUILDS="centos" SKIP_MAKE=y - - name: "Linux clang: +alpine +manylinux" + - name: "Linux clang: +alpine +manylinux +werror" os: linux compiler: clang env: ADDITIONAL_BUILDS="alpine manylinux2010_x86_64" LINKAGE=std @@ -47,21 +47,21 @@ matrix: before_script: - ./configure --enable-static --install-deps --source-deps-only --disable-gssapi --disable-lz4-ext --prefix="$PWD/dest" --enable-strip - - name: "OSX GCC" + - name: "OSX GCC: +werror" if: tag IS PRESENT os: osx compiler: gcc - env: LINKAGE=std + env: LINKAGE=std HOMEBREW_NO_AUTO_UPDATE=1 before_script: - - ./configure --install-deps --disable-lz4-ext --prefix="$PWD/dest" --enable-werror --enable-strip + - ./configure --install-deps --source-deps-only --disable-lz4-ext --prefix="$PWD/dest" --enable-werror --enable-strip - name: "OSX clang: +static" if: tag IS PRESENT os: osx compiler: clang - env: LINKAGE=static + env: LINKAGE=static HOMEBREW_NO_AUTO_UPDATE=1 before_script: - - ./configure --install-deps --disable-lz4-ext --prefix="$PWD/dest" --enable-static --enable-strip + - ./configure --install-deps --source-deps-only --disable-lz4-ext --prefix="$PWD/dest" --enable-static --enable-strip - name: "Windows MinGW-w64 Dynamic" if: tag IS PRESENT diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cf9192cae..5137c774d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,9 @@ librdkafka v1.8.2 is a maintenance release. provided artifacts to avoid this happening in the future. * Prebuilt binaries for Mac OSX Sierra (10.12) and older are no longer provided. This affects [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go). + * Prebuilt binaries for Mac OSX now contain statically linked OpenSSL v1.1.1l. 
+ Previously the OpenSSL version was either v1.1.1 or v1.0.2 depending on + build type. ## Enhancements From ec044fca05e8cac39412f8f55ec4d6a6fa283d3a Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 29 Sep 2021 10:22:59 +0200 Subject: [PATCH 15/56] mklove: added mklove_patch --- mklove/modules/configure.base | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/mklove/modules/configure.base b/mklove/modules/configure.base index efb8796438..b3d52b5d3b 100644 --- a/mklove/modules/configure.base +++ b/mklove/modules/configure.base @@ -607,6 +607,41 @@ function mkl_dep_install { } +# Apply patch to a source dependency. +# +# Param 1: config name (e.g. libssl) +# Param 2: patch number (optional, else all) +# +# Returns 0 on success or 1 on error. +function mkl_patch { + local name=$1 + local patchnr="$2" + + if [[ -z $patchnr ]]; then + patchnr="????" + fi + + local patchfile= + local cnt=0 + for patchfile in $(echo ${MKLOVE_DIR}/modules/patches/${name}.${patchnr}-*.patch | sort); do + mkl_dbg "$1: applying patch $patchfile" + patch -p1 < $patchfile + local retcode=$? 
+ if [[ $retcode != 0 ]]; then + mkl_err "mkl_patch: $1: failed to apply patch $patchfile: see source dep build log for details" + return 1 + fi + cnt=$(($cnt + 1)) + done + + if [[ $cnt -lt 1 ]]; then + mkl_err "mkl_patch: $1: no patches matchign $patchnr found" + return 1 + fi + + return 0 +} + ########################################################################### # From f64035c2c0e4c2104d86d79f7988482dfaa83c35 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 29 Sep 2021 12:12:16 +0200 Subject: [PATCH 16/56] mklove: show more of failed build logs --- mklove/modules/configure.base | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mklove/modules/configure.base b/mklove/modules/configure.base index b3d52b5d3b..ab3f7ed46a 100644 --- a/mklove/modules/configure.base +++ b/mklove/modules/configure.base @@ -499,7 +499,9 @@ function mkl_dep_install_source { else mkl_dbg "Source install of $name failed" mkl_check_failed "$iname" "" disable "source installer failed (see $ilog)" - mkl_err "$name source build failed, see $ilog for details. Last 50 lines:" + mkl_err "$name source build failed, see $ilog for details. First 50 and last 50 lines:" + head -50 "$ilog" + echo " .... and last 50 lines ...." tail -50 "$ilog" fi From b76e649929eaf79c24578496db8185f299286b4d Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 28 Sep 2021 16:08:52 +0200 Subject: [PATCH 17/56] mklove openssl installer: workaround build issue in 1.1.1l on osx. 
--- mklove/modules/configure.libssl | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/mklove/modules/configure.libssl b/mklove/modules/configure.libssl index 8a11e7b4c9..2af69c1f25 100644 --- a/mklove/modules/configure.libssl +++ b/mklove/modules/configure.libssl @@ -83,9 +83,9 @@ function libcrypto_install_source { local checksum="0b7a3e5e59c34827fe0c3a74b7ec8baef302b98fa80088d7f9153aa16fa76bd1" local url=https://www.openssl.org/source/openssl-${ver}.tar.gz - local conf_args="--openssldir=/usr/lib/ssl no-shared no-zlib no-deprecated" + local conf_args="--prefix=/usr --openssldir=/usr/lib/ssl no-shared no-zlib no-deprecated" if [[ $ver == 1.0.* ]]; then - extra_conf_args="${extra_conf_args} no-krb5" + conf_args="${conf_args} no-krb5" fi echo "### Installing $name $ver from source ($url) to $destdir" @@ -94,8 +94,13 @@ function libcrypto_install_source { mkl_download_archive "$url" "256" "$checksum" || return 1 fi - echo "### Configuring" - ./config --prefix="/usr" $conf_args || return $? + if [[ $MKL_DISTRO == "osx" ]]; then + # Silence a load of warnings on OSX + conf_args="${conf_args} -Wno-nullability-completeness" + fi + + echo "### Configuring with args $conf_args" + ./config $conf_args || return $? 
echo "### Building" make From 95bc0fb2b3cc88e6fa608ac43125f0531b3079cf Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 29 Sep 2021 10:25:59 +0200 Subject: [PATCH 18/56] Apply OpenSSL PR 16409 patch to fix 1.1.1l build issues on OSX --- mklove/modules/configure.libssl | 7 +++ mklove/modules/patches/README.md | 8 +++ ...osx-rand-include-fix-OpenSSL-PR16409.patch | 56 +++++++++++++++++++ 3 files changed, 71 insertions(+) create mode 100644 mklove/modules/patches/README.md create mode 100644 mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch diff --git a/mklove/modules/configure.libssl b/mklove/modules/configure.libssl index 2af69c1f25..d8c24c4efd 100644 --- a/mklove/modules/configure.libssl +++ b/mklove/modules/configure.libssl @@ -95,6 +95,13 @@ function libcrypto_install_source { fi if [[ $MKL_DISTRO == "osx" ]]; then + # Workaround build issue in 1.1.1l on OSX with older toolchains. + if [[ $ver == 1.1.1l ]]; then + if ! mkl_patch libssl 0000 ; then + return 1 + fi + fi + # Silence a load of warnings on OSX conf_args="${conf_args} -Wno-nullability-completeness" fi diff --git a/mklove/modules/patches/README.md b/mklove/modules/patches/README.md new file mode 100644 index 0000000000..1208dc86df --- /dev/null +++ b/mklove/modules/patches/README.md @@ -0,0 +1,8 @@ +This directory contains patches to dependencies used by the source installers in configure.* + + +Patch filename format is: +.NNNN-description_of_patch.patch + +Where module is the configure. name, NNNN is the patch apply order, e.g. 0000. 
+ diff --git a/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch b/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch new file mode 100644 index 0000000000..b0e37e3256 --- /dev/null +++ b/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch @@ -0,0 +1,56 @@ +From cef404f1e7a598166cbc2fd2e0048f7e2d752ad5 Mon Sep 17 00:00:00 2001 +From: David Carlier +Date: Tue, 24 Aug 2021 22:40:14 +0100 +Subject: [PATCH] Darwin platform allows to build on releases before + Yosemite/ios 8. + +issue #16407 #16408 +--- + crypto/rand/rand_unix.c | 5 +---- + include/crypto/rand.h | 10 ++++++++++ + 2 files changed, 11 insertions(+), 4 deletions(-) + +diff --git a/crypto/rand/rand_unix.c b/crypto/rand/rand_unix.c +index 43f1069d151d..0f4525106af7 100644 +--- a/crypto/rand/rand_unix.c ++++ b/crypto/rand/rand_unix.c +@@ -34,9 +34,6 @@ + #if defined(__OpenBSD__) + # include + #endif +-#if defined(__APPLE__) +-# include +-#endif + + #if defined(OPENSSL_SYS_UNIX) || defined(__DJGPP__) + # include +@@ -381,7 +378,7 @@ static ssize_t syscall_random(void *buf, size_t buflen) + if (errno != ENOSYS) + return -1; + } +-# elif defined(__APPLE__) ++# elif defined(OPENSSL_APPLE_CRYPTO_RANDOM) + if (CCRandomGenerateBytes(buf, buflen) == kCCSuccess) + return (ssize_t)buflen; + +diff --git a/include/crypto/rand.h b/include/crypto/rand.h +index 5350d3a93119..674f840fd13c 100644 +--- a/include/crypto/rand.h ++++ b/include/crypto/rand.h +@@ -20,6 +20,16 @@ + + # include + ++# if defined(__APPLE__) && !defined(OPENSSL_NO_APPLE_CRYPTO_RANDOM) ++# include ++# if (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101000) || \ ++ (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) ++# define OPENSSL_APPLE_CRYPTO_RANDOM 1 ++# include ++# include ++# endif ++# endif ++ + /* forward declaration */ + typedef struct rand_pool_st RAND_POOL; + From 
2fd81e149d0d6fd1eadf58d0ab3756c5487d264b Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 29 Sep 2021 14:39:34 +0200 Subject: [PATCH 19/56] Travis: Remove -Werror from OSX worker since OpenSSL builds have quite a few warnings --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index d7aadeda6d..d41b7ae015 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,13 +47,13 @@ matrix: before_script: - ./configure --enable-static --install-deps --source-deps-only --disable-gssapi --disable-lz4-ext --prefix="$PWD/dest" --enable-strip - - name: "OSX GCC: +werror" + - name: "OSX GCC" if: tag IS PRESENT os: osx compiler: gcc env: LINKAGE=std HOMEBREW_NO_AUTO_UPDATE=1 before_script: - - ./configure --install-deps --source-deps-only --disable-lz4-ext --prefix="$PWD/dest" --enable-werror --enable-strip + - ./configure --install-deps --source-deps-only --disable-lz4-ext --prefix="$PWD/dest" --enable-strip - name: "OSX clang: +static" if: tag IS PRESENT From ecf3d00974a9dda17787de5149282011abed23f7 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Fri, 1 Oct 2021 11:16:02 +0200 Subject: [PATCH 20/56] mklove: try both wget and curl for archive downloads --- mklove/modules/configure.base | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mklove/modules/configure.base b/mklove/modules/configure.base index ab3f7ed46a..e963139400 100644 --- a/mklove/modules/configure.base +++ b/mklove/modules/configure.base @@ -2404,10 +2404,13 @@ function mkl_download_archive { local tmpfile=$(mktemp _mkltmpXXXXXX) - if ! curl -fLs -o "$tmpfile" "$url" ; then - rm -f "$tmpfile" - echo -e "ERROR: Download of $url failed" 1>&2 - return 1 + # Try both wget and curl + if ! wget -nv -O "$tmpfile" "$url" ; then + if ! 
curl -fLsS -o "$tmpfile" "$url" ; then + rm -f "$tmpfile" + echo -e "ERROR: Download of $url failed" 1>&2 + return 1 + fi fi if [[ -n $exp_checksum ]]; then From 6d5fbf9131693288f0f198692fae5aa169b61912 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Fri, 1 Oct 2021 11:38:46 +0200 Subject: [PATCH 21/56] Don't overwrite ssl.ca.location on OSX (#3566) --- CHANGELOG.md | 3 +++ src/rdkafka_conf.c | 3 ++- tests/0004-conf.c | 20 ++++++++++++++++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5137c774d2..c6f9a7912d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,9 @@ librdkafka v1.8.2 is a maintenance release. * Prebuilt binaries for Mac OSX now contain statically linked OpenSSL v1.1.1l. Previously the OpenSSL version was either v1.1.1 or v1.0.2 depending on build type. + * It was not possible to configure `ssl.ca.location` on OSX, the property + automatically would revert back to `probe` (default value). + This regression was introduced in v1.8.0. 
(#3566) ## Enhancements diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index a8aa5af54c..ed1787fbd8 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -3712,7 +3712,8 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, return "`ssl.ca.location` or `ssl.ca.pem`, and memory-based " "set_ssl_cert(CERT_CA) are mutually exclusive."; #ifdef __APPLE__ - else /* Default ssl.ca.location to 'probe' on OSX */ + else if (!conf->ssl.ca && !conf->ssl.ca_location && !conf->ssl.ca_pem) + /* Default ssl.ca.location to 'probe' on OSX */ rd_kafka_conf_set(conf, "ssl.ca.location", "probe", NULL, 0); #endif #endif diff --git a/tests/0004-conf.c b/tests/0004-conf.c index ac6770c98d..4cd7ed4dda 100644 --- a/tests/0004-conf.c +++ b/tests/0004-conf.c @@ -607,6 +607,26 @@ int main_0004_conf (int argc, char **argv) { rd_kafka_conf_destroy(conf); } +#if WITH_SSL + { + TEST_SAY("Verifying that ssl.ca.location is not " + "overwritten (#3566)\n"); + + conf = rd_kafka_conf_new(); + + test_conf_set(conf, "security.protocol", "SSL"); + test_conf_set(conf, "ssl.ca.location", "/?/does/!/not/exist!"); + + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, + errstr, sizeof(errstr)); + TEST_ASSERT(!rk, + "Expected rd_kafka_new() to fail with " + "invalid ssl.ca.location"); + TEST_SAY("rd_kafka_new() failed as expected: %s\n", + errstr); + } +#endif + /* Canonical int values, aliases, s2i-verified strings, doubles */ { static const struct { From 3180dbd5a17da33653c99cd8c37aefc5cc3ee151 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 5 Oct 2021 09:41:03 +0200 Subject: [PATCH 22/56] Travis: bump Linux base builder from trusty to xenial to circumvent ISRG cert expiry .. which causes older versions of OpenSSL+curl to fail to download OpenSSL.. 
--- .travis.yml | 2 +- CHANGELOG.md | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index d41b7ae015..f5a8d99791 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,5 @@ language: c -dist: trusty +dist: xenial cache: ccache addons: diff --git a/CHANGELOG.md b/CHANGELOG.md index c6f9a7912d..e0dda3260e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,8 +14,11 @@ librdkafka v1.8.2 is a maintenance release. * Prebuilt binaries for Mac OSX now contain statically linked OpenSSL v1.1.1l. Previously the OpenSSL version was either v1.1.1 or v1.0.2 depending on build type. + * Some of the prebuilt binaries for Linux were built on Ubuntu 14.04, + these builds are now performed on Ubuntu 16.04 instead. + This may affect users on ancient Linux distributions. * It was not possible to configure `ssl.ca.location` on OSX, the property - automatically would revert back to `probe` (default value). + would automatically revert back to `probe` (default value). This regression was introduced in v1.8.0. (#3566) ## Enhancements From df16ea5530e114ef1bd0442e6955eed0c62f52b2 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 6 Oct 2021 11:15:26 +0200 Subject: [PATCH 23/56] AddOffsetsToTxn Refresh errors did not trigger coord refresh (#3571) --- CHANGELOG.md | 18 ++++++---- src/rdkafka_txnmgr.c | 61 +++++++++++++++++-------------- tests/0105-transactions_mock.c | 65 +++++++++++++++++++++++++++++++++- 3 files changed, 111 insertions(+), 33 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e0dda3260e..26b86533e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,13 @@ librdkafka v1.8.2 is a maintenance release. +## Enhancements + + * Added `ssl.ca.pem` to add CA certificate by PEM string. (#2380) + * Prebuilt binaries for Mac OSX now contain statically linked OpenSSL v1.1.1l. + Previously the OpenSSL version was either v1.1.1 or v1.0.2 depending on + build type. 
+ ## Fixes * The `librdkafka.redist` 1.8.0 package had two flaws: @@ -11,19 +18,18 @@ librdkafka v1.8.2 is a maintenance release. provided artifacts to avoid this happening in the future. * Prebuilt binaries for Mac OSX Sierra (10.12) and older are no longer provided. This affects [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go). - * Prebuilt binaries for Mac OSX now contain statically linked OpenSSL v1.1.1l. - Previously the OpenSSL version was either v1.1.1 or v1.0.2 depending on - build type. * Some of the prebuilt binaries for Linux were built on Ubuntu 14.04, these builds are now performed on Ubuntu 16.04 instead. This may affect users on ancient Linux distributions. * It was not possible to configure `ssl.ca.location` on OSX, the property would automatically revert back to `probe` (default value). This regression was introduced in v1.8.0. (#3566) + * The transactional producer could stall during a transaction if the transaction + coordinator changed while adding offsets to the transaction (send_offsets_to_transaction()). + This stall lasted until the coordinator connection went down, the + transaction timed out, transaction was aborted, or messages were produced + to a new partition, whichever came first. #3571. -## Enhancements - - * Added `ssl.ca.pem` to add CA certificate by PEM string. 
(#2380) *Note: there was no v1.8.1 librdkafka release* diff --git a/src/rdkafka_txnmgr.c b/src/rdkafka_txnmgr.c index 903c11041d..f6a0fb18bd 100644 --- a/src/rdkafka_txnmgr.c +++ b/src/rdkafka_txnmgr.c @@ -45,7 +45,7 @@ static void rd_kafka_txn_curr_api_reply_error (rd_kafka_q_t *rkq, rd_kafka_error_t *error); -static void rd_kafka_txn_coord_timer_restart (rd_kafka_t *rk, int timeout_ms); +static void rd_kafka_txn_coord_timer_start (rd_kafka_t *rk, int timeout_ms); /** @@ -1883,9 +1883,10 @@ static void rd_kafka_txn_handle_AddOffsetsToTxn (rd_kafka_t *rk, err = rd_kafka_txn_normalize_err(err); rd_kafka_dbg(rk, EOS, "ADDOFFSETS", - "AddOffsetsToTxn response from %s: %s (actions 0x%x)", + "AddOffsetsToTxn response from %s: %s (%s)", rkb ? rd_kafka_broker_name(rkb) : "(none)", - rd_kafka_err2name(err), actions); + rd_kafka_err2name(err), + rd_kafka_actions2str(actions)); /* All unhandled errors are considered permanent */ if (err && !actions) @@ -1896,22 +1897,28 @@ static void rd_kafka_txn_handle_AddOffsetsToTxn (rd_kafka_t *rk, "Failed to add offsets to " "transaction: %s", rd_kafka_err2str(err)); + } else { + if (actions & RD_KAFKA_ERR_ACTION_REFRESH) + rd_kafka_txn_coord_timer_start(rk, 50); + + if (actions & RD_KAFKA_ERR_ACTION_RETRY) { + rd_rkb_dbg(rkb, EOS, "ADDOFFSETS", + "Failed to add offsets to transaction on " + "broker %s: %s (after %dms): " + "error is retriable", + rd_kafka_broker_name(rkb), + rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent/1000)); + + if (!rd_timeout_expired(remains_ms) && + rd_kafka_buf_retry(rk->rk_eos.txn_coord, request)) { + rk->rk_eos.txn_req_cnt++; + return; + } - } else if (actions & RD_KAFKA_ERR_ACTION_RETRY) { - rd_rkb_dbg(rkb, EOS, "ADDOFFSETS", - "Failed to add offsets to transaction on broker %s: " - "%s (after %dms): error is retriable", - rd_kafka_broker_name(rkb), - rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent/1000)); - - if (!rd_timeout_expired(remains_ms) && - rd_kafka_buf_retry(rk->rk_eos.txn_coord, 
request)) { - rk->rk_eos.txn_req_cnt++; - return; + /* Propagate as retriable error through + * api_reply() below */ } - /* Propagate as retriable error through api_reply() below */ - } if (err) @@ -2287,7 +2294,7 @@ static void rd_kafka_txn_handle_EndTxn (rd_kafka_t *rk, rd_kafka_err2str(err)); } else { if (actions & RD_KAFKA_ERR_ACTION_REFRESH) - rd_kafka_txn_coord_timer_restart(rk, 500); + rd_kafka_txn_coord_timer_start(rk, 50); if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) rd_kafka_txn_set_abortable_error(rk, err, @@ -2915,15 +2922,17 @@ static void rd_kafka_txn_coord_timer_cb (rd_kafka_timers_t *rkts, void *arg) { } /** - * @brief (Re-)Start coord query timer + * @brief Start coord query timer if not already started. * * @locality rdkafka main thread * @locks none */ -static void rd_kafka_txn_coord_timer_restart (rd_kafka_t *rk, int timeout_ms) { +static void rd_kafka_txn_coord_timer_start (rd_kafka_t *rk, int timeout_ms) { rd_assert(rd_kafka_is_transactional(rk)); rd_kafka_timer_start_oneshot(&rk->rk_timers, - &rk->rk_eos.txn_coord_tmr, rd_true, + &rk->rk_eos.txn_coord_tmr, + /* don't restart if already started */ + rd_false, 1000 * timeout_ms, rd_kafka_txn_coord_timer_cb, rk); } @@ -3079,7 +3088,7 @@ rd_bool_t rd_kafka_txn_coord_query (rd_kafka_t *rk, const char *reason) { if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false)) return rd_true; - rd_kafka_txn_coord_timer_restart(rk, 500); + rd_kafka_txn_coord_timer_start(rk, 500); return rd_false; } @@ -3106,7 +3115,7 @@ rd_bool_t rd_kafka_txn_coord_query (rd_kafka_t *rk, const char *reason) { if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false)) return rd_true; /* Fatal error */ - rd_kafka_txn_coord_timer_restart(rk, 500); + rd_kafka_txn_coord_timer_start(rk, 500); return rd_false; } @@ -3140,7 +3149,7 @@ rd_bool_t rd_kafka_txn_coord_set (rd_kafka_t *rk, rd_kafka_broker_t *rkb, if (!rkb) { rd_kafka_dbg(rk, EOS, "TXNCOORD", "%s", buf); /* Keep querying for the coordinator */ - 
rd_kafka_txn_coord_timer_restart(rk, 500); + rd_kafka_txn_coord_timer_start(rk, 500); } return rd_false; } @@ -3165,7 +3174,7 @@ rd_bool_t rd_kafka_txn_coord_set (rd_kafka_t *rk, rd_kafka_broker_t *rkb, if (!rkb) { /* Lost the current coordinator, query for new coordinator */ - rd_kafka_txn_coord_timer_restart(rk, 500); + rd_kafka_txn_coord_timer_start(rk, 500); } else { /* Trigger PID state machine */ rd_kafka_idemp_pid_fsm(rk); @@ -3197,7 +3206,7 @@ void rd_kafka_txn_coord_monitor_cb (rd_kafka_broker_t *rkb) { /* Coordinator is down, the connection will be re-established * automatically, but we also trigger a coordinator query * to pick up on coordinator change. */ - rd_kafka_txn_coord_timer_restart(rk, 500); + rd_kafka_txn_coord_timer_start(rk, 500); } else { /* Coordinator is up. */ diff --git a/tests/0105-transactions_mock.c b/tests/0105-transactions_mock.c index c92fe54dca..6ed6507262 100644 --- a/tests/0105-transactions_mock.c +++ b/tests/0105-transactions_mock.c @@ -1406,7 +1406,6 @@ static void set_next_coord (rd_kafka_mock_cluster_t *mcluster, /** * @brief Switch coordinator during a transaction. * - * @remark Currently fails due to insufficient coord switch handling. */ static void do_test_txn_switch_coordinator (void) { rd_kafka_t *rk; @@ -1475,6 +1474,68 @@ static void do_test_txn_switch_coordinator (void) { } +/** + * @brief Switch coordinator during a transaction when AddOffsetsToTxn + * are sent. #3571. 
+ */ +static void do_test_txn_switch_coordinator_refresh (void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = "test"; + const char *transactional_id = "txnid"; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + SUB_TEST("Test switching coordinators (refresh)"); + + rk = create_txn_producer(&mcluster, transactional_id, 3, NULL); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + 1); + + /* Start transactioning */ + TEST_SAY("Starting transaction\n"); + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + /* Switch the coordinator so that AddOffsetsToTxnRequest + * will respond with NOT_COORDINATOR. */ + TEST_SAY("Switching to coordinator 2\n"); + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + 2); + + /* + * Send some arbitrary offsets. + */ + offsets = rd_kafka_topic_partition_list_new(4); + rd_kafka_topic_partition_list_add(offsets, "srctopic", + 3)->offset = 12; + rd_kafka_topic_partition_list_add(offsets, "srctop2", + 99)->offset = 99999; + + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( + rk, offsets, + cgmetadata, 20*1000)); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + + /* Produce some messages */ + test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, 10, NULL, 0); + + /* And commit the transaction */ + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + /** * @brief Test fatal error handling when transactions are not supported * by the broker. 
@@ -2623,5 +2684,7 @@ int main_0105_transactions_mock (int argc, char **argv) { do_test_txn_switch_coordinator(); + do_test_txn_switch_coordinator_refresh(); + return 0; } From 09916a752e76359a289d4fe887ea9013a1d817f1 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 6 Oct 2021 13:43:26 +0200 Subject: [PATCH 24/56] Ensure timers are started even if timeout is 0 --- CHANGELOG.md | 4 ++++ src/rdkafka_timer.c | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 26b86533e3..59a7fc6a71 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,10 @@ librdkafka v1.8.2 is a maintenance release. This stall lasted until the coordinator connection went down, the transaction timed out, transaction was aborted, or messages were produced to a new partition, whichever came first. #3571. + * librdkafka's internal timers would not start if the timeout was set to 0, + which would result in some timeout operations not being enforced correctly, + e.g., the transactional producer API timeouts. + These timers are now started with a timeout of 1 microsecond. *Note: there was no v1.8.1 librdkafka release* diff --git a/src/rdkafka_timer.c b/src/rdkafka_timer.c index 58610d92f1..ed88a1ba5e 100644 --- a/src/rdkafka_timer.c +++ b/src/rdkafka_timer.c @@ -180,7 +180,10 @@ void rd_kafka_timer_start0 (rd_kafka_timers_t *rkts, rd_kafka_timer_stop(rkts, rtmr, 0/*!lock*/); - rtmr->rtmr_interval = interval; + /* Make sure the timer interval is non-zero or the timer + * won't be scheduled, which is not what the caller of .._start*() + * would expect. */ + rtmr->rtmr_interval = interval == 0 ? 
1 : interval; rtmr->rtmr_callback = callback; rtmr->rtmr_arg = arg; rtmr->rtmr_oneshot = oneshot; From 2d78e928d8c0d798f341b1843c97eb6dcdecefc3 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Fri, 8 Oct 2021 14:55:11 +0200 Subject: [PATCH 25/56] Transactional producer: Fix possible message loss on OUT_OF_ORDER_SEQ error (#3575) --- CHANGELOG.md | 23 +++++- src/rdkafka_broker.c | 1 + src/rdkafka_idempotence.c | 8 +- src/rdkafka_idempotence.h | 5 +- src/rdkafka_request.c | 8 +- tests/0105-transactions_mock.c | 137 +++++++++++++++++++++++++++++++++ 6 files changed, 171 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 59a7fc6a71..0387cd0e5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,15 +24,30 @@ librdkafka v1.8.2 is a maintenance release. * It was not possible to configure `ssl.ca.location` on OSX, the property would automatically revert back to `probe` (default value). This regression was introduced in v1.8.0. (#3566) + * librdkafka's internal timers would not start if the timeout was set to 0, + which would result in some timeout operations not being enforced correctly, + e.g., the transactional producer API timeouts. + These timers are now started with a timeout of 1 microsecond. + +### Transactional producer fixes + + * Upon quick repeated leader changes the transactional producer could receive + an `OUT_OF_ORDER_SEQUENCE` error from the broker, which triggered an + Epoch bump on the producer resulting in an InitProducerIdRequest being sent + to the transaction coordinator in the middle of a transaction. + This request would start a new transaction on the coordinator, but the + producer would still think (erroneously) it was in current transaction. + Any messages produced in the current transaction prior to this event would + be silently lost when the application committed the transaction, leading + to message loss. + This has been fixed by setting the Abortable transaction error state + in the producer. #3575. 
* The transactional producer could stall during a transaction if the transaction coordinator changed while adding offsets to the transaction (send_offsets_to_transaction()). This stall lasted until the coordinator connection went down, the transaction timed out, transaction was aborted, or messages were produced to a new partition, whichever came first. #3571. - * librdkafka's internal timers would not start if the timeout was set to 0, - which would result in some timeout operations not being enforced correctly, - e.g., the transactional producer API timeouts. - These timers are now started with a timeout of 1 microsecond. + *Note: there was no v1.8.1 librdkafka release* diff --git a/src/rdkafka_broker.c b/src/rdkafka_broker.c index 2b72a5e493..588f1a5583 100644 --- a/src/rdkafka_broker.c +++ b/src/rdkafka_broker.c @@ -3759,6 +3759,7 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, rd_kafka_idemp_drain_epoch_bump( rkb->rkb_rk, + RD_KAFKA_RESP_ERR__TIMED_OUT, "%d message(s) timed out " "on %s [%"PRId32"]", timeoutcnt, diff --git a/src/rdkafka_idempotence.c b/src/rdkafka_idempotence.c index f3cf26641a..a2e9dad151 100644 --- a/src/rdkafka_idempotence.c +++ b/src/rdkafka_idempotence.c @@ -613,7 +613,8 @@ void rd_kafka_idemp_drain_reset (rd_kafka_t *rk, const char *reason) { * @locality any * @locks none */ -void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, const char *fmt, ...) { +void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, rd_kafka_resp_err_t err, + const char *fmt, ...) { va_list ap; char buf[256]; @@ -630,6 +631,11 @@ void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, const char *fmt, ...) { rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_BUMP); rd_kafka_wrunlock(rk); + /* Transactions: bumping the epoch requires the current transaction + * to be aborted. */ + if (rd_kafka_is_transactional(rk)) + rd_kafka_txn_set_abortable_error_with_bump(rk, err, "%s", buf); + /* Check right away if the drain could be done. 
*/ rd_kafka_idemp_check_drain_done(rk); } diff --git a/src/rdkafka_idempotence.h b/src/rdkafka_idempotence.h index a7685c45ff..8be8ae75dd 100644 --- a/src/rdkafka_idempotence.h +++ b/src/rdkafka_idempotence.h @@ -74,8 +74,9 @@ void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, const rd_kafka_pid_t pid); void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk); void rd_kafka_idemp_drain_reset (rd_kafka_t *rk, const char *reason); -void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, const char *fmt, ...) - RD_FORMAT(printf, 2, 3); +void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, rd_kafka_resp_err_t err, + const char *fmt, ...) + RD_FORMAT(printf, 3, 4); void rd_kafka_idemp_drain_toppar (rd_kafka_toppar_t *rktp, const char *reason); void rd_kafka_idemp_inflight_toppar_sub (rd_kafka_t *rk, rd_kafka_toppar_t *rktp); diff --git a/src/rdkafka_request.c b/src/rdkafka_request.c index 3d8f921ad5..e32952a5f3 100644 --- a/src/rdkafka_request.c +++ b/src/rdkafka_request.c @@ -2660,7 +2660,7 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, perr->update_next_err = rd_true; rd_kafka_idemp_drain_epoch_bump( - rk, "skipped sequence numbers"); + rk, perr->err, "skipped sequence numbers"); } else { /* Request's sequence is less than next ack, @@ -2763,7 +2763,7 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, firstmsg->rkm_u.producer.retries); /* Drain outstanding requests and bump epoch. */ - rd_kafka_idemp_drain_epoch_bump(rk, + rd_kafka_idemp_drain_epoch_bump(rk, perr->err, "unknown producer id"); rd_kafka_txn_set_abortable_error_with_bump( @@ -2800,7 +2800,7 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, firstmsg->rkm_u.producer.retries); /* Drain outstanding requests and bump epoch. 
*/ - rd_kafka_idemp_drain_epoch_bump(rk, + rd_kafka_idemp_drain_epoch_bump(rk, perr->err, "unknown producer id"); perr->incr_retry = 0; @@ -3169,7 +3169,7 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, /* Drain outstanding requests and bump the epoch .*/ rd_kafka_idemp_drain_epoch_bump( - rk, "message sequence gap"); + rk, perr->err, "message sequence gap"); } perr->update_next_ack = rd_false; diff --git a/tests/0105-transactions_mock.c b/tests/0105-transactions_mock.c index 6ed6507262..15f91dc55a 100644 --- a/tests/0105-transactions_mock.c +++ b/tests/0105-transactions_mock.c @@ -2618,6 +2618,141 @@ static void do_test_commit_after_msg_timeout (void) { SUB_TEST_PASS(); } + +/** + * @brief #3575: Verify that OUT_OF_ORDER_SEQ does not trigger an epoch bump + * during an ongoing transaction. + * The transaction should instead enter the abortable state. + */ +static void do_test_out_of_order_seq (void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_error_t *error; + int32_t txn_coord = 1, leader = 2; + const char *txnid = "myTxnId"; + test_timing_t timing; + rd_kafka_resp_err_t err; + + SUB_TEST_QUICK(); + + rk = create_txn_producer(&mcluster, txnid, 3, + "batch.num.messages", "1", + NULL); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, + txn_coord); + + rd_kafka_mock_partition_set_leader(mcluster, "mytopic", 0, leader); + + test_curr->ignore_dr_err = rd_true; + test_curr->is_fatal_cb = NULL; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); + + /* + * Start a transaction + */ + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + + + /* Produce one seeding message first to get the leader up and running */ + TEST_CALL_ERR__(rd_kafka_producev(rk, + RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END)); + test_flush(rk, -1); + + /* Let partition leader have a latency of 2 seconds + * so that we can have multiple messages in-flight. 
*/ + rd_kafka_mock_broker_set_rtt(mcluster, leader, 2*1000); + + /* Produce a message, let it fail with with different errors, + * ending with OUT_OF_ORDER which previously triggered an + * Epoch bump. */ + rd_kafka_mock_push_request_errors( + mcluster, + RD_KAFKAP_Produce, + 3, + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER); + + /* Produce three messages that will be delayed + * and have errors injected.*/ + TEST_CALL_ERR__(rd_kafka_producev(rk, + RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev(rk, + RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev(rk, + RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END)); + + /* Now sleep a short while so that the messages are processed + * by the broker and errors are returned. */ + TEST_SAY("Sleeping..\n"); + rd_sleep(5); + + rd_kafka_mock_broker_set_rtt(mcluster, leader, 0); + + /* Produce a fifth message, should fail with ERR__STATE since + * the transaction should have entered the abortable state. */ + err = rd_kafka_producev(rk, + RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__STATE, + "Expected produce() to fail with ERR__STATE, not %s", + rd_kafka_err2name(err)); + TEST_SAY("produce() failed as expected: %s\n", + rd_kafka_err2str(err)); + + /* Commit the transaction, should fail with abortable error. 
*/ + TIMING_START(&timing, "commit_transaction(-1)"); + error = rd_kafka_commit_transaction(rk, -1); + TIMING_STOP(&timing); + TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail"); + + TEST_SAY("commit_transaction() failed (expectedly): %s\n", + rd_kafka_error_string(error)); + + TEST_ASSERT(!rd_kafka_error_is_fatal(error), + "Did not expect fatal error"); + TEST_ASSERT(rd_kafka_error_txn_requires_abort(error), + "Expected abortable error"); + rd_kafka_error_destroy(error); + + /* Abort the transaction */ + TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); + + /* Run a new transaction without errors to verify that the + * producer can recover. */ + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + TEST_CALL_ERR__(rd_kafka_producev(rk, + RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END)); + + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + int main_0105_transactions_mock (int argc, char **argv) { if (test_needs_auth()) { TEST_SKIP("Mock cluster does not support SSL/SASL\n"); @@ -2686,5 +2821,7 @@ int main_0105_transactions_mock (int argc, char **argv) { do_test_txn_switch_coordinator_refresh(); + do_test_out_of_order_seq(); + return 0; } From 063a9ae7a65cebdf1cc128da9815c05f91a2a996 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Fri, 8 Oct 2021 14:55:41 +0200 Subject: [PATCH 26/56] Mock push_request_errors() appended the errors in reverse order --- src/rdkafka_mock.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rdkafka_mock.c b/src/rdkafka_mock.c index 72ca3b429b..468de2cece 100644 --- a/src/rdkafka_mock.c +++ b/src/rdkafka_mock.c @@ -1608,6 +1608,7 @@ rd_kafka_mock_push_request_errors_array (rd_kafka_mock_cluster_t *mcluster, const rd_kafka_resp_err_t *errors) { rd_kafka_mock_error_stack_t *errstack; size_t totcnt; + size_t i; mtx_lock(&mcluster->lock); @@ -1622,8 +1623,8 @@ 
rd_kafka_mock_push_request_errors_array (rd_kafka_mock_cluster_t *mcluster, sizeof(*errstack->errs)); } - while (cnt > 0) { - errstack->errs[errstack->cnt].err = errors[--cnt]; + for (i = 0 ; i < cnt ; i++) { + errstack->errs[errstack->cnt].err = errors[i]; errstack->errs[errstack->cnt++].rtt = 0; } From 1fb2af0955b6d90536b84df95babb8d2f8763b0d Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 20 Oct 2021 08:56:17 +0200 Subject: [PATCH 27/56] Update list of supported KIPs --- INTRODUCTION.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/INTRODUCTION.md b/INTRODUCTION.md index 6eed11c3d1..abb920166d 100644 --- a/INTRODUCTION.md +++ b/INTRODUCTION.md @@ -1841,7 +1841,7 @@ The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/conf | KIP-140 - AdminAPI: ACLs | 0.11.0.0 | Not supported | | KIP-144 - Broker reconnect backoff | 0.11.0.0 | Supported | | KIP-152 - Improved SASL auth error messages | 1.0.0 | Supported | -| KIP-192 - Cleaner idempotence semantics | 1.0.0 | Not supported | +| KIP-192 - Cleaner idempotence semantics | 1.0.0 | Not supported (superceeded by KIP-360) | | KIP-195 - AdminAPI: CreatePartitions | 1.0.0 | Supported | | KIP-204 - AdminAPI: DeleteRecords | 1.1.0 | Supported | | KIP-219 - Client-side throttling | 2.0.0 | Not supported | @@ -1867,7 +1867,7 @@ The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/conf | KIP-359 - Producer: use EpochLeaderId | 2.4.0 | Not supported | | KIP-360 - Improve handling of unknown Idempotent Producer | 2.5.0 | Supported | | KIP-361 - Consumer: add config to disable auto topic creation | 2.3.0 | Supported | -| KIP-368 - SASL period reauth | 2.2.0 | Not supported | +| KIP-368 - SASL periodic reauth | 2.2.0 | Not supported | | KIP-369 - Always roundRobin partitioner | 2.4.0 | Not supported | | KIP-389 - Consumer group max size | 2.2.0 | Supported (error is propagated to application, but the consumer does not raise a fatal error) | | 
KIP-392 - Allow consumers to fetch from closest replica | 2.4.0 | Supported | @@ -1883,14 +1883,14 @@ The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/conf | KIP-460 - AdminAPI: electPreferredLeader | 2.4.0 | Not supported | | KIP-464 - AdminAPI: defaults for createTopics | 2.4.0 | Supported | | KIP-467 - Per-message (sort of) error codes in ProduceResponse | 2.4.0 (WIP) | Not supported | -| KIP-480 - Sticky partitioner | 2.4.0 | Not supported | +| KIP-480 - Sticky partitioner | 2.4.0 | Supported | | KIP-482 - Optional fields in Kafka protocol | 2.4.0 | Partially supported (ApiVersionRequest) | | KIP-496 - AdminAPI: delete offsets | 2.4.0 | Supported | | KIP-511 - Collect Client's Name and Version | 2.4.0 | Supported | | KIP-514 - Bounded flush() | 2.4.0 | Supported | | KIP-517 - Consumer poll() metrics | 2.4.0 | Not supported | | KIP-518 - Allow listing consumer groups per state | 2.6.0 | Not supported | -| KIP-519 - Make SSL engine configurable | 2.6.0 | Not supported | +| KIP-519 - Make SSL engine configurable | 2.6.0 | Supported | | KIP-525 - Return topic metadata and configs in CreateTopics response | 2.4.0 | Not supported | | KIP-526 - Reduce Producer Metadata Lookups for Large Number of Topics | 2.5.0 | Not supported | | KIP-533 - Add default API timeout to AdminClient | 2.5.0 | Not supported | @@ -1904,7 +1904,8 @@ The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/conf | KIP-602 - Use all resolved addresses by default | 2.6.0 | Supported | | KIP-651 - Support PEM format for SSL certs and keys | 2.7.0 | Supported | | KIP-654 - Aborted txns with non-flushed msgs should not be fatal | 2.7.0 | Supported | -| KIP-735 - Increase default consumer session timeout | TBA | Supported | +| KIP-735 - Increase default consumer session timeout | 3.0.0 | Supported | +| KIP-768 - SASL/OAUTHBEARER OIDC support | WIP | Not supported | From 74308416dc357ea07d4125a8301a39626a334e42 Mon Sep 17 00:00:00 2001 From: Magnus 
Edenhill Date: Mon, 16 Aug 2021 13:09:07 +0200 Subject: [PATCH 28/56] Add rd_buf_new() --- src/rdbuf.c | 17 +++++++++++++++++ src/rdbuf.h | 6 ++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/rdbuf.c b/src/rdbuf.c index 7a3d566f74..2652c223e7 100644 --- a/src/rdbuf.c +++ b/src/rdbuf.c @@ -340,6 +340,14 @@ void rd_buf_destroy (rd_buf_t *rbuf) { } +/** + * @brief Same as rd_buf_destroy() but also frees the \p rbuf itself. + */ +void rd_buf_destroy_free (rd_buf_t *rbuf) { + rd_buf_destroy(rbuf); + rd_free(rbuf); +} + /** * @brief Initialize buffer, pre-allocating \p fixed_seg_cnt segments * where the first segment will have a \p buf_size of backing memory. @@ -370,6 +378,15 @@ void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) { } +/** + * @brief Allocates a buffer object and initializes it. + * @sa rd_buf_init() + */ +rd_buf_t *rd_buf_new (size_t fixed_seg_cnt, size_t buf_size) { + rd_buf_t *rbuf = rd_malloc(sizeof(*rbuf)); + rd_buf_init(rbuf, fixed_seg_cnt, buf_size); + return rbuf; +} /** diff --git a/src/rdbuf.h b/src/rdbuf.h index 68c64ba341..29eb51c59e 100644 --- a/src/rdbuf.h +++ b/src/rdbuf.h @@ -212,8 +212,10 @@ size_t rd_buf_get_write_iov (const rd_buf_t *rbuf, size_t iov_max, size_t size_max); void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size); +rd_buf_t *rd_buf_new (size_t fixed_seg_cnt, size_t buf_size); void rd_buf_destroy (rd_buf_t *rbuf); +void rd_buf_destroy_free (rd_buf_t *rbuf); void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump); @@ -226,8 +228,8 @@ int unittest_rdbuf (void); /** - * @name Buffer read operates on slices of an rd_buf_t and does not - * modify the underlying itself. + * @name Buffer reads operate on slices of an rd_buf_t and does not + * modify the underlying rd_buf_t itself. 
* * @warning A slice will not be valid/safe after the buffer or * segments have been modified by a buf write operation From b421ee0ea5eeeec0a64810337536531c2dbf8d36 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Mon, 16 Aug 2021 13:11:39 +0200 Subject: [PATCH 29/56] Import cJSON v1.7.14 URL: https://github.com/DaveGamble/cJSON Tag: v1.7.14 SHA: d2735278ed1c2e4556f53a7a782063b31331dbf7 --- LICENSE.cjson | 22 + LICENSES.txt | 26 + src/cJSON.c | 3095 +++++++++++++++++++++++++++++++++++++++++++++++++ src/cJSON.h | 293 +++++ 4 files changed, 3436 insertions(+) create mode 100644 LICENSE.cjson create mode 100644 src/cJSON.c create mode 100644 src/cJSON.h diff --git a/LICENSE.cjson b/LICENSE.cjson new file mode 100644 index 0000000000..72cd1e1071 --- /dev/null +++ b/LICENSE.cjson @@ -0,0 +1,22 @@ +For cJSON.c and cJSON.h: + +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ diff --git a/LICENSES.txt b/LICENSES.txt index f2aa57d07a..1ab8a1dd4d 100644 --- a/LICENSES.txt +++ b/LICENSES.txt @@ -27,6 +27,32 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +LICENSE.cjson +-------------------------------------------------------------- +For cJSON.c and cJSON.h: + +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + + LICENSE.crc32c -------------------------------------------------------------- # For src/crc32c.c copied (with modifications) from diff --git a/src/cJSON.c b/src/cJSON.c new file mode 100644 index 0000000000..4c6a308eec --- /dev/null +++ b/src/cJSON.c @@ -0,0 +1,3095 @@ +/* + Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. +*/ + +/* cJSON */ +/* JSON parser in C. 
*/ + +/* disable warnings about old C89 functions in MSVC */ +#if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) +#define _CRT_SECURE_NO_DEPRECATE +#endif + +#ifdef __GNUC__ +#pragma GCC visibility push(default) +#endif +#if defined(_MSC_VER) +#pragma warning (push) +/* disable warning about single line comments in system headers */ +#pragma warning (disable : 4001) +#endif + +#include +#include +#include +#include +#include +#include +#include + +#ifdef ENABLE_LOCALES +#include +#endif + +#if defined(_MSC_VER) +#pragma warning (pop) +#endif +#ifdef __GNUC__ +#pragma GCC visibility pop +#endif + +#include "cJSON.h" + +/* define our own boolean type */ +#ifdef true +#undef true +#endif +#define true ((cJSON_bool)1) + +#ifdef false +#undef false +#endif +#define false ((cJSON_bool)0) + +/* define isnan and isinf for ANSI C, if in C99 or above, isnan and isinf has been defined in math.h */ +#ifndef isinf +#define isinf(d) (isnan((d - d)) && !isnan(d)) +#endif +#ifndef isnan +#define isnan(d) (d != d) +#endif + +#ifndef NAN +#define NAN 0.0/0.0 +#endif + +typedef struct { + const unsigned char *json; + size_t position; +} error; +static error global_error = { NULL, 0 }; + +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) +{ + return (const char*) (global_error.json + global_error.position); +} + +CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item) +{ + if (!cJSON_IsString(item)) + { + return NULL; + } + + return item->valuestring; +} + +CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item) +{ + if (!cJSON_IsNumber(item)) + { + return (double) NAN; + } + + return item->valuedouble; +} + +/* This is a safeguard to prevent copy-pasters from using incompatible C and header files */ +#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 14) + #error cJSON.h and cJSON.c have different versions. Make sure that both have the same. 
+#endif + +CJSON_PUBLIC(const char*) cJSON_Version(void) +{ + static char version[15]; + sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH); + + return version; +} + +/* Case insensitive string comparison, doesn't consider two NULL pointers equal though */ +static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2) +{ + if ((string1 == NULL) || (string2 == NULL)) + { + return 1; + } + + if (string1 == string2) + { + return 0; + } + + for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++) + { + if (*string1 == '\0') + { + return 0; + } + } + + return tolower(*string1) - tolower(*string2); +} + +typedef struct internal_hooks +{ + void *(CJSON_CDECL *allocate)(size_t size); + void (CJSON_CDECL *deallocate)(void *pointer); + void *(CJSON_CDECL *reallocate)(void *pointer, size_t size); +} internal_hooks; + +#if defined(_MSC_VER) +/* work around MSVC error C2322: '...' address of dllimport '...' is not static */ +static void * CJSON_CDECL internal_malloc(size_t size) +{ + return malloc(size); +} +static void CJSON_CDECL internal_free(void *pointer) +{ + free(pointer); +} +static void * CJSON_CDECL internal_realloc(void *pointer, size_t size) +{ + return realloc(pointer, size); +} +#else +#define internal_malloc malloc +#define internal_free free +#define internal_realloc realloc +#endif + +/* strlen of character literals resolved at compile time */ +#define static_strlen(string_literal) (sizeof(string_literal) - sizeof("")) + +static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; + +static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) +{ + size_t length = 0; + unsigned char *copy = NULL; + + if (string == NULL) + { + return NULL; + } + + length = strlen((const char*)string) + sizeof(""); + copy = (unsigned char*)hooks->allocate(length); + if (copy == NULL) + { + return NULL; + } + memcpy(copy, 
string, length); + + return copy; +} + +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) +{ + if (hooks == NULL) + { + /* Reset hooks */ + global_hooks.allocate = malloc; + global_hooks.deallocate = free; + global_hooks.reallocate = realloc; + return; + } + + global_hooks.allocate = malloc; + if (hooks->malloc_fn != NULL) + { + global_hooks.allocate = hooks->malloc_fn; + } + + global_hooks.deallocate = free; + if (hooks->free_fn != NULL) + { + global_hooks.deallocate = hooks->free_fn; + } + + /* use realloc only if both free and malloc are used */ + global_hooks.reallocate = NULL; + if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free)) + { + global_hooks.reallocate = realloc; + } +} + +/* Internal constructor. */ +static cJSON *cJSON_New_Item(const internal_hooks * const hooks) +{ + cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON)); + if (node) + { + memset(node, '\0', sizeof(cJSON)); + } + + return node; +} + +/* Delete a cJSON structure. */ +CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) +{ + cJSON *next = NULL; + while (item != NULL) + { + next = item->next; + if (!(item->type & cJSON_IsReference) && (item->child != NULL)) + { + cJSON_Delete(item->child); + } + if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL)) + { + global_hooks.deallocate(item->valuestring); + } + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + global_hooks.deallocate(item->string); + } + global_hooks.deallocate(item); + item = next; + } +} + +/* get the decimal point character of the current locale */ +static unsigned char get_decimal_point(void) +{ +#ifdef ENABLE_LOCALES + struct lconv *lconv = localeconv(); + return (unsigned char) lconv->decimal_point[0]; +#else + return '.'; +#endif +} + +typedef struct +{ + const unsigned char *content; + size_t length; + size_t offset; + size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. 
*/ + internal_hooks hooks; +} parse_buffer; + +/* check if the given size is left to read in a given parse buffer (starting with 1) */ +#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) +/* check if the buffer can be accessed at the given index (starting with 0) */ +#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) +#define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index)) +/* get a pointer to the buffer at the position */ +#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset) + +/* Parse the input text to generate a number, and populate the result into item. */ +static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) +{ + double number = 0; + unsigned char *after_end = NULL; + unsigned char number_c_string[64]; + unsigned char decimal_point = get_decimal_point(); + size_t i = 0; + + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; + } + + /* copy the number into a temporary buffer and replace '.' 
with the decimal point + * of the current locale (for strtod) + * This also takes care of '\0' not necessarily being available for marking the end of the input */ + for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++) + { + switch (buffer_at_offset(input_buffer)[i]) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '+': + case '-': + case 'e': + case 'E': + number_c_string[i] = buffer_at_offset(input_buffer)[i]; + break; + + case '.': + number_c_string[i] = decimal_point; + break; + + default: + goto loop_end; + } + } +loop_end: + number_c_string[i] = '\0'; + + number = strtod((const char*)number_c_string, (char**)&after_end); + if (number_c_string == after_end) + { + return false; /* parse_error */ + } + + item->valuedouble = number; + + /* use saturation in case of overflow */ + if (number >= INT_MAX) + { + item->valueint = INT_MAX; + } + else if (number <= (double)INT_MIN) + { + item->valueint = INT_MIN; + } + else + { + item->valueint = (int)number; + } + + item->type = cJSON_Number; + + input_buffer->offset += (size_t)(after_end - number_c_string); + return true; +} + +/* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) +{ + if (number >= INT_MAX) + { + object->valueint = INT_MAX; + } + else if (number <= (double)INT_MIN) + { + object->valueint = INT_MIN; + } + else + { + object->valueint = (int)number; + } + + return object->valuedouble = number; +} + +CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring) +{ + char *copy = NULL; + /* if object's type is not cJSON_String or is cJSON_IsReference, it should not set valuestring */ + if (!(object->type & cJSON_String) || (object->type & cJSON_IsReference)) + { + return NULL; + } + if (strlen(valuestring) <= strlen(object->valuestring)) + { + 
strcpy(object->valuestring, valuestring); + return object->valuestring; + } + copy = (char*) cJSON_strdup((const unsigned char*)valuestring, &global_hooks); + if (copy == NULL) + { + return NULL; + } + if (object->valuestring != NULL) + { + cJSON_free(object->valuestring); + } + object->valuestring = copy; + + return copy; +} + +typedef struct +{ + unsigned char *buffer; + size_t length; + size_t offset; + size_t depth; /* current nesting depth (for formatted printing) */ + cJSON_bool noalloc; + cJSON_bool format; /* is this print a formatted print */ + internal_hooks hooks; +} printbuffer; + +/* realloc printbuffer if necessary to have at least "needed" bytes more */ +static unsigned char* ensure(printbuffer * const p, size_t needed) +{ + unsigned char *newbuffer = NULL; + size_t newsize = 0; + + if ((p == NULL) || (p->buffer == NULL)) + { + return NULL; + } + + if ((p->length > 0) && (p->offset >= p->length)) + { + /* make sure that offset is valid */ + return NULL; + } + + if (needed > INT_MAX) + { + /* sizes bigger than INT_MAX are currently not supported */ + return NULL; + } + + needed += p->offset + 1; + if (needed <= p->length) + { + return p->buffer + p->offset; + } + + if (p->noalloc) { + return NULL; + } + + /* calculate new buffer size */ + if (needed > (INT_MAX / 2)) + { + /* overflow of int, use INT_MAX if possible */ + if (needed <= INT_MAX) + { + newsize = INT_MAX; + } + else + { + return NULL; + } + } + else + { + newsize = needed * 2; + } + + if (p->hooks.reallocate != NULL) + { + /* reallocate with realloc if available */ + newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize); + if (newbuffer == NULL) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + } + else + { + /* otherwise reallocate manually */ + newbuffer = (unsigned char*)p->hooks.allocate(newsize); + if (!newbuffer) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + if 
(newbuffer) + { + memcpy(newbuffer, p->buffer, p->offset + 1); + } + p->hooks.deallocate(p->buffer); + } + p->length = newsize; + p->buffer = newbuffer; + + return newbuffer + p->offset; +} + +/* calculate the new length of the string in a printbuffer and update the offset */ +static void update_offset(printbuffer * const buffer) +{ + const unsigned char *buffer_pointer = NULL; + if ((buffer == NULL) || (buffer->buffer == NULL)) + { + return; + } + buffer_pointer = buffer->buffer + buffer->offset; + + buffer->offset += strlen((const char*)buffer_pointer); +} + +/* securely comparison of floating-point variables */ +static cJSON_bool compare_double(double a, double b) +{ + double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b); + return (fabs(a - b) <= maxVal * DBL_EPSILON); +} + +/* Render the number nicely from the given item into a string. */ +static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + double d = item->valuedouble; + int length = 0; + size_t i = 0; + unsigned char number_buffer[26] = {0}; /* temporary buffer to print the number into */ + unsigned char decimal_point = get_decimal_point(); + double test = 0.0; + + if (output_buffer == NULL) + { + return false; + } + + /* This checks for NaN and Infinity */ + if (isnan(d) || isinf(d)) + { + length = sprintf((char*)number_buffer, "null"); + } + else + { + /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */ + length = sprintf((char*)number_buffer, "%1.15g", d); + + /* Check whether the original double can be recovered */ + if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d)) + { + /* If not, print with 17 decimal places of precision */ + length = sprintf((char*)number_buffer, "%1.17g", d); + } + } + + /* sprintf failed or buffer overrun occurred */ + if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) + { + return false; + } + + /* reserve 
appropriate space in the output */ + output_pointer = ensure(output_buffer, (size_t)length + sizeof("")); + if (output_pointer == NULL) + { + return false; + } + + /* copy the printed number to the output and replace locale + * dependent decimal point with '.' */ + for (i = 0; i < ((size_t)length); i++) + { + if (number_buffer[i] == decimal_point) + { + output_pointer[i] = '.'; + continue; + } + + output_pointer[i] = number_buffer[i]; + } + output_pointer[i] = '\0'; + + output_buffer->offset += (size_t)length; + + return true; +} + +/* parse 4 digit hexadecimal number */ +static unsigned parse_hex4(const unsigned char * const input) +{ + unsigned int h = 0; + size_t i = 0; + + for (i = 0; i < 4; i++) + { + /* parse digit */ + if ((input[i] >= '0') && (input[i] <= '9')) + { + h += (unsigned int) input[i] - '0'; + } + else if ((input[i] >= 'A') && (input[i] <= 'F')) + { + h += (unsigned int) 10 + input[i] - 'A'; + } + else if ((input[i] >= 'a') && (input[i] <= 'f')) + { + h += (unsigned int) 10 + input[i] - 'a'; + } + else /* invalid */ + { + return 0; + } + + if (i < 3) + { + /* shift left to make place for the next nibble */ + h = h << 4; + } + } + + return h; +} + +/* converts a UTF-16 literal to UTF-8 + * A literal can be one or two sequences of the form \uXXXX */ +static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer) +{ + long unsigned int codepoint = 0; + unsigned int first_code = 0; + const unsigned char *first_sequence = input_pointer; + unsigned char utf8_length = 0; + unsigned char utf8_position = 0; + unsigned char sequence_length = 0; + unsigned char first_byte_mark = 0; + + if ((input_end - first_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + /* get the first utf16 sequence */ + first_code = parse_hex4(first_sequence + 2); + + /* check that the code is valid */ + if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) + { + goto 
fail; + } + + /* UTF16 surrogate pair */ + if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) + { + const unsigned char *second_sequence = first_sequence + 6; + unsigned int second_code = 0; + sequence_length = 12; /* \uXXXX\uXXXX */ + + if ((input_end - second_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u')) + { + /* missing second half of the surrogate pair */ + goto fail; + } + + /* get the second utf16 sequence */ + second_code = parse_hex4(second_sequence + 2); + /* check that the code is valid */ + if ((second_code < 0xDC00) || (second_code > 0xDFFF)) + { + /* invalid second half of the surrogate pair */ + goto fail; + } + + + /* calculate the unicode codepoint from the surrogate pair */ + codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF)); + } + else + { + sequence_length = 6; /* \uXXXX */ + codepoint = first_code; + } + + /* encode as UTF-8 + * takes at maximum 4 bytes to encode: + * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ + if (codepoint < 0x80) + { + /* normal ascii, encoding 0xxxxxxx */ + utf8_length = 1; + } + else if (codepoint < 0x800) + { + /* two bytes, encoding 110xxxxx 10xxxxxx */ + utf8_length = 2; + first_byte_mark = 0xC0; /* 11000000 */ + } + else if (codepoint < 0x10000) + { + /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ + utf8_length = 3; + first_byte_mark = 0xE0; /* 11100000 */ + } + else if (codepoint <= 0x10FFFF) + { + /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ + utf8_length = 4; + first_byte_mark = 0xF0; /* 11110000 */ + } + else + { + /* invalid unicode codepoint */ + goto fail; + } + + /* encode as utf8 */ + for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--) + { + /* 10xxxxxx */ + (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF); + codepoint >>= 6; + } + /* encode first byte */ + if (utf8_length > 1) + { + 
(*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF); + } + else + { + (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); + } + + *output_pointer += utf8_length; + + return sequence_length; + +fail: + return 0; +} + +/* Parse the input text into an unescaped cinput, and populate item. */ +static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) +{ + const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; + const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; + unsigned char *output_pointer = NULL; + unsigned char *output = NULL; + + /* not a string */ + if (buffer_at_offset(input_buffer)[0] != '\"') + { + goto fail; + } + + { + /* calculate approximate size of the output (overestimate) */ + size_t allocation_length = 0; + size_t skipped_bytes = 0; + while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"')) + { + /* is escape sequence */ + if (input_end[0] == '\\') + { + if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length) + { + /* prevent buffer overflow when last input character is a backslash */ + goto fail; + } + skipped_bytes++; + input_end++; + } + input_end++; + } + if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"')) + { + goto fail; /* string ended unexpectedly */ + } + + /* This is at most how much we need for the output */ + allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes; + output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof("")); + if (output == NULL) + { + goto fail; /* allocation failure */ + } + } + + output_pointer = output; + /* loop through the string literal */ + while (input_pointer < input_end) + { + if (*input_pointer != '\\') + { + *output_pointer++ = *input_pointer++; + } + /* escape sequence */ + else + { + unsigned char sequence_length = 2; + if ((input_end - 
input_pointer) < 1) + { + goto fail; + } + + switch (input_pointer[1]) + { + case 'b': + *output_pointer++ = '\b'; + break; + case 'f': + *output_pointer++ = '\f'; + break; + case 'n': + *output_pointer++ = '\n'; + break; + case 'r': + *output_pointer++ = '\r'; + break; + case 't': + *output_pointer++ = '\t'; + break; + case '\"': + case '\\': + case '/': + *output_pointer++ = input_pointer[1]; + break; + + /* UTF-16 literal */ + case 'u': + sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer); + if (sequence_length == 0) + { + /* failed to convert UTF16-literal to UTF-8 */ + goto fail; + } + break; + + default: + goto fail; + } + input_pointer += sequence_length; + } + } + + /* zero terminate the output */ + *output_pointer = '\0'; + + item->type = cJSON_String; + item->valuestring = (char*)output; + + input_buffer->offset = (size_t) (input_end - input_buffer->content); + input_buffer->offset++; + + return true; + +fail: + if (output != NULL) + { + input_buffer->hooks.deallocate(output); + } + + if (input_pointer != NULL) + { + input_buffer->offset = (size_t)(input_pointer - input_buffer->content); + } + + return false; +} + +/* Render the cstring provided to an escaped version that can be printed. 
*/ +static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) +{ + const unsigned char *input_pointer = NULL; + unsigned char *output = NULL; + unsigned char *output_pointer = NULL; + size_t output_length = 0; + /* numbers of additional characters needed for escaping */ + size_t escape_characters = 0; + + if (output_buffer == NULL) + { + return false; + } + + /* empty string */ + if (input == NULL) + { + output = ensure(output_buffer, sizeof("\"\"")); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "\"\""); + + return true; + } + + /* set "flag" to 1 if something needs to be escaped */ + for (input_pointer = input; *input_pointer; input_pointer++) + { + switch (*input_pointer) + { + case '\"': + case '\\': + case '\b': + case '\f': + case '\n': + case '\r': + case '\t': + /* one character escape sequence */ + escape_characters++; + break; + default: + if (*input_pointer < 32) + { + /* UTF-16 escape sequence uXXXX */ + escape_characters += 5; + } + break; + } + } + output_length = (size_t)(input_pointer - input) + escape_characters; + + output = ensure(output_buffer, output_length + sizeof("\"\"")); + if (output == NULL) + { + return false; + } + + /* no characters have to be escaped */ + if (escape_characters == 0) + { + output[0] = '\"'; + memcpy(output + 1, input, output_length); + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; + } + + output[0] = '\"'; + output_pointer = output + 1; + /* copy the string */ + for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++) + { + if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\')) + { + /* normal character, copy */ + *output_pointer = *input_pointer; + } + else + { + /* character needs to be escaped */ + *output_pointer++ = '\\'; + switch (*input_pointer) + { + case '\\': + *output_pointer = '\\'; + break; + case '\"': + *output_pointer = '\"'; + 
break; + case '\b': + *output_pointer = 'b'; + break; + case '\f': + *output_pointer = 'f'; + break; + case '\n': + *output_pointer = 'n'; + break; + case '\r': + *output_pointer = 'r'; + break; + case '\t': + *output_pointer = 't'; + break; + default: + /* escape and print as unicode codepoint */ + sprintf((char*)output_pointer, "u%04x", *input_pointer); + output_pointer += 4; + break; + } + } + } + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; +} + +/* Invoke print_string_ptr (which is useful) on an item. */ +static cJSON_bool print_string(const cJSON * const item, printbuffer * const p) +{ + return print_string_ptr((unsigned char*)item->valuestring, p); +} + +/* Predeclare these prototypes. */ +static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer); + +/* Utility to jump whitespace and cr/lf */ +static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) +{ + if ((buffer == NULL) || (buffer->content == NULL)) + { + return NULL; + } + + if (cannot_access_at_index(buffer, 0)) + { + return buffer; + } + + while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32)) + { + buffer->offset++; + } + + if (buffer->offset == buffer->length) + { + buffer->offset--; + } + + return buffer; +} + +/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */ +static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) +{ + if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0)) + { + 
return NULL; + } + + if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0)) + { + buffer->offset += 3; + } + + return buffer; +} + +CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated) +{ + size_t buffer_length; + + if (NULL == value) + { + return NULL; + } + + /* Adding null character size due to require_null_terminated. */ + buffer_length = strlen(value) + sizeof(""); + + return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, require_null_terminated); +} + +/* Parse an object - create a new root, and populate. */ +CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated) +{ + parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } }; + cJSON *item = NULL; + + /* reset error position */ + global_error.json = NULL; + global_error.position = 0; + + if (value == NULL || 0 == buffer_length) + { + goto fail; + } + + buffer.content = (const unsigned char*)value; + buffer.length = buffer_length; + buffer.offset = 0; + buffer.hooks = global_hooks; + + item = cJSON_New_Item(&global_hooks); + if (item == NULL) /* memory fail */ + { + goto fail; + } + + if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer)))) + { + /* parse failure. ep is set. 
*/ + goto fail; + } + + /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ + if (require_null_terminated) + { + buffer_skip_whitespace(&buffer); + if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0') + { + goto fail; + } + } + if (return_parse_end) + { + *return_parse_end = (const char*)buffer_at_offset(&buffer); + } + + return item; + +fail: + if (item != NULL) + { + cJSON_Delete(item); + } + + if (value != NULL) + { + error local_error; + local_error.json = (const unsigned char*)value; + local_error.position = 0; + + if (buffer.offset < buffer.length) + { + local_error.position = buffer.offset; + } + else if (buffer.length > 0) + { + local_error.position = buffer.length - 1; + } + + if (return_parse_end != NULL) + { + *return_parse_end = (const char*)local_error.json + local_error.position; + } + + global_error = local_error; + } + + return NULL; +} + +/* Default options for cJSON_Parse */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) +{ + return cJSON_ParseWithOpts(value, 0, 0); +} + +CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length) +{ + return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0); +} + +#define cjson_min(a, b) (((a) < (b)) ? 
(a) : (b)) + +static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks) +{ + static const size_t default_buffer_size = 256; + printbuffer buffer[1]; + unsigned char *printed = NULL; + + memset(buffer, 0, sizeof(buffer)); + + /* create buffer */ + buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size); + buffer->length = default_buffer_size; + buffer->format = format; + buffer->hooks = *hooks; + if (buffer->buffer == NULL) + { + goto fail; + } + + /* print the value */ + if (!print_value(item, buffer)) + { + goto fail; + } + update_offset(buffer); + + /* check if reallocate is available */ + if (hooks->reallocate != NULL) + { + printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1); + if (printed == NULL) { + goto fail; + } + buffer->buffer = NULL; + } + else /* otherwise copy the JSON over to a new buffer */ + { + printed = (unsigned char*) hooks->allocate(buffer->offset + 1); + if (printed == NULL) + { + goto fail; + } + memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1)); + printed[buffer->offset] = '\0'; /* just to be sure */ + + /* free the buffer */ + hooks->deallocate(buffer->buffer); + } + + return printed; + +fail: + if (buffer->buffer != NULL) + { + hooks->deallocate(buffer->buffer); + } + + if (printed != NULL) + { + hooks->deallocate(printed); + } + + return NULL; +} + +/* Render a cJSON item/entity/structure to text. 
*/ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) +{ + return (char*)print(item, true, &global_hooks); +} + +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) +{ + return (char*)print(item, false, &global_hooks); +} + +CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) +{ + printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; + + if (prebuffer < 0) + { + return NULL; + } + + p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer); + if (!p.buffer) + { + return NULL; + } + + p.length = (size_t)prebuffer; + p.offset = 0; + p.noalloc = false; + p.format = fmt; + p.hooks = global_hooks; + + if (!print_value(item, &p)) + { + global_hooks.deallocate(p.buffer); + return NULL; + } + + return (char*)p.buffer; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format) +{ + printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; + + if ((length < 0) || (buffer == NULL)) + { + return false; + } + + p.buffer = (unsigned char*)buffer; + p.length = (size_t)length; + p.offset = 0; + p.noalloc = true; + p.format = format; + p.hooks = global_hooks; + + return print_value(item, &p); +} + +/* Parser core - when encountering text, process appropriately. 
*/ +static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer) +{ + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; /* no input */ + } + + /* parse the different types of values */ + /* null */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0)) + { + item->type = cJSON_NULL; + input_buffer->offset += 4; + return true; + } + /* false */ + if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0)) + { + item->type = cJSON_False; + input_buffer->offset += 5; + return true; + } + /* true */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0)) + { + item->type = cJSON_True; + item->valueint = 1; + input_buffer->offset += 4; + return true; + } + /* string */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"')) + { + return parse_string(item, input_buffer); + } + /* number */ + if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9')))) + { + return parse_number(item, input_buffer); + } + /* array */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '[')) + { + return parse_array(item, input_buffer); + } + /* object */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{')) + { + return parse_object(item, input_buffer); + } + + return false; +} + +/* Render a value to text. 
*/ +static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output = NULL; + + if ((item == NULL) || (output_buffer == NULL)) + { + return false; + } + + switch ((item->type) & 0xFF) + { + case cJSON_NULL: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "null"); + return true; + + case cJSON_False: + output = ensure(output_buffer, 6); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "false"); + return true; + + case cJSON_True: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "true"); + return true; + + case cJSON_Number: + return print_number(item, output_buffer); + + case cJSON_Raw: + { + size_t raw_length = 0; + if (item->valuestring == NULL) + { + return false; + } + + raw_length = strlen(item->valuestring) + sizeof(""); + output = ensure(output_buffer, raw_length); + if (output == NULL) + { + return false; + } + memcpy(output, item->valuestring, raw_length); + return true; + } + + case cJSON_String: + return print_string(item, output_buffer); + + case cJSON_Array: + return print_array(item, output_buffer); + + case cJSON_Object: + return print_object(item, output_buffer); + + default: + return false; + } +} + +/* Build an array from input text. 
*/ +static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer) +{ + cJSON *head = NULL; /* head of the linked list */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) + { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (buffer_at_offset(input_buffer)[0] != '[') + { + /* not an array */ + goto fail; + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']')) + { + /* empty array */ + goto success; + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) + { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do + { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) + { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) + { + /* start the linked list */ + current_item = head = new_item; + } + else + { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse next value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } + while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']') + { + goto fail; /* expected end of array */ + } + +success: + input_buffer->depth--; + + if (head != NULL) { + head->prev = current_item; + } + + item->type = cJSON_Array; + item->child = head; + + input_buffer->offset++; + + return true; + +fail: + if 
(head != NULL) + { + cJSON_Delete(head); + } + + return false; +} + +/* Render an array to text */ +static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_element = item->child; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output array. */ + /* opening square bracket */ + output_pointer = ensure(output_buffer, 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer = '['; + output_buffer->offset++; + output_buffer->depth++; + + while (current_element != NULL) + { + if (!print_value(current_element, output_buffer)) + { + return false; + } + update_offset(output_buffer); + if (current_element->next) + { + length = (size_t) (output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ','; + if(output_buffer->format) + { + *output_pointer++ = ' '; + } + *output_pointer = '\0'; + output_buffer->offset += length; + } + current_element = current_element->next; + } + + output_pointer = ensure(output_buffer, 2); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ']'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; +} + +/* Build an object from the text. 
*/ +static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer) +{ + cJSON *head = NULL; /* linked list head */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) + { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{')) + { + goto fail; /* not an object */ + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}')) + { + goto success; /* empty object */ + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) + { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do + { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) + { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) + { + /* start the linked list */ + current_item = head = new_item; + } + else + { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse the name of the child */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_string(current_item, input_buffer)) + { + goto fail; /* failed to parse name */ + } + buffer_skip_whitespace(input_buffer); + + /* swap valuestring and string, because we parsed the name */ + current_item->string = current_item->valuestring; + current_item->valuestring = NULL; + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':')) + { + goto fail; /* invalid object */ + } + + /* parse the value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if 
(!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } + while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}')) + { + goto fail; /* expected end of object */ + } + +success: + input_buffer->depth--; + + if (head != NULL) { + head->prev = current_item; + } + + item->type = cJSON_Object; + item->child = head; + + input_buffer->offset++; + return true; + +fail: + if (head != NULL) + { + cJSON_Delete(head); + } + + return false; +} + +/* Render an object to text. */ +static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_item = item->child; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output: */ + length = (size_t) (output_buffer->format ? 2 : 1); /* fmt: {\n */ + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer++ = '{'; + output_buffer->depth++; + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + output_buffer->offset += length; + + while (current_item) + { + if (output_buffer->format) + { + size_t i; + output_pointer = ensure(output_buffer, output_buffer->depth); + if (output_pointer == NULL) + { + return false; + } + for (i = 0; i < output_buffer->depth; i++) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += output_buffer->depth; + } + + /* print key */ + if (!print_string_ptr((unsigned char*)current_item->string, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + length = (size_t) (output_buffer->format ? 
2 : 1); + output_pointer = ensure(output_buffer, length); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ':'; + if (output_buffer->format) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += length; + + /* print value */ + if (!print_value(current_item, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + /* print comma if not last */ + length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 1 : 0)); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + if (current_item->next) + { + *output_pointer++ = ','; + } + + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + *output_pointer = '\0'; + output_buffer->offset += length; + + current_item = current_item->next; + } + + output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2); + if (output_pointer == NULL) + { + return false; + } + if (output_buffer->format) + { + size_t i; + for (i = 0; i < (output_buffer->depth - 1); i++) + { + *output_pointer++ = '\t'; + } + } + *output_pointer++ = '}'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; +} + +/* Get Array size/item / object item. */ +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) +{ + cJSON *child = NULL; + size_t size = 0; + + if (array == NULL) + { + return 0; + } + + child = array->child; + + while(child != NULL) + { + size++; + child = child->next; + } + + /* FIXME: Can overflow here. 
Cannot be fixed without breaking the API */ + + return (int)size; +} + +static cJSON* get_array_item(const cJSON *array, size_t index) +{ + cJSON *current_child = NULL; + + if (array == NULL) + { + return NULL; + } + + current_child = array->child; + while ((current_child != NULL) && (index > 0)) + { + index--; + current_child = current_child->next; + } + + return current_child; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) +{ + if (index < 0) + { + return NULL; + } + + return get_array_item(array, (size_t)index); +} + +static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) +{ + cJSON *current_element = NULL; + + if ((object == NULL) || (name == NULL)) + { + return NULL; + } + + current_element = object->child; + if (case_sensitive) + { + while ((current_element != NULL) && (current_element->string != NULL) && (strcmp(name, current_element->string) != 0)) + { + current_element = current_element->next; + } + } + else + { + while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0)) + { + current_element = current_element->next; + } + } + + if ((current_element == NULL) || (current_element->string == NULL)) { + return NULL; + } + + return current_element; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string) +{ + return get_object_item(object, string, false); +} + +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string) +{ + return get_object_item(object, string, true); +} + +CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string) +{ + return cJSON_GetObjectItem(object, string) ? 1 : 0; +} + +/* Utility for array list handling. 
*/ +static void suffix_object(cJSON *prev, cJSON *item) +{ + prev->next = item; + item->prev = prev; +} + +/* Utility for handling references. */ +static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks) +{ + cJSON *reference = NULL; + if (item == NULL) + { + return NULL; + } + + reference = cJSON_New_Item(hooks); + if (reference == NULL) + { + return NULL; + } + + memcpy(reference, item, sizeof(cJSON)); + reference->string = NULL; + reference->type |= cJSON_IsReference; + reference->next = reference->prev = NULL; + return reference; +} + +static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) +{ + cJSON *child = NULL; + + if ((item == NULL) || (array == NULL) || (array == item)) + { + return false; + } + + child = array->child; + /* + * To find the last item in array quickly, we use prev in array + */ + if (child == NULL) + { + /* list is empty, start new one */ + array->child = item; + item->prev = item; + item->next = NULL; + } + else + { + /* append to the end */ + if (child->prev) + { + suffix_object(child->prev, item); + array->child->prev = item; + } + } + + return true; +} + +/* Add item to array/object. 
*/ +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item) +{ + return add_item_to_array(array, item); +} + +#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) + #pragma GCC diagnostic push +#endif +#ifdef __GNUC__ +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif +/* helper function to cast away const */ +static void* cast_away_const(const void* string) +{ + return (void*)string; +} +#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) + #pragma GCC diagnostic pop +#endif + + +static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key) +{ + char *new_key = NULL; + int new_type = cJSON_Invalid; + + if ((object == NULL) || (string == NULL) || (item == NULL) || (object == item)) + { + return false; + } + + if (constant_key) + { + new_key = (char*)cast_away_const(string); + new_type = item->type | cJSON_StringIsConst; + } + else + { + new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks); + if (new_key == NULL) + { + return false; + } + + new_type = item->type & ~cJSON_StringIsConst; + } + + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + hooks->deallocate(item->string); + } + + item->string = new_key; + item->type = new_type; + + return add_item_to_array(object, item); +} + +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) +{ + return add_item_to_object(object, string, item, &global_hooks, false); +} + +/* Add an item to an object with constant string as key */ +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) +{ + return add_item_to_object(object, string, item, &global_hooks, true); +} + +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) +{ + if (array == 
NULL) + { + return false; + } + + return add_item_to_array(array, create_reference(item, &global_hooks)); +} + +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) +{ + if ((object == NULL) || (string == NULL)) + { + return false; + } + + return add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false); +} + +CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name) +{ + cJSON *null = cJSON_CreateNull(); + if (add_item_to_object(object, name, null, &global_hooks, false)) + { + return null; + } + + cJSON_Delete(null); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name) +{ + cJSON *true_item = cJSON_CreateTrue(); + if (add_item_to_object(object, name, true_item, &global_hooks, false)) + { + return true_item; + } + + cJSON_Delete(true_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name) +{ + cJSON *false_item = cJSON_CreateFalse(); + if (add_item_to_object(object, name, false_item, &global_hooks, false)) + { + return false_item; + } + + cJSON_Delete(false_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean) +{ + cJSON *bool_item = cJSON_CreateBool(boolean); + if (add_item_to_object(object, name, bool_item, &global_hooks, false)) + { + return bool_item; + } + + cJSON_Delete(bool_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number) +{ + cJSON *number_item = cJSON_CreateNumber(number); + if (add_item_to_object(object, name, number_item, &global_hooks, false)) + { + return number_item; + } + + cJSON_Delete(number_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const 
string) +{ + cJSON *string_item = cJSON_CreateString(string); + if (add_item_to_object(object, name, string_item, &global_hooks, false)) + { + return string_item; + } + + cJSON_Delete(string_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw) +{ + cJSON *raw_item = cJSON_CreateRaw(raw); + if (add_item_to_object(object, name, raw_item, &global_hooks, false)) + { + return raw_item; + } + + cJSON_Delete(raw_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name) +{ + cJSON *object_item = cJSON_CreateObject(); + if (add_item_to_object(object, name, object_item, &global_hooks, false)) + { + return object_item; + } + + cJSON_Delete(object_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name) +{ + cJSON *array = cJSON_CreateArray(); + if (add_item_to_object(object, name, array, &global_hooks, false)) + { + return array; + } + + cJSON_Delete(array); + return NULL; +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item) +{ + if ((parent == NULL) || (item == NULL)) + { + return NULL; + } + + if (item != parent->child) + { + /* not the first element */ + item->prev->next = item->next; + } + if (item->next != NULL) + { + /* not the last element */ + item->next->prev = item->prev; + } + + if (item == parent->child) + { + /* first element */ + parent->child = item->next; + } + else if (item->next == NULL) + { + /* last element */ + parent->child->prev = item->prev; + } + + /* make sure the detached item doesn't point anywhere anymore */ + item->prev = NULL; + item->next = NULL; + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) +{ + if (which < 0) + { + return NULL; + } + + return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which)); +} + +CJSON_PUBLIC(void) 
cJSON_DeleteItemFromArray(cJSON *array, int which) +{ + cJSON_Delete(cJSON_DetachItemFromArray(array, which)); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string) +{ + cJSON *to_detach = cJSON_GetObjectItem(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) +{ + cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string) +{ + cJSON_Delete(cJSON_DetachItemFromObject(object, string)); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) +{ + cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); +} + +/* Replace array/object items with new ones. */ +CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) +{ + cJSON *after_inserted = NULL; + + if (which < 0) + { + return false; + } + + after_inserted = get_array_item(array, (size_t)which); + if (after_inserted == NULL) + { + return add_item_to_array(array, newitem); + } + + newitem->next = after_inserted; + newitem->prev = after_inserted->prev; + after_inserted->prev = newitem; + if (after_inserted == array->child) + { + array->child = newitem; + } + else + { + newitem->prev->next = newitem; + } + return true; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement) +{ + if ((parent == NULL) || (replacement == NULL) || (item == NULL)) + { + return false; + } + + if (replacement == item) + { + return true; + } + + replacement->next = item->next; + replacement->prev = item->prev; + + if (replacement->next != NULL) + { + replacement->next->prev = replacement; + } + if (parent->child == item) + { + if (parent->child->prev == parent->child) + { + 
replacement->prev = replacement; + } + parent->child = replacement; + } + else + { /* + * To find the last item in array quickly, we use prev in array. + * We can't modify the last item's next pointer where this item was the parent's child + */ + if (replacement->prev != NULL) + { + replacement->prev->next = replacement; + } + if (replacement->next == NULL) + { + parent->child->prev = replacement; + } + } + + item->next = NULL; + item->prev = NULL; + cJSON_Delete(item); + + return true; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) +{ + if (which < 0) + { + return false; + } + + return cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem); +} + +static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive) +{ + if ((replacement == NULL) || (string == NULL)) + { + return false; + } + + /* replace the name in the replacement */ + if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL)) + { + cJSON_free(replacement->string); + } + replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + replacement->type &= ~cJSON_StringIsConst; + + return cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement); +} + +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) +{ + return replace_item_in_object(object, string, newitem, false); +} + +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) +{ + return replace_item_in_object(object, string, newitem, true); +} + +/* Create basic types: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_NULL; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) +{ + cJSON *item = 
cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_True; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = boolean ? cJSON_True : cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_Number; + item->valuedouble = num; + + /* use saturation in case of overflow */ + if (num >= INT_MAX) + { + item->valueint = INT_MAX; + } + else if (num <= (double)INT_MIN) + { + item->valueint = INT_MIN; + } + else + { + item->valueint = (int)num; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_String; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + if(!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) + { + item->type = cJSON_String | cJSON_IsReference; + item->valuestring = (char*)cast_away_const(string); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Object | cJSON_IsReference; + item->child = (cJSON*)cast_away_const(child); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Array | cJSON_IsReference; + item->child = 
(cJSON*)cast_away_const(child); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_Raw; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks); + if(!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type=cJSON_Array; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_Object; + } + + return item; +} + +/* Create Arrays: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if (!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + a->child->prev = n; + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber((double)numbers[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + a->child->prev = n; + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = 
cJSON_CreateArray(); + + for(i = 0;a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + a->child->prev = n; + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (strings == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateString(strings[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p,n); + } + p = n; + } + a->child->prev = n; + + return a; +} + +/* Duplication */ +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) +{ + cJSON *newitem = NULL; + cJSON *child = NULL; + cJSON *next = NULL; + cJSON *newchild = NULL; + + /* Bail on bad ptr */ + if (!item) + { + goto fail; + } + /* Create new item */ + newitem = cJSON_New_Item(&global_hooks); + if (!newitem) + { + goto fail; + } + /* Copy over all vars */ + newitem->type = item->type & (~cJSON_IsReference); + newitem->valueint = item->valueint; + newitem->valuedouble = item->valuedouble; + if (item->valuestring) + { + newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks); + if (!newitem->valuestring) + { + goto fail; + } + } + if (item->string) + { + newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks); + if (!newitem->string) + { + goto fail; + } + } + /* If non-recursive, then we're done! */ + if (!recurse) + { + return newitem; + } + /* Walk the ->next chain for the child. 
*/ + child = item->child; + while (child != NULL) + { + newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */ + if (!newchild) + { + goto fail; + } + if (next != NULL) + { + /* If newitem->child already set, then crosswire ->prev and ->next and move on */ + next->next = newchild; + newchild->prev = next; + next = newchild; + } + else + { + /* Set newitem->child and move to it */ + newitem->child = newchild; + next = newchild; + } + child = child->next; + } + if (newitem && newitem->child) + { + newitem->child->prev = newchild; + } + + return newitem; + +fail: + if (newitem != NULL) + { + cJSON_Delete(newitem); + } + + return NULL; +} + +static void skip_oneline_comment(char **input) +{ + *input += static_strlen("//"); + + for (; (*input)[0] != '\0'; ++(*input)) + { + if ((*input)[0] == '\n') { + *input += static_strlen("\n"); + return; + } + } +} + +static void skip_multiline_comment(char **input) +{ + *input += static_strlen("/*"); + + for (; (*input)[0] != '\0'; ++(*input)) + { + if (((*input)[0] == '*') && ((*input)[1] == '/')) + { + *input += static_strlen("*/"); + return; + } + } +} + +static void minify_string(char **input, char **output) { + (*output)[0] = (*input)[0]; + *input += static_strlen("\""); + *output += static_strlen("\""); + + + for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) { + (*output)[0] = (*input)[0]; + + if ((*input)[0] == '\"') { + (*output)[0] = '\"'; + *input += static_strlen("\""); + *output += static_strlen("\""); + return; + } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) { + (*output)[1] = (*input)[1]; + *input += static_strlen("\""); + *output += static_strlen("\""); + } + } +} + +CJSON_PUBLIC(void) cJSON_Minify(char *json) +{ + char *into = json; + + if (json == NULL) + { + return; + } + + while (json[0] != '\0') + { + switch (json[0]) + { + case ' ': + case '\t': + case '\r': + case '\n': + json++; + break; + + case '/': + if (json[1] == '/') + { + 
skip_oneline_comment(&json); + } + else if (json[1] == '*') + { + skip_multiline_comment(&json); + } else { + json++; + } + break; + + case '\"': + minify_string(&json, (char**)&into); + break; + + default: + into[0] = json[0]; + json++; + into++; + } + } + + /* and null-terminate. */ + *into = '\0'; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Invalid; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_False; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xff) == cJSON_True; +} + + +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & (cJSON_True | cJSON_False)) != 0; +} +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_NULL; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Number; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_String; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Array; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Object; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Raw; +} + +CJSON_PUBLIC(cJSON_bool) 
cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive) +{ + if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a)) + { + return false; + } + + /* check if type is valid */ + switch (a->type & 0xFF) + { + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + case cJSON_Number: + case cJSON_String: + case cJSON_Raw: + case cJSON_Array: + case cJSON_Object: + break; + + default: + return false; + } + + /* identical objects are equal */ + if (a == b) + { + return true; + } + + switch (a->type & 0xFF) + { + /* in these cases and equal type is enough */ + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + return true; + + case cJSON_Number: + if (compare_double(a->valuedouble, b->valuedouble)) + { + return true; + } + return false; + + case cJSON_String: + case cJSON_Raw: + if ((a->valuestring == NULL) || (b->valuestring == NULL)) + { + return false; + } + if (strcmp(a->valuestring, b->valuestring) == 0) + { + return true; + } + + return false; + + case cJSON_Array: + { + cJSON *a_element = a->child; + cJSON *b_element = b->child; + + for (; (a_element != NULL) && (b_element != NULL);) + { + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + + a_element = a_element->next; + b_element = b_element->next; + } + + /* one of the arrays is longer than the other */ + if (a_element != b_element) { + return false; + } + + return true; + } + + case cJSON_Object: + { + cJSON *a_element = NULL; + cJSON *b_element = NULL; + cJSON_ArrayForEach(a_element, a) + { + /* TODO This has O(n^2) runtime, which is horrible! 
*/ + b_element = get_object_item(b, a_element->string, case_sensitive); + if (b_element == NULL) + { + return false; + } + + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + } + + /* doing this twice, once on a and b to prevent true comparison if a subset of b + * TODO: Do this the proper way, this is just a fix for now */ + cJSON_ArrayForEach(b_element, b) + { + a_element = get_object_item(a, b_element->string, case_sensitive); + if (a_element == NULL) + { + return false; + } + + if (!cJSON_Compare(b_element, a_element, case_sensitive)) + { + return false; + } + } + + return true; + } + + default: + return false; + } +} + +CJSON_PUBLIC(void *) cJSON_malloc(size_t size) +{ + return global_hooks.allocate(size); +} + +CJSON_PUBLIC(void) cJSON_free(void *object) +{ + global_hooks.deallocate(object); +} diff --git a/src/cJSON.h b/src/cJSON.h new file mode 100644 index 0000000000..e97e5f4cdc --- /dev/null +++ b/src/cJSON.h @@ -0,0 +1,293 @@ +/* + Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. +*/ + +#ifndef cJSON__h +#define cJSON__h + +#ifdef __cplusplus +extern "C" +{ +#endif + +#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) +#define __WINDOWS__ +#endif + +#ifdef __WINDOWS__ + +/* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. For windows you have 3 define options: + +CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols +CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default) +CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol + +For *nix builds that support visibility attribute, you can define similar behavior by + +setting default visibility to hidden by adding +-fvisibility=hidden (for gcc) +or +-xldscope=hidden (for sun cc) +to CFLAGS + +then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does + +*/ + +#define CJSON_CDECL __cdecl +#define CJSON_STDCALL __stdcall + +/* export symbols by default, this is necessary for copy pasting the C and header file */ +#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS) +#define CJSON_EXPORT_SYMBOLS +#endif + +#if defined(CJSON_HIDE_SYMBOLS) +#define CJSON_PUBLIC(type) type CJSON_STDCALL +#elif defined(CJSON_EXPORT_SYMBOLS) +#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL +#elif defined(CJSON_IMPORT_SYMBOLS) +#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL +#endif +#else /* !__WINDOWS__ */ +#define CJSON_CDECL +#define CJSON_STDCALL + +#if 
(defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY) +#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type +#else +#define CJSON_PUBLIC(type) type +#endif +#endif + +/* project version */ +#define CJSON_VERSION_MAJOR 1 +#define CJSON_VERSION_MINOR 7 +#define CJSON_VERSION_PATCH 14 + +#include + +/* cJSON Types: */ +#define cJSON_Invalid (0) +#define cJSON_False (1 << 0) +#define cJSON_True (1 << 1) +#define cJSON_NULL (1 << 2) +#define cJSON_Number (1 << 3) +#define cJSON_String (1 << 4) +#define cJSON_Array (1 << 5) +#define cJSON_Object (1 << 6) +#define cJSON_Raw (1 << 7) /* raw json */ + +#define cJSON_IsReference 256 +#define cJSON_StringIsConst 512 + +/* The cJSON structure: */ +typedef struct cJSON +{ + /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ + struct cJSON *next; + struct cJSON *prev; + /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ + struct cJSON *child; + + /* The type of the item, as above. */ + int type; + + /* The item's string, if type==cJSON_String and type == cJSON_Raw */ + char *valuestring; + /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */ + int valueint; + /* The item's number, if type==cJSON_Number */ + double valuedouble; + + /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ + char *string; +} cJSON; + +typedef struct cJSON_Hooks +{ + /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */ + void *(CJSON_CDECL *malloc_fn)(size_t sz); + void (CJSON_CDECL *free_fn)(void *ptr); +} cJSON_Hooks; + +typedef int cJSON_bool; + +/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them. + * This is to prevent stack overflows. 
*/ +#ifndef CJSON_NESTING_LIMIT +#define CJSON_NESTING_LIMIT 1000 +#endif + +/* returns the version of cJSON as a string */ +CJSON_PUBLIC(const char*) cJSON_Version(void); + +/* Supply malloc, realloc and free functions to cJSON */ +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks); + +/* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */ +/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value); +CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length); +/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */ +/* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */ +CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated); +CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated); + +/* Render a cJSON entity to text for transfer/storage. */ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item); +/* Render a cJSON entity to text for transfer/storage without any formatting. */ +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item); +/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. 
fmt=0 gives unformatted, =1 gives formatted */ +CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt); +/* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */ +/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */ +CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format); +/* Delete a cJSON entity and all subentities. */ +CJSON_PUBLIC(void) cJSON_Delete(cJSON *item); + +/* Returns the number of items in an array (or object). */ +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array); +/* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */ +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index); +/* Get item "string" from object. Case insensitive. */ +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string); +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string); +CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string); +/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. 
*/ +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void); + +/* Check item type and return its value */ +CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item); +CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item); + +/* These functions check the type of an item */ +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item); + +/* These calls create a cJSON item of the appropriate type. 
*/ +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean); +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num); +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string); +/* raw json */ +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw); +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void); + +/* Create a string where valuestring references a string so + * it will not be freed by cJSON_Delete */ +CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string); +/* Create an object/array that only references it's elements so + * they will not be freed by cJSON_Delete */ +CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child); +CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child); + +/* These utilities create an Array of count items. + * The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count); + +/* Append item to the specified array/object. */ +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); +/* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object. 
+ * WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before + * writing to `item->string` */ +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); +/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */ +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); + +/* Remove/Detach items from Arrays/Objects. */ +CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which); +CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string); +CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string); +CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string); + +/* Update array items. */ +CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. 
*/ +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement); +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem); +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem); +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem); + +/* Duplicate a cJSON item */ +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse); +/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will + * need to be released. With recurse!=0, it will duplicate any children connected to the item. + * The item->next and ->prev pointers are always zero on return from Duplicate. */ +/* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal. + * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */ +CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); + +/* Minify a strings, remove blank characters(such as ' ', '\t', '\r', '\n') from strings. + * The input pointer json cannot point to a read-only address area, such as a string constant, + * but should point to a readable and writable adress area. */ +CJSON_PUBLIC(void) cJSON_Minify(char *json); + +/* Helper functions for creating and adding items to an object at the same time. + * They return the added item or NULL on failure. 
*/ +CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean); +CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number); +CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string); +CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw); +CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name); + +/* When assigning an integer value, it needs to be propagated to valuedouble too. */ +#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number)) +/* helper for the cJSON_SetNumberValue macro */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number); +#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number)) +/* Change the valuestring of a cJSON_String object, only takes effect when type of object is cJSON_String */ +CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring); + +/* Macro for iterating over an array or object */ +#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? 
(array)->child : NULL; element != NULL; element = element->next) + +/* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */ +CJSON_PUBLIC(void *) cJSON_malloc(size_t size); +CJSON_PUBLIC(void) cJSON_free(void *object); + +#ifdef __cplusplus +} +#endif + +#endif From 84c2946701d6c0766df3a387b3463c46bcc58a50 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Mon, 16 Aug 2021 13:11:39 +0200 Subject: [PATCH 30/56] Added HTTP(S) client using cURL --- configure.self | 2 + mklove/modules/configure.libcurl | 55 +++++ src/CMakeLists.txt | 5 + src/Makefile | 3 +- src/rdhttp.c | 363 ++++++++++++++++++++++++++++ src/rdhttp.h | 73 ++++++ src/rdkafka.c | 19 +- src/rdunittest.c | 6 + src/rdunittest.h | 12 + src/win32_config.h | 3 +- tests/LibrdkafkaTestApp.py | 3 + tests/interactive_broker_version.py | 3 + vcpkg.json | 4 + win32/librdkafka.vcxproj | 4 + 14 files changed, 552 insertions(+), 3 deletions(-) create mode 100644 mklove/modules/configure.libcurl create mode 100644 src/rdhttp.c create mode 100644 src/rdhttp.h diff --git a/configure.self b/configure.self index 81a6014438..4267f65828 100644 --- a/configure.self +++ b/configure.self @@ -20,6 +20,7 @@ mkl_require zlib mkl_require libzstd mkl_require libssl mkl_require libsasl2 +mkl_require libcurl # Generate version variables from rdkafka.h hex version define # so we can use it as string version when generating a pkg-config file. 
@@ -105,6 +106,7 @@ void foo (void) { mkl_check "libssl" mkl_check "libsasl2" mkl_check "libzstd" + mkl_check "libcurl" if mkl_lib_check "libm" "" disable CC "-lm" \ "#include "; then diff --git a/mklove/modules/configure.libcurl b/mklove/modules/configure.libcurl new file mode 100644 index 0000000000..9616f3b195 --- /dev/null +++ b/mklove/modules/configure.libcurl @@ -0,0 +1,55 @@ +#!/bin/bash +# +# libcurl support, with installer +# +# Usage: +# mkl_require libcurl +# +# And then call the following function from the correct place/order in checks: +# mkl_check libcurl +# + +mkl_toggle_option "Feature" ENABLE_CURL "--enable-curl" "Enable HTTP client (using libcurl)" "try" + +function manual_checks { + case "$ENABLE_CURL" in + n) return 0 ;; + y) local action=fail ;; + try) local action=disable ;; + *) mkl_err "mklove internal error: invalid value for ENABLE_CURL: $ENABLE_CURL"; exit 1 ;; + esac + + mkl_meta_set "libcurl" "apk" "curl-dev curl-static" + mkl_meta_set "libcurl" "deb" "libcurl4-openssl-dev" + mkl_meta_set "libcurl" "static" "libcurl.a" + mkl_lib_check "libcurl" "WITH_CURL" $action CC "-lcurl" \ + " +#include + +void foo (void) { + curl_global_init(CURL_GLOBAL_DEFAULT); +} +" +} + + +# Install curl from source tarball +# +# Param 1: name (libcurl) +# Param 2: install-dir-prefix (e.g., DESTDIR) +# Param 3: version (optional) +function install_source { + local name=$1 + local destdir=$2 + local ver=7.78.0 + + echo "### Installing $name $ver from source to $destdir" + if [[ ! -f Makefile ]]; then + curl -fL https://curl.se/download/curl-${ver}.tar.gz | \ + tar xzf - --strip-components 1 + fi + + ./configure --with-openssl + time make -j DESTDIR="${destdir}" prefix=/usr all install + return $? 
+} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6e24a23882..89a00a196c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -65,12 +65,17 @@ set( tinycthread.c tinycthread_extra.c rdxxhash.c + cJSON.c ) if(WITH_SSL) list(APPEND sources rdkafka_ssl.c) endif() +if(WITH_CURL) + list(APPEND sources rdhttp.c) +endif() + if(WITH_HDRHISTOGRAM) list(APPEND sources rdhdrhistogram.c) endif() diff --git a/src/Makefile b/src/Makefile index a7fca7561f..814b0f6cd6 100644 --- a/src/Makefile +++ b/src/Makefile @@ -17,6 +17,7 @@ SRCS_$(WITH_ZLIB) += rdgz.c SRCS_$(WITH_ZSTD) += rdkafka_zstd.c SRCS_$(WITH_HDRHISTOGRAM) += rdhdrhistogram.c SRCS_$(WITH_SSL) += rdkafka_ssl.c +SRCS_$(WITH_CURL) += rdhttp.c SRCS_LZ4 = rdxxhash.c ifneq ($(WITH_LZ4_EXT), y) @@ -41,7 +42,7 @@ SRCS= rdkafka.c rdkafka_broker.c rdkafka_msg.c rdkafka_topic.c \ rdkafka_assignor.c rdkafka_range_assignor.c \ rdkafka_roundrobin_assignor.c rdkafka_sticky_assignor.c \ rdkafka_feature.c \ - rdcrc32.c crc32c.c rdmurmur2.c rdfnv1a.c \ + rdcrc32.c crc32c.c rdmurmur2.c rdfnv1a.c cJSON.c \ rdaddr.c rdrand.c rdlist.c \ tinycthread.c tinycthread_extra.c \ rdlog.c rdstring.c rdkafka_event.c rdkafka_metadata.c \ diff --git a/src/rdhttp.c b/src/rdhttp.c new file mode 100644 index 0000000000..9253fd5d9f --- /dev/null +++ b/src/rdhttp.c @@ -0,0 +1,363 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2021 Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
 + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/** + * @name HTTP client + * + */ + +#include "rdkafka_int.h" +#include "rdunittest.h" + +#include + +#include +#include "rdhttp.h" + +/** Maximum response size, increase as necessary. */ +#define RD_HTTP_RESPONSE_SIZE_MAX 1024*1024*500 /* 500MB */ + + +void rd_http_error_destroy (rd_http_error_t *herr) { + rd_free(herr); +} + +static rd_http_error_t *rd_http_error_new (int code, const char *fmt, ...) + RD_FORMAT(printf, 2, 3); +static rd_http_error_t *rd_http_error_new (int code, const char *fmt, ...) { + size_t len = 0; + rd_http_error_t *herr; + va_list ap; + + va_start(ap, fmt); + + if (fmt && *fmt) { + va_list ap2; + va_copy(ap2, ap); + len = rd_vsnprintf(NULL, 0, fmt, ap2); + va_end(ap2); + } + + /* Use single allocation for both herr and the error string */ + herr = rd_malloc(sizeof(*herr) + len + 1); + herr->code = code; + herr->errstr = herr->data; + + if (len > 0) + rd_vsnprintf(herr->errstr, len + 1, fmt, ap); + else + herr->errstr[0] = '\0'; + + va_end(ap); + + return herr; +} + +/** + * @brief Same as rd_http_error_new() but reads the error string from the + * provided buffer. 
+ */ +static rd_http_error_t *rd_http_error_new_from_buf (int code, + const rd_buf_t *rbuf) { + rd_http_error_t *herr; + rd_slice_t slice; + size_t len = rd_buf_len(rbuf); + + if (len == 0) + return rd_http_error_new( + code, + "Server did not provide an error string"); + + + /* Use single allocation for both herr and the error string */ + herr = rd_malloc(sizeof(*herr) + len + 1); + herr->code = code; + herr->errstr = herr->data; + rd_slice_init_full(&slice, rbuf); + rd_slice_read(&slice, herr->errstr, len); + herr->errstr[len] = '\0'; + + return herr; +} + +void rd_http_req_destroy (rd_http_req_t *hreq) { + RD_IF_FREE(hreq->hreq_curl, curl_easy_cleanup); + RD_IF_FREE(hreq->hreq_buf, rd_buf_destroy); +} + + +/** + * @brief Curl writefunction. Writes the bytes passed from curl + * to the hreq's buffer. + */ +static size_t rd_http_req_write_cb (char *ptr, size_t size, size_t nmemb, + void *userdata) { + rd_http_req_t *hreq = (rd_http_req_t *)userdata; + + if (unlikely(rd_buf_len(hreq->hreq_buf) + nmemb > + RD_HTTP_RESPONSE_SIZE_MAX)) + return 0; /* FIXME: Set some overflow flag or rely on curl? 
*/ + + rd_buf_write(hreq->hreq_buf, ptr, nmemb); + + return nmemb; +} + +rd_http_error_t *rd_http_req_init (rd_http_req_t *hreq, const char *url) { + + memset(hreq, 0, sizeof(*hreq)); + + hreq->hreq_curl = curl_easy_init(); + if (!hreq->hreq_curl) + return rd_http_error_new(-1, "Failed to create curl handle"); + + hreq->hreq_buf = rd_buf_new(1, 1024); + + curl_easy_setopt(hreq->hreq_curl, CURLOPT_URL, url); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_PROTOCOLS, + CURLPROTO_HTTP | CURLPROTO_HTTPS); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_MAXREDIRS, 16); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_TIMEOUT, 30); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_ERRORBUFFER, + hreq->hreq_curl_errstr); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_NOSIGNAL, 1); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_WRITEFUNCTION, + rd_http_req_write_cb); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_WRITEDATA, (void *)hreq); + + return NULL; +} + +/** + * @brief Synchronously (blockingly) perform the HTTP operation. + */ +rd_http_error_t *rd_http_req_perform_sync (rd_http_req_t *hreq) { + CURLcode res; + long code = 0; + + res = curl_easy_perform(hreq->hreq_curl); + if (unlikely(res != CURLE_OK)) + return rd_http_error_new(-1, "%s", hreq->hreq_curl_errstr); + + curl_easy_getinfo(hreq->hreq_curl, CURLINFO_RESPONSE_CODE, &code); + hreq->hreq_code = (int)code; + if (hreq->hreq_code >= 400) + return rd_http_error_new_from_buf(hreq->hreq_code, + hreq->hreq_buf); + + return NULL; +} + + +int rd_http_req_get_code (const rd_http_req_t *hreq) { + return hreq->hreq_code; +} + +const char *rd_http_req_get_content_type (rd_http_req_t *hreq) { + const char *content_type = NULL; + + if (curl_easy_getinfo(hreq->hreq_curl, CURLINFO_CONTENT_TYPE, + &content_type)) + return NULL; + + return content_type; +} + + +/** + * @brief Perform a blocking HTTP(S) request to \p url. + * + * Returns the response (even if there's a HTTP error code returned) + * in \p *rbufp. 
+ * + * Returns NULL on success (HTTP response code < 400), or an error + * object on transport or HTTP error - this error object must be destroyed + * by calling rd_http_error_destroy(). In case of HTTP error the \p *rbufp + * may be filled with the error response. + */ +rd_http_error_t *rd_http_get (const char *url, rd_buf_t **rbufp) { + rd_http_req_t hreq; + rd_http_error_t *herr; + + *rbufp = NULL; + + herr = rd_http_req_init(&hreq, url); + if (unlikely(herr != NULL)) + return herr; + + herr = rd_http_req_perform_sync(&hreq); + if (herr) { + rd_http_req_destroy(&hreq); + return herr; + } + + *rbufp = hreq.hreq_buf; + hreq.hreq_buf = NULL; + + return NULL; +} + + +/** + * @brief Same as rd_http_get() but requires a JSON response. + * The response is parsed and a JSON object is returned in \p *jsonp. + * + * Same error semantics as rd_http_get(). + */ +rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) { + rd_http_req_t hreq; + rd_http_error_t *herr; + rd_slice_t slice; + size_t len; + const char *content_type; + char *raw_json; + const char *end; + + *jsonp = NULL; + + herr = rd_http_req_init(&hreq, url); + if (unlikely(herr != NULL)) + return herr; + + // FIXME: send Accept: json.. header? + + herr = rd_http_req_perform_sync(&hreq); + len = rd_buf_len(hreq.hreq_buf); + if (herr && len == 0) { + rd_http_req_destroy(&hreq); + return herr; + } + + if (len == 0) { + /* Empty response: create empty JSON object */ + *jsonp = cJSON_CreateObject(); + rd_http_req_destroy(&hreq); + return NULL; + } + + content_type = rd_http_req_get_content_type(&hreq); + + if (!content_type || + rd_strncasecmp(content_type, + "application/json", strlen("application/json"))) { + if (!herr) + herr = rd_http_error_new( + hreq.hreq_code, + "Response is not JSON encoded: %s", + content_type ? content_type : "(n/a)"); + rd_http_req_destroy(&hreq); + return herr; + } + + /* cJSON requires the entire input to parse in contiguous memory. 
*/ + rd_slice_init_full(&slice, hreq.hreq_buf); + raw_json = rd_malloc(len + 1); + rd_slice_read(&slice, raw_json, len); + raw_json[len] = '\0'; + + /* Parse JSON */ + end = NULL; + *jsonp = cJSON_ParseWithOpts(raw_json, &end, 0); + if (!*jsonp && !herr) + herr = rd_http_error_new(hreq.hreq_code, + "Failed to parse JSON response " + "at %"PRIusz"/%"PRIusz, + (size_t)(end - raw_json), len); + + rd_free(raw_json); + rd_http_req_destroy(&hreq); + + return herr; +} + + +void rd_http_global_init (void) { + curl_global_init(CURL_GLOBAL_DEFAULT); +} + + +/** + * @brief Unittest. Requires a (local) webserver to be set with env var + * RD_UT_HTTP_URL=http://localhost:1234/some-path + * + * This server must return a JSON object or array containing at least one + * object on the main URL with a 2xx response code, + * and 4xx response on $RD_UT_HTTP_URL/error (with whatever type of body). + */ + +int unittest_http (void) { + const char *base_url = rd_getenv("RD_UT_HTTP_URL", NULL); + char *error_url; + size_t error_url_size; + cJSON *json, *jval; + rd_http_error_t *herr; + rd_bool_t empty; + + if (!base_url || !*base_url) + RD_UT_SKIP("RD_UT_HTTP_URL environment variable not set"); + + RD_UT_BEGIN(); + + error_url_size = strlen(base_url) + strlen("/error") + 1; + error_url = rd_alloca(error_url_size); + rd_snprintf(error_url, error_url_size, "%s/error", base_url); + + /* Try the base url first, parse its JSON and extract a key-value. */ + json = NULL; + herr = rd_http_get_json(base_url, &json); + RD_UT_ASSERT(!herr, "Expected get_json(%s) to succeed, got: %s", + base_url, herr->errstr); + + empty = rd_true; + cJSON_ArrayForEach(jval, json) { + empty = rd_false; + break; + } + RD_UT_ASSERT(!empty, "Expected non-empty JSON response from %s", + base_url); + cJSON_Delete(json); + + + /* Try the error URL, verify error code. 
*/ + json = NULL; + herr = rd_http_get_json(error_url, &json); + RD_UT_ASSERT(herr != NULL, "Expected get_json(%s) to fail", error_url); + RD_UT_ASSERT(herr->code >= 400, "Expected get_json(%s) error code >= " + "400, got %d", error_url, herr->code); + RD_UT_SAY("Error URL %s returned code %d, errstr \"%s\" " + "and %s JSON object", + error_url, herr->code, herr->errstr, + json ? "a" : "no"); + /* Check if there's a JSON document returned */ + if (json) + cJSON_Delete(json); + rd_http_error_destroy(herr); + + RD_UT_PASS(); +} diff --git a/src/rdhttp.h b/src/rdhttp.h new file mode 100644 index 0000000000..24485540be --- /dev/null +++ b/src/rdhttp.h @@ -0,0 +1,73 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2021 Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef _RDHTTP_H_ +#define _RDHTTP_H_ + +#define CJSON_HIDE_SYMBOLS +#include "cJSON.h" + + +typedef struct rd_http_error_s { + int code; + char *errstr; + char data[1]; /**< This is where the error string begins. */ +} rd_http_error_t; + +void rd_http_error_destroy (rd_http_error_t *herr); + +rd_http_error_t *rd_http_get (const char *url, rd_buf_t **rbufp); +rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp); + +void rd_http_global_init (void); + + + + +#ifdef LIBCURL_VERSION +/* Advanced API that exposes the underlying CURL handle. + * Requires caller to have included curl.h prior to this file. */ + + +typedef struct rd_http_req_s { + CURL *hreq_curl; /**< CURL handle */ + rd_buf_t *hreq_buf; /**< Response buffer */ + int hreq_code; /**< HTTP response code */ + char hreq_curl_errstr[CURL_ERROR_SIZE]; /**< Error string for curl to + * write to. 
 */ +} rd_http_req_t; + +void rd_http_req_destroy (rd_http_req_t *hreq); +rd_http_error_t *rd_http_req_init (rd_http_req_t *hreq, const char *url); +rd_http_error_t *rd_http_req_perform_sync (rd_http_req_t *hreq); +#endif + + + +#endif /* _RDHTTP_H_ */ diff --git a/src/rdkafka.c b/src/rdkafka.c index 8ffd91b643..9fe770ba26 100644 --- a/src/rdkafka.c +++ b/src/rdkafka.c @@ -67,6 +67,12 @@ #include #endif +#define CJSON_HIDE_SYMBOLS +#include "cJSON.h" + +#if WITH_CURL +#include "rdhttp.h" +#endif static once_flag rd_kafka_global_init_once = ONCE_FLAG_INIT; @@ -129,7 +135,12 @@ void rd_kafka_set_thread_sysname (const char *fmt, ...) { } static void rd_kafka_global_init0 (void) { - mtx_init(&rd_kafka_global_lock, mtx_plain); + cJSON_Hooks json_hooks = { + .malloc_fn = rd_malloc, + .free_fn = rd_free + }; + + mtx_init(&rd_kafka_global_lock, mtx_plain); #if ENABLE_DEVEL rd_atomic32_init(&rd_kafka_op_cnt, 0); #endif @@ -140,6 +151,12 @@ static void rd_kafka_global_init0 (void) { * object has been created. */ rd_kafka_ssl_init(); #endif + + cJSON_InitHooks(&json_hooks); + +#if WITH_CURL + rd_http_global_init(); +#endif } /** diff --git a/src/rdunittest.c b/src/rdunittest.c index c05497ed1f..006b165ee7 100644 --- a/src/rdunittest.c +++ b/src/rdunittest.c @@ -444,6 +444,9 @@ extern int unittest_scram (void); #endif extern int unittest_assignors (void); extern int unittest_map (void); +#if WITH_CURL +extern int unittest_http (void); +#endif int rd_unittest (void) { int fails = 0; @@ -478,6 +481,9 @@ int rd_unittest (void) { { "scram", unittest_scram }, #endif { "assignors", unittest_assignors }, +#if WITH_CURL + { "http", unittest_http }, +#endif { NULL } }; int i; diff --git a/src/rdunittest.h b/src/rdunittest.h index 930c432ea3..bff125e296 100644 --- a/src/rdunittest.h +++ b/src/rdunittest.h @@ -71,6 +71,18 @@ extern rd_bool_t rd_unittest_slow; return 0; \ } while (0) + /** + * @brief Skip the current unit-test function + */ +#define RD_UT_SKIP(...) 
do { \ + fprintf(stderr, "\033[33mRDUT: SKIP: %s:%d: %s: ", \ + __FILE__, __LINE__, __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m\n"); \ + return 0; \ + } while (0) + + /** * @brief Fail unit-test if \p expr is false */ diff --git a/src/win32_config.h b/src/win32_config.h index 4579cf70a6..79ec1943b2 100644 --- a/src/win32_config.h +++ b/src/win32_config.h @@ -37,6 +37,7 @@ #define WITH_ZLIB 1 #define WITH_SNAPPY 1 #define WITH_ZSTD 1 +#define WITH_CURL 1 /* zstd is linked dynamically on Windows, but the dynamic library provides * the experimental/advanced API, just as the static builds on *nix */ #define WITH_ZSTD_STATIC 1 @@ -49,6 +50,6 @@ #define SOLIB_EXT ".dll" /* Notice: Keep up to date */ -#define BUILT_WITH "SSL ZLIB SNAPPY SASL_SCRAM PLUGINS HDRHISTOGRAM" +#define BUILT_WITH "SSL ZLIB SNAPPY ZSTD CURL SASL_SCRAM SASL_OAUTHBEARER PLUGINS HDRHISTOGRAM" #endif /* _RD_WIN32_CONFIG_H_ */ diff --git a/tests/LibrdkafkaTestApp.py b/tests/LibrdkafkaTestApp.py index 4e9d86744d..a8f0263d3c 100644 --- a/tests/LibrdkafkaTestApp.py +++ b/tests/LibrdkafkaTestApp.py @@ -148,6 +148,9 @@ def start_cmd (self): self.env_add('ZK_ADDRESS', self.cluster.get_all('address', '', ZookeeperApp)[0], False) self.env_add('BROKERS', self.cluster.bootstrap_servers(), False) + # Provide a HTTPS REST endpoint for the HTTP client tests. 
+ self.env_add('RD_UT_HTTP_URL', 'https://jsonplaceholder.typicode.com/users') + # Per broker env vars for b in [x for x in self.cluster.apps if isinstance(x, KafkaBrokerApp)]: self.env_add('BROKER_ADDRESS_%d' % b.appid, diff --git a/tests/interactive_broker_version.py b/tests/interactive_broker_version.py index cf74115274..30a39280eb 100755 --- a/tests/interactive_broker_version.py +++ b/tests/interactive_broker_version.py @@ -157,6 +157,9 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt cmd_env['TRIVUP_ROOT'] = cluster.instance_path() cmd_env['TEST_SCENARIO'] = scenario + # Provide a HTTPS REST endpoint for the HTTP client tests. + cmd_env['RD_UT_HTTP_URL'] = 'https://jsonplaceholder.typicode.com/users' + # Per broker env vars for b in [x for x in cluster.apps if isinstance(x, KafkaBrokerApp)]: cmd_env['BROKER_ADDRESS_%d' % b.appid] = \ diff --git a/vcpkg.json b/vcpkg.json index ab51f11eb7..1e1fa0b1f7 100644 --- a/vcpkg.json +++ b/vcpkg.json @@ -13,6 +13,10 @@ { "name": "openssl", "version>=": "1.1.1l" + }, + { + "name": "curl", + "version>=": "7.74.0#8" } ], "builtin-baseline": "dd3d6df5001d49f954bc39b73a4c49ae3c9e8d15" diff --git a/win32/librdkafka.vcxproj b/win32/librdkafka.vcxproj index 042b685303..389add524f 100644 --- a/win32/librdkafka.vcxproj +++ b/win32/librdkafka.vcxproj @@ -93,6 +93,7 @@ + @@ -110,6 +111,7 @@ + @@ -164,6 +166,7 @@ + @@ -177,6 +180,7 @@ + From 370e955f222d41076923d7877dc94b1b6ac34edc Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Thu, 19 Aug 2021 21:51:50 +0200 Subject: [PATCH 31/56] Add HTTP(S) client using cURL --- CONFIGURATION.md | 2 +- src/rdhttp.c | 5 ++++- src/rdkafka_conf.c | 7 +++++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/CONFIGURATION.md b/CONFIGURATION.md index aea225340a..a4c62bcd06 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -3,7 +3,7 @@ Property | C/P | Range | Default | Importance | Description 
-----------------------------------------|-----|-----------------|--------------:|------------| -------------------------- -builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer | low | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support.
*Type: CSV flags* +builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer, http | low | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support.
*Type: CSV flags* client.id | * | | rdkafka | low | Client identifier.
*Type: string* metadata.broker.list | * | | | high | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* bootstrap.servers | * | | | high | Alias for `metadata.broker.list`: Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* diff --git a/src/rdhttp.c b/src/rdhttp.c index 9253fd5d9f..57dc7d3847 100644 --- a/src/rdhttp.c +++ b/src/rdhttp.c @@ -341,6 +341,9 @@ int unittest_http (void) { } RD_UT_ASSERT(!empty, "Expected non-empty JSON response from %s", base_url); + RD_UT_SAY("URL %s returned no error and a non-empty " + "JSON object/array as expected", + base_url); cJSON_Delete(json); @@ -351,7 +354,7 @@ int unittest_http (void) { RD_UT_ASSERT(herr->code >= 400, "Expected get_json(%s) error code >= " "400, got %d", error_url, herr->code); RD_UT_SAY("Error URL %s returned code %d, errstr \"%s\" " - "and %s JSON object", + "and %s JSON object as expected", error_url, herr->code, herr->errstr, json ? "a" : "no"); /* Check if there's a JSON document returned */ diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index ed1787fbd8..d0d42a1c51 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -159,6 +159,12 @@ struct rd_kafka_property { #define _UNSUPPORTED_ZSTD .unsupported = "libzstd not available at build time" #endif +#if WITH_CURL +#define _UNSUPPORTED_HTTP .unsupported = NULL +#else +#define _UNSUPPORTED_HTTP .unsupported = "libcurl not available at build time" +#endif + #ifdef _WIN32 #define _UNSUPPORTED_WIN32_GSSAPI .unsupported = \ "Kerberos keytabs are not supported on Windows, " \ @@ -328,6 +334,7 @@ static const struct rd_kafka_property rd_kafka_properties[] = { }, { 0x400, "zstd", _UNSUPPORTED_ZSTD }, { 0x800, "sasl_oauthbearer", _UNSUPPORTED_SSL }, + { 0x1000, "http", _UNSUPPORTED_HTTP }, { 0, NULL } } }, From 143ab0ed834c5355828eb360be0ffdb8f8bf0e20 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 17 Aug 2021 11:37:17 +0200 Subject: [PATCH 32/56] Fix uninitialized warning on msvc --- src/rdmap.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rdmap.h b/src/rdmap.h index d226054b09..458cd1b145 100644 --- a/src/rdmap.h +++ b/src/rdmap.h @@ -450,7 +450,8 @@ unsigned int rd_map_str_hash (const void *a); * @remark The \p 
RMAP may not be const. */ #define RD_MAP_FOREACH(K,V,RMAP) \ - for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem) ; \ + for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), \ + (K) = NULL, (V) = NULL ; \ rd_map_iter(&(RMAP)->elem) && \ ((RMAP)->key = (void *)(RMAP)->elem->key, \ (K) = (RMAP)->key, \ @@ -475,7 +476,8 @@ unsigned int rd_map_str_hash (const void *a); * @remark The \p RMAP may not be const. */ #define RD_MAP_FOREACH_KEY(K,RMAP) \ - for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem) ; \ + for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), \ + (K) = NULL ; \ rd_map_iter(&(RMAP)->elem) && \ ((RMAP)->key = (void *)(RMAP)->elem->key, \ (K) = (RMAP)->key, \ From b38cdacebf9a00fd393326f033dbe407753d499d Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Mon, 16 Aug 2021 13:11:39 +0200 Subject: [PATCH 33/56] Remove commented-out printfs --- src/rdkafka_sasl_scram.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/rdkafka_sasl_scram.c b/src/rdkafka_sasl_scram.c index 3175c3d7a2..0eec5c6af3 100644 --- a/src/rdkafka_sasl_scram.c +++ b/src/rdkafka_sasl_scram.c @@ -242,9 +242,6 @@ rd_kafka_sasl_scram_HMAC (rd_kafka_transport_t *rktrans, rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp; unsigned int outsize; - //printf("HMAC KEY: %s\n", rd_base64_encode(key)); - //printf("HMAC STR: %s\n", rd_base64_encode(str)); - if (!HMAC(evp, (const unsigned char *)key->ptr, (int)key->size, (const unsigned char *)str->ptr, (int)str->size, @@ -255,7 +252,6 @@ rd_kafka_sasl_scram_HMAC (rd_kafka_transport_t *rktrans, } out->size = outsize; - //printf("HMAC OUT: %s\n", rd_base64_encode(out)); return 0; } From 5baa2ea1e38ebd560547c2126013f65bfbdb79a2 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 17 Aug 2021 09:04:44 +0200 Subject: [PATCH 34/56] Remove stray license include in librdkafka vcxproj --- win32/librdkafka.vcxproj | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/win32/librdkafka.vcxproj b/win32/librdkafka.vcxproj index 
389add524f..191ef43657 100644 --- a/win32/librdkafka.vcxproj +++ b/win32/librdkafka.vcxproj @@ -245,11 +245,8 @@
- - - - + \ No newline at end of file From 12bf2fb677f4f5b525f27545c9447490d8268e4d Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 17 Aug 2021 15:08:36 +0200 Subject: [PATCH 35/56] librdkafka.vcxproj: remove stale OpenSSL paths and enable Vcpkg manifests --- win32/librdkafka.vcxproj | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/win32/librdkafka.vcxproj b/win32/librdkafka.vcxproj index 191ef43657..cc4b1a2178 100644 --- a/win32/librdkafka.vcxproj +++ b/win32/librdkafka.vcxproj @@ -16,8 +16,11 @@ $(VC_LibraryPath_x86);$(WindowsSDK_LibraryPath_x86) - $(VC_IncludePath);$(WindowsSDK_IncludePath);C:\OpenSSL-Win64\include - $(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64);C:\OpenSSL-Win64\lib\VC\static + $(VC_IncludePath);$(WindowsSDK_IncludePath) + $(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64) + + + true From 175e5fe3f05be628d471aca86fa3a5ef32e22998 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Thu, 19 Aug 2021 21:49:47 +0200 Subject: [PATCH 36/56] mklove: put all built deps in the same destdir and set up compiler flags accordingly This fixes some issues when dependency B depends on dependency A, in this case for libcurl that depends on OpenSSL, to make it find the OpenSSL libraries, pkg-config files, etc. --- mklove/modules/configure.base | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/mklove/modules/configure.base b/mklove/modules/configure.base index e963139400..f2c01b7315 100644 --- a/mklove/modules/configure.base +++ b/mklove/modules/configure.base @@ -39,6 +39,8 @@ MKL_NO_DOWNLOAD=0 MKL_INSTALL_DEPS=n MKL_SOURCE_DEPS_ONLY=n +MKL_DESTDIR_ADDED=n + if [[ -z "$MKL_REPO_URL" ]]; then MKL_REPO_URL="http://github.com/edenhill/mklove/raw/master" fi @@ -328,7 +330,7 @@ function mkl_depdir { # Returns the package's installation directory / DESTDIR. function mkl_dep_destdir { - echo "$(mkl_depdir)/dest/$1" + echo "$(mkl_depdir)/dest" } # Returns the package's source directory. 
@@ -598,12 +600,24 @@ function mkl_dep_install { if ! mkl_resolve_static_libs "$name" "${ddir}/usr"; then # No static libraries found, set up dynamic linker path mkl_mkvar_prepend LDFLAGS LDFLAGS "-L${ddir}/usr/lib64 -L${ddir}/usr/lib" - mkl_mkvar_prepend PKG_CONFIG_PATH PKG_CONFIG_PATH "${ddir}/usr/lib/pkgconfig" ":" + fi + + # Add the deps destdir to various build flags so that tools can pick + # up the artifacts (.pc files, includes, libs, etc) they need. + if [[ $MKL_DESTDIR_ADDED == n ]]; then + # Add environment variables so that later built dependencies + # can find this one. + mkl_env_prepend LDFLAGS "-L${ddir}/usr/lib64 -L${ddir}/usr/lib" + mkl_env_prepend CPPFLAGS "-I${ddir}/usr/include" + mkl_env_prepend PKG_CONFIG_PATH "${ddir}/usr/lib/pkgconfig" ":" + # And tell pkg-config to get static linker flags. + mkl_env_set PKG_CONFIG "${PKG_CONFIG} --static" + MKL_DESTDIR_ADDED=y fi # Append the package's install path to compiler and linker flags. mkl_dbg "$name: Adding install-deps paths ($ddir) to compiler and linker flags" - mkl_mkvar_prepend CFLAGS CFLAGS "-I${ddir}/usr/include" + mkl_mkvar_prepend CPPFLAGS CPPFLAGS "-I${ddir}/usr/include" return $retcode } @@ -1380,7 +1394,7 @@ function mkl_compile_check { int main () { return 0; } " >> $srcfile - local cmd="${!4} $cflags $(mkl_mkvar_get CPPFLAGS) -Wall -Werror $srcfile -o ${srcfile}.o $ldf $(mkl_mkvar_get LDFLAGS) $5"; + local cmd="${!4} $cflags $(mkl_mkvar_get CPPFLAGS) -Wall -Werror $srcfile -o ${srcfile}.o $ldf $(mkl_mkvar_get LDFLAGS) $5 $(mkl_mkvar_get LIBS)"; mkl_dbg "Compile check $1 ($2) (sub=$sub): $cmd" local output From 2da68d51888eb2c4879b63deeceeeb3909cf85a7 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Thu, 19 Aug 2021 21:50:48 +0200 Subject: [PATCH 37/56] mklove: don't include STATIC_LIB_..s in BUILT_WITH --- mklove/modules/configure.base | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mklove/modules/configure.base b/mklove/modules/configure.base index 
f2c01b7315..a18cd6befe 100644 --- a/mklove/modules/configure.base +++ b/mklove/modules/configure.base @@ -872,7 +872,7 @@ function mkl_generate { # Generate a built-in options define based on WITH_..=y local with_y= for n in $MKL_MKVARS ; do - if [[ $n == WITH_* ]] && [[ ${!n} == y ]]; then + if [[ $n == WITH_* ]] && [[ $n != WITH_STATIC_LIB_* ]] && [[ ${!n} == y ]]; then with_y="$with_y ${n#WITH_}" fi done From f483206c0becaaf1dbd8f8c45e18b0620038e28e Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Thu, 19 Aug 2021 21:51:07 +0200 Subject: [PATCH 38/56] mklove: Some autoconf versions seem to need a full path to $INSTALL --- mklove/modules/configure.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mklove/modules/configure.cc b/mklove/modules/configure.cc index cf39cd6d1a..d294883833 100644 --- a/mklove/modules/configure.cc +++ b/mklove/modules/configure.cc @@ -112,12 +112,12 @@ function checks { if [[ $MKL_DISTRO == "sunos" ]]; then mkl_meta_set ginstall name "GNU install" if mkl_command_check ginstall "" ignore "ginstall --version"; then - INSTALL=ginstall + INSTALL=$(which ginstall) else - INSTALL=install + INSTALL=$(which install) fi else - INSTALL=install + INSTALL=$(which install) fi fi From bab588a71ac00d14f7aa2cd1a2da538feaba7665 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Thu, 19 Aug 2021 21:51:35 +0200 Subject: [PATCH 39/56] curl: disable everything but HTTP(S) --- mklove/modules/configure.libcurl | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/mklove/modules/configure.libcurl b/mklove/modules/configure.libcurl index 9616f3b195..c40b93d646 100644 --- a/mklove/modules/configure.libcurl +++ b/mklove/modules/configure.libcurl @@ -49,7 +49,32 @@ function install_source { tar xzf - --strip-components 1 fi - ./configure --with-openssl - time make -j DESTDIR="${destdir}" prefix=/usr all install + # Clear out LIBS to not interfer with lib detection process. 
+ LIBS="" ./configure \ + --with-openssl \ + --enable-static \ + --disable-shared \ + --disable-ntlm{,-wb} \ + --disable-dict \ + --disable-ftp \ + --disable-file \ + --disable-gopher \ + --disable-imap \ + --disable-imaps \ + --disable-mqtt \ + --disable-pop3 \ + --disable-rtsp \ + --disable-smb \ + --disable-smtp \ + --disable-telnet \ + --disable-tftp \ + --disable-ssh \ + --disable-manual \ + --disable-ldap{,s} \ + --disable-libcurl-option \ + --without-{librtmp,libidn2,winidn,nghttp2,nghttp3,ngtcp2,quiche,brotli} && + time make -j && + make DESTDIR="${destdir}" prefix=/usr install + return $? } From 7efa51d9a805c7c04996b5e33f7c8f324cbd1cab Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 25 Aug 2021 13:14:01 +0200 Subject: [PATCH 40/56] Added string splitter and kv splitter --- src/rdkafka_conf.c | 61 +++++++++++++ src/rdstring.c | 212 ++++++++++++++++++++++++++++++++++++++++++++- src/rdstring.h | 3 + 3 files changed, 275 insertions(+), 1 deletion(-) diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index d0d42a1c51..8a90675ad0 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -3677,6 +3677,67 @@ static void rd_kafka_sw_str_sanitize_inplace (char *str) { #undef _is_alphanum +/** + * @brief Create a staggered array of key-value pairs from + * an array of "key=value" strings (typically from rd_string_split()). + * + * The output array will have element 0 being key0 and element 1 being + * value0. Element 2 being key1 and element 3 being value1, and so on. + * E.g.: + * input { "key0=value0", "key1=value1" } incnt=2 + * returns { "key0", "value0", "key1", "value1" } cntp=4 + * + * @returns NULL on error (no '=' separator), or a newly allocated array + * on success. The array count is returned in \p cntp. + * The returned pointer must be freed with rd_free(). 
+ */ +static char **rd_kafka_conf_kv_split (const char **input, size_t incnt, + size_t *cntp) { + size_t i; + char **out, *p; + size_t lens = 0; + size_t outcnt = 0; + + /* First calculate total length needed for key-value strings. */ + for (i = 0 ; i < incnt ; i++) { + const char *t = strchr(input[i], '='); + + /* No "=", or "=" at beginning of string. */ + if (!t || t == input[i]) + return NULL; + + /* Length of key, '=' (will be \0), value, and \0 */ + lens += strlen(input[i]) + 1; + } + + /* Allocate array along with elements in one go */ + out = rd_malloc((sizeof(*out) * incnt * 2) + lens); + p = (char *)(&out[incnt * 2]); + + for (i = 0 ; i < incnt ; i++) { + const char *t = strchr(input[i], '='); + size_t namelen = (size_t)(t - input[i]); + size_t valuelen = strlen(t+1); + + /* Copy name */ + out[outcnt++] = p; + memcpy(p, input[i], namelen); + p += namelen; + *(p++) = '\0'; + + /* Copy value */ + out[outcnt++] = p; + memcpy(p, t+1, valuelen + 1); + p += valuelen; + *(p++) = '\0'; + } + + + *cntp = outcnt; + return out; +} + + /** * @brief Verify configuration \p conf is * correct/non-conflicting and finalize the configuration diff --git a/src/rdstring.c b/src/rdstring.c index fe7b4de1c5..c85ea0e25c 100644 --- a/src/rdstring.c +++ b/src/rdstring.c @@ -405,10 +405,219 @@ static int ut_strcasestr (void) { strs[i].exp, of, ret ? ret : "(NULL)"); } - return 0; + RD_UT_PASS(); } + + +/** + * @brief Split a character-separated string into an array. + * + * @remark This is not CSV compliant as CSV uses " for escapes, but this here + * uses \. + * + * @param input Input string to parse. + * @param sep The separator character (typically ',') + * @param skip_empty Do not include empty fields in output array. + * @param cntp Will be set to number of elements in array. + * + * Supports "\" escapes. + * The array and the array elements will be allocated together and must be freed + * with a single rd_free(array) call. 
+ * The array elements are copied and any "\" escapes are removed. + * + * @returns the parsed fields in an array. The number of elements in the + * array is returned in \p cntp + */ +char **rd_string_split (const char *input, char sep, rd_bool_t skip_empty, + size_t *cntp) { + size_t fieldcnt = 1; + rd_bool_t next_esc = rd_false; + const char *s; + char *p; + char **arr; + size_t inputlen; + size_t i = 0; + size_t elen = 0; + + *cntp = '\0'; + + /* First count the maximum number of fields so we know how large of + * an array we need to allocate. Escapes are ignored. */ + for (s = input ; *s ; s++) { + if (*s == sep) + fieldcnt++; + } + + inputlen = (size_t)(s - input); + + /* Allocate array and memory for the copied elements in one go. */ + arr = rd_malloc((sizeof(*arr) * fieldcnt) + inputlen + 1); + p = (char *)(&arr[fieldcnt]); + + for (s = input ; ; s++) { + rd_bool_t at_end = *s == '\0'; + rd_bool_t is_esc = next_esc; + + /* If we've reached the end, jump to done to finish + * the current field. */ + if (at_end) + goto done; + + if (unlikely(!is_esc && *s == '\\')) { + next_esc = rd_true; + continue; + } + + next_esc = rd_false; + + /* Strip leading whitespaces for each element */ + if (!is_esc && elen == 0 && isspace((int)*s)) + continue; + + if (likely(is_esc || *s != sep)) { + char c = *s; + if (is_esc) { + /* Perform some common escape substitions. + * If not known we'll just keep the escaped + * character as is (probably the separator). 
*/ + switch (c) + { + case 't': + c = '\t'; + break; + case 'n': + c = '\n'; + break; + case 'r': + c = '\r'; + break; + case '0': + c = '\0'; + break; + } + } + p[elen++] = c; + continue; + } + + done: + /* Strip trailing whitespaces */ + while (elen > 0 && isspace((int)p[elen-1])) + elen--; + + /* End of field */ + if (elen == 0 && skip_empty) { + if (at_end) + break; + continue; + } + + rd_assert(i < fieldcnt); + + /* Nul-terminate the element */ + p[elen++] = '\0'; + /* Assign element to array */ + arr[i] = p; + /* Update next element pointer past the written bytes */ + p += elen; + /* Reset element length */ + elen = 0; + /* Advance array element index */ + i++; + + if (at_end) + break; + } + + *cntp = i; + + return arr; +} + +/** + * @brief Unittest for rd_string_split() + */ +static int ut_string_split (void) { + static const struct { + const char *input; + const char sep; + rd_bool_t skip_empty; + size_t exp_cnt; + const char *exp[16]; + } strs[] = { + { "just one field", ',', rd_true, 1, + { "just one field" } + }, + /* Empty with skip_empty */ + { "", ',', rd_true, 0 }, + /* Empty without skip_empty */ + { "", ',', rd_false, 1, + { "" } + }, + { ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v", + ',', rd_true, 11, + { + "a", "b", "c", "d", "e", "f", "ghijk", "lmn", "opq", + "r s t u", "v" + }, + }, + { ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v", + ',', rd_false, 13, + { + "", "a", "b", "", "c", "d", "e", "f", "ghijk", + "lmn", "opq", "r s t u", "v" + }, + }, + { " this is an \\,escaped comma,\\,,\\\\, " + "and this is an unbalanced escape: \\\\\\\\\\\\\\", + ',', rd_true, 4, + { + "this is an ,escaped comma", + ",", + "\\", + "and this is an unbalanced escape: \\\\\\" + } + }, + { "using|another ||\\|d|elimiter", '|', rd_false, 5, + { + "using", "another", "", "|d", "elimiter" + }, + }, + { NULL }, + }; + size_t i; + + RD_UT_BEGIN(); + + for (i = 0 ; strs[i].input ; i++) { + char **ret; + size_t cnt = 12345; + size_t j; + + ret = 
rd_string_split(strs[i].input, strs[i].sep, + strs[i].skip_empty, + &cnt); + RD_UT_ASSERT(ret != NULL, + "#%"PRIusz": Did not expect NULL", i); + RD_UT_ASSERT(cnt == strs[i].exp_cnt, + "#%"PRIusz": " + "Expected %"PRIusz" elements, got %"PRIusz, + i, strs[i].exp_cnt, cnt); + + for (j = 0 ; j < cnt ; j++) + RD_UT_ASSERT(!strcmp(strs[i].exp[j], ret[j]), + "#%"PRIusz": Expected string %"PRIusz + " to be \"%s\", not \"%s\"", + i, j, strs[i].exp[j], ret[j]); + + rd_free(ret); + } + + RD_UT_PASS(); +} + /** * @brief Unittests for strings */ @@ -416,6 +625,7 @@ int unittest_string (void) { int fails = 0; fails += ut_strcasestr(); + fails += ut_string_split(); return fails; } diff --git a/src/rdstring.h b/src/rdstring.h index b90f32f4db..cd05dc4846 100644 --- a/src/rdstring.h +++ b/src/rdstring.h @@ -81,6 +81,9 @@ int rd_strcmp (const char *a, const char *b); char *_rd_strcasestr (const char *haystack, const char *needle); +char **rd_string_split (const char *input, char sep, rd_bool_t skip_empty, + size_t *cntp); + /** @returns "true" if EXPR is true, else "false" */ #define RD_STR_ToF(EXPR) ((EXPR) ? 
"true" : "false") From d90161ffd04f1e6f030dac82a36e2e564ac7faa8 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Tue, 31 Aug 2021 00:22:04 -0700 Subject: [PATCH 41/56] OAuth/OIDC: Add fields to client configuration (#3510) --- CONFIGURATION.md | 8 ++- src/rdkafka.c | 4 +- src/rdkafka_conf.c | 69 ++++++++++++++++++++-- src/rdkafka_conf.h | 22 +++++-- src/rdkafka_sasl_oauthbearer.c | 8 +-- tests/0126-oauthbearer_oidc.c | 105 +++++++++++++++++++++++++++++++++ tests/CMakeLists.txt | 1 + tests/test.c | 2 + win32/tests/tests.vcxproj | 1 + 9 files changed, 204 insertions(+), 16 deletions(-) create mode 100644 tests/0126-oauthbearer_oidc.c diff --git a/CONFIGURATION.md b/CONFIGURATION.md index a4c62bcd06..775a1eec39 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -3,7 +3,7 @@ Property | C/P | Range | Default | Importance | Description -----------------------------------------|-----|-----------------|--------------:|------------| -------------------------- -builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer, http | low | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support.
*Type: CSV flags* +builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer, http, oidc | low | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support.
*Type: CSV flags* client.id | * | | rdkafka | low | Client identifier.
*Type: string* metadata.broker.list | * | | | high | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* bootstrap.servers | * | | | high | Alias for `metadata.broker.list`: Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* @@ -92,6 +92,12 @@ sasl.password | * | | sasl.oauthbearer.config | * | | | low | SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123`
*Type: string* enable.sasl.oauthbearer.unsecure.jwt | * | true, false | false | low | Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production.
*Type: boolean* oauthbearer_token_refresh_cb | * | | | low | SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et.al. This callback will be triggered when it is time to refresh the client's OAUTHBEARER token.
*Type: see dedicated API* +sasl.oauthbearer.method | * | default, oidc | default | low | Set to "default" or "oidc" to control which login method is used. If set it to "oidc", OAuth/OIDC login method will be used. sasl.oauthbearer.client.id, sasl.oauthbearer.client.secret, sasl.oauthbearer.scope, sasl.oauthbearer.extensions, and sasl.oauthbearer.token.endpoint.url are needed if sasl.oauthbearer.method is set to "oidc".
*Type: enum value* +sasl.oauthbearer.client.id | * | | | low | It's a public identifier for the application. It must be unique across all clients that the authorization server handles. This is only used when sasl.oauthbearer.method is set to oidc.
*Type: string* +sasl.oauthbearer.client.secret | * | | | low | A client secret only known to the application and the authorization server. This should be a sufficiently random string that are not guessable. This is only used when sasl.oauthbearer.method is set to "oidc".
*Type: string* +sasl.oauthbearer.scope | * | | | low | Client use this to specify the scope of the access request to the broker. This is only used when sasl.oauthbearer.method is set to "oidc".
*Type: string* +sasl.oauthbearer.extensions | * | | | low | Allow additional information to be provided to the broker. It's comma-separated list of key=value pairs. The example of the input is "supportFeatureX=true,organizationId=sales-emea". This is only used when sasl.oauthbearer.method is set to "oidc".
*Type: string* +sasl.oauthbearer.token.endpoint.url | * | | | low | OAUTH issuer token endpoint HTTP(S) URI used to retrieve the token. This is only used when sasl.oauthbearer.method is set to "oidc".
*Type: string* plugin.library.paths | * | | | low | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically.
*Type: string* interceptors | * | | | low | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors.
*Type: see dedicated API* group.id | C | | | high | Client group id string. All clients sharing the same group.id belong to the same group.
*Type: string* diff --git a/src/rdkafka.c b/src/rdkafka.c index 9fe770ba26..11f8c12271 100644 --- a/src/rdkafka.c +++ b/src/rdkafka.c @@ -2239,12 +2239,12 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_ERROR; #if WITH_SASL_OAUTHBEARER if (rk->rk_conf.sasl.enable_oauthbearer_unsecure_jwt && - !rk->rk_conf.sasl.oauthbearer_token_refresh_cb) + !rk->rk_conf.sasl.oauthbearer.token_refresh_cb) rd_kafka_conf_set_oauthbearer_token_refresh_cb( &rk->rk_conf, rd_kafka_oauthbearer_unsecured_token); - if (rk->rk_conf.sasl.oauthbearer_token_refresh_cb) + if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb) rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH; #endif diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index 8a90675ad0..355af364d7 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -161,8 +161,12 @@ struct rd_kafka_property { #if WITH_CURL #define _UNSUPPORTED_HTTP .unsupported = NULL +#define _UNSUPPORTED_OIDC .unsupported = NULL #else #define _UNSUPPORTED_HTTP .unsupported = "libcurl not available at build time" +#define _UNSUPPORTED_OIDC .unsupported = \ + "OAuth/OIDC depends on libcurl which was not available " \ + "at build time" #endif #ifdef _WIN32 @@ -335,6 +339,7 @@ static const struct rd_kafka_property rd_kafka_properties[] = { { 0x400, "zstd", _UNSUPPORTED_ZSTD }, { 0x800, "sasl_oauthbearer", _UNSUPPORTED_SSL }, { 0x1000, "http", _UNSUPPORTED_HTTP }, + { 0x2000, "oidc", _UNSUPPORTED_OIDC }, { 0, NULL } } }, @@ -1010,7 +1015,7 @@ static const struct rd_kafka_property rd_kafka_properties[] = { _UNSUPPORTED_OAUTHBEARER }, { _RK_GLOBAL, "oauthbearer_token_refresh_cb", _RK_C_PTR, - _RK(sasl.oauthbearer_token_refresh_cb), + _RK(sasl.oauthbearer.token_refresh_cb), "SASL/OAUTHBEARER token refresh callback (set with " "rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by " "rd_kafka_poll(), et.al. 
" @@ -1018,6 +1023,62 @@ static const struct rd_kafka_property rd_kafka_properties[] = { "the client's OAUTHBEARER token.", _UNSUPPORTED_OAUTHBEARER }, + { _RK_GLOBAL, "sasl.oauthbearer.method", _RK_C_S2I, + _RK(sasl.oauthbearer.method), + "Set to \"default\" or \"oidc\" to control which login method " + "is used. If set it to \"oidc\", OAuth/OIDC login method will " + "be used. " + "sasl.oauthbearer.client.id, sasl.oauthbearer.client.secret, " + "sasl.oauthbearer.scope, sasl.oauthbearer.extensions, " + "and sasl.oauthbearer.token.endpoint.url are needed if " + "sasl.oauthbearer.method is set to \"oidc\".", + .vdef = RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, + .s2i = { + { RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, "default" }, + { RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC, "oidc" } + }, + _UNSUPPORTED_OIDC + }, + { _RK_GLOBAL, "sasl.oauthbearer.client.id", _RK_C_STR, + _RK(sasl.oauthbearer.client_id), + "It's a public identifier for the application. " + "It must be unique across all clients that the " + "authorization server handles. " + "This is only used when sasl.oauthbearer.method is set to oidc.", + _UNSUPPORTED_OIDC + }, + { _RK_GLOBAL, "sasl.oauthbearer.client.secret", _RK_C_STR, + _RK(sasl.oauthbearer.client_secret), + "A client secret only known to the application and the " + "authorization server. This should be a sufficiently random string " + "that are not guessable. " + "This is only used when sasl.oauthbearer.method is set to \"oidc\".", + _UNSUPPORTED_OIDC + }, + { _RK_GLOBAL, "sasl.oauthbearer.scope", _RK_C_STR, + _RK(sasl.oauthbearer.scope), + "Client use this to specify the scope of the access request to the " + "broker. " + "This is only used when sasl.oauthbearer.method is set to \"oidc\".", + _UNSUPPORTED_OIDC + }, + { _RK_GLOBAL, "sasl.oauthbearer.extensions", _RK_C_STR, + _RK(sasl.oauthbearer.extensions_str), + "Allow additional information to be provided to the broker. " + "It's comma-separated list of key=value pairs. 
" + "The example of the input is " + "\"supportFeatureX=true,organizationId=sales-emea\"." + " This is only used when sasl.oauthbearer.method is set " + "to \"oidc\".", + _UNSUPPORTED_OIDC + }, + { _RK_GLOBAL, "sasl.oauthbearer.token.endpoint.url", _RK_C_STR, + _RK(sasl.oauthbearer.token_endpoint_url), + "OAUTH issuer token endpoint HTTP(S) URI used to retrieve the " + "token. " + "This is only used when sasl.oauthbearer.method is set to \"oidc\".", + _UNSUPPORTED_OIDC + }, /* Plugins */ { _RK_GLOBAL, "plugin.library.paths", _RK_C_STR, @@ -3788,7 +3849,7 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, #if WITH_SASL_OAUTHBEARER if (conf->sasl.enable_oauthbearer_unsecure_jwt && - conf->sasl.oauthbearer_token_refresh_cb) + conf->sasl.oauthbearer.token_refresh_cb) return "`enable.sasl.oauthbearer.unsecure.jwt` and " "`oauthbearer_token_refresh_cb` are mutually exclusive"; #endif @@ -4377,7 +4438,7 @@ int unittest_conf (void) { readlen = sizeof(readval); res2 = rd_kafka_conf_get(conf, "client.software.name", readval, &readlen); - RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res2); + RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK, "%d", res2); RD_UT_ASSERT(!strcmp(readval, "aba.-va"), "client.software.* safification failed: \"%s\"", readval); RD_UT_SAY("Safified client.software.name=\"%s\"", readval); @@ -4385,7 +4446,7 @@ int unittest_conf (void) { readlen = sizeof(readval); res2 = rd_kafka_conf_get(conf, "client.software.version", readval, &readlen); - RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res2); + RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK, "%d", res2); RD_UT_ASSERT(!strcmp(readval, "1.2.3.4.5----a"), "client.software.* safification failed: \"%s\"", readval); RD_UT_SAY("Safified client.software.version=\"%s\"", readval); diff --git a/src/rdkafka_conf.h b/src/rdkafka_conf.h index ac08651d83..86bc7127ce 100644 --- a/src/rdkafka_conf.h +++ b/src/rdkafka_conf.h @@ -151,6 +151,10 @@ typedef enum { RD_KAFKA_OFFSET_METHOD_BROKER } rd_kafka_offset_method_t; +typedef 
enum { + RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC +} rd_kafka_oauthbearer_method_t; typedef enum { RD_KAFKA_SSL_ENDPOINT_ID_NONE, @@ -285,11 +289,19 @@ struct rd_kafka_conf_s { #endif char *oauthbearer_config; int enable_oauthbearer_unsecure_jwt; - /* SASL/OAUTHBEARER token refresh event callback */ - void (*oauthbearer_token_refresh_cb) ( - rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque); + struct { + char *method; + char *token_endpoint_url; + char *client_id; + char *client_secret; + char *scope; + char *extensions_str; + /* SASL/OAUTHBEARER token refresh event callback */ + void (*token_refresh_cb) ( + rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque); + } oauthbearer; } sasl; char *plugin_paths; diff --git a/src/rdkafka_sasl_oauthbearer.c b/src/rdkafka_sasl_oauthbearer.c index d89f0d637a..fe20f99368 100644 --- a/src/rdkafka_sasl_oauthbearer.c +++ b/src/rdkafka_sasl_oauthbearer.c @@ -167,8 +167,8 @@ rd_kafka_oauthbearer_refresh_op (rd_kafka_t *rk, * the op has already been handled by this point. */ if (rko->rko_err != RD_KAFKA_RESP_ERR__DESTROY && - rk->rk_conf.sasl.oauthbearer_token_refresh_cb) - rk->rk_conf.sasl.oauthbearer_token_refresh_cb( + rk->rk_conf.sasl.oauthbearer.token_refresh_cb) + rk->rk_conf.sasl.oauthbearer.token_refresh_cb( rk, rk->rk_conf.sasl.oauthbearer_config, rk->rk_conf.opaque); return RD_KAFKA_OP_RES_HANDLED; @@ -1310,9 +1310,9 @@ static int rd_kafka_sasl_oauthbearer_init (rd_kafka_t *rk, * unsecure JWS token refresher, to avoid an initial connection * stall as we wait for the application to call poll(). * Otherwise enqueue a refresh callback for the application. 
*/ - if (rk->rk_conf.sasl.oauthbearer_token_refresh_cb == + if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb == rd_kafka_oauthbearer_unsecured_token) - rk->rk_conf.sasl.oauthbearer_token_refresh_cb( + rk->rk_conf.sasl.oauthbearer.token_refresh_cb( rk, rk->rk_conf.sasl.oauthbearer_config, rk->rk_conf.opaque); else diff --git a/tests/0126-oauthbearer_oidc.c b/tests/0126-oauthbearer_oidc.c new file mode 100644 index 0000000000..6e7540393a --- /dev/null +++ b/tests/0126-oauthbearer_oidc.c @@ -0,0 +1,105 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2021, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "test.h"
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * @brief After config OIDC fields, make sure the producer gets created
+ * successfully.
+ *
+ */
+static void do_test_create_producer () {
+        const char *topic;
+        uint64_t testid;
+        rd_kafka_t *rk;
+        rd_kafka_conf_t *conf;
+        rd_kafka_conf_res_t res;
+        char errstr[512];
+
+        SUB_TEST("Test producer with oidc configuration");
+
+        test_conf_init(&conf, NULL, 60);
+
+        res = rd_kafka_conf_set(conf,
+                                "sasl.oauthbearer.method",
+                                "oidc",
+                                errstr,
+                                sizeof(errstr));
+
+        if (res == RD_KAFKA_CONF_INVALID) {
+                rd_kafka_conf_destroy(conf);
+                TEST_SKIP("%s\n", errstr);
+                return;
+        }
+
+        if (res != RD_KAFKA_CONF_OK)
+                TEST_FAIL("%s", errstr);
+
+        test_conf_set(conf,
+                      "sasl.oauthbearer.client.id",
+                      "randomuniqclientid");
+        test_conf_set(conf,
+                      "sasl.oauthbearer.client.secret",
+                      "randomuniqclientsecret");
+        test_conf_set(conf,
+                      "sasl.oauthbearer.scope",
+                      "test-scope");
+        test_conf_set(conf,
+                      "sasl.oauthbearer.extensions",
+                      "supportFeatureX=true");
+        test_conf_set(conf,
+                      "sasl.oauthbearer.token.endpoint.url",
+                      "https://localhost:1/token");
+
+        testid = test_id_generate();
+        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        topic = test_mk_topic_name("0126-oauthbearer_oidc", 1);
+        test_create_topic(rk, topic, 1, 1);
+
+        /* Produce messages */
+        test_produce_msgs2(rk, topic, testid, 1, 0, 0, NULL, 0);
+
+        /* Verify messages were actually produced by consuming them back.
*/ + test_consume_msgs_easy(topic, topic, 0, 1, 1, NULL); + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +int main_0126_oauthbearer_oidc (int argc, char **argv) { + do_test_create_producer(); + return 0; +} diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 2f9457f692..4af8fc88f4 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -117,6 +117,7 @@ set( 0123-connections_max_idle.c 0124-openssl_invalid_engine.c 0125-immediate_flush.c + 0126-oauthbearer_oidc.c 8000-idle.cpp test.c testcpp.cpp diff --git a/tests/test.c b/tests/test.c index 0f51568b9d..91ec31a613 100644 --- a/tests/test.c +++ b/tests/test.c @@ -238,6 +238,7 @@ _TEST_DECL(0122_buffer_cleaning_after_rebalance); _TEST_DECL(0123_connections_max_idle); _TEST_DECL(0124_openssl_invalid_engine); _TEST_DECL(0125_immediate_flush); +_TEST_DECL(0126_oauthbearer_oidc); /* Manual tests */ _TEST_DECL(8000_idle); @@ -446,6 +447,7 @@ struct test tests[] = { _TEST(0123_connections_max_idle, 0), _TEST(0124_openssl_invalid_engine, TEST_F_LOCAL), _TEST(0125_immediate_flush, 0), + _TEST(0126_oauthbearer_oidc, TEST_BRKVER(3,0,0,0)), /* Manual tests */ _TEST(8000_idle, TEST_F_MANUAL), diff --git a/win32/tests/tests.vcxproj b/win32/tests/tests.vcxproj index 0943ea59d8..f22fbecda6 100644 --- a/win32/tests/tests.vcxproj +++ b/win32/tests/tests.vcxproj @@ -207,6 +207,7 @@ + From c9f9249bc80d59a430ba370b5f497995e2e855db Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 7 Sep 2021 17:09:56 +0200 Subject: [PATCH 42/56] Implement native Win32 IO/Queue scheduler (WSAWaitForMultipleEvents) This removes the internal loopback connections (one per known broker) that were previously used to trigger io-based queue wakeups. 
--- src/rdkafka_broker.c | 18 +- src/rdkafka_broker.h | 3 - src/rdkafka_ssl.c | 2 +- src/rdkafka_transport.c | 349 ++++++++++++++++++++++++++++++------ src/rdkafka_transport.h | 11 +- src/rdkafka_transport_int.h | 12 +- src/tinycthread_extra.c | 17 ++ src/tinycthread_extra.h | 16 ++ 8 files changed, 360 insertions(+), 68 deletions(-) diff --git a/src/rdkafka_broker.c b/src/rdkafka_broker.c index 588f1a5583..7a3ca2fa21 100644 --- a/src/rdkafka_broker.c +++ b/src/rdkafka_broker.c @@ -3462,16 +3462,15 @@ rd_bool_t rd_kafka_broker_ops_io_serve (rd_kafka_broker_t *rkb, abs_timeout = rd_clock() + ((rd_ts_t)rd_kafka_max_block_ms * 1000); + if (likely(rkb->rkb_transport != NULL)) { - /* Serve IO events. + /* Poll and serve IO events and also poll the ops queue. * - * If there are IO events, cut out the queue ops_serve - * timeout (below) since we'll probably have to perform more - * duties based on the IO. - * IO polling granularity is milliseconds while - * queue granularity is microseconds. */ + * The return value indicates if ops_serve() below should + * use a timeout or not. 
+ */ if (rd_kafka_transport_io_serve( - rkb->rkb_transport, + rkb->rkb_transport, rkb->rkb_ops, rd_timeout_remains(abs_timeout))) abs_timeout = RD_POLL_NOWAIT; } @@ -5545,8 +5544,8 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, const char *name, uint16_t port, int32_t nodeid) { rd_kafka_broker_t *rkb; - int r; #ifndef _WIN32 + int r; sigset_t newset, oldset; #endif @@ -5636,8 +5635,8 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, */ rkb->rkb_wakeup_fd[0] = -1; rkb->rkb_wakeup_fd[1] = -1; - rkb->rkb_toppar_wakeup_fd = -1; +#ifndef _WIN32 if ((r = rd_pipe_nonblocking(rkb->rkb_wakeup_fd)) == -1) { rd_rkb_log(rkb, LOG_ERR, "WAKEUPFD", "Failed to setup broker queue wake-up fds: " @@ -5655,6 +5654,7 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, rd_kafka_q_io_event_enable(rkb->rkb_ops, rkb->rkb_wakeup_fd[1], &onebyte, sizeof(onebyte)); } +#endif /* Lock broker's lock here to synchronise state, i.e., hold off * the broker thread until we've finalized the rkb. */ diff --git a/src/rdkafka_broker.h b/src/rdkafka_broker.h index 02c08bc961..936607705a 100644 --- a/src/rdkafka_broker.h +++ b/src/rdkafka_broker.h @@ -241,9 +241,6 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ rd_socket_t rkb_wakeup_fd[2]; /* Wake-up fds (r/w) to wake * up from IO-wait when * queues have content. */ - rd_socket_t rkb_toppar_wakeup_fd; /* Toppar msgq wakeup fd, - * this is rkb_wakeup_fd[1] - * if enabled. */ /**< Current, exponentially increased, reconnect backoff. 
*/ int rkb_reconnect_backoff_ms; diff --git a/src/rdkafka_ssl.c b/src/rdkafka_ssl.c index 58a702a862..9d1f2d0a2b 100644 --- a/src/rdkafka_ssl.c +++ b/src/rdkafka_ssl.c @@ -200,7 +200,7 @@ rd_kafka_transport_ssl_io_update (rd_kafka_transport_t *rktrans, int ret, break; case SSL_ERROR_WANT_WRITE: - case SSL_ERROR_WANT_CONNECT: + rd_kafka_transport_set_blocked(rktrans, rd_true); rd_kafka_transport_poll_set(rktrans, POLLOUT); break; diff --git a/src/rdkafka_transport.c b/src/rdkafka_transport.c index e39eed9461..47ecabccda 100644 --- a/src/rdkafka_transport.c +++ b/src/rdkafka_transport.c @@ -62,6 +62,8 @@ RD_TLS rd_kafka_transport_t *rd_kafka_curr_transport; +static int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout); + /** * Low-level socket close @@ -88,6 +90,10 @@ void rd_kafka_transport_close (rd_kafka_transport_t *rktrans) { if (rktrans->rktrans_recv_buf) rd_kafka_buf_destroy(rktrans->rktrans_recv_buf); +#ifdef _WIN32 + WSACloseEvent(rktrans->rktrans_wsaevent); +#endif + if (rktrans->rktrans_s != -1) rd_kafka_transport_close0(rktrans->rktrans_rkb->rkb_rk, rktrans->rktrans_s); @@ -185,14 +191,17 @@ rd_kafka_transport_socket_send0 (rd_kafka_transport_t *rktrans, #ifdef _WIN32 if (unlikely(r == RD_SOCKET_ERROR)) { - if (sum > 0 || rd_socket_errno == WSAEWOULDBLOCK) + if (sum > 0 || rd_socket_errno == WSAEWOULDBLOCK) { + rktrans->rktrans_blocked = rd_true; return sum; - else { + } else { rd_snprintf(errstr, errstr_size, "%s", rd_socket_strerror(rd_socket_errno)); return -1; } } + + rktrans->rktrans_blocked = rd_false; #else if (unlikely(r <= 0)) { if (r == 0 || rd_socket_errno == EAGAIN) @@ -675,22 +684,34 @@ static int rd_kafka_transport_get_socket_error (rd_kafka_transport_t *rktrans, /** * IO event handler. * + * @param socket_errstr Is an optional (else NULL) error string from the + * socket layer. 
+ * * Locality: broker thread */ static void rd_kafka_transport_io_event (rd_kafka_transport_t *rktrans, - int events) { + int events, + const char *socket_errstr) { char errstr[512]; int r; rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - switch (rkb->rkb_state) + switch (rkb->rkb_state) { case RD_KAFKA_BROKER_STATE_CONNECT: /* Asynchronous connect finished, read status. */ if (!(events & (POLLOUT|POLLERR|POLLHUP))) return; - if (rd_kafka_transport_get_socket_error(rktrans, &r) == -1) { + if (socket_errstr) + rd_kafka_broker_fail( + rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, + "Connect to %s failed: %s", + rd_sockaddr2str(rkb->rkb_addr_last, + RD_SOCKADDR2STR_F_PORT | + RD_SOCKADDR2STR_F_FAMILY), + socket_errstr); + else if (rd_kafka_transport_get_socket_error(rktrans, &r) == -1) { rd_kafka_broker_fail( rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, "Connect to %s failed: " @@ -801,37 +822,233 @@ static void rd_kafka_transport_io_event (rd_kafka_transport_t *rktrans, } + +#ifdef _WIN32 +/** + * @brief Convert WSA FD_.. events to POLL.. events. + */ +static RD_INLINE int rd_kafka_transport_wsa2events (long wevents) { + int events = 0; + + if (unlikely(wevents == 0)) + return 0; + + if (wevents & FD_READ) + events |= POLLIN; + if (wevents & (FD_WRITE | FD_CONNECT)) + events |= POLLOUT; + if (wevents & FD_CLOSE) + events |= POLLHUP; + + rd_dassert(events != 0); + + return events; +} + +/** + * @brief Convert POLL.. events to WSA FD_.. events. + */ +static RD_INLINE int rd_kafka_transport_events2wsa (int events, + rd_bool_t is_connecting) { + long wevents = FD_CLOSE; + + if (unlikely(is_connecting)) + return wevents | FD_CONNECT; + + if (events & POLLIN) + wevents |= FD_READ; + if (events & POLLOUT) + wevents |= FD_WRITE; + + return wevents; +} + + +/** + * @returns the WinSocket events (as POLL.. events) for the broker socket. 
+ */ +static int rd_kafka_transport_get_wsa_events (rd_kafka_transport_t *rktrans) { + const int try_bits[4 * 2] = { + FD_READ_BIT, POLLIN, + FD_WRITE_BIT, POLLOUT, + FD_CONNECT_BIT, POLLOUT, + FD_CLOSE_BIT, POLLHUP + }; + int r, i; + WSANETWORKEVENTS netevents; + int events = 0; + const char *socket_errstr = NULL; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + + /* Get Socket event */ + r = WSAEnumNetworkEvents(rktrans->rktrans_s, + rktrans->rktrans_wsaevent, + &netevents); + if (unlikely(r == SOCKET_ERROR)) { + rd_rkb_log(rkb, LOG_ERR, "WSAWAIT", + "WSAEnumNetworkEvents() failed: %s", + rd_socket_strerror(rd_socket_errno)); + socket_errstr = rd_socket_strerror(rd_socket_errno); + return POLLHUP | POLLERR; + } + + /* Get fired events and errors for each event type */ + for (i = 0; i < RD_ARRAYSIZE(try_bits); i += 2) { + const int bit = try_bits[i]; + const int event = try_bits[i + 1]; + + if (!(netevents.lNetworkEvents & (1 << bit))) + continue; + + if (unlikely(netevents.iErrorCode[bit])) { + socket_errstr = rd_socket_strerror( + netevents.iErrorCode[bit]); + events |= POLLHUP; + } else { + events |= event; + + if (bit == FD_WRITE_BIT) { + /* Writing no longer blocked */ + rktrans->rktrans_blocked = rd_false; + } + } + } + + return events; +} + + +/** + * @brief Win32: Poll transport and \p rkq cond events. + * + * @returns the transport socket POLL.. event bits. 
+ */ +static int rd_kafka_transport_io_serve_win32 (rd_kafka_transport_t *rktrans, + rd_kafka_q_t *rkq, int timeout_ms) { + const DWORD wsaevent_cnt = 3; + WSAEVENT wsaevents[3] = { + rkq->rkq_cond.mEvents[0], /* rkq: cnd_signal */ + rkq->rkq_cond.mEvents[1], /* rkq: cnd_broadcast */ + rktrans->rktrans_wsaevent, /* socket */ + }; + DWORD r; + int events = 0; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + rd_bool_t set_pollout = rd_false; + rd_bool_t cnd_is_waiting = rd_false; + + /* WSA only sets FD_WRITE (e.g., POLLOUT) when the socket was + * previously blocked, unlike BSD sockets that set POLLOUT as long as + * the socket isn't blocked. So we need to imitate the BSD behaviour + * here and cut the timeout short if a write is wanted and the socket + * is not currently blocked. */ + if (rktrans->rktrans_rkb->rkb_state != RD_KAFKA_BROKER_STATE_CONNECT && + !rktrans->rktrans_blocked && + (rktrans->rktrans_pfd[0].events & POLLOUT)) { + timeout_ms = 0; + set_pollout = rd_true; + } else { + /* Check if the queue already has ops enqueued in which case we + * cut the timeout short. Else add this thread as waiting on the + * queue's condvar so that cnd_signal() (et.al.) will perform + * SetEvent() and thus wake up this thread in case a new op is + * added to the queue. */ + mtx_lock(&rkq->rkq_lock); + if (rkq->rkq_qlen > 0) { + timeout_ms = 0; + } else { + cnd_is_waiting = rd_true; + cnd_wait_enter(&rkq->rkq_cond); + } + mtx_unlock(&rkq->rkq_lock); + } + + /* Wait for IO and queue events */ + r = WSAWaitForMultipleEvents(wsaevent_cnt, wsaevents, FALSE, + timeout_ms, FALSE); + + if (cnd_is_waiting) { + mtx_lock(&rkq->rkq_lock); + cnd_wait_exit(&rkq->rkq_cond); + mtx_unlock(&rkq->rkq_lock); + } + + if (unlikely(r == WSA_WAIT_FAILED)) { + rd_rkb_log(rkb, LOG_CRIT, "WSAWAIT", + "WSAWaitForMultipleEvents failed: %s", + rd_socket_strerror(rd_socket_errno)); + return POLLERR; + } else if (r != WSA_WAIT_TIMEOUT) { + r -= WSA_WAIT_EVENT_0; + + /* Get the socket events. 
*/ + events = rd_kafka_transport_get_wsa_events(rktrans); + } + + /* As explained above we need to set the POLLOUT flag + * in case it is wanted but not triggered by Winsocket so that + * io_event() knows it can attempt to send more data. */ + if (likely(set_pollout && !(events & (POLLHUP | POLLERR | POLLOUT)))) + events |= POLLOUT; + + return events; +} +#endif + + /** * @brief Poll and serve IOs * - * @returns 1 if at least one IO event was triggered, else 0, or -1 on error. + * @returns 0 if \p rkq may need additional blocking/timeout polling, else 1. * * @locality broker thread */ int rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans, - int timeout_ms) { - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + rd_kafka_q_t *rkq, int timeout_ms) { + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; int events; - int r; rd_kafka_curr_transport = rktrans; - if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_CONNECT || - (rkb->rkb_state > RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE && - rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight && - rd_kafka_bufq_cnt(&rkb->rkb_outbufs) > 0)) + if ( +#ifndef _WIN32 + /* BSD sockets use POLLOUT to indicate success to connect. + * Windows has its own flag for this (FD_CONNECT). */ + rkb->rkb_state == RD_KAFKA_BROKER_STATE_CONNECT || +#endif + (rkb->rkb_state > RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE && + rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight && + rd_kafka_bufq_cnt(&rkb->rkb_outbufs) > 0)) rd_kafka_transport_poll_set(rkb->rkb_transport, POLLOUT); - if ((r = rd_kafka_transport_poll(rktrans, timeout_ms)) <= 0) - return r; +#ifdef _WIN32 + /* BSD sockets use POLLIN and a following recv() returning 0 to + * to indicate connection close. + * Windows has its own flag for this (FD_CLOSE). */ + if (rd_kafka_bufq_cnt(&rkb->rkb_waitresps) > 0) +#endif + rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN); - /* Only handle events on the broker socket, the wakeup - * socket is just for waking up the blocking boll. 
*/ + /* On Windows we can wait for both IO and condvars (rkq) + * simultaneously. + * + * On *nix/BSD sockets we use a local pipe (pfd[1]) to wake + * up the rkq. */ +#ifdef _WIN32 + events = rd_kafka_transport_io_serve_win32(rktrans, rkq, timeout_ms); + +#else + if (rd_kafka_transport_poll(rktrans, timeout_ms) < 1) + return 0; /* No events, caller can block on \p rkq poll */ + + /* Broker socket events */ events = rktrans->rktrans_pfd[0].revents; +#endif + if (events) { - rd_kafka_transport_poll_clear(rktrans, POLLOUT); + rd_kafka_transport_poll_clear(rktrans, POLLOUT|POLLIN); - rd_kafka_transport_io_event(rktrans, events); + rd_kafka_transport_io_event(rktrans, events, NULL); } return 1; @@ -881,6 +1098,11 @@ rd_kafka_transport_t *rd_kafka_transport_new (rd_kafka_broker_t *rkb, rktrans->rktrans_rkb = rkb; rktrans->rktrans_s = s; +#ifdef _WIN32 + rktrans->rktrans_wsaevent = WSACreateEvent(); + rd_assert(rktrans->rktrans_wsaevent != NULL); +#endif + return rktrans; } @@ -972,55 +1194,64 @@ rd_kafka_transport_t *rd_kafka_transport_connect (rd_kafka_broker_t *rkb, } +#ifdef _WIN32 +/** + * @brief Set the WinSocket event poll bit to \p events. 
+ */ +static void rd_kafka_transport_poll_set_wsa (rd_kafka_transport_t *rktrans, + int events) { + int r; + r = WSAEventSelect(rktrans->rktrans_s, + rktrans->rktrans_wsaevent, + rd_kafka_transport_events2wsa( + rktrans->rktrans_pfd[0].events, + rktrans->rktrans_rkb->rkb_state == + RD_KAFKA_BROKER_STATE_CONNECT)); + if (unlikely(r != 0)) { + rd_rkb_log(rktrans->rktrans_rkb, LOG_CRIT, "WSAEVENT", + "WSAEventSelect() failed: %s", + rd_socket_strerror(rd_socket_errno)); + } +} +#endif void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event) { - rktrans->rktrans_pfd[0].events |= event; + if ((rktrans->rktrans_pfd[0].events & event) == event) + return; + + rktrans->rktrans_pfd[0].events |= event; + +#ifdef _WIN32 + rd_kafka_transport_poll_set_wsa(rktrans, + rktrans->rktrans_pfd[0].events); +#endif } void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event) { - rktrans->rktrans_pfd[0].events &= ~event; + if (!(rktrans->rktrans_pfd[0].events & event)) + return; + + rktrans->rktrans_pfd[0].events &= ~event; + +#ifdef _WIN32 + rd_kafka_transport_poll_set_wsa(rktrans, + rktrans->rktrans_pfd[0].events); +#endif } +#ifndef _WIN32 /** * @brief Poll transport fds. * * @returns 1 if an event was raised, else 0, or -1 on error. */ -int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) { +static int rd_kafka_transport_poll (rd_kafka_transport_t *rktrans, int tmout) { int r; -#ifndef _WIN32 r = poll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout); if (r <= 0) return r; -#else - r = WSAPoll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout); - if (r == 0) { - /* Workaround for broken WSAPoll() while connecting: - * failed connection attempts are not indicated at all by WSAPoll() - * so we need to check the socket error when Poll returns 0. 
- * Issue #525 */ - r = ECONNRESET; - if (unlikely(rktrans->rktrans_rkb->rkb_state == - RD_KAFKA_BROKER_STATE_CONNECT && - (rd_kafka_transport_get_socket_error(rktrans, - &r) == -1 || - r != 0))) { - char errstr[512]; - rd_snprintf(errstr, sizeof(errstr), - "Connect to %s failed: %s", - rd_sockaddr2str(rktrans->rktrans_rkb-> - rkb_addr_last, - RD_SOCKADDR2STR_F_PORT | - RD_SOCKADDR2STR_F_FAMILY), - rd_socket_strerror(r)); - rd_kafka_transport_connect_done(rktrans, errstr); - return -1; - } else - return 0; - } else if (r == RD_SOCKET_ERROR) - return -1; -#endif + rd_atomic64_add(&rktrans->rktrans_rkb->rkb_c.wakeups, 1); if (rktrans->rktrans_pfd[1].revents & POLLIN) { @@ -1033,9 +1264,21 @@ int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) { return 1; } +#endif - - +#ifdef _WIN32 +/** + * @brief A socket write operation would block, flag the socket + * as blocked so that POLLOUT events are handled correctly. + * + * This is really only used on Windows where POLLOUT (FD_WRITE) is + * edge-triggered rather than level-triggered. 
+ */ +void rd_kafka_transport_set_blocked (rd_kafka_transport_t *rktrans, + rd_bool_t blocked) { + rktrans->rktrans_blocked = blocked; +} +#endif #if 0 diff --git a/src/rdkafka_transport.h b/src/rdkafka_transport.h index 6c289283c6..17223984fc 100644 --- a/src/rdkafka_transport.h +++ b/src/rdkafka_transport.h @@ -39,6 +39,7 @@ typedef struct rd_kafka_transport_s rd_kafka_transport_t; int rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans, + rd_kafka_q_t *rkq, int timeout_ms); ssize_t rd_kafka_transport_send (rd_kafka_transport_t *rktrans, @@ -71,7 +72,15 @@ void rd_kafka_transport_close(rd_kafka_transport_t *rktrans); void rd_kafka_transport_shutdown (rd_kafka_transport_t *rktrans); void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event); void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event); -int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout); + +#ifdef _WIN32 +void rd_kafka_transport_set_blocked (rd_kafka_transport_t *rktrans, + rd_bool_t blocked); +#else +/* no-op on other platforms */ +#define rd_kafka_transport_set_blocked(rktrans,blocked) do {} while (0) +#endif + void rd_kafka_transport_init (void); diff --git a/src/rdkafka_transport_int.h b/src/rdkafka_transport_int.h index 2c060623cb..09f9603bb0 100644 --- a/src/rdkafka_transport_int.h +++ b/src/rdkafka_transport_int.h @@ -52,6 +52,16 @@ struct rd_kafka_transport_s { SSL *rktrans_ssl; #endif +#ifdef _WIN32 + WSAEVENT *rktrans_wsaevent; + rd_bool_t rktrans_blocked; /* Latest send() returned ..WOULDBLOCK. + * We need to poll for FD_WRITE which + * is edge-triggered rather than + * level-triggered. + * This behaviour differs from BSD + * sockets. 
 */
+#endif
+
         struct {
                 void *state;            /* SASL implementation
                                          * state handle */
@@ -75,7 +85,7 @@ struct rd_kafka_transport_s {
 
         /* Two pollable fds:
          * - TCP socket
-         * - wake-up fd
+         * - wake-up fd (not used on Win32)
          */
         rd_pollfd_t rktrans_pfd[2];
         int rktrans_pfd_cnt;
diff --git a/src/tinycthread_extra.c b/src/tinycthread_extra.c
index c1aa31331e..d48de04bc7 100644
--- a/src/tinycthread_extra.c
+++ b/src/tinycthread_extra.c
@@ -59,6 +59,23 @@ int thrd_is_current(thrd_t thr) {
 }
 
 
+#ifdef _WIN32
+void cnd_wait_enter (cnd_t *cond) {
+        /* Increment number of waiters */
+        EnterCriticalSection(&cond->mWaitersCountLock);
+        ++cond->mWaitersCount;
+        LeaveCriticalSection(&cond->mWaitersCountLock);
+}
+
+void cnd_wait_exit (cnd_t *cond) {
+        /* Decrement number of waiters */
+        EnterCriticalSection(&cond->mWaitersCountLock);
+        --cond->mWaitersCount;
+        LeaveCriticalSection(&cond->mWaitersCountLock);
+}
+#endif
+
+
 int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms) {
diff --git a/src/tinycthread_extra.h b/src/tinycthread_extra.h
index 0bf922f6ec..fc08a5bb8d 100644
--- a/src/tinycthread_extra.h
+++ b/src/tinycthread_extra.h
@@ -54,6 +54,22 @@ int thrd_setname (const char *name);
 int thrd_is_current(thrd_t thr);
 
 
+#ifdef _WIN32
+/**
+ * @brief Mark the current thread as waiting on cnd.
+ *
+ * @remark This is to be used when the thread uses its own
+ *         WaitForMultipleEvents() call rather than cnd_timedwait().
+ *
+ * @sa cnd_wait_exit()
+ */
+void cnd_wait_enter (cnd_t *cond);
+
+/**
+ * @brief Mark the current thread as no longer waiting on cnd.
+ */ +void cnd_wait_exit (cnd_t *cond); +#endif /** From 440ec7b0f684f8a7218f2f92ca5c8beba805bbf6 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 7 Sep 2021 17:10:38 +0200 Subject: [PATCH 43/56] Add vcpkg_installed to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index f7b1e11609..31c5061e33 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,4 @@ test_report*.json cov-int gdbrun*.gdb TAGS +vcpkg_installed From 5cc69326c741c6325f78f51d49a532ec69e07719 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 7 Sep 2021 17:11:20 +0200 Subject: [PATCH 44/56] Left-trim spaces from string configuration values This makes it easier to use Bash on Windows where a prefixing / is translated into the MinGW32 file system root. --- src/rdkafka_conf.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index 355af364d7..1d66580143 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -1971,6 +1971,12 @@ rd_kafka_anyconf_set_prop (int scope, void *conf, switch (prop->type) { case _RK_C_STR: + /* Left-trim string(likes) */ + if (value) + while (isspace((int)*value)) + value++; + + /* FALLTHRU */ case _RK_C_KSTR: if (prop->s2i[0].str) { int match; From 9401a5bd5454a810d76b1d9a0d902efd5a3ddd4c Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Thu, 9 Sep 2021 20:15:57 +0200 Subject: [PATCH 45/56] Mark rd_kafka_conf_kv_split as unused .. until it's used. --- src/rdkafka_conf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index 1d66580143..889c6d1842 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -3758,7 +3758,8 @@ static void rd_kafka_sw_str_sanitize_inplace (char *str) { * on success. The array count is returned in \p cntp. * The returned pointer must be freed with rd_free(). 
*/ -static char **rd_kafka_conf_kv_split (const char **input, size_t incnt, +static RD_UNUSED +char **rd_kafka_conf_kv_split (const char **input, size_t incnt, size_t *cntp) { size_t i; char **out, *p; From 6e9c9b5f12dc4cebf5feb8be54e3ca1062c3cc7f Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 5 Oct 2021 13:16:38 +0200 Subject: [PATCH 46/56] rd_kafka_queue_get_background() now creates the background thread --- CHANGELOG.md | 11 ++++++ src/rdkafka.c | 43 ++++++------------------ src/rdkafka.h | 15 +++++---- src/rdkafka_background.c | 72 ++++++++++++++++++++++++++++++++++++++++ src/rdkafka_int.h | 7 ++++ src/rdkafka_queue.c | 23 ++++++++++--- 6 files changed, 127 insertions(+), 44 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0387cd0e5f..733a1304ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +# librdkafka v1.9.0 + +librdkafka v1.9.0 is a feature release: + + +## Enhancements + * `rd_kafka_queue_get_background()` now creates the background thread + if not already created. + To be used in conjunction with `enable.sasl.callback.queue`. + + # librdkafka v1.8.2 librdkafka v1.8.2 is a maintenance release. diff --git a/src/rdkafka.c b/src/rdkafka.c index 11f8c12271..ee0fdb616b 100644 --- a/src/rdkafka.c +++ b/src/rdkafka.c @@ -2110,7 +2110,7 @@ static int rd_kafka_thread_main (void *arg) { } -static void rd_kafka_term_sig_handler (int sig) { +void rd_kafka_term_sig_handler (int sig) { /* nop */ } @@ -2403,46 +2403,23 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, pthread_sigmask(SIG_SETMASK, &newset, &oldset); #endif - mtx_lock(&rk->rk_init_lock); - /* Create background thread and queue if background_event_cb() - * has been configured. + * RD_KAFKA_EVENT_BACKGROUND has been enabled. * Do this before creating the main thread since after * the main thread is created it is no longer trivial to error * out from rd_kafka_new(). 
*/ - if (rk->rk_conf.background_event_cb) { - /* Hold off background thread until thrd_create() is done. */ + if (rk->rk_conf.background_event_cb || + (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_BACKGROUND)) { + rd_kafka_resp_err_t err; rd_kafka_wrlock(rk); - - rk->rk_background.q = rd_kafka_q_new(rk); - - rk->rk_init_wait_cnt++; - - if ((thrd_create(&rk->rk_background.thread, - rd_kafka_background_thread_main, rk)) != - thrd_success) { - rk->rk_init_wait_cnt--; - ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; - ret_errno = errno; - if (errstr) - rd_snprintf(errstr, errstr_size, - "Failed to create background " - "thread: %s (%i)", - rd_strerror(errno), errno); - rd_kafka_wrunlock(rk); - mtx_unlock(&rk->rk_init_lock); - -#ifndef _WIN32 - /* Restore sigmask of caller */ - pthread_sigmask(SIG_SETMASK, &oldset, NULL); -#endif - goto fail; - } - + err = rd_kafka_background_thread_create(rk, + errstr, errstr_size); rd_kafka_wrunlock(rk); + if (err) + goto fail; } - + mtx_lock(&rk->rk_init_lock); /* Lock handle here to synchronise state, i.e., hold off * the thread until we've finalized the handle. */ diff --git a/src/rdkafka.h b/src/rdkafka.h index b85ba9099b..1faea29de1 100644 --- a/src/rdkafka.h +++ b/src/rdkafka.h @@ -3246,19 +3246,19 @@ rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, * @returns a reference to the background thread queue, or NULL if the * background queue is not enabled. * - * To enable the background thread queue set a generic event handler callback - * with rd_kafka_conf_set_background_event_cb() on the client instance - * configuration object (rd_kafka_conf_t). + * The background thread queue provides the application with an automatically + * polled queue that triggers the event callback in a background thread, + * this background thread is completely managed by librdkafka. 
+ * + * The background thread queue is automatically created if a generic event + * handler callback is configured with rd_kafka_conf_set_background_event_cb() + * or if rd_kafka_queue_get_background() is called. * * The background queue is polled and served by librdkafka and MUST NOT be * polled, forwarded, or otherwise managed by the application, it may only * be used as the destination queue passed to queue-enabled APIs, such as * the Admin API. * - * The background thread queue provides the application with an automatically - * polled queue that triggers the event callback in a background thread, - * this background thread is completely managed by librdkafka. - * * Use rd_kafka_queue_destroy() to loose the reference. * * @warning The background queue MUST NOT be read from (polled, consumed, etc), @@ -4953,6 +4953,7 @@ typedef int rd_kafka_event_type_t; #define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100 /**< SASL/OAUTHBEARER token needs to be refreshed */ +#define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. */ /** diff --git a/src/rdkafka_background.c b/src/rdkafka_background.c index 540fe477a5..f643280e18 100644 --- a/src/rdkafka_background.c +++ b/src/rdkafka_background.c @@ -37,6 +37,8 @@ #include "rdkafka_event.h" #include "rdkafka_interceptor.h" +#include + /** * @brief Call the registered background_event_cb. * @locality rdkafka background queue thread @@ -151,3 +153,73 @@ int rd_kafka_background_thread_main (void *arg) { return 0; } + +/** + * @brief Create the background thread. 
+ * + * @locks_acquired rk_init_lock + * @locks_required rd_kafka_wrlock() + */ +rd_kafka_resp_err_t rd_kafka_background_thread_create (rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { +#ifndef _WIN32 + sigset_t newset, oldset; +#endif + + if (rk->rk_background.q) { + rd_snprintf(errstr, errstr_size, + "Background thread already created"); + return RD_KAFKA_RESP_ERR__CONFLICT; + } + + rk->rk_background.q = rd_kafka_q_new(rk); + + mtx_lock(&rk->rk_init_lock); + rk->rk_init_wait_cnt++; + +#ifndef _WIN32 + /* Block all signals in newly created threads. + * To avoid race condition we block all signals in the calling + * thread, which the new thread will inherit its sigmask from, + * and then restore the original sigmask of the calling thread when + * we're done creating the thread. */ + sigemptyset(&oldset); + sigfillset(&newset); + if (rk->rk_conf.term_sig) { + struct sigaction sa_term = { + .sa_handler = rd_kafka_term_sig_handler + }; + sigaction(rk->rk_conf.term_sig, &sa_term, NULL); + } + pthread_sigmask(SIG_SETMASK, &newset, &oldset); +#endif + + + if ((thrd_create(&rk->rk_background.thread, + rd_kafka_background_thread_main, rk)) != + thrd_success) { + rd_snprintf(errstr, errstr_size, + "Failed to create background thread: %s", + rd_strerror(errno)); + rd_kafka_q_destroy_owner(rk->rk_background.q); + rk->rk_background.q = NULL; + rk->rk_init_wait_cnt--; + mtx_unlock(&rk->rk_init_lock); + +#ifndef _WIN32 + /* Restore sigmask of caller */ + pthread_sigmask(SIG_SETMASK, &oldset, NULL); +#endif + return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; + } + + mtx_unlock(&rk->rk_init_lock); + +#ifndef _WIN32 + /* Restore sigmask of caller */ + pthread_sigmask(SIG_SETMASK, &oldset, NULL); +#endif + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} diff --git a/src/rdkafka_int.h b/src/rdkafka_int.h index 2bbc68a538..5a19cd7fe0 100644 --- a/src/rdkafka_int.h +++ b/src/rdkafka_int.h @@ -1037,9 +1037,16 @@ rd_kafka_app_polled (rd_kafka_t *rk) { } + +void rd_kafka_term_sig_handler (int 
sig); + /** * rdkafka_background.c */ int rd_kafka_background_thread_main (void *arg); +rd_kafka_resp_err_t rd_kafka_background_thread_create (rd_kafka_t *rk, + char *errstr, + size_t errstr_size); + #endif /* _RDKAFKA_INT_H_ */ diff --git a/src/rdkafka_queue.c b/src/rdkafka_queue.c index b43225a009..9bb5bea94c 100644 --- a/src/rdkafka_queue.c +++ b/src/rdkafka_queue.c @@ -742,10 +742,25 @@ rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, } rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk) { - if (rk->rk_background.q) - return rd_kafka_queue_new0(rk, rk->rk_background.q); - else - return NULL; + rd_kafka_queue_t *rkqu; + + rd_kafka_wrlock(rk); + if (!rk->rk_background.q) { + char errstr[256]; + + if (rd_kafka_background_thread_create(rk, + errstr, sizeof(errstr))) { + rd_kafka_log(rk, LOG_ERR, "BACKGROUND", + "Failed to create background thread: %s", + errstr); + rd_kafka_wrunlock(rk); + return NULL; + } + } + + rkqu = rd_kafka_queue_new0(rk, rk->rk_background.q); + rd_kafka_wrunlock(rk); + return rkqu; } From 88ac5cfc692387f065eb537e376fbf300c5683f4 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 5 Oct 2021 22:01:40 +0200 Subject: [PATCH 47/56] Added custom SASL callback queue --- CHANGELOG.md | 11 ++- CONFIGURATION.md | 2 +- src-cpp/HandleImpl.cpp | 5 +- src-cpp/QueueImpl.cpp | 4 +- src-cpp/rdkafkacpp.h | 54 ++++++++++++- src-cpp/rdkafkacpp_int.h | 80 ++++++++++++++----- src/rdkafka.h | 84 +++++++++++++++++++- src/rdkafka_background.c | 3 +- src/rdkafka_conf.c | 42 ++++++++-- src/rdkafka_conf.h | 5 +- src/rdkafka_event.h | 5 ++ src/rdkafka_int.h | 1 + src/rdkafka_op.h | 2 + src/rdkafka_queue.h | 2 + src/rdkafka_sasl.c | 33 +++++++- src/rdkafka_sasl_int.h | 6 +- src/rdkafka_sasl_oauthbearer.c | 51 +++++++++++-- tests/0128-sasl_callback_queue.cpp | 114 ++++++++++++++++++++++++++++ tests/CMakeLists.txt | 1 + tests/interactive_broker_version.py | 2 +- tests/test.c | 30 ++++++-- tests/test.h | 1 - tests/testshared.h | 2 + 
win32/tests/tests.vcxproj | 1 + 24 files changed, 485 insertions(+), 56 deletions(-) create mode 100644 tests/0128-sasl_callback_queue.cpp diff --git a/CHANGELOG.md b/CHANGELOG.md index 733a1304ba..d672692706 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,9 +4,18 @@ librdkafka v1.9.0 is a feature release: ## Enhancements + + * SASL OAUTHBEARER refresh callbacks can now be scheduled for execution + on librdkafka's background thread. This solves the problem where an + application has a custom SASL OAUTHBEARER refresh callback and thus needs to + call `rd_kafka_poll()` (et.al.) at least once to trigger the + refresh callback before being able to connect to brokers. + With the new `rd_kafka_conf_enable_sasl_queue()` configuration API and + `rd_kafka_sasl_background_callbacks_enable()` the refresh callbacks + can now be triggered automatically on the librdkafka background thread. * `rd_kafka_queue_get_background()` now creates the background thread if not already created. - To be used in conjunction with `enable.sasl.callback.queue`. + # librdkafka v1.8.2 diff --git a/CONFIGURATION.md b/CONFIGURATION.md index 775a1eec39..35475c7b28 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -91,7 +91,7 @@ sasl.username | * | | sasl.password | * | | | high | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism
*Type: string* sasl.oauthbearer.config | * | | | low | SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123`
*Type: string* enable.sasl.oauthbearer.unsecure.jwt | * | true, false | false | low | Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production.
*Type: boolean* -oauthbearer_token_refresh_cb | * | | | low | SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et.al. This callback will be triggered when it is time to refresh the client's OAUTHBEARER token.
*Type: see dedicated API* +oauthbearer_token_refresh_cb | * | | | low | SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et.al. This callback will be triggered when it is time to refresh the client's OAUTHBEARER token. Also see `rd_kafka_conf_enable_sasl_queue()`.
*Type: see dedicated API* sasl.oauthbearer.method | * | default, oidc | default | low | Set to "default" or "oidc" to control which login method is used. If set it to "oidc", OAuth/OIDC login method will be used. sasl.oauthbearer.client.id, sasl.oauthbearer.client.secret, sasl.oauthbearer.scope, sasl.oauthbearer.extensions, and sasl.oauthbearer.token.endpoint.url are needed if sasl.oauthbearer.method is set to "oidc".
*Type: enum value* sasl.oauthbearer.client.id | * | | | low | It's a public identifier for the application. It must be unique across all clients that the authorization server handles. This is only used when sasl.oauthbearer.method is set to oidc.
*Type: string* sasl.oauthbearer.client.secret | * | | | low | A client secret only known to the application and the authorization server. This should be a sufficiently random string that are not guessable. This is only used when sasl.oauthbearer.method is set to "oidc".
*Type: string* diff --git a/src-cpp/HandleImpl.cpp b/src-cpp/HandleImpl.cpp index a97d9fc64b..f4ae56dbe8 100644 --- a/src-cpp/HandleImpl.cpp +++ b/src-cpp/HandleImpl.cpp @@ -369,10 +369,7 @@ RdKafka::HandleImpl::get_partition_queue (const TopicPartition *part) { if (rkqu == NULL) return NULL; - RdKafka::QueueImpl *queueimpl = new RdKafka::QueueImpl; - queueimpl->queue_ = rkqu; - - return queueimpl; + return new QueueImpl(rkqu); } RdKafka::ErrorCode diff --git a/src-cpp/QueueImpl.cpp b/src-cpp/QueueImpl.cpp index 5de1f78620..8499dfccb7 100644 --- a/src-cpp/QueueImpl.cpp +++ b/src-cpp/QueueImpl.cpp @@ -35,9 +35,7 @@ RdKafka::Queue::~Queue () { } RdKafka::Queue *RdKafka::Queue::create (Handle *base) { - RdKafka::QueueImpl *queueimpl = new RdKafka::QueueImpl; - queueimpl->queue_ = rd_kafka_queue_new(dynamic_cast(base)->rk_); - return queueimpl; + return new RdKafka::QueueImpl(rd_kafka_queue_new(dynamic_cast(base)->rk_)); } RdKafka::ErrorCode diff --git a/src-cpp/rdkafkacpp.h b/src-cpp/rdkafkacpp.h index daed1cbf5b..fbd77a06f7 100644 --- a/src-cpp/rdkafkacpp.h +++ b/src-cpp/rdkafkacpp.h @@ -719,7 +719,7 @@ class RD_EXPORT DeliveryReportCb { * The callback should invoke RdKafka::Handle::oauthbearer_set_token() or * RdKafka::Handle::oauthbearer_set_token_failure() to indicate success or * failure, respectively. - * + * * The refresh operation is eventable and may be received when an event * callback handler is set with an event type of * \c RdKafka::Event::EVENT_OAUTHBEARER_TOKEN_REFRESH. @@ -1436,6 +1436,33 @@ class RD_EXPORT Conf { */ virtual Conf::ConfResult set_engine_callback_data (void *value, std::string &errstr) = 0; + + + /** @brief Enable/disable creation of a queue specific to SASL events + * and callbacks. + * + * For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this + * configuration API allows an application to get a dedicated + * queue for the SASL events/callbacks. 
After enabling the queue with this API + * the application can retrieve the queue by calling + * RdKafka::Handle::get_sasl_queue() on the client instance. + * This queue may then be served directly by the application + * (RdKafka::Queue::poll()) or forwarded to another queue, such as + * the background queue. + * + * A convenience function is available to automatically forward the SASL queue + * to librdkafka's background thread, see + * RdKafka::Handle::sasl_background_callbacks_enable(). + * + * By default (\p enable = false) the main queue (as served by + * RdKafka::Handle::poll(), et.al.) is used for SASL callbacks. + * + * @remark The SASL queue is currently only used by the SASL OAUTHBEARER " + * mechanism's token refresh callback. + */ + virtual Conf::ConfResult enable_sasl_queue (bool enable, + std::string &errstr) = 0; + }; /**@}*/ @@ -1783,6 +1810,31 @@ class RD_EXPORT Handle { */ virtual ErrorCode oauthbearer_set_token_failure (const std::string &errstr) = 0; + /** + * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka + * background thread. + * + * This serves as an alternative for applications that do not + * call RdKafka::Handle::poll() (et.al.) at regular intervals. + */ + virtual Error *sasl_background_callbacks_enable () = 0; + + + /** + * @returns the SASL callback queue, if enabled, else NULL. + * + * @sa RdKafka::Conf::enable_sasl_queue() + */ + virtual Queue *get_sasl_queue () = 0; + + /** + * @returns the librdkafka background thread queue. + */ + virtual Queue *get_background_queue () = 0; + + + + /** * @brief Allocate memory using the same allocator librdkafka uses. 
* diff --git a/src-cpp/rdkafkacpp_int.h b/src-cpp/rdkafkacpp_int.h index ceb8175cda..239f363189 100644 --- a/src-cpp/rdkafkacpp_int.h +++ b/src-cpp/rdkafkacpp_int.h @@ -187,6 +187,24 @@ class EventImpl : public Event { bool fatal_; }; +class QueueImpl : virtual public Queue { + public: + QueueImpl(rd_kafka_queue_t *c_rkqu): queue_(c_rkqu) {} + ~QueueImpl () { + rd_kafka_queue_destroy(queue_); + } + static Queue *create (Handle *base); + ErrorCode forward (Queue *queue); + Message *consume (int timeout_ms); + int poll (int timeout_ms); + void io_event_enable(int fd, const void *payload, size_t size); + + rd_kafka_queue_t *queue_; +}; + + + + class HeadersImpl : public Headers { public: @@ -721,6 +739,17 @@ class ConfImpl : public Conf { return static_cast(res); } + Conf::ConfResult enable_sasl_queue (bool enable, std::string &errstr) { + if (!rk_conf_) { + errstr = "Requires RdKafka::Conf::CONF_GLOBAL object"; + return Conf::CONF_INVALID; + } + + rd_kafka_conf_enable_sasl_queue(rk_conf_, enable ? 
1 : 0); + + return Conf::CONF_OK; + } + Conf::ConfResult get(const std::string &name, std::string &value) const { if (name.compare("dr_cb") == 0 || @@ -732,7 +761,9 @@ class ConfImpl : public Conf { name.compare("rebalance_cb") == 0 || name.compare("offset_commit_cb") == 0 || name.compare("oauthbearer_token_refresh_cb") == 0 || - name.compare("ssl_cert_verify_cb") == 0) { + name.compare("ssl_cert_verify_cb") == 0 || + name.compare("set_engine_callback_data") == 0 || + name.compare("enable_sasl_queue") == 0) { return Conf::CONF_INVALID; } rd_kafka_conf_res_t res = RD_KAFKA_CONF_INVALID; @@ -929,6 +960,27 @@ class HandleImpl : virtual public Handle { Queue *get_partition_queue (const TopicPartition *partition); + Queue *get_sasl_queue () { + rd_kafka_queue_t *rkqu; + rkqu = rd_kafka_queue_get_sasl(rk_); + + if (rkqu == NULL) + return NULL; + + return new QueueImpl(rkqu); + } + + Queue *get_background_queue () { + rd_kafka_queue_t *rkqu; + rkqu = rd_kafka_queue_get_background(rk_); + + if (rkqu == NULL) + return NULL; + + return new QueueImpl(rkqu); + } + + ErrorCode offsetsForTimes (std::vector &offsets, int timeout_ms) { rd_kafka_topic_partition_list_t *c_offsets = partitions_to_c_parts(offsets); @@ -1004,6 +1056,16 @@ class HandleImpl : virtual public Handle { rk_, errstr.c_str())); }; + Error *sasl_background_callbacks_enable () { + rd_kafka_error_t *c_error = + rd_kafka_sasl_background_callbacks_enable(rk_); + + if (c_error) + return new ErrorImpl(c_error); + + return NULL; + } + void *mem_malloc (size_t size) { return rd_kafka_mem_malloc(rk_, size); }; @@ -1266,22 +1328,6 @@ class MetadataImpl : public Metadata { }; -class QueueImpl : virtual public Queue { - public: - ~QueueImpl () { - rd_kafka_queue_destroy(queue_); - } - static Queue *create (Handle *base); - ErrorCode forward (Queue *queue); - Message *consume (int timeout_ms); - int poll (int timeout_ms); - void io_event_enable(int fd, const void *payload, size_t size); - - rd_kafka_queue_t *queue_; -}; - - 
- class ConsumerImpl : virtual public Consumer, virtual public HandleImpl { diff --git a/src/rdkafka.h b/src/rdkafka.h index 1faea29de1..60fa182426 100644 --- a/src/rdkafka.h +++ b/src/rdkafka.h @@ -2082,14 +2082,28 @@ void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, * * Note that before any SASL/OAUTHBEARER broker connection can succeed the * application must call rd_kafka_oauthbearer_set_token() once -- either - * directly or, more typically, by invoking either rd_kafka_poll() or - * rd_kafka_queue_poll() -- in order to cause retrieval of an initial token to - * occur. + * directly or, more typically, by invoking either rd_kafka_poll(), + * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), etc, in order to cause + * retrieval of an initial token to occur. + * + * Alternatively, the application can enable the SASL queue by calling + * rd_kafka_conf_enable_sasl_queue() on the configuration object prior to + * creating the client instance, get the SASL queue with + * rd_kafka_queue_get_sasl(), and either serve the queue manually by calling + * rd_kafka_queue_poll(), or redirecting the queue to the background thread to + * have the queue served automatically. For the latter case the SASL queue + * must be forwarded to the background queue with rd_kafka_queue_forward(). + * A convenience function is available to automatically forward the SASL queue + * to librdkafka's background thread, see + * rd_kafka_sasl_background_callbacks_enable(). * * An unsecured JWT refresh handler is provided by librdkafka for development * and testing purposes, it is enabled by setting * the \c enable.sasl.oauthbearer.unsecure.jwt property to true and is * mutually exclusive to using a refresh callback. 
+ * + * @sa rd_kafka_sasl_background_callbacks_enable() + * @sa rd_kafka_queue_get_sasl() */ RD_EXPORT void rd_kafka_conf_set_oauthbearer_token_refresh_cb ( @@ -2098,6 +2112,37 @@ void rd_kafka_conf_set_oauthbearer_token_refresh_cb ( const char *oauthbearer_config, void *opaque)); +/** + * @brief Enable/disable creation of a queue specific to SASL events + * and callbacks. + * + * For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this + * configuration API allows an application to get a dedicated + * queue for the SASL events/callbacks. After enabling the queue with this API + * the application can retrieve the queue by calling + * rd_kafka_queue_get_sasl() on the client instance. + * This queue may then be served directly by the application + * (with rd_kafka_queue_poll(), et.al) or forwarded to another queue, such as + * the background queue. + * + * A convenience function is available to automatically forward the SASL queue + * to librdkafka's background thread, see + * rd_kafka_sasl_background_callbacks_enable(). + * + * By default (\p enable = 0) the main queue (as served by rd_kafka_poll(), + * et.al.) is used for SASL callbacks. + * + * @remark The SASL queue is currently only used by the SASL OAUTHBEARER + * mechanism's token_refresh_cb(). + * + * @sa rd_kafka_queue_get_sasl() + * @sa rd_kafka_sasl_background_callbacks_enable() + */ + +RD_EXPORT +void rd_kafka_conf_enable_sasl_queue (rd_kafka_conf_t *conf, int enable); + + /** * @brief Set socket callback. * @@ -3215,6 +3260,39 @@ RD_EXPORT rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk); + +/** + * @returns a reference to the SASL callback queue, if a SASL mechanism + * with callbacks is configured (currently only OAUTHBEARER), else + * returns NULL. + * + * Use rd_kafka_queue_destroy() to loose the reference. 
+ * + * @sa rd_kafka_sasl_background_callbacks_enable() + */ +RD_EXPORT +rd_kafka_queue_t *rd_kafka_queue_get_sasl (rd_kafka_t *rk); + + +/** + * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka + * background thread. + * + * This serves as an alternative for applications that do not call + * rd_kafka_poll() (et.al.) at regular intervals (or not at all), as a means + * of automatically trigger the refresh callbacks, which are needed to + * initiate connections to the brokers in the case a custom OAUTHBEARER + * refresh callback is configured. + * + * @returns NULL on success or an error object on error. + * + * @sa rd_kafka_queue_get_sasl() + * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb() + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable (rd_kafka_t *rk); + + /** * @returns a reference to the librdkafka consumer queue. * This is the queue served by rd_kafka_consumer_poll(). diff --git a/src/rdkafka_background.c b/src/rdkafka_background.c index f643280e18..178cb714a7 100644 --- a/src/rdkafka_background.c +++ b/src/rdkafka_background.c @@ -74,7 +74,8 @@ rd_kafka_background_queue_serve (rd_kafka_t *rk, /* * Dispatch Event:able ops to background_event_cb() */ - if (likely(rd_kafka_event_setup(rk, rko))) { + if (likely(rk->rk_conf.background_event_cb && + rd_kafka_event_setup(rk, rko))) { rd_kafka_call_background_event_cb(rk, rko); /* Event must be destroyed by application. */ return RD_KAFKA_OP_RES_HANDLED; diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index 889c6d1842..35592cf2c0 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -1020,9 +1020,16 @@ static const struct rd_kafka_property rd_kafka_properties[] = { "rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by " "rd_kafka_poll(), et.al. " "This callback will be triggered when it is time to refresh " - "the client's OAUTHBEARER token.", + "the client's OAUTHBEARER token. 
" + "Also see `rd_kafka_conf_enable_sasl_queue()`.", _UNSUPPORTED_OAUTHBEARER }, + { _RK_GLOBAL|_RK_HIDDEN, "enable_sasl_queue", _RK_C_BOOL, + _RK(sasl.enable_callback_queue), + "Enable the SASL callback queue " + "(set with rd_kafka_conf_enable_sasl_queue()).", + 0, 1, 0, + }, { _RK_GLOBAL, "sasl.oauthbearer.method", _RK_C_S2I, _RK(sasl.oauthbearer.method), "Set to \"default\" or \"oidc\" to control which login method " @@ -2879,6 +2886,13 @@ void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, #endif } +void rd_kafka_conf_enable_sasl_queue (rd_kafka_conf_t *conf, int enable) { + rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, + "enable_sasl_queue", + (enable ? "true" : "false")); + +} + void rd_kafka_conf_set_socket_cb (rd_kafka_conf_t *conf, int (*socket_cb) (int domain, int type, int protocol, @@ -3855,10 +3869,28 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, #endif #if WITH_SASL_OAUTHBEARER - if (conf->sasl.enable_oauthbearer_unsecure_jwt && - conf->sasl.oauthbearer.token_refresh_cb) - return "`enable.sasl.oauthbearer.unsecure.jwt` and " - "`oauthbearer_token_refresh_cb` are mutually exclusive"; + if (!rd_strcasecmp(conf->sasl.mechanisms, "OAUTHBEARER")) { + if (conf->sasl.enable_oauthbearer_unsecure_jwt && + conf->sasl.oauthbearer.token_refresh_cb) + return "`enable.sasl.oauthbearer.unsecure.jwt` and " + "`oauthbearer_token_refresh_cb` are " + "mutually exclusive"; + + if (conf->sasl.enable_oauthbearer_unsecure_jwt && + conf->sasl.oauthbearer.method == + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC) + return "`enable.sasl.oauthbearer.unsecure.jwt` and " + "`sasl.oauthbearer.method=oidc` are " + "mutually exclusive"; + + /* Enable background thread for the builtin OIDC handler, + * unless a refresh callback has been set. 
*/ + if (conf->sasl.oauthbearer.method == + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC && + !conf->sasl.oauthbearer.token_refresh_cb) + conf->enabled_events |= RD_KAFKA_EVENT_BACKGROUND; + } + #endif if (cltype == RD_KAFKA_CONSUMER) { diff --git a/src/rdkafka_conf.h b/src/rdkafka_conf.h index 86bc7127ce..fd39286a6d 100644 --- a/src/rdkafka_conf.h +++ b/src/rdkafka_conf.h @@ -163,7 +163,7 @@ typedef enum { /* Increase in steps of 64 as needed. * This must be larger than sizeof(rd_kafka_[topic_]conf_t) */ -#define RD_KAFKA_CONF_PROPS_IDX_MAX (64*29) +#define RD_KAFKA_CONF_PROPS_IDX_MAX (64*30) /** * @struct rd_kafka_anyconf_t @@ -289,8 +289,9 @@ struct rd_kafka_conf_s { #endif char *oauthbearer_config; int enable_oauthbearer_unsecure_jwt; + int enable_callback_queue; struct { - char *method; + rd_kafka_oauthbearer_method_t method; char *token_endpoint_url; char *client_id; char *client_secret; diff --git a/src/rdkafka_event.h b/src/rdkafka_event.h index 49a389f1e4..53215ff094 100644 --- a/src/rdkafka_event.h +++ b/src/rdkafka_event.h @@ -55,8 +55,13 @@ rd_kafka_event_type_t rd_kafka_op2event (rd_kafka_op_type_t optype) { */ static RD_UNUSED RD_INLINE int rd_kafka_event_setup (rd_kafka_t *rk, rd_kafka_op_t *rko) { + + if (unlikely(rko->rko_flags & RD_KAFKA_OP_F_FORCE_CB)) + return 0; + if (!rko->rko_evtype) rko->rko_evtype = rd_kafka_op2event(rko->rko_type); + switch (rko->rko_evtype) { case RD_KAFKA_EVENT_NONE: diff --git a/src/rdkafka_int.h b/src/rdkafka_int.h index 5a19cd7fe0..64ba5ea63a 100644 --- a/src/rdkafka_int.h +++ b/src/rdkafka_int.h @@ -609,6 +609,7 @@ struct rd_kafka_s { struct { void *handle; /**< Provider-specific handle struct pointer. * Typically assigned in provider's .init() */ + rd_kafka_q_t *callback_q; /**< SASL callback queue, if any. 
*/ } rk_sasl; /* Test mocks */ diff --git a/src/rdkafka_op.h b/src/rdkafka_op.h index 8a0ee0d289..00fdb09400 100644 --- a/src/rdkafka_op.h +++ b/src/rdkafka_op.h @@ -74,6 +74,8 @@ typedef struct rd_kafka_replyq_s { * callback will be triggered * to construct the request * right before it is sent. */ +#define RD_KAFKA_OP_F_FORCE_CB 0x100 /* rko: force callback even if + * op type is eventable. */ typedef enum { RD_KAFKA_OP_NONE, /* No specific type, use OP_CB */ diff --git a/src/rdkafka_queue.h b/src/rdkafka_queue.h index 90216768be..33000fdf8c 100644 --- a/src/rdkafka_queue.h +++ b/src/rdkafka_queue.h @@ -875,6 +875,8 @@ struct rd_kafka_queue_s { }; +rd_kafka_queue_t *rd_kafka_queue_new0 (rd_kafka_t *rk, rd_kafka_q_t *rkq); + void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq); extern int RD_TLS rd_kafka_yield_thread; diff --git a/src/rdkafka_sasl.c b/src/rdkafka_sasl.c index 7579b69eb3..44f46fe262 100644 --- a/src/rdkafka_sasl.c +++ b/src/rdkafka_sasl.c @@ -33,7 +33,7 @@ #include "rdkafka_sasl.h" #include "rdkafka_sasl_int.h" #include "rdkafka_request.h" - +#include "rdkafka_queue.h" /** * @brief Send SASL auth data using legacy directly on socket framing. 
@@ -273,6 +273,12 @@ int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, +rd_kafka_queue_t *rd_kafka_queue_get_sasl (rd_kafka_t *rk) { + if (!rk->rk_sasl.callback_q) + return NULL; + + return rd_kafka_queue_new0(rk, rk->rk_sasl.callback_q); +} /** @@ -329,6 +335,8 @@ void rd_kafka_sasl_term (rd_kafka_t *rk) { if (provider && provider->term) provider->term(rk); + + RD_IF_FREE(rk->rk_sasl.callback_q, rd_kafka_q_destroy_owner); } @@ -432,6 +440,29 @@ int rd_kafka_sasl_select_provider (rd_kafka_t *rk, } +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable (rd_kafka_t *rk) { + rd_kafka_queue_t *saslq, *bgq; + + if (!(saslq = rd_kafka_queue_get_sasl(rk))) + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__NOT_CONFIGURED, + "No SASL mechanism using callbacks is configured"); + + if (!(bgq = rd_kafka_queue_get_background(rk))) { + rd_kafka_queue_destroy(saslq); + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, + "The background thread is not available"); + } + + rd_kafka_queue_forward(saslq, bgq); + + rd_kafka_queue_destroy(saslq); + rd_kafka_queue_destroy(bgq); + + return NULL; +} + /** * Global SASL termination. 
diff --git a/src/rdkafka_sasl_int.h b/src/rdkafka_sasl_int.h index 35f3a6cc7a..583e76f19c 100644 --- a/src/rdkafka_sasl_int.h +++ b/src/rdkafka_sasl_int.h @@ -32,13 +32,13 @@ struct rd_kafka_sasl_provider { const char *name; - /**< Per client-instance (rk) initializer */ + /** Per client-instance (rk) initializer */ int (*init) (rd_kafka_t *rk, char *errstr, size_t errstr_size); - /**< Per client-instance (rk) destructor */ + /** Per client-instance (rk) destructor */ void (*term) (rd_kafka_t *rk); - /**< Returns rd_true if provider is ready to be used, else rd_false */ + /** Returns rd_true if provider is ready to be used, else rd_false */ rd_bool_t (*ready) (rd_kafka_t *rk); int (*client_new) (rd_kafka_transport_t *rktrans, diff --git a/src/rdkafka_sasl_oauthbearer.c b/src/rdkafka_sasl_oauthbearer.c index fe20f99368..3bff8908df 100644 --- a/src/rdkafka_sasl_oauthbearer.c +++ b/src/rdkafka_sasl_oauthbearer.c @@ -84,6 +84,12 @@ typedef struct rd_kafka_sasl_oauthbearer_handle_s { /**< Token refresh timer */ rd_kafka_timer_t token_refresh_tmr; + /** Queue to enqueue token_refresh_cb ops on. */ + rd_kafka_q_t *callback_q; + + /** Using internal refresh callback (sasl.oauthbearer.method=oidc) */ + rd_bool_t internal_refresh; + } rd_kafka_sasl_oauthbearer_handle_t; @@ -185,8 +191,16 @@ static void rd_kafka_oauthbearer_enqueue_token_refresh ( rko = rd_kafka_op_new_cb(handle->rk, RD_KAFKA_OP_OAUTHBEARER_REFRESH, rd_kafka_oauthbearer_refresh_op); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH); + + /* For internal OIDC refresh callback: + * Force op to be handled by internal callback on the + * receiving queue, rather than being passed as an event to + * the application. 
*/ + if (handle->internal_refresh) + rko->rko_flags |= RD_KAFKA_OP_F_FORCE_CB; + handle->wts_enqueued_refresh = rd_uclock(); - rd_kafka_q_enq(handle->rk->rk_rep, rko); + rd_kafka_q_enq(handle->callback_q, rko); } /** @@ -1308,15 +1322,39 @@ static int rd_kafka_sasl_oauthbearer_init (rd_kafka_t *rk, /* Automatically refresh the token if using the builtin * unsecure JWS token refresher, to avoid an initial connection - * stall as we wait for the application to call poll(). - * Otherwise enqueue a refresh callback for the application. */ + * stall as we wait for the application to call poll(). */ if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb == - rd_kafka_oauthbearer_unsecured_token) + rd_kafka_oauthbearer_unsecured_token) { rk->rk_conf.sasl.oauthbearer.token_refresh_cb( rk, rk->rk_conf.sasl.oauthbearer_config, rk->rk_conf.opaque); - else - rd_kafka_oauthbearer_enqueue_token_refresh(handle); + + return 0; + } + + if (rk->rk_conf.sasl.enable_callback_queue) { + /* SASL specific callback queue enabled */ + rk->rk_sasl.callback_q = rd_kafka_q_new(rk); + handle->callback_q = rd_kafka_q_keep(rk->rk_sasl.callback_q); + } else { + /* Use main queue */ + handle->callback_q = rd_kafka_q_keep(rk->rk_rep); + } + + if (rk->rk_conf.sasl.oauthbearer.method == + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC && +#if FIXME /************************ FIXME when .._oidc.c is added ****/ + rk->rk_conf.sasl.oauthbearer.token_refresh_cb == + rd_kafka_sasl_oauthbearer_oidc_token_refresh_cb +#else + 1 +#endif + ) /* move this paren up on the .._refresh_cb + * line when FIXME is fixed. */ + handle->internal_refresh = rd_true; + + /* Otherwise enqueue a refresh callback for the application. 
*/ + rd_kafka_oauthbearer_enqueue_token_refresh(handle); return 0; } @@ -1339,6 +1377,7 @@ static void rd_kafka_sasl_oauthbearer_term (rd_kafka_t *rk) { RD_IF_FREE(handle->token_value, rd_free); rd_list_destroy(&handle->extensions); RD_IF_FREE(handle->errstr, rd_free); + RD_IF_FREE(handle->callback_q, rd_kafka_q_destroy); rwlock_destroy(&handle->lock); diff --git a/tests/0128-sasl_callback_queue.cpp b/tests/0128-sasl_callback_queue.cpp new file mode 100644 index 0000000000..45ab2c8840 --- /dev/null +++ b/tests/0128-sasl_callback_queue.cpp @@ -0,0 +1,114 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2021, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + + +/** + * Verify that background SASL callback queues work by calling + * a non-polling API after client creation. + */ +#include "testcpp.h" + + +namespace { +/* Provide our own token refresh callback */ +class MyCb : public RdKafka::OAuthBearerTokenRefreshCb { +public: + MyCb (): called(false) {} + + void oauthbearer_token_refresh_cb (RdKafka::Handle *handle, + const std::string &oauthbearer_config) { + handle->oauthbearer_set_token_failure("Not implemented by this test, " + "but that's okay"); + called = true; + Test::Say("Callback called!\n"); + } + + bool called; +}; +}; + + +static void do_test (bool use_background_queue) { + SUB_TEST("Use background queue = %s", + use_background_queue ? "yes" : "no"); + + bool expect_called = use_background_queue; + + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + + Test::conf_set(conf, "security.protocol", "SASL_PLAINTEXT"); + Test::conf_set(conf, "sasl.mechanism", "OAUTHBEARER"); + + std::string errstr; + + MyCb mycb; + if (conf->set("oauthbearer_token_refresh_cb", &mycb, errstr)) + Test::Fail("Failed to set refresh callback: " + errstr); + + if (use_background_queue) + if (conf->enable_sasl_queue(true, errstr)) + Test::Fail("Failed to enable SASL queue: " + errstr); + + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); + delete conf; + + if (use_background_queue) { + RdKafka::Error *error = p->sasl_background_callbacks_enable(); + if (error) + Test::Fail("sasl_background_callbacks_enable() failed: " + error->str()); + } + + /* This call should fail since the refresh callback fails, + * and there are no brokers configured anyway. 
*/ + const std::string clusterid = p->clusterid(5*1000); + + TEST_ASSERT(clusterid.empty(), + "Expected clusterid() to fail since the token was not set"); + + if (expect_called) + TEST_ASSERT(mycb.called, + "Expected refresh callback to have been called by now"); + else + TEST_ASSERT(!mycb.called, + "Did not expect refresh callback to have been called"); + + delete p; + + SUB_TEST_PASS(); +} + +extern "C" { + int main_0128_sasl_callback_queue (int argc, char **argv) { + do_test(true); + do_test(false); + + return 0; + } +} diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 4af8fc88f4..34422b9375 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -118,6 +118,7 @@ set( 0124-openssl_invalid_engine.c 0125-immediate_flush.c 0126-oauthbearer_oidc.c + 0128-sasl_callback_queue.cpp 8000-idle.cpp test.c testcpp.cpp diff --git a/tests/interactive_broker_version.py b/tests/interactive_broker_version.py index 30a39280eb..eae8e68662 100755 --- a/tests/interactive_broker_version.py +++ b/tests/interactive_broker_version.py @@ -89,7 +89,7 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt break elif mech == 'OAUTHBEARER': security_protocol='SASL_PLAINTEXT' - os.write(fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n')) + os.write(fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode('ascii'))) os.write(fd, ('sasl.oauthbearer.config=%s\n' % \ 'scope=requiredScope principal=admin').encode('ascii')) else: diff --git a/tests/test.c b/tests/test.c index 91ec31a613..96dc77a673 100644 --- a/tests/test.c +++ b/tests/test.c @@ -239,6 +239,7 @@ _TEST_DECL(0123_connections_max_idle); _TEST_DECL(0124_openssl_invalid_engine); _TEST_DECL(0125_immediate_flush); _TEST_DECL(0126_oauthbearer_oidc); +_TEST_DECL(0128_sasl_callback_queue); /* Manual tests */ _TEST_DECL(8000_idle); @@ -448,6 +449,7 @@ struct test tests[] = { _TEST(0124_openssl_invalid_engine, TEST_F_LOCAL), _TEST(0125_immediate_flush, 0), _TEST(0126_oauthbearer_oidc, 
TEST_BRKVER(3,0,0,0)), + _TEST(0128_sasl_callback_queue, TEST_F_LOCAL, TEST_BRKVER(2,0,0,0)), /* Manual tests */ _TEST(8000_idle, TEST_F_MANUAL), @@ -4177,13 +4179,29 @@ void test_conf_set (rd_kafka_conf_t *conf, const char *name, const char *val) { name, val, errstr); } +/** + * @brief Get configuration value for property \p name. + * + * @param conf Configuration to get value from. If NULL the test.conf (if any) + * configuration will be used. + */ char *test_conf_get (const rd_kafka_conf_t *conf, const char *name) { - static RD_TLS char ret[256]; - size_t ret_sz = sizeof(ret); - if (rd_kafka_conf_get(conf, name, ret, &ret_sz) != RD_KAFKA_CONF_OK) - TEST_FAIL("Failed to get config \"%s\": %s\n", name, - "unknown property"); - return ret; + static RD_TLS char ret[256]; + size_t ret_sz = sizeof(ret); + rd_kafka_conf_t *def_conf = NULL; + + if (!conf) /* Use the current test.conf */ + test_conf_init(&def_conf, NULL, 0); + + if (rd_kafka_conf_get(conf ? conf : def_conf, + name, ret, &ret_sz) != RD_KAFKA_CONF_OK) + TEST_FAIL("Failed to get config \"%s\": %s\n", name, + "unknown property"); + + if (def_conf) + rd_kafka_conf_destroy(def_conf); + + return ret; } diff --git a/tests/test.h b/tests/test.h index 1ee062388e..48c46b4015 100644 --- a/tests/test.h +++ b/tests/test.h @@ -565,7 +565,6 @@ void test_consumer_close (rd_kafka_t *rk); void test_flush (rd_kafka_t *rk, int timeout_ms); void test_conf_set (rd_kafka_conf_t *conf, const char *name, const char *val); -char *test_conf_get (const rd_kafka_conf_t *conf, const char *name); char *test_topic_conf_get (const rd_kafka_topic_conf_t *tconf, const char *name); int test_conf_match (rd_kafka_conf_t *conf, const char *name, const char *val); diff --git a/tests/testshared.h b/tests/testshared.h index d4da82302d..505df5fa65 100644 --- a/tests/testshared.h +++ b/tests/testshared.h @@ -34,6 +34,7 @@ #ifndef _RDKAFKA_H_ typedef struct rd_kafka_s rd_kafka_t; +typedef struct rd_kafka_conf_s rd_kafka_conf_t; #endif /* ANSI color 
codes */ @@ -161,6 +162,7 @@ void test_SKIP (const char *file, int line, const char *str); void test_timeout_set (int timeout); int test_set_special_conf (const char *name, const char *val, int *timeoutp); +char *test_conf_get (const rd_kafka_conf_t *conf, const char *name); const char *test_conf_get_path (void); const char *test_getenv (const char *env, const char *def); diff --git a/win32/tests/tests.vcxproj b/win32/tests/tests.vcxproj index f22fbecda6..6fe10900e4 100644 --- a/win32/tests/tests.vcxproj +++ b/win32/tests/tests.vcxproj @@ -208,6 +208,7 @@ + From e6babf3c5cae3d3e4e16bfeaf01c009d60075b3f Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 5 Oct 2021 22:02:11 +0200 Subject: [PATCH 48/56] Fix test flags for 0122 and 0126 --- tests/test.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test.c b/tests/test.c index 96dc77a673..9aad5a4a5c 100644 --- a/tests/test.c +++ b/tests/test.c @@ -444,11 +444,11 @@ struct test tests[] = { _TEST(0119_consumer_auth, 0, TEST_BRKVER(2,1,0,0)), _TEST(0120_asymmetric_subscription, TEST_F_LOCAL), _TEST(0121_clusterid, TEST_F_LOCAL), - _TEST(0122_buffer_cleaning_after_rebalance, TEST_BRKVER(2,4,0,0)), + _TEST(0122_buffer_cleaning_after_rebalance, 0, TEST_BRKVER(2,4,0,0)), _TEST(0123_connections_max_idle, 0), _TEST(0124_openssl_invalid_engine, TEST_F_LOCAL), _TEST(0125_immediate_flush, 0), - _TEST(0126_oauthbearer_oidc, TEST_BRKVER(3,0,0,0)), + _TEST(0126_oauthbearer_oidc, 0, TEST_BRKVER(3,0,0,0)), _TEST(0128_sasl_callback_queue, TEST_F_LOCAL, TEST_BRKVER(2,0,0,0)), /* Manual tests */ From 53d98659358623107c40968228a58e17b6cfe8b9 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 5 Oct 2021 22:02:40 +0200 Subject: [PATCH 49/56] Test 0119: remove unused code --- tests/0119-consumer_auth.cpp | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/tests/0119-consumer_auth.cpp b/tests/0119-consumer_auth.cpp index b0cd27c52e..b899dba59a 100644 --- a/tests/0119-consumer_auth.cpp 
+++ b/tests/0119-consumer_auth.cpp @@ -33,24 +33,6 @@ #include "testcpp.h" -namespace { -class DrCb : public RdKafka::DeliveryReportCb { - public: - DrCb (RdKafka::ErrorCode exp_err): cnt(0), exp_err(exp_err) {} - - void dr_cb (RdKafka::Message &msg) { - Test::Say("Delivery report: " + RdKafka::err2str(msg.err()) + "\n"); - if (msg.err() != exp_err) - Test::Fail("Delivery report: Expected " + RdKafka::err2str(exp_err) + - " but got " + RdKafka::err2str(msg.err())); - cnt++; - } - - int cnt; - RdKafka::ErrorCode exp_err; -}; -}; - /** * @brief Let FetchRequests fail with authorization failure. * From 5b492e567d9e83d188da14b618600274ead0a217 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Thu, 28 Oct 2021 12:21:20 +0200 Subject: [PATCH 50/56] Direct questions to the github discussions forum to keep issue load down --- .github/ISSUE_TEMPLATE | 2 ++ README.md | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE index eb538b35af..ed7b6165fc 100644 --- a/.github/ISSUE_TEMPLATE +++ b/.github/ISSUE_TEMPLATE @@ -1,5 +1,7 @@ Read the FAQ first: https://github.com/edenhill/librdkafka/wiki/FAQ +Do NOT create issues for questions, use the discussion forum: https://github.com/edenhill/librdkafka/discussions + Description diff --git a/README.md b/README.md index cc6200d62f..2186146887 100644 --- a/README.md +++ b/README.md @@ -158,10 +158,9 @@ Commercial support is available from [Confluent Inc](https://www.confluent.io/) **Only the [last official release](https://github.com/edenhill/librdkafka/releases) is supported for community members.** -File bug reports, feature requests and questions using -[GitHub Issues](https://github.com/edenhill/librdkafka/issues) +File bug reports and feature requests using [GitHub Issues](https://github.com/edenhill/librdkafka/issues). -Questions and discussions are also welcome on the [Confluent Community slack](https://launchpass.com/confluentcommunity) #clients channel. 
+Questions and discussions are welcome on the [Discussions](https://github.com/edenhill/librdkafka/discussions) forum, and on the [Confluent Community slack](https://launchpass.com/confluentcommunity) #clients channel. # Language bindings # From 1c586b1d511c04f7060a7a3c38bdcb0d0d887083 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 19 May 2021 16:56:00 +0200 Subject: [PATCH 51/56] Add clang-format style checking and fixing --- .clang-format | 53 +++++++++++++ .clang-format-cpp | 52 +++++++++++++ .formatignore | 18 +++++ CONTRIBUTING.md | 9 ++- Makefile | 9 +++ lds-gen.py | 5 +- packaging/tools/style-format.sh | 133 ++++++++++++++++++++++++++++++++ 7 files changed, 275 insertions(+), 4 deletions(-) create mode 100644 .clang-format create mode 100644 .clang-format-cpp create mode 100644 .formatignore create mode 100755 packaging/tools/style-format.sh diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000..ccb49ccf9b --- /dev/null +++ b/.clang-format @@ -0,0 +1,53 @@ +--- +BasedOnStyle: LLVM +AlignAfterOpenBracket: Align +AlignConsecutiveMacros: 'true' +AlignConsecutiveAssignments: 'true' +AlignConsecutiveDeclarations: 'false' +AlignEscapedNewlines: Right +AlignOperands: 'true' +AlignTrailingComments: 'true' +AllowAllArgumentsOnNextLine: 'true' +AllowAllConstructorInitializersOnNextLine: 'true' +AllowAllParametersOfDeclarationOnNextLine: 'false' +AllowShortBlocksOnASingleLine: 'false' +AllowShortCaseLabelsOnASingleLine: 'false' +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: Never +AllowShortLoopsOnASingleLine: 'false' +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: 'true' +BinPackArguments: 'true' +BinPackParameters: 'false' +BreakBeforeBraces: Custom +BreakBeforeTernaryOperators: 'true' +BreakConstructorInitializers: AfterColon +BreakStringLiterals: 'true' +ColumnLimit: '80' +DerivePointerAlignment: 'false' +SortIncludes: 'false' +IncludeBlocks: Preserve +IndentCaseLabels: 
'false' +IndentPPDirectives: None +IndentWidth: '8' +Language: Cpp +MaxEmptyLinesToKeep: '3' +PointerAlignment: Right +ReflowComments: 'true' +SpaceAfterCStyleCast: 'false' +SpaceAfterLogicalNot: 'false' +SpaceBeforeAssignmentOperators: 'true' +SpaceBeforeCpp11BracedList: 'true' +SpaceBeforeParens: ControlStatements +SpaceBeforeRangeBasedForLoopColon: 'true' +SpaceInEmptyParentheses: 'false' +SpacesBeforeTrailingComments: '2' +SpacesInAngles: 'false' +SpacesInCStyleCastParentheses: 'false' +SpacesInContainerLiterals: 'false' +SpacesInParentheses: 'false' +SpacesInSquareBrackets: 'false' +TabWidth: '8' +UseTab: Never + +... diff --git a/.clang-format-cpp b/.clang-format-cpp new file mode 100644 index 0000000000..d7bcf00b4f --- /dev/null +++ b/.clang-format-cpp @@ -0,0 +1,52 @@ +--- +BasedOnStyle: Google +AlignConsecutiveMacros: 'true' +AlignConsecutiveAssignments: 'true' +AlignConsecutiveDeclarations: 'false' +AlignEscapedNewlines: Right +AlignOperands: 'true' +AlignTrailingComments: 'true' +AllowAllArgumentsOnNextLine: 'true' +AllowAllConstructorInitializersOnNextLine: 'true' +AllowAllParametersOfDeclarationOnNextLine: 'false' +AllowShortBlocksOnASingleLine: 'false' +AllowShortCaseLabelsOnASingleLine: 'false' +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: Never +AllowShortLoopsOnASingleLine: 'false' +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: 'true' +BinPackArguments: 'true' +BinPackParameters: 'false' +BreakBeforeBraces: Custom +BreakBeforeTernaryOperators: 'true' +BreakConstructorInitializers: AfterColon +BreakStringLiterals: 'true' +ColumnLimit: '80' +DerivePointerAlignment: 'false' +SortIncludes: 'false' +IncludeBlocks: Preserve +IndentCaseLabels: 'false' +IndentPPDirectives: None +IndentWidth: '2' +Language: Cpp +MaxEmptyLinesToKeep: '3' +PointerAlignment: Right +ReflowComments: 'true' +SpaceAfterCStyleCast: 'false' +SpaceAfterLogicalNot: 'false' +SpaceBeforeAssignmentOperators: 'true' 
+SpaceBeforeCpp11BracedList: 'true' +SpaceBeforeParens: ControlStatements +SpaceBeforeRangeBasedForLoopColon: 'true' +SpaceInEmptyParentheses: 'false' +SpacesBeforeTrailingComments: '2' +SpacesInAngles: 'false' +SpacesInCStyleCastParentheses: 'false' +SpacesInContainerLiterals: 'false' +SpacesInParentheses: 'false' +SpacesInSquareBrackets: 'false' +TabWidth: '8' +UseTab: Never + +... diff --git a/.formatignore b/.formatignore new file mode 100644 index 0000000000..7d4a45c7be --- /dev/null +++ b/.formatignore @@ -0,0 +1,18 @@ +# Files to not check/fix coding style for. +# These files are imported from other sources and we want to maintain +# them in the original form to make future updates easier. +src/lz4.c +src/lz4.h +src/lz4frame.c +src/lz4frame.h +src/lz4hc.c +src/lz4hc.h +src/queue.h +src/crc32c.c +src/crc32c.h +src/snappy.c +src/snappy.h +src/snappy_compat.h +src/tinycthread.c +src/tinycthread.h +src/regexp.h diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b8cb2abf0a..61dffca085 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -189,6 +189,9 @@ Use 8 spaces indent, same as the Linux kernel. In emacs, use `c-set-style "linux`. For C++, use Google's C++ style. +Fix formatting issues by running `make style-fix` prior to committing. + + ## Comments Use `/* .. */` comments, not `// ..` @@ -228,7 +231,7 @@ Braces go on the same line as their enveloping statement: .. 
} } - + /* Single line scopes should not have braces */ if (1) hi(); @@ -258,12 +261,12 @@ All expression parentheses should be prefixed and suffixed with a single space: Use space around operators: int a = 2; - + if (b >= 3) c += 2; Except for these: - + d++; --e; diff --git a/Makefile b/Makefile index cdc19e7b3e..9878d11d85 100755 --- a/Makefile +++ b/Makefile @@ -105,3 +105,12 @@ coverity: Makefile.config tar cvzf ../cov-librdkafka.tgz cov-int && \ printf "$(MKL_GREEN)Now upload cov-librdkafka.tgz to Coverity for analysis$(MKL_CLR_RESET)\n") + +style-check: + @(packaging/tools/style-format.sh \ + $$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h)$$') ) + +style-fix: + @(packaging/tools/style-format.sh --fix \ + $$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h)$$')) + diff --git a/lds-gen.py b/lds-gen.py index a0c701c2e2..cb6bf8dc66 100755 --- a/lds-gen.py +++ b/lds-gen.py @@ -40,7 +40,10 @@ last_line = '' for line in sys.stdin: - m = re.match(r'^(\S+.*\s+\**)?(rd_kafka_\S+)\s*\([^)]', line) + if line.startswith('typedef'): + last_line = line + continue + m = re.match(r'^(\S+.*\s+\**)?(rd_kafka_[\w_]+)\s*\([^)]', line) if m: sym = m.group(2) # Ignore static (unused) functions diff --git a/packaging/tools/style-format.sh b/packaging/tools/style-format.sh new file mode 100755 index 0000000000..983e0b46b6 --- /dev/null +++ b/packaging/tools/style-format.sh @@ -0,0 +1,133 @@ +#!/bin/bash +# +# Check or apply/fix the project coding style to all files passed as arguments. +# + +set -e + +ret=0 + +if [[ -z $1 ]]; then + echo "Usage: $0 [--fix] srcfile1.c srcfile2.h srcfile3.c ..." + echo "" + exit 0 +fi + +if [[ $1 == "--fix" ]]; then + fix=1 + shift +else + fix=0 +fi + +function ignore { + local file=${1//q./\.} + + grep -q "^$file$" .formatignore +} + +# Read the C++ style from src-cpp/.clang-format and store it +# in a json-like string which is passed to --style. +# (It would be great if clang-format could take a file path for the +# format file..). 
+cpp_style="{ $(grep -v '^...$' .clang-format-cpp | grep -v '^$' | tr '\n' ',' | sed -e 's/,$//') }" +if [[ -z $cpp_style ]]; then + echo "$0: Unable to read .clang-format-cpp" + exit 1 +fi + +extra_info="" + +for f in $*; do + + if ignore $f ; then + echo "$f is ignored by .formatignore" 1>&2 + continue + fi + + if [[ $f == *.cpp ]]; then + style="$cpp_style" + stylename="C++" + elif [[ $f == *.h && $(basename $f) == *cpp* ]]; then + style="$cpp_style" + stylename="C++ (header)" + elif [[ $f == *.py ]]; then + lang="py" + style="pep8" + stylename="pep8" + else + style="file" # Use .clang-format + stylename="C" + fi + + if [[ $fix == 0 ]]; then + # Check for tabs + if grep -q $'\t' "$f" ; then + echo "$f: contains tabs: convert to 8 spaces instead" + ret=1 + fi + + # Check style + if ! clang-format --style=$style --dry-run "$f" ; then + echo "$f: had style errors ($style): see clang-format output above" + ret=1 + fi + + else + # Convert tabs to spaces first. + sed -i -e 's/\t/ /g' "$f" + + if [[ $lang == c ]]; then + # Run clang-format to reformat the file + clang-format --style="$style" "$f" > _styletmp + + else + # Run autopep8 to reformat the file. + python3 -m autopep8 -a "$f" > _styletmp + # autopep8 can't fix all errors, so we also perform a flake8 check. + check=1 + fi + + if ! cmp -s "$f" _styletmp; then + echo "$f: style fixed ($stylename)" + # Use cp to preserve target file mode/attrs. + cp _styletmp "$f" + rm _styletmp + fi + fi + + if [[ $fix == 0 || $check == 1 ]]; then + # Check for tabs + if grep -q $'\t' "$f" ; then + echo "$f: contains tabs: convert to 8 spaces instead" + ret=1 + fi + + # Check style + if [[ $lang == c ]]; then + if ! clang-format --style="$style" --dry-run "$f" ; then + echo "$f: had style errors ($stylename): see clang-format output above" + ret=1 + fi + elif [[ $lang == py ]]; then + if ! 
python3 -m flake8 "$f"; then + echo "$f: had style errors ($stylename): see flake8 output above" + if [[ $fix == 1 ]]; then + # autopep8 couldn't fix all errors. Let the user know. + extra_info="Error: autopep8 could not fix all errors, fix the flake8 errors manually and run again." + fi + ret=1 + fi + fi + fi + +done + +rm -f _styletmp + +if [[ $ret != 0 ]]; then + echo "You can run the following command to automatically fix the style:" + echo " $ $0 --fix $*" +fi + +exit $ret From f357c0bd01a89db6ca57e1e43e65c4b5808119b4 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 25 Aug 2021 09:24:37 +0200 Subject: [PATCH 52/56] Add Python style checking and fixing --- CONTRIBUTING.md | 11 +++++++++++ Makefile | 4 ++-- packaging/tools/requirements.txt | 2 ++ packaging/tools/style-format.sh | 33 ++++++++++++++++---------------- 4 files changed, 32 insertions(+), 18 deletions(-) create mode 100644 packaging/tools/requirements.txt diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 61dffca085..11665b3e00 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -33,6 +33,17 @@ When writing C code, follow the code style already established in the project. Consistent style makes code easier to read and mistakes less likely to happen. +clang-format is used to check, and fix, the style for C/C++ files, +while flake8 and autopep8 is used for the Python scripts. + +You should check the style before committing by running `make style-check` +from the top-level directory, and if any style errors are reported you can +automatically fix them using `make style-fix`. + +The Python code may need some manual fixing since autopep8 is unable to fix +all warnings reported by flake8, in particular it will not split long lines, +in which case a ` # noqa: E501` may be needed to turn off the warning. + See the end of this document for the C style guide to use in librdkafka. 
diff --git a/Makefile b/Makefile index 9878d11d85..ee2c8c80d0 100755 --- a/Makefile +++ b/Makefile @@ -108,9 +108,9 @@ coverity: Makefile.config style-check: @(packaging/tools/style-format.sh \ - $$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h)$$') ) + $$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h|py)$$') ) style-fix: @(packaging/tools/style-format.sh --fix \ - $$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h)$$')) + $$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h|py)$$')) diff --git a/packaging/tools/requirements.txt b/packaging/tools/requirements.txt new file mode 100644 index 0000000000..43603098a2 --- /dev/null +++ b/packaging/tools/requirements.txt @@ -0,0 +1,2 @@ +flake8 +autopep8 diff --git a/packaging/tools/style-format.sh b/packaging/tools/style-format.sh index 983e0b46b6..b6d0fefda2 100755 --- a/packaging/tools/style-format.sh +++ b/packaging/tools/style-format.sh @@ -1,6 +1,7 @@ #!/bin/bash # # Check or apply/fix the project coding style to all files passed as arguments. +# Uses clang-format for C/C++ and flake8 for Python. # set -e @@ -20,10 +21,18 @@ else fix=0 fi +# Get list of files from .formatignore to ignore formatting for. +ignore_files=( $(grep '^[^#]..' .formatignore) ) + function ignore { - local file=${1//q./\.} + local file=$1 + + local f + for f in "${ignore_files[@]}" ; do + [[ $file == $f ]] && return 0 + done - grep -q "^$file$" .formatignore + return 1 } # Read the C++ style from src-cpp/.clang-format and store it @@ -45,6 +54,7 @@ for f in $*; do continue fi + lang="c" if [[ $f == *.cpp ]]; then style="$cpp_style" stylename="C++" @@ -60,20 +70,9 @@ for f in $*; do stylename="C" fi - if [[ $fix == 0 ]]; then - # Check for tabs - if grep -q $'\t' "$f" ; then - echo "$f: contains tabs: convert to 8 spaces instead" - ret=1 - fi + check=0 - # Check style - if ! 
clang-format --style=$style --dry-run "$f" ; then - echo "$f: had style errors ($style): see clang-format output above" - ret=1 - fi - - else + if [[ $fix == 1 ]]; then # Convert tabs to spaces first. sed -i -e 's/\t/ /g' "$f" @@ -126,8 +125,10 @@ done rm -f _styletmp if [[ $ret != 0 ]]; then + echo "" echo "You can run the following command to automatically fix the style:" - echo " $ $0 --fix $*" + echo " $ make style-fix" + [[ -n $extra_info ]] && echo "$extra_info" fi exit $ret From 18452517dcaf71806882fe426e8868f5c7a2554d Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Wed, 25 Aug 2021 15:40:15 +0200 Subject: [PATCH 53/56] Run style-checker with Github Actions --- .github/workflows/base.yml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 .github/workflows/base.yml diff --git a/.github/workflows/base.yml b/.github/workflows/base.yml new file mode 100644 index 0000000000..ba888bb2f4 --- /dev/null +++ b/.github/workflows/base.yml @@ -0,0 +1,30 @@ +name: check +on: [push, pull_request] +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: | + sudo apt install -y python3 python3-pip python3-setuptools libcurl4-openssl-dev libssl-dev libsasl2-dev + python3 -m pip install -r tests/requirements.txt + - run: | + ./configure --CFLAGS="-std=c99" --CXXFLAGS="-std=c++98" --install-deps --enable-devel --disable-lz4-ext --prefix="$PWD/dest" + - run: | + make -j + make -C tests -j build + - run: | + examples/rdkafka_example -V || true + examples/rdkafka_example -X builtin.features + - run: | + make -C tests run_local_quick + + style: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: | + sudo apt install -y python3 python3-pip python3-setuptools clang-format + python3 -m pip install -r packaging/tools/requirements.txt + - name: Style checker + run: make style-check From 2f331f7c2ece4b76d5e0f07c63c5342febd86db6 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Thu, 28 Oct 
2021 18:01:18 +0200 Subject: [PATCH 54/56] Automatic style fixes using 'make style-fix' --- examples/consumer.c | 45 +- examples/delete_records.c | 72 +- examples/idempotent_producer.c | 70 +- examples/kafkatest_verifiable_client.cpp | 576 +- examples/openssl_engine_example.cpp | 337 +- examples/producer.c | 76 +- examples/producer.cpp | 83 +- examples/rdkafka_complex_consumer_example.c | 545 +- examples/rdkafka_complex_consumer_example.cpp | 347 +- examples/rdkafka_consume_batch.cpp | 104 +- examples/rdkafka_example.c | 1107 ++-- examples/rdkafka_example.cpp | 428 +- examples/rdkafka_performance.c | 1797 +++--- examples/transactions-older-broker.c | 198 +- examples/transactions.c | 168 +- examples/win_ssl_cert_store.cpp | 695 ++- lds-gen.py | 4 +- packaging/cmake/try_compile/atomic_32_test.c | 4 +- packaging/cmake/try_compile/atomic_64_test.c | 4 +- packaging/cmake/try_compile/c11threads_test.c | 18 +- packaging/cmake/try_compile/crc32c_hw_test.c | 39 +- packaging/cmake/try_compile/dlopen_test.c | 2 +- .../try_compile/pthread_setname_darwin_test.c | 4 +- .../pthread_setname_freebsd_test.c | 4 +- .../try_compile/pthread_setname_gnu_test.c | 2 +- packaging/cmake/try_compile/rand_r_test.c | 6 +- packaging/cmake/try_compile/regex_test.c | 10 +- packaging/cmake/try_compile/strndup_test.c | 2 +- packaging/cmake/try_compile/sync_32_test.c | 4 +- packaging/cmake/try_compile/sync_64_test.c | 4 +- packaging/cp/check_features.c | 28 +- packaging/nuget/artifact.py | 24 +- packaging/nuget/cleanup-s3.py | 4 +- packaging/nuget/packaging.py | 301 +- packaging/nuget/release.py | 43 +- packaging/nuget/zfile/zfile.py | 16 +- packaging/rpm/tests/test.c | 47 +- packaging/rpm/tests/test.cpp | 2 +- packaging/tools/gh-release-checksums.py | 5 +- src-cpp/ConfImpl.cpp | 19 +- src-cpp/ConsumerImpl.cpp | 196 +- src-cpp/HandleImpl.cpp | 262 +- src-cpp/HeadersImpl.cpp | 5 +- src-cpp/KafkaConsumerImpl.cpp | 99 +- src-cpp/MessageImpl.cpp | 4 +- src-cpp/MetadataImpl.cpp | 88 +- 
src-cpp/ProducerImpl.cpp | 151 +- src-cpp/QueueImpl.cpp | 24 +- src-cpp/RdKafka.cpp | 14 +- src-cpp/TopicImpl.cpp | 48 +- src-cpp/TopicPartitionImpl.cpp | 22 +- src-cpp/rdkafkacpp.h | 1659 +++--- src-cpp/rdkafkacpp_int.h | 1218 ++-- src/cJSON.c | 4525 +++++++-------- src/cJSON.h | 369 +- src/rd.h | 256 +- src/rdaddr.c | 347 +- src/rdaddr.h | 143 +- src/rdatomic.h | 168 +- src/rdavg.h | 75 +- src/rdavl.c | 82 +- src/rdavl.h | 98 +- src/rdbuf.c | 808 +-- src/rdbuf.h | 236 +- src/rdcrc32.c | 136 +- src/rdcrc32.h | 47 +- src/rddl.c | 30 +- src/rddl.h | 10 +- src/rdendian.h | 111 +- src/rdfloat.h | 11 +- src/rdfnv1a.c | 75 +- src/rdfnv1a.h | 6 +- src/rdgz.c | 190 +- src/rdgz.h | 31 +- src/rdhdrhistogram.c | 308 +- src/rdhdrhistogram.h | 61 +- src/rdhttp.c | 88 +- src/rdhttp.h | 23 +- src/rdinterval.h | 48 +- src/rdkafka.c | 3745 ++++++------ src/rdkafka.h | 2395 ++++---- src/rdkafka_admin.c | 1786 +++--- src/rdkafka_admin.h | 156 +- src/rdkafka_assignment.c | 438 +- src/rdkafka_assignment.h | 26 +- src/rdkafka_assignor.c | 957 ++-- src/rdkafka_assignor.h | 230 +- src/rdkafka_aux.c | 61 +- src/rdkafka_aux.h | 33 +- src/rdkafka_background.c | 32 +- src/rdkafka_broker.c | 4191 +++++++------- src/rdkafka_broker.h | 557 +- src/rdkafka_buf.c | 279 +- src/rdkafka_buf.h | 1094 ++-- src/rdkafka_cert.c | 361 +- src/rdkafka_cert.h | 22 +- src/rdkafka_cgrp.c | 3208 +++++------ src/rdkafka_cgrp.h | 318 +- src/rdkafka_conf.c | 4931 ++++++++-------- src/rdkafka_conf.h | 611 +- src/rdkafka_confval.h | 49 +- src/rdkafka_coord.c | 213 +- src/rdkafka_coord.h | 108 +- src/rdkafka_error.c | 70 +- src/rdkafka_error.h | 38 +- src/rdkafka_event.c | 346 +- src/rdkafka_event.h | 89 +- src/rdkafka_feature.c | 691 ++- src/rdkafka_feature.h | 59 +- src/rdkafka_header.c | 82 +- src/rdkafka_header.h | 10 +- src/rdkafka_idempotence.c | 192 +- src/rdkafka_idempotence.h | 79 +- src/rdkafka_int.h | 802 ++- src/rdkafka_interceptor.c | 524 +- src/rdkafka_interceptor.h | 113 +- src/rdkafka_lz4.c | 154 +- 
src/rdkafka_lz4.h | 20 +- src/rdkafka_metadata.c | 750 ++- src/rdkafka_metadata.h | 246 +- src/rdkafka_metadata_cache.c | 254 +- src/rdkafka_mock.c | 968 ++-- src/rdkafka_mock.h | 106 +- src/rdkafka_mock_cgrp.c | 221 +- src/rdkafka_mock_handlers.c | 466 +- src/rdkafka_mock_int.h | 298 +- src/rdkafka_msg.c | 1327 +++-- src/rdkafka_msg.h | 395 +- src/rdkafka_msgbatch.h | 42 +- src/rdkafka_msgset.h | 40 +- src/rdkafka_msgset_reader.c | 1053 ++-- src/rdkafka_msgset_writer.c | 713 ++- src/rdkafka_offset.c | 921 ++- src/rdkafka_offset.h | 65 +- src/rdkafka_op.c | 752 ++- src/rdkafka_op.h | 595 +- src/rdkafka_partition.c | 2853 +++++----- src/rdkafka_partition.h | 1212 ++-- src/rdkafka_pattern.c | 92 +- src/rdkafka_pattern.h | 54 +- src/rdkafka_plugin.c | 82 +- src/rdkafka_plugin.h | 12 +- src/rdkafka_proto.h | 555 +- src/rdkafka_protocol.h | 122 +- src/rdkafka_queue.c | 592 +- src/rdkafka_queue.h | 635 ++- src/rdkafka_range_assignor.c | 134 +- src/rdkafka_request.c | 2353 ++++---- src/rdkafka_request.h | 555 +- src/rdkafka_roundrobin_assignor.c | 98 +- src/rdkafka_sasl.c | 149 +- src/rdkafka_sasl.h | 50 +- src/rdkafka_sasl_cyrus.c | 293 +- src/rdkafka_sasl_int.h | 44 +- src/rdkafka_sasl_oauthbearer.c | 751 ++- src/rdkafka_sasl_oauthbearer.h | 27 +- src/rdkafka_sasl_plain.c | 50 +- src/rdkafka_sasl_scram.c | 460 +- src/rdkafka_sasl_win32.c | 337 +- src/rdkafka_ssl.c | 544 +- src/rdkafka_ssl.h | 40 +- src/rdkafka_sticky_assignor.c | 2194 ++++--- src/rdkafka_subscription.c | 105 +- src/rdkafka_timer.c | 273 +- src/rdkafka_timer.h | 113 +- src/rdkafka_topic.c | 1113 ++-- src/rdkafka_topic.h | 267 +- src/rdkafka_transport.c | 818 ++- src/rdkafka_transport.h | 89 +- src/rdkafka_transport_int.h | 76 +- src/rdkafka_txnmgr.c | 1247 ++-- src/rdkafka_txnmgr.h | 71 +- src/rdkafka_zstd.c | 58 +- src/rdkafka_zstd.h | 43 +- src/rdlist.c | 270 +- src/rdlist.h | 155 +- src/rdlog.c | 86 +- src/rdlog.h | 37 +- src/rdmap.c | 216 +- src/rdmap.h | 219 +- src/rdmurmur2.c | 94 +- 
src/rdmurmur2.h | 6 +- src/rdports.c | 65 +- src/rdports.h | 60 +- src/rdposix.h | 153 +- src/rdrand.c | 78 +- src/rdrand.h | 30 +- src/rdregex.c | 110 +- src/rdregex.h | 13 +- src/rdsignal.h | 54 +- src/rdstring.c | 416 +- src/rdstring.h | 53 +- src/rdsysqueue.h | 412 +- src/rdtime.h | 185 +- src/rdtypes.h | 38 +- src/rdunittest.c | 318 +- src/rdunittest.h | 123 +- src/rdvarint.c | 92 +- src/rdvarint.h | 36 +- src/rdwin32.h | 198 +- src/rdxxhash.c | 1685 +++--- src/rdxxhash.h | 354 +- src/regexp.c | 2179 +++---- src/tinycthread_extra.c | 29 +- src/tinycthread_extra.h | 127 +- src/win32_config.h | 74 +- tests/0000-unittests.c | 8 +- tests/0001-multiobj.c | 95 +- tests/0002-unkpart.c | 201 +- tests/0003-msgmaxsize.c | 91 +- tests/0004-conf.c | 701 +-- tests/0005-order.c | 160 +- tests/0006-symbols.c | 116 +- tests/0007-autotopic.c | 175 +- tests/0008-reqacks.c | 123 +- tests/0009-mock_cluster.c | 9 +- tests/0011-produce_batch.c | 288 +- tests/0012-produce_consume.c | 796 +-- tests/0013-null-msgs.c | 656 +-- tests/0014-reconsume-191.c | 572 +- tests/0015-offset_seeks.c | 131 +- tests/0016-client_swname.c | 71 +- tests/0017-compression.c | 112 +- tests/0018-cgrp_term.c | 338 +- tests/0019-list_groups.c | 154 +- tests/0020-destroy_hang.c | 168 +- tests/0021-rkt_destroy.c | 19 +- tests/0022-consume_batch.c | 42 +- tests/0025-timers.c | 57 +- tests/0026-consume_pause.c | 354 +- tests/0028-long_topicnames.c | 42 +- tests/0029-assign_offset.c | 280 +- tests/0030-offset_commit.c | 765 +-- tests/0031-get_offsets.c | 141 +- tests/0033-regex_subscribe.c | 709 ++- tests/0034-offset_reset.c | 339 +- tests/0035-api_version.c | 47 +- tests/0036-partial_fetch.c | 66 +- tests/0037-destroy_hang_local.c | 59 +- tests/0038-performance.c | 150 +- tests/0039-event.c | 241 +- tests/0040-io_event.c | 340 +- tests/0041-fetch_max_bytes.c | 67 +- tests/0042-many_topics.c | 279 +- tests/0043-no_connection.c | 52 +- tests/0044-partition_cnt.c | 62 +- tests/0045-subscribe_update.c | 492 +- 
tests/0046-rkt_cache.c | 30 +- tests/0047-partial_buf_tmout.c | 67 +- tests/0048-partitioner.c | 243 +- tests/0049-consume_conn_close.c | 27 +- tests/0050-subscribe_adds.c | 26 +- tests/0051-assign_adds.c | 32 +- tests/0052-msg_timestamps.c | 128 +- tests/0053-stats_cb.cpp | 289 +- tests/0054-offset_time.cpp | 148 +- tests/0055-producer_latency.c | 159 +- tests/0056-balanced_group_mt.c | 74 +- tests/0057-invalid_topic.cpp | 48 +- tests/0058-log.cpp | 151 +- tests/0059-bsearch.cpp | 74 +- tests/0060-op_prio.cpp | 38 +- tests/0061-consumer_lag.cpp | 140 +- tests/0062-stats_event.c | 156 +- tests/0063-clusterid.cpp | 36 +- tests/0064-interceptors.c | 185 +- tests/0065-yield.cpp | 41 +- tests/0066-plugins.cpp | 51 +- tests/0067-empty_topic.cpp | 50 +- tests/0068-produce_timeout.c | 43 +- tests/0069-consumer_add_parts.c | 17 +- tests/0070-null_empty.cpp | 106 +- tests/0072-headers_ut.c | 260 +- tests/0073-headers.c | 195 +- tests/0074-producev.c | 23 +- tests/0075-retry.c | 79 +- tests/0076-produce_retry.c | 119 +- tests/0077-compaction.c | 170 +- tests/0078-c_from_cpp.cpp | 80 +- tests/0079-fork.c | 21 +- tests/0080-admin_ut.c | 446 +- tests/0081-admin.c | 1075 ++-- tests/0082-fetch_max_bytes.cpp | 51 +- tests/0083-cb_event.c | 104 +- tests/0084-destroy_flags.c | 88 +- tests/0085-headers.cpp | 235 +- tests/0086-purge.c | 151 +- tests/0088-produce_metadata_timeout.c | 53 +- tests/0089-max_poll_interval.c | 71 +- tests/0090-idempotence.c | 47 +- tests/0091-max_poll_interval_timeout.c | 74 +- tests/0092-mixed_msgver.c | 37 +- tests/0093-holb.c | 50 +- tests/0094-idempotence_msg_timeout.c | 59 +- tests/0095-all_brokers_down.cpp | 108 +- tests/0097-ssl_verify.cpp | 322 +- tests/0098-consumer-txn.cpp | 554 +- tests/0099-commit_metadata.c | 97 +- tests/0100-thread_interceptors.cpp | 84 +- tests/0101-fetch-from-follower.cpp | 279 +- tests/0102-static_group_rebalance.c | 135 +- tests/0103-transactions.c | 427 +- tests/0104-fetch_from_follower_mock.c | 89 +- 
tests/0105-transactions_mock.c | 1407 +++-- tests/0106-cgrp_sess_timeout.c | 106 +- tests/0107-topic_recreate.c | 60 +- tests/0109-auto_create_topics.cpp | 140 +- tests/0110-batch_size.cpp | 94 +- tests/0111-delay_create_topics.cpp | 44 +- tests/0112-assign_unknown_part.c | 14 +- tests/0113-cooperative_rebalance.cpp | 2334 ++++---- tests/0114-sticky_partitioning.cpp | 101 +- tests/0115-producer_auth.cpp | 110 +- tests/0116-kafkaconsumer_close.cpp | 48 +- tests/0117-mock_errors.c | 89 +- tests/0118-commit_rebalance.c | 31 +- tests/0119-consumer_auth.cpp | 94 +- tests/0120-asymmetric_subscription.c | 69 +- tests/0121-clusterid.c | 15 +- tests/0122-buffer_cleaning_after_rebalance.c | 94 +- tests/0123-connections_max_idle.c | 12 +- tests/0124-openssl_invalid_engine.c | 20 +- tests/0125-immediate_flush.c | 14 +- tests/0126-oauthbearer_oidc.c | 27 +- tests/0128-sasl_callback_queue.cpp | 33 +- tests/1000-unktopic.c | 231 +- tests/8000-idle.cpp | 11 +- tests/LibrdkafkaTestApp.py | 98 +- tests/broker_version_tests.py | 98 +- tests/cluster_testing.py | 64 +- tests/fuzzers/fuzz_regex.c | 80 +- tests/fuzzers/helpers.h | 66 +- tests/interactive_broker_version.py | 134 +- tests/interceptor_test/interceptor_test.c | 127 +- tests/interceptor_test/interceptor_test.h | 33 +- tests/performance_plot.py | 33 +- tests/plugin_test/plugin_test.c | 10 +- tests/rusage.c | 98 +- tests/sasl_test.py | 64 +- tests/sockem.c | 239 +- tests/sockem.h | 36 +- tests/sockem_ctrl.c | 28 +- tests/sockem_ctrl.h | 22 +- tests/test.c | 5018 +++++++++-------- tests/test.h | 1097 ++-- tests/testcpp.cpp | 45 +- tests/testcpp.h | 513 +- tests/testshared.h | 391 +- tests/tools/stats/graph.py | 2 +- tests/xxxx-assign_partition.c | 70 +- tests/xxxx-metadata.cpp | 231 +- win32/wingetopt.c | 858 +-- win32/wingetopt.h | 55 +- win32/wintime.h | 23 +- 354 files changed, 57834 insertions(+), 58531 deletions(-) diff --git a/examples/consumer.c b/examples/consumer.c index fe8aa15f0c..9e1eb173d6 100644 --- 
a/examples/consumer.c +++ b/examples/consumer.c @@ -49,7 +49,7 @@ static volatile sig_atomic_t run = 1; /** * @brief Signal termination of program */ -static void stop (int sig) { +static void stop(int sig) { run = 0; } @@ -58,10 +58,10 @@ static void stop (int sig) { /** * @returns 1 if all bytes are printable, else 0. */ -static int is_printable (const char *buf, size_t size) { +static int is_printable(const char *buf, size_t size) { size_t i; - for (i = 0 ; i < size ; i++) + for (i = 0; i < size; i++) if (!isprint((int)buf[i])) return 0; @@ -69,7 +69,7 @@ static int is_printable (const char *buf, size_t size) { } -int main (int argc, char **argv) { +int main(int argc, char **argv) { rd_kafka_t *rk; /* Consumer instance handle */ rd_kafka_conf_t *conf; /* Temporary configuration object */ rd_kafka_resp_err_t err; /* librdkafka API error code */ @@ -107,8 +107,8 @@ int main (int argc, char **argv) { * host or host:port (default port 9092). * librdkafka will use the bootstrap brokers to acquire the full * set of brokers from the cluster. */ - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%s\n", errstr); rd_kafka_conf_destroy(conf); return 1; @@ -119,8 +119,8 @@ int main (int argc, char **argv) { * group, and the subscribed topic' partitions will be assigned * according to the partition.assignment.strategy * (consumer config property) to the consumers in the group. */ - if (rd_kafka_conf_set(conf, "group.id", groupid, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "group.id", groupid, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%s\n", errstr); rd_kafka_conf_destroy(conf); return 1; @@ -131,8 +131,8 @@ int main (int argc, char **argv) { * in the partition to start fetching messages. 
* By setting this to earliest the consumer will read all messages * in the partition if there was no previously committed offset. */ - if (rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%s\n", errstr); rd_kafka_conf_destroy(conf); return 1; @@ -147,8 +147,8 @@ int main (int argc, char **argv) { */ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); if (!rk) { - fprintf(stderr, - "%% Failed to create new consumer: %s\n", errstr); + fprintf(stderr, "%% Failed to create new consumer: %s\n", + errstr); return 1; } @@ -169,9 +169,8 @@ int main (int argc, char **argv) { /* Convert the list of topics to a format suitable for librdkafka */ subscription = rd_kafka_topic_partition_list_new(topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - rd_kafka_topic_partition_list_add(subscription, - topics[i], + for (i = 0; i < topic_cnt; i++) + rd_kafka_topic_partition_list_add(subscription, topics[i], /* the partition is ignored * by subscribe() */ RD_KAFKA_PARTITION_UA); @@ -179,8 +178,7 @@ int main (int argc, char **argv) { /* Subscribe to the list of topics */ err = rd_kafka_subscribe(rk, subscription); if (err) { - fprintf(stderr, - "%% Failed to subscribe to %d topics: %s\n", + fprintf(stderr, "%% Failed to subscribe to %d topics: %s\n", subscription->cnt, rd_kafka_err2str(err)); rd_kafka_topic_partition_list_destroy(subscription); rd_kafka_destroy(rk); @@ -220,29 +218,28 @@ int main (int argc, char **argv) { /* Consumer errors are generally to be considered * informational as the consumer will automatically * try to recover from all types of errors. */ - fprintf(stderr, - "%% Consumer error: %s\n", + fprintf(stderr, "%% Consumer error: %s\n", rd_kafka_message_errstr(rkm)); rd_kafka_message_destroy(rkm); continue; } /* Proper message. 
*/ - printf("Message on %s [%"PRId32"] at offset %"PRId64":\n", + printf("Message on %s [%" PRId32 "] at offset %" PRId64 ":\n", rd_kafka_topic_name(rkm->rkt), rkm->partition, rkm->offset); /* Print the message key. */ if (rkm->key && is_printable(rkm->key, rkm->key_len)) - printf(" Key: %.*s\n", - (int)rkm->key_len, (const char *)rkm->key); + printf(" Key: %.*s\n", (int)rkm->key_len, + (const char *)rkm->key); else if (rkm->key) printf(" Key: (%d bytes)\n", (int)rkm->key_len); /* Print the message value/payload. */ if (rkm->payload && is_printable(rkm->payload, rkm->len)) - printf(" Value: %.*s\n", - (int)rkm->len, (const char *)rkm->payload); + printf(" Value: %.*s\n", (int)rkm->len, + (const char *)rkm->payload); else if (rkm->payload) printf(" Value: (%d bytes)\n", (int)rkm->len); diff --git a/examples/delete_records.c b/examples/delete_records.c index f0c55dd947..2660996a57 100644 --- a/examples/delete_records.c +++ b/examples/delete_records.c @@ -43,15 +43,15 @@ #include "rdkafka.h" -static rd_kafka_queue_t *queue; /** Admin result queue. - * This is a global so we can - * yield in stop() */ +static rd_kafka_queue_t *queue; /** Admin result queue. + * This is a global so we can + * yield in stop() */ static volatile sig_atomic_t run = 1; /** * @brief Signal termination of program */ -static void stop (int sig) { +static void stop(int sig) { if (!run) { fprintf(stderr, "%% Forced termination\n"); exit(2); @@ -64,11 +64,11 @@ static void stop (int sig) { /** * @brief Parse an integer or fail. 
*/ -int64_t parse_int (const char *what, const char *str) { +int64_t parse_int(const char *what, const char *str) { char *end; unsigned long n = strtoull(str, &end, 0); - if (end != str+strlen(str)) { + if (end != str + strlen(str)) { fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", what, str); exit(1); @@ -78,27 +78,28 @@ int64_t parse_int (const char *what, const char *str) { } -int main (int argc, char **argv) { - rd_kafka_conf_t *conf; /* Temporary configuration object */ - char errstr[512]; /* librdkafka API error reporting buffer */ - const char *brokers; /* Argument: broker list */ - rd_kafka_t *rk; /* Admin client instance */ +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /* Temporary configuration object */ + char errstr[512]; /* librdkafka API error reporting buffer */ + const char *brokers; /* Argument: broker list */ + rd_kafka_t *rk; /* Admin client instance */ rd_kafka_topic_partition_list_t *offsets_before; /* Delete messages up * to but not * including these * offsets */ rd_kafka_DeleteRecords_t *del_records; /* Container for offsets_before*/ - rd_kafka_AdminOptions_t *options; /* (Optional) Options for - * DeleteRecords() */ - rd_kafka_event_t *event; /* DeleteRecords result event */ + rd_kafka_AdminOptions_t *options; /* (Optional) Options for + * DeleteRecords() */ + rd_kafka_event_t *event; /* DeleteRecords result event */ int exitcode = 0; int i; /* * Argument validation */ - if (argc < 5 || (argc-2) % 3 != 0) { - fprintf(stderr, "%% Usage: %s " + if (argc < 5 || (argc - 2) % 3 != 0) { + fprintf(stderr, + "%% Usage: %s " " " " ...\n" "\n" @@ -112,15 +113,15 @@ int main (int argc, char **argv) { brokers = argv[1]; /* Parse topic partition offset tuples and add to offsets list */ - offsets_before = rd_kafka_topic_partition_list_new((argc-2) / 3); - for (i = 2 ; i < argc ; i += 3) { + offsets_before = rd_kafka_topic_partition_list_new((argc - 2) / 3); + for (i = 2; i < argc; i += 3) { const char *topic = argv[i]; - int 
partition = parse_int("partition", argv[i+1]); - int64_t offset = parse_int("offset_before", argv[i+2]); + int partition = parse_int("partition", argv[i + 1]); + int64_t offset = parse_int("offset_before", argv[i + 2]); - rd_kafka_topic_partition_list_add(offsets_before, - topic, - partition)->offset = offset; + rd_kafka_topic_partition_list_add(offsets_before, topic, + partition) + ->offset = offset; } /* @@ -132,8 +133,8 @@ int main (int argc, char **argv) { * host or host:port (default port 9092). * librdkafka will use the bootstrap brokers to acquire the full * set of brokers from the cluster. */ - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%s\n", errstr); return 1; } @@ -150,8 +151,8 @@ int main (int argc, char **argv) { */ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); if (!rk) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", errstr); + fprintf(stderr, "%% Failed to create new producer: %s\n", + errstr); return 1; } @@ -163,11 +164,10 @@ int main (int argc, char **argv) { signal(SIGINT, stop); /* Set timeout (optional) */ - options = rd_kafka_AdminOptions_new(rk, - RD_KAFKA_ADMIN_OP_DELETERECORDS); - if (rd_kafka_AdminOptions_set_request_timeout(options, - 30 * 1000 /* 30s */, - errstr, sizeof(errstr))) { + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETERECORDS); + if (rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) { fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); return 1; } @@ -186,7 +186,7 @@ int main (int argc, char **argv) { /* Wait for results */ - event = rd_kafka_queue_poll(queue, -1/*indefinitely*/); + event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/); if (!event) { /* User hit Ctrl-C */ @@ -205,12 +205,12 @@ int main (int argc, char **argv) 
{ const rd_kafka_topic_partition_list_t *offsets; int i; - result = rd_kafka_event_DeleteRecords_result(event); + result = rd_kafka_event_DeleteRecords_result(event); offsets = rd_kafka_DeleteRecords_result_offsets(result); printf("DeleteRecords results:\n"); - for (i = 0 ; i < offsets->cnt ; i++) - printf(" %s [%"PRId32"] offset %"PRId64": %s\n", + for (i = 0; i < offsets->cnt; i++) + printf(" %s [%" PRId32 "] offset %" PRId64 ": %s\n", offsets->elems[i].topic, offsets->elems[i].partition, offsets->elems[i].offset, diff --git a/examples/idempotent_producer.c b/examples/idempotent_producer.c index 358552f1ad..1e799eaf8f 100644 --- a/examples/idempotent_producer.c +++ b/examples/idempotent_producer.c @@ -57,13 +57,13 @@ static volatile sig_atomic_t run = 1; /** * @brief Signal termination of program */ -static void stop (int sig) { +static void stop(int sig) { run = 0; } static int deliveredcnt = 0; -static int msgerrcnt = 0; +static int msgerrcnt = 0; /** * @brief Message delivery report callback. @@ -76,8 +76,8 @@ static int msgerrcnt = 0; * The callback is triggered from rd_kafka_poll() or rd_kafka_flush() and * executes on the application's thread. 
*/ -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) { fprintf(stderr, "%% Message delivery failed: %s\n", rd_kafka_err2str(rkmessage->err)); @@ -85,9 +85,8 @@ static void dr_msg_cb (rd_kafka_t *rk, } else { fprintf(stderr, "%% Message delivered (%zd bytes, topic %s, " - "partition %"PRId32", offset %"PRId64")\n", - rkmessage->len, - rd_kafka_topic_name(rkmessage->rkt), + "partition %" PRId32 ", offset %" PRId64 ")\n", + rkmessage->len, rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); deliveredcnt++; } @@ -112,8 +111,8 @@ static void dr_msg_cb (rd_kafka_t *rk, * the idempotence guarantees can't be satisfied, these errors * are identified by a the `RD_KAFKA_RESP_ERR__FATAL` error code. */ -static void error_cb (rd_kafka_t *rk, int err, const - char *reason, void *opaque) { +static void +error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) { rd_kafka_resp_err_t orig_err; char errstr[512]; @@ -143,8 +142,8 @@ static void error_cb (rd_kafka_t *rk, int err, const */ orig_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr)); - fprintf(stderr, "%% FATAL ERROR: %s: %s\n", - rd_kafka_err2name(orig_err), errstr); + fprintf(stderr, "%% FATAL ERROR: %s: %s\n", rd_kafka_err2name(orig_err), + errstr); /* Clean termination to get delivery results (from rd_kafka_flush()) * for all outstanding/in-transit/queued messages. */ @@ -153,7 +152,7 @@ static void error_cb (rd_kafka_t *rk, int err, const } -int main (int argc, char **argv) { +int main(int argc, char **argv) { rd_kafka_t *rk; /* Producer instance handle */ rd_kafka_conf_t *conf; /* Temporary configuration object */ char errstr[512]; /* librdkafka API error reporting buffer */ @@ -183,16 +182,16 @@ int main (int argc, char **argv) { * host or host:port (default port 9092). 
* librdkafka will use the bootstrap brokers to acquire the full * set of brokers from the cluster. */ - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%s\n", errstr); rd_kafka_conf_destroy(conf); return 1; } /* Enable the idempotent producer */ - if (rd_kafka_conf_set(conf, "enable.idempotence", "true", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "enable.idempotence", "true", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%s\n", errstr); rd_kafka_conf_destroy(conf); return 1; @@ -222,8 +221,8 @@ int main (int argc, char **argv) { */ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); if (!rk) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", errstr); + fprintf(stderr, "%% Failed to create new producer: %s\n", + errstr); return 1; } @@ -252,21 +251,19 @@ int main (int argc, char **argv) { */ retry: err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(buf, strlen(buf)), - /* Copy the message payload so the `buf` can - * be reused for the next message. */ - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_END); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_VALUE(buf, strlen(buf)), + /* Copy the message payload so the `buf` can + * be reused for the next message. */ + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); if (err) { /** * Failed to *enqueue* message for producing. 
*/ fprintf(stderr, - "%% Failed to produce to topic %s: %s\n", - topic, rd_kafka_err2str(err)); + "%% Failed to produce to topic %s: %s\n", topic, + rd_kafka_err2str(err)); if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) { /* If the internal queue is full, wait for @@ -279,7 +276,8 @@ int main (int argc, char **argv) { * The internal queue is limited by the * configuration property * queue.buffering.max.messages */ - rd_kafka_poll(rk, 1000/*block for max 1000ms*/); + rd_kafka_poll(rk, + 1000 /*block for max 1000ms*/); goto retry; } else { /* Produce failed, most likely due to a @@ -304,7 +302,7 @@ int main (int argc, char **argv) { * to make sure previously produced messages have their * delivery report callback served (and any other callbacks * you register). */ - rd_kafka_poll(rk, 0/*non-blocking*/); + rd_kafka_poll(rk, 0 /*non-blocking*/); msgcnt++; @@ -313,10 +311,9 @@ int main (int argc, char **argv) { * some time. */ if (msgcnt == 13) rd_kafka_test_fatal_error( - rk, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, - "This is a fabricated error to test the " - "fatal error handling"); + rk, RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, + "This is a fabricated error to test the " + "fatal error handling"); /* Short sleep to rate-limit this example. * A real application should not do this. */ @@ -328,9 +325,8 @@ int main (int argc, char **argv) { * rd_kafka_flush() is an abstraction over rd_kafka_poll() which * waits for all messages to be delivered. */ fprintf(stderr, "%% Flushing outstanding messages..\n"); - rd_kafka_flush(rk, 10*1000 /* wait for max 10 seconds */); - fprintf(stderr, - "%% %d message(s) produced, %d delivered, %d failed\n", + rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */); + fprintf(stderr, "%% %d message(s) produced, %d delivered, %d failed\n", msgcnt, deliveredcnt, msgerrcnt); /* Save fatal error prior for using with exit status below. 
*/ @@ -344,4 +340,4 @@ int main (int argc, char **argv) { return 1; else return 0; - } +} diff --git a/examples/kafkatest_verifiable_client.cpp b/examples/kafkatest_verifiable_client.cpp index c818b48b30..bdb8607a33 100644 --- a/examples/kafkatest_verifiable_client.cpp +++ b/examples/kafkatest_verifiable_client.cpp @@ -61,56 +61,60 @@ #include "rdkafkacpp.h" static volatile sig_atomic_t run = 1; -static bool exit_eof = false; -static int verbosity = 1; +static bool exit_eof = false; +static int verbosity = 1; static std::string value_prefix; class Assignment { - public: - static std::string name (const std::string &t, int partition) { + static std::string name(const std::string &t, int partition) { std::stringstream stm; stm << t << "." << partition; return stm.str(); } - Assignment(): topic(""), partition(-1), consumedMessages(0), - minOffset(-1), maxOffset(0) { + Assignment() : + topic(""), + partition(-1), + consumedMessages(0), + minOffset(-1), + maxOffset(0) { printf("Created assignment\n"); } Assignment(const Assignment &a) { - topic = a.topic; - partition = a.partition; + topic = a.topic; + partition = a.partition; consumedMessages = a.consumedMessages; - minOffset = a.minOffset; - maxOffset = a.maxOffset; + minOffset = a.minOffset; + maxOffset = a.maxOffset; } Assignment &operator=(const Assignment &a) { - this->topic = a.topic; - this->partition = a.partition; + this->topic = a.topic; + this->partition = a.partition; this->consumedMessages = a.consumedMessages; - this->minOffset = a.minOffset; - this->maxOffset = a.maxOffset; + this->minOffset = a.minOffset; + this->maxOffset = a.maxOffset; return *this; } int operator==(const Assignment &a) const { - return !(this->topic == a.topic && - this->partition == a.partition); + return !(this->topic == a.topic && this->partition == a.partition); } int operator<(const Assignment &a) const { - if (this->topic < a.topic) return 1; - if (this->topic >= a.topic) return 0; + if (this->topic < a.topic) + return 1; + if 
(this->topic >= a.topic) + return 0; return (this->partition < a.partition); } - void setup (std::string t, int32_t p) { + void setup(std::string t, int32_t p) { assert(!t.empty()); assert(topic.empty() || topic == t); assert(partition == -1 || partition == p); - topic = t; + topic = t; partition = p; } @@ -123,7 +127,6 @@ class Assignment { - static struct { int maxMessages; @@ -141,14 +144,13 @@ static struct { std::map assignments; } consumer; } state = { - /* .maxMessages = */ -1 -}; + /* .maxMessages = */ -1}; static RdKafka::KafkaConsumer *consumer; -static std::string now () { +static std::string now() { struct timeval tv; gettimeofday(&tv, NULL); time_t t = tv.tv_sec; @@ -157,7 +159,7 @@ static std::string now () { localtime_r(&t, &tm); strftime(buf, sizeof(buf), "%H:%M:%S", &tm); - snprintf(buf+strlen(buf), sizeof(buf)-strlen(buf), ".%03d", + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ".%03d", (int)(tv.tv_usec / 1000)); return buf; @@ -166,18 +168,19 @@ static std::string now () { static time_t watchdog_last_kick; static const int watchdog_timeout = 20; /* Must be > socket.timeout.ms */ -static void sigwatchdog (int sig) { +static void sigwatchdog(int sig) { time_t t = time(NULL); if (watchdog_last_kick + watchdog_timeout <= t) { - std::cerr << now() << ": WATCHDOG TIMEOUT (" << - (int)(t - watchdog_last_kick) << "s): TERMINATING" << std::endl; + std::cerr << now() << ": WATCHDOG TIMEOUT (" + << (int)(t - watchdog_last_kick) << "s): TERMINATING" + << std::endl; int *i = NULL; - *i = 100; + *i = 100; abort(); } } -static void watchdog_kick () { +static void watchdog_kick() { watchdog_last_kick = time(NULL); /* Safe guard against hangs-on-exit */ @@ -186,13 +189,11 @@ static void watchdog_kick () { - - -static void errorString (const std::string &name, - const std::string &errmsg, - const std::string &topic, - const std::string *key, - const std::string &value) { +static void errorString(const std::string &name, + const std::string &errmsg, + 
const std::string &topic, + const std::string *key, + const std::string &value) { std::cout << "{ " << "\"name\": \"" << name << "\", " << "\"_time\": \"" << now() << "\", " @@ -204,12 +205,12 @@ static void errorString (const std::string &name, } -static void successString (const std::string &name, - const std::string &topic, - int partition, - int64_t offset, - const std::string *key, - const std::string &value) { +static void successString(const std::string &name, + const std::string &topic, + int partition, + int64_t offset, + const std::string *key, + const std::string &value) { std::cout << "{ " << "\"name\": \"" << name << "\", " << "\"_time\": \"" << now() << "\", " @@ -223,29 +224,27 @@ static void successString (const std::string &name, #if FIXME -static void offsetStatus (bool success, - const std::string &topic, - int partition, - int64_t offset, - const std::string &errstr) { +static void offsetStatus(bool success, + const std::string &topic, + int partition, + int64_t offset, + const std::string &errstr) { std::cout << "{ " - "\"name\": \"offsets_committed\", " << - "\"success\": " << success << ", " << - "\"offsets\": [ " << - " { " << - " \"topic\": \"" << topic << "\", " << - " \"partition\": " << partition << ", " << - " \"offset\": " << (int)offset << ", " << - " \"error\": \"" << errstr << "\" " << - " } " << - "] }" << std::endl; - + "\"name\": \"offsets_committed\", " + << "\"success\": " << success << ", " + << "\"offsets\": [ " + << " { " + << " \"topic\": \"" << topic << "\", " + << " \"partition\": " << partition << ", " + << " \"offset\": " << (int)offset << ", " + << " \"error\": \"" << errstr << "\" " + << " } " + << "] }" << std::endl; } #endif -static void sigterm (int sig) { - +static void sigterm(int sig) { std::cerr << now() << ": Terminating because of signal " << sig << std::endl; if (!run) { @@ -258,21 +257,17 @@ static void sigterm (int sig) { class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb { public: - void 
dr_cb (RdKafka::Message &message) { + void dr_cb(RdKafka::Message &message) { if (message.err()) { state.producer.numErr++; - errorString("producer_send_error", message.errstr(), - message.topic_name(), + errorString("producer_send_error", message.errstr(), message.topic_name(), message.key(), - std::string(static_cast(message.payload()), + std::string(static_cast(message.payload()), message.len())); } else { - successString("producer_send_success", - message.topic_name(), - (int)message.partition(), - message.offset(), - message.key(), - std::string(static_cast(message.payload()), + successString("producer_send_success", message.topic_name(), + (int)message.partition(), message.offset(), message.key(), + std::string(static_cast(message.payload()), message.len())); state.producer.numAcked++; } @@ -282,28 +277,27 @@ class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb { class ExampleEventCb : public RdKafka::EventCb { public: - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_ERROR: - std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_ERROR: + std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err()) + << "): " << event.str() << std::endl; + break; - case RdKafka::Event::EVENT_STATS: - std::cerr << now() << ": \"STATS\": " << event.str() << std::endl; - break; + case RdKafka::Event::EVENT_STATS: + std::cerr << now() << ": \"STATS\": " << event.str() << std::endl; + break; - case RdKafka::Event::EVENT_LOG: - std::cerr << now() << ": LOG-" << event.severity() << "-" - << event.fac() << ": " << event.str() << std::endl; - break; + case RdKafka::Event::EVENT_LOG: + std::cerr << now() << ": LOG-" << event.severity() << "-" << event.fac() + << ": " << event.str() << std::endl; + break; - default: - std::cerr << now() << ": EVENT " << event.type() 
<< - " (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; + default: + std::cerr << now() << ": EVENT " << event.type() << " (" + << RdKafka::err2str(event.err()) << "): " << event.str() + << std::endl; + break; } } }; @@ -313,15 +307,17 @@ class ExampleEventCb : public RdKafka::EventCb { * in the produce() call. */ class MyHashPartitionerCb : public RdKafka::PartitionerCb { public: - int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key, - int32_t partition_cnt, void *msg_opaque) { + int32_t partitioner_cb(const RdKafka::Topic *topic, + const std::string *key, + int32_t partition_cnt, + void *msg_opaque) { return djb_hash(key->c_str(), key->size()) % partition_cnt; } - private: - static inline unsigned int djb_hash (const char *str, size_t len) { + private: + static inline unsigned int djb_hash(const char *str, size_t len) { unsigned int hash = 5381; - for (size_t i = 0 ; i < len ; i++) + for (size_t i = 0; i < len; i++) hash = ((hash << 5) + hash) + str[i]; return hash; } @@ -329,35 +325,35 @@ class MyHashPartitionerCb : public RdKafka::PartitionerCb { - - /** * Print number of records consumed, every 100 messages or on timeout. */ -static void report_records_consumed (int immediate) { - std::map *assignments = &state.consumer.assignments; +static void report_records_consumed(int immediate) { + std::map *assignments = &state.consumer.assignments; if (state.consumer.consumedMessages <= state.consumer.consumedMessagesLastReported + (immediate ? 
0 : 999)) return; std::cout << "{ " - "\"name\": \"records_consumed\", " << - "\"_totcount\": " << state.consumer.consumedMessages << ", " << - "\"count\": " << (state.consumer.consumedMessages - - state.consumer.consumedMessagesLastReported) << ", " << - "\"partitions\": [ "; - - for (std::map::iterator ii = assignments->begin() ; - ii != assignments->end() ; ii++) { + "\"name\": \"records_consumed\", " + << "\"_totcount\": " << state.consumer.consumedMessages << ", " + << "\"count\": " + << (state.consumer.consumedMessages - + state.consumer.consumedMessagesLastReported) + << ", " + << "\"partitions\": [ "; + + for (std::map::iterator ii = assignments->begin(); + ii != assignments->end(); ii++) { Assignment *a = &(*ii).second; assert(!a->topic.empty()); - std::cout << (ii == assignments->begin() ? "": ", ") << " { " << - " \"topic\": \"" << a->topic << "\", " << - " \"partition\": " << a->partition << ", " << - " \"minOffset\": " << a->minOffset << ", " << - " \"maxOffset\": " << a->maxOffset << " " << - " } "; + std::cout << (ii == assignments->begin() ? "" : ", ") << " { " + << " \"topic\": \"" << a->topic << "\", " + << " \"partition\": " << a->partition << ", " + << " \"minOffset\": " << a->minOffset << ", " + << " \"maxOffset\": " << a->maxOffset << " " + << " } "; a->minOffset = -1; } @@ -369,36 +365,39 @@ static void report_records_consumed (int immediate) { class ExampleOffsetCommitCb : public RdKafka::OffsetCommitCb { public: - void offset_commit_cb (RdKafka::ErrorCode err, - std::vector &offsets) { - std::cerr << now() << ": Propagate offset for " << offsets.size() << " partitions, error: " << RdKafka::err2str(err) << std::endl; + void offset_commit_cb(RdKafka::ErrorCode err, + std::vector &offsets) { + std::cerr << now() << ": Propagate offset for " << offsets.size() + << " partitions, error: " << RdKafka::err2str(err) << std::endl; /* No offsets to commit, dont report anything. 
*/ if (err == RdKafka::ERR__NO_OFFSET) return; - /* Send up-to-date records_consumed report to make sure consumed > committed */ + /* Send up-to-date records_consumed report to make sure consumed > committed + */ report_records_consumed(1); - std::cout << "{ " << - "\"name\": \"offsets_committed\", " << - "\"success\": " << (err ? "false" : "true") << ", " << - "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", " << - "\"_autocommit\": " << (state.consumer.useAutoCommit ? "true":"false") << ", " << - "\"offsets\": [ "; + std::cout << "{ " + << "\"name\": \"offsets_committed\", " + << "\"success\": " << (err ? "false" : "true") << ", " + << "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", " + << "\"_autocommit\": " + << (state.consumer.useAutoCommit ? "true" : "false") << ", " + << "\"offsets\": [ "; assert(offsets.size() > 0); - for (unsigned int i = 0 ; i < offsets.size() ; i++) { - std::cout << (i == 0 ? "" : ", ") << "{ " << - " \"topic\": \"" << offsets[i]->topic() << "\", " << - " \"partition\": " << offsets[i]->partition() << ", " << - " \"offset\": " << (int)offsets[i]->offset() << ", " << - " \"error\": \"" << - (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err()) : "") << - "\" " << - " }"; + for (unsigned int i = 0; i < offsets.size(); i++) { + std::cout << (i == 0 ? "" : ", ") << "{ " + << " \"topic\": \"" << offsets[i]->topic() << "\", " + << " \"partition\": " << offsets[i]->partition() << ", " + << " \"offset\": " << (int)offsets[i]->offset() << ", " + << " \"error\": \"" + << (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err()) + : "") + << "\" " + << " }"; } std::cout << " ] }" << std::endl; - } }; @@ -408,12 +407,10 @@ static ExampleOffsetCommitCb ex_offset_commit_cb; /** * Commit every 1000 messages or whenever there is a consume timeout. 
*/ -static void do_commit (RdKafka::KafkaConsumer *consumer, - int immediate) { - if (!immediate && - (state.consumer.useAutoCommit || - state.consumer.consumedMessagesAtLastCommit + 1000 > - state.consumer.consumedMessages)) +static void do_commit(RdKafka::KafkaConsumer *consumer, int immediate) { + if (!immediate && (state.consumer.useAutoCommit || + state.consumer.consumedMessagesAtLastCommit + 1000 > + state.consumer.consumedMessages)) return; /* Make sure we report consumption before commit, @@ -422,106 +419,102 @@ static void do_commit (RdKafka::KafkaConsumer *consumer, state.consumer.consumedMessages) report_records_consumed(1); - std::cerr << now() << ": committing " << - (state.consumer.consumedMessages - - state.consumer.consumedMessagesAtLastCommit) << " messages" << std::endl; + std::cerr << now() << ": committing " + << (state.consumer.consumedMessages - + state.consumer.consumedMessagesAtLastCommit) + << " messages" << std::endl; RdKafka::ErrorCode err; err = consumer->commitSync(&ex_offset_commit_cb); - std::cerr << now() << ": " << - "sync commit returned " << RdKafka::err2str(err) << std::endl; + std::cerr << now() << ": " + << "sync commit returned " << RdKafka::err2str(err) << std::endl; - state.consumer.consumedMessagesAtLastCommit = - state.consumer.consumedMessages; + state.consumer.consumedMessagesAtLastCommit = state.consumer.consumedMessages; } void msg_consume(RdKafka::KafkaConsumer *consumer, - RdKafka::Message* msg, void* opaque) { + RdKafka::Message *msg, + void *opaque) { switch (msg->err()) { - case RdKafka::ERR__TIMED_OUT: - /* Try reporting consumed messages */ - report_records_consumed(1); - /* Commit one every consume() timeout instead of on every message. - * Also commit on every 1000 messages, whichever comes first. 
*/ - do_commit(consumer, 1); - break; - - - case RdKafka::ERR_NO_ERROR: - { - /* Real message */ - if (verbosity > 2) - std::cerr << now() << ": Read msg from " << msg->topic_name() << - " [" << (int)msg->partition() << "] at offset " << - msg->offset() << std::endl; - - if (state.maxMessages >= 0 && - state.consumer.consumedMessages >= state.maxMessages) - return; + case RdKafka::ERR__TIMED_OUT: + /* Try reporting consumed messages */ + report_records_consumed(1); + /* Commit one every consume() timeout instead of on every message. + * Also commit on every 1000 messages, whichever comes first. */ + do_commit(consumer, 1); + break; - Assignment *a = - &state.consumer.assignments[Assignment::name(msg->topic_name(), - msg->partition())]; - a->setup(msg->topic_name(), msg->partition()); + case RdKafka::ERR_NO_ERROR: { + /* Real message */ + if (verbosity > 2) + std::cerr << now() << ": Read msg from " << msg->topic_name() << " [" + << (int)msg->partition() << "] at offset " << msg->offset() + << std::endl; - a->consumedMessages++; - if (a->minOffset == -1) - a->minOffset = msg->offset(); - if (a->maxOffset < msg->offset()) - a->maxOffset = msg->offset(); + if (state.maxMessages >= 0 && + state.consumer.consumedMessages >= state.maxMessages) + return; - if (msg->key()) { - if (verbosity >= 3) - std::cerr << now() << ": Key: " << *msg->key() << std::endl; - } - if (verbosity >= 3) - fprintf(stderr, "%.*s\n", - static_cast(msg->len()), - static_cast(msg->payload())); + Assignment *a = &state.consumer.assignments[Assignment::name( + msg->topic_name(), msg->partition())]; + a->setup(msg->topic_name(), msg->partition()); - state.consumer.consumedMessages++; + a->consumedMessages++; + if (a->minOffset == -1) + a->minOffset = msg->offset(); + if (a->maxOffset < msg->offset()) + a->maxOffset = msg->offset(); - report_records_consumed(0); + if (msg->key()) { + if (verbosity >= 3) + std::cerr << now() << ": Key: " << *msg->key() << std::endl; + } - do_commit(consumer, 0); - } - 
break; + if (verbosity >= 3) + fprintf(stderr, "%.*s\n", static_cast(msg->len()), + static_cast(msg->payload())); - case RdKafka::ERR__PARTITION_EOF: - /* Last message */ - if (exit_eof) { - std::cerr << now() << ": Terminate: exit on EOF" << std::endl; - run = 0; - } - break; + state.consumer.consumedMessages++; - case RdKafka::ERR__UNKNOWN_TOPIC: - case RdKafka::ERR__UNKNOWN_PARTITION: - std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl; - run = 0; - break; + report_records_consumed(0); - case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE: - std::cerr << now() << ": Warning: " << msg->errstr() << std::endl; - break; + do_commit(consumer, 0); + } break; - default: - /* Errors */ - std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl; + case RdKafka::ERR__PARTITION_EOF: + /* Last message */ + if (exit_eof) { + std::cerr << now() << ": Terminate: exit on EOF" << std::endl; run = 0; + } + break; + + case RdKafka::ERR__UNKNOWN_TOPIC: + case RdKafka::ERR__UNKNOWN_PARTITION: + std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl; + run = 0; + break; + + case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE: + std::cerr << now() << ": Warning: " << msg->errstr() << std::endl; + break; + + default: + /* Errors */ + std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl; + run = 0; } } - class ExampleConsumeCb : public RdKafka::ConsumeCb { public: - void consume_cb (RdKafka::Message &msg, void *opaque) { + void consume_cb(RdKafka::Message &msg, void *opaque) { msg_consume(consumer_, &msg, opaque); } RdKafka::KafkaConsumer *consumer_; @@ -529,22 +522,22 @@ class ExampleConsumeCb : public RdKafka::ConsumeCb { class ExampleRebalanceCb : public RdKafka::RebalanceCb { private: - static std::string part_list_json (const std::vector &partitions) { + static std::string part_list_json( + const std::vector &partitions) { std::ostringstream out; - for (unsigned int i = 0 ; i < partitions.size() ; i++) - out 
<< (i==0?"":", ") << "{ " << - " \"topic\": \"" << partitions[i]->topic() << "\", " << - " \"partition\": " << partitions[i]->partition() << - " }"; + for (unsigned int i = 0; i < partitions.size(); i++) + out << (i == 0 ? "" : ", ") << "{ " + << " \"topic\": \"" << partitions[i]->topic() << "\", " + << " \"partition\": " << partitions[i]->partition() << " }"; return out.str(); } - public: - void rebalance_cb (RdKafka::KafkaConsumer *consumer, - RdKafka::ErrorCode err, - std::vector &partitions) { - std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) << - " for " << partitions.size() << " partitions" << std::endl; + public: + void rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector &partitions) { + std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) << " for " + << partitions.size() << " partitions" << std::endl; /* Send message report prior to rebalancing event to make sure they * are accounted for on the "right side" of the rebalance. */ report_records_consumed(1); @@ -556,12 +549,13 @@ class ExampleRebalanceCb : public RdKafka::RebalanceCb { consumer->unassign(); } - std::cout << - "{ " << - "\"name\": \"partitions_" << (err == RdKafka::ERR__ASSIGN_PARTITIONS ? - "assigned" : "revoked") << "\", " << - "\"partitions\": [ " << part_list_json(partitions) << "] }" << std::endl; - + std::cout << "{ " + << "\"name\": \"partitions_" + << (err == RdKafka::ERR__ASSIGN_PARTITIONS ? 
"assigned" + : "revoked") + << "\", " + << "\"partitions\": [ " << part_list_json(partitions) << "] }" + << std::endl; } }; @@ -570,11 +564,12 @@ class ExampleRebalanceCb : public RdKafka::RebalanceCb { /** * @brief Read (Java client) configuration file */ -static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) { +static void read_conf_file(RdKafka::Conf *conf, const std::string &conf_file) { std::ifstream inf(conf_file.c_str()); if (!inf) { - std::cerr << now() << ": " << conf_file << ": could not open file" << std::endl; + std::cerr << now() << ": " << conf_file << ": could not open file" + << std::endl; exit(1); } @@ -593,18 +588,23 @@ static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) { // Match on key=value.. size_t d = line.find("="); if (d == 0 || d == std::string::npos) { - std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << line << ": ignoring invalid line (expect key=value): " << ::std::endl; + std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << line + << ": ignoring invalid line (expect key=value): " + << ::std::endl; continue; } std::string key = line.substr(0, d); - std::string val = line.substr(d+1); + std::string val = line.substr(d + 1); std::string errstr; if (conf->set(key, val, errstr)) { - std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key << "=" << val << ": " << errstr << ": ignoring error" << std::endl; + std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key + << "=" << val << ": " << errstr << ": ignoring error" + << std::endl; } else { - std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key << "=" << val << ": applied to configuration" << std::endl; + std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key + << "=" << val << ": applied to configuration" << std::endl; } } @@ -613,19 +613,18 @@ static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) { - -int main 
(int argc, char **argv) { +int main(int argc, char **argv) { std::string brokers = "localhost"; std::string errstr; std::vector topics; - std::string mode = "P"; - int throughput = 0; + std::string mode = "P"; + int throughput = 0; int32_t partition = RdKafka::Topic::PARTITION_UA; MyHashPartitionerCb hash_partitioner; int64_t create_time = -1; - std::cerr << now() << ": librdkafka version " << RdKafka::version_str() << - " (" << RdKafka::version() << ")" << std::endl; + std::cerr << now() << ": librdkafka version " << RdKafka::version_str() + << " (" << RdKafka::version() << ")" << std::endl; /* * Create configuration objects @@ -646,7 +645,7 @@ int main (int argc, char **argv) { { char hostname[128]; - gethostname(hostname, sizeof(hostname)-1); + gethostname(hostname, sizeof(hostname) - 1); conf->set("client.id", std::string("rdkafka@") + hostname, errstr); } @@ -664,15 +663,15 @@ int main (int argc, char **argv) { conf->set("enable.partition.eof", "true", errstr); - for (int i = 1 ; i < argc ; i++) { + for (int i = 1; i < argc; i++) { const char *name = argv[i]; - const char *val = i+1 < argc ? argv[i+1] : NULL; + const char *val = i + 1 < argc ? argv[i + 1] : NULL; if (val && !strncmp(val, "-", 1)) val = NULL; - std::cout << now() << ": argument: " << name << " " << - (val?val:"") << std::endl; + std::cout << now() << ": argument: " << name << " " << (val ? 
val : "") + << std::endl; if (val) { if (!strcmp(name, "--topic")) @@ -712,22 +711,22 @@ int main (int argc, char **argv) { std::transform(s.begin(), s.end(), s.begin(), tolower); - std::cerr << now() << ": converted " << name << " " - << val << " to " << s << std::endl; + std::cerr << now() << ": converted " << name << " " << val << " to " + << s << std::endl; - if (conf->set("partition.assignment.strategy", s.c_str(), errstr)) { + if (conf->set("partition.assignment.strategy", s.c_str(), errstr)) { std::cerr << now() << ": " << errstr << std::endl; exit(1); } } else if (!strcmp(name, "--value-prefix")) { value_prefix = std::string(val) + "."; } else if (!strcmp(name, "--acks")) { - if (conf->set("acks", val, errstr)) { - std::cerr << now() << ": " << errstr << std::endl; - exit(1); - } + if (conf->set("acks", val, errstr)) { + std::cerr << now() << ": " << errstr << std::endl; + exit(1); + } } else if (!strcmp(name, "--message-create-time")) { - create_time = (int64_t)atoi(val); + create_time = (int64_t)atoi(val); } else if (!strcmp(name, "--debug")) { conf->set("debug", val, errstr); } else if (!strcmp(name, "-X")) { @@ -764,7 +763,8 @@ int main (int argc, char **argv) { else if (!strcmp(name, "-q")) verbosity--; else { - std::cerr << now() << ": Unknown option or missing argument to " << name << std::endl; + std::cerr << now() << ": Unknown option or missing argument to " << name + << std::endl; exit(1); } } @@ -786,7 +786,7 @@ int main (int argc, char **argv) { signal(SIGINT, sigterm); signal(SIGTERM, sigterm); - signal(SIGALRM, sigwatchdog); + signal(SIGALRM, sigwatchdog); if (mode == "P") { @@ -804,28 +804,30 @@ int main (int argc, char **argv) { */ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); if (!producer) { - std::cerr << now() << ": Failed to create producer: " << errstr << std::endl; + std::cerr << now() << ": Failed to create producer: " << errstr + << std::endl; exit(1); } - std::cerr << now() << ": % Created producer " << 
producer->name() << std::endl; + std::cerr << now() << ": % Created producer " << producer->name() + << std::endl; /* * Create topic handle. */ - RdKafka::Topic *topic = RdKafka::Topic::create(producer, topics[0], - NULL, errstr); + RdKafka::Topic *topic = + RdKafka::Topic::create(producer, topics[0], NULL, errstr); if (!topic) { std::cerr << now() << ": Failed to create topic: " << errstr << std::endl; exit(1); } - static const int delay_us = throughput ? 1000000/throughput : 10; + static const int delay_us = throughput ? 1000000 / throughput : 10; if (state.maxMessages == -1) state.maxMessages = 1000000; /* Avoid infinite produce */ - for (int i = 0 ; run && i < state.maxMessages ; i++) { + for (int i = 0; run && i < state.maxMessages; i++) { /* * Produce message */ @@ -833,27 +835,26 @@ int main (int argc, char **argv) { msg << value_prefix << i; while (true) { RdKafka::ErrorCode resp; - if (create_time == -1) { - resp = producer->produce(topic, partition, - RdKafka::Producer::RK_MSG_COPY /* Copy payload */, - const_cast(msg.str().c_str()), - msg.str().size(), NULL, NULL); - } else { - resp = producer->produce(topics[0], partition, - RdKafka::Producer::RK_MSG_COPY /* Copy payload */, - const_cast(msg.str().c_str()), - msg.str().size(), - NULL, 0, - create_time, - NULL); - } + if (create_time == -1) { + resp = producer->produce( + topic, partition, + RdKafka::Producer::RK_MSG_COPY /* Copy payload */, + const_cast(msg.str().c_str()), msg.str().size(), NULL, + NULL); + } else { + resp = producer->produce( + topics[0], partition, + RdKafka::Producer::RK_MSG_COPY /* Copy payload */, + const_cast(msg.str().c_str()), msg.str().size(), NULL, 0, + create_time, NULL); + } if (resp == RdKafka::ERR__QUEUE_FULL) { producer->poll(100); continue; } else if (resp != RdKafka::ERR_NO_ERROR) { - errorString("producer_send_error", - RdKafka::err2str(resp), topic->name(), NULL, msg.str()); + errorString("producer_send_error", RdKafka::err2str(resp), + topic->name(), NULL, 
msg.str()); state.producer.numErr++; } else { state.producer.numSent++; @@ -868,15 +869,16 @@ int main (int argc, char **argv) { run = 1; while (run && producer->outq_len() > 0) { - std::cerr << now() << ": Waiting for " << producer->outq_len() << std::endl; + std::cerr << now() << ": Waiting for " << producer->outq_len() + << std::endl; producer->poll(1000); watchdog_kick(); } - std::cerr << now() << ": " << state.producer.numAcked << "/" << - state.producer.numSent << "/" << state.maxMessages << - " msgs acked/sent/max, " << state.producer.numErr << - " errored" << std::endl; + std::cerr << now() << ": " << state.producer.numAcked << "/" + << state.producer.numSent << "/" << state.maxMessages + << " msgs acked/sent/max, " << state.producer.numErr << " errored" + << std::endl; delete topic; delete producer; @@ -900,21 +902,21 @@ int main (int argc, char **argv) { */ consumer = RdKafka::KafkaConsumer::create(conf, errstr); if (!consumer) { - std::cerr << now() << ": Failed to create consumer: " << - errstr << std::endl; + std::cerr << now() << ": Failed to create consumer: " << errstr + << std::endl; exit(1); } - std::cerr << now() << ": % Created consumer " << consumer->name() << - std::endl; + std::cerr << now() << ": % Created consumer " << consumer->name() + << std::endl; /* * Subscribe to topic(s) */ RdKafka::ErrorCode resp = consumer->subscribe(topics); if (resp != RdKafka::ERR_NO_ERROR) { - std::cerr << now() << ": Failed to subscribe to " << topics.size() << " topics: " - << RdKafka::err2str(resp) << std::endl; + std::cerr << now() << ": Failed to subscribe to " << topics.size() + << " topics: " << RdKafka::err2str(resp) << std::endl; exit(1); } diff --git a/examples/openssl_engine_example.cpp b/examples/openssl_engine_example.cpp index 37db36c2ad..401857e6b2 100644 --- a/examples/openssl_engine_example.cpp +++ b/examples/openssl_engine_example.cpp @@ -52,116 +52,108 @@ */ #include "rdkafkacpp.h" -static void metadata_print (const RdKafka::Metadata 
*metadata) { - std::cout << "Number of topics: " << metadata->topics()->size() - << std::endl; - - /* Iterate topics */ - RdKafka::Metadata::TopicMetadataIterator it; - for (it = metadata->topics()->begin(); - it != metadata->topics()->end(); - ++it) - std::cout << " " << (*it)->topic() << " has " - << (*it)->partitions()->size() << " partitions." << std::endl; +static void metadata_print(const RdKafka::Metadata *metadata) { + std::cout << "Number of topics: " << metadata->topics()->size() << std::endl; + + /* Iterate topics */ + RdKafka::Metadata::TopicMetadataIterator it; + for (it = metadata->topics()->begin(); it != metadata->topics()->end(); ++it) + std::cout << " " << (*it)->topic() << " has " + << (*it)->partitions()->size() << " partitions." << std::endl; } class PrintingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb { - /* This SSL cert verification callback simply prints the incoming - * parameters. It provides no validation, everything is ok. */ -public: - bool ssl_cert_verify_cb (const std::string &broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, - size_t size, - std::string &errstr) { - std::cout << "ssl_cert_verify_cb :" << - ": broker_name=" << broker_name << - ", broker_id=" << broker_id << - ", x509_error=" << *x509_error << - ", depth=" << depth << - ", buf size=" << size << std::endl; - - return true; - } + /* This SSL cert verification callback simply prints the incoming + * parameters. It provides no validation, everything is ok. 
*/ + public: + bool ssl_cert_verify_cb(const std::string &broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + std::string &errstr) { + std::cout << "ssl_cert_verify_cb :" + << ": broker_name=" << broker_name << ", broker_id=" << broker_id + << ", x509_error=" << *x509_error << ", depth=" << depth + << ", buf size=" << size << std::endl; + + return true; + } }; -int main (int argc, char **argv) { - std::string brokers; - std::string errstr; - std::string engine_path; - std::string ca_location; +int main(int argc, char **argv) { + std::string brokers; + std::string errstr; + std::string engine_path; + std::string ca_location; + + /* + * Create configuration objects + */ + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + std::string engine_id; + std::string engine_callback_data; + int opt; + + if (conf->set("security.protocol", "ssl", errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + while ((opt = getopt(argc, argv, "b:p:c:t:d:i:e:X:")) != -1) { + switch (opt) { + case 'b': + brokers = optarg; + break; + case 'p': + engine_path = optarg; + break; + case 'c': + ca_location = optarg; + break; + case 'i': + engine_id = optarg; + break; + case 'e': + engine_callback_data = optarg; + break; + case 'd': + if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + break; + case 'X': { + char *name, *val; + + name = optarg; + if (!(val = strchr(name, '='))) { + std::cerr << "%% Expected -X property=value, not " << name << std::endl; + exit(1); + } - /* - * Create configuration objects - */ - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - std::string engine_id; - std::string engine_callback_data; - int opt; + *val = '\0'; + val++; - if (conf->set("security.protocol", "ssl", errstr) != - RdKafka::Conf::CONF_OK) { + if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) 
{ std::cerr << errstr << std::endl; exit(1); - } + } + } break; - while ((opt = getopt(argc, argv, "b:p:c:t:d:i:e:X:")) != -1) { - switch (opt) { - case 'b': - brokers = optarg; - break; - case 'p': - engine_path = optarg; - break; - case 'c': - ca_location = optarg; - break; - case 'i': - engine_id = optarg; - break; - case 'e': - engine_callback_data = optarg; - break; - case 'd': - if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - break; - case 'X': { - char *name, *val; - - name = optarg; - if (!(val = strchr(name, '='))) { - std::cerr << "%% Expected -X property=value, not " << - name << std::endl; - exit(1); - } - - *val = '\0'; - val++; - - if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - } - break; - - default: - goto usage; - } + default: + goto usage; } + } - if (brokers.empty() || engine_path.empty() || optind != argc) { - usage: - std::string features; - conf->get("builtin.features", features); - fprintf(stderr, + if (brokers.empty() || engine_path.empty() || optind != argc) { + usage: + std::string features; + conf->get("builtin.features", features); + fprintf(stderr, "Usage: %s [options] -b -p \n" "\n" "OpenSSL engine integration example. 
This example fetches\n" @@ -180,83 +172,78 @@ int main (int argc, char **argv) { " -X Set arbitrary librdkafka configuration" " property\n" "\n", - argv[0], - RdKafka::version_str().c_str(), RdKafka::version(), - features.c_str(), - RdKafka::get_debug_contexts().c_str()); - exit(1); - } - - if (conf->set("bootstrap.servers", brokers, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - - if (conf->set("ssl.engine.location", engine_path, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - - if (ca_location.length() > 0 && - conf->set("ssl.ca.location", ca_location, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - - if (engine_id.length() > 0 && - conf->set("ssl.engine.id", engine_id, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - - /* engine_callback_data needs to be persistent - * and outlive the lifetime of the Kafka client handle. */ - if (engine_callback_data.length() > 0 && - conf->set_engine_callback_data((void *) engine_callback_data.c_str(), - errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - - /* We use the Certificiate verification callback to print the - * certificate name being used. */ - PrintingSSLVerifyCb ssl_verify_cb; - - if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - - /* - * Create producer using accumulated global configuration. 
- */ - RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); - if (!producer) { - std::cerr << "Failed to create producer: " << errstr << std::endl; - exit(1); - } - - std::cout << "% Created producer " << producer->name() << std::endl; - - class RdKafka::Metadata *metadata; - - /* Fetch metadata */ - RdKafka::ErrorCode err = producer->metadata(true, NULL, - &metadata, 5000); - if (err != RdKafka::ERR_NO_ERROR) - std::cerr << "%% Failed to acquire metadata: " << - RdKafka::err2str(err) << std::endl; - - metadata_print(metadata); - - delete metadata; - delete producer; - delete conf; - - return 0; + argv[0], RdKafka::version_str().c_str(), RdKafka::version(), + features.c_str(), RdKafka::get_debug_contexts().c_str()); + exit(1); + } + + if (conf->set("bootstrap.servers", brokers, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + if (conf->set("ssl.engine.location", engine_path, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + if (ca_location.length() > 0 && conf->set("ssl.ca.location", ca_location, + errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + if (engine_id.length() > 0 && + conf->set("ssl.engine.id", engine_id, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + /* engine_callback_data needs to be persistent + * and outlive the lifetime of the Kafka client handle. */ + if (engine_callback_data.length() > 0 && + conf->set_engine_callback_data((void *)engine_callback_data.c_str(), + errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + /* We use the Certificiate verification callback to print the + * certificate name being used. 
*/ + PrintingSSLVerifyCb ssl_verify_cb; + + if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + /* + * Create producer using accumulated global configuration. + */ + RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); + if (!producer) { + std::cerr << "Failed to create producer: " << errstr << std::endl; + exit(1); + } + + std::cout << "% Created producer " << producer->name() << std::endl; + + class RdKafka::Metadata *metadata; + + /* Fetch metadata */ + RdKafka::ErrorCode err = producer->metadata(true, NULL, &metadata, 5000); + if (err != RdKafka::ERR_NO_ERROR) + std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err) + << std::endl; + + metadata_print(metadata); + + delete metadata; + delete producer; + delete conf; + + return 0; } diff --git a/examples/producer.c b/examples/producer.c index fc9021718c..6fa25f3c16 100644 --- a/examples/producer.c +++ b/examples/producer.c @@ -47,7 +47,7 @@ static volatile sig_atomic_t run = 1; /** * @brief Signal termination of program */ -static void stop (int sig) { +static void stop(int sig) { run = 0; fclose(stdin); /* abort fgets() */ } @@ -64,15 +64,15 @@ static void stop (int sig) { * The callback is triggered from rd_kafka_poll() and executes on * the application's thread. 
*/ -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) fprintf(stderr, "%% Message delivery failed: %s\n", rd_kafka_err2str(rkmessage->err)); else fprintf(stderr, "%% Message delivered (%zd bytes, " - "partition %"PRId32")\n", + "partition %" PRId32 ")\n", rkmessage->len, rkmessage->partition); /* The rkmessage is destroyed automatically by librdkafka */ @@ -80,13 +80,13 @@ static void dr_msg_cb (rd_kafka_t *rk, -int main (int argc, char **argv) { - rd_kafka_t *rk; /* Producer instance handle */ - rd_kafka_conf_t *conf; /* Temporary configuration object */ - char errstr[512]; /* librdkafka API error reporting buffer */ - char buf[512]; /* Message value temporary buffer */ - const char *brokers; /* Argument: broker list */ - const char *topic; /* Argument: topic to produce to */ +int main(int argc, char **argv) { + rd_kafka_t *rk; /* Producer instance handle */ + rd_kafka_conf_t *conf; /* Temporary configuration object */ + char errstr[512]; /* librdkafka API error reporting buffer */ + char buf[512]; /* Message value temporary buffer */ + const char *brokers; /* Argument: broker list */ + const char *topic; /* Argument: topic to produce to */ /* * Argument validation @@ -109,8 +109,8 @@ int main (int argc, char **argv) { * host or host:port (default port 9092). * librdkafka will use the bootstrap brokers to acquire the full * set of brokers from the cluster. 
*/ - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%s\n", errstr); return 1; } @@ -132,8 +132,8 @@ int main (int argc, char **argv) { */ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); if (!rk) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", errstr); + fprintf(stderr, "%% Failed to create new producer: %s\n", + errstr); return 1; } @@ -149,12 +149,12 @@ int main (int argc, char **argv) { size_t len = strlen(buf); rd_kafka_resp_err_t err; - if (buf[len-1] == '\n') /* Remove newline */ + if (buf[len - 1] == '\n') /* Remove newline */ buf[--len] = '\0'; if (len == 0) { /* Empty line: only serve delivery reports */ - rd_kafka_poll(rk, 0/*non-blocking */); + rd_kafka_poll(rk, 0 /*non-blocking */); continue; } @@ -170,28 +170,28 @@ int main (int argc, char **argv) { */ retry: err = rd_kafka_producev( - /* Producer handle */ - rk, - /* Topic name */ - RD_KAFKA_V_TOPIC(topic), - /* Make a copy of the payload. */ - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - /* Message value and length */ - RD_KAFKA_V_VALUE(buf, len), - /* Per-Message opaque, provided in - * delivery report callback as - * msg_opaque. */ - RD_KAFKA_V_OPAQUE(NULL), - /* End sentinel */ - RD_KAFKA_V_END); + /* Producer handle */ + rk, + /* Topic name */ + RD_KAFKA_V_TOPIC(topic), + /* Make a copy of the payload. */ + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + /* Message value and length */ + RD_KAFKA_V_VALUE(buf, len), + /* Per-Message opaque, provided in + * delivery report callback as + * msg_opaque. */ + RD_KAFKA_V_OPAQUE(NULL), + /* End sentinel */ + RD_KAFKA_V_END); if (err) { /* * Failed to *enqueue* message for producing. 
*/ fprintf(stderr, - "%% Failed to produce to topic %s: %s\n", - topic, rd_kafka_err2str(err)); + "%% Failed to produce to topic %s: %s\n", topic, + rd_kafka_err2str(err)); if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) { /* If the internal queue is full, wait for @@ -204,11 +204,13 @@ int main (int argc, char **argv) { * The internal queue is limited by the * configuration property * queue.buffering.max.messages */ - rd_kafka_poll(rk, 1000/*block for max 1000ms*/); + rd_kafka_poll(rk, + 1000 /*block for max 1000ms*/); goto retry; } } else { - fprintf(stderr, "%% Enqueued message (%zd bytes) " + fprintf(stderr, + "%% Enqueued message (%zd bytes) " "for topic %s\n", len, topic); } @@ -225,7 +227,7 @@ int main (int argc, char **argv) { * to make sure previously produced messages have their * delivery report callback served (and any other callbacks * you register). */ - rd_kafka_poll(rk, 0/*non-blocking*/); + rd_kafka_poll(rk, 0 /*non-blocking*/); } @@ -233,7 +235,7 @@ int main (int argc, char **argv) { * rd_kafka_flush() is an abstraction over rd_kafka_poll() which * waits for all messages to be delivered. */ fprintf(stderr, "%% Flushing final messages..\n"); - rd_kafka_flush(rk, 10*1000 /* wait for max 10 seconds */); + rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */); /* If the output queue is still not empty there is an issue * with producing messages to the clusters. */ diff --git a/examples/producer.cpp b/examples/producer.cpp index 71c1e02cf6..ec3d387e61 100755 --- a/examples/producer.cpp +++ b/examples/producer.cpp @@ -52,34 +52,34 @@ static volatile sig_atomic_t run = 1; -static void sigterm (int sig) { +static void sigterm(int sig) { run = 0; } class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb { -public: - void dr_cb (RdKafka::Message &message) { + public: + void dr_cb(RdKafka::Message &message) { /* If message.err() is non-zero the message delivery failed permanently * for the message. 
*/ if (message.err()) - std::cerr << "% Message delivery failed: " << message.errstr() << std::endl; + std::cerr << "% Message delivery failed: " << message.errstr() + << std::endl; else - std::cerr << "% Message delivered to topic " << message.topic_name() << - " [" << message.partition() << "] at offset " << - message.offset() << std::endl; + std::cerr << "% Message delivered to topic " << message.topic_name() + << " [" << message.partition() << "] at offset " + << message.offset() << std::endl; } }; -int main (int argc, char **argv) { - +int main(int argc, char **argv) { if (argc != 3) { std::cerr << "Usage: " << argv[0] << " \n"; exit(1); } std::string brokers = argv[1]; - std::string topic = argv[2]; + std::string topic = argv[2]; /* * Create configuration object @@ -133,8 +133,8 @@ int main (int argc, char **argv) { /* * Read messages from stdin and produce to broker. */ - std::cout << "% Type message value and hit enter " << - "to produce message." << std::endl; + std::cout << "% Type message value and hit enter " + << "to produce message." << std::endl; for (std::string line; run && std::getline(std::cin, line);) { if (line.empty()) { @@ -153,32 +153,31 @@ int main (int argc, char **argv) { * has been delivered (or failed permanently after retries). */ retry: - RdKafka::ErrorCode err = - producer->produce( - /* Topic name */ - topic, - /* Any Partition: the builtin partitioner will be - * used to assign the message to a topic based - * on the message key, or random partition if - * the key is not set. 
*/ - RdKafka::Topic::PARTITION_UA, - /* Make a copy of the value */ - RdKafka::Producer::RK_MSG_COPY /* Copy payload */, - /* Value */ - const_cast(line.c_str()), line.size(), - /* Key */ - NULL, 0, - /* Timestamp (defaults to current time) */ - 0, - /* Message headers, if any */ - NULL, - /* Per-message opaque value passed to - * delivery report */ - NULL); + RdKafka::ErrorCode err = producer->produce( + /* Topic name */ + topic, + /* Any Partition: the builtin partitioner will be + * used to assign the message to a topic based + * on the message key, or random partition if + * the key is not set. */ + RdKafka::Topic::PARTITION_UA, + /* Make a copy of the value */ + RdKafka::Producer::RK_MSG_COPY /* Copy payload */, + /* Value */ + const_cast(line.c_str()), line.size(), + /* Key */ + NULL, 0, + /* Timestamp (defaults to current time) */ + 0, + /* Message headers, if any */ + NULL, + /* Per-message opaque value passed to + * delivery report */ + NULL); if (err != RdKafka::ERR_NO_ERROR) { - std::cerr << "% Failed to produce to topic " << topic << ": " << - RdKafka::err2str(err) << std::endl; + std::cerr << "% Failed to produce to topic " << topic << ": " + << RdKafka::err2str(err) << std::endl; if (err == RdKafka::ERR__QUEUE_FULL) { /* If the internal queue is full, wait for @@ -191,13 +190,13 @@ int main (int argc, char **argv) { * The internal queue is limited by the * configuration property * queue.buffering.max.messages */ - producer->poll(1000/*block for max 1000ms*/); + producer->poll(1000 /*block for max 1000ms*/); goto retry; } } else { - std::cerr << "% Enqueued message (" << line.size() << " bytes) " << - "for topic " << topic << std::endl; + std::cerr << "% Enqueued message (" << line.size() << " bytes) " + << "for topic " << topic << std::endl; } /* A producer application should continually serve @@ -217,11 +216,11 @@ int main (int argc, char **argv) { * flush() is an abstraction over poll() which * waits for all messages to be delivered. 
*/ std::cerr << "% Flushing final messages..." << std::endl; - producer->flush(10*1000 /* wait for max 10 seconds */); + producer->flush(10 * 1000 /* wait for max 10 seconds */); if (producer->outq_len() > 0) - std::cerr << "% " << producer->outq_len() << - " message(s) were not delivered" << std::endl; + std::cerr << "% " << producer->outq_len() + << " message(s) were not delivered" << std::endl; delete producer; diff --git a/examples/rdkafka_complex_consumer_example.c b/examples/rdkafka_complex_consumer_example.c index e402924303..1632b30305 100644 --- a/examples/rdkafka_complex_consumer_example.c +++ b/examples/rdkafka_complex_consumer_example.c @@ -3,24 +3,24 @@ * * Copyright (c) 2015, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -44,63 +44,61 @@ /* Typical include path would be , but this program * is builtin from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static volatile sig_atomic_t run = 1; static rd_kafka_t *rk; static int exit_eof = 0; -static int wait_eof = 0; /* number of partitions awaiting EOF */ -static int quiet = 0; -static enum { - OUTPUT_HEXDUMP, - OUTPUT_RAW, +static int wait_eof = 0; /* number of partitions awaiting EOF */ +static int quiet = 0; +static enum { + OUTPUT_HEXDUMP, + OUTPUT_RAW, } output = OUTPUT_HEXDUMP; -static void stop (int sig) { +static void stop(int sig) { if (!run) exit(1); - run = 0; - fclose(stdin); /* abort fgets() */ + run = 0; + fclose(stdin); /* abort fgets() */ } -static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) { - const char *p = (const char *)ptr; - unsigned int of = 0; +static void hexdump(FILE *fp, const char *name, const void *ptr, size_t len) { + const char *p = (const char *)ptr; + unsigned int of = 0; - if (name) - fprintf(fp, "%s hexdump (%zd bytes):\n", name, len); + if (name) + fprintf(fp, "%s hexdump (%zd bytes):\n", name, len); - for (of = 0 ; of < len ; of += 16) { - char hexen[16*3+1]; - char charen[16+1]; - int hof = 0; + for (of = 0; of < len; of += 16) { + char hexen[16 * 3 + 1]; + char charen[16 + 1]; + int hof = 0; - int cof = 0; - int i; + int cof = 0; + int i; - for (i = of ; i < (int)of + 16 && i < (int)len ; i++) { - hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff); - cof += sprintf(charen+cof, "%c", - isprint((int)p[i]) ? p[i] : '.'); - } - fprintf(fp, "%08x: %-48s %-16s\n", - of, hexen, charen); - } + for (i = of; i < (int)of + 16 && i < (int)len; i++) { + hof += sprintf(hexen + hof, "%02x ", p[i] & 0xff); + cof += sprintf(charen + cof, "%c", + isprint((int)p[i]) ? 
p[i] : '.'); + } + fprintf(fp, "%08x: %-48s %-16s\n", of, hexen, charen); + } } /** * Kafka logger callback (optional) */ -static void logger (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { - struct timeval tv; - gettimeofday(&tv, NULL); - fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", - (int)tv.tv_sec, (int)(tv.tv_usec / 1000), - level, fac, rd_kafka_name(rk), buf); +static void +logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) { + struct timeval tv; + gettimeofday(&tv, NULL); + fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", (int)tv.tv_sec, + (int)(tv.tv_usec / 1000), level, fac, rd_kafka_name(rk), buf); } @@ -111,32 +109,34 @@ static void logger (const rd_kafka_t *rk, int level, * librdkafka to the application. The application needs to check * the `rkmessage->err` field for this purpose. */ -static void msg_consume (rd_kafka_message_t *rkmessage) { - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - fprintf(stderr, - "%% Consumer reached end of %s [%"PRId32"] " - "message queue at offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, rkmessage->offset); - - if (exit_eof && --wait_eof == 0) { +static void msg_consume(rd_kafka_message_t *rkmessage) { + if (rkmessage->err) { + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + fprintf(stderr, + "%% Consumer reached end of %s [%" PRId32 + "] " + "message queue at offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); + + if (exit_eof && --wait_eof == 0) { fprintf(stderr, "%% All partition(s) reached EOF: " "exiting\n"); - run = 0; + run = 0; } - return; - } + return; + } if (rkmessage->rkt) - fprintf(stderr, "%% Consume error for " - "topic \"%s\" [%"PRId32"] " - "offset %"PRId64": %s\n", + fprintf(stderr, + "%% Consume error for " + "topic \"%s\" [%" PRId32 + "] " + "offset %" PRId64 ": %s\n", rd_kafka_topic_name(rkmessage->rkt), - 
rkmessage->partition, - rkmessage->offset, + rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage)); else fprintf(stderr, "%% Consumer error: %s: %s\n", @@ -146,59 +146,58 @@ static void msg_consume (rd_kafka_message_t *rkmessage) { if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) run = 0; - return; - } + return; + } - if (!quiet) - fprintf(stdout, "%% Message (topic %s [%"PRId32"], " - "offset %"PRId64", %zd bytes):\n", + if (!quiet) + fprintf(stdout, + "%% Message (topic %s [%" PRId32 + "], " + "offset %" PRId64 ", %zd bytes):\n", rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset, rkmessage->len); - - if (rkmessage->key_len) { - if (output == OUTPUT_HEXDUMP) - hexdump(stdout, "Message Key", - rkmessage->key, rkmessage->key_len); - else - printf("Key: %.*s\n", - (int)rkmessage->key_len, (char *)rkmessage->key); - } - - if (output == OUTPUT_HEXDUMP) - hexdump(stdout, "Message Payload", - rkmessage->payload, rkmessage->len); - else - printf("%.*s\n", - (int)rkmessage->len, (char *)rkmessage->payload); + rkmessage->partition, rkmessage->offset, + rkmessage->len); + + if (rkmessage->key_len) { + if (output == OUTPUT_HEXDUMP) + hexdump(stdout, "Message Key", rkmessage->key, + rkmessage->key_len); + else + printf("Key: %.*s\n", (int)rkmessage->key_len, + (char *)rkmessage->key); + } + + if (output == OUTPUT_HEXDUMP) + hexdump(stdout, "Message Payload", rkmessage->payload, + rkmessage->len); + else + printf("%.*s\n", (int)rkmessage->len, + (char *)rkmessage->payload); } -static void print_partition_list (FILE *fp, - const rd_kafka_topic_partition_list_t - *partitions) { +static void +print_partition_list(FILE *fp, + const rd_kafka_topic_partition_list_t *partitions) { int i; - for (i = 0 ; i < partitions->cnt ; i++) { - fprintf(fp, "%s %s [%"PRId32"] offset %"PRId64, - i > 0 ? 
",":"", - partitions->elems[i].topic, + for (i = 0; i < partitions->cnt; i++) { + fprintf(fp, "%s %s [%" PRId32 "] offset %" PRId64, + i > 0 ? "," : "", partitions->elems[i].topic, partitions->elems[i].partition, partitions->elems[i].offset); } fprintf(fp, "\n"); - } -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque) { - rd_kafka_error_t *error = NULL; +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque) { + rd_kafka_error_t *error = NULL; rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR; fprintf(stderr, "%% Consumer group rebalanced: "); - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: fprintf(stderr, "assigned (%s):\n", rd_kafka_rebalance_protocol(rk)); @@ -220,14 +219,13 @@ static void rebalance_cb (rd_kafka_t *rk, error = rd_kafka_incremental_unassign(rk, partitions); wait_eof -= partitions->cnt; } else { - ret_err = rd_kafka_assign(rk, NULL); + ret_err = rd_kafka_assign(rk, NULL); wait_eof = 0; } break; default: - fprintf(stderr, "failed: %s\n", - rd_kafka_err2str(err)); + fprintf(stderr, "failed: %s\n", rd_kafka_err2str(err)); rd_kafka_assign(rk, NULL); break; } @@ -243,7 +241,7 @@ static void rebalance_cb (rd_kafka_t *rk, } -static int describe_groups (rd_kafka_t *rk, const char *group) { +static int describe_groups(rd_kafka_t *rk, const char *group) { rd_kafka_resp_err_t err; const struct rd_kafka_group_list *grplist; int i; @@ -256,20 +254,21 @@ static int describe_groups (rd_kafka_t *rk, const char *group) { return -1; } - for (i = 0 ; i < grplist->group_cnt ; i++) { + for (i = 0; i < grplist->group_cnt; i++) { const struct rd_kafka_group_info *gi = &grplist->groups[i]; int j; printf("Group \"%s\" in state %s on broker %d (%s:%d)\n", - gi->group, gi->state, - gi->broker.id, gi->broker.host, gi->broker.port); + gi->group, gi->state, gi->broker.id, 
gi->broker.host, + gi->broker.port); if (gi->err) printf(" Error: %s\n", rd_kafka_err2str(gi->err)); - printf(" Protocol type \"%s\", protocol \"%s\", " - "with %d member(s):\n", - gi->protocol_type, gi->protocol, gi->member_cnt); + printf( + " Protocol type \"%s\", protocol \"%s\", " + "with %d member(s):\n", + gi->protocol_type, gi->protocol, gi->member_cnt); - for (j = 0 ; j < gi->member_cnt ; j++) { + for (j = 0; j < gi->member_cnt; j++) { const struct rd_kafka_group_member_info *mi; mi = &gi->members[j]; @@ -293,187 +292,182 @@ static int describe_groups (rd_kafka_t *rk, const char *group) { -static void sig_usr1 (int sig) { - rd_kafka_dump(stdout, rk); +static void sig_usr1(int sig) { + rd_kafka_dump(stdout, rk); } -int main (int argc, char **argv) { - char mode = 'C'; - char *brokers = "localhost:9092"; - int opt; - rd_kafka_conf_t *conf; - char errstr[512]; - const char *debug = NULL; - int do_conf_dump = 0; - char tmp[16]; +int main(int argc, char **argv) { + char mode = 'C'; + char *brokers = "localhost:9092"; + int opt; + rd_kafka_conf_t *conf; + char errstr[512]; + const char *debug = NULL; + int do_conf_dump = 0; + char tmp[16]; rd_kafka_resp_err_t err; char *group = NULL; rd_kafka_topic_partition_list_t *topics; int is_subscription; int i; - quiet = !isatty(STDIN_FILENO); + quiet = !isatty(STDIN_FILENO); - /* Kafka configuration */ - conf = rd_kafka_conf_new(); + /* Kafka configuration */ + conf = rd_kafka_conf_new(); /* Set logger */ rd_kafka_conf_set_log_cb(conf, logger); - /* Quick termination */ - snprintf(tmp, sizeof(tmp), "%i", SIGIO); - rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); + /* Quick termination */ + snprintf(tmp, sizeof(tmp), "%i", SIGIO); + rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); - while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) { - switch (opt) { - case 'b': - brokers = optarg; - break; + while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) { + switch (opt) { + 
case 'b': + brokers = optarg; + break; case 'g': group = optarg; break; - case 'e': - exit_eof = 1; - break; - case 'd': - debug = optarg; - break; - case 'q': - quiet = 1; - break; - case 'A': - output = OUTPUT_RAW; - break; - case 'X': - { - char *name, *val; - rd_kafka_conf_res_t res; - - if (!strcmp(optarg, "list") || - !strcmp(optarg, "help")) { - rd_kafka_conf_properties_show(stdout); - exit(0); - } - - if (!strcmp(optarg, "dump")) { - do_conf_dump = 1; - continue; - } - - name = optarg; - if (!(val = strchr(name, '='))) { - fprintf(stderr, "%% Expected " - "-X property=value, not %s\n", name); - exit(1); - } - - *val = '\0'; - val++; - - res = rd_kafka_conf_set(conf, name, val, - errstr, sizeof(errstr)); - - if (res != RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - } - break; + case 'e': + exit_eof = 1; + break; + case 'd': + debug = optarg; + break; + case 'q': + quiet = 1; + break; + case 'A': + output = OUTPUT_RAW; + break; + case 'X': { + char *name, *val; + rd_kafka_conf_res_t res; + + if (!strcmp(optarg, "list") || + !strcmp(optarg, "help")) { + rd_kafka_conf_properties_show(stdout); + exit(0); + } + + if (!strcmp(optarg, "dump")) { + do_conf_dump = 1; + continue; + } + + name = optarg; + if (!(val = strchr(name, '='))) { + fprintf(stderr, + "%% Expected " + "-X property=value, not %s\n", + name); + exit(1); + } + + *val = '\0'; + val++; + + res = rd_kafka_conf_set(conf, name, val, errstr, + sizeof(errstr)); + + if (res != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + } break; case 'D': case 'O': mode = opt; break; - default: - goto usage; - } - } + default: + goto usage; + } + } - if (do_conf_dump) { - const char **arr; - size_t cnt; - int pass; + if (do_conf_dump) { + const char **arr; + size_t cnt; + int pass; - for (pass = 0 ; pass < 2 ; pass++) { - if (pass == 0) { - arr = rd_kafka_conf_dump(conf, &cnt); - printf("# Global config\n"); - } else { + for (pass = 0; pass < 2; pass++) { + if 
(pass == 0) { + arr = rd_kafka_conf_dump(conf, &cnt); + printf("# Global config\n"); + } else { rd_kafka_topic_conf_t *topic_conf = - rd_kafka_conf_get_default_topic_conf( - conf); + rd_kafka_conf_get_default_topic_conf(conf); if (topic_conf) { printf("# Topic config\n"); arr = rd_kafka_topic_conf_dump( - topic_conf, &cnt); + topic_conf, &cnt); } else { arr = NULL; } - } + } if (!arr) continue; - for (i = 0 ; i < (int)cnt ; i += 2) - printf("%s = %s\n", - arr[i], arr[i+1]); + for (i = 0; i < (int)cnt; i += 2) + printf("%s = %s\n", arr[i], arr[i + 1]); printf("\n"); - rd_kafka_conf_dump_free(arr, cnt); - } + rd_kafka_conf_dump_free(arr, cnt); + } - exit(0); - } + exit(0); + } - if (strchr("OC", mode) && optind == argc) { - usage: - fprintf(stderr, - "Usage: %s [options] ..\n" - "\n" - "librdkafka version %s (0x%08x)\n" - "\n" - " Options:\n" + if (strchr("OC", mode) && optind == argc) { + usage: + fprintf(stderr, + "Usage: %s [options] ..\n" + "\n" + "librdkafka version %s (0x%08x)\n" + "\n" + " Options:\n" " -g Consumer group (%s)\n" - " -b Broker address (%s)\n" - " -e Exit consumer when last message\n" - " in partition has been received.\n" + " -b Broker address (%s)\n" + " -e Exit consumer when last message\n" + " in partition has been received.\n" " -D Describe group.\n" " -O Get commmitted offset(s)\n" - " -d [facs..] Enable debugging contexts:\n" - " %s\n" - " -q Be quiet\n" - " -A Raw payload output (consumer)\n" - " -X Set arbitrary librdkafka " - "configuration property\n" - " Use '-X list' to see the full list\n" - " of supported properties.\n" - "\n" + " -d [facs..] 
Enable debugging contexts:\n" + " %s\n" + " -q Be quiet\n" + " -A Raw payload output (consumer)\n" + " -X Set arbitrary librdkafka " + "configuration property\n" + " Use '-X list' to see the full list\n" + " of supported properties.\n" + "\n" "For balanced consumer groups use the 'topic1 topic2..'" " format\n" "and for static assignment use " "'topic1:part1 topic1:part2 topic2:part1..'\n" - "\n", - argv[0], - rd_kafka_version_str(), rd_kafka_version(), - group, brokers, - RD_KAFKA_DEBUG_CONTEXTS); - exit(1); - } - - - signal(SIGINT, stop); - signal(SIGUSR1, sig_usr1); - - if (debug && - rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% Debug configuration failed: %s: %s\n", - errstr, debug); - exit(1); - } + "\n", + argv[0], rd_kafka_version_str(), rd_kafka_version(), + group, brokers, RD_KAFKA_DEBUG_CONTEXTS); + exit(1); + } + + + signal(SIGINT, stop); + signal(SIGUSR1, sig_usr1); + + if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% Debug configuration failed: %s: %s\n", + errstr, debug); + exit(1); + } /* * Client/Consumer group @@ -483,9 +477,8 @@ int main (int argc, char **argv) { /* Consumer groups require a group id */ if (!group) group = "rdkafka_consumer_example"; - if (rd_kafka_conf_set(conf, "group.id", group, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "group.id", group, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } @@ -493,22 +486,21 @@ int main (int argc, char **argv) { /* Callback called on partition assignment changes */ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); - rd_kafka_conf_set(conf, "enable.partition.eof", "true", - NULL, 0); + rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL, + 0); } /* Set bootstrap servers */ - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != 
RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new consumer: %s\n", + if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, "%% Failed to create new consumer: %s\n", errstr); exit(1); } @@ -525,17 +517,17 @@ int main (int argc, char **argv) { /* Redirect rd_kafka_poll() to consumer_poll() */ rd_kafka_poll_set_consumer(rk); - topics = rd_kafka_topic_partition_list_new(argc - optind); + topics = rd_kafka_topic_partition_list_new(argc - optind); is_subscription = 1; - for (i = optind ; i < argc ; i++) { + for (i = optind; i < argc; i++) { /* Parse "topic[:part] */ char *topic = argv[i]; char *t; int32_t partition = -1; if ((t = strstr(topic, ":"))) { - *t = '\0'; - partition = atoi(t+1); + *t = '\0'; + partition = atoi(t + 1); is_subscription = 0; /* is assignment */ wait_eof++; } @@ -553,16 +545,14 @@ int main (int argc, char **argv) { exit(1); } - for (i = 0 ; i < topics->cnt ; i++) { + for (i = 0; i < topics->cnt; i++) { rd_kafka_topic_partition_t *p = &topics->elems[i]; - printf("Topic \"%s\" partition %"PRId32, - p->topic, p->partition); + printf("Topic \"%s\" partition %" PRId32, p->topic, + p->partition); if (p->err) - printf(" error %s", - rd_kafka_err2str(p->err)); + printf(" error %s", rd_kafka_err2str(p->err)); else { - printf(" offset %"PRId64"", - p->offset); + printf(" offset %" PRId64 "", p->offset); if (p->metadata_size) printf(" (%d bytes of metadata)", @@ -588,8 +578,7 @@ int main (int argc, char **argv) { fprintf(stderr, "%% Assigning %d partitions\n", topics->cnt); if ((err = rd_kafka_assign(rk, topics))) { - fprintf(stderr, - "%% Failed to assign partitions: %s\n", + fprintf(stderr, "%% Failed to assign partitions: %s\n", 
rd_kafka_err2str(err)); } } @@ -617,12 +606,12 @@ int main (int argc, char **argv) { /* Destroy handle */ rd_kafka_destroy(rk); - /* Let background threads clean up and terminate cleanly. */ - run = 5; - while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1) - printf("Waiting for librdkafka to decommission\n"); - if (run <= 0) - rd_kafka_dump(stdout, rk); + /* Let background threads clean up and terminate cleanly. */ + run = 5; + while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1) + printf("Waiting for librdkafka to decommission\n"); + if (run <= 0) + rd_kafka_dump(stdout, rk); - return 0; + return 0; } diff --git a/examples/rdkafka_complex_consumer_example.cpp b/examples/rdkafka_complex_consumer_example.cpp index 5b87234ce5..b4f158cbd9 100644 --- a/examples/rdkafka_complex_consumer_example.cpp +++ b/examples/rdkafka_complex_consumer_example.cpp @@ -3,24 +3,24 @@ * * Copyright (c) 2014, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -63,13 +63,13 @@ static volatile sig_atomic_t run = 1; -static bool exit_eof = false; -static int eof_cnt = 0; -static int partition_cnt = 0; -static int verbosity = 1; -static long msg_cnt = 0; -static int64_t msg_bytes = 0; -static void sigterm (int sig) { +static bool exit_eof = false; +static int eof_cnt = 0; +static int partition_cnt = 0; +static int verbosity = 1; +static long msg_cnt = 0; +static int64_t msg_bytes = 0; +static void sigterm(int sig) { run = 0; } @@ -77,81 +77,80 @@ static void sigterm (int sig) { /** * @brief format a string timestamp from the current time */ -static void print_time () { +static void print_time() { #ifndef _WIN32 - struct timeval tv; - char buf[64]; - gettimeofday(&tv, NULL); - strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec)); - fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000)); + struct timeval tv; + char buf[64]; + gettimeofday(&tv, NULL); + strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec)); + fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000)); #else - SYSTEMTIME lt = {0}; - GetLocalTime(<); - // %Y-%m-%d %H:%M:%S.xxx: - fprintf(stderr, "%04d-%02d-%02d %02d:%02d:%02d.%03d: ", - lt.wYear, lt.wMonth, lt.wDay, - lt.wHour, lt.wMinute, lt.wSecond, lt.wMilliseconds); + SYSTEMTIME lt = {0}; + GetLocalTime(<); + // %Y-%m-%d %H:%M:%S.xxx: + fprintf(stderr, "%04d-%02d-%02d %02d:%02d:%02d.%03d: ", lt.wYear, lt.wMonth, + lt.wDay, lt.wHour, lt.wMinute, lt.wSecond, lt.wMilliseconds); #endif } class ExampleEventCb : public RdKafka::EventCb { public: - void event_cb (RdKafka::Event &event) { - + void event_cb(RdKafka::Event &event) { print_time(); - switch (event.type()) - { - case RdKafka::Event::EVENT_ERROR: - if (event.fatal()) { - std::cerr << "FATAL "; - run = 0; - } - std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; - - case RdKafka::Event::EVENT_STATS: - std::cerr << "\"STATS\": " << event.str() << 
std::endl; - break; - - case RdKafka::Event::EVENT_LOG: - fprintf(stderr, "LOG-%i-%s: %s\n", - event.severity(), event.fac().c_str(), event.str().c_str()); - break; - - case RdKafka::Event::EVENT_THROTTLE: - std::cerr << "THROTTLED: " << event.throttle_time() << "ms by " << - event.broker_name() << " id " << (int)event.broker_id() << std::endl; - break; - - default: - std::cerr << "EVENT " << event.type() << - " (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; + switch (event.type()) { + case RdKafka::Event::EVENT_ERROR: + if (event.fatal()) { + std::cerr << "FATAL "; + run = 0; + } + std::cerr << "ERROR (" << RdKafka::err2str(event.err()) + << "): " << event.str() << std::endl; + break; + + case RdKafka::Event::EVENT_STATS: + std::cerr << "\"STATS\": " << event.str() << std::endl; + break; + + case RdKafka::Event::EVENT_LOG: + fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(), + event.str().c_str()); + break; + + case RdKafka::Event::EVENT_THROTTLE: + std::cerr << "THROTTLED: " << event.throttle_time() << "ms by " + << event.broker_name() << " id " << (int)event.broker_id() + << std::endl; + break; + + default: + std::cerr << "EVENT " << event.type() << " (" + << RdKafka::err2str(event.err()) << "): " << event.str() + << std::endl; + break; } } }; class ExampleRebalanceCb : public RdKafka::RebalanceCb { -private: - static void part_list_print (const std::vector&partitions){ - for (unsigned int i = 0 ; i < partitions.size() ; i++) - std::cerr << partitions[i]->topic() << - "[" << partitions[i]->partition() << "], "; + private: + static void part_list_print( + const std::vector &partitions) { + for (unsigned int i = 0; i < partitions.size(); i++) + std::cerr << partitions[i]->topic() << "[" << partitions[i]->partition() + << "], "; std::cerr << "\n"; } -public: - void rebalance_cb (RdKafka::KafkaConsumer *consumer, - RdKafka::ErrorCode err, - std::vector &partitions) { + public: + void 
rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector &partitions) { std::cerr << "RebalanceCb: " << RdKafka::err2str(err) << ": "; part_list_print(partitions); - RdKafka::Error *error = NULL; + RdKafka::Error *error = NULL; RdKafka::ErrorCode ret_err = RdKafka::ERR_NO_ERROR; if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { @@ -165,7 +164,7 @@ class ExampleRebalanceCb : public RdKafka::RebalanceCb { error = consumer->incremental_unassign(partitions); partition_cnt -= (int)partitions.size(); } else { - ret_err = consumer->unassign(); + ret_err = consumer->unassign(); partition_cnt = 0; } } @@ -176,66 +175,65 @@ class ExampleRebalanceCb : public RdKafka::RebalanceCb { delete error; } else if (ret_err) std::cerr << "assign failed: " << RdKafka::err2str(ret_err) << "\n"; - } }; -void msg_consume(RdKafka::Message* message, void* opaque) { +void msg_consume(RdKafka::Message *message, void *opaque) { switch (message->err()) { - case RdKafka::ERR__TIMED_OUT: - break; - - case RdKafka::ERR_NO_ERROR: - /* Real message */ - msg_cnt++; - msg_bytes += message->len(); - if (verbosity >= 3) - std::cerr << "Read msg at offset " << message->offset() << std::endl; - RdKafka::MessageTimestamp ts; - ts = message->timestamp(); - if (verbosity >= 2 && - ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) { - std::string tsname = "?"; - if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME) - tsname = "create time"; - else if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME) - tsname = "log append time"; - std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl; - } - if (verbosity >= 2 && message->key()) { - std::cout << "Key: " << *message->key() << std::endl; - } - if (verbosity >= 1) { - printf("%.*s\n", - static_cast(message->len()), - static_cast(message->payload())); - } - break; - - case RdKafka::ERR__PARTITION_EOF: - /* Last message */ - if (exit_eof && ++eof_cnt == partition_cnt) { - 
std::cerr << "%% EOF reached for all " << partition_cnt << - " partition(s)" << std::endl; - run = 0; - } - break; - - case RdKafka::ERR__UNKNOWN_TOPIC: - case RdKafka::ERR__UNKNOWN_PARTITION: - std::cerr << "Consume failed: " << message->errstr() << std::endl; - run = 0; - break; + case RdKafka::ERR__TIMED_OUT: + break; + + case RdKafka::ERR_NO_ERROR: + /* Real message */ + msg_cnt++; + msg_bytes += message->len(); + if (verbosity >= 3) + std::cerr << "Read msg at offset " << message->offset() << std::endl; + RdKafka::MessageTimestamp ts; + ts = message->timestamp(); + if (verbosity >= 2 && + ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) { + std::string tsname = "?"; + if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME) + tsname = "create time"; + else if (ts.type == + RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME) + tsname = "log append time"; + std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl; + } + if (verbosity >= 2 && message->key()) { + std::cout << "Key: " << *message->key() << std::endl; + } + if (verbosity >= 1) { + printf("%.*s\n", static_cast(message->len()), + static_cast(message->payload())); + } + break; - default: - /* Errors */ - std::cerr << "Consume failed: " << message->errstr() << std::endl; + case RdKafka::ERR__PARTITION_EOF: + /* Last message */ + if (exit_eof && ++eof_cnt == partition_cnt) { + std::cerr << "%% EOF reached for all " << partition_cnt << " partition(s)" + << std::endl; run = 0; + } + break; + + case RdKafka::ERR__UNKNOWN_TOPIC: + case RdKafka::ERR__UNKNOWN_PARTITION: + std::cerr << "Consume failed: " << message->errstr() << std::endl; + run = 0; + break; + + default: + /* Errors */ + std::cerr << "Consume failed: " << message->errstr() << std::endl; + run = 0; } } -int main (int argc, char **argv) { +int main(int argc, char **argv) { std::string brokers = "localhost"; std::string errstr; std::string topic_str; @@ -258,7 +256,7 @@ int main (int argc, char 
**argv) { while ((opt = getopt(argc, argv, "g:b:z:qd:eX:AM:qv")) != -1) { switch (opt) { case 'g': - if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) { + if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) { std::cerr << errstr << std::endl; exit(1); } @@ -268,9 +266,9 @@ int main (int argc, char **argv) { break; case 'z': if (conf->set("compression.codec", optarg, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); } break; case 'e': @@ -286,47 +284,44 @@ int main (int argc, char **argv) { exit(1); } break; - case 'X': - { - char *name, *val; - - if (!strcmp(optarg, "dump")) { - do_conf_dump = true; - continue; - } - - name = optarg; - if (!(val = strchr(name, '='))) { - std::cerr << "%% Expected -X property=value, not " << - name << std::endl; - exit(1); - } - - *val = '\0'; - val++; - - RdKafka::Conf::ConfResult res = conf->set(name, val, errstr); - if (res != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } + case 'X': { + char *name, *val; + + if (!strcmp(optarg, "dump")) { + do_conf_dump = true; + continue; } - break; - case 'q': - verbosity--; - break; + name = optarg; + if (!(val = strchr(name, '='))) { + std::cerr << "%% Expected -X property=value, not " << name << std::endl; + exit(1); + } - case 'v': - verbosity++; - break; + *val = '\0'; + val++; + + RdKafka::Conf::ConfResult res = conf->set(name, val, errstr); + if (res != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + } break; + + case 'q': + verbosity--; + break; + + case 'v': + verbosity++; + break; default: goto usage; } } - for (; optind < argc ; optind++) + for (; optind < argc; optind++) topics.push_back(std::string(argv[optind])); if (topics.empty() || optind != argc) { @@ -354,19 +349,20 @@ int main (int argc, char **argv) { " -v Increase verbosity\n" "\n" "\n", - argv[0], - 
RdKafka::version_str().c_str(), RdKafka::version(), - RdKafka::get_debug_contexts().c_str()); - exit(1); + argv[0], RdKafka::version_str().c_str(), RdKafka::version(), + RdKafka::get_debug_contexts().c_str()); + exit(1); } if (exit_eof) { std::string strategy; if (conf->get("partition.assignment.strategy", strategy) == - RdKafka::Conf::CONF_OK && strategy == "cooperative-sticky") { - std::cerr << "Error: this example has not been modified to " << - "support -e (exit on EOF) when the partition.assignment.strategy " << - "is set to " << strategy << ": remove -e from the command line\n"; + RdKafka::Conf::CONF_OK && + strategy == "cooperative-sticky") { + std::cerr + << "Error: this example has not been modified to " + << "support -e (exit on EOF) when the partition.assignment.strategy " + << "is set to " << strategy << ": remove -e from the command line\n"; exit(1); } } @@ -392,7 +388,7 @@ int main (int argc, char **argv) { std::cout << "# Global config" << std::endl; for (std::list::iterator it = dump->begin(); - it != dump->end(); ) { + it != dump->end();) { std::cout << *it << " = "; it++; std::cout << *it << std::endl; @@ -414,7 +410,8 @@ int main (int argc, char **argv) { /* * Create consumer using accumulated global configuration. 
*/ - RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr); + RdKafka::KafkaConsumer *consumer = + RdKafka::KafkaConsumer::create(conf, errstr); if (!consumer) { std::cerr << "Failed to create consumer: " << errstr << std::endl; exit(1); @@ -430,8 +427,8 @@ int main (int argc, char **argv) { */ RdKafka::ErrorCode err = consumer->subscribe(topics); if (err) { - std::cerr << "Failed to subscribe to " << topics.size() << " topics: " - << RdKafka::err2str(err) << std::endl; + std::cerr << "Failed to subscribe to " << topics.size() + << " topics: " << RdKafka::err2str(err) << std::endl; exit(1); } @@ -454,8 +451,8 @@ int main (int argc, char **argv) { consumer->close(); delete consumer; - std::cerr << "% Consumed " << msg_cnt << " messages (" - << msg_bytes << " bytes)" << std::endl; + std::cerr << "% Consumed " << msg_cnt << " messages (" << msg_bytes + << " bytes)" << std::endl; /* * Wait for RdKafka to decommission. diff --git a/examples/rdkafka_consume_batch.cpp b/examples/rdkafka_consume_batch.cpp index bdca44daad..576b396f87 100644 --- a/examples/rdkafka_consume_batch.cpp +++ b/examples/rdkafka_consume_batch.cpp @@ -68,7 +68,7 @@ static volatile sig_atomic_t run = 1; -static void sigterm (int sig) { +static void sigterm(int sig) { run = 0; } @@ -77,11 +77,11 @@ static void sigterm (int sig) { /** * @returns the current wall-clock time in milliseconds */ -static int64_t now () { +static int64_t now() { #ifndef _WIN32 - struct timeval tv; - gettimeofday(&tv, NULL); - return ((int64_t)tv.tv_sec * 1000) + (tv.tv_usec / 1000); + struct timeval tv; + gettimeofday(&tv, NULL); + return ((int64_t)tv.tv_sec * 1000) + (tv.tv_usec / 1000); #else #error "now() not implemented for Windows, please submit a PR" #endif @@ -93,13 +93,14 @@ static int64_t now () { * @brief Accumulate a batch of \p batch_size messages, but wait * no longer than \p batch_tmout milliseconds. 
*/ -static std::vector -consume_batch (RdKafka::KafkaConsumer *consumer, size_t batch_size, int batch_tmout) { - +static std::vector consume_batch( + RdKafka::KafkaConsumer *consumer, + size_t batch_size, + int batch_tmout) { std::vector msgs; msgs.reserve(batch_size); - int64_t end = now() + batch_tmout; + int64_t end = now() + batch_tmout; int remaining_timeout = batch_tmout; while (msgs.size() < batch_size) { @@ -130,17 +131,18 @@ consume_batch (RdKafka::KafkaConsumer *consumer, size_t batch_size, int batch_tm } -int main (int argc, char **argv) { +int main(int argc, char **argv) { std::string errstr; std::string topic_str; std::vector topics; - int batch_size = 100; + int batch_size = 100; int batch_tmout = 1000; /* Create configuration objects */ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - if (conf->set("enable.partition.eof", "false", errstr) != RdKafka::Conf::CONF_OK) { + if (conf->set("enable.partition.eof", "false", errstr) != + RdKafka::Conf::CONF_OK) { std::cerr << errstr << std::endl; exit(1); } @@ -150,7 +152,7 @@ int main (int argc, char **argv) { while ((opt = getopt(argc, argv, "g:B:T:b:X:")) != -1) { switch (opt) { case 'g': - if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) { + if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) { std::cerr << errstr << std::endl; exit(1); } @@ -165,32 +167,30 @@ int main (int argc, char **argv) { break; case 'b': - if (conf->set("bootstrap.servers", optarg, errstr) != RdKafka::Conf::CONF_OK) { + if (conf->set("bootstrap.servers", optarg, errstr) != + RdKafka::Conf::CONF_OK) { std::cerr << errstr << std::endl; exit(1); } break; - case 'X': - { - char *name, *val; + case 'X': { + char *name, *val; - name = optarg; - if (!(val = strchr(name, '='))) { - std::cerr << "%% Expected -X property=value, not " << - name << std::endl; - exit(1); - } + name = optarg; + if (!(val = strchr(name, '='))) { + std::cerr << "%% Expected -X property=value, not " << 
name << std::endl; + exit(1); + } - *val = '\0'; - val++; + *val = '\0'; + val++; - if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } + if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); } - break; + } break; default: goto usage; @@ -198,26 +198,27 @@ int main (int argc, char **argv) { } /* Topics to consume */ - for (; optind < argc ; optind++) + for (; optind < argc; optind++) topics.push_back(std::string(argv[optind])); if (topics.empty() || optind != argc) { usage: - fprintf(stderr, - "Usage: %s -g -B [options] topic1 topic2..\n" - "\n" - "librdkafka version %s (0x%08x)\n" - "\n" - " Options:\n" - " -g Consumer group id\n" - " -B How many messages to batch (default: 100).\n" - " -T How long to wait for batch-size to accumulate in milliseconds. (default 1000 ms)\n" - " -b Broker address (localhost:9092)\n" - " -X Set arbitrary librdkafka configuration property\n" - "\n", - argv[0], - RdKafka::version_str().c_str(), RdKafka::version()); - exit(1); + fprintf( + stderr, + "Usage: %s -g -B [options] topic1 topic2..\n" + "\n" + "librdkafka version %s (0x%08x)\n" + "\n" + " Options:\n" + " -g Consumer group id\n" + " -B How many messages to batch (default: 100).\n" + " -T How long to wait for batch-size to accumulate in " + "milliseconds. 
(default 1000 ms)\n" + " -b Broker address (localhost:9092)\n" + " -X Set arbitrary librdkafka configuration property\n" + "\n", + argv[0], RdKafka::version_str().c_str(), RdKafka::version()); + exit(1); } @@ -225,7 +226,8 @@ int main (int argc, char **argv) { signal(SIGTERM, sigterm); /* Create consumer */ - RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr); + RdKafka::KafkaConsumer *consumer = + RdKafka::KafkaConsumer::create(conf, errstr); if (!consumer) { std::cerr << "Failed to create consumer: " << errstr << std::endl; exit(1); @@ -236,8 +238,8 @@ int main (int argc, char **argv) { /* Subscribe to topics */ RdKafka::ErrorCode err = consumer->subscribe(topics); if (err) { - std::cerr << "Failed to subscribe to " << topics.size() << " topics: " - << RdKafka::err2str(err) << std::endl; + std::cerr << "Failed to subscribe to " << topics.size() + << " topics: " << RdKafka::err2str(err) << std::endl; exit(1); } @@ -247,7 +249,9 @@ int main (int argc, char **argv) { std::cout << "Accumulated " << msgs.size() << " messages:" << std::endl; for (auto &msg : msgs) { - std::cout << " Message in " << msg->topic_name() << " [" << msg->partition() << "] at offset " << msg->offset() << std::endl; + std::cout << " Message in " << msg->topic_name() << " [" + << msg->partition() << "] at offset " << msg->offset() + << std::endl; delete msg; } } diff --git a/examples/rdkafka_example.c b/examples/rdkafka_example.c index 80588d49b3..91415318ac 100644 --- a/examples/rdkafka_example.c +++ b/examples/rdkafka_example.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. 
Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -44,138 +44,142 @@ /* Typical include path would be , but this program * is builtin from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static volatile sig_atomic_t run = 1; static rd_kafka_t *rk; static int exit_eof = 0; -static int quiet = 0; -static enum { - OUTPUT_HEXDUMP, - OUTPUT_RAW, +static int quiet = 0; +static enum { + OUTPUT_HEXDUMP, + OUTPUT_RAW, } output = OUTPUT_HEXDUMP; -static void stop (int sig) { - run = 0; - fclose(stdin); /* abort fgets() */ +static void stop(int sig) { + run = 0; + fclose(stdin); /* abort fgets() */ } -static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) { - const char *p = (const char *)ptr; - size_t of = 0; +static void hexdump(FILE *fp, const char *name, const void *ptr, size_t len) { + const char *p = (const char *)ptr; + size_t of = 0; - if (name) - fprintf(fp, "%s hexdump (%zd bytes):\n", name, len); + if (name) + fprintf(fp, "%s hexdump (%zd bytes):\n", name, len); - for (of = 0 ; of < len ; of += 16) { - char hexen[16*3+1]; - char charen[16+1]; - int hof = 0; + for (of = 0; of < len; of += 16) { + char hexen[16 * 3 + 1]; + char charen[16 + 1]; + int hof = 0; - int cof = 0; - int i; + int cof = 0; + int i; - for (i = of ; i < (int)of + 16 && i < (int)len ; i++) { - hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff); - cof += sprintf(charen+cof, "%c", - isprint((int)p[i]) ? p[i] : '.'); - } - fprintf(fp, "%08zx: %-48s %-16s\n", - of, hexen, charen); - } + for (i = of; i < (int)of + 16 && i < (int)len; i++) { + hof += sprintf(hexen + hof, "%02x ", p[i] & 0xff); + cof += sprintf(charen + cof, "%c", + isprint((int)p[i]) ? p[i] : '.'); + } + fprintf(fp, "%08zx: %-48s %-16s\n", of, hexen, charen); + } } /** * Kafka logger callback (optional) */ -static void logger (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { - struct timeval tv; - gettimeofday(&tv, NULL); - fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", - (int)tv.tv_sec, (int)(tv.tv_usec / 1000), - level, fac, rk ? 
rd_kafka_name(rk) : NULL, buf); +static void +logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) { + struct timeval tv; + gettimeofday(&tv, NULL); + fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", (int)tv.tv_sec, + (int)(tv.tv_usec / 1000), level, fac, + rk ? rd_kafka_name(rk) : NULL, buf); } /** * Message delivery report callback using the richer rd_kafka_message_t object. */ -static void msg_delivered (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void msg_delivered(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { if (rkmessage->err) fprintf(stderr, - "%% Message delivery failed (broker %"PRId32"): %s\n", + "%% Message delivery failed (broker %" PRId32 "): %s\n", rd_kafka_message_broker_id(rkmessage), rd_kafka_err2str(rkmessage->err)); else if (!quiet) fprintf(stderr, - "%% Message delivered (%zd bytes, offset %"PRId64", " - "partition %"PRId32", broker %"PRId32"): %.*s\n", - rkmessage->len, rkmessage->offset, - rkmessage->partition, + "%% Message delivered (%zd bytes, offset %" PRId64 + ", " + "partition %" PRId32 ", broker %" PRId32 "): %.*s\n", + rkmessage->len, rkmessage->offset, rkmessage->partition, rd_kafka_message_broker_id(rkmessage), (int)rkmessage->len, (const char *)rkmessage->payload); } -static void msg_consume (rd_kafka_message_t *rkmessage, - void *opaque) { - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - fprintf(stderr, - "%% Consumer reached end of %s [%"PRId32"] " - "message queue at offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, rkmessage->offset); +static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque) { + if (rkmessage->err) { + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + fprintf(stderr, + "%% Consumer reached end of %s [%" PRId32 + "] " + "message queue at offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, 
rkmessage->offset); - if (exit_eof) - run = 0; + if (exit_eof) + run = 0; - return; - } + return; + } - fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] " - "offset %"PRId64": %s\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); + fprintf(stderr, + "%% Consume error for topic \"%s\" [%" PRId32 + "] " + "offset %" PRId64 ": %s\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) run = 0; - return; - } + return; + } - if (!quiet) { - rd_kafka_timestamp_type_t tstype; - int64_t timestamp; + if (!quiet) { + rd_kafka_timestamp_type_t tstype; + int64_t timestamp; rd_kafka_headers_t *hdrs; fprintf(stdout, - "%% Message (offset %"PRId64", %zd bytes, " - "broker %"PRId32"):\n", + "%% Message (offset %" PRId64 + ", %zd bytes, " + "broker %" PRId32 "):\n", rkmessage->offset, rkmessage->len, rd_kafka_message_broker_id(rkmessage)); - timestamp = rd_kafka_message_timestamp(rkmessage, &tstype); - if (tstype != RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) { - const char *tsname = "?"; - if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME) - tsname = "create time"; - else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) - tsname = "log append time"; - - fprintf(stdout, "%% Message timestamp: %s %"PRId64 - " (%ds ago)\n", - tsname, timestamp, - !timestamp ? 0 : - (int)time(NULL) - (int)(timestamp/1000)); - } + timestamp = rd_kafka_message_timestamp(rkmessage, &tstype); + if (tstype != RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) { + const char *tsname = "?"; + if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME) + tsname = "create time"; + else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) + tsname = "log append time"; + + fprintf(stdout, + "%% Message timestamp: %s %" PRId64 + " (%ds ago)\n", + tsname, timestamp, + !timestamp ? 
0 + : (int)time(NULL) - + (int)(timestamp / 1000)); + } if (!rd_kafka_message_headers(rkmessage, &hdrs)) { size_t idx = 0; @@ -185,46 +189,45 @@ static void msg_consume (rd_kafka_message_t *rkmessage, fprintf(stdout, "%% Headers:"); - while (!rd_kafka_header_get_all(hdrs, idx++, - &name, &val, &size)) { - fprintf(stdout, "%s%s=", - idx == 1 ? " " : ", ", name); + while (!rd_kafka_header_get_all(hdrs, idx++, &name, + &val, &size)) { + fprintf(stdout, "%s%s=", idx == 1 ? " " : ", ", + name); if (val) - fprintf(stdout, "\"%.*s\"", - (int)size, (const char *)val); + fprintf(stdout, "\"%.*s\"", (int)size, + (const char *)val); else fprintf(stdout, "NULL"); } fprintf(stdout, "\n"); } - } - - if (rkmessage->key_len) { - if (output == OUTPUT_HEXDUMP) - hexdump(stdout, "Message Key", - rkmessage->key, rkmessage->key_len); - else - printf("Key: %.*s\n", - (int)rkmessage->key_len, (char *)rkmessage->key); - } - - if (output == OUTPUT_HEXDUMP) - hexdump(stdout, "Message Payload", - rkmessage->payload, rkmessage->len); - else - printf("%.*s\n", - (int)rkmessage->len, (char *)rkmessage->payload); + } + + if (rkmessage->key_len) { + if (output == OUTPUT_HEXDUMP) + hexdump(stdout, "Message Key", rkmessage->key, + rkmessage->key_len); + else + printf("Key: %.*s\n", (int)rkmessage->key_len, + (char *)rkmessage->key); + } + + if (output == OUTPUT_HEXDUMP) + hexdump(stdout, "Message Payload", rkmessage->payload, + rkmessage->len); + else + printf("%.*s\n", (int)rkmessage->len, + (char *)rkmessage->payload); } -static void metadata_print (const char *topic, - const struct rd_kafka_metadata *metadata) { +static void metadata_print(const char *topic, + const struct rd_kafka_metadata *metadata) { int i, j, k; int32_t controllerid; - printf("Metadata for %s (from broker %"PRId32": %s):\n", - topic ? 
: "all topics", - metadata->orig_broker_id, + printf("Metadata for %s (from broker %" PRId32 ": %s):\n", + topic ?: "all topics", metadata->orig_broker_id, metadata->orig_broker_name); controllerid = rd_kafka_controllerid(rk, 0); @@ -232,20 +235,18 @@ static void metadata_print (const char *topic, /* Iterate brokers */ printf(" %i brokers:\n", metadata->broker_cnt); - for (i = 0 ; i < metadata->broker_cnt ; i++) - printf(" broker %"PRId32" at %s:%i%s\n", - metadata->brokers[i].id, - metadata->brokers[i].host, + for (i = 0; i < metadata->broker_cnt; i++) + printf(" broker %" PRId32 " at %s:%i%s\n", + metadata->brokers[i].id, metadata->brokers[i].host, metadata->brokers[i].port, - controllerid == metadata->brokers[i].id ? - " (controller)" : ""); + controllerid == metadata->brokers[i].id ? " (controller)" + : ""); /* Iterate topics */ printf(" %i topics:\n", metadata->topic_cnt); - for (i = 0 ; i < metadata->topic_cnt ; i++) { + for (i = 0; i < metadata->topic_cnt; i++) { const struct rd_kafka_metadata_topic *t = &metadata->topics[i]; - printf(" topic \"%s\" with %i partitions:", - t->topic, + printf(" topic \"%s\" with %i partitions:", t->topic, t->partition_cnt); if (t->err) { printf(" %s", rd_kafka_err2str(t->err)); @@ -255,23 +256,24 @@ static void metadata_print (const char *topic, printf("\n"); /* Iterate topic's partitions */ - for (j = 0 ; j < t->partition_cnt ; j++) { + for (j = 0; j < t->partition_cnt; j++) { const struct rd_kafka_metadata_partition *p; p = &t->partitions[j]; - printf(" partition %"PRId32", " - "leader %"PRId32", replicas: ", + printf(" partition %" PRId32 + ", " + "leader %" PRId32 ", replicas: ", p->id, p->leader); /* Iterate partition's replicas */ - for (k = 0 ; k < p->replica_cnt ; k++) - printf("%s%"PRId32, - k > 0 ? ",":"", p->replicas[k]); + for (k = 0; k < p->replica_cnt; k++) + printf("%s%" PRId32, k > 0 ? 
"," : "", + p->replicas[k]); /* Iterate partition's ISRs */ printf(", isrs: "); - for (k = 0 ; k < p->isr_cnt ; k++) - printf("%s%"PRId32, - k > 0 ? ",":"", p->isrs[k]); + for (k = 0; k < p->isr_cnt; k++) + printf("%s%" PRId32, k > 0 ? "," : "", + p->isrs[k]); if (p->err) printf(", %s\n", rd_kafka_err2str(p->err)); else @@ -281,118 +283,117 @@ static void metadata_print (const char *topic, } -static void sig_usr1 (int sig) { - rd_kafka_dump(stdout, rk); +static void sig_usr1(int sig) { + rd_kafka_dump(stdout, rk); } -int main (int argc, char **argv) { - rd_kafka_topic_t *rkt; - char *brokers = "localhost:9092"; - char mode = 'C'; - char *topic = NULL; - int partition = RD_KAFKA_PARTITION_UA; - int opt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - int64_t start_offset = 0; - int do_conf_dump = 0; - char tmp[16]; - int64_t seek_offset = 0; - int64_t tmp_offset = 0; - int get_wmarks = 0; +int main(int argc, char **argv) { + rd_kafka_topic_t *rkt; + char *brokers = "localhost:9092"; + char mode = 'C'; + char *topic = NULL; + int partition = RD_KAFKA_PARTITION_UA; + int opt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + int64_t start_offset = 0; + int do_conf_dump = 0; + char tmp[16]; + int64_t seek_offset = 0; + int64_t tmp_offset = 0; + int get_wmarks = 0; rd_kafka_headers_t *hdrs = NULL; rd_kafka_resp_err_t err; - /* Kafka configuration */ - conf = rd_kafka_conf_new(); + /* Kafka configuration */ + conf = rd_kafka_conf_new(); /* Set logger */ rd_kafka_conf_set_log_cb(conf, logger); - /* Quick termination */ - snprintf(tmp, sizeof(tmp), "%i", SIGIO); - rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); + /* Quick termination */ + snprintf(tmp, sizeof(tmp), "%i", SIGIO); + rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); - /* Topic configuration */ - topic_conf = rd_kafka_topic_conf_new(); + /* Topic configuration */ + topic_conf = 
rd_kafka_topic_conf_new(); - while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:As:H:")) != -1) { - switch (opt) { - case 'P': - case 'C': + while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:As:H:")) != -1) { + switch (opt) { + case 'P': + case 'C': case 'L': - mode = opt; - break; - case 't': - topic = optarg; - break; - case 'p': - partition = atoi(optarg); - break; - case 'b': - brokers = optarg; - break; - case 'z': - if (rd_kafka_conf_set(conf, "compression.codec", - optarg, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - break; - case 'o': + mode = opt; + break; + case 't': + topic = optarg; + break; + case 'p': + partition = atoi(optarg); + break; + case 'b': + brokers = optarg; + break; + case 'z': + if (rd_kafka_conf_set(conf, "compression.codec", optarg, + errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + break; + case 'o': case 's': - if (!strcmp(optarg, "end")) - tmp_offset = RD_KAFKA_OFFSET_END; - else if (!strcmp(optarg, "beginning")) - tmp_offset = RD_KAFKA_OFFSET_BEGINNING; - else if (!strcmp(optarg, "stored")) - tmp_offset = RD_KAFKA_OFFSET_STORED; - else if (!strcmp(optarg, "wmark")) - get_wmarks = 1; - else { - tmp_offset = strtoll(optarg, NULL, 10); - - if (tmp_offset < 0) - tmp_offset = RD_KAFKA_OFFSET_TAIL(-tmp_offset); - } + if (!strcmp(optarg, "end")) + tmp_offset = RD_KAFKA_OFFSET_END; + else if (!strcmp(optarg, "beginning")) + tmp_offset = RD_KAFKA_OFFSET_BEGINNING; + else if (!strcmp(optarg, "stored")) + tmp_offset = RD_KAFKA_OFFSET_STORED; + else if (!strcmp(optarg, "wmark")) + get_wmarks = 1; + else { + tmp_offset = strtoll(optarg, NULL, 10); + + if (tmp_offset < 0) + tmp_offset = + RD_KAFKA_OFFSET_TAIL(-tmp_offset); + } if (opt == 'o') start_offset = tmp_offset; else if (opt == 's') seek_offset = tmp_offset; - break; - case 'e': - exit_eof = 1; - break; - case 'd': - if (rd_kafka_conf_set(conf, "debug", optarg, - errstr, 
sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, - "%% Debug configuration failed: " - "%s: %s\n", - errstr, optarg); - exit(1); - } - break; - case 'q': - quiet = 1; - break; - case 'A': - output = OUTPUT_RAW; - break; - case 'H': - { + break; + case 'e': + exit_eof = 1; + break; + case 'd': + if (rd_kafka_conf_set(conf, "debug", optarg, errstr, + sizeof(errstr)) != + RD_KAFKA_CONF_OK) { + fprintf(stderr, + "%% Debug configuration failed: " + "%s: %s\n", + errstr, optarg); + exit(1); + } + break; + case 'q': + quiet = 1; + break; + case 'A': + output = OUTPUT_RAW; + break; + case 'H': { char *name, *val; size_t name_sz = -1; name = optarg; - val = strchr(name, '='); + val = strchr(name, '='); if (val) { - name_sz = (size_t)(val-name); + name_sz = (size_t)(val - name); val++; /* past the '=' */ } @@ -406,249 +407,238 @@ int main (int argc, char **argv) { name, rd_kafka_err2str(err)); exit(1); } + } break; + + case 'X': { + char *name, *val; + rd_kafka_conf_res_t res; + + if (!strcmp(optarg, "list") || + !strcmp(optarg, "help")) { + rd_kafka_conf_properties_show(stdout); + exit(0); + } + + if (!strcmp(optarg, "dump")) { + do_conf_dump = 1; + continue; + } + + name = optarg; + if (!(val = strchr(name, '='))) { + char dest[512]; + size_t dest_size = sizeof(dest); + /* Return current value for property. */ + + res = RD_KAFKA_CONF_UNKNOWN; + if (!strncmp(name, "topic.", strlen("topic."))) + res = rd_kafka_topic_conf_get( + topic_conf, name + strlen("topic."), + dest, &dest_size); + if (res == RD_KAFKA_CONF_UNKNOWN) + res = rd_kafka_conf_get( + conf, name, dest, &dest_size); + + if (res == RD_KAFKA_CONF_OK) { + printf("%s = %s\n", name, dest); + exit(0); + } else { + fprintf(stderr, "%% %s property\n", + res == RD_KAFKA_CONF_UNKNOWN + ? "Unknown" + : "Invalid"); + exit(1); + } + } + + *val = '\0'; + val++; + + res = RD_KAFKA_CONF_UNKNOWN; + /* Try "topic." 
prefixed properties on topic + * conf first, and then fall through to global if + * it didnt match a topic configuration property. */ + if (!strncmp(name, "topic.", strlen("topic."))) + res = rd_kafka_topic_conf_set( + topic_conf, name + strlen("topic."), val, + errstr, sizeof(errstr)); + + if (res == RD_KAFKA_CONF_UNKNOWN) + res = rd_kafka_conf_set(conf, name, val, errstr, + sizeof(errstr)); + + if (res != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + } break; + + default: + goto usage; + } + } + + + if (do_conf_dump) { + const char **arr; + size_t cnt; + int pass; + + for (pass = 0; pass < 2; pass++) { + int i; + + if (pass == 0) { + arr = rd_kafka_conf_dump(conf, &cnt); + printf("# Global config\n"); + } else { + printf("# Topic config\n"); + arr = + rd_kafka_topic_conf_dump(topic_conf, &cnt); + } + + for (i = 0; i < (int)cnt; i += 2) + printf("%s = %s\n", arr[i], arr[i + 1]); + + printf("\n"); + + rd_kafka_conf_dump_free(arr, cnt); } - break; - - case 'X': - { - char *name, *val; - rd_kafka_conf_res_t res; - - if (!strcmp(optarg, "list") || - !strcmp(optarg, "help")) { - rd_kafka_conf_properties_show(stdout); - exit(0); - } - - if (!strcmp(optarg, "dump")) { - do_conf_dump = 1; - continue; - } - - name = optarg; - if (!(val = strchr(name, '='))) { - char dest[512]; - size_t dest_size = sizeof(dest); - /* Return current value for property. */ - - res = RD_KAFKA_CONF_UNKNOWN; - if (!strncmp(name, "topic.", strlen("topic."))) - res = rd_kafka_topic_conf_get( - topic_conf, - name+strlen("topic."), - dest, &dest_size); - if (res == RD_KAFKA_CONF_UNKNOWN) - res = rd_kafka_conf_get( - conf, name, dest, &dest_size); - - if (res == RD_KAFKA_CONF_OK) { - printf("%s = %s\n", name, dest); - exit(0); - } else { - fprintf(stderr, - "%% %s property\n", - res == RD_KAFKA_CONF_UNKNOWN ? - "Unknown" : "Invalid"); - exit(1); - } - } - - *val = '\0'; - val++; - - res = RD_KAFKA_CONF_UNKNOWN; - /* Try "topic." 
prefixed properties on topic - * conf first, and then fall through to global if - * it didnt match a topic configuration property. */ - if (!strncmp(name, "topic.", strlen("topic."))) - res = rd_kafka_topic_conf_set(topic_conf, - name+ - strlen("topic."), - val, - errstr, - sizeof(errstr)); - - if (res == RD_KAFKA_CONF_UNKNOWN) - res = rd_kafka_conf_set(conf, name, val, - errstr, sizeof(errstr)); - - if (res != RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - } - break; - - default: - goto usage; - } - } - - - if (do_conf_dump) { - const char **arr; - size_t cnt; - int pass; - - for (pass = 0 ; pass < 2 ; pass++) { - int i; - - if (pass == 0) { - arr = rd_kafka_conf_dump(conf, &cnt); - printf("# Global config\n"); - } else { - printf("# Topic config\n"); - arr = rd_kafka_topic_conf_dump(topic_conf, - &cnt); - } - - for (i = 0 ; i < (int)cnt ; i += 2) - printf("%s = %s\n", - arr[i], arr[i+1]); - - printf("\n"); - - rd_kafka_conf_dump_free(arr, cnt); - } - - exit(0); - } - - - if (optind != argc || (mode != 'L' && !topic)) { - usage: - fprintf(stderr, - "Usage: %s -C|-P|-L -t " - "[-p ] [-b ]\n" - "\n" - "librdkafka version %s (0x%08x)\n" - "\n" - " Options:\n" - " -C | -P Consumer or Producer mode\n" + + exit(0); + } + + + if (optind != argc || (mode != 'L' && !topic)) { + usage: + fprintf(stderr, + "Usage: %s -C|-P|-L -t " + "[-p ] [-b ]\n" + "\n" + "librdkafka version %s (0x%08x)\n" + "\n" + " Options:\n" + " -C | -P Consumer or Producer mode\n" " -L Metadata list mode\n" - " -t Topic to fetch / produce\n" - " -p Partition (random partitioner)\n" - " -b Broker address (localhost:9092)\n" - " -z Enable compression:\n" - " none|gzip|snappy|lz4|zstd\n" - " -o Start offset (consumer):\n" - " beginning, end, NNNNN or -NNNNN\n" - " wmark returns the current hi&lo " - "watermarks.\n" - " -e Exit consumer when last message\n" - " in partition has been received.\n" - " -d [facs..] 
Enable debugging contexts:\n" - " %s\n" - " -q Be quiet\n" - " -A Raw payload output (consumer)\n" + " -t Topic to fetch / produce\n" + " -p Partition (random partitioner)\n" + " -b Broker address (localhost:9092)\n" + " -z Enable compression:\n" + " none|gzip|snappy|lz4|zstd\n" + " -o Start offset (consumer):\n" + " beginning, end, NNNNN or -NNNNN\n" + " wmark returns the current hi&lo " + "watermarks.\n" + " -e Exit consumer when last message\n" + " in partition has been received.\n" + " -d [facs..] Enable debugging contexts:\n" + " %s\n" + " -q Be quiet\n" + " -A Raw payload output (consumer)\n" " -H Add header to message (producer)\n" - " -X Set arbitrary librdkafka " - "configuration property\n" - " Properties prefixed with \"topic.\" " - "will be set on topic object.\n" - " -X list Show full list of supported " - "properties.\n" - " -X dump Show configuration\n" - " -X Get single property value\n" - "\n" - " In Consumer mode:\n" - " writes fetched messages to stdout\n" - " In Producer mode:\n" - " reads messages from stdin and sends to broker\n" + " -X Set arbitrary librdkafka " + "configuration property\n" + " Properties prefixed with \"topic.\" " + "will be set on topic object.\n" + " -X list Show full list of supported " + "properties.\n" + " -X dump Show configuration\n" + " -X Get single property value\n" + "\n" + " In Consumer mode:\n" + " writes fetched messages to stdout\n" + " In Producer mode:\n" + " reads messages from stdin and sends to broker\n" " In List mode:\n" " queries broker for metadata information, " "topic is optional.\n" - "\n" - "\n" - "\n", - argv[0], - rd_kafka_version_str(), rd_kafka_version(), - RD_KAFKA_DEBUG_CONTEXTS); - exit(1); - } + "\n" + "\n" + "\n", + argv[0], rd_kafka_version_str(), rd_kafka_version(), + RD_KAFKA_DEBUG_CONTEXTS); + exit(1); + } - if ((mode == 'C' && !isatty(STDIN_FILENO)) || - (mode != 'C' && !isatty(STDOUT_FILENO))) - quiet = 1; + if ((mode == 'C' && !isatty(STDIN_FILENO)) || + (mode != 'C' && 
!isatty(STDOUT_FILENO))) + quiet = 1; - signal(SIGINT, stop); - signal(SIGUSR1, sig_usr1); + signal(SIGINT, stop); + signal(SIGUSR1, sig_usr1); /* Set bootstrap servers */ if (brokers && - rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } - if (mode == 'P') { - /* - * Producer - */ - char buf[2048]; - int sendcnt = 0; + if (mode == 'P') { + /* + * Producer + */ + char buf[2048]; + int sendcnt = 0; - /* Set up a message delivery report callback. - * It will be called once for each message, either on successful - * delivery to broker, or upon failure to deliver to broker. */ + /* Set up a message delivery report callback. + * It will be called once for each message, either on successful + * delivery to broker, or upon failure to deliver to broker. */ rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered); - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", - errstr); - exit(1); - } - - /* Create topic */ - rkt = rd_kafka_topic_new(rk, topic, topic_conf); + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create new producer: %s\n", + errstr); + exit(1); + } + + /* Create topic */ + rkt = rd_kafka_topic_new(rk, topic, topic_conf); topic_conf = NULL; /* Now owned by topic */ - if (!quiet) - fprintf(stderr, - "%% Type stuff and hit enter to send\n"); + if (!quiet) + fprintf(stderr, + "%% Type stuff and hit enter to send\n"); - while (run && fgets(buf, sizeof(buf), stdin)) { - size_t len = strlen(buf); - if (buf[len-1] == '\n') - buf[--len] = '\0'; + while (run && fgets(buf, sizeof(buf), stdin)) { + size_t len = strlen(buf); + if (buf[len - 1] == '\n') + buf[--len] = 
'\0'; err = RD_KAFKA_RESP_ERR_NO_ERROR; - /* Send/Produce message. */ + /* Send/Produce message. */ if (hdrs) { rd_kafka_headers_t *hdrs_copy; hdrs_copy = rd_kafka_headers_copy(hdrs); err = rd_kafka_producev( - rk, - RD_KAFKA_V_RKT(rkt), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_VALUE(buf, len), - RD_KAFKA_V_HEADERS(hdrs_copy), - RD_KAFKA_V_END); + rk, RD_KAFKA_V_RKT(rkt), + RD_KAFKA_V_PARTITION(partition), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_VALUE(buf, len), + RD_KAFKA_V_HEADERS(hdrs_copy), + RD_KAFKA_V_END); if (err) rd_kafka_headers_destroy(hdrs_copy); } else { if (rd_kafka_produce( - rkt, partition, - RD_KAFKA_MSG_F_COPY, - /* Payload and length */ - buf, len, - /* Optional key and its length */ - NULL, 0, - /* Message opaque, provided in - * delivery report callback as - * msg_opaque. */ - NULL) == -1) { + rkt, partition, RD_KAFKA_MSG_F_COPY, + /* Payload and length */ + buf, len, + /* Optional key and its length */ + NULL, 0, + /* Message opaque, provided in + * delivery report callback as + * msg_opaque. 
*/ + NULL) == -1) { err = rd_kafka_last_error(); } } @@ -656,110 +646,115 @@ int main (int argc, char **argv) { if (err) { fprintf(stderr, "%% Failed to produce to topic %s " - "partition %i: %s\n", - rd_kafka_topic_name(rkt), partition, - rd_kafka_err2str(err)); - - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 0); - continue; - } - - if (!quiet) - fprintf(stderr, "%% Sent %zd bytes to topic " - "%s partition %i\n", - len, rd_kafka_topic_name(rkt), partition); - sendcnt++; - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 0); - } - - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 0); - - /* Wait for messages to be delivered */ - while (run && rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 100); - - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); - - /* Destroy the handle */ - rd_kafka_destroy(rk); - - } else if (mode == 'C') { - /* - * Consumer - */ - - rd_kafka_conf_set(conf, "enable.partition.eof", "true", - NULL, 0); - - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new consumer: %s\n", - errstr); - exit(1); - } - - if (get_wmarks) { - int64_t lo, hi; - - /* Only query for hi&lo partition watermarks */ - - if ((err = rd_kafka_query_watermark_offsets( - rk, topic, partition, &lo, &hi, 5000))) { - fprintf(stderr, "%% query_watermark_offsets() " - "failed: %s\n", - rd_kafka_err2str(err)); - exit(1); - } - - printf("%s [%d]: low - high offsets: " - "%"PRId64" - %"PRId64"\n", - topic, partition, lo, hi); - - rd_kafka_destroy(rk); - exit(0); - } - - - /* Create topic */ - rkt = rd_kafka_topic_new(rk, topic, topic_conf); + "partition %i: %s\n", + rd_kafka_topic_name(rkt), partition, + rd_kafka_err2str(err)); + + /* Poll to handle delivery reports */ + rd_kafka_poll(rk, 0); + continue; + } + + if (!quiet) + fprintf(stderr, + "%% Sent %zd bytes to topic " + "%s partition %i\n", + len, rd_kafka_topic_name(rkt), + partition); + 
sendcnt++; + /* Poll to handle delivery reports */ + rd_kafka_poll(rk, 0); + } + + /* Poll to handle delivery reports */ + rd_kafka_poll(rk, 0); + + /* Wait for messages to be delivered */ + while (run && rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 100); + + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); + + /* Destroy the handle */ + rd_kafka_destroy(rk); + + } else if (mode == 'C') { + /* + * Consumer + */ + + rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL, + 0); + + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create new consumer: %s\n", + errstr); + exit(1); + } + + if (get_wmarks) { + int64_t lo, hi; + + /* Only query for hi&lo partition watermarks */ + + if ((err = rd_kafka_query_watermark_offsets( + rk, topic, partition, &lo, &hi, 5000))) { + fprintf(stderr, + "%% query_watermark_offsets() " + "failed: %s\n", + rd_kafka_err2str(err)); + exit(1); + } + + printf( + "%s [%d]: low - high offsets: " + "%" PRId64 " - %" PRId64 "\n", + topic, partition, lo, hi); + + rd_kafka_destroy(rk); + exit(0); + } + + + /* Create topic */ + rkt = rd_kafka_topic_new(rk, topic, topic_conf); topic_conf = NULL; /* Now owned by topic */ - /* Start consuming */ - if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){ - err = rd_kafka_last_error(); - fprintf(stderr, "%% Failed to start consuming: %s\n", - rd_kafka_err2str(err)); + /* Start consuming */ + if (rd_kafka_consume_start(rkt, partition, start_offset) == + -1) { + err = rd_kafka_last_error(); + fprintf(stderr, "%% Failed to start consuming: %s\n", + rd_kafka_err2str(err)); if (err == RD_KAFKA_RESP_ERR__INVALID_ARG) fprintf(stderr, "%% Broker based offset storage " "requires a group.id, " "add: -X group.id=yourGroup\n"); - exit(1); - } + exit(1); + } - while (run) { - rd_kafka_message_t *rkmessage; + while (run) { + rd_kafka_message_t *rkmessage; /* Poll for errors, etc. 
*/ rd_kafka_poll(rk, 0); - /* Consume single message. - * See rdkafka_performance.c for high speed - * consuming of messages. */ - rkmessage = rd_kafka_consume(rkt, partition, 1000); - if (!rkmessage) /* timeout */ - continue; + /* Consume single message. + * See rdkafka_performance.c for high speed + * consuming of messages. */ + rkmessage = rd_kafka_consume(rkt, partition, 1000); + if (!rkmessage) /* timeout */ + continue; - msg_consume(rkmessage, NULL); + msg_consume(rkmessage, NULL); - /* Return message to rdkafka */ - rd_kafka_message_destroy(rkmessage); + /* Return message to rdkafka */ + rd_kafka_message_destroy(rkmessage); if (seek_offset) { err = rd_kafka_seek(rkt, partition, seek_offset, @@ -768,39 +763,39 @@ int main (int argc, char **argv) { printf("Seek failed: %s\n", rd_kafka_err2str(err)); else - printf("Seeked to %"PRId64"\n", + printf("Seeked to %" PRId64 "\n", seek_offset); seek_offset = 0; } - } + } - /* Stop consuming */ - rd_kafka_consume_stop(rkt, partition); + /* Stop consuming */ + rd_kafka_consume_stop(rkt, partition); while (rd_kafka_outq_len(rk) > 0) rd_kafka_poll(rk, 10); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy handle */ - rd_kafka_destroy(rk); + /* Destroy handle */ + rd_kafka_destroy(rk); } else if (mode == 'L') { err = RD_KAFKA_RESP_ERR_NO_ERROR; - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", - errstr); - exit(1); - } + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create new producer: %s\n", + errstr); + exit(1); + } /* Create topic */ if (topic) { - rkt = rd_kafka_topic_new(rk, topic, topic_conf); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); topic_conf = NULL; /* Now owned by topic */ } else rkt = NULL; @@ -809,8 +804,8 @@ int main 
(int argc, char **argv) { const struct rd_kafka_metadata *metadata; /* Fetch metadata */ - err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt, - &metadata, 5000); + err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt, &metadata, + 5000); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { fprintf(stderr, "%% Failed to acquire metadata: %s\n", @@ -825,12 +820,12 @@ int main (int argc, char **argv) { run = 0; } - /* Destroy topic */ - if (rkt) - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + if (rkt) + rd_kafka_topic_destroy(rkt); - /* Destroy the handle */ - rd_kafka_destroy(rk); + /* Destroy the handle */ + rd_kafka_destroy(rk); if (topic_conf) rd_kafka_topic_conf_destroy(topic_conf); @@ -847,12 +842,12 @@ int main (int argc, char **argv) { if (topic_conf) rd_kafka_topic_conf_destroy(topic_conf); - /* Let background threads clean up and terminate cleanly. */ - run = 5; - while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1) - printf("Waiting for librdkafka to decommission\n"); - if (run <= 0) - rd_kafka_dump(stdout, rk); + /* Let background threads clean up and terminate cleanly. */ + run = 5; + while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1) + printf("Waiting for librdkafka to decommission\n"); + if (run <= 0) + rd_kafka_dump(stdout, rk); - return 0; + return 0; } diff --git a/examples/rdkafka_example.cpp b/examples/rdkafka_example.cpp index f570e808d2..91c3440b3d 100644 --- a/examples/rdkafka_example.cpp +++ b/examples/rdkafka_example.cpp @@ -3,24 +3,24 @@ * * Copyright (c) 2014, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -54,28 +54,26 @@ #include "rdkafkacpp.h" -static void metadata_print (const std::string &topic, - const RdKafka::Metadata *metadata) { +static void metadata_print(const std::string &topic, + const RdKafka::Metadata *metadata) { std::cout << "Metadata for " << (topic.empty() ? 
"" : "all topics") - << "(from broker " << metadata->orig_broker_id() - << ":" << metadata->orig_broker_name() << std::endl; + << "(from broker " << metadata->orig_broker_id() << ":" + << metadata->orig_broker_name() << std::endl; /* Iterate brokers */ std::cout << " " << metadata->brokers()->size() << " brokers:" << std::endl; RdKafka::Metadata::BrokerMetadataIterator ib; - for (ib = metadata->brokers()->begin(); - ib != metadata->brokers()->end(); + for (ib = metadata->brokers()->begin(); ib != metadata->brokers()->end(); ++ib) { - std::cout << " broker " << (*ib)->id() << " at " - << (*ib)->host() << ":" << (*ib)->port() << std::endl; + std::cout << " broker " << (*ib)->id() << " at " << (*ib)->host() << ":" + << (*ib)->port() << std::endl; } /* Iterate topics */ std::cout << metadata->topics()->size() << " topics:" << std::endl; RdKafka::Metadata::TopicMetadataIterator it; - for (it = metadata->topics()->begin(); - it != metadata->topics()->end(); + for (it = metadata->topics()->begin(); it != metadata->topics()->end(); ++it) { - std::cout << " topic \""<< (*it)->topic() << "\" with " + std::cout << " topic \"" << (*it)->topic() << "\" with " << (*it)->partitions()->size() << " partitions:"; if ((*it)->err() != RdKafka::ERR_NO_ERROR) { @@ -87,26 +85,23 @@ static void metadata_print (const std::string &topic, /* Iterate topic's partitions */ RdKafka::TopicMetadata::PartitionMetadataIterator ip; - for (ip = (*it)->partitions()->begin(); - ip != (*it)->partitions()->end(); + for (ip = (*it)->partitions()->begin(); ip != (*it)->partitions()->end(); ++ip) { - std::cout << " partition " << (*ip)->id() - << ", leader " << (*ip)->leader() - << ", replicas: "; + std::cout << " partition " << (*ip)->id() << ", leader " + << (*ip)->leader() << ", replicas: "; /* Iterate partition's replicas */ RdKafka::PartitionMetadata::ReplicasIterator ir; - for (ir = (*ip)->replicas()->begin(); - ir != (*ip)->replicas()->end(); + for (ir = (*ip)->replicas()->begin(); ir != 
(*ip)->replicas()->end(); ++ir) { - std::cout << (ir == (*ip)->replicas()->begin() ? "":",") << *ir; + std::cout << (ir == (*ip)->replicas()->begin() ? "" : ",") << *ir; } /* Iterate partition's ISRs */ std::cout << ", isrs: "; RdKafka::PartitionMetadata::ISRSIterator iis; - for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end() ; ++iis) - std::cout << (iis == (*ip)->isrs()->begin() ? "":",") << *iis; + for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end(); ++iis) + std::cout << (iis == (*ip)->isrs()->begin() ? "" : ",") << *iis; if ((*ip)->err() != RdKafka::ERR_NO_ERROR) std::cout << ", " << RdKafka::err2str((*ip)->err()) << std::endl; @@ -117,34 +112,34 @@ static void metadata_print (const std::string &topic, } static volatile sig_atomic_t run = 1; -static bool exit_eof = false; +static bool exit_eof = false; -static void sigterm (int sig) { +static void sigterm(int sig) { run = 0; } class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb { public: - void dr_cb (RdKafka::Message &message) { + void dr_cb(RdKafka::Message &message) { std::string status_name; - switch (message.status()) - { - case RdKafka::Message::MSG_STATUS_NOT_PERSISTED: - status_name = "NotPersisted"; - break; - case RdKafka::Message::MSG_STATUS_POSSIBLY_PERSISTED: - status_name = "PossiblyPersisted"; - break; - case RdKafka::Message::MSG_STATUS_PERSISTED: - status_name = "Persisted"; - break; - default: - status_name = "Unknown?"; - break; - } - std::cout << "Message delivery for (" << message.len() << " bytes): " << - status_name << ": " << message.errstr() << std::endl; + switch (message.status()) { + case RdKafka::Message::MSG_STATUS_NOT_PERSISTED: + status_name = "NotPersisted"; + break; + case RdKafka::Message::MSG_STATUS_POSSIBLY_PERSISTED: + status_name = "PossiblyPersisted"; + break; + case RdKafka::Message::MSG_STATUS_PERSISTED: + status_name = "Persisted"; + break; + default: + status_name = "Unknown?"; + break; + } + std::cout << "Message delivery for (" << 
message.len() + << " bytes): " << status_name << ": " << message.errstr() + << std::endl; if (message.key()) std::cout << "Key: " << *(message.key()) << ";" << std::endl; } @@ -153,32 +148,31 @@ class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb { class ExampleEventCb : public RdKafka::EventCb { public: - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_ERROR: - if (event.fatal()) { - std::cerr << "FATAL "; - run = 0; - } - std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_ERROR: + if (event.fatal()) { + std::cerr << "FATAL "; + run = 0; + } + std::cerr << "ERROR (" << RdKafka::err2str(event.err()) + << "): " << event.str() << std::endl; + break; - case RdKafka::Event::EVENT_STATS: - std::cerr << "\"STATS\": " << event.str() << std::endl; - break; + case RdKafka::Event::EVENT_STATS: + std::cerr << "\"STATS\": " << event.str() << std::endl; + break; - case RdKafka::Event::EVENT_LOG: - fprintf(stderr, "LOG-%i-%s: %s\n", - event.severity(), event.fac().c_str(), event.str().c_str()); - break; + case RdKafka::Event::EVENT_LOG: + fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(), + event.str().c_str()); + break; - default: - std::cerr << "EVENT " << event.type() << - " (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; + default: + std::cerr << "EVENT " << event.type() << " (" + << RdKafka::err2str(event.err()) << "): " << event.str() + << std::endl; + break; } } }; @@ -188,91 +182,91 @@ class ExampleEventCb : public RdKafka::EventCb { * in the produce() call. 
*/ class MyHashPartitionerCb : public RdKafka::PartitionerCb { public: - int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key, - int32_t partition_cnt, void *msg_opaque) { + int32_t partitioner_cb(const RdKafka::Topic *topic, + const std::string *key, + int32_t partition_cnt, + void *msg_opaque) { return djb_hash(key->c_str(), key->size()) % partition_cnt; } - private: - static inline unsigned int djb_hash (const char *str, size_t len) { + private: + static inline unsigned int djb_hash(const char *str, size_t len) { unsigned int hash = 5381; - for (size_t i = 0 ; i < len ; i++) + for (size_t i = 0; i < len; i++) hash = ((hash << 5) + hash) + str[i]; return hash; } }; -void msg_consume(RdKafka::Message* message, void* opaque) { +void msg_consume(RdKafka::Message *message, void *opaque) { const RdKafka::Headers *headers; switch (message->err()) { - case RdKafka::ERR__TIMED_OUT: - break; - - case RdKafka::ERR_NO_ERROR: - /* Real message */ - std::cout << "Read msg at offset " << message->offset() << std::endl; - if (message->key()) { - std::cout << "Key: " << *message->key() << std::endl; - } - headers = message->headers(); - if (headers) { - std::vector hdrs = headers->get_all(); - for (size_t i = 0 ; i < hdrs.size() ; i++) { - const RdKafka::Headers::Header hdr = hdrs[i]; - - if (hdr.value() != NULL) - printf(" Header: %s = \"%.*s\"\n", - hdr.key().c_str(), - (int)hdr.value_size(), (const char *)hdr.value()); - else - printf(" Header: %s = NULL\n", hdr.key().c_str()); - } - } - printf("%.*s\n", - static_cast(message->len()), - static_cast(message->payload())); - break; - - case RdKafka::ERR__PARTITION_EOF: - /* Last message */ - if (exit_eof) { - run = 0; + case RdKafka::ERR__TIMED_OUT: + break; + + case RdKafka::ERR_NO_ERROR: + /* Real message */ + std::cout << "Read msg at offset " << message->offset() << std::endl; + if (message->key()) { + std::cout << "Key: " << *message->key() << std::endl; + } + headers = message->headers(); + if 
(headers) { + std::vector hdrs = headers->get_all(); + for (size_t i = 0; i < hdrs.size(); i++) { + const RdKafka::Headers::Header hdr = hdrs[i]; + + if (hdr.value() != NULL) + printf(" Header: %s = \"%.*s\"\n", hdr.key().c_str(), + (int)hdr.value_size(), (const char *)hdr.value()); + else + printf(" Header: %s = NULL\n", hdr.key().c_str()); } - break; - - case RdKafka::ERR__UNKNOWN_TOPIC: - case RdKafka::ERR__UNKNOWN_PARTITION: - std::cerr << "Consume failed: " << message->errstr() << std::endl; - run = 0; - break; + } + printf("%.*s\n", static_cast(message->len()), + static_cast(message->payload())); + break; - default: - /* Errors */ - std::cerr << "Consume failed: " << message->errstr() << std::endl; + case RdKafka::ERR__PARTITION_EOF: + /* Last message */ + if (exit_eof) { run = 0; + } + break; + + case RdKafka::ERR__UNKNOWN_TOPIC: + case RdKafka::ERR__UNKNOWN_PARTITION: + std::cerr << "Consume failed: " << message->errstr() << std::endl; + run = 0; + break; + + default: + /* Errors */ + std::cerr << "Consume failed: " << message->errstr() << std::endl; + run = 0; } } class ExampleConsumeCb : public RdKafka::ConsumeCb { public: - void consume_cb (RdKafka::Message &msg, void *opaque) { + void consume_cb(RdKafka::Message &msg, void *opaque) { msg_consume(&msg, opaque); } }; -int main (int argc, char **argv) { +int main(int argc, char **argv) { std::string brokers = "localhost"; std::string errstr; std::string topic_str; std::string mode; std::string debug; - int32_t partition = RdKafka::Topic::PARTITION_UA; + int32_t partition = RdKafka::Topic::PARTITION_UA; int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING; - bool do_conf_dump = false; + bool do_conf_dump = false; int opt; MyHashPartitionerCb hash_partitioner; int use_ccb = 0; @@ -280,7 +274,7 @@ int main (int argc, char **argv) { /* * Create configuration objects */ - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + RdKafka::Conf *conf = 
RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); @@ -311,20 +305,20 @@ int main (int argc, char **argv) { break; case 'z': if (conf->set("compression.codec", optarg, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); } break; case 'o': if (!strcmp(optarg, "end")) - start_offset = RdKafka::Topic::OFFSET_END; + start_offset = RdKafka::Topic::OFFSET_END; else if (!strcmp(optarg, "beginning")) - start_offset = RdKafka::Topic::OFFSET_BEGINNING; + start_offset = RdKafka::Topic::OFFSET_BEGINNING; else if (!strcmp(optarg, "stored")) - start_offset = RdKafka::Topic::OFFSET_STORED; + start_offset = RdKafka::Topic::OFFSET_STORED; else - start_offset = strtoll(optarg, NULL, 10); + start_offset = strtoll(optarg, NULL, 10); break; case 'e': exit_eof = true; @@ -339,49 +333,46 @@ int main (int argc, char **argv) { exit(1); } break; - case 'X': - { - char *name, *val; - - if (!strcmp(optarg, "dump")) { - do_conf_dump = true; - continue; - } - - name = optarg; - if (!(val = strchr(name, '='))) { - std::cerr << "%% Expected -X property=value, not " << - name << std::endl; - exit(1); - } - - *val = '\0'; - val++; - - /* Try "topic." prefixed properties on topic - * conf first, and then fall through to global if - * it didnt match a topic configuration property. 
*/ - RdKafka::Conf::ConfResult res; - if (!strncmp(name, "topic.", strlen("topic."))) - res = tconf->set(name+strlen("topic."), val, errstr); - else - res = conf->set(name, val, errstr); + case 'X': { + char *name, *val; - if (res != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } + if (!strcmp(optarg, "dump")) { + do_conf_dump = true; + continue; } - break; - case 'f': - if (!strcmp(optarg, "ccb")) - use_ccb = 1; - else { - std::cerr << "Unknown option: " << optarg << std::endl; - exit(1); - } - break; + name = optarg; + if (!(val = strchr(name, '='))) { + std::cerr << "%% Expected -X property=value, not " << name << std::endl; + exit(1); + } + + *val = '\0'; + val++; + + /* Try "topic." prefixed properties on topic + * conf first, and then fall through to global if + * it didnt match a topic configuration property. */ + RdKafka::Conf::ConfResult res; + if (!strncmp(name, "topic.", strlen("topic."))) + res = tconf->set(name + strlen("topic."), val, errstr); + else + res = conf->set(name, val, errstr); + + if (res != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + } break; + + case 'f': + if (!strcmp(optarg, "ccb")) + use_ccb = 1; + else { + std::cerr << "Unknown option: " << optarg << std::endl; + exit(1); + } + break; default: goto usage; @@ -390,8 +381,8 @@ int main (int argc, char **argv) { if (mode.empty() || (topic_str.empty() && mode != "L") || optind != argc) { usage: - std::string features; - conf->get("builtin.features", features); + std::string features; + conf->get("builtin.features", features); fprintf(stderr, "Usage: %s [-C|-P] -t " "[-p ] [-b ]\n" @@ -430,11 +421,9 @@ int main (int argc, char **argv) { "\n" "\n" "\n", - argv[0], - RdKafka::version_str().c_str(), RdKafka::version(), - features.c_str(), - RdKafka::get_debug_contexts().c_str()); - exit(1); + argv[0], RdKafka::version_str().c_str(), RdKafka::version(), + features.c_str(), RdKafka::get_debug_contexts().c_str()); + exit(1); } @@ 
-456,7 +445,7 @@ int main (int argc, char **argv) { if (do_conf_dump) { int pass; - for (pass = 0 ; pass < 2 ; pass++) { + for (pass = 0; pass < 2; pass++) { std::list *dump; if (pass == 0) { dump = conf->dump(); @@ -467,7 +456,7 @@ int main (int argc, char **argv) { } for (std::list::iterator it = dump->begin(); - it != dump->end(); ) { + it != dump->end();) { std::cout << *it << " = "; it++; std::cout << *it << std::endl; @@ -487,7 +476,7 @@ int main (int argc, char **argv) { * Producer mode */ - if(topic_str.empty()) + if (topic_str.empty()) goto usage; ExampleDeliveryReportCb ex_dr_cb; @@ -515,7 +504,7 @@ int main (int argc, char **argv) { for (std::string line; run && std::getline(std::cin, line);) { if (line.empty()) { producer->poll(0); - continue; + continue; } RdKafka::Headers *headers = RdKafka::Headers::create(); @@ -526,27 +515,27 @@ int main (int argc, char **argv) { * Produce message */ RdKafka::ErrorCode resp = - producer->produce(topic_str, partition, - RdKafka::Producer::RK_MSG_COPY /* Copy payload */, - /* Value */ - const_cast(line.c_str()), line.size(), - /* Key */ - NULL, 0, - /* Timestamp (defaults to now) */ - 0, - /* Message headers, if any */ - headers, - /* Per-message opaque value passed to - * delivery report */ - NULL); + producer->produce(topic_str, partition, + RdKafka::Producer::RK_MSG_COPY /* Copy payload */, + /* Value */ + const_cast(line.c_str()), line.size(), + /* Key */ + NULL, 0, + /* Timestamp (defaults to now) */ + 0, + /* Message headers, if any */ + headers, + /* Per-message opaque value passed to + * delivery report */ + NULL); if (resp != RdKafka::ERR_NO_ERROR) { - std::cerr << "% Produce failed: " << - RdKafka::err2str(resp) << std::endl; + std::cerr << "% Produce failed: " << RdKafka::err2str(resp) + << std::endl; delete headers; /* Headers are automatically deleted on produce() * success. 
*/ } else { - std::cerr << "% Produced message (" << line.size() << " bytes)" << - std::endl; + std::cerr << "% Produced message (" << line.size() << " bytes)" + << std::endl; } producer->poll(0); @@ -568,7 +557,7 @@ int main (int argc, char **argv) { conf->set("enable.partition.eof", "true", errstr); - if(topic_str.empty()) + if (topic_str.empty()) goto usage; /* @@ -585,8 +574,8 @@ int main (int argc, char **argv) { /* * Create topic handle. */ - RdKafka::Topic *topic = RdKafka::Topic::create(consumer, topic_str, - tconf, errstr); + RdKafka::Topic *topic = + RdKafka::Topic::create(consumer, topic_str, tconf, errstr); if (!topic) { std::cerr << "Failed to create topic: " << errstr << std::endl; exit(1); @@ -597,8 +586,8 @@ int main (int argc, char **argv) { */ RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset); if (resp != RdKafka::ERR_NO_ERROR) { - std::cerr << "Failed to start consumer: " << - RdKafka::err2str(resp) << std::endl; + std::cerr << "Failed to start consumer: " << RdKafka::err2str(resp) + << std::endl; exit(1); } @@ -609,8 +598,8 @@ int main (int argc, char **argv) { */ while (run) { if (use_ccb) { - consumer->consume_callback(topic, partition, 1000, - &ex_consume_cb, &use_ccb); + consumer->consume_callback(topic, partition, 1000, &ex_consume_cb, + &use_ccb); } else { RdKafka::Message *msg = consumer->consume(topic, partition, 1000); msg_consume(msg, NULL); @@ -646,7 +635,7 @@ int main (int argc, char **argv) { * Create topic handle. 
*/ RdKafka::Topic *topic = NULL; - if(!topic_str.empty()) { + if (!topic_str.empty()) { topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr); if (!topic) { std::cerr << "Failed to create topic: " << errstr << std::endl; @@ -658,13 +647,13 @@ int main (int argc, char **argv) { class RdKafka::Metadata *metadata; /* Fetch metadata */ - RdKafka::ErrorCode err = producer->metadata(!topic, topic, - &metadata, 5000); + RdKafka::ErrorCode err = + producer->metadata(!topic, topic, &metadata, 5000); if (err != RdKafka::ERR_NO_ERROR) { - std::cerr << "%% Failed to acquire metadata: " - << RdKafka::err2str(err) << std::endl; - run = 0; - break; + std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err) + << std::endl; + run = 0; + break; } metadata_print(topic_str, metadata); @@ -672,7 +661,6 @@ int main (int argc, char **argv) { delete metadata; run = 0; } - } delete conf; diff --git a/examples/rdkafka_performance.c b/examples/rdkafka_performance.c index e925a54580..c4ba0274b5 100644 --- a/examples/rdkafka_performance.c +++ b/examples/rdkafka_performance.c @@ -33,7 +33,7 @@ */ #ifdef _MSC_VER -#define _CRT_SECURE_NO_WARNINGS /* Silence nonsense on MSVC */ +#define _CRT_SECURE_NO_WARNINGS /* Silence nonsense on MSVC */ #endif #include "../src/rd.h" @@ -46,7 +46,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /* Do not include these defines from your program, they will not be * provided by librdkafka. 
*/ #include "rd.h" @@ -59,117 +59,119 @@ static volatile sig_atomic_t run = 1; -static int forever = 1; -static rd_ts_t dispintvl = 1000; -static int do_seq = 0; -static int exit_after = 0; -static int exit_eof = 0; +static int forever = 1; +static rd_ts_t dispintvl = 1000; +static int do_seq = 0; +static int exit_after = 0; +static int exit_eof = 0; static FILE *stats_fp; static int dr_disp_div; -static int verbosity = 1; -static int latency_mode = 0; -static FILE *latency_fp = NULL; -static int msgcnt = -1; +static int verbosity = 1; +static int latency_mode = 0; +static FILE *latency_fp = NULL; +static int msgcnt = -1; static int incremental_mode = 0; -static int partition_cnt = 0; -static int eof_cnt = 0; -static int with_dr = 1; -static int read_hdrs = 0; +static int partition_cnt = 0; +static int eof_cnt = 0; +static int with_dr = 1; +static int read_hdrs = 0; -static void stop (int sig) { +static void stop(int sig) { if (!run) exit(0); - run = 0; + run = 0; } -static long int msgs_wait_cnt = 0; +static long int msgs_wait_cnt = 0; static long int msgs_wait_produce_cnt = 0; static rd_ts_t t_end; static rd_kafka_t *global_rk; struct avg { - int64_t val; - int cnt; + int64_t val; + int cnt; uint64_t ts_start; }; static struct { - rd_ts_t t_start; - rd_ts_t t_end; - rd_ts_t t_end_send; - uint64_t msgs; - uint64_t msgs_last; + rd_ts_t t_start; + rd_ts_t t_end; + rd_ts_t t_end_send; + uint64_t msgs; + uint64_t msgs_last; uint64_t msgs_dr_ok; uint64_t msgs_dr_err; uint64_t bytes_dr_ok; - uint64_t bytes; - uint64_t bytes_last; - uint64_t tx; - uint64_t tx_err; + uint64_t bytes; + uint64_t bytes_last; + uint64_t tx; + uint64_t tx_err; uint64_t avg_rtt; uint64_t offset; - rd_ts_t t_fetch_latency; - rd_ts_t t_last; - rd_ts_t t_enobufs_last; - rd_ts_t t_total; - rd_ts_t latency_last; - rd_ts_t latency_lo; - rd_ts_t latency_hi; - rd_ts_t latency_sum; - int latency_cnt; - int64_t last_offset; + rd_ts_t t_fetch_latency; + rd_ts_t t_last; + rd_ts_t t_enobufs_last; + rd_ts_t 
t_total; + rd_ts_t latency_last; + rd_ts_t latency_lo; + rd_ts_t latency_hi; + rd_ts_t latency_sum; + int latency_cnt; + int64_t last_offset; } cnt; -uint64_t wall_clock (void) { +uint64_t wall_clock(void) { struct timeval tv; gettimeofday(&tv, NULL); - return ((uint64_t)tv.tv_sec * 1000000LLU) + - ((uint64_t)tv.tv_usec); + return ((uint64_t)tv.tv_sec * 1000000LLU) + ((uint64_t)tv.tv_usec); } -static void err_cb (rd_kafka_t *rk, int err, const char *reason, void *opaque) { +static void err_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) { if (err == RD_KAFKA_RESP_ERR__FATAL) { char errstr[512]; err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr)); printf("%% FATAL ERROR CALLBACK: %s: %s: %s\n", rd_kafka_name(rk), rd_kafka_err2str(err), errstr); } else { - printf("%% ERROR CALLBACK: %s: %s: %s\n", - rd_kafka_name(rk), rd_kafka_err2str(err), reason); + printf("%% ERROR CALLBACK: %s: %s: %s\n", rd_kafka_name(rk), + rd_kafka_err2str(err), reason); } } -static void throttle_cb (rd_kafka_t *rk, const char *broker_name, - int32_t broker_id, int throttle_time_ms, - void *opaque) { - printf("%% THROTTLED %dms by %s (%"PRId32")\n", throttle_time_ms, - broker_name, broker_id); +static void throttle_cb(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque) { + printf("%% THROTTLED %dms by %s (%" PRId32 ")\n", throttle_time_ms, + broker_name, broker_id); } -static void offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { +static void offset_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { int i; if (err || verbosity >= 2) printf("%% Offset commit of %d partition(s): %s\n", offsets->cnt, rd_kafka_err2str(err)); - for (i = 0 ; i < offsets->cnt ; i++) { + for (i = 0; i < offsets->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &offsets->elems[i]; if (rktpar->err || verbosity >= 2) - 
printf("%% %s [%"PRId32"] @ %"PRId64": %s\n", - rktpar->topic, rktpar->partition, - rktpar->offset, rd_kafka_err2str(err)); + printf("%% %s [%" PRId32 "] @ %" PRId64 ": %s\n", + rktpar->topic, rktpar->partition, rktpar->offset, + rd_kafka_err2str(err)); } } /** * @brief Add latency measurement */ -static void latency_add (int64_t ts, const char *who) { +static void latency_add(int64_t ts, const char *who) { if (ts > cnt.latency_hi) cnt.latency_hi = ts; if (!cnt.latency_lo || ts < cnt.latency_lo) @@ -178,21 +180,22 @@ static void latency_add (int64_t ts, const char *who) { cnt.latency_cnt++; cnt.latency_sum += ts; if (latency_fp) - fprintf(latency_fp, "%"PRIu64"\n", ts); + fprintf(latency_fp, "%" PRIu64 "\n", ts); } -static void msg_delivered (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { - static rd_ts_t last; - rd_ts_t now = rd_clock(); - static int msgs; +static void msg_delivered(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { + static rd_ts_t last; + rd_ts_t now = rd_clock(); + static int msgs; msgs++; - msgs_wait_cnt--; + msgs_wait_cnt--; - if (rkmessage->err) + if (rkmessage->err) cnt.msgs_dr_err++; else { cnt.msgs_dr_ok++; @@ -202,107 +205,107 @@ static void msg_delivered (rd_kafka_t *rk, if (latency_mode) { /* Extract latency */ int64_t source_ts; - if (sscanf(rkmessage->payload, "LATENCY:%"SCNd64, + if (sscanf(rkmessage->payload, "LATENCY:%" SCNd64, &source_ts) == 1) latency_add(wall_clock() - source_ts, "producer"); } - if ((rkmessage->err && - (cnt.msgs_dr_err < 50 || - !(cnt.msgs_dr_err % (dispintvl / 1000)))) || - !last || msgs_wait_cnt < 5 || - !(msgs_wait_cnt % dr_disp_div) || - (now - last) >= dispintvl * 1000 || - verbosity >= 3) { - if (rkmessage->err && verbosity >= 2) - printf("%% Message delivery failed: %s [%"PRId32"]: " - "%s (%li remain)\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rd_kafka_err2str(rkmessage->err), - msgs_wait_cnt); - else if (verbosity > 2) - 
printf("%% Message delivered (offset %"PRId64"): " + if ((rkmessage->err && (cnt.msgs_dr_err < 50 || + !(cnt.msgs_dr_err % (dispintvl / 1000)))) || + !last || msgs_wait_cnt < 5 || !(msgs_wait_cnt % dr_disp_div) || + (now - last) >= dispintvl * 1000 || verbosity >= 3) { + if (rkmessage->err && verbosity >= 2) + printf("%% Message delivery failed: %s [%" PRId32 + "]: " + "%s (%li remain)\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, + rd_kafka_err2str(rkmessage->err), msgs_wait_cnt); + else if (verbosity > 2) + printf("%% Message delivered (offset %" PRId64 + "): " "%li remain\n", rkmessage->offset, msgs_wait_cnt); - if (verbosity >= 3 && do_seq) - printf(" --> \"%.*s\"\n", - (int)rkmessage->len, + if (verbosity >= 3 && do_seq) + printf(" --> \"%.*s\"\n", (int)rkmessage->len, (const char *)rkmessage->payload); - last = now; - } + last = now; + } cnt.last_offset = rkmessage->offset; - if (msgs_wait_produce_cnt == 0 && msgs_wait_cnt == 0 && !forever) { + if (msgs_wait_produce_cnt == 0 && msgs_wait_cnt == 0 && !forever) { if (verbosity >= 2 && cnt.msgs > 0) { double error_percent = - (double)(cnt.msgs - cnt.msgs_dr_ok) / - cnt.msgs * 100; - printf("%% Messages delivered with failure " - "percentage of %.5f%%\n", error_percent); + (double)(cnt.msgs - cnt.msgs_dr_ok) / cnt.msgs * + 100; + printf( + "%% Messages delivered with failure " + "percentage of %.5f%%\n", + error_percent); } - t_end = rd_clock(); - run = 0; - } - - if (exit_after && exit_after <= msgs) { - printf("%% Hard exit after %i messages, as requested\n", - exit_after); - exit(0); - } + t_end = rd_clock(); + run = 0; + } + + if (exit_after && exit_after <= msgs) { + printf("%% Hard exit after %i messages, as requested\n", + exit_after); + exit(0); + } } -static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) { +static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque) { - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + if 
(rkmessage->err) { + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { cnt.offset = rkmessage->offset; if (verbosity >= 1) - printf("%% Consumer reached end of " - "%s [%"PRId32"] " - "message queue at offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, rkmessage->offset); - - if (exit_eof && ++eof_cnt == partition_cnt) - run = 0; - - return; - } + printf( + "%% Consumer reached end of " + "%s [%" PRId32 + "] " + "message queue at offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); + + if (exit_eof && ++eof_cnt == partition_cnt) + run = 0; + + return; + } - printf("%% Consume error for topic \"%s\" [%"PRId32"] " - "offset %"PRId64": %s\n", - rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt):"", - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); + printf("%% Consume error for topic \"%s\" [%" PRId32 + "] " + "offset %" PRId64 ": %s\n", + rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt) + : "", + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) run = 0; cnt.msgs_dr_err++; - return; - } + return; + } - /* Start measuring from first message received */ - if (!cnt.t_start) - cnt.t_start = cnt.t_last = rd_clock(); + /* Start measuring from first message received */ + if (!cnt.t_start) + cnt.t_start = cnt.t_last = rd_clock(); cnt.offset = rkmessage->offset; - cnt.msgs++; - cnt.bytes += rkmessage->len; + cnt.msgs++; + cnt.bytes += rkmessage->len; - if (verbosity >= 3 || - (verbosity >= 2 && !(cnt.msgs % 1000000))) - printf("@%"PRId64": %.*s: %.*s\n", - rkmessage->offset, + if (verbosity >= 3 || (verbosity >= 2 && !(cnt.msgs % 1000000))) + printf("@%" PRId64 ": %.*s: %.*s\n", rkmessage->offset, (int)rkmessage->key_len, (char *)rkmessage->key, - (int)rkmessage->len, (char *)rkmessage->payload); + 
(int)rkmessage->len, (char *)rkmessage->payload); if (latency_mode) { @@ -310,21 +313,23 @@ static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->len > 8 && !memcmp(rkmessage->payload, "LATENCY:", 8) && - sscanf(rkmessage->payload, "LATENCY:%"SCNd64, + sscanf(rkmessage->payload, "LATENCY:%" SCNd64, &remote_ts) == 1) { ts = wall_clock() - remote_ts; if (ts > 0 && ts < (1000000 * 60 * 5)) { latency_add(ts, "consumer"); } else { if (verbosity >= 1) - printf("Received latency timestamp is too far off: %"PRId64"us (message offset %"PRId64"): ignored\n", - ts, rkmessage->offset); + printf( + "Received latency timestamp is too " + "far off: %" PRId64 + "us (message offset %" PRId64 + "): ignored\n", + ts, rkmessage->offset); } } else if (verbosity > 1) printf("not a LATENCY payload: %.*s\n", - (int)rkmessage->len, - (char *)rkmessage->payload); - + (int)rkmessage->len, (char *)rkmessage->payload); } if (read_hdrs) { @@ -338,23 +343,22 @@ static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) { } -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque) { - rd_kafka_error_t *error = NULL; +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque) { + rd_kafka_error_t *error = NULL; rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR; - if (exit_eof && - !strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) - fprintf(stderr, "%% This example has not been modified to " + if (exit_eof && !strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) + fprintf(stderr, + "%% This example has not been modified to " "support -e (exit on EOF) when " "partition.assignment.strategy " "is set to an incremental/cooperative strategy: " "-e will not behave as expected\n"); - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: fprintf(stderr, "%% Group rebalanced (%s): 
" @@ -380,7 +384,7 @@ static void rebalance_cb (rd_kafka_t *rk, error = rd_kafka_incremental_unassign(rk, partitions); partition_cnt -= partitions->cnt; } else { - ret_err = rd_kafka_assign(rk, NULL); + ret_err = rd_kafka_assign(rk, NULL); partition_cnt = 0; } @@ -407,8 +411,10 @@ static void rebalance_cb (rd_kafka_t *rk, * First find 'field1', then find 'field2' and extract its value. * Returns 0 on miss else the value. */ -static uint64_t json_parse_fields (const char *json, const char **end, - const char *field1, const char *field2) { +static uint64_t json_parse_fields(const char *json, + const char **end, + const char *field1, + const char *field2) { const char *t = json; const char *t2; int len1 = (int)strlen(field1); @@ -443,21 +449,20 @@ static uint64_t json_parse_fields (const char *json, const char **end, /** * Parse various values from rdkafka stats */ -static void json_parse_stats (const char *json) { +static void json_parse_stats(const char *json) { const char *t; #define MAX_AVGS 100 /* max number of brokers to scan for rtt */ - uint64_t avg_rtt[MAX_AVGS+1]; - int avg_rtt_i = 0; + uint64_t avg_rtt[MAX_AVGS + 1]; + int avg_rtt_i = 0; /* Store totals at end of array */ - avg_rtt[MAX_AVGS] = 0; + avg_rtt[MAX_AVGS] = 0; /* Extract all broker RTTs */ t = json; while (avg_rtt_i < MAX_AVGS && *t) { - avg_rtt[avg_rtt_i] = json_parse_fields(t, &t, - "\"rtt\":", - "\"avg\":"); + avg_rtt[avg_rtt_i] = + json_parse_fields(t, &t, "\"rtt\":", "\"avg\":"); /* Skip low RTT values, means no messages are passing */ if (avg_rtt[avg_rtt_i] < 100 /*0.1ms*/) @@ -475,62 +480,63 @@ static void json_parse_stats (const char *json) { } -static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len, - void *opaque) { +static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) { /* Extract values for our own stats */ json_parse_stats(json); if (stats_fp) fprintf(stats_fp, "%s\n", json); - return 0; + return 0; } -#define _OTYPE_TAB 0x1 /* tabular format */ 
-#define _OTYPE_SUMMARY 0x2 /* summary format */ -#define _OTYPE_FORCE 0x4 /* force output regardless of interval timing */ -static void print_stats (rd_kafka_t *rk, - int mode, int otype, const char *compression) { - rd_ts_t now = rd_clock(); - rd_ts_t t_total; +#define _OTYPE_TAB 0x1 /* tabular format */ +#define _OTYPE_SUMMARY 0x2 /* summary format */ +#define _OTYPE_FORCE 0x4 /* force output regardless of interval timing */ +static void +print_stats(rd_kafka_t *rk, int mode, int otype, const char *compression) { + rd_ts_t now = rd_clock(); + rd_ts_t t_total; static int rows_written = 0; int print_header; double latency_avg = 0.0f; char extra[512]; int extra_of = 0; - *extra = '\0'; + *extra = '\0'; - if (!(otype & _OTYPE_FORCE) && + if (!(otype & _OTYPE_FORCE) && (((otype & _OTYPE_SUMMARY) && verbosity == 0) || cnt.t_last + dispintvl > now)) - return; + return; - print_header = !rows_written ||(verbosity > 0 && !(rows_written % 20)); + print_header = !rows_written || (verbosity > 0 && !(rows_written % 20)); - if (cnt.t_end_send) - t_total = cnt.t_end_send - cnt.t_start; - else if (cnt.t_end) - t_total = cnt.t_end - cnt.t_start; - else if (cnt.t_start) - t_total = now - cnt.t_start; - else - t_total = 1; + if (cnt.t_end_send) + t_total = cnt.t_end_send - cnt.t_start; + else if (cnt.t_end) + t_total = cnt.t_end - cnt.t_start; + else if (cnt.t_start) + t_total = now - cnt.t_start; + else + t_total = 1; if (latency_mode && cnt.latency_cnt) - latency_avg = (double)cnt.latency_sum / - (double)cnt.latency_cnt; + latency_avg = (double)cnt.latency_sum / (double)cnt.latency_cnt; if (mode == 'P') { if (otype & _OTYPE_TAB) { -#define ROW_START() do {} while (0) -#define COL_HDR(NAME) printf("| %10.10s ", (NAME)) -#define COL_PR64(NAME,VAL) printf("| %10"PRIu64" ", (VAL)) -#define COL_PRF(NAME,VAL) printf("| %10.2f ", (VAL)) -#define ROW_END() do { \ - printf("\n"); \ - rows_written++; \ - } while (0) +#define ROW_START() \ + do { \ + } while (0) +#define COL_HDR(NAME) 
printf("| %10.10s ", (NAME)) +#define COL_PR64(NAME, VAL) printf("| %10" PRIu64 " ", (VAL)) +#define COL_PRF(NAME, VAL) printf("| %10.2f ", (VAL)) +#define ROW_END() \ + do { \ + printf("\n"); \ + rows_written++; \ + } while (0) if (print_header) { /* First time, print header */ @@ -581,21 +587,25 @@ static void print_stats (rd_kafka_t *rk, } if (otype & _OTYPE_SUMMARY) { - printf("%% %"PRIu64" messages produced " - "(%"PRIu64" bytes), " - "%"PRIu64" delivered " - "(offset %"PRId64", %"PRIu64" failed) " - "in %"PRIu64"ms: %"PRIu64" msgs/s and " + printf("%% %" PRIu64 + " messages produced " + "(%" PRIu64 + " bytes), " + "%" PRIu64 + " delivered " + "(offset %" PRId64 ", %" PRIu64 + " failed) " + "in %" PRIu64 "ms: %" PRIu64 + " msgs/s and " "%.02f MB/s, " - "%"PRIu64" produce failures, %i in queue, " + "%" PRIu64 + " produce failures, %i in queue, " "%s compression\n", - cnt.msgs, cnt.bytes, - cnt.msgs_dr_ok, cnt.last_offset, cnt.msgs_dr_err, - t_total / 1000, + cnt.msgs, cnt.bytes, cnt.msgs_dr_ok, + cnt.last_offset, cnt.msgs_dr_err, t_total / 1000, ((cnt.msgs_dr_ok * 1000000) / t_total), (float)((cnt.bytes_dr_ok) / (float)t_total), - cnt.tx_err, - rk ? rd_kafka_outq_len(rk) : 0, + cnt.tx_err, rk ? 
rd_kafka_outq_len(rk) : 0, compression); } @@ -627,10 +637,8 @@ static void print_stats (rd_kafka_t *rk, COL_PR64("msgs", cnt.msgs); COL_PR64("bytes", cnt.bytes); COL_PR64("rtt", cnt.avg_rtt / 1000); - COL_PR64("m/s", - ((cnt.msgs * 1000000) / t_total)); - COL_PRF("MB/s", - (float)((cnt.bytes) / (float)t_total)); + COL_PR64("m/s", ((cnt.msgs * 1000000) / t_total)); + COL_PRF("MB/s", (float)((cnt.bytes) / (float)t_total)); COL_PR64("rx_err", cnt.msgs_dr_err); COL_PR64("offset", cnt.offset); if (latency_mode) { @@ -640,59 +648,57 @@ static void print_stats (rd_kafka_t *rk, COL_PRF("lat_hi", cnt.latency_hi / 1000.0f); } ROW_END(); - } if (otype & _OTYPE_SUMMARY) { if (latency_avg >= 1.0f) - extra_of += rd_snprintf(extra+extra_of, - sizeof(extra)-extra_of, - ", latency " - "curr/avg/lo/hi " - "%.2f/%.2f/%.2f/%.2fms", - cnt.latency_last / 1000.0f, - latency_avg / 1000.0f, - cnt.latency_lo / 1000.0f, - cnt.latency_hi / 1000.0f) -; - printf("%% %"PRIu64" messages (%"PRIu64" bytes) " - "consumed in %"PRIu64"ms: %"PRIu64" msgs/s " + extra_of += rd_snprintf( + extra + extra_of, sizeof(extra) - extra_of, + ", latency " + "curr/avg/lo/hi " + "%.2f/%.2f/%.2f/%.2fms", + cnt.latency_last / 1000.0f, + latency_avg / 1000.0f, + cnt.latency_lo / 1000.0f, + cnt.latency_hi / 1000.0f); + printf("%% %" PRIu64 " messages (%" PRIu64 + " bytes) " + "consumed in %" PRIu64 "ms: %" PRIu64 + " msgs/s " "(%.02f MB/s)" "%s\n", - cnt.msgs, cnt.bytes, - t_total / 1000, + cnt.msgs, cnt.bytes, t_total / 1000, ((cnt.msgs * 1000000) / t_total), - (float)((cnt.bytes) / (float)t_total), - extra); + (float)((cnt.bytes) / (float)t_total), extra); } if (incremental_mode && now > cnt.t_last) { - uint64_t i_msgs = cnt.msgs - cnt.msgs_last; + uint64_t i_msgs = cnt.msgs - cnt.msgs_last; uint64_t i_bytes = cnt.bytes - cnt.bytes_last; - uint64_t i_time = cnt.t_last ? 
now - cnt.t_last : 0; - - printf("%% INTERVAL: %"PRIu64" messages " - "(%"PRIu64" bytes) " - "consumed in %"PRIu64"ms: %"PRIu64" msgs/s " + uint64_t i_time = cnt.t_last ? now - cnt.t_last : 0; + + printf("%% INTERVAL: %" PRIu64 + " messages " + "(%" PRIu64 + " bytes) " + "consumed in %" PRIu64 "ms: %" PRIu64 + " msgs/s " "(%.02f MB/s)" "%s\n", - i_msgs, i_bytes, - i_time / 1000, + i_msgs, i_bytes, i_time / 1000, ((i_msgs * 1000000) / i_time), - (float)((i_bytes) / (float)i_time), - extra); - + (float)((i_bytes) / (float)i_time), extra); } } - cnt.t_last = now; - cnt.msgs_last = cnt.msgs; - cnt.bytes_last = cnt.bytes; + cnt.t_last = now; + cnt.msgs_last = cnt.msgs; + cnt.bytes_last = cnt.bytes; } -static void sig_usr1 (int sig) { - rd_kafka_dump(stdout, global_rk); +static void sig_usr1(int sig) { + rd_kafka_dump(stdout, global_rk); } @@ -700,15 +706,15 @@ static void sig_usr1 (int sig) { * @brief Read config from file * @returns -1 on error, else 0. */ -static int read_conf_file (rd_kafka_conf_t *conf, const char *path) { +static int read_conf_file(rd_kafka_conf_t *conf, const char *path) { FILE *fp; char buf[512]; int line = 0; char errstr[512]; if (!(fp = fopen(path, "r"))) { - fprintf(stderr, "%% Failed to open %s: %s\n", - path, strerror(errno)); + fprintf(stderr, "%% Failed to open %s: %s\n", path, + strerror(errno)); return -1; } @@ -729,9 +735,9 @@ static int read_conf_file (rd_kafka_conf_t *conf, const char *path) { *t = '\0'; t = strchr(buf, '='); - if (!t || t == s || !*(t+1)) { - fprintf(stderr, "%% %s:%d: expected key=value\n", - path, line); + if (!t || t == s || !*(t + 1)) { + fprintf(stderr, "%% %s:%d: expected key=value\n", path, + line); fclose(fp); return -1; } @@ -744,8 +750,8 @@ static int read_conf_file (rd_kafka_conf_t *conf, const char *path) { if (r == RD_KAFKA_CONF_OK) continue; - fprintf(stderr, "%% %s:%d: %s=%s: %s\n", - path, line, s, t, errstr); + fprintf(stderr, "%% %s:%d: %s=%s: %s\n", path, line, s, t, + errstr); fclose(fp); return 
-1; } @@ -756,12 +762,15 @@ static int read_conf_file (rd_kafka_conf_t *conf, const char *path) { } -static rd_kafka_resp_err_t do_produce (rd_kafka_t *rk, - rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - void *payload, size_t size, - const void *key, size_t key_size, - const rd_kafka_headers_t *hdrs) { +static rd_kafka_resp_err_t do_produce(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + void *payload, + size_t size, + const void *key, + size_t key_size, + const rd_kafka_headers_t *hdrs) { /* Send/Produce message. */ if (hdrs) { @@ -771,14 +780,11 @@ static rd_kafka_resp_err_t do_produce (rd_kafka_t *rk, hdrs_copy = rd_kafka_headers_copy(hdrs); err = rd_kafka_producev( - rk, - RD_KAFKA_V_RKT(rkt), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_MSGFLAGS(msgflags), - RD_KAFKA_V_VALUE(payload, size), - RD_KAFKA_V_KEY(key, key_size), - RD_KAFKA_V_HEADERS(hdrs_copy), - RD_KAFKA_V_END); + rk, RD_KAFKA_V_RKT(rkt), RD_KAFKA_V_PARTITION(partition), + RD_KAFKA_V_MSGFLAGS(msgflags), + RD_KAFKA_V_VALUE(payload, size), + RD_KAFKA_V_KEY(key, key_size), + RD_KAFKA_V_HEADERS(hdrs_copy), RD_KAFKA_V_END); if (err) rd_kafka_headers_destroy(hdrs_copy); @@ -797,7 +803,7 @@ static rd_kafka_resp_err_t do_produce (rd_kafka_t *rk, /** * @brief Sleep for \p sleep_us microseconds. 
*/ -static void do_sleep (int sleep_us) { +static void do_sleep(int sleep_us) { if (sleep_us > 100) { #ifdef _WIN32 Sleep(sleep_us / 1000); @@ -812,52 +818,52 @@ static void do_sleep (int sleep_us) { } -int main (int argc, char **argv) { - char *brokers = NULL; - char mode = 'C'; - char *topic = NULL; - const char *key = NULL; +int main(int argc, char **argv) { + char *brokers = NULL; + char mode = 'C'; + char *topic = NULL; + const char *key = NULL; int *partitions = NULL; - int opt; - int sendflags = 0; - char *msgpattern = "librdkafka_performance testing!"; - int msgsize = -1; - const char *debug = NULL; - int do_conf_dump = 0; - rd_ts_t now; - char errstr[512]; - uint64_t seq = 0; - int seed = (int)time(NULL); + int opt; + int sendflags = 0; + char *msgpattern = "librdkafka_performance testing!"; + int msgsize = -1; + const char *debug = NULL; + int do_conf_dump = 0; + rd_ts_t now; + char errstr[512]; + uint64_t seq = 0; + int seed = (int)time(NULL); rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_queue_t *rkqu = NULL; - const char *compression = "no"; - int64_t start_offset = 0; - int batch_size = 0; - int idle = 0; - const char *stats_cmd = NULL; - char *stats_intvlstr = NULL; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *rkqu = NULL; + const char *compression = "no"; + int64_t start_offset = 0; + int batch_size = 0; + int idle = 0; + const char *stats_cmd = NULL; + char *stats_intvlstr = NULL; char tmp[128]; char *tmp2; int otype = _OTYPE_SUMMARY; double dtmp; int rate_sleep = 0; - rd_kafka_topic_partition_list_t *topics; - int exitcode = 0; + rd_kafka_topic_partition_list_t *topics; + int exitcode = 0; rd_kafka_headers_t *hdrs = NULL; rd_kafka_resp_err_t err; - /* Kafka configuration */ - conf = rd_kafka_conf_new(); - rd_kafka_conf_set_error_cb(conf, err_cb); - rd_kafka_conf_set_throttle_cb(conf, throttle_cb); + /* Kafka configuration */ + conf = rd_kafka_conf_new(); + rd_kafka_conf_set_error_cb(conf, 
err_cb); + rd_kafka_conf_set_throttle_cb(conf, throttle_cb); rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb); #ifdef SIGIO /* Quick termination */ - rd_snprintf(tmp, sizeof(tmp), "%i", SIGIO); - rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); + rd_snprintf(tmp, sizeof(tmp), "%i", SIGIO); + rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); #endif /* Producer config */ @@ -865,173 +871,174 @@ int main (int argc, char **argv) { rd_kafka_conf_set(conf, "message.send.max.retries", "3", NULL, 0); rd_kafka_conf_set(conf, "retry.backoff.ms", "500", NULL, 0); - /* Consumer config */ - /* Tell rdkafka to (try to) maintain 1M messages - * in its internal receive buffers. This is to avoid - * application -> rdkafka -> broker per-message ping-pong - * latency. - * The larger the local queue, the higher the performance. - * Try other values with: ... -X queued.min.messages=1000 - */ - rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0); - rd_kafka_conf_set(conf, "session.timeout.ms", "6000", NULL, 0); + /* Consumer config */ + /* Tell rdkafka to (try to) maintain 1M messages + * in its internal receive buffers. This is to avoid + * application -> rdkafka -> broker per-message ping-pong + * latency. + * The larger the local queue, the higher the performance. + * Try other values with: ... 
-X queued.min.messages=1000 + */ + rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0); + rd_kafka_conf_set(conf, "session.timeout.ms", "6000", NULL, 0); rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", NULL, 0); - topics = rd_kafka_topic_partition_list_new(1); - - while ((opt = - getopt(argc, argv, - "PCG:t:p:b:s:k:c:fi:MDd:m:S:x:" - "R:a:z:o:X:B:eT:Y:qvIur:lA:OwNH:")) != -1) { - switch (opt) { - case 'G': - if (rd_kafka_conf_set(conf, "group.id", optarg, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - /* FALLTHRU */ - case 'P': - case 'C': - mode = opt; - break; - case 't': - rd_kafka_topic_partition_list_add(topics, optarg, - RD_KAFKA_PARTITION_UA); - break; - case 'p': + topics = rd_kafka_topic_partition_list_new(1); + + while ((opt = getopt(argc, argv, + "PCG:t:p:b:s:k:c:fi:MDd:m:S:x:" + "R:a:z:o:X:B:eT:Y:qvIur:lA:OwNH:")) != -1) { + switch (opt) { + case 'G': + if (rd_kafka_conf_set(conf, "group.id", optarg, errstr, + sizeof(errstr)) != + RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + /* FALLTHRU */ + case 'P': + case 'C': + mode = opt; + break; + case 't': + rd_kafka_topic_partition_list_add( + topics, optarg, RD_KAFKA_PARTITION_UA); + break; + case 'p': partition_cnt++; - partitions = realloc(partitions, sizeof(*partitions) * partition_cnt); - partitions[partition_cnt-1] = atoi(optarg); - break; - - case 'b': - brokers = optarg; - break; - case 's': - msgsize = atoi(optarg); - break; - case 'k': - key = optarg; - break; - case 'c': - msgcnt = atoi(optarg); - break; - case 'D': - sendflags |= RD_KAFKA_MSG_F_FREE; - break; - case 'i': - dispintvl = atoi(optarg); - break; - case 'm': - msgpattern = optarg; - break; - case 'S': - seq = strtoull(optarg, NULL, 10); - do_seq = 1; - break; - case 'x': - exit_after = atoi(optarg); - break; - case 'R': - seed = atoi(optarg); - break; - case 'a': - if (rd_kafka_conf_set(conf, - "acks", - optarg, + partitions = 
realloc(partitions, sizeof(*partitions) * + partition_cnt); + partitions[partition_cnt - 1] = atoi(optarg); + break; + + case 'b': + brokers = optarg; + break; + case 's': + msgsize = atoi(optarg); + break; + case 'k': + key = optarg; + break; + case 'c': + msgcnt = atoi(optarg); + break; + case 'D': + sendflags |= RD_KAFKA_MSG_F_FREE; + break; + case 'i': + dispintvl = atoi(optarg); + break; + case 'm': + msgpattern = optarg; + break; + case 'S': + seq = strtoull(optarg, NULL, 10); + do_seq = 1; + break; + case 'x': + exit_after = atoi(optarg); + break; + case 'R': + seed = atoi(optarg); + break; + case 'a': + if (rd_kafka_conf_set(conf, "acks", optarg, errstr, + sizeof(errstr)) != + RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + break; + case 'B': + batch_size = atoi(optarg); + break; + case 'z': + if (rd_kafka_conf_set(conf, "compression.codec", optarg, errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - break; - case 'B': - batch_size = atoi(optarg); - break; - case 'z': - if (rd_kafka_conf_set(conf, "compression.codec", - optarg, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - compression = optarg; - break; - case 'o': - if (!strcmp(optarg, "end")) - start_offset = RD_KAFKA_OFFSET_END; - else if (!strcmp(optarg, "beginning")) - start_offset = RD_KAFKA_OFFSET_BEGINNING; - else if (!strcmp(optarg, "stored")) - start_offset = RD_KAFKA_OFFSET_STORED; - else { - start_offset = strtoll(optarg, NULL, 10); - - if (start_offset < 0) - start_offset = RD_KAFKA_OFFSET_TAIL(-start_offset); - } - - break; - case 'e': - exit_eof = 1; - break; - case 'd': - debug = optarg; - break; - case 'H': - if (!strcmp(optarg, "parse")) - read_hdrs = 1; - else { - char *name, *val; - size_t name_sz = -1; - - name = optarg; - val = strchr(name, '='); - if (val) { - name_sz = (size_t)(val-name); - val++; /* past the '=' */ - } + RD_KAFKA_CONF_OK) { + 
fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + compression = optarg; + break; + case 'o': + if (!strcmp(optarg, "end")) + start_offset = RD_KAFKA_OFFSET_END; + else if (!strcmp(optarg, "beginning")) + start_offset = RD_KAFKA_OFFSET_BEGINNING; + else if (!strcmp(optarg, "stored")) + start_offset = RD_KAFKA_OFFSET_STORED; + else { + start_offset = strtoll(optarg, NULL, 10); + + if (start_offset < 0) + start_offset = + RD_KAFKA_OFFSET_TAIL(-start_offset); + } - if (!hdrs) - hdrs = rd_kafka_headers_new(8); + break; + case 'e': + exit_eof = 1; + break; + case 'd': + debug = optarg; + break; + case 'H': + if (!strcmp(optarg, "parse")) + read_hdrs = 1; + else { + char *name, *val; + size_t name_sz = -1; + + name = optarg; + val = strchr(name, '='); + if (val) { + name_sz = (size_t)(val - name); + val++; /* past the '=' */ + } - err = rd_kafka_header_add(hdrs, name, name_sz, val, -1); - if (err) { - fprintf(stderr, - "%% Failed to add header %s: %s\n", - name, rd_kafka_err2str(err)); - exit(1); - } - } - break; - case 'X': - { - char *name, *val; - rd_kafka_conf_res_t res; - - if (!strcmp(optarg, "list") || - !strcmp(optarg, "help")) { - rd_kafka_conf_properties_show(stdout); - exit(0); - } - - if (!strcmp(optarg, "dump")) { - do_conf_dump = 1; - continue; - } - - name = optarg; - if (!(val = strchr(name, '='))) { - fprintf(stderr, "%% Expected " - "-X property=value, not %s\n", name); - exit(1); - } - - *val = '\0'; - val++; + if (!hdrs) + hdrs = rd_kafka_headers_new(8); + + err = rd_kafka_header_add(hdrs, name, name_sz, + val, -1); + if (err) { + fprintf( + stderr, + "%% Failed to add header %s: %s\n", + name, rd_kafka_err2str(err)); + exit(1); + } + } + break; + case 'X': { + char *name, *val; + rd_kafka_conf_res_t res; + + if (!strcmp(optarg, "list") || + !strcmp(optarg, "help")) { + rd_kafka_conf_properties_show(stdout); + exit(0); + } + + if (!strcmp(optarg, "dump")) { + do_conf_dump = 1; + continue; + } + + name = optarg; + if (!(val = strchr(name, '='))) { + 
fprintf(stderr, + "%% Expected " + "-X property=value, not %s\n", + name); + exit(1); + } + + *val = '\0'; + val++; if (!strcmp(name, "file")) { if (read_conf_file(conf, val) == -1) @@ -1039,34 +1046,33 @@ int main (int argc, char **argv) { break; } - res = rd_kafka_conf_set(conf, name, val, - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, name, val, errstr, + sizeof(errstr)); - if (res != RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - } - break; + if (res != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + } break; - case 'T': + case 'T': stats_intvlstr = optarg; - break; + break; case 'Y': stats_cmd = optarg; break; - case 'q': + case 'q': verbosity--; - break; + break; - case 'v': + case 'v': verbosity++; - break; + break; - case 'I': - idle = 1; - break; + case 'I': + idle = 1; + break; case 'u': otype = _OTYPE_TAB; @@ -1087,127 +1093,126 @@ int main (int argc, char **argv) { case 'l': latency_mode = 1; - break; - - case 'A': - if (!(latency_fp = fopen(optarg, "w"))) { - fprintf(stderr, - "%% Cant open %s: %s\n", - optarg, strerror(errno)); - exit(1); - } break; - case 'M': - incremental_mode = 1; - break; + case 'A': + if (!(latency_fp = fopen(optarg, "w"))) { + fprintf(stderr, "%% Cant open %s: %s\n", optarg, + strerror(errno)); + exit(1); + } + break; - case 'N': - with_dr = 0; - break; + case 'M': + incremental_mode = 1; + break; - default: + case 'N': + with_dr = 0; + break; + + default: fprintf(stderr, "Unknown option: %c\n", opt); - goto usage; - } - } + goto usage; + } + } - if (topics->cnt == 0 || optind != argc) { + if (topics->cnt == 0 || optind != argc) { if (optind < argc) fprintf(stderr, "Unknown argument: %s\n", argv[optind]); - usage: - fprintf(stderr, - "Usage: %s [-C|-P] -t " - "[-p ] [-b ] [options..]\n" - "\n" - "librdkafka version %s (0x%08x)\n" - "\n" - " Options:\n" - " -C | -P | Consumer or Producer mode\n" - " -G High-level Kafka Consumer mode\n" - " -t Topic to consume / 
produce\n" - " -p Partition (defaults to random). " - "Multiple partitions are allowed in -C consumer mode.\n" - " -M Print consumer interval stats\n" - " -b Broker address list (host[:port],..)\n" - " -s Message size (producer)\n" - " -k Message key (producer)\n" - " -H Add header to message (producer)\n" - " -H parse Read message headers (consumer)\n" - " -c Messages to transmit/receive\n" - " -x Hard exit after transmitting messages (producer)\n" - " -D Copy/Duplicate data buffer (producer)\n" - " -i Display interval\n" - " -m Message payload pattern\n" - " -S Send a sequence number starting at " - " as payload\n" - " -R Random seed value (defaults to time)\n" - " -a Required acks (producer): " - "-1, 0, 1, >1\n" - " -B Consume batch size (# of msgs)\n" - " -z Enable compression:\n" - " none|gzip|snappy\n" - " -o Start offset (consumer)\n" - " beginning, end, NNNNN or -NNNNN\n" - " -d [facs..] Enable debugging contexts:\n" - " %s\n" - " -X Set arbitrary librdkafka " - "configuration property\n" - " -X file= Read config from file.\n" - " -X list Show full list of supported properties.\n" - " -X dump Show configuration\n" - " -T Enable statistics from librdkafka at " - "specified interval (ms)\n" - " -Y Pipe statistics to \n" - " -I Idle: dont produce any messages\n" - " -q Decrease verbosity\n" - " -v Increase verbosity (default 1)\n" - " -u Output stats in table format\n" - " -r Producer msg/s limit\n" - " -l Latency measurement.\n" - " Needs two matching instances, one\n" - " consumer and one producer, both\n" - " running with the -l switch.\n" - " -l Producer: per-message latency stats\n" - " -A Write per-message latency stats to " - ". 
Requires -l\n" - " -O Report produced offset (producer)\n" - " -N No delivery reports (producer)\n" - "\n" - " In Consumer mode:\n" - " consumes messages and prints thruput\n" - " If -B <..> is supplied the batch consumer\n" - " mode is used, else the callback mode is used.\n" - "\n" - " In Producer mode:\n" - " writes messages of size -s <..> and prints thruput\n" - "\n", - argv[0], - rd_kafka_version_str(), rd_kafka_version(), - RD_KAFKA_DEBUG_CONTEXTS); - exit(1); - } - - - dispintvl *= 1000; /* us */ + usage: + fprintf( + stderr, + "Usage: %s [-C|-P] -t " + "[-p ] [-b ] [options..]\n" + "\n" + "librdkafka version %s (0x%08x)\n" + "\n" + " Options:\n" + " -C | -P | Consumer or Producer mode\n" + " -G High-level Kafka Consumer mode\n" + " -t Topic to consume / produce\n" + " -p Partition (defaults to random). " + "Multiple partitions are allowed in -C consumer mode.\n" + " -M Print consumer interval stats\n" + " -b Broker address list (host[:port],..)\n" + " -s Message size (producer)\n" + " -k Message key (producer)\n" + " -H Add header to message (producer)\n" + " -H parse Read message headers (consumer)\n" + " -c Messages to transmit/receive\n" + " -x Hard exit after transmitting " + "messages (producer)\n" + " -D Copy/Duplicate data buffer (producer)\n" + " -i Display interval\n" + " -m Message payload pattern\n" + " -S Send a sequence number starting at " + " as payload\n" + " -R Random seed value (defaults to time)\n" + " -a Required acks (producer): " + "-1, 0, 1, >1\n" + " -B Consume batch size (# of msgs)\n" + " -z Enable compression:\n" + " none|gzip|snappy\n" + " -o Start offset (consumer)\n" + " beginning, end, NNNNN or -NNNNN\n" + " -d [facs..] 
Enable debugging contexts:\n" + " %s\n" + " -X Set arbitrary librdkafka " + "configuration property\n" + " -X file= Read config from file.\n" + " -X list Show full list of supported properties.\n" + " -X dump Show configuration\n" + " -T Enable statistics from librdkafka at " + "specified interval (ms)\n" + " -Y Pipe statistics to \n" + " -I Idle: dont produce any messages\n" + " -q Decrease verbosity\n" + " -v Increase verbosity (default 1)\n" + " -u Output stats in table format\n" + " -r Producer msg/s limit\n" + " -l Latency measurement.\n" + " Needs two matching instances, one\n" + " consumer and one producer, both\n" + " running with the -l switch.\n" + " -l Producer: per-message latency stats\n" + " -A Write per-message latency stats to " + ". Requires -l\n" + " -O Report produced offset (producer)\n" + " -N No delivery reports (producer)\n" + "\n" + " In Consumer mode:\n" + " consumes messages and prints thruput\n" + " If -B <..> is supplied the batch consumer\n" + " mode is used, else the callback mode is used.\n" + "\n" + " In Producer mode:\n" + " writes messages of size -s <..> and prints thruput\n" + "\n", + argv[0], rd_kafka_version_str(), rd_kafka_version(), + RD_KAFKA_DEBUG_CONTEXTS); + exit(1); + } + + + dispintvl *= 1000; /* us */ if (verbosity > 1) - printf("%% Using random seed %i, verbosity level %i\n", - seed, verbosity); - srand(seed); - signal(SIGINT, stop); + printf("%% Using random seed %i, verbosity level %i\n", seed, + verbosity); + srand(seed); + signal(SIGINT, stop); #ifdef SIGUSR1 - signal(SIGUSR1, sig_usr1); + signal(SIGUSR1, sig_usr1); #endif - if (debug && - rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - printf("%% Debug configuration failed: %s: %s\n", - errstr, debug); - exit(1); - } + if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + printf("%% Debug configuration failed: %s: %s\n", errstr, + debug); + exit(1); + } /* Always enable 
stats (for RTT extraction), and if user supplied * the -T option we let her take part of the stats aswell. */ @@ -1216,13 +1221,12 @@ int main (int argc, char **argv) { if (!stats_intvlstr) { /* if no user-desired stats, adjust stats interval * to the display interval. */ - rd_snprintf(tmp, sizeof(tmp), "%"PRId64, dispintvl / 1000); + rd_snprintf(tmp, sizeof(tmp), "%" PRId64, dispintvl / 1000); } if (rd_kafka_conf_set(conf, "statistics.interval.ms", - stats_intvlstr ? stats_intvlstr : tmp, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { + stats_intvlstr ? stats_intvlstr : tmp, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } @@ -1232,7 +1236,7 @@ int main (int argc, char **argv) { size_t cnt; int pass; - for (pass = 0 ; pass < 2 ; pass++) { + for (pass = 0; pass < 2; pass++) { int i; if (pass == 0) { @@ -1240,13 +1244,12 @@ int main (int argc, char **argv) { printf("# Global config\n"); } else { rd_kafka_topic_conf_t *topic_conf = - rd_kafka_conf_get_default_topic_conf( - conf); + rd_kafka_conf_get_default_topic_conf(conf); if (topic_conf) { printf("# Topic config\n"); arr = rd_kafka_topic_conf_dump( - topic_conf, &cnt); + topic_conf, &cnt); } else { arr = NULL; } @@ -1255,9 +1258,8 @@ int main (int argc, char **argv) { if (!arr) continue; - for (i = 0 ; i < (int)cnt ; i += 2) - printf("%s = %s\n", - arr[i], arr[i+1]); + for (i = 0; i < (int)cnt; i += 2) + printf("%s = %s\n", arr[i], arr[i + 1]); printf("\n"); @@ -1284,7 +1286,8 @@ int main (int argc, char **argv) { ))) { fprintf(stderr, "%% Failed to start stats command: " - "%s: %s", stats_cmd, strerror(errno)); + "%s: %s", + stats_cmd, strerror(errno)); exit(1); } } else @@ -1292,17 +1295,17 @@ int main (int argc, char **argv) { stats_fp = stdout; } - if (msgcnt != -1) - forever = 0; + if (msgcnt != -1) + forever = 0; - if (msgsize == -1) - msgsize = (int)strlen(msgpattern); + if (msgsize == -1) + msgsize = (int)strlen(msgpattern); - topic = 
topics->elems[0].topic; + topic = topics->elems[0].topic; if (mode == 'C' || mode == 'G') - rd_kafka_conf_set(conf, "enable.partition.eof", "true", - NULL, 0); + rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL, + 0); if (read_hdrs && mode == 'P') { fprintf(stderr, "%% producer can not read headers\n"); @@ -1316,71 +1319,71 @@ int main (int argc, char **argv) { /* Set bootstrap servers */ if (brokers && - rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } - if (mode == 'P') { - /* - * Producer - */ - char *sbuf; - char *pbuf; - int outq; - int keylen = key ? (int)strlen(key) : 0; - off_t rof = 0; - size_t plen = strlen(msgpattern); - int partition = partitions ? partitions[0] : - RD_KAFKA_PARTITION_UA; + if (mode == 'P') { + /* + * Producer + */ + char *sbuf; + char *pbuf; + int outq; + int keylen = key ? (int)strlen(key) : 0; + off_t rof = 0; + size_t plen = strlen(msgpattern); + int partition = + partitions ? 
partitions[0] : RD_KAFKA_PARTITION_UA; if (latency_mode) { int minlen = (int)(strlen("LATENCY:") + - strlen("18446744073709551615 ")+1); - msgsize = RD_MAX(minlen, msgsize); + strlen("18446744073709551615 ") + 1); + msgsize = RD_MAX(minlen, msgsize); sendflags |= RD_KAFKA_MSG_F_COPY; - } else if (do_seq) { - int minlen = (int)strlen("18446744073709551615 ")+1; + } else if (do_seq) { + int minlen = (int)strlen("18446744073709551615 ") + 1; if (msgsize < minlen) msgsize = minlen; - /* Force duplication of payload */ + /* Force duplication of payload */ sendflags |= RD_KAFKA_MSG_F_FREE; - } - - sbuf = malloc(msgsize); - - /* Copy payload content to new buffer */ - while (rof < msgsize) { - size_t xlen = RD_MIN((size_t)msgsize-rof, plen); - memcpy(sbuf+rof, msgpattern, xlen); - rof += (off_t)xlen; - } - - if (msgcnt == -1) - printf("%% Sending messages of size %i bytes\n", - msgsize); - else - printf("%% Sending %i messages of size %i bytes\n", - msgcnt, msgsize); - - if (with_dr) - rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered); - - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create Kafka producer: %s\n", - errstr); - exit(1); - } + } + + sbuf = malloc(msgsize); + + /* Copy payload content to new buffer */ + while (rof < msgsize) { + size_t xlen = RD_MIN((size_t)msgsize - rof, plen); + memcpy(sbuf + rof, msgpattern, xlen); + rof += (off_t)xlen; + } + + if (msgcnt == -1) + printf("%% Sending messages of size %i bytes\n", + msgsize); + else + printf("%% Sending %i messages of size %i bytes\n", + msgcnt, msgsize); + + if (with_dr) + rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered); + + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create Kafka producer: %s\n", + errstr); + exit(1); + } global_rk = rk; - /* Explicitly create topic to avoid per-msg lookups. 
*/ - rkt = rd_kafka_topic_new(rk, topic, NULL); + /* Explicitly create topic to avoid per-msg lookups. */ + rkt = rd_kafka_topic_new(rk, topic, NULL); if (rate_sleep && verbosity >= 2) @@ -1392,206 +1395,216 @@ int main (int argc, char **argv) { if (dr_disp_div == 0) dr_disp_div = 10; - cnt.t_start = cnt.t_last = rd_clock(); + cnt.t_start = cnt.t_last = rd_clock(); - msgs_wait_produce_cnt = msgcnt; + msgs_wait_produce_cnt = msgcnt; - while (run && (msgcnt == -1 || (int)cnt.msgs < msgcnt)) { - /* Send/Produce message. */ + while (run && (msgcnt == -1 || (int)cnt.msgs < msgcnt)) { + /* Send/Produce message. */ - if (idle) { - rd_kafka_poll(rk, 1000); - continue; - } + if (idle) { + rd_kafka_poll(rk, 1000); + continue; + } if (latency_mode) { - rd_snprintf(sbuf, msgsize-1, - "LATENCY:%"PRIu64, wall_clock()); + rd_snprintf(sbuf, msgsize - 1, + "LATENCY:%" PRIu64, wall_clock()); } else if (do_seq) { - rd_snprintf(sbuf, - msgsize-1, "%"PRIu64": ", seq); + rd_snprintf(sbuf, msgsize - 1, "%" PRIu64 ": ", + seq); seq++; - } + } - if (sendflags & RD_KAFKA_MSG_F_FREE) { - /* Duplicate memory */ - pbuf = malloc(msgsize); - memcpy(pbuf, sbuf, msgsize); - } else - pbuf = sbuf; + if (sendflags & RD_KAFKA_MSG_F_FREE) { + /* Duplicate memory */ + pbuf = malloc(msgsize); + memcpy(pbuf, sbuf, msgsize); + } else + pbuf = sbuf; if (msgsize == 0) pbuf = NULL; - cnt.tx++; - while (run && - (err = do_produce(rk, rkt, partition, sendflags, - pbuf, msgsize, - key, keylen, hdrs))) { - if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - printf("%% No such partition: " - "%"PRId32"\n", partition); - else if (verbosity >= 3 || - (err != RD_KAFKA_RESP_ERR__QUEUE_FULL && verbosity >= 1)) - printf("%% produce error: %s%s\n", - rd_kafka_err2str(err), - err == RD_KAFKA_RESP_ERR__QUEUE_FULL ? 
- " (backpressure)" : ""); - - cnt.tx_err++; - if (err != RD_KAFKA_RESP_ERR__QUEUE_FULL) { - run = 0; - break; - } - now = rd_clock(); - if (verbosity >= 2 && + cnt.tx++; + while (run && (err = do_produce( + rk, rkt, partition, sendflags, pbuf, + msgsize, key, keylen, hdrs))) { + if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + printf( + "%% No such partition: " + "%" PRId32 "\n", + partition); + else if (verbosity >= 3 || + (err != + RD_KAFKA_RESP_ERR__QUEUE_FULL && + verbosity >= 1)) + printf( + "%% produce error: %s%s\n", + rd_kafka_err2str(err), + err == RD_KAFKA_RESP_ERR__QUEUE_FULL + ? " (backpressure)" + : ""); + + cnt.tx_err++; + if (err != RD_KAFKA_RESP_ERR__QUEUE_FULL) { + run = 0; + break; + } + now = rd_clock(); + if (verbosity >= 2 && cnt.t_enobufs_last + dispintvl <= now) { - printf("%% Backpressure %i " - "(tx %"PRIu64", " - "txerr %"PRIu64")\n", - rd_kafka_outq_len(rk), - cnt.tx, cnt.tx_err); - cnt.t_enobufs_last = now; - } + printf( + "%% Backpressure %i " + "(tx %" PRIu64 + ", " + "txerr %" PRIu64 ")\n", + rd_kafka_outq_len(rk), cnt.tx, + cnt.tx_err); + cnt.t_enobufs_last = now; + } - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 10); + /* Poll to handle delivery reports */ + rd_kafka_poll(rk, 10); print_stats(rk, mode, otype, compression); - } - - msgs_wait_cnt++; - if (msgs_wait_produce_cnt != -1) - msgs_wait_produce_cnt--; - cnt.msgs++; - cnt.bytes += msgsize; - - /* Must poll to handle delivery reports */ - if (rate_sleep) { - rd_ts_t next = rd_clock() + (rd_ts_t) rate_sleep; - do { - rd_kafka_poll(rk, - (int)RD_MAX(0, - (next - rd_clock()) / 1000)); - } while (next > rd_clock()); - } else { - rd_kafka_poll(rk, 0); - } - - print_stats(rk, mode, otype, compression); - } - - forever = 0; + } + + msgs_wait_cnt++; + if (msgs_wait_produce_cnt != -1) + msgs_wait_produce_cnt--; + cnt.msgs++; + cnt.bytes += msgsize; + + /* Must poll to handle delivery reports */ + if (rate_sleep) { + rd_ts_t next = rd_clock() + (rd_ts_t)rate_sleep; + do 
{ + rd_kafka_poll( + rk, + (int)RD_MAX(0, (next - rd_clock()) / + 1000)); + } while (next > rd_clock()); + } else { + rd_kafka_poll(rk, 0); + } + + print_stats(rk, mode, otype, compression); + } + + forever = 0; if (verbosity >= 2) - printf("%% All messages produced, " - "now waiting for %li deliveries\n", - msgs_wait_cnt); + printf( + "%% All messages produced, " + "now waiting for %li deliveries\n", + msgs_wait_cnt); - /* Wait for messages to be delivered */ + /* Wait for messages to be delivered */ while (run && rd_kafka_poll(rk, 1000) != -1) - print_stats(rk, mode, otype, compression); + print_stats(rk, mode, otype, compression); - outq = rd_kafka_outq_len(rk); + outq = rd_kafka_outq_len(rk); if (verbosity >= 2) printf("%% %i messages in outq\n", outq); - cnt.msgs -= outq; - cnt.t_end = t_end; + cnt.msgs -= outq; + cnt.t_end = t_end; - if (cnt.tx_err > 0) - printf("%% %"PRIu64" backpressures for %"PRIu64 - " produce calls: %.3f%% backpressure rate\n", - cnt.tx_err, cnt.tx, - ((double)cnt.tx_err / (double)cnt.tx) * 100.0); + if (cnt.tx_err > 0) + printf("%% %" PRIu64 " backpressures for %" PRIu64 + " produce calls: %.3f%% backpressure rate\n", + cnt.tx_err, cnt.tx, + ((double)cnt.tx_err / (double)cnt.tx) * 100.0); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy the handle */ - rd_kafka_destroy(rk); + /* Destroy the handle */ + rd_kafka_destroy(rk); global_rk = rk = NULL; - free(sbuf); + free(sbuf); exitcode = cnt.msgs == cnt.msgs_dr_ok ? 
0 : 1; - } else if (mode == 'C') { - /* - * Consumer - */ + } else if (mode == 'C') { + /* + * Consumer + */ - rd_kafka_message_t **rkmessages = NULL; - size_t i = 0; + rd_kafka_message_t **rkmessages = NULL; + size_t i = 0; - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create Kafka consumer: %s\n", - errstr); - exit(1); - } + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create Kafka consumer: %s\n", + errstr); + exit(1); + } global_rk = rk; - /* Create topic to consume from */ - rkt = rd_kafka_topic_new(rk, topic, NULL); - - /* Batch consumer */ - if (batch_size) - rkmessages = malloc(sizeof(*rkmessages) * batch_size); - - /* Start consuming */ - rkqu = rd_kafka_queue_new(rk); - for (i=0 ; i<(size_t)partition_cnt ; ++i) { - const int r = rd_kafka_consume_start_queue(rkt, - partitions[i], start_offset, rkqu); - - if (r == -1) { - fprintf(stderr, "%% Error creating queue: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); - exit(1); - } - } - - while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) { - /* Consume messages. - * A message may either be a real message, or - * an error signaling (if rkmessage->err is set). - */ - uint64_t fetch_latency; - ssize_t r; - - fetch_latency = rd_clock(); - - if (batch_size) { - int partition = partitions ? 
partitions[0] : - RD_KAFKA_PARTITION_UA; - - /* Batch fetch mode */ - r = rd_kafka_consume_batch(rkt, partition, - 1000, - rkmessages, - batch_size); - if (r != -1) { - for (i = 0 ; (ssize_t)i < r ; i++) { - msg_consume(rkmessages[i], - NULL); - rd_kafka_message_destroy( - rkmessages[i]); - } - } - } else { - /* Queue mode */ - r = rd_kafka_consume_callback_queue(rkqu, 1000, - msg_consume, - NULL); - } - - cnt.t_fetch_latency += rd_clock() - fetch_latency; + /* Create topic to consume from */ + rkt = rd_kafka_topic_new(rk, topic, NULL); + + /* Batch consumer */ + if (batch_size) + rkmessages = malloc(sizeof(*rkmessages) * batch_size); + + /* Start consuming */ + rkqu = rd_kafka_queue_new(rk); + for (i = 0; i < (size_t)partition_cnt; ++i) { + const int r = rd_kafka_consume_start_queue( + rkt, partitions[i], start_offset, rkqu); + + if (r == -1) { + fprintf( + stderr, "%% Error creating queue: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); + exit(1); + } + } + + while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) { + /* Consume messages. + * A message may either be a real message, or + * an error signaling (if rkmessage->err is set). + */ + uint64_t fetch_latency; + ssize_t r; + + fetch_latency = rd_clock(); + + if (batch_size) { + int partition = partitions + ? 
partitions[0] + : RD_KAFKA_PARTITION_UA; + + /* Batch fetch mode */ + r = rd_kafka_consume_batch(rkt, partition, 1000, + rkmessages, + batch_size); + if (r != -1) { + for (i = 0; (ssize_t)i < r; i++) { + msg_consume(rkmessages[i], + NULL); + rd_kafka_message_destroy( + rkmessages[i]); + } + } + } else { + /* Queue mode */ + r = rd_kafka_consume_callback_queue( + rkqu, 1000, msg_consume, NULL); + } + + cnt.t_fetch_latency += rd_clock() - fetch_latency; if (r == -1) - fprintf(stderr, "%% Error: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); + fprintf( + stderr, "%% Error: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); else if (r > 0 && rate_sleep) { /* Simulate processing time * if `-r ` was set. */ @@ -1599,110 +1612,110 @@ int main (int argc, char **argv) { } - print_stats(rk, mode, otype, compression); + print_stats(rk, mode, otype, compression); - /* Poll to handle stats callbacks */ - rd_kafka_poll(rk, 0); - } - cnt.t_end = rd_clock(); - - /* Stop consuming */ - for (i=0 ; i<(size_t)partition_cnt ; ++i) { - int r = rd_kafka_consume_stop(rkt, (int32_t)i); - if (r == -1) { - fprintf(stderr, - "%% Error in consume_stop: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); - } - } - rd_kafka_queue_destroy(rkqu); + /* Poll to handle stats callbacks */ + rd_kafka_poll(rk, 0); + } + cnt.t_end = rd_clock(); + + /* Stop consuming */ + for (i = 0; i < (size_t)partition_cnt; ++i) { + int r = rd_kafka_consume_stop(rkt, (int32_t)i); + if (r == -1) { + fprintf( + stderr, "%% Error in consume_stop: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); + } + } + rd_kafka_queue_destroy(rkqu); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - if (batch_size) - free(rkmessages); + if (batch_size) + free(rkmessages); - /* Destroy the handle */ - rd_kafka_destroy(rk); + /* Destroy the handle */ + rd_kafka_destroy(rk); global_rk = rk = NULL; - } else if (mode == 'G') { - /* - * High-level balanced Consumer - */ + } else if 
(mode == 'G') { + /* + * High-level balanced Consumer + */ - rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); + rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create Kafka consumer: %s\n", - errstr); - exit(1); - } + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create Kafka consumer: %s\n", + errstr); + exit(1); + } - /* Forward all events to consumer queue */ - rd_kafka_poll_set_consumer(rk); + /* Forward all events to consumer queue */ + rd_kafka_poll_set_consumer(rk); global_rk = rk; - err = rd_kafka_subscribe(rk, topics); - if (err) { - fprintf(stderr, "%% Subscribe failed: %s\n", - rd_kafka_err2str(err)); - exit(1); - } - fprintf(stderr, "%% Waiting for group rebalance..\n"); - - while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) { - /* Consume messages. - * A message may either be a real message, or - * an event (if rkmessage->err is set). - */ - rd_kafka_message_t *rkmessage; - uint64_t fetch_latency; - - fetch_latency = rd_clock(); - - rkmessage = rd_kafka_consumer_poll(rk, 1000); - if (rkmessage) { - msg_consume(rkmessage, NULL); - rd_kafka_message_destroy(rkmessage); + err = rd_kafka_subscribe(rk, topics); + if (err) { + fprintf(stderr, "%% Subscribe failed: %s\n", + rd_kafka_err2str(err)); + exit(1); + } + fprintf(stderr, "%% Waiting for group rebalance..\n"); + + while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) { + /* Consume messages. + * A message may either be a real message, or + * an event (if rkmessage->err is set). 
+ */ + rd_kafka_message_t *rkmessage; + uint64_t fetch_latency; + + fetch_latency = rd_clock(); + + rkmessage = rd_kafka_consumer_poll(rk, 1000); + if (rkmessage) { + msg_consume(rkmessage, NULL); + rd_kafka_message_destroy(rkmessage); /* Simulate processing time * if `-r ` was set. */ if (rate_sleep) do_sleep(rate_sleep); - } + } - cnt.t_fetch_latency += rd_clock() - fetch_latency; + cnt.t_fetch_latency += rd_clock() - fetch_latency; - print_stats(rk, mode, otype, compression); - } - cnt.t_end = rd_clock(); + print_stats(rk, mode, otype, compression); + } + cnt.t_end = rd_clock(); - err = rd_kafka_consumer_close(rk); - if (err) - fprintf(stderr, "%% Failed to close consumer: %s\n", - rd_kafka_err2str(err)); + err = rd_kafka_consumer_close(rk); + if (err) + fprintf(stderr, "%% Failed to close consumer: %s\n", + rd_kafka_err2str(err)); - rd_kafka_destroy(rk); - } + rd_kafka_destroy(rk); + } if (hdrs) rd_kafka_headers_destroy(hdrs); - print_stats(NULL, mode, otype|_OTYPE_FORCE, compression); + print_stats(NULL, mode, otype | _OTYPE_FORCE, compression); - if (cnt.t_fetch_latency && cnt.msgs) - printf("%% Average application fetch latency: %"PRIu64"us\n", - cnt.t_fetch_latency / cnt.msgs); + if (cnt.t_fetch_latency && cnt.msgs) + printf("%% Average application fetch latency: %" PRIu64 "us\n", + cnt.t_fetch_latency / cnt.msgs); - if (latency_fp) - fclose(latency_fp); + if (latency_fp) + fclose(latency_fp); if (stats_fp) { #ifndef _WIN32 @@ -1714,10 +1727,10 @@ int main (int argc, char **argv) { if (partitions) free(partitions); - rd_kafka_topic_partition_list_destroy(topics); + rd_kafka_topic_partition_list_destroy(topics); - /* Let background threads clean up and terminate cleanly. */ - rd_kafka_wait_destroyed(2000); + /* Let background threads clean up and terminate cleanly. 
*/ + rd_kafka_wait_destroyed(2000); - return exitcode; + return exitcode; } diff --git a/examples/transactions-older-broker.c b/examples/transactions-older-broker.c index 5d2861ba51..e9f8d06f75 100644 --- a/examples/transactions-older-broker.c +++ b/examples/transactions-older-broker.c @@ -72,8 +72,8 @@ struct state { rd_kafka_t *producer; /**< Per-input partition output producer */ rd_kafka_topic_partition_t *rktpar; /**< Back-pointer to the * input partition. */ - time_t last_commit; /**< Last transaction commit */ - int msgcnt; /**< Number of messages processed in current txn */ + time_t last_commit; /**< Last transaction commit */ + int msgcnt; /**< Number of messages processed in current txn */ }; /* Current assignment for the input consumer. * The .opaque field of each partition points to an allocated 'struct state'. @@ -85,29 +85,31 @@ static rd_kafka_topic_partition_list_t *assigned_partitions; /** * @brief A fatal error has occurred, immediately exit the application. */ -#define fatal(...) do { \ - fprintf(stderr, "FATAL ERROR: "); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - exit(1); \ +#define fatal(...) \ + do { \ + fprintf(stderr, "FATAL ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(1); \ } while (0) /** * @brief Same as fatal() but takes an rd_kafka_error_t object, prints its * error message, destroys the object and then exits fatally. 
*/ -#define fatal_error(what,error) do { \ - fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", \ - what, rd_kafka_error_name(error), \ - rd_kafka_error_string(error)); \ - rd_kafka_error_destroy(error); \ - exit(1); \ +#define fatal_error(what, error) \ + do { \ + fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", what, \ + rd_kafka_error_name(error), \ + rd_kafka_error_string(error)); \ + rd_kafka_error_destroy(error); \ + exit(1); \ } while (0) /** * @brief Signal termination of program */ -static void stop (int sig) { +static void stop(int sig) { run = 0; } @@ -133,11 +135,10 @@ static void stop (int sig) { * In the case of transactional producing the delivery report callback is * mostly useful for logging the produce failures. */ -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) - fprintf(stderr, - "%% Message delivery failed: %s\n", + fprintf(stderr, "%% Message delivery failed: %s\n", rd_kafka_err2str(rkmessage->err)); /* The rkmessage is destroyed automatically by librdkafka */ @@ -150,7 +151,7 @@ static void dr_msg_cb (rd_kafka_t *rk, * and begin a new transaction. 
*/ static rd_kafka_t * -create_transactional_producer (const rd_kafka_topic_partition_t *rktpar) { +create_transactional_producer(const rd_kafka_topic_partition_t *rktpar) { rd_kafka_conf_t *conf = rd_kafka_conf_new(); rd_kafka_t *rk; char errstr[256]; @@ -158,15 +159,15 @@ create_transactional_producer (const rd_kafka_topic_partition_t *rktpar) { char transactional_id[256]; snprintf(transactional_id, sizeof(transactional_id), - "librdkafka_transactions_older_example_%s-%d", - rktpar->topic, rktpar->partition); + "librdkafka_transactions_older_example_%s-%d", rktpar->topic, + rktpar->partition); - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK || + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK || rd_kafka_conf_set(conf, "transactional.id", transactional_id, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK || - rd_kafka_conf_set(conf, "transaction.timeout.ms", "60000", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) + rd_kafka_conf_set(conf, "transaction.timeout.ms", "60000", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) fatal("Failed to configure producer: %s", errstr); /* This callback will be called once per message to indicate @@ -199,7 +200,7 @@ create_transactional_producer (const rd_kafka_topic_partition_t *rktpar) { /** * @brief Abort the current transaction and destroy the producer. */ -static void destroy_transactional_producer (rd_kafka_t *rk) { +static void destroy_transactional_producer(rd_kafka_t *rk) { rd_kafka_error_t *error; fprintf(stdout, "%s: aborting transaction and terminating producer\n", @@ -226,9 +227,9 @@ static void destroy_transactional_producer (rd_kafka_t *rk) { * position where the transaction last started, i.e., the committed * consumer offset. 
*/ -static void abort_transaction_and_rewind (struct state *state) { - rd_kafka_topic_t *rkt = rd_kafka_topic_new(consumer, - state->rktpar->topic, NULL); +static void abort_transaction_and_rewind(struct state *state) { + rd_kafka_topic_t *rkt = + rd_kafka_topic_new(consumer, state->rktpar->topic, NULL); rd_kafka_topic_partition_list_t *offset; rd_kafka_resp_err_t err; rd_kafka_error_t *error; @@ -249,12 +250,11 @@ static void abort_transaction_and_rewind (struct state *state) { /* Get committed offset for this partition */ offset = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(offset, - state->rktpar->topic, + rd_kafka_topic_partition_list_add(offset, state->rktpar->topic, state->rktpar->partition); /* Note: Timeout must be lower than max.poll.interval.ms */ - err = rd_kafka_committed(consumer, offset, 10*1000); + err = rd_kafka_committed(consumer, offset, 10 * 1000); if (err) fatal("Failed to acquire committed offset for %s [%d]: %s", state->rktpar->topic, (int)state->rktpar->partition, @@ -263,17 +263,18 @@ static void abort_transaction_and_rewind (struct state *state) { /* Seek to committed offset, or start of partition if no * no committed offset is available. */ err = rd_kafka_seek(rkt, state->rktpar->partition, - offset->elems[0].offset < 0 ? - /* No committed offset, start from beginning */ - RD_KAFKA_OFFSET_BEGINNING : - /* Use committed offset */ - offset->elems[0].offset, + offset->elems[0].offset < 0 + ? 
+ /* No committed offset, start from beginning */ + RD_KAFKA_OFFSET_BEGINNING + : + /* Use committed offset */ + offset->elems[0].offset, 0); if (err) - fatal("Failed to seek %s [%d]: %s", - state->rktpar->topic, (int)state->rktpar->partition, - rd_kafka_err2str(err)); + fatal("Failed to seek %s [%d]: %s", state->rktpar->topic, + (int)state->rktpar->partition, rd_kafka_err2str(err)); rd_kafka_topic_destroy(rkt); } @@ -282,7 +283,7 @@ static void abort_transaction_and_rewind (struct state *state) { /** * @brief Commit the current transaction and start a new transaction. */ -static void commit_transaction_and_start_new (struct state *state) { +static void commit_transaction_and_start_new(struct state *state) { rd_kafka_error_t *error; rd_kafka_resp_err_t err; rd_kafka_consumer_group_metadata_t *cgmd; @@ -301,8 +302,7 @@ static void commit_transaction_and_start_new (struct state *state) { /* Get consumer's current position for this partition */ offset = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(offset, - state->rktpar->topic, + rd_kafka_topic_partition_list_add(offset, state->rktpar->topic, state->rktpar->partition); err = rd_kafka_position(consumer, offset); if (err) @@ -311,8 +311,8 @@ static void commit_transaction_and_start_new (struct state *state) { rd_kafka_err2str(err)); /* Send offsets to transaction coordinator */ - error = rd_kafka_send_offsets_to_transaction(state->producer, - offset, cgmd, -1); + error = rd_kafka_send_offsets_to_transaction(state->producer, offset, + cgmd, -1); rd_kafka_consumer_group_metadata_destroy(cgmd); rd_kafka_topic_partition_list_destroy(offset); if (error) { @@ -363,36 +363,36 @@ static void commit_transaction_and_start_new (struct state *state) { * these producer's from this callback. 
*/ static void -consumer_group_rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque) { +consumer_group_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque) { int i; if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) - fatal("This example has not yet been modified to work with " - "cooperative incremental rebalancing " - "(partition.assignment.strategy=cooperative-sticky)"); + fatal( + "This example has not yet been modified to work with " + "cooperative incremental rebalancing " + "(partition.assignment.strategy=cooperative-sticky)"); - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: assigned_partitions = - rd_kafka_topic_partition_list_copy(partitions); + rd_kafka_topic_partition_list_copy(partitions); fprintf(stdout, "Consumer group rebalanced, new assignment:\n"); /* Create a transactional producer for each input partition */ - for (i = 0 ; i < assigned_partitions->cnt ; i++) { + for (i = 0; i < assigned_partitions->cnt; i++) { /* Store the partition-to-producer mapping * in the partition's opaque field. */ rd_kafka_topic_partition_t *rktpar = - &assigned_partitions->elems[i]; + &assigned_partitions->elems[i]; struct state *state = calloc(1, sizeof(*state)); state->producer = create_transactional_producer(rktpar); - state->rktpar = rktpar; - rktpar->opaque = state; + state->rktpar = rktpar; + rktpar->opaque = state; state->last_commit = time(NULL); fprintf(stdout, @@ -413,11 +413,12 @@ consumer_group_rebalance_cb (rd_kafka_t *rk, "Consumer group rebalanced, assignment revoked\n"); /* Abort the current transactions and destroy all producers */ - for (i = 0 ; i < assigned_partitions->cnt ; i++) { + for (i = 0; i < assigned_partitions->cnt; i++) { /* Store the partition-to-producer mapping * in the partition's opaque field. 
*/ - struct state *state = (struct state *) - assigned_partitions->elems[i].opaque; + struct state *state = + (struct state *)assigned_partitions->elems[i] + .opaque; destroy_transactional_producer(state->producer); free(state); @@ -441,16 +442,16 @@ consumer_group_rebalance_cb (rd_kafka_t *rk, /** * @brief Create the input consumer. */ -static rd_kafka_t *create_input_consumer (const char *brokers, - const char *input_topic) { +static rd_kafka_t *create_input_consumer(const char *brokers, + const char *input_topic) { rd_kafka_conf_t *conf = rd_kafka_conf_new(); rd_kafka_t *rk; char errstr[256]; rd_kafka_resp_err_t err; rd_kafka_topic_partition_list_t *topics; - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK || + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK || rd_kafka_conf_set(conf, "group.id", "librdkafka_transactions_older_example_group", errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK || @@ -458,8 +459,8 @@ static rd_kafka_t *create_input_consumer (const char *brokers, * output producer's transaction using * rd_kafka_send_offsets_to_transaction(), so auto commits * must be disabled. */ - rd_kafka_conf_set(conf, "enable.auto.commit", "false", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + rd_kafka_conf_set(conf, "enable.auto.commit", "false", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fatal("Failed to configure consumer: %s", errstr); } @@ -488,8 +489,8 @@ static rd_kafka_t *create_input_consumer (const char *brokers, rd_kafka_topic_partition_list_destroy(topics); if (err) { rd_kafka_destroy(rk); - fatal("Failed to subscribe to %s: %s\n", - input_topic, rd_kafka_err2str(err)); + fatal("Failed to subscribe to %s: %s\n", input_topic, + rd_kafka_err2str(err)); } return rk; @@ -500,16 +501,16 @@ static rd_kafka_t *create_input_consumer (const char *brokers, * @brief Find and parse next integer string in \p start. 
* @returns Pointer after found integer string, or NULL if not found. */ -static const void *find_next_int (const void *start, const void *end, - int *intp) { +static const void * +find_next_int(const void *start, const void *end, int *intp) { const char *p; int collecting = 0; - int num = 0; + int num = 0; - for (p = (const char *)start ; p < (const char *)end ; p++) { + for (p = (const char *)start; p < (const char *)end; p++) { if (isdigit((int)(*p))) { collecting = 1; - num = (num * 10) + ((int)*p - ((int)'0')); + num = (num * 10) + ((int)*p - ((int)'0')); } else if (collecting) break; } @@ -529,8 +530,8 @@ static const void *find_next_int (const void *start, const void *end, * the output topic using the transactional producer for the given * inut partition. */ -static void process_message (struct state *state, - const rd_kafka_message_t *rkmessage) { +static void process_message(struct state *state, + const rd_kafka_message_t *rkmessage) { int num; long unsigned sum = 0; const void *p, *end; @@ -540,7 +541,7 @@ static void process_message (struct state *state, if (rkmessage->len == 0) return; /* Ignore empty messages */ - p = rkmessage->payload; + p = rkmessage->payload; end = ((const char *)rkmessage->payload) + rkmessage->len; /* Find and sum all numbers in the message */ @@ -555,17 +556,14 @@ static void process_message (struct state *state, /* Emit output message on transactional producer */ while (1) { err = rd_kafka_producev( - state->producer, - RD_KAFKA_V_TOPIC(output_topic), - /* Use same key as input message */ - RD_KAFKA_V_KEY(rkmessage->key, - rkmessage->key_len), - /* Value is the current sum of this - * transaction. 
*/ - RD_KAFKA_V_VALUE(value, strlen(value)), - /* Copy value since it is allocated on the stack */ - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_END); + state->producer, RD_KAFKA_V_TOPIC(output_topic), + /* Use same key as input message */ + RD_KAFKA_V_KEY(rkmessage->key, rkmessage->key_len), + /* Value is the current sum of this + * transaction. */ + RD_KAFKA_V_VALUE(value, strlen(value)), + /* Copy value since it is allocated on the stack */ + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); if (!err) break; @@ -586,7 +584,7 @@ static void process_message (struct state *state, } -int main (int argc, char **argv) { +int main(int argc, char **argv) { /* * Argument validation */ @@ -597,8 +595,8 @@ int main (int argc, char **argv) { return 1; } - brokers = argv[1]; - input_topic = argv[2]; + brokers = argv[1]; + input_topic = argv[2]; output_topic = argv[3]; /* Signal handler for clean shutdown */ @@ -618,7 +616,7 @@ int main (int argc, char **argv) { rd_kafka_topic_partition_t *rktpar; /* Wait for new mesages or error events */ - msg = rd_kafka_consumer_poll(consumer, 1000/*1 second*/); + msg = rd_kafka_consumer_poll(consumer, 1000 /*1 second*/); if (!msg) continue; @@ -636,13 +634,13 @@ int main (int argc, char **argv) { /* Find output producer for this input partition */ rktpar = rd_kafka_topic_partition_list_find( - assigned_partitions, - rd_kafka_topic_name(msg->rkt), msg->partition); + assigned_partitions, rd_kafka_topic_name(msg->rkt), + msg->partition); if (!rktpar) - fatal("BUG: No output producer for assigned " - "partition %s [%d]", - rd_kafka_topic_name(msg->rkt), - (int)msg->partition); + fatal( + "BUG: No output producer for assigned " + "partition %s [%d]", + rd_kafka_topic_name(msg->rkt), (int)msg->partition); /* Get state struct for this partition */ state = (struct state *)rktpar->opaque; @@ -656,7 +654,7 @@ int main (int argc, char **argv) { if (++state->msgcnt > 100 || state->last_commit + 5 <= time(NULL)) { 
commit_transaction_and_start_new(state); - state->msgcnt = 0; + state->msgcnt = 0; state->last_commit = time(NULL); } } diff --git a/examples/transactions.c b/examples/transactions.c index d6390cff8e..0a8b9a8cf0 100644 --- a/examples/transactions.c +++ b/examples/transactions.c @@ -60,29 +60,31 @@ static volatile sig_atomic_t run = 1; /** * @brief A fatal error has occurred, immediately exit the application. */ -#define fatal(...) do { \ - fprintf(stderr, "FATAL ERROR: "); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - exit(1); \ +#define fatal(...) \ + do { \ + fprintf(stderr, "FATAL ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(1); \ } while (0) /** * @brief Same as fatal() but takes an rd_kafka_error_t object, prints its * error message, destroys the object and then exits fatally. */ -#define fatal_error(what,error) do { \ - fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", \ - what, rd_kafka_error_name(error), \ - rd_kafka_error_string(error)); \ - rd_kafka_error_destroy(error); \ - exit(1); \ +#define fatal_error(what, error) \ + do { \ + fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", what, \ + rd_kafka_error_name(error), \ + rd_kafka_error_string(error)); \ + rd_kafka_error_destroy(error); \ + exit(1); \ } while (0) /** * @brief Signal termination of program */ -static void stop (int sig) { +static void stop(int sig) { run = 0; } @@ -108,11 +110,10 @@ static void stop (int sig) { * In the case of transactional producing the delivery report callback is * mostly useful for logging the produce failures. 
*/ -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) - fprintf(stderr, - "%% Message delivery failed: %s\n", + fprintf(stderr, "%% Message delivery failed: %s\n", rd_kafka_err2str(rkmessage->err)); /* The rkmessage is destroyed automatically by librdkafka */ @@ -123,18 +124,18 @@ static void dr_msg_cb (rd_kafka_t *rk, /** * @brief Create a transactional producer. */ -static rd_kafka_t * -create_transactional_producer (const char *brokers, const char *output_topic) { +static rd_kafka_t *create_transactional_producer(const char *brokers, + const char *output_topic) { rd_kafka_conf_t *conf = rd_kafka_conf_new(); rd_kafka_t *rk; char errstr[256]; rd_kafka_error_t *error; - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK || + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK || rd_kafka_conf_set(conf, "transactional.id", - "librdkafka_transactions_example", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) + "librdkafka_transactions_example", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) fatal("Failed to configure producer: %s", errstr); /* This callback will be called once per message to indicate @@ -162,7 +163,7 @@ create_transactional_producer (const char *brokers, const char *output_topic) { * @brief Rewind consumer's consume position to the last committed offsets * for the current assignment. 
*/ -static void rewind_consumer (rd_kafka_t *consumer) { +static void rewind_consumer(rd_kafka_t *consumer) { rd_kafka_topic_partition_list_t *offsets; rd_kafka_resp_err_t err; rd_kafka_error_t *error; @@ -184,18 +185,17 @@ static void rewind_consumer (rd_kafka_t *consumer) { } /* Note: Timeout must be lower than max.poll.interval.ms */ - err = rd_kafka_committed(consumer, offsets, 10*1000); + err = rd_kafka_committed(consumer, offsets, 10 * 1000); if (err) fatal("Failed to acquire committed offsets: %s", rd_kafka_err2str(err)); /* Seek to committed offset, or start of partition if no * committed offset is available. */ - for (i = 0 ; i < offsets->cnt ; i++) { + for (i = 0; i < offsets->cnt; i++) { /* No committed offset, start from beginning */ if (offsets->elems[i].offset < 0) - offsets->elems[i].offset = - RD_KAFKA_OFFSET_BEGINNING; + offsets->elems[i].offset = RD_KAFKA_OFFSET_BEGINNING; } /* Perform seek */ @@ -211,8 +211,8 @@ static void rewind_consumer (rd_kafka_t *consumer) { * position where the transaction last started, i.e., the committed * consumer offset, then begin a new transaction. */ -static void abort_transaction_and_rewind (rd_kafka_t *consumer, - rd_kafka_t *producer) { +static void abort_transaction_and_rewind(rd_kafka_t *consumer, + rd_kafka_t *producer) { rd_kafka_error_t *error; fprintf(stdout, "Aborting transaction and rewinding offsets\n"); @@ -238,8 +238,7 @@ static void abort_transaction_and_rewind (rd_kafka_t *consumer, * @returns 1 if transaction was successfully committed, or 0 * if the current transaction was aborted. 
*/ -static int commit_transaction (rd_kafka_t *consumer, - rd_kafka_t *producer) { +static int commit_transaction(rd_kafka_t *consumer, rd_kafka_t *producer) { rd_kafka_error_t *error; rd_kafka_resp_err_t err; rd_kafka_consumer_group_metadata_t *cgmd; @@ -263,7 +262,8 @@ static int commit_transaction (rd_kafka_t *consumer, if (err) fprintf(stderr, "Failed to get consumer assignment to commit: " - "%s\n", rd_kafka_err2str(err)); + "%s\n", + rd_kafka_err2str(err)); else rd_kafka_topic_partition_list_destroy(offsets); @@ -281,8 +281,8 @@ static int commit_transaction (rd_kafka_t *consumer, rd_kafka_err2str(err)); /* Send offsets to transaction coordinator */ - error = rd_kafka_send_offsets_to_transaction(producer, - offsets, cgmd, -1); + error = + rd_kafka_send_offsets_to_transaction(producer, offsets, cgmd, -1); rd_kafka_consumer_group_metadata_destroy(cgmd); rd_kafka_topic_partition_list_destroy(offsets); if (error) { @@ -334,8 +334,8 @@ static int commit_transaction (rd_kafka_t *consumer, /** * @brief Commit the current transaction and start a new transaction. */ -static void commit_transaction_and_start_new (rd_kafka_t *consumer, - rd_kafka_t *producer) { +static void commit_transaction_and_start_new(rd_kafka_t *consumer, + rd_kafka_t *producer) { rd_kafka_error_t *error; /* Commit transaction. @@ -355,15 +355,14 @@ static void commit_transaction_and_start_new (rd_kafka_t *consumer, * when the consumer's partition assignment is assigned or revoked. 
*/ static void -consumer_group_rebalance_cb (rd_kafka_t *consumer, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque) { +consumer_group_rebalance_cb(rd_kafka_t *consumer, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque) { rd_kafka_t *producer = (rd_kafka_t *)opaque; rd_kafka_error_t *error; - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: fprintf(stdout, "Consumer group rebalanced: " @@ -418,31 +417,31 @@ consumer_group_rebalance_cb (rd_kafka_t *consumer, /** * @brief Create the input consumer. */ -static rd_kafka_t *create_input_consumer (const char *brokers, - const char *input_topic, - rd_kafka_t *producer) { +static rd_kafka_t *create_input_consumer(const char *brokers, + const char *input_topic, + rd_kafka_t *producer) { rd_kafka_conf_t *conf = rd_kafka_conf_new(); rd_kafka_t *rk; char errstr[256]; rd_kafka_resp_err_t err; rd_kafka_topic_partition_list_t *topics; - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK || + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK || rd_kafka_conf_set(conf, "group.id", - "librdkafka_transactions_example_group", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK || + "librdkafka_transactions_example_group", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK || rd_kafka_conf_set(conf, "partition.assignment.strategy", - "cooperative-sticky", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK || - rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK || + "cooperative-sticky", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK || + rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK || /* The input consumer's offsets are explicitly committed with the * output producer's transaction using * rd_kafka_send_offsets_to_transaction(), so 
auto commits * must be disabled. */ - rd_kafka_conf_set(conf, "enable.auto.commit", "false", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + rd_kafka_conf_set(conf, "enable.auto.commit", "false", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fatal("Failed to configure consumer: %s", errstr); } @@ -476,8 +475,8 @@ static rd_kafka_t *create_input_consumer (const char *brokers, rd_kafka_topic_partition_list_destroy(topics); if (err) { rd_kafka_destroy(rk); - fatal("Failed to subscribe to %s: %s\n", - input_topic, rd_kafka_err2str(err)); + fatal("Failed to subscribe to %s: %s\n", input_topic, + rd_kafka_err2str(err)); } return rk; @@ -488,16 +487,16 @@ static rd_kafka_t *create_input_consumer (const char *brokers, * @brief Find and parse next integer string in \p start. * @returns Pointer after found integer string, or NULL if not found. */ -static const void *find_next_int (const void *start, const void *end, - int *intp) { +static const void * +find_next_int(const void *start, const void *end, int *intp) { const char *p; int collecting = 0; - int num = 0; + int num = 0; - for (p = (const char *)start ; p < (const char *)end ; p++) { + for (p = (const char *)start; p < (const char *)end; p++) { if (isdigit((int)(*p))) { collecting = 1; - num = (num * 10) + ((int)*p - ((int)'0')); + num = (num * 10) + ((int)*p - ((int)'0')); } else if (collecting) break; } @@ -517,10 +516,10 @@ static const void *find_next_int (const void *start, const void *end, * the output topic using the transactional producer for the given * inut partition. 
*/ -static void process_message (rd_kafka_t *consumer, - rd_kafka_t *producer, - const char *output_topic, - const rd_kafka_message_t *rkmessage) { +static void process_message(rd_kafka_t *consumer, + rd_kafka_t *producer, + const char *output_topic, + const rd_kafka_message_t *rkmessage) { int num; long unsigned sum = 0; const void *p, *end; @@ -530,7 +529,7 @@ static void process_message (rd_kafka_t *consumer, if (rkmessage->len == 0) return; /* Ignore empty messages */ - p = rkmessage->payload; + p = rkmessage->payload; end = ((const char *)rkmessage->payload) + rkmessage->len; /* Find and sum all numbers in the message */ @@ -545,17 +544,14 @@ static void process_message (rd_kafka_t *consumer, /* Emit output message on transactional producer */ while (1) { err = rd_kafka_producev( - producer, - RD_KAFKA_V_TOPIC(output_topic), - /* Use same key as input message */ - RD_KAFKA_V_KEY(rkmessage->key, - rkmessage->key_len), - /* Value is the current sum of this - * transaction. */ - RD_KAFKA_V_VALUE(value, strlen(value)), - /* Copy value since it is allocated on the stack */ - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_END); + producer, RD_KAFKA_V_TOPIC(output_topic), + /* Use same key as input message */ + RD_KAFKA_V_KEY(rkmessage->key, rkmessage->key_len), + /* Value is the current sum of this + * transaction. 
*/ + RD_KAFKA_V_VALUE(value, strlen(value)), + /* Copy value since it is allocated on the stack */ + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); if (!err) break; @@ -576,9 +572,9 @@ static void process_message (rd_kafka_t *consumer, } -int main (int argc, char **argv) { +int main(int argc, char **argv) { rd_kafka_t *producer, *consumer; - int msgcnt = 0; + int msgcnt = 0; time_t last_commit = 0; const char *brokers, *input_topic, *output_topic; rd_kafka_error_t *error; @@ -593,8 +589,8 @@ int main (int argc, char **argv) { return 1; } - brokers = argv[1]; - input_topic = argv[2]; + brokers = argv[1]; + input_topic = argv[2]; output_topic = argv[3]; /* Signal handler for clean shutdown */ @@ -611,8 +607,8 @@ int main (int argc, char **argv) { "Observe summed integers on output topic %s:\n" " $ examples/consumer %s just-watching %s\n" "\n", - input_topic, brokers, input_topic, - output_topic, brokers, output_topic); + input_topic, brokers, input_topic, output_topic, brokers, + output_topic); /* Begin transaction and start waiting for messages */ error = rd_kafka_begin_transaction(producer); @@ -628,12 +624,12 @@ int main (int argc, char **argv) { printf("msgcnt %d, elapsed %d\n", msgcnt, (int)(time(NULL) - last_commit)); commit_transaction_and_start_new(consumer, producer); - msgcnt = 0; + msgcnt = 0; last_commit = time(NULL); } /* Wait for new mesages or error events */ - msg = rd_kafka_consumer_poll(consumer, 1000/*1 second*/); + msg = rd_kafka_consumer_poll(consumer, 1000 /*1 second*/); if (!msg) continue; /* Poll timeout */ diff --git a/examples/win_ssl_cert_store.cpp b/examples/win_ssl_cert_store.cpp index 09eb9c25c6..a80dfea30c 100644 --- a/examples/win_ssl_cert_store.cpp +++ b/examples/win_ssl_cert_store.cpp @@ -26,9 +26,9 @@ * POSSIBILITY OF SUCH DAMAGE. */ - /** - * Example of utilizing the Windows Certificate store with SSL. - */ +/** + * Example of utilizing the Windows Certificate store with SSL. 
+ */ #include #include @@ -42,383 +42,354 @@ #include #include - /* - * Typically include path in a real application would be - * #include - */ +/* + * Typically include path in a real application would be + * #include + */ #include "rdkafkacpp.h" class ExampleStoreRetriever { -public: - ExampleStoreRetriever (std::string const &subject, std::string const &pass) - : m_cert_subject(subject), m_password(pass), - m_cert_store(NULL), m_cert_ctx(NULL) { - load_certificate(); - } - - ~ExampleStoreRetriever() { - if (m_cert_ctx) - CertFreeCertificateContext(m_cert_ctx); - - if (m_cert_store) - CertCloseStore(m_cert_store, 0); - } - - /* @returns the public key in DER format */ - const std::vector get_public_key () { - std::vector buf((size_t)m_cert_ctx->cbCertEncoded); - buf.assign((const char *)m_cert_ctx->pbCertEncoded, - (const char *)m_cert_ctx->pbCertEncoded + - (size_t)m_cert_ctx->cbCertEncoded); - return buf; - } - - /* @returns the private key in PCKS#12 format */ - const std::vector get_private_key () { - ssize_t ret = 0; - /* - * In order to export the private key the certificate - * must first be marked as exportable. 
- * - * Steps to export the certificate - * 1) Create an in-memory cert store - * 2) Add the certificate to the store - * 3) Export the private key from the in-memory store - */ - - /* Create an in-memory cert store */ - HCERTSTORE hMemStore = CertOpenStore(CERT_STORE_PROV_MEMORY, - 0, NULL, 0, NULL); - if (!hMemStore) - throw "Failed to create in-memory cert store: " + - GetErrorMsg(GetLastError()); - - /* Add certificate to store */ - if (!CertAddCertificateContextToStore(hMemStore, - m_cert_ctx, - CERT_STORE_ADD_USE_EXISTING, - NULL)) - throw "Failed to add certificate to store: " + - GetErrorMsg(GetLastError()); - - /* - * Export private key from cert - */ - CRYPT_DATA_BLOB db = { NULL }; - - std::wstring w_password(m_password.begin(), m_password.end()); - - /* Acquire output size */ - if (!PFXExportCertStoreEx(hMemStore, - &db, - w_password.c_str(), - NULL, - EXPORT_PRIVATE_KEYS | - REPORT_NO_PRIVATE_KEY | - REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY)) - throw "Failed to export private key: " + GetErrorMsg(GetLastError()); - - std::vector buf; - - buf.resize(db.cbData); - db.pbData = &buf[0]; - - /* Extract key */ - if (!PFXExportCertStoreEx(hMemStore, - &db, - w_password.c_str(), - NULL, - EXPORT_PRIVATE_KEYS | - REPORT_NO_PRIVATE_KEY | - REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY)) - throw "Failed to export private key (PFX): " + GetErrorMsg(GetLastError()); - - CertCloseStore(hMemStore, 0); - - buf.resize(db.cbData); - - return buf; - } + public: + ExampleStoreRetriever(std::string const &subject, std::string const &pass) : + m_cert_subject(subject), + m_password(pass), + m_cert_store(NULL), + m_cert_ctx(NULL) { + load_certificate(); + } + + ~ExampleStoreRetriever() { + if (m_cert_ctx) + CertFreeCertificateContext(m_cert_ctx); + + if (m_cert_store) + CertCloseStore(m_cert_store, 0); + } + + /* @returns the public key in DER format */ + const std::vector get_public_key() { + std::vector buf((size_t)m_cert_ctx->cbCertEncoded); + buf.assign((const char 
*)m_cert_ctx->pbCertEncoded, + (const char *)m_cert_ctx->pbCertEncoded + + (size_t)m_cert_ctx->cbCertEncoded); + return buf; + } + + /* @returns the private key in PCKS#12 format */ + const std::vector get_private_key() { + ssize_t ret = 0; + /* + * In order to export the private key the certificate + * must first be marked as exportable. + * + * Steps to export the certificate + * 1) Create an in-memory cert store + * 2) Add the certificate to the store + * 3) Export the private key from the in-memory store + */ + + /* Create an in-memory cert store */ + HCERTSTORE hMemStore = + CertOpenStore(CERT_STORE_PROV_MEMORY, 0, NULL, 0, NULL); + if (!hMemStore) + throw "Failed to create in-memory cert store: " + + GetErrorMsg(GetLastError()); + + /* Add certificate to store */ + if (!CertAddCertificateContextToStore(hMemStore, m_cert_ctx, + CERT_STORE_ADD_USE_EXISTING, NULL)) + throw "Failed to add certificate to store: " + + GetErrorMsg(GetLastError()); + + /* + * Export private key from cert + */ + CRYPT_DATA_BLOB db = {NULL}; + + std::wstring w_password(m_password.begin(), m_password.end()); + + /* Acquire output size */ + if (!PFXExportCertStoreEx(hMemStore, &db, w_password.c_str(), NULL, + EXPORT_PRIVATE_KEYS | REPORT_NO_PRIVATE_KEY | + REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY)) + throw "Failed to export private key: " + GetErrorMsg(GetLastError()); + + std::vector buf; + + buf.resize(db.cbData); + db.pbData = &buf[0]; + + /* Extract key */ + if (!PFXExportCertStoreEx(hMemStore, &db, w_password.c_str(), NULL, + EXPORT_PRIVATE_KEYS | REPORT_NO_PRIVATE_KEY | + REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY)) + throw "Failed to export private key (PFX): " + + GetErrorMsg(GetLastError()); + + CertCloseStore(hMemStore, 0); + + buf.resize(db.cbData); + + return buf; + } private: - void load_certificate () { - if (m_cert_ctx) - return; - - m_cert_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, - 0, - NULL, - CERT_SYSTEM_STORE_CURRENT_USER, - L"My"); - if (!m_cert_store) - throw "Failed to 
open cert store: " + GetErrorMsg(GetLastError()); - - m_cert_ctx = CertFindCertificateInStore(m_cert_store, - X509_ASN_ENCODING, - 0, - CERT_FIND_SUBJECT_STR, - /* should probally do a better std::string to std::wstring conversion */ - std::wstring(m_cert_subject.begin(), - m_cert_subject.end()).c_str(), - NULL); - if (!m_cert_ctx) { - CertCloseStore(m_cert_store, 0); - m_cert_store = NULL; - throw "Certificate " + m_cert_subject + " not found in cert store: " + GetErrorMsg(GetLastError()); - } - } - - std::string GetErrorMsg (unsigned long error) { - char *message = NULL; - size_t ret = FormatMessageA( - FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, - nullptr, - error, - 0, - (char*)&message, - 0, - nullptr); - if (ret == 0) { - std::stringstream ss; - - ss << std::string("could not format message for ") << error; - return ss.str(); - } else { - std::string result(message, ret); - LocalFree(message); - return result; - } - } + void load_certificate() { + if (m_cert_ctx) + return; + + m_cert_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, 0, NULL, + CERT_SYSTEM_STORE_CURRENT_USER, L"My"); + if (!m_cert_store) + throw "Failed to open cert store: " + GetErrorMsg(GetLastError()); + + m_cert_ctx = CertFindCertificateInStore( + m_cert_store, X509_ASN_ENCODING, 0, CERT_FIND_SUBJECT_STR, + /* should probally do a better std::string to std::wstring conversion */ + std::wstring(m_cert_subject.begin(), m_cert_subject.end()).c_str(), + NULL); + if (!m_cert_ctx) { + CertCloseStore(m_cert_store, 0); + m_cert_store = NULL; + throw "Certificate " + m_cert_subject + + " not found in cert store: " + GetErrorMsg(GetLastError()); + } + } + + std::string GetErrorMsg(unsigned long error) { + char *message = NULL; + size_t ret = FormatMessageA( + FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, nullptr, + error, 0, (char *)&message, 0, nullptr); + if (ret == 0) { + std::stringstream ss; + + ss << std::string("could not format message for ") << error; + return 
ss.str(); + } else { + std::string result(message, ret); + LocalFree(message); + return result; + } + } private: - std::string m_cert_subject; - std::string m_password; - PCCERT_CONTEXT m_cert_ctx; - HCERTSTORE m_cert_store; + std::string m_cert_subject; + std::string m_password; + PCCERT_CONTEXT m_cert_ctx; + HCERTSTORE m_cert_store; }; class PrintingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb { - /* This SSL cert verification callback simply prints the certificates - * in the certificate chain. - * It provides no validation, everything is ok. */ -public: - bool ssl_cert_verify_cb (const std::string &broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - std::string &errstr) { - PCCERT_CONTEXT ctx = CertCreateCertificateContext( - X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, - (const uint8_t*)buf, static_cast(size)); - - if (!ctx) - std::cerr << "Failed to parse certificate" << std::endl; - - char subject[256] = "n/a"; - char issuer[256] = "n/a"; - - CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, - 0, NULL, - subject, sizeof(subject)); - - CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, - CERT_NAME_ISSUER_FLAG, NULL, - issuer, sizeof(issuer)); - - std::cerr << "Broker " << broker_name << - " (" << broker_id << "): " << - "certificate depth " << depth << - ", X509 error " << *x509_error << - ", subject " << subject << - ", issuer " << issuer << std::endl; - - if (ctx) - CertFreeCertificateContext(ctx); - - return true; - } + /* This SSL cert verification callback simply prints the certificates + * in the certificate chain. + * It provides no validation, everything is ok. 
*/ + public: + bool ssl_cert_verify_cb(const std::string &broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + std::string &errstr) { + PCCERT_CONTEXT ctx = CertCreateCertificateContext( + X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, (const uint8_t *)buf, + static_cast(size)); + + if (!ctx) + std::cerr << "Failed to parse certificate" << std::endl; + + char subject[256] = "n/a"; + char issuer[256] = "n/a"; + + CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, 0, NULL, subject, + sizeof(subject)); + + CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, + CERT_NAME_ISSUER_FLAG, NULL, issuer, sizeof(issuer)); + + std::cerr << "Broker " << broker_name << " (" << broker_id << "): " + << "certificate depth " << depth << ", X509 error " << *x509_error + << ", subject " << subject << ", issuer " << issuer << std::endl; + + if (ctx) + CertFreeCertificateContext(ctx); + + return true; + } }; /** -* @brief Print the brokers in the cluster. -*/ -static void print_brokers (RdKafka::Handle *handle, - const RdKafka::Metadata *md) { - std::cout << md->brokers()->size() << " broker(s) in cluster " << - handle->clusterid(0) << std::endl; - - /* Iterate brokers */ - RdKafka::Metadata::BrokerMetadataIterator ib; - for (ib = md->brokers()->begin(); ib != md->brokers()->end(); ++ib) - std::cout << " broker " << (*ib)->id() << " at " - << (*ib)->host() << ":" << (*ib)->port() << std::endl; - + * @brief Print the brokers in the cluster. 
+ */ +static void print_brokers(RdKafka::Handle *handle, + const RdKafka::Metadata *md) { + std::cout << md->brokers()->size() << " broker(s) in cluster " + << handle->clusterid(0) << std::endl; + + /* Iterate brokers */ + RdKafka::Metadata::BrokerMetadataIterator ib; + for (ib = md->brokers()->begin(); ib != md->brokers()->end(); ++ib) + std::cout << " broker " << (*ib)->id() << " at " << (*ib)->host() << ":" + << (*ib)->port() << std::endl; } -int main (int argc, char **argv) { - std::string brokers; - std::string errstr; - std::string cert_subject; - std::string priv_key_pass; - - /* - * Create configuration objects - */ - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); - - int opt; - while ((opt = getopt(argc, argv, "b:d:X:s:p:")) != -1) { - switch (opt) { - case 'b': - brokers = optarg; - break; - case 'd': - if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - break; - case 'X': - { - char *name, *val; - - name = optarg; - if (!(val = strchr(name, '='))) { - std::cerr << "%% Expected -X property=value, not " << - name << std::endl; - exit(1); - } - - *val = '\0'; - val++; - - if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - } - break; - - case 's': - cert_subject = optarg; - break; - - case 'p': - priv_key_pass = optarg; - if (conf->set("ssl.key.password", optarg, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - - break; - - default: - goto usage; - } - } - - if (brokers.empty() || optind != argc) { -usage: - std::string features; - conf->get("builtin.features", features); - fprintf(stderr, - "Usage: %s [options] -b -s -p \n" - "\n" - "Windows Certificate Store integration example.\n" - "Use certlm.msc or mmc to view your certificates.\n" - "\n" - "librdkafka version %s (0x%08x, 
builtin.features \"%s\")\n" - "\n" - " Options:\n" - " -b Broker address\n" - " -s The subject name of the client's SSL certificate to use\n" - " -p The private key password\n" - " -d [facs..] Enable debugging contexts: %s\n" - " -X Set arbitrary librdkafka " - "configuration property\n" - "\n", - argv[0], - RdKafka::version_str().c_str(), RdKafka::version(), - features.c_str(), - RdKafka::get_debug_contexts().c_str()); - exit(1); - } - - if (!cert_subject.empty()) { - - try { - /* Load certificates from the Windows store */ - ExampleStoreRetriever certStore(cert_subject, priv_key_pass); - - std::vector pubkey, privkey; - - pubkey = certStore.get_public_key(); - privkey = certStore.get_private_key(); - - if (conf->set_ssl_cert(RdKafka::CERT_PUBLIC_KEY, - RdKafka::CERT_ENC_DER, - &pubkey[0], pubkey.size(), - errstr) != - RdKafka::Conf::CONF_OK) - throw "Failed to set public key: " + errstr; - - if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY, - RdKafka::CERT_ENC_PKCS12, - &privkey[0], privkey.size(), - errstr) != - RdKafka::Conf::CONF_OK) - throw "Failed to set private key: " + errstr; - - } catch (const std::string &ex) { - std::cerr << ex << std::endl; - exit(1); - } - } - - - /* - * Set configuration properties - */ - conf->set("bootstrap.servers", brokers, errstr); - - /* We use the Certificiate verification callback to print the - * certificate chains being used. */ - PrintingSSLVerifyCb ssl_verify_cb; - - if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - - /* Create any type of client, producering being the cheapest. 
*/ - RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); - if (!producer) { - std::cerr << "Failed to create producer: " << errstr << std::endl; - exit(1); - } - - RdKafka::Metadata *metadata; - - /* Fetch metadata */ - RdKafka::ErrorCode err = producer->metadata(false, NULL, &metadata, 5000); - if (err != RdKafka::ERR_NO_ERROR) { - std::cerr << "%% Failed to acquire metadata: " - << RdKafka::err2str(err) << std::endl; - exit(1); - } - - print_brokers(producer, metadata); - - delete metadata; - delete producer; - - return 0; +int main(int argc, char **argv) { + std::string brokers; + std::string errstr; + std::string cert_subject; + std::string priv_key_pass; + + /* + * Create configuration objects + */ + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); + + int opt; + while ((opt = getopt(argc, argv, "b:d:X:s:p:")) != -1) { + switch (opt) { + case 'b': + brokers = optarg; + break; + case 'd': + if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + break; + case 'X': { + char *name, *val; + + name = optarg; + if (!(val = strchr(name, '='))) { + std::cerr << "%% Expected -X property=value, not " << name << std::endl; + exit(1); + } + + *val = '\0'; + val++; + + if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + } break; + + case 's': + cert_subject = optarg; + break; + + case 'p': + priv_key_pass = optarg; + if (conf->set("ssl.key.password", optarg, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + break; + + default: + goto usage; + } + } + + if (brokers.empty() || optind != argc) { + usage: + std::string features; + conf->get("builtin.features", features); + fprintf(stderr, + "Usage: %s [options] -b -s -p " + "\n" + "\n" + "Windows Certificate Store integration example.\n" + 
"Use certlm.msc or mmc to view your certificates.\n" + "\n" + "librdkafka version %s (0x%08x, builtin.features \"%s\")\n" + "\n" + " Options:\n" + " -b Broker address\n" + " -s The subject name of the client's SSL " + "certificate to use\n" + " -p The private key password\n" + " -d [facs..] Enable debugging contexts: %s\n" + " -X Set arbitrary librdkafka " + "configuration property\n" + "\n", + argv[0], RdKafka::version_str().c_str(), RdKafka::version(), + features.c_str(), RdKafka::get_debug_contexts().c_str()); + exit(1); + } + + if (!cert_subject.empty()) { + try { + /* Load certificates from the Windows store */ + ExampleStoreRetriever certStore(cert_subject, priv_key_pass); + + std::vector pubkey, privkey; + + pubkey = certStore.get_public_key(); + privkey = certStore.get_private_key(); + + if (conf->set_ssl_cert(RdKafka::CERT_PUBLIC_KEY, RdKafka::CERT_ENC_DER, + &pubkey[0], pubkey.size(), + errstr) != RdKafka::Conf::CONF_OK) + throw "Failed to set public key: " + errstr; + + if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY, + RdKafka::CERT_ENC_PKCS12, &privkey[0], + privkey.size(), errstr) != RdKafka::Conf::CONF_OK) + throw "Failed to set private key: " + errstr; + + } catch (const std::string &ex) { + std::cerr << ex << std::endl; + exit(1); + } + } + + + /* + * Set configuration properties + */ + conf->set("bootstrap.servers", brokers, errstr); + + /* We use the Certificiate verification callback to print the + * certificate chains being used. */ + PrintingSSLVerifyCb ssl_verify_cb; + + if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + /* Create any type of client, producering being the cheapest. 
*/ + RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); + if (!producer) { + std::cerr << "Failed to create producer: " << errstr << std::endl; + exit(1); + } + + RdKafka::Metadata *metadata; + + /* Fetch metadata */ + RdKafka::ErrorCode err = producer->metadata(false, NULL, &metadata, 5000); + if (err != RdKafka::ERR_NO_ERROR) { + std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err) + << std::endl; + exit(1); + } + + print_brokers(producer, metadata); + + delete metadata; + delete producer; + + return 0; } diff --git a/lds-gen.py b/lds-gen.py index cb6bf8dc66..44c718d130 100755 --- a/lds-gen.py +++ b/lds-gen.py @@ -47,7 +47,9 @@ if m: sym = m.group(2) # Ignore static (unused) functions - m2 = re.match(r'(RD_UNUSED|__attribute__\(\(unused\)\))', last_line) + m2 = re.match( + r'(RD_UNUSED|__attribute__\(\(unused\)\))', + last_line) if not m2: funcs.append(sym) last_line = '' diff --git a/packaging/cmake/try_compile/atomic_32_test.c b/packaging/cmake/try_compile/atomic_32_test.c index de9738acc6..b3373bb8b9 100644 --- a/packaging/cmake/try_compile/atomic_32_test.c +++ b/packaging/cmake/try_compile/atomic_32_test.c @@ -1,7 +1,7 @@ #include -int32_t foo (int32_t i) { - return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST); +int32_t foo(int32_t i) { + return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST); } int main() { diff --git a/packaging/cmake/try_compile/atomic_64_test.c b/packaging/cmake/try_compile/atomic_64_test.c index a713c74b0f..31922b85c2 100644 --- a/packaging/cmake/try_compile/atomic_64_test.c +++ b/packaging/cmake/try_compile/atomic_64_test.c @@ -1,7 +1,7 @@ #include -int64_t foo (int64_t i) { - return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST); +int64_t foo(int64_t i) { + return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST); } int main() { diff --git a/packaging/cmake/try_compile/c11threads_test.c b/packaging/cmake/try_compile/c11threads_test.c index 1dd6755472..31681ae617 100644 --- 
a/packaging/cmake/try_compile/c11threads_test.c +++ b/packaging/cmake/try_compile/c11threads_test.c @@ -1,14 +1,14 @@ #include -static int start_func (void *arg) { - int iarg = *(int *)arg; - return iarg; +static int start_func(void *arg) { + int iarg = *(int *)arg; + return iarg; } -void main (void) { - thrd_t thr; - int arg = 1; - if (thrd_create(&thr, start_func, (void *)&arg) != thrd_success) { - ; - } +void main(void) { + thrd_t thr; + int arg = 1; + if (thrd_create(&thr, start_func, (void *)&arg) != thrd_success) { + ; + } } diff --git a/packaging/cmake/try_compile/crc32c_hw_test.c b/packaging/cmake/try_compile/crc32c_hw_test.c index 4e337c5b6f..e800978031 100644 --- a/packaging/cmake/try_compile/crc32c_hw_test.c +++ b/packaging/cmake/try_compile/crc32c_hw_test.c @@ -3,22 +3,25 @@ #define LONGx1 "8192" #define LONGx2 "16384" void main(void) { - const char *n = "abcdefghijklmnopqrstuvwxyz0123456789"; - uint64_t c0 = 0, c1 = 1, c2 = 2; - uint64_t s; - uint32_t eax = 1, ecx; - __asm__("cpuid" - : "=c"(ecx) - : "a"(eax) - : "%ebx", "%edx"); - __asm__("crc32b\t" "(%1), %0" - : "=r"(c0) - : "r"(n), "0"(c0)); - __asm__("crc32q\t" "(%3), %0\n\t" - "crc32q\t" LONGx1 "(%3), %1\n\t" - "crc32q\t" LONGx2 "(%3), %2" - : "=r"(c0), "=r"(c1), "=r"(c2) - : "r"(n), "0"(c0), "1"(c1), "2"(c2)); - s = c0 + c1 + c2; - printf("avoiding unused code removal by printing %d, %d, %d\n", (int)s, (int)eax, (int)ecx); + const char *n = "abcdefghijklmnopqrstuvwxyz0123456789"; + uint64_t c0 = 0, c1 = 1, c2 = 2; + uint64_t s; + uint32_t eax = 1, ecx; + __asm__("cpuid" : "=c"(ecx) : "a"(eax) : "%ebx", "%edx"); + __asm__( + "crc32b\t" + "(%1), %0" + : "=r"(c0) + : "r"(n), "0"(c0)); + __asm__( + "crc32q\t" + "(%3), %0\n\t" + "crc32q\t" LONGx1 + "(%3), %1\n\t" + "crc32q\t" LONGx2 "(%3), %2" + : "=r"(c0), "=r"(c1), "=r"(c2) + : "r"(n), "0"(c0), "1"(c1), "2"(c2)); + s = c0 + c1 + c2; + printf("avoiding unused code removal by printing %d, %d, %d\n", (int)s, + (int)eax, (int)ecx); } diff --git 
a/packaging/cmake/try_compile/dlopen_test.c b/packaging/cmake/try_compile/dlopen_test.c index 61c2504c55..ecb478994a 100644 --- a/packaging/cmake/try_compile/dlopen_test.c +++ b/packaging/cmake/try_compile/dlopen_test.c @@ -4,7 +4,7 @@ int main() { void *h; /* Try loading anything, we don't care if it works */ - h = dlopen("__nothing_rdkafka.so", RTLD_NOW|RTLD_LOCAL); + h = dlopen("__nothing_rdkafka.so", RTLD_NOW | RTLD_LOCAL); if (h) dlclose(h); return 0; diff --git a/packaging/cmake/try_compile/pthread_setname_darwin_test.c b/packaging/cmake/try_compile/pthread_setname_darwin_test.c index 2fe34e90a6..73e31e0695 100644 --- a/packaging/cmake/try_compile/pthread_setname_darwin_test.c +++ b/packaging/cmake/try_compile/pthread_setname_darwin_test.c @@ -1,6 +1,6 @@ #include int main() { - pthread_setname_np("abc"); - return 0; + pthread_setname_np("abc"); + return 0; } diff --git a/packaging/cmake/try_compile/pthread_setname_freebsd_test.c b/packaging/cmake/try_compile/pthread_setname_freebsd_test.c index 2989e37f94..329ace08ef 100644 --- a/packaging/cmake/try_compile/pthread_setname_freebsd_test.c +++ b/packaging/cmake/try_compile/pthread_setname_freebsd_test.c @@ -2,6 +2,6 @@ #include int main() { - pthread_set_name_np(pthread_self(), "abc"); - return 0; + pthread_set_name_np(pthread_self(), "abc"); + return 0; } diff --git a/packaging/cmake/try_compile/pthread_setname_gnu_test.c b/packaging/cmake/try_compile/pthread_setname_gnu_test.c index 48aef9ee89..3be1b21bc4 100644 --- a/packaging/cmake/try_compile/pthread_setname_gnu_test.c +++ b/packaging/cmake/try_compile/pthread_setname_gnu_test.c @@ -1,5 +1,5 @@ #include int main() { - return pthread_setname_np(pthread_self(), "abc"); + return pthread_setname_np(pthread_self(), "abc"); } diff --git a/packaging/cmake/try_compile/rand_r_test.c b/packaging/cmake/try_compile/rand_r_test.c index 53b7ae0082..be722d0a05 100644 --- a/packaging/cmake/try_compile/rand_r_test.c +++ b/packaging/cmake/try_compile/rand_r_test.c @@ -1,7 
+1,7 @@ #include int main() { - unsigned int seed = 0xbeaf; - (void)rand_r(&seed); - return 0; + unsigned int seed = 0xbeaf; + (void)rand_r(&seed); + return 0; } diff --git a/packaging/cmake/try_compile/regex_test.c b/packaging/cmake/try_compile/regex_test.c index 1d6eeb3690..329098d209 100644 --- a/packaging/cmake/try_compile/regex_test.c +++ b/packaging/cmake/try_compile/regex_test.c @@ -2,9 +2,9 @@ #include int main() { - regcomp(NULL, NULL, 0); - regexec(NULL, NULL, 0, NULL, 0); - regerror(0, NULL, NULL, 0); - regfree(NULL); - return 0; + regcomp(NULL, NULL, 0); + regexec(NULL, NULL, 0, NULL, 0); + regerror(0, NULL, NULL, 0); + regfree(NULL); + return 0; } diff --git a/packaging/cmake/try_compile/strndup_test.c b/packaging/cmake/try_compile/strndup_test.c index 9b620435d8..a10b745264 100644 --- a/packaging/cmake/try_compile/strndup_test.c +++ b/packaging/cmake/try_compile/strndup_test.c @@ -1,5 +1,5 @@ #include int main() { - return strndup("hi", 2) ? 0 : 1; + return strndup("hi", 2) ? 
0 : 1; } diff --git a/packaging/cmake/try_compile/sync_32_test.c b/packaging/cmake/try_compile/sync_32_test.c index 44ba120465..2bc80ab4c9 100644 --- a/packaging/cmake/try_compile/sync_32_test.c +++ b/packaging/cmake/try_compile/sync_32_test.c @@ -1,7 +1,7 @@ #include -int32_t foo (int32_t i) { - return __sync_add_and_fetch(&i, 1); +int32_t foo(int32_t i) { + return __sync_add_and_fetch(&i, 1); } int main() { diff --git a/packaging/cmake/try_compile/sync_64_test.c b/packaging/cmake/try_compile/sync_64_test.c index ad0620400a..4b6ad6d384 100644 --- a/packaging/cmake/try_compile/sync_64_test.c +++ b/packaging/cmake/try_compile/sync_64_test.c @@ -1,7 +1,7 @@ #include -int64_t foo (int64_t i) { - return __sync_add_and_fetch(&i, 1); +int64_t foo(int64_t i) { + return __sync_add_and_fetch(&i, 1); } int main() { diff --git a/packaging/cp/check_features.c b/packaging/cp/check_features.c index 52810755ae..4229402fd6 100644 --- a/packaging/cp/check_features.c +++ b/packaging/cp/check_features.c @@ -2,7 +2,7 @@ #include #include -int main (int argc, char **argv) { +int main(int argc, char **argv) { rd_kafka_conf_t *conf; char buf[512]; size_t sz = sizeof(buf); @@ -12,8 +12,8 @@ int main (int argc, char **argv) { int i; int failures = 0; - printf("librdkafka %s (0x%x, define: 0x%x)\n", - rd_kafka_version_str(), rd_kafka_version(), RD_KAFKA_VERSION); + printf("librdkafka %s (0x%x, define: 0x%x)\n", rd_kafka_version_str(), + rd_kafka_version(), RD_KAFKA_VERSION); if (argc > 1 && !(argc & 1)) { printf("Usage: %s [config.property config-value ..]\n", @@ -22,7 +22,7 @@ int main (int argc, char **argv) { } conf = rd_kafka_conf_new(); - res = rd_kafka_conf_get(conf, "builtin.features", buf, &sz); + res = rd_kafka_conf_get(conf, "builtin.features", buf, &sz); if (res != RD_KAFKA_CONF_OK) { printf("ERROR: conf_get failed: %d\n", res); @@ -36,22 +36,22 @@ int main (int argc, char **argv) { * which will return an error if one or more flags are not enabled. 
*/ if (rd_kafka_conf_set(conf, "builtin.features", expected_features, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { - printf("ERROR: expected at least features: %s\n" - "got error: %s\n", - expected_features, errstr); + printf( + "ERROR: expected at least features: %s\n" + "got error: %s\n", + expected_features, errstr); failures++; } printf("all expected features matched: %s\n", expected_features); /* Apply config from argv key value pairs */ - for (i = 1 ; i+1 < argc ; i += 2) { - printf("verifying config %s=%s\n", argv[i], argv[i+1]); - if (rd_kafka_conf_set(conf, argv[i], argv[i+1], - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - printf("ERROR: failed to set %s=%s: %s\n", - argv[i], argv[i+1], errstr); + for (i = 1; i + 1 < argc; i += 2) { + printf("verifying config %s=%s\n", argv[i], argv[i + 1]); + if (rd_kafka_conf_set(conf, argv[i], argv[i + 1], errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + printf("ERROR: failed to set %s=%s: %s\n", argv[i], + argv[i + 1], errstr); failures++; } } diff --git a/packaging/nuget/artifact.py b/packaging/nuget/artifact.py index 1baac8a5cb..88c6f64f5c 100755 --- a/packaging/nuget/artifact.py +++ b/packaging/nuget/artifact.py @@ -30,6 +30,7 @@ s3_bucket = 'librdkafka-ci-packages' dry_run = False + class Artifact (object): def __init__(self, arts, path, info=None): self.path = path @@ -49,7 +50,7 @@ def __init__(self, arts, path, info=None): # Assign the map and convert all keys to lower case self.info = {k.lower(): v for k, v in info.items()} # Rename values, e.g., 'plat':'linux' to 'plat':'debian' - for k,v in self.info.items(): + for k, v in self.info.items(): rdict = packaging.rename_vals.get(k, None) if rdict is not None: self.info[k] = rdict.get(v, v) @@ -64,11 +65,10 @@ def __init__(self, arts, path, info=None): self.arts = arts arts.artifacts.append(self) - def __repr__(self): return self.path - def __lt__ (self, other): + def __lt__(self, other): return self.score < other.score def download(self): @@ -136,7 
+136,7 @@ def collect_single(self, path, req_tag=True): # Match tag or sha to gitref unmatched = list() - for m,v in self.match.items(): + for m, v in self.match.items(): if m not in info or info[m] != v: unmatched.append(m) @@ -144,19 +144,22 @@ def collect_single(self, path, req_tag=True): # common artifact. if info.get('p', '') != 'common' and len(unmatched) > 0: print(info) - print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched)) + print('%s: %s did not match %s' % + (info.get('p', None), folder, unmatched)) return None return Artifact(self, path, info) - def collect_s3(self): """ Collect and download build-artifacts from S3 based on git reference """ - print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket)) + print( + 'Collecting artifacts matching %s from S3 bucket %s' % + (self.match, s3_bucket)) self.s3 = boto3.resource('s3') self.s3_bucket = self.s3.Bucket(s3_bucket) self.s3_client = boto3.client('s3') - for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'): + for item in self.s3_client.list_objects( + Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'): self.collect_single(item.get('Key')) for a in self.artifacts: @@ -165,9 +168,8 @@ def collect_s3(self): def collect_local(self, path, req_tag=True): """ Collect artifacts from a local directory possibly previously collected from s3 """ - for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]: + for f in [os.path.join(dp, f) for dp, dn, + filenames in os.walk(path) for f in filenames]: if not os.path.isfile(f): continue self.collect_single(f, req_tag) - - diff --git a/packaging/nuget/cleanup-s3.py b/packaging/nuget/cleanup-s3.py index 43b2ec749b..6cc8803330 100755 --- a/packaging/nuget/cleanup-s3.py +++ b/packaging/nuget/cleanup-s3.py @@ -59,7 +59,8 @@ def may_delete(path): if tag is None: return True - if re.match(r'^v?\d+\.\d+\.\d+(-?RC\d+)?$', tag, flags=re.IGNORECASE) is 
None: + if re.match(r'^v?\d+\.\d+\.\d+(-?RC\d+)?$', tag, + flags=re.IGNORECASE) is None: return True return False @@ -101,6 +102,7 @@ def chunk_list(lst, cnt): for i in range(0, len(lst), cnt): yield lst[i:i + cnt] + if __name__ == '__main__': parser = argparse.ArgumentParser() diff --git a/packaging/nuget/packaging.py b/packaging/nuget/packaging.py index 9a51392b36..11c7020872 100755 --- a/packaging/nuget/packaging.py +++ b/packaging/nuget/packaging.py @@ -42,12 +42,13 @@ ('win', 'x86', '.lib'): re.compile('current ar archive'), ('linux', 'x64', '.so'): re.compile('ELF 64.* x86-64'), ('linux', 'arm64', '.so'): re.compile('ELF 64.* ARM aarch64'), - ('osx', 'x64', '.dylib'): re.compile('Mach-O 64.* x86_64') } + ('osx', 'x64', '.dylib'): re.compile('Mach-O 64.* x86_64')} magic = magic.Magic() + def magic_mismatch(path, a): - """ Verify that the filemagic for \p path matches for artifact \p a. + """ Verify that the filemagic for \\p path matches for artifact \\p a. Returns True if the magic file info does NOT match. Returns False if no matching is needed or the magic matches. 
""" k = (a.info.get('plat', None), a.info.get('arch', None), @@ -58,7 +59,8 @@ def magic_mismatch(path, a): minfo = magic.id_filename(path) if not pattern.match(minfo): - print(f"Warning: {path} magic \"{minfo}\" does not match expected {pattern} for key {k}") + print( + f"Warning: {path} magic \"{minfo}\" does not match expected {pattern} for key {k}") return True return False @@ -94,6 +96,7 @@ class MissingArtifactError(Exception): s3_bucket = 'librdkafka-ci-packages' dry_run = False + class Artifact (object): def __init__(self, arts, path, info=None): self.path = path @@ -113,7 +116,7 @@ def __init__(self, arts, path, info=None): # Assign the map and convert all keys to lower case self.info = {k.lower(): v for k, v in info.items()} # Rename values, e.g., 'plat':'linux' to 'plat':'debian' - for k,v in self.info.items(): + for k, v in self.info.items(): rdict = rename_vals.get(k, None) if rdict is not None: self.info[k] = rdict.get(v, v) @@ -128,11 +131,10 @@ def __init__(self, arts, path, info=None): self.arts = arts arts.artifacts.append(self) - def __repr__(self): return self.path - def __lt__ (self, other): + def __lt__(self, other): return self.score < other.score def download(self): @@ -162,7 +164,6 @@ def __init__(self, match, dlpath): if not dry_run: os.makedirs(self.dlpath, 0o755) - def collect_single(self, path, req_tag=True): """ Collect single artifact, be it in S3 or locally. 
:param: path string: S3 or local (relative) path @@ -201,7 +202,7 @@ def collect_single(self, path, req_tag=True): # Perform matching unmatched = list() - for m,v in self.match.items(): + for m, v in self.match.items(): if m not in info or info[m] != v: unmatched.append(m) @@ -213,10 +214,11 @@ def collect_single(self, path, req_tag=True): return Artifact(self, path, info) - def collect_s3(self): """ Collect and download build-artifacts from S3 based on git reference """ - print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket)) + print( + 'Collecting artifacts matching %s from S3 bucket %s' % + (self.match, s3_bucket)) self.s3 = boto3.resource('s3') self.s3_bucket = self.s3.Bucket(s3_bucket) self.s3_client = boto3.client('s3') @@ -248,7 +250,8 @@ def collect_s3(self): def collect_local(self, path, req_tag=True): """ Collect artifacts from a local directory possibly previously collected from s3 """ - for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]: + for f in [os.path.join(dp, f) for dp, dn, + filenames in os.walk(path) for f in filenames]: if not os.path.isfile(f): continue self.collect_single(f, req_tag) @@ -259,7 +262,7 @@ class Package (object): A Package is a working container for one or more output packages for a specific package type (e.g., nuget) """ - def __init__ (self, version, arts, ptype): + def __init__(self, version, arts, ptype): super(Package, self).__init__() self.version = version self.arts = arts @@ -271,22 +274,22 @@ def __init__ (self, version, arts, ptype): self.kv = {'version': version} self.files = dict() - def add_file (self, file): + def add_file(self, file): self.files[file] = True - def build (self): + def build(self): """ Build package output(s), return a list of paths to built packages """ raise NotImplementedError - def cleanup (self): + def cleanup(self): """ Optional cleanup routine for removing temporary files, etc. 
""" pass - def verify (self, path): + def verify(self, path): """ Optional post-build package verifier """ pass - def render (self, fname, destpath='.'): + def render(self, fname, destpath='.'): """ Render template in file fname and save to destpath/fname, where destpath is relative to stpath """ @@ -302,8 +305,7 @@ def render (self, fname, destpath='.'): self.add_file(outf) - - def copy_template (self, fname, target_fname=None, destpath='.'): + def copy_template(self, fname, target_fname=None, destpath='.'): """ Copy template file to destpath/fname where destpath is relative to stpath """ @@ -322,16 +324,17 @@ def copy_template (self, fname, target_fname=None, destpath='.'): class NugetPackage (Package): """ All platforms, archs, et.al, are bundled into one set of NuGet output packages: "main", redist and symbols """ - def __init__ (self, version, arts): + + def __init__(self, version, arts): if version.startswith('v'): - version = version[1:] # Strip v prefix + version = version[1:] # Strip v prefix super(NugetPackage, self).__init__(version, arts, "nuget") def cleanup(self): if os.path.isdir(self.stpath): shutil.rmtree(self.stpath) - def build (self, buildtype): + def build(self, buildtype): """ Build single NuGet package for all its artifacts. """ # NuGet removes the prefixing v from the version. 
@@ -339,7 +342,6 @@ def build (self, buildtype): if vless_version[0] == 'v': vless_version = vless_version[1:] - self.stpath = tempfile.mkdtemp(prefix="out-", suffix="-%s" % buildtype, dir=".") @@ -361,61 +363,159 @@ def build (self, buildtype): a.info['toolset'] = 'v140' mappings = [ - [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafka.h', 'build/native/include/librdkafka/rdkafka.h'], - [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafkacpp.h', 'build/native/include/librdkafka/rdkafkacpp.h'], - [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafka_mock.h', 'build/native/include/librdkafka/rdkafka_mock.h'], - - [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './share/doc/librdkafka/README.md', 'README.md'], - [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './share/doc/librdkafka/CONFIGURATION.md', 'CONFIGURATION.md'], + [{'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std', + 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './include/librdkafka/rdkafka.h', + 'build/native/include/librdkafka/rdkafka.h'], + [{'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std', + 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './include/librdkafka/rdkafkacpp.h', + 'build/native/include/librdkafka/rdkafkacpp.h'], + [{'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std', + 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './include/librdkafka/rdkafka_mock.h', + 'build/native/include/librdkafka/rdkafka_mock.h'], + + [{'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std', + 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './share/doc/librdkafka/README.md', + 'README.md'], + [{'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std', + 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './share/doc/librdkafka/CONFIGURATION.md', + 'CONFIGURATION.md'], # The above 
x64-linux gcc job generates a bad LICENSES.txt file, # so we use the one from the osx job instead. - [{'arch': 'x64', 'plat': 'osx', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './share/doc/librdkafka/LICENSES.txt', 'LICENSES.txt'], + [{'arch': 'x64', + 'plat': 'osx', + 'lnk': 'std', + 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './share/doc/librdkafka/LICENSES.txt', + 'LICENSES.txt'], # Travis OSX build - [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/librdkafka.dylib', 'runtimes/osx-x64/native/librdkafka.dylib'], + [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, + './lib/librdkafka.dylib', 'runtimes/osx-x64/native/librdkafka.dylib'], # Travis Manylinux build - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-manylinux*x86_64.tgz'}, './lib/librdkafka.so.1', 'runtimes/linux-x64/native/centos6-librdkafka.so'], + [{'arch': 'x64', + 'plat': 'linux', + 'fname_glob': 'librdkafka-manylinux*x86_64.tgz'}, + './lib/librdkafka.so.1', + 'runtimes/linux-x64/native/centos6-librdkafka.so'], # Travis Ubuntu 14.04 build - [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/librdkafka.so.1', 'runtimes/linux-x64/native/librdkafka.so'], + [{'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std', + 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './lib/librdkafka.so.1', + 'runtimes/linux-x64/native/librdkafka.so'], # Travis CentOS 7 RPM build - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka1*el7.x86_64.rpm'}, './usr/lib64/librdkafka.so.1', 'runtimes/linux-x64/native/centos7-librdkafka.so'], + [{'arch': 'x64', + 'plat': 'linux', + 'fname_glob': 'librdkafka1*el7.x86_64.rpm'}, + './usr/lib64/librdkafka.so.1', + 'runtimes/linux-x64/native/centos7-librdkafka.so'], # Travis Alpine build - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'alpine-librdkafka.tgz'}, 'librdkafka.so.1', 'runtimes/linux-x64/native/alpine-librdkafka.so'], + [{'arch': 'x64', 'plat': 'linux', 
'fname_glob': 'alpine-librdkafka.tgz'}, + 'librdkafka.so.1', 'runtimes/linux-x64/native/alpine-librdkafka.so'], # Travis arm64 Linux build - [{'arch': 'arm64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/librdkafka.so.1', 'runtimes/linux-arm64/native/librdkafka.so'], + [{'arch': 'arm64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './lib/librdkafka.so.1', 'runtimes/linux-arm64/native/librdkafka.so'], # Common Win runtime - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, 'vcruntime140.dll', 'runtimes/win-x64/native/vcruntime140.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, 'msvcp140.dll', 'runtimes/win-x64/native/msvcp140.dll'], + [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, + 'vcruntime140.dll', 'runtimes/win-x64/native/vcruntime140.dll'], + [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, + 'msvcp140.dll', 'runtimes/win-x64/native/msvcp140.dll'], # matches librdkafka.redist.{VER}.nupkg - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/librdkafka.dll', 'runtimes/win-x64/native/librdkafka.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/librdkafkacpp.dll', 'runtimes/win-x64/native/librdkafkacpp.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/libcrypto-1_1-x64.dll', 'runtimes/win-x64/native/libcrypto-1_1-x64.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/libssl-1_1-x64.dll', 'runtimes/win-x64/native/libssl-1_1-x64.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/zlib1.dll', 'runtimes/win-x64/native/zlib1.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/zstd.dll', 
'runtimes/win-x64/native/zstd.dll'], + [{'arch': 'x64', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/x64/Release/librdkafka.dll', + 'runtimes/win-x64/native/librdkafka.dll'], + [{'arch': 'x64', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/x64/Release/librdkafkacpp.dll', + 'runtimes/win-x64/native/librdkafkacpp.dll'], + [{'arch': 'x64', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/x64/Release/libcrypto-1_1-x64.dll', + 'runtimes/win-x64/native/libcrypto-1_1-x64.dll'], + [{'arch': 'x64', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/x64/Release/libssl-1_1-x64.dll', + 'runtimes/win-x64/native/libssl-1_1-x64.dll'], + [{'arch': 'x64', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/x64/Release/zlib1.dll', + 'runtimes/win-x64/native/zlib1.dll'], + [{'arch': 'x64', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/x64/Release/zstd.dll', + 'runtimes/win-x64/native/zstd.dll'], # matches librdkafka.{VER}.nupkg [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']}, 'build/native/lib/v140/x64/Release/librdkafka.lib', 'build/native/lib/win/x64/win-x64-Release/v140/librdkafka.lib'], [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']}, 'build/native/lib/v140/x64/Release/librdkafkacpp.lib', 'build/native/lib/win/x64/win-x64-Release/v140/librdkafkacpp.lib'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, 'vcruntime140.dll', 'runtimes/win-x86/native/vcruntime140.dll'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, 'msvcp140.dll', 'runtimes/win-x86/native/msvcp140.dll'], + [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, + 'vcruntime140.dll', 'runtimes/win-x86/native/vcruntime140.dll'], + [{'arch': 'x86', 'plat': 'win', 
'fname_glob': 'msvcr140.zip'}, + 'msvcp140.dll', 'runtimes/win-x86/native/msvcp140.dll'], # matches librdkafka.redist.{VER}.nupkg - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/librdkafka.dll', 'runtimes/win-x86/native/librdkafka.dll'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/librdkafkacpp.dll', 'runtimes/win-x86/native/librdkafkacpp.dll'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/libcrypto-1_1.dll', 'runtimes/win-x86/native/libcrypto-1_1.dll'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/libssl-1_1.dll', 'runtimes/win-x86/native/libssl-1_1.dll'], - - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/zlib1.dll', 'runtimes/win-x86/native/zlib1.dll'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/zstd.dll', 'runtimes/win-x86/native/zstd.dll'], + [{'arch': 'x86', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/Win32/Release/librdkafka.dll', + 'runtimes/win-x86/native/librdkafka.dll'], + [{'arch': 'x86', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/Win32/Release/librdkafkacpp.dll', + 'runtimes/win-x86/native/librdkafkacpp.dll'], + [{'arch': 'x86', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/Win32/Release/libcrypto-1_1.dll', + 'runtimes/win-x86/native/libcrypto-1_1.dll'], + [{'arch': 'x86', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/Win32/Release/libssl-1_1.dll', + 'runtimes/win-x86/native/libssl-1_1.dll'], + + [{'arch': 'x86', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/Win32/Release/zlib1.dll', + 
'runtimes/win-x86/native/zlib1.dll'], + [{'arch': 'x86', + 'plat': 'win', + 'fname_glob': 'librdkafka.redist*'}, + 'build/native/bin/v140/Win32/Release/zstd.dll', + 'runtimes/win-x86/native/zstd.dll'], # matches librdkafka.{VER}.nupkg [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v140/Win32/Release/librdkafka.lib', 'build/native/lib/win/x86/win-x86-Release/v140/librdkafka.lib'], + 'build/native/lib/v140/Win32/Release/librdkafka.lib', 'build/native/lib/win/x86/win-x86-Release/v140/librdkafka.lib'], [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v140/Win32/Release/librdkafkacpp.lib', 'build/native/lib/win/x86/win-x86-Release/v140/librdkafkacpp.lib'] + 'build/native/lib/v140/Win32/Release/librdkafkacpp.lib', 'build/native/lib/win/x86/win-x86-Release/v140/librdkafkacpp.lib'] ] for m in mappings: @@ -454,7 +554,10 @@ def build (self, buildtype): except KeyError as e: continue except Exception as e: - raise Exception('file not found in archive %s: %s. Files in archive are: %s' % (a.lpath, e, zfile.ZFile(a.lpath).getnames())) + raise Exception( + 'file not found in archive %s: %s. Files in archive are: %s' % + (a.lpath, e, zfile.ZFile( + a.lpath).getnames())) # Check that the file type matches. if magic_mismatch(outf, a): @@ -465,21 +568,22 @@ def build (self, buildtype): break if not found: - raise MissingArtifactError('unable to find artifact with tags %s matching "%s" for file "%s"' % (str(attributes), fname_glob, member)) - + raise MissingArtifactError( + 'unable to find artifact with tags %s matching "%s" for file "%s"' % + (str(attributes), fname_glob, member)) print('Tree extracted to %s' % self.stpath) # After creating a bare-bone nupkg layout containing the artifacts # and some spec and props files, call the 'nuget' utility to # make a proper nupkg of it (with all the metadata files). 
- subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % \ + subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % (os.path.join(self.stpath, 'librdkafka.redist.nuspec'), self.stpath), shell=True) return 'librdkafka.redist.%s.nupkg' % vless_version - def verify (self, path): + def verify(self, path): """ Verify package """ expect = [ "librdkafka.redist.nuspec", @@ -529,7 +633,9 @@ def verify (self, path): missing = [x for x in expect if x not in pkgd] if len(missing) > 0: - print('Missing files in package %s:\n%s' % (path, '\n'.join(missing))) + print( + 'Missing files in package %s:\n%s' % + (path, '\n'.join(missing))) return False print('OK - %d expected files found' % len(expect)) @@ -542,40 +648,71 @@ class StaticPackage (Package): # Only match statically linked artifacts match = {'lnk': 'static'} - def __init__ (self, version, arts): + def __init__(self, version, arts): super(StaticPackage, self).__init__(version, arts, "static") def cleanup(self): if os.path.isdir(self.stpath): shutil.rmtree(self.stpath) - def build (self, buildtype): + def build(self, buildtype): """ Build single package for all artifacts. 
""" self.stpath = tempfile.mkdtemp(prefix="out-", dir=".") mappings = [ # rdkafka.h - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-clang.tar.gz'}, './include/librdkafka/rdkafka.h', 'rdkafka.h'], + [{'arch': 'x64', + 'plat': 'linux', + 'fname_glob': 'librdkafka-clang.tar.gz'}, + './include/librdkafka/rdkafka.h', + 'rdkafka.h'], # LICENSES.txt - [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './share/doc/librdkafka/LICENSES.txt', 'LICENSES.txt'], + [{'arch': 'x64', + 'plat': 'osx', + 'fname_glob': 'librdkafka-clang.tar.gz'}, + './share/doc/librdkafka/LICENSES.txt', + 'LICENSES.txt'], # glibc linux static lib and pkg-config file - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/librdkafka-static.a', 'librdkafka_glibc_linux.a'], - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_glibc_linux.pc'], + [{'arch': 'x64', + 'plat': 'linux', + 'fname_glob': 'librdkafka-clang.tar.gz'}, + './lib/librdkafka-static.a', + 'librdkafka_glibc_linux.a'], + [{'arch': 'x64', + 'plat': 'linux', + 'fname_glob': 'librdkafka-clang.tar.gz'}, + './lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_glibc_linux.pc'], # musl linux static lib and pkg-config file - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'alpine-librdkafka.tgz'}, 'librdkafka-static.a', 'librdkafka_musl_linux.a'], - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'alpine-librdkafka.tgz'}, 'rdkafka-static.pc', 'librdkafka_musl_linux.pc'], + [{'arch': 'x64', + 'plat': 'linux', + 'fname_glob': 'alpine-librdkafka.tgz'}, + 'librdkafka-static.a', + 'librdkafka_musl_linux.a'], + [{'arch': 'x64', + 'plat': 'linux', + 'fname_glob': 'alpine-librdkafka.tgz'}, + 'rdkafka-static.pc', + 'librdkafka_musl_linux.pc'], # osx static lib and pkg-config file - [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/librdkafka-static.a', 'librdkafka_darwin.a'], - [{'arch': 
'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_darwin.pc'], + [{'arch': 'x64', + 'plat': 'osx', + 'fname_glob': 'librdkafka-clang.tar.gz'}, + './lib/librdkafka-static.a', + 'librdkafka_darwin.a'], + [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, + './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_darwin.pc'], # win static lib and pkg-config file (mingw) - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/librdkafka-static.a', 'librdkafka_windows.a'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_windows.pc'], + [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './lib/librdkafka-static.a', 'librdkafka_windows.a'], + [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_windows.pc'], ] for m in mappings: @@ -610,28 +747,32 @@ def build (self, buildtype): break if artifact is None: - raise MissingArtifactError('unable to find artifact with tags %s matching "%s"' % (str(attributes), fname_glob)) + raise MissingArtifactError( + 'unable to find artifact with tags %s matching "%s"' % + (str(attributes), fname_glob)) outf = os.path.join(self.stpath, m[2]) member = m[1] try: zfile.ZFile.extract(artifact.lpath, member, outf) except KeyError as e: - raise Exception('file not found in archive %s: %s. Files in archive are: %s' % (artifact.lpath, e, zfile.ZFile(artifact.lpath).getnames())) + raise Exception( + 'file not found in archive %s: %s. Files in archive are: %s' % + (artifact.lpath, e, zfile.ZFile( + artifact.lpath).getnames())) print('Tree extracted to %s' % self.stpath) # After creating a bare-bone layout, create a tarball. 
outname = "librdkafka-static-bundle-%s.tgz" % self.version print('Writing to %s' % outname) - subprocess.check_call("(cd %s && tar cvzf ../%s .)" % \ + subprocess.check_call("(cd %s && tar cvzf ../%s .)" % (self.stpath, outname), shell=True) return outname - - def verify (self, path): + def verify(self, path): """ Verify package """ expect = [ "./rdkafka.h", @@ -654,7 +795,9 @@ def verify (self, path): missing = [x for x in expect if x not in pkgd] if len(missing) > 0: - print('Missing files in package %s:\n%s' % (path, '\n'.join(missing))) + print( + 'Missing files in package %s:\n%s' % + (path, '\n'.join(missing))) return False else: print('OK - %d expected files found' % len(expect)) diff --git a/packaging/nuget/release.py b/packaging/nuget/release.py index 7a46d600f8..0b1f64c29a 100755 --- a/packaging/nuget/release.py +++ b/packaging/nuget/release.py @@ -16,22 +16,44 @@ dry_run = False - if __name__ == '__main__': parser = argparse.ArgumentParser() - parser.add_argument("--no-s3", help="Don't collect from S3", action="store_true") + parser.add_argument( + "--no-s3", + help="Don't collect from S3", + action="store_true") parser.add_argument("--dry-run", help="Locate artifacts but don't actually download or do anything", action="store_true") - parser.add_argument("--directory", help="Download directory (default: dl-)", default=None) - parser.add_argument("--no-cleanup", help="Don't clean up temporary folders", action="store_true") - parser.add_argument("--sha", help="Also match on this git sha1", default=None) - parser.add_argument("--nuget-version", help="The nuget package version (defaults to same as tag)", default=None) + parser.add_argument( + "--directory", + help="Download directory (default: dl-)", + default=None) + parser.add_argument( + "--no-cleanup", + help="Don't clean up temporary folders", + action="store_true") + parser.add_argument( + "--sha", + help="Also match on this git sha1", + default=None) + parser.add_argument( + "--nuget-version", + 
help="The nuget package version (defaults to same as tag)", + default=None) parser.add_argument("--upload", help="Upload package to after building, using provided NuGet API key (either file or the key itself)", default=None, type=str) - parser.add_argument("--class", help="Packaging class (see packaging.py)", default="NugetPackage", dest="pkgclass") - parser.add_argument("--retries", help="Number of retries to collect artifacts", default=0, type=int) + parser.add_argument( + "--class", + help="Packaging class (see packaging.py)", + default="NugetPackage", + dest="pkgclass") + parser.add_argument( + "--retries", + help="Number of retries to collect artifacts", + default=0, + type=int) parser.add_argument("tag", help="Git tag to collect") args = parser.parse_args() @@ -48,7 +70,7 @@ try: match.update(getattr(pkgclass, 'match')) - except: + except BaseException: pass arts = packaging.Artifacts(match, args.directory) @@ -119,5 +141,6 @@ print('Uploading %s to NuGet' % pkgfile) r = os.system("./push-to-nuget.sh '%s' %s" % (nuget_key, pkgfile)) - assert int(r) == 0, "NuGet upload failed with exit code {}, see previous errors".format(r) + assert int( + r) == 0, "NuGet upload failed with exit code {}, see previous errors".format(r) print('%s successfully uploaded to NuGet' % pkgfile) diff --git a/packaging/nuget/zfile/zfile.py b/packaging/nuget/zfile/zfile.py index bdedb778f7..51f2df25fb 100644 --- a/packaging/nuget/zfile/zfile.py +++ b/packaging/nuget/zfile/zfile.py @@ -5,6 +5,7 @@ import zipfile import rpmfile + class ZFile (object): def __init__(self, path, mode='r', ext=None): super(ZFile, self).__init__() @@ -49,8 +50,8 @@ def headers(self): return dict() def extract_to(self, member, path): - """ Extract compress file's \p member to \p path - If \p path is a directory the member's basename will used as + """ Extract compress file's \\p member to \\p path + If \\p path is a directory the member's basename will used as filename, otherwise path is considered the full 
file path name. """ if not os.path.isdir(os.path.dirname(path)): @@ -66,7 +67,7 @@ def extract_to(self, member, path): zf = self.f.extractfile(member) while True: - b = zf.read(1024*100) + b = zf.read(1024 * 100) if b: of.write(b) else: @@ -74,9 +75,8 @@ def extract_to(self, member, path): zf.close() - @classmethod - def extract (cls, zpath, member, outpath): + def extract(cls, zpath, member, outpath): """ Extract file member (full internal path) to output from archive zpath. @@ -85,11 +85,10 @@ def extract (cls, zpath, member, outpath): with ZFile(zpath) as zf: zf.extract_to(member, outpath) - @classmethod - def compress (cls, zpath, paths, stripcnt=0, ext=None): + def compress(cls, zpath, paths, stripcnt=0, ext=None): """ - Create new compressed file \p zpath containing files in \p paths + Create new compressed file \\p zpath containing files in \\p paths """ with ZFile(zpath, 'w', ext=ext) as zf: @@ -97,4 +96,3 @@ def compress (cls, zpath, paths, stripcnt=0, ext=None): outp = os.path.sep.join(p.split(os.path.sep)[stripcnt:]) print('zip %s to %s (stripcnt %d)' % (p, outp, stripcnt)) zf.f.write(p, outp) - diff --git a/packaging/rpm/tests/test.c b/packaging/rpm/tests/test.c index fa18782407..cf39b6bcd3 100644 --- a/packaging/rpm/tests/test.c +++ b/packaging/rpm/tests/test.c @@ -2,26 +2,16 @@ #include #include -int main (int argc, char **argv) { +int main(int argc, char **argv) { rd_kafka_conf_t *conf; rd_kafka_t *rk; char features[256]; size_t fsize = sizeof(features); char errstr[512]; const char *exp_features[] = { - "gzip", - "snappy", - "ssl", - "sasl", - "regex", - "lz4", - "sasl_gssapi", - "sasl_plain", - "sasl_scram", - "plugins", - "zstd", - "sasl_oauthbearer", - NULL, + "gzip", "snappy", "ssl", "sasl", "regex", + "lz4", "sasl_gssapi", "sasl_plain", "sasl_scram", "plugins", + "zstd", "sasl_oauthbearer", NULL, }; const char **exp; int missing = 0; @@ -39,14 +29,13 @@ int main (int argc, char **argv) { printf("builtin.features %s\n", features); /* Verify that 
expected features are enabled. */ - for (exp = exp_features ; *exp ; exp++) { + for (exp = exp_features; *exp; exp++) { const char *t = features; - size_t elen = strlen(*exp); - int match = 0; + size_t elen = strlen(*exp); + int match = 0; while ((t = strstr(t, *exp))) { - if (t[elen] == ',' || - t[elen] == '\0') { + if (t[elen] == ',' || t[elen] == '\0') { match = 1; break; } @@ -60,16 +49,16 @@ int main (int argc, char **argv) { missing++; } - if (rd_kafka_conf_set(conf, "security.protocol", "SASL_SSL", - errstr, sizeof(errstr)) || - rd_kafka_conf_set(conf, "sasl.mechanism", "PLAIN", - errstr, sizeof(errstr)) || - rd_kafka_conf_set(conf, "sasl.username", "username", - errstr, sizeof(errstr)) || - rd_kafka_conf_set(conf, "sasl.password", "password", - errstr, sizeof(errstr)) || - rd_kafka_conf_set(conf, "debug", "security", - errstr, sizeof(errstr))) { + if (rd_kafka_conf_set(conf, "security.protocol", "SASL_SSL", errstr, + sizeof(errstr)) || + rd_kafka_conf_set(conf, "sasl.mechanism", "PLAIN", errstr, + sizeof(errstr)) || + rd_kafka_conf_set(conf, "sasl.username", "username", errstr, + sizeof(errstr)) || + rd_kafka_conf_set(conf, "sasl.password", "password", errstr, + sizeof(errstr)) || + rd_kafka_conf_set(conf, "debug", "security", errstr, + sizeof(errstr))) { fprintf(stderr, "conf_set failed: %s\n", errstr); return 1; } diff --git a/packaging/rpm/tests/test.cpp b/packaging/rpm/tests/test.cpp index c72845fc40..d78a767102 100644 --- a/packaging/rpm/tests/test.cpp +++ b/packaging/rpm/tests/test.cpp @@ -2,7 +2,7 @@ #include -int main () { +int main() { std::cout << "librdkafka++ " << RdKafka::version_str() << std::endl; RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); diff --git a/packaging/tools/gh-release-checksums.py b/packaging/tools/gh-release-checksums.py index e3ff80a701..e7259dc202 100755 --- a/packaging/tools/gh-release-checksums.py +++ b/packaging/tools/gh-release-checksums.py @@ -24,13 +24,14 @@ print("Release asset checksums:") 
for ftype in ["zip", "tar.gz"]: - url = "https://github.com/edenhill/librdkafka/archive/{}.{}".format(tag, ftype) + url = "https://github.com/edenhill/librdkafka/archive/{}.{}".format( + tag, ftype) h = hashlib.sha256() r = requests.get(url, stream=True) while True: - buf = r.raw.read(100*1000) + buf = r.raw.read(100 * 1000) if len(buf) == 0: break h.update(buf) diff --git a/src-cpp/ConfImpl.cpp b/src-cpp/ConfImpl.cpp index f497d3a062..53d7b30c56 100644 --- a/src-cpp/ConfImpl.cpp +++ b/src-cpp/ConfImpl.cpp @@ -35,18 +35,16 @@ RdKafka::ConfImpl::ConfResult RdKafka::ConfImpl::set(const std::string &name, - const std::string &value, - std::string &errstr) { + const std::string &value, + std::string &errstr) { rd_kafka_conf_res_t res; char errbuf[512]; if (this->conf_type_ == CONF_GLOBAL) - res = rd_kafka_conf_set(this->rk_conf_, - name.c_str(), value.c_str(), - errbuf, sizeof(errbuf)); + res = rd_kafka_conf_set(this->rk_conf_, name.c_str(), value.c_str(), errbuf, + sizeof(errbuf)); else - res = rd_kafka_topic_conf_set(this->rkt_conf_, - name.c_str(), value.c_str(), + res = rd_kafka_topic_conf_set(this->rkt_conf_, name.c_str(), value.c_str(), errbuf, sizeof(errbuf)); if (res != RD_KAFKA_CONF_OK) @@ -56,8 +54,7 @@ RdKafka::ConfImpl::ConfResult RdKafka::ConfImpl::set(const std::string &name, } -std::list *RdKafka::ConfImpl::dump () { - +std::list *RdKafka::ConfImpl::dump() { const char **arrc; size_t cnt; std::list *arr; @@ -68,14 +65,14 @@ std::list *RdKafka::ConfImpl::dump () { arrc = rd_kafka_topic_conf_dump(rkt_conf_, &cnt); arr = new std::list(); - for (int i = 0 ; i < static_cast(cnt) ; i++) + for (int i = 0; i < static_cast(cnt); i++) arr->push_back(std::string(arrc[i])); rd_kafka_conf_dump_free(arrc, cnt); return arr; } -RdKafka::Conf *RdKafka::Conf::create (ConfType type) { +RdKafka::Conf *RdKafka::Conf::create(ConfType type) { ConfImpl *conf = new ConfImpl(type); if (type == CONF_GLOBAL) diff --git a/src-cpp/ConsumerImpl.cpp b/src-cpp/ConsumerImpl.cpp index 
04977a112f..b7f5e3b220 100644 --- a/src-cpp/ConsumerImpl.cpp +++ b/src-cpp/ConsumerImpl.cpp @@ -33,14 +33,16 @@ #include "rdkafkacpp_int.h" -RdKafka::Consumer::~Consumer () {} +RdKafka::Consumer::~Consumer() { +} -RdKafka::Consumer *RdKafka::Consumer::create (const RdKafka::Conf *conf, - std::string &errstr) { +RdKafka::Consumer *RdKafka::Consumer::create(const RdKafka::Conf *conf, + std::string &errstr) { char errbuf[512]; - const RdKafka::ConfImpl *confimpl = dynamic_cast(conf); + const RdKafka::ConfImpl *confimpl = + dynamic_cast(conf); RdKafka::ConsumerImpl *rkc = new RdKafka::ConsumerImpl(); - rd_kafka_conf_t *rk_conf = NULL; + rd_kafka_conf_t *rk_conf = NULL; if (confimpl) { if (!confimpl->rk_conf_) { @@ -55,8 +57,8 @@ RdKafka::Consumer *RdKafka::Consumer::create (const RdKafka::Conf *conf, } rd_kafka_t *rk; - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, - errbuf, sizeof(errbuf)))) { + if (!(rk = + rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, errbuf, sizeof(errbuf)))) { errstr = errbuf; // rd_kafka_new() takes ownership only if succeeds if (rk_conf) @@ -71,13 +73,13 @@ RdKafka::Consumer *RdKafka::Consumer::create (const RdKafka::Conf *conf, return rkc; } -int64_t RdKafka::Consumer::OffsetTail (int64_t offset) { +int64_t RdKafka::Consumer::OffsetTail(int64_t offset) { return RD_KAFKA_OFFSET_TAIL(offset); } -RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic, - int32_t partition, - int64_t offset) { +RdKafka::ErrorCode RdKafka::ConsumerImpl::start(Topic *topic, + int32_t partition, + int64_t offset) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); if (rd_kafka_consume_start(topicimpl->rkt_, partition, offset) == -1) @@ -87,10 +89,10 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic, } -RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic, - int32_t partition, - int64_t offset, - Queue *queue) { +RdKafka::ErrorCode RdKafka::ConsumerImpl::start(Topic *topic, + int32_t partition, + int64_t offset, + Queue *queue) { 
RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); RdKafka::QueueImpl *queueimpl = dynamic_cast(queue); @@ -102,8 +104,8 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic, } -RdKafka::ErrorCode RdKafka::ConsumerImpl::stop (Topic *topic, - int32_t partition) { +RdKafka::ErrorCode RdKafka::ConsumerImpl::stop(Topic *topic, + int32_t partition) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); if (rd_kafka_consume_stop(topicimpl->rkt_, partition) == -1) @@ -112,10 +114,10 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::stop (Topic *topic, return RdKafka::ERR_NO_ERROR; } -RdKafka::ErrorCode RdKafka::ConsumerImpl::seek (Topic *topic, - int32_t partition, - int64_t offset, - int timeout_ms) { +RdKafka::ErrorCode RdKafka::ConsumerImpl::seek(Topic *topic, + int32_t partition, + int64_t offset, + int timeout_ms) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); if (rd_kafka_seek(topicimpl->rkt_, partition, offset, timeout_ms) == -1) @@ -124,68 +126,71 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::seek (Topic *topic, return RdKafka::ERR_NO_ERROR; } -RdKafka::Message *RdKafka::ConsumerImpl::consume (Topic *topic, - int32_t partition, - int timeout_ms) { +RdKafka::Message *RdKafka::ConsumerImpl::consume(Topic *topic, + int32_t partition, + int timeout_ms) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consume(topicimpl->rkt_, partition, timeout_ms); if (!rkmessage) - return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, topic, - static_cast - (rd_kafka_last_error())); + return new RdKafka::MessageImpl( + RD_KAFKA_CONSUMER, topic, + static_cast(rd_kafka_last_error())); return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, topic, rkmessage); } namespace { - /* Helper struct for `consume_callback'. - * Encapsulates the values we need in order to call `rd_kafka_consume_callback' - * and keep track of the C++ callback function and `opaque' value. +/* Helper struct for `consume_callback'. 
+ * Encapsulates the values we need in order to call `rd_kafka_consume_callback' + * and keep track of the C++ callback function and `opaque' value. + */ +struct ConsumerImplCallback { + ConsumerImplCallback(RdKafka::Topic *topic, + RdKafka::ConsumeCb *cb, + void *data) : + topic(topic), cb_cls(cb), cb_data(data) { + } + /* This function is the one we give to `rd_kafka_consume_callback', with + * the `opaque' pointer pointing to an instance of this struct, in which + * we can find the C++ callback and `cb_data'. */ - struct ConsumerImplCallback { - ConsumerImplCallback(RdKafka::Topic* topic, RdKafka::ConsumeCb* cb, void* data) - : topic(topic), cb_cls(cb), cb_data(data) { - } - /* This function is the one we give to `rd_kafka_consume_callback', with - * the `opaque' pointer pointing to an instance of this struct, in which - * we can find the C++ callback and `cb_data'. - */ - static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { - ConsumerImplCallback *instance = static_cast(opaque); - RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, instance->topic, - msg, false /*don't free*/); - instance->cb_cls->consume_cb(message, instance->cb_data); - } - RdKafka::Topic *topic; - RdKafka::ConsumeCb *cb_cls; - void *cb_data; - }; -} - -int RdKafka::ConsumerImpl::consume_callback (RdKafka::Topic* topic, - int32_t partition, - int timeout_ms, - RdKafka::ConsumeCb *consume_cb, - void *opaque) { + static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { + ConsumerImplCallback *instance = + static_cast(opaque); + RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, instance->topic, msg, + false /*don't free*/); + instance->cb_cls->consume_cb(message, instance->cb_data); + } + RdKafka::Topic *topic; + RdKafka::ConsumeCb *cb_cls; + void *cb_data; +}; +} // namespace + +int RdKafka::ConsumerImpl::consume_callback(RdKafka::Topic *topic, + int32_t partition, + int timeout_ms, + RdKafka::ConsumeCb *consume_cb, + void *opaque) { RdKafka::TopicImpl 
*topicimpl = static_cast(topic); ConsumerImplCallback context(topic, consume_cb, opaque); return rd_kafka_consume_callback(topicimpl->rkt_, partition, timeout_ms, - &ConsumerImplCallback::consume_cb_trampoline, &context); + &ConsumerImplCallback::consume_cb_trampoline, + &context); } -RdKafka::Message *RdKafka::ConsumerImpl::consume (Queue *queue, - int timeout_ms) { +RdKafka::Message *RdKafka::ConsumerImpl::consume(Queue *queue, int timeout_ms) { RdKafka::QueueImpl *queueimpl = dynamic_cast(queue); rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consume_queue(queueimpl->queue_, timeout_ms); if (!rkmessage) - return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, NULL, - static_cast - (rd_kafka_last_error())); + return new RdKafka::MessageImpl( + RD_KAFKA_CONSUMER, NULL, + static_cast(rd_kafka_last_error())); /* * Recover our Topic * from the topic conf's opaque field, which we * set in RdKafka::Topic::create() for just this kind of situation. @@ -197,42 +202,43 @@ RdKafka::Message *RdKafka::ConsumerImpl::consume (Queue *queue, } namespace { - /* Helper struct for `consume_callback' with a Queue. - * Encapsulates the values we need in order to call `rd_kafka_consume_callback' - * and keep track of the C++ callback function and `opaque' value. +/* Helper struct for `consume_callback' with a Queue. + * Encapsulates the values we need in order to call `rd_kafka_consume_callback' + * and keep track of the C++ callback function and `opaque' value. + */ +struct ConsumerImplQueueCallback { + ConsumerImplQueueCallback(RdKafka::ConsumeCb *cb, void *data) : + cb_cls(cb), cb_data(data) { + } + /* This function is the one we give to `rd_kafka_consume_callback', with + * the `opaque' pointer pointing to an instance of this struct, in which + * we can find the C++ callback and `cb_data'. 
*/ - struct ConsumerImplQueueCallback { - ConsumerImplQueueCallback(RdKafka::ConsumeCb *cb, void *data) - : cb_cls(cb), cb_data(data) { - } - /* This function is the one we give to `rd_kafka_consume_callback', with - * the `opaque' pointer pointing to an instance of this struct, in which - * we can find the C++ callback and `cb_data'. + static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { + ConsumerImplQueueCallback *instance = + static_cast(opaque); + /* + * Recover our Topic * from the topic conf's opaque field, which we + * set in RdKafka::Topic::create() for just this kind of situation. */ - static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { - ConsumerImplQueueCallback *instance = static_cast(opaque); - /* - * Recover our Topic * from the topic conf's opaque field, which we - * set in RdKafka::Topic::create() for just this kind of situation. - */ - void *topic_opaque = rd_kafka_topic_opaque(msg->rkt); - RdKafka::Topic *topic = static_cast(topic_opaque); - RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, topic, msg, - false /*don't free*/); - instance->cb_cls->consume_cb(message, instance->cb_data); - } - RdKafka::ConsumeCb *cb_cls; - void *cb_data; - }; -} - -int RdKafka::ConsumerImpl::consume_callback (Queue *queue, - int timeout_ms, - RdKafka::ConsumeCb *consume_cb, - void *opaque) { + void *topic_opaque = rd_kafka_topic_opaque(msg->rkt); + RdKafka::Topic *topic = static_cast(topic_opaque); + RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, topic, msg, + false /*don't free*/); + instance->cb_cls->consume_cb(message, instance->cb_data); + } + RdKafka::ConsumeCb *cb_cls; + void *cb_data; +}; +} // namespace + +int RdKafka::ConsumerImpl::consume_callback(Queue *queue, + int timeout_ms, + RdKafka::ConsumeCb *consume_cb, + void *opaque) { RdKafka::QueueImpl *queueimpl = dynamic_cast(queue); ConsumerImplQueueCallback context(consume_cb, opaque); - return rd_kafka_consume_callback_queue(queueimpl->queue_, timeout_ms, - 
&ConsumerImplQueueCallback::consume_cb_trampoline, - &context); + return rd_kafka_consume_callback_queue( + queueimpl->queue_, timeout_ms, + &ConsumerImplQueueCallback::consume_cb_trampoline, &context); } diff --git a/src-cpp/HandleImpl.cpp b/src-cpp/HandleImpl.cpp index f4ae56dbe8..0d1cf9a055 100644 --- a/src-cpp/HandleImpl.cpp +++ b/src-cpp/HandleImpl.cpp @@ -34,7 +34,7 @@ void RdKafka::consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); - RdKafka::Topic* topic = static_cast(rd_kafka_topic_opaque(msg->rkt)); + RdKafka::Topic *topic = static_cast(rd_kafka_topic_opaque(msg->rkt)); RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, topic, msg, false /*don't free*/); @@ -42,14 +42,16 @@ void RdKafka::consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { handle->consume_cb_->consume_cb(message, opaque); } -void RdKafka::log_cb_trampoline (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { +void RdKafka::log_cb_trampoline(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf) { if (!rk) { rd_kafka_log_print(rk, level, fac, buf); return; } - void *opaque = rd_kafka_opaque(rk); + void *opaque = rd_kafka_opaque(rk); RdKafka::HandleImpl *handle = static_cast(opaque); if (!handle->event_cb_) { @@ -57,17 +59,18 @@ void RdKafka::log_cb_trampoline (const rd_kafka_t *rk, int level, return; } - RdKafka::EventImpl event(RdKafka::Event::EVENT_LOG, - RdKafka::ERR_NO_ERROR, - static_cast(level), - fac, buf); + RdKafka::EventImpl event(RdKafka::Event::EVENT_LOG, RdKafka::ERR_NO_ERROR, + static_cast(level), fac, + buf); handle->event_cb_->event_cb(event); } -void RdKafka::error_cb_trampoline (rd_kafka_t *rk, int err, - const char *reason, void *opaque) { +void RdKafka::error_cb_trampoline(rd_kafka_t *rk, + int err, + const char *reason, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); char errstr[512]; bool is_fatal = false; @@ -81,37 +84,36 @@ void 
RdKafka::error_cb_trampoline (rd_kafka_t *rk, int err, } RdKafka::EventImpl event(RdKafka::Event::EVENT_ERROR, static_cast(err), - RdKafka::Event::EVENT_SEVERITY_ERROR, - NULL, - reason); + RdKafka::Event::EVENT_SEVERITY_ERROR, NULL, reason); event.fatal_ = is_fatal; handle->event_cb_->event_cb(event); } -void RdKafka::throttle_cb_trampoline (rd_kafka_t *rk, const char *broker_name, - int32_t broker_id, - int throttle_time_ms, - void *opaque) { +void RdKafka::throttle_cb_trampoline(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); RdKafka::EventImpl event(RdKafka::Event::EVENT_THROTTLE); - event.str_ = broker_name; - event.id_ = broker_id; + event.str_ = broker_name; + event.id_ = broker_id; event.throttle_time_ = throttle_time_ms; handle->event_cb_->event_cb(event); } -int RdKafka::stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len, - void *opaque) { +int RdKafka::stats_cb_trampoline(rd_kafka_t *rk, + char *json, + size_t json_len, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); - RdKafka::EventImpl event(RdKafka::Event::EVENT_STATS, - RdKafka::ERR_NO_ERROR, - RdKafka::Event::EVENT_SEVERITY_INFO, - NULL, json); + RdKafka::EventImpl event(RdKafka::Event::EVENT_STATS, RdKafka::ERR_NO_ERROR, + RdKafka::Event::EVENT_SEVERITY_INFO, NULL, json); handle->event_cb_->event_cb(event); @@ -119,56 +121,57 @@ int RdKafka::stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len, } -int RdKafka::socket_cb_trampoline (int domain, int type, int protocol, - void *opaque) { +int RdKafka::socket_cb_trampoline(int domain, + int type, + int protocol, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); return handle->socket_cb_->socket_cb(domain, type, protocol); } -int RdKafka::open_cb_trampoline (const char *pathname, int flags, mode_t mode, - void *opaque) { +int RdKafka::open_cb_trampoline(const char *pathname, + int 
flags, + mode_t mode, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); return handle->open_cb_->open_cb(pathname, flags, static_cast(mode)); } -void -RdKafka::oauthbearer_token_refresh_cb_trampoline (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque) { +void RdKafka::oauthbearer_token_refresh_cb_trampoline( + rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); - handle->oauthbearer_token_refresh_cb_-> - oauthbearer_token_refresh_cb(handle, - std::string(oauthbearer_config ? - oauthbearer_config : "")); + handle->oauthbearer_token_refresh_cb_->oauthbearer_token_refresh_cb( + handle, std::string(oauthbearer_config ? oauthbearer_config : "")); } -int RdKafka::ssl_cert_verify_cb_trampoline (rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque) { +int RdKafka::ssl_cert_verify_cb_trampoline(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); std::string errbuf; - bool res = 0 != handle->ssl_cert_verify_cb_-> - ssl_cert_verify_cb(std::string(broker_name), broker_id, - x509_error, - depth, - buf, size, - errbuf); + bool res = 0 != handle->ssl_cert_verify_cb_->ssl_cert_verify_cb( + std::string(broker_name), broker_id, x509_error, depth, + buf, size, errbuf); if (res) return (int)res; - size_t errlen = errbuf.size() > errstr_size - 1 ? - errstr_size - 1 : errbuf.size(); + size_t errlen = + errbuf.size() > errstr_size - 1 ? 
errstr_size - 1 : errbuf.size(); memcpy(errstr, errbuf.c_str(), errlen); if (errstr_size > 0) @@ -178,21 +181,21 @@ int RdKafka::ssl_cert_verify_cb_trampoline (rd_kafka_t *rk, } -RdKafka::ErrorCode RdKafka::HandleImpl::metadata (bool all_topics, - const Topic *only_rkt, - Metadata **metadatap, - int timeout_ms) { - - const rd_kafka_metadata_t *cmetadatap=NULL; +RdKafka::ErrorCode RdKafka::HandleImpl::metadata(bool all_topics, + const Topic *only_rkt, + Metadata **metadatap, + int timeout_ms) { + const rd_kafka_metadata_t *cmetadatap = NULL; - rd_kafka_topic_t *topic = only_rkt ? - static_cast(only_rkt)->rkt_ : NULL; + rd_kafka_topic_t *topic = + only_rkt ? static_cast(only_rkt)->rkt_ : NULL; - const rd_kafka_resp_err_t rc = rd_kafka_metadata(rk_, all_topics, topic, - &cmetadatap,timeout_ms); + const rd_kafka_resp_err_t rc = + rd_kafka_metadata(rk_, all_topics, topic, &cmetadatap, timeout_ms); - *metadatap = (rc == RD_KAFKA_RESP_ERR_NO_ERROR) ? - new RdKafka::MetadataImpl(cmetadatap) : NULL; + *metadatap = (rc == RD_KAFKA_RESP_ERR_NO_ERROR) + ? 
new RdKafka::MetadataImpl(cmetadatap) + : NULL; return static_cast(rc); } @@ -200,47 +203,45 @@ RdKafka::ErrorCode RdKafka::HandleImpl::metadata (bool all_topics, /** * Convert a list of C partitions to C++ partitions */ -static void c_parts_to_partitions (const rd_kafka_topic_partition_list_t - *c_parts, - std::vector - &partitions) { +static void c_parts_to_partitions( + const rd_kafka_topic_partition_list_t *c_parts, + std::vector &partitions) { partitions.resize(c_parts->cnt); - for (int i = 0 ; i < c_parts->cnt ; i++) + for (int i = 0; i < c_parts->cnt; i++) partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]); } -static void free_partition_vector (std::vector &v) { - for (unsigned int i = 0 ; i < v.size() ; i++) +static void free_partition_vector(std::vector &v) { + for (unsigned int i = 0; i < v.size(); i++) delete v[i]; v.clear(); } -void -RdKafka::rebalance_cb_trampoline (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *c_partitions, - void *opaque) { +void RdKafka::rebalance_cb_trampoline( + rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *c_partitions, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); - std::vector partitions; + std::vector partitions; c_parts_to_partitions(c_partitions, partitions); handle->rebalance_cb_->rebalance_cb( - dynamic_cast(handle), - static_cast(err), - partitions); + dynamic_cast(handle), + static_cast(err), partitions); free_partition_vector(partitions); } -void -RdKafka::offset_commit_cb_trampoline0 ( +void RdKafka::offset_commit_cb_trampoline0( rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *c_offsets, void *opaque) { + rd_kafka_topic_partition_list_t *c_offsets, + void *opaque) { OffsetCommitCb *cb = static_cast(opaque); - std::vector offsets; + std::vector offsets; if (c_offsets) c_parts_to_partitions(c_offsets, offsets); @@ -250,28 +251,26 @@ RdKafka::offset_commit_cb_trampoline0 ( 
free_partition_vector(offsets); } -static void -offset_commit_cb_trampoline ( +static void offset_commit_cb_trampoline( rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *c_offsets, void *opaque) { + rd_kafka_topic_partition_list_t *c_offsets, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); RdKafka::offset_commit_cb_trampoline0(rk, err, c_offsets, handle->offset_commit_cb_); } -void RdKafka::HandleImpl::set_common_config (const RdKafka::ConfImpl *confimpl) { - +void RdKafka::HandleImpl::set_common_config(const RdKafka::ConfImpl *confimpl) { rd_kafka_conf_set_opaque(confimpl->rk_conf_, this); if (confimpl->event_cb_) { - rd_kafka_conf_set_log_cb(confimpl->rk_conf_, - RdKafka::log_cb_trampoline); + rd_kafka_conf_set_log_cb(confimpl->rk_conf_, RdKafka::log_cb_trampoline); rd_kafka_conf_set_error_cb(confimpl->rk_conf_, RdKafka::error_cb_trampoline); rd_kafka_conf_set_throttle_cb(confimpl->rk_conf_, - RdKafka::throttle_cb_trampoline); + RdKafka::throttle_cb_trampoline); rd_kafka_conf_set_stats_cb(confimpl->rk_conf_, RdKafka::stats_cb_trampoline); event_cb_ = confimpl->event_cb_; @@ -279,9 +278,8 @@ void RdKafka::HandleImpl::set_common_config (const RdKafka::ConfImpl *confimpl) if (confimpl->oauthbearer_token_refresh_cb_) { rd_kafka_conf_set_oauthbearer_token_refresh_cb( - confimpl->rk_conf_, - RdKafka::oauthbearer_token_refresh_cb_trampoline); - oauthbearer_token_refresh_cb_ = confimpl->oauthbearer_token_refresh_cb_; + confimpl->rk_conf_, RdKafka::oauthbearer_token_refresh_cb_trampoline); + oauthbearer_token_refresh_cb_ = confimpl->oauthbearer_token_refresh_cb_; } if (confimpl->socket_cb_) { @@ -291,9 +289,9 @@ void RdKafka::HandleImpl::set_common_config (const RdKafka::ConfImpl *confimpl) } if (confimpl->ssl_cert_verify_cb_) { - rd_kafka_conf_set_ssl_cert_verify_cb(confimpl->rk_conf_, - RdKafka::ssl_cert_verify_cb_trampoline); - ssl_cert_verify_cb_ = confimpl->ssl_cert_verify_cb_; + rd_kafka_conf_set_ssl_cert_verify_cb( + 
confimpl->rk_conf_, RdKafka::ssl_cert_verify_cb_trampoline); + ssl_cert_verify_cb_ = confimpl->ssl_cert_verify_cb_; } if (confimpl->open_cb_) { @@ -320,12 +318,11 @@ void RdKafka::HandleImpl::set_common_config (const RdKafka::ConfImpl *confimpl) RdKafka::consume_cb_trampoline); consume_cb_ = confimpl->consume_cb_; } - } -RdKafka::ErrorCode -RdKafka::HandleImpl::pause (std::vector &partitions) { +RdKafka::ErrorCode RdKafka::HandleImpl::pause( + std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -342,8 +339,8 @@ RdKafka::HandleImpl::pause (std::vector &partitions) { } -RdKafka::ErrorCode -RdKafka::HandleImpl::resume (std::vector &partitions) { +RdKafka::ErrorCode RdKafka::HandleImpl::resume( + std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -359,11 +356,10 @@ RdKafka::HandleImpl::resume (std::vector &partitions) return static_cast(err); } -RdKafka::Queue * -RdKafka::HandleImpl::get_partition_queue (const TopicPartition *part) { +RdKafka::Queue *RdKafka::HandleImpl::get_partition_queue( + const TopicPartition *part) { rd_kafka_queue_t *rkqu; - rkqu = rd_kafka_queue_get_partition(rk_, - part->topic().c_str(), + rkqu = rd_kafka_queue_get_partition(rk_, part->topic().c_str(), part->partition()); if (rkqu == NULL) @@ -372,31 +368,28 @@ RdKafka::HandleImpl::get_partition_queue (const TopicPartition *part) { return new QueueImpl(rkqu); } -RdKafka::ErrorCode -RdKafka::HandleImpl::set_log_queue (RdKafka::Queue *queue) { - rd_kafka_queue_t *rkqu = NULL; - if (queue) { - QueueImpl *queueimpl = dynamic_cast(queue); - rkqu = queueimpl->queue_; - } - return static_cast( - rd_kafka_set_log_queue(rk_, rkqu)); +RdKafka::ErrorCode RdKafka::HandleImpl::set_log_queue(RdKafka::Queue *queue) { + rd_kafka_queue_t *rkqu = NULL; + if (queue) { + QueueImpl *queueimpl = dynamic_cast(queue); + rkqu = queueimpl->queue_; + } + return static_cast(rd_kafka_set_log_queue(rk_, rkqu)); } namespace RdKafka { 
-rd_kafka_topic_partition_list_t * -partitions_to_c_parts (const std::vector &partitions){ +rd_kafka_topic_partition_list_t *partitions_to_c_parts( + const std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; c_parts = rd_kafka_topic_partition_list_new((int)partitions.size()); - for (unsigned int i = 0 ; i < partitions.size() ; i++) { + for (unsigned int i = 0; i < partitions.size(); i++) { const RdKafka::TopicPartitionImpl *tpi = - dynamic_cast(partitions[i]); - rd_kafka_topic_partition_t *rktpar = - rd_kafka_topic_partition_list_add(c_parts, - tpi->topic_.c_str(), tpi->partition_); + dynamic_cast(partitions[i]); + rd_kafka_topic_partition_t *rktpar = rd_kafka_topic_partition_list_add( + c_parts, tpi->topic_.c_str(), tpi->partition_); rktpar->offset = tpi->offset_; } @@ -407,24 +400,23 @@ partitions_to_c_parts (const std::vector &partitions){ /** * @brief Update the application provided 'partitions' with info from 'c_parts' */ -void -update_partitions_from_c_parts (std::vector &partitions, - const rd_kafka_topic_partition_list_t *c_parts) { - for (int i = 0 ; i < c_parts->cnt ; i++) { +void update_partitions_from_c_parts( + std::vector &partitions, + const rd_kafka_topic_partition_list_t *c_parts) { + for (int i = 0; i < c_parts->cnt; i++) { rd_kafka_topic_partition_t *p = &c_parts->elems[i]; /* Find corresponding C++ entry */ - for (unsigned int j = 0 ; j < partitions.size() ; j++) { + for (unsigned int j = 0; j < partitions.size(); j++) { RdKafka::TopicPartitionImpl *pp = - dynamic_cast(partitions[j]); + dynamic_cast(partitions[j]); if (!strcmp(p->topic, pp->topic_.c_str()) && - p->partition == pp->partition_) { - pp->offset_ = p->offset; - pp->err_ = static_cast(p->err); + p->partition == pp->partition_) { + pp->offset_ = p->offset; + pp->err_ = static_cast(p->err); } } } } -}; - +}; // namespace RdKafka diff --git a/src-cpp/HeadersImpl.cpp b/src-cpp/HeadersImpl.cpp index b31912c677..b567ef36c0 100644 --- a/src-cpp/HeadersImpl.cpp +++ 
b/src-cpp/HeadersImpl.cpp @@ -34,7 +34,7 @@ #include "rdkafkacpp_int.h" RdKafka::Headers *RdKafka::Headers::create() { - return new RdKafka::HeadersImpl(); + return new RdKafka::HeadersImpl(); } RdKafka::Headers *RdKafka::Headers::create(const std::vector

&headers) { @@ -44,4 +44,5 @@ RdKafka::Headers *RdKafka::Headers::create(const std::vector
&headers) { return new RdKafka::HeadersImpl(); } -RdKafka::Headers::~Headers() {} +RdKafka::Headers::~Headers() { +} diff --git a/src-cpp/KafkaConsumerImpl.cpp b/src-cpp/KafkaConsumerImpl.cpp index 5aca143259..5d94df953e 100644 --- a/src-cpp/KafkaConsumerImpl.cpp +++ b/src-cpp/KafkaConsumerImpl.cpp @@ -31,14 +31,17 @@ #include "rdkafkacpp_int.h" -RdKafka::KafkaConsumer::~KafkaConsumer () {} +RdKafka::KafkaConsumer::~KafkaConsumer() { +} -RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (const RdKafka::Conf *conf, - std::string &errstr) { +RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create( + const RdKafka::Conf *conf, + std::string &errstr) { char errbuf[512]; - const RdKafka::ConfImpl *confimpl = dynamic_cast(conf); + const RdKafka::ConfImpl *confimpl = + dynamic_cast(conf); RdKafka::KafkaConsumerImpl *rkc = new RdKafka::KafkaConsumerImpl(); - rd_kafka_conf_t *rk_conf = NULL; + rd_kafka_conf_t *rk_conf = NULL; size_t grlen; if (!confimpl || !confimpl->rk_conf_) { @@ -47,8 +50,8 @@ RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (const RdKafka::Conf *con return NULL; } - if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id", - NULL, &grlen) != RD_KAFKA_CONF_OK || + if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id", NULL, &grlen) != + RD_KAFKA_CONF_OK || grlen <= 1 /* terminating null only */) { errstr = "\"group.id\" must be configured"; delete rkc; @@ -60,8 +63,8 @@ RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (const RdKafka::Conf *con rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_); rd_kafka_t *rk; - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, - errbuf, sizeof(errbuf)))) { + if (!(rk = + rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, errbuf, sizeof(errbuf)))) { errstr = errbuf; // rd_kafka_new() takes ownership only if succeeds rd_kafka_conf_destroy(rk_conf); @@ -79,18 +82,14 @@ RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (const RdKafka::Conf *con - - - - -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::subscribe (const 
std::vector &topics) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::subscribe( + const std::vector &topics) { rd_kafka_topic_partition_list_t *c_topics; rd_kafka_resp_err_t err; c_topics = rd_kafka_topic_partition_list_new((int)topics.size()); - for (unsigned int i = 0 ; i < topics.size() ; i++) + for (unsigned int i = 0; i < topics.size(); i++) rd_kafka_topic_partition_list_add(c_topics, topics[i].c_str(), RD_KAFKA_PARTITION_UA); @@ -103,12 +102,11 @@ RdKafka::KafkaConsumerImpl::subscribe (const std::vector &topics) { -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::unsubscribe () { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::unsubscribe() { return static_cast(rd_kafka_unsubscribe(this->rk_)); } -RdKafka::Message *RdKafka::KafkaConsumerImpl::consume (int timeout_ms) { +RdKafka::Message *RdKafka::KafkaConsumerImpl::consume(int timeout_ms) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consumer_poll(this->rk_, timeout_ms); @@ -118,13 +116,12 @@ RdKafka::Message *RdKafka::KafkaConsumerImpl::consume (int timeout_ms) { RdKafka::ERR__TIMED_OUT); return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, rkmessage); - } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::assignment (std::vector &partitions) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::assignment( + std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -133,7 +130,7 @@ RdKafka::KafkaConsumerImpl::assignment (std::vector &p partitions.resize(c_parts->cnt); - for (int i = 0 ; i < c_parts->cnt ; i++) + for (int i = 0; i < c_parts->cnt; i++) partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]); rd_kafka_topic_partition_list_destroy(c_parts); @@ -143,15 +140,14 @@ RdKafka::KafkaConsumerImpl::assignment (std::vector &p -bool -RdKafka::KafkaConsumerImpl::assignment_lost () { +bool RdKafka::KafkaConsumerImpl::assignment_lost() { return rd_kafka_assignment_lost(rk_) ? 
true : false; } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::subscription (std::vector &topics) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::subscription( + std::vector &topics) { rd_kafka_topic_partition_list_t *c_topics; rd_kafka_resp_err_t err; @@ -159,7 +155,7 @@ RdKafka::KafkaConsumerImpl::subscription (std::vector &topics) { return static_cast(err); topics.resize(c_topics->cnt); - for (int i = 0 ; i < c_topics->cnt ; i++) + for (int i = 0; i < c_topics->cnt; i++) topics[i] = std::string(c_topics->elems[i].topic); rd_kafka_topic_partition_list_destroy(c_topics); @@ -168,8 +164,8 @@ RdKafka::KafkaConsumerImpl::subscription (std::vector &topics) { } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::assign (const std::vector &partitions) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::assign( + const std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -182,14 +178,13 @@ RdKafka::KafkaConsumerImpl::assign (const std::vector &partitio } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::unassign () { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::unassign() { return static_cast(rd_kafka_assign(rk_, NULL)); } -RdKafka::Error * -RdKafka::KafkaConsumerImpl::incremental_assign (const std::vector &partitions) { +RdKafka::Error *RdKafka::KafkaConsumerImpl::incremental_assign( + const std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_error_t *c_error; @@ -204,8 +199,8 @@ RdKafka::KafkaConsumerImpl::incremental_assign (const std::vector &partitions) { +RdKafka::Error *RdKafka::KafkaConsumerImpl::incremental_unassign( + const std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_error_t *c_error; @@ -220,8 +215,9 @@ RdKafka::KafkaConsumerImpl::incremental_unassign (const std::vector &partitions, int timeout_ms) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::committed( + std::vector &partitions, + int timeout_ms) { rd_kafka_topic_partition_list_t *c_parts; 
rd_kafka_resp_err_t err; @@ -239,8 +235,8 @@ RdKafka::KafkaConsumerImpl::committed (std::vector &pa } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::position (std::vector &partitions) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::position( + std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -258,20 +254,19 @@ RdKafka::KafkaConsumerImpl::position (std::vector &par } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::seek (const RdKafka::TopicPartition &partition, - int timeout_ms) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::seek( + const RdKafka::TopicPartition &partition, + int timeout_ms) { const RdKafka::TopicPartitionImpl *p = - dynamic_cast(&partition); + dynamic_cast(&partition); rd_kafka_topic_t *rkt; if (!(rkt = rd_kafka_topic_new(rk_, p->topic_.c_str(), NULL))) return static_cast(rd_kafka_last_error()); /* FIXME: Use a C API that takes a topic_partition_list_t instead */ - RdKafka::ErrorCode err = - static_cast - (rd_kafka_seek(rkt, p->partition_, p->offset_, timeout_ms)); + RdKafka::ErrorCode err = static_cast( + rd_kafka_seek(rkt, p->partition_, p->offset_, timeout_ms)); rd_kafka_topic_destroy(rkt); @@ -280,15 +275,11 @@ RdKafka::KafkaConsumerImpl::seek (const RdKafka::TopicPartition &partition, - - -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::close () { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::close() { return static_cast(rd_kafka_consumer_close(rk_)); } - -RdKafka::ConsumerGroupMetadata::~ConsumerGroupMetadata () {} - +RdKafka::ConsumerGroupMetadata::~ConsumerGroupMetadata() { +} diff --git a/src-cpp/MessageImpl.cpp b/src-cpp/MessageImpl.cpp index 9562402c53..c6d83150fd 100644 --- a/src-cpp/MessageImpl.cpp +++ b/src-cpp/MessageImpl.cpp @@ -34,5 +34,5 @@ #include "rdkafkacpp_int.h" -RdKafka::Message::~Message() {} - +RdKafka::Message::~Message() { +} diff --git a/src-cpp/MetadataImpl.cpp b/src-cpp/MetadataImpl.cpp index c2869f5aa0..fd50811d40 100644 --- a/src-cpp/MetadataImpl.cpp +++ 
b/src-cpp/MetadataImpl.cpp @@ -41,15 +41,23 @@ Metadata::~Metadata() {}; */ class BrokerMetadataImpl : public BrokerMetadata { public: - BrokerMetadataImpl(const rd_kafka_metadata_broker_t *broker_metadata) - :broker_metadata_(broker_metadata),host_(broker_metadata->host) {} + BrokerMetadataImpl(const rd_kafka_metadata_broker_t *broker_metadata) : + broker_metadata_(broker_metadata), host_(broker_metadata->host) { + } - int32_t id() const{return broker_metadata_->id;} + int32_t id() const { + return broker_metadata_->id; + } - const std::string host() const {return host_;} - int port() const {return broker_metadata_->port;} + const std::string host() const { + return host_; + } + int port() const { + return broker_metadata_->port; + } - virtual ~BrokerMetadataImpl() {} + virtual ~BrokerMetadataImpl() { + } private: const rd_kafka_metadata_broker_t *broker_metadata_; @@ -61,91 +69,97 @@ class BrokerMetadataImpl : public BrokerMetadata { */ class PartitionMetadataImpl : public PartitionMetadata { public: - // @TODO too much memory copy? maybe we should create a new vector class that read directly from C arrays? + // @TODO too much memory copy? maybe we should create a new vector class that + // read directly from C arrays? // @TODO use auto_ptr? 
- PartitionMetadataImpl(const rd_kafka_metadata_partition_t *partition_metadata) - :partition_metadata_(partition_metadata) { + PartitionMetadataImpl( + const rd_kafka_metadata_partition_t *partition_metadata) : + partition_metadata_(partition_metadata) { replicas_.reserve(partition_metadata->replica_cnt); - for(int i=0;ireplica_cnt;++i) + for (int i = 0; i < partition_metadata->replica_cnt; ++i) replicas_.push_back(partition_metadata->replicas[i]); isrs_.reserve(partition_metadata->isr_cnt); - for(int i=0;iisr_cnt;++i) + for (int i = 0; i < partition_metadata->isr_cnt; ++i) isrs_.push_back(partition_metadata->isrs[i]); } - int32_t id() const { + int32_t id() const { return partition_metadata_->id; } - int32_t leader() const { + int32_t leader() const { return partition_metadata_->leader; } - ErrorCode err() const { + ErrorCode err() const { return static_cast(partition_metadata_->err); } - const std::vector *replicas() const {return &replicas_;} - const std::vector *isrs() const {return &isrs_;} + const std::vector *replicas() const { + return &replicas_; + } + const std::vector *isrs() const { + return &isrs_; + } ~PartitionMetadataImpl() {}; private: const rd_kafka_metadata_partition_t *partition_metadata_; - std::vector replicas_,isrs_; + std::vector replicas_, isrs_; }; /** * Metadata: Topic information handler */ -class TopicMetadataImpl : public TopicMetadata{ +class TopicMetadataImpl : public TopicMetadata { public: - TopicMetadataImpl(const rd_kafka_metadata_topic_t *topic_metadata) - :topic_metadata_(topic_metadata),topic_(topic_metadata->topic) { + TopicMetadataImpl(const rd_kafka_metadata_topic_t *topic_metadata) : + topic_metadata_(topic_metadata), topic_(topic_metadata->topic) { partitions_.reserve(topic_metadata->partition_cnt); - for(int i=0;ipartition_cnt;++i) + for (int i = 0; i < topic_metadata->partition_cnt; ++i) partitions_.push_back( - new PartitionMetadataImpl(&topic_metadata->partitions[i]) - ); + new 
PartitionMetadataImpl(&topic_metadata->partitions[i])); } - ~TopicMetadataImpl(){ - for(size_t i=0;i *partitions() const { return &partitions_; } - ErrorCode err() const {return static_cast(topic_metadata_->err);} + ErrorCode err() const { + return static_cast(topic_metadata_->err); + } private: const rd_kafka_metadata_topic_t *topic_metadata_; const std::string topic_; std::vector partitions_; - }; -MetadataImpl::MetadataImpl(const rd_kafka_metadata_t *metadata) -:metadata_(metadata) -{ +MetadataImpl::MetadataImpl(const rd_kafka_metadata_t *metadata) : + metadata_(metadata) { brokers_.reserve(metadata->broker_cnt); - for(int i=0;ibroker_cnt;++i) + for (int i = 0; i < metadata->broker_cnt; ++i) brokers_.push_back(new BrokerMetadataImpl(&metadata->brokers[i])); topics_.reserve(metadata->topic_cnt); - for(int i=0;itopic_cnt;++i) + for (int i = 0; i < metadata->topic_cnt; ++i) topics_.push_back(new TopicMetadataImpl(&metadata->topics[i])); - } MetadataImpl::~MetadataImpl() { - for(size_t i=0;i(opaque); RdKafka::MessageImpl message(RD_KAFKA_PRODUCER, NULL, (rd_kafka_message_t *)rkmessage, false); @@ -50,12 +48,13 @@ static void dr_msg_cb_trampoline (rd_kafka_t *rk, -RdKafka::Producer *RdKafka::Producer::create (const RdKafka::Conf *conf, - std::string &errstr) { +RdKafka::Producer *RdKafka::Producer::create(const RdKafka::Conf *conf, + std::string &errstr) { char errbuf[512]; - const RdKafka::ConfImpl *confimpl = dynamic_cast(conf); + const RdKafka::ConfImpl *confimpl = + dynamic_cast(conf); RdKafka::ProducerImpl *rkp = new RdKafka::ProducerImpl(); - rd_kafka_conf_t *rk_conf = NULL; + rd_kafka_conf_t *rk_conf = NULL; if (confimpl) { if (!confimpl->rk_conf_) { @@ -76,8 +75,8 @@ RdKafka::Producer *RdKafka::Producer::create (const RdKafka::Conf *conf, rd_kafka_t *rk; - if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, rk_conf, - errbuf, sizeof(errbuf)))) { + if (!(rk = + rd_kafka_new(RD_KAFKA_PRODUCER, rk_conf, errbuf, sizeof(errbuf)))) { errstr = errbuf; // rd_kafka_new() takes 
ownership only if succeeds if (rk_conf) @@ -92,16 +91,16 @@ RdKafka::Producer *RdKafka::Producer::create (const RdKafka::Conf *conf, } -RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic, - int32_t partition, - int msgflags, - void *payload, size_t len, - const std::string *key, - void *msg_opaque) { +RdKafka::ErrorCode RdKafka::ProducerImpl::produce(RdKafka::Topic *topic, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const std::string *key, + void *msg_opaque) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); - if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, - payload, len, + if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, payload, len, key ? key->c_str() : NULL, key ? key->size() : 0, msg_opaque) == -1) return static_cast(rd_kafka_last_error()); @@ -110,91 +109,83 @@ RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic, } -RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic, - int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, - size_t key_len, - void *msg_opaque) { +RdKafka::ErrorCode RdKafka::ProducerImpl::produce(RdKafka::Topic *topic, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + void *msg_opaque) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); - if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, - payload, len, key, key_len, - msg_opaque) == -1) + if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, payload, len, key, + key_len, msg_opaque) == -1) return static_cast(rd_kafka_last_error()); return RdKafka::ERR_NO_ERROR; } -RdKafka::ErrorCode -RdKafka::ProducerImpl::produce (RdKafka::Topic *topic, - int32_t partition, - const std::vector *payload, - const std::vector *key, - void *msg_opaque) { +RdKafka::ErrorCode RdKafka::ProducerImpl::produce( + RdKafka::Topic *topic, + int32_t partition, + const std::vector *payload, + const 
std::vector *key, + void *msg_opaque) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); if (rd_kafka_produce(topicimpl->rkt_, partition, RD_KAFKA_MSG_F_COPY, payload ? (void *)&(*payload)[0] : NULL, - payload ? payload->size() : 0, - key ? &(*key)[0] : NULL, key ? key->size() : 0, - msg_opaque) == -1) + payload ? payload->size() : 0, key ? &(*key)[0] : NULL, + key ? key->size() : 0, msg_opaque) == -1) return static_cast(rd_kafka_last_error()); return RdKafka::ERR_NO_ERROR; - } -RdKafka::ErrorCode -RdKafka::ProducerImpl::produce (const std::string topic_name, - int32_t partition, int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, void *msg_opaque) { - return - static_cast - ( - rd_kafka_producev(rk_, - RD_KAFKA_V_TOPIC(topic_name.c_str()), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_MSGFLAGS(msgflags), - RD_KAFKA_V_VALUE(payload, len), - RD_KAFKA_V_KEY(key, key_len), - RD_KAFKA_V_TIMESTAMP(timestamp), - RD_KAFKA_V_OPAQUE(msg_opaque), - RD_KAFKA_V_END) - ); +RdKafka::ErrorCode RdKafka::ProducerImpl::produce(const std::string topic_name, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + int64_t timestamp, + void *msg_opaque) { + return static_cast(rd_kafka_producev( + rk_, RD_KAFKA_V_TOPIC(topic_name.c_str()), + RD_KAFKA_V_PARTITION(partition), RD_KAFKA_V_MSGFLAGS(msgflags), + RD_KAFKA_V_VALUE(payload, len), RD_KAFKA_V_KEY(key, key_len), + RD_KAFKA_V_TIMESTAMP(timestamp), RD_KAFKA_V_OPAQUE(msg_opaque), + RD_KAFKA_V_END)); } -RdKafka::ErrorCode -RdKafka::ProducerImpl::produce (const std::string topic_name, - int32_t partition, int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, - RdKafka::Headers *headers, - void *msg_opaque) { - rd_kafka_headers_t *hdrs = NULL; +RdKafka::ErrorCode RdKafka::ProducerImpl::produce(const std::string topic_name, + int32_t partition, + int msgflags, + void *payload, + size_t len, + 
const void *key, + size_t key_len, + int64_t timestamp, + RdKafka::Headers *headers, + void *msg_opaque) { + rd_kafka_headers_t *hdrs = NULL; RdKafka::HeadersImpl *headersimpl = NULL; rd_kafka_resp_err_t err; if (headers) { - headersimpl = static_cast(headers); - hdrs = headersimpl->c_ptr(); + headersimpl = static_cast(headers); + hdrs = headersimpl->c_ptr(); } - err = rd_kafka_producev(rk_, - RD_KAFKA_V_TOPIC(topic_name.c_str()), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_MSGFLAGS(msgflags), - RD_KAFKA_V_VALUE(payload, len), - RD_KAFKA_V_KEY(key, key_len), - RD_KAFKA_V_TIMESTAMP(timestamp), - RD_KAFKA_V_OPAQUE(msg_opaque), - RD_KAFKA_V_HEADERS(hdrs), - RD_KAFKA_V_END); + err = rd_kafka_producev( + rk_, RD_KAFKA_V_TOPIC(topic_name.c_str()), + RD_KAFKA_V_PARTITION(partition), RD_KAFKA_V_MSGFLAGS(msgflags), + RD_KAFKA_V_VALUE(payload, len), RD_KAFKA_V_KEY(key, key_len), + RD_KAFKA_V_TIMESTAMP(timestamp), RD_KAFKA_V_OPAQUE(msg_opaque), + RD_KAFKA_V_HEADERS(hdrs), RD_KAFKA_V_END); if (!err && headersimpl) { /* A successful producev() call will destroy the C headers. 
*/ diff --git a/src-cpp/QueueImpl.cpp b/src-cpp/QueueImpl.cpp index 8499dfccb7..19ebce9d68 100644 --- a/src-cpp/QueueImpl.cpp +++ b/src-cpp/QueueImpl.cpp @@ -30,16 +30,15 @@ #include "rdkafkacpp_int.h" -RdKafka::Queue::~Queue () { - +RdKafka::Queue::~Queue() { } -RdKafka::Queue *RdKafka::Queue::create (Handle *base) { - return new RdKafka::QueueImpl(rd_kafka_queue_new(dynamic_cast(base)->rk_)); +RdKafka::Queue *RdKafka::Queue::create(Handle *base) { + return new RdKafka::QueueImpl( + rd_kafka_queue_new(dynamic_cast(base)->rk_)); } -RdKafka::ErrorCode -RdKafka::QueueImpl::forward (Queue *queue) { +RdKafka::ErrorCode RdKafka::QueueImpl::forward(Queue *queue) { if (!queue) { rd_kafka_queue_forward(queue_, NULL); } else { @@ -49,7 +48,7 @@ RdKafka::QueueImpl::forward (Queue *queue) { return RdKafka::ERR_NO_ERROR; } -RdKafka::Message *RdKafka::QueueImpl::consume (int timeout_ms) { +RdKafka::Message *RdKafka::QueueImpl::consume(int timeout_ms) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consume_queue(queue_, timeout_ms); @@ -60,11 +59,12 @@ RdKafka::Message *RdKafka::QueueImpl::consume (int timeout_ms) { return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, rkmessage); } -int RdKafka::QueueImpl::poll (int timeout_ms) { - return rd_kafka_queue_poll_callback(queue_, timeout_ms); +int RdKafka::QueueImpl::poll(int timeout_ms) { + return rd_kafka_queue_poll_callback(queue_, timeout_ms); } -void RdKafka::QueueImpl::io_event_enable (int fd, const void *payload, - size_t size) { - rd_kafka_queue_io_event_enable(queue_, fd, payload, size); +void RdKafka::QueueImpl::io_event_enable(int fd, + const void *payload, + size_t size) { + rd_kafka_queue_io_event_enable(queue_, fd, payload, size); } diff --git a/src-cpp/RdKafka.cpp b/src-cpp/RdKafka.cpp index 75ba69c01c..b6cb33c288 100644 --- a/src-cpp/RdKafka.cpp +++ b/src-cpp/RdKafka.cpp @@ -30,30 +30,30 @@ #include "rdkafkacpp_int.h" -int RdKafka::version () { +int RdKafka::version() { return rd_kafka_version(); } -std::string 
RdKafka::version_str () { +std::string RdKafka::version_str() { return std::string(rd_kafka_version_str()); } std::string RdKafka::get_debug_contexts() { - return std::string(RD_KAFKA_DEBUG_CONTEXTS); + return std::string(RD_KAFKA_DEBUG_CONTEXTS); } -std::string RdKafka::err2str (RdKafka::ErrorCode err) { +std::string RdKafka::err2str(RdKafka::ErrorCode err) { return std::string(rd_kafka_err2str(static_cast(err))); } -int RdKafka::wait_destroyed (int timeout_ms) { +int RdKafka::wait_destroyed(int timeout_ms) { return rd_kafka_wait_destroyed(timeout_ms); } -void *RdKafka::mem_malloc (size_t size) { +void *RdKafka::mem_malloc(size_t size) { return rd_kafka_mem_malloc(NULL, size); } -void RdKafka::mem_free (void *ptr) { +void RdKafka::mem_free(void *ptr) { rd_kafka_mem_free(NULL, ptr); } diff --git a/src-cpp/TopicImpl.cpp b/src-cpp/TopicImpl.cpp index e83505434a..bf9734df94 100644 --- a/src-cpp/TopicImpl.cpp +++ b/src-cpp/TopicImpl.cpp @@ -43,45 +43,43 @@ const int64_t RdKafka::Topic::OFFSET_STORED = RD_KAFKA_OFFSET_STORED; const int64_t RdKafka::Topic::OFFSET_INVALID = RD_KAFKA_OFFSET_INVALID; -RdKafka::Topic::~Topic () { - +RdKafka::Topic::~Topic() { } -static int32_t partitioner_cb_trampoline (const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +static int32_t partitioner_cb_trampoline(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { RdKafka::TopicImpl *topicimpl = static_cast(rkt_opaque); std::string key(static_cast(keydata), keylen); return topicimpl->partitioner_cb_->partitioner_cb(topicimpl, &key, partition_cnt, msg_opaque); } -static int32_t partitioner_kp_cb_trampoline (const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +static int32_t partitioner_kp_cb_trampoline(const rd_kafka_topic_t *rkt, + const void 
*keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { RdKafka::TopicImpl *topicimpl = static_cast(rkt_opaque); - return topicimpl->partitioner_kp_cb_->partitioner_cb(topicimpl, - keydata, keylen, - partition_cnt, - msg_opaque); + return topicimpl->partitioner_kp_cb_->partitioner_cb( + topicimpl, keydata, keylen, partition_cnt, msg_opaque); } -RdKafka::Topic *RdKafka::Topic::create (Handle *base, - const std::string &topic_str, - const Conf *conf, - std::string &errstr) { - const RdKafka::ConfImpl *confimpl = static_cast(conf); +RdKafka::Topic *RdKafka::Topic::create(Handle *base, + const std::string &topic_str, + const Conf *conf, + std::string &errstr) { + const RdKafka::ConfImpl *confimpl = + static_cast(conf); rd_kafka_topic_t *rkt; rd_kafka_topic_conf_t *rkt_conf; - rd_kafka_t *rk = dynamic_cast(base)->rk_; + rd_kafka_t *rk = dynamic_cast(base)->rk_; RdKafka::TopicImpl *topic = new RdKafka::TopicImpl(); @@ -123,6 +121,4 @@ RdKafka::Topic *RdKafka::Topic::create (Handle *base, topic->rkt_ = rkt; return topic; - } - diff --git a/src-cpp/TopicPartitionImpl.cpp b/src-cpp/TopicPartitionImpl.cpp index 71a688ce80..90ef820bf6 100644 --- a/src-cpp/TopicPartitionImpl.cpp +++ b/src-cpp/TopicPartitionImpl.cpp @@ -32,24 +32,26 @@ #include "rdkafkacpp_int.h" -RdKafka::TopicPartition::~TopicPartition () { +RdKafka::TopicPartition::~TopicPartition() { } -RdKafka::TopicPartition * -RdKafka::TopicPartition::create (const std::string &topic, int partition) { +RdKafka::TopicPartition *RdKafka::TopicPartition::create( + const std::string &topic, + int partition) { return new TopicPartitionImpl(topic, partition); } -RdKafka::TopicPartition * -RdKafka::TopicPartition::create (const std::string &topic, int partition, - int64_t offset) { +RdKafka::TopicPartition *RdKafka::TopicPartition::create( + const std::string &topic, + int partition, + int64_t offset) { return new TopicPartitionImpl(topic, partition, offset); } -void 
-RdKafka::TopicPartition::destroy (std::vector &partitions) { - for (std::vector::iterator it = partitions.begin() ; +void RdKafka::TopicPartition::destroy( + std::vector &partitions) { + for (std::vector::iterator it = partitions.begin(); it != partitions.end(); ++it) - delete(*it); + delete (*it); partitions.clear(); } diff --git a/src-cpp/rdkafkacpp.h b/src-cpp/rdkafkacpp.h index fbd77a06f7..6d7d136302 100644 --- a/src-cpp/rdkafkacpp.h +++ b/src-cpp/rdkafkacpp.h @@ -82,12 +82,12 @@ typedef SSIZE_T ssize_t; /**@endcond*/ extern "C" { - /* Forward declarations */ - struct rd_kafka_s; - struct rd_kafka_topic_s; - struct rd_kafka_message_s; - struct rd_kafka_conf_s; - struct rd_kafka_topic_conf_s; +/* Forward declarations */ +struct rd_kafka_s; +struct rd_kafka_topic_s; +struct rd_kafka_message_s; +struct rd_kafka_conf_s; +struct rd_kafka_topic_conf_s; } namespace RdKafka { @@ -111,7 +111,7 @@ namespace RdKafka { * @remark This value should only be used during compile time, * for runtime checks of version use RdKafka::version() */ -#define RD_KAFKA_VERSION 0x010802ff +#define RD_KAFKA_VERSION 0x010802ff /** * @brief Returns the librdkafka version as integer. @@ -119,13 +119,13 @@ namespace RdKafka { * @sa See RD_KAFKA_VERSION for how to parse the integer format. */ RD_EXPORT -int version (); +int version(); /** * @brief Returns the librdkafka version as string. */ RD_EXPORT -std::string version_str(); +std::string version_str(); /** * @brief Returns a CSV list of the supported debug contexts @@ -144,7 +144,7 @@ std::string get_debug_contexts(); * a clean shutdown is required. */ RD_EXPORT -int wait_destroyed(int timeout_ms); +int wait_destroyed(int timeout_ms); /** * @brief Allocate memory using the same allocator librdkafka uses. @@ -157,7 +157,7 @@ int wait_destroyed(int timeout_ms); * mem_free(). 
*/ RD_EXPORT -void *mem_malloc (size_t size); +void *mem_malloc(size_t size); /** * @brief Free pointer returned by librdkafka @@ -173,7 +173,7 @@ void *mem_malloc (size_t size); * that explicitly mention using this function for freeing. */ RD_EXPORT -void mem_free (void *ptr); +void mem_free(void *ptr); /**@}*/ @@ -198,350 +198,350 @@ void mem_free (void *ptr); * @sa Use RdKafka::err2str() to translate an error code a human readable string */ enum ErrorCode { - /* Internal errors to rdkafka: */ - /** Begin internal error codes */ - ERR__BEGIN = -200, - /** Received message is incorrect */ - ERR__BAD_MSG = -199, - /** Bad/unknown compression */ - ERR__BAD_COMPRESSION = -198, - /** Broker is going away */ - ERR__DESTROY = -197, - /** Generic failure */ - ERR__FAIL = -196, - /** Broker transport failure */ - ERR__TRANSPORT = -195, - /** Critical system resource */ - ERR__CRIT_SYS_RESOURCE = -194, - /** Failed to resolve broker */ - ERR__RESOLVE = -193, - /** Produced message timed out*/ - ERR__MSG_TIMED_OUT = -192, - /** Reached the end of the topic+partition queue on - * the broker. Not really an error. - * This event is disabled by default, - * see the `enable.partition.eof` configuration property. */ - ERR__PARTITION_EOF = -191, - /** Permanent: Partition does not exist in cluster. */ - ERR__UNKNOWN_PARTITION = -190, - /** File or filesystem error */ - ERR__FS = -189, - /** Permanent: Topic does not exist in cluster. */ - ERR__UNKNOWN_TOPIC = -188, - /** All broker connections are down. */ - ERR__ALL_BROKERS_DOWN = -187, - /** Invalid argument, or invalid configuration */ - ERR__INVALID_ARG = -186, - /** Operation timed out */ - ERR__TIMED_OUT = -185, - /** Queue is full */ - ERR__QUEUE_FULL = -184, - /** ISR count < required.acks */ - ERR__ISR_INSUFF = -183, - /** Broker node update */ - ERR__NODE_UPDATE = -182, - /** SSL error */ - ERR__SSL = -181, - /** Waiting for coordinator to become available. 
*/ - ERR__WAIT_COORD = -180, - /** Unknown client group */ - ERR__UNKNOWN_GROUP = -179, - /** Operation in progress */ - ERR__IN_PROGRESS = -178, - /** Previous operation in progress, wait for it to finish. */ - ERR__PREV_IN_PROGRESS = -177, - /** This operation would interfere with an existing subscription */ - ERR__EXISTING_SUBSCRIPTION = -176, - /** Assigned partitions (rebalance_cb) */ - ERR__ASSIGN_PARTITIONS = -175, - /** Revoked partitions (rebalance_cb) */ - ERR__REVOKE_PARTITIONS = -174, - /** Conflicting use */ - ERR__CONFLICT = -173, - /** Wrong state */ - ERR__STATE = -172, - /** Unknown protocol */ - ERR__UNKNOWN_PROTOCOL = -171, - /** Not implemented */ - ERR__NOT_IMPLEMENTED = -170, - /** Authentication failure*/ - ERR__AUTHENTICATION = -169, - /** No stored offset */ - ERR__NO_OFFSET = -168, - /** Outdated */ - ERR__OUTDATED = -167, - /** Timed out in queue */ - ERR__TIMED_OUT_QUEUE = -166, - /** Feature not supported by broker */ - ERR__UNSUPPORTED_FEATURE = -165, - /** Awaiting cache update */ - ERR__WAIT_CACHE = -164, - /** Operation interrupted */ - ERR__INTR = -163, - /** Key serialization error */ - ERR__KEY_SERIALIZATION = -162, - /** Value serialization error */ - ERR__VALUE_SERIALIZATION = -161, - /** Key deserialization error */ - ERR__KEY_DESERIALIZATION = -160, - /** Value deserialization error */ - ERR__VALUE_DESERIALIZATION = -159, - /** Partial response */ - ERR__PARTIAL = -158, - /** Modification attempted on read-only object */ - ERR__READ_ONLY = -157, - /** No such entry / item not found */ - ERR__NOENT = -156, - /** Read underflow */ - ERR__UNDERFLOW = -155, - /** Invalid type */ - ERR__INVALID_TYPE = -154, - /** Retry operation */ - ERR__RETRY = -153, - /** Purged in queue */ - ERR__PURGE_QUEUE = -152, - /** Purged in flight */ - ERR__PURGE_INFLIGHT = -151, - /** Fatal error: see RdKafka::Handle::fatal_error() */ - ERR__FATAL = -150, - /** Inconsistent state */ - ERR__INCONSISTENT = -149, - /** Gap-less ordering would not be 
guaranteed if proceeding */ - ERR__GAPLESS_GUARANTEE = -148, - /** Maximum poll interval exceeded */ - ERR__MAX_POLL_EXCEEDED = -147, - /** Unknown broker */ - ERR__UNKNOWN_BROKER = -146, - /** Functionality not configured */ - ERR__NOT_CONFIGURED = -145, - /** Instance has been fenced */ - ERR__FENCED = -144, - /** Application generated error */ - ERR__APPLICATION = -143, - /** Assignment lost */ - ERR__ASSIGNMENT_LOST = -142, - /** No operation performed */ - ERR__NOOP = -141, - /** No offset to automatically reset to */ - ERR__AUTO_OFFSET_RESET = -140, - - /** End internal error codes */ - ERR__END = -100, - - /* Kafka broker errors: */ - /** Unknown broker error */ - ERR_UNKNOWN = -1, - /** Success */ - ERR_NO_ERROR = 0, - /** Offset out of range */ - ERR_OFFSET_OUT_OF_RANGE = 1, - /** Invalid message */ - ERR_INVALID_MSG = 2, - /** Unknown topic or partition */ - ERR_UNKNOWN_TOPIC_OR_PART = 3, - /** Invalid message size */ - ERR_INVALID_MSG_SIZE = 4, - /** Leader not available */ - ERR_LEADER_NOT_AVAILABLE = 5, - /** Not leader for partition */ - ERR_NOT_LEADER_FOR_PARTITION = 6, - /** Request timed out */ - ERR_REQUEST_TIMED_OUT = 7, - /** Broker not available */ - ERR_BROKER_NOT_AVAILABLE = 8, - /** Replica not available */ - ERR_REPLICA_NOT_AVAILABLE = 9, - /** Message size too large */ - ERR_MSG_SIZE_TOO_LARGE = 10, - /** StaleControllerEpochCode */ - ERR_STALE_CTRL_EPOCH = 11, - /** Offset metadata string too large */ - ERR_OFFSET_METADATA_TOO_LARGE = 12, - /** Broker disconnected before response received */ - ERR_NETWORK_EXCEPTION = 13, - /** Coordinator load in progress */ - ERR_COORDINATOR_LOAD_IN_PROGRESS = 14, - /** Group coordinator load in progress */ -#define ERR_GROUP_LOAD_IN_PROGRESS ERR_COORDINATOR_LOAD_IN_PROGRESS - /** Coordinator not available */ - ERR_COORDINATOR_NOT_AVAILABLE = 15, - /** Group coordinator not available */ -#define ERR_GROUP_COORDINATOR_NOT_AVAILABLE ERR_COORDINATOR_NOT_AVAILABLE - /** Not coordinator */ - 
ERR_NOT_COORDINATOR = 16, - /** Not coordinator for group */ -#define ERR_NOT_COORDINATOR_FOR_GROUP ERR_NOT_COORDINATOR - /** Invalid topic */ - ERR_TOPIC_EXCEPTION = 17, - /** Message batch larger than configured server segment size */ - ERR_RECORD_LIST_TOO_LARGE = 18, - /** Not enough in-sync replicas */ - ERR_NOT_ENOUGH_REPLICAS = 19, - /** Message(s) written to insufficient number of in-sync replicas */ - ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, - /** Invalid required acks value */ - ERR_INVALID_REQUIRED_ACKS = 21, - /** Specified group generation id is not valid */ - ERR_ILLEGAL_GENERATION = 22, - /** Inconsistent group protocol */ - ERR_INCONSISTENT_GROUP_PROTOCOL = 23, - /** Invalid group.id */ - ERR_INVALID_GROUP_ID = 24, - /** Unknown member */ - ERR_UNKNOWN_MEMBER_ID = 25, - /** Invalid session timeout */ - ERR_INVALID_SESSION_TIMEOUT = 26, - /** Group rebalance in progress */ - ERR_REBALANCE_IN_PROGRESS = 27, - /** Commit offset data size is not valid */ - ERR_INVALID_COMMIT_OFFSET_SIZE = 28, - /** Topic authorization failed */ - ERR_TOPIC_AUTHORIZATION_FAILED = 29, - /** Group authorization failed */ - ERR_GROUP_AUTHORIZATION_FAILED = 30, - /** Cluster authorization failed */ - ERR_CLUSTER_AUTHORIZATION_FAILED = 31, - /** Invalid timestamp */ - ERR_INVALID_TIMESTAMP = 32, - /** Unsupported SASL mechanism */ - ERR_UNSUPPORTED_SASL_MECHANISM = 33, - /** Illegal SASL state */ - ERR_ILLEGAL_SASL_STATE = 34, - /** Unuspported version */ - ERR_UNSUPPORTED_VERSION = 35, - /** Topic already exists */ - ERR_TOPIC_ALREADY_EXISTS = 36, - /** Invalid number of partitions */ - ERR_INVALID_PARTITIONS = 37, - /** Invalid replication factor */ - ERR_INVALID_REPLICATION_FACTOR = 38, - /** Invalid replica assignment */ - ERR_INVALID_REPLICA_ASSIGNMENT = 39, - /** Invalid config */ - ERR_INVALID_CONFIG = 40, - /** Not controller for cluster */ - ERR_NOT_CONTROLLER = 41, - /** Invalid request */ - ERR_INVALID_REQUEST = 42, - /** Message format on broker does not support 
request */ - ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, - /** Policy violation */ - ERR_POLICY_VIOLATION = 44, - /** Broker received an out of order sequence number */ - ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45, - /** Broker received a duplicate sequence number */ - ERR_DUPLICATE_SEQUENCE_NUMBER = 46, - /** Producer attempted an operation with an old epoch */ - ERR_INVALID_PRODUCER_EPOCH = 47, - /** Producer attempted a transactional operation in an invalid state */ - ERR_INVALID_TXN_STATE = 48, - /** Producer attempted to use a producer id which is not - * currently assigned to its transactional id */ - ERR_INVALID_PRODUCER_ID_MAPPING = 49, - /** Transaction timeout is larger than the maximum - * value allowed by the broker's max.transaction.timeout.ms */ - ERR_INVALID_TRANSACTION_TIMEOUT = 50, - /** Producer attempted to update a transaction while another - * concurrent operation on the same transaction was ongoing */ - ERR_CONCURRENT_TRANSACTIONS = 51, - /** Indicates that the transaction coordinator sending a - * WriteTxnMarker is no longer the current coordinator for a - * given producer */ - ERR_TRANSACTION_COORDINATOR_FENCED = 52, - /** Transactional Id authorization failed */ - ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53, - /** Security features are disabled */ - ERR_SECURITY_DISABLED = 54, - /** Operation not attempted */ - ERR_OPERATION_NOT_ATTEMPTED = 55, - /** Disk error when trying to access log file on the disk */ - ERR_KAFKA_STORAGE_ERROR = 56, - /** The user-specified log directory is not found in the broker config */ - ERR_LOG_DIR_NOT_FOUND = 57, - /** SASL Authentication failed */ - ERR_SASL_AUTHENTICATION_FAILED = 58, - /** Unknown Producer Id */ - ERR_UNKNOWN_PRODUCER_ID = 59, - /** Partition reassignment is in progress */ - ERR_REASSIGNMENT_IN_PROGRESS = 60, - /** Delegation Token feature is not enabled */ - ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61, - /** Delegation Token is not found on server */ - ERR_DELEGATION_TOKEN_NOT_FOUND = 62, - /** 
Specified Principal is not valid Owner/Renewer */ - ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63, - /** Delegation Token requests are not allowed on this connection */ - ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64, - /** Delegation Token authorization failed */ - ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65, - /** Delegation Token is expired */ - ERR_DELEGATION_TOKEN_EXPIRED = 66, - /** Supplied principalType is not supported */ - ERR_INVALID_PRINCIPAL_TYPE = 67, - /** The group is not empty */ - ERR_NON_EMPTY_GROUP = 68, - /** The group id does not exist */ - ERR_GROUP_ID_NOT_FOUND = 69, - /** The fetch session ID was not found */ - ERR_FETCH_SESSION_ID_NOT_FOUND = 70, - /** The fetch session epoch is invalid */ - ERR_INVALID_FETCH_SESSION_EPOCH = 71, - /** No matching listener */ - ERR_LISTENER_NOT_FOUND = 72, - /** Topic deletion is disabled */ - ERR_TOPIC_DELETION_DISABLED = 73, - /** Leader epoch is older than broker epoch */ - ERR_FENCED_LEADER_EPOCH = 74, - /** Leader epoch is newer than broker epoch */ - ERR_UNKNOWN_LEADER_EPOCH = 75, - /** Unsupported compression type */ - ERR_UNSUPPORTED_COMPRESSION_TYPE = 76, - /** Broker epoch has changed */ - ERR_STALE_BROKER_EPOCH = 77, - /** Leader high watermark is not caught up */ - ERR_OFFSET_NOT_AVAILABLE = 78, - /** Group member needs a valid member ID */ - ERR_MEMBER_ID_REQUIRED = 79, - /** Preferred leader was not available */ - ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80, - /** Consumer group has reached maximum size */ - ERR_GROUP_MAX_SIZE_REACHED = 81, - /** Static consumer fenced by other consumer with same - * group.instance.id. 
*/ - ERR_FENCED_INSTANCE_ID = 82, - /** Eligible partition leaders are not available */ - ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83, - /** Leader election not needed for topic partition */ - ERR_ELECTION_NOT_NEEDED = 84, - /** No partition reassignment is in progress */ - ERR_NO_REASSIGNMENT_IN_PROGRESS = 85, - /** Deleting offsets of a topic while the consumer group is - * subscribed to it */ - ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86, - /** Broker failed to validate record */ - ERR_INVALID_RECORD = 87, - /** There are unstable offsets that need to be cleared */ - ERR_UNSTABLE_OFFSET_COMMIT = 88, - /** Throttling quota has been exceeded */ - ERR_THROTTLING_QUOTA_EXCEEDED = 89, - /** There is a newer producer with the same transactionalId - * which fences the current one */ - ERR_PRODUCER_FENCED = 90, - /** Request illegally referred to resource that does not exist */ - ERR_RESOURCE_NOT_FOUND = 91, - /** Request illegally referred to the same resource twice */ - ERR_DUPLICATE_RESOURCE = 92, - /** Requested credential would not meet criteria for acceptability */ - ERR_UNACCEPTABLE_CREDENTIAL = 93, - /** Indicates that the either the sender or recipient of a - * voter-only request is not one of the expected voters */ - ERR_INCONSISTENT_VOTER_SET = 94, - /** Invalid update version */ - ERR_INVALID_UPDATE_VERSION = 95, - /** Unable to update finalized features due to server error */ - ERR_FEATURE_UPDATE_FAILED = 96, - /** Request principal deserialization failed during forwarding */ - ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97 + /* Internal errors to rdkafka: */ + /** Begin internal error codes */ + ERR__BEGIN = -200, + /** Received message is incorrect */ + ERR__BAD_MSG = -199, + /** Bad/unknown compression */ + ERR__BAD_COMPRESSION = -198, + /** Broker is going away */ + ERR__DESTROY = -197, + /** Generic failure */ + ERR__FAIL = -196, + /** Broker transport failure */ + ERR__TRANSPORT = -195, + /** Critical system resource */ + ERR__CRIT_SYS_RESOURCE = -194, + /** Failed to 
resolve broker */ + ERR__RESOLVE = -193, + /** Produced message timed out*/ + ERR__MSG_TIMED_OUT = -192, + /** Reached the end of the topic+partition queue on + * the broker. Not really an error. + * This event is disabled by default, + * see the `enable.partition.eof` configuration property. */ + ERR__PARTITION_EOF = -191, + /** Permanent: Partition does not exist in cluster. */ + ERR__UNKNOWN_PARTITION = -190, + /** File or filesystem error */ + ERR__FS = -189, + /** Permanent: Topic does not exist in cluster. */ + ERR__UNKNOWN_TOPIC = -188, + /** All broker connections are down. */ + ERR__ALL_BROKERS_DOWN = -187, + /** Invalid argument, or invalid configuration */ + ERR__INVALID_ARG = -186, + /** Operation timed out */ + ERR__TIMED_OUT = -185, + /** Queue is full */ + ERR__QUEUE_FULL = -184, + /** ISR count < required.acks */ + ERR__ISR_INSUFF = -183, + /** Broker node update */ + ERR__NODE_UPDATE = -182, + /** SSL error */ + ERR__SSL = -181, + /** Waiting for coordinator to become available. */ + ERR__WAIT_COORD = -180, + /** Unknown client group */ + ERR__UNKNOWN_GROUP = -179, + /** Operation in progress */ + ERR__IN_PROGRESS = -178, + /** Previous operation in progress, wait for it to finish. 
*/ + ERR__PREV_IN_PROGRESS = -177, + /** This operation would interfere with an existing subscription */ + ERR__EXISTING_SUBSCRIPTION = -176, + /** Assigned partitions (rebalance_cb) */ + ERR__ASSIGN_PARTITIONS = -175, + /** Revoked partitions (rebalance_cb) */ + ERR__REVOKE_PARTITIONS = -174, + /** Conflicting use */ + ERR__CONFLICT = -173, + /** Wrong state */ + ERR__STATE = -172, + /** Unknown protocol */ + ERR__UNKNOWN_PROTOCOL = -171, + /** Not implemented */ + ERR__NOT_IMPLEMENTED = -170, + /** Authentication failure*/ + ERR__AUTHENTICATION = -169, + /** No stored offset */ + ERR__NO_OFFSET = -168, + /** Outdated */ + ERR__OUTDATED = -167, + /** Timed out in queue */ + ERR__TIMED_OUT_QUEUE = -166, + /** Feature not supported by broker */ + ERR__UNSUPPORTED_FEATURE = -165, + /** Awaiting cache update */ + ERR__WAIT_CACHE = -164, + /** Operation interrupted */ + ERR__INTR = -163, + /** Key serialization error */ + ERR__KEY_SERIALIZATION = -162, + /** Value serialization error */ + ERR__VALUE_SERIALIZATION = -161, + /** Key deserialization error */ + ERR__KEY_DESERIALIZATION = -160, + /** Value deserialization error */ + ERR__VALUE_DESERIALIZATION = -159, + /** Partial response */ + ERR__PARTIAL = -158, + /** Modification attempted on read-only object */ + ERR__READ_ONLY = -157, + /** No such entry / item not found */ + ERR__NOENT = -156, + /** Read underflow */ + ERR__UNDERFLOW = -155, + /** Invalid type */ + ERR__INVALID_TYPE = -154, + /** Retry operation */ + ERR__RETRY = -153, + /** Purged in queue */ + ERR__PURGE_QUEUE = -152, + /** Purged in flight */ + ERR__PURGE_INFLIGHT = -151, + /** Fatal error: see RdKafka::Handle::fatal_error() */ + ERR__FATAL = -150, + /** Inconsistent state */ + ERR__INCONSISTENT = -149, + /** Gap-less ordering would not be guaranteed if proceeding */ + ERR__GAPLESS_GUARANTEE = -148, + /** Maximum poll interval exceeded */ + ERR__MAX_POLL_EXCEEDED = -147, + /** Unknown broker */ + ERR__UNKNOWN_BROKER = -146, + /** Functionality not 
configured */ + ERR__NOT_CONFIGURED = -145, + /** Instance has been fenced */ + ERR__FENCED = -144, + /** Application generated error */ + ERR__APPLICATION = -143, + /** Assignment lost */ + ERR__ASSIGNMENT_LOST = -142, + /** No operation performed */ + ERR__NOOP = -141, + /** No offset to automatically reset to */ + ERR__AUTO_OFFSET_RESET = -140, + + /** End internal error codes */ + ERR__END = -100, + + /* Kafka broker errors: */ + /** Unknown broker error */ + ERR_UNKNOWN = -1, + /** Success */ + ERR_NO_ERROR = 0, + /** Offset out of range */ + ERR_OFFSET_OUT_OF_RANGE = 1, + /** Invalid message */ + ERR_INVALID_MSG = 2, + /** Unknown topic or partition */ + ERR_UNKNOWN_TOPIC_OR_PART = 3, + /** Invalid message size */ + ERR_INVALID_MSG_SIZE = 4, + /** Leader not available */ + ERR_LEADER_NOT_AVAILABLE = 5, + /** Not leader for partition */ + ERR_NOT_LEADER_FOR_PARTITION = 6, + /** Request timed out */ + ERR_REQUEST_TIMED_OUT = 7, + /** Broker not available */ + ERR_BROKER_NOT_AVAILABLE = 8, + /** Replica not available */ + ERR_REPLICA_NOT_AVAILABLE = 9, + /** Message size too large */ + ERR_MSG_SIZE_TOO_LARGE = 10, + /** StaleControllerEpochCode */ + ERR_STALE_CTRL_EPOCH = 11, + /** Offset metadata string too large */ + ERR_OFFSET_METADATA_TOO_LARGE = 12, + /** Broker disconnected before response received */ + ERR_NETWORK_EXCEPTION = 13, + /** Coordinator load in progress */ + ERR_COORDINATOR_LOAD_IN_PROGRESS = 14, +/** Group coordinator load in progress */ +#define ERR_GROUP_LOAD_IN_PROGRESS ERR_COORDINATOR_LOAD_IN_PROGRESS + /** Coordinator not available */ + ERR_COORDINATOR_NOT_AVAILABLE = 15, +/** Group coordinator not available */ +#define ERR_GROUP_COORDINATOR_NOT_AVAILABLE ERR_COORDINATOR_NOT_AVAILABLE + /** Not coordinator */ + ERR_NOT_COORDINATOR = 16, +/** Not coordinator for group */ +#define ERR_NOT_COORDINATOR_FOR_GROUP ERR_NOT_COORDINATOR + /** Invalid topic */ + ERR_TOPIC_EXCEPTION = 17, + /** Message batch larger than configured server segment 
size */ + ERR_RECORD_LIST_TOO_LARGE = 18, + /** Not enough in-sync replicas */ + ERR_NOT_ENOUGH_REPLICAS = 19, + /** Message(s) written to insufficient number of in-sync replicas */ + ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, + /** Invalid required acks value */ + ERR_INVALID_REQUIRED_ACKS = 21, + /** Specified group generation id is not valid */ + ERR_ILLEGAL_GENERATION = 22, + /** Inconsistent group protocol */ + ERR_INCONSISTENT_GROUP_PROTOCOL = 23, + /** Invalid group.id */ + ERR_INVALID_GROUP_ID = 24, + /** Unknown member */ + ERR_UNKNOWN_MEMBER_ID = 25, + /** Invalid session timeout */ + ERR_INVALID_SESSION_TIMEOUT = 26, + /** Group rebalance in progress */ + ERR_REBALANCE_IN_PROGRESS = 27, + /** Commit offset data size is not valid */ + ERR_INVALID_COMMIT_OFFSET_SIZE = 28, + /** Topic authorization failed */ + ERR_TOPIC_AUTHORIZATION_FAILED = 29, + /** Group authorization failed */ + ERR_GROUP_AUTHORIZATION_FAILED = 30, + /** Cluster authorization failed */ + ERR_CLUSTER_AUTHORIZATION_FAILED = 31, + /** Invalid timestamp */ + ERR_INVALID_TIMESTAMP = 32, + /** Unsupported SASL mechanism */ + ERR_UNSUPPORTED_SASL_MECHANISM = 33, + /** Illegal SASL state */ + ERR_ILLEGAL_SASL_STATE = 34, + /** Unuspported version */ + ERR_UNSUPPORTED_VERSION = 35, + /** Topic already exists */ + ERR_TOPIC_ALREADY_EXISTS = 36, + /** Invalid number of partitions */ + ERR_INVALID_PARTITIONS = 37, + /** Invalid replication factor */ + ERR_INVALID_REPLICATION_FACTOR = 38, + /** Invalid replica assignment */ + ERR_INVALID_REPLICA_ASSIGNMENT = 39, + /** Invalid config */ + ERR_INVALID_CONFIG = 40, + /** Not controller for cluster */ + ERR_NOT_CONTROLLER = 41, + /** Invalid request */ + ERR_INVALID_REQUEST = 42, + /** Message format on broker does not support request */ + ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, + /** Policy violation */ + ERR_POLICY_VIOLATION = 44, + /** Broker received an out of order sequence number */ + ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45, + /** Broker received 
a duplicate sequence number */ + ERR_DUPLICATE_SEQUENCE_NUMBER = 46, + /** Producer attempted an operation with an old epoch */ + ERR_INVALID_PRODUCER_EPOCH = 47, + /** Producer attempted a transactional operation in an invalid state */ + ERR_INVALID_TXN_STATE = 48, + /** Producer attempted to use a producer id which is not + * currently assigned to its transactional id */ + ERR_INVALID_PRODUCER_ID_MAPPING = 49, + /** Transaction timeout is larger than the maximum + * value allowed by the broker's max.transaction.timeout.ms */ + ERR_INVALID_TRANSACTION_TIMEOUT = 50, + /** Producer attempted to update a transaction while another + * concurrent operation on the same transaction was ongoing */ + ERR_CONCURRENT_TRANSACTIONS = 51, + /** Indicates that the transaction coordinator sending a + * WriteTxnMarker is no longer the current coordinator for a + * given producer */ + ERR_TRANSACTION_COORDINATOR_FENCED = 52, + /** Transactional Id authorization failed */ + ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53, + /** Security features are disabled */ + ERR_SECURITY_DISABLED = 54, + /** Operation not attempted */ + ERR_OPERATION_NOT_ATTEMPTED = 55, + /** Disk error when trying to access log file on the disk */ + ERR_KAFKA_STORAGE_ERROR = 56, + /** The user-specified log directory is not found in the broker config */ + ERR_LOG_DIR_NOT_FOUND = 57, + /** SASL Authentication failed */ + ERR_SASL_AUTHENTICATION_FAILED = 58, + /** Unknown Producer Id */ + ERR_UNKNOWN_PRODUCER_ID = 59, + /** Partition reassignment is in progress */ + ERR_REASSIGNMENT_IN_PROGRESS = 60, + /** Delegation Token feature is not enabled */ + ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61, + /** Delegation Token is not found on server */ + ERR_DELEGATION_TOKEN_NOT_FOUND = 62, + /** Specified Principal is not valid Owner/Renewer */ + ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63, + /** Delegation Token requests are not allowed on this connection */ + ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64, + /** Delegation 
Token authorization failed */ + ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65, + /** Delegation Token is expired */ + ERR_DELEGATION_TOKEN_EXPIRED = 66, + /** Supplied principalType is not supported */ + ERR_INVALID_PRINCIPAL_TYPE = 67, + /** The group is not empty */ + ERR_NON_EMPTY_GROUP = 68, + /** The group id does not exist */ + ERR_GROUP_ID_NOT_FOUND = 69, + /** The fetch session ID was not found */ + ERR_FETCH_SESSION_ID_NOT_FOUND = 70, + /** The fetch session epoch is invalid */ + ERR_INVALID_FETCH_SESSION_EPOCH = 71, + /** No matching listener */ + ERR_LISTENER_NOT_FOUND = 72, + /** Topic deletion is disabled */ + ERR_TOPIC_DELETION_DISABLED = 73, + /** Leader epoch is older than broker epoch */ + ERR_FENCED_LEADER_EPOCH = 74, + /** Leader epoch is newer than broker epoch */ + ERR_UNKNOWN_LEADER_EPOCH = 75, + /** Unsupported compression type */ + ERR_UNSUPPORTED_COMPRESSION_TYPE = 76, + /** Broker epoch has changed */ + ERR_STALE_BROKER_EPOCH = 77, + /** Leader high watermark is not caught up */ + ERR_OFFSET_NOT_AVAILABLE = 78, + /** Group member needs a valid member ID */ + ERR_MEMBER_ID_REQUIRED = 79, + /** Preferred leader was not available */ + ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80, + /** Consumer group has reached maximum size */ + ERR_GROUP_MAX_SIZE_REACHED = 81, + /** Static consumer fenced by other consumer with same + * group.instance.id. 
*/ + ERR_FENCED_INSTANCE_ID = 82, + /** Eligible partition leaders are not available */ + ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83, + /** Leader election not needed for topic partition */ + ERR_ELECTION_NOT_NEEDED = 84, + /** No partition reassignment is in progress */ + ERR_NO_REASSIGNMENT_IN_PROGRESS = 85, + /** Deleting offsets of a topic while the consumer group is + * subscribed to it */ + ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86, + /** Broker failed to validate record */ + ERR_INVALID_RECORD = 87, + /** There are unstable offsets that need to be cleared */ + ERR_UNSTABLE_OFFSET_COMMIT = 88, + /** Throttling quota has been exceeded */ + ERR_THROTTLING_QUOTA_EXCEEDED = 89, + /** There is a newer producer with the same transactionalId + * which fences the current one */ + ERR_PRODUCER_FENCED = 90, + /** Request illegally referred to resource that does not exist */ + ERR_RESOURCE_NOT_FOUND = 91, + /** Request illegally referred to the same resource twice */ + ERR_DUPLICATE_RESOURCE = 92, + /** Requested credential would not meet criteria for acceptability */ + ERR_UNACCEPTABLE_CREDENTIAL = 93, + /** Indicates that the either the sender or recipient of a + * voter-only request is not one of the expected voters */ + ERR_INCONSISTENT_VOTER_SET = 94, + /** Invalid update version */ + ERR_INVALID_UPDATE_VERSION = 95, + /** Unable to update finalized features due to server error */ + ERR_FEATURE_UPDATE_FAILED = 96, + /** Request principal deserialization failed during forwarding */ + ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97 }; @@ -549,7 +549,7 @@ enum ErrorCode { * @brief Returns a human readable representation of a kafka error. 
*/ RD_EXPORT -std::string err2str(RdKafka::ErrorCode err); +std::string err2str(RdKafka::ErrorCode err); @@ -558,9 +558,9 @@ std::string err2str(RdKafka::ErrorCode err); * @brief SSL certificate types */ enum CertificateType { - CERT_PUBLIC_KEY, /**< Client's public key */ - CERT_PRIVATE_KEY, /**< Client's private key */ - CERT_CA, /**< CA certificate */ + CERT_PUBLIC_KEY, /**< Client's public key */ + CERT_PRIVATE_KEY, /**< Client's private key */ + CERT_CA, /**< CA certificate */ CERT__CNT }; @@ -569,9 +569,9 @@ enum CertificateType { * @brief SSL certificate encoding */ enum CertificateEncoding { - CERT_ENC_PKCS12, /**< PKCS#12 */ - CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ - CERT_ENC_PEM, /**< PEM */ + CERT_ENC_PKCS12, /**< PKCS#12 */ + CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ + CERT_ENC_PEM, /**< PEM */ CERT_ENC__CNT }; @@ -612,56 +612,56 @@ class KafkaConsumer; */ class RD_EXPORT Error { public: + /** + * @brief Create error object. + */ + static Error *create(ErrorCode code, const std::string *errstr); - /** - * @brief Create error object. - */ - static Error *create (ErrorCode code, const std::string *errstr); - - virtual ~Error () { } + virtual ~Error() { + } - /* - * Error accessor methods - */ + /* + * Error accessor methods + */ - /** - * @returns the error code, e.g., RdKafka::ERR_UNKNOWN_MEMBER_ID. - */ - virtual ErrorCode code () const = 0; + /** + * @returns the error code, e.g., RdKafka::ERR_UNKNOWN_MEMBER_ID. + */ + virtual ErrorCode code() const = 0; - /** - * @returns the error code name, e.g, "ERR_UNKNOWN_MEMBER_ID". - */ - virtual std::string name () const = 0; + /** + * @returns the error code name, e.g, "ERR_UNKNOWN_MEMBER_ID". + */ + virtual std::string name() const = 0; /** * @returns a human readable error string. */ - virtual std::string str () const = 0; - - /** - * @returns true if the error is a fatal error, indicating that the client - * instance is no longer usable, else false. 
- */ - virtual bool is_fatal () const = 0; - - /** - * @returns true if the operation may be retried, else false. - */ - virtual bool is_retriable () const = 0; - - /** - * @returns true if the error is an abortable transaction error in which case - * the application must call RdKafka::Producer::abort_transaction() - * and start a new transaction with - * RdKafka::Producer::begin_transaction() if it wishes to proceed - * with transactions. - * Else returns false. - * - * @remark The return value of this method is only valid for errors returned - * by the transactional API. - */ - virtual bool txn_requires_abort () const = 0; + virtual std::string str() const = 0; + + /** + * @returns true if the error is a fatal error, indicating that the client + * instance is no longer usable, else false. + */ + virtual bool is_fatal() const = 0; + + /** + * @returns true if the operation may be retried, else false. + */ + virtual bool is_retriable() const = 0; + + /** + * @returns true if the error is an abortable transaction error in which case + * the application must call RdKafka::Producer::abort_transaction() + * and start a new transaction with + * RdKafka::Producer::begin_transaction() if it wishes to proceed + * with transactions. + * Else returns false. + * + * @remark The return value of this method is only valid for errors returned + * by the transactional API. + */ + virtual bool txn_requires_abort() const = 0; }; /**@}*/ @@ -700,9 +700,10 @@ class RD_EXPORT DeliveryReportCb { /** * @brief Delivery report callback. */ - virtual void dr_cb (Message &message) = 0; + virtual void dr_cb(Message &message) = 0; - virtual ~DeliveryReportCb() { } + virtual ~DeliveryReportCb() { + } }; @@ -742,10 +743,12 @@ class RD_EXPORT OAuthBearerTokenRefreshCb { * @param oauthbearer_config The value of the * \p sasl.oauthbearer.config configuration property for \p handle. 
*/ - virtual void oauthbearer_token_refresh_cb (RdKafka::Handle* handle, - const std::string &oauthbearer_config) = 0; + virtual void oauthbearer_token_refresh_cb( + RdKafka::Handle *handle, + const std::string &oauthbearer_config) = 0; - virtual ~OAuthBearerTokenRefreshCb() { } + virtual ~OAuthBearerTokenRefreshCb() { + } }; @@ -768,18 +771,20 @@ class RD_EXPORT PartitionerCb { * * @remark \p key may be NULL or the empty. * - * @returns Must return a value between 0 and \p partition_cnt (non-inclusive). - * May return RD_KAFKA_PARTITION_UA (-1) if partitioning failed. + * @returns Must return a value between 0 and \p partition_cnt + * (non-inclusive). May return RD_KAFKA_PARTITION_UA (-1) if partitioning + * failed. * * @sa The callback may use RdKafka::Topic::partition_available() to check * if a partition has an active leader broker. */ - virtual int32_t partitioner_cb (const Topic *topic, - const std::string *key, - int32_t partition_cnt, - void *msg_opaque) = 0; + virtual int32_t partitioner_cb(const Topic *topic, + const std::string *key, + int32_t partition_cnt, + void *msg_opaque) = 0; - virtual ~PartitionerCb() { } + virtual ~PartitionerCb() { + } }; /** @@ -796,13 +801,14 @@ class PartitionerKeyPointerCb { * * @sa See RdKafka::PartitionerCb::partitioner_cb() for exact semantics */ - virtual int32_t partitioner_cb (const Topic *topic, - const void *key, - size_t key_len, - int32_t partition_cnt, - void *msg_opaque) = 0; + virtual int32_t partitioner_cb(const Topic *topic, + const void *key, + size_t key_len, + int32_t partition_cnt, + void *msg_opaque) = 0; - virtual ~PartitionerKeyPointerCb() { } + virtual ~PartitionerKeyPointerCb() { + } }; @@ -822,9 +828,10 @@ class RD_EXPORT EventCb { * * @sa RdKafka::Event */ - virtual void event_cb (Event &event) = 0; + virtual void event_cb(Event &event) = 0; - virtual ~EventCb() { } + virtual ~EventCb() { + } }; @@ -835,25 +842,26 @@ class RD_EXPORT Event { public: /** @brief Event type */ enum Type { - 
EVENT_ERROR, /**< Event is an error condition */ - EVENT_STATS, /**< Event is a statistics JSON document */ - EVENT_LOG, /**< Event is a log message */ - EVENT_THROTTLE /**< Event is a throttle level signaling from the broker */ + EVENT_ERROR, /**< Event is an error condition */ + EVENT_STATS, /**< Event is a statistics JSON document */ + EVENT_LOG, /**< Event is a log message */ + EVENT_THROTTLE /**< Event is a throttle level signaling from the broker */ }; /** @brief EVENT_LOG severities (conforms to syslog(3) severities) */ enum Severity { - EVENT_SEVERITY_EMERG = 0, - EVENT_SEVERITY_ALERT = 1, + EVENT_SEVERITY_EMERG = 0, + EVENT_SEVERITY_ALERT = 1, EVENT_SEVERITY_CRITICAL = 2, - EVENT_SEVERITY_ERROR = 3, - EVENT_SEVERITY_WARNING = 4, - EVENT_SEVERITY_NOTICE = 5, - EVENT_SEVERITY_INFO = 6, - EVENT_SEVERITY_DEBUG = 7 + EVENT_SEVERITY_ERROR = 3, + EVENT_SEVERITY_WARNING = 4, + EVENT_SEVERITY_NOTICE = 5, + EVENT_SEVERITY_INFO = 6, + EVENT_SEVERITY_DEBUG = 7 }; - virtual ~Event () { } + virtual ~Event() { + } /* * Event Accessor methods @@ -863,25 +871,25 @@ class RD_EXPORT Event { * @returns The event type * @remark Applies to all event types */ - virtual Type type () const = 0; + virtual Type type() const = 0; /** * @returns Event error, if any. * @remark Applies to all event types except THROTTLE */ - virtual ErrorCode err () const = 0; + virtual ErrorCode err() const = 0; /** * @returns Log severity level. * @remark Applies to LOG event type. */ - virtual Severity severity () const = 0; + virtual Severity severity() const = 0; /** * @returns Log facility string. * @remark Applies to LOG event type. */ - virtual std::string fac () const = 0; + virtual std::string fac() const = 0; /** * @returns Log message string. @@ -891,25 +899,25 @@ class RD_EXPORT Event { * * @remark Applies to LOG event type. */ - virtual std::string str () const = 0; + virtual std::string str() const = 0; /** * @returns Throttle time in milliseconds. 
* @remark Applies to THROTTLE event type. */ - virtual int throttle_time () const = 0; + virtual int throttle_time() const = 0; /** * @returns Throttling broker's name. * @remark Applies to THROTTLE event type. */ - virtual std::string broker_name () const = 0; + virtual std::string broker_name() const = 0; /** * @returns Throttling broker's id. * @remark Applies to THROTTLE event type. */ - virtual int broker_id () const = 0; + virtual int broker_id() const = 0; /** @@ -917,7 +925,7 @@ class RD_EXPORT Event { * @remark Applies to ERROR event type. * @sa RdKafka::Handle::fatal_error() */ - virtual bool fatal () const = 0; + virtual bool fatal() const = 0; }; @@ -934,9 +942,10 @@ class RD_EXPORT ConsumeCb { * * The callback interface is optional but provides increased performance. */ - virtual void consume_cb (Message &message, void *opaque) = 0; + virtual void consume_cb(Message &message, void *opaque) = 0; - virtual ~ConsumeCb() { } + virtual ~ConsumeCb() { + } }; @@ -944,7 +953,7 @@ class RD_EXPORT ConsumeCb { * @brief \b KafkaConsumer: Rebalance callback class */ class RD_EXPORT RebalanceCb { -public: + public: /** * @brief Group rebalance callback for use with RdKafka::KafkaConsumer * @@ -1013,11 +1022,12 @@ class RD_EXPORT RebalanceCb { * @remark The above example lacks error handling for assign calls, see * the examples/ directory. 
*/ - virtual void rebalance_cb (RdKafka::KafkaConsumer *consumer, + virtual void rebalance_cb(RdKafka::KafkaConsumer *consumer, RdKafka::ErrorCode err, - std::vector&partitions) = 0; + std::vector &partitions) = 0; - virtual ~RebalanceCb() { } + virtual ~RebalanceCb() { + } }; @@ -1025,7 +1035,7 @@ class RD_EXPORT RebalanceCb { * @brief Offset Commit callback class */ class RD_EXPORT OffsetCommitCb { -public: + public: /** * @brief Set offset commit callback for use with consumer groups * @@ -1042,9 +1052,10 @@ class RD_EXPORT OffsetCommitCb { * - \c err: Commit error */ virtual void offset_commit_cb(RdKafka::ErrorCode err, - std::vector&offsets) = 0; + std::vector &offsets) = 0; - virtual ~OffsetCommitCb() { } + virtual ~OffsetCommitCb() { + } }; @@ -1055,7 +1066,7 @@ class RD_EXPORT OffsetCommitCb { * @remark Class instance must outlive the RdKafka client instance. */ class RD_EXPORT SslCertificateVerifyCb { -public: + public: /** * @brief SSL broker certificate verification callback. * @@ -1092,14 +1103,16 @@ class RD_EXPORT SslCertificateVerifyCb { * @remark See in the OpenSSL source distribution * for a list of \p x509_error codes. 
*/ - virtual bool ssl_cert_verify_cb (const std::string &broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - std::string &errstr) = 0; + virtual bool ssl_cert_verify_cb(const std::string &broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + std::string &errstr) = 0; - virtual ~SslCertificateVerifyCb() {} + virtual ~SslCertificateVerifyCb() { + } }; @@ -1122,9 +1135,10 @@ class RD_EXPORT SocketCb { * * @returns The socket file descriptor or -1 on error (\c errno must be set) */ - virtual int socket_cb (int domain, int type, int protocol) = 0; + virtual int socket_cb(int domain, int type, int protocol) = 0; - virtual ~SocketCb() { } + virtual ~SocketCb() { + } }; @@ -1145,9 +1159,10 @@ class RD_EXPORT OpenCb { * * @remark Not currently available on native Win32 */ - virtual int open_cb (const std::string &path, int flags, int mode) = 0; + virtual int open_cb(const std::string &path, int flags, int mode) = 0; - virtual ~OpenCb() { } + virtual ~OpenCb() { + } }; @@ -1155,7 +1170,6 @@ class RD_EXPORT OpenCb { - /** * @name Configuration interface * @{ @@ -1185,18 +1199,19 @@ class RD_EXPORT Conf { * @brief RdKafka::Conf::Set() result code */ enum ConfResult { - CONF_UNKNOWN = -2, /**< Unknown configuration property */ - CONF_INVALID = -1, /**< Invalid configuration value */ - CONF_OK = 0 /**< Configuration property was succesfully set */ + CONF_UNKNOWN = -2, /**< Unknown configuration property */ + CONF_INVALID = -1, /**< Invalid configuration value */ + CONF_OK = 0 /**< Configuration property was succesfully set */ }; /** * @brief Create configuration object */ - static Conf *create (ConfType type); + static Conf *create(ConfType type); - virtual ~Conf () { } + virtual ~Conf() { + } /** * @brief Set configuration property \p name to value \p value. 
@@ -1211,24 +1226,25 @@ class RD_EXPORT Conf { * @returns CONF_OK on success, else writes a human readable error * description to \p errstr on error. */ - virtual Conf::ConfResult set (const std::string &name, - const std::string &value, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + const std::string &value, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"dr_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - DeliveryReportCb *dr_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + DeliveryReportCb *dr_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"oauthbearer_token_refresh_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set( + const std::string &name, + OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"event_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - EventCb *event_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + EventCb *event_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"default_topic_conf\" * @@ -1237,42 +1253,44 @@ class RD_EXPORT Conf { * * @sa RdKafka::KafkaConsumer::subscribe() */ - virtual Conf::ConfResult set (const std::string &name, - const Conf *topic_conf, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + const Conf *topic_conf, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"partitioner_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - PartitionerCb *partitioner_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + PartitionerCb *partitioner_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"partitioner_key_pointer_cb\" */ - 
virtual Conf::ConfResult set (const std::string &name, - PartitionerKeyPointerCb *partitioner_kp_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + PartitionerKeyPointerCb *partitioner_kp_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"socket_cb\" */ - virtual Conf::ConfResult set (const std::string &name, SocketCb *socket_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + SocketCb *socket_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"open_cb\" */ - virtual Conf::ConfResult set (const std::string &name, OpenCb *open_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + OpenCb *open_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"rebalance_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - RebalanceCb *rebalance_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + RebalanceCb *rebalance_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"offset_commit_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - OffsetCommitCb *offset_commit_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + OffsetCommitCb *offset_commit_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"ssl_cert_verify_cb\". * @returns CONF_OK on success or CONF_INVALID if SSL is * not supported in this build. - */ + */ virtual Conf::ConfResult set(const std::string &name, SslCertificateVerifyCb *ssl_cert_verify_cb, std::string &errstr) = 0; @@ -1309,15 +1327,17 @@ class RD_EXPORT Conf { * @remark CA certificate in PEM format may also be set with the * `ssl.ca.pem` configuration property. 
*/ - virtual Conf::ConfResult set_ssl_cert (RdKafka::CertificateType cert_type, - RdKafka::CertificateEncoding cert_enc, - const void *buffer, size_t size, - std::string &errstr) = 0; + virtual Conf::ConfResult set_ssl_cert(RdKafka::CertificateType cert_type, + RdKafka::CertificateEncoding cert_enc, + const void *buffer, + size_t size, + std::string &errstr) = 0; /** @brief Query single configuration value * - * Do not use this method to get callbacks registered by the configuration file. - * Instead use the specific get() methods with the specific callback parameter in the signature. + * Do not use this method to get callbacks registered by the configuration + * file. Instead use the specific get() methods with the specific callback + * parameter in the signature. * * Fallthrough: * Topic-level configuration properties from the \c default_topic_conf @@ -1326,7 +1346,7 @@ class RD_EXPORT Conf { * @returns CONF_OK if the property was set previously set and * returns the value in \p value. */ virtual Conf::ConfResult get(const std::string &name, - std::string &value) const = 0; + std::string &value) const = 0; /** @brief Query single configuration value * @returns CONF_OK if the property was set previously set and @@ -1337,7 +1357,7 @@ class RD_EXPORT Conf { * @returns CONF_OK if the property was set previously set and * returns the value in \p oauthbearer_token_refresh_cb. */ virtual Conf::ConfResult get( - OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const = 0; + OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const = 0; /** @brief Query single configuration value * @returns CONF_OK if the property was set previously set and @@ -1352,7 +1372,8 @@ class RD_EXPORT Conf { /** @brief Query single configuration value * @returns CONF_OK if the property was set previously set and * returns the value in \p partitioner_kp_cb. 
*/ - virtual Conf::ConfResult get(PartitionerKeyPointerCb *&partitioner_kp_cb) const = 0; + virtual Conf::ConfResult get( + PartitionerKeyPointerCb *&partitioner_kp_cb) const = 0; /** @brief Query single configuration value * @returns CONF_OK if the property was set previously set and @@ -1375,15 +1396,17 @@ class RD_EXPORT Conf { virtual Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const = 0; /** @brief Use with \p name = \c \"ssl_cert_verify_cb\" */ - virtual Conf::ConfResult get(SslCertificateVerifyCb *&ssl_cert_verify_cb) const = 0; + virtual Conf::ConfResult get( + SslCertificateVerifyCb *&ssl_cert_verify_cb) const = 0; /** @brief Dump configuration names and values to list containing * name,value tuples */ - virtual std::list *dump () = 0; + virtual std::list *dump() = 0; /** @brief Use with \p name = \c \"consume_cb\" */ - virtual Conf::ConfResult set (const std::string &name, ConsumeCb *consume_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + ConsumeCb *consume_cb, + std::string &errstr) = 0; /** * @brief Returns the underlying librdkafka C rd_kafka_conf_t handle. @@ -1401,7 +1424,7 @@ class RD_EXPORT Conf { * * @returns \c rd_kafka_conf_t* if this is a CONF_GLOBAL object, else NULL. */ - virtual struct rd_kafka_conf_s *c_ptr_global () = 0; + virtual struct rd_kafka_conf_s *c_ptr_global() = 0; /** * @brief Returns the underlying librdkafka C rd_kafka_topic_conf_t handle. @@ -1420,7 +1443,7 @@ class RD_EXPORT Conf { * @returns \c rd_kafka_topic_conf_t* if this is a CONF_TOPIC object, * else NULL. */ - virtual struct rd_kafka_topic_conf_s *c_ptr_topic () = 0; + virtual struct rd_kafka_topic_conf_s *c_ptr_topic() = 0; /** * @brief Set callback_data for ssl engine. @@ -1434,8 +1457,8 @@ class RD_EXPORT Conf { * * @returns CONF_OK on success, else CONF_INVALID. 
*/ - virtual Conf::ConfResult set_engine_callback_data (void *value, - std::string &errstr) = 0; + virtual Conf::ConfResult set_engine_callback_data(void *value, + std::string &errstr) = 0; /** @brief Enable/disable creation of a queue specific to SASL events @@ -1460,9 +1483,8 @@ class RD_EXPORT Conf { * @remark The SASL queue is currently only used by the SASL OAUTHBEARER " * mechanism's token refresh callback. */ - virtual Conf::ConfResult enable_sasl_queue (bool enable, + virtual Conf::ConfResult enable_sasl_queue(bool enable, std::string &errstr) = 0; - }; /**@}*/ @@ -1479,10 +1501,11 @@ class RD_EXPORT Conf { */ class RD_EXPORT Handle { public: - virtual ~Handle() { } + virtual ~Handle() { + } /** @returns the name of the handle */ - virtual const std::string name () const = 0; + virtual const std::string name() const = 0; /** * @brief Returns the client's broker-assigned group member id @@ -1492,7 +1515,7 @@ class RD_EXPORT Handle { * @returns Last assigned member id, or empty string if not currently * a group member. */ - virtual const std::string memberid () const = 0; + virtual const std::string memberid() const = 0; /** @@ -1506,8 +1529,10 @@ class RD_EXPORT Handle { * To wait indefinately for events, provide -1. * * Events: - * - delivery report callbacks (if an RdKafka::DeliveryCb is configured) [producer] - * - event callbacks (if an RdKafka::EventCb is configured) [producer & consumer] + * - delivery report callbacks (if an RdKafka::DeliveryCb is configured) + * [producer] + * - event callbacks (if an RdKafka::EventCb is configured) [producer & + * consumer] * * @remark An application should make sure to call poll() at regular * intervals to serve any queued callbacks waiting to be called. @@ -1517,7 +1542,7 @@ class RD_EXPORT Handle { * * @returns the number of events served. 
*/ - virtual int poll (int timeout_ms) = 0; + virtual int poll(int timeout_ms) = 0; /** * @brief Returns the current out queue length @@ -1525,7 +1550,7 @@ class RD_EXPORT Handle { * The out queue contains messages and requests waiting to be sent to, * or acknowledged by, the broker. */ - virtual int outq_len () = 0; + virtual int outq_len() = 0; /** * @brief Request Metadata from broker. @@ -1535,15 +1560,17 @@ class RD_EXPORT Handle { * if zero: only request info about locally known topics. * \p only_rkt - only request info about this topic * \p metadatap - pointer to hold metadata result. - * The \p *metadatap pointer must be released with \c delete. - * \p timeout_ms - maximum response time before failing. + * The \p *metadatap pointer must be released with \c + * delete. \p timeout_ms - maximum response time before failing. * * @returns RdKafka::ERR_NO_ERROR on success (in which case \p *metadatap * will be set), else RdKafka::ERR__TIMED_OUT on timeout or * other error code on error. */ - virtual ErrorCode metadata (bool all_topics, const Topic *only_rkt, - Metadata **metadatap, int timeout_ms) = 0; + virtual ErrorCode metadata(bool all_topics, + const Topic *only_rkt, + Metadata **metadatap, + int timeout_ms) = 0; /** @@ -1555,7 +1582,7 @@ class RD_EXPORT Handle { * * @sa resume() */ - virtual ErrorCode pause (std::vector &partitions) = 0; + virtual ErrorCode pause(std::vector &partitions) = 0; /** @@ -1567,7 +1594,7 @@ class RD_EXPORT Handle { * * @sa pause() */ - virtual ErrorCode resume (std::vector &partitions) = 0; + virtual ErrorCode resume(std::vector &partitions) = 0; /** @@ -1578,10 +1605,11 @@ class RD_EXPORT Handle { * * @returns RdKafka::ERR_NO_ERROR on success or an error code on failure. 
*/ - virtual ErrorCode query_watermark_offsets (const std::string &topic, - int32_t partition, - int64_t *low, int64_t *high, - int timeout_ms) = 0; + virtual ErrorCode query_watermark_offsets(const std::string &topic, + int32_t partition, + int64_t *low, + int64_t *high, + int timeout_ms) = 0; /** * @brief Get last known low (oldest/beginning) @@ -1600,9 +1628,10 @@ class RD_EXPORT Handle { * * @remark Shall only be used with an active consumer instance. */ - virtual ErrorCode get_watermark_offsets (const std::string &topic, - int32_t partition, - int64_t *low, int64_t *high) = 0; + virtual ErrorCode get_watermark_offsets(const std::string &topic, + int32_t partition, + int64_t *low, + int64_t *high) = 0; /** @@ -1626,8 +1655,8 @@ class RD_EXPORT Handle { * @returns an error code for general errors, else RdKafka::ERR_NO_ERROR * in which case per-partition errors might be set. */ - virtual ErrorCode offsetsForTimes (std::vector &offsets, - int timeout_ms) = 0; + virtual ErrorCode offsetsForTimes(std::vector &offsets, + int timeout_ms) = 0; /** @@ -1638,7 +1667,7 @@ class RD_EXPORT Handle { * * @remark This function only works on consumers. */ - virtual Queue *get_partition_queue (const TopicPartition *partition) = 0; + virtual Queue *get_partition_queue(const TopicPartition *partition) = 0; /** * @brief Forward librdkafka logs (and debug) to the specified queue @@ -1656,7 +1685,7 @@ class RD_EXPORT Handle { * * @returns ERR_NO_ERROR on success or an error code on error. */ - virtual ErrorCode set_log_queue (Queue *queue) = 0; + virtual ErrorCode set_log_queue(Queue *queue) = 0; /** * @brief Cancels the current callback dispatcher (Handle::poll(), @@ -1669,7 +1698,7 @@ class RD_EXPORT Handle { * @remark This function MUST ONLY be called from within a * librdkafka callback. */ - virtual void yield () = 0; + virtual void yield() = 0; /** * @brief Returns the ClusterId as reported in broker metadata. 
@@ -1685,7 +1714,7 @@ class RD_EXPORT Handle { * @returns Last cached ClusterId, or empty string if no ClusterId could be * retrieved in the allotted timespan. */ - virtual const std::string clusterid (int timeout_ms) = 0; + virtual const std::string clusterid(int timeout_ms) = 0; /** * @brief Returns the underlying librdkafka C rd_kafka_t handle. @@ -1703,7 +1732,7 @@ class RD_EXPORT Handle { * * @returns \c rd_kafka_t* */ - virtual struct rd_kafka_s *c_ptr () = 0; + virtual struct rd_kafka_s *c_ptr() = 0; /** * @brief Returns the current ControllerId (controller broker id) @@ -1720,7 +1749,7 @@ class RD_EXPORT Handle { * @returns Last cached ControllerId, or -1 if no ControllerId could be * retrieved in the allotted timespan. */ - virtual int32_t controllerid (int timeout_ms) = 0; + virtual int32_t controllerid(int timeout_ms) = 0; /** @@ -1744,7 +1773,7 @@ class RD_EXPORT Handle { * @returns ERR_NO_ERROR if no fatal error has been raised, else * any other error code. */ - virtual ErrorCode fatal_error (std::string &errstr) const = 0; + virtual ErrorCode fatal_error(std::string &errstr) const = 0; /** * @brief Set SASL/OAUTHBEARER token and metadata @@ -1785,82 +1814,83 @@ class RD_EXPORT Handle { * @sa RdKafka::oauthbearer_set_token_failure * @sa RdKafka::Conf::set() \c "oauthbearer_token_refresh_cb" */ - virtual ErrorCode oauthbearer_set_token (const std::string &token_value, - int64_t md_lifetime_ms, - const std::string &md_principal_name, - const std::list &extensions, - std::string &errstr) = 0; + virtual ErrorCode oauthbearer_set_token( + const std::string &token_value, + int64_t md_lifetime_ms, + const std::string &md_principal_name, + const std::list &extensions, + std::string &errstr) = 0; - /** - * @brief SASL/OAUTHBEARER token refresh failure indicator. - * - * @param errstr human readable error reason for failing to acquire a token. - * - * The SASL/OAUTHBEARER token refresh callback should - * invoke this method upon failure to refresh the token. 
- * - * @returns \c RdKafka::ERR_NO_ERROR on success, otherwise:
- * \c RdKafka::ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not - * supported by this build;
- * \c RdKafka::ERR__STATE if SASL/OAUTHBEARER is supported but is - * not configured as the client's authentication mechanism. - * - * @sa RdKafka::oauthbearer_set_token - * @sa RdKafka::Conf::set() \c "oauthbearer_token_refresh_cb" - */ - virtual ErrorCode oauthbearer_set_token_failure (const std::string &errstr) = 0; + /** + * @brief SASL/OAUTHBEARER token refresh failure indicator. + * + * @param errstr human readable error reason for failing to acquire a token. + * + * The SASL/OAUTHBEARER token refresh callback should + * invoke this method upon failure to refresh the token. + * + * @returns \c RdKafka::ERR_NO_ERROR on success, otherwise:
+ * \c RdKafka::ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not + * supported by this build;
+ * \c RdKafka::ERR__STATE if SASL/OAUTHBEARER is supported but is + * not configured as the client's authentication mechanism. + * + * @sa RdKafka::oauthbearer_set_token + * @sa RdKafka::Conf::set() \c "oauthbearer_token_refresh_cb" + */ + virtual ErrorCode oauthbearer_set_token_failure( + const std::string &errstr) = 0; - /** - * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka - * background thread. - * - * This serves as an alternative for applications that do not - * call RdKafka::Handle::poll() (et.al.) at regular intervals. - */ - virtual Error *sasl_background_callbacks_enable () = 0; + /** + * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka + * background thread. + * + * This serves as an alternative for applications that do not + * call RdKafka::Handle::poll() (et.al.) at regular intervals. + */ + virtual Error *sasl_background_callbacks_enable() = 0; - /** + /** * @returns the SASL callback queue, if enabled, else NULL. * * @sa RdKafka::Conf::enable_sasl_queue() */ - virtual Queue *get_sasl_queue () = 0; + virtual Queue *get_sasl_queue() = 0; - /** + /** * @returns the librdkafka background thread queue. */ - virtual Queue *get_background_queue () = 0; - + virtual Queue *get_background_queue() = 0; - /** - * @brief Allocate memory using the same allocator librdkafka uses. - * - * This is typically an abstraction for the malloc(3) call and makes sure - * the application can use the same memory allocator as librdkafka for - * allocating pointers that are used by librdkafka. - * - * @remark Memory allocated by mem_malloc() must be freed using - * mem_free(). - */ - virtual void *mem_malloc (size_t size) = 0; + /** + * @brief Allocate memory using the same allocator librdkafka uses. + * + * This is typically an abstraction for the malloc(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * allocating pointers that are used by librdkafka. 
+ * + * @remark Memory allocated by mem_malloc() must be freed using + * mem_free(). + */ + virtual void *mem_malloc(size_t size) = 0; - /** - * @brief Free pointer returned by librdkafka - * - * This is typically an abstraction for the free(3) call and makes sure - * the application can use the same memory allocator as librdkafka for - * freeing pointers returned by librdkafka. - * - * In standard setups it is usually not necessary to use this interface - * rather than the free(3) function. - * - * @remark mem_free() must only be used for pointers returned by APIs - * that explicitly mention using this function for freeing. - */ - virtual void mem_free (void *ptr) = 0; + /** + * @brief Free pointer returned by librdkafka + * + * This is typically an abstraction for the free(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * freeing pointers returned by librdkafka. + * + * In standard setups it is usually not necessary to use this interface + * rather than the free(3) function. + * + * @remark mem_free() must only be used for pointers returned by APIs + * that explicitly mention using this function for freeing. + */ + virtual void mem_free(void *ptr) = 0; }; @@ -1883,13 +1913,13 @@ class RD_EXPORT Handle { * a list of partitions for different operations. */ class RD_EXPORT TopicPartition { -public: + public: /** * @brief Create topic+partition object for \p topic and \p partition. * * Use \c delete to deconstruct. */ - static TopicPartition *create (const std::string &topic, int partition); + static TopicPartition *create(const std::string &topic, int partition); /** * @brief Create topic+partition object for \p topic and \p partition @@ -1897,8 +1927,9 @@ class RD_EXPORT TopicPartition { * * Use \c delete to deconstruct. 
*/ - static TopicPartition *create (const std::string &topic, int partition, - int64_t offset); + static TopicPartition *create(const std::string &topic, + int partition, + int64_t offset); virtual ~TopicPartition() = 0; @@ -1906,22 +1937,22 @@ class RD_EXPORT TopicPartition { * @brief Destroy/delete the TopicPartitions in \p partitions * and clear the vector. */ - static void destroy (std::vector &partitions); + static void destroy(std::vector &partitions); /** @returns topic name */ - virtual const std::string &topic () const = 0; + virtual const std::string &topic() const = 0; /** @returns partition id */ - virtual int partition () const = 0; + virtual int partition() const = 0; /** @returns offset (if applicable) */ - virtual int64_t offset () const = 0; + virtual int64_t offset() const = 0; /** @brief Set offset */ - virtual void set_offset (int64_t offset) = 0; + virtual void set_offset(int64_t offset) = 0; /** @returns error code (if applicable) */ - virtual ErrorCode err () const = 0; + virtual ErrorCode err() const = 0; }; @@ -1942,9 +1973,9 @@ class RD_EXPORT Topic { /** @brief Special offsets */ static const int64_t OFFSET_BEGINNING; /**< Consume from beginning */ - static const int64_t OFFSET_END; /**< Consume from end */ - static const int64_t OFFSET_STORED; /**< Use offset storage */ - static const int64_t OFFSET_INVALID; /**< Invalid offset */ + static const int64_t OFFSET_END; /**< Consume from end */ + static const int64_t OFFSET_STORED; /**< Use offset storage */ + static const int64_t OFFSET_INVALID; /**< Invalid offset */ /** @@ -1956,21 +1987,23 @@ class RD_EXPORT Topic { * * @returns the new topic handle or NULL on error (see \p errstr). 
*/ - static Topic *create (Handle *base, const std::string &topic_str, - const Conf *conf, std::string &errstr); + static Topic *create(Handle *base, + const std::string &topic_str, + const Conf *conf, + std::string &errstr); - virtual ~Topic () = 0; + virtual ~Topic() = 0; /** @returns the topic name */ - virtual const std::string name () const = 0; + virtual const std::string name() const = 0; /** * @returns true if \p partition is available for the topic (has leader). * @warning \b MUST \b ONLY be called from within a * RdKafka::PartitionerCb callback. */ - virtual bool partition_available (int32_t partition) const = 0; + virtual bool partition_available(int32_t partition) const = 0; /** * @brief Store offset \p offset + 1 for topic partition \p partition. @@ -1983,7 +2016,7 @@ class RD_EXPORT Topic { * @returns RdKafka::ERR_NO_ERROR on success or an error code if none of the * offsets could be stored. */ - virtual ErrorCode offset_store (int32_t partition, int64_t offset) = 0; + virtual ErrorCode offset_store(int32_t partition, int64_t offset) = 0; /** * @brief Returns the underlying librdkafka C rd_kafka_topic_t handle. @@ -2001,7 +2034,7 @@ class RD_EXPORT Topic { * * @returns \c rd_kafka_topic_t* */ - virtual struct rd_kafka_topic_s *c_ptr () = 0; + virtual struct rd_kafka_topic_s *c_ptr() = 0; }; @@ -2027,16 +2060,16 @@ class RD_EXPORT Topic { */ class RD_EXPORT MessageTimestamp { -public: + public: /*! 
Message timestamp type */ enum MessageTimestampType { - MSG_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ - MSG_TIMESTAMP_CREATE_TIME, /**< Message creation time (source) */ - MSG_TIMESTAMP_LOG_APPEND_TIME /**< Message log append time (broker) */ + MSG_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ + MSG_TIMESTAMP_CREATE_TIME, /**< Message creation time (source) */ + MSG_TIMESTAMP_LOG_APPEND_TIME /**< Message log append time (broker) */ }; - MessageTimestampType type; /**< Timestamp type */ - int64_t timestamp; /**< Milliseconds since epoch (UTC). */ + MessageTimestampType type; /**< Timestamp type */ + int64_t timestamp; /**< Milliseconds since epoch (UTC). */ }; @@ -2050,7 +2083,7 @@ class RD_EXPORT MessageTimestamp { * @remark Requires Apache Kafka >= 0.11.0 brokers */ class RD_EXPORT Headers { -public: + public: virtual ~Headers() = 0; /** @@ -2073,10 +2106,8 @@ class RD_EXPORT Headers { * @remark key and value are copied. * */ - Header(const std::string &key, - const void *value, - size_t value_size): - key_(key), err_(ERR_NO_ERROR), value_size_(value_size) { + Header(const std::string &key, const void *value, size_t value_size) : + key_(key), err_(ERR_NO_ERROR), value_size_(value_size) { value_ = copy_value(value, value_size); } @@ -2096,8 +2127,8 @@ class RD_EXPORT Headers { Header(const std::string &key, const void *value, size_t value_size, - const RdKafka::ErrorCode err): - key_(key), err_(err), value_(NULL), value_size_(value_size) { + const RdKafka::ErrorCode err) : + key_(key), err_(err), value_(NULL), value_size_(value_size) { if (err == ERR_NO_ERROR) value_ = copy_value(value, value_size); } @@ -2107,8 +2138,8 @@ class RD_EXPORT Headers { * * @param other Header to make a copy of. 
*/ - Header(const Header &other): - key_(other.key_), err_(other.err_), value_size_(other.value_size_) { + Header(const Header &other) : + key_(other.key_), err_(other.err_), value_size_(other.value_size_) { value_ = copy_value(other.value_, value_size_); } @@ -2117,14 +2148,13 @@ class RD_EXPORT Headers { * * @param other Header to make a copy of. */ - Header& operator=(const Header &other) - { + Header &operator=(const Header &other) { if (&other == this) { return *this; } - key_ = other.key_; - err_ = other.err_; + key_ = other.key_; + err_ = other.err_; value_size_ = other.value_size_; if (value_ != NULL) @@ -2145,7 +2175,7 @@ class RD_EXPORT Headers { return key_; } - /** @returns returns the binary value, or NULL */ + /** @returns returns the binary value, or NULL */ const void *value() const { return value_; } @@ -2166,7 +2196,7 @@ class RD_EXPORT Headers { return err_; } - private: + private: char *copy_value(const void *value, size_t value_size) { if (!value) return NULL; @@ -2211,7 +2241,8 @@ class RD_EXPORT Headers { * * @returns an ErrorCode signalling success or failure to add the header. */ - virtual ErrorCode add(const std::string &key, const void *value, + virtual ErrorCode add(const std::string &key, + const void *value, size_t value_size) = 0; /** @@ -2322,52 +2353,52 @@ class RD_EXPORT Message { /** @returns The error string if object represent an error event, * else an empty string. */ - virtual std::string errstr() const = 0; + virtual std::string errstr() const = 0; /** @returns The error code if object represents an error event, else 0. */ - virtual ErrorCode err () const = 0; + virtual ErrorCode err() const = 0; /** @returns the RdKafka::Topic object for a message (if applicable), * or NULL if a corresponding RdKafka::Topic object has not been * explicitly created with RdKafka::Topic::create(). * In this case use topic_name() instead. 
*/ - virtual Topic *topic () const = 0; + virtual Topic *topic() const = 0; /** @returns Topic name (if applicable, else empty string) */ - virtual std::string topic_name () const = 0; + virtual std::string topic_name() const = 0; /** @returns Partition (if applicable) */ - virtual int32_t partition () const = 0; + virtual int32_t partition() const = 0; /** @returns Message payload (if applicable) */ - virtual void *payload () const = 0 ; + virtual void *payload() const = 0; /** @returns Message payload length (if applicable) */ - virtual size_t len () const = 0; + virtual size_t len() const = 0; /** @returns Message key as string (if applicable) */ - virtual const std::string *key () const = 0; + virtual const std::string *key() const = 0; /** @returns Message key as void pointer (if applicable) */ - virtual const void *key_pointer () const = 0 ; + virtual const void *key_pointer() const = 0; /** @returns Message key's binary length (if applicable) */ - virtual size_t key_len () const = 0; + virtual size_t key_len() const = 0; /** @returns Message or error offset (if applicable) */ - virtual int64_t offset () const = 0; + virtual int64_t offset() const = 0; /** @returns Message timestamp (if applicable) */ - virtual MessageTimestamp timestamp () const = 0; + virtual MessageTimestamp timestamp() const = 0; /** @returns The \p msg_opaque as provided to RdKafka::Producer::produce() */ - virtual void *msg_opaque () const = 0; + virtual void *msg_opaque() const = 0; - virtual ~Message () = 0; + virtual ~Message() = 0; /** @returns the latency in microseconds for a produced message measured * from the produce() call, or -1 if latency is not available. */ - virtual int64_t latency () const = 0; + virtual int64_t latency() const = 0; /** * @brief Returns the underlying librdkafka C rd_kafka_message_t handle. 
@@ -2385,18 +2416,18 @@ class RD_EXPORT Message { * * @returns \c rd_kafka_message_t* */ - virtual struct rd_kafka_message_s *c_ptr () = 0; + virtual struct rd_kafka_message_s *c_ptr() = 0; /** * @brief Returns the message's persistence status in the topic log. */ - virtual Status status () const = 0; + virtual Status status() const = 0; /** @returns the Headers instance for this Message, or NULL if there * are no headers. * * @remark The lifetime of the Headers are the same as the Message. */ - virtual RdKafka::Headers *headers () = 0; + virtual RdKafka::Headers *headers() = 0; /** @returns the Headers instance for this Message (if applicable). * If NULL is returned the reason is given in \p err, which @@ -2404,11 +2435,11 @@ class RD_EXPORT Message { * error code if header parsing failed. * * @remark The lifetime of the Headers are the same as the Message. */ - virtual RdKafka::Headers *headers (RdKafka::ErrorCode *err) = 0; + virtual RdKafka::Headers *headers(RdKafka::ErrorCode *err) = 0; /** @returns the broker id of the broker the message was produced to or * fetched from, or -1 if not known/applicable. */ - virtual int32_t broker_id () const = 0; + virtual int32_t broker_id() const = 0; }; /**@}*/ @@ -2439,7 +2470,7 @@ class RD_EXPORT Queue { /** * @brief Create Queue object */ - static Queue *create (Handle *handle); + static Queue *create(Handle *handle); /** * @brief Forward/re-route queue to \p dst. @@ -2451,7 +2482,7 @@ class RD_EXPORT Queue { * function, \p src will not forward it's fetch queue to the consumer * queue. */ - virtual ErrorCode forward (Queue *dst) = 0; + virtual ErrorCode forward(Queue *dst) = 0; /** @@ -2465,7 +2496,7 @@ class RD_EXPORT Queue { * - timeout due to no message or event in \p timeout_ms * (RdKafka::Message::err() is ERR__TIMED_OUT) */ - virtual Message *consume (int timeout_ms) = 0; + virtual Message *consume(int timeout_ms) = 0; /** * @brief Poll queue, serving any enqueued callbacks. 
@@ -2474,9 +2505,9 @@ class RD_EXPORT Queue { * * @returns the number of events served or 0 on timeout. */ - virtual int poll (int timeout_ms) = 0; + virtual int poll(int timeout_ms) = 0; - virtual ~Queue () = 0; + virtual ~Queue() = 0; /** * @brief Enable IO event triggering for queue. @@ -2493,7 +2524,7 @@ class RD_EXPORT Queue { * @remark When using forwarded queues the IO event must only be enabled * on the final forwarded-to (destination) queue. */ - virtual void io_event_enable (int fd, const void *payload, size_t size) = 0; + virtual void io_event_enable(int fd, const void *payload, size_t size) = 0; }; /**@}*/ @@ -2510,8 +2541,8 @@ class RD_EXPORT Queue { * This class currently does not have any public methods. */ class RD_EXPORT ConsumerGroupMetadata { -public: - virtual ~ConsumerGroupMetadata () = 0; + public: + virtual ~ConsumerGroupMetadata() = 0; }; /**@}*/ @@ -2532,7 +2563,7 @@ class RD_EXPORT ConsumerGroupMetadata { * strategies (see \c partition.assignment.strategy) */ class RD_EXPORT KafkaConsumer : public virtual Handle { -public: + public: /** * @brief Creates a KafkaConsumer. * @@ -2544,18 +2575,19 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * @sa CONFIGURATION.md for \c group.id, \c session.timeout.ms, * \c partition.assignment.strategy, etc. 
*/ - static KafkaConsumer *create (const Conf *conf, std::string &errstr); + static KafkaConsumer *create(const Conf *conf, std::string &errstr); - virtual ~KafkaConsumer () = 0; + virtual ~KafkaConsumer() = 0; /** @brief Returns the current partition assignment as set by * RdKafka::KafkaConsumer::assign() */ - virtual ErrorCode assignment (std::vector &partitions) = 0; + virtual ErrorCode assignment( + std::vector &partitions) = 0; /** @brief Returns the current subscription as set by * RdKafka::KafkaConsumer::subscribe() */ - virtual ErrorCode subscription (std::vector &topics) = 0; + virtual ErrorCode subscription(std::vector &topics) = 0; /** * @brief Update the subscription set to \p topics. @@ -2591,10 +2623,10 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @returns an error if the provided list of topics is invalid. */ - virtual ErrorCode subscribe (const std::vector &topics) = 0; + virtual ErrorCode subscribe(const std::vector &topics) = 0; /** @brief Unsubscribe from the current subscription set. */ - virtual ErrorCode unsubscribe () = 0; + virtual ErrorCode unsubscribe() = 0; /** * @brief Update the assignment set to \p partitions. @@ -2602,12 +2634,12 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * The assignment set is the set of partitions actually being consumed * by the KafkaConsumer. */ - virtual ErrorCode assign (const std::vector &partitions) = 0; + virtual ErrorCode assign(const std::vector &partitions) = 0; /** * @brief Stop consumption and remove the current assignment. */ - virtual ErrorCode unassign () = 0; + virtual ErrorCode unassign() = 0; /** * @brief Consume message or get error event, triggers callbacks. 
@@ -2633,7 +2665,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * - timeout due to no message or event in \p timeout_ms * (RdKafka::Message::err() is ERR__TIMED_OUT) */ - virtual Message *consume (int timeout_ms) = 0; + virtual Message *consume(int timeout_ms) = 0; /** * @brief Commit offsets for the current assignment. @@ -2648,14 +2680,14 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @returns ERR_NO_ERROR or error code. */ - virtual ErrorCode commitSync () = 0; + virtual ErrorCode commitSync() = 0; /** * @brief Asynchronous version of RdKafka::KafkaConsumer::CommitSync() * * @sa RdKafka::KafkaConsumer::commitSync() */ - virtual ErrorCode commitAsync () = 0; + virtual ErrorCode commitAsync() = 0; /** * @brief Commit offset for a single topic+partition based on \p message @@ -2666,7 +2698,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @sa RdKafka::KafkaConsumer::commitSync() */ - virtual ErrorCode commitSync (Message *message) = 0; + virtual ErrorCode commitSync(Message *message) = 0; /** * @brief Commit offset for a single topic+partition based on \p message @@ -2677,7 +2709,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @sa RdKafka::KafkaConsumer::commitSync() */ - virtual ErrorCode commitAsync (Message *message) = 0; + virtual ErrorCode commitAsync(Message *message) = 0; /** * @brief Commit offsets for the provided list of partitions. @@ -2688,7 +2720,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @remark This is the synchronous variant. */ - virtual ErrorCode commitSync (std::vector &offsets) = 0; + virtual ErrorCode commitSync(std::vector &offsets) = 0; /** * @brief Commit offset for the provided list of partitions. @@ -2699,7 +2731,8 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @remark This is the asynchronous variant. 
*/ - virtual ErrorCode commitAsync (const std::vector &offsets) = 0; + virtual ErrorCode commitAsync( + const std::vector &offsets) = 0; /** * @brief Commit offsets for the current assignment. @@ -2711,7 +2744,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @returns ERR_NO_ERROR or error code. */ - virtual ErrorCode commitSync (OffsetCommitCb *offset_commit_cb) = 0; + virtual ErrorCode commitSync(OffsetCommitCb *offset_commit_cb) = 0; /** * @brief Commit offsets for the provided list of partitions. @@ -2723,9 +2756,8 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @returns ERR_NO_ERROR or error code. */ - virtual ErrorCode commitSync (std::vector &offsets, - OffsetCommitCb *offset_commit_cb) = 0; - + virtual ErrorCode commitSync(std::vector &offsets, + OffsetCommitCb *offset_commit_cb) = 0; @@ -2737,8 +2769,8 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * in with the stored offset, or a partition specific error. * Else returns an error code. */ - virtual ErrorCode committed (std::vector &partitions, - int timeout_ms) = 0; + virtual ErrorCode committed(std::vector &partitions, + int timeout_ms) = 0; /** * @brief Retrieve current positions (offsets) for topics+partitions. @@ -2748,7 +2780,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * in with the stored offset, or a partition specific error. * Else returns an error code. */ - virtual ErrorCode position (std::vector &partitions) = 0; + virtual ErrorCode position(std::vector &partitions) = 0; /** @@ -2773,7 +2805,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @remark The consumer object must later be freed with \c delete */ - virtual ErrorCode close () = 0; + virtual ErrorCode close() = 0; /** @@ -2793,7 +2825,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @returns an ErrorCode to indicate success or failure. 
*/ - virtual ErrorCode seek (const TopicPartition &partition, int timeout_ms) = 0; + virtual ErrorCode seek(const TopicPartition &partition, int timeout_ms) = 0; /** @@ -2813,7 +2845,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * be stored, or * RdKafka::ERR___INVALID_ARG if \c enable.auto.offset.store is true. */ - virtual ErrorCode offsets_store (std::vector &offsets) = 0; + virtual ErrorCode offsets_store(std::vector &offsets) = 0; /** @@ -2826,7 +2858,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @sa RdKafka::Producer::send_offsets_to_transaction() */ - virtual ConsumerGroupMetadata *groupMetadata () = 0; + virtual ConsumerGroupMetadata *groupMetadata() = 0; /** @brief Check whether the consumer considers the current assignment to @@ -2843,7 +2875,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * @returns Returns true if the current partition assignment is considered * lost, false otherwise. */ - virtual bool assignment_lost () = 0; + virtual bool assignment_lost() = 0; /** * @brief The rebalance protocol currently in use. This will be @@ -2860,7 +2892,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * "NONE", "EAGER", "COOPERATIVE" on success. */ - virtual std::string rebalance_protocol () = 0; + virtual std::string rebalance_protocol() = 0; /** @@ -2878,7 +2910,8 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @remark The returned object must be deleted by the application. */ - virtual Error *incremental_assign (const std::vector &partitions) = 0; + virtual Error *incremental_assign( + const std::vector &partitions) = 0; /** @@ -2896,8 +2929,8 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @remark The returned object must be deleted by the application. 
*/ - virtual Error *incremental_unassign (const std::vector &partitions) = 0; - + virtual Error *incremental_unassign( + const std::vector &partitions) = 0; }; @@ -2927,9 +2960,9 @@ class RD_EXPORT Consumer : public virtual Handle { * @returns the new handle on success or NULL on error in which case * \p errstr is set to a human readable error message. */ - static Consumer *create (const Conf *conf, std::string &errstr); + static Consumer *create(const Conf *conf, std::string &errstr); - virtual ~Consumer () = 0; + virtual ~Consumer() = 0; /** @@ -2951,7 +2984,7 @@ class RD_EXPORT Consumer : public virtual Handle { * * @returns an ErrorCode to indicate success or failure. */ - virtual ErrorCode start (Topic *topic, int32_t partition, int64_t offset) = 0; + virtual ErrorCode start(Topic *topic, int32_t partition, int64_t offset) = 0; /** * @brief Start consuming messages for topic and \p partition on @@ -2959,8 +2992,10 @@ class RD_EXPORT Consumer : public virtual Handle { * * @sa RdKafka::Consumer::start() */ - virtual ErrorCode start (Topic *topic, int32_t partition, int64_t offset, - Queue *queue) = 0; + virtual ErrorCode start(Topic *topic, + int32_t partition, + int64_t offset, + Queue *queue) = 0; /** * @brief Stop consuming messages for topic and \p partition, purging @@ -2971,7 +3006,7 @@ class RD_EXPORT Consumer : public virtual Handle { * * @returns an ErrorCode to indicate success or failure. */ - virtual ErrorCode stop (Topic *topic, int32_t partition) = 0; + virtual ErrorCode stop(Topic *topic, int32_t partition) = 0; /** * @brief Seek consumer for topic+partition to \p offset which is either an @@ -2987,8 +3022,10 @@ class RD_EXPORT Consumer : public virtual Handle { * * @returns an ErrorCode to indicate success or failure. 
*/ - virtual ErrorCode seek (Topic *topic, int32_t partition, int64_t offset, - int timeout_ms) = 0; + virtual ErrorCode seek(Topic *topic, + int32_t partition, + int64_t offset, + int timeout_ms) = 0; /** * @brief Consume a single message from \p topic and \p partition. @@ -3007,8 +3044,7 @@ class RD_EXPORT Consumer : public virtual Handle { * - ERR__TIMED_OUT - \p timeout_ms was reached with no new messages fetched. * - ERR__PARTITION_EOF - End of partition reached, not an error. */ - virtual Message *consume (Topic *topic, int32_t partition, - int timeout_ms) = 0; + virtual Message *consume(Topic *topic, int32_t partition, int timeout_ms) = 0; /** * @brief Consume a single message from the specified queue. @@ -3031,7 +3067,7 @@ class RD_EXPORT Consumer : public virtual Handle { * errors, so applications should check that it isn't null before * dereferencing it. */ - virtual Message *consume (Queue *queue, int timeout_ms) = 0; + virtual Message *consume(Queue *queue, int timeout_ms) = 0; /** * @brief Consumes messages from \p topic and \p partition, calling @@ -3052,10 +3088,11 @@ class RD_EXPORT Consumer : public virtual Handle { * * @sa RdKafka::Consumer::consume() */ - virtual int consume_callback (Topic *topic, int32_t partition, - int timeout_ms, - ConsumeCb *consume_cb, - void *opaque) = 0; + virtual int consume_callback(Topic *topic, + int32_t partition, + int timeout_ms, + ConsumeCb *consume_cb, + void *opaque) = 0; /** * @brief Consumes messages from \p queue, calling the provided callback for @@ -3063,9 +3100,10 @@ class RD_EXPORT Consumer : public virtual Handle { * * @sa RdKafka::Consumer::consume_callback() */ - virtual int consume_callback (Queue *queue, int timeout_ms, - RdKafka::ConsumeCb *consume_cb, - void *opaque) = 0; + virtual int consume_callback(Queue *queue, + int timeout_ms, + RdKafka::ConsumeCb *consume_cb, + void *opaque) = 0; /** * @brief Converts an offset into the logical offset from the tail of a topic. 
@@ -3104,10 +3142,10 @@ class RD_EXPORT Producer : public virtual Handle { * @returns the new handle on success or NULL on error in which case * \p errstr is set to a human readable error message. */ - static Producer *create (const Conf *conf, std::string &errstr); + static Producer *create(const Conf *conf, std::string &errstr); - virtual ~Producer () = 0; + virtual ~Producer() = 0; /** * @brief RdKafka::Producer::produce() \p msgflags @@ -3116,39 +3154,39 @@ class RD_EXPORT Producer : public virtual Handle { */ enum { RK_MSG_FREE = 0x1, /**< rdkafka will free(3) \p payload - * when it is done with it. - * Mutually exclusive with RK_MSG_COPY. */ + * when it is done with it. + * Mutually exclusive with RK_MSG_COPY. */ RK_MSG_COPY = 0x2, /**< the \p payload data will be copied * and the \p payload pointer will not * be used by rdkafka after the * call returns. * Mutually exclusive with RK_MSG_FREE. */ - RK_MSG_BLOCK = 0x4 /**< Block produce*() on message queue - * full. - * WARNING: - * If a delivery report callback - * is used the application MUST - * call rd_kafka_poll() (or equiv.) - * to make sure delivered messages - * are drained from the internal - * delivery report queue. - * Failure to do so will result - * in indefinately blocking on - * the produce() call when the - * message queue is full. - */ + RK_MSG_BLOCK = 0x4 /**< Block produce*() on message queue + * full. + * WARNING: + * If a delivery report callback + * is used the application MUST + * call rd_kafka_poll() (or equiv.) + * to make sure delivered messages + * are drained from the internal + * delivery report queue. + * Failure to do so will result + * in indefinately blocking on + * the produce() call when the + * message queue is full. 
+ */ /**@cond NO_DOC*/ /* For backwards compatibility: */ #ifndef MSG_COPY /* defined in sys/msg.h */ - , /** this comma must exist betwen - * RK_MSG_BLOCK and MSG_FREE - */ + , /** this comma must exist betwen + * RK_MSG_BLOCK and MSG_FREE + */ MSG_FREE = RK_MSG_FREE, MSG_COPY = RK_MSG_COPY #endif - /**@endcond*/ + /**@endcond*/ }; /** @@ -3207,21 +3245,26 @@ class RD_EXPORT Producer : public virtual Handle { * * - ERR__UNKNOWN_TOPIC - topic is unknown in the Kafka cluster. */ - virtual ErrorCode produce (Topic *topic, int32_t partition, - int msgflags, - void *payload, size_t len, - const std::string *key, - void *msg_opaque) = 0; + virtual ErrorCode produce(Topic *topic, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const std::string *key, + void *msg_opaque) = 0; /** * @brief Variant produce() that passes the key as a pointer and length * instead of as a const std::string *. */ - virtual ErrorCode produce (Topic *topic, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - void *msg_opaque) = 0; + virtual ErrorCode produce(Topic *topic, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + void *msg_opaque) = 0; /** * @brief produce() variant that takes topic as a string (no need for @@ -3229,11 +3272,15 @@ class RD_EXPORT Producer : public virtual Handle { * message timestamp (milliseconds since beginning of epoch, UTC). * Otherwise identical to produce() above. 
*/ - virtual ErrorCode produce (const std::string topic_name, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, void *msg_opaque) = 0; + virtual ErrorCode produce(const std::string topic_name, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + int64_t timestamp, + void *msg_opaque) = 0; /** * @brief produce() variant that that allows for Header support on produce @@ -3242,30 +3289,34 @@ class RD_EXPORT Producer : public virtual Handle { * @warning The \p headers will be freed/deleted if the produce() call * succeeds, or left untouched if produce() fails. */ - virtual ErrorCode produce (const std::string topic_name, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, - RdKafka::Headers *headers, - void *msg_opaque) = 0; + virtual ErrorCode produce(const std::string topic_name, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + int64_t timestamp, + RdKafka::Headers *headers, + void *msg_opaque) = 0; /** * @brief Variant produce() that accepts vectors for key and payload. * The vector data will be copied. */ - virtual ErrorCode produce (Topic *topic, int32_t partition, - const std::vector *payload, - const std::vector *key, - void *msg_opaque) = 0; + virtual ErrorCode produce(Topic *topic, + int32_t partition, + const std::vector *payload, + const std::vector *key, + void *msg_opaque) = 0; /** * @brief Wait until all outstanding produce requests, et.al, are completed. - * This should typically be done prior to destroying a producer instance - * to make sure all queued and in-flight produce requests are completed - * before terminating. + * This should typically be done prior to destroying a producer + * instance to make sure all queued and in-flight produce requests are + * completed before terminating. 
* * @remark The \c linger.ms time will be ignored for the duration of the call, * queued messages will be sent to the broker as soon as possible. @@ -3276,7 +3327,7 @@ class RD_EXPORT Producer : public virtual Handle { * @returns ERR__TIMED_OUT if \p timeout_ms was reached before all * outstanding requests were completed, else ERR_NO_ERROR */ - virtual ErrorCode flush (int timeout_ms) = 0; + virtual ErrorCode flush(int timeout_ms) = 0; /** @@ -3306,7 +3357,7 @@ class RD_EXPORT Producer : public virtual Handle { * ERR__INVALID_ARG if the \p purge flags are invalid or unknown, * ERR__NOT_IMPLEMENTED if called on a non-producer client instance. */ - virtual ErrorCode purge (int purge_flags) = 0; + virtual ErrorCode purge(int purge_flags) = 0; /** * @brief RdKafka::Handle::purge() \p purge_flags @@ -3351,7 +3402,7 @@ class RD_EXPORT Producer : public virtual Handle { * See rd_kafka_init_transactions() in rdkafka.h for more information. * */ - virtual Error *init_transactions (int timeout_ms) = 0; + virtual Error *init_transactions(int timeout_ms) = 0; /** @@ -3366,7 +3417,7 @@ class RD_EXPORT Producer : public virtual Handle { * * See rd_kafka_begin_transaction() in rdkafka.h for more information. */ - virtual Error *begin_transaction () = 0; + virtual Error *begin_transaction() = 0; /** * @brief Sends a list of topic partition offsets to the consumer group @@ -3414,10 +3465,10 @@ class RD_EXPORT Producer : public virtual Handle { * See rd_kafka_send_offsets_to_transaction() in rdkafka.h for * more information. */ - virtual Error *send_offsets_to_transaction ( - const std::vector &offsets, - const ConsumerGroupMetadata *group_metadata, - int timeout_ms) = 0; + virtual Error *send_offsets_to_transaction( + const std::vector &offsets, + const ConsumerGroupMetadata *group_metadata, + int timeout_ms) = 0; /** * @brief Commit the current transaction as started with begin_transaction(). 
@@ -3447,13 +3498,13 @@ class RD_EXPORT Producer : public virtual Handle { * * See rd_kafka_commit_transaction() in rdkafka.h for more information. */ - virtual Error *commit_transaction (int timeout_ms) = 0; + virtual Error *commit_transaction(int timeout_ms) = 0; /** * @brief Aborts the ongoing transaction. * - * This function should also be used to recover from non-fatal abortable - * transaction errors. + * This function should also be used to recover from non-fatal + * abortable transaction errors. * * Any outstanding messages will be purged and fail with * RdKafka::ERR__PURGE_INFLIGHT or RdKafka::ERR__PURGE_QUEUE. @@ -3479,7 +3530,7 @@ class RD_EXPORT Producer : public virtual Handle { * * See rd_kafka_abort_transaction() in rdkafka.h for more information. */ - virtual Error *abort_transaction (int timeout_ms) = 0; + virtual Error *abort_transaction(int timeout_ms) = 0; /**@}*/ }; @@ -3526,7 +3577,7 @@ class PartitionMetadata { /** @brief Replicas iterator */ typedef ReplicasVector::const_iterator ReplicasIterator; /** @brief ISRs iterator */ - typedef ISRSVector::const_iterator ISRSIterator; + typedef ISRSVector::const_iterator ISRSIterator; /** @returns Partition id */ @@ -3557,7 +3608,7 @@ class PartitionMetadata { class TopicMetadata { public: /** @brief Partitions */ - typedef std::vector PartitionMetadataVector; + typedef std::vector PartitionMetadataVector; /** @brief Partitions iterator */ typedef PartitionMetadataVector::const_iterator PartitionMetadataIterator; @@ -3580,14 +3631,14 @@ class TopicMetadata { class Metadata { public: /** @brief Brokers */ - typedef std::vector BrokerMetadataVector; + typedef std::vector BrokerMetadataVector; /** @brief Topics */ - typedef std::vector TopicMetadataVector; + typedef std::vector TopicMetadataVector; /** @brief Brokers iterator */ typedef BrokerMetadataVector::const_iterator BrokerMetadataIterator; /** @brief Topics iterator */ - typedef TopicMetadataVector::const_iterator TopicMetadataIterator; + typedef 
TopicMetadataVector::const_iterator TopicMetadataIterator; /** @@ -3602,7 +3653,7 @@ class Metadata { * @remark Ownership of the returned pointer is retained by the instance of * Metadata that is called. */ - virtual const TopicMetadataVector *topics() const = 0; + virtual const TopicMetadataVector *topics() const = 0; /** @brief Broker (id) originating this metadata */ virtual int32_t orig_broker_id() const = 0; @@ -3615,7 +3666,7 @@ class Metadata { /**@}*/ -} +} // namespace RdKafka #endif /* _RDKAFKACPP_H_ */ diff --git a/src-cpp/rdkafkacpp_int.h b/src-cpp/rdkafkacpp_int.h index 239f363189..6b70a23680 100644 --- a/src-cpp/rdkafkacpp_int.h +++ b/src-cpp/rdkafkacpp_int.h @@ -57,90 +57,101 @@ typedef int mode_t; namespace RdKafka { void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque); -void log_cb_trampoline (const rd_kafka_t *rk, int level, - const char *fac, const char *buf); -void error_cb_trampoline (rd_kafka_t *rk, int err, const char *reason, - void *opaque); -void throttle_cb_trampoline (rd_kafka_t *rk, const char *broker_name, - int32_t broker_id, int throttle_time_ms, - void *opaque); -int stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len, +void log_cb_trampoline(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); +void error_cb_trampoline(rd_kafka_t *rk, + int err, + const char *reason, void *opaque); -int socket_cb_trampoline (int domain, int type, int protocol, void *opaque); -int open_cb_trampoline (const char *pathname, int flags, mode_t mode, +void throttle_cb_trampoline(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque); +int stats_cb_trampoline(rd_kafka_t *rk, + char *json, + size_t json_len, void *opaque); -void rebalance_cb_trampoline (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *c_partitions, - void *opaque); -void offset_commit_cb_trampoline0 ( - rd_kafka_t *rk, - rd_kafka_resp_err_t err, - 
rd_kafka_topic_partition_list_t *c_offsets, void *opaque); -void oauthbearer_token_refresh_cb_trampoline (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque); - - int ssl_cert_verify_cb_trampoline ( - rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque); - -rd_kafka_topic_partition_list_t * - partitions_to_c_parts (const std::vector &partitions); +int socket_cb_trampoline(int domain, int type, int protocol, void *opaque); +int open_cb_trampoline(const char *pathname, + int flags, + mode_t mode, + void *opaque); +void rebalance_cb_trampoline(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *c_partitions, + void *opaque); +void offset_commit_cb_trampoline0(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *c_offsets, + void *opaque); +void oauthbearer_token_refresh_cb_trampoline(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque); + +int ssl_cert_verify_cb_trampoline(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque); + +rd_kafka_topic_partition_list_t *partitions_to_c_parts( + const std::vector &partitions); /** * @brief Update the application provided 'partitions' with info from 'c_parts' */ -void update_partitions_from_c_parts (std::vector &partitions, - const rd_kafka_topic_partition_list_t *c_parts); +void update_partitions_from_c_parts( + std::vector &partitions, + const rd_kafka_topic_partition_list_t *c_parts); class ErrorImpl : public Error { public: - ~ErrorImpl () { + ~ErrorImpl() { rd_kafka_error_destroy(c_error_); }; - ErrorImpl (ErrorCode code, const std::string *errstr) { + ErrorImpl(ErrorCode code, const std::string *errstr) { c_error_ = rd_kafka_error_new(static_cast(code), errstr ? "%s" : NULL, errstr ? 
errstr->c_str() : NULL); } - ErrorImpl (rd_kafka_error_t *c_error): - c_error_(c_error) {}; + ErrorImpl(rd_kafka_error_t *c_error) : c_error_(c_error) {}; - static Error *create (ErrorCode code, const std::string *errstr) { + static Error *create(ErrorCode code, const std::string *errstr) { return new ErrorImpl(code, errstr); } - ErrorCode code () const { + ErrorCode code() const { return static_cast(rd_kafka_error_code(c_error_)); } - std::string name () const { + std::string name() const { return std::string(rd_kafka_error_name(c_error_)); } - std::string str () const { + std::string str() const { return std::string(rd_kafka_error_string(c_error_)); } - bool is_fatal () const { + bool is_fatal() const { return !!rd_kafka_error_is_fatal(c_error_); } - bool is_retriable () const { + bool is_retriable() const { return !!rd_kafka_error_is_retriable(c_error_); } - bool txn_requires_abort () const { + bool txn_requires_abort() const { return !!rd_kafka_error_txn_requires_abort(c_error_); } @@ -150,53 +161,85 @@ class ErrorImpl : public Error { class EventImpl : public Event { public: - ~EventImpl () {}; - - EventImpl (Type type, ErrorCode err, Severity severity, - const char *fac, const char *str): - type_(type), err_(err), severity_(severity), fac_(fac ? 
fac : ""), - str_(str), id_(0), throttle_time_(0), fatal_(false) {}; - - EventImpl (Type type): - type_(type), err_(ERR_NO_ERROR), severity_(EVENT_SEVERITY_EMERG), - fac_(""), str_(""), id_(0), throttle_time_(0), fatal_(false) {}; - - Type type () const { return type_; } - ErrorCode err () const { return err_; } - Severity severity () const { return severity_; } - std::string fac () const { return fac_; } - std::string str () const { return str_; } - std::string broker_name () const { - if (type_ == EVENT_THROTTLE) - return str_; - else - return std::string(""); - } - int broker_id () const { return id_; } - int throttle_time () const { return throttle_time_; } - - bool fatal () const { return fatal_; } - - Type type_; - ErrorCode err_; - Severity severity_; + ~EventImpl() {}; + + EventImpl(Type type, + ErrorCode err, + Severity severity, + const char *fac, + const char *str) : + type_(type), + err_(err), + severity_(severity), + fac_(fac ? fac : ""), + str_(str), + id_(0), + throttle_time_(0), + fatal_(false) {}; + + EventImpl(Type type) : + type_(type), + err_(ERR_NO_ERROR), + severity_(EVENT_SEVERITY_EMERG), + fac_(""), + str_(""), + id_(0), + throttle_time_(0), + fatal_(false) {}; + + Type type() const { + return type_; + } + ErrorCode err() const { + return err_; + } + Severity severity() const { + return severity_; + } + std::string fac() const { + return fac_; + } + std::string str() const { + return str_; + } + std::string broker_name() const { + if (type_ == EVENT_THROTTLE) + return str_; + else + return std::string(""); + } + int broker_id() const { + return id_; + } + int throttle_time() const { + return throttle_time_; + } + + bool fatal() const { + return fatal_; + } + + Type type_; + ErrorCode err_; + Severity severity_; std::string fac_; - std::string str_; /* reused for THROTTLE broker_name */ - int id_; - int throttle_time_; - bool fatal_; + std::string str_; /* reused for THROTTLE broker_name */ + int id_; + int throttle_time_; + bool fatal_; }; 
class QueueImpl : virtual public Queue { public: - QueueImpl(rd_kafka_queue_t *c_rkqu): queue_(c_rkqu) {} - ~QueueImpl () { + QueueImpl(rd_kafka_queue_t *c_rkqu) : queue_(c_rkqu) { + } + ~QueueImpl() { rd_kafka_queue_destroy(queue_); } - static Queue *create (Handle *base); - ErrorCode forward (Queue *queue); - Message *consume (int timeout_ms); - int poll (int timeout_ms); + static Queue *create(Handle *base); + ErrorCode forward(Queue *queue); + Message *consume(int timeout_ms); + int poll(int timeout_ms); void io_event_enable(int fd, const void *payload, size_t size); rd_kafka_queue_t *queue_; @@ -204,17 +247,15 @@ class QueueImpl : virtual public Queue { - - class HeadersImpl : public Headers { public: - HeadersImpl (): - headers_ (rd_kafka_headers_new(8)) {} + HeadersImpl() : headers_(rd_kafka_headers_new(8)) { + } - HeadersImpl (rd_kafka_headers_t *headers): - headers_ (headers) {} + HeadersImpl(rd_kafka_headers_t *headers) : headers_(headers) { + } - HeadersImpl (const std::vector
&headers) { + HeadersImpl(const std::vector
&headers) { if (headers.size() > 0) { headers_ = rd_kafka_headers_new(headers.size()); from_vector(headers); @@ -229,41 +270,37 @@ class HeadersImpl : public Headers { } } - ErrorCode add(const std::string& key, const char *value) { + ErrorCode add(const std::string &key, const char *value) { rd_kafka_resp_err_t err; - err = rd_kafka_header_add(headers_, - key.c_str(), key.size(), - value, -1); + err = rd_kafka_header_add(headers_, key.c_str(), key.size(), value, -1); return static_cast(err); } - ErrorCode add(const std::string& key, const void *value, size_t value_size) { + ErrorCode add(const std::string &key, const void *value, size_t value_size) { rd_kafka_resp_err_t err; - err = rd_kafka_header_add(headers_, - key.c_str(), key.size(), - value, value_size); + err = rd_kafka_header_add(headers_, key.c_str(), key.size(), value, + value_size); return static_cast(err); } ErrorCode add(const std::string &key, const std::string &value) { rd_kafka_resp_err_t err; - err = rd_kafka_header_add(headers_, - key.c_str(), key.size(), - value.c_str(), value.size()); + err = rd_kafka_header_add(headers_, key.c_str(), key.size(), value.c_str(), + value.size()); return static_cast(err); } ErrorCode add(const Header &header) { rd_kafka_resp_err_t err; - err = rd_kafka_header_add(headers_, - header.key().c_str(), header.key().size(), - header.value(), header.value_size()); + err = + rd_kafka_header_add(headers_, header.key().c_str(), header.key().size(), + header.value(), header.value_size()); return static_cast(err); } - ErrorCode remove(const std::string& key) { + ErrorCode remove(const std::string &key) { rd_kafka_resp_err_t err; - err = rd_kafka_header_remove (headers_, key.c_str()); + err = rd_kafka_header_remove(headers_, key.c_str()); return static_cast(err); } @@ -272,16 +309,15 @@ class HeadersImpl : public Headers { const void *value; size_t size; rd_kafka_resp_err_t err; - for (size_t idx = 0; - !(err = rd_kafka_header_get(headers_, idx, key.c_str(), - &value, &size)) ; 
+ for (size_t idx = 0; !(err = rd_kafka_header_get(headers_, idx, key.c_str(), + &value, &size)); idx++) { headers.push_back(Headers::Header(key, value, size)); } return headers; } - Headers::Header get_last(const std::string& key) const { + Headers::Header get_last(const std::string &key) const { const void *value; size_t size; rd_kafka_resp_err_t err; @@ -296,8 +332,7 @@ class HeadersImpl : public Headers { const char *name; const void *valuep; size_t size; - while (!rd_kafka_header_get_all(headers_, idx++, - &name, &valuep, &size)) { + while (!rd_kafka_header_get_all(headers_, idx++, &name, &valuep, &size)) { headers.push_back(Headers::Header(name, valuep, size)); } return headers; @@ -318,7 +353,7 @@ class HeadersImpl : public Headers { } -private: + private: void from_vector(const std::vector
&headers) { if (headers.size() == 0) return; @@ -327,8 +362,8 @@ class HeadersImpl : public Headers { this->add(*it); } - HeadersImpl(HeadersImpl const&) /*= delete*/; - HeadersImpl& operator=(HeadersImpl const&) /*= delete*/; + HeadersImpl(HeadersImpl const &) /*= delete*/; + HeadersImpl &operator=(HeadersImpl const &) /*= delete*/; rd_kafka_headers_t *headers_; }; @@ -337,7 +372,7 @@ class HeadersImpl : public Headers { class MessageImpl : public Message { public: - ~MessageImpl () { + ~MessageImpl() { if (free_rkmessage_) rd_kafka_message_destroy(const_cast(rkmessage_)); if (key_) @@ -346,20 +381,36 @@ class MessageImpl : public Message { delete headers_; }; - MessageImpl (rd_kafka_type_t rk_type, - RdKafka::Topic *topic, rd_kafka_message_t *rkmessage): - topic_(topic), rkmessage_(rkmessage), - free_rkmessage_(true), key_(NULL), headers_(NULL), rk_type_(rk_type) {} - - MessageImpl (rd_kafka_type_t rk_type, - RdKafka::Topic *topic, rd_kafka_message_t *rkmessage, - bool dofree): - topic_(topic), rkmessage_(rkmessage), - free_rkmessage_(dofree), key_(NULL), headers_(NULL), rk_type_(rk_type) {} - - MessageImpl (rd_kafka_type_t rk_type, rd_kafka_message_t *rkmessage): - topic_(NULL), rkmessage_(rkmessage), - free_rkmessage_(true), key_(NULL), headers_(NULL), rk_type_(rk_type) { + MessageImpl(rd_kafka_type_t rk_type, + RdKafka::Topic *topic, + rd_kafka_message_t *rkmessage) : + topic_(topic), + rkmessage_(rkmessage), + free_rkmessage_(true), + key_(NULL), + headers_(NULL), + rk_type_(rk_type) { + } + + MessageImpl(rd_kafka_type_t rk_type, + RdKafka::Topic *topic, + rd_kafka_message_t *rkmessage, + bool dofree) : + topic_(topic), + rkmessage_(rkmessage), + free_rkmessage_(dofree), + key_(NULL), + headers_(NULL), + rk_type_(rk_type) { + } + + MessageImpl(rd_kafka_type_t rk_type, rd_kafka_message_t *rkmessage) : + topic_(NULL), + rkmessage_(rkmessage), + free_rkmessage_(true), + key_(NULL), + headers_(NULL), + rk_type_(rk_type) { if (rkmessage->rkt) { /* Possibly NULL */ 
topic_ = static_cast(rd_kafka_topic_opaque(rkmessage->rkt)); @@ -367,16 +418,20 @@ class MessageImpl : public Message { } /* Create errored message */ - MessageImpl (rd_kafka_type_t rk_type, - RdKafka::Topic *topic, RdKafka::ErrorCode err): - topic_(topic), free_rkmessage_(false), - key_(NULL), headers_(NULL), rk_type_(rk_type) { + MessageImpl(rd_kafka_type_t rk_type, + RdKafka::Topic *topic, + RdKafka::ErrorCode err) : + topic_(topic), + free_rkmessage_(false), + key_(NULL), + headers_(NULL), + rk_type_(rk_type) { rkmessage_ = &rkmessage_err_; memset(&rkmessage_err_, 0, sizeof(rkmessage_err_)); rkmessage_err_.err = static_cast(err); } - std::string errstr() const { + std::string errstr() const { const char *es; /* message_errstr() is only available for the consumer. */ if (rk_type_ == RD_KAFKA_CONSUMER) @@ -387,62 +442,79 @@ class MessageImpl : public Message { return std::string(es ? es : ""); } - ErrorCode err () const { + ErrorCode err() const { return static_cast(rkmessage_->err); } - Topic *topic () const { return topic_; } - std::string topic_name () const { - if (rkmessage_->rkt) - return rd_kafka_topic_name(rkmessage_->rkt); - else - return ""; + Topic *topic() const { + return topic_; } - int32_t partition () const { return rkmessage_->partition; } - void *payload () const { return rkmessage_->payload; } - size_t len () const { return rkmessage_->len; } - const std::string *key () const { + std::string topic_name() const { + if (rkmessage_->rkt) + return rd_kafka_topic_name(rkmessage_->rkt); + else + return ""; + } + int32_t partition() const { + return rkmessage_->partition; + } + void *payload() const { + return rkmessage_->payload; + } + size_t len() const { + return rkmessage_->len; + } + const std::string *key() const { if (key_) { return key_; } else if (rkmessage_->key) { - key_ = new std::string(static_cast(rkmessage_->key), rkmessage_->key_len); + key_ = new std::string(static_cast(rkmessage_->key), + rkmessage_->key_len); return key_; } return 
NULL; } - const void *key_pointer () const { return rkmessage_->key; } - size_t key_len () const { return rkmessage_->key_len; } + const void *key_pointer() const { + return rkmessage_->key; + } + size_t key_len() const { + return rkmessage_->key_len; + } - int64_t offset () const { return rkmessage_->offset; } + int64_t offset() const { + return rkmessage_->offset; + } - MessageTimestamp timestamp () const { - MessageTimestamp ts; - rd_kafka_timestamp_type_t tstype; - ts.timestamp = rd_kafka_message_timestamp(rkmessage_, &tstype); - ts.type = static_cast(tstype); - return ts; + MessageTimestamp timestamp() const { + MessageTimestamp ts; + rd_kafka_timestamp_type_t tstype; + ts.timestamp = rd_kafka_message_timestamp(rkmessage_, &tstype); + ts.type = static_cast(tstype); + return ts; } - void *msg_opaque () const { return rkmessage_->_private; }; + void *msg_opaque() const { + return rkmessage_->_private; + }; - int64_t latency () const { - return rd_kafka_message_latency(rkmessage_); + int64_t latency() const { + return rd_kafka_message_latency(rkmessage_); } - struct rd_kafka_message_s *c_ptr () { - return rkmessage_; + struct rd_kafka_message_s *c_ptr() { + return rkmessage_; } - Status status () const { - return static_cast(rd_kafka_message_status(rkmessage_)); + Status status() const { + return static_cast(rd_kafka_message_status(rkmessage_)); } - Headers *headers () { + Headers *headers() { ErrorCode err; return headers(&err); } - Headers *headers (ErrorCode *err) { + Headers *headers(ErrorCode *err) { *err = ERR_NO_ERROR; if (!headers_) { @@ -460,7 +532,7 @@ class MessageImpl : public Message { return headers_; } - int32_t broker_id () const { + int32_t broker_id() const { return rd_kafka_message_broker_id(rkmessage_); } @@ -473,10 +545,10 @@ class MessageImpl : public Message { rd_kafka_message_t rkmessage_err_; mutable std::string *key_; /* mutable because it's a cached value */ -private: + private: /* "delete" copy ctor + copy assignment, for safety of 
key_ */ - MessageImpl(MessageImpl const&) /*= delete*/; - MessageImpl& operator=(MessageImpl const&) /*= delete*/; + MessageImpl(MessageImpl const &) /*= delete*/; + MessageImpl &operator=(MessageImpl const &) /*= delete*/; RdKafka::Headers *headers_; const rd_kafka_type_t rk_type_; /**< Client type */ @@ -485,8 +557,8 @@ class MessageImpl : public Message { class ConfImpl : public Conf { public: - ConfImpl(ConfType conf_type) - :consume_cb_(NULL), + ConfImpl(ConfType conf_type) : + consume_cb_(NULL), dr_cb_(NULL), event_cb_(NULL), socket_cb_(NULL), @@ -499,9 +571,9 @@ class ConfImpl : public Conf { ssl_cert_verify_cb_(NULL), conf_type_(conf_type), rk_conf_(NULL), - rkt_conf_(NULL) - {} - ~ConfImpl () { + rkt_conf_(NULL) { + } + ~ConfImpl() { if (rk_conf_) rd_kafka_conf_destroy(rk_conf_); else if (rkt_conf_) @@ -512,8 +584,9 @@ class ConfImpl : public Conf { const std::string &value, std::string &errstr); - Conf::ConfResult set (const std::string &name, DeliveryReportCb *dr_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + DeliveryReportCb *dr_cb, + std::string &errstr) { if (name != "dr_cb") { errstr = "Invalid value type, expected RdKafka::DeliveryReportCb"; return Conf::CONF_INVALID; @@ -528,11 +601,12 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, - OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb, + std::string &errstr) { if (name != "oauthbearer_token_refresh_cb") { - errstr = "Invalid value type, expected RdKafka::OAuthBearerTokenRefreshCb"; + errstr = + "Invalid value type, expected RdKafka::OAuthBearerTokenRefreshCb"; return Conf::CONF_INVALID; } @@ -545,8 +619,9 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, EventCb *event_cb, - std::string &errstr) { + Conf::ConfResult 
set(const std::string &name, + EventCb *event_cb, + std::string &errstr) { if (name != "event_cb") { errstr = "Invalid value type, expected RdKafka::EventCb"; return Conf::CONF_INVALID; @@ -561,8 +636,9 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, const Conf *topic_conf, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + const Conf *topic_conf, + std::string &errstr) { const ConfImpl *tconf_impl = dynamic_cast(topic_conf); if (name != "default_topic_conf" || !tconf_impl->rkt_conf_) { @@ -575,15 +651,15 @@ class ConfImpl : public Conf { return Conf::CONF_INVALID; } - rd_kafka_conf_set_default_topic_conf(rk_conf_, - rd_kafka_topic_conf_dup(tconf_impl-> - rkt_conf_)); + rd_kafka_conf_set_default_topic_conf( + rk_conf_, rd_kafka_topic_conf_dup(tconf_impl->rkt_conf_)); return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, PartitionerCb *partitioner_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + PartitionerCb *partitioner_cb, + std::string &errstr) { if (name != "partitioner_cb") { errstr = "Invalid value type, expected RdKafka::PartitionerCb"; return Conf::CONF_INVALID; @@ -598,9 +674,9 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, - PartitionerKeyPointerCb *partitioner_kp_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + PartitionerKeyPointerCb *partitioner_kp_cb, + std::string &errstr) { if (name != "partitioner_key_pointer_cb") { errstr = "Invalid value type, expected RdKafka::PartitionerKeyPointerCb"; return Conf::CONF_INVALID; @@ -615,8 +691,9 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, SocketCb *socket_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + SocketCb *socket_cb, + std::string &errstr) { if (name != "socket_cb") { errstr = "Invalid value 
type, expected RdKafka::SocketCb"; return Conf::CONF_INVALID; @@ -632,8 +709,9 @@ class ConfImpl : public Conf { } - Conf::ConfResult set (const std::string &name, OpenCb *open_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + OpenCb *open_cb, + std::string &errstr) { if (name != "open_cb") { errstr = "Invalid value type, expected RdKafka::OpenCb"; return Conf::CONF_INVALID; @@ -650,9 +728,9 @@ class ConfImpl : public Conf { - - Conf::ConfResult set (const std::string &name, RebalanceCb *rebalance_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + RebalanceCb *rebalance_cb, + std::string &errstr) { if (name != "rebalance_cb") { errstr = "Invalid value type, expected RdKafka::RebalanceCb"; return Conf::CONF_INVALID; @@ -668,9 +746,9 @@ class ConfImpl : public Conf { } - Conf::ConfResult set (const std::string &name, - OffsetCommitCb *offset_commit_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + OffsetCommitCb *offset_commit_cb, + std::string &errstr) { if (name != "offset_commit_cb") { errstr = "Invalid value type, expected RdKafka::OffsetCommitCb"; return Conf::CONF_INVALID; @@ -686,9 +764,9 @@ class ConfImpl : public Conf { } - Conf::ConfResult set (const std::string &name, - SslCertificateVerifyCb *ssl_cert_verify_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + SslCertificateVerifyCb *ssl_cert_verify_cb, + std::string &errstr) { if (name != "ssl_cert_verify_cb") { errstr = "Invalid value type, expected RdKafka::SslCertificateVerifyCb"; return Conf::CONF_INVALID; @@ -703,8 +781,7 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set_engine_callback_data (void *value, - std::string &errstr) { + Conf::ConfResult set_engine_callback_data(void *value, std::string &errstr) { if (!rk_conf_) { errstr = "Requires RdKafka::Conf::CONF_GLOBAL object"; return Conf::CONF_INVALID; @@ -715,10 +792,11 @@ class ConfImpl : public Conf { } 
- Conf::ConfResult set_ssl_cert (RdKafka::CertificateType cert_type, - RdKafka::CertificateEncoding cert_enc, - const void *buffer, size_t size, - std::string &errstr) { + Conf::ConfResult set_ssl_cert(RdKafka::CertificateType cert_type, + RdKafka::CertificateEncoding cert_enc, + const void *buffer, + size_t size, + std::string &errstr) { rd_kafka_conf_res_t res; char errbuf[512]; @@ -728,10 +806,9 @@ class ConfImpl : public Conf { } res = rd_kafka_conf_set_ssl_cert( - rk_conf_, - static_cast(cert_type), - static_cast(cert_enc), - buffer, size, errbuf, sizeof(errbuf)); + rk_conf_, static_cast(cert_type), + static_cast(cert_enc), buffer, size, errbuf, + sizeof(errbuf)); if (res != RD_KAFKA_CONF_OK) errstr = errbuf; @@ -739,7 +816,7 @@ class ConfImpl : public Conf { return static_cast(res); } - Conf::ConfResult enable_sasl_queue (bool enable, std::string &errstr) { + Conf::ConfResult enable_sasl_queue(bool enable, std::string &errstr) { if (!rk_conf_) { errstr = "Requires RdKafka::Conf::CONF_GLOBAL object"; return Conf::CONF_INVALID; @@ -752,12 +829,10 @@ class ConfImpl : public Conf { Conf::ConfResult get(const std::string &name, std::string &value) const { - if (name.compare("dr_cb") == 0 || - name.compare("event_cb") == 0 || + if (name.compare("dr_cb") == 0 || name.compare("event_cb") == 0 || name.compare("partitioner_cb") == 0 || name.compare("partitioner_key_pointer_cb") == 0 || - name.compare("socket_cb") == 0 || - name.compare("open_cb") == 0 || + name.compare("socket_cb") == 0 || name.compare("open_cb") == 0 || name.compare("rebalance_cb") == 0 || name.compare("offset_commit_cb") == 0 || name.compare("oauthbearer_token_refresh_cb") == 0 || @@ -771,22 +846,18 @@ class ConfImpl : public Conf { /* Get size of property */ size_t size; if (rk_conf_) - res = rd_kafka_conf_get(rk_conf_, - name.c_str(), NULL, &size); + res = rd_kafka_conf_get(rk_conf_, name.c_str(), NULL, &size); else if (rkt_conf_) - res = rd_kafka_topic_conf_get(rkt_conf_, - name.c_str(), NULL, 
&size); + res = rd_kafka_topic_conf_get(rkt_conf_, name.c_str(), NULL, &size); if (res != RD_KAFKA_CONF_OK) return static_cast(res); char *tmpValue = new char[size]; if (rk_conf_) - res = rd_kafka_conf_get(rk_conf_, name.c_str(), - tmpValue, &size); + res = rd_kafka_conf_get(rk_conf_, name.c_str(), tmpValue, &size); else if (rkt_conf_) - res = rd_kafka_topic_conf_get(rkt_conf_, - name.c_str(), tmpValue, &size); + res = rd_kafka_topic_conf_get(rkt_conf_, name.c_str(), tmpValue, &size); if (res == RD_KAFKA_CONF_OK) value.assign(tmpValue); @@ -796,81 +867,82 @@ class ConfImpl : public Conf { } Conf::ConfResult get(DeliveryReportCb *&dr_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - dr_cb = this->dr_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + dr_cb = this->dr_cb_; + return Conf::CONF_OK; } Conf::ConfResult get( - OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - oauthbearer_token_refresh_cb = this->oauthbearer_token_refresh_cb_; - return Conf::CONF_OK; + OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const { + if (!rk_conf_) + return Conf::CONF_INVALID; + oauthbearer_token_refresh_cb = this->oauthbearer_token_refresh_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(EventCb *&event_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - event_cb = this->event_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + event_cb = this->event_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(PartitionerCb *&partitioner_cb) const { - if (!rkt_conf_) - return Conf::CONF_INVALID; - partitioner_cb = this->partitioner_cb_; - return Conf::CONF_OK; + if (!rkt_conf_) + return Conf::CONF_INVALID; + partitioner_cb = this->partitioner_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(PartitionerKeyPointerCb *&partitioner_kp_cb) const { - if (!rkt_conf_) - return Conf::CONF_INVALID; - partitioner_kp_cb = this->partitioner_kp_cb_; - 
return Conf::CONF_OK; + if (!rkt_conf_) + return Conf::CONF_INVALID; + partitioner_kp_cb = this->partitioner_kp_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(SocketCb *&socket_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - socket_cb = this->socket_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + socket_cb = this->socket_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(OpenCb *&open_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - open_cb = this->open_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + open_cb = this->open_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(RebalanceCb *&rebalance_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - rebalance_cb = this->rebalance_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + rebalance_cb = this->rebalance_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - offset_commit_cb = this->offset_commit_cb_; - return Conf::CONF_OK; - } + if (!rk_conf_) + return Conf::CONF_INVALID; + offset_commit_cb = this->offset_commit_cb_; + return Conf::CONF_OK; + } Conf::ConfResult get(SslCertificateVerifyCb *&ssl_cert_verify_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - ssl_cert_verify_cb = this->ssl_cert_verify_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + ssl_cert_verify_cb = this->ssl_cert_verify_cb_; + return Conf::CONF_OK; } - std::list *dump (); + std::list *dump(); - Conf::ConfResult set (const std::string &name, ConsumeCb *consume_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + ConsumeCb *consume_cb, + std::string &errstr) { if (name != "consume_cb") { errstr = "Invalid value type, expected RdKafka::ConsumeCb"; return Conf::CONF_INVALID; @@ -885,14 +957,14 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - struct rd_kafka_conf_s 
*c_ptr_global () { + struct rd_kafka_conf_s *c_ptr_global() { if (conf_type_ == CONF_GLOBAL) return rk_conf_; else return NULL; } - struct rd_kafka_topic_conf_s *c_ptr_topic () { + struct rd_kafka_topic_conf_s *c_ptr_topic() { if (conf_type_ == CONF_TOPIC) return rkt_conf_; else @@ -919,48 +991,54 @@ class ConfImpl : public Conf { class HandleImpl : virtual public Handle { public: ~HandleImpl() {}; - HandleImpl () {}; - const std::string name () const { return std::string(rd_kafka_name(rk_)); }; - const std::string memberid () const { - char *str = rd_kafka_memberid(rk_); - std::string memberid = str ? str : ""; - if (str) - rd_kafka_mem_free(rk_, str); - return memberid; - } - int poll (int timeout_ms) { return rd_kafka_poll(rk_, timeout_ms); }; - int outq_len () { return rd_kafka_outq_len(rk_); }; - - void set_common_config (const RdKafka::ConfImpl *confimpl); - - RdKafka::ErrorCode metadata (bool all_topics,const Topic *only_rkt, - Metadata **metadatap, int timeout_ms); - - ErrorCode pause (std::vector &partitions); - ErrorCode resume (std::vector &partitions); - - ErrorCode query_watermark_offsets (const std::string &topic, - int32_t partition, - int64_t *low, int64_t *high, - int timeout_ms) { - return static_cast( - rd_kafka_query_watermark_offsets( - rk_, topic.c_str(), partition, - low, high, timeout_ms)); + HandleImpl() {}; + const std::string name() const { + return std::string(rd_kafka_name(rk_)); + }; + const std::string memberid() const { + char *str = rd_kafka_memberid(rk_); + std::string memberid = str ? 
str : ""; + if (str) + rd_kafka_mem_free(rk_, str); + return memberid; + } + int poll(int timeout_ms) { + return rd_kafka_poll(rk_, timeout_ms); + }; + int outq_len() { + return rd_kafka_outq_len(rk_); + }; + + void set_common_config(const RdKafka::ConfImpl *confimpl); + + RdKafka::ErrorCode metadata(bool all_topics, + const Topic *only_rkt, + Metadata **metadatap, + int timeout_ms); + + ErrorCode pause(std::vector &partitions); + ErrorCode resume(std::vector &partitions); + + ErrorCode query_watermark_offsets(const std::string &topic, + int32_t partition, + int64_t *low, + int64_t *high, + int timeout_ms) { + return static_cast(rd_kafka_query_watermark_offsets( + rk_, topic.c_str(), partition, low, high, timeout_ms)); } - ErrorCode get_watermark_offsets (const std::string &topic, - int32_t partition, - int64_t *low, int64_t *high) { - return static_cast( - rd_kafka_get_watermark_offsets( - rk_, topic.c_str(), partition, - low, high)); + ErrorCode get_watermark_offsets(const std::string &topic, + int32_t partition, + int64_t *low, + int64_t *high) { + return static_cast(rd_kafka_get_watermark_offsets( + rk_, topic.c_str(), partition, low, high)); } - Queue *get_partition_queue (const TopicPartition *partition); + Queue *get_partition_queue(const TopicPartition *partition); - Queue *get_sasl_queue () { + Queue *get_sasl_queue() { rd_kafka_queue_t *rkqu; rkqu = rd_kafka_queue_get_sasl(rk_); @@ -970,7 +1048,7 @@ class HandleImpl : virtual public Handle { return new QueueImpl(rkqu); } - Queue *get_background_queue () { + Queue *get_background_queue() { rd_kafka_queue_t *rkqu; rkqu = rd_kafka_queue_get_background(rk_); @@ -981,84 +1059,78 @@ class HandleImpl : virtual public Handle { } - ErrorCode offsetsForTimes (std::vector &offsets, - int timeout_ms) { + ErrorCode offsetsForTimes(std::vector &offsets, + int timeout_ms) { rd_kafka_topic_partition_list_t *c_offsets = partitions_to_c_parts(offsets); - ErrorCode err = static_cast( + ErrorCode err = static_cast( 
rd_kafka_offsets_for_times(rk_, c_offsets, timeout_ms)); update_partitions_from_c_parts(offsets, c_offsets); rd_kafka_topic_partition_list_destroy(c_offsets); return err; } - ErrorCode set_log_queue (Queue *queue); + ErrorCode set_log_queue(Queue *queue); - void yield () { + void yield() { rd_kafka_yield(rk_); } - const std::string clusterid (int timeout_ms) { - char *str = rd_kafka_clusterid(rk_, timeout_ms); - std::string clusterid = str ? str : ""; - if (str) - rd_kafka_mem_free(rk_, str); - return clusterid; + const std::string clusterid(int timeout_ms) { + char *str = rd_kafka_clusterid(rk_, timeout_ms); + std::string clusterid = str ? str : ""; + if (str) + rd_kafka_mem_free(rk_, str); + return clusterid; } - struct rd_kafka_s *c_ptr () { - return rk_; + struct rd_kafka_s *c_ptr() { + return rk_; } - int32_t controllerid (int timeout_ms) { - return rd_kafka_controllerid(rk_, timeout_ms); + int32_t controllerid(int timeout_ms) { + return rd_kafka_controllerid(rk_, timeout_ms); } - ErrorCode fatal_error (std::string &errstr) const { - char errbuf[512]; - RdKafka::ErrorCode err = - static_cast( - rd_kafka_fatal_error(rk_, errbuf, sizeof(errbuf))); - if (err) - errstr = errbuf; - return err; + ErrorCode fatal_error(std::string &errstr) const { + char errbuf[512]; + RdKafka::ErrorCode err = static_cast( + rd_kafka_fatal_error(rk_, errbuf, sizeof(errbuf))); + if (err) + errstr = errbuf; + return err; } - ErrorCode oauthbearer_set_token (const std::string &token_value, - int64_t md_lifetime_ms, - const std::string &md_principal_name, - const std::list &extensions, - std::string &errstr) { - char errbuf[512]; - ErrorCode err; - const char **extensions_copy = new const char *[extensions.size()]; - int elem = 0; - - for (std::list::const_iterator it = extensions.begin(); - it != extensions.end(); it++) - extensions_copy[elem++] = it->c_str(); - err = static_cast(rd_kafka_oauthbearer_set_token( - rk_, token_value.c_str(), - md_lifetime_ms, - md_principal_name.c_str(), - 
extensions_copy, - extensions.size(), - errbuf, sizeof(errbuf))); - delete[] extensions_copy; - - if (err != ERR_NO_ERROR) - errstr = errbuf; + ErrorCode oauthbearer_set_token(const std::string &token_value, + int64_t md_lifetime_ms, + const std::string &md_principal_name, + const std::list &extensions, + std::string &errstr) { + char errbuf[512]; + ErrorCode err; + const char **extensions_copy = new const char *[extensions.size()]; + int elem = 0; + + for (std::list::const_iterator it = extensions.begin(); + it != extensions.end(); it++) + extensions_copy[elem++] = it->c_str(); + err = static_cast(rd_kafka_oauthbearer_set_token( + rk_, token_value.c_str(), md_lifetime_ms, md_principal_name.c_str(), + extensions_copy, extensions.size(), errbuf, sizeof(errbuf))); + delete[] extensions_copy; + + if (err != ERR_NO_ERROR) + errstr = errbuf; - return err; + return err; } ErrorCode oauthbearer_set_token_failure(const std::string &errstr) { - return static_cast(rd_kafka_oauthbearer_set_token_failure( - rk_, errstr.c_str())); + return static_cast( + rd_kafka_oauthbearer_set_token_failure(rk_, errstr.c_str())); }; - Error *sasl_background_callbacks_enable () { - rd_kafka_error_t *c_error = - rd_kafka_sasl_background_callbacks_enable(rk_); + Error *sasl_background_callbacks_enable() { + rd_kafka_error_t *c_error = rd_kafka_sasl_background_callbacks_enable(rk_); if (c_error) return new ErrorImpl(c_error); @@ -1066,11 +1138,11 @@ class HandleImpl : virtual public Handle { return NULL; } - void *mem_malloc (size_t size) { + void *mem_malloc(size_t size) { return rd_kafka_mem_malloc(rk_, size); }; - void mem_free (void *ptr) { + void mem_free(void *ptr) { rd_kafka_mem_free(rk_, ptr); }; @@ -1095,28 +1167,27 @@ class HandleImpl : virtual public Handle { class TopicImpl : public Topic { public: - ~TopicImpl () { + ~TopicImpl() { rd_kafka_topic_destroy(rkt_); } - const std::string name () const { + const std::string name() const { return rd_kafka_topic_name(rkt_); } - bool 
partition_available (int32_t partition) const { + bool partition_available(int32_t partition) const { return !!rd_kafka_topic_partition_available(rkt_, partition); } - ErrorCode offset_store (int32_t partition, int64_t offset) { + ErrorCode offset_store(int32_t partition, int64_t offset) { return static_cast( rd_kafka_offset_store(rkt_, partition, offset)); } - static Topic *create (Handle &base, const std::string &topic, - Conf *conf); + static Topic *create(Handle &base, const std::string &topic, Conf *conf); - struct rd_kafka_topic_s *c_ptr () { - return rkt_; + struct rd_kafka_topic_s *c_ptr() { + return rkt_; } rd_kafka_topic_t *rkt_; @@ -1129,39 +1200,55 @@ class TopicImpl : public Topic { * Topic and Partition */ class TopicPartitionImpl : public TopicPartition { -public: + public: ~TopicPartitionImpl() {}; - static TopicPartition *create (const std::string &topic, int partition); + static TopicPartition *create(const std::string &topic, int partition); - TopicPartitionImpl (const std::string &topic, int partition): - topic_(topic), partition_(partition), offset_(RdKafka::Topic::OFFSET_INVALID), - err_(ERR_NO_ERROR) {} + TopicPartitionImpl(const std::string &topic, int partition) : + topic_(topic), + partition_(partition), + offset_(RdKafka::Topic::OFFSET_INVALID), + err_(ERR_NO_ERROR) { + } - TopicPartitionImpl (const std::string &topic, int partition, int64_t offset): - topic_(topic), partition_(partition), offset_(offset), - err_(ERR_NO_ERROR) {} + TopicPartitionImpl(const std::string &topic, int partition, int64_t offset) : + topic_(topic), + partition_(partition), + offset_(offset), + err_(ERR_NO_ERROR) { + } - TopicPartitionImpl (const rd_kafka_topic_partition_t *c_part) { - topic_ = std::string(c_part->topic); + TopicPartitionImpl(const rd_kafka_topic_partition_t *c_part) { + topic_ = std::string(c_part->topic); partition_ = c_part->partition; - offset_ = c_part->offset; - err_ = static_cast(c_part->err); + offset_ = c_part->offset; + err_ = 
static_cast(c_part->err); // FIXME: metadata } - static void destroy (std::vector &partitions); + static void destroy(std::vector &partitions); - int partition () const { return partition_; } - const std::string &topic () const { return topic_ ; } + int partition() const { + return partition_; + } + const std::string &topic() const { + return topic_; + } - int64_t offset () const { return offset_; } + int64_t offset() const { + return offset_; + } - ErrorCode err () const { return err_; } + ErrorCode err() const { + return err_; + } - void set_offset (int64_t offset) { offset_ = offset; } + void set_offset(int64_t offset) { + offset_ = offset; + } - std::ostream& operator<<(std::ostream &ostrm) const { + std::ostream &operator<<(std::ostream &ostrm) const { return ostrm << topic_ << " [" << partition_ << "]"; } @@ -1182,97 +1269,93 @@ class ConsumerGroupMetadataImpl : public ConsumerGroupMetadata { rd_kafka_consumer_group_metadata_destroy(cgmetadata_); } - ConsumerGroupMetadataImpl(rd_kafka_consumer_group_metadata_t *cgmetadata): - cgmetadata_(cgmetadata) {} + ConsumerGroupMetadataImpl(rd_kafka_consumer_group_metadata_t *cgmetadata) : + cgmetadata_(cgmetadata) { + } rd_kafka_consumer_group_metadata_t *cgmetadata_; }; -class KafkaConsumerImpl : virtual public KafkaConsumer, virtual public HandleImpl { -public: - ~KafkaConsumerImpl () { +class KafkaConsumerImpl : virtual public KafkaConsumer, + virtual public HandleImpl { + public: + ~KafkaConsumerImpl() { if (rk_) rd_kafka_destroy_flags(rk_, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE); } - static KafkaConsumer *create (Conf *conf, std::string &errstr); + static KafkaConsumer *create(Conf *conf, std::string &errstr); - ErrorCode assignment (std::vector &partitions); - bool assignment_lost (); - std::string rebalance_protocol () { + ErrorCode assignment(std::vector &partitions); + bool assignment_lost(); + std::string rebalance_protocol() { const char *str = rd_kafka_rebalance_protocol(rk_); return std::string(str ? 
str : ""); } - ErrorCode subscription (std::vector &topics); - ErrorCode subscribe (const std::vector &topics); - ErrorCode unsubscribe (); - ErrorCode assign (const std::vector &partitions); - ErrorCode unassign (); - Error *incremental_assign (const std::vector &partitions); - Error *incremental_unassign (const std::vector &partitions); - - Message *consume (int timeout_ms); - ErrorCode commitSync () { - return static_cast(rd_kafka_commit(rk_, NULL, 0/*sync*/)); - } - ErrorCode commitAsync () { - return static_cast(rd_kafka_commit(rk_, NULL, 1/*async*/)); - } - ErrorCode commitSync (Message *message) { - MessageImpl *msgimpl = dynamic_cast(message); - return static_cast( - rd_kafka_commit_message(rk_, msgimpl->rkmessage_, 0/*sync*/)); - } - ErrorCode commitAsync (Message *message) { - MessageImpl *msgimpl = dynamic_cast(message); - return static_cast( - rd_kafka_commit_message(rk_, msgimpl->rkmessage_,1/*async*/)); - } - - ErrorCode commitSync (std::vector &offsets) { - rd_kafka_topic_partition_list_t *c_parts = - partitions_to_c_parts(offsets); - rd_kafka_resp_err_t err = - rd_kafka_commit(rk_, c_parts, 0); - if (!err) - update_partitions_from_c_parts(offsets, c_parts); - rd_kafka_topic_partition_list_destroy(c_parts); - return static_cast(err); - } - - ErrorCode commitAsync (const std::vector &offsets) { - rd_kafka_topic_partition_list_t *c_parts = - partitions_to_c_parts(offsets); - rd_kafka_resp_err_t err = - rd_kafka_commit(rk_, c_parts, 1); - rd_kafka_topic_partition_list_destroy(c_parts); - return static_cast(err); - } - - ErrorCode commitSync (OffsetCommitCb *offset_commit_cb) { - return static_cast( - rd_kafka_commit_queue(rk_, NULL, NULL, - RdKafka::offset_commit_cb_trampoline0, - offset_commit_cb)); - } - - ErrorCode commitSync (std::vector &offsets, - OffsetCommitCb *offset_commit_cb) { - rd_kafka_topic_partition_list_t *c_parts = - partitions_to_c_parts(offsets); - rd_kafka_resp_err_t err = - rd_kafka_commit_queue(rk_, c_parts, NULL, - 
RdKafka::offset_commit_cb_trampoline0, - offset_commit_cb); - rd_kafka_topic_partition_list_destroy(c_parts); - return static_cast(err); - } - - ErrorCode committed (std::vector &partitions, int timeout_ms); - ErrorCode position (std::vector &partitions); - - ConsumerGroupMetadata *groupMetadata () { + ErrorCode subscription(std::vector &topics); + ErrorCode subscribe(const std::vector &topics); + ErrorCode unsubscribe(); + ErrorCode assign(const std::vector &partitions); + ErrorCode unassign(); + Error *incremental_assign(const std::vector &partitions); + Error *incremental_unassign(const std::vector &partitions); + + Message *consume(int timeout_ms); + ErrorCode commitSync() { + return static_cast(rd_kafka_commit(rk_, NULL, 0 /*sync*/)); + } + ErrorCode commitAsync() { + return static_cast(rd_kafka_commit(rk_, NULL, 1 /*async*/)); + } + ErrorCode commitSync(Message *message) { + MessageImpl *msgimpl = dynamic_cast(message); + return static_cast( + rd_kafka_commit_message(rk_, msgimpl->rkmessage_, 0 /*sync*/)); + } + ErrorCode commitAsync(Message *message) { + MessageImpl *msgimpl = dynamic_cast(message); + return static_cast( + rd_kafka_commit_message(rk_, msgimpl->rkmessage_, 1 /*async*/)); + } + + ErrorCode commitSync(std::vector &offsets) { + rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets); + rd_kafka_resp_err_t err = rd_kafka_commit(rk_, c_parts, 0); + if (!err) + update_partitions_from_c_parts(offsets, c_parts); + rd_kafka_topic_partition_list_destroy(c_parts); + return static_cast(err); + } + + ErrorCode commitAsync(const std::vector &offsets) { + rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets); + rd_kafka_resp_err_t err = rd_kafka_commit(rk_, c_parts, 1); + rd_kafka_topic_partition_list_destroy(c_parts); + return static_cast(err); + } + + ErrorCode commitSync(OffsetCommitCb *offset_commit_cb) { + return static_cast(rd_kafka_commit_queue( + rk_, NULL, NULL, RdKafka::offset_commit_cb_trampoline0, + 
offset_commit_cb)); + } + + ErrorCode commitSync(std::vector &offsets, + OffsetCommitCb *offset_commit_cb) { + rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets); + rd_kafka_resp_err_t err = rd_kafka_commit_queue( + rk_, c_parts, NULL, RdKafka::offset_commit_cb_trampoline0, + offset_commit_cb); + rd_kafka_topic_partition_list_destroy(c_parts); + return static_cast(err); + } + + ErrorCode committed(std::vector &partitions, + int timeout_ms); + ErrorCode position(std::vector &partitions); + + ConsumerGroupMetadata *groupMetadata() { rd_kafka_consumer_group_metadata_t *cgmetadata; cgmetadata = rd_kafka_consumer_group_metadata(rk_); @@ -1282,20 +1365,17 @@ class KafkaConsumerImpl : virtual public KafkaConsumer, virtual public HandleImp return new ConsumerGroupMetadataImpl(cgmetadata); } - ErrorCode close (); + ErrorCode close(); - ErrorCode seek (const TopicPartition &partition, int timeout_ms); + ErrorCode seek(const TopicPartition &partition, int timeout_ms); - ErrorCode offsets_store (std::vector &offsets) { - rd_kafka_topic_partition_list_t *c_parts = - partitions_to_c_parts(offsets); - rd_kafka_resp_err_t err = - rd_kafka_offsets_store(rk_, c_parts); - update_partitions_from_c_parts(offsets, c_parts); - rd_kafka_topic_partition_list_destroy(c_parts); - return static_cast(err); + ErrorCode offsets_store(std::vector &offsets) { + rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets); + rd_kafka_resp_err_t err = rd_kafka_offsets_store(rk_, c_parts); + update_partitions_from_c_parts(offsets, c_parts); + rd_kafka_topic_partition_list_destroy(c_parts); + return static_cast(err); } - }; @@ -1308,7 +1388,7 @@ class MetadataImpl : public Metadata { return &brokers_; } - const std::vector *topics() const { + const std::vector *topics() const { return &topics_; } @@ -1320,7 +1400,7 @@ class MetadataImpl : public Metadata { return metadata_->orig_broker_id; } -private: + private: const rd_kafka_metadata_t *metadata_; std::vector 
brokers_; std::vector topics_; @@ -1329,81 +1409,100 @@ class MetadataImpl : public Metadata { - class ConsumerImpl : virtual public Consumer, virtual public HandleImpl { public: - ~ConsumerImpl () { + ~ConsumerImpl() { if (rk_) rd_kafka_destroy(rk_); }; - static Consumer *create (Conf *conf, std::string &errstr); - - ErrorCode start (Topic *topic, int32_t partition, int64_t offset); - ErrorCode start (Topic *topic, int32_t partition, int64_t offset, - Queue *queue); - ErrorCode stop (Topic *topic, int32_t partition); - ErrorCode seek (Topic *topic, int32_t partition, int64_t offset, - int timeout_ms); - Message *consume (Topic *topic, int32_t partition, int timeout_ms); - Message *consume (Queue *queue, int timeout_ms); - int consume_callback (Topic *topic, int32_t partition, int timeout_ms, - ConsumeCb *cb, void *opaque); - int consume_callback (Queue *queue, int timeout_ms, - RdKafka::ConsumeCb *consume_cb, void *opaque); + static Consumer *create(Conf *conf, std::string &errstr); + + ErrorCode start(Topic *topic, int32_t partition, int64_t offset); + ErrorCode start(Topic *topic, + int32_t partition, + int64_t offset, + Queue *queue); + ErrorCode stop(Topic *topic, int32_t partition); + ErrorCode seek(Topic *topic, + int32_t partition, + int64_t offset, + int timeout_ms); + Message *consume(Topic *topic, int32_t partition, int timeout_ms); + Message *consume(Queue *queue, int timeout_ms); + int consume_callback(Topic *topic, + int32_t partition, + int timeout_ms, + ConsumeCb *cb, + void *opaque); + int consume_callback(Queue *queue, + int timeout_ms, + RdKafka::ConsumeCb *consume_cb, + void *opaque); }; class ProducerImpl : virtual public Producer, virtual public HandleImpl { - public: - ~ProducerImpl () { + ~ProducerImpl() { if (rk_) rd_kafka_destroy(rk_); }; - ErrorCode produce (Topic *topic, int32_t partition, - int msgflags, - void *payload, size_t len, - const std::string *key, - void *msg_opaque); - - ErrorCode produce (Topic *topic, int32_t partition, - 
int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - void *msg_opaque); - - ErrorCode produce (Topic *topic, int32_t partition, - const std::vector *payload, - const std::vector *key, - void *msg_opaque); - - ErrorCode produce (const std::string topic_name, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, void *msg_opaque); - - ErrorCode produce (const std::string topic_name, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, - RdKafka::Headers *headers, - void *msg_opaque); - - ErrorCode flush (int timeout_ms) { - return static_cast(rd_kafka_flush(rk_, - timeout_ms)); - } - - ErrorCode purge (int purge_flags) { - return static_cast(rd_kafka_purge(rk_, - (int)purge_flags)); + ErrorCode produce(Topic *topic, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const std::string *key, + void *msg_opaque); + + ErrorCode produce(Topic *topic, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + void *msg_opaque); + + ErrorCode produce(Topic *topic, + int32_t partition, + const std::vector *payload, + const std::vector *key, + void *msg_opaque); + + ErrorCode produce(const std::string topic_name, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + int64_t timestamp, + void *msg_opaque); + + ErrorCode produce(const std::string topic_name, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + int64_t timestamp, + RdKafka::Headers *headers, + void *msg_opaque); + + ErrorCode flush(int timeout_ms) { + return static_cast(rd_kafka_flush(rk_, timeout_ms)); + } + + ErrorCode purge(int purge_flags) { + return static_cast( + rd_kafka_purge(rk_, (int)purge_flags)); } - Error *init_transactions (int timeout_ms) { + Error 
*init_transactions(int timeout_ms) { rd_kafka_error_t *c_error; c_error = rd_kafka_init_transactions(rk_, timeout_ms); @@ -1414,7 +1513,7 @@ class ProducerImpl : virtual public Producer, virtual public HandleImpl { return NULL; } - Error *begin_transaction () { + Error *begin_transaction() { rd_kafka_error_t *c_error; c_error = rd_kafka_begin_transaction(rk_); @@ -1425,18 +1524,18 @@ class ProducerImpl : virtual public Producer, virtual public HandleImpl { return NULL; } - Error *send_offsets_to_transaction ( - const std::vector &offsets, + Error *send_offsets_to_transaction( + const std::vector &offsets, const ConsumerGroupMetadata *group_metadata, int timeout_ms) { rd_kafka_error_t *c_error; const RdKafka::ConsumerGroupMetadataImpl *cgmdimpl = - dynamic_cast(group_metadata); + dynamic_cast( + group_metadata); rd_kafka_topic_partition_list_t *c_offsets = partitions_to_c_parts(offsets); - c_error = rd_kafka_send_offsets_to_transaction(rk_, c_offsets, - cgmdimpl->cgmetadata_, - timeout_ms); + c_error = rd_kafka_send_offsets_to_transaction( + rk_, c_offsets, cgmdimpl->cgmetadata_, timeout_ms); rd_kafka_topic_partition_list_destroy(c_offsets); @@ -1446,7 +1545,7 @@ class ProducerImpl : virtual public Producer, virtual public HandleImpl { return NULL; } - Error *commit_transaction (int timeout_ms) { + Error *commit_transaction(int timeout_ms) { rd_kafka_error_t *c_error; c_error = rd_kafka_commit_transaction(rk_, timeout_ms); @@ -1457,7 +1556,7 @@ class ProducerImpl : virtual public Producer, virtual public HandleImpl { return NULL; } - Error *abort_transaction (int timeout_ms) { + Error *abort_transaction(int timeout_ms) { rd_kafka_error_t *c_error; c_error = rd_kafka_abort_transaction(rk_, timeout_ms); @@ -1468,12 +1567,11 @@ class ProducerImpl : virtual public Producer, virtual public HandleImpl { return NULL; } - static Producer *create (Conf *conf, std::string &errstr); - + static Producer *create(Conf *conf, std::string &errstr); }; -} +} // namespace RdKafka 
#endif /* _RDKAFKACPP_INT_H_ */ diff --git a/src/cJSON.c b/src/cJSON.c index 4c6a308eec..9aec18469c 100644 --- a/src/cJSON.c +++ b/src/cJSON.c @@ -32,9 +32,9 @@ #pragma GCC visibility push(default) #endif #if defined(_MSC_VER) -#pragma warning (push) +#pragma warning(push) /* disable warning about single line comments in system headers */ -#pragma warning (disable : 4001) +#pragma warning(disable : 4001) #endif #include @@ -50,7 +50,7 @@ #endif #if defined(_MSC_VER) -#pragma warning (pop) +#pragma warning(pop) #endif #ifdef __GNUC__ #pragma GCC visibility pop @@ -69,7 +69,8 @@ #endif #define false ((cJSON_bool)0) -/* define isnan and isinf for ANSI C, if in C99 or above, isnan and isinf has been defined in math.h */ +/* define isnan and isinf for ANSI C, if in C99 or above, isnan and isinf has + * been defined in math.h */ #ifndef isinf #define isinf(d) (isnan((d - d)) && !isnan(d)) #endif @@ -78,2897 +79,2646 @@ #endif #ifndef NAN -#define NAN 0.0/0.0 +#define NAN 0.0 / 0.0 #endif typedef struct { - const unsigned char *json; - size_t position; + const unsigned char *json; + size_t position; } error; -static error global_error = { NULL, 0 }; +static error global_error = {NULL, 0}; -CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) -{ - return (const char*) (global_error.json + global_error.position); +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) { + return (const char *)(global_error.json + global_error.position); } -CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item) -{ - if (!cJSON_IsString(item)) - { - return NULL; - } +CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item) { + if (!cJSON_IsString(item)) { + return NULL; + } - return item->valuestring; + return item->valuestring; } -CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item) -{ - if (!cJSON_IsNumber(item)) - { - return (double) NAN; - } +CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item) { + if (!cJSON_IsNumber(item)) { + return 
(double)NAN; + } - return item->valuedouble; + return item->valuedouble; } -/* This is a safeguard to prevent copy-pasters from using incompatible C and header files */ -#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 14) - #error cJSON.h and cJSON.c have different versions. Make sure that both have the same. +/* This is a safeguard to prevent copy-pasters from using incompatible C and + * header files */ +#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || \ + (CJSON_VERSION_PATCH != 14) +#error cJSON.h and cJSON.c have different versions. Make sure that both have the same. #endif -CJSON_PUBLIC(const char*) cJSON_Version(void) -{ - static char version[15]; - sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH); +CJSON_PUBLIC(const char *) cJSON_Version(void) { + static char version[15]; + sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, + CJSON_VERSION_PATCH); - return version; + return version; } -/* Case insensitive string comparison, doesn't consider two NULL pointers equal though */ -static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2) -{ - if ((string1 == NULL) || (string2 == NULL)) - { - return 1; - } +/* Case insensitive string comparison, doesn't consider two NULL pointers equal + * though */ +static int case_insensitive_strcmp(const unsigned char *string1, + const unsigned char *string2) { + if ((string1 == NULL) || (string2 == NULL)) { + return 1; + } - if (string1 == string2) - { - return 0; - } + if (string1 == string2) { + return 0; + } - for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++) - { - if (*string1 == '\0') - { - return 0; + for (; tolower(*string1) == tolower(*string2); + (void)string1++, string2++) { + if (*string1 == '\0') { + return 0; + } } - } - return tolower(*string1) - tolower(*string2); + return tolower(*string1) - tolower(*string2); } -typedef struct 
internal_hooks -{ - void *(CJSON_CDECL *allocate)(size_t size); - void (CJSON_CDECL *deallocate)(void *pointer); - void *(CJSON_CDECL *reallocate)(void *pointer, size_t size); +typedef struct internal_hooks { + void *(CJSON_CDECL *allocate)(size_t size); + void(CJSON_CDECL *deallocate)(void *pointer); + void *(CJSON_CDECL *reallocate)(void *pointer, size_t size); } internal_hooks; #if defined(_MSC_VER) -/* work around MSVC error C2322: '...' address of dllimport '...' is not static */ -static void * CJSON_CDECL internal_malloc(size_t size) -{ - return malloc(size); +/* work around MSVC error C2322: '...' address of dllimport '...' is not static + */ +static void *CJSON_CDECL internal_malloc(size_t size) { + return malloc(size); } -static void CJSON_CDECL internal_free(void *pointer) -{ - free(pointer); +static void CJSON_CDECL internal_free(void *pointer) { + free(pointer); } -static void * CJSON_CDECL internal_realloc(void *pointer, size_t size) -{ - return realloc(pointer, size); +static void *CJSON_CDECL internal_realloc(void *pointer, size_t size) { + return realloc(pointer, size); } #else -#define internal_malloc malloc -#define internal_free free +#define internal_malloc malloc +#define internal_free free #define internal_realloc realloc #endif /* strlen of character literals resolved at compile time */ #define static_strlen(string_literal) (sizeof(string_literal) - sizeof("")) -static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; +static internal_hooks global_hooks = {internal_malloc, internal_free, + internal_realloc}; -static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) -{ - size_t length = 0; - unsigned char *copy = NULL; +static unsigned char *cJSON_strdup(const unsigned char *string, + const internal_hooks *const hooks) { + size_t length = 0; + unsigned char *copy = NULL; - if (string == NULL) - { - return NULL; - } + if (string == NULL) { + return NULL; + } - length = 
strlen((const char*)string) + sizeof(""); - copy = (unsigned char*)hooks->allocate(length); - if (copy == NULL) - { - return NULL; - } - memcpy(copy, string, length); + length = strlen((const char *)string) + sizeof(""); + copy = (unsigned char *)hooks->allocate(length); + if (copy == NULL) { + return NULL; + } + memcpy(copy, string, length); - return copy; + return copy; } -CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) -{ - if (hooks == NULL) - { - /* Reset hooks */ +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks) { + if (hooks == NULL) { + /* Reset hooks */ + global_hooks.allocate = malloc; + global_hooks.deallocate = free; + global_hooks.reallocate = realloc; + return; + } + global_hooks.allocate = malloc; + if (hooks->malloc_fn != NULL) { + global_hooks.allocate = hooks->malloc_fn; + } + global_hooks.deallocate = free; - global_hooks.reallocate = realloc; - return; - } - - global_hooks.allocate = malloc; - if (hooks->malloc_fn != NULL) - { - global_hooks.allocate = hooks->malloc_fn; - } - - global_hooks.deallocate = free; - if (hooks->free_fn != NULL) - { - global_hooks.deallocate = hooks->free_fn; - } - - /* use realloc only if both free and malloc are used */ - global_hooks.reallocate = NULL; - if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free)) - { - global_hooks.reallocate = realloc; - } + if (hooks->free_fn != NULL) { + global_hooks.deallocate = hooks->free_fn; + } + + /* use realloc only if both free and malloc are used */ + global_hooks.reallocate = NULL; + if ((global_hooks.allocate == malloc) && + (global_hooks.deallocate == free)) { + global_hooks.reallocate = realloc; + } } /* Internal constructor. 
*/ -static cJSON *cJSON_New_Item(const internal_hooks * const hooks) -{ - cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON)); - if (node) - { - memset(node, '\0', sizeof(cJSON)); - } +static cJSON *cJSON_New_Item(const internal_hooks *const hooks) { + cJSON *node = (cJSON *)hooks->allocate(sizeof(cJSON)); + if (node) { + memset(node, '\0', sizeof(cJSON)); + } - return node; + return node; } /* Delete a cJSON structure. */ -CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) -{ - cJSON *next = NULL; - while (item != NULL) - { - next = item->next; - if (!(item->type & cJSON_IsReference) && (item->child != NULL)) - { - cJSON_Delete(item->child); - } - if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL)) - { - global_hooks.deallocate(item->valuestring); - } - if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) - { - global_hooks.deallocate(item->string); +CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) { + cJSON *next = NULL; + while (item != NULL) { + next = item->next; + if (!(item->type & cJSON_IsReference) && + (item->child != NULL)) { + cJSON_Delete(item->child); + } + if (!(item->type & cJSON_IsReference) && + (item->valuestring != NULL)) { + global_hooks.deallocate(item->valuestring); + } + if (!(item->type & cJSON_StringIsConst) && + (item->string != NULL)) { + global_hooks.deallocate(item->string); + } + global_hooks.deallocate(item); + item = next; } - global_hooks.deallocate(item); - item = next; - } } /* get the decimal point character of the current locale */ -static unsigned char get_decimal_point(void) -{ +static unsigned char get_decimal_point(void) { #ifdef ENABLE_LOCALES - struct lconv *lconv = localeconv(); - return (unsigned char) lconv->decimal_point[0]; + struct lconv *lconv = localeconv(); + return (unsigned char)lconv->decimal_point[0]; #else - return '.'; + return '.'; #endif } -typedef struct -{ - const unsigned char *content; - size_t length; - size_t offset; - size_t depth; /* How deeply nested (in arrays/objects) is 
the input at the current offset. */ - internal_hooks hooks; +typedef struct { + const unsigned char *content; + size_t length; + size_t offset; + size_t depth; /* How deeply nested (in arrays/objects) is the input at + the current offset. */ + internal_hooks hooks; } parse_buffer; -/* check if the given size is left to read in a given parse buffer (starting with 1) */ -#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) +/* check if the given size is left to read in a given parse buffer (starting + * with 1) */ +#define can_read(buffer, size) \ + ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) /* check if the buffer can be accessed at the given index (starting with 0) */ -#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) -#define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index)) +#define can_access_at_index(buffer, index) \ + ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) +#define cannot_access_at_index(buffer, index) \ + (!can_access_at_index(buffer, index)) /* get a pointer to the buffer at the position */ #define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset) -/* Parse the input text to generate a number, and populate the result into item. */ -static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) -{ - double number = 0; - unsigned char *after_end = NULL; - unsigned char number_c_string[64]; - unsigned char decimal_point = get_decimal_point(); - size_t i = 0; - - if ((input_buffer == NULL) || (input_buffer->content == NULL)) - { - return false; - } - - /* copy the number into a temporary buffer and replace '.' 
with the decimal point - * of the current locale (for strtod) - * This also takes care of '\0' not necessarily being available for marking the end of the input */ - for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++) - { - switch (buffer_at_offset(input_buffer)[i]) - { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - case '+': - case '-': - case 'e': - case 'E': - number_c_string[i] = buffer_at_offset(input_buffer)[i]; - break; +/* Parse the input text to generate a number, and populate the result into item. + */ +static cJSON_bool parse_number(cJSON *const item, + parse_buffer *const input_buffer) { + double number = 0; + unsigned char *after_end = NULL; + unsigned char number_c_string[64]; + unsigned char decimal_point = get_decimal_point(); + size_t i = 0; + + if ((input_buffer == NULL) || (input_buffer->content == NULL)) { + return false; + } - case '.': - number_c_string[i] = decimal_point; - break; + /* copy the number into a temporary buffer and replace '.' 
with the + * decimal point of the current locale (for strtod) + * This also takes care of '\0' not necessarily being available for + * marking the end of the input */ + for (i = 0; (i < (sizeof(number_c_string) - 1)) && + can_access_at_index(input_buffer, i); + i++) { + switch (buffer_at_offset(input_buffer)[i]) { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '+': + case '-': + case 'e': + case 'E': + number_c_string[i] = buffer_at_offset(input_buffer)[i]; + break; + + case '.': + number_c_string[i] = decimal_point; + break; - default: - goto loop_end; + default: + goto loop_end; + } } - } loop_end: - number_c_string[i] = '\0'; - - number = strtod((const char*)number_c_string, (char**)&after_end); - if (number_c_string == after_end) - { - return false; /* parse_error */ - } - - item->valuedouble = number; - - /* use saturation in case of overflow */ - if (number >= INT_MAX) - { - item->valueint = INT_MAX; - } - else if (number <= (double)INT_MIN) - { - item->valueint = INT_MIN; - } - else - { - item->valueint = (int)number; - } - - item->type = cJSON_Number; - - input_buffer->offset += (size_t)(after_end - number_c_string); - return true; -} - -/* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */ -CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) -{ - if (number >= INT_MAX) - { - object->valueint = INT_MAX; - } - else if (number <= (double)INT_MIN) - { - object->valueint = INT_MIN; - } - else - { - object->valueint = (int)number; - } - - return object->valuedouble = number; -} - -CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring) -{ - char *copy = NULL; - /* if object's type is not cJSON_String or is cJSON_IsReference, it should not set valuestring */ - if (!(object->type & cJSON_String) || (object->type & cJSON_IsReference)) - { - return NULL; - } - if (strlen(valuestring) <= 
strlen(object->valuestring)) - { - strcpy(object->valuestring, valuestring); - return object->valuestring; - } - copy = (char*) cJSON_strdup((const unsigned char*)valuestring, &global_hooks); - if (copy == NULL) - { - return NULL; - } - if (object->valuestring != NULL) - { - cJSON_free(object->valuestring); - } - object->valuestring = copy; - - return copy; -} - -typedef struct -{ - unsigned char *buffer; - size_t length; - size_t offset; - size_t depth; /* current nesting depth (for formatted printing) */ - cJSON_bool noalloc; - cJSON_bool format; /* is this print a formatted print */ - internal_hooks hooks; + number_c_string[i] = '\0'; + + number = strtod((const char *)number_c_string, (char **)&after_end); + if (number_c_string == after_end) { + return false; /* parse_error */ + } + + item->valuedouble = number; + + /* use saturation in case of overflow */ + if (number >= INT_MAX) { + item->valueint = INT_MAX; + } else if (number <= (double)INT_MIN) { + item->valueint = INT_MIN; + } else { + item->valueint = (int)number; + } + + item->type = cJSON_Number; + + input_buffer->offset += (size_t)(after_end - number_c_string); + return true; +} + +/* don't ask me, but the original cJSON_SetNumberValue returns an integer or + * double */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) { + if (number >= INT_MAX) { + object->valueint = INT_MAX; + } else if (number <= (double)INT_MIN) { + object->valueint = INT_MIN; + } else { + object->valueint = (int)number; + } + + return object->valuedouble = number; +} + +CJSON_PUBLIC(char *) +cJSON_SetValuestring(cJSON *object, const char *valuestring) { + char *copy = NULL; + /* if object's type is not cJSON_String or is cJSON_IsReference, it + * should not set valuestring */ + if (!(object->type & cJSON_String) || + (object->type & cJSON_IsReference)) { + return NULL; + } + if (strlen(valuestring) <= strlen(object->valuestring)) { + strcpy(object->valuestring, valuestring); + return object->valuestring; + 
} + copy = (char *)cJSON_strdup((const unsigned char *)valuestring, + &global_hooks); + if (copy == NULL) { + return NULL; + } + if (object->valuestring != NULL) { + cJSON_free(object->valuestring); + } + object->valuestring = copy; + + return copy; +} + +typedef struct { + unsigned char *buffer; + size_t length; + size_t offset; + size_t depth; /* current nesting depth (for formatted printing) */ + cJSON_bool noalloc; + cJSON_bool format; /* is this print a formatted print */ + internal_hooks hooks; } printbuffer; /* realloc printbuffer if necessary to have at least "needed" bytes more */ -static unsigned char* ensure(printbuffer * const p, size_t needed) -{ - unsigned char *newbuffer = NULL; - size_t newsize = 0; - - if ((p == NULL) || (p->buffer == NULL)) - { - return NULL; - } +static unsigned char *ensure(printbuffer *const p, size_t needed) { + unsigned char *newbuffer = NULL; + size_t newsize = 0; - if ((p->length > 0) && (p->offset >= p->length)) - { - /* make sure that offset is valid */ - return NULL; - } + if ((p == NULL) || (p->buffer == NULL)) { + return NULL; + } - if (needed > INT_MAX) - { - /* sizes bigger than INT_MAX are currently not supported */ - return NULL; - } + if ((p->length > 0) && (p->offset >= p->length)) { + /* make sure that offset is valid */ + return NULL; + } - needed += p->offset + 1; - if (needed <= p->length) - { - return p->buffer + p->offset; - } + if (needed > INT_MAX) { + /* sizes bigger than INT_MAX are currently not supported */ + return NULL; + } - if (p->noalloc) { - return NULL; - } + needed += p->offset + 1; + if (needed <= p->length) { + return p->buffer + p->offset; + } - /* calculate new buffer size */ - if (needed > (INT_MAX / 2)) - { - /* overflow of int, use INT_MAX if possible */ - if (needed <= INT_MAX) - { - newsize = INT_MAX; + if (p->noalloc) { + return NULL; } - else - { - return NULL; - } - } - else - { - newsize = needed * 2; - } - - if (p->hooks.reallocate != NULL) - { - /* reallocate with realloc if 
available */ - newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize); - if (newbuffer == NULL) - { - p->hooks.deallocate(p->buffer); - p->length = 0; - p->buffer = NULL; - - return NULL; - } - } - else - { - /* otherwise reallocate manually */ - newbuffer = (unsigned char*)p->hooks.allocate(newsize); - if (!newbuffer) - { - p->hooks.deallocate(p->buffer); - p->length = 0; - p->buffer = NULL; - return NULL; + /* calculate new buffer size */ + if (needed > (INT_MAX / 2)) { + /* overflow of int, use INT_MAX if possible */ + if (needed <= INT_MAX) { + newsize = INT_MAX; + } else { + return NULL; + } + } else { + newsize = needed * 2; } - if (newbuffer) - { - memcpy(newbuffer, p->buffer, p->offset + 1); + + if (p->hooks.reallocate != NULL) { + /* reallocate with realloc if available */ + newbuffer = + (unsigned char *)p->hooks.reallocate(p->buffer, newsize); + if (newbuffer == NULL) { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + } else { + /* otherwise reallocate manually */ + newbuffer = (unsigned char *)p->hooks.allocate(newsize); + if (!newbuffer) { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + if (newbuffer) { + memcpy(newbuffer, p->buffer, p->offset + 1); + } + p->hooks.deallocate(p->buffer); } - p->hooks.deallocate(p->buffer); - } - p->length = newsize; - p->buffer = newbuffer; + p->length = newsize; + p->buffer = newbuffer; - return newbuffer + p->offset; + return newbuffer + p->offset; } -/* calculate the new length of the string in a printbuffer and update the offset */ -static void update_offset(printbuffer * const buffer) -{ - const unsigned char *buffer_pointer = NULL; - if ((buffer == NULL) || (buffer->buffer == NULL)) - { - return; - } - buffer_pointer = buffer->buffer + buffer->offset; +/* calculate the new length of the string in a printbuffer and update the offset + */ +static void update_offset(printbuffer *const buffer) { + const unsigned 
char *buffer_pointer = NULL; + if ((buffer == NULL) || (buffer->buffer == NULL)) { + return; + } + buffer_pointer = buffer->buffer + buffer->offset; - buffer->offset += strlen((const char*)buffer_pointer); + buffer->offset += strlen((const char *)buffer_pointer); } /* securely comparison of floating-point variables */ -static cJSON_bool compare_double(double a, double b) -{ - double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b); - return (fabs(a - b) <= maxVal * DBL_EPSILON); +static cJSON_bool compare_double(double a, double b) { + double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b); + return (fabs(a - b) <= maxVal * DBL_EPSILON); } /* Render the number nicely from the given item into a string. */ -static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) -{ - unsigned char *output_pointer = NULL; - double d = item->valuedouble; - int length = 0; - size_t i = 0; - unsigned char number_buffer[26] = {0}; /* temporary buffer to print the number into */ - unsigned char decimal_point = get_decimal_point(); - double test = 0.0; - - if (output_buffer == NULL) - { - return false; - } - - /* This checks for NaN and Infinity */ - if (isnan(d) || isinf(d)) - { - length = sprintf((char*)number_buffer, "null"); - } - else - { - /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */ - length = sprintf((char*)number_buffer, "%1.15g", d); - - /* Check whether the original double can be recovered */ - if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d)) - { - /* If not, print with 17 decimal places of precision */ - length = sprintf((char*)number_buffer, "%1.17g", d); +static cJSON_bool print_number(const cJSON *const item, + printbuffer *const output_buffer) { + unsigned char *output_pointer = NULL; + double d = item->valuedouble; + int length = 0; + size_t i = 0; + unsigned char number_buffer[26] = { + 0}; /* temporary buffer to print the number into */ + unsigned char 
decimal_point = get_decimal_point(); + double test = 0.0; + + if (output_buffer == NULL) { + return false; } - } - /* sprintf failed or buffer overrun occurred */ - if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) - { - return false; - } + /* This checks for NaN and Infinity */ + if (isnan(d) || isinf(d)) { + length = sprintf((char *)number_buffer, "null"); + } else { + /* Try 15 decimal places of precision to avoid nonsignificant + * nonzero digits */ + length = sprintf((char *)number_buffer, "%1.15g", d); - /* reserve appropriate space in the output */ - output_pointer = ensure(output_buffer, (size_t)length + sizeof("")); - if (output_pointer == NULL) - { - return false; - } + /* Check whether the original double can be recovered */ + if ((sscanf((char *)number_buffer, "%lg", &test) != 1) || + !compare_double((double)test, d)) { + /* If not, print with 17 decimal places of precision */ + length = sprintf((char *)number_buffer, "%1.17g", d); + } + } - /* copy the printed number to the output and replace locale - * dependent decimal point with '.' */ - for (i = 0; i < ((size_t)length); i++) - { - if (number_buffer[i] == decimal_point) - { - output_pointer[i] = '.'; - continue; + /* sprintf failed or buffer overrun occurred */ + if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) { + return false; } - output_pointer[i] = number_buffer[i]; - } - output_pointer[i] = '\0'; + /* reserve appropriate space in the output */ + output_pointer = ensure(output_buffer, (size_t)length + sizeof("")); + if (output_pointer == NULL) { + return false; + } - output_buffer->offset += (size_t)length; + /* copy the printed number to the output and replace locale + * dependent decimal point with '.' 
*/ + for (i = 0; i < ((size_t)length); i++) { + if (number_buffer[i] == decimal_point) { + output_pointer[i] = '.'; + continue; + } - return true; + output_pointer[i] = number_buffer[i]; + } + output_pointer[i] = '\0'; + + output_buffer->offset += (size_t)length; + + return true; } /* parse 4 digit hexadecimal number */ -static unsigned parse_hex4(const unsigned char * const input) -{ - unsigned int h = 0; - size_t i = 0; - - for (i = 0; i < 4; i++) - { - /* parse digit */ - if ((input[i] >= '0') && (input[i] <= '9')) - { - h += (unsigned int) input[i] - '0'; - } - else if ((input[i] >= 'A') && (input[i] <= 'F')) - { - h += (unsigned int) 10 + input[i] - 'A'; - } - else if ((input[i] >= 'a') && (input[i] <= 'f')) - { - h += (unsigned int) 10 + input[i] - 'a'; - } - else /* invalid */ - { - return 0; - } +static unsigned parse_hex4(const unsigned char *const input) { + unsigned int h = 0; + size_t i = 0; + + for (i = 0; i < 4; i++) { + /* parse digit */ + if ((input[i] >= '0') && (input[i] <= '9')) { + h += (unsigned int)input[i] - '0'; + } else if ((input[i] >= 'A') && (input[i] <= 'F')) { + h += (unsigned int)10 + input[i] - 'A'; + } else if ((input[i] >= 'a') && (input[i] <= 'f')) { + h += (unsigned int)10 + input[i] - 'a'; + } else /* invalid */ + { + return 0; + } - if (i < 3) - { - /* shift left to make place for the next nibble */ - h = h << 4; + if (i < 3) { + /* shift left to make place for the next nibble */ + h = h << 4; + } } - } - return h; + return h; } /* converts a UTF-16 literal to UTF-8 * A literal can be one or two sequences of the form \uXXXX */ -static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer) -{ - long unsigned int codepoint = 0; - unsigned int first_code = 0; - const unsigned char *first_sequence = input_pointer; - unsigned char utf8_length = 0; - unsigned char utf8_position = 0; - unsigned char sequence_length = 0; - unsigned char 
first_byte_mark = 0; - - if ((input_end - first_sequence) < 6) - { - /* input ends unexpectedly */ - goto fail; - } - - /* get the first utf16 sequence */ - first_code = parse_hex4(first_sequence + 2); - - /* check that the code is valid */ - if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) - { - goto fail; - } - - /* UTF16 surrogate pair */ - if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) - { - const unsigned char *second_sequence = first_sequence + 6; - unsigned int second_code = 0; - sequence_length = 12; /* \uXXXX\uXXXX */ - - if ((input_end - second_sequence) < 6) - { - /* input ends unexpectedly */ - goto fail; +static unsigned char +utf16_literal_to_utf8(const unsigned char *const input_pointer, + const unsigned char *const input_end, + unsigned char **output_pointer) { + long unsigned int codepoint = 0; + unsigned int first_code = 0; + const unsigned char *first_sequence = input_pointer; + unsigned char utf8_length = 0; + unsigned char utf8_position = 0; + unsigned char sequence_length = 0; + unsigned char first_byte_mark = 0; + + if ((input_end - first_sequence) < 6) { + /* input ends unexpectedly */ + goto fail; } - if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u')) - { - /* missing second half of the surrogate pair */ - goto fail; - } + /* get the first utf16 sequence */ + first_code = parse_hex4(first_sequence + 2); - /* get the second utf16 sequence */ - second_code = parse_hex4(second_sequence + 2); /* check that the code is valid */ - if ((second_code < 0xDC00) || (second_code > 0xDFFF)) - { - /* invalid second half of the surrogate pair */ - goto fail; - } - - - /* calculate the unicode codepoint from the surrogate pair */ - codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF)); - } - else - { - sequence_length = 6; /* \uXXXX */ - codepoint = first_code; - } - - /* encode as UTF-8 - * takes at maximum 4 bytes to encode: - * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ - if (codepoint < 0x80) - { - /* 
normal ascii, encoding 0xxxxxxx */ - utf8_length = 1; - } - else if (codepoint < 0x800) - { - /* two bytes, encoding 110xxxxx 10xxxxxx */ - utf8_length = 2; - first_byte_mark = 0xC0; /* 11000000 */ - } - else if (codepoint < 0x10000) - { - /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ - utf8_length = 3; - first_byte_mark = 0xE0; /* 11100000 */ - } - else if (codepoint <= 0x10FFFF) - { - /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ - utf8_length = 4; - first_byte_mark = 0xF0; /* 11110000 */ - } - else - { - /* invalid unicode codepoint */ - goto fail; - } - - /* encode as utf8 */ - for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--) - { - /* 10xxxxxx */ - (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF); - codepoint >>= 6; - } - /* encode first byte */ - if (utf8_length > 1) - { - (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF); - } - else - { - (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); - } - - *output_pointer += utf8_length; - - return sequence_length; + if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) { + goto fail; + } -fail: - return 0; -} + /* UTF16 surrogate pair */ + if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) { + const unsigned char *second_sequence = first_sequence + 6; + unsigned int second_code = 0; + sequence_length = 12; /* \uXXXX\uXXXX */ -/* Parse the input text into an unescaped cinput, and populate item. 
*/ -static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) -{ - const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; - const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; - unsigned char *output_pointer = NULL; - unsigned char *output = NULL; - - /* not a string */ - if (buffer_at_offset(input_buffer)[0] != '\"') - { - goto fail; - } - - { - /* calculate approximate size of the output (overestimate) */ - size_t allocation_length = 0; - size_t skipped_bytes = 0; - while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"')) - { - /* is escape sequence */ - if (input_end[0] == '\\') - { - if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length) - { - /* prevent buffer overflow when last input character is a backslash */ - goto fail; + if ((input_end - second_sequence) < 6) { + /* input ends unexpectedly */ + goto fail; + } + + if ((second_sequence[0] != '\\') || + (second_sequence[1] != 'u')) { + /* missing second half of the surrogate pair */ + goto fail; + } + + /* get the second utf16 sequence */ + second_code = parse_hex4(second_sequence + 2); + /* check that the code is valid */ + if ((second_code < 0xDC00) || (second_code > 0xDFFF)) { + /* invalid second half of the surrogate pair */ + goto fail; } - skipped_bytes++; - input_end++; - } - input_end++; + + + /* calculate the unicode codepoint from the surrogate pair */ + codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | + (second_code & 0x3FF)); + } else { + sequence_length = 6; /* \uXXXX */ + codepoint = first_code; + } + + /* encode as UTF-8 + * takes at maximum 4 bytes to encode: + * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ + if (codepoint < 0x80) { + /* normal ascii, encoding 0xxxxxxx */ + utf8_length = 1; + } else if (codepoint < 0x800) { + /* two bytes, encoding 110xxxxx 10xxxxxx */ + utf8_length = 2; + first_byte_mark = 0xC0; /* 11000000 */ + } else if (codepoint < 0x10000) { + 
/* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ + utf8_length = 3; + first_byte_mark = 0xE0; /* 11100000 */ + } else if (codepoint <= 0x10FFFF) { + /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ + utf8_length = 4; + first_byte_mark = 0xF0; /* 11110000 */ + } else { + /* invalid unicode codepoint */ + goto fail; } - if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"')) - { - goto fail; /* string ended unexpectedly */ + + /* encode as utf8 */ + for (utf8_position = (unsigned char)(utf8_length - 1); + utf8_position > 0; utf8_position--) { + /* 10xxxxxx */ + (*output_pointer)[utf8_position] = + (unsigned char)((codepoint | 0x80) & 0xBF); + codepoint >>= 6; + } + /* encode first byte */ + if (utf8_length > 1) { + (*output_pointer)[0] = + (unsigned char)((codepoint | first_byte_mark) & 0xFF); + } else { + (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); } - /* This is at most how much we need for the output */ - allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes; - output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof("")); - if (output == NULL) - { - goto fail; /* allocation failure */ + *output_pointer += utf8_length; + + return sequence_length; + +fail: + return 0; +} + +/* Parse the input text into an unescaped cinput, and populate item. 
*/ +static cJSON_bool parse_string(cJSON *const item, + parse_buffer *const input_buffer) { + const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; + const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; + unsigned char *output_pointer = NULL; + unsigned char *output = NULL; + + /* not a string */ + if (buffer_at_offset(input_buffer)[0] != '\"') { + goto fail; } - } - output_pointer = output; - /* loop through the string literal */ - while (input_pointer < input_end) - { - if (*input_pointer != '\\') { - *output_pointer++ = *input_pointer++; + /* calculate approximate size of the output (overestimate) */ + size_t allocation_length = 0; + size_t skipped_bytes = 0; + while (((size_t)(input_end - input_buffer->content) < + input_buffer->length) && + (*input_end != '\"')) { + /* is escape sequence */ + if (input_end[0] == '\\') { + if ((size_t)(input_end + 1 - + input_buffer->content) >= + input_buffer->length) { + /* prevent buffer overflow when last + * input character is a backslash */ + goto fail; + } + skipped_bytes++; + input_end++; + } + input_end++; + } + if (((size_t)(input_end - input_buffer->content) >= + input_buffer->length) || + (*input_end != '\"')) { + goto fail; /* string ended unexpectedly */ + } + + /* This is at most how much we need for the output */ + allocation_length = + (size_t)(input_end - buffer_at_offset(input_buffer)) - + skipped_bytes; + output = (unsigned char *)input_buffer->hooks.allocate( + allocation_length + sizeof("")); + if (output == NULL) { + goto fail; /* allocation failure */ + } } - /* escape sequence */ - else - { - unsigned char sequence_length = 2; - if ((input_end - input_pointer) < 1) - { - goto fail; - } - - switch (input_pointer[1]) - { - case 'b': - *output_pointer++ = '\b'; - break; - case 'f': - *output_pointer++ = '\f'; - break; - case 'n': - *output_pointer++ = '\n'; - break; - case 'r': - *output_pointer++ = '\r'; - break; - case 't': - *output_pointer++ = '\t'; - break; - case 
'\"': - case '\\': - case '/': - *output_pointer++ = input_pointer[1]; - break; - - /* UTF-16 literal */ - case 'u': - sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer); - if (sequence_length == 0) - { - /* failed to convert UTF16-literal to UTF-8 */ - goto fail; - } - break; - default: - goto fail; - } - input_pointer += sequence_length; + output_pointer = output; + /* loop through the string literal */ + while (input_pointer < input_end) { + if (*input_pointer != '\\') { + *output_pointer++ = *input_pointer++; + } + /* escape sequence */ + else { + unsigned char sequence_length = 2; + if ((input_end - input_pointer) < 1) { + goto fail; + } + + switch (input_pointer[1]) { + case 'b': + *output_pointer++ = '\b'; + break; + case 'f': + *output_pointer++ = '\f'; + break; + case 'n': + *output_pointer++ = '\n'; + break; + case 'r': + *output_pointer++ = '\r'; + break; + case 't': + *output_pointer++ = '\t'; + break; + case '\"': + case '\\': + case '/': + *output_pointer++ = input_pointer[1]; + break; + + /* UTF-16 literal */ + case 'u': + sequence_length = utf16_literal_to_utf8( + input_pointer, input_end, &output_pointer); + if (sequence_length == 0) { + /* failed to convert UTF16-literal to + * UTF-8 */ + goto fail; + } + break; + + default: + goto fail; + } + input_pointer += sequence_length; + } } - } - /* zero terminate the output */ - *output_pointer = '\0'; + /* zero terminate the output */ + *output_pointer = '\0'; - item->type = cJSON_String; - item->valuestring = (char*)output; + item->type = cJSON_String; + item->valuestring = (char *)output; - input_buffer->offset = (size_t) (input_end - input_buffer->content); - input_buffer->offset++; + input_buffer->offset = (size_t)(input_end - input_buffer->content); + input_buffer->offset++; - return true; + return true; fail: - if (output != NULL) - { - input_buffer->hooks.deallocate(output); - } + if (output != NULL) { + input_buffer->hooks.deallocate(output); + } - if 
(input_pointer != NULL) - { - input_buffer->offset = (size_t)(input_pointer - input_buffer->content); - } + if (input_pointer != NULL) { + input_buffer->offset = + (size_t)(input_pointer - input_buffer->content); + } - return false; + return false; } /* Render the cstring provided to an escaped version that can be printed. */ -static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) -{ - const unsigned char *input_pointer = NULL; - unsigned char *output = NULL; - unsigned char *output_pointer = NULL; - size_t output_length = 0; - /* numbers of additional characters needed for escaping */ - size_t escape_characters = 0; - - if (output_buffer == NULL) - { - return false; - } - - /* empty string */ - if (input == NULL) - { - output = ensure(output_buffer, sizeof("\"\"")); - if (output == NULL) - { - return false; +static cJSON_bool print_string_ptr(const unsigned char *const input, + printbuffer *const output_buffer) { + const unsigned char *input_pointer = NULL; + unsigned char *output = NULL; + unsigned char *output_pointer = NULL; + size_t output_length = 0; + /* numbers of additional characters needed for escaping */ + size_t escape_characters = 0; + + if (output_buffer == NULL) { + return false; } - strcpy((char*)output, "\"\""); - - return true; - } - /* set "flag" to 1 if something needs to be escaped */ - for (input_pointer = input; *input_pointer; input_pointer++) - { - switch (*input_pointer) - { - case '\"': - case '\\': - case '\b': - case '\f': - case '\n': - case '\r': - case '\t': - /* one character escape sequence */ - escape_characters++; - break; - default: - if (*input_pointer < 32) - { - /* UTF-16 escape sequence uXXXX */ - escape_characters += 5; + /* empty string */ + if (input == NULL) { + output = ensure(output_buffer, sizeof("\"\"")); + if (output == NULL) { + return false; } - break; - } - } - output_length = (size_t)(input_pointer - input) + escape_characters; - - output = ensure(output_buffer, 
output_length + sizeof("\"\"")); - if (output == NULL) - { - return false; - } + strcpy((char *)output, "\"\""); - /* no characters have to be escaped */ - if (escape_characters == 0) - { - output[0] = '\"'; - memcpy(output + 1, input, output_length); - output[output_length + 1] = '\"'; - output[output_length + 2] = '\0'; - - return true; - } - - output[0] = '\"'; - output_pointer = output + 1; - /* copy the string */ - for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++) - { - if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\')) - { - /* normal character, copy */ - *output_pointer = *input_pointer; + return true; } - else - { - /* character needs to be escaped */ - *output_pointer++ = '\\'; - switch (*input_pointer) - { - case '\\': - *output_pointer = '\\'; - break; + + /* set "flag" to 1 if something needs to be escaped */ + for (input_pointer = input; *input_pointer; input_pointer++) { + switch (*input_pointer) { case '\"': - *output_pointer = '\"'; - break; + case '\\': case '\b': - *output_pointer = 'b'; - break; case '\f': - *output_pointer = 'f'; - break; case '\n': - *output_pointer = 'n'; - break; case '\r': - *output_pointer = 'r'; - break; case '\t': - *output_pointer = 't'; - break; + /* one character escape sequence */ + escape_characters++; + break; default: - /* escape and print as unicode codepoint */ - sprintf((char*)output_pointer, "u%04x", *input_pointer); - output_pointer += 4; - break; - } + if (*input_pointer < 32) { + /* UTF-16 escape sequence uXXXX */ + escape_characters += 5; + } + break; + } + } + output_length = (size_t)(input_pointer - input) + escape_characters; + + output = ensure(output_buffer, output_length + sizeof("\"\"")); + if (output == NULL) { + return false; } - } - output[output_length + 1] = '\"'; - output[output_length + 2] = '\0'; - return true; + /* no characters have to be escaped */ + if (escape_characters == 0) { + output[0] = '\"'; + memcpy(output 
+ 1, input, output_length); + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; + } + + output[0] = '\"'; + output_pointer = output + 1; + /* copy the string */ + for (input_pointer = input; *input_pointer != '\0'; + (void)input_pointer++, output_pointer++) { + if ((*input_pointer > 31) && (*input_pointer != '\"') && + (*input_pointer != '\\')) { + /* normal character, copy */ + *output_pointer = *input_pointer; + } else { + /* character needs to be escaped */ + *output_pointer++ = '\\'; + switch (*input_pointer) { + case '\\': + *output_pointer = '\\'; + break; + case '\"': + *output_pointer = '\"'; + break; + case '\b': + *output_pointer = 'b'; + break; + case '\f': + *output_pointer = 'f'; + break; + case '\n': + *output_pointer = 'n'; + break; + case '\r': + *output_pointer = 'r'; + break; + case '\t': + *output_pointer = 't'; + break; + default: + /* escape and print as unicode codepoint */ + sprintf((char *)output_pointer, "u%04x", + *input_pointer); + output_pointer += 4; + break; + } + } + } + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; } /* Invoke print_string_ptr (which is useful) on an item. */ -static cJSON_bool print_string(const cJSON * const item, printbuffer * const p) -{ - return print_string_ptr((unsigned char*)item->valuestring, p); +static cJSON_bool print_string(const cJSON *const item, printbuffer *const p) { + return print_string_ptr((unsigned char *)item->valuestring, p); } /* Predeclare these prototypes. 
*/ -static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer); -static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer); -static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer); -static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer); -static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer); -static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_value(cJSON *const item, + parse_buffer *const input_buffer); +static cJSON_bool print_value(const cJSON *const item, + printbuffer *const output_buffer); +static cJSON_bool parse_array(cJSON *const item, + parse_buffer *const input_buffer); +static cJSON_bool print_array(const cJSON *const item, + printbuffer *const output_buffer); +static cJSON_bool parse_object(cJSON *const item, + parse_buffer *const input_buffer); +static cJSON_bool print_object(const cJSON *const item, + printbuffer *const output_buffer); /* Utility to jump whitespace and cr/lf */ -static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) -{ - if ((buffer == NULL) || (buffer->content == NULL)) - { - return NULL; - } +static parse_buffer *buffer_skip_whitespace(parse_buffer *const buffer) { + if ((buffer == NULL) || (buffer->content == NULL)) { + return NULL; + } - if (cannot_access_at_index(buffer, 0)) - { - return buffer; - } + if (cannot_access_at_index(buffer, 0)) { + return buffer; + } - while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32)) - { - buffer->offset++; - } + while (can_access_at_index(buffer, 0) && + (buffer_at_offset(buffer)[0] <= 32)) { + buffer->offset++; + } - if (buffer->offset == buffer->length) - { - buffer->offset--; - } + if (buffer->offset == buffer->length) { + buffer->offset--; + } - return buffer; + return buffer; } /* skip the UTF-8 BOM (byte order mark) if it is at 
the beginning of a buffer */ -static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) -{ - if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0)) - { - return NULL; - } +static parse_buffer *skip_utf8_bom(parse_buffer *const buffer) { + if ((buffer == NULL) || (buffer->content == NULL) || + (buffer->offset != 0)) { + return NULL; + } - if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0)) - { - buffer->offset += 3; - } + if (can_access_at_index(buffer, 4) && + (strncmp((const char *)buffer_at_offset(buffer), "\xEF\xBB\xBF", + 3) == 0)) { + buffer->offset += 3; + } - return buffer; + return buffer; } -CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated) -{ - size_t buffer_length; +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithOpts(const char *value, + const char **return_parse_end, + cJSON_bool require_null_terminated) { + size_t buffer_length; - if (NULL == value) - { - return NULL; - } + if (NULL == value) { + return NULL; + } - /* Adding null character size due to require_null_terminated. */ - buffer_length = strlen(value) + sizeof(""); + /* Adding null character size due to require_null_terminated. */ + buffer_length = strlen(value) + sizeof(""); - return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, require_null_terminated); + return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, + require_null_terminated); } /* Parse an object - create a new root, and populate. 
*/ -CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated) -{ - parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } }; - cJSON *item = NULL; - - /* reset error position */ - global_error.json = NULL; - global_error.position = 0; - - if (value == NULL || 0 == buffer_length) - { - goto fail; - } - - buffer.content = (const unsigned char*)value; - buffer.length = buffer_length; - buffer.offset = 0; - buffer.hooks = global_hooks; - - item = cJSON_New_Item(&global_hooks); - if (item == NULL) /* memory fail */ - { - goto fail; - } - - if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer)))) - { - /* parse failure. ep is set. */ - goto fail; - } - - /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ - if (require_null_terminated) - { - buffer_skip_whitespace(&buffer); - if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0') - { - goto fail; +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithLengthOpts(const char *value, + size_t buffer_length, + const char **return_parse_end, + cJSON_bool require_null_terminated) { + parse_buffer buffer = {0, 0, 0, 0, {0, 0, 0}}; + cJSON *item = NULL; + + /* reset error position */ + global_error.json = NULL; + global_error.position = 0; + + if (value == NULL || 0 == buffer_length) { + goto fail; } - } - if (return_parse_end) - { - *return_parse_end = (const char*)buffer_at_offset(&buffer); - } - return item; + buffer.content = (const unsigned char *)value; + buffer.length = buffer_length; + buffer.offset = 0; + buffer.hooks = global_hooks; -fail: - if (item != NULL) - { - cJSON_Delete(item); - } + item = cJSON_New_Item(&global_hooks); + if (item == NULL) /* memory fail */ + { + goto fail; + } - if (value != NULL) - { - error local_error; - local_error.json = (const unsigned char*)value; - local_error.position = 0; + if (!parse_value(item, + 
buffer_skip_whitespace(skip_utf8_bom(&buffer)))) { + /* parse failure. ep is set. */ + goto fail; + } - if (buffer.offset < buffer.length) - { - local_error.position = buffer.offset; + /* if we require null-terminated JSON without appended garbage, skip and + * then check for a null terminator */ + if (require_null_terminated) { + buffer_skip_whitespace(&buffer); + if ((buffer.offset >= buffer.length) || + buffer_at_offset(&buffer)[0] != '\0') { + goto fail; + } } - else if (buffer.length > 0) - { - local_error.position = buffer.length - 1; + if (return_parse_end) { + *return_parse_end = (const char *)buffer_at_offset(&buffer); } - if (return_parse_end != NULL) - { - *return_parse_end = (const char*)local_error.json + local_error.position; + return item; + +fail: + if (item != NULL) { + cJSON_Delete(item); } - global_error = local_error; - } + if (value != NULL) { + error local_error; + local_error.json = (const unsigned char *)value; + local_error.position = 0; + + if (buffer.offset < buffer.length) { + local_error.position = buffer.offset; + } else if (buffer.length > 0) { + local_error.position = buffer.length - 1; + } + + if (return_parse_end != NULL) { + *return_parse_end = (const char *)local_error.json + + local_error.position; + } - return NULL; + global_error = local_error; + } + + return NULL; } /* Default options for cJSON_Parse */ -CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) -{ - return cJSON_ParseWithOpts(value, 0, 0); +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) { + return cJSON_ParseWithOpts(value, 0, 0); } -CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length) -{ - return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0); +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithLength(const char *value, size_t buffer_length) { + return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0); } #define cjson_min(a, b) (((a) < (b)) ? 
(a) : (b)) -static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks) -{ - static const size_t default_buffer_size = 256; - printbuffer buffer[1]; - unsigned char *printed = NULL; - - memset(buffer, 0, sizeof(buffer)); - - /* create buffer */ - buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size); - buffer->length = default_buffer_size; - buffer->format = format; - buffer->hooks = *hooks; - if (buffer->buffer == NULL) - { - goto fail; - } - - /* print the value */ - if (!print_value(item, buffer)) - { - goto fail; - } - update_offset(buffer); - - /* check if reallocate is available */ - if (hooks->reallocate != NULL) - { - printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1); - if (printed == NULL) { - goto fail; - } - buffer->buffer = NULL; - } - else /* otherwise copy the JSON over to a new buffer */ - { - printed = (unsigned char*) hooks->allocate(buffer->offset + 1); - if (printed == NULL) - { - goto fail; +static unsigned char *print(const cJSON *const item, + cJSON_bool format, + const internal_hooks *const hooks) { + static const size_t default_buffer_size = 256; + printbuffer buffer[1]; + unsigned char *printed = NULL; + + memset(buffer, 0, sizeof(buffer)); + + /* create buffer */ + buffer->buffer = (unsigned char *)hooks->allocate(default_buffer_size); + buffer->length = default_buffer_size; + buffer->format = format; + buffer->hooks = *hooks; + if (buffer->buffer == NULL) { + goto fail; } - memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1)); - printed[buffer->offset] = '\0'; /* just to be sure */ - /* free the buffer */ - hooks->deallocate(buffer->buffer); - } + /* print the value */ + if (!print_value(item, buffer)) { + goto fail; + } + update_offset(buffer); - return printed; + /* check if reallocate is available */ + if (hooks->reallocate != NULL) { + printed = (unsigned char *)hooks->reallocate( + buffer->buffer, buffer->offset 
+ 1); + if (printed == NULL) { + goto fail; + } + buffer->buffer = NULL; + } else /* otherwise copy the JSON over to a new buffer */ + { + printed = (unsigned char *)hooks->allocate(buffer->offset + 1); + if (printed == NULL) { + goto fail; + } + memcpy(printed, buffer->buffer, + cjson_min(buffer->length, buffer->offset + 1)); + printed[buffer->offset] = '\0'; /* just to be sure */ + + /* free the buffer */ + hooks->deallocate(buffer->buffer); + } + + return printed; fail: - if (buffer->buffer != NULL) - { - hooks->deallocate(buffer->buffer); - } + if (buffer->buffer != NULL) { + hooks->deallocate(buffer->buffer); + } - if (printed != NULL) - { - hooks->deallocate(printed); - } + if (printed != NULL) { + hooks->deallocate(printed); + } - return NULL; + return NULL; } /* Render a cJSON item/entity/structure to text. */ -CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) -{ - return (char*)print(item, true, &global_hooks); +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) { + return (char *)print(item, true, &global_hooks); } -CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) -{ - return (char*)print(item, false, &global_hooks); +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) { + return (char *)print(item, false, &global_hooks); } -CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) -{ - printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; +CJSON_PUBLIC(char *) +cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) { + printbuffer p = {0, 0, 0, 0, 0, 0, {0, 0, 0}}; - if (prebuffer < 0) - { - return NULL; - } + if (prebuffer < 0) { + return NULL; + } - p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer); - if (!p.buffer) - { - return NULL; - } + p.buffer = (unsigned char *)global_hooks.allocate((size_t)prebuffer); + if (!p.buffer) { + return NULL; + } - p.length = (size_t)prebuffer; - p.offset = 0; - p.noalloc = false; - p.format = fmt; - p.hooks = global_hooks; + 
p.length = (size_t)prebuffer; + p.offset = 0; + p.noalloc = false; + p.format = fmt; + p.hooks = global_hooks; - if (!print_value(item, &p)) - { - global_hooks.deallocate(p.buffer); - return NULL; - } + if (!print_value(item, &p)) { + global_hooks.deallocate(p.buffer); + return NULL; + } - return (char*)p.buffer; + return (char *)p.buffer; } -CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format) -{ - printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; +CJSON_PUBLIC(cJSON_bool) +cJSON_PrintPreallocated(cJSON *item, + char *buffer, + const int length, + const cJSON_bool format) { + printbuffer p = {0, 0, 0, 0, 0, 0, {0, 0, 0}}; - if ((length < 0) || (buffer == NULL)) - { - return false; - } + if ((length < 0) || (buffer == NULL)) { + return false; + } - p.buffer = (unsigned char*)buffer; - p.length = (size_t)length; - p.offset = 0; - p.noalloc = true; - p.format = format; - p.hooks = global_hooks; + p.buffer = (unsigned char *)buffer; + p.length = (size_t)length; + p.offset = 0; + p.noalloc = true; + p.format = format; + p.hooks = global_hooks; - return print_value(item, &p); + return print_value(item, &p); } /* Parser core - when encountering text, process appropriately. 
*/ -static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer) -{ - if ((input_buffer == NULL) || (input_buffer->content == NULL)) - { - return false; /* no input */ - } - - /* parse the different types of values */ - /* null */ - if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0)) - { - item->type = cJSON_NULL; - input_buffer->offset += 4; - return true; - } - /* false */ - if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0)) - { - item->type = cJSON_False; - input_buffer->offset += 5; - return true; - } - /* true */ - if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0)) - { - item->type = cJSON_True; - item->valueint = 1; - input_buffer->offset += 4; - return true; - } - /* string */ - if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"')) - { - return parse_string(item, input_buffer); - } - /* number */ - if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9')))) - { - return parse_number(item, input_buffer); - } - /* array */ - if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '[')) - { - return parse_array(item, input_buffer); - } - /* object */ - if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{')) - { - return parse_object(item, input_buffer); - } - - return false; +static cJSON_bool parse_value(cJSON *const item, + parse_buffer *const input_buffer) { + if ((input_buffer == NULL) || (input_buffer->content == NULL)) { + return false; /* no input */ + } + + /* parse the different types of values */ + /* null */ + if (can_read(input_buffer, 4) && + (strncmp((const char *)buffer_at_offset(input_buffer), "null", 4) == + 0)) { + item->type = cJSON_NULL; + 
input_buffer->offset += 4; + return true; + } + /* false */ + if (can_read(input_buffer, 5) && + (strncmp((const char *)buffer_at_offset(input_buffer), "false", + 5) == 0)) { + item->type = cJSON_False; + input_buffer->offset += 5; + return true; + } + /* true */ + if (can_read(input_buffer, 4) && + (strncmp((const char *)buffer_at_offset(input_buffer), "true", 4) == + 0)) { + item->type = cJSON_True; + item->valueint = 1; + input_buffer->offset += 4; + return true; + } + /* string */ + if (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == '\"')) { + return parse_string(item, input_buffer); + } + /* number */ + if (can_access_at_index(input_buffer, 0) && + ((buffer_at_offset(input_buffer)[0] == '-') || + ((buffer_at_offset(input_buffer)[0] >= '0') && + (buffer_at_offset(input_buffer)[0] <= '9')))) { + return parse_number(item, input_buffer); + } + /* array */ + if (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == '[')) { + return parse_array(item, input_buffer); + } + /* object */ + if (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == '{')) { + return parse_object(item, input_buffer); + } + + return false; } /* Render a value to text. 
*/ -static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer) -{ - unsigned char *output = NULL; +static cJSON_bool print_value(const cJSON *const item, + printbuffer *const output_buffer) { + unsigned char *output = NULL; - if ((item == NULL) || (output_buffer == NULL)) - { - return false; - } + if ((item == NULL) || (output_buffer == NULL)) { + return false; + } - switch ((item->type) & 0xFF) - { + switch ((item->type) & 0xFF) { case cJSON_NULL: - output = ensure(output_buffer, 5); - if (output == NULL) - { - return false; - } - strcpy((char*)output, "null"); - return true; + output = ensure(output_buffer, 5); + if (output == NULL) { + return false; + } + strcpy((char *)output, "null"); + return true; case cJSON_False: - output = ensure(output_buffer, 6); - if (output == NULL) - { - return false; - } - strcpy((char*)output, "false"); - return true; + output = ensure(output_buffer, 6); + if (output == NULL) { + return false; + } + strcpy((char *)output, "false"); + return true; case cJSON_True: - output = ensure(output_buffer, 5); - if (output == NULL) - { - return false; - } - strcpy((char*)output, "true"); - return true; + output = ensure(output_buffer, 5); + if (output == NULL) { + return false; + } + strcpy((char *)output, "true"); + return true; case cJSON_Number: - return print_number(item, output_buffer); + return print_number(item, output_buffer); - case cJSON_Raw: - { - size_t raw_length = 0; - if (item->valuestring == NULL) - { - return false; - } + case cJSON_Raw: { + size_t raw_length = 0; + if (item->valuestring == NULL) { + return false; + } - raw_length = strlen(item->valuestring) + sizeof(""); - output = ensure(output_buffer, raw_length); - if (output == NULL) - { - return false; - } - memcpy(output, item->valuestring, raw_length); - return true; + raw_length = strlen(item->valuestring) + sizeof(""); + output = ensure(output_buffer, raw_length); + if (output == NULL) { + return false; + } + memcpy(output, 
item->valuestring, raw_length); + return true; } case cJSON_String: - return print_string(item, output_buffer); + return print_string(item, output_buffer); case cJSON_Array: - return print_array(item, output_buffer); + return print_array(item, output_buffer); case cJSON_Object: - return print_object(item, output_buffer); + return print_object(item, output_buffer); default: - return false; - } + return false; + } } /* Build an array from input text. */ -static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer) -{ - cJSON *head = NULL; /* head of the linked list */ - cJSON *current_item = NULL; - - if (input_buffer->depth >= CJSON_NESTING_LIMIT) - { - return false; /* to deeply nested */ - } - input_buffer->depth++; - - if (buffer_at_offset(input_buffer)[0] != '[') - { - /* not an array */ - goto fail; - } - - input_buffer->offset++; - buffer_skip_whitespace(input_buffer); - if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']')) - { - /* empty array */ - goto success; - } - - /* check if we skipped to the end of the buffer */ - if (cannot_access_at_index(input_buffer, 0)) - { - input_buffer->offset--; - goto fail; - } - - /* step back to character in front of the first element */ - input_buffer->offset--; - /* loop through the comma separated array elements */ - do - { - /* allocate next item */ - cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); - if (new_item == NULL) - { - goto fail; /* allocation failure */ - } +static cJSON_bool parse_array(cJSON *const item, + parse_buffer *const input_buffer) { + cJSON *head = NULL; /* head of the linked list */ + cJSON *current_item = NULL; - /* attach next item to list */ - if (head == NULL) - { - /* start the linked list */ - current_item = head = new_item; + if (input_buffer->depth >= CJSON_NESTING_LIMIT) { + return false; /* to deeply nested */ } - else - { - /* add to the end and advance */ - current_item->next = new_item; - new_item->prev = 
current_item; - current_item = new_item; + input_buffer->depth++; + + if (buffer_at_offset(input_buffer)[0] != '[') { + /* not an array */ + goto fail; } - /* parse next value */ input_buffer->offset++; buffer_skip_whitespace(input_buffer); - if (!parse_value(current_item, input_buffer)) - { - goto fail; /* failed to parse value */ + if (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == ']')) { + /* empty array */ + goto success; + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) { + input_buffer->offset--; + goto fail; } - buffer_skip_whitespace(input_buffer); - } - while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); - if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']') - { - goto fail; /* expected end of array */ - } + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) { + /* start the linked list */ + current_item = head = new_item; + } else { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse next value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } while (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || + buffer_at_offset(input_buffer)[0] != ']') { + goto fail; /* expected end of array */ + } success: - input_buffer->depth--; + input_buffer->depth--; - if (head 
!= NULL) { - head->prev = current_item; - } + if (head != NULL) { + head->prev = current_item; + } - item->type = cJSON_Array; - item->child = head; + item->type = cJSON_Array; + item->child = head; - input_buffer->offset++; + input_buffer->offset++; - return true; + return true; fail: - if (head != NULL) - { - cJSON_Delete(head); - } + if (head != NULL) { + cJSON_Delete(head); + } - return false; + return false; } /* Render an array to text */ -static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer) -{ - unsigned char *output_pointer = NULL; - size_t length = 0; - cJSON *current_element = item->child; - - if (output_buffer == NULL) - { - return false; - } +static cJSON_bool print_array(const cJSON *const item, + printbuffer *const output_buffer) { + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_element = item->child; - /* Compose the output array. */ - /* opening square bracket */ - output_pointer = ensure(output_buffer, 1); - if (output_pointer == NULL) - { - return false; - } + if (output_buffer == NULL) { + return false; + } - *output_pointer = '['; - output_buffer->offset++; - output_buffer->depth++; + /* Compose the output array. */ + /* opening square bracket */ + output_pointer = ensure(output_buffer, 1); + if (output_pointer == NULL) { + return false; + } - while (current_element != NULL) - { - if (!print_value(current_element, output_buffer)) - { - return false; + *output_pointer = '['; + output_buffer->offset++; + output_buffer->depth++; + + while (current_element != NULL) { + if (!print_value(current_element, output_buffer)) { + return false; + } + update_offset(output_buffer); + if (current_element->next) { + length = (size_t)(output_buffer->format ? 
2 : 1); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) { + return false; + } + *output_pointer++ = ','; + if (output_buffer->format) { + *output_pointer++ = ' '; + } + *output_pointer = '\0'; + output_buffer->offset += length; + } + current_element = current_element->next; } - update_offset(output_buffer); - if (current_element->next) - { - length = (size_t) (output_buffer->format ? 2 : 1); - output_pointer = ensure(output_buffer, length + 1); - if (output_pointer == NULL) - { + + output_pointer = ensure(output_buffer, 2); + if (output_pointer == NULL) { return false; - } - *output_pointer++ = ','; - if(output_buffer->format) - { - *output_pointer++ = ' '; - } - *output_pointer = '\0'; - output_buffer->offset += length; - } - current_element = current_element->next; - } - - output_pointer = ensure(output_buffer, 2); - if (output_pointer == NULL) - { - return false; - } - *output_pointer++ = ']'; - *output_pointer = '\0'; - output_buffer->depth--; + } + *output_pointer++ = ']'; + *output_pointer = '\0'; + output_buffer->depth--; - return true; + return true; } /* Build an object from the text. 
*/ -static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer) -{ - cJSON *head = NULL; /* linked list head */ - cJSON *current_item = NULL; - - if (input_buffer->depth >= CJSON_NESTING_LIMIT) - { - return false; /* to deeply nested */ - } - input_buffer->depth++; - - if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{')) - { - goto fail; /* not an object */ - } - - input_buffer->offset++; - buffer_skip_whitespace(input_buffer); - if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}')) - { - goto success; /* empty object */ - } - - /* check if we skipped to the end of the buffer */ - if (cannot_access_at_index(input_buffer, 0)) - { - input_buffer->offset--; - goto fail; - } - - /* step back to character in front of the first element */ - input_buffer->offset--; - /* loop through the comma separated array elements */ - do - { - /* allocate next item */ - cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); - if (new_item == NULL) - { - goto fail; /* allocation failure */ - } +static cJSON_bool parse_object(cJSON *const item, + parse_buffer *const input_buffer) { + cJSON *head = NULL; /* linked list head */ + cJSON *current_item = NULL; - /* attach next item to list */ - if (head == NULL) - { - /* start the linked list */ - current_item = head = new_item; + if (input_buffer->depth >= CJSON_NESTING_LIMIT) { + return false; /* to deeply nested */ } - else - { - /* add to the end and advance */ - current_item->next = new_item; - new_item->prev = current_item; - current_item = new_item; + input_buffer->depth++; + + if (cannot_access_at_index(input_buffer, 0) || + (buffer_at_offset(input_buffer)[0] != '{')) { + goto fail; /* not an object */ } - /* parse the name of the child */ input_buffer->offset++; buffer_skip_whitespace(input_buffer); - if (!parse_string(current_item, input_buffer)) - { - goto fail; /* failed to parse name */ + if (can_access_at_index(input_buffer, 0) 
&& + (buffer_at_offset(input_buffer)[0] == '}')) { + goto success; /* empty object */ } - buffer_skip_whitespace(input_buffer); - - /* swap valuestring and string, because we parsed the name */ - current_item->string = current_item->valuestring; - current_item->valuestring = NULL; - if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':')) - { - goto fail; /* invalid object */ + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) { + input_buffer->offset--; + goto fail; } - /* parse the value */ - input_buffer->offset++; - buffer_skip_whitespace(input_buffer); - if (!parse_value(current_item, input_buffer)) - { - goto fail; /* failed to parse value */ - } - buffer_skip_whitespace(input_buffer); - } - while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) { + /* start the linked list */ + current_item = head = new_item; + } else { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse the name of the child */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_string(current_item, input_buffer)) { + goto fail; /* failed to parse name */ + } + buffer_skip_whitespace(input_buffer); + + /* swap valuestring and string, because we parsed the name */ + current_item->string = current_item->valuestring; + current_item->valuestring = NULL; - if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}')) - { - goto fail; /* expected end of object */ - } + if 
(cannot_access_at_index(input_buffer, 0) || + (buffer_at_offset(input_buffer)[0] != ':')) { + goto fail; /* invalid object */ + } + + /* parse the value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } while (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || + (buffer_at_offset(input_buffer)[0] != '}')) { + goto fail; /* expected end of object */ + } success: - input_buffer->depth--; + input_buffer->depth--; - if (head != NULL) { - head->prev = current_item; - } + if (head != NULL) { + head->prev = current_item; + } - item->type = cJSON_Object; - item->child = head; + item->type = cJSON_Object; + item->child = head; - input_buffer->offset++; - return true; + input_buffer->offset++; + return true; fail: - if (head != NULL) - { - cJSON_Delete(head); - } + if (head != NULL) { + cJSON_Delete(head); + } - return false; + return false; } /* Render an object to text. */ -static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer) -{ - unsigned char *output_pointer = NULL; - size_t length = 0; - cJSON *current_item = item->child; - - if (output_buffer == NULL) - { - return false; - } +static cJSON_bool print_object(const cJSON *const item, + printbuffer *const output_buffer) { + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_item = item->child; - /* Compose the output: */ - length = (size_t) (output_buffer->format ? 
2 : 1); /* fmt: {\n */ - output_pointer = ensure(output_buffer, length + 1); - if (output_pointer == NULL) - { - return false; - } - - *output_pointer++ = '{'; - output_buffer->depth++; - if (output_buffer->format) - { - *output_pointer++ = '\n'; - } - output_buffer->offset += length; - - while (current_item) - { - if (output_buffer->format) - { - size_t i; - output_pointer = ensure(output_buffer, output_buffer->depth); - if (output_pointer == NULL) - { + if (output_buffer == NULL) { return false; - } - for (i = 0; i < output_buffer->depth; i++) - { - *output_pointer++ = '\t'; - } - output_buffer->offset += output_buffer->depth; } - /* print key */ - if (!print_string_ptr((unsigned char*)current_item->string, output_buffer)) - { - return false; + /* Compose the output: */ + length = (size_t)(output_buffer->format ? 2 : 1); /* fmt: {\n */ + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) { + return false; } - update_offset(output_buffer); - length = (size_t) (output_buffer->format ? 2 : 1); - output_pointer = ensure(output_buffer, length); - if (output_pointer == NULL) - { - return false; - } - *output_pointer++ = ':'; - if (output_buffer->format) - { - *output_pointer++ = '\t'; + *output_pointer++ = '{'; + output_buffer->depth++; + if (output_buffer->format) { + *output_pointer++ = '\n'; } output_buffer->offset += length; - /* print value */ - if (!print_value(current_item, output_buffer)) - { - return false; - } - update_offset(output_buffer); + while (current_item) { + if (output_buffer->format) { + size_t i; + output_pointer = + ensure(output_buffer, output_buffer->depth); + if (output_pointer == NULL) { + return false; + } + for (i = 0; i < output_buffer->depth; i++) { + *output_pointer++ = '\t'; + } + output_buffer->offset += output_buffer->depth; + } - /* print comma if not last */ - length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 
1 : 0)); - output_pointer = ensure(output_buffer, length + 1); - if (output_pointer == NULL) - { - return false; - } - if (current_item->next) - { - *output_pointer++ = ','; - } + /* print key */ + if (!print_string_ptr((unsigned char *)current_item->string, + output_buffer)) { + return false; + } + update_offset(output_buffer); - if (output_buffer->format) - { - *output_pointer++ = '\n'; - } - *output_pointer = '\0'; - output_buffer->offset += length; + length = (size_t)(output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length); + if (output_pointer == NULL) { + return false; + } + *output_pointer++ = ':'; + if (output_buffer->format) { + *output_pointer++ = '\t'; + } + output_buffer->offset += length; - current_item = current_item->next; - } + /* print value */ + if (!print_value(current_item, output_buffer)) { + return false; + } + update_offset(output_buffer); + + /* print comma if not last */ + length = ((size_t)(output_buffer->format ? 1 : 0) + + (size_t)(current_item->next ? 1 : 0)); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) { + return false; + } + if (current_item->next) { + *output_pointer++ = ','; + } - output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2); - if (output_pointer == NULL) - { - return false; - } - if (output_buffer->format) - { - size_t i; - for (i = 0; i < (output_buffer->depth - 1); i++) - { - *output_pointer++ = '\t'; + if (output_buffer->format) { + *output_pointer++ = '\n'; + } + *output_pointer = '\0'; + output_buffer->offset += length; + + current_item = current_item->next; } - } - *output_pointer++ = '}'; - *output_pointer = '\0'; - output_buffer->depth--; - return true; + output_pointer = + ensure(output_buffer, + output_buffer->format ? 
(output_buffer->depth + 1) : 2); + if (output_pointer == NULL) { + return false; + } + if (output_buffer->format) { + size_t i; + for (i = 0; i < (output_buffer->depth - 1); i++) { + *output_pointer++ = '\t'; + } + } + *output_pointer++ = '}'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; } /* Get Array size/item / object item. */ -CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) -{ - cJSON *child = NULL; - size_t size = 0; +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) { + cJSON *child = NULL; + size_t size = 0; - if (array == NULL) - { - return 0; - } + if (array == NULL) { + return 0; + } - child = array->child; + child = array->child; - while(child != NULL) - { - size++; - child = child->next; - } + while (child != NULL) { + size++; + child = child->next; + } - /* FIXME: Can overflow here. Cannot be fixed without breaking the API */ + /* FIXME: Can overflow here. Cannot be fixed without breaking the API */ - return (int)size; + return (int)size; } -static cJSON* get_array_item(const cJSON *array, size_t index) -{ - cJSON *current_child = NULL; +static cJSON *get_array_item(const cJSON *array, size_t index) { + cJSON *current_child = NULL; - if (array == NULL) - { - return NULL; - } + if (array == NULL) { + return NULL; + } - current_child = array->child; - while ((current_child != NULL) && (index > 0)) - { - index--; - current_child = current_child->next; - } + current_child = array->child; + while ((current_child != NULL) && (index > 0)) { + index--; + current_child = current_child->next; + } - return current_child; + return current_child; } -CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) -{ - if (index < 0) - { - return NULL; - } +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) { + if (index < 0) { + return NULL; + } - return get_array_item(array, (size_t)index); + return get_array_item(array, (size_t)index); } -static cJSON *get_object_item(const cJSON * const object, 
const char * const name, const cJSON_bool case_sensitive) -{ - cJSON *current_element = NULL; +static cJSON *get_object_item(const cJSON *const object, + const char *const name, + const cJSON_bool case_sensitive) { + cJSON *current_element = NULL; - if ((object == NULL) || (name == NULL)) - { - return NULL; - } - - current_element = object->child; - if (case_sensitive) - { - while ((current_element != NULL) && (current_element->string != NULL) && (strcmp(name, current_element->string) != 0)) - { - current_element = current_element->next; + if ((object == NULL) || (name == NULL)) { + return NULL; } - } - else - { - while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0)) - { - current_element = current_element->next; + + current_element = object->child; + if (case_sensitive) { + while ((current_element != NULL) && + (current_element->string != NULL) && + (strcmp(name, current_element->string) != 0)) { + current_element = current_element->next; + } + } else { + while ((current_element != NULL) && + (case_insensitive_strcmp( + (const unsigned char *)name, + (const unsigned char *)(current_element->string)) != + 0)) { + current_element = current_element->next; + } } - } - if ((current_element == NULL) || (current_element->string == NULL)) { - return NULL; - } + if ((current_element == NULL) || (current_element->string == NULL)) { + return NULL; + } - return current_element; + return current_element; } -CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string) -{ - return get_object_item(object, string, false); +CJSON_PUBLIC(cJSON *) +cJSON_GetObjectItem(const cJSON *const object, const char *const string) { + return get_object_item(object, string, false); } -CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string) -{ - return get_object_item(object, string, true); +CJSON_PUBLIC(cJSON *) 
+cJSON_GetObjectItemCaseSensitive(const cJSON *const object, + const char *const string) { + return get_object_item(object, string, true); } -CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string) -{ - return cJSON_GetObjectItem(object, string) ? 1 : 0; +CJSON_PUBLIC(cJSON_bool) +cJSON_HasObjectItem(const cJSON *object, const char *string) { + return cJSON_GetObjectItem(object, string) ? 1 : 0; } /* Utility for array list handling. */ -static void suffix_object(cJSON *prev, cJSON *item) -{ - prev->next = item; - item->prev = prev; +static void suffix_object(cJSON *prev, cJSON *item) { + prev->next = item; + item->prev = prev; } /* Utility for handling references. */ -static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks) -{ - cJSON *reference = NULL; - if (item == NULL) - { - return NULL; - } +static cJSON *create_reference(const cJSON *item, + const internal_hooks *const hooks) { + cJSON *reference = NULL; + if (item == NULL) { + return NULL; + } - reference = cJSON_New_Item(hooks); - if (reference == NULL) - { - return NULL; - } + reference = cJSON_New_Item(hooks); + if (reference == NULL) { + return NULL; + } - memcpy(reference, item, sizeof(cJSON)); - reference->string = NULL; - reference->type |= cJSON_IsReference; - reference->next = reference->prev = NULL; - return reference; + memcpy(reference, item, sizeof(cJSON)); + reference->string = NULL; + reference->type |= cJSON_IsReference; + reference->next = reference->prev = NULL; + return reference; } -static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) -{ - cJSON *child = NULL; +static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) { + cJSON *child = NULL; - if ((item == NULL) || (array == NULL) || (array == item)) - { - return false; - } - - child = array->child; - /* - * To find the last item in array quickly, we use prev in array - */ - if (child == NULL) - { - /* list is empty, start new one */ - array->child = item; - 
item->prev = item; - item->next = NULL; - } - else - { - /* append to the end */ - if (child->prev) - { - suffix_object(child->prev, item); - array->child->prev = item; + if ((item == NULL) || (array == NULL) || (array == item)) { + return false; + } + + child = array->child; + /* + * To find the last item in array quickly, we use prev in array + */ + if (child == NULL) { + /* list is empty, start new one */ + array->child = item; + item->prev = item; + item->next = NULL; + } else { + /* append to the end */ + if (child->prev) { + suffix_object(child->prev, item); + array->child->prev = item; + } } - } - return true; + return true; } /* Add item to array/object. */ -CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item) -{ - return add_item_to_array(array, item); +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item) { + return add_item_to_array(array, item); } -#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) - #pragma GCC diagnostic push +#if defined(__clang__) || \ + (defined(__GNUC__) && \ + ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) +#pragma GCC diagnostic push #endif #ifdef __GNUC__ #pragma GCC diagnostic ignored "-Wcast-qual" #endif /* helper function to cast away const */ -static void* cast_away_const(const void* string) -{ - return (void*)string; +static void *cast_away_const(const void *string) { + return (void *)string; } -#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) - #pragma GCC diagnostic pop +#if defined(__clang__) || \ + (defined(__GNUC__) && \ + ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) +#pragma GCC diagnostic pop #endif -static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key) -{ - char *new_key = NULL; - int new_type = 
cJSON_Invalid; +static cJSON_bool add_item_to_object(cJSON *const object, + const char *const string, + cJSON *const item, + const internal_hooks *const hooks, + const cJSON_bool constant_key) { + char *new_key = NULL; + int new_type = cJSON_Invalid; - if ((object == NULL) || (string == NULL) || (item == NULL) || (object == item)) - { - return false; - } - - if (constant_key) - { - new_key = (char*)cast_away_const(string); - new_type = item->type | cJSON_StringIsConst; - } - else - { - new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks); - if (new_key == NULL) - { - return false; + if ((object == NULL) || (string == NULL) || (item == NULL) || + (object == item)) { + return false; } - new_type = item->type & ~cJSON_StringIsConst; - } + if (constant_key) { + new_key = (char *)cast_away_const(string); + new_type = item->type | cJSON_StringIsConst; + } else { + new_key = + (char *)cJSON_strdup((const unsigned char *)string, hooks); + if (new_key == NULL) { + return false; + } + + new_type = item->type & ~cJSON_StringIsConst; + } - if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) - { - hooks->deallocate(item->string); - } + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) { + hooks->deallocate(item->string); + } - item->string = new_key; - item->type = new_type; + item->string = new_key; + item->type = new_type; - return add_item_to_array(object, item); + return add_item_to_array(object, item); } -CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) -{ - return add_item_to_object(object, string, item, &global_hooks, false); +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) { + return add_item_to_object(object, string, item, &global_hooks, false); } /* Add an item to an object with constant string as key */ -CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) -{ - return 
add_item_to_object(object, string, item, &global_hooks, true); +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) { + return add_item_to_object(object, string, item, &global_hooks, true); } -CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) -{ - if (array == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) { + if (array == NULL) { + return false; + } - return add_item_to_array(array, create_reference(item, &global_hooks)); + return add_item_to_array(array, create_reference(item, &global_hooks)); } -CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) -{ - if ((object == NULL) || (string == NULL)) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) { + if ((object == NULL) || (string == NULL)) { + return false; + } + + return add_item_to_object(object, string, + create_reference(item, &global_hooks), + &global_hooks, false); +} + +CJSON_PUBLIC(cJSON *) +cJSON_AddNullToObject(cJSON *const object, const char *const name) { + cJSON *null = cJSON_CreateNull(); + if (add_item_to_object(object, name, null, &global_hooks, false)) { + return null; + } - return add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false); + cJSON_Delete(null); + return NULL; } -CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name) -{ - cJSON *null = cJSON_CreateNull(); - if (add_item_to_object(object, name, null, &global_hooks, false)) - { - return null; - } +CJSON_PUBLIC(cJSON *) +cJSON_AddTrueToObject(cJSON *const object, const char *const name) { + cJSON *true_item = cJSON_CreateTrue(); + if (add_item_to_object(object, name, true_item, &global_hooks, false)) { + return true_item; + } - cJSON_Delete(null); - return NULL; + cJSON_Delete(true_item); + 
return NULL; } -CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name) -{ - cJSON *true_item = cJSON_CreateTrue(); - if (add_item_to_object(object, name, true_item, &global_hooks, false)) - { - return true_item; - } +CJSON_PUBLIC(cJSON *) +cJSON_AddFalseToObject(cJSON *const object, const char *const name) { + cJSON *false_item = cJSON_CreateFalse(); + if (add_item_to_object(object, name, false_item, &global_hooks, + false)) { + return false_item; + } - cJSON_Delete(true_item); - return NULL; + cJSON_Delete(false_item); + return NULL; } -CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name) -{ - cJSON *false_item = cJSON_CreateFalse(); - if (add_item_to_object(object, name, false_item, &global_hooks, false)) - { - return false_item; - } +CJSON_PUBLIC(cJSON *) +cJSON_AddBoolToObject(cJSON *const object, + const char *const name, + const cJSON_bool boolean) { + cJSON *bool_item = cJSON_CreateBool(boolean); + if (add_item_to_object(object, name, bool_item, &global_hooks, false)) { + return bool_item; + } - cJSON_Delete(false_item); - return NULL; + cJSON_Delete(bool_item); + return NULL; } -CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean) -{ - cJSON *bool_item = cJSON_CreateBool(boolean); - if (add_item_to_object(object, name, bool_item, &global_hooks, false)) - { - return bool_item; - } +CJSON_PUBLIC(cJSON *) +cJSON_AddNumberToObject(cJSON *const object, + const char *const name, + const double number) { + cJSON *number_item = cJSON_CreateNumber(number); + if (add_item_to_object(object, name, number_item, &global_hooks, + false)) { + return number_item; + } - cJSON_Delete(bool_item); - return NULL; + cJSON_Delete(number_item); + return NULL; } -CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number) -{ - cJSON *number_item = cJSON_CreateNumber(number); - if 
(add_item_to_object(object, name, number_item, &global_hooks, false)) - { - return number_item; - } +CJSON_PUBLIC(cJSON *) +cJSON_AddStringToObject(cJSON *const object, + const char *const name, + const char *const string) { + cJSON *string_item = cJSON_CreateString(string); + if (add_item_to_object(object, name, string_item, &global_hooks, + false)) { + return string_item; + } - cJSON_Delete(number_item); - return NULL; + cJSON_Delete(string_item); + return NULL; } -CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string) -{ - cJSON *string_item = cJSON_CreateString(string); - if (add_item_to_object(object, name, string_item, &global_hooks, false)) - { - return string_item; - } +CJSON_PUBLIC(cJSON *) +cJSON_AddRawToObject(cJSON *const object, + const char *const name, + const char *const raw) { + cJSON *raw_item = cJSON_CreateRaw(raw); + if (add_item_to_object(object, name, raw_item, &global_hooks, false)) { + return raw_item; + } - cJSON_Delete(string_item); - return NULL; + cJSON_Delete(raw_item); + return NULL; } -CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw) -{ - cJSON *raw_item = cJSON_CreateRaw(raw); - if (add_item_to_object(object, name, raw_item, &global_hooks, false)) - { - return raw_item; - } +CJSON_PUBLIC(cJSON *) +cJSON_AddObjectToObject(cJSON *const object, const char *const name) { + cJSON *object_item = cJSON_CreateObject(); + if (add_item_to_object(object, name, object_item, &global_hooks, + false)) { + return object_item; + } - cJSON_Delete(raw_item); - return NULL; + cJSON_Delete(object_item); + return NULL; } -CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name) -{ - cJSON *object_item = cJSON_CreateObject(); - if (add_item_to_object(object, name, object_item, &global_hooks, false)) - { - return object_item; - } +CJSON_PUBLIC(cJSON *) +cJSON_AddArrayToObject(cJSON *const object, 
const char *const name) { + cJSON *array = cJSON_CreateArray(); + if (add_item_to_object(object, name, array, &global_hooks, false)) { + return array; + } - cJSON_Delete(object_item); - return NULL; + cJSON_Delete(array); + return NULL; } -CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name) -{ - cJSON *array = cJSON_CreateArray(); - if (add_item_to_object(object, name, array, &global_hooks, false)) - { - return array; - } +CJSON_PUBLIC(cJSON *) +cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item) { + if ((parent == NULL) || (item == NULL)) { + return NULL; + } + + if (item != parent->child) { + /* not the first element */ + item->prev->next = item->next; + } + if (item->next != NULL) { + /* not the last element */ + item->next->prev = item->prev; + } + + if (item == parent->child) { + /* first element */ + parent->child = item->next; + } else if (item->next == NULL) { + /* last element */ + parent->child->prev = item->prev; + } + + /* make sure the detached item doesn't point anywhere anymore */ + item->prev = NULL; + item->next = NULL; - cJSON_Delete(array); - return NULL; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item) -{ - if ((parent == NULL) || (item == NULL)) - { - return NULL; - } - - if (item != parent->child) - { - /* not the first element */ - item->prev->next = item->next; - } - if (item->next != NULL) - { - /* not the last element */ - item->next->prev = item->prev; - } - - if (item == parent->child) - { - /* first element */ - parent->child = item->next; - } - else if (item->next == NULL) - { - /* last element */ - parent->child->prev = item->prev; - } - - /* make sure the detached item doesn't point anywhere anymore */ - item->prev = NULL; - item->next = NULL; - - return item; -} - -CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) -{ - if (which < 0) - { - return NULL; - } +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON 
*array, int which) { + if (which < 0) { + return NULL; + } - return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which)); + return cJSON_DetachItemViaPointer(array, + get_array_item(array, (size_t)which)); } -CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) -{ - cJSON_Delete(cJSON_DetachItemFromArray(array, which)); +CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) { + cJSON_Delete(cJSON_DetachItemFromArray(array, which)); } -CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string) -{ - cJSON *to_detach = cJSON_GetObjectItem(object, string); +CJSON_PUBLIC(cJSON *) +cJSON_DetachItemFromObject(cJSON *object, const char *string) { + cJSON *to_detach = cJSON_GetObjectItem(object, string); - return cJSON_DetachItemViaPointer(object, to_detach); + return cJSON_DetachItemViaPointer(object, to_detach); } -CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) -{ - cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); +CJSON_PUBLIC(cJSON *) +cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) { + cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); - return cJSON_DetachItemViaPointer(object, to_detach); + return cJSON_DetachItemViaPointer(object, to_detach); } -CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string) -{ - cJSON_Delete(cJSON_DetachItemFromObject(object, string)); +CJSON_PUBLIC(void) +cJSON_DeleteItemFromObject(cJSON *object, const char *string) { + cJSON_Delete(cJSON_DetachItemFromObject(object, string)); } -CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) -{ - cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); +CJSON_PUBLIC(void) +cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) { + cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); } /* 
Replace array/object items with new ones. */ -CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) -{ - cJSON *after_inserted = NULL; +CJSON_PUBLIC(cJSON_bool) +cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) { + cJSON *after_inserted = NULL; - if (which < 0) - { - return false; - } - - after_inserted = get_array_item(array, (size_t)which); - if (after_inserted == NULL) - { - return add_item_to_array(array, newitem); - } - - newitem->next = after_inserted; - newitem->prev = after_inserted->prev; - after_inserted->prev = newitem; - if (after_inserted == array->child) - { - array->child = newitem; - } - else - { - newitem->prev->next = newitem; - } - return true; -} - -CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement) -{ - if ((parent == NULL) || (replacement == NULL) || (item == NULL)) - { - return false; - } + if (which < 0) { + return false; + } + + after_inserted = get_array_item(array, (size_t)which); + if (after_inserted == NULL) { + return add_item_to_array(array, newitem); + } - if (replacement == item) - { + newitem->next = after_inserted; + newitem->prev = after_inserted->prev; + after_inserted->prev = newitem; + if (after_inserted == array->child) { + array->child = newitem; + } else { + newitem->prev->next = newitem; + } return true; - } - - replacement->next = item->next; - replacement->prev = item->prev; - - if (replacement->next != NULL) - { - replacement->next->prev = replacement; - } - if (parent->child == item) - { - if (parent->child->prev == parent->child) - { - replacement->prev = replacement; - } - parent->child = replacement; - } - else - { /* - * To find the last item in array quickly, we use prev in array. 
- * We can't modify the last item's next pointer where this item was the parent's child - */ - if (replacement->prev != NULL) - { - replacement->prev->next = replacement; +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemViaPointer(cJSON *const parent, + cJSON *const item, + cJSON *replacement) { + if ((parent == NULL) || (replacement == NULL) || (item == NULL)) { + return false; } - if (replacement->next == NULL) - { - parent->child->prev = replacement; + + if (replacement == item) { + return true; + } + + replacement->next = item->next; + replacement->prev = item->prev; + + if (replacement->next != NULL) { + replacement->next->prev = replacement; + } + if (parent->child == item) { + if (parent->child->prev == parent->child) { + replacement->prev = replacement; + } + parent->child = replacement; + } else { /* + * To find the last item in array quickly, we use prev in + * array. We can't modify the last item's next pointer where + * this item was the parent's child + */ + if (replacement->prev != NULL) { + replacement->prev->next = replacement; + } + if (replacement->next == NULL) { + parent->child->prev = replacement; + } } - } - item->next = NULL; - item->prev = NULL; - cJSON_Delete(item); + item->next = NULL; + item->prev = NULL; + cJSON_Delete(item); - return true; + return true; } -CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) -{ - if (which < 0) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) { + if (which < 0) { + return false; + } - return cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem); + return cJSON_ReplaceItemViaPointer( + array, get_array_item(array, (size_t)which), newitem); } -static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive) -{ - if ((replacement == NULL) || (string == NULL)) - { - return false; - } +static cJSON_bool 
replace_item_in_object(cJSON *object, + const char *string, + cJSON *replacement, + cJSON_bool case_sensitive) { + if ((replacement == NULL) || (string == NULL)) { + return false; + } - /* replace the name in the replacement */ - if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL)) - { - cJSON_free(replacement->string); - } - replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); - replacement->type &= ~cJSON_StringIsConst; + /* replace the name in the replacement */ + if (!(replacement->type & cJSON_StringIsConst) && + (replacement->string != NULL)) { + cJSON_free(replacement->string); + } + replacement->string = + (char *)cJSON_strdup((const unsigned char *)string, &global_hooks); + replacement->type &= ~cJSON_StringIsConst; - return cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement); + return cJSON_ReplaceItemViaPointer( + object, get_object_item(object, string, case_sensitive), + replacement); } -CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) -{ - return replace_item_in_object(object, string, newitem, false); +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) { + return replace_item_in_object(object, string, newitem, false); } -CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) -{ - return replace_item_in_object(object, string, newitem, true); +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, + const char *string, + cJSON *newitem) { + return replace_item_in_object(object, string, newitem, true); } /* Create basic types: */ -CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_NULL; - } +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) { + cJSON *item = cJSON_New_Item(&global_hooks); 
+ if (item) { + item->type = cJSON_NULL; + } - return item; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_True; - } +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_True; + } - return item; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_False; - } +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_False; + } - return item; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = boolean ? cJSON_True : cJSON_False; - } +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = boolean ? 
cJSON_True : cJSON_False; + } - return item; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_Number; - item->valuedouble = num; +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_Number; + item->valuedouble = num; - /* use saturation in case of overflow */ - if (num >= INT_MAX) - { - item->valueint = INT_MAX; - } - else if (num <= (double)INT_MIN) - { - item->valueint = INT_MIN; - } - else - { - item->valueint = (int)num; + /* use saturation in case of overflow */ + if (num >= INT_MAX) { + item->valueint = INT_MAX; + } else if (num <= (double)INT_MIN) { + item->valueint = INT_MIN; + } else { + item->valueint = (int)num; + } } - } - return item; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_String; - item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); - if(!item->valuestring) - { - cJSON_Delete(item); - return NULL; +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_String; + item->valuestring = (char *)cJSON_strdup( + (const unsigned char *)string, &global_hooks); + if (!item->valuestring) { + cJSON_Delete(item); + return NULL; + } } - } - return item; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if (item != NULL) - { - item->type = cJSON_String | cJSON_IsReference; - item->valuestring = (char*)cast_away_const(string); - } +CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_String | cJSON_IsReference; + item->valuestring = 
(char *)cast_away_const(string); + } - return item; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if (item != NULL) { - item->type = cJSON_Object | cJSON_IsReference; - item->child = (cJSON*)cast_away_const(child); - } +CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Object | cJSON_IsReference; + item->child = (cJSON *)cast_away_const(child); + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) { - cJSON *item = cJSON_New_Item(&global_hooks); - if (item != NULL) { - item->type = cJSON_Array | cJSON_IsReference; - item->child = (cJSON*)cast_away_const(child); - } - - return item; -} - -CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_Raw; - item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks); - if(!item->valuestring) - { - cJSON_Delete(item); - return NULL; + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Array | cJSON_IsReference; + item->child = (cJSON *)cast_away_const(child); } - } - return item; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type=cJSON_Array; - } +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_Raw; + item->valuestring = (char *)cJSON_strdup( + (const unsigned char *)raw, &global_hooks); + if (!item->valuestring) { + cJSON_Delete(item); + return NULL; + } + } - return item; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) -{ - cJSON *item = cJSON_New_Item(&global_hooks); - if (item) - { - item->type = cJSON_Object; - } 
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_Array; + } - return item; + return item; } -/* Create Arrays: */ -CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) -{ - size_t i = 0; - cJSON *n = NULL; - cJSON *p = NULL; - cJSON *a = NULL; - - if ((count < 0) || (numbers == NULL)) - { - return NULL; - } - - a = cJSON_CreateArray(); - for(i = 0; a && (i < (size_t)count); i++) - { - n = cJSON_CreateNumber(numbers[i]); - if (!n) - { - cJSON_Delete(a); - return NULL; - } - if(!i) - { - a->child = n; - } - else - { - suffix_object(p, n); +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_Object; } - p = n; - } - a->child->prev = n; - return a; + return item; } -CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) -{ - size_t i = 0; - cJSON *n = NULL; - cJSON *p = NULL; - cJSON *a = NULL; +/* Create Arrays: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) { + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) { + return NULL; + } + + a = cJSON_CreateArray(); + for (i = 0; a && (i < (size_t)count); i++) { + n = cJSON_CreateNumber(numbers[i]); + if (!n) { + cJSON_Delete(a); + return NULL; + } + if (!i) { + a->child = n; + } else { + suffix_object(p, n); + } + p = n; + } + a->child->prev = n; - if ((count < 0) || (numbers == NULL)) - { - return NULL; - } + return a; +} - a = cJSON_CreateArray(); +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) { + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; - for(i = 0; a && (i < (size_t)count); i++) - { - n = cJSON_CreateNumber((double)numbers[i]); - if(!n) - { - cJSON_Delete(a); - return NULL; - } - if(!i) - { - a->child = n; + if ((count < 0) || (numbers == NULL)) { + return NULL; } 
- else - { - suffix_object(p, n); + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) { + n = cJSON_CreateNumber((double)numbers[i]); + if (!n) { + cJSON_Delete(a); + return NULL; + } + if (!i) { + a->child = n; + } else { + suffix_object(p, n); + } + p = n; } - p = n; - } - a->child->prev = n; + a->child->prev = n; - return a; + return a; } -CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count) -{ - size_t i = 0; - cJSON *n = NULL; - cJSON *p = NULL; - cJSON *a = NULL; +CJSON_PUBLIC(cJSON *) +cJSON_CreateDoubleArray(const double *numbers, int count) { + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; - if ((count < 0) || (numbers == NULL)) - { - return NULL; - } + if ((count < 0) || (numbers == NULL)) { + return NULL; + } - a = cJSON_CreateArray(); + a = cJSON_CreateArray(); - for(i = 0;a && (i < (size_t)count); i++) - { - n = cJSON_CreateNumber(numbers[i]); - if(!n) - { - cJSON_Delete(a); - return NULL; - } - if(!i) - { - a->child = n; - } - else - { - suffix_object(p, n); + for (i = 0; a && (i < (size_t)count); i++) { + n = cJSON_CreateNumber(numbers[i]); + if (!n) { + cJSON_Delete(a); + return NULL; + } + if (!i) { + a->child = n; + } else { + suffix_object(p, n); + } + p = n; } - p = n; - } - a->child->prev = n; + a->child->prev = n; - return a; + return a; } -CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count) -{ - size_t i = 0; - cJSON *n = NULL; - cJSON *p = NULL; - cJSON *a = NULL; +CJSON_PUBLIC(cJSON *) +cJSON_CreateStringArray(const char *const *strings, int count) { + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; - if ((count < 0) || (strings == NULL)) - { - return NULL; - } + if ((count < 0) || (strings == NULL)) { + return NULL; + } - a = cJSON_CreateArray(); + a = cJSON_CreateArray(); - for (i = 0; a && (i < (size_t)count); i++) - { - n = cJSON_CreateString(strings[i]); - if(!n) - { - cJSON_Delete(a); - return NULL; - } - 
if(!i) - { - a->child = n; - } - else - { - suffix_object(p,n); + for (i = 0; a && (i < (size_t)count); i++) { + n = cJSON_CreateString(strings[i]); + if (!n) { + cJSON_Delete(a); + return NULL; + } + if (!i) { + a->child = n; + } else { + suffix_object(p, n); + } + p = n; } - p = n; - } - a->child->prev = n; + a->child->prev = n; - return a; + return a; } /* Duplication */ -CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) -{ - cJSON *newitem = NULL; - cJSON *child = NULL; - cJSON *next = NULL; - cJSON *newchild = NULL; - - /* Bail on bad ptr */ - if (!item) - { - goto fail; - } - /* Create new item */ - newitem = cJSON_New_Item(&global_hooks); - if (!newitem) - { - goto fail; - } - /* Copy over all vars */ - newitem->type = item->type & (~cJSON_IsReference); - newitem->valueint = item->valueint; - newitem->valuedouble = item->valuedouble; - if (item->valuestring) - { - newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks); - if (!newitem->valuestring) - { - goto fail; +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) { + cJSON *newitem = NULL; + cJSON *child = NULL; + cJSON *next = NULL; + cJSON *newchild = NULL; + + /* Bail on bad ptr */ + if (!item) { + goto fail; } - } - if (item->string) - { - newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks); - if (!newitem->string) - { - goto fail; + /* Create new item */ + newitem = cJSON_New_Item(&global_hooks); + if (!newitem) { + goto fail; } - } - /* If non-recursive, then we're done! */ - if (!recurse) - { - return newitem; - } - /* Walk the ->next chain for the child. 
*/ - child = item->child; - while (child != NULL) - { - newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */ - if (!newchild) - { - goto fail; + /* Copy over all vars */ + newitem->type = item->type & (~cJSON_IsReference); + newitem->valueint = item->valueint; + newitem->valuedouble = item->valuedouble; + if (item->valuestring) { + newitem->valuestring = (char *)cJSON_strdup( + (unsigned char *)item->valuestring, &global_hooks); + if (!newitem->valuestring) { + goto fail; + } } - if (next != NULL) - { - /* If newitem->child already set, then crosswire ->prev and ->next and move on */ - next->next = newchild; - newchild->prev = next; - next = newchild; + if (item->string) { + newitem->string = + (item->type & cJSON_StringIsConst) + ? item->string + : (char *)cJSON_strdup((unsigned char *)item->string, + &global_hooks); + if (!newitem->string) { + goto fail; + } } - else - { - /* Set newitem->child and move to it */ - newitem->child = newchild; - next = newchild; + /* If non-recursive, then we're done! */ + if (!recurse) { + return newitem; + } + /* Walk the ->next chain for the child. 
*/ + child = item->child; + while (child != NULL) { + newchild = cJSON_Duplicate( + child, true); /* Duplicate (with recurse) each item in the + ->next chain */ + if (!newchild) { + goto fail; + } + if (next != NULL) { + /* If newitem->child already set, then crosswire ->prev + * and ->next and move on */ + next->next = newchild; + newchild->prev = next; + next = newchild; + } else { + /* Set newitem->child and move to it */ + newitem->child = newchild; + next = newchild; + } + child = child->next; + } + if (newitem && newitem->child) { + newitem->child->prev = newchild; } - child = child->next; - } - if (newitem && newitem->child) - { - newitem->child->prev = newchild; - } - return newitem; + return newitem; fail: - if (newitem != NULL) - { - cJSON_Delete(newitem); - } + if (newitem != NULL) { + cJSON_Delete(newitem); + } - return NULL; + return NULL; } -static void skip_oneline_comment(char **input) -{ - *input += static_strlen("//"); +static void skip_oneline_comment(char **input) { + *input += static_strlen("//"); - for (; (*input)[0] != '\0'; ++(*input)) - { - if ((*input)[0] == '\n') { - *input += static_strlen("\n"); - return; + for (; (*input)[0] != '\0'; ++(*input)) { + if ((*input)[0] == '\n') { + *input += static_strlen("\n"); + return; + } } - } } -static void skip_multiline_comment(char **input) -{ - *input += static_strlen("/*"); +static void skip_multiline_comment(char **input) { + *input += static_strlen("/*"); - for (; (*input)[0] != '\0'; ++(*input)) - { - if (((*input)[0] == '*') && ((*input)[1] == '/')) - { - *input += static_strlen("*/"); - return; + for (; (*input)[0] != '\0'; ++(*input)) { + if (((*input)[0] == '*') && ((*input)[1] == '/')) { + *input += static_strlen("*/"); + return; + } } - } } static void minify_string(char **input, char **output) { - (*output)[0] = (*input)[0]; - *input += static_strlen("\""); - *output += static_strlen("\""); - - - for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) { (*output)[0] = (*input)[0]; - 
- if ((*input)[0] == '\"') { - (*output)[0] = '\"'; - *input += static_strlen("\""); - *output += static_strlen("\""); - return; - } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) { - (*output)[1] = (*input)[1]; - *input += static_strlen("\""); - *output += static_strlen("\""); + *input += static_strlen("\""); + *output += static_strlen("\""); + + + for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) { + (*output)[0] = (*input)[0]; + + if ((*input)[0] == '\"') { + (*output)[0] = '\"'; + *input += static_strlen("\""); + *output += static_strlen("\""); + return; + } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) { + (*output)[1] = (*input)[1]; + *input += static_strlen("\""); + *output += static_strlen("\""); + } } - } } -CJSON_PUBLIC(void) cJSON_Minify(char *json) -{ - char *into = json; +CJSON_PUBLIC(void) cJSON_Minify(char *json) { + char *into = json; - if (json == NULL) - { - return; - } + if (json == NULL) { + return; + } - while (json[0] != '\0') - { - switch (json[0]) - { - case ' ': - case '\t': - case '\r': - case '\n': - json++; - break; + while (json[0] != '\0') { + switch (json[0]) { + case ' ': + case '\t': + case '\r': + case '\n': + json++; + break; - case '/': - if (json[1] == '/') - { - skip_oneline_comment(&json); - } - else if (json[1] == '*') - { - skip_multiline_comment(&json); - } else { - json++; - } - break; + case '/': + if (json[1] == '/') { + skip_oneline_comment(&json); + } else if (json[1] == '*') { + skip_multiline_comment(&json); + } else { + json++; + } + break; - case '\"': - minify_string(&json, (char**)&into); - break; + case '\"': + minify_string(&json, (char **)&into); + break; - default: - into[0] = json[0]; - json++; - into++; + default: + into[0] = json[0]; + json++; + into++; + } } - } - /* and null-terminate. */ - *into = '\0'; + /* and null-terminate. 
*/ + *into = '\0'; } -CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item) -{ - if (item == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item) { + if (item == NULL) { + return false; + } - return (item->type & 0xFF) == cJSON_Invalid; + return (item->type & 0xFF) == cJSON_Invalid; } -CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item) -{ - if (item == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item) { + if (item == NULL) { + return false; + } - return (item->type & 0xFF) == cJSON_False; + return (item->type & 0xFF) == cJSON_False; } -CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item) -{ - if (item == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item) { + if (item == NULL) { + return false; + } - return (item->type & 0xff) == cJSON_True; + return (item->type & 0xff) == cJSON_True; } -CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item) -{ - if (item == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item) { + if (item == NULL) { + return false; + } - return (item->type & (cJSON_True | cJSON_False)) != 0; + return (item->type & (cJSON_True | cJSON_False)) != 0; } -CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item) -{ - if (item == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item) { + if (item == NULL) { + return false; + } - return (item->type & 0xFF) == cJSON_NULL; + return (item->type & 0xFF) == cJSON_NULL; } -CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item) -{ - if (item == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item) { + if (item == NULL) { + return false; + } - return (item->type & 0xFF) == cJSON_Number; + return (item->type & 0xFF) == cJSON_Number; } -CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item) 
-{ - if (item == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item) { + if (item == NULL) { + return false; + } - return (item->type & 0xFF) == cJSON_String; + return (item->type & 0xFF) == cJSON_String; } -CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item) -{ - if (item == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item) { + if (item == NULL) { + return false; + } - return (item->type & 0xFF) == cJSON_Array; + return (item->type & 0xFF) == cJSON_Array; } -CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item) -{ - if (item == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item) { + if (item == NULL) { + return false; + } - return (item->type & 0xFF) == cJSON_Object; + return (item->type & 0xFF) == cJSON_Object; } -CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item) -{ - if (item == NULL) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item) { + if (item == NULL) { + return false; + } - return (item->type & 0xFF) == cJSON_Raw; + return (item->type & 0xFF) == cJSON_Raw; } -CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive) -{ - if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a)) - { - return false; - } +CJSON_PUBLIC(cJSON_bool) +cJSON_Compare(const cJSON *const a, + const cJSON *const b, + const cJSON_bool case_sensitive) { + if ((a == NULL) || (b == NULL) || + ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a)) { + return false; + } - /* check if type is valid */ - switch (a->type & 0xFF) - { + /* check if type is valid */ + switch (a->type & 0xFF) { case cJSON_False: case cJSON_True: case cJSON_NULL: @@ -2977,119 +2727,108 @@ CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * cons case cJSON_Raw: case cJSON_Array: case 
cJSON_Object: - break; + break; default: - return false; - } + return false; + } - /* identical objects are equal */ - if (a == b) - { - return true; - } + /* identical objects are equal */ + if (a == b) { + return true; + } - switch (a->type & 0xFF) - { + switch (a->type & 0xFF) { /* in these cases and equal type is enough */ case cJSON_False: case cJSON_True: case cJSON_NULL: - return true; + return true; case cJSON_Number: - if (compare_double(a->valuedouble, b->valuedouble)) - { - return true; - } - return false; + if (compare_double(a->valuedouble, b->valuedouble)) { + return true; + } + return false; case cJSON_String: case cJSON_Raw: - if ((a->valuestring == NULL) || (b->valuestring == NULL)) - { - return false; - } - if (strcmp(a->valuestring, b->valuestring) == 0) - { - return true; - } - - return false; - - case cJSON_Array: - { - cJSON *a_element = a->child; - cJSON *b_element = b->child; - - for (; (a_element != NULL) && (b_element != NULL);) - { - if (!cJSON_Compare(a_element, b_element, case_sensitive)) - { - return false; + if ((a->valuestring == NULL) || (b->valuestring == NULL)) { + return false; + } + if (strcmp(a->valuestring, b->valuestring) == 0) { + return true; } - a_element = a_element->next; - b_element = b_element->next; - } - - /* one of the arrays is longer than the other */ - if (a_element != b_element) { return false; - } - return true; - } + case cJSON_Array: { + cJSON *a_element = a->child; + cJSON *b_element = b->child; - case cJSON_Object: - { - cJSON *a_element = NULL; - cJSON *b_element = NULL; - cJSON_ArrayForEach(a_element, a) - { - /* TODO This has O(n^2) runtime, which is horrible! 
*/ - b_element = get_object_item(b, a_element->string, case_sensitive); - if (b_element == NULL) - { - return false; + for (; (a_element != NULL) && (b_element != NULL);) { + if (!cJSON_Compare(a_element, b_element, + case_sensitive)) { + return false; + } + + a_element = a_element->next; + b_element = b_element->next; } - if (!cJSON_Compare(a_element, b_element, case_sensitive)) - { - return false; + /* one of the arrays is longer than the other */ + if (a_element != b_element) { + return false; } - } - /* doing this twice, once on a and b to prevent true comparison if a subset of b - * TODO: Do this the proper way, this is just a fix for now */ - cJSON_ArrayForEach(b_element, b) - { - a_element = get_object_item(a, b_element->string, case_sensitive); - if (a_element == NULL) - { - return false; + return true; + } + + case cJSON_Object: { + cJSON *a_element = NULL; + cJSON *b_element = NULL; + cJSON_ArrayForEach(a_element, a) { + /* TODO This has O(n^2) runtime, which is horrible! */ + b_element = get_object_item(b, a_element->string, + case_sensitive); + if (b_element == NULL) { + return false; + } + + if (!cJSON_Compare(a_element, b_element, + case_sensitive)) { + return false; + } } - if (!cJSON_Compare(b_element, a_element, case_sensitive)) - { - return false; + /* doing this twice, once on a and b to prevent true comparison + * if a subset of b + * TODO: Do this the proper way, this is just a fix for now */ + cJSON_ArrayForEach(b_element, b) { + a_element = get_object_item(a, b_element->string, + case_sensitive); + if (a_element == NULL) { + return false; + } + + if (!cJSON_Compare(b_element, a_element, + case_sensitive)) { + return false; + } } - } - return true; + return true; } default: - return false; - } + return false; + } } -CJSON_PUBLIC(void *) cJSON_malloc(size_t size) -{ - return global_hooks.allocate(size); +CJSON_PUBLIC(void *) cJSON_malloc(size_t size) { + return global_hooks.allocate(size); } -CJSON_PUBLIC(void) cJSON_free(void *object) -{ - 
global_hooks.deallocate(object); +CJSON_PUBLIC(void) cJSON_free(void *object) { + global_hooks.deallocate(object); } diff --git a/src/cJSON.h b/src/cJSON.h index e97e5f4cdc..1b5655c7b6 100644 --- a/src/cJSON.h +++ b/src/cJSON.h @@ -24,23 +24,27 @@ #define cJSON__h #ifdef __cplusplus -extern "C" -{ +extern "C" { #endif -#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) +#if !defined(__WINDOWS__) && \ + (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) #define __WINDOWS__ #endif #ifdef __WINDOWS__ -/* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. For windows you have 3 define options: +/* When compiling for windows, we specify a specific calling convention to avoid +issues where we are being called from a project with a different default calling +convention. For windows you have 3 define options: -CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols -CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default) -CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol +CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever +dllexport symbols CJSON_EXPORT_SYMBOLS - Define this on library build when you +want to dllexport symbols (default) CJSON_IMPORT_SYMBOLS - Define this if you +want to dllimport symbol -For *nix builds that support visibility attribute, you can define similar behavior by +For *nix builds that support visibility attribute, you can define similar +behavior by setting default visibility to hidden by adding -fvisibility=hidden (for gcc) @@ -48,31 +52,35 @@ or -xldscope=hidden (for sun cc) to CFLAGS -then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does +then using the CJSON_API_VISIBILITY flag to 
"export" the same symbols the way +CJSON_EXPORT_SYMBOLS does */ -#define CJSON_CDECL __cdecl +#define CJSON_CDECL __cdecl #define CJSON_STDCALL __stdcall -/* export symbols by default, this is necessary for copy pasting the C and header file */ -#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS) +/* export symbols by default, this is necessary for copy pasting the C and + * header file */ +#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && \ + !defined(CJSON_EXPORT_SYMBOLS) #define CJSON_EXPORT_SYMBOLS #endif #if defined(CJSON_HIDE_SYMBOLS) -#define CJSON_PUBLIC(type) type CJSON_STDCALL +#define CJSON_PUBLIC(type) type CJSON_STDCALL #elif defined(CJSON_EXPORT_SYMBOLS) -#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL +#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL #elif defined(CJSON_IMPORT_SYMBOLS) -#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL +#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL #endif #else /* !__WINDOWS__ */ #define CJSON_CDECL #define CJSON_STDCALL -#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY) -#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type +#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined(__SUNPRO_C)) && \ + defined(CJSON_API_VISIBILITY) +#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type #else #define CJSON_PUBLIC(type) type #endif @@ -87,109 +95,145 @@ then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJ /* cJSON Types: */ #define cJSON_Invalid (0) -#define cJSON_False (1 << 0) -#define cJSON_True (1 << 1) -#define cJSON_NULL (1 << 2) -#define cJSON_Number (1 << 3) -#define cJSON_String (1 << 4) -#define cJSON_Array (1 << 5) -#define cJSON_Object (1 << 6) -#define cJSON_Raw (1 << 7) /* raw json */ - -#define cJSON_IsReference 256 +#define cJSON_False (1 << 
0) +#define cJSON_True (1 << 1) +#define cJSON_NULL (1 << 2) +#define cJSON_Number (1 << 3) +#define cJSON_String (1 << 4) +#define cJSON_Array (1 << 5) +#define cJSON_Object (1 << 6) +#define cJSON_Raw (1 << 7) /* raw json */ + +#define cJSON_IsReference 256 #define cJSON_StringIsConst 512 /* The cJSON structure: */ -typedef struct cJSON -{ - /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ - struct cJSON *next; - struct cJSON *prev; - /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ - struct cJSON *child; - - /* The type of the item, as above. */ - int type; - - /* The item's string, if type==cJSON_String and type == cJSON_Raw */ - char *valuestring; - /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */ - int valueint; - /* The item's number, if type==cJSON_Number */ - double valuedouble; - - /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ - char *string; +typedef struct cJSON { + /* next/prev allow you to walk array/object chains. Alternatively, use + * GetArraySize/GetArrayItem/GetObjectItem */ + struct cJSON *next; + struct cJSON *prev; + /* An array or object item will have a child pointer pointing to a chain + * of the items in the array/object. */ + struct cJSON *child; + + /* The type of the item, as above. */ + int type; + + /* The item's string, if type==cJSON_String and type == cJSON_Raw */ + char *valuestring; + /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead + */ + int valueint; + /* The item's number, if type==cJSON_Number */ + double valuedouble; + + /* The item's name string, if this item is the child of, or is in the + * list of subitems of an object. 
*/ + char *string; } cJSON; -typedef struct cJSON_Hooks -{ - /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */ - void *(CJSON_CDECL *malloc_fn)(size_t sz); - void (CJSON_CDECL *free_fn)(void *ptr); +typedef struct cJSON_Hooks { + /* malloc/free are CDECL on Windows regardless of the default calling + * convention of the compiler, so ensure the hooks allow passing those + * functions directly. */ + void *(CJSON_CDECL *malloc_fn)(size_t sz); + void(CJSON_CDECL *free_fn)(void *ptr); } cJSON_Hooks; typedef int cJSON_bool; -/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them. - * This is to prevent stack overflows. */ +/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse + * them. This is to prevent stack overflows. */ #ifndef CJSON_NESTING_LIMIT #define CJSON_NESTING_LIMIT 1000 #endif /* returns the version of cJSON as a string */ -CJSON_PUBLIC(const char*) cJSON_Version(void); +CJSON_PUBLIC(const char *) cJSON_Version(void); /* Supply malloc, realloc and free functions to cJSON */ -CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks); - -/* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */ -/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */ +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks); + +/* Memory Management: the caller is always responsible to free the results from + * all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib + * free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). 
The exception is + * cJSON_PrintPreallocated, where the caller has full responsibility of the + * buffer. */ +/* Supply a block of JSON, and this returns a cJSON object you can interrogate. + */ CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value); -CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length); -/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */ -/* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */ -CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated); -CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated); +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithLength(const char *value, size_t buffer_length); +/* ParseWithOpts allows you to require (and check) that the JSON is null + * terminated, and to retrieve the pointer to the final byte parsed. */ +/* If you supply a ptr in return_parse_end and parsing fails, then + * return_parse_end will contain a pointer to the error so will match + * cJSON_GetErrorPtr(). */ +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithOpts(const char *value, + const char **return_parse_end, + cJSON_bool require_null_terminated); +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithLengthOpts(const char *value, + size_t buffer_length, + const char **return_parse_end, + cJSON_bool require_null_terminated); /* Render a cJSON entity to text for transfer/storage. */ CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item); /* Render a cJSON entity to text for transfer/storage without any formatting. */ CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item); -/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. 
guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */ -CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt); -/* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */ -/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */ -CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format); +/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess + * at the final size. guessing well reduces reallocation. fmt=0 gives + * unformatted, =1 gives formatted */ +CJSON_PUBLIC(char *) +cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt); +/* Render a cJSON entity to text using a buffer already allocated in memory with + * given length. Returns 1 on success and 0 on failure. */ +/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will + * use, so to be safe allocate 5 bytes more than you actually need */ +CJSON_PUBLIC(cJSON_bool) +cJSON_PrintPreallocated(cJSON *item, + char *buffer, + const int length, + const cJSON_bool format); /* Delete a cJSON entity and all subentities. */ CJSON_PUBLIC(void) cJSON_Delete(cJSON *item); /* Returns the number of items in an array (or object). */ CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array); -/* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */ +/* Retrieve item number "index" from array "array". Returns NULL if + * unsuccessful. */ CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index); /* Get item "string" from object. Case insensitive. 
*/ -CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string); -CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string); -CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string); -/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */ +CJSON_PUBLIC(cJSON *) +cJSON_GetObjectItem(const cJSON *const object, const char *const string); +CJSON_PUBLIC(cJSON *) +cJSON_GetObjectItemCaseSensitive(const cJSON *const object, + const char *const string); +CJSON_PUBLIC(cJSON_bool) +cJSON_HasObjectItem(const cJSON *object, const char *string); +/* For analysing failed parses. This returns a pointer to the parse error. + * You'll probably need to look a few chars back to make sense of it. Defined + * when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. 
*/ CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void); /* Check item type and return its value */ -CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item); -CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item); +CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item); +CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item); /* These functions check the type of an item */ -CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item); -CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item); -CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item); -CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item); -CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item); -CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item); -CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item); -CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item); -CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item); -CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item); /* These calls create a cJSON item of the appropriate type. 
*/ CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void); @@ -212,77 +256,138 @@ CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child); CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child); /* These utilities create an Array of count items. - * The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/ + * The parameter count cannot be greater than the number of elements in the + * number array, otherwise array access will be out of bounds.*/ CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count); CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count); CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count); -CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count); +CJSON_PUBLIC(cJSON *) +cJSON_CreateStringArray(const char *const *strings, int count); /* Append item to the specified array/object. */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item); -CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); -/* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object. - * WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before - * writing to `item->string` */ -CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); -/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. 
*/ -CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); -CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); +/* Use this when string is definitely const (i.e. a literal, or as good as), and + * will definitely survive the cJSON object. WARNING: When this function was + * used, make sure to always check that (item->type & cJSON_StringIsConst) is + * zero before writing to `item->string` */ +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); +/* Append reference to item to the specified array/object. Use this when you + * want to add an existing cJSON to a new cJSON, but don't want to corrupt your + * existing cJSON. */ +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); /* Remove/Detach items from Arrays/Objects. 
*/ -CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item); +CJSON_PUBLIC(cJSON *) +cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item); CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which); CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which); -CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string); -CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string); -CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string); -CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string); +CJSON_PUBLIC(cJSON *) +cJSON_DetachItemFromObject(cJSON *object, const char *string); +CJSON_PUBLIC(cJSON *) +cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string); +CJSON_PUBLIC(void) +cJSON_DeleteItemFromObject(cJSON *object, const char *string); +CJSON_PUBLIC(void) +cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string); /* Update array items. */ -CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */ -CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement); -CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem); -CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem); -CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem); +CJSON_PUBLIC(cJSON_bool) +cJSON_InsertItemInArray( + cJSON *array, + int which, + cJSON *newitem); /* Shifts pre-existing items to the right. 
*/ +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemViaPointer(cJSON *const parent, + cJSON *const item, + cJSON *replacement); +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem); +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem); +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, + const char *string, + cJSON *newitem); /* Duplicate a cJSON item */ CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse); -/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will - * need to be released. With recurse!=0, it will duplicate any children connected to the item. - * The item->next and ->prev pointers are always zero on return from Duplicate. */ -/* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal. - * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */ -CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); - -/* Minify a strings, remove blank characters(such as ' ', '\t', '\r', '\n') from strings. - * The input pointer json cannot point to a read-only address area, such as a string constant, +/* Duplicate will create a new, identical cJSON item to the one you pass, in new + * memory that will need to be released. With recurse!=0, it will duplicate any + * children connected to the item. + * The item->next and ->prev pointers are always zero on return from Duplicate. + */ +/* Recursively compare two cJSON items for equality. If either a or b is NULL or + * invalid, they will be considered unequal. 
+ * case_sensitive determines if object keys are treated case sensitive (1) or + * case insensitive (0) */ +CJSON_PUBLIC(cJSON_bool) +cJSON_Compare(const cJSON *const a, + const cJSON *const b, + const cJSON_bool case_sensitive); + +/* Minify a strings, remove blank characters(such as ' ', '\t', '\r', '\n') from + * strings. The input pointer json cannot point to a read-only address area, + * such as a string constant, * but should point to a readable and writable adress area. */ CJSON_PUBLIC(void) cJSON_Minify(char *json); /* Helper functions for creating and adding items to an object at the same time. * They return the added item or NULL on failure. */ -CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name); -CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name); -CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name); -CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean); -CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number); -CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string); -CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw); -CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name); -CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name); - -/* When assigning an integer value, it needs to be propagated to valuedouble too. */ -#define cJSON_SetIntValue(object, number) ((object) ? 
(object)->valueint = (object)->valuedouble = (number) : (number)) +CJSON_PUBLIC(cJSON *) +cJSON_AddNullToObject(cJSON *const object, const char *const name); +CJSON_PUBLIC(cJSON *) +cJSON_AddTrueToObject(cJSON *const object, const char *const name); +CJSON_PUBLIC(cJSON *) +cJSON_AddFalseToObject(cJSON *const object, const char *const name); +CJSON_PUBLIC(cJSON *) +cJSON_AddBoolToObject(cJSON *const object, + const char *const name, + const cJSON_bool boolean); +CJSON_PUBLIC(cJSON *) +cJSON_AddNumberToObject(cJSON *const object, + const char *const name, + const double number); +CJSON_PUBLIC(cJSON *) +cJSON_AddStringToObject(cJSON *const object, + const char *const name, + const char *const string); +CJSON_PUBLIC(cJSON *) +cJSON_AddRawToObject(cJSON *const object, + const char *const name, + const char *const raw); +CJSON_PUBLIC(cJSON *) +cJSON_AddObjectToObject(cJSON *const object, const char *const name); +CJSON_PUBLIC(cJSON *) +cJSON_AddArrayToObject(cJSON *const object, const char *const name); + +/* When assigning an integer value, it needs to be propagated to valuedouble + * too. */ +#define cJSON_SetIntValue(object, number) \ + ((object) ? (object)->valueint = (object)->valuedouble = (number) \ + : (number)) /* helper for the cJSON_SetNumberValue macro */ CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number); -#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number)) -/* Change the valuestring of a cJSON_String object, only takes effect when type of object is cJSON_String */ -CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring); +#define cJSON_SetNumberValue(object, number) \ + ((object != NULL) ? 
cJSON_SetNumberHelper(object, (double)number) \ + : (number)) +/* Change the valuestring of a cJSON_String object, only takes effect when type + * of object is cJSON_String */ +CJSON_PUBLIC(char *) +cJSON_SetValuestring(cJSON *object, const char *valuestring); /* Macro for iterating over an array or object */ -#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next) +#define cJSON_ArrayForEach(element, array) \ + for (element = (array != NULL) ? (array)->child : NULL; \ + element != NULL; element = element->next) -/* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */ +/* malloc/free objects using the malloc/free functions that have been set with + * cJSON_InitHooks */ CJSON_PUBLIC(void *) cJSON_malloc(size_t size); CJSON_PUBLIC(void) cJSON_free(void *object); diff --git a/src/rd.h b/src/rd.h index 3106410468..a1b120826a 100644 --- a/src/rd.h +++ b/src/rd.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -32,7 +32,7 @@ #ifndef _WIN32 #ifndef _GNU_SOURCE -#define _GNU_SOURCE /* for strndup() */ +#define _GNU_SOURCE /* for strndup() */ #endif #if defined(__APPLE__) && !defined(_DARWIN_C_SOURCE) @@ -41,7 +41,7 @@ #define __need_IOV_MAX #ifndef _POSIX_C_SOURCE -#define _POSIX_C_SOURCE 200809L /* for timespec on solaris */ +#define _POSIX_C_SOURCE 200809L /* for timespec on solaris */ #endif #endif @@ -93,7 +93,9 @@ #if ENABLE_DEVEL == 1 #define rd_dassert(cond) rd_assert(cond) #else -#define rd_dassert(cond) do {} while (0) +#define rd_dassert(cond) \ + do { \ + } while (0) #endif @@ -101,65 +103,67 @@ #define RD_NOTREACHED() rd_assert(!*"/* NOTREACHED */ violated") /** Assert if reached */ -#define RD_BUG(...) do { \ - fprintf(stderr, "INTERNAL ERROR: librdkafka %s:%d: ", \ - __FUNCTION__, __LINE__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - rd_assert(!*"INTERNAL ERROR IN LIBRDKAFKA"); \ +#define RD_BUG(...) \ + do { \ + fprintf(stderr, \ + "INTERNAL ERROR: librdkafka %s:%d: ", __FUNCTION__, \ + __LINE__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + rd_assert(!*"INTERNAL ERROR IN LIBRDKAFKA"); \ } while (0) /** -* Allocator wrappers. -* We serve under the premise that if a (small) memory -* allocation fails all hope is lost and the application -* will fail anyway, so no need to handle it handsomely. -*/ + * Allocator wrappers. + * We serve under the premise that if a (small) memory + * allocation fails all hope is lost and the application + * will fail anyway, so no need to handle it handsomely. 
+ */ static RD_INLINE RD_UNUSED void *rd_calloc(size_t num, size_t sz) { - void *p = calloc(num, sz); - rd_assert(p); - return p; + void *p = calloc(num, sz); + rd_assert(p); + return p; } static RD_INLINE RD_UNUSED void *rd_malloc(size_t sz) { - void *p = malloc(sz); - rd_assert(p); - return p; + void *p = malloc(sz); + rd_assert(p); + return p; } static RD_INLINE RD_UNUSED void *rd_realloc(void *ptr, size_t sz) { - void *p = realloc(ptr, sz); - rd_assert(p); - return p; + void *p = realloc(ptr, sz); + rd_assert(p); + return p; } static RD_INLINE RD_UNUSED void rd_free(void *ptr) { - free(ptr); + free(ptr); } static RD_INLINE RD_UNUSED char *rd_strdup(const char *s) { #ifndef _WIN32 - char *n = strdup(s); + char *n = strdup(s); #else - char *n = _strdup(s); + char *n = _strdup(s); #endif - rd_assert(n); - return n; + rd_assert(n); + return n; } static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) { #if HAVE_STRNDUP - char *n = strndup(s, len); - rd_assert(n); + char *n = strndup(s, len); + rd_assert(n); #else - char *n = (char *)rd_malloc(len + 1); - rd_assert(n); - memcpy(n, s, len); - n[len] = '\0'; + char *n = (char *)rd_malloc(len + 1); + rd_assert(n); + memcpy(n, s, len); + n[len] = '\0'; #endif - return n; + return n; } @@ -169,25 +173,27 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) { */ #ifdef strndupa -#define rd_strndupa(DESTPTR,PTR,LEN) (*(DESTPTR) = strndupa(PTR,LEN)) +#define rd_strndupa(DESTPTR, PTR, LEN) (*(DESTPTR) = strndupa(PTR, LEN)) #else -#define rd_strndupa(DESTPTR,PTR,LEN) do { \ - const char *_src = (PTR); \ - size_t _srclen = (LEN); \ - char *_dst = rd_alloca(_srclen + 1); \ - memcpy(_dst, _src, _srclen); \ - _dst[_srclen] = '\0'; \ - *(DESTPTR) = _dst; \ +#define rd_strndupa(DESTPTR, PTR, LEN) \ + do { \ + const char *_src = (PTR); \ + size_t _srclen = (LEN); \ + char *_dst = rd_alloca(_srclen + 1); \ + memcpy(_dst, _src, _srclen); \ + _dst[_srclen] = '\0'; \ + *(DESTPTR) = _dst; \ } while (0) 
#endif #ifdef strdupa -#define rd_strdupa(DESTPTR,PTR) (*(DESTPTR) = strdupa(PTR)) +#define rd_strdupa(DESTPTR, PTR) (*(DESTPTR) = strdupa(PTR)) #else -#define rd_strdupa(DESTPTR,PTR) do { \ - const char *_src1 = (PTR); \ - size_t _srclen1 = strlen(_src1); \ - rd_strndupa(DESTPTR, _src1, _srclen1); \ +#define rd_strdupa(DESTPTR, PTR) \ + do { \ + const char *_src1 = (PTR); \ + size_t _srclen1 = strlen(_src1); \ + rd_strndupa(DESTPTR, _src1, _srclen1); \ } while (0) #endif @@ -205,35 +211,35 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) { /* Round/align X upwards to STRIDE, which must be power of 2. */ -#define RD_ROUNDUP(X,STRIDE) (((X) + ((STRIDE) - 1)) & ~(STRIDE-1)) +#define RD_ROUNDUP(X, STRIDE) (((X) + ((STRIDE)-1)) & ~(STRIDE - 1)) #define RD_ARRAY_SIZE(A) (sizeof((A)) / sizeof(*(A))) #define RD_ARRAYSIZE(A) RD_ARRAY_SIZE(A) -#define RD_SIZEOF(TYPE,MEMBER) sizeof(((TYPE *)NULL)->MEMBER) -#define RD_OFFSETOF(TYPE,MEMBER) ((size_t) &(((TYPE *)NULL)->MEMBER)) +#define RD_SIZEOF(TYPE, MEMBER) sizeof(((TYPE *)NULL)->MEMBER) +#define RD_OFFSETOF(TYPE, MEMBER) ((size_t) & (((TYPE *)NULL)->MEMBER)) /** * Returns the 'I'th array element from static sized array 'A' * or NULL if 'I' is out of range. * var-args is an optional prefix to provide the correct return type. */ -#define RD_ARRAY_ELEM(A,I,...) \ - ((unsigned int)(I) < RD_ARRAY_SIZE(A) ? __VA_ARGS__ (A)[(I)] : NULL) +#define RD_ARRAY_ELEM(A, I, ...) \ + ((unsigned int)(I) < RD_ARRAY_SIZE(A) ? __VA_ARGS__(A)[(I)] : NULL) -#define RD_STRINGIFY(X) # X +#define RD_STRINGIFY(X) #X -#define RD_MIN(a,b) ((a) < (b) ? (a) : (b)) -#define RD_MAX(a,b) ((a) > (b) ? (a) : (b)) +#define RD_MIN(a, b) ((a) < (b) ? (a) : (b)) +#define RD_MAX(a, b) ((a) > (b) ? (a) : (b)) /** * Cap an integer (of any type) to reside within the defined limit. */ -#define RD_INT_CAP(val,low,hi) \ - ((val) < (low) ? low : ((val) > (hi) ? (hi) : (val))) +#define RD_INT_CAP(val, low, hi) \ + ((val) < (low) ? 
low : ((val) > (hi) ? (hi) : (val))) @@ -241,11 +247,11 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) { * Allocate 'size' bytes, copy 'src', return pointer to new memory. * * Use rd_free() to free the returned pointer. -*/ -static RD_INLINE RD_UNUSED void *rd_memdup (const void *src, size_t size) { - void *dst = rd_malloc(size); - memcpy(dst, src, size); - return dst; + */ +static RD_INLINE RD_UNUSED void *rd_memdup(const void *src, size_t size) { + void *dst = rd_malloc(size); + memcpy(dst, src, size); + return dst; } /** @@ -272,7 +278,7 @@ typedef rd_atomic32_t rd_refcnt_t; #endif #ifdef RD_REFCNT_USE_LOCKS -static RD_INLINE RD_UNUSED int rd_refcnt_init (rd_refcnt_t *R, int v) { +static RD_INLINE RD_UNUSED int rd_refcnt_init(rd_refcnt_t *R, int v) { int r; mtx_init(&R->lock, mtx_plain); mtx_lock(&R->lock); @@ -281,11 +287,11 @@ static RD_INLINE RD_UNUSED int rd_refcnt_init (rd_refcnt_t *R, int v) { return r; } #else -#define rd_refcnt_init(R,v) rd_atomic32_init(R, v) +#define rd_refcnt_init(R, v) rd_atomic32_init(R, v) #endif #ifdef RD_REFCNT_USE_LOCKS -static RD_INLINE RD_UNUSED void rd_refcnt_destroy (rd_refcnt_t *R) { +static RD_INLINE RD_UNUSED void rd_refcnt_destroy(rd_refcnt_t *R) { mtx_lock(&R->lock); rd_assert(R->v == 0); mtx_unlock(&R->lock); @@ -293,12 +299,14 @@ static RD_INLINE RD_UNUSED void rd_refcnt_destroy (rd_refcnt_t *R) { mtx_destroy(&R->lock); } #else -#define rd_refcnt_destroy(R) do { } while (0) +#define rd_refcnt_destroy(R) \ + do { \ + } while (0) #endif #ifdef RD_REFCNT_USE_LOCKS -static RD_INLINE RD_UNUSED int rd_refcnt_set (rd_refcnt_t *R, int v) { +static RD_INLINE RD_UNUSED int rd_refcnt_set(rd_refcnt_t *R, int v) { int r; mtx_lock(&R->lock); r = R->v = v; @@ -306,12 +314,12 @@ static RD_INLINE RD_UNUSED int rd_refcnt_set (rd_refcnt_t *R, int v) { return r; } #else -#define rd_refcnt_set(R,v) rd_atomic32_set(R, v) +#define rd_refcnt_set(R, v) rd_atomic32_set(R, v) #endif #ifdef RD_REFCNT_USE_LOCKS -static 
RD_INLINE RD_UNUSED int rd_refcnt_add0 (rd_refcnt_t *R) { +static RD_INLINE RD_UNUSED int rd_refcnt_add0(rd_refcnt_t *R) { int r; mtx_lock(&R->lock); r = ++(R->v); @@ -319,10 +327,10 @@ static RD_INLINE RD_UNUSED int rd_refcnt_add0 (rd_refcnt_t *R) { return r; } #else -#define rd_refcnt_add0(R) rd_atomic32_add(R, 1) +#define rd_refcnt_add0(R) rd_atomic32_add(R, 1) #endif -static RD_INLINE RD_UNUSED int rd_refcnt_sub0 (rd_refcnt_t *R) { +static RD_INLINE RD_UNUSED int rd_refcnt_sub0(rd_refcnt_t *R) { int r; #ifdef RD_REFCNT_USE_LOCKS mtx_lock(&R->lock); @@ -337,7 +345,7 @@ static RD_INLINE RD_UNUSED int rd_refcnt_sub0 (rd_refcnt_t *R) { } #ifdef RD_REFCNT_USE_LOCKS -static RD_INLINE RD_UNUSED int rd_refcnt_get (rd_refcnt_t *R) { +static RD_INLINE RD_UNUSED int rd_refcnt_get(rd_refcnt_t *R) { int r; mtx_lock(&R->lock); r = R->v; @@ -345,67 +353,67 @@ static RD_INLINE RD_UNUSED int rd_refcnt_get (rd_refcnt_t *R) { return r; } #else -#define rd_refcnt_get(R) rd_atomic32_get(R) +#define rd_refcnt_get(R) rd_atomic32_get(R) #endif /** * A wrapper for decreasing refcount and calling a destroy function * when refcnt reaches 0. 
*/ -#define rd_refcnt_destroywrapper(REFCNT,DESTROY_CALL) do { \ - if (rd_refcnt_sub(REFCNT) > 0) \ - break; \ - DESTROY_CALL; \ +#define rd_refcnt_destroywrapper(REFCNT, DESTROY_CALL) \ + do { \ + if (rd_refcnt_sub(REFCNT) > 0) \ + break; \ + DESTROY_CALL; \ } while (0) -#define rd_refcnt_destroywrapper2(REFCNT,WHAT,DESTROY_CALL) do { \ - if (rd_refcnt_sub2(REFCNT,WHAT) > 0) \ - break; \ - DESTROY_CALL; \ +#define rd_refcnt_destroywrapper2(REFCNT, WHAT, DESTROY_CALL) \ + do { \ + if (rd_refcnt_sub2(REFCNT, WHAT) > 0) \ + break; \ + DESTROY_CALL; \ } while (0) #if ENABLE_REFCNT_DEBUG -#define rd_refcnt_add_fl(FUNC,LINE,R) \ - ( \ - fprintf(stderr, "REFCNT DEBUG: %-35s %d +1: %16p: %s:%d\n", \ - #R, rd_refcnt_get(R), (R), (FUNC), (LINE)), \ - rd_refcnt_add0(R) \ - ) +#define rd_refcnt_add_fl(FUNC, LINE, R) \ + (fprintf(stderr, "REFCNT DEBUG: %-35s %d +1: %16p: %s:%d\n", #R, \ + rd_refcnt_get(R), (R), (FUNC), (LINE)), \ + rd_refcnt_add0(R)) #define rd_refcnt_add(R) rd_refcnt_add_fl(__FUNCTION__, __LINE__, (R)) -#define rd_refcnt_add2(R,WHAT) do { \ - fprintf(stderr, \ - "REFCNT DEBUG: %-35s %d +1: %16p: %16s: %s:%d\n", \ - #R, rd_refcnt_get(R), (R), WHAT, \ - __FUNCTION__,__LINE__), \ - rd_refcnt_add0(R); \ +#define rd_refcnt_add2(R, WHAT) \ + do { \ + fprintf(stderr, \ + "REFCNT DEBUG: %-35s %d +1: %16p: %16s: %s:%d\n", #R, \ + rd_refcnt_get(R), (R), WHAT, __FUNCTION__, __LINE__), \ + rd_refcnt_add0(R); \ } while (0) -#define rd_refcnt_sub2(R,WHAT) ( \ - fprintf(stderr, \ - "REFCNT DEBUG: %-35s %d -1: %16p: %16s: %s:%d\n", \ - #R, rd_refcnt_get(R), (R), WHAT, \ - __FUNCTION__,__LINE__), \ - rd_refcnt_sub0(R) ) +#define rd_refcnt_sub2(R, WHAT) \ + (fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %16s: %s:%d\n", #R, \ + rd_refcnt_get(R), (R), WHAT, __FUNCTION__, __LINE__), \ + rd_refcnt_sub0(R)) -#define rd_refcnt_sub(R) ( \ - fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %s:%d\n", \ - #R, rd_refcnt_get(R), (R), __FUNCTION__,__LINE__), \ - rd_refcnt_sub0(R) ) 
+#define rd_refcnt_sub(R) \ + (fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %s:%d\n", #R, \ + rd_refcnt_get(R), (R), __FUNCTION__, __LINE__), \ + rd_refcnt_sub0(R)) #else -#define rd_refcnt_add_fl(FUNC,LINE,R) rd_refcnt_add0(R) -#define rd_refcnt_add(R) rd_refcnt_add0(R) -#define rd_refcnt_sub(R) rd_refcnt_sub0(R) +#define rd_refcnt_add_fl(FUNC, LINE, R) rd_refcnt_add0(R) +#define rd_refcnt_add(R) rd_refcnt_add0(R) +#define rd_refcnt_sub(R) rd_refcnt_sub0(R) #endif - - -#define RD_IF_FREE(PTR,FUNC) do { if ((PTR)) FUNC(PTR); } while (0) +#define RD_IF_FREE(PTR, FUNC) \ + do { \ + if ((PTR)) \ + FUNC(PTR); \ + } while (0) /** @@ -413,7 +421,7 @@ static RD_INLINE RD_UNUSED int rd_refcnt_get (rd_refcnt_t *R) { */ typedef struct rd_chariov_s { - char *ptr; + char *ptr; size_t size; } rd_chariov_t; diff --git a/src/rdaddr.c b/src/rdaddr.c index 616a0cb427..f84f009d4c 100644 --- a/src/rdaddr.c +++ b/src/rdaddr.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -36,46 +36,43 @@ #include #endif -const char *rd_sockaddr2str (const void *addr, int flags) { - const rd_sockaddr_inx_t *a = (const rd_sockaddr_inx_t *)addr; - static RD_TLS char ret[32][256]; - static RD_TLS int reti = 0; - char portstr[32]; - int of = 0; - int niflags = NI_NUMERICSERV; +const char *rd_sockaddr2str(const void *addr, int flags) { + const rd_sockaddr_inx_t *a = (const rd_sockaddr_inx_t *)addr; + static RD_TLS char ret[32][256]; + static RD_TLS int reti = 0; + char portstr[32]; + int of = 0; + int niflags = NI_NUMERICSERV; int r; - reti = (reti + 1) % 32; - - switch (a->sinx_family) - { - case AF_INET: - case AF_INET6: - if (flags & RD_SOCKADDR2STR_F_FAMILY) - of += rd_snprintf(&ret[reti][of], sizeof(ret[reti])-of, "ipv%i#", - a->sinx_family == AF_INET ? 
4 : 6); + reti = (reti + 1) % 32; + + switch (a->sinx_family) { + case AF_INET: + case AF_INET6: + if (flags & RD_SOCKADDR2STR_F_FAMILY) + of += rd_snprintf(&ret[reti][of], + sizeof(ret[reti]) - of, "ipv%i#", + a->sinx_family == AF_INET ? 4 : 6); - if ((flags & RD_SOCKADDR2STR_F_PORT) && - a->sinx_family == AF_INET6) - ret[reti][of++] = '['; + if ((flags & RD_SOCKADDR2STR_F_PORT) && + a->sinx_family == AF_INET6) + ret[reti][of++] = '['; - if (!(flags & RD_SOCKADDR2STR_F_RESOLVE)) - niflags |= NI_NUMERICHOST; + if (!(flags & RD_SOCKADDR2STR_F_RESOLVE)) + niflags |= NI_NUMERICHOST; retry: if ((r = getnameinfo( - (const struct sockaddr *)a, - RD_SOCKADDR_INX_LEN(a), + (const struct sockaddr *)a, RD_SOCKADDR_INX_LEN(a), - ret[reti]+of, sizeof(ret[reti])-of, + ret[reti] + of, sizeof(ret[reti]) - of, - (flags & RD_SOCKADDR2STR_F_PORT) ? - portstr : NULL, + (flags & RD_SOCKADDR2STR_F_PORT) ? portstr : NULL, - (flags & RD_SOCKADDR2STR_F_PORT) ? - sizeof(portstr) : 0, + (flags & RD_SOCKADDR2STR_F_PORT) ? sizeof(portstr) : 0, - niflags))) { + niflags))) { if (r == EAI_AGAIN && !(niflags & NI_NUMERICHOST)) { /* If unable to resolve name, retry without @@ -86,154 +83,154 @@ const char *rd_sockaddr2str (const void *addr, int flags) { break; } - - if (flags & RD_SOCKADDR2STR_F_PORT) { - size_t len = strlen(ret[reti]); - rd_snprintf(ret[reti]+len, sizeof(ret[reti])-len, - "%s:%s", - a->sinx_family == AF_INET6 ? "]" : "", - portstr); - } - - return ret[reti]; - } - - - /* Error-case */ - rd_snprintf(ret[reti], sizeof(ret[reti]), "", - rd_family2str(a->sinx_family)); - - return ret[reti]; + + if (flags & RD_SOCKADDR2STR_F_PORT) { + size_t len = strlen(ret[reti]); + rd_snprintf( + ret[reti] + len, sizeof(ret[reti]) - len, "%s:%s", + a->sinx_family == AF_INET6 ? 
"]" : "", portstr); + } + + return ret[reti]; + } + + + /* Error-case */ + rd_snprintf(ret[reti], sizeof(ret[reti]), "", + rd_family2str(a->sinx_family)); + + return ret[reti]; } -const char *rd_addrinfo_prepare (const char *nodesvc, - char **node, char **svc) { - static RD_TLS char snode[256]; - static RD_TLS char ssvc[64]; - const char *t; - const char *svct = NULL; - size_t nodelen = 0; - - *snode = '\0'; - *ssvc = '\0'; - - if (*nodesvc == '[') { - /* "[host]".. (enveloped node name) */ - if (!(t = strchr(nodesvc, ']'))) - return "Missing close-']'"; - nodesvc++; - nodelen = t-nodesvc; - svct = t+1; - - } else if (*nodesvc == ':' && *(nodesvc+1) != ':') { - /* ":".. (port only) */ - nodelen = 0; - svct = nodesvc; - } - - if ((svct = strrchr(svct ? svct : nodesvc, ':')) && (*(svct-1) != ':') && - *(++svct)) { - /* Optional ":service" definition. */ - if (strlen(svct) >= sizeof(ssvc)) - return "Service name too long"; - strcpy(ssvc, svct); - if (!nodelen) - nodelen = svct - nodesvc - 1; - - } else if (!nodelen) - nodelen = strlen(nodesvc); - - if (nodelen) { - /* Truncate nodename if necessary. */ - nodelen = RD_MIN(nodelen, sizeof(snode)-1); - memcpy(snode, nodesvc, nodelen); - snode[nodelen] = '\0'; - } - - *node = snode; - *svc = ssvc; - - return NULL; +const char *rd_addrinfo_prepare(const char *nodesvc, char **node, char **svc) { + static RD_TLS char snode[256]; + static RD_TLS char ssvc[64]; + const char *t; + const char *svct = NULL; + size_t nodelen = 0; + + *snode = '\0'; + *ssvc = '\0'; + + if (*nodesvc == '[') { + /* "[host]".. (enveloped node name) */ + if (!(t = strchr(nodesvc, ']'))) + return "Missing close-']'"; + nodesvc++; + nodelen = t - nodesvc; + svct = t + 1; + + } else if (*nodesvc == ':' && *(nodesvc + 1) != ':') { + /* ":".. (port only) */ + nodelen = 0; + svct = nodesvc; + } + + if ((svct = strrchr(svct ? svct : nodesvc, ':')) && + (*(svct - 1) != ':') && *(++svct)) { + /* Optional ":service" definition. 
*/ + if (strlen(svct) >= sizeof(ssvc)) + return "Service name too long"; + strcpy(ssvc, svct); + if (!nodelen) + nodelen = svct - nodesvc - 1; + + } else if (!nodelen) + nodelen = strlen(nodesvc); + + if (nodelen) { + /* Truncate nodename if necessary. */ + nodelen = RD_MIN(nodelen, sizeof(snode) - 1); + memcpy(snode, nodesvc, nodelen); + snode[nodelen] = '\0'; + } + + *node = snode; + *svc = ssvc; + + return NULL; } -rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc, - int flags, int family, - int socktype, int protocol, - const char **errstr) { - struct addrinfo hints; - memset(&hints, 0, sizeof(hints)); - hints.ai_family = family; - hints.ai_socktype = socktype; - hints.ai_protocol = protocol; - hints.ai_flags = flags; - - struct addrinfo *ais, *ai; - char *node, *svc; - int r; - int cnt = 0; - rd_sockaddr_list_t *rsal; - - if ((*errstr = rd_addrinfo_prepare(nodesvc, &node, &svc))) { - errno = EINVAL; - return NULL; - } - - if (*svc) - defsvc = svc; - - if ((r = getaddrinfo(node, defsvc, &hints, &ais))) { +rd_sockaddr_list_t *rd_getaddrinfo(const char *nodesvc, + const char *defsvc, + int flags, + int family, + int socktype, + int protocol, + const char **errstr) { + struct addrinfo hints; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = family; + hints.ai_socktype = socktype; + hints.ai_protocol = protocol; + hints.ai_flags = flags; + + struct addrinfo *ais, *ai; + char *node, *svc; + int r; + int cnt = 0; + rd_sockaddr_list_t *rsal; + + if ((*errstr = rd_addrinfo_prepare(nodesvc, &node, &svc))) { + errno = EINVAL; + return NULL; + } + + if (*svc) + defsvc = svc; + + if ((r = getaddrinfo(node, defsvc, &hints, &ais))) { #ifdef EAI_SYSTEM - if (r == EAI_SYSTEM) + if (r == EAI_SYSTEM) #else - if (0) + if (0) #endif - *errstr = rd_strerror(errno); - else { + *errstr = rd_strerror(errno); + else { #ifdef _WIN32 - *errstr = gai_strerrorA(r); + *errstr = gai_strerrorA(r); #else - *errstr = gai_strerror(r); + *errstr = gai_strerror(r); 
#endif - errno = EFAULT; - } - return NULL; - } - - /* Count number of addresses */ - for (ai = ais ; ai != NULL ; ai = ai->ai_next) - cnt++; - - if (cnt == 0) { - /* unlikely? */ - freeaddrinfo(ais); - errno = ENOENT; - *errstr = "No addresses"; - return NULL; - } - - - rsal = rd_calloc(1, sizeof(*rsal) + (sizeof(*rsal->rsal_addr) * cnt)); - - for (ai = ais ; ai != NULL ; ai = ai->ai_next) - memcpy(&rsal->rsal_addr[rsal->rsal_cnt++], - ai->ai_addr, ai->ai_addrlen); - - freeaddrinfo(ais); - - /* Shuffle address list for proper round-robin */ - if (!(flags & RD_AI_NOSHUFFLE)) - rd_array_shuffle(rsal->rsal_addr, rsal->rsal_cnt, - sizeof(*rsal->rsal_addr)); - - return rsal; -} + errno = EFAULT; + } + return NULL; + } + + /* Count number of addresses */ + for (ai = ais; ai != NULL; ai = ai->ai_next) + cnt++; + + if (cnt == 0) { + /* unlikely? */ + freeaddrinfo(ais); + errno = ENOENT; + *errstr = "No addresses"; + return NULL; + } + rsal = rd_calloc(1, sizeof(*rsal) + (sizeof(*rsal->rsal_addr) * cnt)); -void rd_sockaddr_list_destroy (rd_sockaddr_list_t *rsal) { - rd_free(rsal); + for (ai = ais; ai != NULL; ai = ai->ai_next) + memcpy(&rsal->rsal_addr[rsal->rsal_cnt++], ai->ai_addr, + ai->ai_addrlen); + + freeaddrinfo(ais); + + /* Shuffle address list for proper round-robin */ + if (!(flags & RD_AI_NOSHUFFLE)) + rd_array_shuffle(rsal->rsal_addr, rsal->rsal_cnt, + sizeof(*rsal->rsal_addr)); + + return rsal; } + + +void rd_sockaddr_list_destroy(rd_sockaddr_list_t *rsal) { + rd_free(rsal); +} diff --git a/src/rdaddr.h b/src/rdaddr.h index 6e91cef6b2..34d6002bfa 100644 --- a/src/rdaddr.h +++ b/src/rdaddr.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. 
Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -48,25 +48,28 @@ * It provides conveniant abstraction of AF_INET* agnostic operations. 
*/ typedef union { - struct sockaddr_in in; - struct sockaddr_in6 in6; + struct sockaddr_in in; + struct sockaddr_in6 in6; } rd_sockaddr_inx_t; #define sinx_family in.sin_family #define sinx_addr in.sin_addr -#define RD_SOCKADDR_INX_LEN(sinx) \ - ((sinx)->sinx_family == AF_INET ? sizeof(struct sockaddr_in) : \ - (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6): \ - sizeof(rd_sockaddr_inx_t)) -#define RD_SOCKADDR_INX_PORT(sinx) \ - ((sinx)->sinx_family == AF_INET ? (sinx)->in.sin_port : \ - (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0) - -#define RD_SOCKADDR_INX_PORT_SET(sinx,port) do { \ - if ((sinx)->sinx_family == AF_INET) \ - (sinx)->in.sin_port = port; \ - else if ((sinx)->sinx_family == AF_INET6) \ - (sinx)->in6.sin6_port = port; \ - } while (0) +#define RD_SOCKADDR_INX_LEN(sinx) \ + ((sinx)->sinx_family == AF_INET \ + ? sizeof(struct sockaddr_in) \ + : (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6) \ + : sizeof(rd_sockaddr_inx_t)) +#define RD_SOCKADDR_INX_PORT(sinx) \ + ((sinx)->sinx_family == AF_INET \ + ? (sinx)->in.sin_port \ + : (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0) + +#define RD_SOCKADDR_INX_PORT_SET(sinx, port) \ + do { \ + if ((sinx)->sinx_family == AF_INET) \ + (sinx)->in.sin_port = port; \ + else if ((sinx)->sinx_family == AF_INET6) \ + (sinx)->in6.sin6_port = port; \ + } while (0) @@ -79,12 +82,14 @@ typedef union { * IPv6 address enveloping ("[addr]:port") will also be performed * if .._F_PORT is set. */ -#define RD_SOCKADDR2STR_F_PORT 0x1 /* Append the port. */ -#define RD_SOCKADDR2STR_F_RESOLVE 0x2 /* Try to resolve address to hostname. */ -#define RD_SOCKADDR2STR_F_FAMILY 0x4 /* Prepend address family. */ -#define RD_SOCKADDR2STR_F_NICE /* Nice and friendly output */ \ - (RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_RESOLVE) -const char *rd_sockaddr2str (const void *addr, int flags); +#define RD_SOCKADDR2STR_F_PORT 0x1 /* Append the port. 
*/ +#define RD_SOCKADDR2STR_F_RESOLVE \ + 0x2 /* Try to resolve address to hostname. \ + */ +#define RD_SOCKADDR2STR_F_FAMILY 0x4 /* Prepend address family. */ +#define RD_SOCKADDR2STR_F_NICE /* Nice and friendly output */ \ + (RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_RESOLVE) +const char *rd_sockaddr2str(const void *addr, int flags); /** @@ -96,15 +101,14 @@ const char *rd_sockaddr2str (const void *addr, int flags); * Thread-safe but returned buffers in '*node' and '*svc' are only * usable until the next call to rd_addrinfo_prepare() in the same thread. */ -const char *rd_addrinfo_prepare (const char *nodesvc, - char **node, char **svc); +const char *rd_addrinfo_prepare(const char *nodesvc, char **node, char **svc); typedef struct rd_sockaddr_list_s { - int rsal_cnt; - int rsal_curr; - rd_sockaddr_inx_t rsal_addr[]; + int rsal_cnt; + int rsal_curr; + rd_sockaddr_inx_t rsal_addr[]; } rd_sockaddr_list_t; @@ -121,22 +125,21 @@ typedef struct rd_sockaddr_list_s { * } * ... * } - * + * */ - + static RD_INLINE rd_sockaddr_inx_t * -rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) RD_UNUSED; +rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) RD_UNUSED; static RD_INLINE rd_sockaddr_inx_t * -rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) { - rsal->rsal_curr = (rsal->rsal_curr + 1) % rsal->rsal_cnt; - return &rsal->rsal_addr[rsal->rsal_curr]; +rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) { + rsal->rsal_curr = (rsal->rsal_curr + 1) % rsal->rsal_cnt; + return &rsal->rsal_addr[rsal->rsal_curr]; } -#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal) \ - for ((sinx) = &(rsal)->rsal_addr[0] ; \ - (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_len] ; \ - (sinx)++) +#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal) \ + for ((sinx) = &(rsal)->rsal_addr[0]; \ + (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_len]; (sinx)++) /** * Wrapper for getaddrinfo(3) that performs these additional tasks: @@ -149,14 +152,18 @@ rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) { * * Thread-safe. 
*/ -#define RD_AI_NOSHUFFLE 0x10000000 /* Dont shuffle returned address list. - * FIXME: Guessing non-used bits like this - * is a bad idea. */ +#define RD_AI_NOSHUFFLE \ + 0x10000000 /* Dont shuffle returned address list. \ + * FIXME: Guessing non-used bits like this \ + * is a bad idea. */ -rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc, - int flags, int family, - int socktype, int protocol, - const char **errstr); +rd_sockaddr_list_t *rd_getaddrinfo(const char *nodesvc, + const char *defsvc, + int flags, + int family, + int socktype, + int protocol, + const char **errstr); @@ -165,23 +172,23 @@ rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc, * * Thread-safe. */ -void rd_sockaddr_list_destroy (rd_sockaddr_list_t *rsal); +void rd_sockaddr_list_destroy(rd_sockaddr_list_t *rsal); /** * Returns the human readable name of a socket family. */ -static const char *rd_family2str (int af) RD_UNUSED; -static const char *rd_family2str (int af) { - switch(af){ - case AF_INET: - return "inet"; - case AF_INET6: - return "inet6"; - default: - return "af?"; - }; +static const char *rd_family2str(int af) RD_UNUSED; +static const char *rd_family2str(int af) { + switch (af) { + case AF_INET: + return "inet"; + case AF_INET6: + return "inet6"; + default: + return "af?"; + }; } #endif /* _RDADDR_H_ */ diff --git a/src/rdatomic.h b/src/rdatomic.h index 03017167c9..00513f87bd 100644 --- a/src/rdatomic.h +++ b/src/rdatomic.h @@ -31,59 +31,61 @@ #include "tinycthread.h" typedef struct { - int32_t val; + int32_t val; #if !defined(_WIN32) && !HAVE_ATOMICS_32 - mtx_t lock; + mtx_t lock; #endif } rd_atomic32_t; typedef struct { - int64_t val; + int64_t val; #if !defined(_WIN32) && !HAVE_ATOMICS_64 - mtx_t lock; + mtx_t lock; #endif } rd_atomic64_t; -static RD_INLINE RD_UNUSED void rd_atomic32_init (rd_atomic32_t *ra, int32_t v) { - ra->val = v; +static RD_INLINE RD_UNUSED void rd_atomic32_init(rd_atomic32_t *ra, int32_t v) { + ra->val 
= v; #if !defined(_WIN32) && !HAVE_ATOMICS_32 - mtx_init(&ra->lock, mtx_plain); + mtx_init(&ra->lock, mtx_plain); #endif } -static RD_INLINE int32_t RD_UNUSED rd_atomic32_add (rd_atomic32_t *ra, int32_t v) { +static RD_INLINE int32_t RD_UNUSED rd_atomic32_add(rd_atomic32_t *ra, + int32_t v) { #ifdef __SUNPRO_C - return atomic_add_32_nv(&ra->val, v); + return atomic_add_32_nv(&ra->val, v); #elif defined(_WIN32) - return InterlockedAdd((LONG *)&ra->val, v); + return InterlockedAdd((LONG *)&ra->val, v); #elif !HAVE_ATOMICS_32 - int32_t r; - mtx_lock(&ra->lock); - ra->val += v; - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int32_t r; + mtx_lock(&ra->lock); + ra->val += v; + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP32(add, fetch, &ra->val, v); + return ATOMIC_OP32(add, fetch, &ra->val, v); #endif } -static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra, int32_t v) { +static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra, + int32_t v) { #ifdef __SUNPRO_C - return atomic_add_32_nv(&ra->val, -v); + return atomic_add_32_nv(&ra->val, -v); #elif defined(_WIN32) - return InterlockedAdd((LONG *)&ra->val, -v); + return InterlockedAdd((LONG *)&ra->val, -v); #elif !HAVE_ATOMICS_32 - int32_t r; - mtx_lock(&ra->lock); - ra->val -= v; - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int32_t r; + mtx_lock(&ra->lock); + ra->val -= v; + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP32(sub, fetch, &ra->val, v); + return ATOMIC_OP32(sub, fetch, &ra->val, v); #endif } @@ -97,27 +99,28 @@ static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra, int32_t v) */ static RD_INLINE int32_t RD_UNUSED rd_atomic32_get(rd_atomic32_t *ra) { #if defined(_WIN32) || defined(__SUNPRO_C) - return ra->val; + return ra->val; #elif !HAVE_ATOMICS_32 - int32_t r; - mtx_lock(&ra->lock); - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int32_t r; + mtx_lock(&ra->lock); + r = ra->val; + 
mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP32(fetch, add, &ra->val, 0); + return ATOMIC_OP32(fetch, add, &ra->val, 0); #endif } -static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, int32_t v) { +static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, + int32_t v) { #ifdef _WIN32 - return InterlockedExchange((LONG *)&ra->val, v); + return InterlockedExchange((LONG *)&ra->val, v); #elif !HAVE_ATOMICS_32 - int32_t r; - mtx_lock(&ra->lock); - r = ra->val = v; - mtx_unlock(&ra->lock); - return r; + int32_t r; + mtx_lock(&ra->lock); + r = ra->val = v; + mtx_unlock(&ra->lock); + return r; #elif HAVE_ATOMICS_32_ATOMIC __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST); return v; @@ -125,50 +128,52 @@ static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, int32_t v) (void)__sync_lock_test_and_set(&ra->val, v); return v; #else - return ra->val = v; // FIXME + return ra->val = v; // FIXME #endif } -static RD_INLINE RD_UNUSED void rd_atomic64_init (rd_atomic64_t *ra, int64_t v) { - ra->val = v; +static RD_INLINE RD_UNUSED void rd_atomic64_init(rd_atomic64_t *ra, int64_t v) { + ra->val = v; #if !defined(_WIN32) && !HAVE_ATOMICS_64 - mtx_init(&ra->lock, mtx_plain); + mtx_init(&ra->lock, mtx_plain); #endif } -static RD_INLINE int64_t RD_UNUSED rd_atomic64_add (rd_atomic64_t *ra, int64_t v) { +static RD_INLINE int64_t RD_UNUSED rd_atomic64_add(rd_atomic64_t *ra, + int64_t v) { #ifdef __SUNPRO_C - return atomic_add_64_nv(&ra->val, v); + return atomic_add_64_nv(&ra->val, v); #elif defined(_WIN32) - return InterlockedAdd64(&ra->val, v); + return InterlockedAdd64(&ra->val, v); #elif !HAVE_ATOMICS_64 - int64_t r; - mtx_lock(&ra->lock); - ra->val += v; - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int64_t r; + mtx_lock(&ra->lock); + ra->val += v; + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP64(add, fetch, &ra->val, v); + return ATOMIC_OP64(add, fetch, &ra->val, v); #endif } -static 
RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra, int64_t v) { +static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra, + int64_t v) { #ifdef __SUNPRO_C - return atomic_add_64_nv(&ra->val, -v); + return atomic_add_64_nv(&ra->val, -v); #elif defined(_WIN32) - return InterlockedAdd64(&ra->val, -v); + return InterlockedAdd64(&ra->val, -v); #elif !HAVE_ATOMICS_64 - int64_t r; - mtx_lock(&ra->lock); - ra->val -= v; - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int64_t r; + mtx_lock(&ra->lock); + ra->val -= v; + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP64(sub, fetch, &ra->val, v); + return ATOMIC_OP64(sub, fetch, &ra->val, v); #endif } @@ -183,29 +188,30 @@ static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra, int64_t v) */ static RD_INLINE int64_t RD_UNUSED rd_atomic64_get(rd_atomic64_t *ra) { #if defined(_WIN32) || defined(__SUNPRO_C) - return ra->val; + return ra->val; #elif !HAVE_ATOMICS_64 - int64_t r; - mtx_lock(&ra->lock); - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int64_t r; + mtx_lock(&ra->lock); + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP64(fetch, add, &ra->val, 0); + return ATOMIC_OP64(fetch, add, &ra->val, 0); #endif } -static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, int64_t v) { +static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, + int64_t v) { #ifdef _WIN32 - return InterlockedExchange64(&ra->val, v); + return InterlockedExchange64(&ra->val, v); #elif !HAVE_ATOMICS_64 - int64_t r; - mtx_lock(&ra->lock); - ra->val = v; - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int64_t r; + mtx_lock(&ra->lock); + ra->val = v; + r = ra->val; + mtx_unlock(&ra->lock); + return r; #elif HAVE_ATOMICS_64_ATOMIC __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST); return v; @@ -213,7 +219,7 @@ static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, int64_t v) 
(void)__sync_lock_test_and_set(&ra->val, v); return v; #else - return ra->val = v; // FIXME + return ra->val = v; // FIXME #endif } diff --git a/src/rdavg.h b/src/rdavg.h index f706dce074..a170e8da53 100644 --- a/src/rdavg.h +++ b/src/rdavg.h @@ -40,14 +40,13 @@ typedef struct rd_avg_s { int64_t minv; int64_t avg; int64_t sum; - int cnt; + int cnt; rd_ts_t start; } ra_v; mtx_t ra_lock; - int ra_enabled; - enum { - RD_AVG_GAUGE, - RD_AVG_COUNTER, + int ra_enabled; + enum { RD_AVG_GAUGE, + RD_AVG_COUNTER, } ra_type; #if WITH_HDRHISTOGRAM rd_hdr_histogram_t *ra_hdr; @@ -74,18 +73,18 @@ typedef struct rd_avg_s { /** * @brief Add value \p v to averager \p ra. */ -static RD_UNUSED void rd_avg_add (rd_avg_t *ra, int64_t v) { +static RD_UNUSED void rd_avg_add(rd_avg_t *ra, int64_t v) { mtx_lock(&ra->ra_lock); if (!ra->ra_enabled) { mtx_unlock(&ra->ra_lock); return; } - if (v > ra->ra_v.maxv) - ra->ra_v.maxv = v; - if (ra->ra_v.minv == 0 || v < ra->ra_v.minv) - ra->ra_v.minv = v; - ra->ra_v.sum += v; - ra->ra_v.cnt++; + if (v > ra->ra_v.maxv) + ra->ra_v.maxv = v; + if (ra->ra_v.minv == 0 || v < ra->ra_v.minv) + ra->ra_v.minv = v; + ra->ra_v.sum += v; + ra->ra_v.cnt++; #if WITH_HDRHISTOGRAM rd_hdr_histogram_record(ra->ra_hdr, v); #endif @@ -96,7 +95,7 @@ static RD_UNUSED void rd_avg_add (rd_avg_t *ra, int64_t v) { /** * @brief Calculate the average */ -static RD_UNUSED void rd_avg_calc (rd_avg_t *ra, rd_ts_t now) { +static RD_UNUSED void rd_avg_calc(rd_avg_t *ra, rd_ts_t now) { if (ra->ra_type == RD_AVG_GAUGE) { if (ra->ra_v.cnt) ra->ra_v.avg = ra->ra_v.sum / ra->ra_v.cnt; @@ -121,8 +120,7 @@ static RD_UNUSED void rd_avg_calc (rd_avg_t *ra, rd_ts_t now) { * * @remark ra will be not locked by this function. 
*/ -static RD_UNUSED int64_t -rd_avg_quantile (const rd_avg_t *ra, double q) { +static RD_UNUSED int64_t rd_avg_quantile(const rd_avg_t *ra, double q) { #if WITH_HDRHISTOGRAM return rd_hdr_histogram_quantile(ra->ra_hdr, q); #else @@ -137,7 +135,7 @@ rd_avg_quantile (const rd_avg_t *ra, double q) { * Caller must free avg internal members by calling rd_avg_destroy() * on the \p dst. */ -static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) { +static RD_UNUSED void rd_avg_rollover(rd_avg_t *dst, rd_avg_t *src) { rd_ts_t now; mtx_lock(&src->ra_lock); @@ -150,26 +148,26 @@ static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) { mtx_init(&dst->ra_lock, mtx_plain); dst->ra_type = src->ra_type; - dst->ra_v = src->ra_v; + dst->ra_v = src->ra_v; #if WITH_HDRHISTOGRAM dst->ra_hdr = NULL; - dst->ra_hist.stddev = rd_hdr_histogram_stddev(src->ra_hdr); - dst->ra_hist.mean = rd_hdr_histogram_mean(src->ra_hdr); - dst->ra_hist.oor = src->ra_hdr->outOfRangeCount; + dst->ra_hist.stddev = rd_hdr_histogram_stddev(src->ra_hdr); + dst->ra_hist.mean = rd_hdr_histogram_mean(src->ra_hdr); + dst->ra_hist.oor = src->ra_hdr->outOfRangeCount; dst->ra_hist.hdrsize = src->ra_hdr->allocatedSize; - dst->ra_hist.p50 = rd_hdr_histogram_quantile(src->ra_hdr, 50.0); - dst->ra_hist.p75 = rd_hdr_histogram_quantile(src->ra_hdr, 75.0); - dst->ra_hist.p90 = rd_hdr_histogram_quantile(src->ra_hdr, 90.0); - dst->ra_hist.p95 = rd_hdr_histogram_quantile(src->ra_hdr, 95.0); - dst->ra_hist.p99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.0); - dst->ra_hist.p99_99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.99); + dst->ra_hist.p50 = rd_hdr_histogram_quantile(src->ra_hdr, 50.0); + dst->ra_hist.p75 = rd_hdr_histogram_quantile(src->ra_hdr, 75.0); + dst->ra_hist.p90 = rd_hdr_histogram_quantile(src->ra_hdr, 90.0); + dst->ra_hist.p95 = rd_hdr_histogram_quantile(src->ra_hdr, 95.0); + dst->ra_hist.p99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.0); + dst->ra_hist.p99_99 = 
rd_hdr_histogram_quantile(src->ra_hdr, 99.99); #else memset(&dst->ra_hist, 0, sizeof(dst->ra_hist)); #endif - memset(&src->ra_v, 0, sizeof(src->ra_v)); + memset(&src->ra_v, 0, sizeof(src->ra_v)); - now = rd_clock(); + now = rd_clock(); src->ra_v.start = now; #if WITH_HDRHISTOGRAM @@ -181,23 +179,23 @@ static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) { int64_t mindiff, maxdiff; mindiff = src->ra_hdr->lowestTrackableValue - - src->ra_hdr->lowestOutOfRange; + src->ra_hdr->lowestOutOfRange; if (mindiff > 0) { /* There were low out of range values, grow lower * span to fit lowest out of range value + 20%. */ vmin = src->ra_hdr->lowestOutOfRange + - (int64_t)((double)mindiff * 0.2); + (int64_t)((double)mindiff * 0.2); } maxdiff = src->ra_hdr->highestOutOfRange - - src->ra_hdr->highestTrackableValue; + src->ra_hdr->highestTrackableValue; if (maxdiff > 0) { /* There were high out of range values, grow higher * span to fit highest out of range value + 20%. */ vmax = src->ra_hdr->highestOutOfRange + - (int64_t)((double)maxdiff * 0.2); + (int64_t)((double)maxdiff * 0.2); } if (vmin == src->ra_hdr->lowestTrackableValue && @@ -226,15 +224,18 @@ static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) { /** * Initialize an averager */ -static RD_UNUSED void rd_avg_init (rd_avg_t *ra, int type, - int64_t exp_min, int64_t exp_max, - int sigfigs, int enable) { +static RD_UNUSED void rd_avg_init(rd_avg_t *ra, + int type, + int64_t exp_min, + int64_t exp_max, + int sigfigs, + int enable) { memset(ra, 0, sizeof(*ra)); mtx_init(&ra->ra_lock, 0); ra->ra_enabled = enable; if (!enable) return; - ra->ra_type = type; + ra->ra_type = type; ra->ra_v.start = rd_clock(); #if WITH_HDRHISTOGRAM /* Start off the histogram with expected min,max span, @@ -247,7 +248,7 @@ static RD_UNUSED void rd_avg_init (rd_avg_t *ra, int type, /** * Destroy averager */ -static RD_UNUSED void rd_avg_destroy (rd_avg_t *ra) { +static RD_UNUSED void rd_avg_destroy(rd_avg_t *ra) { #if 
WITH_HDRHISTOGRAM if (ra->ra_hdr) rd_hdr_histogram_destroy(ra->ra_hdr); diff --git a/src/rdavl.c b/src/rdavl.c index 083deab017..f25251de8e 100644 --- a/src/rdavl.c +++ b/src/rdavl.c @@ -36,46 +36,43 @@ #define RD_AVL_NODE_HEIGHT(ran) ((ran) ? (ran)->ran_height : 0) -#define RD_AVL_NODE_DELTA(ran) \ - (RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_LEFT]) - \ +#define RD_AVL_NODE_DELTA(ran) \ + (RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_LEFT]) - \ RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_RIGHT])) #define RD_DELTA_MAX 1 -static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran); +static rd_avl_node_t *rd_avl_balance_node(rd_avl_node_t *ran); -static rd_avl_node_t *rd_avl_rotate (rd_avl_node_t *ran, rd_avl_dir_t dir) { +static rd_avl_node_t *rd_avl_rotate(rd_avl_node_t *ran, rd_avl_dir_t dir) { rd_avl_node_t *n; - static const rd_avl_dir_t odirmap[] = { /* opposite direction map */ - [RD_AVL_RIGHT] = RD_AVL_LEFT, - [RD_AVL_LEFT] = RD_AVL_RIGHT - }; - const int odir = odirmap[dir]; + static const rd_avl_dir_t odirmap[] = {/* opposite direction map */ + [RD_AVL_RIGHT] = RD_AVL_LEFT, + [RD_AVL_LEFT] = RD_AVL_RIGHT}; + const int odir = odirmap[dir]; - n = ran->ran_p[odir]; + n = ran->ran_p[odir]; ran->ran_p[odir] = n->ran_p[dir]; - n->ran_p[dir] = rd_avl_balance_node(ran); + n->ran_p[dir] = rd_avl_balance_node(ran); return rd_avl_balance_node(n); } -static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) { +static rd_avl_node_t *rd_avl_balance_node(rd_avl_node_t *ran) { const int d = RD_AVL_NODE_DELTA(ran); int h; if (d < -RD_DELTA_MAX) { if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_RIGHT]) > 0) - ran->ran_p[RD_AVL_RIGHT] = - rd_avl_rotate(ran->ran_p[RD_AVL_RIGHT], - RD_AVL_RIGHT); + ran->ran_p[RD_AVL_RIGHT] = rd_avl_rotate( + ran->ran_p[RD_AVL_RIGHT], RD_AVL_RIGHT); return rd_avl_rotate(ran, RD_AVL_LEFT); } else if (d > RD_DELTA_MAX) { if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_LEFT]) < 0) ran->ran_p[RD_AVL_LEFT] = - rd_avl_rotate(ran->ran_p[RD_AVL_LEFT], - RD_AVL_LEFT); + 
rd_avl_rotate(ran->ran_p[RD_AVL_LEFT], RD_AVL_LEFT); return rd_avl_rotate(ran, RD_AVL_RIGHT); } @@ -85,7 +82,8 @@ static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) { if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_LEFT])) > ran->ran_height) ran->ran_height = h; - if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_RIGHT])) >ran->ran_height) + if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_RIGHT])) > + ran->ran_height) ran->ran_height = h; ran->ran_height++; @@ -93,10 +91,10 @@ static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) { return ran; } -rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl, - rd_avl_node_t *parent, - rd_avl_node_t *ran, - rd_avl_node_t **existing) { +rd_avl_node_t *rd_avl_insert_node(rd_avl_t *ravl, + rd_avl_node_t *parent, + rd_avl_node_t *ran, + rd_avl_node_t **existing) { rd_avl_dir_t dir; int r; @@ -105,10 +103,10 @@ rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl, if ((r = ravl->ravl_cmp(ran->ran_elm, parent->ran_elm)) == 0) { /* Replace existing node with new one. 
*/ - ran->ran_p[RD_AVL_LEFT] = parent->ran_p[RD_AVL_LEFT]; + ran->ran_p[RD_AVL_LEFT] = parent->ran_p[RD_AVL_LEFT]; ran->ran_p[RD_AVL_RIGHT] = parent->ran_p[RD_AVL_RIGHT]; - ran->ran_height = parent->ran_height; - *existing = parent; + ran->ran_height = parent->ran_height; + *existing = parent; return ran; } @@ -117,14 +115,14 @@ rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl, else dir = RD_AVL_RIGHT; - parent->ran_p[dir] = rd_avl_insert_node(ravl, parent->ran_p[dir], - ran, existing); + parent->ran_p[dir] = + rd_avl_insert_node(ravl, parent->ran_p[dir], ran, existing); return rd_avl_balance_node(parent); } -static rd_avl_node_t *rd_avl_move (rd_avl_node_t *dst, rd_avl_node_t *src, - rd_avl_dir_t dir) { +static rd_avl_node_t * +rd_avl_move(rd_avl_node_t *dst, rd_avl_node_t *src, rd_avl_dir_t dir) { if (!dst) return src; @@ -134,11 +132,10 @@ static rd_avl_node_t *rd_avl_move (rd_avl_node_t *dst, rd_avl_node_t *src, return rd_avl_balance_node(dst); } -static rd_avl_node_t *rd_avl_remove_node0 (rd_avl_node_t *ran) { +static rd_avl_node_t *rd_avl_remove_node0(rd_avl_node_t *ran) { rd_avl_node_t *tmp; - tmp = rd_avl_move(ran->ran_p[RD_AVL_LEFT], - ran->ran_p[RD_AVL_RIGHT], + tmp = rd_avl_move(ran->ran_p[RD_AVL_LEFT], ran->ran_p[RD_AVL_RIGHT], RD_AVL_RIGHT); ran->ran_p[RD_AVL_LEFT] = ran->ran_p[RD_AVL_RIGHT] = NULL; @@ -146,8 +143,8 @@ static rd_avl_node_t *rd_avl_remove_node0 (rd_avl_node_t *ran) { } -rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent, - const void *elm) { +rd_avl_node_t * +rd_avl_remove_elm0(rd_avl_t *ravl, rd_avl_node_t *parent, const void *elm) { rd_avl_dir_t dir; int r; @@ -157,22 +154,21 @@ rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent, if ((r = ravl->ravl_cmp(elm, parent->ran_elm)) == 0) return rd_avl_remove_node0(parent); - else if (r < 0) + else if (r < 0) dir = RD_AVL_LEFT; else /* > 0 */ dir = RD_AVL_RIGHT; - parent->ran_p[dir] = - rd_avl_remove_elm0(ravl, parent->ran_p[dir], elm); + 
parent->ran_p[dir] = rd_avl_remove_elm0(ravl, parent->ran_p[dir], elm); return rd_avl_balance_node(parent); } -rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl, - const rd_avl_node_t *begin, - const void *elm) { +rd_avl_node_t *rd_avl_find_node(const rd_avl_t *ravl, + const rd_avl_node_t *begin, + const void *elm) { int r; if (!begin) @@ -187,7 +183,7 @@ rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl, -void rd_avl_destroy (rd_avl_t *ravl) { +void rd_avl_destroy(rd_avl_t *ravl) { if (ravl->ravl_flags & RD_AVL_F_LOCKS) rwlock_destroy(&ravl->ravl_rwlock); @@ -195,7 +191,7 @@ void rd_avl_destroy (rd_avl_t *ravl) { rd_free(ravl); } -rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) { +rd_avl_t *rd_avl_init(rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) { if (!ravl) { ravl = rd_calloc(1, sizeof(*ravl)); @@ -205,7 +201,7 @@ rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) { } ravl->ravl_flags = flags; - ravl->ravl_cmp = cmp; + ravl->ravl_cmp = cmp; if (flags & RD_AVL_F_LOCKS) rwlock_init(&ravl->ravl_rwlock); diff --git a/src/rdavl.h b/src/rdavl.h index 0c6e1871ec..f3e539242b 100644 --- a/src/rdavl.h +++ b/src/rdavl.h @@ -49,13 +49,13 @@ typedef enum { * provide it as the 'field' argument in the API below. */ typedef struct rd_avl_node_s { - struct rd_avl_node_s *ran_p[2]; /* RD_AVL_LEFT and RD_AVL_RIGHT */ - int ran_height; /* Sub-tree height */ - void *ran_elm; /* Backpointer to the containing - * element. This could be considered - * costly but is convenient for the - * caller: RAM is cheap, - * development time isn't*/ + struct rd_avl_node_s *ran_p[2]; /* RD_AVL_LEFT and RD_AVL_RIGHT */ + int ran_height; /* Sub-tree height */ + void *ran_elm; /* Backpointer to the containing + * element. This could be considered + * costly but is convenient for the + * caller: RAM is cheap, + * development time isn't*/ } rd_avl_node_t; @@ -63,24 +63,23 @@ typedef struct rd_avl_node_s { /** * Per-AVL application-provided element comparator. 
*/ -typedef int (*rd_avl_cmp_t) (const void *, const void *); +typedef int (*rd_avl_cmp_t)(const void *, const void *); /** * AVL tree */ typedef struct rd_avl_s { - rd_avl_node_t *ravl_root; /* Root node */ - rd_avl_cmp_t ravl_cmp; /* Comparator */ - int ravl_flags; /* Flags */ -#define RD_AVL_F_LOCKS 0x1 /* Enable thread-safeness */ -#define RD_AVL_F_OWNER 0x2 /* internal: rd_avl_init() allocated ravl */ - rwlock_t ravl_rwlock; /* Mutex when .._F_LOCKS is set. */ + rd_avl_node_t *ravl_root; /* Root node */ + rd_avl_cmp_t ravl_cmp; /* Comparator */ + int ravl_flags; /* Flags */ +#define RD_AVL_F_LOCKS 0x1 /* Enable thread-safeness */ +#define RD_AVL_F_OWNER 0x2 /* internal: rd_avl_init() allocated ravl */ + rwlock_t ravl_rwlock; /* Mutex when .._F_LOCKS is set. */ } rd_avl_t; - /** * * @@ -94,21 +93,18 @@ typedef struct rd_avl_s { * In case of collision the previous entry is overwritten by the * new one and the previous element is returned, else NULL. */ -#define RD_AVL_INSERT(ravl,elm,field) \ - rd_avl_insert(ravl, elm, &(elm)->field) +#define RD_AVL_INSERT(ravl, elm, field) rd_avl_insert(ravl, elm, &(elm)->field) /** * Remove element by matching value 'elm' using compare function. */ -#define RD_AVL_REMOVE_ELM(ravl,elm) \ - rd_avl_remove_elm(ravl, elm) +#define RD_AVL_REMOVE_ELM(ravl, elm) rd_avl_remove_elm(ravl, elm) /** * Search for (by value using compare function) and return matching elm. */ -#define RD_AVL_FIND(ravl,elm) \ - rd_avl_find(ravl, elm, 1) +#define RD_AVL_FIND(ravl, elm) rd_avl_find(ravl, elm, 1) /** @@ -118,7 +114,7 @@ typedef struct rd_avl_s { * * NOTE: rd_avl_wrlock() must be held. */ -#define RD_AVL_FIND_NL(ravl,elm) \ +#define RD_AVL_FIND_NL(ravl, elm) \ rd_avl_find_node(ravl, (ravl)->ravl_root, elm, 0) @@ -127,32 +123,31 @@ typedef struct rd_avl_s { * * NOTE: rd_avl_wrlock() must be held. 
*/ -#define RD_AVL_FIND_NODE_NL(ravl,elm) \ - rd_avl_find(ravl, elm, 0) +#define RD_AVL_FIND_NODE_NL(ravl, elm) rd_avl_find(ravl, elm, 0) /** * Changes the element pointer for an existing AVL node in the tree. - * The new element must be identical (according to the comparator) + * The new element must be identical (according to the comparator) * to the previous element. * * NOTE: rd_avl_wrlock() must be held. */ -#define RD_AVL_ELM_SET_NL(ran,elm) ((ran)->ran_elm = (elm)) +#define RD_AVL_ELM_SET_NL(ran, elm) ((ran)->ran_elm = (elm)) /** * Returns the current element pointer for an existing AVL node in the tree - * + * * NOTE: rd_avl_*lock() must be held. */ -#define RD_AVL_ELM_GET_NL(ran) ((ran)->ran_elm) +#define RD_AVL_ELM_GET_NL(ran) ((ran)->ran_elm) /** * Destroy previously initialized (by rd_avl_init()) AVL tree. */ -void rd_avl_destroy (rd_avl_t *ravl); +void rd_avl_destroy(rd_avl_t *ravl); /** * Initialize (and optionally allocate if 'ravl' is NULL) AVL tree. @@ -162,7 +157,7 @@ void rd_avl_destroy (rd_avl_t *ravl); * * For thread-safe AVL trees supply RD_AVL_F_LOCKS in 'flags'. */ -rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags); +rd_avl_t *rd_avl_init(rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags); /** @@ -173,71 +168,70 @@ rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags); * * rdavl utilizes rwlocks to allow multiple concurrent read threads. 
*/ -static RD_INLINE RD_UNUSED void rd_avl_rdlock (rd_avl_t *ravl) { +static RD_INLINE RD_UNUSED void rd_avl_rdlock(rd_avl_t *ravl) { if (ravl->ravl_flags & RD_AVL_F_LOCKS) rwlock_rdlock(&ravl->ravl_rwlock); } -static RD_INLINE RD_UNUSED void rd_avl_wrlock (rd_avl_t *ravl) { +static RD_INLINE RD_UNUSED void rd_avl_wrlock(rd_avl_t *ravl) { if (ravl->ravl_flags & RD_AVL_F_LOCKS) rwlock_wrlock(&ravl->ravl_rwlock); } -static RD_INLINE RD_UNUSED void rd_avl_rdunlock (rd_avl_t *ravl) { +static RD_INLINE RD_UNUSED void rd_avl_rdunlock(rd_avl_t *ravl) { if (ravl->ravl_flags & RD_AVL_F_LOCKS) rwlock_rdunlock(&ravl->ravl_rwlock); } -static RD_INLINE RD_UNUSED void rd_avl_wrunlock (rd_avl_t *ravl) { +static RD_INLINE RD_UNUSED void rd_avl_wrunlock(rd_avl_t *ravl) { if (ravl->ravl_flags & RD_AVL_F_LOCKS) rwlock_wrunlock(&ravl->ravl_rwlock); } - /** * Private API, dont use directly. */ -rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl, - rd_avl_node_t *parent, - rd_avl_node_t *ran, - rd_avl_node_t **existing); +rd_avl_node_t *rd_avl_insert_node(rd_avl_t *ravl, + rd_avl_node_t *parent, + rd_avl_node_t *ran, + rd_avl_node_t **existing); -static RD_UNUSED void *rd_avl_insert (rd_avl_t *ravl, void *elm, - rd_avl_node_t *ran) { +static RD_UNUSED void * +rd_avl_insert(rd_avl_t *ravl, void *elm, rd_avl_node_t *ran) { rd_avl_node_t *existing = NULL; memset(ran, 0, sizeof(*ran)); ran->ran_elm = elm; rd_avl_wrlock(ravl); - ravl->ravl_root = rd_avl_insert_node(ravl, ravl->ravl_root, - ran, &existing); + ravl->ravl_root = + rd_avl_insert_node(ravl, ravl->ravl_root, ran, &existing); rd_avl_wrunlock(ravl); return existing ? 
existing->ran_elm : NULL; } -rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent, - const void *elm); +rd_avl_node_t * +rd_avl_remove_elm0(rd_avl_t *ravl, rd_avl_node_t *parent, const void *elm); -static RD_INLINE RD_UNUSED -void rd_avl_remove_elm (rd_avl_t *ravl, const void *elm) { +static RD_INLINE RD_UNUSED void rd_avl_remove_elm(rd_avl_t *ravl, + const void *elm) { rd_avl_wrlock(ravl); ravl->ravl_root = rd_avl_remove_elm0(ravl, ravl->ravl_root, elm); rd_avl_wrunlock(ravl); } -rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl, - const rd_avl_node_t *begin, - const void *elm); +rd_avl_node_t *rd_avl_find_node(const rd_avl_t *ravl, + const rd_avl_node_t *begin, + const void *elm); -static RD_INLINE RD_UNUSED void *rd_avl_find (rd_avl_t *ravl, const void *elm, - int dolock) { +static RD_INLINE RD_UNUSED void * +rd_avl_find(rd_avl_t *ravl, const void *elm, int dolock) { const rd_avl_node_t *ran; void *ret; diff --git a/src/rdbuf.c b/src/rdbuf.c index 2652c223e7..1392cf7b18 100644 --- a/src/rdbuf.c +++ b/src/rdbuf.c @@ -36,7 +36,7 @@ static size_t -rd_buf_get_writable0 (rd_buf_t *rbuf, rd_segment_t **segp, void **p); +rd_buf_get_writable0(rd_buf_t *rbuf, rd_segment_t **segp, void **p); /** @@ -44,7 +44,7 @@ rd_buf_get_writable0 (rd_buf_t *rbuf, rd_segment_t **segp, void **p); * * @remark Will NOT unlink from buffer. */ -static void rd_segment_destroy (rd_segment_t *seg) { +static void rd_segment_destroy(rd_segment_t *seg) { /* Free payload */ if (seg->seg_free && seg->seg_p) seg->seg_free(seg->seg_p); @@ -58,10 +58,10 @@ static void rd_segment_destroy (rd_segment_t *seg) { * and backing memory size. * @remark The segment is NOT linked. 
*/ -static void rd_segment_init (rd_segment_t *seg, void *mem, size_t size) { +static void rd_segment_init(rd_segment_t *seg, void *mem, size_t size) { memset(seg, 0, sizeof(*seg)); - seg->seg_p = mem; - seg->seg_size = size; + seg->seg_p = mem; + seg->seg_size = size; } @@ -71,12 +71,12 @@ static void rd_segment_init (rd_segment_t *seg, void *mem, size_t size) { * @remark Will set the buffer position to the new \p seg if no existing wpos. * @remark Will set the segment seg_absof to the current length of the buffer. */ -static rd_segment_t *rd_buf_append_segment (rd_buf_t *rbuf, rd_segment_t *seg) { +static rd_segment_t *rd_buf_append_segment(rd_buf_t *rbuf, rd_segment_t *seg) { TAILQ_INSERT_TAIL(&rbuf->rbuf_segments, seg, seg_link); rbuf->rbuf_segment_cnt++; - seg->seg_absof = rbuf->rbuf_len; - rbuf->rbuf_len += seg->seg_of; - rbuf->rbuf_size += seg->seg_size; + seg->seg_absof = rbuf->rbuf_len; + rbuf->rbuf_len += seg->seg_of; + rbuf->rbuf_size += seg->seg_size; /* Update writable position */ if (!rbuf->rbuf_wpos) @@ -89,14 +89,13 @@ static rd_segment_t *rd_buf_append_segment (rd_buf_t *rbuf, rd_segment_t *seg) { - /** * @brief Attempt to allocate \p size bytes from the buffers extra buffers. * @returns the allocated pointer which MUST NOT be freed, or NULL if * not enough memory. * @remark the returned pointer is memory-aligned to be safe. */ -static void *extra_alloc (rd_buf_t *rbuf, size_t size) { +static void *extra_alloc(rd_buf_t *rbuf, size_t size) { size_t of = RD_ROUNDUP(rbuf->rbuf_extra_len, 8); /* FIXME: 32-bit */ void *p; @@ -118,15 +117,14 @@ static void *extra_alloc (rd_buf_t *rbuf, size_t size) { * * Will not append the segment to the buffer. 
*/ -static rd_segment_t * -rd_buf_alloc_segment0 (rd_buf_t *rbuf, size_t size) { +static rd_segment_t *rd_buf_alloc_segment0(rd_buf_t *rbuf, size_t size) { rd_segment_t *seg; /* See if there is enough room in the extra buffer for * allocating the segment header and the buffer, * or just the segment header, else fall back to malloc. */ if ((seg = extra_alloc(rbuf, sizeof(*seg) + size))) { - rd_segment_init(seg, size > 0 ? seg+1 : NULL, size); + rd_segment_init(seg, size > 0 ? seg + 1 : NULL, size); } else if ((seg = extra_alloc(rbuf, sizeof(*seg)))) { rd_segment_init(seg, size > 0 ? rd_malloc(size) : NULL, size); @@ -134,7 +132,7 @@ rd_buf_alloc_segment0 (rd_buf_t *rbuf, size_t size) { seg->seg_free = rd_free; } else if ((seg = rd_malloc(sizeof(*seg) + size))) { - rd_segment_init(seg, size > 0 ? seg+1 : NULL, size); + rd_segment_init(seg, size > 0 ? seg + 1 : NULL, size); seg->seg_flags |= RD_SEGMENT_F_FREE; } else @@ -153,14 +151,13 @@ rd_buf_alloc_segment0 (rd_buf_t *rbuf, size_t size) { * (max_size == 0 or max_size > min_size). */ static rd_segment_t * -rd_buf_alloc_segment (rd_buf_t *rbuf, size_t min_size, size_t max_size) { +rd_buf_alloc_segment(rd_buf_t *rbuf, size_t min_size, size_t max_size) { rd_segment_t *seg; /* Over-allocate if allowed. */ if (min_size != max_size || max_size == 0) max_size = RD_MAX(sizeof(*seg) * 4, - RD_MAX(min_size * 2, - rbuf->rbuf_size / 2)); + RD_MAX(min_size * 2, rbuf->rbuf_size / 2)); seg = rd_buf_alloc_segment0(rbuf, max_size); @@ -175,7 +172,7 @@ rd_buf_alloc_segment (rd_buf_t *rbuf, size_t min_size, size_t max_size) { * for writing and the position will be updated to point to the * start of this contiguous block. 
*/ -void rd_buf_write_ensure_contig (rd_buf_t *rbuf, size_t size) { +void rd_buf_write_ensure_contig(rd_buf_t *rbuf, size_t size) { rd_segment_t *seg = rbuf->rbuf_wpos; if (seg) { @@ -200,11 +197,10 @@ void rd_buf_write_ensure_contig (rd_buf_t *rbuf, size_t size) { * * Typically used prior to a call to rd_buf_get_write_iov() */ -void rd_buf_write_ensure (rd_buf_t *rbuf, size_t min_size, size_t max_size) { +void rd_buf_write_ensure(rd_buf_t *rbuf, size_t min_size, size_t max_size) { size_t remains; while ((remains = rd_buf_write_remains(rbuf)) < min_size) - rd_buf_alloc_segment(rbuf, - min_size - remains, + rd_buf_alloc_segment(rbuf, min_size - remains, max_size ? max_size - remains : 0); } @@ -215,9 +211,9 @@ void rd_buf_write_ensure (rd_buf_t *rbuf, size_t min_size, size_t max_size) { * @remark \p hint is an optional segment where to start looking, such as * the current write or read position. */ -rd_segment_t * -rd_buf_get_segment_at_offset (const rd_buf_t *rbuf, const rd_segment_t *hint, - size_t absof) { +rd_segment_t *rd_buf_get_segment_at_offset(const rd_buf_t *rbuf, + const rd_segment_t *hint, + size_t absof) { const rd_segment_t *seg = hint; if (unlikely(absof >= rbuf->rbuf_len)) @@ -255,8 +251,8 @@ rd_buf_get_segment_at_offset (const rd_buf_t *rbuf, const rd_segment_t *hint, * @remark The seg_free callback is retained on the original \p seg * and is not copied to the new segment, but flags are copied. 
*/ -static rd_segment_t *rd_segment_split (rd_buf_t *rbuf, rd_segment_t *seg, - size_t absof) { +static rd_segment_t * +rd_segment_split(rd_buf_t *rbuf, rd_segment_t *seg, size_t absof) { rd_segment_t *newseg; size_t relof; @@ -269,39 +265,37 @@ static rd_segment_t *rd_segment_split (rd_buf_t *rbuf, rd_segment_t *seg, newseg = rd_buf_alloc_segment0(rbuf, 0); /* Add later part of split bytes to new segment */ - newseg->seg_p = seg->seg_p+relof; - newseg->seg_of = seg->seg_of-relof; - newseg->seg_size = seg->seg_size-relof; - newseg->seg_absof = SIZE_MAX; /* Invalid */ + newseg->seg_p = seg->seg_p + relof; + newseg->seg_of = seg->seg_of - relof; + newseg->seg_size = seg->seg_size - relof; + newseg->seg_absof = SIZE_MAX; /* Invalid */ newseg->seg_flags |= seg->seg_flags; /* Remove earlier part of split bytes from previous segment */ - seg->seg_of = relof; - seg->seg_size = relof; + seg->seg_of = relof; + seg->seg_size = relof; /* newseg's length will be added to rbuf_len in append_segment(), * so shave it off here from seg's perspective. */ - rbuf->rbuf_len -= newseg->seg_of; - rbuf->rbuf_size -= newseg->seg_size; + rbuf->rbuf_len -= newseg->seg_of; + rbuf->rbuf_size -= newseg->seg_size; return newseg; } - /** * @brief Unlink and destroy a segment, updating the \p rbuf * with the decrease in length and capacity. 
*/ -static void rd_buf_destroy_segment (rd_buf_t *rbuf, rd_segment_t *seg) { - rd_assert(rbuf->rbuf_segment_cnt > 0 && - rbuf->rbuf_len >= seg->seg_of && +static void rd_buf_destroy_segment(rd_buf_t *rbuf, rd_segment_t *seg) { + rd_assert(rbuf->rbuf_segment_cnt > 0 && rbuf->rbuf_len >= seg->seg_of && rbuf->rbuf_size >= seg->seg_size); TAILQ_REMOVE(&rbuf->rbuf_segments, seg, seg_link); rbuf->rbuf_segment_cnt--; - rbuf->rbuf_len -= seg->seg_of; + rbuf->rbuf_len -= seg->seg_of; rbuf->rbuf_size -= seg->seg_size; if (rbuf->rbuf_wpos == seg) rbuf->rbuf_wpos = NULL; @@ -314,17 +308,18 @@ static void rd_buf_destroy_segment (rd_buf_t *rbuf, rd_segment_t *seg) { * @brief Free memory associated with the \p rbuf, but not the rbuf itself. * Segments will be destroyed. */ -void rd_buf_destroy (rd_buf_t *rbuf) { +void rd_buf_destroy(rd_buf_t *rbuf) { rd_segment_t *seg, *tmp; #if ENABLE_DEVEL /* FIXME */ if (rbuf->rbuf_len > 0 && 0) { size_t overalloc = rbuf->rbuf_size - rbuf->rbuf_len; - float fill_grade = (float)rbuf->rbuf_len / - (float)rbuf->rbuf_size; + float fill_grade = + (float)rbuf->rbuf_len / (float)rbuf->rbuf_size; - printf("fill grade: %.2f%% (%"PRIusz" bytes over-allocated)\n", + printf("fill grade: %.2f%% (%" PRIusz + " bytes over-allocated)\n", fill_grade * 100.0f, overalloc); } #endif @@ -332,7 +327,6 @@ void rd_buf_destroy (rd_buf_t *rbuf) { TAILQ_FOREACH_SAFE(seg, &rbuf->rbuf_segments, seg_link, tmp) { rd_segment_destroy(seg); - } if (rbuf->rbuf_extra) @@ -343,7 +337,7 @@ void rd_buf_destroy (rd_buf_t *rbuf) { /** * @brief Same as rd_buf_destroy() but also frees the \p rbuf itself. */ -void rd_buf_destroy_free (rd_buf_t *rbuf) { +void rd_buf_destroy_free(rd_buf_t *rbuf) { rd_buf_destroy(rbuf); rd_free(rbuf); } @@ -354,7 +348,7 @@ void rd_buf_destroy_free (rd_buf_t *rbuf) { * * The caller may rearrange the backing memory as it see fits. 
*/ -void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) { +void rd_buf_init(rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) { size_t totalloc = 0; memset(rbuf, 0, sizeof(*rbuf)); @@ -374,7 +368,7 @@ void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) { totalloc += buf_size; rbuf->rbuf_extra_size = totalloc; - rbuf->rbuf_extra = rd_malloc(rbuf->rbuf_extra_size); + rbuf->rbuf_extra = rd_malloc(rbuf->rbuf_extra_size); } @@ -382,7 +376,7 @@ void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) { * @brief Allocates a buffer object and initializes it. * @sa rd_buf_init() */ -rd_buf_t *rd_buf_new (size_t fixed_seg_cnt, size_t buf_size) { +rd_buf_t *rd_buf_new(size_t fixed_seg_cnt, size_t buf_size) { rd_buf_t *rbuf = rd_malloc(sizeof(*rbuf)); rd_buf_init(rbuf, fixed_seg_cnt, buf_size); return rbuf; @@ -399,10 +393,10 @@ rd_buf_t *rd_buf_new (size_t fixed_seg_cnt, size_t buf_size) { * and sets \p *p to point to the start of the memory region. */ static size_t -rd_buf_get_writable0 (rd_buf_t *rbuf, rd_segment_t **segp, void **p) { +rd_buf_get_writable0(rd_buf_t *rbuf, rd_segment_t **segp, void **p) { rd_segment_t *seg; - for (seg = rbuf->rbuf_wpos ; seg ; seg = TAILQ_NEXT(seg, seg_link)) { + for (seg = rbuf->rbuf_wpos; seg; seg = TAILQ_NEXT(seg, seg_link)) { size_t len = rd_segment_write_remains(seg, p); /* Even though the write offset hasn't changed we @@ -428,14 +422,13 @@ rd_buf_get_writable0 (rd_buf_t *rbuf, rd_segment_t **segp, void **p) { return 0; } -size_t rd_buf_get_writable (rd_buf_t *rbuf, void **p) { +size_t rd_buf_get_writable(rd_buf_t *rbuf, void **p) { rd_segment_t *seg; return rd_buf_get_writable0(rbuf, &seg, p); } - /** * @brief Write \p payload of \p size bytes to current position * in buffer. A new segment will be allocated and appended @@ -452,7 +445,7 @@ size_t rd_buf_get_writable (rd_buf_t *rbuf, void **p) { * uninitialized memory in any new segments allocated from this * function). 
*/ -size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size) { +size_t rd_buf_write(rd_buf_t *rbuf, const void *payload, size_t size) { size_t remains = size; size_t initial_absof; const char *psrc = (const char *)payload; @@ -463,24 +456,24 @@ size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size) { rd_buf_write_ensure(rbuf, size, 0); while (remains > 0) { - void *p = NULL; + void *p = NULL; rd_segment_t *seg = NULL; size_t segremains = rd_buf_get_writable0(rbuf, &seg, &p); - size_t wlen = RD_MIN(remains, segremains); + size_t wlen = RD_MIN(remains, segremains); rd_dassert(seg == rbuf->rbuf_wpos); rd_dassert(wlen > 0); - rd_dassert(seg->seg_p+seg->seg_of <= (char *)p && - (char *)p < seg->seg_p+seg->seg_size); + rd_dassert(seg->seg_p + seg->seg_of <= (char *)p && + (char *)p < seg->seg_p + seg->seg_size); if (payload) { memcpy(p, psrc, wlen); psrc += wlen; } - seg->seg_of += wlen; + seg->seg_of += wlen; rbuf->rbuf_len += wlen; - remains -= wlen; + remains -= wlen; } rd_assert(remains == 0); @@ -497,7 +490,7 @@ size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size) { * * @returns the number of bytes witten (always slice length) */ -size_t rd_buf_write_slice (rd_buf_t *rbuf, rd_slice_t *slice) { +size_t rd_buf_write_slice(rd_buf_t *rbuf, rd_slice_t *slice) { const void *p; size_t rlen; size_t sum = 0; @@ -524,8 +517,10 @@ size_t rd_buf_write_slice (rd_buf_t *rbuf, rd_slice_t *slice) { * @returns the number of bytes written, which may be less than \p size * if the update spans multiple segments. 
*/ -static size_t rd_segment_write_update (rd_segment_t *seg, size_t absof, - const void *payload, size_t size) { +static size_t rd_segment_write_update(rd_segment_t *seg, + size_t absof, + const void *payload, + size_t size) { size_t relof; size_t wlen; @@ -535,7 +530,7 @@ static size_t rd_segment_write_update (rd_segment_t *seg, size_t absof, wlen = RD_MIN(size, seg->seg_of - relof); rd_dassert(relof + wlen <= seg->seg_of); - memcpy(seg->seg_p+relof, payload, wlen); + memcpy(seg->seg_p + relof, payload, wlen); return wlen; } @@ -549,8 +544,10 @@ static size_t rd_segment_write_update (rd_segment_t *seg, size_t absof, * This is used to update a previously written region, such * as updating the header length. */ -size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof, - const void *payload, size_t size) { +size_t rd_buf_write_update(rd_buf_t *rbuf, + size_t absof, + const void *payload, + size_t size) { rd_segment_t *seg; const char *psrc = (const char *)payload; size_t of; @@ -559,10 +556,10 @@ size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof, seg = rd_buf_get_segment_at_offset(rbuf, rbuf->rbuf_wpos, absof); rd_assert(seg && *"invalid absolute offset"); - for (of = 0 ; of < size ; seg = TAILQ_NEXT(seg, seg_link)) { + for (of = 0; of < size; seg = TAILQ_NEXT(seg, seg_link)) { rd_assert(seg->seg_absof <= rd_buf_len(rbuf)); - size_t wlen = rd_segment_write_update(seg, absof+of, - psrc+of, size-of); + size_t wlen = rd_segment_write_update(seg, absof + of, + psrc + of, size - of); of += wlen; } @@ -576,24 +573,26 @@ size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof, /** * @brief Push reference memory segment to current write position. 
*/ -void rd_buf_push0 (rd_buf_t *rbuf, const void *payload, size_t size, - void (*free_cb)(void *), rd_bool_t writable) { +void rd_buf_push0(rd_buf_t *rbuf, + const void *payload, + size_t size, + void (*free_cb)(void *), + rd_bool_t writable) { rd_segment_t *prevseg, *seg, *tailseg = NULL; if ((prevseg = rbuf->rbuf_wpos) && rd_segment_write_remains(prevseg, NULL) > 0) { /* If the current segment still has room in it split it * and insert the pushed segment in the middle (below). */ - tailseg = rd_segment_split(rbuf, prevseg, - prevseg->seg_absof + - prevseg->seg_of); + tailseg = rd_segment_split( + rbuf, prevseg, prevseg->seg_absof + prevseg->seg_of); } - seg = rd_buf_alloc_segment0(rbuf, 0); - seg->seg_p = (char *)payload; - seg->seg_size = size; - seg->seg_of = size; - seg->seg_free = free_cb; + seg = rd_buf_alloc_segment0(rbuf, 0); + seg->seg_p = (char *)payload; + seg->seg_size = size; + seg->seg_of = size; + seg->seg_free = free_cb; if (!writable) seg->seg_flags |= RD_SEGMENT_F_RDONLY; @@ -612,7 +611,7 @@ void rd_buf_push0 (rd_buf_t *rbuf, const void *payload, size_t size, * * @remark This is costly since it forces a memory move. */ -size_t rd_buf_erase (rd_buf_t *rbuf, size_t absof, size_t size) { +size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size) { rd_segment_t *seg, *next = NULL; size_t of; @@ -621,7 +620,7 @@ size_t rd_buf_erase (rd_buf_t *rbuf, size_t absof, size_t size) { /* Adjust segments until size is exhausted, then continue scanning to * update the absolute offset. 
*/ - for (of = 0 ; seg && of < size ; seg = next) { + for (of = 0; seg && of < size; seg = next) { /* Example: * seg_absof = 10 * seg_of = 7 @@ -657,7 +656,7 @@ size_t rd_buf_erase (rd_buf_t *rbuf, size_t absof, size_t size) { RD_BUG("rd_buf_erase() called on read-only segment"); if (likely(segremains > 0)) - memmove(seg->seg_p+rof, seg->seg_p+rof+toerase, + memmove(seg->seg_p + rof, seg->seg_p + rof + toerase, segremains); seg->seg_of -= toerase; @@ -671,7 +670,7 @@ size_t rd_buf_erase (rd_buf_t *rbuf, size_t absof, size_t size) { } /* Update absolute offset of remaining segments */ - for (seg = next ; seg ; seg = TAILQ_NEXT(seg, seg_link)) { + for (seg = next; seg; seg = TAILQ_NEXT(seg, seg_link)) { rd_assert(seg->seg_absof >= of); seg->seg_absof -= of; } @@ -683,7 +682,6 @@ size_t rd_buf_erase (rd_buf_t *rbuf, size_t absof, size_t size) { - /** * @brief Do a write-seek, updating the write position to the given * absolute \p absof. @@ -692,7 +690,7 @@ size_t rd_buf_erase (rd_buf_t *rbuf, size_t absof, size_t size) { * * @returns -1 if the offset is out of bounds, else 0. */ -int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof) { +int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof) { rd_segment_t *seg, *next; size_t relof; @@ -707,17 +705,17 @@ int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof) { /* Destroy sub-sequent segments in reverse order so that * destroy_segment() length checks are correct. * Will decrement rbuf_len et.al. 
*/ - for (next = TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head) ; - next != seg ; ) { + for (next = TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head); + next != seg;) { rd_segment_t *this = next; next = TAILQ_PREV(this, rd_segment_head, seg_link); rd_buf_destroy_segment(rbuf, this); } /* Update relative write offset */ - seg->seg_of = relof; - rbuf->rbuf_wpos = seg; - rbuf->rbuf_len = seg->seg_absof + seg->seg_of; + seg->seg_of = relof; + rbuf->rbuf_wpos = seg; + rbuf->rbuf_len = seg->seg_absof + seg->seg_of; rd_assert(rbuf->rbuf_len == absof); @@ -738,15 +736,16 @@ int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof) { * * @remark the write position will NOT be updated. */ -size_t rd_buf_get_write_iov (const rd_buf_t *rbuf, - struct iovec *iovs, size_t *iovcntp, - size_t iov_max, size_t size_max) { +size_t rd_buf_get_write_iov(const rd_buf_t *rbuf, + struct iovec *iovs, + size_t *iovcntp, + size_t iov_max, + size_t size_max) { const rd_segment_t *seg; size_t iovcnt = 0; - size_t sum = 0; + size_t sum = 0; - for (seg = rbuf->rbuf_wpos ; - seg && iovcnt < iov_max && sum < size_max ; + for (seg = rbuf->rbuf_wpos; seg && iovcnt < iov_max && sum < size_max; seg = TAILQ_NEXT(seg, seg_link)) { size_t len; void *p; @@ -768,14 +767,6 @@ size_t rd_buf_get_write_iov (const rd_buf_t *rbuf, - - - - - - - - /** * @name Slice reader interface * @@ -789,20 +780,23 @@ size_t rd_buf_get_write_iov (const rd_buf_t *rbuf, * @returns 0 on success or -1 if there is not at least \p size bytes available * in the buffer. */ -int rd_slice_init_seg (rd_slice_t *slice, const rd_buf_t *rbuf, - const rd_segment_t *seg, size_t rof, size_t size) { +int rd_slice_init_seg(rd_slice_t *slice, + const rd_buf_t *rbuf, + const rd_segment_t *seg, + size_t rof, + size_t size) { /* Verify that \p size bytes are indeed available in the buffer. 
*/ if (unlikely(rbuf->rbuf_len < (seg->seg_absof + rof + size))) return -1; - slice->buf = rbuf; - slice->seg = seg; - slice->rof = rof; - slice->start = seg->seg_absof + rof; - slice->end = slice->start + size; + slice->buf = rbuf; + slice->seg = seg; + slice->rof = rof; + slice->start = seg->seg_absof + rof; + slice->end = slice->start + size; - rd_assert(seg->seg_absof+rof >= slice->start && - seg->seg_absof+rof <= slice->end); + rd_assert(seg->seg_absof + rof >= slice->start && + seg->seg_absof + rof <= slice->end); rd_assert(slice->end <= rd_buf_len(rbuf)); @@ -815,21 +809,23 @@ int rd_slice_init_seg (rd_slice_t *slice, const rd_buf_t *rbuf, * @returns 0 on success or -1 if there is not at least \p size bytes available * in the buffer. */ -int rd_slice_init (rd_slice_t *slice, const rd_buf_t *rbuf, - size_t absof, size_t size) { - const rd_segment_t *seg = rd_buf_get_segment_at_offset(rbuf, NULL, - absof); +int rd_slice_init(rd_slice_t *slice, + const rd_buf_t *rbuf, + size_t absof, + size_t size) { + const rd_segment_t *seg = + rd_buf_get_segment_at_offset(rbuf, NULL, absof); if (unlikely(!seg)) return -1; - return rd_slice_init_seg(slice, rbuf, seg, - absof - seg->seg_absof, size); + return rd_slice_init_seg(slice, rbuf, seg, absof - seg->seg_absof, + size); } /** * @brief Initialize new slice covering the full buffer \p rbuf */ -void rd_slice_init_full (rd_slice_t *slice, const rd_buf_t *rbuf) { +void rd_slice_init_full(rd_slice_t *slice, const rd_buf_t *rbuf) { int r = rd_slice_init(slice, rbuf, 0, rd_buf_len(rbuf)); rd_assert(r == 0); } @@ -839,18 +835,18 @@ void rd_slice_init_full (rd_slice_t *slice, const rd_buf_t *rbuf) { /** * @sa rd_slice_reader() rd_slice_peeker() */ -size_t rd_slice_reader0 (rd_slice_t *slice, const void **p, int update_pos) { +size_t rd_slice_reader0(rd_slice_t *slice, const void **p, int update_pos) { size_t rof = slice->rof; size_t rlen; const rd_segment_t *seg; /* Find segment with non-zero payload */ - for (seg = slice->seg ; 
- seg && seg->seg_absof+rof < slice->end && seg->seg_of == rof ; + for (seg = slice->seg; + seg && seg->seg_absof + rof < slice->end && seg->seg_of == rof; seg = TAILQ_NEXT(seg, seg_link)) rof = 0; - if (unlikely(!seg || seg->seg_absof+rof >= slice->end)) + if (unlikely(!seg || seg->seg_absof + rof >= slice->end)) return 0; *p = (const void *)(seg->seg_p + rof); @@ -859,9 +855,9 @@ size_t rd_slice_reader0 (rd_slice_t *slice, const void **p, int update_pos) { if (update_pos) { if (slice->seg != seg) { rd_assert(seg->seg_absof + rof >= slice->start && - seg->seg_absof + rof+rlen <= slice->end); - slice->seg = seg; - slice->rof = rlen; + seg->seg_absof + rof + rlen <= slice->end); + slice->seg = seg; + slice->rof = rlen; } else { slice->rof += rlen; } @@ -882,21 +878,19 @@ size_t rd_slice_reader0 (rd_slice_t *slice, const void **p, int update_pos) { * * @returns the number of bytes read, or 0 if slice is empty. */ -size_t rd_slice_reader (rd_slice_t *slice, const void **p) { - return rd_slice_reader0(slice, p, 1/*update_pos*/); +size_t rd_slice_reader(rd_slice_t *slice, const void **p) { + return rd_slice_reader0(slice, p, 1 /*update_pos*/); } /** * @brief Identical to rd_slice_reader() but does NOT update the read position */ -size_t rd_slice_peeker (const rd_slice_t *slice, const void **p) { - return rd_slice_reader0((rd_slice_t *)slice, p, 0/*dont update_pos*/); +size_t rd_slice_peeker(const rd_slice_t *slice, const void **p) { + return rd_slice_reader0((rd_slice_t *)slice, p, 0 /*dont update_pos*/); } - - /** * @brief Read \p size bytes from current read position, * advancing the read offset by the number of bytes copied to \p dst. @@ -910,9 +904,9 @@ size_t rd_slice_peeker (const rd_slice_t *slice, const void **p) { * * @remark If \p dst is NULL only the read position is updated. 
*/ -size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size) { +size_t rd_slice_read(rd_slice_t *slice, void *dst, size_t size) { size_t remains = size; - char *d = (char *)dst; /* Possibly NULL */ + char *d = (char *)dst; /* Possibly NULL */ size_t rlen; const void *p; size_t orig_end = slice->end; @@ -927,7 +921,7 @@ size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size) { rd_dassert(remains >= rlen); if (dst) { memcpy(d, p, rlen); - d += rlen; + d += rlen; } remains -= rlen; } @@ -947,15 +941,14 @@ size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size) { * * @returns \p size if the offset and size was within the slice, else 0. */ -size_t rd_slice_peek (const rd_slice_t *slice, size_t offset, - void *dst, size_t size) { +size_t +rd_slice_peek(const rd_slice_t *slice, size_t offset, void *dst, size_t size) { rd_slice_t sub = *slice; if (unlikely(rd_slice_seek(&sub, offset) == -1)) return 0; return rd_slice_read(&sub, dst, size); - } @@ -966,19 +959,19 @@ size_t rd_slice_peek (const rd_slice_t *slice, size_t offset, * @returns the number of bytes read on success or 0 in case of * buffer underflow. */ -size_t rd_slice_read_uvarint (rd_slice_t *slice, uint64_t *nump) { +size_t rd_slice_read_uvarint(rd_slice_t *slice, uint64_t *nump) { uint64_t num = 0; - int shift = 0; - size_t rof = slice->rof; + int shift = 0; + size_t rof = slice->rof; const rd_segment_t *seg; /* Traverse segments, byte for byte, until varint is decoded * or no more segments available (underflow). 
*/ - for (seg = slice->seg ; seg ; seg = TAILQ_NEXT(seg, seg_link)) { - for ( ; rof < seg->seg_of ; rof++) { + for (seg = slice->seg; seg; seg = TAILQ_NEXT(seg, seg_link)) { + for (; rof < seg->seg_of; rof++) { unsigned char oct; - if (unlikely(seg->seg_absof+rof >= slice->end)) + if (unlikely(seg->seg_absof + rof >= slice->end)) return 0; /* Underflow */ oct = *(const unsigned char *)(seg->seg_p + rof); @@ -1014,7 +1007,7 @@ size_t rd_slice_read_uvarint (rd_slice_t *slice, uint64_t *nump) { * * @remark The read position is updated to point past \p size. */ -const void *rd_slice_ensure_contig (rd_slice_t *slice, size_t size) { +const void *rd_slice_ensure_contig(rd_slice_t *slice, size_t size) { void *p; if (unlikely(rd_slice_remains(slice) < size || @@ -1037,7 +1030,7 @@ const void *rd_slice_ensure_contig (rd_slice_t *slice, size_t size) { * @returns 0 if offset was within range, else -1 in which case the position * is not changed. */ -int rd_slice_seek (rd_slice_t *slice, size_t offset) { +int rd_slice_seek(rd_slice_t *slice, size_t offset) { const rd_segment_t *seg; size_t absof = slice->start + offset; @@ -1068,11 +1061,11 @@ int rd_slice_seek (rd_slice_t *slice, size_t offset) { * * @returns 1 if enough underlying slice buffer memory is available, else 0. */ -int rd_slice_narrow (rd_slice_t *slice, rd_slice_t *save_slice, size_t size) { +int rd_slice_narrow(rd_slice_t *slice, rd_slice_t *save_slice, size_t size) { if (unlikely(slice->start + size > slice->end)) return 0; *save_slice = *slice; - slice->end = slice->start + size; + slice->end = slice->start + size; rd_assert(rd_slice_abs_offset(slice) <= slice->end); return 1; } @@ -1081,8 +1074,9 @@ int rd_slice_narrow (rd_slice_t *slice, rd_slice_t *save_slice, size_t size) { * @brief Same as rd_slice_narrow() but using a relative size \p relsize * from the current read position. 
*/ -int rd_slice_narrow_relative (rd_slice_t *slice, rd_slice_t *save_slice, - size_t relsize) { +int rd_slice_narrow_relative(rd_slice_t *slice, + rd_slice_t *save_slice, + size_t relsize) { return rd_slice_narrow(slice, save_slice, rd_slice_offset(slice) + relsize); } @@ -1093,7 +1087,7 @@ int rd_slice_narrow_relative (rd_slice_t *slice, rd_slice_t *save_slice, * rd_slice_narrow(), while keeping the updated read pointer from * \p slice. */ -void rd_slice_widen (rd_slice_t *slice, const rd_slice_t *save_slice) { +void rd_slice_widen(rd_slice_t *slice, const rd_slice_t *save_slice) { slice->end = save_slice->end; } @@ -1107,11 +1101,12 @@ void rd_slice_widen (rd_slice_t *slice, const rd_slice_t *save_slice) { * * @returns 1 if enough underlying slice buffer memory is available, else 0. */ -int rd_slice_narrow_copy (const rd_slice_t *orig, rd_slice_t *new_slice, - size_t size) { +int rd_slice_narrow_copy(const rd_slice_t *orig, + rd_slice_t *new_slice, + size_t size) { if (unlikely(orig->start + size > orig->end)) return 0; - *new_slice = *orig; + *new_slice = *orig; new_slice->end = orig->start + size; rd_assert(rd_slice_abs_offset(new_slice) <= new_slice->end); return 1; @@ -1121,17 +1116,15 @@ int rd_slice_narrow_copy (const rd_slice_t *orig, rd_slice_t *new_slice, * @brief Same as rd_slice_narrow_copy() but with a relative size from * the current read position. */ -int rd_slice_narrow_copy_relative (const rd_slice_t *orig, - rd_slice_t *new_slice, - size_t relsize) { +int rd_slice_narrow_copy_relative(const rd_slice_t *orig, + rd_slice_t *new_slice, + size_t relsize) { return rd_slice_narrow_copy(orig, new_slice, rd_slice_offset(orig) + relsize); } - - /** * @brief Set up the iovec \p iovs (of size \p iov_max) with the readable * segments from the slice's current read position. @@ -1145,13 +1138,15 @@ int rd_slice_narrow_copy_relative (const rd_slice_t *orig, * * @remark will NOT update the read position. 
*/ -size_t rd_slice_get_iov (const rd_slice_t *slice, - struct iovec *iovs, size_t *iovcntp, - size_t iov_max, size_t size_max) { +size_t rd_slice_get_iov(const rd_slice_t *slice, + struct iovec *iovs, + size_t *iovcntp, + size_t iov_max, + size_t size_max) { const void *p; size_t rlen; - size_t iovcnt = 0; - size_t sum = 0; + size_t iovcnt = 0; + size_t sum = 0; rd_slice_t copy = *slice; /* Use a copy of the slice so we dont * update the position for the caller. */ @@ -1170,8 +1165,6 @@ size_t rd_slice_get_iov (const rd_slice_t *slice, - - /** * @brief CRC32 calculation of slice. * @@ -1179,7 +1172,7 @@ size_t rd_slice_get_iov (const rd_slice_t *slice, * * @remark the slice's position is updated. */ -uint32_t rd_slice_crc32 (rd_slice_t *slice) { +uint32_t rd_slice_crc32(rd_slice_t *slice) { rd_crc32_t crc; const void *p; size_t rlen; @@ -1199,7 +1192,7 @@ uint32_t rd_slice_crc32 (rd_slice_t *slice) { * * @remark the slice's position is updated. */ -uint32_t rd_slice_crc32c (rd_slice_t *slice) { +uint32_t rd_slice_crc32c(rd_slice_t *slice) { const void *p; size_t rlen; uint32_t crc = 0; @@ -1212,37 +1205,38 @@ uint32_t rd_slice_crc32c (rd_slice_t *slice) { - - /** * @name Debugging dumpers * * */ -static void rd_segment_dump (const rd_segment_t *seg, const char *ind, - size_t relof, int do_hexdump) { +static void rd_segment_dump(const rd_segment_t *seg, + const char *ind, + size_t relof, + int do_hexdump) { fprintf(stderr, "%s((rd_segment_t *)%p): " - "p %p, of %"PRIusz", " - "absof %"PRIusz", size %"PRIusz", free %p, flags 0x%x\n", - ind, seg, seg->seg_p, seg->seg_of, - seg->seg_absof, seg->seg_size, seg->seg_free, seg->seg_flags); + "p %p, of %" PRIusz + ", " + "absof %" PRIusz ", size %" PRIusz ", free %p, flags 0x%x\n", + ind, seg, seg->seg_p, seg->seg_of, seg->seg_absof, + seg->seg_size, seg->seg_free, seg->seg_flags); rd_assert(relof <= seg->seg_of); if (do_hexdump) - rd_hexdump(stderr, "segment", - seg->seg_p+relof, seg->seg_of-relof); + rd_hexdump(stderr, 
"segment", seg->seg_p + relof, + seg->seg_of - relof); } -void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump) { +void rd_buf_dump(const rd_buf_t *rbuf, int do_hexdump) { const rd_segment_t *seg; fprintf(stderr, "((rd_buf_t *)%p):\n" - " len %"PRIusz" size %"PRIusz - ", %"PRIusz"/%"PRIusz" extra memory used\n", - rbuf, rbuf->rbuf_len, rbuf->rbuf_size, - rbuf->rbuf_extra_len, rbuf->rbuf_extra_size); + " len %" PRIusz " size %" PRIusz ", %" PRIusz "/%" PRIusz + " extra memory used\n", + rbuf, rbuf->rbuf_len, rbuf->rbuf_size, rbuf->rbuf_extra_len, + rbuf->rbuf_extra_size); if (rbuf->rbuf_wpos) { fprintf(stderr, " wpos:\n"); @@ -1252,7 +1246,7 @@ void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump) { if (rbuf->rbuf_segment_cnt > 0) { size_t segcnt = 0; - fprintf(stderr, " %"PRIusz" linked segments:\n", + fprintf(stderr, " %" PRIusz " linked segments:\n", rbuf->rbuf_segment_cnt); TAILQ_FOREACH(seg, &rbuf->rbuf_segments, seg_link) { rd_segment_dump(seg, " ", 0, do_hexdump); @@ -1262,22 +1256,23 @@ void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump) { } } -void rd_slice_dump (const rd_slice_t *slice, int do_hexdump) { +void rd_slice_dump(const rd_slice_t *slice, int do_hexdump) { const rd_segment_t *seg; size_t relof; fprintf(stderr, "((rd_slice_t *)%p):\n" - " buf %p (len %"PRIusz"), seg %p (absof %"PRIusz"), " - "rof %"PRIusz", start %"PRIusz", end %"PRIusz", size %"PRIusz - ", offset %"PRIusz"\n", - slice, slice->buf, rd_buf_len(slice->buf), - slice->seg, slice->seg ? slice->seg->seg_absof : 0, - slice->rof, slice->start, slice->end, - rd_slice_size(slice), rd_slice_offset(slice)); + " buf %p (len %" PRIusz "), seg %p (absof %" PRIusz + "), " + "rof %" PRIusz ", start %" PRIusz ", end %" PRIusz + ", size %" PRIusz ", offset %" PRIusz "\n", + slice, slice->buf, rd_buf_len(slice->buf), slice->seg, + slice->seg ? 
slice->seg->seg_absof : 0, slice->rof, + slice->start, slice->end, rd_slice_size(slice), + rd_slice_offset(slice)); relof = slice->rof; - for (seg = slice->seg ; seg ; seg = TAILQ_NEXT(seg, seg_link)) { + for (seg = slice->seg; seg; seg = TAILQ_NEXT(seg, seg_link)) { rd_segment_dump(seg, " ", relof, do_hexdump); relof = 0; } @@ -1295,13 +1290,13 @@ void rd_slice_dump (const rd_slice_t *slice, int do_hexdump) { /** * @brief Basic write+read test */ -static int do_unittest_write_read (void) { +static int do_unittest_write_read(void) { rd_buf_t b; char ones[1024]; char twos[1024]; char threes[1024]; char fiftyfives[100]; /* 0x55 indicates "untouched" memory */ - char buf[1024*3]; + char buf[1024 * 3]; rd_slice_t slice; size_t r, pos; @@ -1317,21 +1312,21 @@ static int do_unittest_write_read (void) { * Verify write */ r = rd_buf_write(&b, ones, 200); - RD_UT_ASSERT(r == 0, "write() returned position %"PRIusz, r); + RD_UT_ASSERT(r == 0, "write() returned position %" PRIusz, r); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200, "pos() returned position %" PRIusz, pos); r = rd_buf_write(&b, twos, 800); - RD_UT_ASSERT(r == 200, "write() returned position %"PRIusz, r); + RD_UT_ASSERT(r == 200, "write() returned position %" PRIusz, r); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200+800, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200 + 800, "pos() returned position %" PRIusz, pos); /* Buffer grows here */ r = rd_buf_write(&b, threes, 1); - RD_UT_ASSERT(pos == 200+800, - "write() returned position %"PRIusz, r); + RD_UT_ASSERT(pos == 200 + 800, "write() returned position %" PRIusz, r); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200+800+1, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200 + 800 + 1, "pos() returned position %" PRIusz, + pos); /* * Verify read @@ -1339,18 +1334,19 @@ static int do_unittest_write_read (void) { /* Get full slice. 
*/ rd_slice_init_full(&slice, &b); - r = rd_slice_read(&slice, buf, 200+800+2); + r = rd_slice_read(&slice, buf, 200 + 800 + 2); RD_UT_ASSERT(r == 0, - "read() > remaining should have failed, gave %"PRIusz, r); - r = rd_slice_read(&slice, buf, 200+800+1); - RD_UT_ASSERT(r == 200+800+1, - "read() returned %"PRIusz" (%"PRIusz" remains)", - r, rd_slice_remains(&slice)); + "read() > remaining should have failed, gave %" PRIusz, r); + r = rd_slice_read(&slice, buf, 200 + 800 + 1); + RD_UT_ASSERT(r == 200 + 800 + 1, + "read() returned %" PRIusz " (%" PRIusz " remains)", r, + rd_slice_remains(&slice)); RD_UT_ASSERT(!memcmp(buf, ones, 200), "verify ones"); - RD_UT_ASSERT(!memcmp(buf+200, twos, 800), "verify twos"); - RD_UT_ASSERT(!memcmp(buf+200+800, threes, 1), "verify threes"); - RD_UT_ASSERT(!memcmp(buf+200+800+1, fiftyfives, 100), "verify 55s"); + RD_UT_ASSERT(!memcmp(buf + 200, twos, 800), "verify twos"); + RD_UT_ASSERT(!memcmp(buf + 200 + 800, threes, 1), "verify threes"); + RD_UT_ASSERT(!memcmp(buf + 200 + 800 + 1, fiftyfives, 100), + "verify 55s"); rd_buf_destroy(&b); @@ -1361,16 +1357,20 @@ static int do_unittest_write_read (void) { /** * @brief Helper read verifier, not a unit-test itself. 
*/ -#define do_unittest_read_verify(b,absof,len,verify) do { \ - int __fail = do_unittest_read_verify0(b,absof,len,verify); \ - RD_UT_ASSERT(!__fail, \ - "read_verify(absof=%"PRIusz",len=%"PRIusz") " \ - "failed", (size_t)absof, (size_t)len); \ +#define do_unittest_read_verify(b, absof, len, verify) \ + do { \ + int __fail = do_unittest_read_verify0(b, absof, len, verify); \ + RD_UT_ASSERT(!__fail, \ + "read_verify(absof=%" PRIusz ",len=%" PRIusz \ + ") " \ + "failed", \ + (size_t)absof, (size_t)len); \ } while (0) -static int -do_unittest_read_verify0 (const rd_buf_t *b, size_t absof, size_t len, - const char *verify) { +static int do_unittest_read_verify0(const rd_buf_t *b, + size_t absof, + size_t len, + const char *verify) { rd_slice_t slice, sub; char buf[1024]; size_t half; @@ -1385,53 +1385,53 @@ do_unittest_read_verify0 (const rd_buf_t *b, size_t absof, size_t len, r = rd_slice_read(&slice, buf, len); RD_UT_ASSERT(r == len, - "read() returned %"PRIusz" expected %"PRIusz - " (%"PRIusz" remains)", + "read() returned %" PRIusz " expected %" PRIusz + " (%" PRIusz " remains)", r, len, rd_slice_remains(&slice)); RD_UT_ASSERT(!memcmp(buf, verify, len), "verify"); r = rd_slice_offset(&slice); - RD_UT_ASSERT(r == len, "offset() returned %"PRIusz", not %"PRIusz, - r, len); + RD_UT_ASSERT(r == len, "offset() returned %" PRIusz ", not %" PRIusz, r, + len); half = len / 2; - i = rd_slice_seek(&slice, half); - RD_UT_ASSERT(i == 0, "seek(%"PRIusz") returned %d", half, i); + i = rd_slice_seek(&slice, half); + RD_UT_ASSERT(i == 0, "seek(%" PRIusz ") returned %d", half, i); r = rd_slice_offset(&slice); - RD_UT_ASSERT(r == half, "offset() returned %"PRIusz", not %"PRIusz, + RD_UT_ASSERT(r == half, "offset() returned %" PRIusz ", not %" PRIusz, r, half); /* Get a sub-slice covering the later half. 
*/ sub = rd_slice_pos(&slice); - r = rd_slice_offset(&sub); - RD_UT_ASSERT(r == 0, "sub: offset() returned %"PRIusz", not %"PRIusz, + r = rd_slice_offset(&sub); + RD_UT_ASSERT(r == 0, "sub: offset() returned %" PRIusz ", not %" PRIusz, r, (size_t)0); r = rd_slice_size(&sub); - RD_UT_ASSERT(r == half, "sub: size() returned %"PRIusz", not %"PRIusz, - r, half); + RD_UT_ASSERT(r == half, + "sub: size() returned %" PRIusz ", not %" PRIusz, r, half); r = rd_slice_remains(&sub); RD_UT_ASSERT(r == half, - "sub: remains() returned %"PRIusz", not %"PRIusz, - r, half); + "sub: remains() returned %" PRIusz ", not %" PRIusz, r, + half); /* Read half */ r = rd_slice_read(&sub, buf, half); RD_UT_ASSERT(r == half, - "sub read() returned %"PRIusz" expected %"PRIusz - " (%"PRIusz" remains)", + "sub read() returned %" PRIusz " expected %" PRIusz + " (%" PRIusz " remains)", r, len, rd_slice_remains(&sub)); RD_UT_ASSERT(!memcmp(buf, verify, len), "verify"); r = rd_slice_offset(&sub); RD_UT_ASSERT(r == rd_slice_size(&sub), - "sub offset() returned %"PRIusz", not %"PRIusz, - r, rd_slice_size(&sub)); + "sub offset() returned %" PRIusz ", not %" PRIusz, r, + rd_slice_size(&sub)); r = rd_slice_remains(&sub); RD_UT_ASSERT(r == 0, - "sub: remains() returned %"PRIusz", not %"PRIusz, - r, (size_t)0); + "sub: remains() returned %" PRIusz ", not %" PRIusz, r, + (size_t)0); return 0; } @@ -1440,13 +1440,13 @@ do_unittest_read_verify0 (const rd_buf_t *b, size_t absof, size_t len, /** * @brief write_seek() and split() test */ -static int do_unittest_write_split_seek (void) { +static int do_unittest_write_split_seek(void) { rd_buf_t b; char ones[1024]; char twos[1024]; char threes[1024]; char fiftyfives[100]; /* 0x55 indicates "untouched" memory */ - char buf[1024*3]; + char buf[1024 * 3]; size_t r, pos; rd_segment_t *seg, *newseg; @@ -1462,9 +1462,9 @@ static int do_unittest_write_split_seek (void) { * Verify write */ r = rd_buf_write(&b, ones, 400); - RD_UT_ASSERT(r == 0, "write() returned position 
%"PRIusz, r); + RD_UT_ASSERT(r == 0, "write() returned position %" PRIusz, r); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 400, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 400, "pos() returned position %" PRIusz, pos); do_unittest_read_verify(&b, 0, 400, ones); @@ -1474,22 +1474,22 @@ static int do_unittest_write_split_seek (void) { r = rd_buf_write_seek(&b, 200); RD_UT_ASSERT(r == 0, "seek() failed"); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200, "pos() returned position %" PRIusz, pos); r = rd_buf_write(&b, twos, 100); - RD_UT_ASSERT(pos == 200, "write() returned position %"PRIusz, r); + RD_UT_ASSERT(pos == 200, "write() returned position %" PRIusz, r); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200+100, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos); do_unittest_read_verify(&b, 0, 200, ones); do_unittest_read_verify(&b, 200, 100, twos); /* Make sure read() did not modify the write position. */ pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200+100, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos); /* Split buffer, write position is now at split where writes - * are not allowed (mid buffer). */ + * are not allowed (mid buffer). 
*/ seg = rd_buf_get_segment_at_offset(&b, NULL, 50); RD_UT_ASSERT(seg->seg_of != 0, "assumed mid-segment"); newseg = rd_segment_split(&b, seg, 50); @@ -1498,10 +1498,10 @@ static int do_unittest_write_split_seek (void) { RD_UT_ASSERT(seg != NULL, "seg"); RD_UT_ASSERT(seg == newseg, "newseg %p, seg %p", newseg, seg); RD_UT_ASSERT(seg->seg_of > 0, - "assumed beginning of segment, got %"PRIusz, seg->seg_of); + "assumed beginning of segment, got %" PRIusz, seg->seg_of); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200+100, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos); /* Re-verify that nothing changed */ do_unittest_read_verify(&b, 0, 200, ones); @@ -1522,7 +1522,7 @@ static int do_unittest_write_split_seek (void) { * @brief Unittest to verify payload is correctly written and read. * Each written u32 word is the running CRC of the word count. */ -static int do_unittest_write_read_payload_correctness (void) { +static int do_unittest_write_read_payload_correctness(void) { uint32_t crc; uint32_t write_crc, read_crc; const int seed = 12345; @@ -1537,7 +1537,7 @@ static int do_unittest_write_read_payload_correctness (void) { crc = rd_crc32_update(crc, (void *)&seed, sizeof(seed)); rd_buf_init(&b, 0, 0); - for (i = 0 ; i < max_cnt ; i++) { + for (i = 0; i < max_cnt; i++) { crc = rd_crc32_update(crc, (void *)&i, sizeof(i)); rd_buf_write(&b, &crc, sizeof(crc)); } @@ -1546,8 +1546,8 @@ static int do_unittest_write_read_payload_correctness (void) { r = rd_buf_len(&b); RD_UT_ASSERT(r == max_cnt * sizeof(crc), - "expected length %"PRIusz", not %"PRIusz, - r, max_cnt * sizeof(crc)); + "expected length %" PRIusz ", not %" PRIusz, r, + max_cnt * sizeof(crc)); /* * Now verify the contents with a reader. 
@@ -1556,20 +1556,20 @@ static int do_unittest_write_read_payload_correctness (void) { r = rd_slice_remains(&slice); RD_UT_ASSERT(r == rd_buf_len(&b), - "slice remains %"PRIusz", should be %"PRIusz, - r, rd_buf_len(&b)); + "slice remains %" PRIusz ", should be %" PRIusz, r, + rd_buf_len(&b)); - for (pass = 0 ; pass < 2 ; pass++) { + for (pass = 0; pass < 2; pass++) { /* Two passes: * - pass 1: using peek() * - pass 2: using read() */ - const char *pass_str = pass == 0 ? "peek":"read"; + const char *pass_str = pass == 0 ? "peek" : "read"; crc = rd_crc32_init(); crc = rd_crc32_update(crc, (void *)&seed, sizeof(seed)); - for (i = 0 ; i < max_cnt ; i++) { + for (i = 0; i < max_cnt; i++) { uint32_t buf_crc; crc = rd_crc32_update(crc, (void *)&i, sizeof(i)); @@ -1581,41 +1581,41 @@ static int do_unittest_write_read_payload_correctness (void) { r = rd_slice_read(&slice, &buf_crc, sizeof(buf_crc)); RD_UT_ASSERT(r == sizeof(buf_crc), - "%s() at #%"PRIusz" failed: " - "r is %"PRIusz" not %"PRIusz, + "%s() at #%" PRIusz + " failed: " + "r is %" PRIusz " not %" PRIusz, pass_str, i, r, sizeof(buf_crc)); RD_UT_ASSERT(buf_crc == crc, - "%s: invalid crc at #%"PRIusz - ": expected %"PRIu32", read %"PRIu32, + "%s: invalid crc at #%" PRIusz + ": expected %" PRIu32 ", read %" PRIu32, pass_str, i, crc, buf_crc); } read_crc = rd_crc32_finalize(crc); RD_UT_ASSERT(read_crc == write_crc, - "%s: finalized read crc %"PRIu32 - " != write crc %"PRIu32, + "%s: finalized read crc %" PRIu32 + " != write crc %" PRIu32, pass_str, read_crc, write_crc); - } r = rd_slice_remains(&slice); - RD_UT_ASSERT(r == 0, - "slice remains %"PRIusz", should be %"PRIusz, - r, (size_t)0); + RD_UT_ASSERT(r == 0, "slice remains %" PRIusz ", should be %" PRIusz, r, + (size_t)0); rd_buf_destroy(&b); RD_UT_PASS(); } -#define do_unittest_iov_verify(...) do { \ - int __fail = do_unittest_iov_verify0(__VA_ARGS__); \ - RD_UT_ASSERT(!__fail, "iov_verify() failed"); \ +#define do_unittest_iov_verify(...) 
\ + do { \ + int __fail = do_unittest_iov_verify0(__VA_ARGS__); \ + RD_UT_ASSERT(!__fail, "iov_verify() failed"); \ } while (0) -static int do_unittest_iov_verify0 (rd_buf_t *b, - size_t exp_iovcnt, size_t exp_totsize) { - #define MY_IOV_MAX 16 +static int +do_unittest_iov_verify0(rd_buf_t *b, size_t exp_iovcnt, size_t exp_totsize) { +#define MY_IOV_MAX 16 struct iovec iov[MY_IOV_MAX]; size_t iovcnt; size_t i; @@ -1623,31 +1623,32 @@ static int do_unittest_iov_verify0 (rd_buf_t *b, rd_assert(exp_iovcnt <= MY_IOV_MAX); - totsize = rd_buf_get_write_iov(b, iov, &iovcnt, - MY_IOV_MAX, exp_totsize); + totsize = + rd_buf_get_write_iov(b, iov, &iovcnt, MY_IOV_MAX, exp_totsize); RD_UT_ASSERT(totsize >= exp_totsize, - "iov total size %"PRIusz" expected >= %"PRIusz, - totsize, exp_totsize); + "iov total size %" PRIusz " expected >= %" PRIusz, totsize, + exp_totsize); RD_UT_ASSERT(iovcnt >= exp_iovcnt && iovcnt <= MY_IOV_MAX, - "iovcnt %"PRIusz - ", expected %"PRIusz" < x <= MY_IOV_MAX", + "iovcnt %" PRIusz ", expected %" PRIusz + " < x <= MY_IOV_MAX", iovcnt, exp_iovcnt); sum = 0; - for (i = 0 ; i < iovcnt ; i++) { + for (i = 0; i < iovcnt; i++) { RD_UT_ASSERT(iov[i].iov_base, - "iov #%"PRIusz" iov_base not set", i); + "iov #%" PRIusz " iov_base not set", i); RD_UT_ASSERT(iov[i].iov_len, - "iov #%"PRIusz" iov_len %"PRIusz" out of range", + "iov #%" PRIusz " iov_len %" PRIusz + " out of range", i, iov[i].iov_len); sum += iov[i].iov_len; - RD_UT_ASSERT(sum <= totsize, "sum %"PRIusz" > totsize %"PRIusz, - sum, totsize); + RD_UT_ASSERT(sum <= totsize, + "sum %" PRIusz " > totsize %" PRIusz, sum, + totsize); } - RD_UT_ASSERT(sum == totsize, - "sum %"PRIusz" != totsize %"PRIusz, - sum, totsize); + RD_UT_ASSERT(sum == totsize, "sum %" PRIusz " != totsize %" PRIusz, sum, + totsize); return 0; } @@ -1656,7 +1657,7 @@ static int do_unittest_iov_verify0 (rd_buf_t *b, /** * @brief Verify that buffer to iovec conversion works. 
*/ -static int do_unittest_write_iov (void) { +static int do_unittest_write_iov(void) { rd_buf_t b; rd_buf_init(&b, 0, 0); @@ -1667,7 +1668,7 @@ static int do_unittest_write_iov (void) { /* Add a secondary buffer */ rd_buf_write_ensure(&b, 30000, 0); - do_unittest_iov_verify(&b, 2, 100+30000); + do_unittest_iov_verify(&b, 2, 100 + 30000); rd_buf_destroy(&b); @@ -1678,7 +1679,7 @@ static int do_unittest_write_iov (void) { /** * @brief Verify that erasing parts of the buffer works. */ -static int do_unittest_erase (void) { +static int do_unittest_erase(void) { static const struct { const char *segs[4]; const char *writes[4]; @@ -1689,98 +1690,105 @@ static int do_unittest_erase (void) { } erasures[4]; const char *expect; - } in[] = { - /* 12|3|45 - * x x xx */ - { .segs = { "12", "3", "45" }, - .erasures = { { 1, 4, 4 } }, - .expect = "1", - }, - /* 12|3|45 - * xx */ - { .segs = { "12", "3", "45" }, - .erasures = { { 0, 2, 2 } }, - .expect = "345", - }, - /* 12|3|45 - * xx */ - { .segs = { "12", "3", "45" }, - .erasures = { { 3, 2, 2 } }, - .expect = "123", - }, - /* 12|3|45 - * x - * 1 |3|45 - * x - * 1 | 45 - * x */ - { .segs = { "12", "3", "45" }, - .erasures = { { 1, 1, 1 }, - { 1, 1, 1 }, - { 2, 1, 1 } }, - .expect = "14", - }, - /* 12|3|45 - * xxxxxxx */ - { .segs = { "12", "3", "45" }, - .erasures = { { 0, 5, 5 } }, - .expect = "", - }, - /* 12|3|45 - * x */ - { .segs = { "12", "3", "45" }, - .erasures = { { 0, 1, 1 } }, - .expect = "2345", - }, - /* 12|3|45 - * x */ - { .segs = { "12", "3", "45" }, - .erasures = { { 4, 1, 1 } }, - .expect = "1234", - }, - /* 12|3|45 - * x */ - { .segs = { "12", "3", "45" }, - .erasures = { { 5, 10, 0 } }, - .expect = "12345", - }, - /* 12|3|45 - * xxx */ - { .segs = { "12", "3", "45" }, - .erasures = { { 4, 3, 1 }, { 4, 3, 0 }, { 4, 3, 0 } }, - .expect = "1234", - }, - /* 1 - * xxx */ - { .segs = { "1" }, - .erasures = { { 0, 3, 1 } }, - .expect = "", - }, - /* 123456 - * xxxxxx */ - { .segs = { "123456" }, - .erasures = { { 
0, 6, 6 } }, - .expect = "", - }, - /* 123456789a - * xxx */ - { .segs = { "123456789a" }, - .erasures = { { 4, 3, 3 } }, - .expect = "123489a", - }, - /* 1234|5678 - * x xx */ - { .segs = { "1234", "5678" }, - .erasures = { { 3, 3, 3 } }, - .writes = { "9abc" }, - .expect = "123789abc" - }, - - { .expect = NULL } - }; + } in[] = {/* 12|3|45 + * x x xx */ + { + .segs = {"12", "3", "45"}, + .erasures = {{1, 4, 4}}, + .expect = "1", + }, + /* 12|3|45 + * xx */ + { + .segs = {"12", "3", "45"}, + .erasures = {{0, 2, 2}}, + .expect = "345", + }, + /* 12|3|45 + * xx */ + { + .segs = {"12", "3", "45"}, + .erasures = {{3, 2, 2}}, + .expect = "123", + }, + /* 12|3|45 + * x + * 1 |3|45 + * x + * 1 | 45 + * x */ + { + .segs = {"12", "3", "45"}, + .erasures = {{1, 1, 1}, {1, 1, 1}, {2, 1, 1}}, + .expect = "14", + }, + /* 12|3|45 + * xxxxxxx */ + { + .segs = {"12", "3", "45"}, + .erasures = {{0, 5, 5}}, + .expect = "", + }, + /* 12|3|45 + * x */ + { + .segs = {"12", "3", "45"}, + .erasures = {{0, 1, 1}}, + .expect = "2345", + }, + /* 12|3|45 + * x */ + { + .segs = {"12", "3", "45"}, + .erasures = {{4, 1, 1}}, + .expect = "1234", + }, + /* 12|3|45 + * x */ + { + .segs = {"12", "3", "45"}, + .erasures = {{5, 10, 0}}, + .expect = "12345", + }, + /* 12|3|45 + * xxx */ + { + .segs = {"12", "3", "45"}, + .erasures = {{4, 3, 1}, {4, 3, 0}, {4, 3, 0}}, + .expect = "1234", + }, + /* 1 + * xxx */ + { + .segs = {"1"}, + .erasures = {{0, 3, 1}}, + .expect = "", + }, + /* 123456 + * xxxxxx */ + { + .segs = {"123456"}, + .erasures = {{0, 6, 6}}, + .expect = "", + }, + /* 123456789a + * xxx */ + { + .segs = {"123456789a"}, + .erasures = {{4, 3, 3}}, + .expect = "123489a", + }, + /* 1234|5678 + * x xx */ + {.segs = {"1234", "5678"}, + .erasures = {{3, 3, 3}}, + .writes = {"9abc"}, + .expect = "123789abc"}, + + {.expect = NULL}}; int i; - for (i = 0 ; in[i].expect ; i++) { + for (i = 0; in[i].expect; i++) { rd_buf_t b; rd_slice_t s; size_t expsz = strlen(in[i].expect); @@ -1792,63 +1800,63 @@ 
static int do_unittest_erase (void) { rd_buf_init(&b, 0, 0); /* Write segments to buffer */ - for (j = 0 ; in[i].segs[j] ; j++) + for (j = 0; in[i].segs[j]; j++) rd_buf_push_writable(&b, rd_strdup(in[i].segs[j]), strlen(in[i].segs[j]), rd_free); /* Perform erasures */ - for (j = 0 ; in[i].erasures[j].retsize ; j++) { - r = rd_buf_erase(&b, - in[i].erasures[j].of, + for (j = 0; in[i].erasures[j].retsize; j++) { + r = rd_buf_erase(&b, in[i].erasures[j].of, in[i].erasures[j].size); RD_UT_ASSERT(r == in[i].erasures[j].retsize, - "expected retsize %"PRIusz" for i=%d,j=%d" - ", not %"PRIusz, + "expected retsize %" PRIusz + " for i=%d,j=%d" + ", not %" PRIusz, in[i].erasures[j].retsize, i, j, r); } /* Perform writes */ - for (j = 0 ; in[i].writes[j] ; j++) + for (j = 0; in[i].writes[j]; j++) rd_buf_write(&b, in[i].writes[j], strlen(in[i].writes[j])); RD_UT_ASSERT(expsz == rd_buf_len(&b), - "expected buffer to be %"PRIusz" bytes, not " - "%"PRIusz" for i=%d", + "expected buffer to be %" PRIusz + " bytes, not " + "%" PRIusz " for i=%d", expsz, rd_buf_len(&b), i); /* Read back and verify */ r2 = rd_slice_init(&s, &b, 0, rd_buf_len(&b)); RD_UT_ASSERT((r2 == -1 && rd_buf_len(&b) == 0) || - (r2 == 0 && rd_buf_len(&b) > 0), - "slice_init(%"PRIusz") returned %d for i=%d", + (r2 == 0 && rd_buf_len(&b) > 0), + "slice_init(%" PRIusz ") returned %d for i=%d", rd_buf_len(&b), r2, i); if (r2 == -1) continue; /* Empty buffer */ RD_UT_ASSERT(expsz == rd_slice_size(&s), - "expected slice to be %"PRIusz" bytes, not %"PRIusz - " for i=%d", + "expected slice to be %" PRIusz + " bytes, not %" PRIusz " for i=%d", expsz, rd_slice_size(&s), i); out = rd_malloc(expsz); r = rd_slice_read(&s, out, expsz); RD_UT_ASSERT(r == expsz, - "expected to read %"PRIusz" bytes, not %"PRIusz + "expected to read %" PRIusz " bytes, not %" PRIusz " for i=%d", expsz, r, i); RD_UT_ASSERT(!memcmp(out, in[i].expect, expsz), "Expected \"%.*s\", not \"%.*s\" for i=%d", - (int)expsz, in[i].expect, - (int)r, out, i); + 
(int)expsz, in[i].expect, (int)r, out, i); rd_free(out); RD_UT_ASSERT(rd_slice_remains(&s) == 0, "expected no remaining bytes in slice, but got " - "%"PRIusz" for i=%d", + "%" PRIusz " for i=%d", rd_slice_remains(&s), i); rd_buf_destroy(&b); @@ -1859,7 +1867,7 @@ static int do_unittest_erase (void) { } -int unittest_rdbuf (void) { +int unittest_rdbuf(void) { int fails = 0; fails += do_unittest_write_read(); diff --git a/src/rdbuf.h b/src/rdbuf.h index 29eb51c59e..1ef30e4a95 100644 --- a/src/rdbuf.h +++ b/src/rdbuf.h @@ -61,50 +61,50 @@ * @brief Buffer segment */ typedef struct rd_segment_s { - TAILQ_ENTRY(rd_segment_s) seg_link; /*<< rbuf_segments Link */ - char *seg_p; /**< Backing-store memory */ - size_t seg_of; /**< Current relative write-position - * (length of payload in this segment) */ - size_t seg_size; /**< Allocated size of seg_p */ - size_t seg_absof; /**< Absolute offset of this segment's - * beginning in the grand rd_buf_t */ - void (*seg_free) (void *p); /**< Optional free function for seg_p */ - int seg_flags; /**< Segment flags */ -#define RD_SEGMENT_F_RDONLY 0x1 /**< Read-only segment */ -#define RD_SEGMENT_F_FREE 0x2 /**< Free segment on destroy, - * e.g, not a fixed segment. */ + TAILQ_ENTRY(rd_segment_s) seg_link; /*<< rbuf_segments Link */ + char *seg_p; /**< Backing-store memory */ + size_t seg_of; /**< Current relative write-position + * (length of payload in this segment) */ + size_t seg_size; /**< Allocated size of seg_p */ + size_t seg_absof; /**< Absolute offset of this segment's + * beginning in the grand rd_buf_t */ + void (*seg_free)(void *p); /**< Optional free function for seg_p */ + int seg_flags; /**< Segment flags */ +#define RD_SEGMENT_F_RDONLY 0x1 /**< Read-only segment */ +#define RD_SEGMENT_F_FREE \ + 0x2 /**< Free segment on destroy, \ + * e.g, not a fixed segment. */ } rd_segment_t; - -TAILQ_HEAD(rd_segment_head,rd_segment_s); +TAILQ_HEAD(rd_segment_head, rd_segment_s); /** * @brief Buffer, containing a list of segments. 
*/ typedef struct rd_buf_s { struct rd_segment_head rbuf_segments; /**< TAILQ list of segments */ - size_t rbuf_segment_cnt; /**< Number of segments */ - - rd_segment_t *rbuf_wpos; /**< Current write position seg */ - size_t rbuf_len; /**< Current (written) length */ - size_t rbuf_erased; /**< Total number of bytes - * erased from segments. - * This amount is taken into - * account when checking for - * writable space which is - * always at the end of the - * buffer and thus can't make - * use of the erased parts. */ - size_t rbuf_size; /**< Total allocated size of - * all segments. */ - - char *rbuf_extra; /* Extra memory allocated for - * use by segment structs, - * buffer memory, etc. */ - size_t rbuf_extra_len; /* Current extra memory used */ - size_t rbuf_extra_size; /* Total size of extra memory */ + size_t rbuf_segment_cnt; /**< Number of segments */ + + rd_segment_t *rbuf_wpos; /**< Current write position seg */ + size_t rbuf_len; /**< Current (written) length */ + size_t rbuf_erased; /**< Total number of bytes + * erased from segments. + * This amount is taken into + * account when checking for + * writable space which is + * always at the end of the + * buffer and thus can't make + * use of the erased parts. */ + size_t rbuf_size; /**< Total allocated size of + * all segments. */ + + char *rbuf_extra; /* Extra memory allocated for + * use by segment structs, + * buffer memory, etc. */ + size_t rbuf_extra_len; /* Current extra memory used */ + size_t rbuf_extra_size; /* Total size of extra memory */ } rd_buf_t; @@ -113,13 +113,13 @@ typedef struct rd_buf_s { * @brief A read-only slice of a buffer. */ typedef struct rd_slice_s { - const rd_buf_t *buf; /**< Pointer to buffer */ - const rd_segment_t *seg; /**< Current read position segment. - * Will point to NULL when end of - * slice is reached. 
*/ - size_t rof; /**< Relative read offset in segment */ - size_t start; /**< Slice start offset in buffer */ - size_t end; /**< Slice end offset in buffer+1 */ + const rd_buf_t *buf; /**< Pointer to buffer */ + const rd_segment_t *seg; /**< Current read position segment. + * Will point to NULL when end of + * slice is reached. */ + size_t rof; /**< Relative read offset in segment */ + size_t start; /**< Slice start offset in buffer */ + size_t end; /**< Slice end offset in buffer+1 */ } rd_slice_t; @@ -127,7 +127,7 @@ typedef struct rd_slice_s { /** * @returns the current write position (absolute offset) */ -static RD_INLINE RD_UNUSED size_t rd_buf_write_pos (const rd_buf_t *rbuf) { +static RD_INLINE RD_UNUSED size_t rd_buf_write_pos(const rd_buf_t *rbuf) { const rd_segment_t *seg = rbuf->rbuf_wpos; if (unlikely(!seg)) { @@ -146,20 +146,19 @@ static RD_INLINE RD_UNUSED size_t rd_buf_write_pos (const rd_buf_t *rbuf) { /** * @returns the number of bytes available for writing (before growing). */ -static RD_INLINE RD_UNUSED size_t rd_buf_write_remains (const rd_buf_t *rbuf) { +static RD_INLINE RD_UNUSED size_t rd_buf_write_remains(const rd_buf_t *rbuf) { return rbuf->rbuf_size - (rbuf->rbuf_len + rbuf->rbuf_erased); } - /** * @returns the number of bytes remaining to write to the given segment, * and sets the \p *p pointer (unless NULL) to the start of * the contiguous memory. */ static RD_INLINE RD_UNUSED size_t -rd_segment_write_remains (const rd_segment_t *seg, void **p) { +rd_segment_write_remains(const rd_segment_t *seg, void **p) { if (unlikely((seg->seg_flags & RD_SEGMENT_F_RDONLY))) return 0; if (p) @@ -172,7 +171,7 @@ rd_segment_write_remains (const rd_segment_t *seg, void **p) { /** * @returns the last segment for the buffer. 
*/ -static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last (const rd_buf_t *rbuf) { +static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last(const rd_buf_t *rbuf) { return TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head); } @@ -180,53 +179,59 @@ static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last (const rd_buf_t *rbuf) { /** * @returns the total written buffer length */ -static RD_INLINE RD_UNUSED size_t rd_buf_len (const rd_buf_t *rbuf) { +static RD_INLINE RD_UNUSED size_t rd_buf_len(const rd_buf_t *rbuf) { return rbuf->rbuf_len; } -int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof); +int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof); -size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size); -size_t rd_buf_write_slice (rd_buf_t *rbuf, rd_slice_t *slice); -size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof, - const void *payload, size_t size); -void rd_buf_push0 (rd_buf_t *rbuf, const void *payload, size_t size, - void (*free_cb)(void *), rd_bool_t writable); -#define rd_buf_push(rbuf,payload,size,free_cb) \ - rd_buf_push0(rbuf,payload,size,free_cb,rd_false/*not-writable*/) -#define rd_buf_push_writable(rbuf,payload,size,free_cb) \ - rd_buf_push0(rbuf,payload,size,free_cb,rd_true/*writable*/) +size_t rd_buf_write(rd_buf_t *rbuf, const void *payload, size_t size); +size_t rd_buf_write_slice(rd_buf_t *rbuf, rd_slice_t *slice); +size_t rd_buf_write_update(rd_buf_t *rbuf, + size_t absof, + const void *payload, + size_t size); +void rd_buf_push0(rd_buf_t *rbuf, + const void *payload, + size_t size, + void (*free_cb)(void *), + rd_bool_t writable); +#define rd_buf_push(rbuf, payload, size, free_cb) \ + rd_buf_push0(rbuf, payload, size, free_cb, rd_false /*not-writable*/) +#define rd_buf_push_writable(rbuf, payload, size, free_cb) \ + rd_buf_push0(rbuf, payload, size, free_cb, rd_true /*writable*/) -size_t rd_buf_erase (rd_buf_t *rbuf, size_t absof, size_t size); +size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size); -size_t 
rd_buf_get_writable (rd_buf_t *rbuf, void **p); +size_t rd_buf_get_writable(rd_buf_t *rbuf, void **p); -void rd_buf_write_ensure_contig (rd_buf_t *rbuf, size_t size); +void rd_buf_write_ensure_contig(rd_buf_t *rbuf, size_t size); -void rd_buf_write_ensure (rd_buf_t *rbuf, size_t min_size, size_t max_size); +void rd_buf_write_ensure(rd_buf_t *rbuf, size_t min_size, size_t max_size); -size_t rd_buf_get_write_iov (const rd_buf_t *rbuf, - struct iovec *iovs, size_t *iovcntp, - size_t iov_max, size_t size_max); +size_t rd_buf_get_write_iov(const rd_buf_t *rbuf, + struct iovec *iovs, + size_t *iovcntp, + size_t iov_max, + size_t size_max); -void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size); -rd_buf_t *rd_buf_new (size_t fixed_seg_cnt, size_t buf_size); +void rd_buf_init(rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size); +rd_buf_t *rd_buf_new(size_t fixed_seg_cnt, size_t buf_size); -void rd_buf_destroy (rd_buf_t *rbuf); -void rd_buf_destroy_free (rd_buf_t *rbuf); +void rd_buf_destroy(rd_buf_t *rbuf); +void rd_buf_destroy_free(rd_buf_t *rbuf); -void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump); +void rd_buf_dump(const rd_buf_t *rbuf, int do_hexdump); -int unittest_rdbuf (void); +int unittest_rdbuf(void); /**@}*/ - /** * @name Buffer reads operate on slices of an rd_buf_t and does not * modify the underlying rd_buf_t itself. @@ -251,7 +256,7 @@ int unittest_rdbuf (void); /** * @returns the read position in the slice as a new slice. */ -static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos (const rd_slice_t *slice) { +static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos(const rd_slice_t *slice) { rd_slice_t newslice = *slice; if (!slice->seg) @@ -266,8 +271,7 @@ static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos (const rd_slice_t *slice) { * @returns the read position as an absolute buffer byte offset. * @remark this is the buffer offset, not the slice's local offset. 
*/ -static RD_INLINE RD_UNUSED size_t -rd_slice_abs_offset (const rd_slice_t *slice) { +static RD_INLINE RD_UNUSED size_t rd_slice_abs_offset(const rd_slice_t *slice) { if (unlikely(!slice->seg)) /* reader has reached the end */ return slice->end; @@ -278,7 +282,7 @@ rd_slice_abs_offset (const rd_slice_t *slice) { * @returns the read position as a byte offset. * @remark this is the slice-local offset, not the backing buffer's offset. */ -static RD_INLINE RD_UNUSED size_t rd_slice_offset (const rd_slice_t *slice) { +static RD_INLINE RD_UNUSED size_t rd_slice_offset(const rd_slice_t *slice) { if (unlikely(!slice->seg)) /* reader has reached the end */ return rd_slice_size(slice); @@ -287,21 +291,25 @@ static RD_INLINE RD_UNUSED size_t rd_slice_offset (const rd_slice_t *slice) { +int rd_slice_init_seg(rd_slice_t *slice, + const rd_buf_t *rbuf, + const rd_segment_t *seg, + size_t rof, + size_t size); +int rd_slice_init(rd_slice_t *slice, + const rd_buf_t *rbuf, + size_t absof, + size_t size); +void rd_slice_init_full(rd_slice_t *slice, const rd_buf_t *rbuf); -int rd_slice_init_seg (rd_slice_t *slice, const rd_buf_t *rbuf, - const rd_segment_t *seg, size_t rof, size_t size); -int rd_slice_init (rd_slice_t *slice, const rd_buf_t *rbuf, - size_t absof, size_t size); -void rd_slice_init_full (rd_slice_t *slice, const rd_buf_t *rbuf); - -size_t rd_slice_reader (rd_slice_t *slice, const void **p); -size_t rd_slice_peeker (const rd_slice_t *slice, const void **p); +size_t rd_slice_reader(rd_slice_t *slice, const void **p); +size_t rd_slice_peeker(const rd_slice_t *slice, const void **p); -size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size); -size_t rd_slice_peek (const rd_slice_t *slice, size_t offset, - void *dst, size_t size); +size_t rd_slice_read(rd_slice_t *slice, void *dst, size_t size); +size_t +rd_slice_peek(const rd_slice_t *slice, size_t offset, void *dst, size_t size); -size_t rd_slice_read_uvarint (rd_slice_t *slice, uint64_t *nump); +size_t 
rd_slice_read_uvarint(rd_slice_t *slice, uint64_t *nump); /** * @brief Read a zig-zag varint-encoded signed integer from \p slice, @@ -310,16 +318,15 @@ size_t rd_slice_read_uvarint (rd_slice_t *slice, uint64_t *nump); * @returns the number of bytes read on success or 0 in case of * buffer underflow. */ -static RD_UNUSED RD_INLINE -size_t rd_slice_read_varint (rd_slice_t *slice, int64_t *nump) { +static RD_UNUSED RD_INLINE size_t rd_slice_read_varint(rd_slice_t *slice, + int64_t *nump) { size_t r; uint64_t unum; r = rd_slice_read_uvarint(slice, &unum); if (likely(r > 0)) { /* Zig-zag decoding */ - *nump = (int64_t)((unum >> 1) ^ - -(int64_t)(unum & 1)); + *nump = (int64_t)((unum >> 1) ^ -(int64_t)(unum & 1)); } return r; @@ -327,35 +334,36 @@ size_t rd_slice_read_varint (rd_slice_t *slice, int64_t *nump) { +const void *rd_slice_ensure_contig(rd_slice_t *slice, size_t size); -const void *rd_slice_ensure_contig (rd_slice_t *slice, size_t size); - -int rd_slice_seek (rd_slice_t *slice, size_t offset); +int rd_slice_seek(rd_slice_t *slice, size_t offset); -size_t rd_slice_get_iov (const rd_slice_t *slice, - struct iovec *iovs, size_t *iovcntp, - size_t iov_max, size_t size_max); +size_t rd_slice_get_iov(const rd_slice_t *slice, + struct iovec *iovs, + size_t *iovcntp, + size_t iov_max, + size_t size_max); -uint32_t rd_slice_crc32 (rd_slice_t *slice); -uint32_t rd_slice_crc32c (rd_slice_t *slice); +uint32_t rd_slice_crc32(rd_slice_t *slice); +uint32_t rd_slice_crc32c(rd_slice_t *slice); -int rd_slice_narrow (rd_slice_t *slice, rd_slice_t *save_slice, size_t size) - RD_WARN_UNUSED_RESULT; -int rd_slice_narrow_relative (rd_slice_t *slice, rd_slice_t *save_slice, - size_t relsize) - RD_WARN_UNUSED_RESULT; -void rd_slice_widen (rd_slice_t *slice, const rd_slice_t *save_slice); -int rd_slice_narrow_copy (const rd_slice_t *orig, rd_slice_t *new_slice, - size_t size) - RD_WARN_UNUSED_RESULT; -int rd_slice_narrow_copy_relative (const rd_slice_t *orig, - rd_slice_t *new_slice, - 
size_t relsize) - RD_WARN_UNUSED_RESULT; +int rd_slice_narrow(rd_slice_t *slice, + rd_slice_t *save_slice, + size_t size) RD_WARN_UNUSED_RESULT; +int rd_slice_narrow_relative(rd_slice_t *slice, + rd_slice_t *save_slice, + size_t relsize) RD_WARN_UNUSED_RESULT; +void rd_slice_widen(rd_slice_t *slice, const rd_slice_t *save_slice); +int rd_slice_narrow_copy(const rd_slice_t *orig, + rd_slice_t *new_slice, + size_t size) RD_WARN_UNUSED_RESULT; +int rd_slice_narrow_copy_relative(const rd_slice_t *orig, + rd_slice_t *new_slice, + size_t relsize) RD_WARN_UNUSED_RESULT; -void rd_slice_dump (const rd_slice_t *slice, int do_hexdump); +void rd_slice_dump(const rd_slice_t *slice, int do_hexdump); /**@}*/ diff --git a/src/rdcrc32.c b/src/rdcrc32.c index 79f79029ce..2a6e126c14 100644 --- a/src/rdcrc32.c +++ b/src/rdcrc32.c @@ -29,7 +29,7 @@ * \file rdcrc32.c * Functions and types for CRC checks. * - * + * * * Generated on Tue May 8 17:37:04 2012, * by pycrc v0.7.10, http://www.tty1.net/pycrc/ @@ -42,7 +42,7 @@ * ReflectOut = True * Algorithm = table-driven *****************************************************************************/ -#include "rdcrc32.h" /* include the header file generated with pycrc */ +#include "rdcrc32.h" /* include the header file generated with pycrc */ #include #include @@ -50,71 +50,49 @@ * Static table used for the table_driven implementation. 
*****************************************************************************/ const rd_crc32_t crc_table[256] = { - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, - 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, - 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, - 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, - 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, - 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, - 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, - 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, - 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, - 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, - 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, - 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, - 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, - 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, - 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, - 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, - 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, - 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, - 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, - 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, - 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, - 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, - 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, - 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, - 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, - 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, - 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, - 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, - 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, - 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, - 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, - 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, - 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, - 0xf762575d, 0x806567cb, 
0x196c3671, 0x6e6b06e7, - 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, - 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, - 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, - 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, - 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, - 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, - 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, - 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, - 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, - 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, - 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, - 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, - 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, - 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, - 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, - 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, - 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, - 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, - 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, - 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, - 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, - 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, - 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, - 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d -}; + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, + 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, + 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, + 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, + 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, + 
0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, + 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, + 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, + 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, + 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, + 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, + 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, + 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, + 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e, + 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, + 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, + 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, + 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, + 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, + 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, + 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, + 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d}; /** * Reflect all bits of a \a data word of \a data_len bytes. @@ -123,20 +101,14 @@ const rd_crc32_t crc_table[256] = { * \param data_len The width of \a data expressed in number of bits. * \return The reflected data. *****************************************************************************/ -rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len) -{ - unsigned int i; - rd_crc32_t ret; +rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len) { + unsigned int i; + rd_crc32_t ret; - ret = data & 0x01; - for (i = 1; i < data_len; i++) { - data >>= 1; - ret = (ret << 1) | (data & 0x01); - } - return ret; + ret = data & 0x01; + for (i = 1; i < data_len; i++) { + data >>= 1; + ret = (ret << 1) | (data & 0x01); + } + return ret; } - - - - - diff --git a/src/rdcrc32.h b/src/rdcrc32.h index 8193073542..c3195fca62 100644 --- a/src/rdcrc32.h +++ b/src/rdcrc32.h @@ -76,7 +76,7 @@ extern "C" { typedef uint32_t rd_crc32_t; #if !WITH_ZLIB -extern const rd_crc32_t crc_table[256]; +extern const rd_crc32_t crc_table[256]; #endif @@ -95,12 +95,11 @@ rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len); * * \return The initial crc value. 
*****************************************************************************/ -static RD_INLINE rd_crc32_t rd_crc32_init(void) -{ +static RD_INLINE rd_crc32_t rd_crc32_init(void) { #if WITH_ZLIB return crc32(0, NULL, 0); #else - return 0xffffffff; + return 0xffffffff; #endif } @@ -113,7 +112,7 @@ static RD_INLINE rd_crc32_t rd_crc32_init(void) * \param data_len Number of bytes in the \a data buffer. * \return The updated crc value. *****************************************************************************/ - /** +/** * Update the crc value with new data. * * \param crc The current crc value. @@ -121,22 +120,22 @@ static RD_INLINE rd_crc32_t rd_crc32_init(void) * \param data_len Number of bytes in the \a data buffer. * \return The updated crc value. *****************************************************************************/ -static RD_INLINE RD_UNUSED -rd_crc32_t rd_crc32_update(rd_crc32_t crc, const unsigned char *data, size_t data_len) -{ +static RD_INLINE RD_UNUSED rd_crc32_t rd_crc32_update(rd_crc32_t crc, + const unsigned char *data, + size_t data_len) { #if WITH_ZLIB rd_assert(data_len <= UINT_MAX); - return crc32(crc, data, (uInt) data_len); + return crc32(crc, data, (uInt)data_len); #else - unsigned int tbl_idx; + unsigned int tbl_idx; - while (data_len--) { - tbl_idx = (crc ^ *data) & 0xff; - crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff; + while (data_len--) { + tbl_idx = (crc ^ *data) & 0xff; + crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff; - data++; - } - return crc & 0xffffffff; + data++; + } + return crc & 0xffffffff; #endif } @@ -147,12 +146,11 @@ rd_crc32_t rd_crc32_update(rd_crc32_t crc, const unsigned char *data, size_t dat * \param crc The current crc value. * \return The final crc value. 
*****************************************************************************/ -static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc) -{ +static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc) { #if WITH_ZLIB return crc; #else - return crc ^ 0xffffffff; + return crc ^ 0xffffffff; #endif } @@ -160,14 +158,13 @@ static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc) /** * Wrapper for performing CRC32 on the provided buffer. */ -static RD_INLINE rd_crc32_t rd_crc32 (const char *data, size_t data_len) { - return rd_crc32_finalize(rd_crc32_update(rd_crc32_init(), - (const unsigned char *)data, - data_len)); +static RD_INLINE rd_crc32_t rd_crc32(const char *data, size_t data_len) { + return rd_crc32_finalize(rd_crc32_update( + rd_crc32_init(), (const unsigned char *)data, data_len)); } #ifdef __cplusplus -} /* closing brace for extern "C" */ +} /* closing brace for extern "C" */ #endif -#endif /* __RDCRC32___H__ */ +#endif /* __RDCRC32___H__ */ diff --git a/src/rddl.c b/src/rddl.c index 5f8e16ed56..785e28c486 100644 --- a/src/rddl.c +++ b/src/rddl.c @@ -44,7 +44,7 @@ * @brief Latest thread-local dl error, normalized to suit our logging. * @returns a newly allocated string that must be freed */ -static char *rd_dl_error (void) { +static char *rd_dl_error(void) { #if WITH_LIBDL char *errstr; char *s; @@ -72,20 +72,20 @@ static char *rd_dl_error (void) { * else NULL. 
*/ static rd_dl_hnd_t * -rd_dl_open0 (const char *path, char *errstr, size_t errstr_size) { +rd_dl_open0(const char *path, char *errstr, size_t errstr_size) { void *handle; const char *loadfunc; #if WITH_LIBDL loadfunc = "dlopen()"; - handle = dlopen(path, RTLD_NOW | RTLD_LOCAL); + handle = dlopen(path, RTLD_NOW | RTLD_LOCAL); #elif defined(_WIN32) loadfunc = "LoadLibrary()"; - handle = (void *)LoadLibraryA(path); + handle = (void *)LoadLibraryA(path); #endif if (!handle) { char *dlerrstr = rd_dl_error(); - rd_snprintf(errstr, errstr_size, "%s failed: %s", - loadfunc, dlerrstr); + rd_snprintf(errstr, errstr_size, "%s failed: %s", loadfunc, + dlerrstr); rd_free(dlerrstr); } return (rd_dl_hnd_t *)handle; @@ -98,7 +98,7 @@ rd_dl_open0 (const char *path, char *errstr, size_t errstr_size) { * @returns the library handle (platform dependent, thus opaque) on success, * else NULL. */ -rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) { +rd_dl_hnd_t *rd_dl_open(const char *path, char *errstr, size_t errstr_size) { rd_dl_hnd_t *handle; char *extpath; size_t pathlen; @@ -135,7 +135,7 @@ rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) { pathlen = strlen(path); extpath = rd_alloca(pathlen + strlen(solib_ext) + 1); memcpy(extpath, path, pathlen); - memcpy(extpath+pathlen, solib_ext, strlen(solib_ext) + 1); + memcpy(extpath + pathlen, solib_ext, strlen(solib_ext) + 1); /* Try again with extension */ return rd_dl_open0(extpath, errstr, errstr_size); @@ -146,7 +146,7 @@ rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) { * @brief Close handle previously returned by rd_dl_open() * @remark errors are ignored (what can we do anyway?) 
*/ -void rd_dl_close (rd_dl_hnd_t *handle) { +void rd_dl_close(rd_dl_hnd_t *handle) { #if WITH_LIBDL dlclose((void *)handle); #elif defined(_WIN32) @@ -158,9 +158,10 @@ void rd_dl_close (rd_dl_hnd_t *handle) { * @brief look up address of \p symbol in library handle \p handle * @returns the function pointer on success or NULL on error. */ -void * -rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol, - char *errstr, size_t errstr_size) { +void *rd_dl_sym(rd_dl_hnd_t *handle, + const char *symbol, + char *errstr, + size_t errstr_size) { void *func; #if WITH_LIBDL func = dlsym((void *)handle, symbol); @@ -170,10 +171,9 @@ rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol, if (!func) { char *dlerrstr = rd_dl_error(); rd_snprintf(errstr, errstr_size, - "Failed to load symbol \"%s\": %s", - symbol, dlerrstr); + "Failed to load symbol \"%s\": %s", symbol, + dlerrstr); rd_free(dlerrstr); } return func; } - diff --git a/src/rddl.h b/src/rddl.h index 6a49d2e0db..eaf6eb6d5e 100644 --- a/src/rddl.h +++ b/src/rddl.h @@ -33,9 +33,11 @@ typedef void rd_dl_hnd_t; -rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size); -void rd_dl_close (rd_dl_hnd_t *handle); -void *rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol, - char *errstr, size_t errstr_size); +rd_dl_hnd_t *rd_dl_open(const char *path, char *errstr, size_t errstr_size); +void rd_dl_close(rd_dl_hnd_t *handle); +void *rd_dl_sym(rd_dl_hnd_t *handle, + const char *symbol, + char *errstr, + size_t errstr_size); #endif /* _RDDL_H */ diff --git a/src/rdendian.h b/src/rdendian.h index c1e201eefe..613d44bfaf 100644 --- a/src/rdendian.h +++ b/src/rdendian.h @@ -41,59 +41,59 @@ */ #ifdef __FreeBSD__ - #include +#include #elif defined __GLIBC__ - #include - #ifndef be64toh - /* Support older glibc (<2.9) which lack be64toh */ - #include - #if __BYTE_ORDER == __BIG_ENDIAN - #define be16toh(x) (x) - #define be32toh(x) (x) - #define be64toh(x) (x) - #define le64toh(x) __bswap_64 (x) - #define le32toh(x) __bswap_32 
(x) - #else - #define be16toh(x) __bswap_16 (x) - #define be32toh(x) __bswap_32 (x) - #define be64toh(x) __bswap_64 (x) - #define le64toh(x) (x) - #define le32toh(x) (x) - #endif - #endif +#include +#ifndef be64toh +/* Support older glibc (<2.9) which lack be64toh */ +#include +#if __BYTE_ORDER == __BIG_ENDIAN +#define be16toh(x) (x) +#define be32toh(x) (x) +#define be64toh(x) (x) +#define le64toh(x) __bswap_64(x) +#define le32toh(x) __bswap_32(x) +#else +#define be16toh(x) __bswap_16(x) +#define be32toh(x) __bswap_32(x) +#define be64toh(x) __bswap_64(x) +#define le64toh(x) (x) +#define le32toh(x) (x) +#endif +#endif #elif defined __CYGWIN__ - #include +#include #elif defined __BSD__ - #include +#include #elif defined __sun - #include - #include +#include +#include #define __LITTLE_ENDIAN 1234 -#define __BIG_ENDIAN 4321 +#define __BIG_ENDIAN 4321 #ifdef _BIG_ENDIAN #define __BYTE_ORDER __BIG_ENDIAN -#define be64toh(x) (x) -#define be32toh(x) (x) -#define be16toh(x) (x) -#define le16toh(x) ((uint16_t)BSWAP_16(x)) -#define le32toh(x) BSWAP_32(x) -#define le64toh(x) BSWAP_64(x) -# else +#define be64toh(x) (x) +#define be32toh(x) (x) +#define be16toh(x) (x) +#define le16toh(x) ((uint16_t)BSWAP_16(x)) +#define le32toh(x) BSWAP_32(x) +#define le64toh(x) BSWAP_64(x) +#else #define __BYTE_ORDER __LITTLE_ENDIAN -#define be64toh(x) BSWAP_64(x) -#define be32toh(x) ntohl(x) -#define be16toh(x) ntohs(x) -#define le16toh(x) (x) -#define le32toh(x) (x) -#define le64toh(x) (x) -#define htole16(x) (x) -#define htole64(x) (x) +#define be64toh(x) BSWAP_64(x) +#define be32toh(x) ntohl(x) +#define be16toh(x) ntohs(x) +#define le16toh(x) (x) +#define le32toh(x) (x) +#define le64toh(x) (x) +#define htole16(x) (x) +#define htole64(x) (x) #endif /* __sun */ #elif defined __APPLE__ - #include - #include +#include +#include #if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN #define be64toh(x) (x) #define be32toh(x) (x) @@ -120,26 +120,23 @@ #define le32toh(x) (x) #define le64toh(x) (x) -#elif 
defined _AIX /* AIX is always big endian */ +#elif defined _AIX /* AIX is always big endian */ #define be64toh(x) (x) #define be32toh(x) (x) #define be16toh(x) (x) -#define le32toh(x) \ - ((((x) & 0xff) << 24) | \ - (((x) & 0xff00) << 8) | \ - (((x) & 0xff0000) >> 8) | \ - (((x) & 0xff000000) >> 24)) -#define le64toh(x) \ - ((((x) & 0x00000000000000ffL) << 56) | \ - (((x) & 0x000000000000ff00L) << 40) | \ - (((x) & 0x0000000000ff0000L) << 24) | \ - (((x) & 0x00000000ff000000L) << 8) | \ - (((x) & 0x000000ff00000000L) >> 8) | \ - (((x) & 0x0000ff0000000000L) >> 24) | \ - (((x) & 0x00ff000000000000L) >> 40) | \ - (((x) & 0xff00000000000000L) >> 56)) +#define le32toh(x) \ + ((((x)&0xff) << 24) | (((x)&0xff00) << 8) | (((x)&0xff0000) >> 8) | \ + (((x)&0xff000000) >> 24)) +#define le64toh(x) \ + ((((x)&0x00000000000000ffL) << 56) | \ + (((x)&0x000000000000ff00L) << 40) | \ + (((x)&0x0000000000ff0000L) << 24) | \ + (((x)&0x00000000ff000000L) << 8) | (((x)&0x000000ff00000000L) >> 8) | \ + (((x)&0x0000ff0000000000L) >> 24) | \ + (((x)&0x00ff000000000000L) >> 40) | \ + (((x)&0xff00000000000000L) >> 56)) #else - #include +#include #endif diff --git a/src/rdfloat.h b/src/rdfloat.h index e3654156e3..310045f0ea 100644 --- a/src/rdfloat.h +++ b/src/rdfloat.h @@ -37,9 +37,8 @@ * More info: * http://docs.sun.com/source/806-3568/ncg_goldberg.html */ -static RD_INLINE RD_UNUSED -int rd_dbl_eq0 (double a, double b, double prec) { - return fabs(a - b) < prec; +static RD_INLINE RD_UNUSED int rd_dbl_eq0(double a, double b, double prec) { + return fabs(a - b) < prec; } /* A default 'good' double-equality precision value. @@ -53,16 +52,16 @@ int rd_dbl_eq0 (double a, double b, double prec) { * rd_dbl_eq(a,b) * Same as rd_dbl_eq0() above but with a predefined 'good' precision. */ -#define rd_dbl_eq(a,b) rd_dbl_eq0(a,b,RD_DBL_EPSILON) +#define rd_dbl_eq(a, b) rd_dbl_eq0(a, b, RD_DBL_EPSILON) /** * rd_dbl_ne(a,b) * Same as rd_dbl_eq() above but with reversed logic: not-equal. 
*/ -#define rd_dbl_ne(a,b) (!rd_dbl_eq0(a,b,RD_DBL_EPSILON)) +#define rd_dbl_ne(a, b) (!rd_dbl_eq0(a, b, RD_DBL_EPSILON)) /** * rd_dbl_zero(a) * Checks if the double `a' is zero (or close enough). */ -#define rd_dbl_zero(a) rd_dbl_eq0(a,0.0,RD_DBL_EPSILON) +#define rd_dbl_zero(a) rd_dbl_eq0(a, 0.0, RD_DBL_EPSILON) diff --git a/src/rdfnv1a.c b/src/rdfnv1a.c index 34feffae88..e951ec59f2 100644 --- a/src/rdfnv1a.c +++ b/src/rdfnv1a.c @@ -34,14 +34,15 @@ /* FNV-1a by Glenn Fowler, Landon Curt Noll, and Kiem-Phong Vo * * Based on http://www.isthe.com/chongo/src/fnv/hash_32a.c - * with librdkafka modifications to match the Sarama default Producer implementation, - * as seen here: https://github.com/Shopify/sarama/blob/master/partitioner.go#L203 - * Note that this implementation is only compatible with Sarama's default + * with librdkafka modifications to match the Sarama default Producer + * implementation, as seen here: + * https://github.com/Shopify/sarama/blob/master/partitioner.go#L203 Note that + * this implementation is only compatible with Sarama's default * NewHashPartitioner and not NewReferenceHashPartitioner. 
*/ -uint32_t rd_fnv1a (const void *key, size_t len) { - const uint32_t prime = 0x01000193; // 16777619 - const uint32_t offset = 0x811C9DC5; // 2166136261 +uint32_t rd_fnv1a(const void *key, size_t len) { + const uint32_t prime = 0x01000193; // 16777619 + const uint32_t offset = 0x811C9DC5; // 2166136261 size_t i; int32_t h = offset; @@ -52,7 +53,8 @@ uint32_t rd_fnv1a (const void *key, size_t len) { h *= prime; } - /* Take absolute value to match the Sarama NewHashPartitioner implementation */ + /* Take absolute value to match the Sarama NewHashPartitioner + * implementation */ if (h < 0) { h = -h; } @@ -64,45 +66,44 @@ uint32_t rd_fnv1a (const void *key, size_t len) { /** * @brief Unittest for rd_fnv1a() */ -int unittest_fnv1a (void) { +int unittest_fnv1a(void) { const char *short_unaligned = "1234"; - const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs"; - const char *keysToTest[] = { - "kafka", - "giberish123456789", - short_unaligned, - short_unaligned+1, - short_unaligned+2, - short_unaligned+3, - unaligned, - unaligned+1, - unaligned+2, - unaligned+3, - "", - NULL, + const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs"; + const char *keysToTest[] = { + "kafka", + "giberish123456789", + short_unaligned, + short_unaligned + 1, + short_unaligned + 2, + short_unaligned + 3, + unaligned, + unaligned + 1, + unaligned + 2, + unaligned + 3, + "", + NULL, }; // Acquired via https://play.golang.org/p/vWIhw3zJINA const int32_t golang_hashfnv_results[] = { - 0xd33c4e1, // kafka - 0x77a58295, // giberish123456789 - 0x23bdd03, // short_unaligned - 0x2dea3cd2, // short_unaligned+1 - 0x740fa83e, // short_unaligned+2 - 0x310ca263, // short_unaligned+3 - 0x65cbd69c, // unaligned - 0x6e49c79a, // unaligned+1 - 0x69eed356, // unaligned+2 - 0x6abcc023, // unaligned+3 - 0x7ee3623b, // "" - 0x7ee3623b, // NULL + 0xd33c4e1, // kafka + 0x77a58295, // giberish123456789 + 0x23bdd03, // short_unaligned + 0x2dea3cd2, // short_unaligned+1 + 0x740fa83e, // 
short_unaligned+2 + 0x310ca263, // short_unaligned+3 + 0x65cbd69c, // unaligned + 0x6e49c79a, // unaligned+1 + 0x69eed356, // unaligned+2 + 0x6abcc023, // unaligned+3 + 0x7ee3623b, // "" + 0x7ee3623b, // NULL }; size_t i; for (i = 0; i < RD_ARRAYSIZE(keysToTest); i++) { - uint32_t h = rd_fnv1a(keysToTest[i], - keysToTest[i] ? - strlen(keysToTest[i]) : 0); + uint32_t h = rd_fnv1a( + keysToTest[i], keysToTest[i] ? strlen(keysToTest[i]) : 0); RD_UT_ASSERT((int32_t)h == golang_hashfnv_results[i], "Calculated FNV-1a hash 0x%x for \"%s\", " "expected 0x%x", diff --git a/src/rdfnv1a.h b/src/rdfnv1a.h index bd6e06ddc2..8df66b0d62 100644 --- a/src/rdfnv1a.h +++ b/src/rdfnv1a.h @@ -29,7 +29,7 @@ #ifndef __RDFNV1A___H__ #define __RDFNV1A___H__ -uint32_t rd_fnv1a (const void *key, size_t len); -int unittest_fnv1a (void); +uint32_t rd_fnv1a(const void *key, size_t len); +int unittest_fnv1a(void); -#endif // __RDFNV1A___H__ +#endif // __RDFNV1A___H__ diff --git a/src/rdgz.c b/src/rdgz.c index 14958fcc1c..794bd9cc1c 100644 --- a/src/rdgz.c +++ b/src/rdgz.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -32,87 +32,89 @@ #include -#define RD_GZ_CHUNK 262144 - -void *rd_gz_decompress (const void *compressed, int compressed_len, - uint64_t *decompressed_lenp) { - int pass = 1; - char *decompressed = NULL; - - /* First pass (1): calculate decompressed size. - * (pass-1 is skipped if *decompressed_lenp is - * non-zero). - * Second pass (2): perform actual decompression. 
- */ - - if (*decompressed_lenp != 0LLU) - pass++; - - for (; pass <= 2 ; pass++) { - z_stream strm = RD_ZERO_INIT; - char buf[512]; - char *p; - int len; - int r; - - if ((r = inflateInit2(&strm, 15+32)) != Z_OK) - goto fail; - - strm.next_in = (void *)compressed; - strm.avail_in = compressed_len; - - if (pass == 1) { - /* Use dummy output buffer */ - p = buf; - len = sizeof(buf); - } else { - /* Use real output buffer */ - p = decompressed; - len = (int)*decompressed_lenp; - } - - do { - strm.next_out = (unsigned char *)p; - strm.avail_out = len; - - r = inflate(&strm, Z_NO_FLUSH); - switch (r) { - case Z_STREAM_ERROR: - case Z_NEED_DICT: - case Z_DATA_ERROR: - case Z_MEM_ERROR: - inflateEnd(&strm); - goto fail; - } - - if (pass == 2) { - /* Advance output pointer (in pass 2). */ - p += len - strm.avail_out; - len -= len - strm.avail_out; - } - - } while (strm.avail_out == 0 && r != Z_STREAM_END); - - - if (pass == 1) { - *decompressed_lenp = strm.total_out; - if (!(decompressed = rd_malloc((size_t)(*decompressed_lenp)+1))) { - inflateEnd(&strm); - return NULL; - } - /* For convenience of the caller we nul-terminate - * the buffer. If it happens to be a string there - * is no need for extra copies. */ - decompressed[*decompressed_lenp] = '\0'; - } - - inflateEnd(&strm); - } - - return decompressed; +#define RD_GZ_CHUNK 262144 + +void *rd_gz_decompress(const void *compressed, + int compressed_len, + uint64_t *decompressed_lenp) { + int pass = 1; + char *decompressed = NULL; + + /* First pass (1): calculate decompressed size. + * (pass-1 is skipped if *decompressed_lenp is + * non-zero). + * Second pass (2): perform actual decompression. 
+ */ + + if (*decompressed_lenp != 0LLU) + pass++; + + for (; pass <= 2; pass++) { + z_stream strm = RD_ZERO_INIT; + char buf[512]; + char *p; + int len; + int r; + + if ((r = inflateInit2(&strm, 15 + 32)) != Z_OK) + goto fail; + + strm.next_in = (void *)compressed; + strm.avail_in = compressed_len; + + if (pass == 1) { + /* Use dummy output buffer */ + p = buf; + len = sizeof(buf); + } else { + /* Use real output buffer */ + p = decompressed; + len = (int)*decompressed_lenp; + } + + do { + strm.next_out = (unsigned char *)p; + strm.avail_out = len; + + r = inflate(&strm, Z_NO_FLUSH); + switch (r) { + case Z_STREAM_ERROR: + case Z_NEED_DICT: + case Z_DATA_ERROR: + case Z_MEM_ERROR: + inflateEnd(&strm); + goto fail; + } + + if (pass == 2) { + /* Advance output pointer (in pass 2). */ + p += len - strm.avail_out; + len -= len - strm.avail_out; + } + + } while (strm.avail_out == 0 && r != Z_STREAM_END); + + + if (pass == 1) { + *decompressed_lenp = strm.total_out; + if (!(decompressed = rd_malloc( + (size_t)(*decompressed_lenp) + 1))) { + inflateEnd(&strm); + return NULL; + } + /* For convenience of the caller we nul-terminate + * the buffer. If it happens to be a string there + * is no need for extra copies. */ + decompressed[*decompressed_lenp] = '\0'; + } + + inflateEnd(&strm); + } + + return decompressed; fail: - if (decompressed) - rd_free(decompressed); - return NULL; + if (decompressed) + rd_free(decompressed); + return NULL; } diff --git a/src/rdgz.h b/src/rdgz.h index 5c4017b764..10d661cb3b 100644 --- a/src/rdgz.h +++ b/src/rdgz.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. 
Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -39,7 +39,8 @@ * * The decompressed length is returned in '*decompressed_lenp'. 
*/ -void *rd_gz_decompress (const void *compressed, int compressed_len, - uint64_t *decompressed_lenp); +void *rd_gz_decompress(const void *compressed, + int compressed_len, + uint64_t *decompressed_lenp); #endif /* _RDGZ_H_ */ diff --git a/src/rdhdrhistogram.c b/src/rdhdrhistogram.c index bdf408295e..3f2b6758b5 100644 --- a/src/rdhdrhistogram.c +++ b/src/rdhdrhistogram.c @@ -26,7 +26,7 @@ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. -*/ + */ /* * librdkafka - Apache Kafka C library @@ -78,12 +78,13 @@ #include "rdunittest.h" #include "rdfloat.h" -void rd_hdr_histogram_destroy (rd_hdr_histogram_t *hdr) { +void rd_hdr_histogram_destroy(rd_hdr_histogram_t *hdr) { rd_free(hdr); } -rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue, - int significantFigures) { +rd_hdr_histogram_t *rd_hdr_histogram_new(int64_t minValue, + int64_t maxValue, + int significantFigures) { rd_hdr_histogram_t *hdr; int64_t largestValueWithSingleUnitResolution; int32_t subBucketCountMagnitude; @@ -101,22 +102,21 @@ rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue, return NULL; largestValueWithSingleUnitResolution = - (int64_t)(2.0 * pow(10.0, (double)significantFigures)); + (int64_t)(2.0 * pow(10.0, (double)significantFigures)); subBucketCountMagnitude = - (int32_t)ceil( - log2((double)largestValueWithSingleUnitResolution)); + (int32_t)ceil(log2((double)largestValueWithSingleUnitResolution)); subBucketHalfCountMagnitude = RD_MAX(subBucketCountMagnitude, 1) - 1; unitMagnitude = (int32_t)RD_MAX(floor(log2((double)minValue)), 0); - subBucketCount = (int32_t)pow(2, - (double)subBucketHalfCountMagnitude+1.0); + subBucketCount = + (int32_t)pow(2, (double)subBucketHalfCountMagnitude + 1.0); subBucketHalfCount = subBucketCount / 2; - subBucketMask = (int64_t)(subBucketCount-1) << unitMagnitude; + subBucketMask = 
(int64_t)(subBucketCount - 1) << unitMagnitude; /* Determine exponent range needed to support the trackable * value with no overflow: */ @@ -127,24 +127,24 @@ rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue, } bucketCount = bucketsNeeded; - countsLen = (bucketCount + 1) * (subBucketCount / 2); + countsLen = (bucketCount + 1) * (subBucketCount / 2); hdr = rd_calloc(1, sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen)); - hdr->counts = (int64_t *)(hdr+1); + hdr->counts = (int64_t *)(hdr + 1); hdr->allocatedSize = sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen); - hdr->lowestTrackableValue = minValue; - hdr->highestTrackableValue = maxValue; - hdr->unitMagnitude = unitMagnitude; - hdr->significantFigures = significantFigures; + hdr->lowestTrackableValue = minValue; + hdr->highestTrackableValue = maxValue; + hdr->unitMagnitude = unitMagnitude; + hdr->significantFigures = significantFigures; hdr->subBucketHalfCountMagnitude = subBucketHalfCountMagnitude; - hdr->subBucketHalfCount = subBucketHalfCount; - hdr->subBucketMask = subBucketMask; - hdr->subBucketCount = subBucketCount; - hdr->bucketCount = bucketCount; - hdr->countsLen = countsLen; - hdr->totalCount = 0; - hdr->lowestOutOfRange = minValue; - hdr->highestOutOfRange = maxValue; + hdr->subBucketHalfCount = subBucketHalfCount; + hdr->subBucketMask = subBucketMask; + hdr->subBucketCount = subBucketCount; + hdr->bucketCount = bucketCount; + hdr->countsLen = countsLen; + hdr->totalCount = 0; + hdr->lowestOutOfRange = minValue; + hdr->highestOutOfRange = maxValue; return hdr; } @@ -152,32 +152,32 @@ rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue, /** * @brief Deletes all recorded values and resets histogram. 
*/ -void rd_hdr_histogram_reset (rd_hdr_histogram_t *hdr) { +void rd_hdr_histogram_reset(rd_hdr_histogram_t *hdr) { int32_t i; hdr->totalCount = 0; - for (i = 0 ; i < hdr->countsLen ; i++) + for (i = 0; i < hdr->countsLen; i++) hdr->counts[i] = 0; } -static RD_INLINE int32_t -rd_hdr_countsIndex (const rd_hdr_histogram_t *hdr, - int32_t bucketIdx, int32_t subBucketIdx) { - int32_t bucketBaseIdx = (bucketIdx + 1) << - hdr->subBucketHalfCountMagnitude; +static RD_INLINE int32_t rd_hdr_countsIndex(const rd_hdr_histogram_t *hdr, + int32_t bucketIdx, + int32_t subBucketIdx) { + int32_t bucketBaseIdx = (bucketIdx + 1) + << hdr->subBucketHalfCountMagnitude; int32_t offsetInBucket = subBucketIdx - hdr->subBucketHalfCount; return bucketBaseIdx + offsetInBucket; } -static RD_INLINE int64_t -rd_hdr_getCountAtIndex (const rd_hdr_histogram_t *hdr, - int32_t bucketIdx, int32_t subBucketIdx) { +static RD_INLINE int64_t rd_hdr_getCountAtIndex(const rd_hdr_histogram_t *hdr, + int32_t bucketIdx, + int32_t subBucketIdx) { return hdr->counts[rd_hdr_countsIndex(hdr, bucketIdx, subBucketIdx)]; } -static RD_INLINE int64_t bitLen (int64_t x) { +static RD_INLINE int64_t bitLen(int64_t x) { int64_t n = 0; for (; x >= 0x8000; x >>= 16) n += 16; @@ -199,29 +199,30 @@ static RD_INLINE int64_t bitLen (int64_t x) { } -static RD_INLINE int32_t -rd_hdr_getBucketIndex (const rd_hdr_histogram_t *hdr, int64_t v) { +static RD_INLINE int32_t rd_hdr_getBucketIndex(const rd_hdr_histogram_t *hdr, + int64_t v) { int64_t pow2Ceiling = bitLen(v | hdr->subBucketMask); return (int32_t)(pow2Ceiling - (int64_t)hdr->unitMagnitude - - (int64_t)(hdr->subBucketHalfCountMagnitude+1)); + (int64_t)(hdr->subBucketHalfCountMagnitude + 1)); } -static RD_INLINE int32_t -rd_hdr_getSubBucketIdx (const rd_hdr_histogram_t *hdr, int64_t v, int32_t idx) { +static RD_INLINE int32_t rd_hdr_getSubBucketIdx(const rd_hdr_histogram_t *hdr, + int64_t v, + int32_t idx) { return (int32_t)(v >> ((int64_t)idx + 
(int64_t)hdr->unitMagnitude)); } -static RD_INLINE int64_t -rd_hdr_valueFromIndex (const rd_hdr_histogram_t *hdr, - int32_t bucketIdx, int32_t subBucketIdx) { - return (int64_t)subBucketIdx << - ((int64_t)bucketIdx + hdr->unitMagnitude); +static RD_INLINE int64_t rd_hdr_valueFromIndex(const rd_hdr_histogram_t *hdr, + int32_t bucketIdx, + int32_t subBucketIdx) { + return (int64_t)subBucketIdx + << ((int64_t)bucketIdx + hdr->unitMagnitude); } static RD_INLINE int64_t -rd_hdr_sizeOfEquivalentValueRange (const rd_hdr_histogram_t *hdr, int64_t v) { - int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); - int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx); +rd_hdr_sizeOfEquivalentValueRange(const rd_hdr_histogram_t *hdr, int64_t v) { + int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); + int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx); int32_t adjustedBucket = bucketIdx; if (unlikely(subBucketIdx >= hdr->subBucketCount)) adjustedBucket++; @@ -229,35 +230,35 @@ rd_hdr_sizeOfEquivalentValueRange (const rd_hdr_histogram_t *hdr, int64_t v) { } static RD_INLINE int64_t -rd_hdr_lowestEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) { - int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); +rd_hdr_lowestEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) { + int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx); return rd_hdr_valueFromIndex(hdr, bucketIdx, subBucketIdx); } static RD_INLINE int64_t -rd_hdr_nextNonEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) { +rd_hdr_nextNonEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) { return rd_hdr_lowestEquivalentValue(hdr, v) + - rd_hdr_sizeOfEquivalentValueRange(hdr, v); + rd_hdr_sizeOfEquivalentValueRange(hdr, v); } static RD_INLINE int64_t -rd_hdr_highestEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) { +rd_hdr_highestEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) { return 
rd_hdr_nextNonEquivalentValue(hdr, v) - 1; } static RD_INLINE int64_t -rd_hdr_medianEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) { +rd_hdr_medianEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) { return rd_hdr_lowestEquivalentValue(hdr, v) + - (rd_hdr_sizeOfEquivalentValueRange(hdr, v) >> 1); + (rd_hdr_sizeOfEquivalentValueRange(hdr, v) >> 1); } -static RD_INLINE int32_t -rd_hdr_countsIndexFor (const rd_hdr_histogram_t *hdr, int64_t v) { - int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); +static RD_INLINE int32_t rd_hdr_countsIndexFor(const rd_hdr_histogram_t *hdr, + int64_t v) { + int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx); return rd_hdr_countsIndex(hdr, bucketIdx, subBucketIdx); } @@ -274,9 +275,10 @@ typedef struct rd_hdr_iter_s { int64_t highestEquivalentValue; } rd_hdr_iter_t; -#define RD_HDR_ITER_INIT(hdr) { .hdr = hdr, .subBucketIdx = -1 } +#define RD_HDR_ITER_INIT(hdr) \ + { .hdr = hdr, .subBucketIdx = -1 } -static int rd_hdr_iter_next (rd_hdr_iter_t *it) { +static int rd_hdr_iter_next(rd_hdr_iter_t *it) { const rd_hdr_histogram_t *hdr = it->hdr; if (unlikely(it->countToIdx >= hdr->totalCount)) @@ -291,24 +293,22 @@ static int rd_hdr_iter_next (rd_hdr_iter_t *it) { if (unlikely(it->bucketIdx >= hdr->bucketCount)) return 0; - it->countAtIdx = rd_hdr_getCountAtIndex(hdr, - it->bucketIdx, - it->subBucketIdx); + it->countAtIdx = + rd_hdr_getCountAtIndex(hdr, it->bucketIdx, it->subBucketIdx); it->countToIdx += it->countAtIdx; - it->valueFromIdx = rd_hdr_valueFromIndex(hdr, - it->bucketIdx, - it->subBucketIdx); + it->valueFromIdx = + rd_hdr_valueFromIndex(hdr, it->bucketIdx, it->subBucketIdx); it->highestEquivalentValue = - rd_hdr_highestEquivalentValue(hdr, it->valueFromIdx); + rd_hdr_highestEquivalentValue(hdr, it->valueFromIdx); return 1; } -double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr) { +double rd_hdr_histogram_stddev(rd_hdr_histogram_t *hdr) { double 
mean; double geometricDevTotal = 0.0; - rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); + rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); if (hdr->totalCount == 0) return 0; @@ -322,8 +322,9 @@ double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr) { if (it.countAtIdx == 0) continue; - dev = (double)rd_hdr_medianEquivalentValue( - hdr, it.valueFromIdx) - mean; + dev = + (double)rd_hdr_medianEquivalentValue(hdr, it.valueFromIdx) - + mean; geometricDevTotal += (dev * dev) * (double)it.countAtIdx; } @@ -334,8 +335,8 @@ double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr) { /** * @returns the approximate maximum recorded value. */ -int64_t rd_hdr_histogram_max (const rd_hdr_histogram_t *hdr) { - int64_t vmax = 0; +int64_t rd_hdr_histogram_max(const rd_hdr_histogram_t *hdr) { + int64_t vmax = 0; rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); while (rd_hdr_iter_next(&it)) { @@ -348,8 +349,8 @@ int64_t rd_hdr_histogram_max (const rd_hdr_histogram_t *hdr) { /** * @returns the approximate minimum recorded value. */ -int64_t rd_hdr_histogram_min (const rd_hdr_histogram_t *hdr) { - int64_t vmin = 0; +int64_t rd_hdr_histogram_min(const rd_hdr_histogram_t *hdr) { + int64_t vmin = 0; rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); while (rd_hdr_iter_next(&it)) { @@ -364,8 +365,8 @@ int64_t rd_hdr_histogram_min (const rd_hdr_histogram_t *hdr) { /** * @returns the approximate arithmetic mean of the recorded values. 
*/ -double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr) { - int64_t total = 0; +double rd_hdr_histogram_mean(const rd_hdr_histogram_t *hdr) { + int64_t total = 0; rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); if (hdr->totalCount == 0) @@ -373,9 +374,8 @@ double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr) { while (rd_hdr_iter_next(&it)) { if (it.countAtIdx != 0) - total += it.countAtIdx * - rd_hdr_medianEquivalentValue(hdr, - it.valueFromIdx); + total += it.countAtIdx * rd_hdr_medianEquivalentValue( + hdr, it.valueFromIdx); } return (double)total / (double)hdr->totalCount; } @@ -388,7 +388,7 @@ double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr) { * @returns 1 if value was recorded or 0 if value is out of range. */ -int rd_hdr_histogram_record (rd_hdr_histogram_t *hdr, int64_t v) { +int rd_hdr_histogram_record(rd_hdr_histogram_t *hdr, int64_t v) { int32_t idx = rd_hdr_countsIndexFor(hdr, v); if (idx < 0 || hdr->countsLen <= idx) { @@ -410,7 +410,7 @@ int rd_hdr_histogram_record (rd_hdr_histogram_t *hdr, int64_t v) { /** * @returns the recorded value at the given quantile (0..100). */ -int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q) { +int64_t rd_hdr_histogram_quantile(const rd_hdr_histogram_t *hdr, double q) { int64_t total = 0; int64_t countAtPercentile; rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); @@ -419,13 +419,13 @@ int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q) { q = 100.0; countAtPercentile = - (int64_t)(((q / 100.0) * (double)hdr->totalCount) + 0.5); + (int64_t)(((q / 100.0) * (double)hdr->totalCount) + 0.5); while (rd_hdr_iter_next(&it)) { total += it.countAtIdx; if (total >= countAtPercentile) - return rd_hdr_highestEquivalentValue( - hdr, it.valueFromIdx); + return rd_hdr_highestEquivalentValue(hdr, + it.valueFromIdx); } return 0; @@ -444,55 +444,50 @@ int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q) { /** * @returns 0 on success or 1 on failure. 
*/ -static int ut_high_sigfig (void) { +static int ut_high_sigfig(void) { rd_hdr_histogram_t *hdr; const int64_t input[] = { - 459876, 669187, 711612, 816326, 931423, - 1033197, 1131895, 2477317, 3964974, 12718782, + 459876, 669187, 711612, 816326, 931423, + 1033197, 1131895, 2477317, 3964974, 12718782, }; size_t i; int64_t v; const int64_t exp = 1048575; hdr = rd_hdr_histogram_new(459876, 12718782, 5); - for (i = 0 ; i < RD_ARRAYSIZE(input) ; i++) { + for (i = 0; i < RD_ARRAYSIZE(input); i++) { /* Ignore errors (some should fail) */ rd_hdr_histogram_record(hdr, input[i]); } v = rd_hdr_histogram_quantile(hdr, 50); - RD_UT_ASSERT(v == exp, "Median is %"PRId64", expected %"PRId64, - v, exp); + RD_UT_ASSERT(v == exp, "Median is %" PRId64 ", expected %" PRId64, v, + exp); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_quantile (void) { +static int ut_quantile(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); size_t i; const struct { - double q; + double q; int64_t v; } exp[] = { - { 50, 500223 }, - { 75, 750079 }, - { 90, 900095 }, - { 95, 950271 }, - { 99, 990207 }, - { 99.9, 999423 }, - { 99.99, 999935 }, + {50, 500223}, {75, 750079}, {90, 900095}, {95, 950271}, + {99, 990207}, {99.9, 999423}, {99.99, 999935}, }; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, (int64_t)i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", (int64_t)i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i); } - for (i = 0 ; i < RD_ARRAYSIZE(exp) ; i++) { + for (i = 0; i < RD_ARRAYSIZE(exp); i++) { int64_t v = rd_hdr_histogram_quantile(hdr, exp[i].q); RD_UT_ASSERT(v == exp[i].v, - "P%.2f is %"PRId64", expected %"PRId64, + "P%.2f is %" PRId64 ", expected %" PRId64, exp[i].q, v, exp[i].v); } @@ -500,36 +495,36 @@ static int ut_quantile (void) { RD_UT_PASS(); } -static int ut_mean (void) { +static int ut_mean(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); size_t i; 
const double exp = 500000.013312; double v; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, (int64_t)i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", (int64_t)i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i); } v = rd_hdr_histogram_mean(hdr); - RD_UT_ASSERT(rd_dbl_eq0(v, exp, 0.0000001), - "Mean is %f, expected %f", v, exp); + RD_UT_ASSERT(rd_dbl_eq0(v, exp, 0.0000001), "Mean is %f, expected %f", + v, exp); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_stddev (void) { +static int ut_stddev(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); size_t i; - const double exp = 288675.140368; - const double epsilon = 0.000001; + const double exp = 288675.140368; + const double epsilon = 0.000001; double v; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, (int64_t)i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", (int64_t)i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i); } v = rd_hdr_histogram_stddev(hdr); @@ -541,19 +536,19 @@ static int ut_stddev (void) { RD_UT_PASS(); } -static int ut_totalcount (void) { +static int ut_totalcount(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); int64_t i; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int64_t v; int r = rd_hdr_histogram_record(hdr, i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i); v = hdr->totalCount; - RD_UT_ASSERT(v == i+1, - "total_count is %"PRId64", expected %"PRId64, - v, i+1); + RD_UT_ASSERT(v == i + 1, + "total_count is %" PRId64 ", expected %" PRId64, v, + i + 1); } rd_hdr_histogram_destroy(hdr); @@ -561,64 +556,61 @@ static int ut_totalcount (void) { } -static int ut_max (void) { +static int ut_max(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); int64_t i, v; const int64_t exp = 1000447; - for 
(i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i); } v = rd_hdr_histogram_max(hdr); - RD_UT_ASSERT(v == exp, - "Max is %"PRId64", expected %"PRId64, v, exp); + RD_UT_ASSERT(v == exp, "Max is %" PRId64 ", expected %" PRId64, v, exp); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_min (void) { +static int ut_min(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); int64_t i, v; const int64_t exp = 0; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i); } v = rd_hdr_histogram_min(hdr); - RD_UT_ASSERT(v == exp, - "Min is %"PRId64", expected %"PRId64, v, exp); + RD_UT_ASSERT(v == exp, "Min is %" PRId64 ", expected %" PRId64, v, exp); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_reset (void) { +static int ut_reset(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); int64_t i, v; const int64_t exp = 0; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i); } rd_hdr_histogram_reset(hdr); v = rd_hdr_histogram_max(hdr); - RD_UT_ASSERT(v == exp, - "Max is %"PRId64", expected %"PRId64, v, exp); + RD_UT_ASSERT(v == exp, "Max is %" PRId64 ", expected %" PRId64, v, exp); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_nan (void) { +static int ut_nan(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 100000, 3); double v; @@ -632,13 +624,13 @@ static int ut_nan (void) { } -static int ut_sigfigs (void) { +static int ut_sigfigs(void) { int sigfigs; - for (sigfigs = 1 ; sigfigs <= 5 ; sigfigs++) { + for (sigfigs = 1; 
sigfigs <= 5; sigfigs++) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10, sigfigs); RD_UT_ASSERT(hdr->significantFigures == sigfigs, - "Significant figures is %"PRId64", expected %d", + "Significant figures is %" PRId64 ", expected %d", hdr->significantFigures, sigfigs); rd_hdr_histogram_destroy(hdr); } @@ -646,16 +638,16 @@ static int ut_sigfigs (void) { RD_UT_PASS(); } -static int ut_minmax_trackable (void) { - const int64_t minval = 2; - const int64_t maxval = 11; +static int ut_minmax_trackable(void) { + const int64_t minval = 2; + const int64_t maxval = 11; rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(minval, maxval, 3); RD_UT_ASSERT(hdr->lowestTrackableValue == minval, - "lowestTrackableValue is %"PRId64", expected %"PRId64, + "lowestTrackableValue is %" PRId64 ", expected %" PRId64, hdr->lowestTrackableValue, minval); RD_UT_ASSERT(hdr->highestTrackableValue == maxval, - "highestTrackableValue is %"PRId64", expected %"PRId64, + "highestTrackableValue is %" PRId64 ", expected %" PRId64, hdr->highestTrackableValue, maxval); rd_hdr_histogram_destroy(hdr); @@ -663,41 +655,41 @@ static int ut_minmax_trackable (void) { } -static int ut_unitmagnitude_overflow (void) { +static int ut_unitmagnitude_overflow(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(0, 200, 4); - int r = rd_hdr_histogram_record(hdr, 11); + int r = rd_hdr_histogram_record(hdr, 11); RD_UT_ASSERT(r, "record(11) failed\n"); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_subbucketmask_overflow (void) { +static int ut_subbucketmask_overflow(void) { rd_hdr_histogram_t *hdr; - const int64_t input[] = { (int64_t)1e8, (int64_t)2e7, (int64_t)3e7 }; + const int64_t input[] = {(int64_t)1e8, (int64_t)2e7, (int64_t)3e7}; const struct { - double q; + double q; int64_t v; } exp[] = { - { 50, 33554431 }, - { 83.33, 33554431 }, - { 83.34, 100663295 }, - { 99, 100663295 }, + {50, 33554431}, + {83.33, 33554431}, + {83.34, 100663295}, + {99, 100663295}, }; size_t i; hdr = 
rd_hdr_histogram_new((int64_t)2e7, (int64_t)1e8, 5); - for (i = 0 ; i < RD_ARRAYSIZE(input) ; i++) { + for (i = 0; i < RD_ARRAYSIZE(input); i++) { /* Ignore errors (some should fail) */ int r = rd_hdr_histogram_record(hdr, input[i]); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", input[i]); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", input[i]); } - for (i = 0 ; i < RD_ARRAYSIZE(exp) ; i++) { + for (i = 0; i < RD_ARRAYSIZE(exp); i++) { int64_t v = rd_hdr_histogram_quantile(hdr, exp[i].q); RD_UT_ASSERT(v == exp[i].v, - "P%.2f is %"PRId64", expected %"PRId64, + "P%.2f is %" PRId64 ", expected %" PRId64, exp[i].q, v, exp[i].v); } @@ -706,7 +698,7 @@ static int ut_subbucketmask_overflow (void) { } -int unittest_rdhdrhistogram (void) { +int unittest_rdhdrhistogram(void) { int fails = 0; fails += ut_high_sigfig(); diff --git a/src/rdhdrhistogram.h b/src/rdhdrhistogram.h index 681306e6bf..868614b7b0 100644 --- a/src/rdhdrhistogram.h +++ b/src/rdhdrhistogram.h @@ -32,33 +32,33 @@ typedef struct rd_hdr_histogram_s { - int64_t lowestTrackableValue; - int64_t highestTrackableValue; - int64_t unitMagnitude; - int64_t significantFigures; - int32_t subBucketHalfCountMagnitude; - int32_t subBucketHalfCount; - int64_t subBucketMask; - int32_t subBucketCount; - int32_t bucketCount; - int32_t countsLen; - int64_t totalCount; + int64_t lowestTrackableValue; + int64_t highestTrackableValue; + int64_t unitMagnitude; + int64_t significantFigures; + int32_t subBucketHalfCountMagnitude; + int32_t subBucketHalfCount; + int64_t subBucketMask; + int32_t subBucketCount; + int32_t bucketCount; + int32_t countsLen; + int64_t totalCount; int64_t *counts; - int64_t outOfRangeCount; /**< Number of rejected records due to - * value being out of range. */ - int64_t lowestOutOfRange; /**< Lowest value that was out of range. - * Initialized to lowestTrackableValue */ - int64_t highestOutOfRange; /**< Highest value that was out of range. 
- * Initialized to highestTrackableValue */ - int32_t allocatedSize; /**< Allocated size of histogram, for - * sigfigs tuning. */ + int64_t outOfRangeCount; /**< Number of rejected records due to + * value being out of range. */ + int64_t lowestOutOfRange; /**< Lowest value that was out of range. + * Initialized to lowestTrackableValue */ + int64_t highestOutOfRange; /**< Highest value that was out of range. + * Initialized to highestTrackableValue */ + int32_t allocatedSize; /**< Allocated size of histogram, for + * sigfigs tuning. */ } rd_hdr_histogram_t; #endif /* !_RDHDR_HISTOGRAM_H_ */ -void rd_hdr_histogram_destroy (rd_hdr_histogram_t *hdr); +void rd_hdr_histogram_destroy(rd_hdr_histogram_t *hdr); /** * @brief Create a new Hdr_Histogram. @@ -69,18 +69,19 @@ void rd_hdr_histogram_destroy (rd_hdr_histogram_t *hdr); * * @sa rd_hdr_histogram_destroy() */ -rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue, - int significantFigures); +rd_hdr_histogram_t *rd_hdr_histogram_new(int64_t minValue, + int64_t maxValue, + int significantFigures); -void rd_hdr_histogram_reset (rd_hdr_histogram_t *hdr); +void rd_hdr_histogram_reset(rd_hdr_histogram_t *hdr); -int rd_hdr_histogram_record (rd_hdr_histogram_t *hdr, int64_t v); +int rd_hdr_histogram_record(rd_hdr_histogram_t *hdr, int64_t v); -double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr); -double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr); -int64_t rd_hdr_histogram_max (const rd_hdr_histogram_t *hdr); -int64_t rd_hdr_histogram_min (const rd_hdr_histogram_t *hdr); -int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q); +double rd_hdr_histogram_stddev(rd_hdr_histogram_t *hdr); +double rd_hdr_histogram_mean(const rd_hdr_histogram_t *hdr); +int64_t rd_hdr_histogram_max(const rd_hdr_histogram_t *hdr); +int64_t rd_hdr_histogram_min(const rd_hdr_histogram_t *hdr); +int64_t rd_hdr_histogram_quantile(const rd_hdr_histogram_t *hdr, double q); -int 
unittest_rdhdrhistogram (void); +int unittest_rdhdrhistogram(void); diff --git a/src/rdhttp.c b/src/rdhttp.c index 57dc7d3847..91500d865b 100644 --- a/src/rdhttp.c +++ b/src/rdhttp.c @@ -41,16 +41,16 @@ #include "rdhttp.h" /** Maximum response size, increase as necessary. */ -#define RD_HTTP_RESPONSE_SIZE_MAX 1024*1024*500 /* 500kb */ +#define RD_HTTP_RESPONSE_SIZE_MAX 1024 * 1024 * 500 /* 500kb */ -void rd_http_error_destroy (rd_http_error_t *herr) { +void rd_http_error_destroy(rd_http_error_t *herr) { rd_free(herr); } -static rd_http_error_t *rd_http_error_new (int code, const char *fmt, ...) - RD_FORMAT(printf, 2, 3); -static rd_http_error_t *rd_http_error_new (int code, const char *fmt, ...) { +static rd_http_error_t *rd_http_error_new(int code, const char *fmt, ...) + RD_FORMAT(printf, 2, 3); +static rd_http_error_t *rd_http_error_new(int code, const char *fmt, ...) { size_t len = 0; rd_http_error_t *herr; va_list ap; @@ -65,8 +65,8 @@ static rd_http_error_t *rd_http_error_new (int code, const char *fmt, ...) { } /* Use single allocation for both herr and the error string */ - herr = rd_malloc(sizeof(*herr) + len + 1); - herr->code = code; + herr = rd_malloc(sizeof(*herr) + len + 1); + herr->code = code; herr->errstr = herr->data; if (len > 0) @@ -83,21 +83,20 @@ static rd_http_error_t *rd_http_error_new (int code, const char *fmt, ...) { * @brief Same as rd_http_error_new() but reads the error string from the * provided buffer. 
*/ -static rd_http_error_t *rd_http_error_new_from_buf (int code, - const rd_buf_t *rbuf) { +static rd_http_error_t *rd_http_error_new_from_buf(int code, + const rd_buf_t *rbuf) { rd_http_error_t *herr; rd_slice_t slice; size_t len = rd_buf_len(rbuf); if (len == 0) return rd_http_error_new( - code, - "Server did not provide an error string"); + code, "Server did not provide an error string"); /* Use single allocation for both herr and the error string */ - herr = rd_malloc(sizeof(*herr) + len + 1); - herr->code = code; + herr = rd_malloc(sizeof(*herr) + len + 1); + herr->code = code; herr->errstr = herr->data; rd_slice_init_full(&slice, rbuf); rd_slice_read(&slice, herr->errstr, len); @@ -106,7 +105,7 @@ static rd_http_error_t *rd_http_error_new_from_buf (int code, return herr; } -void rd_http_req_destroy (rd_http_req_t *hreq) { +void rd_http_req_destroy(rd_http_req_t *hreq) { RD_IF_FREE(hreq->hreq_curl, curl_easy_cleanup); RD_IF_FREE(hreq->hreq_buf, rd_buf_destroy); } @@ -116,8 +115,8 @@ void rd_http_req_destroy (rd_http_req_t *hreq) { * @brief Curl writefunction. Writes the bytes passed from curl * to the hreq's buffer. */ -static size_t rd_http_req_write_cb (char *ptr, size_t size, size_t nmemb, - void *userdata) { +static size_t +rd_http_req_write_cb(char *ptr, size_t size, size_t nmemb, void *userdata) { rd_http_req_t *hreq = (rd_http_req_t *)userdata; if (unlikely(rd_buf_len(hreq->hreq_buf) + nmemb > @@ -129,7 +128,7 @@ static size_t rd_http_req_write_cb (char *ptr, size_t size, size_t nmemb, return nmemb; } -rd_http_error_t *rd_http_req_init (rd_http_req_t *hreq, const char *url) { +rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url) { memset(hreq, 0, sizeof(*hreq)); @@ -157,7 +156,7 @@ rd_http_error_t *rd_http_req_init (rd_http_req_t *hreq, const char *url) { /** * @brief Synchronously (blockingly) perform the HTTP operation. 
*/ -rd_http_error_t *rd_http_req_perform_sync (rd_http_req_t *hreq) { +rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq) { CURLcode res; long code = 0; @@ -175,11 +174,11 @@ rd_http_error_t *rd_http_req_perform_sync (rd_http_req_t *hreq) { } -int rd_http_req_get_code (const rd_http_req_t *hreq) { +int rd_http_req_get_code(const rd_http_req_t *hreq) { return hreq->hreq_code; } -const char *rd_http_req_get_content_type (rd_http_req_t *hreq) { +const char *rd_http_req_get_content_type(rd_http_req_t *hreq) { const char *content_type = NULL; if (curl_easy_getinfo(hreq->hreq_curl, CURLINFO_CONTENT_TYPE, @@ -201,7 +200,7 @@ const char *rd_http_req_get_content_type (rd_http_req_t *hreq) { * by calling rd_http_error_destroy(). In case of HTTP error the \p *rbufp * may be filled with the error response. */ -rd_http_error_t *rd_http_get (const char *url, rd_buf_t **rbufp) { +rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp) { rd_http_req_t hreq; rd_http_error_t *herr; @@ -217,7 +216,7 @@ rd_http_error_t *rd_http_get (const char *url, rd_buf_t **rbufp) { return herr; } - *rbufp = hreq.hreq_buf; + *rbufp = hreq.hreq_buf; hreq.hreq_buf = NULL; return NULL; @@ -230,7 +229,7 @@ rd_http_error_t *rd_http_get (const char *url, rd_buf_t **rbufp) { * * Same error semantics as rd_http_get(). */ -rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) { +rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp) { rd_http_req_t hreq; rd_http_error_t *herr; rd_slice_t slice; @@ -248,7 +247,7 @@ rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) { // FIXME: send Accept: json.. header? 
herr = rd_http_req_perform_sync(&hreq); - len = rd_buf_len(hreq.hreq_buf); + len = rd_buf_len(hreq.hreq_buf); if (herr && len == 0) { rd_http_req_destroy(&hreq); return herr; @@ -263,14 +262,12 @@ rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) { content_type = rd_http_req_get_content_type(&hreq); - if (!content_type || - rd_strncasecmp(content_type, - "application/json", strlen("application/json"))) { + if (!content_type || rd_strncasecmp(content_type, "application/json", + strlen("application/json"))) { if (!herr) herr = rd_http_error_new( - hreq.hreq_code, - "Response is not JSON encoded: %s", - content_type ? content_type : "(n/a)"); + hreq.hreq_code, "Response is not JSON encoded: %s", + content_type ? content_type : "(n/a)"); rd_http_req_destroy(&hreq); return herr; } @@ -282,12 +279,12 @@ rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) { raw_json[len] = '\0'; /* Parse JSON */ - end = NULL; + end = NULL; *jsonp = cJSON_ParseWithOpts(raw_json, &end, 0); if (!*jsonp && !herr) herr = rd_http_error_new(hreq.hreq_code, "Failed to parse JSON response " - "at %"PRIusz"/%"PRIusz, + "at %" PRIusz "/%" PRIusz, (size_t)(end - raw_json), len); rd_free(raw_json); @@ -297,7 +294,7 @@ rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) { } -void rd_http_global_init (void) { +void rd_http_global_init(void) { curl_global_init(CURL_GLOBAL_DEFAULT); } @@ -311,7 +308,7 @@ void rd_http_global_init (void) { * and 4xx response on $RD_UT_HTTP_URL/error (with whatever type of body). 
*/ -int unittest_http (void) { +int unittest_http(void) { const char *base_url = rd_getenv("RD_UT_HTTP_URL", NULL); char *error_url; size_t error_url_size; @@ -325,7 +322,7 @@ int unittest_http (void) { RD_UT_BEGIN(); error_url_size = strlen(base_url) + strlen("/error") + 1; - error_url = rd_alloca(error_url_size); + error_url = rd_alloca(error_url_size); rd_snprintf(error_url, error_url_size, "%s/error", base_url); /* Try the base url first, parse its JSON and extract a key-value. */ @@ -341,9 +338,10 @@ int unittest_http (void) { } RD_UT_ASSERT(!empty, "Expected non-empty JSON response from %s", base_url); - RD_UT_SAY("URL %s returned no error and a non-empty " - "JSON object/array as expected", - base_url); + RD_UT_SAY( + "URL %s returned no error and a non-empty " + "JSON object/array as expected", + base_url); cJSON_Delete(json); @@ -351,12 +349,14 @@ int unittest_http (void) { json = NULL; herr = rd_http_get_json(error_url, &json); RD_UT_ASSERT(herr != NULL, "Expected get_json(%s) to fail", error_url); - RD_UT_ASSERT(herr->code >= 400, "Expected get_json(%s) error code >= " - "400, got %d", error_url, herr->code); - RD_UT_SAY("Error URL %s returned code %d, errstr \"%s\" " - "and %s JSON object as expected", - error_url, herr->code, herr->errstr, - json ? "a" : "no"); + RD_UT_ASSERT(herr->code >= 400, + "Expected get_json(%s) error code >= " + "400, got %d", + error_url, herr->code); + RD_UT_SAY( + "Error URL %s returned code %d, errstr \"%s\" " + "and %s JSON object as expected", + error_url, herr->code, herr->errstr, json ? "a" : "no"); /* Check if there's a JSON document returned */ if (json) cJSON_Delete(json); diff --git a/src/rdhttp.h b/src/rdhttp.h index 24485540be..4238abcbce 100644 --- a/src/rdhttp.h +++ b/src/rdhttp.h @@ -37,16 +37,15 @@ typedef struct rd_http_error_s { int code; char *errstr; - char data[1]; /**< This is where the error string begins. */ + char data[1]; /**< This is where the error string begins. 
*/ } rd_http_error_t; -void rd_http_error_destroy (rd_http_error_t *herr); +void rd_http_error_destroy(rd_http_error_t *herr); -rd_http_error_t *rd_http_get (const char *url, rd_buf_t **rbufp); -rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp); - -void rd_http_global_init (void); +rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp); +rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp); +void rd_http_global_init(void); @@ -56,16 +55,16 @@ void rd_http_global_init (void); typedef struct rd_http_req_s { - CURL *hreq_curl; /**< CURL handle */ - rd_buf_t *hreq_buf; /**< Response buffer */ - int hreq_code; /**< HTTP response code */ + CURL *hreq_curl; /**< CURL handle */ + rd_buf_t *hreq_buf; /**< Response buffer */ + int hreq_code; /**< HTTP response code */ char hreq_curl_errstr[CURL_ERROR_SIZE]; /**< Error string for curl to * write to. */ } rd_http_req_t; -static void rd_http_req_destroy (rd_http_req_t *hreq); -rd_http_error_t *rd_http_req_init (rd_http_req_t *hreq, const char *url); -rd_http_error_t *rd_http_req_perform_sync (rd_http_req_t *hreq); +static void rd_http_req_destroy(rd_http_req_t *hreq); +rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url); +rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq); #endif diff --git a/src/rdinterval.h b/src/rdinterval.h index 0d5d4eb95f..4283376462 100644 --- a/src/rdinterval.h +++ b/src/rdinterval.h @@ -32,13 +32,13 @@ #include "rd.h" typedef struct rd_interval_s { - rd_ts_t ri_ts_last; /* last interval timestamp */ - rd_ts_t ri_fixed; /* fixed interval if provided interval is 0 */ - int ri_backoff; /* back off the next interval by this much */ + rd_ts_t ri_ts_last; /* last interval timestamp */ + rd_ts_t ri_fixed; /* fixed interval if provided interval is 0 */ + int ri_backoff; /* back off the next interval by this much */ } rd_interval_t; -static RD_INLINE RD_UNUSED void rd_interval_init (rd_interval_t *ri) { +static RD_INLINE RD_UNUSED void 
rd_interval_init(rd_interval_t *ri) { memset(ri, 0, sizeof(*ri)); } @@ -60,13 +60,13 @@ static RD_INLINE RD_UNUSED void rd_interval_init (rd_interval_t *ri) { * will be returned immediately even though the initial interval has not * passed. */ -#define rd_interval(ri,interval_us,now) rd_interval0(ri,interval_us,now,0) -#define rd_interval_immediate(ri,interval_us,now) \ - rd_interval0(ri,interval_us,now,1) -static RD_INLINE RD_UNUSED rd_ts_t rd_interval0 (rd_interval_t *ri, - rd_ts_t interval_us, - rd_ts_t now, - int immediate) { +#define rd_interval(ri, interval_us, now) rd_interval0(ri, interval_us, now, 0) +#define rd_interval_immediate(ri, interval_us, now) \ + rd_interval0(ri, interval_us, now, 1) +static RD_INLINE RD_UNUSED rd_ts_t rd_interval0(rd_interval_t *ri, + rd_ts_t interval_us, + rd_ts_t now, + int immediate) { rd_ts_t diff; if (!now) @@ -91,7 +91,7 @@ static RD_INLINE RD_UNUSED rd_ts_t rd_interval0 (rd_interval_t *ri, * Reset the interval to zero, i.e., the next call to rd_interval() * will be immediate. */ -static RD_INLINE RD_UNUSED void rd_interval_reset (rd_interval_t *ri) { +static RD_INLINE RD_UNUSED void rd_interval_reset(rd_interval_t *ri) { ri->ri_ts_last = 0; ri->ri_backoff = 0; } @@ -100,8 +100,8 @@ static RD_INLINE RD_UNUSED void rd_interval_reset (rd_interval_t *ri) { * Reset the interval to 'now'. If now is 0, the time will be gathered * automatically. */ -static RD_INLINE RD_UNUSED void rd_interval_reset_to_now (rd_interval_t *ri, - rd_ts_t now) { +static RD_INLINE RD_UNUSED void rd_interval_reset_to_now(rd_interval_t *ri, + rd_ts_t now) { if (!now) now = rd_clock(); @@ -112,7 +112,7 @@ static RD_INLINE RD_UNUSED void rd_interval_reset_to_now (rd_interval_t *ri, /** * Back off the next interval by `backoff_us` microseconds. 
*/ -static RD_INLINE RD_UNUSED void rd_interval_backoff (rd_interval_t *ri, +static RD_INLINE RD_UNUSED void rd_interval_backoff(rd_interval_t *ri, int backoff_us) { ri->ri_backoff = backoff_us; } @@ -122,19 +122,19 @@ static RD_INLINE RD_UNUSED void rd_interval_backoff (rd_interval_t *ri, * If `expedite_us` is 0 the interval will be set to trigger * immedately on the next rd_interval() call. */ -static RD_INLINE RD_UNUSED void rd_interval_expedite (rd_interval_t *ri, - int expedite_us) { - if (!expedite_us) - ri->ri_ts_last = 0; - else - ri->ri_backoff = -expedite_us; +static RD_INLINE RD_UNUSED void rd_interval_expedite(rd_interval_t *ri, + int expedite_us) { + if (!expedite_us) + ri->ri_ts_last = 0; + else + ri->ri_backoff = -expedite_us; } /** * Specifies a fixed interval to use if rd_interval() is called with * `interval_us` set to 0. */ -static RD_INLINE RD_UNUSED void rd_interval_fixed (rd_interval_t *ri, +static RD_INLINE RD_UNUSED void rd_interval_fixed(rd_interval_t *ri, rd_ts_t fixed_us) { ri->ri_fixed = fixed_us; } @@ -144,7 +144,7 @@ static RD_INLINE RD_UNUSED void rd_interval_fixed (rd_interval_t *ri, * A disabled interval will never return a positive value from * rd_interval(). */ -static RD_INLINE RD_UNUSED void rd_interval_disable (rd_interval_t *ri) { +static RD_INLINE RD_UNUSED void rd_interval_disable(rd_interval_t *ri) { /* Set last beat to a large value a long time in the future. */ ri->ri_ts_last = 6000000000000000000LL; /* in about 190000 years */ } @@ -152,7 +152,7 @@ static RD_INLINE RD_UNUSED void rd_interval_disable (rd_interval_t *ri) { /** * Returns true if the interval is disabled. 
*/ -static RD_INLINE RD_UNUSED int rd_interval_disabled (const rd_interval_t *ri) { +static RD_INLINE RD_UNUSED int rd_interval_disabled(const rd_interval_t *ri) { return ri->ri_ts_last == 6000000000000000000LL; } diff --git a/src/rdkafka.c b/src/rdkafka.c index ee0fdb616b..460d3972d2 100644 --- a/src/rdkafka.c +++ b/src/rdkafka.c @@ -75,7 +75,7 @@ #endif -static once_flag rd_kafka_global_init_once = ONCE_FLAG_INIT; +static once_flag rd_kafka_global_init_once = ONCE_FLAG_INIT; static once_flag rd_kafka_global_srand_once = ONCE_FLAG_INIT; /** @@ -97,8 +97,8 @@ rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code; * This is used in regression tests. */ rd_atomic32_t rd_kafka_thread_cnt_curr; -int rd_kafka_thread_cnt (void) { - return rd_atomic32_get(&rd_kafka_thread_cnt_curr); +int rd_kafka_thread_cnt(void) { + return rd_atomic32_get(&rd_kafka_thread_cnt_curr); } /** @@ -106,12 +106,12 @@ int rd_kafka_thread_cnt (void) { */ char RD_TLS rd_kafka_thread_name[64] = "app"; -void rd_kafka_set_thread_name (const char *fmt, ...) { +void rd_kafka_set_thread_name(const char *fmt, ...) { va_list ap; va_start(ap, fmt); - rd_vsnprintf(rd_kafka_thread_name, sizeof(rd_kafka_thread_name), - fmt, ap); + rd_vsnprintf(rd_kafka_thread_name, sizeof(rd_kafka_thread_name), fmt, + ap); va_end(ap); } @@ -123,7 +123,7 @@ void rd_kafka_set_thread_name (const char *fmt, ...) { */ static char RD_TLS rd_kafka_thread_sysname[16] = "app"; -void rd_kafka_set_thread_sysname (const char *fmt, ...) { +void rd_kafka_set_thread_sysname(const char *fmt, ...) { va_list ap; va_start(ap, fmt); @@ -134,15 +134,12 @@ void rd_kafka_set_thread_sysname (const char *fmt, ...) 
{ thrd_setname(rd_kafka_thread_sysname); } -static void rd_kafka_global_init0 (void) { - cJSON_Hooks json_hooks = { - .malloc_fn = rd_malloc, - .free_fn = rd_free - }; +static void rd_kafka_global_init0(void) { + cJSON_Hooks json_hooks = {.malloc_fn = rd_malloc, .free_fn = rd_free}; mtx_init(&rd_kafka_global_lock, mtx_plain); #if ENABLE_DEVEL - rd_atomic32_init(&rd_kafka_op_cnt, 0); + rd_atomic32_init(&rd_kafka_op_cnt, 0); #endif rd_crc32c_global_init(); #if WITH_SSL @@ -162,7 +159,7 @@ static void rd_kafka_global_init0 (void) { /** * @brief Initialize once per process */ -void rd_kafka_global_init (void) { +void rd_kafka_global_init(void) { call_once(&rd_kafka_global_init_once, rd_kafka_global_init0); } @@ -170,10 +167,10 @@ void rd_kafka_global_init (void) { /** * @brief Seed the PRNG with current_time.milliseconds */ -static void rd_kafka_global_srand (void) { - struct timeval tv; +static void rd_kafka_global_srand(void) { + struct timeval tv; - rd_gettimeofday(&tv, NULL); + rd_gettimeofday(&tv, NULL); srand((unsigned int)(tv.tv_usec / 1000)); } @@ -182,12 +179,12 @@ static void rd_kafka_global_srand (void) { /** * @returns the current number of active librdkafka instances */ -static int rd_kafka_global_cnt_get (void) { - int r; - mtx_lock(&rd_kafka_global_lock); - r = rd_kafka_global_cnt; - mtx_unlock(&rd_kafka_global_lock); - return r; +static int rd_kafka_global_cnt_get(void) { + int r; + mtx_lock(&rd_kafka_global_lock); + r = rd_kafka_global_cnt; + mtx_unlock(&rd_kafka_global_lock); + return r; } @@ -195,34 +192,34 @@ static int rd_kafka_global_cnt_get (void) { * @brief Increase counter for active librdkafka instances. * If this is the first instance the global constructors will be called, if any. 
*/ -static void rd_kafka_global_cnt_incr (void) { - mtx_lock(&rd_kafka_global_lock); - rd_kafka_global_cnt++; - if (rd_kafka_global_cnt == 1) { - rd_kafka_transport_init(); +static void rd_kafka_global_cnt_incr(void) { + mtx_lock(&rd_kafka_global_lock); + rd_kafka_global_cnt++; + if (rd_kafka_global_cnt == 1) { + rd_kafka_transport_init(); #if WITH_SSL rd_kafka_ssl_init(); #endif rd_kafka_sasl_global_init(); - } - mtx_unlock(&rd_kafka_global_lock); + } + mtx_unlock(&rd_kafka_global_lock); } /** * @brief Decrease counter for active librdkafka instances. * If this counter reaches 0 the global destructors will be called, if any. */ -static void rd_kafka_global_cnt_decr (void) { - mtx_lock(&rd_kafka_global_lock); - rd_kafka_assert(NULL, rd_kafka_global_cnt > 0); - rd_kafka_global_cnt--; - if (rd_kafka_global_cnt == 0) { +static void rd_kafka_global_cnt_decr(void) { + mtx_lock(&rd_kafka_global_lock); + rd_kafka_assert(NULL, rd_kafka_global_cnt > 0); + rd_kafka_global_cnt--; + if (rd_kafka_global_cnt == 0) { rd_kafka_sasl_global_term(); #if WITH_SSL rd_kafka_ssl_term(); #endif - } - mtx_unlock(&rd_kafka_global_lock); + } + mtx_unlock(&rd_kafka_global_lock); } @@ -231,25 +228,27 @@ static void rd_kafka_global_cnt_decr (void) { * Returns 0 if all kafka objects are now destroyed, or -1 if the * timeout was reached. 
*/ -int rd_kafka_wait_destroyed (int timeout_ms) { - rd_ts_t timeout = rd_clock() + (timeout_ms * 1000); - - while (rd_kafka_thread_cnt() > 0 || - rd_kafka_global_cnt_get() > 0) { - if (rd_clock() >= timeout) { - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT, - ETIMEDOUT); - return -1; - } - rd_usleep(25000, NULL); /* 25ms */ - } - - return 0; +int rd_kafka_wait_destroyed(int timeout_ms) { + rd_ts_t timeout = rd_clock() + (timeout_ms * 1000); + + while (rd_kafka_thread_cnt() > 0 || rd_kafka_global_cnt_get() > 0) { + if (rd_clock() >= timeout) { + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT, + ETIMEDOUT); + return -1; + } + rd_usleep(25000, NULL); /* 25ms */ + } + + return 0; } -static void rd_kafka_log_buf (const rd_kafka_conf_t *conf, - const rd_kafka_t *rk, int level, int ctx, - const char *fac, const char *buf) { +static void rd_kafka_log_buf(const rd_kafka_conf_t *conf, + const rd_kafka_t *rk, + int level, + int ctx, + const char *fac, + const char *buf) { if (level > conf->log_level) return; else if (rk && conf->log_queue) { @@ -276,52 +275,57 @@ static void rd_kafka_log_buf (const rd_kafka_conf_t *conf, * * @remark conf must be set, but rk may be NULL */ -void rd_kafka_log0 (const rd_kafka_conf_t *conf, - const rd_kafka_t *rk, - const char *extra, int level, int ctx, - const char *fac, const char *fmt, ...) { - char buf[2048]; - va_list ap; - unsigned int elen = 0; - unsigned int of = 0; - - if (level > conf->log_level) - return; - - if (conf->log_thread_name) { - elen = rd_snprintf(buf, sizeof(buf), "[thrd:%s]: ", - rd_kafka_thread_name); - if (unlikely(elen >= sizeof(buf))) - elen = sizeof(buf); - of = elen; - } - - if (extra) { - elen = rd_snprintf(buf+of, sizeof(buf)-of, "%s: ", extra); - if (unlikely(elen >= sizeof(buf)-of)) - elen = sizeof(buf)-of; +void rd_kafka_log0(const rd_kafka_conf_t *conf, + const rd_kafka_t *rk, + const char *extra, + int level, + int ctx, + const char *fac, + const char *fmt, + ...) 
{ + char buf[2048]; + va_list ap; + unsigned int elen = 0; + unsigned int of = 0; + + if (level > conf->log_level) + return; + + if (conf->log_thread_name) { + elen = rd_snprintf(buf, sizeof(buf), + "[thrd:%s]: ", rd_kafka_thread_name); + if (unlikely(elen >= sizeof(buf))) + elen = sizeof(buf); + of = elen; + } + + if (extra) { + elen = rd_snprintf(buf + of, sizeof(buf) - of, "%s: ", extra); + if (unlikely(elen >= sizeof(buf) - of)) + elen = sizeof(buf) - of; of += elen; - } + } - va_start(ap, fmt); - rd_vsnprintf(buf+of, sizeof(buf)-of, fmt, ap); - va_end(ap); + va_start(ap, fmt); + rd_vsnprintf(buf + of, sizeof(buf) - of, fmt, ap); + va_end(ap); rd_kafka_log_buf(conf, rk, level, ctx, fac, buf); } rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token (rd_kafka_t *rk, - const char *token_value, - int64_t md_lifetime_ms, - const char *md_principal_name, - const char **extensions, size_t extension_size, - char *errstr, size_t errstr_size) { +rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, + const char *token_value, + int64_t md_lifetime_ms, + const char *md_principal_name, + const char **extensions, + size_t extension_size, + char *errstr, + size_t errstr_size) { #if WITH_SASL_OAUTHBEARER return rd_kafka_oauthbearer_set_token0( - rk, token_value, - md_lifetime_ms, md_principal_name, extensions, extension_size, - errstr, errstr_size); + rk, token_value, md_lifetime_ms, md_principal_name, extensions, + extension_size, errstr, errstr_size); #else rd_snprintf(errstr, errstr_size, "librdkafka not built with SASL OAUTHBEARER support"); @@ -329,8 +333,8 @@ rd_kafka_oauthbearer_set_token (rd_kafka_t *rk, #endif } -rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, const char *errstr) { +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, + const char *errstr) { #if WITH_SASL_OAUTHBEARER return rd_kafka_oauthbearer_set_token_failure0(rk, errstr); #else @@ -338,487 +342,437 @@ rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, 
const char *errstr) { #endif } -void rd_kafka_log_print(const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { - int secs, msecs; - struct timeval tv; - rd_gettimeofday(&tv, NULL); - secs = (int)tv.tv_sec; - msecs = (int)(tv.tv_usec / 1000); - fprintf(stderr, "%%%i|%u.%03u|%s|%s| %s\n", - level, secs, msecs, - fac, rk ? rk->rk_name : "", buf); +void rd_kafka_log_print(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf) { + int secs, msecs; + struct timeval tv; + rd_gettimeofday(&tv, NULL); + secs = (int)tv.tv_sec; + msecs = (int)(tv.tv_usec / 1000); + fprintf(stderr, "%%%i|%u.%03u|%s|%s| %s\n", level, secs, msecs, fac, + rk ? rk->rk_name : "", buf); } -void rd_kafka_log_syslog (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { +void rd_kafka_log_syslog(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf) { #if WITH_SYSLOG - static int initialized = 0; + static int initialized = 0; - if (!initialized) - openlog("rdkafka", LOG_PID|LOG_CONS, LOG_USER); + if (!initialized) + openlog("rdkafka", LOG_PID | LOG_CONS, LOG_USER); - syslog(level, "%s: %s: %s", fac, rk ? rk->rk_name : "", buf); + syslog(level, "%s: %s: %s", fac, rk ? 
rk->rk_name : "", buf); #else rd_assert(!*"syslog support not enabled in this build"); #endif } -void rd_kafka_set_logger (rd_kafka_t *rk, - void (*func) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)) { +void rd_kafka_set_logger(rd_kafka_t *rk, + void (*func)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)) { #if !WITH_SYSLOG if (func == rd_kafka_log_syslog) rd_assert(!*"syslog support not enabled in this build"); #endif - rk->rk_conf.log_cb = func; + rk->rk_conf.log_cb = func; } -void rd_kafka_set_log_level (rd_kafka_t *rk, int level) { - rk->rk_conf.log_level = level; +void rd_kafka_set_log_level(rd_kafka_t *rk, int level) { + rk->rk_conf.log_level = level; } - - - -static const char *rd_kafka_type2str (rd_kafka_type_t type) { - static const char *types[] = { - [RD_KAFKA_PRODUCER] = "producer", - [RD_KAFKA_CONSUMER] = "consumer", - }; - return types[type]; +static const char *rd_kafka_type2str(rd_kafka_type_t type) { + static const char *types[] = { + [RD_KAFKA_PRODUCER] = "producer", + [RD_KAFKA_CONSUMER] = "consumer", + }; + return types[type]; } -#define _ERR_DESC(ENUM,DESC) \ - [ENUM - RD_KAFKA_RESP_ERR__BEGIN] = { ENUM, &(# ENUM)[18]/*pfx*/, DESC } +#define _ERR_DESC(ENUM, DESC) \ + [ENUM - RD_KAFKA_RESP_ERR__BEGIN] = {ENUM, &(#ENUM)[18] /*pfx*/, DESC} static const struct rd_kafka_err_desc rd_kafka_err_descs[] = { - _ERR_DESC(RD_KAFKA_RESP_ERR__BEGIN, NULL), - _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_MSG, - "Local: Bad message format"), - _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_COMPRESSION, - "Local: Invalid compressed data"), - _ERR_DESC(RD_KAFKA_RESP_ERR__DESTROY, - "Local: Broker handle destroyed"), - _ERR_DESC(RD_KAFKA_RESP_ERR__FAIL, - "Local: Communication failure with broker"), //FIXME: too specific - _ERR_DESC(RD_KAFKA_RESP_ERR__TRANSPORT, - "Local: Broker transport failure"), - _ERR_DESC(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, - "Local: Critical system resource failure"), - _ERR_DESC(RD_KAFKA_RESP_ERR__RESOLVE, - 
"Local: Host resolution failure"), - _ERR_DESC(RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, - "Local: Message timed out"), - _ERR_DESC(RD_KAFKA_RESP_ERR__PARTITION_EOF, - "Broker: No more messages"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - "Local: Unknown partition"), - _ERR_DESC(RD_KAFKA_RESP_ERR__FS, - "Local: File or filesystem error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC, - "Local: Unknown topic"), - _ERR_DESC(RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, - "Local: All broker connections are down"), - _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_ARG, - "Local: Invalid argument or configuration"), - _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT, - "Local: Timed out"), - _ERR_DESC(RD_KAFKA_RESP_ERR__QUEUE_FULL, - "Local: Queue full"), - _ERR_DESC(RD_KAFKA_RESP_ERR__ISR_INSUFF, - "Local: ISR count insufficient"), - _ERR_DESC(RD_KAFKA_RESP_ERR__NODE_UPDATE, - "Local: Broker node update"), - _ERR_DESC(RD_KAFKA_RESP_ERR__SSL, - "Local: SSL error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_COORD, - "Local: Waiting for coordinator"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP, - "Local: Unknown group"), - _ERR_DESC(RD_KAFKA_RESP_ERR__IN_PROGRESS, - "Local: Operation in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS, - "Local: Previous operation in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION, - "Local: Existing subscription"), - _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - "Local: Assign partitions"), - _ERR_DESC(RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - "Local: Revoke partitions"), - _ERR_DESC(RD_KAFKA_RESP_ERR__CONFLICT, - "Local: Conflicting use"), - _ERR_DESC(RD_KAFKA_RESP_ERR__STATE, - "Local: Erroneous state"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL, - "Local: Unknown protocol"), - _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED, - "Local: Not implemented"), - _ERR_DESC(RD_KAFKA_RESP_ERR__AUTHENTICATION, - "Local: Authentication failure"), - _ERR_DESC(RD_KAFKA_RESP_ERR__NO_OFFSET, - "Local: No offset stored"), - 
_ERR_DESC(RD_KAFKA_RESP_ERR__OUTDATED, - "Local: Outdated"), - _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, - "Local: Timed out in queue"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, - "Local: Required feature not supported by broker"), - _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_CACHE, - "Local: Awaiting cache update"), - _ERR_DESC(RD_KAFKA_RESP_ERR__INTR, - "Local: Operation interrupted"), - _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_SERIALIZATION, - "Local: Key serialization error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION, - "Local: Value serialization error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION, - "Local: Key deserialization error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION, - "Local: Value deserialization error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__PARTIAL, - "Local: Partial response"), - _ERR_DESC(RD_KAFKA_RESP_ERR__READ_ONLY, - "Local: Read-only object"), - _ERR_DESC(RD_KAFKA_RESP_ERR__NOENT, - "Local: No such entry"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNDERFLOW, - "Local: Read underflow"), - _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_TYPE, - "Local: Invalid type"), - _ERR_DESC(RD_KAFKA_RESP_ERR__RETRY, - "Local: Retry operation"), - _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_QUEUE, - "Local: Purged in queue"), - _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, - "Local: Purged in flight"), - _ERR_DESC(RD_KAFKA_RESP_ERR__FATAL, - "Local: Fatal error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__INCONSISTENT, - "Local: Inconsistent state"), - _ERR_DESC(RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE, - "Local: Gap-less ordering would not be guaranteed " - "if proceeding"), - _ERR_DESC(RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED, - "Local: Maximum application poll interval " - "(max.poll.interval.ms) exceeded"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_BROKER, - "Local: Unknown broker"), - _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_CONFIGURED, - "Local: Functionality not configured"), - _ERR_DESC(RD_KAFKA_RESP_ERR__FENCED, - "Local: This instance has been fenced by a newer instance"), - 
_ERR_DESC(RD_KAFKA_RESP_ERR__APPLICATION, - "Local: Application generated error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST, - "Local: Group partition assignment lost"), - _ERR_DESC(RD_KAFKA_RESP_ERR__NOOP, - "Local: No operation performed"), - _ERR_DESC(RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, - "Local: No offset to automatically reset to"), - - _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN, - "Unknown broker error"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NO_ERROR, - "Success"), - _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE, - "Broker: Offset out of range"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG, - "Broker: Invalid message"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, - "Broker: Unknown topic or partition"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE, - "Broker: Invalid message size"), - _ERR_DESC(RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE, - "Broker: Leader not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, - "Broker: Not leader for partition"), - _ERR_DESC(RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, - "Broker: Request timed out"), - _ERR_DESC(RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE, - "Broker: Broker not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, - "Broker: Replica not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, - "Broker: Message size too large"), - _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH, - "Broker: StaleControllerEpochCode"), - _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE, - "Broker: Offset metadata string too large"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION, - "Broker: Broker disconnected before response received"), - _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, - "Broker: Coordinator load in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, - "Broker: Coordinator not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - "Broker: Not coordinator"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION, - "Broker: Invalid topic"), - 
_ERR_DESC(RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE, - "Broker: Message batch larger than configured server " - "segment size"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, - "Broker: Not enough in-sync replicas"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND, - "Broker: Message(s) written to insufficient number of " - "in-sync replicas"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS, - "Broker: Invalid required acks value"), - _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - "Broker: Specified group generation id is not valid"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL, - "Broker: Inconsistent group protocol"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_GROUP_ID, - "Broker: Invalid group.id"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, - "Broker: Unknown member"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT, - "Broker: Invalid session timeout"), - _ERR_DESC(RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, - "Broker: Group rebalance in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE, - "Broker: Commit offset data size is not valid"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, - "Broker: Topic authorization failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, - "Broker: Group authorization failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED, - "Broker: Cluster authorization failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP, - "Broker: Invalid timestamp"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM, - "Broker: Unsupported SASL mechanism"), - _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE, - "Broker: Request not valid in current SASL state"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION, - "Broker: API version not supported"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS, - "Broker: Topic already exists"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PARTITIONS, - "Broker: Invalid number of partitions"), - 
_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR, - "Broker: Invalid replication factor"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT, - "Broker: Invalid replica assignment"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_CONFIG, - "Broker: Configuration is invalid"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_CONTROLLER, - "Broker: Not controller for cluster"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUEST, - "Broker: Invalid request"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT, - "Broker: Message format on broker does not support request"), - _ERR_DESC(RD_KAFKA_RESP_ERR_POLICY_VIOLATION, - "Broker: Policy violation"), - _ERR_DESC(RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, - "Broker: Broker received an out of order sequence number"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER, - "Broker: Broker received a duplicate sequence number"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH, - "Broker: Producer attempted an operation with an old epoch"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, - "Broker: Producer attempted a transactional operation in " - "an invalid state"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING, - "Broker: Producer attempted to use a producer id which is " - "not currently assigned to its transactional id"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT, - "Broker: Transaction timeout is larger than the maximum " - "value allowed by the broker's max.transaction.timeout.ms"), - _ERR_DESC(RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, - "Broker: Producer attempted to update a transaction while " - "another concurrent operation on the same transaction was " - "ongoing"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED, - "Broker: Indicates that the transaction coordinator sending " - "a WriteTxnMarker is no longer the current coordinator for " - "a given producer"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED, - "Broker: Transactional Id 
authorization failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_SECURITY_DISABLED, - "Broker: Security features are disabled"), - _ERR_DESC(RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED, - "Broker: Operation not attempted"), - _ERR_DESC(RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, - "Broker: Disk error when trying to access log file on disk"), - _ERR_DESC(RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND, - "Broker: The user-specified log directory is not found " - "in the broker config"), - _ERR_DESC(RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED, - "Broker: SASL Authentication failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, - "Broker: Unknown Producer Id"), - _ERR_DESC(RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS, - "Broker: Partition reassignment is in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED, - "Broker: Delegation Token feature is not enabled"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND, - "Broker: Delegation Token is not found on server"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH, - "Broker: Specified Principal is not valid Owner/Renewer"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, - "Broker: Delegation Token requests are not allowed on " - "this connection"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED, - "Broker: Delegation Token authorization failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED, - "Broker: Delegation Token is expired"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE, - "Broker: Supplied principalType is not supported"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP, - "Broker: The group is not empty"), - _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND, - "Broker: The group id does not exist"), - _ERR_DESC(RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND, - "Broker: The fetch session ID was not found"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH, - "Broker: The fetch session epoch is invalid"), - 
_ERR_DESC(RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND, - "Broker: No matching listener"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED, - "Broker: Topic deletion is disabled"), - _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH, - "Broker: Leader epoch is older than broker epoch"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH, - "Broker: Leader epoch is newer than broker epoch"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE, - "Broker: Unsupported compression type"), - _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH, - "Broker: Broker epoch has changed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE, - "Broker: Leader high watermark is not caught up"), - _ERR_DESC(RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED, - "Broker: Group member needs a valid member ID"), - _ERR_DESC(RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE, - "Broker: Preferred leader was not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED, - "Broker: Consumer group has reached maximum size"), - _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, - "Broker: Static consumer fenced by other consumer with same " - "group.instance.id"), - _ERR_DESC(RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE, - "Broker: Eligible partition leaders are not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED, - "Broker: Leader election not needed for topic partition"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS, - "Broker: No partition reassignment is in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC, - "Broker: Deleting offsets of a topic while the consumer " - "group is subscribed to it"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_RECORD, - "Broker: Broker failed to validate record"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - "Broker: There are unstable offsets that need to be cleared"), - _ERR_DESC(RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED, - "Broker: Throttling quota has been exceeded"), - 
_ERR_DESC(RD_KAFKA_RESP_ERR_PRODUCER_FENCED, - "Broker: There is a newer producer with the same " - "transactionalId which fences the current one"), - _ERR_DESC(RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND, - "Broker: Request illegally referred to resource that " - "does not exist"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE, - "Broker: Request illegally referred to the same resource " - "twice"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL, - "Broker: Requested credential would not meet criteria for " - "acceptability"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET, - "Broker: Indicates that the either the sender or recipient " - "of a voter-only request is not one of the expected voters"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION, - "Broker: Invalid update version"), - _ERR_DESC(RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED, - "Broker: Unable to update finalized features due to " - "server error"), - _ERR_DESC(RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE, - "Broker: Request principal deserialization failed during " - "forwarding"), - - _ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL) -}; - - -void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs, - size_t *cntp) { - *errdescs = rd_kafka_err_descs; - *cntp = RD_ARRAYSIZE(rd_kafka_err_descs); + _ERR_DESC(RD_KAFKA_RESP_ERR__BEGIN, NULL), + _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_MSG, "Local: Bad message format"), + _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_COMPRESSION, + "Local: Invalid compressed data"), + _ERR_DESC(RD_KAFKA_RESP_ERR__DESTROY, "Local: Broker handle destroyed"), + _ERR_DESC( + RD_KAFKA_RESP_ERR__FAIL, + "Local: Communication failure with broker"), // FIXME: too specific + _ERR_DESC(RD_KAFKA_RESP_ERR__TRANSPORT, "Local: Broker transport failure"), + _ERR_DESC(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, + "Local: Critical system resource failure"), + _ERR_DESC(RD_KAFKA_RESP_ERR__RESOLVE, "Local: Host resolution failure"), + _ERR_DESC(RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, "Local: Message 
timed out"), + _ERR_DESC(RD_KAFKA_RESP_ERR__PARTITION_EOF, "Broker: No more messages"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, "Local: Unknown partition"), + _ERR_DESC(RD_KAFKA_RESP_ERR__FS, "Local: File or filesystem error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC, "Local: Unknown topic"), + _ERR_DESC(RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, + "Local: All broker connections are down"), + _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Local: Invalid argument or configuration"), + _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT, "Local: Timed out"), + _ERR_DESC(RD_KAFKA_RESP_ERR__QUEUE_FULL, "Local: Queue full"), + _ERR_DESC(RD_KAFKA_RESP_ERR__ISR_INSUFF, "Local: ISR count insufficient"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NODE_UPDATE, "Local: Broker node update"), + _ERR_DESC(RD_KAFKA_RESP_ERR__SSL, "Local: SSL error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_COORD, "Local: Waiting for coordinator"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP, "Local: Unknown group"), + _ERR_DESC(RD_KAFKA_RESP_ERR__IN_PROGRESS, "Local: Operation in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS, + "Local: Previous operation in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION, + "Local: Existing subscription"), + _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, "Local: Assign partitions"), + _ERR_DESC(RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, "Local: Revoke partitions"), + _ERR_DESC(RD_KAFKA_RESP_ERR__CONFLICT, "Local: Conflicting use"), + _ERR_DESC(RD_KAFKA_RESP_ERR__STATE, "Local: Erroneous state"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL, "Local: Unknown protocol"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED, "Local: Not implemented"), + _ERR_DESC(RD_KAFKA_RESP_ERR__AUTHENTICATION, + "Local: Authentication failure"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NO_OFFSET, "Local: No offset stored"), + _ERR_DESC(RD_KAFKA_RESP_ERR__OUTDATED, "Local: Outdated"), + _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, "Local: Timed out in queue"), + 
_ERR_DESC(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, + "Local: Required feature not supported by broker"), + _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_CACHE, "Local: Awaiting cache update"), + _ERR_DESC(RD_KAFKA_RESP_ERR__INTR, "Local: Operation interrupted"), + _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_SERIALIZATION, + "Local: Key serialization error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION, + "Local: Value serialization error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION, + "Local: Key deserialization error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION, + "Local: Value deserialization error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__PARTIAL, "Local: Partial response"), + _ERR_DESC(RD_KAFKA_RESP_ERR__READ_ONLY, "Local: Read-only object"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NOENT, "Local: No such entry"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNDERFLOW, "Local: Read underflow"), + _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_TYPE, "Local: Invalid type"), + _ERR_DESC(RD_KAFKA_RESP_ERR__RETRY, "Local: Retry operation"), + _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_QUEUE, "Local: Purged in queue"), + _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, "Local: Purged in flight"), + _ERR_DESC(RD_KAFKA_RESP_ERR__FATAL, "Local: Fatal error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__INCONSISTENT, "Local: Inconsistent state"), + _ERR_DESC(RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE, + "Local: Gap-less ordering would not be guaranteed " + "if proceeding"), + _ERR_DESC(RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED, + "Local: Maximum application poll interval " + "(max.poll.interval.ms) exceeded"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_BROKER, "Local: Unknown broker"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_CONFIGURED, + "Local: Functionality not configured"), + _ERR_DESC(RD_KAFKA_RESP_ERR__FENCED, + "Local: This instance has been fenced by a newer instance"), + _ERR_DESC(RD_KAFKA_RESP_ERR__APPLICATION, + "Local: Application generated error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST, + "Local: Group partition assignment lost"), 
+ _ERR_DESC(RD_KAFKA_RESP_ERR__NOOP, "Local: No operation performed"), + _ERR_DESC(RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, + "Local: No offset to automatically reset to"), + + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN, "Unknown broker error"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NO_ERROR, "Success"), + _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE, + "Broker: Offset out of range"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG, "Broker: Invalid message"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + "Broker: Unknown topic or partition"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE, + "Broker: Invalid message size"), + _ERR_DESC(RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE, + "Broker: Leader not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, + "Broker: Not leader for partition"), + _ERR_DESC(RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, "Broker: Request timed out"), + _ERR_DESC(RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE, + "Broker: Broker not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, + "Broker: Replica not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, + "Broker: Message size too large"), + _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH, + "Broker: StaleControllerEpochCode"), + _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE, + "Broker: Offset metadata string too large"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION, + "Broker: Broker disconnected before response received"), + _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, + "Broker: Coordinator load in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + "Broker: Coordinator not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_COORDINATOR, "Broker: Not coordinator"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION, "Broker: Invalid topic"), + _ERR_DESC(RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE, + "Broker: Message batch larger than configured server " + "segment size"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, + "Broker: Not enough 
in-sync replicas"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND, + "Broker: Message(s) written to insufficient number of " + "in-sync replicas"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS, + "Broker: Invalid required acks value"), + _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + "Broker: Specified group generation id is not valid"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL, + "Broker: Inconsistent group protocol"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_GROUP_ID, "Broker: Invalid group.id"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, "Broker: Unknown member"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT, + "Broker: Invalid session timeout"), + _ERR_DESC(RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, + "Broker: Group rebalance in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE, + "Broker: Commit offset data size is not valid"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + "Broker: Topic authorization failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, + "Broker: Group authorization failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED, + "Broker: Cluster authorization failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP, "Broker: Invalid timestamp"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM, + "Broker: Unsupported SASL mechanism"), + _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE, + "Broker: Request not valid in current SASL state"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION, + "Broker: API version not supported"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS, + "Broker: Topic already exists"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PARTITIONS, + "Broker: Invalid number of partitions"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR, + "Broker: Invalid replication factor"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT, + "Broker: Invalid replica assignment"), + 
_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_CONFIG, + "Broker: Configuration is invalid"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_CONTROLLER, + "Broker: Not controller for cluster"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUEST, "Broker: Invalid request"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT, + "Broker: Message format on broker does not support request"), + _ERR_DESC(RD_KAFKA_RESP_ERR_POLICY_VIOLATION, "Broker: Policy violation"), + _ERR_DESC(RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, + "Broker: Broker received an out of order sequence number"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER, + "Broker: Broker received a duplicate sequence number"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH, + "Broker: Producer attempted an operation with an old epoch"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, + "Broker: Producer attempted a transactional operation in " + "an invalid state"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING, + "Broker: Producer attempted to use a producer id which is " + "not currently assigned to its transactional id"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT, + "Broker: Transaction timeout is larger than the maximum " + "value allowed by the broker's max.transaction.timeout.ms"), + _ERR_DESC(RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + "Broker: Producer attempted to update a transaction while " + "another concurrent operation on the same transaction was " + "ongoing"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED, + "Broker: Indicates that the transaction coordinator sending " + "a WriteTxnMarker is no longer the current coordinator for " + "a given producer"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED, + "Broker: Transactional Id authorization failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_SECURITY_DISABLED, + "Broker: Security features are disabled"), + _ERR_DESC(RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED, + "Broker: Operation not 
attempted"), + _ERR_DESC(RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, + "Broker: Disk error when trying to access log file on disk"), + _ERR_DESC(RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND, + "Broker: The user-specified log directory is not found " + "in the broker config"), + _ERR_DESC(RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED, + "Broker: SASL Authentication failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, + "Broker: Unknown Producer Id"), + _ERR_DESC(RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS, + "Broker: Partition reassignment is in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED, + "Broker: Delegation Token feature is not enabled"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND, + "Broker: Delegation Token is not found on server"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH, + "Broker: Specified Principal is not valid Owner/Renewer"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, + "Broker: Delegation Token requests are not allowed on " + "this connection"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED, + "Broker: Delegation Token authorization failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED, + "Broker: Delegation Token is expired"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE, + "Broker: Supplied principalType is not supported"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP, + "Broker: The group is not empty"), + _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND, + "Broker: The group id does not exist"), + _ERR_DESC(RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND, + "Broker: The fetch session ID was not found"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH, + "Broker: The fetch session epoch is invalid"), + _ERR_DESC(RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND, + "Broker: No matching listener"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED, + "Broker: Topic deletion is disabled"), + 
_ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH, + "Broker: Leader epoch is older than broker epoch"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH, + "Broker: Leader epoch is newer than broker epoch"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE, + "Broker: Unsupported compression type"), + _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH, + "Broker: Broker epoch has changed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE, + "Broker: Leader high watermark is not caught up"), + _ERR_DESC(RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED, + "Broker: Group member needs a valid member ID"), + _ERR_DESC(RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE, + "Broker: Preferred leader was not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED, + "Broker: Consumer group has reached maximum size"), + _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, + "Broker: Static consumer fenced by other consumer with same " + "group.instance.id"), + _ERR_DESC(RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE, + "Broker: Eligible partition leaders are not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED, + "Broker: Leader election not needed for topic partition"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS, + "Broker: No partition reassignment is in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC, + "Broker: Deleting offsets of a topic while the consumer " + "group is subscribed to it"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_RECORD, + "Broker: Broker failed to validate record"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + "Broker: There are unstable offsets that need to be cleared"), + _ERR_DESC(RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED, + "Broker: Throttling quota has been exceeded"), + _ERR_DESC(RD_KAFKA_RESP_ERR_PRODUCER_FENCED, + "Broker: There is a newer producer with the same " + "transactionalId which fences the current one"), + _ERR_DESC(RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND, + "Broker: Request 
illegally referred to resource that " + "does not exist"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE, + "Broker: Request illegally referred to the same resource " + "twice"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL, + "Broker: Requested credential would not meet criteria for " + "acceptability"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET, + "Broker: Indicates that the either the sender or recipient " + "of a voter-only request is not one of the expected voters"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION, + "Broker: Invalid update version"), + _ERR_DESC(RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED, + "Broker: Unable to update finalized features due to " + "server error"), + _ERR_DESC(RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE, + "Broker: Request principal deserialization failed during " + "forwarding"), + + _ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL)}; + + +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, + size_t *cntp) { + *errdescs = rd_kafka_err_descs; + *cntp = RD_ARRAYSIZE(rd_kafka_err_descs); } -const char *rd_kafka_err2str (rd_kafka_resp_err_t err) { - static RD_TLS char ret[32]; - int idx = err - RD_KAFKA_RESP_ERR__BEGIN; +const char *rd_kafka_err2str(rd_kafka_resp_err_t err) { + static RD_TLS char ret[32]; + int idx = err - RD_KAFKA_RESP_ERR__BEGIN; - if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN || - err >= RD_KAFKA_RESP_ERR_END_ALL || - !rd_kafka_err_descs[idx].desc)) { - rd_snprintf(ret, sizeof(ret), "Err-%i?", err); - return ret; - } + if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN || + err >= RD_KAFKA_RESP_ERR_END_ALL || + !rd_kafka_err_descs[idx].desc)) { + rd_snprintf(ret, sizeof(ret), "Err-%i?", err); + return ret; + } - return rd_kafka_err_descs[idx].desc; + return rd_kafka_err_descs[idx].desc; } -const char *rd_kafka_err2name (rd_kafka_resp_err_t err) { - static RD_TLS char ret[32]; - int idx = err - RD_KAFKA_RESP_ERR__BEGIN; +const char *rd_kafka_err2name(rd_kafka_resp_err_t err) { + 
static RD_TLS char ret[32]; + int idx = err - RD_KAFKA_RESP_ERR__BEGIN; - if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN || - err >= RD_KAFKA_RESP_ERR_END_ALL || - !rd_kafka_err_descs[idx].desc)) { - rd_snprintf(ret, sizeof(ret), "ERR_%i?", err); - return ret; - } + if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN || + err >= RD_KAFKA_RESP_ERR_END_ALL || + !rd_kafka_err_descs[idx].desc)) { + rd_snprintf(ret, sizeof(ret), "ERR_%i?", err); + return ret; + } - return rd_kafka_err_descs[idx].name; + return rd_kafka_err_descs[idx].name; } -rd_kafka_resp_err_t rd_kafka_last_error (void) { - return rd_kafka_last_error_code; +rd_kafka_resp_err_t rd_kafka_last_error(void) { + return rd_kafka_last_error_code; } -rd_kafka_resp_err_t rd_kafka_errno2err (int errnox) { - switch (errnox) - { - case EINVAL: - return RD_KAFKA_RESP_ERR__INVALID_ARG; +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox) { + switch (errnox) { + case EINVAL: + return RD_KAFKA_RESP_ERR__INVALID_ARG; case EBUSY: return RD_KAFKA_RESP_ERR__CONFLICT; - case ENOENT: - return RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + case ENOENT: + return RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; - case ESRCH: - return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + case ESRCH: + return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - case ETIMEDOUT: - return RD_KAFKA_RESP_ERR__TIMED_OUT; + case ETIMEDOUT: + return RD_KAFKA_RESP_ERR__TIMED_OUT; - case EMSGSIZE: - return RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE; + case EMSGSIZE: + return RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE; - case ENOBUFS: - return RD_KAFKA_RESP_ERR__QUEUE_FULL; + case ENOBUFS: + return RD_KAFKA_RESP_ERR__QUEUE_FULL; case ECANCELED: return RD_KAFKA_RESP_ERR__FATAL; - default: - return RD_KAFKA_RESP_ERR__FAIL; - } + default: + return RD_KAFKA_RESP_ERR__FAIL; + } } -rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t +rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size) { rd_kafka_resp_err_t err; if (unlikely((err = 
rd_atomic32_get(&rk->rk_fatal.err)))) { @@ -843,9 +797,11 @@ rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, * @locality any * @locks none */ -int rd_kafka_set_fatal_error0 (rd_kafka_t *rk, rd_dolock_t do_lock, - rd_kafka_resp_err_t err, - const char *fmt, ...) { +int rd_kafka_set_fatal_error0(rd_kafka_t *rk, + rd_dolock_t do_lock, + rd_kafka_resp_err_t err, + const char *fmt, + ...) { va_list ap; char buf[512]; @@ -877,12 +833,10 @@ int rd_kafka_set_fatal_error0 (rd_kafka_t *rk, rd_dolock_t do_lock, * will be automatically logged, and this check here * prevents us from duplicate logs. */ if (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR) - rd_kafka_log(rk, LOG_EMERG, "FATAL", - "Fatal error: %s: %s", + rd_kafka_log(rk, LOG_EMERG, "FATAL", "Fatal error: %s: %s", rd_kafka_err2str(err), rk->rk_fatal.errstr); else - rd_kafka_dbg(rk, ALL, "FATAL", - "Fatal error: %s: %s", + rd_kafka_dbg(rk, ALL, "FATAL", "Fatal error: %s: %s", rd_kafka_err2str(err), rk->rk_fatal.errstr); /* Indicate to the application that a fatal error was raised, @@ -893,16 +847,15 @@ int rd_kafka_set_fatal_error0 (rd_kafka_t *rk, rd_dolock_t do_lock, * while for all other client types (the producer) we propagate to * the standard error handler (typically error_cb). 
*/ if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp) - rd_kafka_consumer_err(rk->rk_cgrp->rkcg_q, RD_KAFKA_NODEID_UA, - RD_KAFKA_RESP_ERR__FATAL, 0, NULL, NULL, - RD_KAFKA_OFFSET_INVALID, - "Fatal error: %s: %s", - rd_kafka_err2str(err), - rk->rk_fatal.errstr); + rd_kafka_consumer_err( + rk->rk_cgrp->rkcg_q, RD_KAFKA_NODEID_UA, + RD_KAFKA_RESP_ERR__FATAL, 0, NULL, NULL, + RD_KAFKA_OFFSET_INVALID, "Fatal error: %s: %s", + rd_kafka_err2str(err), rk->rk_fatal.errstr); else rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__FATAL, - "Fatal error: %s: %s", - rd_kafka_err2str(err), rk->rk_fatal.errstr); + "Fatal error: %s: %s", rd_kafka_err2str(err), + rk->rk_fatal.errstr); /* Tell rdkafka main thread to purge producer queues, but not @@ -914,8 +867,8 @@ int rd_kafka_set_fatal_error0 (rd_kafka_t *rk, rd_dolock_t do_lock, * OP_PURGE request. */ if (rk->rk_type == RD_KAFKA_PRODUCER) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PURGE); - rko->rko_u.purge.flags = RD_KAFKA_PURGE_F_QUEUE| - RD_KAFKA_PURGE_F_NON_BLOCKING; + rko->rko_u.purge.flags = + RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_NON_BLOCKING; rd_kafka_q_enq(rk->rk_ops, rko); } @@ -923,9 +876,9 @@ int rd_kafka_set_fatal_error0 (rd_kafka_t *rk, rd_dolock_t do_lock, } -rd_kafka_resp_err_t -rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason) { if (!rd_kafka_set_fatal_error(rk, err, "test_fatal_error: %s", reason)) return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS; else @@ -939,7 +892,7 @@ rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, * * @locality application thread */ -void rd_kafka_destroy_final (rd_kafka_t *rk) { +void rd_kafka_destroy_final(rd_kafka_t *rk) { rd_kafka_assert(rk, rd_kafka_terminating(rk)); @@ -957,8 +910,7 @@ void rd_kafka_destroy_final (rd_kafka_t *rk) { /* Destroy cgrp */ if (rk->rk_cgrp) { - rd_kafka_dbg(rk, GENERIC, "TERMINATE", - 
"Destroying cgrp"); + rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying cgrp"); /* Reset queue forwarding (rep -> cgrp) */ rd_kafka_q_fwd_set(rk->rk_rep, NULL); rd_kafka_cgrp_destroy_final(rk->rk_cgrp); @@ -972,9 +924,9 @@ void rd_kafka_destroy_final (rd_kafka_t *rk) { rd_kafka_q_destroy(rk->rk_consumer.q); } - /* Purge op-queues */ - rd_kafka_q_destroy_owner(rk->rk_rep); - rd_kafka_q_destroy_owner(rk->rk_ops); + /* Purge op-queues */ + rd_kafka_q_destroy_owner(rk->rk_rep); + rd_kafka_q_destroy_owner(rk->rk_ops); #if WITH_SSL if (rk->rk_conf.ssl.ctx) { @@ -993,61 +945,57 @@ void rd_kafka_destroy_final (rd_kafka_t *rk) { } if (rk->rk_type == RD_KAFKA_PRODUCER) { - cnd_destroy(&rk->rk_curr_msgs.cnd); - mtx_destroy(&rk->rk_curr_msgs.lock); - } + cnd_destroy(&rk->rk_curr_msgs.cnd); + mtx_destroy(&rk->rk_curr_msgs.lock); + } if (rk->rk_fatal.errstr) { rd_free(rk->rk_fatal.errstr); rk->rk_fatal.errstr = NULL; } - cnd_destroy(&rk->rk_broker_state_change_cnd); - mtx_destroy(&rk->rk_broker_state_change_lock); + cnd_destroy(&rk->rk_broker_state_change_cnd); + mtx_destroy(&rk->rk_broker_state_change_lock); mtx_destroy(&rk->rk_suppress.sparse_connect_lock); cnd_destroy(&rk->rk_init_cnd); mtx_destroy(&rk->rk_init_lock); - if (rk->rk_full_metadata) - rd_kafka_metadata_destroy(rk->rk_full_metadata); + if (rk->rk_full_metadata) + rd_kafka_metadata_destroy(rk->rk_full_metadata); rd_kafkap_str_destroy(rk->rk_client_id); rd_kafkap_str_destroy(rk->rk_group_id); rd_kafkap_str_destroy(rk->rk_eos.transactional_id); - rd_kafka_anyconf_destroy(_RK_GLOBAL, &rk->rk_conf); + rd_kafka_anyconf_destroy(_RK_GLOBAL, &rk->rk_conf); rd_list_destroy(&rk->rk_broker_by_id); - rwlock_destroy(&rk->rk_lock); + rwlock_destroy(&rk->rk_lock); - rd_free(rk); - rd_kafka_global_cnt_decr(); + rd_free(rk); + rd_kafka_global_cnt_decr(); } -static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { +static void rd_kafka_destroy_app(rd_kafka_t *rk, int flags) { thrd_t thrd; #ifndef _WIN32 - int term_sig = 
rk->rk_conf.term_sig; + int term_sig = rk->rk_conf.term_sig; #endif int res; char flags_str[256]; static const char *rd_kafka_destroy_flags_names[] = { - "Terminate", - "DestroyCalled", - "Immediate", - "NoConsumerClose", - NULL - }; + "Terminate", "DestroyCalled", "Immediate", "NoConsumerClose", NULL}; /* Fatal errors and _F_IMMEDIATE also sets .._NO_CONSUMER_CLOSE */ if (flags & RD_KAFKA_DESTROY_F_IMMEDIATE || rd_kafka_fatal_error_code(rk)) flags |= RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE; - rd_flags2str(flags_str, sizeof(flags_str), - rd_kafka_destroy_flags_names, flags); - rd_kafka_dbg(rk, ALL, "DESTROY", "Terminating instance " + rd_flags2str(flags_str, sizeof(flags_str), rd_kafka_destroy_flags_names, + flags); + rd_kafka_dbg(rk, ALL, "DESTROY", + "Terminating instance " "(destroy flags %s (0x%x))", flags ? flags_str : "none", flags); @@ -1064,12 +1012,13 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { if (tot_cnt > 0) rd_kafka_log(rk, LOG_WARNING, "TERMINATE", "Producer terminating with %u message%s " - "(%"PRIusz" byte%s) still in " + "(%" PRIusz + " byte%s) still in " "queue or transit: " "use flush() to wait for " "outstanding message delivery", - tot_cnt, tot_cnt > 1 ? "s" : "", - tot_size, tot_size > 1 ? "s" : ""); + tot_cnt, tot_cnt > 1 ? "s" : "", tot_size, + tot_size > 1 ? "s" : ""); } /* Make sure destroy is not called from a librdkafka thread @@ -1090,7 +1039,7 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { /* Before signaling for general termination, set the destroy * flags to hint cgrp how to shut down. */ rd_atomic32_set(&rk->rk_terminate, - flags|RD_KAFKA_DESTROY_F_DESTROY_CALLED); + flags | RD_KAFKA_DESTROY_F_DESTROY_CALLED); /* The legacy/simple consumer lacks an API to close down the consumer*/ if (rk->rk_cgrp) { @@ -1100,7 +1049,8 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { } /* With the consumer closed, terminate the rest of librdkafka. 
*/ - rd_atomic32_set(&rk->rk_terminate, flags|RD_KAFKA_DESTROY_F_TERMINATE); + rd_atomic32_set(&rk->rk_terminate, + flags | RD_KAFKA_DESTROY_F_TERMINATE); rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Interrupting timers"); rd_kafka_wrlock(rk); @@ -1116,7 +1066,7 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { #ifndef _WIN32 /* Interrupt main kafka thread to speed up termination. */ - if (term_sig) { + if (term_sig) { rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Sending thread kill signal %d", term_sig); pthread_kill(thrd, term_sig); @@ -1126,8 +1076,7 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { if (rd_kafka_destroy_flags_check(rk, RD_KAFKA_DESTROY_F_IMMEDIATE)) return; /* FIXME: thread resource leak */ - rd_kafka_dbg(rk, GENERIC, "TERMINATE", - "Joining internal main thread"); + rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Joining internal main thread"); if (thrd_join(thrd, &res) != thrd_success) rd_kafka_log(rk, LOG_ERR, "DESTROY", @@ -1141,11 +1090,11 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { /* NOTE: Must only be called by application. * librdkafka itself must use rd_kafka_destroy0(). 
*/ -void rd_kafka_destroy (rd_kafka_t *rk) { +void rd_kafka_destroy(rd_kafka_t *rk) { rd_kafka_destroy_app(rk, 0); } -void rd_kafka_destroy_flags (rd_kafka_t *rk, int flags) { +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags) { rd_kafka_destroy_app(rk, flags); } @@ -1155,9 +1104,9 @@ void rd_kafka_destroy_flags (rd_kafka_t *rk, int flags) { * * Locality: rdkafka main thread or application thread during rd_kafka_new() */ -static void rd_kafka_destroy_internal (rd_kafka_t *rk) { - rd_kafka_topic_t *rkt, *rkt_tmp; - rd_kafka_broker_t *rkb, *rkb_tmp; +static void rd_kafka_destroy_internal(rd_kafka_t *rk) { + rd_kafka_topic_t *rkt, *rkt_tmp; + rd_kafka_broker_t *rkb, *rkb_tmp; rd_list_t wait_thrds; thrd_t *thrd; int i; @@ -1185,33 +1134,32 @@ static void rd_kafka_destroy_internal (rd_kafka_t *rk) { /* Call on_destroy() interceptors */ rd_kafka_interceptors_on_destroy(rk); - /* Brokers pick up on rk_terminate automatically. */ + /* Brokers pick up on rk_terminate automatically. */ /* List of (broker) threads to join to synchronize termination */ rd_list_init(&wait_thrds, rd_atomic32_get(&rk->rk_broker_cnt), NULL); - rd_kafka_wrlock(rk); + rd_kafka_wrlock(rk); rd_kafka_dbg(rk, ALL, "DESTROY", "Removing all topics"); - /* Decommission all topics */ - TAILQ_FOREACH_SAFE(rkt, &rk->rk_topics, rkt_link, rkt_tmp) { - rd_kafka_wrunlock(rk); - rd_kafka_topic_partitions_remove(rkt); - rd_kafka_wrlock(rk); - } + /* Decommission all topics */ + TAILQ_FOREACH_SAFE(rkt, &rk->rk_topics, rkt_link, rkt_tmp) { + rd_kafka_wrunlock(rk); + rd_kafka_topic_partitions_remove(rkt); + rd_kafka_wrlock(rk); + } /* Decommission brokers. * Broker thread holds a refcount and detects when broker refcounts * reaches 1 and then decommissions itself. 
*/ TAILQ_FOREACH_SAFE(rkb, &rk->rk_brokers, rkb_link, rkb_tmp) { /* Add broker's thread to wait_thrds list for later joining */ - thrd = rd_malloc(sizeof(*thrd)); + thrd = rd_malloc(sizeof(*thrd)); *thrd = rkb->rkb_thread; rd_list_add(&wait_thrds, thrd); rd_kafka_wrunlock(rk); - rd_kafka_dbg(rk, BROKER, "DESTROY", - "Sending TERMINATE to %s", + rd_kafka_dbg(rk, BROKER, "DESTROY", "Sending TERMINATE to %s", rd_kafka_broker_name(rkb)); /* Send op to trigger queue/io wake-up. * The op itself is (likely) ignored by the broker thread. */ @@ -1221,7 +1169,7 @@ static void rd_kafka_destroy_internal (rd_kafka_t *rk) { #ifndef _WIN32 /* Interrupt IO threads to speed up termination. */ if (rk->rk_conf.term_sig) - pthread_kill(rkb->rkb_thread, rk->rk_conf.term_sig); + pthread_kill(rkb->rkb_thread, rk->rk_conf.term_sig); #endif rd_kafka_broker_destroy(rkb); @@ -1247,7 +1195,7 @@ static void rd_kafka_destroy_internal (rd_kafka_t *rk) { * and these brokers are destroyed below. So to avoid a circular * dependency refcnt deadlock we first purge the cache here * and destroy it after the brokers are destroyed. */ - rd_kafka_metadata_cache_purge(rk, rd_true/*observers too*/); + rd_kafka_metadata_cache_purge(rk, rd_true /*observers too*/); rd_kafka_wrunlock(rk); @@ -1261,16 +1209,15 @@ static void rd_kafka_destroy_internal (rd_kafka_t *rk) { rd_kafka_q_disable(rk->rk_consumer.q); } - rd_kafka_dbg(rk, GENERIC, "TERMINATE", - "Purging reply queue"); + rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Purging reply queue"); - /* Purge op-queue */ + /* Purge op-queue */ rd_kafka_q_disable(rk->rk_rep); - rd_kafka_q_purge(rk->rk_rep); + rd_kafka_q_purge(rk->rk_rep); - /* Loose our special reference to the internal broker. */ + /* Loose our special reference to the internal broker. 
*/ mtx_lock(&rk->rk_internal_rkb_lock); - if ((rkb = rk->rk_internal_rkb)) { + if ((rkb = rk->rk_internal_rkb)) { rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Decommissioning internal broker"); @@ -1279,17 +1226,17 @@ static void rd_kafka_destroy_internal (rd_kafka_t *rk) { rd_kafka_op_new(RD_KAFKA_OP_TERMINATE)); rk->rk_internal_rkb = NULL; - thrd = rd_malloc(sizeof(*thrd)); - *thrd = rkb->rkb_thread; + thrd = rd_malloc(sizeof(*thrd)); + *thrd = rkb->rkb_thread; rd_list_add(&wait_thrds, thrd); } mtx_unlock(&rk->rk_internal_rkb_lock); - if (rkb) - rd_kafka_broker_destroy(rkb); + if (rkb) + rd_kafka_broker_destroy(rkb); - rd_kafka_dbg(rk, GENERIC, "TERMINATE", - "Join %d broker thread(s)", rd_list_cnt(&wait_thrds)); + rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Join %d broker thread(s)", + rd_list_cnt(&wait_thrds)); /* Join broker threads */ RD_LIST_FOREACH(thrd, &wait_thrds, i) { @@ -1326,25 +1273,26 @@ static void rd_kafka_destroy_internal (rd_kafka_t *rk) { * @brief Buffer state for stats emitter */ struct _stats_emit { - char *buf; /* Pointer to allocated buffer */ - size_t size; /* Current allocated size of buf */ - size_t of; /* Current write-offset in buf */ + char *buf; /* Pointer to allocated buffer */ + size_t size; /* Current allocated size of buf */ + size_t of; /* Current write-offset in buf */ }; /* Stats buffer printf. Requires a (struct _stats_emit *)st variable in the * current scope. */ -#define _st_printf(...) do { \ - ssize_t _r; \ - ssize_t _rem = st->size - st->of; \ - _r = rd_snprintf(st->buf+st->of, _rem, __VA_ARGS__); \ - if (_r >= _rem) { \ - st->size *= 2; \ - _rem = st->size - st->of; \ - st->buf = rd_realloc(st->buf, st->size); \ - _r = rd_snprintf(st->buf+st->of, _rem, __VA_ARGS__); \ - } \ - st->of += _r; \ +#define _st_printf(...) 
\ + do { \ + ssize_t _r; \ + ssize_t _rem = st->size - st->of; \ + _r = rd_snprintf(st->buf + st->of, _rem, __VA_ARGS__); \ + if (_r >= _rem) { \ + st->size *= 2; \ + _rem = st->size - st->of; \ + st->buf = rd_realloc(st->buf, st->size); \ + _r = rd_snprintf(st->buf + st->of, _rem, __VA_ARGS__); \ + } \ + st->of += _r; \ } while (0) struct _stats_total { @@ -1363,57 +1311,60 @@ struct _stats_total { /** * @brief Rollover and emit an average window. */ -static RD_INLINE void rd_kafka_stats_emit_avg (struct _stats_emit *st, - const char *name, - rd_avg_t *src_avg) { +static RD_INLINE void rd_kafka_stats_emit_avg(struct _stats_emit *st, + const char *name, + rd_avg_t *src_avg) { rd_avg_t avg; rd_avg_rollover(&avg, src_avg); _st_printf( - "\"%s\": {" - " \"min\":%"PRId64"," - " \"max\":%"PRId64"," - " \"avg\":%"PRId64"," - " \"sum\":%"PRId64"," - " \"stddev\": %"PRId64"," - " \"p50\": %"PRId64"," - " \"p75\": %"PRId64"," - " \"p90\": %"PRId64"," - " \"p95\": %"PRId64"," - " \"p99\": %"PRId64"," - " \"p99_99\": %"PRId64"," - " \"outofrange\": %"PRId64"," - " \"hdrsize\": %"PRId32"," - " \"cnt\":%i " - "}, ", - name, - avg.ra_v.minv, - avg.ra_v.maxv, - avg.ra_v.avg, - avg.ra_v.sum, - (int64_t)avg.ra_hist.stddev, - avg.ra_hist.p50, - avg.ra_hist.p75, - avg.ra_hist.p90, - avg.ra_hist.p95, - avg.ra_hist.p99, - avg.ra_hist.p99_99, - avg.ra_hist.oor, - avg.ra_hist.hdrsize, - avg.ra_v.cnt); + "\"%s\": {" + " \"min\":%" PRId64 + "," + " \"max\":%" PRId64 + "," + " \"avg\":%" PRId64 + "," + " \"sum\":%" PRId64 + "," + " \"stddev\": %" PRId64 + "," + " \"p50\": %" PRId64 + "," + " \"p75\": %" PRId64 + "," + " \"p90\": %" PRId64 + "," + " \"p95\": %" PRId64 + "," + " \"p99\": %" PRId64 + "," + " \"p99_99\": %" PRId64 + "," + " \"outofrange\": %" PRId64 + "," + " \"hdrsize\": %" PRId32 + "," + " \"cnt\":%i " + "}, ", + name, avg.ra_v.minv, avg.ra_v.maxv, avg.ra_v.avg, avg.ra_v.sum, + (int64_t)avg.ra_hist.stddev, avg.ra_hist.p50, avg.ra_hist.p75, + avg.ra_hist.p90, avg.ra_hist.p95, 
avg.ra_hist.p99, + avg.ra_hist.p99_99, avg.ra_hist.oor, avg.ra_hist.hdrsize, + avg.ra_v.cnt); rd_avg_destroy(&avg); } /** * Emit stats for toppar */ -static RD_INLINE void rd_kafka_stats_emit_toppar (struct _stats_emit *st, - struct _stats_total *total, - rd_kafka_toppar_t *rktp, - int first) { +static RD_INLINE void rd_kafka_stats_emit_toppar(struct _stats_emit *st, + struct _stats_total *total, + rd_kafka_toppar_t *rktp, + int first) { rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; int64_t end_offset; - int64_t consumer_lag = -1; + int64_t consumer_lag = -1; int64_t consumer_lag_stored = -1; struct offset_stats offs; int32_t broker_id = -1; @@ -1430,8 +1381,8 @@ static RD_INLINE void rd_kafka_stats_emit_toppar (struct _stats_emit *st, offs = rktp->rktp_offsets_fin; end_offset = (rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED) - ? rktp->rktp_ls_offset - : rktp->rktp_hi_offset; + ? rktp->rktp_ls_offset + : rktp->rktp_hi_offset; /* Calculate consumer_lag by using the highest offset * of stored_offset (the last message passed to application + 1, or @@ -1445,93 +1396,111 @@ static RD_INLINE void rd_kafka_stats_emit_toppar (struct _stats_emit *st, if (rktp->rktp_stored_offset >= 0 && rktp->rktp_stored_offset <= end_offset) consumer_lag_stored = - end_offset - rktp->rktp_stored_offset; + end_offset - rktp->rktp_stored_offset; if (rktp->rktp_committed_offset >= 0 && rktp->rktp_committed_offset <= end_offset) consumer_lag = end_offset - rktp->rktp_committed_offset; } - _st_printf("%s\"%"PRId32"\": { " - "\"partition\":%"PRId32", " - "\"broker\":%"PRId32", " - "\"leader\":%"PRId32", " - "\"desired\":%s, " - "\"unknown\":%s, " - "\"msgq_cnt\":%i, " - "\"msgq_bytes\":%"PRIusz", " - "\"xmit_msgq_cnt\":%i, " - "\"xmit_msgq_bytes\":%"PRIusz", " - "\"fetchq_cnt\":%i, " - "\"fetchq_size\":%"PRIu64", " - "\"fetch_state\":\"%s\", " - "\"query_offset\":%"PRId64", " - "\"next_offset\":%"PRId64", " - "\"app_offset\":%"PRId64", " - "\"stored_offset\":%"PRId64", " - 
"\"commited_offset\":%"PRId64", " /*FIXME: issue #80 */ - "\"committed_offset\":%"PRId64", " - "\"eof_offset\":%"PRId64", " - "\"lo_offset\":%"PRId64", " - "\"hi_offset\":%"PRId64", " - "\"ls_offset\":%"PRId64", " - "\"consumer_lag\":%"PRId64", " - "\"consumer_lag_stored\":%"PRId64", " - "\"txmsgs\":%"PRIu64", " - "\"txbytes\":%"PRIu64", " - "\"rxmsgs\":%"PRIu64", " - "\"rxbytes\":%"PRIu64", " - "\"msgs\": %"PRIu64", " - "\"rx_ver_drops\": %"PRIu64", " - "\"msgs_inflight\": %"PRId32", " - "\"next_ack_seq\": %"PRId32", " - "\"next_err_seq\": %"PRId32", " - "\"acked_msgid\": %"PRIu64 - "} ", - first ? "" : ", ", - rktp->rktp_partition, - rktp->rktp_partition, - broker_id, - rktp->rktp_leader_id, - (rktp->rktp_flags&RD_KAFKA_TOPPAR_F_DESIRED)?"true":"false", - (rktp->rktp_flags&RD_KAFKA_TOPPAR_F_UNKNOWN)?"true":"false", - rd_kafka_msgq_len(&rktp->rktp_msgq), - rd_kafka_msgq_size(&rktp->rktp_msgq), - /* FIXME: xmit_msgq is local to the broker thread. */ - 0, - (size_t)0, - rd_kafka_q_len(rktp->rktp_fetchq), - rd_kafka_q_size(rktp->rktp_fetchq), - rd_kafka_fetch_states[rktp->rktp_fetch_state], - rktp->rktp_query_offset, - offs.fetch_offset, - rktp->rktp_app_offset, - rktp->rktp_stored_offset, - rktp->rktp_committed_offset, /* FIXME: issue #80 */ - rktp->rktp_committed_offset, - offs.eof_offset, - rktp->rktp_lo_offset, - rktp->rktp_hi_offset, - rktp->rktp_ls_offset, - consumer_lag, - consumer_lag_stored, - rd_atomic64_get(&rktp->rktp_c.tx_msgs), - rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes), - rd_atomic64_get(&rktp->rktp_c.rx_msgs), - rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes), - rk->rk_type == RD_KAFKA_PRODUCER ? 
- rd_atomic64_get(&rktp->rktp_c.producer_enq_msgs) : - rd_atomic64_get(&rktp->rktp_c.rx_msgs), /* legacy, same as rx_msgs */ - rd_atomic64_get(&rktp->rktp_c.rx_ver_drops), - rd_atomic32_get(&rktp->rktp_msgs_inflight), - rktp->rktp_eos.next_ack_seq, - rktp->rktp_eos.next_err_seq, - rktp->rktp_eos.acked_msgid); + _st_printf( + "%s\"%" PRId32 + "\": { " + "\"partition\":%" PRId32 + ", " + "\"broker\":%" PRId32 + ", " + "\"leader\":%" PRId32 + ", " + "\"desired\":%s, " + "\"unknown\":%s, " + "\"msgq_cnt\":%i, " + "\"msgq_bytes\":%" PRIusz + ", " + "\"xmit_msgq_cnt\":%i, " + "\"xmit_msgq_bytes\":%" PRIusz + ", " + "\"fetchq_cnt\":%i, " + "\"fetchq_size\":%" PRIu64 + ", " + "\"fetch_state\":\"%s\", " + "\"query_offset\":%" PRId64 + ", " + "\"next_offset\":%" PRId64 + ", " + "\"app_offset\":%" PRId64 + ", " + "\"stored_offset\":%" PRId64 + ", " + "\"commited_offset\":%" PRId64 + ", " /*FIXME: issue #80 */ + "\"committed_offset\":%" PRId64 + ", " + "\"eof_offset\":%" PRId64 + ", " + "\"lo_offset\":%" PRId64 + ", " + "\"hi_offset\":%" PRId64 + ", " + "\"ls_offset\":%" PRId64 + ", " + "\"consumer_lag\":%" PRId64 + ", " + "\"consumer_lag_stored\":%" PRId64 + ", " + "\"txmsgs\":%" PRIu64 + ", " + "\"txbytes\":%" PRIu64 + ", " + "\"rxmsgs\":%" PRIu64 + ", " + "\"rxbytes\":%" PRIu64 + ", " + "\"msgs\": %" PRIu64 + ", " + "\"rx_ver_drops\": %" PRIu64 + ", " + "\"msgs_inflight\": %" PRId32 + ", " + "\"next_ack_seq\": %" PRId32 + ", " + "\"next_err_seq\": %" PRId32 + ", " + "\"acked_msgid\": %" PRIu64 "} ", + first ? "" : ", ", rktp->rktp_partition, rktp->rktp_partition, + broker_id, rktp->rktp_leader_id, + (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) ? "true" : "false", + (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) ? "true" : "false", + rd_kafka_msgq_len(&rktp->rktp_msgq), + rd_kafka_msgq_size(&rktp->rktp_msgq), + /* FIXME: xmit_msgq is local to the broker thread. 
*/ + 0, (size_t)0, rd_kafka_q_len(rktp->rktp_fetchq), + rd_kafka_q_size(rktp->rktp_fetchq), + rd_kafka_fetch_states[rktp->rktp_fetch_state], + rktp->rktp_query_offset, offs.fetch_offset, rktp->rktp_app_offset, + rktp->rktp_stored_offset, + rktp->rktp_committed_offset, /* FIXME: issue #80 */ + rktp->rktp_committed_offset, offs.eof_offset, rktp->rktp_lo_offset, + rktp->rktp_hi_offset, rktp->rktp_ls_offset, consumer_lag, + consumer_lag_stored, rd_atomic64_get(&rktp->rktp_c.tx_msgs), + rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes), + rd_atomic64_get(&rktp->rktp_c.rx_msgs), + rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes), + rk->rk_type == RD_KAFKA_PRODUCER + ? rd_atomic64_get(&rktp->rktp_c.producer_enq_msgs) + : rd_atomic64_get( + &rktp->rktp_c.rx_msgs), /* legacy, same as rx_msgs */ + rd_atomic64_get(&rktp->rktp_c.rx_ver_drops), + rd_atomic32_get(&rktp->rktp_msgs_inflight), + rktp->rktp_eos.next_ack_seq, rktp->rktp_eos.next_err_seq, + rktp->rktp_eos.acked_msgid); if (total) { - total->txmsgs += rd_atomic64_get(&rktp->rktp_c.tx_msgs); - total->txmsg_bytes += rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes); - total->rxmsgs += rd_atomic64_get(&rktp->rktp_c.rx_msgs); - total->rxmsg_bytes += rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes); + total->txmsgs += rd_atomic64_get(&rktp->rktp_c.tx_msgs); + total->txmsg_bytes += + rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes); + total->rxmsgs += rd_atomic64_get(&rktp->rktp_c.rx_msgs); + total->rxmsg_bytes += + rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes); } rd_kafka_toppar_unlock(rktp); @@ -1540,80 +1509,78 @@ static RD_INLINE void rd_kafka_stats_emit_toppar (struct _stats_emit *st, /** * @brief Emit broker request type stats */ -static void rd_kafka_stats_emit_broker_reqs (struct _stats_emit *st, - rd_kafka_broker_t *rkb) { +static void rd_kafka_stats_emit_broker_reqs(struct _stats_emit *st, + rd_kafka_broker_t *rkb) { /* Filter out request types that will never be sent by the client. 
*/ static const rd_bool_t filter[4][RD_KAFKAP__NUM] = { - [RD_KAFKA_PRODUCER] = { - [RD_KAFKAP_Fetch] = rd_true, - [RD_KAFKAP_OffsetCommit] = rd_true, - [RD_KAFKAP_OffsetFetch] = rd_true, - [RD_KAFKAP_JoinGroup] = rd_true, - [RD_KAFKAP_Heartbeat] = rd_true, - [RD_KAFKAP_LeaveGroup] = rd_true, - [RD_KAFKAP_SyncGroup] = rd_true + [RD_KAFKA_PRODUCER] = {[RD_KAFKAP_Fetch] = rd_true, + [RD_KAFKAP_OffsetCommit] = rd_true, + [RD_KAFKAP_OffsetFetch] = rd_true, + [RD_KAFKAP_JoinGroup] = rd_true, + [RD_KAFKAP_Heartbeat] = rd_true, + [RD_KAFKAP_LeaveGroup] = rd_true, + [RD_KAFKAP_SyncGroup] = rd_true}, + [RD_KAFKA_CONSUMER] = + { + [RD_KAFKAP_Produce] = rd_true, + [RD_KAFKAP_InitProducerId] = rd_true, + /* Transactional producer */ + [RD_KAFKAP_AddPartitionsToTxn] = rd_true, + [RD_KAFKAP_AddOffsetsToTxn] = rd_true, + [RD_KAFKAP_EndTxn] = rd_true, + [RD_KAFKAP_TxnOffsetCommit] = rd_true, }, - [RD_KAFKA_CONSUMER] = { - [RD_KAFKAP_Produce] = rd_true, - [RD_KAFKAP_InitProducerId] = rd_true, - /* Transactional producer */ - [RD_KAFKAP_AddPartitionsToTxn] = rd_true, - [RD_KAFKAP_AddOffsetsToTxn] = rd_true, - [RD_KAFKAP_EndTxn] = rd_true, - [RD_KAFKAP_TxnOffsetCommit] = rd_true, + [2 /*any client type*/] = + { + [RD_KAFKAP_UpdateMetadata] = rd_true, + [RD_KAFKAP_ControlledShutdown] = rd_true, + [RD_KAFKAP_LeaderAndIsr] = rd_true, + [RD_KAFKAP_StopReplica] = rd_true, + [RD_KAFKAP_OffsetForLeaderEpoch] = rd_true, + + [RD_KAFKAP_WriteTxnMarkers] = rd_true, + + [RD_KAFKAP_AlterReplicaLogDirs] = rd_true, + [RD_KAFKAP_DescribeLogDirs] = rd_true, + + [RD_KAFKAP_SaslAuthenticate] = rd_false, + + [RD_KAFKAP_CreateDelegationToken] = rd_true, + [RD_KAFKAP_RenewDelegationToken] = rd_true, + [RD_KAFKAP_ExpireDelegationToken] = rd_true, + [RD_KAFKAP_DescribeDelegationToken] = rd_true, + [RD_KAFKAP_IncrementalAlterConfigs] = rd_true, + [RD_KAFKAP_ElectLeaders] = rd_true, + [RD_KAFKAP_AlterPartitionReassignments] = rd_true, + [RD_KAFKAP_ListPartitionReassignments] = rd_true, + 
[RD_KAFKAP_AlterUserScramCredentials] = rd_true, + [RD_KAFKAP_Vote] = rd_true, + [RD_KAFKAP_BeginQuorumEpoch] = rd_true, + [RD_KAFKAP_EndQuorumEpoch] = rd_true, + [RD_KAFKAP_DescribeQuorum] = rd_true, + [RD_KAFKAP_AlterIsr] = rd_true, + [RD_KAFKAP_UpdateFeatures] = rd_true, + [RD_KAFKAP_Envelope] = rd_true, }, - [2/*any client type*/] = { - [RD_KAFKAP_UpdateMetadata] = rd_true, - [RD_KAFKAP_ControlledShutdown] = rd_true, - [RD_KAFKAP_LeaderAndIsr] = rd_true, - [RD_KAFKAP_StopReplica] = rd_true, - [RD_KAFKAP_OffsetForLeaderEpoch] = rd_true, - - [RD_KAFKAP_WriteTxnMarkers] = rd_true, - - [RD_KAFKAP_AlterReplicaLogDirs] = rd_true, - [RD_KAFKAP_DescribeLogDirs] = rd_true, - - [RD_KAFKAP_SaslAuthenticate] = rd_false, - - [RD_KAFKAP_CreateDelegationToken] = rd_true, - [RD_KAFKAP_RenewDelegationToken] = rd_true, - [RD_KAFKAP_ExpireDelegationToken] = rd_true, - [RD_KAFKAP_DescribeDelegationToken] = rd_true, - [RD_KAFKAP_IncrementalAlterConfigs] = rd_true, - [RD_KAFKAP_ElectLeaders] = rd_true, - [RD_KAFKAP_AlterPartitionReassignments] = rd_true, - [RD_KAFKAP_ListPartitionReassignments] = rd_true, - [RD_KAFKAP_AlterUserScramCredentials] = rd_true, - [RD_KAFKAP_Vote] = rd_true, - [RD_KAFKAP_BeginQuorumEpoch] = rd_true, - [RD_KAFKAP_EndQuorumEpoch] = rd_true, - [RD_KAFKAP_DescribeQuorum] = rd_true, - [RD_KAFKAP_AlterIsr] = rd_true, - [RD_KAFKAP_UpdateFeatures] = rd_true, - [RD_KAFKAP_Envelope] = rd_true, - }, - [3/*hide-unless-non-zero*/] = { - /* Hide Admin requests unless they've been used */ - [RD_KAFKAP_CreateTopics] = rd_true, - [RD_KAFKAP_DeleteTopics] = rd_true, - [RD_KAFKAP_DeleteRecords] = rd_true, - [RD_KAFKAP_CreatePartitions] = rd_true, - [RD_KAFKAP_DescribeAcls] = rd_true, - [RD_KAFKAP_CreateAcls] = rd_true, - [RD_KAFKAP_DeleteAcls] = rd_true, - [RD_KAFKAP_DescribeConfigs] = rd_true, - [RD_KAFKAP_AlterConfigs] = rd_true, - [RD_KAFKAP_DeleteGroups] = rd_true, - [RD_KAFKAP_ListGroups] = rd_true, - [RD_KAFKAP_DescribeGroups] = rd_true - } - }; + [3 
/*hide-unless-non-zero*/] = { + /* Hide Admin requests unless they've been used */ + [RD_KAFKAP_CreateTopics] = rd_true, + [RD_KAFKAP_DeleteTopics] = rd_true, + [RD_KAFKAP_DeleteRecords] = rd_true, + [RD_KAFKAP_CreatePartitions] = rd_true, + [RD_KAFKAP_DescribeAcls] = rd_true, + [RD_KAFKAP_CreateAcls] = rd_true, + [RD_KAFKAP_DeleteAcls] = rd_true, + [RD_KAFKAP_DescribeConfigs] = rd_true, + [RD_KAFKAP_AlterConfigs] = rd_true, + [RD_KAFKAP_DeleteGroups] = rd_true, + [RD_KAFKAP_ListGroups] = rd_true, + [RD_KAFKAP_DescribeGroups] = rd_true}}; int i; int cnt = 0; _st_printf("\"req\": { "); - for (i = 0 ; i < RD_KAFKAP__NUM ; i++) { + for (i = 0; i < RD_KAFKAP__NUM; i++) { int64_t v; if (filter[rkb->rkb_rk->rk_type][i] || filter[2][i]) @@ -1623,8 +1590,7 @@ static void rd_kafka_stats_emit_broker_reqs (struct _stats_emit *st, if (!v && filter[3][i]) continue; /* Filter out zero values */ - _st_printf("%s\"%s\": %"PRId64, - cnt > 0 ? ", " : "", + _st_printf("%s\"%s\": %" PRId64, cnt > 0 ? ", " : "", rd_kafka_ApiKey2str(i), v); cnt++; @@ -1636,58 +1602,58 @@ static void rd_kafka_stats_emit_broker_reqs (struct _stats_emit *st, /** * Emit all statistics */ -static void rd_kafka_stats_emit_all (rd_kafka_t *rk) { - rd_kafka_broker_t *rkb; - rd_kafka_topic_t *rkt; - rd_ts_t now; - rd_kafka_op_t *rko; - unsigned int tot_cnt; - size_t tot_size; +static void rd_kafka_stats_emit_all(rd_kafka_t *rk) { + rd_kafka_broker_t *rkb; + rd_kafka_topic_t *rkt; + rd_ts_t now; + rd_kafka_op_t *rko; + unsigned int tot_cnt; + size_t tot_size; rd_kafka_resp_err_t err; - struct _stats_emit stx = { .size = 1024*10 }; - struct _stats_emit *st = &stx; + struct _stats_emit stx = {.size = 1024 * 10}; + struct _stats_emit *st = &stx; struct _stats_total total = {0}; st->buf = rd_malloc(st->size); - rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); - rd_kafka_rdlock(rk); - - now = rd_clock(); - _st_printf("{ " - "\"name\": \"%s\", " - "\"client_id\": \"%s\", " - "\"type\": \"%s\", " - "\"ts\":%"PRId64", " 
- "\"time\":%lli, " - "\"age\":%"PRId64", " - "\"replyq\":%i, " - "\"msg_cnt\":%u, " - "\"msg_size\":%"PRIusz", " - "\"msg_max\":%u, " - "\"msg_size_max\":%"PRIusz", " - "\"simple_cnt\":%i, " - "\"metadata_cache_cnt\":%i, " - "\"brokers\":{ "/*open brokers*/, - rk->rk_name, - rk->rk_conf.client_id_str, - rd_kafka_type2str(rk->rk_type), - now, - (signed long long)time(NULL), - now - rk->rk_ts_created, - rd_kafka_q_len(rk->rk_rep), - tot_cnt, tot_size, - rk->rk_curr_msgs.max_cnt, rk->rk_curr_msgs.max_size, - rd_atomic32_get(&rk->rk_simple_cnt), - rk->rk_metadata_cache.rkmc_cnt); - - - TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { - rd_kafka_toppar_t *rktp; + rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); + rd_kafka_rdlock(rk); + + now = rd_clock(); + _st_printf( + "{ " + "\"name\": \"%s\", " + "\"client_id\": \"%s\", " + "\"type\": \"%s\", " + "\"ts\":%" PRId64 + ", " + "\"time\":%lli, " + "\"age\":%" PRId64 + ", " + "\"replyq\":%i, " + "\"msg_cnt\":%u, " + "\"msg_size\":%" PRIusz + ", " + "\"msg_max\":%u, " + "\"msg_size_max\":%" PRIusz + ", " + "\"simple_cnt\":%i, " + "\"metadata_cache_cnt\":%i, " + "\"brokers\":{ " /*open brokers*/, + rk->rk_name, rk->rk_conf.client_id_str, + rd_kafka_type2str(rk->rk_type), now, (signed long long)time(NULL), + now - rk->rk_ts_created, rd_kafka_q_len(rk->rk_rep), tot_cnt, + tot_size, rk->rk_curr_msgs.max_cnt, rk->rk_curr_msgs.max_size, + rd_atomic32_get(&rk->rk_simple_cnt), + rk->rk_metadata_cache.rkmc_cnt); + + + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_kafka_toppar_t *rktp; rd_ts_t txidle = -1, rxidle = -1; - rd_kafka_broker_lock(rkb); + rd_kafka_broker_lock(rkb); if (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP) { /* Calculate tx and rx idle time in usecs */ @@ -1705,67 +1671,81 @@ static void rd_kafka_stats_emit_all (rd_kafka_t *rk) { rxidle = -1; } - _st_printf("%s\"%s\": { "/*open broker*/ - "\"name\":\"%s\", " - "\"nodeid\":%"PRId32", " - "\"nodename\":\"%s\", " - "\"source\":\"%s\", " - "\"state\":\"%s\", " - 
"\"stateage\":%"PRId64", " - "\"outbuf_cnt\":%i, " - "\"outbuf_msg_cnt\":%i, " - "\"waitresp_cnt\":%i, " - "\"waitresp_msg_cnt\":%i, " - "\"tx\":%"PRIu64", " - "\"txbytes\":%"PRIu64", " - "\"txerrs\":%"PRIu64", " - "\"txretries\":%"PRIu64", " - "\"txidle\":%"PRId64", " - "\"req_timeouts\":%"PRIu64", " - "\"rx\":%"PRIu64", " - "\"rxbytes\":%"PRIu64", " - "\"rxerrs\":%"PRIu64", " - "\"rxcorriderrs\":%"PRIu64", " - "\"rxpartial\":%"PRIu64", " - "\"rxidle\":%"PRId64", " - "\"zbuf_grow\":%"PRIu64", " - "\"buf_grow\":%"PRIu64", " - "\"wakeups\":%"PRIu64", " - "\"connects\":%"PRId32", " - "\"disconnects\":%"PRId32", ", - rkb == TAILQ_FIRST(&rk->rk_brokers) ? "" : ", ", - rkb->rkb_name, - rkb->rkb_name, - rkb->rkb_nodeid, - rkb->rkb_nodename, - rd_kafka_confsource2str(rkb->rkb_source), - rd_kafka_broker_state_names[rkb->rkb_state], - rkb->rkb_ts_state ? now - rkb->rkb_ts_state : 0, - rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt), - rd_atomic32_get(&rkb->rkb_outbufs.rkbq_msg_cnt), - rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt), - rd_atomic32_get(&rkb->rkb_waitresps.rkbq_msg_cnt), - rd_atomic64_get(&rkb->rkb_c.tx), - rd_atomic64_get(&rkb->rkb_c.tx_bytes), - rd_atomic64_get(&rkb->rkb_c.tx_err), - rd_atomic64_get(&rkb->rkb_c.tx_retries), - txidle, - rd_atomic64_get(&rkb->rkb_c.req_timeouts), - rd_atomic64_get(&rkb->rkb_c.rx), - rd_atomic64_get(&rkb->rkb_c.rx_bytes), - rd_atomic64_get(&rkb->rkb_c.rx_err), - rd_atomic64_get(&rkb->rkb_c.rx_corrid_err), - rd_atomic64_get(&rkb->rkb_c.rx_partial), - rxidle, - rd_atomic64_get(&rkb->rkb_c.zbuf_grow), - rd_atomic64_get(&rkb->rkb_c.buf_grow), - rd_atomic64_get(&rkb->rkb_c.wakeups), - rd_atomic32_get(&rkb->rkb_c.connects), - rd_atomic32_get(&rkb->rkb_c.disconnects)); - - total.tx += rd_atomic64_get(&rkb->rkb_c.tx); + _st_printf( + "%s\"%s\": { " /*open broker*/ + "\"name\":\"%s\", " + "\"nodeid\":%" PRId32 + ", " + "\"nodename\":\"%s\", " + "\"source\":\"%s\", " + "\"state\":\"%s\", " + "\"stateage\":%" PRId64 + ", " + "\"outbuf_cnt\":%i, 
" + "\"outbuf_msg_cnt\":%i, " + "\"waitresp_cnt\":%i, " + "\"waitresp_msg_cnt\":%i, " + "\"tx\":%" PRIu64 + ", " + "\"txbytes\":%" PRIu64 + ", " + "\"txerrs\":%" PRIu64 + ", " + "\"txretries\":%" PRIu64 + ", " + "\"txidle\":%" PRId64 + ", " + "\"req_timeouts\":%" PRIu64 + ", " + "\"rx\":%" PRIu64 + ", " + "\"rxbytes\":%" PRIu64 + ", " + "\"rxerrs\":%" PRIu64 + ", " + "\"rxcorriderrs\":%" PRIu64 + ", " + "\"rxpartial\":%" PRIu64 + ", " + "\"rxidle\":%" PRId64 + ", " + "\"zbuf_grow\":%" PRIu64 + ", " + "\"buf_grow\":%" PRIu64 + ", " + "\"wakeups\":%" PRIu64 + ", " + "\"connects\":%" PRId32 + ", " + "\"disconnects\":%" PRId32 ", ", + rkb == TAILQ_FIRST(&rk->rk_brokers) ? "" : ", ", + rkb->rkb_name, rkb->rkb_name, rkb->rkb_nodeid, + rkb->rkb_nodename, rd_kafka_confsource2str(rkb->rkb_source), + rd_kafka_broker_state_names[rkb->rkb_state], + rkb->rkb_ts_state ? now - rkb->rkb_ts_state : 0, + rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt), + rd_atomic32_get(&rkb->rkb_outbufs.rkbq_msg_cnt), + rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt), + rd_atomic32_get(&rkb->rkb_waitresps.rkbq_msg_cnt), + rd_atomic64_get(&rkb->rkb_c.tx), + rd_atomic64_get(&rkb->rkb_c.tx_bytes), + rd_atomic64_get(&rkb->rkb_c.tx_err), + rd_atomic64_get(&rkb->rkb_c.tx_retries), txidle, + rd_atomic64_get(&rkb->rkb_c.req_timeouts), + rd_atomic64_get(&rkb->rkb_c.rx), + rd_atomic64_get(&rkb->rkb_c.rx_bytes), + rd_atomic64_get(&rkb->rkb_c.rx_err), + rd_atomic64_get(&rkb->rkb_c.rx_corrid_err), + rd_atomic64_get(&rkb->rkb_c.rx_partial), rxidle, + rd_atomic64_get(&rkb->rkb_c.zbuf_grow), + rd_atomic64_get(&rkb->rkb_c.buf_grow), + rd_atomic64_get(&rkb->rkb_c.wakeups), + rd_atomic32_get(&rkb->rkb_c.connects), + rd_atomic32_get(&rkb->rkb_c.disconnects)); + + total.tx += rd_atomic64_get(&rkb->rkb_c.tx); total.tx_bytes += rd_atomic64_get(&rkb->rkb_c.tx_bytes); - total.rx += rd_atomic64_get(&rkb->rkb_c.rx); + total.rx += rd_atomic64_get(&rkb->rkb_c.rx); total.rx_bytes += rd_atomic64_get(&rkb->rkb_c.rx_bytes); 
rd_kafka_stats_emit_avg(st, "int_latency", @@ -1777,58 +1757,63 @@ static void rd_kafka_stats_emit_all (rd_kafka_t *rk) { rd_kafka_stats_emit_broker_reqs(st, rkb); - _st_printf("\"toppars\":{ "/*open toppars*/); - - TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) { - _st_printf("%s\"%.*s-%"PRId32"\": { " - "\"topic\":\"%.*s\", " - "\"partition\":%"PRId32"} ", - rktp==TAILQ_FIRST(&rkb->rkb_toppars)?"":", ", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition); - } + _st_printf("\"toppars\":{ " /*open toppars*/); + + TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) { + _st_printf( + "%s\"%.*s-%" PRId32 + "\": { " + "\"topic\":\"%.*s\", " + "\"partition\":%" PRId32 "} ", + rktp == TAILQ_FIRST(&rkb->rkb_toppars) ? "" : ", ", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + } - rd_kafka_broker_unlock(rkb); + rd_kafka_broker_unlock(rkb); - _st_printf("} "/*close toppars*/ - "} "/*close broker*/); - } + _st_printf( + "} " /*close toppars*/ + "} " /*close broker*/); + } - _st_printf("}, " /* close "brokers" array */ - "\"topics\":{ "); + _st_printf( + "}, " /* close "brokers" array */ + "\"topics\":{ "); - TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { rd_kafka_toppar_t *rktp; - int i, j; - - rd_kafka_topic_rdlock(rkt); - _st_printf("%s\"%.*s\": { " - "\"topic\":\"%.*s\", " - "\"age\":%"PRId64", " - "\"metadata_age\":%"PRId64", ", - rkt==TAILQ_FIRST(&rk->rk_topics)?"":", ", - RD_KAFKAP_STR_PR(rkt->rkt_topic), - RD_KAFKAP_STR_PR(rkt->rkt_topic), - (now - rkt->rkt_ts_create)/1000, - rkt->rkt_ts_metadata ? 
- (now - rkt->rkt_ts_metadata)/1000 : 0); + int i, j; + + rd_kafka_topic_rdlock(rkt); + _st_printf( + "%s\"%.*s\": { " + "\"topic\":\"%.*s\", " + "\"age\":%" PRId64 + ", " + "\"metadata_age\":%" PRId64 ", ", + rkt == TAILQ_FIRST(&rk->rk_topics) ? "" : ", ", + RD_KAFKAP_STR_PR(rkt->rkt_topic), + RD_KAFKAP_STR_PR(rkt->rkt_topic), + (now - rkt->rkt_ts_create) / 1000, + rkt->rkt_ts_metadata ? (now - rkt->rkt_ts_metadata) / 1000 + : 0); rd_kafka_stats_emit_avg(st, "batchsize", &rkt->rkt_avg_batchsize); - rd_kafka_stats_emit_avg(st, "batchcnt", - &rkt->rkt_avg_batchcnt); + rd_kafka_stats_emit_avg(st, "batchcnt", &rkt->rkt_avg_batchcnt); _st_printf("\"partitions\":{ " /*open partitions*/); - for (i = 0 ; i < rkt->rkt_partition_cnt ; i++) + for (i = 0; i < rkt->rkt_partition_cnt; i++) rd_kafka_stats_emit_toppar(st, &total, rkt->rkt_p[i], i == 0); RD_LIST_FOREACH(rktp, &rkt->rkt_desp, j) - rd_kafka_stats_emit_toppar(st, &total, rktp, i+j == 0); + rd_kafka_stats_emit_toppar(st, &total, rktp, i + j == 0); i += j; @@ -1836,97 +1821,105 @@ static void rd_kafka_stats_emit_all (rd_kafka_t *rk) { rd_kafka_stats_emit_toppar(st, NULL, rkt->rkt_ua, i++ == 0); - rd_kafka_topic_rdunlock(rkt); - - _st_printf("} "/*close partitions*/ - "} "/*close topic*/); + rd_kafka_topic_rdunlock(rkt); - } - _st_printf("} "/*close topics*/); + _st_printf( + "} " /*close partitions*/ + "} " /*close topic*/); + } + _st_printf("} " /*close topics*/); if (rk->rk_cgrp) { rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; - _st_printf(", \"cgrp\": { " - "\"state\": \"%s\", " - "\"stateage\": %"PRId64", " - "\"join_state\": \"%s\", " - "\"rebalance_age\": %"PRId64", " - "\"rebalance_cnt\": %d, " - "\"rebalance_reason\": \"%s\", " - "\"assignment_size\": %d }", - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rkcg->rkcg_ts_statechange ? - (now - rkcg->rkcg_ts_statechange) / 1000 : 0, - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_c.ts_rebalance ? 
- (now - rkcg->rkcg_c.ts_rebalance)/1000 : 0, - rkcg->rkcg_c.rebalance_cnt, - rkcg->rkcg_c.rebalance_reason, - rkcg->rkcg_c.assignment_size); + _st_printf( + ", \"cgrp\": { " + "\"state\": \"%s\", " + "\"stateage\": %" PRId64 + ", " + "\"join_state\": \"%s\", " + "\"rebalance_age\": %" PRId64 + ", " + "\"rebalance_cnt\": %d, " + "\"rebalance_reason\": \"%s\", " + "\"assignment_size\": %d }", + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rkcg->rkcg_ts_statechange + ? (now - rkcg->rkcg_ts_statechange) / 1000 + : 0, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_c.ts_rebalance + ? (now - rkcg->rkcg_c.ts_rebalance) / 1000 + : 0, + rkcg->rkcg_c.rebalance_cnt, rkcg->rkcg_c.rebalance_reason, + rkcg->rkcg_c.assignment_size); } if (rd_kafka_is_idempotent(rk)) { - _st_printf(", \"eos\": { " - "\"idemp_state\": \"%s\", " - "\"idemp_stateage\": %"PRId64", " - "\"txn_state\": \"%s\", " - "\"txn_stateage\": %"PRId64", " - "\"txn_may_enq\": %s, " - "\"producer_id\": %"PRId64", " - "\"producer_epoch\": %hd, " - "\"epoch_cnt\": %d " - "}", - rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), - (now - rk->rk_eos.ts_idemp_state) / 1000, - rd_kafka_txn_state2str(rk->rk_eos.txn_state), - (now - rk->rk_eos.ts_txn_state) / 1000, - rd_atomic32_get(&rk->rk_eos.txn_may_enq) ? - "true":"false", - rk->rk_eos.pid.id, - rk->rk_eos.pid.epoch, - rk->rk_eos.epoch_cnt); + _st_printf( + ", \"eos\": { " + "\"idemp_state\": \"%s\", " + "\"idemp_stateage\": %" PRId64 + ", " + "\"txn_state\": \"%s\", " + "\"txn_stateage\": %" PRId64 + ", " + "\"txn_may_enq\": %s, " + "\"producer_id\": %" PRId64 + ", " + "\"producer_epoch\": %hd, " + "\"epoch_cnt\": %d " + "}", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), + (now - rk->rk_eos.ts_idemp_state) / 1000, + rd_kafka_txn_state2str(rk->rk_eos.txn_state), + (now - rk->rk_eos.ts_txn_state) / 1000, + rd_atomic32_get(&rk->rk_eos.txn_may_enq) ? 
"true" : "false", + rk->rk_eos.pid.id, rk->rk_eos.pid.epoch, + rk->rk_eos.epoch_cnt); } if ((err = rd_atomic32_get(&rk->rk_fatal.err))) - _st_printf(", \"fatal\": { " - "\"error\": \"%s\", " - "\"reason\": \"%s\", " - "\"cnt\": %d " - "}", - rd_kafka_err2str(err), - rk->rk_fatal.errstr, - rk->rk_fatal.cnt); + _st_printf( + ", \"fatal\": { " + "\"error\": \"%s\", " + "\"reason\": \"%s\", " + "\"cnt\": %d " + "}", + rd_kafka_err2str(err), rk->rk_fatal.errstr, + rk->rk_fatal.cnt); - rd_kafka_rdunlock(rk); + rd_kafka_rdunlock(rk); /* Total counters */ - _st_printf(", " - "\"tx\":%"PRId64", " - "\"tx_bytes\":%"PRId64", " - "\"rx\":%"PRId64", " - "\"rx_bytes\":%"PRId64", " - "\"txmsgs\":%"PRId64", " - "\"txmsg_bytes\":%"PRId64", " - "\"rxmsgs\":%"PRId64", " - "\"rxmsg_bytes\":%"PRId64, - total.tx, - total.tx_bytes, - total.rx, - total.rx_bytes, - total.txmsgs, - total.txmsg_bytes, - total.rxmsgs, - total.rxmsg_bytes); - - _st_printf("}"/*close object*/); - - - /* Enqueue op for application */ - rko = rd_kafka_op_new(RD_KAFKA_OP_STATS); + _st_printf( + ", " + "\"tx\":%" PRId64 + ", " + "\"tx_bytes\":%" PRId64 + ", " + "\"rx\":%" PRId64 + ", " + "\"rx_bytes\":%" PRId64 + ", " + "\"txmsgs\":%" PRId64 + ", " + "\"txmsg_bytes\":%" PRId64 + ", " + "\"rxmsgs\":%" PRId64 + ", " + "\"rxmsg_bytes\":%" PRId64, + total.tx, total.tx_bytes, total.rx, total.rx_bytes, total.txmsgs, + total.txmsg_bytes, total.rxmsgs, total.rxmsg_bytes); + + _st_printf("}" /*close object*/); + + + /* Enqueue op for application */ + rko = rd_kafka_op_new(RD_KAFKA_OP_STATS); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH); - rko->rko_u.stats.json = st->buf; - rko->rko_u.stats.json_len = st->of; - rd_kafka_q_enq(rk->rk_rep, rko); + rko->rko_u.stats.json = st->buf; + rko->rko_u.stats.json_len = st->of; + rd_kafka_q_enq(rk->rk_rep, rko); } @@ -1936,7 +1929,7 @@ static void rd_kafka_stats_emit_all (rd_kafka_t *rk) { * @locality rdkafka main thread * @locks none */ -static void rd_kafka_1s_tmr_cb 
(rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_1s_tmr_cb(rd_kafka_timers_t *rkts, void *arg) { rd_kafka_t *rk = rkts->rkts_rk; /* Scan topic state, message timeouts, etc. */ @@ -1951,9 +1944,9 @@ static void rd_kafka_1s_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { rd_kafka_coord_cache_expire(&rk->rk_coord_cache); } -static void rd_kafka_stats_emit_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_stats_emit_tmr_cb(rd_kafka_timers_t *rkts, void *arg) { rd_kafka_t *rk = rkts->rkts_rk; - rd_kafka_stats_emit_all(rk); + rd_kafka_stats_emit_all(rk); } @@ -1962,7 +1955,7 @@ static void rd_kafka_stats_emit_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { * * @locality rdkafka main thread */ -static void rd_kafka_metadata_refresh_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_metadata_refresh_cb(rd_kafka_timers_t *rkts, void *arg) { rd_kafka_t *rk = rkts->rkts_rk; rd_kafka_resp_err_t err; @@ -1973,24 +1966,23 @@ static void rd_kafka_metadata_refresh_cb (rd_kafka_timers_t *rkts, void *arg) { * subscribed topics now being available in the cluster. */ if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp) err = rd_kafka_metadata_refresh_consumer_topics( - rk, NULL, - "periodic topic and broker list refresh"); + rk, NULL, "periodic topic and broker list refresh"); else err = rd_kafka_metadata_refresh_known_topics( - rk, NULL, rd_true/*force*/, - "periodic topic and broker list refresh"); + rk, NULL, rd_true /*force*/, + "periodic topic and broker list refresh"); if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC && rd_interval(&rk->rk_suppress.broker_metadata_refresh, - 10*1000*1000 /*10s*/, 0) > 0) { + 10 * 1000 * 1000 /*10s*/, 0) > 0) { /* If there are no (locally referenced) topics * to query, refresh the broker list. * This avoids getting idle-disconnected for clients * that have not yet referenced a topic and makes * sure such a client has an up to date broker list. 
*/ rd_kafka_metadata_refresh_brokers( - rk, NULL, "periodic broker list refresh"); + rk, NULL, "periodic broker list refresh"); } } @@ -2004,7 +1996,7 @@ static void rd_kafka_metadata_refresh_cb (rd_kafka_timers_t *rkts, void *arg) { * @locality app thread calling rd_kafka_new() * @locks none */ -static int rd_kafka_init_wait (rd_kafka_t *rk, int timeout_ms) { +static int rd_kafka_init_wait(rd_kafka_t *rk, int timeout_ms) { struct timespec tspec; int ret; @@ -2012,8 +2004,8 @@ static int rd_kafka_init_wait (rd_kafka_t *rk, int timeout_ms) { mtx_lock(&rk->rk_init_lock); while (rk->rk_init_wait_cnt > 0 && - cnd_timedwait_abs(&rk->rk_init_cnd, &rk->rk_init_lock, - &tspec) == thrd_success) + cnd_timedwait_abs(&rk->rk_init_cnd, &rk->rk_init_lock, &tspec) == + thrd_success) ; ret = rk->rk_init_wait_cnt; mtx_unlock(&rk->rk_init_lock); @@ -2025,23 +2017,23 @@ static int rd_kafka_init_wait (rd_kafka_t *rk, int timeout_ms) { /** * Main loop for Kafka handler thread. */ -static int rd_kafka_thread_main (void *arg) { - rd_kafka_t *rk = arg; - rd_kafka_timer_t tmr_1s = RD_ZERO_INIT; - rd_kafka_timer_t tmr_stats_emit = RD_ZERO_INIT; - rd_kafka_timer_t tmr_metadata_refresh = RD_ZERO_INIT; +static int rd_kafka_thread_main(void *arg) { + rd_kafka_t *rk = arg; + rd_kafka_timer_t tmr_1s = RD_ZERO_INIT; + rd_kafka_timer_t tmr_stats_emit = RD_ZERO_INIT; + rd_kafka_timer_t tmr_metadata_refresh = RD_ZERO_INIT; rd_kafka_set_thread_name("main"); rd_kafka_set_thread_sysname("rdk:main"); rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_MAIN); - (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); + (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); - /* Acquire lock (which was held by thread creator during creation) - * to synchronise state. */ - rd_kafka_wrlock(rk); - rd_kafka_wrunlock(rk); + /* Acquire lock (which was held by thread creator during creation) + * to synchronise state. 
*/ + rd_kafka_wrlock(rk); + rd_kafka_wrunlock(rk); /* 1 second timer for topic scan and connection checking. */ rd_kafka_timer_start(&rk->rk_timers, &tmr_1s, 1000000, @@ -2053,7 +2045,7 @@ static int rd_kafka_thread_main (void *arg) { if (rk->rk_conf.metadata_refresh_interval_ms > 0) rd_kafka_timer_start(&rk->rk_timers, &tmr_metadata_refresh, rk->rk_conf.metadata_refresh_interval_ms * - 1000ll, + 1000ll, rd_kafka_metadata_refresh_cb, NULL); if (rk->rk_cgrp) @@ -2067,17 +2059,17 @@ static int rd_kafka_thread_main (void *arg) { cnd_broadcast(&rk->rk_init_cnd); mtx_unlock(&rk->rk_init_lock); - while (likely(!rd_kafka_terminating(rk) || - rd_kafka_q_len(rk->rk_ops) || - (rk->rk_cgrp && (rk->rk_cgrp->rkcg_state != RD_KAFKA_CGRP_STATE_TERM)))) { + while (likely(!rd_kafka_terminating(rk) || rd_kafka_q_len(rk->rk_ops) || + (rk->rk_cgrp && (rk->rk_cgrp->rkcg_state != + RD_KAFKA_CGRP_STATE_TERM)))) { rd_ts_t sleeptime = rd_kafka_timers_next( - &rk->rk_timers, 1000*1000/*1s*/, 1/*lock*/); + &rk->rk_timers, 1000 * 1000 /*1s*/, 1 /*lock*/); rd_kafka_q_serve(rk->rk_ops, (int)(sleeptime / 1000), 0, RD_KAFKA_Q_CB_CALLBACK, NULL, NULL); - if (rk->rk_cgrp) /* FIXME: move to timer-triggered */ - rd_kafka_cgrp_serve(rk->rk_cgrp); - rd_kafka_timers_run(&rk->rk_timers, RD_POLL_NOWAIT); - } + if (rk->rk_cgrp) /* FIXME: move to timer-triggered */ + rd_kafka_cgrp_serve(rk->rk_cgrp); + rd_kafka_timers_run(&rk->rk_timers, RD_POLL_NOWAIT); + } rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Internal main thread terminating"); @@ -2085,8 +2077,8 @@ static int rd_kafka_thread_main (void *arg) { if (rd_kafka_is_idempotent(rk)) rd_kafka_idemp_term(rk); - rd_kafka_q_disable(rk->rk_ops); - rd_kafka_q_purge(rk->rk_ops); + rd_kafka_q_disable(rk->rk_ops); + rd_kafka_q_purge(rk->rk_ops); rd_kafka_timer_stop(&rk->rk_timers, &tmr_1s, 1); if (rk->rk_conf.stats_interval_ms) @@ -2104,24 +2096,26 @@ static int rd_kafka_thread_main (void *arg) { rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Internal main thread termination 
done"); - rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1); + rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1); - return 0; + return 0; } -void rd_kafka_term_sig_handler (int sig) { - /* nop */ +void rd_kafka_term_sig_handler(int sig) { + /* nop */ } -rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, - char *errstr, size_t errstr_size) { - rd_kafka_t *rk; - static rd_atomic32_t rkid; +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, + rd_kafka_conf_t *app_conf, + char *errstr, + size_t errstr_size) { + rd_kafka_t *rk; + static rd_atomic32_t rkid; rd_kafka_conf_t *conf; rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR; - int ret_errno = 0; + int ret_errno = 0; const char *conf_err; #ifndef _WIN32 sigset_t newset, oldset; @@ -2158,18 +2152,18 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, } - rd_kafka_global_cnt_incr(); + rd_kafka_global_cnt_incr(); - /* - * Set up the handle. - */ - rk = rd_calloc(1, sizeof(*rk)); + /* + * Set up the handle. + */ + rk = rd_calloc(1, sizeof(*rk)); - rk->rk_type = type; + rk->rk_type = type; rk->rk_ts_created = rd_clock(); /* Struct-copy the config object. */ - rk->rk_conf = *conf; + rk->rk_conf = *conf; if (!app_conf) rd_free(conf); /* Free the base config struct only, * not its fields since they were copied to @@ -2177,18 +2171,19 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, * freed from rd_kafka_destroy_internal() * as the rk itself is destroyed. */ - /* Seed PRNG, don't bother about HAVE_RAND_R, since it is pretty cheap. */ + /* Seed PRNG, don't bother about HAVE_RAND_R, since it is pretty cheap. 
+ */ if (rk->rk_conf.enable_random_seed) call_once(&rd_kafka_global_srand_once, rd_kafka_global_srand); /* Call on_new() interceptors */ rd_kafka_interceptors_on_new(rk, &rk->rk_conf); - rwlock_init(&rk->rk_lock); + rwlock_init(&rk->rk_lock); mtx_init(&rk->rk_internal_rkb_lock, mtx_plain); - cnd_init(&rk->rk_broker_state_change_cnd); - mtx_init(&rk->rk_broker_state_change_lock, mtx_plain); + cnd_init(&rk->rk_broker_state_change_cnd); + mtx_init(&rk->rk_broker_state_change_lock, mtx_plain); rd_list_init(&rk->rk_broker_state_change_waiters, 8, rd_kafka_enq_once_trigger_destroy); @@ -2203,50 +2198,49 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, rd_atomic64_init(&rk->rk_ts_last_poll, rk->rk_ts_created); rd_atomic32_init(&rk->rk_flushing, 0); - rk->rk_rep = rd_kafka_q_new(rk); - rk->rk_ops = rd_kafka_q_new(rk); - rk->rk_ops->rkq_serve = rd_kafka_poll_cb; + rk->rk_rep = rd_kafka_q_new(rk); + rk->rk_ops = rd_kafka_q_new(rk); + rk->rk_ops->rkq_serve = rd_kafka_poll_cb; rk->rk_ops->rkq_opaque = rk; if (rk->rk_conf.log_queue) { - rk->rk_logq = rd_kafka_q_new(rk); - rk->rk_logq->rkq_serve = rd_kafka_poll_cb; + rk->rk_logq = rd_kafka_q_new(rk); + rk->rk_logq->rkq_serve = rd_kafka_poll_cb; rk->rk_logq->rkq_opaque = rk; } - TAILQ_INIT(&rk->rk_brokers); - TAILQ_INIT(&rk->rk_topics); + TAILQ_INIT(&rk->rk_brokers); + TAILQ_INIT(&rk->rk_topics); rd_kafka_timers_init(&rk->rk_timers, rk, rk->rk_ops); rd_kafka_metadata_cache_init(rk); rd_kafka_coord_cache_init(&rk->rk_coord_cache, rk->rk_conf.metadata_max_age_ms); rd_kafka_coord_reqs_init(rk); - if (rk->rk_conf.dr_cb || rk->rk_conf.dr_msg_cb) + if (rk->rk_conf.dr_cb || rk->rk_conf.dr_msg_cb) rk->rk_drmode = RD_KAFKA_DR_MODE_CB; else if (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) rk->rk_drmode = RD_KAFKA_DR_MODE_EVENT; else rk->rk_drmode = RD_KAFKA_DR_MODE_NONE; if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE) - rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_DR; + rk->rk_conf.enabled_events |= 
RD_KAFKA_EVENT_DR; - if (rk->rk_conf.rebalance_cb) - rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_REBALANCE; - if (rk->rk_conf.offset_commit_cb) - rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_OFFSET_COMMIT; + if (rk->rk_conf.rebalance_cb) + rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_REBALANCE; + if (rk->rk_conf.offset_commit_cb) + rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_OFFSET_COMMIT; if (rk->rk_conf.error_cb) rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_ERROR; #if WITH_SASL_OAUTHBEARER if (rk->rk_conf.sasl.enable_oauthbearer_unsecure_jwt && !rk->rk_conf.sasl.oauthbearer.token_refresh_cb) rd_kafka_conf_set_oauthbearer_token_refresh_cb( - &rk->rk_conf, - rd_kafka_oauthbearer_unsecured_token); + &rk->rk_conf, rd_kafka_oauthbearer_unsecured_token); if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb) rk->rk_conf.enabled_events |= - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH; + RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH; #endif rk->rk_controllerid = -1; @@ -2254,50 +2248,51 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, /* Admin client defaults */ rk->rk_conf.admin.request_timeout_ms = rk->rk_conf.socket_timeout_ms; - if (rk->rk_conf.debug) + if (rk->rk_conf.debug) rk->rk_conf.log_level = LOG_DEBUG; - rd_snprintf(rk->rk_name, sizeof(rk->rk_name), "%s#%s-%i", + rd_snprintf(rk->rk_name, sizeof(rk->rk_name), "%s#%s-%i", rk->rk_conf.client_id_str, rd_kafka_type2str(rk->rk_type), rd_atomic32_add(&rkid, 1)); - /* Construct clientid kafka string */ - rk->rk_client_id = rd_kafkap_str_new(rk->rk_conf.client_id_str,-1); + /* Construct clientid kafka string */ + rk->rk_client_id = rd_kafkap_str_new(rk->rk_conf.client_id_str, -1); /* Convert group.id to kafka string (may be NULL) */ - rk->rk_group_id = rd_kafkap_str_new(rk->rk_conf.group_id_str,-1); + rk->rk_group_id = rd_kafkap_str_new(rk->rk_conf.group_id_str, -1); /* Config fixups */ rk->rk_conf.queued_max_msg_bytes = - (int64_t)rk->rk_conf.queued_max_msg_kbytes * 1000ll; + 
(int64_t)rk->rk_conf.queued_max_msg_kbytes * 1000ll; - /* Enable api.version.request=true if fallback.broker.version - * indicates a supporting broker. */ - if (rd_kafka_ApiVersion_is_queryable(rk->rk_conf.broker_version_fallback)) - rk->rk_conf.api_version_request = 1; + /* Enable api.version.request=true if fallback.broker.version + * indicates a supporting broker. */ + if (rd_kafka_ApiVersion_is_queryable( + rk->rk_conf.broker_version_fallback)) + rk->rk_conf.api_version_request = 1; if (rk->rk_type == RD_KAFKA_PRODUCER) { mtx_init(&rk->rk_curr_msgs.lock, mtx_plain); cnd_init(&rk->rk_curr_msgs.cnd); - rk->rk_curr_msgs.max_cnt = - rk->rk_conf.queue_buffering_max_msgs; - if ((unsigned long long)rk->rk_conf. - queue_buffering_max_kbytes * 1024 > + rk->rk_curr_msgs.max_cnt = rk->rk_conf.queue_buffering_max_msgs; + if ((unsigned long long)rk->rk_conf.queue_buffering_max_kbytes * + 1024 > (unsigned long long)SIZE_MAX) { rk->rk_curr_msgs.max_size = SIZE_MAX; rd_kafka_log(rk, LOG_WARNING, "QUEUESIZE", "queue.buffering.max.kbytes adjusted " - "to system SIZE_MAX limit %"PRIusz" bytes", + "to system SIZE_MAX limit %" PRIusz + " bytes", rk->rk_curr_msgs.max_size); } else { rk->rk_curr_msgs.max_size = - (size_t)rk->rk_conf. 
- queue_buffering_max_kbytes * 1024; + (size_t)rk->rk_conf.queue_buffering_max_kbytes * + 1024; } } if (rd_kafka_assignors_init(rk, errstr, errstr_size) == -1) { - ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; + ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } @@ -2305,26 +2300,27 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, /* Create Mock cluster */ rd_atomic32_init(&rk->rk_mock.cluster_cnt, 0); if (rk->rk_conf.mock.broker_cnt > 0) { - rk->rk_mock.cluster = rd_kafka_mock_cluster_new( - rk, rk->rk_conf.mock.broker_cnt); + rk->rk_mock.cluster = + rd_kafka_mock_cluster_new(rk, rk->rk_conf.mock.broker_cnt); if (!rk->rk_mock.cluster) { rd_snprintf(errstr, errstr_size, "Failed to create mock cluster, see logs"); - ret_err = RD_KAFKA_RESP_ERR__FAIL; + ret_err = RD_KAFKA_RESP_ERR__FAIL; ret_errno = EINVAL; goto fail; } - rd_kafka_log(rk, LOG_NOTICE, "MOCK", "Mock cluster enabled: " + rd_kafka_log(rk, LOG_NOTICE, "MOCK", + "Mock cluster enabled: " "original bootstrap.servers and security.protocol " "ignored and replaced"); /* Overwrite bootstrap.servers and connection settings */ - if (rd_kafka_conf_set(&rk->rk_conf, "bootstrap.servers", - rd_kafka_mock_cluster_bootstraps( - rk->rk_mock.cluster), - NULL, 0) != RD_KAFKA_CONF_OK) + if (rd_kafka_conf_set( + &rk->rk_conf, "bootstrap.servers", + rd_kafka_mock_cluster_bootstraps(rk->rk_mock.cluster), + NULL, 0) != RD_KAFKA_CONF_OK) rd_assert(!"failed to replace mock bootstrap.servers"); if (rd_kafka_conf_set(&rk->rk_conf, "security.protocol", @@ -2338,9 +2334,9 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL || rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_PLAINTEXT) { /* Select SASL provider */ - if (rd_kafka_sasl_select_provider(rk, - errstr, errstr_size) == -1) { - ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; + if (rd_kafka_sasl_select_provider(rk, errstr, errstr_size) == + -1) 
{ + ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } @@ -2348,7 +2344,7 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, /* Initialize SASL provider */ if (rd_kafka_sasl_init(rk, errstr, errstr_size) == -1) { rk->rk_conf.sasl.provider = NULL; - ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; + ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } @@ -2359,7 +2355,7 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL) { /* Create SSL context */ if (rd_kafka_ssl_ctx_init(rk, errstr, errstr_size) == -1) { - ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; + ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } @@ -2371,11 +2367,10 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, if (RD_KAFKAP_STR_LEN(rk->rk_group_id) > 0) { /* Create consumer group handle */ - rk->rk_cgrp = rd_kafka_cgrp_new(rk, - rk->rk_group_id, + rk->rk_cgrp = rd_kafka_cgrp_new(rk, rk->rk_group_id, rk->rk_client_id); rk->rk_consumer.q = - rd_kafka_q_keep(rk->rk_cgrp->rkcg_q); + rd_kafka_q_keep(rk->rk_cgrp->rkcg_q); } else { /* Legacy consumer */ rk->rk_consumer.q = rd_kafka_q_keep(rk->rk_rep); @@ -2383,7 +2378,7 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, } else if (type == RD_KAFKA_PRODUCER) { rk->rk_eos.transactional_id = - rd_kafkap_str_new(rk->rk_conf.eos.transactional_id, -1); + rd_kafkap_str_new(rk->rk_conf.eos.transactional_id, -1); } #ifndef _WIN32 @@ -2394,12 +2389,11 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, * we're done creating the thread. 
*/ sigemptyset(&oldset); sigfillset(&newset); - if (rk->rk_conf.term_sig) { - struct sigaction sa_term = { - .sa_handler = rd_kafka_term_sig_handler - }; - sigaction(rk->rk_conf.term_sig, &sa_term, NULL); - } + if (rk->rk_conf.term_sig) { + struct sigaction sa_term = {.sa_handler = + rd_kafka_term_sig_handler}; + sigaction(rk->rk_conf.term_sig, &sa_term, NULL); + } pthread_sigmask(SIG_SETMASK, &newset, &oldset); #endif @@ -2412,8 +2406,8 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_BACKGROUND)) { rd_kafka_resp_err_t err; rd_kafka_wrlock(rk); - err = rd_kafka_background_thread_create(rk, - errstr, errstr_size); + err = + rd_kafka_background_thread_create(rk, errstr, errstr_size); rd_kafka_wrunlock(rk); if (err) goto fail; @@ -2421,22 +2415,22 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, mtx_lock(&rk->rk_init_lock); - /* Lock handle here to synchronise state, i.e., hold off - * the thread until we've finalized the handle. */ - rd_kafka_wrlock(rk); + /* Lock handle here to synchronise state, i.e., hold off + * the thread until we've finalized the handle. 
*/ + rd_kafka_wrlock(rk); - /* Create handler thread */ + /* Create handler thread */ rk->rk_init_wait_cnt++; - if ((thrd_create(&rk->rk_thread, - rd_kafka_thread_main, rk)) != thrd_success) { + if ((thrd_create(&rk->rk_thread, rd_kafka_thread_main, rk)) != + thrd_success) { rk->rk_init_wait_cnt--; - ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; + ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; ret_errno = errno; - if (errstr) - rd_snprintf(errstr, errstr_size, - "Failed to create thread: %s (%i)", - rd_strerror(errno), errno); - rd_kafka_wrunlock(rk); + if (errstr) + rd_snprintf(errstr, errstr_size, + "Failed to create thread: %s (%i)", + rd_strerror(errno), errno); + rd_kafka_wrunlock(rk); mtx_unlock(&rk->rk_init_lock); #ifndef _WIN32 /* Restore sigmask of caller */ @@ -2453,27 +2447,27 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, */ mtx_lock(&rk->rk_internal_rkb_lock); - rk->rk_internal_rkb = rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, - RD_KAFKA_PROTO_PLAINTEXT, - "", 0, RD_KAFKA_NODEID_UA); + rk->rk_internal_rkb = + rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, RD_KAFKA_PROTO_PLAINTEXT, + "", 0, RD_KAFKA_NODEID_UA); mtx_unlock(&rk->rk_internal_rkb_lock); - /* Add initial list of brokers from configuration */ - if (rk->rk_conf.brokerlist) { - if (rd_kafka_brokers_add0(rk, rk->rk_conf.brokerlist) == 0) - rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, - "No brokers configured"); - } + /* Add initial list of brokers from configuration */ + if (rk->rk_conf.brokerlist) { + if (rd_kafka_brokers_add0(rk, rk->rk_conf.brokerlist) == 0) + rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, + "No brokers configured"); + } #ifndef _WIN32 - /* Restore sigmask of caller */ - pthread_sigmask(SIG_SETMASK, &oldset, NULL); + /* Restore sigmask of caller */ + pthread_sigmask(SIG_SETMASK, &oldset, NULL); #endif /* Wait for background threads to fully initialize so that * the client instance is fully functional at the time it is * 
returned from the constructor. */ - if (rd_kafka_init_wait(rk, 60*1000) != 0) { + if (rd_kafka_init_wait(rk, 60 * 1000) != 0) { /* This should never happen unless there is a bug * or the OS is not scheduling the background threads. * Either case there is no point in handling this gracefully @@ -2501,30 +2495,25 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, bflen = sizeof(builtin_features); if (rd_kafka_conf_get(&rk->rk_conf, "builtin.features", - builtin_features, &bflen) != - RD_KAFKA_CONF_OK) + builtin_features, &bflen) != RD_KAFKA_CONF_OK) rd_snprintf(builtin_features, sizeof(builtin_features), "?"); rd_kafka_dbg(rk, ALL, "INIT", "librdkafka v%s (0x%x) %s initialized " "(builtin.features %s, %s, debug 0x%x)", - rd_kafka_version_str(), rd_kafka_version(), - rk->rk_name, - builtin_features, BUILT_WITH, - rk->rk_conf.debug); + rd_kafka_version_str(), rd_kafka_version(), rk->rk_name, + builtin_features, BUILT_WITH, rk->rk_conf.debug); /* Log warnings for deprecated configuration */ rd_kafka_conf_warn(rk); /* Debug dump configuration */ if (rk->rk_conf.debug & RD_KAFKA_DBG_CONF) { - rd_kafka_anyconf_dump_dbg(rk, _RK_GLOBAL, - &rk->rk_conf, - "Client configuration"); + rd_kafka_anyconf_dump_dbg(rk, _RK_GLOBAL, &rk->rk_conf, + "Client configuration"); if (rk->rk_conf.topic_conf) rd_kafka_anyconf_dump_dbg( - rk, _RK_TOPIC, - rk->rk_conf.topic_conf, - "Default topic configuration"); + rk, _RK_TOPIC, rk->rk_conf.topic_conf, + "Default topic configuration"); } /* Free user supplied conf's base pointer on success, @@ -2582,7 +2571,6 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, - /** * Counts usage of the legacy/simple consumer (rd_kafka_consume_start() with * friends) since it does not have an API for stopping the cgrp we will need to @@ -2595,7 +2583,7 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, * A rd_kafka_t handle can never migrate from simple to high-level, or * vice 
versa, so we dont need a ..consumer_del(). */ -int rd_kafka_simple_consumer_add (rd_kafka_t *rk) { +int rd_kafka_simple_consumer_add(rd_kafka_t *rk) { if (rd_atomic32_get(&rk->rk_simple_cnt) < 0) return 0; @@ -2604,7 +2592,6 @@ int rd_kafka_simple_consumer_add (rd_kafka_t *rk) { - /** * rktp fetch is split up in these parts: * * application side: @@ -2623,91 +2610,91 @@ int rd_kafka_simple_consumer_add (rd_kafka_t *rk) { * */ -static RD_UNUSED -int rd_kafka_consume_start0 (rd_kafka_topic_t *rkt, int32_t partition, - int64_t offset, rd_kafka_q_t *rkq) { - rd_kafka_toppar_t *rktp; +static RD_UNUSED int rd_kafka_consume_start0(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset, + rd_kafka_q_t *rkq) { + rd_kafka_toppar_t *rktp; - if (partition < 0) { - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - ESRCH); - return -1; - } + if (partition < 0) { + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + ESRCH); + return -1; + } if (!rd_kafka_simple_consumer_add(rkt->rkt_rk)) { - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); return -1; } - rd_kafka_topic_wrlock(rkt); - rktp = rd_kafka_toppar_desired_add(rkt, partition); - rd_kafka_topic_wrunlock(rkt); + rd_kafka_topic_wrlock(rkt); + rktp = rd_kafka_toppar_desired_add(rkt, partition); + rd_kafka_topic_wrunlock(rkt); /* Verify offset */ - if (offset == RD_KAFKA_OFFSET_BEGINNING || - offset == RD_KAFKA_OFFSET_END || + if (offset == RD_KAFKA_OFFSET_BEGINNING || + offset == RD_KAFKA_OFFSET_END || offset <= RD_KAFKA_OFFSET_TAIL_BASE) { /* logical offsets */ - } else if (offset == RD_KAFKA_OFFSET_STORED) { - /* offset manager */ + } else if (offset == RD_KAFKA_OFFSET_STORED) { + /* offset manager */ if (rkt->rkt_conf.offset_store_method == - RD_KAFKA_OFFSET_METHOD_BROKER && + RD_KAFKA_OFFSET_METHOD_BROKER && RD_KAFKAP_STR_IS_NULL(rkt->rkt_rk->rk_group_id)) { /* Broker based offsets require a group id. 
*/ rd_kafka_toppar_destroy(rktp); - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, - EINVAL); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, + EINVAL); return -1; } - } else if (offset < 0) { - rd_kafka_toppar_destroy(rktp); - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, - EINVAL); - return -1; - + } else if (offset < 0) { + rd_kafka_toppar_destroy(rktp); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); + return -1; } rd_kafka_toppar_op_fetch_start(rktp, offset, rkq, RD_KAFKA_NO_REPLYQ); rd_kafka_toppar_destroy(rktp); - rd_kafka_set_last_error(0, 0); - return 0; + rd_kafka_set_last_error(0, 0); + return 0; } - -int rd_kafka_consume_start (rd_kafka_topic_t *app_rkt, int32_t partition, - int64_t offset) { +int rd_kafka_consume_start(rd_kafka_topic_t *app_rkt, + int32_t partition, + int64_t offset) { rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); rd_kafka_dbg(rkt->rkt_rk, TOPIC, "START", - "Start consuming partition %"PRId32,partition); - return rd_kafka_consume_start0(rkt, partition, offset, NULL); + "Start consuming partition %" PRId32, partition); + return rd_kafka_consume_start0(rkt, partition, offset, NULL); } -int rd_kafka_consume_start_queue (rd_kafka_topic_t *app_rkt, int32_t partition, - int64_t offset, rd_kafka_queue_t *rkqu) { +int rd_kafka_consume_start_queue(rd_kafka_topic_t *app_rkt, + int32_t partition, + int64_t offset, + rd_kafka_queue_t *rkqu) { rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); - return rd_kafka_consume_start0(rkt, partition, offset, rkqu->rkqu_q); + return rd_kafka_consume_start0(rkt, partition, offset, rkqu->rkqu_q); } - -static RD_UNUSED int rd_kafka_consume_stop0 (rd_kafka_toppar_t *rktp) { +static RD_UNUSED int rd_kafka_consume_stop0(rd_kafka_toppar_t *rktp) { rd_kafka_q_t *tmpq = NULL; rd_kafka_resp_err_t err; rd_kafka_topic_wrlock(rktp->rktp_rkt); rd_kafka_toppar_lock(rktp); - rd_kafka_toppar_desired_del(rktp); + rd_kafka_toppar_desired_del(rktp); 
rd_kafka_toppar_unlock(rktp); - rd_kafka_topic_wrunlock(rktp->rktp_rkt); + rd_kafka_topic_wrunlock(rktp->rktp_rkt); tmpq = rd_kafka_q_new(rktp->rktp_rkt->rkt_rk); @@ -2717,34 +2704,34 @@ static RD_UNUSED int rd_kafka_consume_stop0 (rd_kafka_toppar_t *rktp) { err = rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE); rd_kafka_q_destroy_owner(tmpq); - rd_kafka_set_last_error(err, err ? EINVAL : 0); + rd_kafka_set_last_error(err, err ? EINVAL : 0); - return err ? -1 : 0; + return err ? -1 : 0; } -int rd_kafka_consume_stop (rd_kafka_topic_t *app_rkt, int32_t partition) { +int rd_kafka_consume_stop(rd_kafka_topic_t *app_rkt, int32_t partition) { rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); - rd_kafka_toppar_t *rktp; + rd_kafka_toppar_t *rktp; int r; - if (partition == RD_KAFKA_PARTITION_UA) { - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); - return -1; - } - - rd_kafka_topic_wrlock(rkt); - if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) && - !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) { - rd_kafka_topic_wrunlock(rkt); - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - ESRCH); - return -1; - } + if (partition == RD_KAFKA_PARTITION_UA) { + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); + return -1; + } + + rd_kafka_topic_wrlock(rkt); + if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) && + !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) { + rd_kafka_topic_wrunlock(rkt); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + ESRCH); + return -1; + } rd_kafka_topic_wrunlock(rkt); r = rd_kafka_consume_stop0(rktp); - /* set_last_error() called by stop0() */ + /* set_last_error() called by stop0() */ rd_kafka_toppar_destroy(rktp); @@ -2753,31 +2740,31 @@ int rd_kafka_consume_stop (rd_kafka_topic_t *app_rkt, int32_t partition) { -rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *app_rkt, - int32_t partition, - int64_t offset, - int timeout_ms) { +rd_kafka_resp_err_t 
rd_kafka_seek(rd_kafka_topic_t *app_rkt, + int32_t partition, + int64_t offset, + int timeout_ms) { rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); - rd_kafka_toppar_t *rktp; + rd_kafka_toppar_t *rktp; rd_kafka_q_t *tmpq = NULL; rd_kafka_resp_err_t err; rd_kafka_replyq_t replyq = RD_KAFKA_NO_REPLYQ; /* FIXME: simple consumer check */ - if (partition == RD_KAFKA_PARTITION_UA) + if (partition == RD_KAFKA_PARTITION_UA) return RD_KAFKA_RESP_ERR__INVALID_ARG; - rd_kafka_topic_rdlock(rkt); - if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) && - !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) { - rd_kafka_topic_rdunlock(rkt); + rd_kafka_topic_rdlock(rkt); + if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) && + !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) { + rd_kafka_topic_rdunlock(rkt); return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - } - rd_kafka_topic_rdunlock(rkt); + } + rd_kafka_topic_rdunlock(rkt); if (timeout_ms) { - tmpq = rd_kafka_q_new(rkt->rkt_rk); + tmpq = rd_kafka_q_new(rkt->rkt_rk); replyq = RD_KAFKA_REPLYQ(tmpq, 0); } @@ -2788,7 +2775,7 @@ rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *app_rkt, return err; } - rd_kafka_toppar_destroy(rktp); + rd_kafka_toppar_destroy(rktp); if (tmpq) { err = rd_kafka_q_wait_result(tmpq, timeout_ms); @@ -2801,18 +2788,18 @@ rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *app_rkt, rd_kafka_error_t * -rd_kafka_seek_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions, - int timeout_ms) { +rd_kafka_seek_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms) { rd_kafka_q_t *tmpq = NULL; rd_kafka_topic_partition_t *rktpar; rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); - int cnt = 0; + int cnt = 0; if (rk->rk_type != RD_KAFKA_CONSUMER) return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__INVALID_ARG, - "Must only be used on consumer instance"); + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Must only be used on consumer instance"); if 
(!partitions || partitions->cnt == 0) return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, @@ -2825,11 +2812,9 @@ rd_kafka_seek_partitions (rd_kafka_t *rk, rd_kafka_toppar_t *rktp; rd_kafka_resp_err_t err; - rktp = rd_kafka_toppar_get2(rk, - rktpar->topic, - rktpar->partition, - rd_false/*no-ua-on-miss*/, - rd_false/*no-create-on-miss*/); + rktp = rd_kafka_toppar_get2( + rk, rktpar->topic, rktpar->partition, + rd_false /*no-ua-on-miss*/, rd_false /*no-create-on-miss*/); if (!rktp) { rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; continue; @@ -2859,9 +2844,10 @@ rd_kafka_seek_partitions (rd_kafka_t *rk, rd_kafka_q_destroy_owner(tmpq); return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__TIMED_OUT, - "Timed out waiting for %d remaining partition " - "seek(s) to finish", cnt); + RD_KAFKA_RESP_ERR__TIMED_OUT, + "Timed out waiting for %d remaining partition " + "seek(s) to finish", + cnt); } if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) { @@ -2875,9 +2861,8 @@ rd_kafka_seek_partitions (rd_kafka_t *rk, rd_assert(rko->rko_rktp); rktpar = rd_kafka_topic_partition_list_find( - partitions, - rko->rko_rktp->rktp_rkt->rkt_topic->str, - rko->rko_rktp->rktp_partition); + partitions, rko->rko_rktp->rktp_rkt->rkt_topic->str, + rko->rko_rktp->rktp_partition); rd_assert(rktpar); rktpar->err = rko->rko_err; @@ -2894,75 +2879,76 @@ rd_kafka_seek_partitions (rd_kafka_t *rk, -static ssize_t rd_kafka_consume_batch0 (rd_kafka_q_t *rkq, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size) { - /* Populate application's rkmessages array. */ - return rd_kafka_q_serve_rkmessages(rkq, timeout_ms, - rkmessages, rkmessages_size); +static ssize_t rd_kafka_consume_batch0(rd_kafka_q_t *rkq, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size) { + /* Populate application's rkmessages array. 
*/ + return rd_kafka_q_serve_rkmessages(rkq, timeout_ms, rkmessages, + rkmessages_size); } -ssize_t rd_kafka_consume_batch (rd_kafka_topic_t *app_rkt, int32_t partition, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size) { +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *app_rkt, + int32_t partition, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size) { rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); - rd_kafka_toppar_t *rktp; - ssize_t cnt; - - /* Get toppar */ - rd_kafka_topic_rdlock(rkt); - rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/); - if (unlikely(!rktp)) - rktp = rd_kafka_toppar_desired_get(rkt, partition); - rd_kafka_topic_rdunlock(rkt); - - if (unlikely(!rktp)) { - /* No such toppar known */ - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - ESRCH); - return -1; - } + rd_kafka_toppar_t *rktp; + ssize_t cnt; + + /* Get toppar */ + rd_kafka_topic_rdlock(rkt); + rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/); + if (unlikely(!rktp)) + rktp = rd_kafka_toppar_desired_get(rkt, partition); + rd_kafka_topic_rdunlock(rkt); + + if (unlikely(!rktp)) { + /* No such toppar known */ + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + ESRCH); + return -1; + } - /* Populate application's rkmessages array. */ - cnt = rd_kafka_q_serve_rkmessages(rktp->rktp_fetchq, timeout_ms, - rkmessages, rkmessages_size); + /* Populate application's rkmessages array. 
*/ + cnt = rd_kafka_q_serve_rkmessages(rktp->rktp_fetchq, timeout_ms, + rkmessages, rkmessages_size); - rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */ + rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */ - rd_kafka_set_last_error(0, 0); + rd_kafka_set_last_error(0, 0); - return cnt; + return cnt; } -ssize_t rd_kafka_consume_batch_queue (rd_kafka_queue_t *rkqu, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size) { - /* Populate application's rkmessages array. */ - return rd_kafka_consume_batch0(rkqu->rkqu_q, timeout_ms, - rkmessages, rkmessages_size); +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size) { + /* Populate application's rkmessages array. */ + return rd_kafka_consume_batch0(rkqu->rkqu_q, timeout_ms, rkmessages, + rkmessages_size); } struct consume_ctx { - void (*consume_cb) (rd_kafka_message_t *rkmessage, void *opaque); - void *opaque; + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque); + void *opaque; }; /** * Trampoline for application's consume_cb() */ -static rd_kafka_op_res_t -rd_kafka_consume_cb (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque) { - struct consume_ctx *ctx = opaque; - rd_kafka_message_t *rkmessage; +static rd_kafka_op_res_t rd_kafka_consume_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { + struct consume_ctx *ctx = opaque; + rd_kafka_message_t *rkmessage; if (unlikely(rd_kafka_op_version_outdated(rko, 0)) || rko->rko_type == RD_KAFKA_OP_BARRIER) { @@ -2970,9 +2956,9 @@ rd_kafka_consume_cb (rd_kafka_t *rk, return RD_KAFKA_OP_RES_HANDLED; } - rkmessage = rd_kafka_message_get(rko); + rkmessage = rd_kafka_message_get(rko); - rd_kafka_op_offset_store(rk, rko); + rd_kafka_op_offset_store(rk, rko); ctx->consume_cb(rkmessage, ctx->opaque); @@ -2983,20 +2969,19 @@ 
rd_kafka_consume_cb (rd_kafka_t *rk, -static rd_kafka_op_res_t -rd_kafka_consume_callback0 (rd_kafka_q_t *rkq, int timeout_ms, int max_cnt, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *opaque), - void *opaque) { - struct consume_ctx ctx = { .consume_cb = consume_cb, .opaque = opaque }; +static rd_kafka_op_res_t rd_kafka_consume_callback0( + rd_kafka_q_t *rkq, + int timeout_ms, + int max_cnt, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque), + void *opaque) { + struct consume_ctx ctx = {.consume_cb = consume_cb, .opaque = opaque}; rd_kafka_op_res_t res; if (timeout_ms) rd_kafka_app_poll_blocking(rkq->rkq_rk); - res = rd_kafka_q_serve(rkq, timeout_ms, max_cnt, - RD_KAFKA_Q_CB_RETURN, + res = rd_kafka_q_serve(rkq, timeout_ms, max_cnt, RD_KAFKA_Q_CB_RETURN, rd_kafka_consume_cb, &ctx); rd_kafka_app_polled(rkq->rkq_rk); @@ -3005,51 +2990,50 @@ rd_kafka_consume_callback0 (rd_kafka_q_t *rkq, int timeout_ms, int max_cnt, } -int rd_kafka_consume_callback (rd_kafka_topic_t *app_rkt, int32_t partition, - int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *opaque), - void *opaque) { +int rd_kafka_consume_callback(rd_kafka_topic_t *app_rkt, + int32_t partition, + int timeout_ms, + void (*consume_cb)(rd_kafka_message_t *rkmessage, + void *opaque), + void *opaque) { rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); - rd_kafka_toppar_t *rktp; - int r; - - /* Get toppar */ - rd_kafka_topic_rdlock(rkt); - rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/); - if (unlikely(!rktp)) - rktp = rd_kafka_toppar_desired_get(rkt, partition); - rd_kafka_topic_rdunlock(rkt); - - if (unlikely(!rktp)) { - /* No such toppar known */ - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - ESRCH); - return -1; - } - - r = rd_kafka_consume_callback0(rktp->rktp_fetchq, timeout_ms, + rd_kafka_toppar_t *rktp; + int r; + + /* Get toppar */ + rd_kafka_topic_rdlock(rkt); + rktp = rd_kafka_toppar_get(rkt, partition, 0 
/*no ua on miss*/); + if (unlikely(!rktp)) + rktp = rd_kafka_toppar_desired_get(rkt, partition); + rd_kafka_topic_rdunlock(rkt); + + if (unlikely(!rktp)) { + /* No such toppar known */ + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + ESRCH); + return -1; + } + + r = rd_kafka_consume_callback0(rktp->rktp_fetchq, timeout_ms, rkt->rkt_conf.consume_callback_max_msgs, - consume_cb, opaque); + consume_cb, opaque); - rd_kafka_toppar_destroy(rktp); + rd_kafka_toppar_destroy(rktp); - rd_kafka_set_last_error(0, 0); + rd_kafka_set_last_error(0, 0); - return r; + return r; } -int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu, - int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *opaque), - void *opaque) { - return rd_kafka_consume_callback0(rkqu->rkqu_q, timeout_ms, 0, - consume_cb, opaque); +int rd_kafka_consume_callback_queue( + rd_kafka_queue_t *rkqu, + int timeout_ms, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque), + void *opaque) { + return rd_kafka_consume_callback0(rkqu->rkqu_q, timeout_ms, 0, + consume_cb, opaque); } @@ -3059,23 +3043,22 @@ int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu, * registered for matching events, this includes consumer_cb() * in which case no message will be returned. 
*/ -static rd_kafka_message_t *rd_kafka_consume0 (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - int timeout_ms) { - rd_kafka_op_t *rko; - rd_kafka_message_t *rkmessage = NULL; - rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); +static rd_kafka_message_t * +rd_kafka_consume0(rd_kafka_t *rk, rd_kafka_q_t *rkq, int timeout_ms) { + rd_kafka_op_t *rko; + rd_kafka_message_t *rkmessage = NULL; + rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); if (timeout_ms) rd_kafka_app_poll_blocking(rk); - rd_kafka_yield_thread = 0; - while ((rko = rd_kafka_q_pop(rkq, - rd_timeout_remains_us(abs_timeout), 0))) { + rd_kafka_yield_thread = 0; + while (( + rko = rd_kafka_q_pop(rkq, rd_timeout_remains_us(abs_timeout), 0))) { rd_kafka_op_res_t res; - res = rd_kafka_poll_cb(rk, rkq, rko, - RD_KAFKA_Q_CB_RETURN, NULL); + res = + rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL); if (res == RD_KAFKA_OP_RES_PASS) break; @@ -3084,8 +3067,7 @@ static rd_kafka_message_t *rd_kafka_consume0 (rd_kafka_t *rk, rd_kafka_yield_thread)) { /* Callback called rd_kafka_yield(), we must * stop dispatching the queue and return. */ - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INTR, - EINTR); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INTR, EINTR); rd_kafka_app_polled(rk); return NULL; } @@ -3094,69 +3076,66 @@ static rd_kafka_message_t *rd_kafka_consume0 (rd_kafka_t *rk, continue; } - if (!rko) { - /* Timeout reached with no op returned. */ - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT, - ETIMEDOUT); + if (!rko) { + /* Timeout reached with no op returned. 
*/ + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT, + ETIMEDOUT); rd_kafka_app_polled(rk); - return NULL; - } + return NULL; + } - rd_kafka_assert(rk, - rko->rko_type == RD_KAFKA_OP_FETCH || - rko->rko_type == RD_KAFKA_OP_CONSUMER_ERR); + rd_kafka_assert(rk, rko->rko_type == RD_KAFKA_OP_FETCH || + rko->rko_type == RD_KAFKA_OP_CONSUMER_ERR); - /* Get rkmessage from rko */ - rkmessage = rd_kafka_message_get(rko); + /* Get rkmessage from rko */ + rkmessage = rd_kafka_message_get(rko); - /* Store offset */ - rd_kafka_op_offset_store(rk, rko); + /* Store offset */ + rd_kafka_op_offset_store(rk, rko); - rd_kafka_set_last_error(0, 0); + rd_kafka_set_last_error(0, 0); rd_kafka_app_polled(rk); - return rkmessage; + return rkmessage; } -rd_kafka_message_t *rd_kafka_consume (rd_kafka_topic_t *app_rkt, - int32_t partition, - int timeout_ms) { +rd_kafka_message_t * +rd_kafka_consume(rd_kafka_topic_t *app_rkt, int32_t partition, int timeout_ms) { rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); - rd_kafka_toppar_t *rktp; - rd_kafka_message_t *rkmessage; - - rd_kafka_topic_rdlock(rkt); - rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/); - if (unlikely(!rktp)) - rktp = rd_kafka_toppar_desired_get(rkt, partition); - rd_kafka_topic_rdunlock(rkt); - - if (unlikely(!rktp)) { - /* No such toppar known */ - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - ESRCH); - return NULL; - } + rd_kafka_toppar_t *rktp; + rd_kafka_message_t *rkmessage; + + rd_kafka_topic_rdlock(rkt); + rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/); + if (unlikely(!rktp)) + rktp = rd_kafka_toppar_desired_get(rkt, partition); + rd_kafka_topic_rdunlock(rkt); + + if (unlikely(!rktp)) { + /* No such toppar known */ + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + ESRCH); + return NULL; + } - rkmessage = rd_kafka_consume0(rkt->rkt_rk, - rktp->rktp_fetchq, timeout_ms); + rkmessage = + rd_kafka_consume0(rkt->rkt_rk, rktp->rktp_fetchq, 
timeout_ms); - rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */ + rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */ - return rkmessage; + return rkmessage; } -rd_kafka_message_t *rd_kafka_consume_queue (rd_kafka_queue_t *rkqu, - int timeout_ms) { - return rd_kafka_consume0(rkqu->rkqu_rk, rkqu->rkqu_q, timeout_ms); +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, + int timeout_ms) { + return rd_kafka_consume0(rkqu->rkqu_rk, rkqu->rkqu_q, timeout_ms); } - -rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk) { +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk) { rd_kafka_cgrp_t *rkcg; if (!(rkcg = rd_kafka_cgrp_get(rk))) @@ -3168,9 +3147,7 @@ rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk) { - -rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, - int timeout_ms) { +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms) { rd_kafka_cgrp_t *rkcg; if (unlikely(!(rkcg = rd_kafka_cgrp_get(rk)))) { @@ -3183,11 +3160,11 @@ rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, } -rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk) { +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk) { rd_kafka_cgrp_t *rkcg; rd_kafka_op_t *rko; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT; - rd_kafka_q_t *rkq; + rd_kafka_q_t *rkq; if (!(rkcg = rd_kafka_cgrp_get(rk))) return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; @@ -3202,11 +3179,11 @@ rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk) { rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Closing consumer"); - /* Redirect cgrp queue to our temporary queue to make sure - * all posted ops (e.g., rebalance callbacks) are served by - * this function. */ - rkq = rd_kafka_q_new(rk); - rd_kafka_q_fwd_set(rkcg->rkcg_q, rkq); + /* Redirect cgrp queue to our temporary queue to make sure + * all posted ops (e.g., rebalance callbacks) are served by + * this function. 
*/ + rkq = rd_kafka_q_new(rk); + rd_kafka_q_fwd_set(rkcg->rkcg_q, rkq); rd_kafka_cgrp_terminate(rkcg, RD_KAFKA_REPLYQ(rkq, 0)); /* async */ @@ -3223,8 +3200,7 @@ rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk) { /* Purge ops already enqueued */ rd_kafka_q_purge(rkq); } else { - rd_kafka_dbg(rk, CONSUMER, "CLOSE", - "Waiting for close events"); + rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Waiting for close events"); while ((rko = rd_kafka_q_pop(rkq, RD_POLL_INFINITE, 0))) { rd_kafka_op_res_t res; if ((rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) == @@ -3253,13 +3229,13 @@ rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk) { rd_kafka_resp_err_t -rd_kafka_committed (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions, - int timeout_ms) { +rd_kafka_committed(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms) { rd_kafka_q_t *rkq; rd_kafka_resp_err_t err; rd_kafka_cgrp_t *rkcg; - rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); + rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); if (!partitions) return RD_KAFKA_RESP_ERR__INVALID_ARG; @@ -3267,53 +3243,53 @@ rd_kafka_committed (rd_kafka_t *rk, if (!(rkcg = rd_kafka_cgrp_get(rk))) return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; - /* Set default offsets. */ - rd_kafka_topic_partition_list_reset_offsets(partitions, + /* Set default offsets. */ + rd_kafka_topic_partition_list_reset_offsets(partitions, RD_KAFKA_OFFSET_INVALID); - rkq = rd_kafka_q_new(rk); + rkq = rd_kafka_q_new(rk); do { rd_kafka_op_t *rko; - int state_version = rd_kafka_brokers_get_state_version(rk); + int state_version = rd_kafka_brokers_get_state_version(rk); rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH); - rd_kafka_op_set_replyq(rko, rkq, NULL); + rd_kafka_op_set_replyq(rko, rkq, NULL); /* Issue #827 * Copy partition list to avoid use-after-free if we time out * here, the app frees the list, and then cgrp starts * processing the op. 
*/ - rko->rko_u.offset_fetch.partitions = - rd_kafka_topic_partition_list_copy(partitions); + rko->rko_u.offset_fetch.partitions = + rd_kafka_topic_partition_list_copy(partitions); rko->rko_u.offset_fetch.require_stable = - rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED; - rko->rko_u.offset_fetch.do_free = 1; + rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED; + rko->rko_u.offset_fetch.do_free = 1; if (!rd_kafka_q_enq(rkcg->rkcg_ops, rko)) { err = RD_KAFKA_RESP_ERR__DESTROY; break; } - rko = rd_kafka_q_pop(rkq, - rd_timeout_remains_us(abs_timeout), 0); + rko = + rd_kafka_q_pop(rkq, rd_timeout_remains_us(abs_timeout), 0); if (rko) { if (!(err = rko->rko_err)) rd_kafka_topic_partition_list_update( - partitions, - rko->rko_u.offset_fetch.partitions); + partitions, + rko->rko_u.offset_fetch.partitions); else if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD || - err == RD_KAFKA_RESP_ERR__TRANSPORT) && - !rd_kafka_brokers_wait_state_change( - rk, state_version, - rd_timeout_remains(abs_timeout))) - err = RD_KAFKA_RESP_ERR__TIMED_OUT; + err == RD_KAFKA_RESP_ERR__TRANSPORT) && + !rd_kafka_brokers_wait_state_change( + rk, state_version, + rd_timeout_remains(abs_timeout))) + err = RD_KAFKA_RESP_ERR__TIMED_OUT; rd_kafka_op_destroy(rko); } else err = RD_KAFKA_RESP_ERR__TIMED_OUT; } while (err == RD_KAFKA_RESP_ERR__TRANSPORT || - err == RD_KAFKA_RESP_ERR__WAIT_COORD); + err == RD_KAFKA_RESP_ERR__WAIT_COORD); rd_kafka_q_destroy_owner(rkq); @@ -3323,27 +3299,26 @@ rd_kafka_committed (rd_kafka_t *rk, rd_kafka_resp_err_t -rd_kafka_position (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions) { - int i; - - for (i = 0 ; i < partitions->cnt ; i++) { - rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; - rd_kafka_toppar_t *rktp; - - if (!(rktp = rd_kafka_toppar_get2(rk, rktpar->topic, - rktpar->partition, 0, 1))) { - rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - rktpar->offset = RD_KAFKA_OFFSET_INVALID; - continue; - } - - rd_kafka_toppar_lock(rktp); 
- rktpar->offset = rktp->rktp_app_offset; - rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(rktp); - } +rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions) { + int i; + + for (i = 0; i < partitions->cnt; i++) { + rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; + rd_kafka_toppar_t *rktp; + + if (!(rktp = rd_kafka_toppar_get2(rk, rktpar->topic, + rktpar->partition, 0, 1))) { + rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + rktpar->offset = RD_KAFKA_OFFSET_INVALID; + continue; + } + + rd_kafka_toppar_lock(rktp); + rktpar->offset = rktp->rktp_app_offset; + rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_destroy(rktp); + } return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -3351,22 +3326,22 @@ rd_kafka_position (rd_kafka_t *rk, struct _query_wmark_offsets_state { - rd_kafka_resp_err_t err; - const char *topic; - int32_t partition; - int64_t offsets[2]; - int offidx; /* next offset to set from response */ - rd_ts_t ts_end; - int state_version; /* Broker state version */ + rd_kafka_resp_err_t err; + const char *topic; + int32_t partition; + int64_t offsets[2]; + int offidx; /* next offset to set from response */ + rd_ts_t ts_end; + int state_version; /* Broker state version */ }; -static void rd_kafka_query_wmark_offsets_resp_cb (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - struct _query_wmark_offsets_state *state; +static void rd_kafka_query_wmark_offsets_resp_cb(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + struct _query_wmark_offsets_state *state; rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; @@ -3379,32 +3354,30 @@ static void rd_kafka_query_wmark_offsets_resp_cb (rd_kafka_t *rk, state = opaque; offsets = 
rd_kafka_topic_partition_list_new(1); - err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, - offsets, NULL); + err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, offsets, + NULL); if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) { rd_kafka_topic_partition_list_destroy(offsets); return; /* Retrying */ } - /* Retry if no broker connection is available yet. */ - if (err == RD_KAFKA_RESP_ERR__TRANSPORT && - rkb && - rd_kafka_brokers_wait_state_change( - rkb->rkb_rk, state->state_version, - rd_timeout_remains(state->ts_end))) { - /* Retry */ - state->state_version = rd_kafka_brokers_get_state_version(rk); - request->rkbuf_retries = 0; - if (rd_kafka_buf_retry(rkb, request)) { + /* Retry if no broker connection is available yet. */ + if (err == RD_KAFKA_RESP_ERR__TRANSPORT && rkb && + rd_kafka_brokers_wait_state_change( + rkb->rkb_rk, state->state_version, + rd_timeout_remains(state->ts_end))) { + /* Retry */ + state->state_version = rd_kafka_brokers_get_state_version(rk); + request->rkbuf_retries = 0; + if (rd_kafka_buf_retry(rkb, request)) { rd_kafka_topic_partition_list_destroy(offsets); return; /* Retry in progress */ } - /* FALLTHRU */ - } + /* FALLTHRU */ + } /* Partition not seen in response. 
*/ - if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, - state->topic, + if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, state->topic, state->partition))) err = RD_KAFKA_RESP_ERR__BAD_MSG; else if (rktpar->err) @@ -3421,10 +3394,12 @@ static void rd_kafka_query_wmark_offsets_resp_cb (rd_kafka_t *rk, } -rd_kafka_resp_err_t -rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, - int32_t partition, - int64_t *low, int64_t *high, int timeout_ms) { +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high, + int timeout_ms) { rd_kafka_q_t *rkq; struct _query_wmark_offsets_state state; rd_ts_t ts_end = rd_timeout_init(timeout_ms); @@ -3435,8 +3410,8 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, rd_kafka_resp_err_t err; partitions = rd_kafka_topic_partition_list_new(1); - rktpar = rd_kafka_topic_partition_list_add(partitions, - topic, partition); + rktpar = + rd_kafka_topic_partition_list_add(partitions, topic, partition); rd_list_init(&leaders, partitions->cnt, (void *)rd_kafka_partition_leader_destroy); @@ -3444,9 +3419,9 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, err = rd_kafka_topic_partition_list_query_leaders(rk, partitions, &leaders, timeout_ms); if (err) { - rd_list_destroy(&leaders); - rd_kafka_topic_partition_list_destroy(partitions); - return err; + rd_list_destroy(&leaders); + rd_kafka_topic_partition_list_destroy(partitions); + return err; } leader = rd_list_elem(&leaders, 0); @@ -3455,27 +3430,25 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, /* Due to KAFKA-1588 we need to send a request for each wanted offset, * in this case one for the low watermark and one for the high. 
*/ - state.topic = topic; - state.partition = partition; - state.offsets[0] = RD_KAFKA_OFFSET_BEGINNING; - state.offsets[1] = RD_KAFKA_OFFSET_END; - state.offidx = 0; - state.err = RD_KAFKA_RESP_ERR__IN_PROGRESS; - state.ts_end = ts_end; + state.topic = topic; + state.partition = partition; + state.offsets[0] = RD_KAFKA_OFFSET_BEGINNING; + state.offsets[1] = RD_KAFKA_OFFSET_END; + state.offidx = 0; + state.err = RD_KAFKA_RESP_ERR__IN_PROGRESS; + state.ts_end = ts_end; state.state_version = rd_kafka_brokers_get_state_version(rk); - rktpar->offset = RD_KAFKA_OFFSET_BEGINNING; - rd_kafka_ListOffsetsRequest(leader->rkb, partitions, - RD_KAFKA_REPLYQ(rkq, 0), - rd_kafka_query_wmark_offsets_resp_cb, - &state); + rktpar->offset = RD_KAFKA_OFFSET_BEGINNING; + rd_kafka_ListOffsetsRequest( + leader->rkb, partitions, RD_KAFKA_REPLYQ(rkq, 0), + rd_kafka_query_wmark_offsets_resp_cb, &state); - rktpar->offset = RD_KAFKA_OFFSET_END; - rd_kafka_ListOffsetsRequest(leader->rkb, partitions, - RD_KAFKA_REPLYQ(rkq, 0), - rd_kafka_query_wmark_offsets_resp_cb, - &state); + rktpar->offset = RD_KAFKA_OFFSET_END; + rd_kafka_ListOffsetsRequest( + leader->rkb, partitions, RD_KAFKA_REPLYQ(rkq, 0), + rd_kafka_query_wmark_offsets_resp_cb, &state); rd_kafka_topic_partition_list_destroy(partitions); rd_list_destroy(&leaders); @@ -3483,8 +3456,8 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, /* Wait for reply (or timeout) */ while (state.err == RD_KAFKA_RESP_ERR__IN_PROGRESS && rd_kafka_q_serve(rkq, 100, 0, RD_KAFKA_Q_CB_CALLBACK, - rd_kafka_poll_cb, NULL) != - RD_KAFKA_OP_RES_YIELD) + rd_kafka_poll_cb, + NULL) != RD_KAFKA_OP_RES_YIELD) ; rd_kafka_q_destroy_owner(rkq); @@ -3496,10 +3469,10 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, /* We are not certain about the returned order. 
*/ if (state.offsets[0] < state.offsets[1]) { - *low = state.offsets[0]; - *high = state.offsets[1]; + *low = state.offsets[0]; + *high = state.offsets[1]; } else { - *low = state.offsets[1]; + *low = state.offsets[1]; *high = state.offsets[0]; } @@ -3511,24 +3484,25 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, } -rd_kafka_resp_err_t -rd_kafka_get_watermark_offsets (rd_kafka_t *rk, const char *topic, - int32_t partition, - int64_t *low, int64_t *high) { - rd_kafka_toppar_t *rktp; +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high) { + rd_kafka_toppar_t *rktp; - rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, 1); - if (!rktp) - return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, 1); + if (!rktp) + return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - rd_kafka_toppar_lock(rktp); - *low = rktp->rktp_lo_offset; - *high = rktp->rktp_hi_offset; - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_lock(rktp); + *low = rktp->rktp_lo_offset; + *high = rktp->rktp_hi_offset; + rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(rktp); + rd_kafka_toppar_destroy(rktp); - return RD_KAFKA_RESP_ERR_NO_ERROR; + return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -3546,12 +3520,12 @@ struct _get_offsets_for_times { /** * @brief Handle OffsetRequest responses */ -static void rd_kafka_get_offsets_for_times_resp_cb (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_get_offsets_for_times_resp_cb(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { struct _get_offsets_for_times *state; if (err == RD_KAFKA_RESP_ERR__DESTROY) { @@ -3568,13 +3542,12 @@ static void rd_kafka_get_offsets_for_times_resp_cb (rd_kafka_t *rk, return; /* Retrying */ /* 
Retry if no broker connection is available yet. */ - if (err == RD_KAFKA_RESP_ERR__TRANSPORT && - rkb && + if (err == RD_KAFKA_RESP_ERR__TRANSPORT && rkb && rd_kafka_brokers_wait_state_change( - rkb->rkb_rk, state->state_version, - rd_timeout_remains(state->ts_end))) { + rkb->rkb_rk, state->state_version, + rd_timeout_remains(state->ts_end))) { /* Retry */ - state->state_version = rd_kafka_brokers_get_state_version(rk); + state->state_version = rd_kafka_brokers_get_state_version(rk); request->rkbuf_retries = 0; if (rd_kafka_buf_retry(rkb, request)) return; /* Retry in progress */ @@ -3589,12 +3562,12 @@ static void rd_kafka_get_offsets_for_times_resp_cb (rd_kafka_t *rk, rd_kafka_resp_err_t -rd_kafka_offsets_for_times (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets, - int timeout_ms) { +rd_kafka_offsets_for_times(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets, + int timeout_ms) { rd_kafka_q_t *rkq; struct _get_offsets_for_times state = RD_ZERO_INIT; - rd_ts_t ts_end = rd_timeout_init(timeout_ms); + rd_ts_t ts_end = rd_timeout_init(timeout_ms); rd_list_t leaders; int i; rd_kafka_resp_err_t err; @@ -3618,16 +3591,14 @@ rd_kafka_offsets_for_times (rd_kafka_t *rk, rkq = rd_kafka_q_new(rk); state.wait_reply = 0; - state.results = rd_kafka_topic_partition_list_new(offsets->cnt); + state.results = rd_kafka_topic_partition_list_new(offsets->cnt); /* For each leader send a request for its partitions */ RD_LIST_FOREACH(leader, &leaders, i) { state.wait_reply++; rd_kafka_ListOffsetsRequest( - leader->rkb, leader->partitions, - RD_KAFKA_REPLYQ(rkq, 0), - rd_kafka_get_offsets_for_times_resp_cb, - &state); + leader->rkb, leader->partitions, RD_KAFKA_REPLYQ(rkq, 0), + rd_kafka_get_offsets_for_times_resp_cb, &state); } rd_list_destroy(&leaders); @@ -3663,30 +3634,30 @@ rd_kafka_offsets_for_times (rd_kafka_t *rk, * * @locality any thread that serves op queues */ -rd_kafka_op_res_t -rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - 
rd_kafka_q_cb_type_t cb_type, void *opaque) { - rd_kafka_msg_t *rkm; +rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { + rd_kafka_msg_t *rkm; rd_kafka_op_res_t res = RD_KAFKA_OP_RES_HANDLED; /* Special handling for events based on cb_type */ - if (cb_type == RD_KAFKA_Q_CB_EVENT && - rd_kafka_event_setup(rk, rko)) { + if (cb_type == RD_KAFKA_Q_CB_EVENT && rd_kafka_event_setup(rk, rko)) { /* Return-as-event requested. */ return RD_KAFKA_OP_RES_PASS; /* Return as event */ } - switch ((int)rko->rko_type) - { + switch ((int)rko->rko_type) { case RD_KAFKA_OP_FETCH: if (!rk->rk_conf.consume_cb || cb_type == RD_KAFKA_Q_CB_RETURN || cb_type == RD_KAFKA_Q_CB_FORCE_RETURN) return RD_KAFKA_OP_RES_PASS; /* Dont handle here */ else { - struct consume_ctx ctx = { - .consume_cb = rk->rk_conf.consume_cb, - .opaque = rk->rk_conf.opaque }; + struct consume_ctx ctx = {.consume_cb = + rk->rk_conf.consume_cb, + .opaque = rk->rk_conf.opaque}; return rd_kafka_consume_cb(rk, rkq, rko, cb_type, &ctx); } @@ -3695,9 +3666,8 @@ rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, case RD_KAFKA_OP_REBALANCE: if (rk->rk_conf.rebalance_cb) rk->rk_conf.rebalance_cb( - rk, rko->rko_err, - rko->rko_u.rebalance.partitions, - rk->rk_conf.opaque); + rk, rko->rko_err, rko->rko_u.rebalance.partitions, + rk->rk_conf.opaque); else { /** If EVENT_REBALANCE is enabled but rebalance_cb * isn't, we need to perform a dummy assign for the @@ -3705,22 +3675,22 @@ rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, * with consumer_close() */ rd_kafka_dbg(rk, CGRP, "UNASSIGN", "Forcing unassign of %d partition(s)", - rko->rko_u.rebalance.partitions ? - rko->rko_u.rebalance.partitions->cnt : 0); + rko->rko_u.rebalance.partitions + ? 
rko->rko_u.rebalance.partitions->cnt + : 0); rd_kafka_assign(rk, NULL); } break; case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY: - if (!rko->rko_u.offset_commit.cb) - return RD_KAFKA_OP_RES_PASS; /* Dont handle here */ - rko->rko_u.offset_commit.cb( - rk, rko->rko_err, - rko->rko_u.offset_commit.partitions, - rko->rko_u.offset_commit.opaque); + if (!rko->rko_u.offset_commit.cb) + return RD_KAFKA_OP_RES_PASS; /* Dont handle here */ + rko->rko_u.offset_commit.cb(rk, rko->rko_err, + rko->rko_u.offset_commit.partitions, + rko->rko_u.offset_commit.opaque); break; - case RD_KAFKA_OP_FETCH_STOP|RD_KAFKA_OP_REPLY: + case RD_KAFKA_OP_FETCH_STOP | RD_KAFKA_OP_REPLY: /* Reply from toppar FETCH_STOP */ rd_kafka_assignment_partition_stopped(rk, rko->rko_rktp); break; @@ -3738,28 +3708,26 @@ rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, /* return as message_t to application */ return RD_KAFKA_OP_RES_PASS; } - /* FALLTHRU */ + /* FALLTHRU */ - case RD_KAFKA_OP_ERR: - if (rk->rk_conf.error_cb) - rk->rk_conf.error_cb(rk, rko->rko_err, - rko->rko_u.err.errstr, + case RD_KAFKA_OP_ERR: + if (rk->rk_conf.error_cb) + rk->rk_conf.error_cb(rk, rko->rko_err, + rko->rko_u.err.errstr, rk->rk_conf.opaque); else - rd_kafka_log(rk, LOG_ERR, "ERROR", - "%s: %s", - rk->rk_name, - rko->rko_u.err.errstr); + rd_kafka_log(rk, LOG_ERR, "ERROR", "%s: %s", + rk->rk_name, rko->rko_u.err.errstr); break; - case RD_KAFKA_OP_DR: - /* Delivery report: - * call application DR callback for each message. */ - while ((rkm = TAILQ_FIRST(&rko->rko_u.dr.msgq.rkmq_msgs))) { + case RD_KAFKA_OP_DR: + /* Delivery report: + * call application DR callback for each message. 
*/ + while ((rkm = TAILQ_FIRST(&rko->rko_u.dr.msgq.rkmq_msgs))) { rd_kafka_message_t *rkmessage; - TAILQ_REMOVE(&rko->rko_u.dr.msgq.rkmq_msgs, - rkm, rkm_link); + TAILQ_REMOVE(&rko->rko_u.dr.msgq.rkmq_msgs, rkm, + rkm_link); rkmessage = rd_kafka_message_get_from_rkm(rko, rkm); @@ -3768,25 +3736,24 @@ rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, rk->rk_conf.opaque); } else if (rk->rk_conf.dr_cb) { - rk->rk_conf.dr_cb(rk, - rkmessage->payload, - rkmessage->len, - rkmessage->err, - rk->rk_conf.opaque, - rkmessage->_private); + rk->rk_conf.dr_cb( + rk, rkmessage->payload, rkmessage->len, + rkmessage->err, rk->rk_conf.opaque, + rkmessage->_private); } else if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) { - rd_kafka_log(rk, LOG_WARNING, "DRDROP", - "Dropped delivery report for " - "message to " - "%s [%"PRId32"] (%s) with " - "opaque %p: flush() or poll() " - "should not be called when " - "EVENT_DR is enabled", - rd_kafka_topic_name(rkmessage-> - rkt), - rkmessage->partition, - rd_kafka_err2name(rkmessage->err), - rkmessage->_private); + rd_kafka_log( + rk, LOG_WARNING, "DRDROP", + "Dropped delivery report for " + "message to " + "%s [%" PRId32 + "] (%s) with " + "opaque %p: flush() or poll() " + "should not be called when " + "EVENT_DR is enabled", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, + rd_kafka_err2name(rkmessage->err), + rkmessage->_private); } else { rd_assert(!*"BUG: neither a delivery report " "callback or EVENT_DR flag set"); @@ -3798,42 +3765,41 @@ rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, /* Callback called yield(), * re-enqueue the op (if there are any * remaining messages). */ - if (!TAILQ_EMPTY(&rko->rko_u.dr.msgq. 
- rkmq_msgs)) + if (!TAILQ_EMPTY(&rko->rko_u.dr.msgq.rkmq_msgs)) rd_kafka_q_reenq(rkq, rko); else rd_kafka_op_destroy(rko); return RD_KAFKA_OP_RES_YIELD; } - } + } - rd_kafka_msgq_init(&rko->rko_u.dr.msgq); + rd_kafka_msgq_init(&rko->rko_u.dr.msgq); - break; + break; - case RD_KAFKA_OP_THROTTLE: - if (rk->rk_conf.throttle_cb) - rk->rk_conf.throttle_cb(rk, rko->rko_u.throttle.nodename, - rko->rko_u.throttle.nodeid, - rko->rko_u.throttle. - throttle_time, - rk->rk_conf.opaque); - break; + case RD_KAFKA_OP_THROTTLE: + if (rk->rk_conf.throttle_cb) + rk->rk_conf.throttle_cb( + rk, rko->rko_u.throttle.nodename, + rko->rko_u.throttle.nodeid, + rko->rko_u.throttle.throttle_time, + rk->rk_conf.opaque); + break; - case RD_KAFKA_OP_STATS: - /* Statistics */ - if (rk->rk_conf.stats_cb && - rk->rk_conf.stats_cb(rk, rko->rko_u.stats.json, + case RD_KAFKA_OP_STATS: + /* Statistics */ + if (rk->rk_conf.stats_cb && + rk->rk_conf.stats_cb(rk, rko->rko_u.stats.json, rko->rko_u.stats.json_len, - rk->rk_conf.opaque) == 1) - rko->rko_u.stats.json = NULL; /* Application wanted json ptr */ - break; + rk->rk_conf.opaque) == 1) + rko->rko_u.stats.json = + NULL; /* Application wanted json ptr */ + break; case RD_KAFKA_OP_LOG: if (likely(rk->rk_conf.log_cb && rk->rk_conf.log_level >= rko->rko_u.log.level)) - rk->rk_conf.log_cb(rk, - rko->rko_u.log.level, + rk->rk_conf.log_cb(rk, rko->rko_u.log.level, rko->rko_u.log.fac, rko->rko_u.log.str); break; @@ -3887,14 +3853,14 @@ rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, return res; } -int rd_kafka_poll (rd_kafka_t *rk, int timeout_ms) { +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms) { int r; if (timeout_ms) rd_kafka_app_poll_blocking(rk); - r = rd_kafka_q_serve(rk->rk_rep, timeout_ms, 0, - RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL); + r = rd_kafka_q_serve(rk->rk_rep, timeout_ms, 0, RD_KAFKA_Q_CB_CALLBACK, + rd_kafka_poll_cb, NULL); rd_kafka_app_polled(rk); @@ -3902,7 +3868,7 @@ int rd_kafka_poll (rd_kafka_t 
*rk, int timeout_ms) { } -rd_kafka_event_t *rd_kafka_queue_poll (rd_kafka_queue_t *rkqu, int timeout_ms) { +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms) { rd_kafka_op_t *rko; if (timeout_ms) @@ -3919,7 +3885,7 @@ rd_kafka_event_t *rd_kafka_queue_poll (rd_kafka_queue_t *rkqu, int timeout_ms) { return rko; } -int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms) { +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms) { int r; if (timeout_ms) @@ -3935,90 +3901,97 @@ int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms) { -static void rd_kafka_toppar_dump (FILE *fp, const char *indent, - rd_kafka_toppar_t *rktp) { +static void +rd_kafka_toppar_dump(FILE *fp, const char *indent, rd_kafka_toppar_t *rktp) { - fprintf(fp, "%s%.*s [%"PRId32"] broker %s, " + fprintf(fp, + "%s%.*s [%" PRId32 + "] broker %s, " "leader_id %s\n", - indent, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rktp->rktp_broker ? - rktp->rktp_broker->rkb_name : "none", - rktp->rktp_leader ? - rktp->rktp_leader->rkb_name : "none"); - fprintf(fp, - "%s refcnt %i\n" - "%s msgq: %i messages\n" - "%s xmit_msgq: %i messages\n" - "%s total: %"PRIu64" messages, %"PRIu64" bytes\n", - indent, rd_refcnt_get(&rktp->rktp_refcnt), - indent, rktp->rktp_msgq.rkmq_msg_cnt, - indent, rktp->rktp_xmit_msgq.rkmq_msg_cnt, - indent, rd_atomic64_get(&rktp->rktp_c.tx_msgs), + indent, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rktp->rktp_broker ? rktp->rktp_broker->rkb_name : "none", + rktp->rktp_leader ? 
rktp->rktp_leader->rkb_name : "none"); + fprintf(fp, + "%s refcnt %i\n" + "%s msgq: %i messages\n" + "%s xmit_msgq: %i messages\n" + "%s total: %" PRIu64 " messages, %" PRIu64 " bytes\n", + indent, rd_refcnt_get(&rktp->rktp_refcnt), indent, + rktp->rktp_msgq.rkmq_msg_cnt, indent, + rktp->rktp_xmit_msgq.rkmq_msg_cnt, indent, + rd_atomic64_get(&rktp->rktp_c.tx_msgs), rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes)); } -static void rd_kafka_broker_dump (FILE *fp, rd_kafka_broker_t *rkb, int locks) { - rd_kafka_toppar_t *rktp; +static void rd_kafka_broker_dump(FILE *fp, rd_kafka_broker_t *rkb, int locks) { + rd_kafka_toppar_t *rktp; if (locks) rd_kafka_broker_lock(rkb); - fprintf(fp, " rd_kafka_broker_t %p: %s NodeId %"PRId32 + fprintf(fp, + " rd_kafka_broker_t %p: %s NodeId %" PRId32 " in state %s (for %.3fs)\n", rkb, rkb->rkb_name, rkb->rkb_nodeid, rd_kafka_broker_state_names[rkb->rkb_state], - rkb->rkb_ts_state ? - (float)(rd_clock() - rkb->rkb_ts_state) / 1000000.0f : - 0.0f); + rkb->rkb_ts_state + ? 
(float)(rd_clock() - rkb->rkb_ts_state) / 1000000.0f + : 0.0f); fprintf(fp, " refcnt %i\n", rd_refcnt_get(&rkb->rkb_refcnt)); fprintf(fp, " outbuf_cnt: %i waitresp_cnt: %i\n", rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt), rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt)); fprintf(fp, - " %"PRIu64 " messages sent, %"PRIu64" bytes, " - "%"PRIu64" errors, %"PRIu64" timeouts\n" - " %"PRIu64 " messages received, %"PRIu64" bytes, " - "%"PRIu64" errors\n" - " %"PRIu64 " messageset transmissions were retried\n", - rd_atomic64_get(&rkb->rkb_c.tx), rd_atomic64_get(&rkb->rkb_c.tx_bytes), - rd_atomic64_get(&rkb->rkb_c.tx_err), rd_atomic64_get(&rkb->rkb_c.req_timeouts), - rd_atomic64_get(&rkb->rkb_c.rx), rd_atomic64_get(&rkb->rkb_c.rx_bytes), + " %" PRIu64 " messages sent, %" PRIu64 + " bytes, " + "%" PRIu64 " errors, %" PRIu64 + " timeouts\n" + " %" PRIu64 " messages received, %" PRIu64 + " bytes, " + "%" PRIu64 + " errors\n" + " %" PRIu64 " messageset transmissions were retried\n", + rd_atomic64_get(&rkb->rkb_c.tx), + rd_atomic64_get(&rkb->rkb_c.tx_bytes), + rd_atomic64_get(&rkb->rkb_c.tx_err), + rd_atomic64_get(&rkb->rkb_c.req_timeouts), + rd_atomic64_get(&rkb->rkb_c.rx), + rd_atomic64_get(&rkb->rkb_c.rx_bytes), rd_atomic64_get(&rkb->rkb_c.rx_err), rd_atomic64_get(&rkb->rkb_c.tx_retries)); fprintf(fp, " %i toppars:\n", rkb->rkb_toppar_cnt); TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) - rd_kafka_toppar_dump(fp, " ", rktp); + rd_kafka_toppar_dump(fp, " ", rktp); if (locks) { rd_kafka_broker_unlock(rkb); } } -static void rd_kafka_dump0 (FILE *fp, rd_kafka_t *rk, int locks) { - rd_kafka_broker_t *rkb; - rd_kafka_topic_t *rkt; +static void rd_kafka_dump0(FILE *fp, rd_kafka_t *rk, int locks) { + rd_kafka_broker_t *rkb; + rd_kafka_topic_t *rkt; rd_kafka_toppar_t *rktp; int i; - unsigned int tot_cnt; - size_t tot_size; + unsigned int tot_cnt; + size_t tot_size; - rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); + rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); - if (locks) + 
if (locks) rd_kafka_rdlock(rk); #if ENABLE_DEVEL fprintf(fp, "rd_kafka_op_cnt: %d\n", rd_atomic32_get(&rd_kafka_op_cnt)); #endif - fprintf(fp, "rd_kafka_t %p: %s\n", rk, rk->rk_name); + fprintf(fp, "rd_kafka_t %p: %s\n", rk, rk->rk_name); - fprintf(fp, " producer.msg_cnt %u (%"PRIusz" bytes)\n", - tot_cnt, tot_size); - fprintf(fp, " rk_rep reply queue: %i ops\n", - rd_kafka_q_len(rk->rk_rep)); + fprintf(fp, " producer.msg_cnt %u (%" PRIusz " bytes)\n", tot_cnt, + tot_size); + fprintf(fp, " rk_rep reply queue: %i ops\n", + rd_kafka_q_len(rk->rk_rep)); - fprintf(fp, " brokers:\n"); + fprintf(fp, " brokers:\n"); if (locks) mtx_lock(&rk->rk_internal_rkb_lock); if (rk->rk_internal_rkb) @@ -4026,9 +3999,9 @@ static void rd_kafka_dump0 (FILE *fp, rd_kafka_t *rk, int locks) { if (locks) mtx_unlock(&rk->rk_internal_rkb_lock); - TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { rd_kafka_broker_dump(fp, rkb, locks); - } + } fprintf(fp, " cgrp:\n"); if (rk->rk_cgrp) { @@ -4037,37 +4010,40 @@ static void rd_kafka_dump0 (FILE *fp, rd_kafka_t *rk, int locks) { RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rd_kafka_cgrp_state_names[rkcg->rkcg_state], rkcg->rkcg_flags); - fprintf(fp, " coord_id %"PRId32", broker %s\n", + fprintf(fp, " coord_id %" PRId32 ", broker %s\n", rkcg->rkcg_coord_id, - rkcg->rkcg_curr_coord ? - rd_kafka_broker_name(rkcg->rkcg_curr_coord):"(none)"); + rkcg->rkcg_curr_coord + ? 
rd_kafka_broker_name(rkcg->rkcg_curr_coord) + : "(none)"); fprintf(fp, " toppars:\n"); RD_LIST_FOREACH(rktp, &rkcg->rkcg_toppars, i) { - fprintf(fp, " %.*s [%"PRId32"] in state %s\n", + fprintf(fp, " %.*s [%" PRId32 "] in state %s\n", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_fetch_states[rktp->rktp_fetch_state]); } } - fprintf(fp, " topics:\n"); - TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - fprintf(fp, " %.*s with %"PRId32" partitions, state %s, " + fprintf(fp, " topics:\n"); + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + fprintf(fp, + " %.*s with %" PRId32 + " partitions, state %s, " "refcnt %i\n", - RD_KAFKAP_STR_PR(rkt->rkt_topic), - rkt->rkt_partition_cnt, + RD_KAFKAP_STR_PR(rkt->rkt_topic), + rkt->rkt_partition_cnt, rd_kafka_topic_state_names[rkt->rkt_state], rd_refcnt_get(&rkt->rkt_refcnt)); - if (rkt->rkt_ua) - rd_kafka_toppar_dump(fp, " ", rkt->rkt_ua); + if (rkt->rkt_ua) + rd_kafka_toppar_dump(fp, " ", rkt->rkt_ua); if (rd_list_empty(&rkt->rkt_desp)) { fprintf(fp, " desired partitions:"); - RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) - fprintf(fp, " %"PRId32, rktp->rktp_partition); + RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) + fprintf(fp, " %" PRId32, rktp->rktp_partition); fprintf(fp, "\n"); } - } + } fprintf(fp, "\n"); rd_kafka_metadata_cache_dump(fp, rk); @@ -4076,15 +4052,15 @@ static void rd_kafka_dump0 (FILE *fp, rd_kafka_t *rk, int locks) { rd_kafka_rdunlock(rk); } -void rd_kafka_dump (FILE *fp, rd_kafka_t *rk) { +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk) { if (rk) - rd_kafka_dump0(fp, rk, 1/*locks*/); + rd_kafka_dump0(fp, rk, 1 /*locks*/); } -const char *rd_kafka_name (const rd_kafka_t *rk) { - return rk->rk_name; +const char *rd_kafka_name(const rd_kafka_t *rk) { + return rk->rk_name; } rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk) { @@ -4092,26 +4068,26 @@ rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk) { } -char *rd_kafka_memberid (const rd_kafka_t *rk) { - rd_kafka_op_t *rko; - 
rd_kafka_cgrp_t *rkcg; - char *memberid; +char *rd_kafka_memberid(const rd_kafka_t *rk) { + rd_kafka_op_t *rko; + rd_kafka_cgrp_t *rkcg; + char *memberid; - if (!(rkcg = rd_kafka_cgrp_get(rk))) - return NULL; + if (!(rkcg = rd_kafka_cgrp_get(rk))) + return NULL; - rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_NAME); - if (!rko) - return NULL; - memberid = rko->rko_u.name.str; - rko->rko_u.name.str = NULL; - rd_kafka_op_destroy(rko); + rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_NAME); + if (!rko) + return NULL; + memberid = rko->rko_u.name.str; + rko->rko_u.name.str = NULL; + rd_kafka_op_destroy(rko); - return memberid; + return memberid; } -char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms) { +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms) { rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); /* ClusterId is returned in Metadata >=V2 responses and @@ -4153,7 +4129,7 @@ char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms) { } -int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms) { +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms) { rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); /* ControllerId is returned in Metadata >=V1 responses and @@ -4197,22 +4173,22 @@ int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms) { } -void *rd_kafka_opaque (const rd_kafka_t *rk) { +void *rd_kafka_opaque(const rd_kafka_t *rk) { return rk->rk_conf.opaque; } -int rd_kafka_outq_len (rd_kafka_t *rk) { +int rd_kafka_outq_len(rd_kafka_t *rk) { return rd_kafka_curr_msgs_cnt(rk) + rd_kafka_q_len(rk->rk_rep) + - (rk->rk_background.q ? rd_kafka_q_len(rk->rk_background.q) : 0); + (rk->rk_background.q ? 
rd_kafka_q_len(rk->rk_background.q) : 0); } -rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms) { +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms) { unsigned int msg_cnt = 0; - if (rk->rk_type != RD_KAFKA_PRODUCER) - return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; + if (rk->rk_type != RD_KAFKA_PRODUCER) + return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; rd_kafka_yield_thread = 0; @@ -4221,9 +4197,9 @@ rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms) { * time should be considered immediate. */ rd_atomic32_add(&rk->rk_flushing, 1); - /* Wake up all broker threads to trigger the produce_serve() call. - * If this flush() call finishes before the broker wakes up - * then no flushing will be performed by that broker thread. */ + /* Wake up all broker threads to trigger the produce_serve() call. + * If this flush() call finishes before the broker wakes up + * then no flushing will be performed by that broker thread. */ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_UP); if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) { @@ -4244,25 +4220,24 @@ rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms) { * where timeout_ms==RD_POLL_NOWAIT to make sure poll is * called at least once. */ rd_ts_t ts_end = rd_timeout_init(timeout_ms); - int tmout = RD_POLL_NOWAIT; - int qlen = 0; + int tmout = RD_POLL_NOWAIT; + int qlen = 0; do { rd_kafka_poll(rk, tmout); - qlen = rd_kafka_q_len(rk->rk_rep); + qlen = rd_kafka_q_len(rk->rk_rep); msg_cnt = rd_kafka_curr_msgs_cnt(rk); - } while (qlen + msg_cnt > 0 && - !rd_kafka_yield_thread && + } while (qlen + msg_cnt > 0 && !rd_kafka_yield_thread && (tmout = rd_timeout_remains_limit(ts_end, 10)) != - RD_POLL_NOWAIT); + RD_POLL_NOWAIT); msg_cnt += qlen; } rd_atomic32_sub(&rk->rk_flushing, 1); - return msg_cnt > 0 ? RD_KAFKA_RESP_ERR__TIMED_OUT : - RD_KAFKA_RESP_ERR_NO_ERROR; + return msg_cnt > 0 ? 
RD_KAFKA_RESP_ERR__TIMED_OUT + : RD_KAFKA_RESP_ERR_NO_ERROR; } /** @@ -4280,8 +4255,7 @@ rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms) { * @locks_required rd_kafka_*lock() * @locks_acquired rd_kafka_topic_rdlock() */ -static int -rd_kafka_purge_toppars (rd_kafka_t *rk, int purge_flags) { +static int rd_kafka_purge_toppars(rd_kafka_t *rk, int purge_flags) { rd_kafka_topic_t *rkt; int cnt = 0; @@ -4290,17 +4264,17 @@ rd_kafka_purge_toppars (rd_kafka_t *rk, int purge_flags) { int i; rd_kafka_topic_rdlock(rkt); - for (i = 0 ; i < rkt->rkt_partition_cnt ; i++) + for (i = 0; i < rkt->rkt_partition_cnt; i++) cnt += rd_kafka_toppar_purge_queues( - rkt->rkt_p[i], purge_flags, rd_false/*!xmit*/); + rkt->rkt_p[i], purge_flags, rd_false /*!xmit*/); RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) - cnt += rd_kafka_toppar_purge_queues( - rktp, purge_flags, rd_false/*!xmit*/); + cnt += rd_kafka_toppar_purge_queues(rktp, purge_flags, + rd_false /*!xmit*/); if (rkt->rkt_ua) cnt += rd_kafka_toppar_purge_queues( - rkt->rkt_ua, purge_flags, rd_false/*!xmit*/); + rkt->rkt_ua, purge_flags, rd_false /*!xmit*/); rd_kafka_topic_rdunlock(rkt); } @@ -4308,10 +4282,10 @@ rd_kafka_purge_toppars (rd_kafka_t *rk, int purge_flags) { } -rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags) { +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags) { rd_kafka_broker_t *rkb; rd_kafka_q_t *tmpq = NULL; - int waitcnt = 0; + int waitcnt = 0; if (rk->rk_type != RD_KAFKA_PRODUCER) return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; @@ -4362,99 +4336,91 @@ rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags) { - /** * @returns a csv string of purge flags in thread-local storage */ -const char *rd_kafka_purge_flags2str (int flags) { - static const char *names[] = { - "queue", - "inflight", - "non-blocking", - NULL - }; +const char *rd_kafka_purge_flags2str(int flags) { + static const char *names[] = {"queue", "inflight", "non-blocking", + NULL}; static 
RD_TLS char ret[64]; return rd_flags2str(ret, sizeof(ret), names, flags); } -int rd_kafka_version (void) { - return RD_KAFKA_VERSION; +int rd_kafka_version(void) { + return RD_KAFKA_VERSION; } -const char *rd_kafka_version_str (void) { - static RD_TLS char ret[128]; - size_t of = 0, r; +const char *rd_kafka_version_str(void) { + static RD_TLS char ret[128]; + size_t of = 0, r; - if (*ret) - return ret; + if (*ret) + return ret; #ifdef LIBRDKAFKA_GIT_VERSION - if (*LIBRDKAFKA_GIT_VERSION) { - of = rd_snprintf(ret, sizeof(ret), "%s", - *LIBRDKAFKA_GIT_VERSION == 'v' ? - LIBRDKAFKA_GIT_VERSION+1 : - LIBRDKAFKA_GIT_VERSION); - if (of > sizeof(ret)) - of = sizeof(ret); - } + if (*LIBRDKAFKA_GIT_VERSION) { + of = rd_snprintf(ret, sizeof(ret), "%s", + *LIBRDKAFKA_GIT_VERSION == 'v' + ? LIBRDKAFKA_GIT_VERSION + 1 + : LIBRDKAFKA_GIT_VERSION); + if (of > sizeof(ret)) + of = sizeof(ret); + } #endif -#define _my_sprintf(...) do { \ - r = rd_snprintf(ret+of, sizeof(ret)-of, __VA_ARGS__); \ - if (r > sizeof(ret)-of) \ - r = sizeof(ret)-of; \ - of += r; \ - } while(0) - - if (of == 0) { - int ver = rd_kafka_version(); - int prel = (ver & 0xff); - _my_sprintf("%i.%i.%i", - (ver >> 24) & 0xff, - (ver >> 16) & 0xff, - (ver >> 8) & 0xff); - if (prel != 0xff) { - /* pre-builds below 200 are just running numbers, - * above 200 are RC numbers. */ - if (prel <= 200) - _my_sprintf("-pre%d", prel); - else - _my_sprintf("-RC%d", prel - 200); - } - } +#define _my_sprintf(...) \ + do { \ + r = rd_snprintf(ret + of, sizeof(ret) - of, __VA_ARGS__); \ + if (r > sizeof(ret) - of) \ + r = sizeof(ret) - of; \ + of += r; \ + } while (0) + + if (of == 0) { + int ver = rd_kafka_version(); + int prel = (ver & 0xff); + _my_sprintf("%i.%i.%i", (ver >> 24) & 0xff, (ver >> 16) & 0xff, + (ver >> 8) & 0xff); + if (prel != 0xff) { + /* pre-builds below 200 are just running numbers, + * above 200 are RC numbers. 
*/ + if (prel <= 200) + _my_sprintf("-pre%d", prel); + else + _my_sprintf("-RC%d", prel - 200); + } + } #if ENABLE_DEVEL - _my_sprintf("-devel"); + _my_sprintf("-devel"); #endif #if WITHOUT_OPTIMIZATION - _my_sprintf("-O0"); + _my_sprintf("-O0"); #endif - return ret; + return ret; } /** * Assert trampoline to print some debugging information on crash. */ -void -RD_NORETURN -rd_kafka_crash (const char *file, int line, const char *function, - rd_kafka_t *rk, const char *reason) { - fprintf(stderr, "*** %s:%i:%s: %s ***\n", - file, line, function, reason); +void RD_NORETURN rd_kafka_crash(const char *file, + int line, + const char *function, + rd_kafka_t *rk, + const char *reason) { + fprintf(stderr, "*** %s:%i:%s: %s ***\n", file, line, function, reason); if (rk) - rd_kafka_dump0(stderr, rk, 0/*no locks*/); + rd_kafka_dump0(stderr, rk, 0 /*no locks*/); abort(); } - - struct list_groups_state { rd_kafka_q_t *q; rd_kafka_resp_err_t err; @@ -4464,12 +4430,12 @@ struct list_groups_state { int grplist_size; }; -static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_DescribeGroups_resp_cb(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { struct list_groups_state *state; const int log_decode_errors = LOG_ERR; int cnt; @@ -4498,8 +4464,8 @@ static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, /* Grow group array */ state->grplist_size *= 2; state->grplist->groups = - rd_realloc(state->grplist->groups, - state->grplist_size * + rd_realloc(state->grplist->groups, + state->grplist_size * sizeof(*state->grplist->groups)); } @@ -4519,20 +4485,20 @@ static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, } rd_kafka_broker_lock(rkb); - gi->broker.id = rkb->rkb_nodeid; + gi->broker.id = rkb->rkb_nodeid; gi->broker.host = 
rd_strdup(rkb->rkb_origname); gi->broker.port = rkb->rkb_port; rd_kafka_broker_unlock(rkb); - gi->err = ErrorCode; - gi->group = RD_KAFKAP_STR_DUP(&Group); - gi->state = RD_KAFKAP_STR_DUP(&GroupState); + gi->err = ErrorCode; + gi->group = RD_KAFKAP_STR_DUP(&Group); + gi->state = RD_KAFKAP_STR_DUP(&GroupState); gi->protocol_type = RD_KAFKAP_STR_DUP(&ProtoType); - gi->protocol = RD_KAFKAP_STR_DUP(&Proto); + gi->protocol = RD_KAFKAP_STR_DUP(&Proto); if (MemberCnt > 0) gi->members = - rd_malloc(MemberCnt * sizeof(*gi->members)); + rd_malloc(MemberCnt * sizeof(*gi->members)); while (MemberCnt-- > 0) { rd_kafkap_str_t MemberId, ClientId, ClientHost; @@ -4548,30 +4514,29 @@ static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, rd_kafka_buf_read_bytes(reply, &Meta); rd_kafka_buf_read_bytes(reply, &Assignment); - mi->member_id = RD_KAFKAP_STR_DUP(&MemberId); - mi->client_id = RD_KAFKAP_STR_DUP(&ClientId); + mi->member_id = RD_KAFKAP_STR_DUP(&MemberId); + mi->client_id = RD_KAFKAP_STR_DUP(&ClientId); mi->client_host = RD_KAFKAP_STR_DUP(&ClientHost); if (RD_KAFKAP_BYTES_LEN(&Meta) == 0) { mi->member_metadata_size = 0; - mi->member_metadata = NULL; + mi->member_metadata = NULL; } else { mi->member_metadata_size = - RD_KAFKAP_BYTES_LEN(&Meta); - mi->member_metadata = - rd_memdup(Meta.data, - mi->member_metadata_size); + RD_KAFKAP_BYTES_LEN(&Meta); + mi->member_metadata = rd_memdup( + Meta.data, mi->member_metadata_size); } if (RD_KAFKAP_BYTES_LEN(&Assignment) == 0) { mi->member_assignment_size = 0; - mi->member_assignment = NULL; + mi->member_assignment = NULL; } else { mi->member_assignment_size = - RD_KAFKAP_BYTES_LEN(&Assignment); + RD_KAFKAP_BYTES_LEN(&Assignment); mi->member_assignment = - rd_memdup(Assignment.data, - mi->member_assignment_size); + rd_memdup(Assignment.data, + mi->member_assignment_size); } } } @@ -4580,16 +4545,16 @@ static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, state->err = err; return; - err_parse: +err_parse: state->err = 
reply->rkbuf_err; } -static void rd_kafka_ListGroups_resp_cb (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_ListGroups_resp_cb(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { struct list_groups_state *state; const int log_decode_errors = LOG_ERR; int16_t ErrorCode; @@ -4646,11 +4611,9 @@ static void rd_kafka_ListGroups_resp_cb (rd_kafka_t *rk, if (i > 0) { state->wait_cnt++; - rd_kafka_DescribeGroupsRequest(rkb, - (const char **)grps, i, - RD_KAFKA_REPLYQ(state->q, 0), - rd_kafka_DescribeGroups_resp_cb, - state); + rd_kafka_DescribeGroupsRequest( + rkb, (const char **)grps, i, RD_KAFKA_REPLYQ(state->q, 0), + rd_kafka_DescribeGroups_resp_cb, state); while (i-- > 0) rd_free(grps[i]); @@ -4663,44 +4626,45 @@ static void rd_kafka_ListGroups_resp_cb (rd_kafka_t *rk, state->err = err; return; - err_parse: +err_parse: if (grps) rd_free(grps); state->err = reply->rkbuf_err; } rd_kafka_resp_err_t -rd_kafka_list_groups (rd_kafka_t *rk, const char *group, - const struct rd_kafka_group_list **grplistp, - int timeout_ms) { +rd_kafka_list_groups(rd_kafka_t *rk, + const char *group, + const struct rd_kafka_group_list **grplistp, + int timeout_ms) { rd_kafka_broker_t *rkb; - int rkb_cnt = 0; + int rkb_cnt = 0; struct list_groups_state state = RD_ZERO_INIT; - rd_ts_t ts_end = rd_timeout_init(timeout_ms); - int state_version = rd_kafka_brokers_get_state_version(rk); + rd_ts_t ts_end = rd_timeout_init(timeout_ms); + int state_version = rd_kafka_brokers_get_state_version(rk); /* Wait until metadata has been fetched from cluster so * that we have a full broker list. - * This state only happens during initial client setup, after that - * there'll always be a cached metadata copy. 
*/ + * This state only happens during initial client setup, after that + * there'll always be a cached metadata copy. */ rd_kafka_rdlock(rk); while (!rk->rk_ts_metadata) { rd_kafka_rdunlock(rk); - if (!rd_kafka_brokers_wait_state_change( - rk, state_version, rd_timeout_remains(ts_end))) + if (!rd_kafka_brokers_wait_state_change( + rk, state_version, rd_timeout_remains(ts_end))) return RD_KAFKA_RESP_ERR__TIMED_OUT; rd_kafka_rdlock(rk); } - state.q = rd_kafka_q_new(rk); + state.q = rd_kafka_q_new(rk); state.desired_group = group; - state.grplist = rd_calloc(1, sizeof(*state.grplist)); - state.grplist_size = group ? 1 : 32; + state.grplist = rd_calloc(1, sizeof(*state.grplist)); + state.grplist_size = group ? 1 : 32; - state.grplist->groups = rd_malloc(state.grplist_size * - sizeof(*state.grplist->groups)); + state.grplist->groups = + rd_malloc(state.grplist_size * sizeof(*state.grplist->groups)); /* Query each broker for its list of groups */ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { @@ -4713,10 +4677,8 @@ rd_kafka_list_groups (rd_kafka_t *rk, const char *group, state.wait_cnt++; rkb_cnt++; - rd_kafka_ListGroupsRequest(rkb, - RD_KAFKA_REPLYQ(state.q, 0), - rd_kafka_ListGroups_resp_cb, - &state); + rd_kafka_ListGroupsRequest(rkb, RD_KAFKA_REPLYQ(state.q, 0), + rd_kafka_ListGroups_resp_cb, &state); } rd_kafka_rdunlock(rk); @@ -4727,8 +4689,8 @@ rd_kafka_list_groups (rd_kafka_t *rk, const char *group, int remains; while (state.wait_cnt > 0 && - !rd_timeout_expired((remains = - rd_timeout_remains(ts_end)))) { + !rd_timeout_expired( + (remains = rd_timeout_remains(ts_end)))) { rd_kafka_q_serve(state.q, remains, 0, RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL); @@ -4756,9 +4718,9 @@ rd_kafka_list_groups (rd_kafka_t *rk, const char *group, } -void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist0) { +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist0) { struct rd_kafka_group_list *grplist = - (struct rd_kafka_group_list 
*)grplist0; + (struct rd_kafka_group_list *)grplist0; while (grplist->group_cnt-- > 0) { struct rd_kafka_group_info *gi; @@ -4804,17 +4766,17 @@ void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist0) { const char *rd_kafka_get_debug_contexts(void) { - return RD_KAFKA_DEBUG_CONTEXTS; + return RD_KAFKA_DEBUG_CONTEXTS; } -int rd_kafka_path_is_dir (const char *path) { +int rd_kafka_path_is_dir(const char *path) { #ifdef _WIN32 - struct _stat st; - return (_stat(path, &st) == 0 && st.st_mode & S_IFDIR); + struct _stat st; + return (_stat(path, &st) == 0 && st.st_mode & S_IFDIR); #else - struct stat st; - return (stat(path, &st) == 0 && S_ISDIR(st.st_mode)); + struct stat st; + return (stat(path, &st) == 0 && S_ISDIR(st.st_mode)); #endif } @@ -4822,7 +4784,7 @@ int rd_kafka_path_is_dir (const char *path) { /** * @returns true if directory is empty or can't be accessed, else false. */ -rd_bool_t rd_kafka_dir_is_empty (const char *path) { +rd_bool_t rd_kafka_dir_is_empty(const char *path) { #if _WIN32 /* FIXME: Unsupported */ return rd_true; @@ -4840,14 +4802,13 @@ rd_bool_t rd_kafka_dir_is_empty (const char *path) { while ((d = readdir(dir))) { - if (!strcmp(d->d_name, ".") || - !strcmp(d->d_name, "..")) + if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, "..")) continue; #if defined(__sun) ret = stat(d->d_name, &st); if (ret != 0) { - return rd_true; // Can't be accessed + return rd_true; // Can't be accessed } if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode)) { @@ -4866,7 +4827,7 @@ rd_bool_t rd_kafka_dir_is_empty (const char *path) { } -void *rd_kafka_mem_malloc (rd_kafka_t *rk, size_t size) { +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size) { return rd_malloc(size); } @@ -4874,15 +4835,15 @@ void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size) { return rd_calloc(num, size); } -void rd_kafka_mem_free (rd_kafka_t *rk, void *ptr) { +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr) { rd_free(ptr); } -int 
rd_kafka_errno (void) { +int rd_kafka_errno(void) { return errno; } -int rd_kafka_unittest (void) { +int rd_kafka_unittest(void) { return rd_unittest(); } diff --git a/src/rdkafka.h b/src/rdkafka.h index 60fa182426..d181b9567c 100644 --- a/src/rdkafka.h +++ b/src/rdkafka.h @@ -60,13 +60,13 @@ extern "C" { #ifndef WIN32_MEAN_AND_LEAN #define WIN32_MEAN_AND_LEAN #endif -#include /* for sockaddr, .. */ +#include /* for sockaddr, .. */ #ifndef _SSIZE_T_DEFINED #define _SSIZE_T_DEFINED typedef SSIZE_T ssize_t; #endif #define RD_UNUSED -#define RD_INLINE __inline +#define RD_INLINE __inline #define RD_DEPRECATED __declspec(deprecated) #define RD_FORMAT(...) #undef RD_EXPORT @@ -92,7 +92,7 @@ typedef SSIZE_T ssize_t; #define RD_DEPRECATED __attribute__((deprecated)) #if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) -#define RD_FORMAT(...) __attribute__((format (__VA_ARGS__))) +#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__))) #else #define RD_FORMAT(...) #endif @@ -109,29 +109,36 @@ typedef SSIZE_T ssize_t; * @returns \p RET */ #if LIBRDKAFKA_TYPECHECKS -#define _LRK_TYPECHECK(RET,TYPE,ARG) \ - ({ if (0) { TYPE __t RD_UNUSED = (ARG); } RET; }) - -#define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) \ - ({ \ - if (0) { \ - TYPE __t RD_UNUSED = (ARG); \ - TYPE2 __t2 RD_UNUSED = (ARG2); \ - } \ - RET; }) - -#define _LRK_TYPECHECK3(RET,TYPE,ARG,TYPE2,ARG2,TYPE3,ARG3) \ - ({ \ - if (0) { \ - TYPE __t RD_UNUSED = (ARG); \ - TYPE2 __t2 RD_UNUSED = (ARG2); \ - TYPE3 __t3 RD_UNUSED = (ARG3); \ - } \ - RET; }) +#define _LRK_TYPECHECK(RET, TYPE, ARG) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + } \ + RET; \ + }) + +#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + TYPE2 __t2 RD_UNUSED = (ARG2); \ + } \ + RET; \ + }) + +#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + TYPE2 __t2 RD_UNUSED = (ARG2); \ + TYPE3 __t3 RD_UNUSED 
= (ARG3); \ + } \ + RET; \ + }) #else -#define _LRK_TYPECHECK(RET,TYPE,ARG) (RET) -#define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) (RET) -#define _LRK_TYPECHECK3(RET,TYPE,ARG,TYPE2,ARG2,TYPE3,ARG3) (RET) +#define _LRK_TYPECHECK(RET, TYPE, ARG) (RET) +#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) (RET) +#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) (RET) #endif /* @endcond */ @@ -158,7 +165,7 @@ typedef SSIZE_T ssize_t; * @remark This value should only be used during compile time, * for runtime checks of version use rd_kafka_version() */ -#define RD_KAFKA_VERSION 0x010802ff +#define RD_KAFKA_VERSION 0x010802ff /** * @brief Returns the librdkafka version as integer. @@ -177,7 +184,7 @@ int rd_kafka_version(void); * @returns Version string */ RD_EXPORT -const char *rd_kafka_version_str (void); +const char *rd_kafka_version_str(void); /**@}*/ @@ -198,8 +205,8 @@ const char *rd_kafka_version_str (void); * @sa rd_kafka_new() */ typedef enum rd_kafka_type_t { - RD_KAFKA_PRODUCER, /**< Producer client */ - RD_KAFKA_CONSUMER /**< Consumer client */ + RD_KAFKA_PRODUCER, /**< Producer client */ + RD_KAFKA_CONSUMER /**< Consumer client */ } rd_kafka_type_t; @@ -209,9 +216,9 @@ typedef enum rd_kafka_type_t { * @sa rd_kafka_message_timestamp() */ typedef enum rd_kafka_timestamp_type_t { - RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ - RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */ - RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */ + RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ + RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */ + RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */ } rd_kafka_timestamp_type_t; @@ -232,8 +239,10 @@ const char *rd_kafka_get_debug_contexts(void); * linking another version of the library. * Use rd_kafka_get_debug_contexts() instead. 
*/ -#define RD_KAFKA_DEBUG_CONTEXTS \ - "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp,security,fetch,interceptor,plugin,consumer,admin,eos,mock,assignor,conf" +#define RD_KAFKA_DEBUG_CONTEXTS \ + "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp," \ + "security,fetch,interceptor,plugin,consumer,admin,eos,mock,assignor," \ + "conf" /* @cond NO_DOC */ @@ -246,7 +255,7 @@ typedef struct rd_kafka_queue_s rd_kafka_queue_t; typedef struct rd_kafka_op_s rd_kafka_event_t; typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t; typedef struct rd_kafka_consumer_group_metadata_s -rd_kafka_consumer_group_metadata_t; + rd_kafka_consumer_group_metadata_t; typedef struct rd_kafka_error_s rd_kafka_error_t; typedef struct rd_kafka_headers_s rd_kafka_headers_t; typedef struct rd_kafka_group_result_s rd_kafka_group_result_t; @@ -266,80 +275,80 @@ typedef struct rd_kafka_group_result_s rd_kafka_group_result_t; * @sa Use rd_kafka_err2str() to translate an error code a human readable string */ typedef enum { - /* Internal errors to rdkafka: */ - /** Begin internal error codes */ - RD_KAFKA_RESP_ERR__BEGIN = -200, - /** Received message is incorrect */ - RD_KAFKA_RESP_ERR__BAD_MSG = -199, - /** Bad/unknown compression */ - RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198, - /** Broker is going away */ - RD_KAFKA_RESP_ERR__DESTROY = -197, - /** Generic failure */ - RD_KAFKA_RESP_ERR__FAIL = -196, - /** Broker transport failure */ - RD_KAFKA_RESP_ERR__TRANSPORT = -195, - /** Critical system resource */ - RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194, - /** Failed to resolve broker */ - RD_KAFKA_RESP_ERR__RESOLVE = -193, - /** Produced message timed out*/ - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192, - /** Reached the end of the topic+partition queue on - * the broker. Not really an error. - * This event is disabled by default, - * see the `enable.partition.eof` configuration property. 
*/ - RD_KAFKA_RESP_ERR__PARTITION_EOF = -191, - /** Permanent: Partition does not exist in cluster. */ - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190, - /** File or filesystem error */ - RD_KAFKA_RESP_ERR__FS = -189, - /** Permanent: Topic does not exist in cluster. */ - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188, - /** All broker connections are down. */ - RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187, - /** Invalid argument, or invalid configuration */ - RD_KAFKA_RESP_ERR__INVALID_ARG = -186, - /** Operation timed out */ - RD_KAFKA_RESP_ERR__TIMED_OUT = -185, - /** Queue is full */ - RD_KAFKA_RESP_ERR__QUEUE_FULL = -184, - /** ISR count < required.acks */ + /* Internal errors to rdkafka: */ + /** Begin internal error codes */ + RD_KAFKA_RESP_ERR__BEGIN = -200, + /** Received message is incorrect */ + RD_KAFKA_RESP_ERR__BAD_MSG = -199, + /** Bad/unknown compression */ + RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198, + /** Broker is going away */ + RD_KAFKA_RESP_ERR__DESTROY = -197, + /** Generic failure */ + RD_KAFKA_RESP_ERR__FAIL = -196, + /** Broker transport failure */ + RD_KAFKA_RESP_ERR__TRANSPORT = -195, + /** Critical system resource */ + RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194, + /** Failed to resolve broker */ + RD_KAFKA_RESP_ERR__RESOLVE = -193, + /** Produced message timed out*/ + RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192, + /** Reached the end of the topic+partition queue on + * the broker. Not really an error. + * This event is disabled by default, + * see the `enable.partition.eof` configuration property. */ + RD_KAFKA_RESP_ERR__PARTITION_EOF = -191, + /** Permanent: Partition does not exist in cluster. */ + RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190, + /** File or filesystem error */ + RD_KAFKA_RESP_ERR__FS = -189, + /** Permanent: Topic does not exist in cluster. */ + RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188, + /** All broker connections are down. 
*/ + RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187, + /** Invalid argument, or invalid configuration */ + RD_KAFKA_RESP_ERR__INVALID_ARG = -186, + /** Operation timed out */ + RD_KAFKA_RESP_ERR__TIMED_OUT = -185, + /** Queue is full */ + RD_KAFKA_RESP_ERR__QUEUE_FULL = -184, + /** ISR count < required.acks */ RD_KAFKA_RESP_ERR__ISR_INSUFF = -183, - /** Broker node update */ + /** Broker node update */ RD_KAFKA_RESP_ERR__NODE_UPDATE = -182, - /** SSL error */ - RD_KAFKA_RESP_ERR__SSL = -181, - /** Waiting for coordinator to become available. */ + /** SSL error */ + RD_KAFKA_RESP_ERR__SSL = -181, + /** Waiting for coordinator to become available. */ RD_KAFKA_RESP_ERR__WAIT_COORD = -180, - /** Unknown client group */ + /** Unknown client group */ RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179, - /** Operation in progress */ + /** Operation in progress */ RD_KAFKA_RESP_ERR__IN_PROGRESS = -178, - /** Previous operation in progress, wait for it to finish. */ + /** Previous operation in progress, wait for it to finish. 
*/ RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177, - /** This operation would interfere with an existing subscription */ + /** This operation would interfere with an existing subscription */ RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176, - /** Assigned partitions (rebalance_cb) */ + /** Assigned partitions (rebalance_cb) */ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175, - /** Revoked partitions (rebalance_cb) */ + /** Revoked partitions (rebalance_cb) */ RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174, - /** Conflicting use */ + /** Conflicting use */ RD_KAFKA_RESP_ERR__CONFLICT = -173, - /** Wrong state */ + /** Wrong state */ RD_KAFKA_RESP_ERR__STATE = -172, - /** Unknown protocol */ + /** Unknown protocol */ RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171, - /** Not implemented */ + /** Not implemented */ RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170, - /** Authentication failure*/ - RD_KAFKA_RESP_ERR__AUTHENTICATION = -169, - /** No stored offset */ - RD_KAFKA_RESP_ERR__NO_OFFSET = -168, - /** Outdated */ - RD_KAFKA_RESP_ERR__OUTDATED = -167, - /** Timed out in queue */ - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166, + /** Authentication failure*/ + RD_KAFKA_RESP_ERR__AUTHENTICATION = -169, + /** No stored offset */ + RD_KAFKA_RESP_ERR__NO_OFFSET = -168, + /** Outdated */ + RD_KAFKA_RESP_ERR__OUTDATED = -167, + /** Timed out in queue */ + RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166, /** Feature not supported by broker */ RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165, /** Awaiting cache update */ @@ -393,109 +402,109 @@ typedef enum { /** No offset to automatically reset to */ RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140, - /** End internal error codes */ - RD_KAFKA_RESP_ERR__END = -100, - - /* Kafka broker errors: */ - /** Unknown broker error */ - RD_KAFKA_RESP_ERR_UNKNOWN = -1, - /** Success */ - RD_KAFKA_RESP_ERR_NO_ERROR = 0, - /** Offset out of range */ - RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1, - /** Invalid message */ - RD_KAFKA_RESP_ERR_INVALID_MSG = 2, - /** Unknown topic 
or partition */ - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3, - /** Invalid message size */ - RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, - /** Leader not available */ - RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, - /** Not leader for partition */ - RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, - /** Request timed out */ - RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, - /** Broker not available */ - RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8, - /** Replica not available */ - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9, - /** Message size too large */ - RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10, - /** StaleControllerEpochCode */ - RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11, - /** Offset metadata string too large */ - RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12, - /** Broker disconnected before response received */ - RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13, + /** End internal error codes */ + RD_KAFKA_RESP_ERR__END = -100, + + /* Kafka broker errors: */ + /** Unknown broker error */ + RD_KAFKA_RESP_ERR_UNKNOWN = -1, + /** Success */ + RD_KAFKA_RESP_ERR_NO_ERROR = 0, + /** Offset out of range */ + RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1, + /** Invalid message */ + RD_KAFKA_RESP_ERR_INVALID_MSG = 2, + /** Unknown topic or partition */ + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3, + /** Invalid message size */ + RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, + /** Leader not available */ + RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, + /** Not leader for partition */ + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, + /** Request timed out */ + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, + /** Broker not available */ + RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8, + /** Replica not available */ + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9, + /** Message size too large */ + RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10, + /** StaleControllerEpochCode */ + RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11, + /** Offset metadata string too large */ + RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 
12, + /** Broker disconnected before response received */ + RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13, /** Coordinator load in progress */ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14, - /** Group coordinator load in progress */ -#define RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS \ +/** Group coordinator load in progress */ +#define RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS \ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS /** Coordinator not available */ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15, - /** Group coordinator not available */ -#define RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE \ +/** Group coordinator not available */ +#define RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE \ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE /** Not coordinator */ RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16, - /** Not coordinator for group */ -#define RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP \ +/** Not coordinator for group */ +#define RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP \ RD_KAFKA_RESP_ERR_NOT_COORDINATOR - /** Invalid topic */ + /** Invalid topic */ RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17, - /** Message batch larger than configured server segment size */ + /** Message batch larger than configured server segment size */ RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18, - /** Not enough in-sync replicas */ + /** Not enough in-sync replicas */ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19, - /** Message(s) written to insufficient number of in-sync replicas */ + /** Message(s) written to insufficient number of in-sync replicas */ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, - /** Invalid required acks value */ + /** Invalid required acks value */ RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21, - /** Specified group generation id is not valid */ + /** Specified group generation id is not valid */ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22, - /** Inconsistent group protocol */ + /** Inconsistent group protocol */ 
RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23, - /** Invalid group.id */ - RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24, - /** Unknown member */ + /** Invalid group.id */ + RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24, + /** Unknown member */ RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25, - /** Invalid session timeout */ + /** Invalid session timeout */ RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26, - /** Group rebalance in progress */ - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27, - /** Commit offset data size is not valid */ + /** Group rebalance in progress */ + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27, + /** Commit offset data size is not valid */ RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28, - /** Topic authorization failed */ + /** Topic authorization failed */ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29, - /** Group authorization failed */ - RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30, - /** Cluster authorization failed */ - RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31, - /** Invalid timestamp */ - RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32, - /** Unsupported SASL mechanism */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33, - /** Illegal SASL state */ - RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34, - /** Unuspported version */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35, - /** Topic already exists */ - RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36, - /** Invalid number of partitions */ - RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37, - /** Invalid replication factor */ - RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38, - /** Invalid replica assignment */ - RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39, - /** Invalid config */ - RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40, - /** Not controller for cluster */ - RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41, - /** Invalid request */ - RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42, - /** Message format on broker does not support request */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, + /** Group 
authorization failed */ + RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30, + /** Cluster authorization failed */ + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31, + /** Invalid timestamp */ + RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32, + /** Unsupported SASL mechanism */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33, + /** Illegal SASL state */ + RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34, + /** Unuspported version */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35, + /** Topic already exists */ + RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36, + /** Invalid number of partitions */ + RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37, + /** Invalid replication factor */ + RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38, + /** Invalid replica assignment */ + RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39, + /** Invalid config */ + RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40, + /** Not controller for cluster */ + RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41, + /** Invalid request */ + RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42, + /** Message format on broker does not support request */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, /** Policy violation */ RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44, /** Broker received an out of order sequence number */ @@ -527,7 +536,8 @@ typedef enum { RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55, /** Disk error when trying to access log file on the disk */ RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56, - /** The user-specified log directory is not found in the broker config */ + /** The user-specified log directory is not found in the broker config + */ RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57, /** SASL Authentication failed */ RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58, @@ -624,9 +634,9 @@ typedef enum { * the full set of librdkafka error codes. 
*/ struct rd_kafka_err_desc { - rd_kafka_resp_err_t code;/**< Error code */ - const char *name; /**< Error name, same as code enum sans prefix */ - const char *desc; /**< Human readable error description. */ + rd_kafka_resp_err_t code; /**< Error code */ + const char *name; /**< Error name, same as code enum sans prefix */ + const char *desc; /**< Human readable error description. */ }; @@ -634,9 +644,8 @@ struct rd_kafka_err_desc { * @brief Returns the full list of error codes. */ RD_EXPORT -void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs, - size_t *cntp); - +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, + size_t *cntp); @@ -646,7 +655,7 @@ void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs, * @param err Error code to translate */ RD_EXPORT -const char *rd_kafka_err2str (rd_kafka_resp_err_t err); +const char *rd_kafka_err2str(rd_kafka_resp_err_t err); @@ -656,7 +665,7 @@ const char *rd_kafka_err2str (rd_kafka_resp_err_t err); * @param err Error code to translate */ RD_EXPORT -const char *rd_kafka_err2name (rd_kafka_resp_err_t err); +const char *rd_kafka_err2name(rd_kafka_resp_err_t err); /** @@ -685,7 +694,7 @@ const char *rd_kafka_err2name (rd_kafka_resp_err_t err); * and should not be used, use rd_kafka_last_error() instead. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_last_error (void); +rd_kafka_resp_err_t rd_kafka_last_error(void); /** @@ -712,8 +721,7 @@ rd_kafka_resp_err_t rd_kafka_last_error (void); * * @sa rd_kafka_last_error() */ -RD_EXPORT RD_DEPRECATED -rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); +RD_EXPORT RD_DEPRECATED rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); /** @@ -728,9 +736,7 @@ rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); * @deprecated Use rd_kafka_last_error() to retrieve the last error code * set by the legacy librdkafka APIs. 
*/ -RD_EXPORT RD_DEPRECATED -int rd_kafka_errno (void); - +RD_EXPORT RD_DEPRECATED int rd_kafka_errno(void); @@ -761,8 +767,8 @@ int rd_kafka_errno (void); * any other error code. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, - char *errstr, size_t errstr_size); +rd_kafka_resp_err_t +rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size); /** @@ -782,9 +788,9 @@ rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error * has already been triggered. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason); /** @@ -792,7 +798,7 @@ rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, * \p error is NULL. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_error_code (const rd_kafka_error_t *error); +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error); /** * @returns the error code name for \p error, e.g, "ERR_UNKNOWN_MEMBER_ID", @@ -803,7 +809,7 @@ rd_kafka_resp_err_t rd_kafka_error_code (const rd_kafka_error_t *error); * @sa rd_kafka_err2name() */ RD_EXPORT -const char *rd_kafka_error_name (const rd_kafka_error_t *error); +const char *rd_kafka_error_name(const rd_kafka_error_t *error); /** * @returns a human readable error string for \p error, @@ -812,7 +818,7 @@ const char *rd_kafka_error_name (const rd_kafka_error_t *error); * @remark The lifetime of the returned pointer is the same as the error object. */ RD_EXPORT -const char *rd_kafka_error_string (const rd_kafka_error_t *error); +const char *rd_kafka_error_string(const rd_kafka_error_t *error); /** @@ -820,7 +826,7 @@ const char *rd_kafka_error_string (const rd_kafka_error_t *error); * instance is no longer usable, else 0 (also if \p error is NULL). 
*/ RD_EXPORT -int rd_kafka_error_is_fatal (const rd_kafka_error_t *error); +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error); /** @@ -828,7 +834,7 @@ int rd_kafka_error_is_fatal (const rd_kafka_error_t *error); * else 0 (also if \p error is NULL). */ RD_EXPORT -int rd_kafka_error_is_retriable (const rd_kafka_error_t *error); +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error); /** @@ -842,7 +848,7 @@ int rd_kafka_error_is_retriable (const rd_kafka_error_t *error); * by the transactional API. */ RD_EXPORT -int rd_kafka_error_txn_requires_abort (const rd_kafka_error_t *error); +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error); /** * @brief Free and destroy an error object. @@ -850,7 +856,7 @@ int rd_kafka_error_txn_requires_abort (const rd_kafka_error_t *error); * @remark As a conveniance it is permitted to pass a NULL \p error. */ RD_EXPORT -void rd_kafka_error_destroy (rd_kafka_error_t *error); +void rd_kafka_error_destroy(rd_kafka_error_t *error); /** @@ -862,9 +868,9 @@ void rd_kafka_error_destroy (rd_kafka_error_t *error); * The returned object must be destroyed with rd_kafka_error_destroy(). */ RD_EXPORT -rd_kafka_error_t *rd_kafka_error_new (rd_kafka_resp_err_t code, - const char *fmt, ...) - RD_FORMAT(printf, 2, 3); +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, + const char *fmt, + ...) RD_FORMAT(printf, 2, 3); /** @@ -883,15 +889,15 @@ rd_kafka_error_t *rd_kafka_error_new (rd_kafka_resp_err_t code, * @sa rd_kafka_topic_partition_list_new() */ typedef struct rd_kafka_topic_partition_s { - char *topic; /**< Topic name */ - int32_t partition; /**< Partition */ - int64_t offset; /**< Offset */ - void *metadata; /**< Metadata */ - size_t metadata_size; /**< Metadata size */ - void *opaque; /**< Opaque value for application use */ - rd_kafka_resp_err_t err; /**< Error code, depending on use. 
*/ - void *_private; /**< INTERNAL USE ONLY, - * INITIALIZE TO ZERO, DO NOT TOUCH */ + char *topic; /**< Topic name */ + int32_t partition; /**< Partition */ + int64_t offset; /**< Offset */ + void *metadata; /**< Metadata */ + size_t metadata_size; /**< Metadata size */ + void *opaque; /**< Opaque value for application use */ + rd_kafka_resp_err_t err; /**< Error code, depending on use. */ + void *_private; /**< INTERNAL USE ONLY, + * INITIALIZE TO ZERO, DO NOT TOUCH */ } rd_kafka_topic_partition_t; @@ -900,7 +906,7 @@ typedef struct rd_kafka_topic_partition_s { * @remark This must not be called for elements in a topic partition list. */ RD_EXPORT -void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar); +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar); /** @@ -908,8 +914,8 @@ void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar); * */ typedef struct rd_kafka_topic_partition_list_s { - int cnt; /**< Current number of elements */ - int size; /**< Current allocated size */ + int cnt; /**< Current number of elements */ + int size; /**< Current allocated size */ rd_kafka_topic_partition_t *elems; /**< Element array[] */ } rd_kafka_topic_partition_list_t; @@ -929,15 +935,15 @@ typedef struct rd_kafka_topic_partition_list_s { * @sa rd_kafka_topic_partition_list_add() */ RD_EXPORT -rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size); +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size); /** * @brief Free all resources used by the list and the list itself. 
*/ RD_EXPORT -void -rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rkparlist); +void rd_kafka_topic_partition_list_destroy( + rd_kafka_topic_partition_list_t *rkparlist); /** * @brief Add topic+partition to list @@ -950,8 +956,9 @@ rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rkparlis */ RD_EXPORT rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); +rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); /** @@ -963,11 +970,11 @@ rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, * @param stop Last partition of range (inclusive) */ RD_EXPORT -void -rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t - *rktparlist, - const char *topic, - int32_t start, int32_t stop); +void rd_kafka_topic_partition_list_add_range( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t start, + int32_t stop); @@ -983,9 +990,10 @@ rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t * @remark Any held indices to elems[] are unusable after this call returns 1. 
*/ RD_EXPORT -int -rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); +int rd_kafka_topic_partition_list_del( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); /** @@ -996,10 +1004,9 @@ rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, * @sa rd_kafka_topic_partition_list_del() */ RD_EXPORT -int -rd_kafka_topic_partition_list_del_by_idx ( - rd_kafka_topic_partition_list_t *rktparlist, - int idx); +int rd_kafka_topic_partition_list_del_by_idx( + rd_kafka_topic_partition_list_t *rktparlist, + int idx); /** @@ -1011,8 +1018,7 @@ rd_kafka_topic_partition_list_del_by_idx ( */ RD_EXPORT rd_kafka_topic_partition_list_t * -rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src); - +rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src); @@ -1024,9 +1030,11 @@ rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src); * in the list. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, int64_t offset); +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + int64_t offset); @@ -1036,10 +1044,10 @@ rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( * @returns a pointer to the first matching element, or NULL if not found. 
*/ RD_EXPORT -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_find ( - const rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); /** @@ -1051,11 +1059,10 @@ rd_kafka_topic_partition_list_find ( * \p cmp_opaque is provided as the \p cmp_opaque argument to \p cmp. * */ -RD_EXPORT void -rd_kafka_topic_partition_list_sort (rd_kafka_topic_partition_list_t *rktparlist, - int (*cmp) (const void *a, const void *b, - void *cmp_opaque), - void *cmp_opaque); +RD_EXPORT void rd_kafka_topic_partition_list_sort( + rd_kafka_topic_partition_list_t *rktparlist, + int (*cmp)(const void *a, const void *b, void *cmp_opaque), + void *cmp_opaque); /**@}*/ @@ -1080,14 +1087,14 @@ typedef enum rd_kafka_vtype_t { RD_KAFKA_VTYPE_TOPIC, /**< (const char *) Topic name */ RD_KAFKA_VTYPE_RKT, /**< (rd_kafka_topic_t *) Topic handle */ RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */ - RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/ - RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */ - RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Per-message application opaque - * value. This is the same as - * the _private field in - * rd_kafka_message_t, also known - * as the msg_opaque. */ - RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. flags */ + RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/ + RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */ + RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Per-message application opaque + * value. This is the same as + * the _private field in + * rd_kafka_message_t, also known + * as the msg_opaque. */ + RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. 
flags */ RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */ RD_KAFKA_VTYPE_HEADER, /**< (const char *, const void *, ssize_t) * Message Header */ @@ -1102,7 +1109,7 @@ typedef enum rd_kafka_vtype_t { * to which RD_KAFKA_VTYPE_... */ typedef struct rd_kafka_vu_s { - rd_kafka_vtype_t vtype; /**< RD_KAFKA_VTYPE_.. */ + rd_kafka_vtype_t vtype; /**< RD_KAFKA_VTYPE_.. */ /** Value union, see RD_KAFKA_V_.. macros for which field to use. */ union { const char *cstr; @@ -1121,7 +1128,7 @@ typedef struct rd_kafka_vu_s { } header; rd_kafka_headers_t *headers; void *ptr; - char _pad[64]; /**< Padding size for future-proofness */ + char _pad[64]; /**< Padding size for future-proofness */ } u; } rd_kafka_vu_t; @@ -1140,41 +1147,41 @@ typedef struct rd_kafka_vu_s { * * rd_kafka_vu_t field: u.cstr */ -#define RD_KAFKA_V_TOPIC(topic) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \ - (const char *)topic +#define RD_KAFKA_V_TOPIC(topic) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \ + (const char *)topic /*! * Topic object (rd_kafka_topic_t *) * * rd_kafka_vu_t field: u.rkt */ -#define RD_KAFKA_V_RKT(rkt) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \ - (rd_kafka_topic_t *)rkt +#define RD_KAFKA_V_RKT(rkt) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \ + (rd_kafka_topic_t *)rkt /*! * Partition (int32_t) * * rd_kafka_vu_t field: u.i32 */ -#define RD_KAFKA_V_PARTITION(partition) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \ - (int32_t)partition +#define RD_KAFKA_V_PARTITION(partition) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \ + (int32_t)partition /*! 
* Message value/payload pointer and length (void *, size_t) * * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size */ -#define RD_KAFKA_V_VALUE(VALUE,LEN) \ - _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \ - (void *)VALUE, (size_t)LEN +#define RD_KAFKA_V_VALUE(VALUE, LEN) \ + _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \ + (void *)VALUE, (size_t)LEN /*! * Message key pointer and length (const void *, size_t) * * rd_kafka_vu_t field: u.mem.ptr, rd_kafka_vu.t.u.mem.size */ -#define RD_KAFKA_V_KEY(KEY,LEN) \ - _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \ - (void *)KEY, (size_t)LEN +#define RD_KAFKA_V_KEY(KEY, LEN) \ + _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \ + (void *)KEY, (size_t)LEN /*! * Message opaque pointer (void *) * Same as \c msg_opaque, \c produce(.., msg_opaque), @@ -1182,27 +1189,26 @@ typedef struct rd_kafka_vu_s { * * rd_kafka_vu_t field: u.ptr */ -#define RD_KAFKA_V_OPAQUE(msg_opaque) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, msg_opaque), \ - (void *)msg_opaque +#define RD_KAFKA_V_OPAQUE(msg_opaque) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, msg_opaque), \ + (void *)msg_opaque /*! * Message flags (int) * @sa RD_KAFKA_MSG_F_COPY, et.al. * * rd_kafka_vu_t field: u.i */ -#define RD_KAFKA_V_MSGFLAGS(msgflags) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), \ - (int)msgflags +#define RD_KAFKA_V_MSGFLAGS(msgflags) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), (int)msgflags /*! * Timestamp in milliseconds since epoch UTC (int64_t). * A value of 0 will use the current wall-clock time. * * rd_kafka_vu_t field: u.i64 */ -#define RD_KAFKA_V_TIMESTAMP(timestamp) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \ - (int64_t)timestamp +#define RD_KAFKA_V_TIMESTAMP(timestamp) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \ + (int64_t)timestamp /*! 
* Add Message Header (const char *NAME, const void *VALUE, ssize_t LEN). * @sa rd_kafka_header_add() @@ -1211,10 +1217,10 @@ typedef struct rd_kafka_vu_s { * * rd_kafka_vu_t fields: u.header.name, u.header.val, u.header.size */ -#define RD_KAFKA_V_HEADER(NAME,VALUE,LEN) \ - _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME, \ - const void *, VALUE, ssize_t, LEN), \ - (const char *)NAME, (const void *)VALUE, (ssize_t)LEN +#define RD_KAFKA_V_HEADER(NAME, VALUE, LEN) \ + _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME, \ + const void *, VALUE, ssize_t, LEN), \ + (const char *)NAME, (const void *)VALUE, (ssize_t)LEN /*! * Message Headers list (rd_kafka_headers_t *). @@ -1227,9 +1233,9 @@ typedef struct rd_kafka_vu_s { * * rd_kafka_vu_t fields: u.headers */ -#define RD_KAFKA_V_HEADERS(HDRS) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS), \ - (rd_kafka_headers_t *)HDRS +#define RD_KAFKA_V_HEADERS(HDRS) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS), \ + (rd_kafka_headers_t *)HDRS /**@}*/ @@ -1262,19 +1268,19 @@ typedef struct rd_kafka_vu_s { * Any number of headers may be added, updated and * removed regardless of the initial count. */ -RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new (size_t initial_count); +RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count); /** * @brief Destroy the headers list. The object and any returned value pointers * are not usable after this call. */ -RD_EXPORT void rd_kafka_headers_destroy (rd_kafka_headers_t *hdrs); +RD_EXPORT void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs); /** * @brief Make a copy of headers list \p src. 
*/ RD_EXPORT rd_kafka_headers_t * -rd_kafka_headers_copy (const rd_kafka_headers_t *src); +rd_kafka_headers_copy(const rd_kafka_headers_t *src); /** * @brief Add header with name \p name and value \p val (copied) of size @@ -1293,10 +1299,11 @@ rd_kafka_headers_copy (const rd_kafka_headers_t *src); * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, * else RD_KAFKA_RESP_ERR_NO_ERROR. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_add (rd_kafka_headers_t *hdrs, - const char *name, ssize_t name_size, - const void *value, ssize_t value_size); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, + const char *name, + ssize_t name_size, + const void *value, + ssize_t value_size); /** * @brief Remove all headers for the given key (if any). @@ -1305,8 +1312,8 @@ rd_kafka_header_add (rd_kafka_headers_t *hdrs, * RD_KAFKA_RESP_ERR__NOENT if no matching headers were found, * else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_remove (rd_kafka_headers_t *hdrs, const char *name); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, + const char *name); /** @@ -1327,8 +1334,10 @@ rd_kafka_header_remove (rd_kafka_headers_t *hdrs, const char *name); * the header item is valid. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, - const char *name, const void **valuep, size_t *sizep); +rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, + const char *name, + const void **valuep, + size_t *sizep); /** * @brief Iterator for headers matching \p name. @@ -1344,8 +1353,11 @@ rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, * @param sizep (out) Set to the value's size (not including null-terminator). 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, - const char *name, const void **valuep, size_t *sizep); +rd_kafka_header_get(const rd_kafka_headers_t *hdrs, + size_t idx, + const char *name, + const void **valuep, + size_t *sizep); /** @@ -1356,9 +1368,11 @@ rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, * @sa rd_kafka_header_get() */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get_all (const rd_kafka_headers_t *hdrs, size_t idx, - const char **namep, - const void **valuep, size_t *sizep); +rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, + size_t idx, + const char **namep, + const void **valuep, + size_t *sizep); @@ -1391,34 +1405,34 @@ rd_kafka_header_get_all (const rd_kafka_headers_t *hdrs, size_t idx, * rd_kafka_message_destroy() unless otherwise noted. */ typedef struct rd_kafka_message_s { - rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */ - rd_kafka_topic_t *rkt; /**< Topic */ - int32_t partition; /**< Partition */ - void *payload; /**< Producer: original message payload. - * Consumer: Depends on the value of \c err : - * - \c err==0: Message payload. - * - \c err!=0: Error string */ - size_t len; /**< Depends on the value of \c err : - * - \c err==0: Message payload length - * - \c err!=0: Error string length */ - void *key; /**< Depends on the value of \c err : - * - \c err==0: Optional message key */ - size_t key_len; /**< Depends on the value of \c err : - * - \c err==0: Optional message key length*/ - int64_t offset; /**< Consumer: - * - Message offset (or offset for error - * if \c err!=0 if applicable). - * Producer, dr_msg_cb: - * Message offset assigned by broker. - * May be RD_KAFKA_OFFSET_INVALID - * for retried messages when - * idempotence is enabled. */ - void *_private; /**< Consumer: - * - rdkafka private pointer: DO NOT MODIFY - * Producer: - * - dr_msg_cb: - * msg_opaque from produce() call or - * RD_KAFKA_V_OPAQUE from producev(). 
*/ + rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */ + rd_kafka_topic_t *rkt; /**< Topic */ + int32_t partition; /**< Partition */ + void *payload; /**< Producer: original message payload. + * Consumer: Depends on the value of \c err : + * - \c err==0: Message payload. + * - \c err!=0: Error string */ + size_t len; /**< Depends on the value of \c err : + * - \c err==0: Message payload length + * - \c err!=0: Error string length */ + void *key; /**< Depends on the value of \c err : + * - \c err==0: Optional message key */ + size_t key_len; /**< Depends on the value of \c err : + * - \c err==0: Optional message key length*/ + int64_t offset; /**< Consumer: + * - Message offset (or offset for error + * if \c err!=0 if applicable). + * Producer, dr_msg_cb: + * Message offset assigned by broker. + * May be RD_KAFKA_OFFSET_INVALID + * for retried messages when + * idempotence is enabled. */ + void *_private; /**< Consumer: + * - rdkafka private pointer: DO NOT MODIFY + * Producer: + * - dr_msg_cb: + * msg_opaque from produce() call or + * RD_KAFKA_V_OPAQUE from producev(). */ } rd_kafka_message_t; @@ -1430,7 +1444,6 @@ void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage); - /** * @brief Returns the error string for an errored rd_kafka_message_t or NULL if * there was no error. @@ -1438,7 +1451,7 @@ void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage); * @remark This function MUST NOT be used with the producer. */ RD_EXPORT -const char *rd_kafka_message_errstr (const rd_kafka_message_t *rkmessage); +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage); /** @@ -1453,8 +1466,8 @@ const char *rd_kafka_message_errstr (const rd_kafka_message_t *rkmessage); * @remark Message timestamps require broker version 0.10.0 or later. 
*/ RD_EXPORT -int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, - rd_kafka_timestamp_type_t *tstype); +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, + rd_kafka_timestamp_type_t *tstype); @@ -1465,7 +1478,7 @@ int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, * @returns the latency in microseconds, or -1 if not available. */ RD_EXPORT -int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage); +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage); /** @@ -1475,7 +1488,7 @@ int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage); * @returns a broker id if known, else -1. */ RD_EXPORT -int32_t rd_kafka_message_broker_id (const rd_kafka_message_t *rkmessage); +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage); /** @@ -1495,8 +1508,8 @@ int32_t rd_kafka_message_broker_id (const rd_kafka_message_t *rkmessage); * the first call to this function. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp); +rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp); /** * @brief Get the message header list and detach the list from the message @@ -1510,8 +1523,8 @@ rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, * @sa rd_kafka_message_headers */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp); +rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp); /** @@ -1527,8 +1540,8 @@ rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, * @remark The existing headers object, if any, will be destroyed. 
*/ RD_EXPORT -void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t *hdrs); +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t *hdrs); /** @@ -1536,7 +1549,7 @@ void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, * * @param hdrs Headers to count */ -RD_EXPORT size_t rd_kafka_header_cnt (const rd_kafka_headers_t *hdrs); +RD_EXPORT size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs); /** @@ -1558,7 +1571,7 @@ typedef enum { /** Message was written to the log and acknowledged by the broker. * No reason for application to retry. * Note: this value should only be trusted with \c acks=all. */ - RD_KAFKA_MSG_STATUS_PERSISTED = 2 + RD_KAFKA_MSG_STATUS_PERSISTED = 2 } rd_kafka_msg_status_t; @@ -1569,7 +1582,7 @@ typedef enum { * interceptors. */ RD_EXPORT rd_kafka_msg_status_t -rd_kafka_message_status (const rd_kafka_message_t *rkmessage); +rd_kafka_message_status(const rd_kafka_message_t *rkmessage); /**@}*/ @@ -1587,11 +1600,11 @@ rd_kafka_message_status (const rd_kafka_message_t *rkmessage); * @brief Configuration result type */ typedef enum { - RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */ - RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value or + RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */ + RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value or * property or value not supported in * this build. */ - RD_KAFKA_CONF_OK = 0 /**< Configuration okay */ + RD_KAFKA_CONF_OK = 0 /**< Configuration okay */ } rd_kafka_conf_res_t; @@ -1654,9 +1667,9 @@ rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf); * prefixes to filter out (ignore) when copying. 
*/ RD_EXPORT -rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf, - size_t filter_cnt, - const char **filter); +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, + size_t filter_cnt, + const char **filter); @@ -1669,7 +1682,7 @@ rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf, * as the rd_kafka_t object. */ RD_EXPORT -const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk); +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk); /** @@ -1693,9 +1706,10 @@ const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk); */ RD_EXPORT rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size); + const char *name, + const char *value, + char *errstr, + size_t errstr_size); /** @@ -1742,11 +1756,9 @@ void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events); * * @sa rd_kafka_queue_get_background */ -RD_EXPORT void -rd_kafka_conf_set_background_event_cb (rd_kafka_conf_t *conf, - void (*event_cb) (rd_kafka_t *rk, - rd_kafka_event_t *rkev, - void *opaque)); +RD_EXPORT void rd_kafka_conf_set_background_event_cb( + rd_kafka_conf_t *conf, + void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque)); /** @@ -1754,10 +1766,12 @@ rd_kafka_conf_set_background_event_cb (rd_kafka_conf_t *conf, */ RD_EXPORT void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, - void (*dr_cb) (rd_kafka_t *rk, - void *payload, size_t len, - rd_kafka_resp_err_t err, - void *opaque, void *msg_opaque)); + void (*dr_cb)(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque)); /** * @brief \b Producer: Set delivery report callback in provided \p conf object. @@ -1790,11 +1804,11 @@ void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, * acknowledged. 
*/ RD_EXPORT -void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, - void (*dr_msg_cb) (rd_kafka_t *rk, - const rd_kafka_message_t * - rkmessage, - void *opaque)); +void rd_kafka_conf_set_dr_msg_cb( + rd_kafka_conf_t *conf, + void (*dr_msg_cb)(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque)); /** @@ -1805,10 +1819,9 @@ void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, * rd_kafka_conf_set_opaque(). */ RD_EXPORT -void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, - void (*consume_cb) (rd_kafka_message_t * - rkmessage, - void *opaque)); +void rd_kafka_conf_set_consume_cb( + rd_kafka_conf_t *conf, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque)); /** * @brief \b Consumer: Set rebalance callback for use with @@ -1915,12 +1928,12 @@ void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, * the examples/ directory. */ RD_EXPORT -void rd_kafka_conf_set_rebalance_cb ( - rd_kafka_conf_t *conf, - void (*rebalance_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque)); +void rd_kafka_conf_set_rebalance_cb( + rd_kafka_conf_t *conf, + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque)); @@ -1942,12 +1955,12 @@ void rd_kafka_conf_set_rebalance_cb ( * rd_kafka_conf_set_opaque(). 
*/ RD_EXPORT -void rd_kafka_conf_set_offset_commit_cb ( - rd_kafka_conf_t *conf, - void (*offset_commit_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque)); +void rd_kafka_conf_set_offset_commit_cb( + rd_kafka_conf_t *conf, + void (*offset_commit_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque)); /** @@ -1974,9 +1987,10 @@ void rd_kafka_conf_set_offset_commit_cb ( */ RD_EXPORT void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, - void (*error_cb) (rd_kafka_t *rk, int err, - const char *reason, - void *opaque)); + void (*error_cb)(rd_kafka_t *rk, + int err, + const char *reason, + void *opaque)); /** * @brief Set throttle callback. @@ -1996,13 +2010,12 @@ void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, * @remark Requires broker version 0.9.0 or later. */ RD_EXPORT -void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf, - void (*throttle_cb) ( - rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int throttle_time_ms, - void *opaque)); +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, + void (*throttle_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque)); /** @@ -2023,8 +2036,10 @@ void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf, */ RD_EXPORT void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, - void (*log_cb) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)); + void (*log_cb)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)); /** @@ -2050,11 +2065,9 @@ void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, * See STATISTICS.md for a full definition of the JSON object. 
*/ RD_EXPORT -void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, - int (*stats_cb) (rd_kafka_t *rk, - char *json, - size_t json_len, - void *opaque)); +void rd_kafka_conf_set_stats_cb( + rd_kafka_conf_t *conf, + int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque)); /** * @brief Set SASL/OAUTHBEARER token refresh callback in provided conf object. @@ -2106,11 +2119,11 @@ void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, * @sa rd_kafka_queue_get_sasl() */ RD_EXPORT -void rd_kafka_conf_set_oauthbearer_token_refresh_cb ( - rd_kafka_conf_t *conf, - void (*oauthbearer_token_refresh_cb) (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque)); +void rd_kafka_conf_set_oauthbearer_token_refresh_cb( + rd_kafka_conf_t *conf, + void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque)); /** * @brief Enable/disable creation of a queue specific to SASL events @@ -2140,7 +2153,7 @@ void rd_kafka_conf_set_oauthbearer_token_refresh_cb ( */ RD_EXPORT -void rd_kafka_conf_enable_sasl_queue (rd_kafka_conf_t *conf, int enable); +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable); /** @@ -2161,10 +2174,9 @@ void rd_kafka_conf_enable_sasl_queue (rd_kafka_conf_t *conf, int enable); * @remark The callback will be called from an internal librdkafka thread. */ RD_EXPORT -void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, - int (*socket_cb) (int domain, int type, - int protocol, - void *opaque)); +void rd_kafka_conf_set_socket_cb( + rd_kafka_conf_t *conf, + int (*socket_cb)(int domain, int type, int protocol, void *opaque)); @@ -2184,12 +2196,12 @@ void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, * @remark The callback will be called from an internal librdkafka thread. 
*/ RD_EXPORT void -rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf, - int (*connect_cb) (int sockfd, - const struct sockaddr *addr, - int addrlen, - const char *id, - void *opaque)); +rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, + int (*connect_cb)(int sockfd, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque)); /** * @brief Set close socket callback. @@ -2201,10 +2213,9 @@ rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf, * * @remark The callback will be called from an internal librdkafka thread. */ -RD_EXPORT void -rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, - int (*closesocket_cb) (int sockfd, - void *opaque)); +RD_EXPORT void rd_kafka_conf_set_closesocket_cb( + rd_kafka_conf_t *conf, + int (*closesocket_cb)(int sockfd, void *opaque)); @@ -2227,10 +2238,9 @@ rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, * @remark The callback will be called from an internal librdkafka thread. */ RD_EXPORT -void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, - int (*open_cb) (const char *pathname, - int flags, mode_t mode, - void *opaque)); +void rd_kafka_conf_set_open_cb( + rd_kafka_conf_t *conf, + int (*open_cb)(const char *pathname, int flags, mode_t mode, void *opaque)); #endif @@ -2276,16 +2286,18 @@ void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, * for a list of \p x509_error codes. 
*/ RD_EXPORT -rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb ( - rd_kafka_conf_t *conf, - int (*ssl_cert_verify_cb) (rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque)); +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb( + rd_kafka_conf_t *conf, + int (*ssl_cert_verify_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque)); /** @@ -2310,9 +2322,9 @@ typedef enum rd_kafka_cert_type_t { * @sa rd_kafka_conf_set_ssl_cert */ typedef enum rd_kafka_cert_enc_t { - RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */ - RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ - RD_KAFKA_CERT_ENC_PEM, /**< PEM */ + RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */ + RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ + RD_KAFKA_CERT_ENC_PEM, /**< PEM */ RD_KAFKA_CERT_ENC__CNT, } rd_kafka_cert_enc_t; @@ -2352,11 +2364,13 @@ typedef enum rd_kafka_cert_enc_t { * `ssl.ca.pem` configuration property. */ RD_EXPORT rd_kafka_conf_res_t -rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, - rd_kafka_cert_type_t cert_type, - rd_kafka_cert_enc_t cert_enc, - const void *buffer, size_t size, - char *errstr, size_t errstr_size); +rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, + rd_kafka_cert_type_t cert_type, + rd_kafka_cert_enc_t cert_enc, + const void *buffer, + size_t size, + char *errstr, + size_t errstr_size); /** @@ -2374,8 +2388,8 @@ rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, * use it. */ RD_EXPORT -void rd_kafka_conf_set_engine_callback_data (rd_kafka_conf_t *conf, - void *callback_data); +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, + void *callback_data); /** @@ -2409,8 +2423,8 @@ void *rd_kafka_opaque(const rd_kafka_t *rk); * global rd_kafka_conf_t object instead. 
*/ RD_EXPORT -void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf); +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf); /** * @brief Gets the default topic configuration as previously set with @@ -2425,7 +2439,7 @@ void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, * rd_kafka_conf_set_default_topic_conf(). */ RD_EXPORT rd_kafka_topic_conf_t * -rd_kafka_conf_get_default_topic_conf (rd_kafka_conf_t *conf); +rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf); /** @@ -2448,9 +2462,10 @@ rd_kafka_conf_get_default_topic_conf (rd_kafka_conf_t *conf); * \p RD_KAFKA_CONF_UNKNOWN. */ RD_EXPORT -rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, - const char *name, - char *dest, size_t *dest_size); +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size); /** @@ -2459,9 +2474,10 @@ rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, * @sa rd_kafka_conf_get() */ RD_EXPORT -rd_kafka_conf_res_t rd_kafka_topic_conf_get (const rd_kafka_topic_conf_t *conf, - const char *name, - char *dest, size_t *dest_size); +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size); /** @@ -2486,7 +2502,7 @@ const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp); */ RD_EXPORT const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, - size_t *cntp); + size_t *cntp); /** * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or @@ -2530,15 +2546,15 @@ rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void); * @brief Creates a copy/duplicate of topic configuration object \p conf. 
*/ RD_EXPORT -rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t - *conf); +rd_kafka_topic_conf_t * +rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf); /** * @brief Creates a copy/duplicate of \p rk 's default topic configuration * object. */ RD_EXPORT -rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup (rd_kafka_t *rk); +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk); /** @@ -2558,9 +2574,10 @@ void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf); */ RD_EXPORT rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size); + const char *name, + const char *value, + char *errstr, + size_t errstr_size); /** * @brief Sets the application's opaque pointer that will be passed to all topic @@ -2593,15 +2610,14 @@ void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, * could not be performed. */ RD_EXPORT -void -rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, - int32_t (*partitioner) ( - const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque)); +void rd_kafka_topic_conf_set_partitioner_cb( + rd_kafka_topic_conf_t *topic_conf, + int32_t (*partitioner)(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque)); /** @@ -2629,11 +2645,10 @@ rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL, * DO NOT USE IN PRODUCTION. 
*/ -RD_EXPORT void -rd_kafka_topic_conf_set_msg_order_cmp (rd_kafka_topic_conf_t *topic_conf, - int (*msg_order_cmp) ( - const rd_kafka_message_t *a, - const rd_kafka_message_t *b)); +RD_EXPORT void rd_kafka_topic_conf_set_msg_order_cmp( + rd_kafka_topic_conf_t *topic_conf, + int (*msg_order_cmp)(const rd_kafka_message_t *a, + const rd_kafka_message_t *b)); /** @@ -2645,13 +2660,13 @@ rd_kafka_topic_conf_set_msg_order_cmp (rd_kafka_topic_conf_t *topic_conf, */ RD_EXPORT int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, - int32_t partition); + int32_t partition); /******************************************************************* - * * + * * * Partitioners provided by rdkafka * - * * + * * *******************************************************************/ /** @@ -2669,9 +2684,11 @@ int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, */ RD_EXPORT int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, void *msg_opaque); + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** * @brief Consistent partitioner. @@ -2687,10 +2704,12 @@ int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, * the CRC value of the key */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, void *msg_opaque); +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** * @brief Consistent-Random partitioner. 
@@ -2708,10 +2727,12 @@ int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, * the CRC value of the key (if provided) */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, void *msg_opaque); +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** @@ -2728,11 +2749,12 @@ int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, * @returns a partition between 0 and \p partition_cnt - 1. */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_murmur2 (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** * @brief Consistent-Random Murmur2 partitioner (Java compatible). @@ -2749,11 +2771,12 @@ int32_t rd_kafka_msg_partitioner_murmur2 (const rd_kafka_topic_t *rkt, * @returns a partition between 0 and \p partition_cnt - 1. */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_murmur2_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** @@ -2770,11 +2793,12 @@ int32_t rd_kafka_msg_partitioner_murmur2_random (const rd_kafka_topic_t *rkt, * @returns a partition between 0 and \p partition_cnt - 1. 
*/ RD_EXPORT -int32_t rd_kafka_msg_partitioner_fnv1a (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** @@ -2792,11 +2816,12 @@ int32_t rd_kafka_msg_partitioner_fnv1a (const rd_kafka_topic_t *rkt, * @returns a partition between 0 and \p partition_cnt - 1. */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_fnv1a_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /**@}*/ @@ -2812,7 +2837,6 @@ int32_t rd_kafka_msg_partitioner_fnv1a_random (const rd_kafka_topic_t *rkt, - /** * @brief Creates a new Kafka handle and starts its operation according to the * specified \p type (\p RD_KAFKA_CONSUMER or \p RD_KAFKA_PRODUCER). @@ -2840,8 +2864,10 @@ int32_t rd_kafka_msg_partitioner_fnv1a_random (const rd_kafka_topic_t *rkt, * @sa To destroy the Kafka handle, use rd_kafka_destroy(). 
*/ RD_EXPORT -rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, - char *errstr, size_t errstr_size); +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, + rd_kafka_conf_t *conf, + char *errstr, + size_t errstr_size); /** @@ -2859,7 +2885,7 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, * @sa rd_kafka_destroy_flags() */ RD_EXPORT -void rd_kafka_destroy(rd_kafka_t *rk); +void rd_kafka_destroy(rd_kafka_t *rk); /** @@ -2867,7 +2893,7 @@ void rd_kafka_destroy(rd_kafka_t *rk); * */ RD_EXPORT -void rd_kafka_destroy_flags (rd_kafka_t *rk, int flags); +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags); /** * @brief Flags for rd_kafka_destroy_flags() @@ -2912,7 +2938,7 @@ rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk); * rd_kafka_mem_free() */ RD_EXPORT -char *rd_kafka_memberid (const rd_kafka_t *rk); +char *rd_kafka_memberid(const rd_kafka_t *rk); @@ -2935,7 +2961,7 @@ char *rd_kafka_memberid (const rd_kafka_t *rk); * if no ClusterId could be retrieved in the allotted timespan. */ RD_EXPORT -char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms); +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms); /** @@ -2954,7 +2980,7 @@ char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms); * retrieved in the allotted timespan. */ RD_EXPORT -int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms); +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms); /** @@ -2979,8 +3005,9 @@ int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms); * @sa rd_kafka_topic_destroy() */ RD_EXPORT -rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, - rd_kafka_topic_conf_t *conf); +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, + const char *topic, + rd_kafka_topic_conf_t *conf); @@ -3008,7 +3035,7 @@ const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt); * with rd_kafka_topic_conf_set_opaque(). 
*/ RD_EXPORT -void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt); /** @@ -3017,7 +3044,7 @@ void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); * The unassigned partition is used by the producer API for messages * that should be partitioned using the configured or default partitioner. */ -#define RD_KAFKA_PARTITION_UA ((int32_t)-1) +#define RD_KAFKA_PARTITION_UA ((int32_t)-1) /** @@ -3042,7 +3069,8 @@ void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); * - error callbacks (rd_kafka_conf_set_error_cb()) [all] * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all] * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all] - * - OAUTHBEARER token refresh callbacks (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all] + * - OAUTHBEARER token refresh callbacks + * (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all] * * @returns the number of events served. */ @@ -3061,8 +3089,7 @@ int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms); * @remark This function MUST ONLY be called from within a librdkafka callback. */ RD_EXPORT -void rd_kafka_yield (rd_kafka_t *rk); - +void rd_kafka_yield(rd_kafka_t *rk); @@ -3074,8 +3101,8 @@ void rd_kafka_yield (rd_kafka_t *rk); * @returns RD_KAFKA_RESP_ERR_NO_ERROR */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_pause_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); +rd_kafka_pause_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); @@ -3087,9 +3114,8 @@ rd_kafka_pause_partitions (rd_kafka_t *rk, * @returns RD_KAFKA_RESP_ERR_NO_ERROR */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_resume_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); - +rd_kafka_resume_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); @@ -3102,9 +3128,12 @@ rd_kafka_resume_partitions (rd_kafka_t *rk, * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_query_watermark_offsets (rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t *low, int64_t *high, int timeout_ms); +rd_kafka_query_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high, + int timeout_ms); /** @@ -3123,10 +3152,11 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, * * @remark Shall only be used with an active consumer instance. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_get_watermark_offsets (rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t *low, int64_t *high); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high); @@ -3143,7 +3173,8 @@ rd_kafka_get_watermark_offsets (rd_kafka_t *rk, * The function will block for at most \p timeout_ms milliseconds. * * @remark Duplicate Topic+Partitions are not supported. - * @remark Per-partition errors may be returned in \c rd_kafka_topic_partition_t.err + * @remark Per-partition errors may be returned in \c + * rd_kafka_topic_partition_t.err * * @returns RD_KAFKA_RESP_ERR_NO_ERROR if offsets were be queried (do note * that per-partition errors might be set), @@ -3155,9 +3186,9 @@ rd_kafka_get_watermark_offsets (rd_kafka_t *rk, * for the given partitions. 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_offsets_for_times (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets, - int timeout_ms); +rd_kafka_offsets_for_times(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets, + int timeout_ms); @@ -3175,7 +3206,7 @@ rd_kafka_offsets_for_times (rd_kafka_t *rk, * rd_kafka_mem_free() */ RD_EXPORT -void *rd_kafka_mem_calloc (rd_kafka_t *rk, size_t num, size_t size); +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size); @@ -3193,7 +3224,7 @@ void *rd_kafka_mem_calloc (rd_kafka_t *rk, size_t num, size_t size); * rd_kafka_mem_free() */ RD_EXPORT -void *rd_kafka_mem_malloc (rd_kafka_t *rk, size_t size); +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size); @@ -3214,15 +3245,13 @@ void *rd_kafka_mem_malloc (rd_kafka_t *rk, size_t size); * that explicitly mention using this function for freeing. */ RD_EXPORT -void rd_kafka_mem_free (rd_kafka_t *rk, void *ptr); +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr); /**@}*/ - - /** * @name Queue API * @{ @@ -3257,7 +3286,7 @@ void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu); * Use rd_kafka_queue_destroy() to loose the reference. 
*/ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk); +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk); @@ -3271,7 +3300,7 @@ rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk); * @sa rd_kafka_sasl_background_callbacks_enable() */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_sasl (rd_kafka_t *rk); +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk); /** @@ -3290,7 +3319,7 @@ rd_kafka_queue_t *rd_kafka_queue_get_sasl (rd_kafka_t *rk); * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb() */ RD_EXPORT -rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable (rd_kafka_t *rk); +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk); /** @@ -3303,7 +3332,7 @@ rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable (rd_kafka_t *rk); * prior to calling rd_kafka_consumer_close(). */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk); +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk); /** * @returns a reference to the partition's queue, or NULL if @@ -3316,9 +3345,9 @@ rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk); * @remark This function only works on consumers. */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, - const char *topic, - int32_t partition); +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, + const char *topic, + int32_t partition); /** * @returns a reference to the background thread queue, or NULL if the @@ -3343,7 +3372,7 @@ rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, * or forwarded from. */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk); +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk); /** @@ -3357,7 +3386,7 @@ rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk); * queue. 
*/ RD_EXPORT -void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst); +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst); /** * @brief Forward librdkafka logs (and debug) to the specified queue @@ -3377,15 +3406,15 @@ void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst); * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk, - rd_kafka_queue_t *rkqu); +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu); /** * @returns the current number of elements in queue. */ RD_EXPORT -size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu); +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu); /** @@ -3406,8 +3435,10 @@ size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu); * @remark The file-descriptor/socket must be set to non-blocking. */ RD_EXPORT -void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, - const void *payload, size_t size); +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, + int fd, + const void *payload, + size_t size); /** * @brief Enable callback event triggering for queue. @@ -3426,10 +3457,10 @@ void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, * handle). */ RD_EXPORT -void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, - void (*event_cb) (rd_kafka_t *rk, - void *qev_opaque), - void *qev_opaque); +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, + void (*event_cb)(rd_kafka_t *rk, + void *qev_opaque), + void *qev_opaque); /** @@ -3440,7 +3471,7 @@ void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, * Must not be used from signal handlers since that may cause deadlocks. 
*/ RD_EXPORT -void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu); +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu); /**@}*/ @@ -3453,12 +3484,15 @@ void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu); */ -#define RD_KAFKA_OFFSET_BEGINNING -2 /**< Start consuming from beginning of - * kafka partition queue: oldest msg */ -#define RD_KAFKA_OFFSET_END -1 /**< Start consuming from end of kafka - * partition queue: next msg */ -#define RD_KAFKA_OFFSET_STORED -1000 /**< Start consuming from offset retrieved - * from offset store */ +#define RD_KAFKA_OFFSET_BEGINNING \ + -2 /**< Start consuming from beginning of \ + * kafka partition queue: oldest msg */ +#define RD_KAFKA_OFFSET_END \ + -1 /**< Start consuming from end of kafka \ + * partition queue: next msg */ +#define RD_KAFKA_OFFSET_STORED \ + -1000 /**< Start consuming from offset retrieved \ + * from offset store */ #define RD_KAFKA_OFFSET_INVALID -1001 /**< Invalid offset */ @@ -3471,7 +3505,7 @@ void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu); * * That is, if current end offset is 12345 and \p CNT is 200, it will start * consuming from offset \c 12345-200 = \c 12145. */ -#define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT)) +#define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT)) /** * @brief Start consuming messages for topic \p rkt and \p partition @@ -3507,8 +3541,9 @@ void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu); * Use `rd_kafka_errno2err()` to convert sytem \c errno to `rd_kafka_resp_err_t` */ RD_EXPORT -int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, - int64_t offset); +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset); /** * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to @@ -3525,8 +3560,10 @@ int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, * be combined for the same topic and partition. 
*/ RD_EXPORT -int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, - int64_t offset, rd_kafka_queue_t *rkqu); +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset, + rd_kafka_queue_t *rkqu); /** * @brief Stop consuming messages for topic \p rkt and \p partition, purging @@ -3570,10 +3607,10 @@ int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition); * @deprecated Use rd_kafka_seek_partitions(). */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt, - int32_t partition, - int64_t offset, - int timeout_ms); +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset, + int timeout_ms); @@ -3607,9 +3644,9 @@ rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt, * @returns NULL on success or an error object on failure. */ RD_EXPORT rd_kafka_error_t * -rd_kafka_seek_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions, - int timeout_ms); +rd_kafka_seek_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms); /** @@ -3637,8 +3674,8 @@ rd_kafka_seek_partitions (rd_kafka_t *rk, * passing message to application. */ RD_EXPORT -rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, - int timeout_ms); +rd_kafka_message_t * +rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms); @@ -3668,10 +3705,11 @@ rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, * passing message to application. 
*/ RD_EXPORT -ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size); +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, + int32_t partition, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size); @@ -3709,11 +3747,11 @@ ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, * poll/queue based alternatives. */ RD_EXPORT -int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, + int32_t partition, int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *commit_opaque), + void (*consume_cb)(rd_kafka_message_t *rkmessage, + void *commit_opaque), void *commit_opaque); @@ -3744,9 +3782,9 @@ rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, */ RD_EXPORT ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size); + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size); /** * @brief Consume multiple messages from queue with callback @@ -3758,19 +3796,17 @@ ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, * poll/queue based alternatives. */ RD_EXPORT -int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu, - int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *commit_opaque), - void *commit_opaque); +int rd_kafka_consume_callback_queue( + rd_kafka_queue_t *rkqu, + int timeout_ms, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), + void *commit_opaque); /**@}*/ - /** * @name Simple Consumer API (legacy): Topic+partition offset store. * @{ @@ -3793,8 +3829,8 @@ int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu, * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. 
*/ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *rkt, - int32_t partition, int64_t offset); +rd_kafka_resp_err_t +rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset); /** @@ -3818,13 +3854,12 @@ rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *rkt, * is true. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_offsets_store (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets); +rd_kafka_offsets_store(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets); /**@}*/ - /** * @name KafkaConsumer (C) * @{ @@ -3874,15 +3909,15 @@ rd_kafka_offsets_store (rd_kafka_t *rk, * RD_KAFKA_RESP_ERR__FATAL if the consumer has raised a fatal error. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_subscribe (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *topics); +rd_kafka_subscribe(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *topics); /** * @brief Unsubscribe from the current subscription set. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk); /** @@ -3895,8 +3930,7 @@ rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk); * rd_kafka_topic_partition_list_destroy on the returned list. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_subscription (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **topics); +rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics); @@ -3930,7 +3964,7 @@ rd_kafka_subscription (rd_kafka_t *rk, * @sa rd_kafka_message_t */ RD_EXPORT -rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, int timeout_ms); +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms); /** * @brief Close down the KafkaConsumer. 
@@ -3950,7 +3984,7 @@ rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, int timeout_ms); * */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk); /** @@ -3971,9 +4005,8 @@ rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk); * rd_kafka_error_destroy(). */ RD_EXPORT rd_kafka_error_t * -rd_kafka_incremental_assign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t - *partitions); +rd_kafka_incremental_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); /** @@ -3993,10 +4026,9 @@ rd_kafka_incremental_assign (rd_kafka_t *rk, * @remark The returned error object (if not NULL) must be destroyed with * rd_kafka_error_destroy(). */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_incremental_unassign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t - *partitions); +RD_EXPORT rd_kafka_error_t *rd_kafka_incremental_unassign( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); /** @@ -4013,7 +4045,7 @@ rd_kafka_incremental_unassign (rd_kafka_t *rk, * @returns NULL on error, or one of "NONE", "EAGER", "COOPERATIVE" on success. */ RD_EXPORT -const char *rd_kafka_rebalance_protocol (rd_kafka_t *rk); +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk); /** @@ -4037,8 +4069,8 @@ const char *rd_kafka_rebalance_protocol (rd_kafka_t *rk); * a fatal error. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_assign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *partitions); +rd_kafka_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); /** * @brief Returns the current partition assignment as set by rd_kafka_assign() @@ -4057,8 +4089,8 @@ rd_kafka_assign (rd_kafka_t *rk, * since an application is free to assign any partitions. 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_assignment (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **partitions); +rd_kafka_assignment(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t **partitions); /** @@ -4077,8 +4109,7 @@ rd_kafka_assignment (rd_kafka_t *rk, * @returns Returns 1 if the current partition assignment is considered * lost, 0 otherwise. */ -RD_EXPORT int -rd_kafka_assignment_lost (rd_kafka_t *rk); +RD_EXPORT int rd_kafka_assignment_lost(rd_kafka_t *rk); /** @@ -4102,8 +4133,9 @@ rd_kafka_assignment_lost (rd_kafka_t *rk); * a fatal error. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, - int async); +rd_kafka_commit(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + int async); /** @@ -4113,8 +4145,9 @@ rd_kafka_commit (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, * @sa rd_kafka_commit */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - int async); +rd_kafka_commit_message(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + int async); /** @@ -4140,14 +4173,14 @@ rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, * @sa rd_kafka_conf_set_offset_commit_cb() */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit_queue (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_queue_t *rkqu, - void (*cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *commit_opaque), - void *commit_opaque); +rd_kafka_commit_queue(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_queue_t *rkqu, + void (*cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *commit_opaque), + void *commit_opaque); /** @@ -4168,9 +4201,9 @@ rd_kafka_commit_queue (rd_kafka_t *rk, * Else returns an error code. 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_committed (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions, - int timeout_ms); +rd_kafka_committed(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms); @@ -4178,8 +4211,8 @@ rd_kafka_committed (rd_kafka_t *rk, * @brief Retrieve current positions (offsets) for topics+partitions. * * The \p offset field of each requested partition will be set to the offset - * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was - * no previous message. + * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there + * was no previous message. * * @remark In this context the last consumed message is the offset consumed * by the current librdkafka instance and, in case of rebalancing, not @@ -4191,9 +4224,7 @@ rd_kafka_committed (rd_kafka_t *rk, * Else returns an error code. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_position (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); - +rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions); @@ -4209,7 +4240,7 @@ rd_kafka_position (rd_kafka_t *rk, * @sa rd_kafka_send_offsets_to_transaction() */ RD_EXPORT rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata (rd_kafka_t *rk); +rd_kafka_consumer_group_metadata(rd_kafka_t *rk); /** @@ -4222,7 +4253,7 @@ rd_kafka_consumer_group_metadata (rd_kafka_t *rk); * rd_kafka_consumer_group_metadata_destroy(). */ RD_EXPORT rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata_new (const char *group_id); +rd_kafka_consumer_group_metadata_new(const char *group_id); /** @@ -4238,11 +4269,10 @@ rd_kafka_consumer_group_metadata_new (const char *group_id); * rd_kafka_consumer_group_metadata_destroy(). 
*/ RD_EXPORT rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata_new_with_genid (const char *group_id, - int32_t generation_id, - const char *member_id, - const char - *group_instance_id); +rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, + int32_t generation_id, + const char *member_id, + const char *group_instance_id); /** @@ -4250,7 +4280,7 @@ rd_kafka_consumer_group_metadata_new_with_genid (const char *group_id, * rd_kafka_consumer_group_metadata(). */ RD_EXPORT void -rd_kafka_consumer_group_metadata_destroy (rd_kafka_consumer_group_metadata_t *); +rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *); /** @@ -4273,10 +4303,10 @@ rd_kafka_consumer_group_metadata_destroy (rd_kafka_consumer_group_metadata_t *); * * @sa rd_kafka_consumer_group_metadata_read() */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_consumer_group_metadata_write ( - const rd_kafka_consumer_group_metadata_t *cgmd, - void **bufferp, size_t *sizep); +RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_write( + const rd_kafka_consumer_group_metadata_t *cgmd, + void **bufferp, + size_t *sizep); /** * @brief Reads serialized consumer group metadata and returns a @@ -4298,10 +4328,10 @@ rd_kafka_consumer_group_metadata_write ( * * @sa rd_kafka_consumer_group_metadata_write() */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_consumer_group_metadata_read ( - rd_kafka_consumer_group_metadata_t **cgmdp, - const void *buffer, size_t size); +RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_read( + rd_kafka_consumer_group_metadata_t **cgmdp, + const void *buffer, + size_t size); /**@}*/ @@ -4318,21 +4348,27 @@ rd_kafka_consumer_group_metadata_read ( /** * @brief Producer message flags */ -#define RD_KAFKA_MSG_F_FREE 0x1 /**< Delegate freeing of payload to rdkafka. */ -#define RD_KAFKA_MSG_F_COPY 0x2 /**< rdkafka will make a copy of the payload. */ -#define RD_KAFKA_MSG_F_BLOCK 0x4 /**< Block produce*() on message queue full. 
- * WARNING: If a delivery report callback - * is used the application MUST - * call rd_kafka_poll() (or equiv.) - * to make sure delivered messages - * are drained from the internal - * delivery report queue. - * Failure to do so will result - * in indefinately blocking on - * the produce() call when the - * message queue is full. */ -#define RD_KAFKA_MSG_F_PARTITION 0x8 /**< produce_batch() will honor - * per-message partition. */ +#define RD_KAFKA_MSG_F_FREE \ + 0x1 /**< Delegate freeing of payload to rdkafka. \ + */ +#define RD_KAFKA_MSG_F_COPY \ + 0x2 /**< rdkafka will make a copy of the payload. \ + */ +#define RD_KAFKA_MSG_F_BLOCK \ + 0x4 /**< Block produce*() on message queue full. \ + * WARNING: If a delivery report callback \ + * is used the application MUST \ + * call rd_kafka_poll() (or equiv.) \ + * to make sure delivered messages \ + * are drained from the internal \ + * delivery report queue. \ + * Failure to do so will result \ + * in indefinately blocking on \ + * the produce() call when the \ + * message queue is full. */ +#define RD_KAFKA_MSG_F_PARTITION \ + 0x8 /**< produce_batch() will honor \ + * per-message partition. */ @@ -4373,13 +4409,11 @@ rd_kafka_consumer_group_metadata_read ( * RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if * \p queue.buffering.max.messages or * \p queue.buffering.max.kbytes are exceeded. - * Messages are considered in-queue from the point they - * are accepted by produce() until their corresponding - * delivery report callback/event returns. - * It is thus a requirement to call - * rd_kafka_poll() (or equiv.) from a separate - * thread when F_BLOCK is used. - * See WARNING on \c RD_KAFKA_MSG_F_BLOCK above. + * Messages are considered in-queue from the point + * they are accepted by produce() until their corresponding delivery report + * callback/event returns. It is thus a requirement to call rd_kafka_poll() (or + * equiv.) from a separate thread when F_BLOCK is used. 
See WARNING on \c + * RD_KAFKA_MSG_F_BLOCK above. * * RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done * with it. @@ -4439,11 +4473,14 @@ rd_kafka_consumer_group_metadata_read ( * @sa Use rd_kafka_errno2err() to convert `errno` to rdkafka error code. */ RD_EXPORT -int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque); +int rd_kafka_produce(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque); /** @@ -4460,7 +4497,7 @@ int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, * @sa rd_kafka_produce, rd_kafka_produceva, RD_KAFKA_V_END */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...); +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...); /** @@ -4475,9 +4512,8 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...); * @sa rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END */ RD_EXPORT -rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, - const rd_kafka_vu_t *vus, - size_t cnt); +rd_kafka_error_t * +rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt); /** @@ -4510,10 +4546,11 @@ rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, * the provided \p rkmessages. 
*/ RD_EXPORT -int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - rd_kafka_message_t *rkmessages, int message_cnt); - +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + rd_kafka_message_t *rkmessages, + int message_cnt); @@ -4542,7 +4579,7 @@ int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, * @sa rd_kafka_outq_len() */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms); +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms); @@ -4577,7 +4614,7 @@ rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms); * client instance. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags); +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags); /** @@ -4609,43 +4646,43 @@ rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags); /** -* @name Metadata API -* @{ -* -* -*/ + * @name Metadata API + * @{ + * + * + */ /** * @brief Broker information */ typedef struct rd_kafka_metadata_broker { - int32_t id; /**< Broker Id */ - char *host; /**< Broker hostname */ - int port; /**< Broker listening port */ + int32_t id; /**< Broker Id */ + char *host; /**< Broker hostname */ + int port; /**< Broker listening port */ } rd_kafka_metadata_broker_t; /** * @brief Partition information */ typedef struct rd_kafka_metadata_partition { - int32_t id; /**< Partition Id */ - rd_kafka_resp_err_t err; /**< Partition error reported by broker */ - int32_t leader; /**< Leader broker */ - int replica_cnt; /**< Number of brokers in \p replicas */ - int32_t *replicas; /**< Replica brokers */ - int isr_cnt; /**< Number of ISR brokers in \p isrs */ - int32_t *isrs; /**< In-Sync-Replica brokers */ + int32_t id; /**< Partition Id */ + rd_kafka_resp_err_t err; /**< Partition error reported by broker */ + int32_t leader; /**< Leader broker */ + int replica_cnt; /**< Number of brokers in \p replicas */ + int32_t 
*replicas; /**< Replica brokers */ + int isr_cnt; /**< Number of ISR brokers in \p isrs */ + int32_t *isrs; /**< In-Sync-Replica brokers */ } rd_kafka_metadata_partition_t; /** * @brief Topic information */ typedef struct rd_kafka_metadata_topic { - char *topic; /**< Topic name */ - int partition_cnt; /**< Number of partitions in \p partitions*/ + char *topic; /**< Topic name */ + int partition_cnt; /**< Number of partitions in \p partitions*/ struct rd_kafka_metadata_partition *partitions; /**< Partitions */ - rd_kafka_resp_err_t err; /**< Topic error reported by broker */ + rd_kafka_resp_err_t err; /**< Topic error reported by broker */ } rd_kafka_metadata_topic_t; @@ -4653,14 +4690,14 @@ typedef struct rd_kafka_metadata_topic { * @brief Metadata container */ typedef struct rd_kafka_metadata { - int broker_cnt; /**< Number of brokers in \p brokers */ - struct rd_kafka_metadata_broker *brokers; /**< Brokers */ + int broker_cnt; /**< Number of brokers in \p brokers */ + struct rd_kafka_metadata_broker *brokers; /**< Brokers */ - int topic_cnt; /**< Number of topics in \p topics */ - struct rd_kafka_metadata_topic *topics; /**< Topics */ + int topic_cnt; /**< Number of topics in \p topics */ + struct rd_kafka_metadata_topic *topics; /**< Topics */ - int32_t orig_broker_id; /**< Broker originating this metadata */ - char *orig_broker_name; /**< Name of originating broker */ + int32_t orig_broker_id; /**< Broker originating this metadata */ + char *orig_broker_name; /**< Name of originating broker */ } rd_kafka_metadata_t; @@ -4686,10 +4723,11 @@ typedef struct rd_kafka_metadata { */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_metadata (rd_kafka_t *rk, int all_topics, - rd_kafka_topic_t *only_rkt, - const struct rd_kafka_metadata **metadatap, - int timeout_ms); +rd_kafka_metadata(rd_kafka_t *rk, + int all_topics, + rd_kafka_topic_t *only_rkt, + const struct rd_kafka_metadata **metadatap, + int timeout_ms); /** * @brief Release metadata memory. 
@@ -4703,11 +4741,11 @@ void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata); /** -* @name Client group information -* @{ -* -* -*/ + * @name Client group information + * @{ + * + * + */ /** @@ -4723,10 +4761,10 @@ struct rd_kafka_group_member_info { char *client_host; /**< Client's hostname */ void *member_metadata; /**< Member metadata (binary), * format depends on \p protocol_type. */ - int member_metadata_size; /**< Member metadata size in bytes */ + int member_metadata_size; /**< Member metadata size in bytes */ void *member_assignment; /**< Member assignment (binary), * format depends on \p protocol_type. */ - int member_assignment_size; /**< Member assignment size in bytes */ + int member_assignment_size; /**< Member assignment size in bytes */ }; /** @@ -4740,7 +4778,7 @@ struct rd_kafka_group_info { char *protocol_type; /**< Group protocol type */ char *protocol; /**< Group protocol */ struct rd_kafka_group_member_info *members; /**< Group members */ - int member_cnt; /**< Group member count */ + int member_cnt; /**< Group member count */ }; /** @@ -4749,8 +4787,8 @@ struct rd_kafka_group_info { * @sa rd_kafka_group_list_destroy() to release list memory. 
*/ struct rd_kafka_group_list { - struct rd_kafka_group_info *groups; /**< Groups */ - int group_cnt; /**< Group count */ + struct rd_kafka_group_info *groups; /**< Groups */ + int group_cnt; /**< Group count */ }; @@ -4783,15 +4821,16 @@ struct rd_kafka_group_list { */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_list_groups (rd_kafka_t *rk, const char *group, - const struct rd_kafka_group_list **grplistp, - int timeout_ms); +rd_kafka_list_groups(rd_kafka_t *rk, + const char *group, + const struct rd_kafka_group_list **grplistp, + int timeout_ms); /** * @brief Release list memory */ RD_EXPORT -void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist); +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist); /**@}*/ @@ -4842,7 +4881,6 @@ int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist); - /** * @brief Set logger function. * @@ -4855,10 +4893,12 @@ int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist); * * @remark \p rk may be passed as NULL in the callback. */ -RD_EXPORT RD_DEPRECATED -void rd_kafka_set_logger(rd_kafka_t *rk, - void (*func) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)); +RD_EXPORT RD_DEPRECATED void +rd_kafka_set_logger(rd_kafka_t *rk, + void (*func)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)); /** @@ -4878,8 +4918,10 @@ void rd_kafka_set_log_level(rd_kafka_t *rk, int level); * @brief Builtin (default) log sink: print to stderr */ RD_EXPORT -void rd_kafka_log_print(const rd_kafka_t *rk, int level, - const char *fac, const char *buf); +void rd_kafka_log_print(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); /** @@ -4888,8 +4930,10 @@ void rd_kafka_log_print(const rd_kafka_t *rk, int level, * with syslog support. 
*/ RD_EXPORT -void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, - const char *fac, const char *buf); +void rd_kafka_log_syslog(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); /** @@ -4915,7 +4959,7 @@ void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, * @sa rd_kafka_flush() */ RD_EXPORT -int rd_kafka_outq_len(rd_kafka_t *rk); +int rd_kafka_outq_len(rd_kafka_t *rk); @@ -4971,14 +5015,13 @@ int rd_kafka_wait_destroyed(int timeout_ms); * @returns the number of failures, or 0 if all tests passed. */ RD_EXPORT -int rd_kafka_unittest (void); +int rd_kafka_unittest(void); /**@}*/ - /** * @name Experimental APIs * @{ @@ -4992,7 +5035,7 @@ int rd_kafka_unittest (void); * main queue with rd_kafka_poll_set_consumer(). */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk); /**@}*/ @@ -5011,27 +5054,30 @@ rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk); * @brief Event types */ typedef int rd_kafka_event_type_t; -#define RD_KAFKA_EVENT_NONE 0x0 /**< Unset value */ -#define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */ -#define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */ -#define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */ -#define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */ -#define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */ -#define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */ -#define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */ -#define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */ -#define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */ -#define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT 102 /**< CreatePartitions_result_t */ +#define RD_KAFKA_EVENT_NONE 0x0 /**< Unset value */ +#define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */ +#define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */ +#define 
RD_KAFKA_EVENT_LOG 0x4 /**< Log message */ +#define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */ +#define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */ +#define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */ +#define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */ +#define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */ +#define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */ +#define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT \ + 102 /**< CreatePartitions_result_t */ #define RD_KAFKA_EVENT_ALTERCONFIGS_RESULT 103 /**< AlterConfigs_result_t */ -#define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT 104 /**< DescribeConfigs_result_t */ +#define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT \ + 104 /**< DescribeConfigs_result_t */ #define RD_KAFKA_EVENT_DELETERECORDS_RESULT 105 /**< DeleteRecords_result_t */ -#define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */ +#define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */ /** DeleteConsumerGroupOffsets_result_t */ #define RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT 107 -#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100 /**< SASL/OAUTHBEARER - token needs to be - refreshed */ -#define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. */ +#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH \ + 0x100 /**< SASL/OAUTHBEARER \ + token needs to be \ + refreshed */ +#define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. */ /** @@ -5041,7 +5087,7 @@ typedef int rd_kafka_event_type_t; * RD_KAFKA_EVENT_NONE is returned. */ RD_EXPORT -rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev); +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev); /** * @returns the event type's name for the given event. @@ -5050,7 +5096,7 @@ rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev); * the name for RD_KAFKA_EVENT_NONE is returned. 
*/ RD_EXPORT -const char *rd_kafka_event_name (const rd_kafka_event_t *rkev); +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev); /** @@ -5063,7 +5109,7 @@ const char *rd_kafka_event_name (const rd_kafka_event_t *rkev); * no action is performed. */ RD_EXPORT -void rd_kafka_event_destroy (rd_kafka_event_t *rkev); +void rd_kafka_event_destroy(rd_kafka_event_t *rkev); /** @@ -5082,7 +5128,7 @@ void rd_kafka_event_destroy (rd_kafka_event_t *rkev); * from this function prior to passing message to application. */ RD_EXPORT -const rd_kafka_message_t *rd_kafka_event_message_next (rd_kafka_event_t *rkev); +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev); /** @@ -5099,9 +5145,9 @@ const rd_kafka_message_t *rd_kafka_event_message_next (rd_kafka_event_t *rkev); * from this function prior to passing message to application. */ RD_EXPORT -size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, - const rd_kafka_message_t **rkmessages, - size_t size); +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, + const rd_kafka_message_t **rkmessages, + size_t size); /** @@ -5112,7 +5158,7 @@ size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, * - RD_KAFKA_EVENT_DR (>=1 message(s)) */ RD_EXPORT -size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev); +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev); /** @@ -5127,7 +5173,7 @@ size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev); * - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: value of sasl.oauthbearer.config */ RD_EXPORT -const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev); +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev); /** @@ -5139,7 +5185,7 @@ const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev); * - all */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev); +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev); /** @@ -5151,7 +5197,7 @@ rd_kafka_resp_err_t 
rd_kafka_event_error (rd_kafka_event_t *rkev); * - all */ RD_EXPORT -const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev); +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev); /** @@ -5163,7 +5209,7 @@ const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev); * @sa rd_kafka_fatal_error() */ RD_EXPORT -int rd_kafka_event_error_is_fatal (rd_kafka_event_t *rkev); +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev); /** @@ -5182,7 +5228,7 @@ int rd_kafka_event_error_is_fatal (rd_kafka_event_t *rkev); * - RD_KAFKA_EVENT_DELETERECORDS_RESULT */ RD_EXPORT -void *rd_kafka_event_opaque (rd_kafka_event_t *rkev); +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev); /** @@ -5194,8 +5240,10 @@ void *rd_kafka_event_opaque (rd_kafka_event_t *rkev); * @returns 0 on success or -1 if unsupported event type. */ RD_EXPORT -int rd_kafka_event_log (rd_kafka_event_t *rkev, - const char **fac, const char **str, int *level); +int rd_kafka_event_log(rd_kafka_event_t *rkev, + const char **fac, + const char **str, + int *level); /** @@ -5210,8 +5258,9 @@ int rd_kafka_event_log (rd_kafka_event_t *rkev, * @returns 0 on success or -1 if unsupported event type. */ RD_EXPORT -int rd_kafka_event_debug_contexts (rd_kafka_event_t *rkev, - char *dst, size_t dstsize); +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, + char *dst, + size_t dstsize); /** @@ -5222,37 +5271,40 @@ int rd_kafka_event_debug_contexts (rd_kafka_event_t *rkev, * * @returns stats json string. * - * @remark the returned string will be freed automatically along with the event object + * @remark the returned string will be freed automatically along with the event + * object * */ RD_EXPORT -const char *rd_kafka_event_stats (rd_kafka_event_t *rkev); +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev); /** * @returns the topic partition list from the event. 
* - * @remark The list MUST NOT be freed with rd_kafka_topic_partition_list_destroy() + * @remark The list MUST NOT be freed with + * rd_kafka_topic_partition_list_destroy() * * Event types: * - RD_KAFKA_EVENT_REBALANCE * - RD_KAFKA_EVENT_OFFSET_COMMIT */ RD_EXPORT rd_kafka_topic_partition_list_t * -rd_kafka_event_topic_partition_list (rd_kafka_event_t *rkev); +rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev); /** - * @returns a newly allocated topic_partition container, if applicable for the event type, - * else NULL. + * @returns a newly allocated topic_partition container, if applicable for the + * event type, else NULL. * - * @remark The returned pointer MUST be freed with rd_kafka_topic_partition_destroy(). + * @remark The returned pointer MUST be freed with + * rd_kafka_topic_partition_destroy(). * * Event types: * RD_KAFKA_EVENT_ERROR (for partition level errors) */ RD_EXPORT rd_kafka_topic_partition_t * -rd_kafka_event_topic_partition (rd_kafka_event_t *rkev); +rd_kafka_event_topic_partition(rd_kafka_event_t *rkev); /*! CreateTopics result type */ @@ -5282,7 +5334,7 @@ typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t; * RD_KAFKA_EVENT_CREATETOPICS_RESULT */ RD_EXPORT const rd_kafka_CreateTopics_result_t * -rd_kafka_event_CreateTopics_result (rd_kafka_event_t *rkev); +rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev); /** * @brief Get DeleteTopics result. @@ -5294,7 +5346,7 @@ rd_kafka_event_CreateTopics_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DELETETOPICS_RESULT */ RD_EXPORT const rd_kafka_DeleteTopics_result_t * -rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev); +rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev); /** * @brief Get CreatePartitions result. 
@@ -5306,7 +5358,7 @@ rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT */ RD_EXPORT const rd_kafka_CreatePartitions_result_t * -rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev); +rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev); /** * @brief Get AlterConfigs result. @@ -5318,7 +5370,7 @@ rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_ALTERCONFIGS_RESULT */ RD_EXPORT const rd_kafka_AlterConfigs_result_t * -rd_kafka_event_AlterConfigs_result (rd_kafka_event_t *rkev); +rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev); /** * @brief Get DescribeConfigs result. @@ -5330,7 +5382,7 @@ rd_kafka_event_AlterConfigs_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT */ RD_EXPORT const rd_kafka_DescribeConfigs_result_t * -rd_kafka_event_DescribeConfigs_result (rd_kafka_event_t *rkev); +rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev); /** * @returns the result of a DeleteRecords request, or NULL if event is of @@ -5340,7 +5392,7 @@ rd_kafka_event_DescribeConfigs_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DELETERECORDS_RESULT */ RD_EXPORT const rd_kafka_DeleteRecords_result_t * -rd_kafka_event_DeleteRecords_result (rd_kafka_event_t *rkev); +rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev); /** * @brief Get DeleteGroups result. @@ -5352,7 +5404,7 @@ rd_kafka_event_DeleteRecords_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DELETEGROUPS_RESULT */ RD_EXPORT const rd_kafka_DeleteGroups_result_t * -rd_kafka_event_DeleteGroups_result (rd_kafka_event_t *rkev); +rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev); /** * @brief Get DeleteConsumerGroupOffsets result. 
@@ -5364,7 +5416,7 @@ rd_kafka_event_DeleteGroups_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT */ RD_EXPORT const rd_kafka_DeleteConsumerGroupOffsets_result_t * -rd_kafka_event_DeleteConsumerGroupOffsets_result (rd_kafka_event_t *rkev); +rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev); /** * @brief Poll a queue for an event for max \p timeout_ms. @@ -5376,23 +5428,24 @@ rd_kafka_event_DeleteConsumerGroupOffsets_result (rd_kafka_event_t *rkev); * @sa rd_kafka_conf_set_background_event_cb() */ RD_EXPORT -rd_kafka_event_t *rd_kafka_queue_poll (rd_kafka_queue_t *rkqu, int timeout_ms); +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms); /** -* @brief Poll a queue for events served through callbacks for max \p timeout_ms. -* -* @returns the number of events served. -* -* @remark This API must only be used for queues with callbacks registered -* for all expected event types. E.g., not a message queue. -* -* @remark Also see rd_kafka_conf_set_background_event_cb() for triggering -* event callbacks from a librdkafka-managed background thread. -* -* @sa rd_kafka_conf_set_background_event_cb() -*/ + * @brief Poll a queue for events served through callbacks for max \p + * timeout_ms. + * + * @returns the number of events served. + * + * @remark This API must only be used for queues with callbacks registered + * for all expected event types. E.g., not a message queue. + * + * @remark Also see rd_kafka_conf_set_background_event_cb() for triggering + * event callbacks from a librdkafka-managed background thread. 
+ * + * @sa rd_kafka_conf_set_background_event_cb() + */ RD_EXPORT -int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms); +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms); /**@}*/ @@ -5437,10 +5490,11 @@ int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms); * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. */ -typedef rd_kafka_resp_err_t -(rd_kafka_plugin_f_conf_init_t) (rd_kafka_conf_t *conf, - void **plug_opaquep, - char *errstr, size_t errstr_size); +typedef rd_kafka_resp_err_t(rd_kafka_plugin_f_conf_init_t)( + rd_kafka_conf_t *conf, + void **plug_opaquep, + char *errstr, + size_t errstr_size); /**@}*/ @@ -5521,11 +5575,13 @@ typedef rd_kafka_resp_err_t * interceptor in the chain, finally ending up at the built-in * configuration handler. */ -typedef rd_kafka_conf_res_t -(rd_kafka_interceptor_f_on_conf_set_t) (rd_kafka_conf_t *conf, - const char *name, const char *val, - char *errstr, size_t errstr_size, - void *ic_opaque); +typedef rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t)( + rd_kafka_conf_t *conf, + const char *name, + const char *val, + char *errstr, + size_t errstr_size, + void *ic_opaque); /** @@ -5544,12 +5600,12 @@ typedef rd_kafka_conf_res_t * @remark No on_conf_* interceptors are copied to the new configuration * object on rd_kafka_conf_dup(). */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_conf_dup_t) (rd_kafka_conf_t *new_conf, - const rd_kafka_conf_t *old_conf, - size_t filter_cnt, - const char **filter, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t)( + rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter, + void *ic_opaque); /** @@ -5558,8 +5614,8 @@ typedef rd_kafka_resp_err_t * * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). 
*/ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_conf_destroy_t) (void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t)( + void *ic_opaque); /** @@ -5579,10 +5635,12 @@ typedef rd_kafka_resp_err_t * other rk-specific APIs than rd_kafka_interceptor_add..(). * */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_new_t) (rd_kafka_t *rk, const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t)( + rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size); /** @@ -5592,9 +5650,8 @@ typedef rd_kafka_resp_err_t * @param rk The client instance. * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_destroy_t) (rd_kafka_t *rk, void *ic_opaque); - +typedef rd_kafka_resp_err_t( + rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque); @@ -5618,10 +5675,10 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_send_t) (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); /** * @brief on_acknowledgement() is called to inform interceptors that a message @@ -5645,10 +5702,10 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. 
*/ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_acknowledgement_t) (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); /** @@ -5667,10 +5724,10 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_consume_t) (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); /** * @brief on_commit() is called on completed or failed offset commit. @@ -5692,11 +5749,11 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_commit_t) ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err, void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t)( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err, + void *ic_opaque); /** @@ -5720,17 +5777,16 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. 
*/ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_request_sent_t) ( - rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t)( + rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + void *ic_opaque); /** @@ -5758,19 +5814,18 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_response_received_t) ( - rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - int64_t rtt, - rd_kafka_resp_err_t err, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_response_received_t)( + rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque); /** @@ -5789,12 +5844,11 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_thread_start_t) ( - rd_kafka_t *rk, - rd_kafka_thread_type_t thread_type, - const char *thread_name, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_start_t)( + rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type, + const char *thread_name, + void *ic_opaque); /** @@ -5816,12 +5870,11 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. 
*/ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_thread_exit_t) ( - rd_kafka_t *rk, - rd_kafka_thread_type_t thread_type, - const char *thread_name, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t)( + rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type, + const char *thread_name, + void *ic_opaque); @@ -5837,11 +5890,11 @@ typedef rd_kafka_resp_err_t * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_set ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, + void *ic_opaque); /** @@ -5856,11 +5909,11 @@ rd_kafka_conf_interceptor_add_on_conf_set ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_dup ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, + void *ic_opaque); /** * @brief Append an on_conf_destroy() interceptor. @@ -5875,11 +5928,11 @@ rd_kafka_conf_interceptor_add_on_conf_dup ( * @remark Multiple on_conf_destroy() interceptors are allowed to be added * to the same configuration object. 
*/ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_destroy ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, + void *ic_opaque); /** @@ -5889,7 +5942,7 @@ rd_kafka_conf_interceptor_add_on_conf_destroy ( * @param ic_name Interceptor name, used in logging. * @param on_new Function pointer. * @param ic_opaque Opaque value that will be passed to the function. - * + * * @remark Since the on_new() interceptor is added to the configuration object * it may be copied by rd_kafka_conf_dup(). * An interceptor implementation must thus be able to handle @@ -5904,10 +5957,10 @@ rd_kafka_conf_interceptor_add_on_conf_destroy ( * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_new ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_new_t *on_new, - void *ic_opaque); +rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_new_t *on_new, + void *ic_opaque); @@ -5923,11 +5976,11 @@ rd_kafka_conf_interceptor_add_on_new ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_destroy ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_destroy_t *on_destroy, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_destroy_t *on_destroy, + void *ic_opaque); /** @@ -5943,10 +5996,10 @@ rd_kafka_interceptor_add_on_destroy ( * has already been added to \p conf. 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_send ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_send_t *on_send, - void *ic_opaque); +rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_send_t *on_send, + void *ic_opaque); /** * @brief Append an on_acknowledgement() interceptor. @@ -5960,11 +6013,11 @@ rd_kafka_interceptor_add_on_send ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_acknowledgement ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, + void *ic_opaque); /** @@ -5979,11 +6032,11 @@ rd_kafka_interceptor_add_on_acknowledgement ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_consume ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_consume_t *on_consume, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_consume_t *on_consume, + void *ic_opaque); /** @@ -5998,11 +6051,11 @@ rd_kafka_interceptor_add_on_consume ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. 
*/ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_commit ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_commit_t *on_commit, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_commit_t *on_commit, + void *ic_opaque); /** @@ -6017,11 +6070,11 @@ rd_kafka_interceptor_add_on_commit ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_request_sent ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, + void *ic_opaque); /** @@ -6036,11 +6089,11 @@ rd_kafka_interceptor_add_on_request_sent ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_response_received ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_response_received_t *on_response_received, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_response_received_t *on_response_received, + void *ic_opaque); /** @@ -6055,11 +6108,11 @@ rd_kafka_interceptor_add_on_response_received ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. 
*/ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_thread_start ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, + void *ic_opaque); /** @@ -6074,11 +6127,11 @@ rd_kafka_interceptor_add_on_thread_start ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_thread_exit ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, + void *ic_opaque); @@ -6103,7 +6156,7 @@ rd_kafka_interceptor_add_on_thread_exit ( * @returns the error code for the given topic result. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_topic_result_error (const rd_kafka_topic_result_t *topicres); +rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres); /** * @returns the human readable error string for the given topic result, @@ -6112,7 +6165,7 @@ rd_kafka_topic_result_error (const rd_kafka_topic_result_t *topicres); * @remark lifetime of the returned string is the same as the \p topicres. */ RD_EXPORT const char * -rd_kafka_topic_result_error_string (const rd_kafka_topic_result_t *topicres); +rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres); /** * @returns the name of the topic for the given topic result. 
@@ -6120,7 +6173,7 @@ rd_kafka_topic_result_error_string (const rd_kafka_topic_result_t *topicres); * */ RD_EXPORT const char * -rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres); +rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres); /** * @brief Group result provides per-group operation result information. @@ -6132,7 +6185,7 @@ rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres); * @remark lifetime of the returned error is the same as the \p groupres. */ RD_EXPORT const rd_kafka_error_t * -rd_kafka_group_result_error (const rd_kafka_group_result_t *groupres); +rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres); /** * @returns the name of the group for the given group result. @@ -6140,7 +6193,7 @@ rd_kafka_group_result_error (const rd_kafka_group_result_t *groupres); * */ RD_EXPORT const char * -rd_kafka_group_result_name (const rd_kafka_group_result_t *groupres); +rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres); /** * @returns the partitions/offsets for the given group result, if applicable @@ -6148,7 +6201,7 @@ rd_kafka_group_result_name (const rd_kafka_group_result_t *groupres); * @remark lifetime of the returned list is the same as the \p groupres. */ RD_EXPORT const rd_kafka_topic_partition_list_t * -rd_kafka_group_result_partitions (const rd_kafka_group_result_t *groupres); +rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres); /**@}*/ @@ -6187,7 +6240,7 @@ rd_kafka_group_result_partitions (const rd_kafka_group_result_t *groupres); * Locally triggered errors: * - \c RD_KAFKA_RESP_ERR__TIMED_OUT - (Controller) broker connection did not * become available in the time allowed by AdminOption_set_request_timeout. 
- */ + */ /** @@ -6208,7 +6261,7 @@ typedef enum rd_kafka_admin_op_t { RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */ /** DeleteConsumerGroupOffsets */ RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, - RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ + RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ } rd_kafka_admin_op_t; /** @@ -6245,13 +6298,13 @@ typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t; * an unknown API op type. */ RD_EXPORT rd_kafka_AdminOptions_t * -rd_kafka_AdminOptions_new (rd_kafka_t *rk, rd_kafka_admin_op_t for_api); +rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api); /** * @brief Destroy a AdminOptions object. */ -RD_EXPORT void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options); +RD_EXPORT void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options); /** @@ -6273,9 +6326,10 @@ RD_EXPORT void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options); * @remark This option is valid for all Admin API requests. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_request_timeout (rd_kafka_AdminOptions_t *options, - int timeout_ms, - char *errstr, size_t errstr_size); +rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, + int timeout_ms, + char *errstr, + size_t errstr_size); /** @@ -6305,9 +6359,10 @@ rd_kafka_AdminOptions_set_request_timeout (rd_kafka_AdminOptions_t *options, * CreatePartitions, and DeleteRecords. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_operation_timeout (rd_kafka_AdminOptions_t *options, - int timeout_ms, - char *errstr, size_t errstr_size); +rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, + int timeout_ms, + char *errstr, + size_t errstr_size); /** @@ -6329,9 +6384,10 @@ rd_kafka_AdminOptions_set_operation_timeout (rd_kafka_AdminOptions_t *options, * CreatePartitions, AlterConfigs. 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_validate_only (rd_kafka_AdminOptions_t *options, +rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, - char *errstr, size_t errstr_size); + char *errstr, + size_t errstr_size); /** @@ -6360,9 +6416,10 @@ rd_kafka_AdminOptions_set_validate_only (rd_kafka_AdminOptions_t *options, * does not know where to send. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_broker (rd_kafka_AdminOptions_t *options, - int32_t broker_id, - char *errstr, size_t errstr_size); +rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, + int32_t broker_id, + char *errstr, + size_t errstr_size); @@ -6371,11 +6428,8 @@ rd_kafka_AdminOptions_set_broker (rd_kafka_AdminOptions_t *options, * result event using rd_kafka_event_opaque() */ RD_EXPORT void -rd_kafka_AdminOptions_set_opaque (rd_kafka_AdminOptions_t *options, - void *ev_opaque); - - - +rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, + void *ev_opaque); @@ -6409,17 +6463,17 @@ typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t; * are invalid. * Use rd_kafka_NewTopic_destroy() to free object when done. */ -RD_EXPORT rd_kafka_NewTopic_t * -rd_kafka_NewTopic_new (const char *topic, int num_partitions, - int replication_factor, - char *errstr, size_t errstr_size); +RD_EXPORT rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, + int num_partitions, + int replication_factor, + char *errstr, + size_t errstr_size); /** * @brief Destroy and free a NewTopic object previously created with * rd_kafka_NewTopic_new() */ -RD_EXPORT void -rd_kafka_NewTopic_destroy (rd_kafka_NewTopic_t *new_topic); +RD_EXPORT void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic); /** @@ -6427,9 +6481,8 @@ rd_kafka_NewTopic_destroy (rd_kafka_NewTopic_t *new_topic); * array (of \p new_topic_cnt elements). * The array itself is not freed. 
*/ -RD_EXPORT void -rd_kafka_NewTopic_destroy_array (rd_kafka_NewTopic_t **new_topics, - size_t new_topic_cnt); +RD_EXPORT void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt); /** @@ -6454,11 +6507,12 @@ rd_kafka_NewTopic_destroy_array (rd_kafka_NewTopic_t **new_topics, * @sa rd_kafka_AdminOptions_set_validate_only() */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, - int32_t partition, - int32_t *broker_ids, - size_t broker_id_cnt, - char *errstr, size_t errstr_size); +rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, + int32_t partition, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + size_t errstr_size); /** * @brief Set (broker-side) topic configuration name/value pair. @@ -6473,8 +6527,9 @@ rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, * @sa http://kafka.apache.org/documentation.html#topicconfigs */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic, - const char *name, const char *value); +rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, + const char *name, + const char *value); /** @@ -6495,12 +6550,11 @@ rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic, * @remark The result event type emitted on the supplied queue is of type * \c RD_KAFKA_EVENT_CREATETOPICS_RESULT */ -RD_EXPORT void -rd_kafka_CreateTopics (rd_kafka_t *rk, - rd_kafka_NewTopic_t **new_topics, - size_t new_topic_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +RD_EXPORT void rd_kafka_CreateTopics(rd_kafka_t *rk, + rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); /* @@ -6515,12 +6569,9 @@ rd_kafka_CreateTopics (rd_kafka_t *rk, * @param result Result to get topics from. * @param cntp Updated to the number of elements in the array. 
*/ -RD_EXPORT const rd_kafka_topic_result_t ** -rd_kafka_CreateTopics_result_topics ( - const rd_kafka_CreateTopics_result_t *result, - size_t *cntp); - - +RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics( + const rd_kafka_CreateTopics_result_t *result, + size_t *cntp); @@ -6541,15 +6592,13 @@ typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t; * @returns a new allocated DeleteTopic object. * Use rd_kafka_DeleteTopic_destroy() to free object when done. */ -RD_EXPORT rd_kafka_DeleteTopic_t * -rd_kafka_DeleteTopic_new (const char *topic); +RD_EXPORT rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic); /** * @brief Destroy and free a DeleteTopic object previously created with * rd_kafka_DeleteTopic_new() */ -RD_EXPORT void -rd_kafka_DeleteTopic_destroy (rd_kafka_DeleteTopic_t *del_topic); +RD_EXPORT void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic); /** * @brief Helper function to destroy all DeleteTopic objects in @@ -6557,8 +6606,8 @@ rd_kafka_DeleteTopic_destroy (rd_kafka_DeleteTopic_t *del_topic); * The array itself is not freed. 
*/ RD_EXPORT void -rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics, - size_t del_topic_cnt); +rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, + size_t del_topic_cnt); /** * @brief Delete topics from cluster as specified by the \p topics @@ -6574,11 +6623,11 @@ rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics, * \c RD_KAFKA_EVENT_DELETETOPICS_RESULT */ RD_EXPORT -void rd_kafka_DeleteTopics (rd_kafka_t *rk, - rd_kafka_DeleteTopic_t **del_topics, - size_t del_topic_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_DeleteTopics(rd_kafka_t *rk, + rd_kafka_DeleteTopic_t **del_topics, + size_t del_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); @@ -6594,13 +6643,9 @@ void rd_kafka_DeleteTopics (rd_kafka_t *rk, * @param result Result to get topic results from. * @param cntp is updated to the number of elements in the array. */ -RD_EXPORT const rd_kafka_topic_result_t ** -rd_kafka_DeleteTopics_result_topics ( - const rd_kafka_DeleteTopics_result_t *result, - size_t *cntp); - - - +RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics( + const rd_kafka_DeleteTopics_result_t *result, + size_t *cntp); @@ -6629,15 +6674,17 @@ typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t; * Use rd_kafka_NewPartitions_destroy() to free object when done. 
*/ RD_EXPORT rd_kafka_NewPartitions_t * -rd_kafka_NewPartitions_new (const char *topic, size_t new_total_cnt, - char *errstr, size_t errstr_size); +rd_kafka_NewPartitions_new(const char *topic, + size_t new_total_cnt, + char *errstr, + size_t errstr_size); /** * @brief Destroy and free a NewPartitions object previously created with * rd_kafka_NewPartitions_new() */ RD_EXPORT void -rd_kafka_NewPartitions_destroy (rd_kafka_NewPartitions_t *new_parts); +rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts); /** * @brief Helper function to destroy all NewPartitions objects in the @@ -6645,8 +6692,8 @@ rd_kafka_NewPartitions_destroy (rd_kafka_NewPartitions_t *new_parts); * The array itself is not freed. */ RD_EXPORT void -rd_kafka_NewPartitions_destroy_array (rd_kafka_NewPartitions_t **new_parts, - size_t new_parts_cnt); +rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, + size_t new_parts_cnt); /** * @brief Set the replica (broker id) assignment for \p new_partition_idx to the @@ -6670,13 +6717,13 @@ rd_kafka_NewPartitions_destroy_array (rd_kafka_NewPartitions_t **new_parts, * * @sa rd_kafka_AdminOptions_set_validate_only() */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *new_parts, - int32_t new_partition_idx, - int32_t *broker_ids, - size_t broker_id_cnt, - char *errstr, - size_t errstr_size); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment( + rd_kafka_NewPartitions_t *new_parts, + int32_t new_partition_idx, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + size_t errstr_size); /** @@ -6697,12 +6744,11 @@ rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *new_par * @remark The result event type emitted on the supplied queue is of type * \c RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT */ -RD_EXPORT void -rd_kafka_CreatePartitions (rd_kafka_t *rk, - rd_kafka_NewPartitions_t **new_parts, - size_t new_parts_cnt, - 
const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +RD_EXPORT void rd_kafka_CreatePartitions(rd_kafka_t *rk, + rd_kafka_NewPartitions_t **new_parts, + size_t new_parts_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); @@ -6719,11 +6765,9 @@ rd_kafka_CreatePartitions (rd_kafka_t *rk, * @param cntp is updated to the number of elements in the array. */ RD_EXPORT const rd_kafka_topic_result_t ** -rd_kafka_CreatePartitions_result_topics ( - const rd_kafka_CreatePartitions_result_t *result, - size_t *cntp); - - +rd_kafka_CreatePartitions_result_topics( + const rd_kafka_CreatePartitions_result_t *result, + size_t *cntp); @@ -6768,7 +6812,7 @@ typedef enum rd_kafka_ConfigSource_t { * @returns a string representation of the \p confsource. */ RD_EXPORT const char * -rd_kafka_ConfigSource_name (rd_kafka_ConfigSource_t confsource); +rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource); /*! Apache Kafka configuration entry. */ @@ -6778,27 +6822,27 @@ typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t; * @returns the configuration property name */ RD_EXPORT const char * -rd_kafka_ConfigEntry_name (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry); /** * @returns the configuration value, may be NULL for sensitive or unset * properties. */ RD_EXPORT const char * -rd_kafka_ConfigEntry_value (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry); /** * @returns the config source. */ RD_EXPORT rd_kafka_ConfigSource_t -rd_kafka_ConfigEntry_source (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if the config property is read-only on the broker, else 0. * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. 
*/ RD_EXPORT int -rd_kafka_ConfigEntry_is_read_only (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if the config property is set to its default value on the broker, @@ -6806,7 +6850,7 @@ rd_kafka_ConfigEntry_is_read_only (const rd_kafka_ConfigEntry_t *entry); * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. */ RD_EXPORT int -rd_kafka_ConfigEntry_is_default (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if the config property contains sensitive information (such as @@ -6816,13 +6860,13 @@ rd_kafka_ConfigEntry_is_default (const rd_kafka_ConfigEntry_t *entry); * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. */ RD_EXPORT int -rd_kafka_ConfigEntry_is_sensitive (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if this entry is a synonym, else 0. */ RD_EXPORT int -rd_kafka_ConfigEntry_is_synonym (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry); /** @@ -6836,19 +6880,18 @@ rd_kafka_ConfigEntry_is_synonym (const rd_kafka_ConfigEntry_t *entry); * otherwise returns NULL. */ RD_EXPORT const rd_kafka_ConfigEntry_t ** -rd_kafka_ConfigEntry_synonyms (const rd_kafka_ConfigEntry_t *entry, - size_t *cntp); - +rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, + size_t *cntp); /*! 
Apache Kafka resource types */ typedef enum rd_kafka_ResourceType_t { RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */ - RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */ - RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */ - RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */ - RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */ + RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */ + RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */ + RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */ + RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */ RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */ } rd_kafka_ResourceType_t; @@ -6856,7 +6899,7 @@ typedef enum rd_kafka_ResourceType_t { * @returns a string representation of the \p restype */ RD_EXPORT const char * -rd_kafka_ResourceType_name (rd_kafka_ResourceType_t restype); +rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype); /*! Apache Kafka configuration resource. */ typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t; @@ -6871,15 +6914,15 @@ typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t; * @returns a newly allocated object */ RD_EXPORT rd_kafka_ConfigResource_t * -rd_kafka_ConfigResource_new (rd_kafka_ResourceType_t restype, - const char *resname); +rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, + const char *resname); /** * @brief Destroy and free a ConfigResource object previously created with * rd_kafka_ConfigResource_new() */ RD_EXPORT void -rd_kafka_ConfigResource_destroy (rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config); /** @@ -6888,8 +6931,8 @@ rd_kafka_ConfigResource_destroy (rd_kafka_ConfigResource_t *config); * The array itself is not freed. 
*/ RD_EXPORT void -rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config, - size_t config_cnt); +rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, + size_t config_cnt); /** @@ -6906,8 +6949,9 @@ rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config, * or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_ConfigResource_set_config (rd_kafka_ConfigResource_t *config, - const char *name, const char *value); +rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, + const char *name, + const char *value); /** @@ -6919,8 +6963,8 @@ rd_kafka_ConfigResource_set_config (rd_kafka_ConfigResource_t *config, * @param cntp is updated to the number of elements in the array. */ RD_EXPORT const rd_kafka_ConfigEntry_t ** -rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config, - size_t *cntp); +rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, + size_t *cntp); @@ -6928,26 +6972,26 @@ rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config, * @returns the ResourceType for \p config */ RD_EXPORT rd_kafka_ResourceType_t -rd_kafka_ConfigResource_type (const rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config); /** * @returns the name for \p config */ RD_EXPORT const char * -rd_kafka_ConfigResource_name (const rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config); /** * @returns the error for this resource from an AlterConfigs request */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_ConfigResource_error (const rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config); /** * @returns the error string for this resource from an AlterConfigs * request, or NULL if no error. 
*/ RD_EXPORT const char * -rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config); /* @@ -6977,11 +7021,11 @@ rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config); * */ RD_EXPORT -void rd_kafka_AlterConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_AlterConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); /* @@ -7003,12 +7047,9 @@ void rd_kafka_AlterConfigs (rd_kafka_t *rk, * @returns an array of ConfigResource elements, or NULL if not available. */ RD_EXPORT const rd_kafka_ConfigResource_t ** -rd_kafka_AlterConfigs_result_resources ( - const rd_kafka_AlterConfigs_result_t *result, - size_t *cntp); - - - +rd_kafka_AlterConfigs_result_resources( + const rd_kafka_AlterConfigs_result_t *result, + size_t *cntp); @@ -7043,12 +7084,11 @@ rd_kafka_AlterConfigs_result_resources ( * in the resource. */ RD_EXPORT -void rd_kafka_DescribeConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); - +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); @@ -7065,9 +7105,9 @@ void rd_kafka_DescribeConfigs (rd_kafka_t *rk, * @param cntp is updated to the number of elements in the array. 
*/ RD_EXPORT const rd_kafka_ConfigResource_t ** -rd_kafka_DescribeConfigs_result_resources ( - const rd_kafka_DescribeConfigs_result_t *result, - size_t *cntp); +rd_kafka_DescribeConfigs_result_resources( + const rd_kafka_DescribeConfigs_result_t *result, + size_t *cntp); /* @@ -7095,16 +7135,15 @@ typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t; * @returns a new allocated DeleteRecords object. * Use rd_kafka_DeleteRecords_destroy() to free object when done. */ -RD_EXPORT rd_kafka_DeleteRecords_t * -rd_kafka_DeleteRecords_new (const rd_kafka_topic_partition_list_t * - before_offsets); +RD_EXPORT rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new( + const rd_kafka_topic_partition_list_t *before_offsets); /** * @brief Destroy and free a DeleteRecords object previously created with * rd_kafka_DeleteRecords_new() */ RD_EXPORT void -rd_kafka_DeleteRecords_destroy (rd_kafka_DeleteRecords_t *del_records); +rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records); /** * @brief Helper function to destroy all DeleteRecords objects in @@ -7112,8 +7151,8 @@ rd_kafka_DeleteRecords_destroy (rd_kafka_DeleteRecords_t *del_records); * The array itself is not freed. 
*/ RD_EXPORT void -rd_kafka_DeleteRecords_destroy_array (rd_kafka_DeleteRecords_t **del_records, - size_t del_record_cnt); +rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, + size_t del_record_cnt); /** * @brief Delete records (messages) in topic partitions older than the @@ -7136,12 +7175,11 @@ rd_kafka_DeleteRecords_destroy_array (rd_kafka_DeleteRecords_t **del_records, * @remark The result event type emitted on the supplied queue is of type * \c RD_KAFKA_EVENT_DELETERECORDS_RESULT */ -RD_EXPORT void -rd_kafka_DeleteRecords (rd_kafka_t *rk, - rd_kafka_DeleteRecords_t **del_records, - size_t del_record_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +RD_EXPORT void rd_kafka_DeleteRecords(rd_kafka_t *rk, + rd_kafka_DeleteRecords_t **del_records, + size_t del_record_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); /* @@ -7158,7 +7196,7 @@ rd_kafka_DeleteRecords (rd_kafka_t *rk, * The returned object's life-time is the same as the \p result object. */ RD_EXPORT const rd_kafka_topic_partition_list_t * -rd_kafka_DeleteRecords_result_offsets ( +rd_kafka_DeleteRecords_result_offsets( const rd_kafka_DeleteRecords_result_t *result); /* @@ -7179,15 +7217,13 @@ typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t; * @returns a new allocated DeleteGroup object. * Use rd_kafka_DeleteGroup_destroy() to free object when done. 
*/ -RD_EXPORT rd_kafka_DeleteGroup_t * -rd_kafka_DeleteGroup_new (const char *group); +RD_EXPORT rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group); /** * @brief Destroy and free a DeleteGroup object previously created with * rd_kafka_DeleteGroup_new() */ -RD_EXPORT void -rd_kafka_DeleteGroup_destroy (rd_kafka_DeleteGroup_t *del_group); +RD_EXPORT void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group); /** * @brief Helper function to destroy all DeleteGroup objects in @@ -7195,8 +7231,8 @@ rd_kafka_DeleteGroup_destroy (rd_kafka_DeleteGroup_t *del_group); * The array itself is not freed. */ RD_EXPORT void -rd_kafka_DeleteGroup_destroy_array (rd_kafka_DeleteGroup_t **del_groups, - size_t del_group_cnt); +rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt); /** * @brief Delete groups from cluster as specified by the \p del_groups @@ -7212,11 +7248,11 @@ rd_kafka_DeleteGroup_destroy_array (rd_kafka_DeleteGroup_t **del_groups, * \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT */ RD_EXPORT -void rd_kafka_DeleteGroups (rd_kafka_t *rk, - rd_kafka_DeleteGroup_t **del_groups, - size_t del_group_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_DeleteGroups(rd_kafka_t *rk, + rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); @@ -7232,10 +7268,9 @@ void rd_kafka_DeleteGroups (rd_kafka_t *rk, * @param result Result to get group results from. * @param cntp is updated to the number of elements in the array. */ -RD_EXPORT const rd_kafka_group_result_t ** -rd_kafka_DeleteGroups_result_groups ( - const rd_kafka_DeleteGroups_result_t *result, - size_t *cntp); +RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups( + const rd_kafka_DeleteGroups_result_t *result, + size_t *cntp); /* @@ -7246,7 +7281,7 @@ rd_kafka_DeleteGroups_result_groups ( /*! 
Represents consumer group committed offsets to be deleted. */ typedef struct rd_kafka_DeleteConsumerGroupOffsets_s -rd_kafka_DeleteConsumerGroupOffsets_t; + rd_kafka_DeleteConsumerGroupOffsets_t; /** * @brief Create a new DeleteConsumerGroupOffsets object. @@ -7261,27 +7296,25 @@ rd_kafka_DeleteConsumerGroupOffsets_t; * object when done. */ RD_EXPORT rd_kafka_DeleteConsumerGroupOffsets_t * -rd_kafka_DeleteConsumerGroupOffsets_new (const char *group, - const rd_kafka_topic_partition_list_t - *partitions); +rd_kafka_DeleteConsumerGroupOffsets_new( + const char *group, + const rd_kafka_topic_partition_list_t *partitions); /** * @brief Destroy and free a DeleteConsumerGroupOffsets object previously * created with rd_kafka_DeleteConsumerGroupOffsets_new() */ -RD_EXPORT void -rd_kafka_DeleteConsumerGroupOffsets_destroy ( - rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets); +RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy( + rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets); /** * @brief Helper function to destroy all DeleteConsumerGroupOffsets objects in * the \p del_grpoffsets array (of \p del_grpoffsets_cnt elements). * The array itself is not freed. */ -RD_EXPORT void -rd_kafka_DeleteConsumerGroupOffsets_destroy_array ( - rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, - size_t del_grpoffset_cnt); +RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy_array( + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffset_cnt); /** * @brief Delete committed offsets for a set of partitions in a conusmer @@ -7302,12 +7335,12 @@ rd_kafka_DeleteConsumerGroupOffsets_destroy_array ( * @remark The current implementation only supports one group per invocation. 
*/ RD_EXPORT -void rd_kafka_DeleteConsumerGroupOffsets ( - rd_kafka_t *rk, - rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, - size_t del_grpoffsets_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_DeleteConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); @@ -7324,9 +7357,9 @@ void rd_kafka_DeleteConsumerGroupOffsets ( * @param cntp is updated to the number of elements in the array. */ RD_EXPORT const rd_kafka_group_result_t ** -rd_kafka_DeleteConsumerGroupOffsets_result_groups ( - const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, - size_t *cntp); +rd_kafka_DeleteConsumerGroupOffsets_result_groups( + const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, + size_t *cntp); /**@}*/ @@ -7384,12 +7417,14 @@ rd_kafka_DeleteConsumerGroupOffsets_result_groups ( */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token (rd_kafka_t *rk, - const char *token_value, - int64_t md_lifetime_ms, - const char *md_principal_name, - const char **extensions, size_t extension_size, - char *errstr, size_t errstr_size); +rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, + const char *token_value, + int64_t md_lifetime_ms, + const char *md_principal_name, + const char **extensions, + size_t extension_size, + char *errstr, + size_t errstr_size); /** * @brief SASL/OAUTHBEARER token refresh failure indicator. @@ -7412,8 +7447,8 @@ rd_kafka_oauthbearer_set_token (rd_kafka_t *rk, * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb */ RD_EXPORT -rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, const char *errstr); +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, + const char *errstr); /**@}*/ @@ -7596,8 +7631,7 @@ rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, const char *errstr); * rd_kafka_error_destroy(). 
*/ RD_EXPORT -rd_kafka_error_t * -rd_kafka_init_transactions (rd_kafka_t *rk, int timeout_ms); +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms); @@ -7646,7 +7680,7 @@ rd_kafka_init_transactions (rd_kafka_t *rk, int timeout_ms); * rd_kafka_error_destroy(). */ RD_EXPORT -rd_kafka_error_t *rd_kafka_begin_transaction (rd_kafka_t *rk); +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk); /** @@ -7714,12 +7748,11 @@ rd_kafka_error_t *rd_kafka_begin_transaction (rd_kafka_t *rk); * rd_kafka_error_destroy(). */ RD_EXPORT -rd_kafka_error_t * -rd_kafka_send_offsets_to_transaction ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - const rd_kafka_consumer_group_metadata_t *cgmetadata, - int timeout_ms); +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + const rd_kafka_consumer_group_metadata_t *cgmetadata, + int timeout_ms); /** @@ -7786,8 +7819,7 @@ rd_kafka_send_offsets_to_transaction ( * rd_kafka_error_destroy(). */ RD_EXPORT -rd_kafka_error_t * -rd_kafka_commit_transaction (rd_kafka_t *rk, int timeout_ms); +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms); /** @@ -7846,8 +7878,7 @@ rd_kafka_commit_transaction (rd_kafka_t *rk, int timeout_ms); * rd_kafka_error_destroy(). 
*/ RD_EXPORT -rd_kafka_error_t * -rd_kafka_abort_transaction (rd_kafka_t *rk, int timeout_ms); +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms); /**@}*/ diff --git a/src/rdkafka_admin.c b/src/rdkafka_admin.c index a6591b77fd..9a63b1e1c9 100644 --- a/src/rdkafka_admin.c +++ b/src/rdkafka_admin.c @@ -37,12 +37,9 @@ /** @brief Descriptive strings for rko_u.admin_request.state */ static const char *rd_kafka_admin_state_desc[] = { - "initializing", - "waiting for broker", - "waiting for controller", - "waiting for fanouts", - "constructing request", - "waiting for response from broker", + "initializing", "waiting for broker", + "waiting for controller", "waiting for fanouts", + "constructing request", "waiting for response from broker", }; @@ -230,36 +227,35 @@ static const char *rd_kafka_admin_state_desc[] = { * @enum Admin request target broker. Must be negative values since the field * used is broker_id. */ -enum { - RD_KAFKA_ADMIN_TARGET_CONTROLLER = -1, /**< Cluster controller */ - RD_KAFKA_ADMIN_TARGET_COORDINATOR = -2, /**< (Group) Coordinator */ - RD_KAFKA_ADMIN_TARGET_FANOUT = -3, /**< This rko is a fanout and - * and has no target broker */ +enum { RD_KAFKA_ADMIN_TARGET_CONTROLLER = -1, /**< Cluster controller */ + RD_KAFKA_ADMIN_TARGET_COORDINATOR = -2, /**< (Group) Coordinator */ + RD_KAFKA_ADMIN_TARGET_FANOUT = -3, /**< This rko is a fanout and + * and has no target broker */ }; /** * @brief Admin op callback types */ -typedef rd_kafka_resp_err_t (rd_kafka_admin_Request_cb_t) ( - rd_kafka_broker_t *rkb, - const rd_list_t *configs /*(ConfigResource_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) - RD_WARN_UNUSED_RESULT; - -typedef rd_kafka_resp_err_t (rd_kafka_admin_Response_parse_cb_t) ( - rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) - 
RD_WARN_UNUSED_RESULT; - -typedef void (rd_kafka_admin_fanout_PartialResponse_cb_t) ( - rd_kafka_op_t *rko_req, - const rd_kafka_op_t *rko_partial); +typedef rd_kafka_resp_err_t(rd_kafka_admin_Request_cb_t)( + rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) RD_WARN_UNUSED_RESULT; + +typedef rd_kafka_resp_err_t(rd_kafka_admin_Response_parse_cb_t)( + rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) RD_WARN_UNUSED_RESULT; + +typedef void(rd_kafka_admin_fanout_PartialResponse_cb_t)( + rd_kafka_op_t *rko_req, + const rd_kafka_op_t *rko_partial); typedef rd_list_copy_cb_t rd_kafka_admin_fanout_CopyResult_cb_t; @@ -288,28 +284,29 @@ struct rd_kafka_admin_fanout_worker_cbs { }; /* Forward declarations */ -static void rd_kafka_admin_common_worker_destroy (rd_kafka_t *rk, - rd_kafka_op_t *rko, - rd_bool_t do_destroy); -static void rd_kafka_AdminOptions_init (rd_kafka_t *rk, - rd_kafka_AdminOptions_t *options); +static void rd_kafka_admin_common_worker_destroy(rd_kafka_t *rk, + rd_kafka_op_t *rko, + rd_bool_t do_destroy); +static void rd_kafka_AdminOptions_init(rd_kafka_t *rk, + rd_kafka_AdminOptions_t *options); static rd_kafka_op_res_t -rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko); +rd_kafka_admin_worker(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko); static rd_kafka_ConfigEntry_t * -rd_kafka_ConfigEntry_copy (const rd_kafka_ConfigEntry_t *src); -static void rd_kafka_ConfigEntry_free (void *ptr); -static void *rd_kafka_ConfigEntry_list_copy (const void *src, void *opaque); +rd_kafka_ConfigEntry_copy(const rd_kafka_ConfigEntry_t *src); +static void rd_kafka_ConfigEntry_free(void *ptr); +static void *rd_kafka_ConfigEntry_list_copy(const void *src, void *opaque); -static void 
rd_kafka_admin_handle_response (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque); +static void rd_kafka_admin_handle_response(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque); static rd_kafka_op_res_t -rd_kafka_admin_fanout_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko_fanout); +rd_kafka_admin_fanout_worker(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko_fanout); /** @@ -325,24 +322,23 @@ rd_kafka_admin_fanout_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, * @remark This moves the rko_req's admin_request.args list from \p rko_req * to the returned rko. The \p rko_req args will be emptied. */ -static rd_kafka_op_t *rd_kafka_admin_result_new (rd_kafka_op_t *rko_req) { +static rd_kafka_op_t *rd_kafka_admin_result_new(rd_kafka_op_t *rko_req) { rd_kafka_op_t *rko_result; rd_kafka_op_t *rko_fanout; if ((rko_fanout = rko_req->rko_u.admin_request.fanout_parent)) { /* If this is a fanned out request the rko_result needs to be * handled by the fanout worker rather than the application. 
*/ - rko_result = rd_kafka_op_new_cb( - rko_req->rko_rk, - RD_KAFKA_OP_ADMIN_RESULT, - rd_kafka_admin_fanout_worker); + rko_result = rd_kafka_op_new_cb(rko_req->rko_rk, + RD_KAFKA_OP_ADMIN_RESULT, + rd_kafka_admin_fanout_worker); /* Transfer fanout pointer to result */ rko_result->rko_u.admin_result.fanout_parent = rko_fanout; - rko_req->rko_u.admin_request.fanout_parent = NULL; + rko_req->rko_u.admin_request.fanout_parent = NULL; /* Set event type based on original fanout ops reqtype, * e.g., ..OP_DELETERECORDS */ rko_result->rko_u.admin_result.reqtype = - rko_fanout->rko_u.admin_request.fanout.reqtype; + rko_fanout->rko_u.admin_request.fanout.reqtype; } else { rko_result = rd_kafka_op_new(RD_KAFKA_OP_ADMIN_RESULT); @@ -352,17 +348,16 @@ static rd_kafka_op_t *rd_kafka_admin_result_new (rd_kafka_op_t *rko_req) { * application request type. */ if (rko_req->rko_type == RD_KAFKA_OP_ADMIN_FANOUT) rko_result->rko_u.admin_result.reqtype = - rko_req->rko_u.admin_request.fanout.reqtype; + rko_req->rko_u.admin_request.fanout.reqtype; else rko_result->rko_u.admin_result.reqtype = - rko_req->rko_type; + rko_req->rko_type; } rko_result->rko_rk = rko_req->rko_rk; - rko_result->rko_u.admin_result.opaque = - rd_kafka_confval_get_ptr(&rko_req->rko_u.admin_request. - options.opaque); + rko_result->rko_u.admin_result.opaque = rd_kafka_confval_get_ptr( + &rko_req->rko_u.admin_request.options.opaque); /* Move request arguments (list) from request to result. * This is mainly so that partial_response() knows what arguments @@ -379,9 +374,10 @@ static rd_kafka_op_t *rd_kafka_admin_result_new (rd_kafka_op_t *rko_req) { /** * @brief Set error code and error string on admin_result op \p rko. 
*/ -static void rd_kafka_admin_result_set_err0 (rd_kafka_op_t *rko, - rd_kafka_resp_err_t err, - const char *fmt, va_list ap) { +static void rd_kafka_admin_result_set_err0(rd_kafka_op_t *rko, + rd_kafka_resp_err_t err, + const char *fmt, + va_list ap) { char buf[512]; rd_vsnprintf(buf, sizeof(buf), fmt, ap); @@ -401,10 +397,11 @@ static void rd_kafka_admin_result_set_err0 (rd_kafka_op_t *rko, /** * @sa rd_kafka_admin_result_set_err0 */ -static RD_UNUSED RD_FORMAT(printf, 3, 4) - void rd_kafka_admin_result_set_err (rd_kafka_op_t *rko, - rd_kafka_resp_err_t err, - const char *fmt, ...) { +static RD_UNUSED RD_FORMAT(printf, 3, 4) void rd_kafka_admin_result_set_err( + rd_kafka_op_t *rko, + rd_kafka_resp_err_t err, + const char *fmt, + ...) { va_list ap; va_start(ap, fmt); @@ -415,11 +412,9 @@ static RD_UNUSED RD_FORMAT(printf, 3, 4) /** * @brief Enqueue admin_result on application's queue. */ -static RD_INLINE -void rd_kafka_admin_result_enq (rd_kafka_op_t *rko_req, - rd_kafka_op_t *rko_result) { - rd_kafka_replyq_enq(&rko_req->rko_u.admin_request.replyq, - rko_result, +static RD_INLINE void rd_kafka_admin_result_enq(rd_kafka_op_t *rko_req, + rd_kafka_op_t *rko_result) { + rd_kafka_replyq_enq(&rko_req->rko_u.admin_request.replyq, rko_result, rko_req->rko_u.admin_request.replyq.version); } @@ -429,10 +424,12 @@ void rd_kafka_admin_result_enq (rd_kafka_op_t *rko_req, * @remark This function will NOT destroy the \p rko_req, so don't forget to * call rd_kafka_admin_common_worker_destroy() when done with the rko. */ -static RD_FORMAT(printf, 3, 4) - void rd_kafka_admin_result_fail (rd_kafka_op_t *rko_req, - rd_kafka_resp_err_t err, - const char *fmt, ...) { +static RD_FORMAT(printf, + 3, + 4) void rd_kafka_admin_result_fail(rd_kafka_op_t *rko_req, + rd_kafka_resp_err_t err, + const char *fmt, + ...) 
{ va_list ap; rd_kafka_op_t *rko_result; @@ -459,12 +456,12 @@ static RD_FORMAT(printf, 3, 4) * @remark To be used as a callback for \c rd_kafka_coord_req */ static rd_kafka_resp_err_t -rd_kafka_admin_coord_request (rd_kafka_broker_t *rkb, - rd_kafka_op_t *rko_ignore, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { - rd_kafka_t *rk = rkb->rkb_rk; +rd_kafka_admin_coord_request(rd_kafka_broker_t *rkb, + rd_kafka_op_t *rko_ignore, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_enq_once_t *eonce = opaque; rd_kafka_op_t *rko; char errstr[512]; @@ -479,21 +476,16 @@ rd_kafka_admin_coord_request (rd_kafka_broker_t *rkb, rd_kafka_enq_once_add_source(eonce, "coordinator response"); err = rko->rko_u.admin_request.cbs->request( - rkb, - &rko->rko_u.admin_request.args, - &rko->rko_u.admin_request.options, - errstr, sizeof(errstr), - replyq, - rd_kafka_admin_handle_response, - eonce); + rkb, &rko->rko_u.admin_request.args, + &rko->rko_u.admin_request.options, errstr, sizeof(errstr), replyq, + rd_kafka_admin_handle_response, eonce); if (err) { rd_kafka_enq_once_del_source(eonce, "coordinator response"); rd_kafka_admin_result_fail( - rko, err, - "%s worker failed to send request: %s", - rd_kafka_op2str(rko->rko_type), errstr); + rko, err, "%s worker failed to send request: %s", + rd_kafka_op2str(rko->rko_type), errstr); rd_kafka_admin_common_worker_destroy(rk, rko, - rd_true/*destroy*/); + rd_true /*destroy*/); } return err; } @@ -503,33 +495,31 @@ rd_kafka_admin_coord_request (rd_kafka_broker_t *rkb, * @brief Return the topics list from a topic-related result object. 
*/ static const rd_kafka_topic_result_t ** -rd_kafka_admin_result_ret_topics (const rd_kafka_op_t *rko, - size_t *cntp) { +rd_kafka_admin_result_ret_topics(const rd_kafka_op_t *rko, size_t *cntp) { rd_kafka_op_type_t reqtype = - rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; rd_assert(reqtype == RD_KAFKA_OP_CREATETOPICS || reqtype == RD_KAFKA_OP_DELETETOPICS || reqtype == RD_KAFKA_OP_CREATEPARTITIONS); *cntp = rd_list_cnt(&rko->rko_u.admin_result.results); - return (const rd_kafka_topic_result_t **)rko->rko_u.admin_result. - results.rl_elems; + return (const rd_kafka_topic_result_t **) + rko->rko_u.admin_result.results.rl_elems; } /** * @brief Return the ConfigResource list from a config-related result object. */ static const rd_kafka_ConfigResource_t ** -rd_kafka_admin_result_ret_resources (const rd_kafka_op_t *rko, - size_t *cntp) { +rd_kafka_admin_result_ret_resources(const rd_kafka_op_t *rko, size_t *cntp) { rd_kafka_op_type_t reqtype = - rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; rd_assert(reqtype == RD_KAFKA_OP_ALTERCONFIGS || reqtype == RD_KAFKA_OP_DESCRIBECONFIGS); *cntp = rd_list_cnt(&rko->rko_u.admin_result.results); - return (const rd_kafka_ConfigResource_t **)rko->rko_u.admin_result. - results.rl_elems; + return (const rd_kafka_ConfigResource_t **) + rko->rko_u.admin_result.results.rl_elems; } @@ -537,16 +527,15 @@ rd_kafka_admin_result_ret_resources (const rd_kafka_op_t *rko, * @brief Return the groups list from a group-related result object. 
*/ static const rd_kafka_group_result_t ** -rd_kafka_admin_result_ret_groups (const rd_kafka_op_t *rko, - size_t *cntp) { +rd_kafka_admin_result_ret_groups(const rd_kafka_op_t *rko, size_t *cntp) { rd_kafka_op_type_t reqtype = - rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; rd_assert(reqtype == RD_KAFKA_OP_DELETEGROUPS || reqtype == RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS); *cntp = rd_list_cnt(&rko->rko_u.admin_result.results); - return (const rd_kafka_group_result_t **)rko->rko_u.admin_result. - results.rl_elems; + return (const rd_kafka_group_result_t **) + rko->rko_u.admin_result.results.rl_elems; } /** @@ -563,12 +552,12 @@ rd_kafka_admin_result_ret_groups (const rd_kafka_op_t *rko, * @locality application thread */ static rd_kafka_op_t * -rd_kafka_admin_request_op_new (rd_kafka_t *rk, - rd_kafka_op_type_t optype, - rd_kafka_event_type_t reply_event_type, - const struct rd_kafka_admin_worker_cbs *cbs, - const rd_kafka_AdminOptions_t *options, - rd_kafka_q_t *rkq) { +rd_kafka_admin_request_op_new(rd_kafka_t *rk, + rd_kafka_op_type_t optype, + rd_kafka_event_type_t reply_event_type, + const struct rd_kafka_admin_worker_cbs *cbs, + const rd_kafka_AdminOptions_t *options, + rd_kafka_q_t *rkq) { rd_kafka_op_t *rko; rd_assert(rk); @@ -593,14 +582,13 @@ rd_kafka_admin_request_op_new (rd_kafka_t *rk, /* Calculate absolute timeout */ rko->rko_u.admin_request.abs_timeout = - rd_timeout_init( - rd_kafka_confval_get_int(&rko->rko_u.admin_request. - options.request_timeout)); + rd_timeout_init(rd_kafka_confval_get_int( + &rko->rko_u.admin_request.options.request_timeout)); /* Setup enq-op-once, which is triggered by either timer code * or future wait-controller code. */ rko->rko_u.admin_request.eonce = - rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); /* The timer itself must be started from the rdkafka main thread, * not here. 
*/ @@ -616,15 +604,14 @@ rd_kafka_admin_request_op_new (rd_kafka_t *rk, /** * @returns the remaining request timeout in milliseconds. */ -static RD_INLINE int rd_kafka_admin_timeout_remains (rd_kafka_op_t *rko) { +static RD_INLINE int rd_kafka_admin_timeout_remains(rd_kafka_op_t *rko) { return rd_timeout_remains(rko->rko_u.admin_request.abs_timeout); } /** * @returns the remaining request timeout in microseconds. */ -static RD_INLINE rd_ts_t -rd_kafka_admin_timeout_remains_us (rd_kafka_op_t *rko) { +static RD_INLINE rd_ts_t rd_kafka_admin_timeout_remains_us(rd_kafka_op_t *rko) { return rd_timeout_remains_us(rko->rko_u.admin_request.abs_timeout); } @@ -632,8 +619,8 @@ rd_kafka_admin_timeout_remains_us (rd_kafka_op_t *rko) { /** * @brief Timer timeout callback for the admin rko's eonce object. */ -static void rd_kafka_admin_eonce_timeout_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_admin_eonce_timeout_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_enq_once_t *eonce = arg; rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__TIMED_OUT, @@ -646,23 +633,22 @@ static void rd_kafka_admin_eonce_timeout_cb (rd_kafka_timers_t *rkts, * @brief Common worker destroy to be called in destroy: label * in worker. */ -static void rd_kafka_admin_common_worker_destroy (rd_kafka_t *rk, - rd_kafka_op_t *rko, - rd_bool_t do_destroy) { +static void rd_kafka_admin_common_worker_destroy(rd_kafka_t *rk, + rd_kafka_op_t *rko, + rd_bool_t do_destroy) { int timer_was_stopped; /* Free resources for this op. */ - timer_was_stopped = - rd_kafka_timer_stop(&rk->rk_timers, - &rko->rko_u.admin_request.tmr, rd_true); + timer_was_stopped = rd_kafka_timer_stop( + &rk->rk_timers, &rko->rko_u.admin_request.tmr, rd_true); if (rko->rko_u.admin_request.eonce) { /* Remove the stopped timer's eonce reference since its * callback will not have fired if we stopped the timer. */ if (timer_was_stopped) - rd_kafka_enq_once_del_source(rko->rko_u.admin_request. 
- eonce, "timeout timer"); + rd_kafka_enq_once_del_source( + rko->rko_u.admin_request.eonce, "timeout timer"); /* This is thread-safe to do even if there are outstanding * timers or wait-controller references to the eonce @@ -688,13 +674,12 @@ static void rd_kafka_admin_common_worker_destroy (rd_kafka_t *rk, * @returns the broker rkb with refcount increased, or NULL if not yet * available. */ -static rd_kafka_broker_t * -rd_kafka_admin_common_get_broker (rd_kafka_t *rk, - rd_kafka_op_t *rko, - int32_t broker_id) { +static rd_kafka_broker_t *rd_kafka_admin_common_get_broker(rd_kafka_t *rk, + rd_kafka_op_t *rko, + int32_t broker_id) { rd_kafka_broker_t *rkb; - rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up broker %"PRId32, + rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up broker %" PRId32, rd_kafka_op2str(rko->rko_type), broker_id); /* Since we're iterating over this broker_async() call @@ -702,8 +687,8 @@ rd_kafka_admin_common_get_broker (rd_kafka_t *rk, * we need to re-enable the eonce to be triggered again (which * is not necessary the first time we get here, but there * is no harm doing it then either). */ - rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, - rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko, + RD_KAFKA_REPLYQ(rk->rk_ops, 0)); /* Look up the broker asynchronously, if the broker * is not available the eonce is registered for broker @@ -713,14 +698,14 @@ rd_kafka_admin_common_get_broker (rd_kafka_t *rk, * again and hopefully get an rkb back, otherwise defer a new * async wait. Repeat until success or timeout. */ if (!(rkb = rd_kafka_broker_get_async( - rk, broker_id, RD_KAFKA_BROKER_STATE_UP, - rko->rko_u.admin_request.eonce))) { + rk, broker_id, RD_KAFKA_BROKER_STATE_UP, + rko->rko_u.admin_request.eonce))) { /* Broker not available, wait asynchronously * for broker metadata code to trigger eonce. 
*/ return NULL; } - rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: broker %"PRId32" is %s", + rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: broker %" PRId32 " is %s", rd_kafka_op2str(rko->rko_type), broker_id, rkb->rkb_name); return rkb; @@ -737,8 +722,7 @@ rd_kafka_admin_common_get_broker (rd_kafka_t *rk, * available. */ static rd_kafka_broker_t * -rd_kafka_admin_common_get_controller (rd_kafka_t *rk, - rd_kafka_op_t *rko) { +rd_kafka_admin_common_get_controller(rd_kafka_t *rk, rd_kafka_op_t *rko) { rd_kafka_broker_t *rkb; rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up controller", @@ -749,8 +733,8 @@ rd_kafka_admin_common_get_controller (rd_kafka_t *rk, * we need to re-enable the eonce to be triggered again (which * is not necessary the first time we get here, but there * is no harm doing it then either). */ - rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, - rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko, + RD_KAFKA_REPLYQ(rk->rk_ops, 0)); /* Look up the controller asynchronously, if the controller * is not available the eonce is registered for broker @@ -760,8 +744,8 @@ rd_kafka_admin_common_get_controller (rd_kafka_t *rk, * again and hopefully get an rkb back, otherwise defer a new * async wait. Repeat until success or timeout. */ if (!(rkb = rd_kafka_broker_controller_async( - rk, RD_KAFKA_BROKER_STATE_UP, - rko->rko_u.admin_request.eonce))) { + rk, RD_KAFKA_BROKER_STATE_UP, + rko->rko_u.admin_request.eonce))) { /* Controller not available, wait asynchronously * for controller code to trigger eonce. */ return NULL; @@ -780,12 +764,12 @@ rd_kafka_admin_common_get_controller (rd_kafka_t *rk, * * @param opaque is the eonce from the worker protocol request call. 
*/ -static void rd_kafka_admin_handle_response (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_admin_handle_response(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { rd_kafka_enq_once_t *eonce = opaque; rd_kafka_op_t *rko; @@ -796,40 +780,38 @@ static void rd_kafka_admin_handle_response (rd_kafka_t *rk, /* The operation timed out and the worker was * dismantled while we were waiting for broker response, * do nothing - everything has been cleaned up. */ - rd_kafka_dbg(rk, ADMIN, "ADMIN", - "Dropping outdated %sResponse with return code %s", - request ? - rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey): - "???", - rd_kafka_err2str(err)); + rd_kafka_dbg( + rk, ADMIN, "ADMIN", + "Dropping outdated %sResponse with return code %s", + request ? rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey) + : "???", + rd_kafka_err2str(err)); return; } /* Attach reply buffer to rko for parsing in the worker. */ rd_assert(!rko->rko_u.admin_request.reply_buf); rko->rko_u.admin_request.reply_buf = reply; - rko->rko_err = err; + rko->rko_err = err; if (rko->rko_op_cb(rk, NULL, rko) == RD_KAFKA_OP_RES_HANDLED) rd_kafka_op_destroy(rko); - } /** * @brief Generic handler for protocol responses, calls the admin ops' * Response_parse_cb and enqueues the result to the caller's queue. 
*/ -static void rd_kafka_admin_response_parse (rd_kafka_op_t *rko) { +static void rd_kafka_admin_response_parse(rd_kafka_op_t *rko) { rd_kafka_resp_err_t err; rd_kafka_op_t *rko_result = NULL; char errstr[512]; if (rko->rko_err) { - rd_kafka_admin_result_fail( - rko, rko->rko_err, - "%s worker request failed: %s", - rd_kafka_op2str(rko->rko_type), - rd_kafka_err2str(rko->rko_err)); + rd_kafka_admin_result_fail(rko, rko->rko_err, + "%s worker request failed: %s", + rd_kafka_op2str(rko->rko_type), + rd_kafka_err2str(rko->rko_err)); return; } @@ -837,14 +819,12 @@ static void rd_kafka_admin_response_parse (rd_kafka_op_t *rko) { * Let callback parse response and provide result in rko_result * which is then enqueued on the reply queue. */ err = rko->rko_u.admin_request.cbs->parse( - rko, &rko_result, - rko->rko_u.admin_request.reply_buf, - errstr, sizeof(errstr)); + rko, &rko_result, rko->rko_u.admin_request.reply_buf, errstr, + sizeof(errstr)); if (err) { rd_kafka_admin_result_fail( - rko, err, - "%s worker failed to parse response: %s", - rd_kafka_op2str(rko->rko_type), errstr); + rko, err, "%s worker failed to parse response: %s", + rd_kafka_op2str(rko->rko_type), errstr); return; } @@ -857,47 +837,42 @@ static void rd_kafka_admin_response_parse (rd_kafka_op_t *rko) { /** * @brief Generic handler for coord_req() responses. 
*/ -static void -rd_kafka_admin_coord_response_parse (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_admin_coord_response_parse(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { rd_kafka_op_t *rko_result; rd_kafka_enq_once_t *eonce = opaque; rd_kafka_op_t *rko; char errstr[512]; - rko = rd_kafka_enq_once_del_source_return(eonce, - "coordinator response"); + rko = + rd_kafka_enq_once_del_source_return(eonce, "coordinator response"); if (!rko) /* Admin request has timed out and been destroyed */ return; if (err) { rd_kafka_admin_result_fail( - rko, err, - "%s worker coordinator request failed: %s", - rd_kafka_op2str(rko->rko_type), - rd_kafka_err2str(err)); + rko, err, "%s worker coordinator request failed: %s", + rd_kafka_op2str(rko->rko_type), rd_kafka_err2str(err)); rd_kafka_admin_common_worker_destroy(rk, rko, - rd_true/*destroy*/); + rd_true /*destroy*/); return; } - err = rko->rko_u.admin_request.cbs->parse( - rko, &rko_result, rkbuf, - errstr, sizeof(errstr)); + err = rko->rko_u.admin_request.cbs->parse(rko, &rko_result, rkbuf, + errstr, sizeof(errstr)); if (err) { rd_kafka_admin_result_fail( - rko, err, - "%s worker failed to parse coordinator %sResponse: %s", - rd_kafka_op2str(rko->rko_type), - rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), - errstr); + rko, err, + "%s worker failed to parse coordinator %sResponse: %s", + rd_kafka_op2str(rko->rko_type), + rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), errstr); rd_kafka_admin_common_worker_destroy(rk, rko, - rd_true/*destroy*/); + rd_true /*destroy*/); return; } @@ -931,7 +906,7 @@ rd_kafka_admin_coord_response_parse (rd_kafka_t *rk, * @returns a hint to the op code whether the rko should be destroyed or not. 
*/ static rd_kafka_op_res_t -rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { +rd_kafka_admin_worker(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { const char *name = rd_kafka_op2str(rko->rko_type); rd_ts_t timeout_in; rd_kafka_broker_t *rkb = NULL; @@ -939,17 +914,17 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { char errstr[512]; /* ADMIN_FANOUT handled by fanout_worker() */ - rd_assert((rko->rko_type & ~ RD_KAFKA_OP_FLAGMASK) != + rd_assert((rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) != RD_KAFKA_OP_ADMIN_FANOUT); if (rd_kafka_terminating(rk)) { - rd_kafka_dbg(rk, ADMIN, name, - "%s worker called in state %s: " - "handle is terminating: %s", - name, - rd_kafka_admin_state_desc[rko->rko_u. - admin_request.state], - rd_kafka_err2str(rko->rko_err)); + rd_kafka_dbg( + rk, ADMIN, name, + "%s worker called in state %s: " + "handle is terminating: %s", + name, + rd_kafka_admin_state_desc[rko->rko_u.admin_request.state], + rd_kafka_err2str(rko->rko_err)); rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__DESTROY, "Handle is terminating: %s", rd_kafka_err2str(rko->rko_err)); @@ -962,9 +937,7 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { goto destroy; /* rko being destroyed (silent) */ } - rd_kafka_dbg(rk, ADMIN, name, - "%s worker called in state %s: %s", - name, + rd_kafka_dbg(rk, ADMIN, name, "%s worker called in state %s: %s", name, rd_kafka_admin_state_desc[rko->rko_u.admin_request.state], rd_kafka_err2str(rko->rko_err)); @@ -973,11 +946,9 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { /* Check for errors raised asynchronously (e.g., by timer) */ if (rko->rko_err) { rd_kafka_admin_result_fail( - rko, rko->rko_err, - "Failed while %s: %s", - rd_kafka_admin_state_desc[rko->rko_u. 
- admin_request.state], - rd_kafka_err2str(rko->rko_err)); + rko, rko->rko_err, "Failed while %s: %s", + rd_kafka_admin_state_desc[rko->rko_u.admin_request.state], + rd_kafka_err2str(rko->rko_err)); goto destroy; } @@ -985,18 +956,14 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { timeout_in = rd_kafka_admin_timeout_remains_us(rko); if (timeout_in <= 0) { rd_kafka_admin_result_fail( - rko, RD_KAFKA_RESP_ERR__TIMED_OUT, - "Timed out %s", - rd_kafka_admin_state_desc[rko->rko_u. - admin_request.state]); + rko, RD_KAFKA_RESP_ERR__TIMED_OUT, "Timed out %s", + rd_kafka_admin_state_desc[rko->rko_u.admin_request.state]); goto destroy; } - redo: - switch (rko->rko_u.admin_request.state) - { - case RD_KAFKA_ADMIN_STATE_INIT: - { +redo: + switch (rko->rko_u.admin_request.state) { + case RD_KAFKA_ADMIN_STATE_INIT: { int32_t broker_id; /* First call. */ @@ -1004,21 +971,20 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { /* Set up timeout timer. */ rd_kafka_enq_once_add_source(rko->rko_u.admin_request.eonce, "timeout timer"); - rd_kafka_timer_start_oneshot(&rk->rk_timers, - &rko->rko_u.admin_request.tmr, - rd_true, timeout_in, - rd_kafka_admin_eonce_timeout_cb, - rko->rko_u.admin_request.eonce); + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rko->rko_u.admin_request.tmr, rd_true, + timeout_in, rd_kafka_admin_eonce_timeout_cb, + rko->rko_u.admin_request.eonce); /* Use explicitly specified broker_id, if available. 
*/ broker_id = (int32_t)rd_kafka_confval_get_int( - &rko->rko_u.admin_request.options.broker); + &rko->rko_u.admin_request.options.broker); if (broker_id != -1) { rd_kafka_dbg(rk, ADMIN, name, "%s using explicitly " - "set broker id %"PRId32 - " rather than %"PRId32, + "set broker id %" PRId32 + " rather than %" PRId32, name, broker_id, rko->rko_u.admin_request.broker_id); rko->rko_u.admin_request.broker_id = broker_id; @@ -1028,26 +994,24 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { } /* Resolve target broker(s) */ - switch (rko->rko_u.admin_request.broker_id) - { + switch (rko->rko_u.admin_request.broker_id) { case RD_KAFKA_ADMIN_TARGET_CONTROLLER: /* Controller */ rko->rko_u.admin_request.state = - RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER; - goto redo; /* Trigger next state immediately */ + RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER; + goto redo; /* Trigger next state immediately */ case RD_KAFKA_ADMIN_TARGET_COORDINATOR: /* Group (or other) coordinator */ rko->rko_u.admin_request.state = - RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE; - rd_kafka_enq_once_add_source(rko->rko_u.admin_request. 
- eonce, - "coordinator request"); + RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE; + rd_kafka_enq_once_add_source( + rko->rko_u.admin_request.eonce, + "coordinator request"); rd_kafka_coord_req(rk, rko->rko_u.admin_request.coordtype, rko->rko_u.admin_request.coordkey, - rd_kafka_admin_coord_request, - NULL, + rd_kafka_admin_coord_request, NULL, rd_kafka_admin_timeout_remains(rko), RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_admin_coord_response_parse, @@ -1066,8 +1030,8 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { /* Specific broker */ rd_assert(rko->rko_u.admin_request.broker_id >= 0); rko->rko_u.admin_request.state = - RD_KAFKA_ADMIN_STATE_WAIT_BROKER; - goto redo; /* Trigger next state immediately */ + RD_KAFKA_ADMIN_STATE_WAIT_BROKER; + goto redo; /* Trigger next state immediately */ } } @@ -1075,13 +1039,13 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { case RD_KAFKA_ADMIN_STATE_WAIT_BROKER: /* Broker lookup */ if (!(rkb = rd_kafka_admin_common_get_broker( - rk, rko, rko->rko_u.admin_request.broker_id))) { + rk, rko, rko->rko_u.admin_request.broker_id))) { /* Still waiting for broker to become available */ return RD_KAFKA_OP_RES_KEEP; } rko->rko_u.admin_request.state = - RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST; + RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST; goto redo; case RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER: @@ -1091,7 +1055,7 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { } rko->rko_u.admin_request.state = - RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST; + RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST; goto redo; case RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS: @@ -1115,26 +1079,24 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { /* Send request (async) */ err = rko->rko_u.admin_request.cbs->request( - rkb, - &rko->rko_u.admin_request.args, - &rko->rko_u.admin_request.options, - errstr, sizeof(errstr), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - 
rd_kafka_admin_handle_response, - rko->rko_u.admin_request.eonce); + rkb, &rko->rko_u.admin_request.args, + &rko->rko_u.admin_request.options, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_admin_handle_response, + rko->rko_u.admin_request.eonce); /* Loose broker refcount from get_broker(), get_controller() */ rd_kafka_broker_destroy(rkb); if (err) { rd_kafka_enq_once_del_source( - rko->rko_u.admin_request.eonce, "send"); + rko->rko_u.admin_request.eonce, "send"); rd_kafka_admin_result_fail(rko, err, "%s", errstr); goto destroy; } rko->rko_u.admin_request.state = - RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE; + RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE; /* Wait asynchronously for broker response, which will * trigger the eonce and worker to be called again. */ @@ -1148,11 +1110,10 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { return RD_KAFKA_OP_RES_KEEP; - destroy: +destroy: rd_kafka_admin_common_worker_destroy(rk, rko, - rd_false/*don't destroy*/); + rd_false /*don't destroy*/); return RD_KAFKA_OP_RES_HANDLED; /* trigger's op_destroy() */ - } @@ -1173,26 +1134,25 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { * @locality application thread */ static rd_kafka_op_t * -rd_kafka_admin_fanout_op_new (rd_kafka_t *rk, - rd_kafka_op_type_t req_type, - rd_kafka_event_type_t reply_event_type, - const struct rd_kafka_admin_fanout_worker_cbs - *cbs, - const rd_kafka_AdminOptions_t *options, - rd_kafka_q_t *rkq) { +rd_kafka_admin_fanout_op_new(rd_kafka_t *rk, + rd_kafka_op_type_t req_type, + rd_kafka_event_type_t reply_event_type, + const struct rd_kafka_admin_fanout_worker_cbs *cbs, + const rd_kafka_AdminOptions_t *options, + rd_kafka_q_t *rkq) { rd_kafka_op_t *rko; rd_assert(rk); rd_assert(rkq); rd_assert(cbs); - rko = rd_kafka_op_new(RD_KAFKA_OP_ADMIN_FANOUT); + rko = rd_kafka_op_new(RD_KAFKA_OP_ADMIN_FANOUT); rko->rko_rk = rk; rko->rko_u.admin_request.reply_event_type = reply_event_type; 
rko->rko_u.admin_request.fanout.cbs = - (struct rd_kafka_admin_fanout_worker_cbs *)cbs; + (struct rd_kafka_admin_fanout_worker_cbs *)cbs; /* Make a copy of the options */ if (options) @@ -1205,9 +1165,8 @@ rd_kafka_admin_fanout_op_new (rd_kafka_t *rk, /* Calculate absolute timeout */ rko->rko_u.admin_request.abs_timeout = - rd_timeout_init( - rd_kafka_confval_get_int(&rko->rko_u.admin_request. - options.request_timeout)); + rd_timeout_init(rd_kafka_confval_get_int( + &rko->rko_u.admin_request.options.request_timeout)); /* Set up replyq */ rd_kafka_set_replyq(&rko->rko_u.admin_request.replyq, rkq, 0); @@ -1238,12 +1197,12 @@ rd_kafka_admin_fanout_op_new (rd_kafka_t *rk, * * @returns a hint to the op code whether the rko should be destroyed or not. */ -static rd_kafka_op_res_t -rd_kafka_admin_fanout_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_admin_fanout_worker(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_op_t *rko_fanout = rko->rko_u.admin_result.fanout_parent; - const char *name = rd_kafka_op2str(rko_fanout->rko_u.admin_request. 
- fanout.reqtype); + const char *name = + rd_kafka_op2str(rko_fanout->rko_u.admin_request.fanout.reqtype); rd_kafka_op_t *rko_result; RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_ADMIN_RESULT); @@ -1258,8 +1217,7 @@ rd_kafka_admin_fanout_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_dbg(rk, ADMIN, name, "%s fanout worker called for fanned out op %s: " "handle is terminating: %s", - name, - rd_kafka_op2str(rko->rko_type), + name, rd_kafka_op2str(rko->rko_type), rd_kafka_err2str(rko_fanout->rko_err)); if (!rko->rko_err) rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY; @@ -1268,14 +1226,13 @@ rd_kafka_admin_fanout_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_dbg(rk, ADMIN, name, "%s fanout worker called for %s with %d request(s) " "outstanding: %s", - name, - rd_kafka_op2str(rko->rko_type), + name, rd_kafka_op2str(rko->rko_type), rko_fanout->rko_u.admin_request.fanout.outstanding, rd_kafka_err2str(rko_fanout->rko_err)); /* Add partial response to rko_fanout's result list. */ - rko_fanout->rko_u.admin_request. - fanout.cbs->partial_response(rko_fanout, rko); + rko_fanout->rko_u.admin_request.fanout.cbs->partial_response(rko_fanout, + rko); if (rko_fanout->rko_u.admin_request.fanout.outstanding > 0) /* Wait for outstanding requests to finish */ @@ -1286,8 +1243,8 @@ rd_kafka_admin_fanout_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, &rko_fanout->rko_u.admin_request.fanout.results); rd_list_copy_to(&rko_result->rko_u.admin_result.results, &rko_fanout->rko_u.admin_request.fanout.results, - rko_fanout->rko_u.admin_request. - fanout.cbs->copy_result, NULL); + rko_fanout->rko_u.admin_request.fanout.cbs->copy_result, + NULL); /* Enqueue result on application queue, we're done. 
*/ rd_kafka_replyq_enq(&rko_fanout->rko_u.admin_request.replyq, rko_result, @@ -1311,9 +1268,10 @@ rd_kafka_admin_fanout_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, */ rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_request_timeout (rd_kafka_AdminOptions_t *options, - int timeout_ms, - char *errstr, size_t errstr_size) { +rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, + int timeout_ms, + char *errstr, + size_t errstr_size) { return rd_kafka_confval_set_type(&options->request_timeout, RD_KAFKA_CONFVAL_INT, &timeout_ms, errstr, errstr_size); @@ -1321,9 +1279,10 @@ rd_kafka_AdminOptions_set_request_timeout (rd_kafka_AdminOptions_t *options, rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_operation_timeout (rd_kafka_AdminOptions_t *options, - int timeout_ms, - char *errstr, size_t errstr_size) { +rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, + int timeout_ms, + char *errstr, + size_t errstr_size) { return rd_kafka_confval_set_type(&options->operation_timeout, RD_KAFKA_CONFVAL_INT, &timeout_ms, errstr, errstr_size); @@ -1331,18 +1290,20 @@ rd_kafka_AdminOptions_set_operation_timeout (rd_kafka_AdminOptions_t *options, rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_validate_only (rd_kafka_AdminOptions_t *options, +rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, - char *errstr, size_t errstr_size) { + char *errstr, + size_t errstr_size) { return rd_kafka_confval_set_type(&options->validate_only, RD_KAFKA_CONFVAL_INT, &true_or_false, errstr, errstr_size); } rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_incremental (rd_kafka_AdminOptions_t *options, - int true_or_false, - char *errstr, size_t errstr_size) { +rd_kafka_AdminOptions_set_incremental(rd_kafka_AdminOptions_t *options, + int true_or_false, + char *errstr, + size_t errstr_size) { rd_snprintf(errstr, errstr_size, "Incremental updates currently not supported, see KIP-248"); return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; 
@@ -1353,32 +1314,30 @@ rd_kafka_AdminOptions_set_incremental (rd_kafka_AdminOptions_t *options, } rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_broker (rd_kafka_AdminOptions_t *options, - int32_t broker_id, - char *errstr, size_t errstr_size) { +rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, + int32_t broker_id, + char *errstr, + size_t errstr_size) { int ibroker_id = (int)broker_id; - return rd_kafka_confval_set_type(&options->broker, - RD_KAFKA_CONFVAL_INT, - &ibroker_id, - errstr, errstr_size); + return rd_kafka_confval_set_type(&options->broker, RD_KAFKA_CONFVAL_INT, + &ibroker_id, errstr, errstr_size); } -void -rd_kafka_AdminOptions_set_opaque (rd_kafka_AdminOptions_t *options, - void *opaque) { - rd_kafka_confval_set_type(&options->opaque, - RD_KAFKA_CONFVAL_PTR, opaque, NULL, 0); +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, + void *opaque) { + rd_kafka_confval_set_type(&options->opaque, RD_KAFKA_CONFVAL_PTR, + opaque, NULL, 0); } /** * @brief Initialize and set up defaults for AdminOptions */ -static void rd_kafka_AdminOptions_init (rd_kafka_t *rk, - rd_kafka_AdminOptions_t *options) { +static void rd_kafka_AdminOptions_init(rd_kafka_t *rk, + rd_kafka_AdminOptions_t *options) { rd_kafka_confval_init_int(&options->request_timeout, "request_timeout", - 0, 3600*1000, + 0, 3600 * 1000, rk->rk_conf.admin.request_timeout_ms); if (options->for_api == RD_KAFKA_ADMIN_OP_ANY || @@ -1387,8 +1346,7 @@ static void rd_kafka_AdminOptions_init (rd_kafka_t *rk, options->for_api == RD_KAFKA_ADMIN_OP_CREATEPARTITIONS || options->for_api == RD_KAFKA_ADMIN_OP_DELETERECORDS) rd_kafka_confval_init_int(&options->operation_timeout, - "operation_timeout", - -1, 3600*1000, + "operation_timeout", -1, 3600 * 1000, rk->rk_conf.admin.request_timeout_ms); else rd_kafka_confval_disable(&options->operation_timeout, @@ -1399,29 +1357,25 @@ static void rd_kafka_AdminOptions_init (rd_kafka_t *rk, options->for_api == 
RD_KAFKA_ADMIN_OP_CREATEPARTITIONS || options->for_api == RD_KAFKA_ADMIN_OP_ALTERCONFIGS) rd_kafka_confval_init_int(&options->validate_only, - "validate_only", - 0, 1, 0); + "validate_only", 0, 1, 0); else rd_kafka_confval_disable(&options->validate_only, "validate_only"); if (options->for_api == RD_KAFKA_ADMIN_OP_ANY || options->for_api == RD_KAFKA_ADMIN_OP_ALTERCONFIGS) - rd_kafka_confval_init_int(&options->incremental, - "incremental", + rd_kafka_confval_init_int(&options->incremental, "incremental", 0, 1, 0); else - rd_kafka_confval_disable(&options->incremental, - "incremental"); + rd_kafka_confval_disable(&options->incremental, "incremental"); - rd_kafka_confval_init_int(&options->broker, "broker", - 0, INT32_MAX, -1); + rd_kafka_confval_init_int(&options->broker, "broker", 0, INT32_MAX, -1); rd_kafka_confval_init_ptr(&options->opaque, "opaque"); } rd_kafka_AdminOptions_t * -rd_kafka_AdminOptions_new (rd_kafka_t *rk, rd_kafka_admin_op_t for_api) { +rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api) { rd_kafka_AdminOptions_t *options; if ((int)for_api < 0 || for_api >= RD_KAFKA_ADMIN_OP__CNT) @@ -1436,7 +1390,7 @@ rd_kafka_AdminOptions_new (rd_kafka_t *rk, rd_kafka_admin_op_t for_api) { return options; } -void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options) { +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options) { rd_free(options); } @@ -1444,9 +1398,6 @@ void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options) { - - - /** * @name CreateTopics * @{ @@ -1457,11 +1408,11 @@ void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options) { -rd_kafka_NewTopic_t * -rd_kafka_NewTopic_new (const char *topic, - int num_partitions, - int replication_factor, - char *errstr, size_t errstr_size) { +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, + int num_partitions, + int replication_factor, + char *errstr, + size_t errstr_size) { rd_kafka_NewTopic_t *new_topic; if (!topic) { @@ -1470,7 
+1421,8 @@ rd_kafka_NewTopic_new (const char *topic, } if (num_partitions < -1 || num_partitions > RD_KAFKAP_PARTITIONS_MAX) { - rd_snprintf(errstr, errstr_size, "num_partitions out of " + rd_snprintf(errstr, errstr_size, + "num_partitions out of " "expected range %d..%d or -1 for broker default", 1, RD_KAFKAP_PARTITIONS_MAX); return NULL; @@ -1484,29 +1436,28 @@ rd_kafka_NewTopic_new (const char *topic, return NULL; } - new_topic = rd_calloc(1, sizeof(*new_topic)); - new_topic->topic = rd_strdup(topic); - new_topic->num_partitions = num_partitions; + new_topic = rd_calloc(1, sizeof(*new_topic)); + new_topic->topic = rd_strdup(topic); + new_topic->num_partitions = num_partitions; new_topic->replication_factor = replication_factor; /* List of int32 lists */ rd_list_init(&new_topic->replicas, 0, rd_list_destroy_free); rd_list_prealloc_elems(&new_topic->replicas, 0, num_partitions == -1 ? 0 : num_partitions, - 0/*nozero*/); + 0 /*nozero*/); /* List of ConfigEntrys */ rd_list_init(&new_topic->config, 0, rd_kafka_ConfigEntry_free); return new_topic; - } /** * @brief Topic name comparator for NewTopic_t */ -static int rd_kafka_NewTopic_cmp (const void *_a, const void *_b) { +static int rd_kafka_NewTopic_cmp(const void *_a, const void *_b) { const rd_kafka_NewTopic_t *a = _a, *b = _b; return strcmp(a->topic, b->topic); } @@ -1517,7 +1468,7 @@ static int rd_kafka_NewTopic_cmp (const void *_a, const void *_b) { * @brief Allocate a new NewTopic and make a copy of \p src */ static rd_kafka_NewTopic_t * -rd_kafka_NewTopic_copy (const rd_kafka_NewTopic_t *src) { +rd_kafka_NewTopic_copy(const rd_kafka_NewTopic_t *src) { rd_kafka_NewTopic_t *dst; dst = rd_kafka_NewTopic_new(src->topic, src->num_partitions, @@ -1536,32 +1487,32 @@ rd_kafka_NewTopic_copy (const rd_kafka_NewTopic_t *src) { return dst; } -void rd_kafka_NewTopic_destroy (rd_kafka_NewTopic_t *new_topic) { +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic) { rd_list_destroy(&new_topic->replicas); 
rd_list_destroy(&new_topic->config); rd_free(new_topic->topic); rd_free(new_topic); } -static void rd_kafka_NewTopic_free (void *ptr) { +static void rd_kafka_NewTopic_free(void *ptr) { rd_kafka_NewTopic_destroy(ptr); } -void -rd_kafka_NewTopic_destroy_array (rd_kafka_NewTopic_t **new_topics, - size_t new_topic_cnt) { +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt) { size_t i; - for (i = 0 ; i < new_topic_cnt ; i++) + for (i = 0; i < new_topic_cnt; i++) rd_kafka_NewTopic_destroy(new_topics[i]); } rd_kafka_resp_err_t -rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, - int32_t partition, - int32_t *broker_ids, - size_t broker_id_cnt, - char *errstr, size_t errstr_size) { +rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, + int32_t partition, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + size_t errstr_size) { rd_list_t *rl; int i; @@ -1582,7 +1533,7 @@ rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, rd_snprintf(errstr, errstr_size, "Partitions must be added in order, " "starting at 0: expecting partition %d, " - "not %"PRId32, + "not %" PRId32, rd_list_cnt(&new_topic->replicas), partition); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -1598,7 +1549,7 @@ rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, rl = rd_list_init_int32(rd_list_new(0, NULL), (int)broker_id_cnt); - for (i = 0 ; i < (int)broker_id_cnt ; i++) + for (i = 0; i < (int)broker_id_cnt; i++) rd_list_set_int32(rl, i, broker_ids[i]); rd_list_add(&new_topic->replicas, rl); @@ -1611,16 +1562,17 @@ rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, * @brief Generic constructor of ConfigEntry which is also added to \p rl */ static rd_kafka_resp_err_t -rd_kafka_admin_add_config0 (rd_list_t *rl, - const char *name, const char *value, - rd_kafka_AlterOperation_t operation) { +rd_kafka_admin_add_config0(rd_list_t *rl, + const char 
*name, + const char *value, + rd_kafka_AlterOperation_t operation) { rd_kafka_ConfigEntry_t *entry; if (!name) return RD_KAFKA_RESP_ERR__INVALID_ARG; - entry = rd_calloc(1, sizeof(*entry)); - entry->kv = rd_strtup_new(name, value); + entry = rd_calloc(1, sizeof(*entry)); + entry->kv = rd_strtup_new(name, value); entry->a.operation = operation; rd_list_add(rl, entry); @@ -1629,9 +1581,9 @@ rd_kafka_admin_add_config0 (rd_list_t *rl, } -rd_kafka_resp_err_t -rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic, - const char *name, const char *value) { +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, + const char *name, + const char *value) { return rd_kafka_admin_add_config0(&new_topic->config, name, value, RD_KAFKA_ALTER_OP_ADD); } @@ -1642,14 +1594,15 @@ rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic, * @brief Parse CreateTopicsResponse and create ADMIN_RESULT op. */ static rd_kafka_resp_err_t -rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_CreateTopicsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; - rd_kafka_broker_t *rkb = reply->rkbuf_rkb; - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; int32_t topic_cnt; int i; @@ -1664,10 +1617,11 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) rd_kafka_buf_parse_fail( - reply, - "Received %"PRId32" topics in response " - "when only %d were requested", topic_cnt, - rd_list_cnt(&rko_req->rko_u.admin_request.args)); + reply, + "Received %" PRId32 + " topics in response " + "when only %d were requested", + topic_cnt, 
rd_list_cnt(&rko_req->rko_u.admin_request.args)); rko_result = rd_kafka_admin_result_new(rko_req); @@ -1675,11 +1629,11 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt, rd_kafka_topic_result_free); - for (i = 0 ; i < (int)topic_cnt ; i++) { + for (i = 0; i < (int)topic_cnt; i++) { rd_kafkap_str_t ktopic; int16_t error_code; rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER; - char *this_errstr = NULL; + char *this_errstr = NULL; rd_kafka_topic_result_t *terr; rd_kafka_NewTopic_t skel; int orig_pos; @@ -1696,10 +1650,10 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, * we hide this error code from the application * since the topic creation is in fact in progress. */ if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT && - rd_kafka_confval_get_int(&rko_req->rko_u. - admin_request.options. - operation_timeout) <= 0) { - error_code = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_confval_get_int(&rko_req->rko_u.admin_request + .options.operation_timeout) <= + 0) { + error_code = RD_KAFKA_RESP_ERR_NO_ERROR; this_errstr = NULL; } @@ -1707,10 +1661,9 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, if (RD_KAFKAP_STR_IS_NULL(&error_msg) || RD_KAFKAP_STR_LEN(&error_msg) == 0) this_errstr = - (char *)rd_kafka_err2str(error_code); + (char *)rd_kafka_err2str(error_code); else RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg); - } terr = rd_kafka_topic_result_new(ktopic.str, @@ -1721,24 +1674,23 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, * in the same order as they were requested. The broker * does not maintain ordering unfortunately. 
*/ skel.topic = terr->topic; - orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, + orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, &skel, rd_kafka_NewTopic_cmp); if (orig_pos == -1) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s that was not " - "included in the original request", - RD_KAFKAP_STR_PR(&ktopic)); + reply, + "Broker returned topic %.*s that was not " + "included in the original request", + RD_KAFKAP_STR_PR(&ktopic)); } if (rd_list_elem(&rko_result->rko_u.admin_result.results, orig_pos) != NULL) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s multiple times", - RD_KAFKAP_STR_PR(&ktopic)); + reply, "Broker returned topic %.*s multiple times", + RD_KAFKAP_STR_PR(&ktopic)); } rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, @@ -1749,7 +1701,7 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: if (rko_result) rd_kafka_op_destroy(rko_result); @@ -1761,29 +1713,28 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, } -void rd_kafka_CreateTopics (rd_kafka_t *rk, - rd_kafka_NewTopic_t **new_topics, - size_t new_topic_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_CreateTopics(rd_kafka_t *rk, + rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko; size_t i; static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_CreateTopicsRequest, - rd_kafka_CreateTopicsResponse_parse, + rd_kafka_CreateTopicsRequest, + rd_kafka_CreateTopicsResponse_parse, }; rd_assert(rkqu); - rko = rd_kafka_admin_request_op_new(rk, - RD_KAFKA_OP_CREATETOPICS, + rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_CREATETOPICS, RD_KAFKA_EVENT_CREATETOPICS_RESULT, &cbs, options, rkqu->rkqu_q); 
rd_list_init(&rko->rko_u.admin_request.args, (int)new_topic_cnt, rd_kafka_NewTopic_free); - for (i = 0 ; i < new_topic_cnt ; i++) + for (i = 0; i < new_topic_cnt; i++) rd_list_add(&rko->rko_u.admin_request.args, rd_kafka_NewTopic_copy(new_topics[i])); @@ -1797,10 +1748,9 @@ void rd_kafka_CreateTopics (rd_kafka_t *rk, * The returned \p topics life-time is the same as the \p result object. * @param cntp is updated to the number of elements in the array. */ -const rd_kafka_topic_result_t ** -rd_kafka_CreateTopics_result_topics ( - const rd_kafka_CreateTopics_result_t *result, - size_t *cntp) { +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics( + const rd_kafka_CreateTopics_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result, cntp); } @@ -1809,7 +1759,6 @@ rd_kafka_CreateTopics_result_topics ( - /** * @name Delete topics * @{ @@ -1819,31 +1768,31 @@ rd_kafka_CreateTopics_result_topics ( * */ -rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new (const char *topic) { +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic) { size_t tsize = strlen(topic) + 1; rd_kafka_DeleteTopic_t *del_topic; /* Single allocation */ - del_topic = rd_malloc(sizeof(*del_topic) + tsize); + del_topic = rd_malloc(sizeof(*del_topic) + tsize); del_topic->topic = del_topic->data; memcpy(del_topic->topic, topic, tsize); return del_topic; } -void rd_kafka_DeleteTopic_destroy (rd_kafka_DeleteTopic_t *del_topic) { +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic) { rd_free(del_topic); } -static void rd_kafka_DeleteTopic_free (void *ptr) { +static void rd_kafka_DeleteTopic_free(void *ptr) { rd_kafka_DeleteTopic_destroy(ptr); } -void rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics, - size_t del_topic_cnt) { +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, + size_t del_topic_cnt) { size_t i; - for (i = 0 ; i < del_topic_cnt ; i++) + for (i = 0; i < 
del_topic_cnt; i++) rd_kafka_DeleteTopic_destroy(del_topics[i]); } @@ -1851,7 +1800,7 @@ void rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics, /** * @brief Topic name comparator for DeleteTopic_t */ -static int rd_kafka_DeleteTopic_cmp (const void *_a, const void *_b) { +static int rd_kafka_DeleteTopic_cmp(const void *_a, const void *_b) { const rd_kafka_DeleteTopic_t *a = _a, *b = _b; return strcmp(a->topic, b->topic); } @@ -1860,28 +1809,25 @@ static int rd_kafka_DeleteTopic_cmp (const void *_a, const void *_b) { * @brief Allocate a new DeleteTopic and make a copy of \p src */ static rd_kafka_DeleteTopic_t * -rd_kafka_DeleteTopic_copy (const rd_kafka_DeleteTopic_t *src) { +rd_kafka_DeleteTopic_copy(const rd_kafka_DeleteTopic_t *src) { return rd_kafka_DeleteTopic_new(src->topic); } - - - - /** * @brief Parse DeleteTopicsResponse and create ADMIN_RESULT op. */ static rd_kafka_resp_err_t -rd_kafka_DeleteTopicsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_DeleteTopicsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; - rd_kafka_broker_t *rkb = reply->rkbuf_rkb; - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; int32_t topic_cnt; int i; @@ -1896,17 +1842,18 @@ rd_kafka_DeleteTopicsResponse_parse (rd_kafka_op_t *rko_req, if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) rd_kafka_buf_parse_fail( - reply, - "Received %"PRId32" topics in response " - "when only %d were requested", topic_cnt, - rd_list_cnt(&rko_req->rko_u.admin_request.args)); + reply, + "Received %" PRId32 + " topics in response " + "when only %d were requested", + topic_cnt, 
rd_list_cnt(&rko_req->rko_u.admin_request.args)); rko_result = rd_kafka_admin_result_new(rko_req); rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt, rd_kafka_topic_result_free); - for (i = 0 ; i < (int)topic_cnt ; i++) { + for (i = 0; i < (int)topic_cnt; i++) { rd_kafkap_str_t ktopic; int16_t error_code; rd_kafka_topic_result_t *terr; @@ -1922,41 +1869,37 @@ rd_kafka_DeleteTopicsResponse_parse (rd_kafka_op_t *rko_req, * we hide this error code from the application * since the topic creation is in fact in progress. */ if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT && - rd_kafka_confval_get_int(&rko_req->rko_u. - admin_request.options. - operation_timeout) <= 0) { + rd_kafka_confval_get_int(&rko_req->rko_u.admin_request + .options.operation_timeout) <= + 0) { error_code = RD_KAFKA_RESP_ERR_NO_ERROR; } - terr = rd_kafka_topic_result_new(ktopic.str, - RD_KAFKAP_STR_LEN(&ktopic), - error_code, - error_code ? - rd_kafka_err2str(error_code) : - NULL); + terr = rd_kafka_topic_result_new( + ktopic.str, RD_KAFKAP_STR_LEN(&ktopic), error_code, + error_code ? rd_kafka_err2str(error_code) : NULL); /* As a convenience to the application we insert topic result * in the same order as they were requested. The broker * does not maintain ordering unfortunately. 
*/ skel.topic = terr->topic; - orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, + orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, &skel, rd_kafka_DeleteTopic_cmp); if (orig_pos == -1) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s that was not " - "included in the original request", - RD_KAFKAP_STR_PR(&ktopic)); + reply, + "Broker returned topic %.*s that was not " + "included in the original request", + RD_KAFKAP_STR_PR(&ktopic)); } if (rd_list_elem(&rko_result->rko_u.admin_result.results, orig_pos) != NULL) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s multiple times", - RD_KAFKAP_STR_PR(&ktopic)); + reply, "Broker returned topic %.*s multiple times", + RD_KAFKAP_STR_PR(&ktopic)); } rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, @@ -1967,7 +1910,7 @@ rd_kafka_DeleteTopicsResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: if (rko_result) rd_kafka_op_destroy(rko_result); @@ -1980,32 +1923,28 @@ rd_kafka_DeleteTopicsResponse_parse (rd_kafka_op_t *rko_req, - - - -void rd_kafka_DeleteTopics (rd_kafka_t *rk, - rd_kafka_DeleteTopic_t **del_topics, - size_t del_topic_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_DeleteTopics(rd_kafka_t *rk, + rd_kafka_DeleteTopic_t **del_topics, + size_t del_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko; size_t i; static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_DeleteTopicsRequest, - rd_kafka_DeleteTopicsResponse_parse, + rd_kafka_DeleteTopicsRequest, + rd_kafka_DeleteTopicsResponse_parse, }; rd_assert(rkqu); - rko = rd_kafka_admin_request_op_new(rk, - RD_KAFKA_OP_DELETETOPICS, + rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DELETETOPICS, RD_KAFKA_EVENT_DELETETOPICS_RESULT, &cbs, options, rkqu->rkqu_q); 
rd_list_init(&rko->rko_u.admin_request.args, (int)del_topic_cnt, rd_kafka_DeleteTopic_free); - for (i = 0 ; i < del_topic_cnt ; i++) + for (i = 0; i < del_topic_cnt; i++) rd_list_add(&rko->rko_u.admin_request.args, rd_kafka_DeleteTopic_copy(del_topics[i])); @@ -2019,17 +1958,15 @@ void rd_kafka_DeleteTopics (rd_kafka_t *rk, * The returned \p topics life-time is the same as the \p result object. * @param cntp is updated to the number of elements in the array. */ -const rd_kafka_topic_result_t ** -rd_kafka_DeleteTopics_result_topics ( - const rd_kafka_DeleteTopics_result_t *result, - size_t *cntp) { +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics( + const rd_kafka_DeleteTopics_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result, cntp); } - /** * @name Create partitions * @{ @@ -2039,29 +1976,31 @@ rd_kafka_DeleteTopics_result_topics ( * */ -rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new (const char *topic, - size_t new_total_cnt, - char *errstr, - size_t errstr_size) { +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, + size_t new_total_cnt, + char *errstr, + size_t errstr_size) { size_t tsize = strlen(topic) + 1; rd_kafka_NewPartitions_t *newps; if (new_total_cnt < 1 || new_total_cnt > RD_KAFKAP_PARTITIONS_MAX) { - rd_snprintf(errstr, errstr_size, "new_total_cnt out of " + rd_snprintf(errstr, errstr_size, + "new_total_cnt out of " "expected range %d..%d", 1, RD_KAFKAP_PARTITIONS_MAX); return NULL; } /* Single allocation */ - newps = rd_malloc(sizeof(*newps) + tsize); + newps = rd_malloc(sizeof(*newps) + tsize); newps->total_cnt = new_total_cnt; - newps->topic = newps->data; + newps->topic = newps->data; memcpy(newps->topic, topic, tsize); /* List of int32 lists */ rd_list_init(&newps->replicas, 0, rd_list_destroy_free); - rd_list_prealloc_elems(&newps->replicas, 0, new_total_cnt, 0/*nozero*/); + rd_list_prealloc_elems(&newps->replicas, 0, new_total_cnt, + 0 
/*nozero*/); return newps; } @@ -2069,7 +2008,7 @@ rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new (const char *topic, /** * @brief Topic name comparator for NewPartitions_t */ -static int rd_kafka_NewPartitions_cmp (const void *_a, const void *_b) { +static int rd_kafka_NewPartitions_cmp(const void *_a, const void *_b) { const rd_kafka_NewPartitions_t *a = _a, *b = _b; return strcmp(a->topic, b->topic); } @@ -2079,7 +2018,7 @@ static int rd_kafka_NewPartitions_cmp (const void *_a, const void *_b) { * @brief Allocate a new CreatePartitions and make a copy of \p src */ static rd_kafka_NewPartitions_t * -rd_kafka_NewPartitions_copy (const rd_kafka_NewPartitions_t *src) { +rd_kafka_NewPartitions_copy(const rd_kafka_NewPartitions_t *src) { rd_kafka_NewPartitions_t *dst; dst = rd_kafka_NewPartitions_new(src->topic, src->total_cnt, NULL, 0); @@ -2092,34 +2031,32 @@ rd_kafka_NewPartitions_copy (const rd_kafka_NewPartitions_t *src) { return dst; } -void rd_kafka_NewPartitions_destroy (rd_kafka_NewPartitions_t *newps) { +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *newps) { rd_list_destroy(&newps->replicas); rd_free(newps); } -static void rd_kafka_NewPartitions_free (void *ptr) { +static void rd_kafka_NewPartitions_free(void *ptr) { rd_kafka_NewPartitions_destroy(ptr); } -void rd_kafka_NewPartitions_destroy_array (rd_kafka_NewPartitions_t **newps, - size_t newps_cnt) { +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **newps, + size_t newps_cnt) { size_t i; - for (i = 0 ; i < newps_cnt ; i++) + for (i = 0; i < newps_cnt; i++) rd_kafka_NewPartitions_destroy(newps[i]); } - - rd_kafka_resp_err_t -rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *newp, - int32_t new_partition_idx, - int32_t *broker_ids, - size_t broker_id_cnt, - char *errstr, - size_t errstr_size) { +rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *newp, + int32_t new_partition_idx, + int32_t *broker_ids, + size_t 
broker_id_cnt, + char *errstr, + size_t errstr_size) { rd_list_t *rl; int i; @@ -2128,7 +2065,7 @@ rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *newp, rd_snprintf(errstr, errstr_size, "Partitions must be added in order, " "starting at 0: expecting partition " - "index %d, not %"PRId32, + "index %d, not %" PRId32, rd_list_cnt(&newp->replicas), new_partition_idx); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -2143,7 +2080,7 @@ rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *newp, rl = rd_list_init_int32(rd_list_new(0, NULL), (int)broker_id_cnt); - for (i = 0 ; i < (int)broker_id_cnt ; i++) + for (i = 0; i < (int)broker_id_cnt; i++) rd_list_set_int32(rl, i, broker_ids[i]); rd_list_add(&newp->replicas, rl); @@ -2153,20 +2090,19 @@ rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *newp, - /** * @brief Parse CreatePartitionsResponse and create ADMIN_RESULT op. */ static rd_kafka_resp_err_t -rd_kafka_CreatePartitionsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, - size_t errstr_size) { +rd_kafka_CreatePartitionsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; - rd_kafka_broker_t *rkb = reply->rkbuf_rkb; - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; int32_t topic_cnt; int i; int32_t Throttle_Time; @@ -2179,17 +2115,18 @@ rd_kafka_CreatePartitionsResponse_parse (rd_kafka_op_t *rko_req, if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) rd_kafka_buf_parse_fail( - reply, - "Received %"PRId32" topics in response " - "when only %d were requested", topic_cnt, - rd_list_cnt(&rko_req->rko_u.admin_request.args)); + reply, + "Received %" PRId32 + " topics in response " + "when 
only %d were requested", + topic_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); rko_result = rd_kafka_admin_result_new(rko_req); rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt, rd_kafka_topic_result_free); - for (i = 0 ; i < (int)topic_cnt ; i++) { + for (i = 0; i < (int)topic_cnt; i++) { rd_kafkap_str_t ktopic; int16_t error_code; char *this_errstr = NULL; @@ -2208,9 +2145,9 @@ rd_kafka_CreatePartitionsResponse_parse (rd_kafka_op_t *rko_req, * we hide this error code from the application * since the topic creation is in fact in progress. */ if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT && - rd_kafka_confval_get_int(&rko_req->rko_u. - admin_request.options. - operation_timeout) <= 0) { + rd_kafka_confval_get_int(&rko_req->rko_u.admin_request + .options.operation_timeout) <= + 0) { error_code = RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -2218,39 +2155,36 @@ rd_kafka_CreatePartitionsResponse_parse (rd_kafka_op_t *rko_req, if (RD_KAFKAP_STR_IS_NULL(&error_msg) || RD_KAFKAP_STR_LEN(&error_msg) == 0) this_errstr = - (char *)rd_kafka_err2str(error_code); + (char *)rd_kafka_err2str(error_code); else RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg); } - terr = rd_kafka_topic_result_new(ktopic.str, - RD_KAFKAP_STR_LEN(&ktopic), - error_code, - error_code ? - this_errstr : NULL); + terr = rd_kafka_topic_result_new( + ktopic.str, RD_KAFKAP_STR_LEN(&ktopic), error_code, + error_code ? this_errstr : NULL); /* As a convenience to the application we insert topic result * in the same order as they were requested. The broker * does not maintain ordering unfortunately. 
*/ skel.topic = terr->topic; - orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, + orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, &skel, rd_kafka_NewPartitions_cmp); if (orig_pos == -1) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s that was not " - "included in the original request", - RD_KAFKAP_STR_PR(&ktopic)); + reply, + "Broker returned topic %.*s that was not " + "included in the original request", + RD_KAFKAP_STR_PR(&ktopic)); } if (rd_list_elem(&rko_result->rko_u.admin_result.results, orig_pos) != NULL) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s multiple times", - RD_KAFKAP_STR_PR(&ktopic)); + reply, "Broker returned topic %.*s multiple times", + RD_KAFKAP_STR_PR(&ktopic)); } rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, @@ -2261,7 +2195,7 @@ rd_kafka_CreatePartitionsResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: if (rko_result) rd_kafka_op_destroy(rko_result); @@ -2274,34 +2208,29 @@ rd_kafka_CreatePartitionsResponse_parse (rd_kafka_op_t *rko_req, - - - - -void rd_kafka_CreatePartitions (rd_kafka_t *rk, - rd_kafka_NewPartitions_t **newps, - size_t newps_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_CreatePartitions(rd_kafka_t *rk, + rd_kafka_NewPartitions_t **newps, + size_t newps_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko; size_t i; static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_CreatePartitionsRequest, - rd_kafka_CreatePartitionsResponse_parse, + rd_kafka_CreatePartitionsRequest, + rd_kafka_CreatePartitionsResponse_parse, }; rd_assert(rkqu); rko = rd_kafka_admin_request_op_new( - rk, - RD_KAFKA_OP_CREATEPARTITIONS, - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, - &cbs, options, rkqu->rkqu_q); + rk, RD_KAFKA_OP_CREATEPARTITIONS, + 
RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, &cbs, options, + rkqu->rkqu_q); rd_list_init(&rko->rko_u.admin_request.args, (int)newps_cnt, rd_kafka_NewPartitions_free); - for (i = 0 ; i < newps_cnt ; i++) + for (i = 0; i < newps_cnt; i++) rd_list_add(&rko->rko_u.admin_request.args, rd_kafka_NewPartitions_copy(newps[i])); @@ -2315,10 +2244,9 @@ void rd_kafka_CreatePartitions (rd_kafka_t *rk, * The returned \p topics life-time is the same as the \p result object. * @param cntp is updated to the number of elements in the array. */ -const rd_kafka_topic_result_t ** -rd_kafka_CreatePartitions_result_topics ( - const rd_kafka_CreatePartitions_result_t *result, - size_t *cntp) { +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics( + const rd_kafka_CreatePartitions_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result, cntp); } @@ -2327,7 +2255,6 @@ rd_kafka_CreatePartitions_result_topics ( - /** * @name ConfigEntry * @{ @@ -2336,14 +2263,14 @@ rd_kafka_CreatePartitions_result_topics ( * */ -static void rd_kafka_ConfigEntry_destroy (rd_kafka_ConfigEntry_t *entry) { +static void rd_kafka_ConfigEntry_destroy(rd_kafka_ConfigEntry_t *entry) { rd_strtup_destroy(entry->kv); rd_list_destroy(&entry->synonyms); rd_free(entry); } -static void rd_kafka_ConfigEntry_free (void *ptr) { +static void rd_kafka_ConfigEntry_free(void *ptr) { rd_kafka_ConfigEntry_destroy((rd_kafka_ConfigEntry_t *)ptr); } @@ -2356,15 +2283,16 @@ static void rd_kafka_ConfigEntry_free (void *ptr) { * @param value Config entry value, or NULL * @param value_len Length of value, or -1 to use strlen() */ -static rd_kafka_ConfigEntry_t * -rd_kafka_ConfigEntry_new0 (const char *name, size_t name_len, - const char *value, size_t value_len) { +static rd_kafka_ConfigEntry_t *rd_kafka_ConfigEntry_new0(const char *name, + size_t name_len, + const char *value, + size_t value_len) { rd_kafka_ConfigEntry_t *entry; if (!name) return NULL; - entry = rd_calloc(1, 
sizeof(*entry)); + entry = rd_calloc(1, sizeof(*entry)); entry->kv = rd_strtup_new0(name, name_len, value, value_len); rd_list_init(&entry->synonyms, 0, rd_kafka_ConfigEntry_free); @@ -2377,22 +2305,21 @@ rd_kafka_ConfigEntry_new0 (const char *name, size_t name_len, /** * @sa rd_kafka_ConfigEntry_new0 */ -static rd_kafka_ConfigEntry_t * -rd_kafka_ConfigEntry_new (const char *name, const char *value) { +static rd_kafka_ConfigEntry_t *rd_kafka_ConfigEntry_new(const char *name, + const char *value) { return rd_kafka_ConfigEntry_new0(name, -1, value, -1); } - /** * @brief Allocate a new AlterConfigs and make a copy of \p src */ static rd_kafka_ConfigEntry_t * -rd_kafka_ConfigEntry_copy (const rd_kafka_ConfigEntry_t *src) { +rd_kafka_ConfigEntry_copy(const rd_kafka_ConfigEntry_t *src) { rd_kafka_ConfigEntry_t *dst; - dst = rd_kafka_ConfigEntry_new(src->kv->name, src->kv->value); + dst = rd_kafka_ConfigEntry_new(src->kv->name, src->kv->value); dst->a = src->a; rd_list_destroy(&dst->synonyms); /* created in .._new() */ @@ -2403,49 +2330,47 @@ rd_kafka_ConfigEntry_copy (const rd_kafka_ConfigEntry_t *src) { return dst; } -static void *rd_kafka_ConfigEntry_list_copy (const void *src, void *opaque) { +static void *rd_kafka_ConfigEntry_list_copy(const void *src, void *opaque) { return rd_kafka_ConfigEntry_copy((const rd_kafka_ConfigEntry_t *)src); } -const char *rd_kafka_ConfigEntry_name (const rd_kafka_ConfigEntry_t *entry) { +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry) { return entry->kv->name; } -const char * -rd_kafka_ConfigEntry_value (const rd_kafka_ConfigEntry_t *entry) { +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry) { return entry->kv->value; } rd_kafka_ConfigSource_t -rd_kafka_ConfigEntry_source (const rd_kafka_ConfigEntry_t *entry) { +rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry) { return entry->a.source; } -int rd_kafka_ConfigEntry_is_read_only (const rd_kafka_ConfigEntry_t *entry) { 
+int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry) { return entry->a.is_readonly; } -int rd_kafka_ConfigEntry_is_default (const rd_kafka_ConfigEntry_t *entry) { +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry) { return entry->a.is_default; } -int rd_kafka_ConfigEntry_is_sensitive (const rd_kafka_ConfigEntry_t *entry) { +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry) { return entry->a.is_sensitive; } -int rd_kafka_ConfigEntry_is_synonym (const rd_kafka_ConfigEntry_t *entry) { +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry) { return entry->a.is_synonym; } const rd_kafka_ConfigEntry_t ** -rd_kafka_ConfigEntry_synonyms (const rd_kafka_ConfigEntry_t *entry, - size_t *cntp) { +rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, + size_t *cntp) { *cntp = rd_list_cnt(&entry->synonyms); if (!*cntp) return NULL; return (const rd_kafka_ConfigEntry_t **)entry->synonyms.rl_elems; - } @@ -2461,15 +2386,11 @@ rd_kafka_ConfigEntry_synonyms (const rd_kafka_ConfigEntry_t *entry, * */ -const char * -rd_kafka_ConfigSource_name (rd_kafka_ConfigSource_t confsource) { +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource) { static const char *names[] = { - "UNKNOWN_CONFIG", - "DYNAMIC_TOPIC_CONFIG", - "DYNAMIC_BROKER_CONFIG", - "DYNAMIC_DEFAULT_BROKER_CONFIG", - "STATIC_BROKER_CONFIG", - "DEFAULT_CONFIG", + "UNKNOWN_CONFIG", "DYNAMIC_TOPIC_CONFIG", + "DYNAMIC_BROKER_CONFIG", "DYNAMIC_DEFAULT_BROKER_CONFIG", + "STATIC_BROKER_CONFIG", "DEFAULT_CONFIG", }; if ((unsigned int)confsource >= @@ -2491,18 +2412,12 @@ rd_kafka_ConfigSource_name (rd_kafka_ConfigSource_t confsource) { * */ -const char * -rd_kafka_ResourceType_name (rd_kafka_ResourceType_t restype) { +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype) { static const char *names[] = { - "UNKNOWN", - "ANY", - "TOPIC", - "GROUP", - "BROKER", + "UNKNOWN", "ANY", "TOPIC", "GROUP", 
"BROKER", }; - if ((unsigned int)restype >= - (unsigned int)RD_KAFKA_RESOURCE__CNT) + if ((unsigned int)restype >= (unsigned int)RD_KAFKA_RESOURCE__CNT) return "UNSUPPORTED"; return names[restype]; @@ -2510,15 +2425,15 @@ rd_kafka_ResourceType_name (rd_kafka_ResourceType_t restype) { rd_kafka_ConfigResource_t * -rd_kafka_ConfigResource_new (rd_kafka_ResourceType_t restype, - const char *resname) { +rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, + const char *resname) { rd_kafka_ConfigResource_t *config; size_t namesz = resname ? strlen(resname) : 0; if (!namesz || (int)restype < 0) return NULL; - config = rd_calloc(1, sizeof(*config) + namesz + 1); + config = rd_calloc(1, sizeof(*config) + namesz + 1); config->name = config->data; memcpy(config->name, resname, namesz + 1); config->restype = restype; @@ -2528,22 +2443,22 @@ rd_kafka_ConfigResource_new (rd_kafka_ResourceType_t restype, return config; } -void rd_kafka_ConfigResource_destroy (rd_kafka_ConfigResource_t *config) { +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config) { rd_list_destroy(&config->config); if (config->errstr) rd_free(config->errstr); rd_free(config); } -static void rd_kafka_ConfigResource_free (void *ptr) { +static void rd_kafka_ConfigResource_free(void *ptr) { rd_kafka_ConfigResource_destroy((rd_kafka_ConfigResource_t *)ptr); } -void rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config, - size_t config_cnt) { +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, + size_t config_cnt) { size_t i; - for (i = 0 ; i < config_cnt ; i++) + for (i = 0; i < config_cnt; i++) rd_kafka_ConfigResource_destroy(config[i]); } @@ -2551,7 +2466,7 @@ void rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config, /** * @brief Type and name comparator for ConfigResource_t */ -static int rd_kafka_ConfigResource_cmp (const void *_a, const void *_b) { +static int rd_kafka_ConfigResource_cmp(const void *_a, const void 
*_b) { const rd_kafka_ConfigResource_t *a = _a, *b = _b; int r = RD_CMP(a->restype, b->restype); if (r) @@ -2563,7 +2478,7 @@ static int rd_kafka_ConfigResource_cmp (const void *_a, const void *_b) { * @brief Allocate a new AlterConfigs and make a copy of \p src */ static rd_kafka_ConfigResource_t * -rd_kafka_ConfigResource_copy (const rd_kafka_ConfigResource_t *src) { +rd_kafka_ConfigResource_copy(const rd_kafka_ConfigResource_t *src) { rd_kafka_ConfigResource_t *dst; dst = rd_kafka_ConfigResource_new(src->restype, src->name); @@ -2578,15 +2493,16 @@ rd_kafka_ConfigResource_copy (const rd_kafka_ConfigResource_t *src) { static void -rd_kafka_ConfigResource_add_ConfigEntry (rd_kafka_ConfigResource_t *config, - rd_kafka_ConfigEntry_t *entry) { +rd_kafka_ConfigResource_add_ConfigEntry(rd_kafka_ConfigResource_t *config, + rd_kafka_ConfigEntry_t *entry) { rd_list_add(&config->config, entry); } rd_kafka_resp_err_t -rd_kafka_ConfigResource_add_config (rd_kafka_ConfigResource_t *config, - const char *name, const char *value) { +rd_kafka_ConfigResource_add_config(rd_kafka_ConfigResource_t *config, + const char *name, + const char *value) { if (!name || !*name || !value) return RD_KAFKA_RESP_ERR__INVALID_ARG; @@ -2595,8 +2511,9 @@ rd_kafka_ConfigResource_add_config (rd_kafka_ConfigResource_t *config, } rd_kafka_resp_err_t -rd_kafka_ConfigResource_set_config (rd_kafka_ConfigResource_t *config, - const char *name, const char *value) { +rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, + const char *name, + const char *value) { if (!name || !*name || !value) return RD_KAFKA_RESP_ERR__INVALID_ARG; @@ -2605,8 +2522,8 @@ rd_kafka_ConfigResource_set_config (rd_kafka_ConfigResource_t *config, } rd_kafka_resp_err_t -rd_kafka_ConfigResource_delete_config (rd_kafka_ConfigResource_t *config, - const char *name) { +rd_kafka_ConfigResource_delete_config(rd_kafka_ConfigResource_t *config, + const char *name) { if (!name || !*name) return RD_KAFKA_RESP_ERR__INVALID_ARG; 
@@ -2616,8 +2533,8 @@ rd_kafka_ConfigResource_delete_config (rd_kafka_ConfigResource_t *config, const rd_kafka_ConfigEntry_t ** -rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config, - size_t *cntp) { +rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, + size_t *cntp) { *cntp = rd_list_cnt(&config->config); if (!*cntp) return NULL; @@ -2626,24 +2543,23 @@ rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config, - rd_kafka_ResourceType_t -rd_kafka_ConfigResource_type (const rd_kafka_ConfigResource_t *config) { +rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config) { return config->restype; } const char * -rd_kafka_ConfigResource_name (const rd_kafka_ConfigResource_t *config) { +rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config) { return config->name; } rd_kafka_resp_err_t -rd_kafka_ConfigResource_error (const rd_kafka_ConfigResource_t *config) { +rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config) { return config->err; } const char * -rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config) { +rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config) { if (!config->err) return NULL; if (config->errstr) @@ -2664,10 +2580,10 @@ rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config) { * is returned and \p broker_idp is set to use the coordinator. 
*/ static rd_kafka_resp_err_t -rd_kafka_ConfigResource_get_single_broker_id (const rd_list_t *configs, - int32_t *broker_idp, - char *errstr, - size_t errstr_size) { +rd_kafka_ConfigResource_get_single_broker_id(const rd_list_t *configs, + int32_t *broker_idp, + char *errstr, + size_t errstr_size) { const rd_kafka_ConfigResource_t *config; int i; int32_t broker_id = RD_KAFKA_ADMIN_TARGET_CONTROLLER; /* Some default @@ -2730,14 +2646,15 @@ rd_kafka_ConfigResource_get_single_broker_id (const rd_list_t *configs, * @brief Parse AlterConfigsResponse and create ADMIN_RESULT op. */ static rd_kafka_resp_err_t -rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_AlterConfigsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; - rd_kafka_broker_t *rkb = reply->rkbuf_rkb; - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; int32_t res_cnt; int i; int32_t Throttle_Time; @@ -2749,8 +2666,10 @@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, if (res_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) { rd_snprintf(errstr, errstr_size, - "Received %"PRId32" ConfigResources in response " - "when only %d were requested", res_cnt, + "Received %" PRId32 + " ConfigResources in response " + "when only %d were requested", + res_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); return RD_KAFKA_RESP_ERR__BAD_MSG; } @@ -2760,7 +2679,7 @@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt, rd_kafka_ConfigResource_free); - for (i = 0 ; i < (int)res_cnt ; i++) { + for (i = 0; i < (int)res_cnt; i++) { int16_t error_code; rd_kafkap_str_t 
error_msg; int8_t res_type; @@ -2781,15 +2700,15 @@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, if (RD_KAFKAP_STR_IS_NULL(&error_msg) || RD_KAFKAP_STR_LEN(&error_msg) == 0) this_errstr = - (char *)rd_kafka_err2str(error_code); + (char *)rd_kafka_err2str(error_code); else RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg); } config = rd_kafka_ConfigResource_new(res_type, res_name); if (!config) { - rd_kafka_log(rko_req->rko_rk, LOG_ERR, - "ADMIN", "AlterConfigs returned " + rd_kafka_log(rko_req->rko_rk, LOG_ERR, "ADMIN", + "AlterConfigs returned " "unsupported ConfigResource #%d with " "type %d and name \"%s\": ignoring", i, res_type, res_name); @@ -2804,27 +2723,27 @@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, * in the same order as they were requested. The broker * does not maintain ordering unfortunately. */ skel.restype = config->restype; - skel.name = config->name; + skel.name = config->name; orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, &skel, rd_kafka_ConfigResource_cmp); if (orig_pos == -1) { rd_kafka_ConfigResource_destroy(config); rd_kafka_buf_parse_fail( - reply, - "Broker returned ConfigResource %d,%s " - "that was not " - "included in the original request", - res_type, res_name); + reply, + "Broker returned ConfigResource %d,%s " + "that was not " + "included in the original request", + res_type, res_name); } if (rd_list_elem(&rko_result->rko_u.admin_result.results, orig_pos) != NULL) { rd_kafka_ConfigResource_destroy(config); rd_kafka_buf_parse_fail( - reply, - "Broker returned ConfigResource %d,%s " - "multiple times", - res_type, res_name); + reply, + "Broker returned ConfigResource %d,%s " + "multiple times", + res_type, res_name); } rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, @@ -2835,7 +2754,7 @@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: if (rko_result) rd_kafka_op_destroy(rko_result); @@ -2848,33 +2767,30 
@@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, - -void rd_kafka_AlterConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_AlterConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko; size_t i; rd_kafka_resp_err_t err; char errstr[256]; static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_AlterConfigsRequest, - rd_kafka_AlterConfigsResponse_parse, + rd_kafka_AlterConfigsRequest, + rd_kafka_AlterConfigsResponse_parse, }; rd_assert(rkqu); - rko = rd_kafka_admin_request_op_new( - rk, - RD_KAFKA_OP_ALTERCONFIGS, - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, - &cbs, options, rkqu->rkqu_q); + rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_ALTERCONFIGS, + RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, + &cbs, options, rkqu->rkqu_q); rd_list_init(&rko->rko_u.admin_request.args, (int)config_cnt, rd_kafka_ConfigResource_free); - for (i = 0 ; i < config_cnt ; i++) + for (i = 0; i < config_cnt; i++) rd_list_add(&rko->rko_u.admin_request.args, rd_kafka_ConfigResource_copy(configs[i])); @@ -2884,13 +2800,12 @@ void rd_kafka_AlterConfigs (rd_kafka_t *rk, * Multiple BROKER resources are not allowed. 
*/ err = rd_kafka_ConfigResource_get_single_broker_id( - &rko->rko_u.admin_request.args, - &rko->rko_u.admin_request.broker_id, - errstr, sizeof(errstr)); + &rko->rko_u.admin_request.args, &rko->rko_u.admin_request.broker_id, + errstr, sizeof(errstr)); if (err) { rd_kafka_admin_result_fail(rko, err, "%s", errstr); rd_kafka_admin_common_worker_destroy(rk, rko, - rd_true/*destroy*/); + rd_true /*destroy*/); return; } @@ -2898,19 +2813,17 @@ void rd_kafka_AlterConfigs (rd_kafka_t *rk, } -const rd_kafka_ConfigResource_t ** -rd_kafka_AlterConfigs_result_resources ( - const rd_kafka_AlterConfigs_result_t *result, - size_t *cntp) { +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources( + const rd_kafka_AlterConfigs_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_resources( - (const rd_kafka_op_t *)result, cntp); + (const rd_kafka_op_t *)result, cntp); } /**@}*/ - /** * @name DescribeConfigs * @{ @@ -2924,19 +2837,20 @@ rd_kafka_AlterConfigs_result_resources ( * @brief Parse DescribeConfigsResponse and create ADMIN_RESULT op. 
*/ static rd_kafka_resp_err_t -rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_DescribeConfigsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; - rd_kafka_broker_t *rkb = reply->rkbuf_rkb; - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; int32_t res_cnt; int i; int32_t Throttle_Time; rd_kafka_ConfigResource_t *config = NULL; - rd_kafka_ConfigEntry_t *entry = NULL; + rd_kafka_ConfigEntry_t *entry = NULL; rd_kafka_buf_read_i32(reply, &Throttle_Time); rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time); @@ -2946,17 +2860,18 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, if (res_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) rd_kafka_buf_parse_fail( - reply, - "Received %"PRId32" ConfigResources in response " - "when only %d were requested", res_cnt, - rd_list_cnt(&rko_req->rko_u.admin_request.args)); + reply, + "Received %" PRId32 + " ConfigResources in response " + "when only %d were requested", + res_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); rko_result = rd_kafka_admin_result_new(rko_req); rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt, rd_kafka_ConfigResource_free); - for (i = 0 ; i < (int)res_cnt ; i++) { + for (i = 0; i < (int)res_cnt; i++) { int16_t error_code; rd_kafkap_str_t error_msg; int8_t res_type; @@ -2978,15 +2893,15 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, if (RD_KAFKAP_STR_IS_NULL(&error_msg) || RD_KAFKAP_STR_LEN(&error_msg) == 0) this_errstr = - (char *)rd_kafka_err2str(error_code); + (char *)rd_kafka_err2str(error_code); else RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg); } config = 
rd_kafka_ConfigResource_new(res_type, res_name); if (!config) { - rd_kafka_log(rko_req->rko_rk, LOG_ERR, - "ADMIN", "DescribeConfigs returned " + rd_kafka_log(rko_req->rko_rk, LOG_ERR, "ADMIN", + "DescribeConfigs returned " "unsupported ConfigResource #%d with " "type %d and name \"%s\": ignoring", i, res_type, res_name); @@ -3000,7 +2915,7 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, /* #config_entries */ rd_kafka_buf_read_i32(reply, &entry_cnt); - for (ci = 0 ; ci < (int)entry_cnt ; ci++) { + for (ci = 0; ci < (int)entry_cnt; ci++) { rd_kafkap_str_t config_name, config_value; int32_t syn_cnt; int si; @@ -3009,10 +2924,8 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, rd_kafka_buf_read_str(reply, &config_value); entry = rd_kafka_ConfigEntry_new0( - config_name.str, - RD_KAFKAP_STR_LEN(&config_name), - config_value.str, - RD_KAFKAP_STR_LEN(&config_value)); + config_name.str, RD_KAFKAP_STR_LEN(&config_name), + config_value.str, RD_KAFKAP_STR_LEN(&config_value)); rd_kafka_buf_read_bool(reply, &entry->a.is_readonly); @@ -3025,7 +2938,7 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, &entry->a.is_default); if (entry->a.is_default) entry->a.source = - RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG; + RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG; } else { int8_t config_source; rd_kafka_buf_read_i8(reply, &config_source); @@ -3034,7 +2947,6 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, if (entry->a.source == RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG) entry->a.is_default = 1; - } rd_kafka_buf_read_bool(reply, &entry->a.is_sensitive); @@ -3046,14 +2958,13 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, if (syn_cnt > 100000) rd_kafka_buf_parse_fail( - reply, - "Broker returned %"PRId32 - " config synonyms for " - "ConfigResource %d,%s: " - "limit is 100000", - syn_cnt, - config->restype, - config->name); + reply, + "Broker returned %" PRId32 + " config synonyms for " + "ConfigResource %d,%s: " + 
"limit is 100000", + syn_cnt, config->restype, + config->name); if (syn_cnt > 0) rd_list_grow(&entry->synonyms, syn_cnt); @@ -3066,7 +2977,7 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, /* Read synonyms (ApiVersion 1) */ - for (si = 0 ; si < (int)syn_cnt ; si++) { + for (si = 0; si < (int)syn_cnt; si++) { rd_kafkap_str_t syn_name, syn_value; int8_t syn_source; rd_kafka_ConfigEntry_t *syn_entry; @@ -3076,32 +2987,30 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, rd_kafka_buf_read_i8(reply, &syn_source); syn_entry = rd_kafka_ConfigEntry_new0( - syn_name.str, - RD_KAFKAP_STR_LEN(&syn_name), - syn_value.str, - RD_KAFKAP_STR_LEN(&syn_value)); + syn_name.str, RD_KAFKAP_STR_LEN(&syn_name), + syn_value.str, + RD_KAFKAP_STR_LEN(&syn_value)); if (!syn_entry) rd_kafka_buf_parse_fail( - reply, - "Broker returned invalid " - "synonym #%d " - "for ConfigEntry #%d (%s) " - "and ConfigResource %d,%s: " - "syn_name.len %d, " - "syn_value.len %d", - si, ci, entry->kv->name, - config->restype, config->name, - (int)syn_name.len, - (int)syn_value.len); - - syn_entry->a.source = syn_source; + reply, + "Broker returned invalid " + "synonym #%d " + "for ConfigEntry #%d (%s) " + "and ConfigResource %d,%s: " + "syn_name.len %d, " + "syn_value.len %d", + si, ci, entry->kv->name, + config->restype, config->name, + (int)syn_name.len, + (int)syn_value.len); + + syn_entry->a.source = syn_source; syn_entry->a.is_synonym = 1; rd_list_add(&entry->synonyms, syn_entry); } - rd_kafka_ConfigResource_add_ConfigEntry( - config, entry); + rd_kafka_ConfigResource_add_ConfigEntry(config, entry); entry = NULL; } @@ -3109,24 +3018,24 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, * in the same order as they were requested. The broker * does not maintain ordering unfortunately. 
*/ skel.restype = config->restype; - skel.name = config->name; + skel.name = config->name; orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, &skel, rd_kafka_ConfigResource_cmp); if (orig_pos == -1) rd_kafka_buf_parse_fail( - reply, - "Broker returned ConfigResource %d,%s " - "that was not " - "included in the original request", - res_type, res_name); + reply, + "Broker returned ConfigResource %d,%s " + "that was not " + "included in the original request", + res_type, res_name); if (rd_list_elem(&rko_result->rko_u.admin_result.results, orig_pos) != NULL) rd_kafka_buf_parse_fail( - reply, - "Broker returned ConfigResource %d,%s " - "multiple times", - res_type, res_name); + reply, + "Broker returned ConfigResource %d,%s " + "multiple times", + res_type, res_name); rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, config); @@ -3137,7 +3046,7 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: if (entry) rd_kafka_ConfigEntry_destroy(entry); if (config) @@ -3155,32 +3064,30 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, -void rd_kafka_DescribeConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko; size_t i; rd_kafka_resp_err_t err; char errstr[256]; static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_DescribeConfigsRequest, - rd_kafka_DescribeConfigsResponse_parse, + rd_kafka_DescribeConfigsRequest, + rd_kafka_DescribeConfigsResponse_parse, }; rd_assert(rkqu); rko = rd_kafka_admin_request_op_new( - rk, - RD_KAFKA_OP_DESCRIBECONFIGS, - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, - &cbs, options, rkqu->rkqu_q); + rk, RD_KAFKA_OP_DESCRIBECONFIGS, + 
RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, &cbs, options, rkqu->rkqu_q); rd_list_init(&rko->rko_u.admin_request.args, (int)config_cnt, rd_kafka_ConfigResource_free); - for (i = 0 ; i < config_cnt ; i++) + for (i = 0; i < config_cnt; i++) rd_list_add(&rko->rko_u.admin_request.args, rd_kafka_ConfigResource_copy(configs[i])); @@ -3190,13 +3097,12 @@ void rd_kafka_DescribeConfigs (rd_kafka_t *rk, * Multiple BROKER resources are not allowed. */ err = rd_kafka_ConfigResource_get_single_broker_id( - &rko->rko_u.admin_request.args, - &rko->rko_u.admin_request.broker_id, - errstr, sizeof(errstr)); + &rko->rko_u.admin_request.args, &rko->rko_u.admin_request.broker_id, + errstr, sizeof(errstr)); if (err) { rd_kafka_admin_result_fail(rko, err, "%s", errstr); rd_kafka_admin_common_worker_destroy(rk, rko, - rd_true/*destroy*/); + rd_true /*destroy*/); return; } @@ -3205,13 +3111,11 @@ void rd_kafka_DescribeConfigs (rd_kafka_t *rk, - -const rd_kafka_ConfigResource_t ** -rd_kafka_DescribeConfigs_result_resources ( - const rd_kafka_DescribeConfigs_result_t *result, - size_t *cntp) { +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources( + const rd_kafka_DescribeConfigs_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_resources( - (const rd_kafka_op_t *)result, cntp); + (const rd_kafka_op_t *)result, cntp); } /**@}*/ @@ -3225,28 +3129,27 @@ rd_kafka_DescribeConfigs_result_resources ( * */ -rd_kafka_DeleteRecords_t * -rd_kafka_DeleteRecords_new (const rd_kafka_topic_partition_list_t * - before_offsets) { +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new( + const rd_kafka_topic_partition_list_t *before_offsets) { rd_kafka_DeleteRecords_t *del_records; del_records = rd_calloc(1, sizeof(*del_records)); del_records->offsets = - rd_kafka_topic_partition_list_copy(before_offsets); + rd_kafka_topic_partition_list_copy(before_offsets); return del_records; } -void rd_kafka_DeleteRecords_destroy (rd_kafka_DeleteRecords_t *del_records) { +void 
rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records) { rd_kafka_topic_partition_list_destroy(del_records->offsets); rd_free(del_records); } -void rd_kafka_DeleteRecords_destroy_array (rd_kafka_DeleteRecords_t ** - del_records, - size_t del_record_cnt) { +void rd_kafka_DeleteRecords_destroy_array( + rd_kafka_DeleteRecords_t **del_records, + size_t del_record_cnt) { size_t i; - for (i = 0 ; i < del_record_cnt ; i++) + for (i = 0; i < del_record_cnt; i++) rd_kafka_DeleteRecords_destroy(del_records[i]); } @@ -3256,8 +3159,8 @@ void rd_kafka_DeleteRecords_destroy_array (rd_kafka_DeleteRecords_t ** * into the user response list. */ static void -rd_kafka_DeleteRecords_response_merge (rd_kafka_op_t *rko_fanout, - const rd_kafka_op_t *rko_partial) { +rd_kafka_DeleteRecords_response_merge(rd_kafka_op_t *rko_fanout, + const rd_kafka_op_t *rko_partial) { rd_kafka_t *rk = rko_fanout->rko_rk; const rd_kafka_topic_partition_list_t *partitions; rd_kafka_topic_partition_list_t *respartitions; @@ -3267,8 +3170,8 @@ rd_kafka_DeleteRecords_response_merge (rd_kafka_op_t *rko_fanout, RD_KAFKA_EVENT_DELETERECORDS_RESULT); /* All partitions (offsets) from the DeleteRecords() call */ - respartitions = rd_list_elem(&rko_fanout->rko_u.admin_request. - fanout.results, 0); + respartitions = + rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results, 0); if (rko_partial->rko_err) { /* If there was a request-level error, set the error on @@ -3277,17 +3180,16 @@ rd_kafka_DeleteRecords_response_merge (rd_kafka_op_t *rko_fanout, rd_kafka_topic_partition_t *reqpartition; /* Partitions (offsets) from this DeleteRecordsRequest */ - reqpartitions = rd_list_elem(&rko_partial->rko_u. 
- admin_result.args, 0); + reqpartitions = + rd_list_elem(&rko_partial->rko_u.admin_result.args, 0); RD_KAFKA_TPLIST_FOREACH(reqpartition, reqpartitions) { rd_kafka_topic_partition_t *respart; /* Find result partition */ respart = rd_kafka_topic_partition_list_find( - respartitions, - reqpartition->topic, - reqpartition->partition); + respartitions, reqpartition->topic, + reqpartition->partition); rd_assert(respart || !*"respart not found"); @@ -3306,22 +3208,21 @@ rd_kafka_DeleteRecords_response_merge (rd_kafka_op_t *rko_fanout, /* Find result partition */ respart = rd_kafka_topic_partition_list_find( - respartitions, - partition->topic, - partition->partition); + respartitions, partition->topic, partition->partition); if (unlikely(!respart)) { rd_dassert(!*"partition not found"); rd_kafka_log(rk, LOG_WARNING, "DELETERECORDS", "DeleteRecords response contains " - "unexpected %s [%"PRId32"] which " + "unexpected %s [%" PRId32 + "] which " "was not in the request list: ignored", partition->topic, partition->partition); continue; } respart->offset = partition->offset; - respart->err = partition->err; + respart->err = partition->err; } } @@ -3331,19 +3232,19 @@ rd_kafka_DeleteRecords_response_merge (rd_kafka_op_t *rko_fanout, * @brief Parse DeleteRecordsResponse and create ADMIN_RESULT op. 
*/ static rd_kafka_resp_err_t -rd_kafka_DeleteRecordsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_DeleteRecordsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; rd_kafka_op_t *rko_result; rd_kafka_topic_partition_list_t *offsets; rd_kafka_buf_read_throttle_time(reply); - offsets = rd_kafka_buf_read_topic_partitions(reply, 0, - rd_true/*read_offset*/, - rd_true/*read_part_errs*/); + offsets = rd_kafka_buf_read_topic_partitions( + reply, 0, rd_true /*read_offset*/, rd_true /*read_part_errs*/); if (!offsets) rd_kafka_buf_parse_fail(reply, "Failed to parse topic partitions"); @@ -3373,21 +3274,21 @@ rd_kafka_DeleteRecordsResponse_parse (rd_kafka_op_t *rko_req, * @param rko Reply op (RD_KAFKA_OP_LEADERS). */ static rd_kafka_op_res_t -rd_kafka_DeleteRecords_leaders_queried_cb (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *reply) { +rd_kafka_DeleteRecords_leaders_queried_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *reply) { rd_kafka_resp_err_t err = reply->rko_err; const rd_list_t *leaders = - reply->rko_u.leaders.leaders; /* Possibly NULL (on err) */ + reply->rko_u.leaders.leaders; /* Possibly NULL (on err) */ rd_kafka_topic_partition_list_t *partitions = - reply->rko_u.leaders.partitions; /* Possibly NULL (on err) */ + reply->rko_u.leaders.partitions; /* Possibly NULL (on err) */ rd_kafka_op_t *rko_fanout = reply->rko_u.leaders.opaque; rd_kafka_topic_partition_t *rktpar; rd_kafka_topic_partition_list_t *offsets; const struct rd_kafka_partition_leader *leader; static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_DeleteRecordsRequest, - rd_kafka_DeleteRecordsResponse_parse, + rd_kafka_DeleteRecordsRequest, + rd_kafka_DeleteRecordsResponse_parse, }; int i; @@ -3411,7 +3312,7 @@ rd_kafka_DeleteRecords_leaders_queried_cb 
(rd_kafka_t *rk, continue; rktpar2 = rd_kafka_topic_partition_list_find( - offsets, rktpar->topic, rktpar->partition); + offsets, rktpar->topic, rktpar->partition); rd_assert(rktpar2); rktpar2->err = rktpar->err; } @@ -3420,13 +3321,11 @@ rd_kafka_DeleteRecords_leaders_queried_cb (rd_kafka_t *rk, if (err) { err: rd_kafka_admin_result_fail( - rko_fanout, - err, - "Failed to query partition leaders: %s", - err == RD_KAFKA_RESP_ERR__NOENT ? - "No leaders found" : rd_kafka_err2str(err)); + rko_fanout, err, "Failed to query partition leaders: %s", + err == RD_KAFKA_RESP_ERR__NOENT ? "No leaders found" + : rd_kafka_err2str(err)); rd_kafka_admin_common_worker_destroy(rk, rko_fanout, - rd_true/*destroy*/); + rd_true /*destroy*/); return RD_KAFKA_OP_RES_HANDLED; } @@ -3438,19 +3337,16 @@ rd_kafka_DeleteRecords_leaders_queried_cb (rd_kafka_t *rk, rd_kafka_topic_partition_list_copy(offsets)); rko_fanout->rko_u.admin_request.fanout.outstanding = - rd_list_cnt(leaders); + rd_list_cnt(leaders); rd_assert(rd_list_cnt(leaders) > 0); /* For each leader send a request for its partitions */ RD_LIST_FOREACH(leader, leaders, i) { - rd_kafka_op_t *rko = - rd_kafka_admin_request_op_new( - rk, - RD_KAFKA_OP_DELETERECORDS, - RD_KAFKA_EVENT_DELETERECORDS_RESULT, - &cbs, &rko_fanout->rko_u.admin_request.options, - rk->rk_ops); + rd_kafka_op_t *rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_DELETERECORDS, + RD_KAFKA_EVENT_DELETERECORDS_RESULT, &cbs, + &rko_fanout->rko_u.admin_request.options, rk->rk_ops); rko->rko_u.admin_request.fanout_parent = rko_fanout; rko->rko_u.admin_request.broker_id = leader->rkb->rkb_nodeid; @@ -3458,9 +3354,9 @@ rd_kafka_DeleteRecords_leaders_queried_cb (rd_kafka_t *rk, rd_list_init(&rko->rko_u.admin_request.args, 1, rd_kafka_topic_partition_list_destroy_free); - rd_list_add(&rko->rko_u.admin_request.args, - rd_kafka_topic_partition_list_copy( - leader->partitions)); + rd_list_add( + &rko->rko_u.admin_request.args, + 
rd_kafka_topic_partition_list_copy(leader->partitions)); /* Enqueue op for admin_worker() to transition to next state */ rd_kafka_q_enq(rk->rk_ops, rko); @@ -3470,15 +3366,15 @@ rd_kafka_DeleteRecords_leaders_queried_cb (rd_kafka_t *rk, } -void rd_kafka_DeleteRecords (rd_kafka_t *rk, - rd_kafka_DeleteRecords_t **del_records, - size_t del_record_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_DeleteRecords(rd_kafka_t *rk, + rd_kafka_DeleteRecords_t **del_records, + size_t del_record_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko_fanout; static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = { - rd_kafka_DeleteRecords_response_merge, - rd_kafka_topic_partition_list_copy_opaque, + rd_kafka_DeleteRecords_response_merge, + rd_kafka_topic_partition_list_copy_opaque, }; const rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_list_t *copied_offsets; @@ -3486,10 +3382,8 @@ void rd_kafka_DeleteRecords (rd_kafka_t *rk, rd_assert(rkqu); rko_fanout = rd_kafka_admin_fanout_op_new( - rk, - RD_KAFKA_OP_DELETERECORDS, - RD_KAFKA_EVENT_DELETERECORDS_RESULT, - &fanout_cbs, options, rkqu->rkqu_q); + rk, RD_KAFKA_OP_DELETERECORDS, RD_KAFKA_EVENT_DELETERECORDS_RESULT, + &fanout_cbs, options, rkqu->rkqu_q); if (del_record_cnt != 1) { /* We only support one DeleteRecords per call since there @@ -3500,7 +3394,7 @@ void rd_kafka_DeleteRecords (rd_kafka_t *rk, "Exactly one DeleteRecords must be " "passed"); rd_kafka_admin_common_worker_destroy(rk, rko_fanout, - rd_true/*destroy*/); + rd_true /*destroy*/); return; } @@ -3511,20 +3405,20 @@ void rd_kafka_DeleteRecords (rd_kafka_t *rk, RD_KAFKA_RESP_ERR__INVALID_ARG, "No records to delete"); rd_kafka_admin_common_worker_destroy(rk, rko_fanout, - rd_true/*destroy*/); + rd_true /*destroy*/); return; } /* Copy offsets list and store it on the request op */ copied_offsets = rd_kafka_topic_partition_list_copy(offsets); if 
(rd_kafka_topic_partition_list_has_duplicates( - copied_offsets, rd_false/*check partition*/)) { + copied_offsets, rd_false /*check partition*/)) { rd_kafka_topic_partition_list_destroy(copied_offsets); rd_kafka_admin_result_fail(rko_fanout, RD_KAFKA_RESP_ERR__INVALID_ARG, "Duplicate partitions not allowed"); rd_kafka_admin_common_worker_destroy(rk, rko_fanout, - rd_true/*destroy*/); + rd_true /*destroy*/); return; } @@ -3539,11 +3433,9 @@ void rd_kafka_DeleteRecords (rd_kafka_t *rk, /* Async query for partition leaders */ rd_kafka_topic_partition_list_query_leaders_async( - rk, copied_offsets, - rd_kafka_admin_timeout_remains(rko_fanout), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_DeleteRecords_leaders_queried_cb, - rko_fanout); + rk, copied_offsets, rd_kafka_admin_timeout_remains(rko_fanout), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_DeleteRecords_leaders_queried_cb, rko_fanout); } @@ -3552,23 +3444,22 @@ void rd_kafka_DeleteRecords (rd_kafka_t *rk, * * The returned \p offsets life-time is the same as the \p result object. 
*/ -const rd_kafka_topic_partition_list_t * -rd_kafka_DeleteRecords_result_offsets ( - const rd_kafka_DeleteRecords_result_t *result) { +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets( + const rd_kafka_DeleteRecords_result_t *result) { const rd_kafka_topic_partition_list_t *offsets; - const rd_kafka_op_t *rko = (const rd_kafka_op_t *) result; + const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result; size_t cnt; rd_kafka_op_type_t reqtype = - rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; rd_assert(reqtype == RD_KAFKA_OP_DELETERECORDS); cnt = rd_list_cnt(&rko->rko_u.admin_result.results); rd_assert(cnt == 1); - offsets = (const rd_kafka_topic_partition_list_t *) - rd_list_elem(&rko->rko_u.admin_result.results, 0); + offsets = (const rd_kafka_topic_partition_list_t *)rd_list_elem( + &rko->rko_u.admin_result.results, 0); rd_assert(offsets); @@ -3586,37 +3477,37 @@ rd_kafka_DeleteRecords_result_offsets ( * */ -rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new (const char *group) { +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group) { size_t tsize = strlen(group) + 1; rd_kafka_DeleteGroup_t *del_group; /* Single allocation */ - del_group = rd_malloc(sizeof(*del_group) + tsize); + del_group = rd_malloc(sizeof(*del_group) + tsize); del_group->group = del_group->data; memcpy(del_group->group, group, tsize); return del_group; } -void rd_kafka_DeleteGroup_destroy (rd_kafka_DeleteGroup_t *del_group) { +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group) { rd_free(del_group); } -static void rd_kafka_DeleteGroup_free (void *ptr) { +static void rd_kafka_DeleteGroup_free(void *ptr) { rd_kafka_DeleteGroup_destroy(ptr); } -void rd_kafka_DeleteGroup_destroy_array (rd_kafka_DeleteGroup_t **del_groups, - size_t del_group_cnt) { +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt) { size_t i; - for (i = 0 
; i < del_group_cnt ; i++) + for (i = 0; i < del_group_cnt; i++) rd_kafka_DeleteGroup_destroy(del_groups[i]); } /** * @brief Group name comparator for DeleteGroup_t */ -static int rd_kafka_DeleteGroup_cmp (const void *_a, const void *_b) { +static int rd_kafka_DeleteGroup_cmp(const void *_a, const void *_b) { const rd_kafka_DeleteGroup_t *a = _a, *b = _b; return strcmp(a->group, b->group); } @@ -3625,7 +3516,7 @@ static int rd_kafka_DeleteGroup_cmp (const void *_a, const void *_b) { * @brief Allocate a new DeleteGroup and make a copy of \p src */ static rd_kafka_DeleteGroup_t * -rd_kafka_DeleteGroup_copy (const rd_kafka_DeleteGroup_t *src) { +rd_kafka_DeleteGroup_copy(const rd_kafka_DeleteGroup_t *src) { return rd_kafka_DeleteGroup_new(src->group); } @@ -3634,10 +3525,11 @@ rd_kafka_DeleteGroup_copy (const rd_kafka_DeleteGroup_t *src) { * @brief Parse DeleteGroupsResponse and create ADMIN_RESULT op. */ static rd_kafka_resp_err_t -rd_kafka_DeleteGroupsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_DeleteGroupsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; int32_t group_cnt; int i; @@ -3650,17 +3542,17 @@ rd_kafka_DeleteGroupsResponse_parse (rd_kafka_op_t *rko_req, if (group_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) rd_kafka_buf_parse_fail( - reply, - "Received %"PRId32" groups in response " - "when only %d were requested", group_cnt, - rd_list_cnt(&rko_req->rko_u.admin_request.args)); + reply, + "Received %" PRId32 + " groups in response " + "when only %d were requested", + group_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); rko_result = rd_kafka_admin_result_new(rko_req); - rd_list_init(&rko_result->rko_u.admin_result.results, - group_cnt, + rd_list_init(&rko_result->rko_u.admin_result.results, group_cnt, 
rd_kafka_group_result_free); - for (i = 0 ; i < (int)group_cnt ; i++) { + for (i = 0; i < (int)group_cnt; i++) { rd_kafkap_str_t kgroup; int16_t error_code; rd_kafka_group_result_t *groupres; @@ -3669,11 +3561,8 @@ rd_kafka_DeleteGroupsResponse_parse (rd_kafka_op_t *rko_req, rd_kafka_buf_read_i16(reply, &error_code); groupres = rd_kafka_group_result_new( - kgroup.str, - RD_KAFKAP_STR_LEN(&kgroup), - NULL, - error_code ? - rd_kafka_error_new(error_code, NULL) : NULL); + kgroup.str, RD_KAFKAP_STR_LEN(&kgroup), NULL, + error_code ? rd_kafka_error_new(error_code, NULL) : NULL); rd_list_add(&rko_result->rko_u.admin_result.results, groupres); } @@ -3695,12 +3584,12 @@ rd_kafka_DeleteGroupsResponse_parse (rd_kafka_op_t *rko_req, /** @brief Merge the DeleteGroups response from a single broker * into the user response list. */ -void rd_kafka_DeleteGroups_response_merge (rd_kafka_op_t *rko_fanout, - const rd_kafka_op_t *rko_partial) { +void rd_kafka_DeleteGroups_response_merge(rd_kafka_op_t *rko_fanout, + const rd_kafka_op_t *rko_partial) { const rd_kafka_group_result_t *groupres = NULL; rd_kafka_group_result_t *newgroupres; const rd_kafka_DeleteGroup_t *grp = - rko_partial->rko_u.admin_result.opaque; + rko_partial->rko_u.admin_result.opaque; int orig_pos; rd_assert(rko_partial->rko_evtype == @@ -3709,66 +3598,63 @@ void rd_kafka_DeleteGroups_response_merge (rd_kafka_op_t *rko_fanout, if (!rko_partial->rko_err) { /* Proper results. * We only send one group per request, make sure it matches */ - groupres = rd_list_elem(&rko_partial->rko_u.admin_result. - results, 0); + groupres = + rd_list_elem(&rko_partial->rko_u.admin_result.results, 0); rd_assert(groupres); rd_assert(!strcmp(groupres->group, grp->group)); newgroupres = rd_kafka_group_result_copy(groupres); } else { /* Op errored, e.g. 
timeout */ newgroupres = rd_kafka_group_result_new( - grp->group, -1, NULL, - rd_kafka_error_new(rko_partial->rko_err, NULL)); + grp->group, -1, NULL, + rd_kafka_error_new(rko_partial->rko_err, NULL)); } /* As a convenience to the application we insert group result * in the same order as they were requested. */ - orig_pos = rd_list_index(&rko_fanout->rko_u.admin_request.args, - grp, rd_kafka_DeleteGroup_cmp); + orig_pos = rd_list_index(&rko_fanout->rko_u.admin_request.args, grp, + rd_kafka_DeleteGroup_cmp); rd_assert(orig_pos != -1); /* Make sure result is not already set */ - rd_assert(rd_list_elem(&rko_fanout->rko_u.admin_request. - fanout.results, orig_pos) == NULL); + rd_assert(rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results, + orig_pos) == NULL); - rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, - orig_pos, newgroupres); + rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, orig_pos, + newgroupres); } -void rd_kafka_DeleteGroups (rd_kafka_t *rk, - rd_kafka_DeleteGroup_t **del_groups, - size_t del_group_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_DeleteGroups(rd_kafka_t *rk, + rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko_fanout; rd_list_t dup_list; size_t i; static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = { - rd_kafka_DeleteGroups_response_merge, - rd_kafka_group_result_copy_opaque, + rd_kafka_DeleteGroups_response_merge, + rd_kafka_group_result_copy_opaque, }; rd_assert(rkqu); rko_fanout = rd_kafka_admin_fanout_op_new( - rk, - RD_KAFKA_OP_DELETEGROUPS, - RD_KAFKA_EVENT_DELETEGROUPS_RESULT, - &fanout_cbs, options, rkqu->rkqu_q); + rk, RD_KAFKA_OP_DELETEGROUPS, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, + &fanout_cbs, options, rkqu->rkqu_q); if (del_group_cnt == 0) { rd_kafka_admin_result_fail(rko_fanout, RD_KAFKA_RESP_ERR__INVALID_ARG, "No groups to delete"); 
rd_kafka_admin_common_worker_destroy(rk, rko_fanout, - rd_true/*destroy*/); + rd_true /*destroy*/); return; } /* Copy group list and store it on the request op. * Maintain original ordering. */ - rd_list_init(&rko_fanout->rko_u.admin_request.args, - (int)del_group_cnt, + rd_list_init(&rko_fanout->rko_u.admin_request.args, (int)del_group_cnt, rd_kafka_DeleteGroup_free); for (i = 0; i < del_group_cnt; i++) rd_list_add(&rko_fanout->rko_u.admin_request.args, @@ -3779,11 +3665,9 @@ void rd_kafka_DeleteGroups (rd_kafka_t *rk, * duplicates, we don't want the original list sorted since we want * to maintain ordering. */ rd_list_init(&dup_list, - rd_list_cnt(&rko_fanout->rko_u.admin_request.args), - NULL); - rd_list_copy_to(&dup_list, - &rko_fanout->rko_u.admin_request.args, - NULL, NULL); + rd_list_cnt(&rko_fanout->rko_u.admin_request.args), NULL); + rd_list_copy_to(&dup_list, &rko_fanout->rko_u.admin_request.args, NULL, + NULL); rd_list_sort(&dup_list, rd_kafka_DeleteGroup_cmp); if (rd_list_find_duplicate(&dup_list, rd_kafka_DeleteGroup_cmp)) { rd_list_destroy(&dup_list); @@ -3791,7 +3675,7 @@ void rd_kafka_DeleteGroups (rd_kafka_t *rk, RD_KAFKA_RESP_ERR__INVALID_ARG, "Duplicate groups not allowed"); rd_kafka_admin_common_worker_destroy(rk, rko_fanout, - rd_true/*destroy*/); + rd_true /*destroy*/); return; } @@ -3800,8 +3684,7 @@ void rd_kafka_DeleteGroups (rd_kafka_t *rk, /* Prepare results list where fanned out op's results will be * accumulated. */ rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, - (int)del_group_cnt, - rd_kafka_group_result_free); + (int)del_group_cnt, rd_kafka_group_result_free); rko_fanout->rko_u.admin_request.fanout.outstanding = (int)del_group_cnt; /* Create individual request ops for each group. @@ -3809,32 +3692,28 @@ void rd_kafka_DeleteGroups (rd_kafka_t *rk, * coordinator into one op. 
*/ for (i = 0; i < del_group_cnt; i++) { static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_DeleteGroupsRequest, - rd_kafka_DeleteGroupsResponse_parse, + rd_kafka_DeleteGroupsRequest, + rd_kafka_DeleteGroupsResponse_parse, }; - rd_kafka_DeleteGroup_t *grp = rd_list_elem( - &rko_fanout->rko_u.admin_request.args, (int)i); - rd_kafka_op_t *rko = - rd_kafka_admin_request_op_new( - rk, - RD_KAFKA_OP_DELETEGROUPS, - RD_KAFKA_EVENT_DELETEGROUPS_RESULT, - &cbs, - options, - rk->rk_ops); + rd_kafka_DeleteGroup_t *grp = + rd_list_elem(&rko_fanout->rko_u.admin_request.args, (int)i); + rd_kafka_op_t *rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_DELETEGROUPS, + RD_KAFKA_EVENT_DELETEGROUPS_RESULT, &cbs, options, + rk->rk_ops); rko->rko_u.admin_request.fanout_parent = rko_fanout; rko->rko_u.admin_request.broker_id = - RD_KAFKA_ADMIN_TARGET_COORDINATOR; + RD_KAFKA_ADMIN_TARGET_COORDINATOR; rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP; - rko->rko_u.admin_request.coordkey = rd_strdup(grp->group); + rko->rko_u.admin_request.coordkey = rd_strdup(grp->group); /* Set the group name as the opaque so the fanout worker use it * to fill in errors. * References rko_fanout's memory, which will always outlive * the fanned out op. */ rd_kafka_AdminOptions_set_opaque( - &rko->rko_u.admin_request.options, grp); + &rko->rko_u.admin_request.options, grp); rd_list_init(&rko->rko_u.admin_request.args, 1, rd_kafka_DeleteGroup_free); @@ -3852,10 +3731,9 @@ void rd_kafka_DeleteGroups (rd_kafka_t *rk, * The returned \p groups life-time is the same as the \p result object. * @param cntp is updated to the number of elements in the array. 
*/ -const rd_kafka_group_result_t ** -rd_kafka_DeleteGroups_result_groups ( - const rd_kafka_DeleteGroups_result_t *result, - size_t *cntp) { +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups( + const rd_kafka_DeleteGroups_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result, cntp); } @@ -3873,40 +3751,39 @@ rd_kafka_DeleteGroups_result_groups ( * */ -rd_kafka_DeleteConsumerGroupOffsets_t * -rd_kafka_DeleteConsumerGroupOffsets_new (const char *group, - const rd_kafka_topic_partition_list_t - *partitions) { +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new( + const char *group, + const rd_kafka_topic_partition_list_t *partitions) { size_t tsize = strlen(group) + 1; rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets; rd_assert(partitions); /* Single allocation */ - del_grpoffsets = rd_malloc(sizeof(*del_grpoffsets) + tsize); + del_grpoffsets = rd_malloc(sizeof(*del_grpoffsets) + tsize); del_grpoffsets->group = del_grpoffsets->data; memcpy(del_grpoffsets->group, group, tsize); del_grpoffsets->partitions = - rd_kafka_topic_partition_list_copy(partitions); + rd_kafka_topic_partition_list_copy(partitions); return del_grpoffsets; } -void rd_kafka_DeleteConsumerGroupOffsets_destroy ( - rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets) { +void rd_kafka_DeleteConsumerGroupOffsets_destroy( + rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets) { rd_kafka_topic_partition_list_destroy(del_grpoffsets->partitions); rd_free(del_grpoffsets); } -static void rd_kafka_DeleteConsumerGroupOffsets_free (void *ptr) { +static void rd_kafka_DeleteConsumerGroupOffsets_free(void *ptr) { rd_kafka_DeleteConsumerGroupOffsets_destroy(ptr); } -void rd_kafka_DeleteConsumerGroupOffsets_destroy_array ( - rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, - size_t del_grpoffsets_cnt) { +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array( + rd_kafka_DeleteConsumerGroupOffsets_t 
**del_grpoffsets, + size_t del_grpoffsets_cnt) { size_t i; - for (i = 0 ; i < del_grpoffsets_cnt ; i++) + for (i = 0; i < del_grpoffsets_cnt; i++) rd_kafka_DeleteConsumerGroupOffsets_destroy(del_grpoffsets[i]); } @@ -3915,8 +3792,8 @@ void rd_kafka_DeleteConsumerGroupOffsets_destroy_array ( * @brief Allocate a new DeleteGroup and make a copy of \p src */ static rd_kafka_DeleteConsumerGroupOffsets_t * -rd_kafka_DeleteConsumerGroupOffsets_copy ( - const rd_kafka_DeleteConsumerGroupOffsets_t *src) { +rd_kafka_DeleteConsumerGroupOffsets_copy( + const rd_kafka_DeleteConsumerGroupOffsets_t *src) { return rd_kafka_DeleteConsumerGroupOffsets_new(src->group, src->partitions); } @@ -3926,10 +3803,11 @@ rd_kafka_DeleteConsumerGroupOffsets_copy ( * @brief Parse OffsetDeleteResponse and create ADMIN_RESULT op. */ static rd_kafka_resp_err_t -rd_kafka_OffsetDeleteResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_OffsetDeleteResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; rd_kafka_op_t *rko_result; int16_t ErrorCode; @@ -3946,10 +3824,8 @@ rd_kafka_OffsetDeleteResponse_parse (rd_kafka_op_t *rko_req, rd_kafka_buf_read_throttle_time(reply); - partitions = rd_kafka_buf_read_topic_partitions(reply, - 16, - rd_false/*no offset */, - rd_true/*read error*/); + partitions = rd_kafka_buf_read_topic_partitions( + reply, 16, rd_false /*no offset */, rd_true /*read error*/); if (!partitions) { rd_snprintf(errstr, errstr_size, "Failed to parse OffsetDeleteResponse partitions"); @@ -3958,7 +3834,7 @@ rd_kafka_OffsetDeleteResponse_parse (rd_kafka_op_t *rko_req, /* Create result op and group_result_t */ - rko_result = rd_kafka_admin_result_new(rko_req); + rko_result = rd_kafka_admin_result_new(rko_req); del_grpoffsets = rd_list_elem(&rko_result->rko_u.admin_result.args, 0); 
rd_list_init(&rko_result->rko_u.admin_result.results, 1, @@ -3972,7 +3848,7 @@ rd_kafka_OffsetDeleteResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: rd_snprintf(errstr, errstr_size, "OffsetDelete response protocol parse failure: %s", rd_kafka_err2str(reply->rkbuf_err)); @@ -3980,52 +3856,48 @@ rd_kafka_OffsetDeleteResponse_parse (rd_kafka_op_t *rko_req, } -void rd_kafka_DeleteConsumerGroupOffsets ( - rd_kafka_t *rk, - rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, - size_t del_grpoffsets_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_DeleteConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_OffsetDeleteRequest, - rd_kafka_OffsetDeleteResponse_parse, + rd_kafka_OffsetDeleteRequest, + rd_kafka_OffsetDeleteResponse_parse, }; rd_kafka_op_t *rko; rd_assert(rkqu); rko = rd_kafka_admin_request_op_new( - rk, - RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS, - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, - &cbs, options, rkqu->rkqu_q); + rk, RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS, + RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, &cbs, options, + rkqu->rkqu_q); if (del_grpoffsets_cnt != 1) { /* For simplicity we only support one single group for now */ - rd_kafka_admin_result_fail(rko, - RD_KAFKA_RESP_ERR__INVALID_ARG, + rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG, "Exactly one " "DeleteConsumerGroupOffsets must " "be passed"); rd_kafka_admin_common_worker_destroy(rk, rko, - rd_true/*destroy*/); + rd_true /*destroy*/); return; } - rko->rko_u.admin_request.broker_id = - RD_KAFKA_ADMIN_TARGET_COORDINATOR; + rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR; rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP; - 
rko->rko_u.admin_request.coordkey = - rd_strdup(del_grpoffsets[0]->group); + rko->rko_u.admin_request.coordkey = rd_strdup(del_grpoffsets[0]->group); /* Store copy of group on request so the group name can be reached * from the response parser. */ rd_list_init(&rko->rko_u.admin_request.args, 1, rd_kafka_DeleteConsumerGroupOffsets_free); - rd_list_add(&rko->rko_u.admin_request.args, - rd_kafka_DeleteConsumerGroupOffsets_copy( - del_grpoffsets[0])); + rd_list_add( + &rko->rko_u.admin_request.args, + rd_kafka_DeleteConsumerGroupOffsets_copy(del_grpoffsets[0])); rd_kafka_q_enq(rk->rk_ops, rko); } @@ -4038,19 +3910,19 @@ void rd_kafka_DeleteConsumerGroupOffsets ( * @param cntp is updated to the number of elements in the array. */ const rd_kafka_group_result_t ** -rd_kafka_DeleteConsumerGroupOffsets_result_groups ( - const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, - size_t *cntp) { +rd_kafka_DeleteConsumerGroupOffsets_result_groups( + const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result, cntp); } RD_EXPORT -void rd_kafka_DeleteConsumerGroupOffsets ( - rd_kafka_t *rk, - rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, - size_t del_grpoffsets_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_DeleteConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); /**@}*/ diff --git a/src/rdkafka_admin.h b/src/rdkafka_admin.h index bfbb2e262f..36a6b6f448 100644 --- a/src/rdkafka_admin.h +++ b/src/rdkafka_admin.h @@ -42,17 +42,17 @@ * to make sure it is copied properly. 
*/ struct rd_kafka_AdminOptions_s { - rd_kafka_admin_op_t for_api; /**< Limit allowed options to - * this API (optional) */ + rd_kafka_admin_op_t for_api; /**< Limit allowed options to + * this API (optional) */ /* Generic */ - rd_kafka_confval_t request_timeout;/**< I32: Full request timeout, - * includes looking up leader - * broker, - * waiting for req/response, - * etc. */ - rd_ts_t abs_timeout; /**< Absolute timeout calculated - * from .timeout */ + rd_kafka_confval_t request_timeout; /**< I32: Full request timeout, + * includes looking up leader + * broker, + * waiting for req/response, + * etc. */ + rd_ts_t abs_timeout; /**< Absolute timeout calculated + * from .timeout */ /* Specific for one or more APIs */ rd_kafka_confval_t operation_timeout; /**< I32: Timeout on broker. @@ -62,30 +62,30 @@ struct rd_kafka_AdminOptions_s { * DeleteRecords * DeleteTopics */ - rd_kafka_confval_t validate_only; /**< BOOL: Only validate (on broker), - * but don't perform action. - * Valid for: - * CreateTopics - * CreatePartitions - * AlterConfigs - */ - - rd_kafka_confval_t incremental; /**< BOOL: Incremental rather than - * absolute application - * of config. - * Valid for: - * AlterConfigs - */ - - rd_kafka_confval_t broker; /**< INT: Explicitly override - * broker id to send - * requests to. - * Valid for: - * all - */ - - rd_kafka_confval_t opaque; /**< PTR: Application opaque. - * Valid for all. */ + rd_kafka_confval_t validate_only; /**< BOOL: Only validate (on broker), + * but don't perform action. + * Valid for: + * CreateTopics + * CreatePartitions + * AlterConfigs + */ + + rd_kafka_confval_t incremental; /**< BOOL: Incremental rather than + * absolute application + * of config. + * Valid for: + * AlterConfigs + */ + + rd_kafka_confval_t broker; /**< INT: Explicitly override + * broker id to send + * requests to. + * Valid for: + * all + */ + + rd_kafka_confval_t opaque; /**< PTR: Application opaque. + * Valid for all. 
*/ }; @@ -104,11 +104,11 @@ struct rd_kafka_NewTopic_s { int replication_factor; /**< Replication factor */ /* Optional */ - rd_list_t replicas; /**< Type (rd_list_t (int32_t)): - * Array of replica lists indexed by - * partition, size num_partitions. */ - rd_list_t config; /**< Type (rd_kafka_ConfigEntry_t *): - * List of configuration entries */ + rd_list_t replicas; /**< Type (rd_list_t (int32_t)): + * Array of replica lists indexed by + * partition, size num_partitions. */ + rd_list_t config; /**< Type (rd_kafka_ConfigEntry_t *): + * List of configuration entries */ }; /**@}*/ @@ -123,13 +123,13 @@ struct rd_kafka_NewTopic_s { * @brief DeleteTopics result */ struct rd_kafka_DeleteTopics_result_s { - rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */ + rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */ }; struct rd_kafka_DeleteTopic_s { - char *topic; /**< Points to data */ - char data[1]; /**< The topic name is allocated along with - * the struct here. */ + char *topic; /**< Points to data */ + char data[1]; /**< The topic name is allocated along with + * the struct here. */ }; /**@}*/ @@ -146,7 +146,7 @@ struct rd_kafka_DeleteTopic_s { * @brief CreatePartitions result */ struct rd_kafka_CreatePartitions_result_s { - rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */ + rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */ }; struct rd_kafka_NewPartitions_s { @@ -154,15 +154,15 @@ struct rd_kafka_NewPartitions_s { size_t total_cnt; /**< New total partition count */ /* Optional */ - rd_list_t replicas; /**< Type (rd_list_t (int32_t)): - * Array of replica lists indexed by - * new partition relative index. - * Size is dynamic since we don't - * know how many partitions are actually - * being added by total_cnt */ - - char data[1]; /**< The topic name is allocated along with - * the struct here. */ + rd_list_t replicas; /**< Type (rd_list_t (int32_t)): + * Array of replica lists indexed by + * new partition relative index. 
+ * Size is dynamic since we don't + * know how many partitions are actually + * being added by total_cnt */ + + char data[1]; /**< The topic name is allocated along with + * the struct here. */ }; /**@}*/ @@ -176,27 +176,27 @@ struct rd_kafka_NewPartitions_s { /* KIP-248 */ typedef enum rd_kafka_AlterOperation_t { - RD_KAFKA_ALTER_OP_ADD = 0, - RD_KAFKA_ALTER_OP_SET = 1, + RD_KAFKA_ALTER_OP_ADD = 0, + RD_KAFKA_ALTER_OP_SET = 1, RD_KAFKA_ALTER_OP_DELETE = 2, } rd_kafka_AlterOperation_t; struct rd_kafka_ConfigEntry_s { - rd_strtup_t *kv; /**< Name/Value pair */ + rd_strtup_t *kv; /**< Name/Value pair */ /* Response */ /* Attributes: this is a struct for easy copying */ struct { rd_kafka_AlterOperation_t operation; /**< Operation */ - rd_kafka_ConfigSource_t source; /**< Config source */ - rd_bool_t is_readonly; /**< Value is read-only (on broker) */ - rd_bool_t is_default; /**< Value is at its default */ - rd_bool_t is_sensitive; /**< Value is sensitive */ - rd_bool_t is_synonym; /**< Value is synonym */ + rd_kafka_ConfigSource_t source; /**< Config source */ + rd_bool_t is_readonly; /**< Value is read-only (on broker) */ + rd_bool_t is_default; /**< Value is at its default */ + rd_bool_t is_sensitive; /**< Value is sensitive */ + rd_bool_t is_synonym; /**< Value is synonym */ } a; - rd_list_t synonyms; /**< Type (rd_kafka_configEntry *) */ + rd_list_t synonyms; /**< Type (rd_kafka_configEntry *) */ }; /** @@ -214,16 +214,15 @@ struct rd_kafka_ConfigResource_s { * List of config props */ /* Response */ - rd_kafka_resp_err_t err; /**< Response error code */ - char *errstr; /**< Response error string */ + rd_kafka_resp_err_t err; /**< Response error code */ + char *errstr; /**< Response error string */ - char data[1]; /**< The name is allocated along with - * the struct here. */ + char data[1]; /**< The name is allocated along with + * the struct here. 
*/ }; - /**@}*/ /** @@ -233,15 +232,14 @@ struct rd_kafka_ConfigResource_s { - struct rd_kafka_AlterConfigs_result_s { - rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */ + rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */ }; struct rd_kafka_ConfigResource_result_s { - rd_list_t resources; /**< Type (struct rd_kafka_ConfigResource *): - * List of config resources, sans config - * but with response error values. */ + rd_list_t resources; /**< Type (struct rd_kafka_ConfigResource *): + * List of config resources, sans config + * but with response error values. */ }; /**@}*/ @@ -254,7 +252,7 @@ struct rd_kafka_ConfigResource_result_s { */ struct rd_kafka_DescribeConfigs_result_s { - rd_list_t configs; /**< Type (rd_kafka_ConfigResource_t *) */ + rd_list_t configs; /**< Type (rd_kafka_ConfigResource_t *) */ }; /**@}*/ @@ -267,9 +265,9 @@ struct rd_kafka_DescribeConfigs_result_s { struct rd_kafka_DeleteGroup_s { - char *group; /**< Points to data */ - char data[1]; /**< The group name is allocated along with - * the struct here. */ + char *group; /**< Points to data */ + char data[1]; /**< The group name is allocated along with + * the struct here. */ }; /**@}*/ @@ -296,14 +294,14 @@ struct rd_kafka_DeleteRecords_s { * @brief DeleteConsumerGroupOffsets result */ struct rd_kafka_DeleteConsumerGroupOffsets_result_s { - rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */ + rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */ }; struct rd_kafka_DeleteConsumerGroupOffsets_s { - char *group; /**< Points to data */ + char *group; /**< Points to data */ rd_kafka_topic_partition_list_t *partitions; - char data[1]; /**< The group name is allocated along with - * the struct here. */ + char data[1]; /**< The group name is allocated along with + * the struct here. 
*/ }; /**@}*/ diff --git a/src/rdkafka_assignment.c b/src/rdkafka_assignment.c index 85bd898b74..dbb2eee70f 100644 --- a/src/rdkafka_assignment.c +++ b/src/rdkafka_assignment.c @@ -99,27 +99,23 @@ #include "rdkafka_request.h" -static void rd_kafka_assignment_dump (rd_kafka_t *rk) { +static void rd_kafka_assignment_dump(rd_kafka_t *rk) { rd_kafka_dbg(rk, CGRP, "DUMP", "Assignment dump (started_cnt=%d, wait_stop_cnt=%d)", rk->rk_consumer.assignment.started_cnt, rk->rk_consumer.assignment.wait_stop_cnt); - rd_kafka_topic_partition_list_log( - rk, "DUMP_ALL", RD_KAFKA_DBG_CGRP, - rk->rk_consumer.assignment.all); + rd_kafka_topic_partition_list_log(rk, "DUMP_ALL", RD_KAFKA_DBG_CGRP, + rk->rk_consumer.assignment.all); - rd_kafka_topic_partition_list_log( - rk, "DUMP_PND", RD_KAFKA_DBG_CGRP, - rk->rk_consumer.assignment.pending); + rd_kafka_topic_partition_list_log(rk, "DUMP_PND", RD_KAFKA_DBG_CGRP, + rk->rk_consumer.assignment.pending); - rd_kafka_topic_partition_list_log( - rk, "DUMP_QRY", RD_KAFKA_DBG_CGRP, - rk->rk_consumer.assignment.queried); + rd_kafka_topic_partition_list_log(rk, "DUMP_QRY", RD_KAFKA_DBG_CGRP, + rk->rk_consumer.assignment.queried); - rd_kafka_topic_partition_list_log( - rk, "DUMP_REM", RD_KAFKA_DBG_CGRP, - rk->rk_consumer.assignment.removed); + rd_kafka_topic_partition_list_log(rk, "DUMP_REM", RD_KAFKA_DBG_CGRP, + rk->rk_consumer.assignment.removed); } /** @@ -133,20 +129,21 @@ static void rd_kafka_assignment_dump (rd_kafka_t *rk) { * Called from the FetchOffsets response handler below. 
*/ static void -rd_kafka_assignment_apply_offsets (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err) { +rd_kafka_assignment_apply_offsets(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err) { rd_kafka_topic_partition_t *rktpar; RD_KAFKA_TPLIST_FOREACH(rktpar, offsets) { rd_kafka_toppar_t *rktp = rktpar->_private; /* May be NULL */ if (!rd_kafka_topic_partition_list_del( - rk->rk_consumer.assignment.queried, - rktpar->topic, rktpar->partition)) { + rk->rk_consumer.assignment.queried, rktpar->topic, + rktpar->partition)) { rd_kafka_dbg(rk, CGRP, "OFFSETFETCH", "Ignoring OffsetFetch " - "response for %s [%"PRId32"] which is no " + "response for %s [%" PRId32 + "] which is no " "longer in the queried list " "(possibly unassigned?)", rktpar->topic, rktpar->partition); @@ -164,27 +161,24 @@ rd_kafka_assignment_apply_offsets (rd_kafka_t *rk, * later handling by the assignment state machine. */ rd_kafka_dbg(rk, CGRP, "OFFSETFETCH", - "Adding %s [%"PRId32"] back to pending " + "Adding %s [%" PRId32 + "] back to pending " "list because on-going transaction is " "blocking offset retrieval", - rktpar->topic, - rktpar->partition); + rktpar->topic, rktpar->partition); rd_kafka_topic_partition_list_add_copy( - rk->rk_consumer.assignment.pending, rktpar); + rk->rk_consumer.assignment.pending, rktpar); } else if (rktpar->err) { /* Partition-level error */ rd_kafka_consumer_err( - rk->rk_consumer.q, RD_KAFKA_NODEID_UA, - rktpar->err, 0, - rktpar->topic, rktp, - RD_KAFKA_OFFSET_INVALID, - "Failed to fetch committed offset for " - "group \"%s\" topic %s [%"PRId32"]: %s", - rk->rk_group_id->str, - rktpar->topic, rktpar->partition, - rd_kafka_err2str(rktpar->err)); + rk->rk_consumer.q, RD_KAFKA_NODEID_UA, rktpar->err, + 0, rktpar->topic, rktp, RD_KAFKA_OFFSET_INVALID, + "Failed to fetch committed offset for " + "group \"%s\" topic %s [%" PRId32 "]: %s", + rk->rk_group_id->str, rktpar->topic, + rktpar->partition, 
rd_kafka_err2str(rktpar->err)); /* The partition will not be added back to .pending * and thus only reside on .all until the application @@ -201,17 +195,16 @@ rd_kafka_assignment_apply_offsets (rd_kafka_t *rk, /* Add partition to pending list where serve() * will start the fetcher. */ rd_kafka_dbg(rk, CGRP, "OFFSETFETCH", - "Adding %s [%"PRId32"] back to pending " + "Adding %s [%" PRId32 + "] back to pending " "list with offset %s", - rktpar->topic, - rktpar->partition, + rktpar->topic, rktpar->partition, rd_kafka_offset2str(rktpar->offset)); rd_kafka_topic_partition_list_add_copy( - rk->rk_consumer.assignment.pending, rktpar); + rk->rk_consumer.assignment.pending, rktpar); } /* Do nothing for request-level errors (err is set). */ - } if (offsets->cnt > 0) @@ -228,19 +221,18 @@ rd_kafka_assignment_apply_offsets (rd_kafka_t *rk, * * @locality rdkafka main thread */ -static void -rd_kafka_assignment_handle_OffsetFetch (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_assignment_handle_OffsetFetch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { rd_kafka_topic_partition_list_t *offsets = NULL; - int64_t *req_assignment_version = (int64_t *)opaque; + int64_t *req_assignment_version = (int64_t *)opaque; /* Only allow retries if there's been no change to the assignment, * otherwise rely on assignment state machine to retry. */ - rd_bool_t allow_retry = *req_assignment_version == - rk->rk_consumer.assignment.version; + rd_bool_t allow_retry = + *req_assignment_version == rk->rk_consumer.assignment.version; if (err == RD_KAFKA_RESP_ERR__DESTROY) { /* Termination, quick cleanup. 
*/ @@ -248,11 +240,9 @@ rd_kafka_assignment_handle_OffsetFetch (rd_kafka_t *rk, return; } - err = rd_kafka_handle_OffsetFetch(rk, rkb, err, - reply, request, &offsets, - rd_true/* Update toppars */, - rd_true/* Add parts */, - allow_retry); + err = rd_kafka_handle_OffsetFetch( + rk, rkb, err, reply, request, &offsets, + rd_true /* Update toppars */, rd_true /* Add parts */, allow_retry); if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) { if (offsets) rd_kafka_topic_partition_list_destroy(offsets); @@ -268,18 +258,15 @@ rd_kafka_assignment_handle_OffsetFetch (rd_kafka_t *rk, if (!err) err = RD_KAFKA_RESP_ERR__NO_OFFSET; - rd_kafka_dbg(rk, CGRP, "OFFSET", - "Offset fetch error: %s", + rd_kafka_dbg(rk, CGRP, "OFFSET", "Offset fetch error: %s", rd_kafka_err2str(err)); - rd_kafka_consumer_err(rk->rk_consumer.q, - rd_kafka_broker_id(rkb), - err, 0, NULL, NULL, - RD_KAFKA_OFFSET_INVALID, - "Failed to fetch committed " - "offsets for partitions " - "in group \"%s\": %s", - rk->rk_group_id->str, - rd_kafka_err2str(err)); + rd_kafka_consumer_err( + rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL, + NULL, RD_KAFKA_OFFSET_INVALID, + "Failed to fetch committed " + "offsets for partitions " + "in group \"%s\": %s", + rk->rk_group_id->str, rd_kafka_err2str(err)); return; } @@ -290,15 +277,12 @@ rd_kafka_assignment_handle_OffsetFetch (rd_kafka_t *rk, rd_kafka_dbg(rk, CGRP, "OFFSET", "Offset fetch error for %d partition(s): %s", offsets->cnt, rd_kafka_err2str(err)); - rd_kafka_consumer_err(rk->rk_consumer.q, - rd_kafka_broker_id(rkb), - err, 0, NULL, NULL, - RD_KAFKA_OFFSET_INVALID, - "Failed to fetch committed offsets for " - "%d partition(s) in group \"%s\": %s", - offsets->cnt, - rk->rk_group_id->str, - rd_kafka_err2str(err)); + rd_kafka_consumer_err( + rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL, + NULL, RD_KAFKA_OFFSET_INVALID, + "Failed to fetch committed offsets for " + "%d partition(s) in group \"%s\": %s", + offsets->cnt, rk->rk_group_id->str, 
rd_kafka_err2str(err)); } /* Apply the fetched offsets to the assignment */ @@ -313,8 +297,7 @@ rd_kafka_assignment_handle_OffsetFetch (rd_kafka_t *rk, * * @returns >0 if there are removal operations in progress, else 0. */ -static int -rd_kafka_assignment_serve_removals (rd_kafka_t *rk) { +static int rd_kafka_assignment_serve_removals(rd_kafka_t *rk) { rd_kafka_topic_partition_t *rktpar; int valid_offsets = 0; @@ -327,25 +310,24 @@ rd_kafka_assignment_serve_removals (rd_kafka_t *rk) { * Outstanding OffsetFetch query results will be ignored * for partitions that are no longer on the .queried list. */ was_pending = rd_kafka_topic_partition_list_del( - rk->rk_consumer.assignment.pending, - rktpar->topic, rktpar->partition); + rk->rk_consumer.assignment.pending, rktpar->topic, + rktpar->partition); was_queried = rd_kafka_topic_partition_list_del( - rk->rk_consumer.assignment.queried, - rktpar->topic, rktpar->partition); + rk->rk_consumer.assignment.queried, rktpar->topic, + rktpar->partition); if (rktp->rktp_started) { /* Partition was started, stop the fetcher. */ rd_assert(rk->rk_consumer.assignment.started_cnt > 0); rd_kafka_toppar_op_fetch_stop( - rktp, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + rktp, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); rk->rk_consumer.assignment.wait_stop_cnt++; } /* Reset the (lib) pause flag which may have been set by * the cgrp when scheduling the rebalance callback. 
*/ - rd_kafka_toppar_op_pause_resume(rktp, - rd_false/*resume*/, + rd_kafka_toppar_op_pause_resume(rktp, rd_false /*resume*/, RD_KAFKA_TOPPAR_F_LIB_PAUSE, RD_KAFKA_NO_REPLYQ); @@ -368,17 +350,17 @@ rd_kafka_assignment_serve_removals (rd_kafka_t *rk) { rd_kafka_toppar_unlock(rktp); rd_kafka_dbg(rk, CGRP, "REMOVE", - "Removing %s [%"PRId32"] from assignment " + "Removing %s [%" PRId32 + "] from assignment " "(started=%s, pending=%s, queried=%s, " "stored offset=%s)", rktpar->topic, rktpar->partition, RD_STR_ToF(rktp->rktp_started), - RD_STR_ToF(was_pending), - RD_STR_ToF(was_queried), + RD_STR_ToF(was_pending), RD_STR_ToF(was_queried), rd_kafka_offset2str(rktpar->offset)); } - rd_kafka_dbg(rk, CONSUMER|RD_KAFKA_DBG_CGRP, "REMOVE", + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REMOVE", "Served %d removed partition(s), " "with %d offset(s) to commit", rk->rk_consumer.assignment.removed->cnt, valid_offsets); @@ -387,21 +369,18 @@ rd_kafka_assignment_serve_removals (rd_kafka_t *rk) { * Commit final offsets to broker for the removed partitions, * unless this is a consumer destruction with a close() call. 
*/ if (valid_offsets > 0 && - rk->rk_conf.offset_store_method == - RD_KAFKA_OFFSET_METHOD_BROKER && - rk->rk_cgrp && - rk->rk_conf.enable_auto_commit && + rk->rk_conf.offset_store_method == RD_KAFKA_OFFSET_METHOD_BROKER && + rk->rk_cgrp && rk->rk_conf.enable_auto_commit && !rd_kafka_destroy_flags_no_consumer_close(rk)) rd_kafka_cgrp_assigned_offsets_commit( - rk->rk_cgrp, - rk->rk_consumer.assignment.removed, - rd_false /* use offsets from .removed */, - "unassigned partitions"); + rk->rk_cgrp, rk->rk_consumer.assignment.removed, + rd_false /* use offsets from .removed */, + "unassigned partitions"); rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.removed); return rk->rk_consumer.assignment.wait_stop_cnt + - rk->rk_consumer.wait_commit_cnt; + rk->rk_consumer.wait_commit_cnt; } @@ -414,8 +393,7 @@ rd_kafka_assignment_serve_removals (rd_kafka_t *rk) { * @returns >0 if there are pending operations in progress for the current * assignment, else 0. */ -static int -rd_kafka_assignment_serve_pending (rd_kafka_t *rk) { +static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) { rd_kafka_topic_partition_list_t *partitions_to_query = NULL; /* We can query committed offsets only if all of the following are true: * - We have a group coordinator. @@ -426,21 +404,20 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) { * is unassigned and then assigned again). */ rd_kafka_broker_t *coord = - rk->rk_cgrp ? rd_kafka_cgrp_get_coord(rk->rk_cgrp) : NULL; + rk->rk_cgrp ? 
rd_kafka_cgrp_get_coord(rk->rk_cgrp) : NULL; rd_bool_t can_query_offsets = - coord && - rk->rk_consumer.wait_commit_cnt == 0 && - rk->rk_consumer.assignment.queried->cnt == 0; + coord && rk->rk_consumer.wait_commit_cnt == 0 && + rk->rk_consumer.assignment.queried->cnt == 0; int i; if (can_query_offsets) partitions_to_query = rd_kafka_topic_partition_list_new( - rk->rk_consumer.assignment.pending->cnt); + rk->rk_consumer.assignment.pending->cnt); /* Scan the list backwards so removals are cheap (no array shuffle) */ - for (i = rk->rk_consumer.assignment.pending->cnt - 1 ; i >= 0 ; i--) { + for (i = rk->rk_consumer.assignment.pending->cnt - 1; i >= 0; i--) { rd_kafka_topic_partition_t *rktpar = - &rk->rk_consumer.assignment.pending->elems[i]; + &rk->rk_consumer.assignment.pending->elems[i]; rd_kafka_toppar_t *rktp = rktpar->_private; /* Borrow ref */ rd_assert(!rktp->rktp_started); @@ -462,25 +439,23 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) { rd_kafka_dbg(rk, CGRP, "SRVPEND", "Starting pending assigned partition " - "%s [%"PRId32"] at offset %s", + "%s [%" PRId32 "] at offset %s", rktpar->topic, rktpar->partition, rd_kafka_offset2str(rktpar->offset)); /* Reset the (lib) pause flag which may have been set by * the cgrp when scheduling the rebalance callback. */ rd_kafka_toppar_op_pause_resume( - rktp, - rd_false/*resume*/, - RD_KAFKA_TOPPAR_F_LIB_PAUSE, - RD_KAFKA_NO_REPLYQ); + rktp, rd_false /*resume*/, + RD_KAFKA_TOPPAR_F_LIB_PAUSE, RD_KAFKA_NO_REPLYQ); /* Start the fetcher */ rktp->rktp_started = rd_true; rk->rk_consumer.assignment.started_cnt++; - rd_kafka_toppar_op_fetch_start( - rktp, rktpar->offset, - rk->rk_consumer.q, RD_KAFKA_NO_REPLYQ); + rd_kafka_toppar_op_fetch_start(rktp, rktpar->offset, + rk->rk_consumer.q, + RD_KAFKA_NO_REPLYQ); } else if (can_query_offsets) { @@ -491,42 +466,44 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) { * to the group coordinator. 
*/ rd_dassert(!rd_kafka_topic_partition_list_find( - rk->rk_consumer.assignment.queried, - rktpar->topic, rktpar->partition)); + rk->rk_consumer.assignment.queried, rktpar->topic, + rktpar->partition)); rd_kafka_topic_partition_list_add_copy( - partitions_to_query, rktpar); + partitions_to_query, rktpar); rd_kafka_topic_partition_list_add_copy( - rk->rk_consumer.assignment.queried, rktpar); + rk->rk_consumer.assignment.queried, rktpar); rd_kafka_dbg(rk, CGRP, "SRVPEND", "Querying committed offset for pending " - "assigned partition %s [%"PRId32"]", + "assigned partition %s [%" PRId32 "]", rktpar->topic, rktpar->partition); } else { - rd_kafka_dbg(rk, CGRP, "SRVPEND", - "Pending assignment partition " - "%s [%"PRId32"] can't fetch committed " - "offset yet " - "(cgrp state %s, awaiting %d commits, " - "%d partition(s) already being queried)", - rktpar->topic, rktpar->partition, - rk->rk_cgrp ? - rd_kafka_cgrp_state_names[ - rk->rk_cgrp->rkcg_state] : - "n/a", - rk->rk_consumer.wait_commit_cnt, - rk->rk_consumer.assignment.queried->cnt); + rd_kafka_dbg( + rk, CGRP, "SRVPEND", + "Pending assignment partition " + "%s [%" PRId32 + "] can't fetch committed " + "offset yet " + "(cgrp state %s, awaiting %d commits, " + "%d partition(s) already being queried)", + rktpar->topic, rktpar->partition, + rk->rk_cgrp + ? 
rd_kafka_cgrp_state_names[rk->rk_cgrp + ->rkcg_state] + : "n/a", + rk->rk_consumer.wait_commit_cnt, + rk->rk_consumer.assignment.queried->cnt); continue; /* Keep rktpar on pending list */ } /* Remove rktpar from the pending list */ rd_kafka_topic_partition_list_del_by_idx( - rk->rk_consumer.assignment.pending, i); + rk->rk_consumer.assignment.pending, i); } @@ -534,7 +511,7 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) { if (coord) rd_kafka_broker_destroy(coord); return rk->rk_consumer.assignment.pending->cnt + - rk->rk_consumer.assignment.queried->cnt; + rk->rk_consumer.assignment.queried->cnt; } @@ -548,14 +525,13 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) { partitions_to_query->cnt); rd_kafka_OffsetFetchRequest( - coord, - partitions_to_query, - rk->rk_conf.isolation_level == - RD_KAFKA_READ_COMMITTED/*require_stable*/, - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_assignment_handle_OffsetFetch, - /* Must be freed by handler */ - (void *)req_assignment_version); + coord, partitions_to_query, + rk->rk_conf.isolation_level == + RD_KAFKA_READ_COMMITTED /*require_stable*/, + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_assignment_handle_OffsetFetch, + /* Must be freed by handler */ + (void *)req_assignment_version); } if (coord) @@ -564,7 +540,7 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) { rd_kafka_topic_partition_list_destroy(partitions_to_query); return rk->rk_consumer.assignment.pending->cnt + - rk->rk_consumer.assignment.queried->cnt; + rk->rk_consumer.assignment.queried->cnt; } @@ -577,9 +553,9 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) { * - wait_commit_cnt reaches 0 * - partition fetcher is stopped */ -void rd_kafka_assignment_serve (rd_kafka_t *rk) { +void rd_kafka_assignment_serve(rd_kafka_t *rk) { int inp_removals = 0; - int inp_pending = 0; + int inp_pending = 0; rd_kafka_assignment_dump(rk); @@ -593,15 +569,15 @@ void rd_kafka_assignment_serve (rd_kafka_t *rk) { * to finish (since we might need the committed offsets 
as start * offsets). */ if (rk->rk_consumer.assignment.wait_stop_cnt == 0 && - rk->rk_consumer.wait_commit_cnt == 0 && - inp_removals == 0 && + rk->rk_consumer.wait_commit_cnt == 0 && inp_removals == 0 && rk->rk_consumer.assignment.pending->cnt > 0) inp_pending = rd_kafka_assignment_serve_pending(rk); if (inp_removals + inp_pending + - rk->rk_consumer.assignment.queried->cnt + - rk->rk_consumer.assignment.wait_stop_cnt + - rk->rk_consumer.wait_commit_cnt == 0) { + rk->rk_consumer.assignment.queried->cnt + + rk->rk_consumer.assignment.wait_stop_cnt + + rk->rk_consumer.wait_commit_cnt == + 0) { /* No assignment operations in progress, * signal assignment done back to cgrp to let it * transition to its next state if necessary. @@ -615,8 +591,7 @@ void rd_kafka_assignment_serve (rd_kafka_t *rk) { "with %d pending adds, %d offset queries, " "%d partitions awaiting stop and " "%d offset commits in progress", - rk->rk_consumer.assignment.all->cnt, - inp_pending, + rk->rk_consumer.assignment.all->cnt, inp_pending, rk->rk_consumer.assignment.queried->cnt, rk->rk_consumer.assignment.wait_stop_cnt, rk->rk_consumer.wait_commit_cnt); @@ -628,12 +603,12 @@ void rd_kafka_assignment_serve (rd_kafka_t *rk) { * @returns true if the current or previous assignment has operations in * progress, such as waiting for partition fetchers to stop. 
*/ -rd_bool_t rd_kafka_assignment_in_progress (rd_kafka_t *rk) { +rd_bool_t rd_kafka_assignment_in_progress(rd_kafka_t *rk) { return rk->rk_consumer.wait_commit_cnt > 0 || - rk->rk_consumer.assignment.wait_stop_cnt > 0 || - rk->rk_consumer.assignment.pending->cnt > 0 || - rk->rk_consumer.assignment.queried->cnt > 0 || - rk->rk_consumer.assignment.removed->cnt > 0; + rk->rk_consumer.assignment.wait_stop_cnt > 0 || + rk->rk_consumer.assignment.pending->cnt > 0 || + rk->rk_consumer.assignment.queried->cnt > 0 || + rk->rk_consumer.assignment.removed->cnt > 0; } @@ -645,17 +620,16 @@ rd_bool_t rd_kafka_assignment_in_progress (rd_kafka_t *rk) { * * @returns the number of partitions removed. */ -int rd_kafka_assignment_clear (rd_kafka_t *rk) { +int rd_kafka_assignment_clear(rd_kafka_t *rk) { int cnt = rk->rk_consumer.assignment.all->cnt; if (cnt == 0) { - rd_kafka_dbg(rk, CONSUMER|RD_KAFKA_DBG_CGRP, - "CLEARASSIGN", + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLEARASSIGN", "No current assignment to clear"); return 0; } - rd_kafka_dbg(rk, CONSUMER|RD_KAFKA_DBG_CGRP, "CLEARASSIGN", + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLEARASSIGN", "Clearing current assignment of %d partition(s)", rk->rk_consumer.assignment.all->cnt); @@ -663,8 +637,7 @@ int rd_kafka_assignment_clear (rd_kafka_t *rk) { rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.queried); rd_kafka_topic_partition_list_add_list( - rk->rk_consumer.assignment.removed, - rk->rk_consumer.assignment.all); + rk->rk_consumer.assignment.removed, rk->rk_consumer.assignment.all); rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.all); rk->rk_consumer.assignment.version++; @@ -683,8 +656,8 @@ int rd_kafka_assignment_clear (rd_kafka_t *rk) { * return from this function. 
*/ rd_kafka_error_t * -rd_kafka_assignment_add (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions) { +rd_kafka_assignment_add(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { rd_bool_t was_empty = rk->rk_consumer.assignment.all->cnt == 0; int i; @@ -692,10 +665,10 @@ rd_kafka_assignment_add (rd_kafka_t *rk, * invalid offsets in the input partitions. */ rd_kafka_topic_partition_list_sort(partitions, NULL, NULL); - for (i = 0 ; i < partitions->cnt ; i++) { + for (i = 0; i < partitions->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; const rd_kafka_topic_partition_t *prev = - i > 0 ? &partitions->elems[i-1] : NULL; + i > 0 ? &partitions->elems[i - 1] : NULL; if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset) && rktpar->offset != RD_KAFKA_OFFSET_BEGINNING && @@ -704,26 +677,26 @@ rd_kafka_assignment_add (rd_kafka_t *rk, rktpar->offset != RD_KAFKA_OFFSET_INVALID && rktpar->offset > RD_KAFKA_OFFSET_TAIL_BASE) return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__INVALID_ARG, - "%s [%"PRId32"] has invalid start offset %" - PRId64, - rktpar->topic, rktpar->partition, - rktpar->offset); + RD_KAFKA_RESP_ERR__INVALID_ARG, + "%s [%" PRId32 + "] has invalid start offset %" PRId64, + rktpar->topic, rktpar->partition, rktpar->offset); if (prev && !rd_kafka_topic_partition_cmp(rktpar, prev)) return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__INVALID_ARG, - "Duplicate %s [%"PRId32"] in input list", - rktpar->topic, rktpar->partition); + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate %s [%" PRId32 "] in input list", + rktpar->topic, rktpar->partition); if (rd_kafka_topic_partition_list_find( - rk->rk_consumer.assignment.all, - rktpar->topic, rktpar->partition)) - return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__CONFLICT, - "%s [%"PRId32"] is already part of the " - "current assignment", - rktpar->topic, rktpar->partition); + rk->rk_consumer.assignment.all, rktpar->topic, + rktpar->partition)) + return 
rd_kafka_error_new(RD_KAFKA_RESP_ERR__CONFLICT, + "%s [%" PRId32 + "] is already part of the " + "current assignment", + rktpar->topic, + rktpar->partition); /* Translate RD_KAFKA_OFFSET_INVALID to RD_KAFKA_OFFSET_STORED, * i.e., read from committed offset, since we use INVALID @@ -737,8 +710,7 @@ rd_kafka_assignment_add (rd_kafka_t *rk, * This is to make sure the rktp stays alive while unassigning * any previous assignment in the call to * assignment_clear() below. */ - rd_kafka_topic_partition_ensure_toppar(rk, rktpar, - rd_true); + rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true); } /* Add the new list of partitions to the current assignment. @@ -747,22 +719,19 @@ rd_kafka_assignment_add (rd_kafka_t *rk, rd_kafka_topic_partition_list_add_list(rk->rk_consumer.assignment.all, partitions); if (!was_empty) - rd_kafka_topic_partition_list_sort(rk->rk_consumer. - assignment.all, - NULL, NULL); + rd_kafka_topic_partition_list_sort( + rk->rk_consumer.assignment.all, NULL, NULL); /* And add to .pending for serve_pending() to handle. */ - rd_kafka_topic_partition_list_add_list(rk->rk_consumer. - assignment.pending, - partitions); + rd_kafka_topic_partition_list_add_list( + rk->rk_consumer.assignment.pending, partitions); - rd_kafka_dbg(rk, CONSUMER|RD_KAFKA_DBG_CGRP, "ASSIGNMENT", + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "ASSIGNMENT", "Added %d partition(s) to assignment which " "now consists of %d partition(s) where of %d are in " "pending state and %d are being queried", - partitions->cnt, - rk->rk_consumer.assignment.all->cnt, + partitions->cnt, rk->rk_consumer.assignment.all->cnt, rk->rk_consumer.assignment.pending->cnt, rk->rk_consumer.assignment.queried->cnt); @@ -782,35 +751,35 @@ rd_kafka_assignment_add (rd_kafka_t *rk, * return from this function. 
*/ rd_kafka_error_t * -rd_kafka_assignment_subtract (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions) { +rd_kafka_assignment_subtract(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { int i; int matched_queried_partitions = 0; int assignment_pre_cnt; if (rk->rk_consumer.assignment.all->cnt == 0 && partitions->cnt > 0) return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__INVALID_ARG, - "Can't subtract from empty assignment"); + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Can't subtract from empty assignment"); /* Verify that all partitions in \p partitions are in the assignment * before starting to modify the assignment. */ rd_kafka_topic_partition_list_sort(partitions, NULL, NULL); - for (i = 0 ; i < partitions->cnt ; i++) { + for (i = 0; i < partitions->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; if (!rd_kafka_topic_partition_list_find( - rk->rk_consumer.assignment.all, - rktpar->topic, rktpar->partition)) + rk->rk_consumer.assignment.all, rktpar->topic, + rktpar->partition)) return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__INVALID_ARG, - "%s [%"PRId32"] can't be unassigned since " - "it is not in the current assignment", - rktpar->topic, rktpar->partition); + RD_KAFKA_RESP_ERR__INVALID_ARG, + "%s [%" PRId32 + "] can't be unassigned since " + "it is not in the current assignment", + rktpar->topic, rktpar->partition); - rd_kafka_topic_partition_ensure_toppar(rk, rktpar, - rd_true); + rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true); } @@ -819,38 +788,39 @@ rd_kafka_assignment_subtract (rd_kafka_t *rk, /* Remove partitions in reverse order to avoid excessive * array shuffling of .all. * Add the removed partitions to .pending for serve() to handle. 
*/ - for (i = partitions->cnt-1 ; i >= 0 ; i--) { + for (i = partitions->cnt - 1; i >= 0; i--) { const rd_kafka_topic_partition_t *rktpar = - &partitions->elems[i]; + &partitions->elems[i]; if (!rd_kafka_topic_partition_list_del( - rk->rk_consumer.assignment.all, - rktpar->topic, rktpar->partition)) - RD_BUG("Removed partition %s [%"PRId32"] not found " + rk->rk_consumer.assignment.all, rktpar->topic, + rktpar->partition)) + RD_BUG("Removed partition %s [%" PRId32 + "] not found " "in assignment.all", rktpar->topic, rktpar->partition); if (rd_kafka_topic_partition_list_del( - rk->rk_consumer.assignment.queried, - rktpar->topic, rktpar->partition)) + rk->rk_consumer.assignment.queried, rktpar->topic, + rktpar->partition)) matched_queried_partitions++; else rd_kafka_topic_partition_list_del( - rk->rk_consumer.assignment.pending, - rktpar->topic, rktpar->partition); + rk->rk_consumer.assignment.pending, rktpar->topic, + rktpar->partition); /* Add to .removed list which will be served by * serve_removals(). */ rd_kafka_topic_partition_list_add_copy( - rk->rk_consumer.assignment.removed, rktpar); + rk->rk_consumer.assignment.removed, rktpar); } rd_kafka_dbg(rk, CGRP, "REMOVEASSIGN", "Removed %d partition(s) " "(%d with outstanding offset queries) from assignment " "of %d partition(s)", - partitions->cnt, - matched_queried_partitions, assignment_pre_cnt); + partitions->cnt, matched_queried_partitions, + assignment_pre_cnt); if (rk->rk_consumer.assignment.all->cnt == 0) { /* Some safe checking */ @@ -867,8 +837,8 @@ rd_kafka_assignment_subtract (rd_kafka_t *rk, /** * @brief Call when partition fetcher has stopped. 
*/ -void rd_kafka_assignment_partition_stopped (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp) { +void rd_kafka_assignment_partition_stopped(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp) { rd_assert(rk->rk_consumer.assignment.wait_stop_cnt > 0); rk->rk_consumer.assignment.wait_stop_cnt--; @@ -895,7 +865,7 @@ void rd_kafka_assignment_partition_stopped (rd_kafka_t *rk, * Partitions will be resumed by calling rd_kafka_assignment_resume() or * from either serve_removals() or serve_pending() above. */ -void rd_kafka_assignment_pause (rd_kafka_t *rk, const char *reason) { +void rd_kafka_assignment_pause(rd_kafka_t *rk, const char *reason) { if (rk->rk_consumer.assignment.all->cnt == 0) return; @@ -904,9 +874,7 @@ void rd_kafka_assignment_pause (rd_kafka_t *rk, const char *reason) { "Pausing fetchers for %d assigned partition(s): %s", rk->rk_consumer.assignment.all->cnt, reason); - rd_kafka_toppars_pause_resume(rk, - rd_true/*pause*/, - RD_ASYNC, + rd_kafka_toppars_pause_resume(rk, rd_true /*pause*/, RD_ASYNC, RD_KAFKA_TOPPAR_F_LIB_PAUSE, rk->rk_consumer.assignment.all); } @@ -915,7 +883,7 @@ void rd_kafka_assignment_pause (rd_kafka_t *rk, const char *reason) { * @brief Resume fetching of the currently assigned partitions which have * previously been paused by rd_kafka_assignment_pause(). 
*/ -void rd_kafka_assignment_resume (rd_kafka_t *rk, const char *reason) { +void rd_kafka_assignment_resume(rd_kafka_t *rk, const char *reason) { if (rk->rk_consumer.assignment.all->cnt == 0) return; @@ -924,9 +892,7 @@ void rd_kafka_assignment_resume (rd_kafka_t *rk, const char *reason) { "Resuming fetchers for %d assigned partition(s): %s", rk->rk_consumer.assignment.all->cnt, reason); - rd_kafka_toppars_pause_resume(rk, - rd_false/*resume*/, - RD_ASYNC, + rd_kafka_toppars_pause_resume(rk, rd_false /*resume*/, RD_ASYNC, RD_KAFKA_TOPPAR_F_LIB_PAUSE, rk->rk_consumer.assignment.all); } @@ -936,30 +902,28 @@ void rd_kafka_assignment_resume (rd_kafka_t *rk, const char *reason) { /** * @brief Destroy assignment state (but not \p assignment itself) */ -void rd_kafka_assignment_destroy (rd_kafka_t *rk) { +void rd_kafka_assignment_destroy(rd_kafka_t *rk) { if (!rk->rk_consumer.assignment.all) return; /* rd_kafka_assignment_init() not called */ + rd_kafka_topic_partition_list_destroy(rk->rk_consumer.assignment.all); rd_kafka_topic_partition_list_destroy( - rk->rk_consumer.assignment.all); - rd_kafka_topic_partition_list_destroy( - rk->rk_consumer.assignment.pending); + rk->rk_consumer.assignment.pending); rd_kafka_topic_partition_list_destroy( - rk->rk_consumer.assignment.queried); + rk->rk_consumer.assignment.queried); rd_kafka_topic_partition_list_destroy( - rk->rk_consumer.assignment.removed); + rk->rk_consumer.assignment.removed); } /** * @brief Initialize the assignment struct. 
*/ -void rd_kafka_assignment_init (rd_kafka_t *rk) { - rk->rk_consumer.assignment.all = - rd_kafka_topic_partition_list_new(100); +void rd_kafka_assignment_init(rd_kafka_t *rk) { + rk->rk_consumer.assignment.all = rd_kafka_topic_partition_list_new(100); rk->rk_consumer.assignment.pending = - rd_kafka_topic_partition_list_new(100); + rd_kafka_topic_partition_list_new(100); rk->rk_consumer.assignment.queried = - rd_kafka_topic_partition_list_new(100); + rd_kafka_topic_partition_list_new(100); rk->rk_consumer.assignment.removed = - rd_kafka_topic_partition_list_new(100); + rd_kafka_topic_partition_list_new(100); } diff --git a/src/rdkafka_assignment.h b/src/rdkafka_assignment.h index 8e5122c257..fa51bb10c3 100644 --- a/src/rdkafka_assignment.h +++ b/src/rdkafka_assignment.h @@ -54,20 +54,20 @@ typedef struct rd_kafka_assignment_s { } rd_kafka_assignment_t; -int rd_kafka_assignment_clear (rd_kafka_t *rk); +int rd_kafka_assignment_clear(rd_kafka_t *rk); rd_kafka_error_t * -rd_kafka_assignment_add (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); +rd_kafka_assignment_add(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); rd_kafka_error_t * -rd_kafka_assignment_subtract (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); -void rd_kafka_assignment_partition_stopped (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp); -void rd_kafka_assignment_pause (rd_kafka_t *rk, const char *reason); -void rd_kafka_assignment_resume (rd_kafka_t *rk, const char *reason); -void rd_kafka_assignment_serve (rd_kafka_t *rk); -rd_bool_t rd_kafka_assignment_in_progress (rd_kafka_t *rk); -void rd_kafka_assignment_destroy (rd_kafka_t *rk); -void rd_kafka_assignment_init (rd_kafka_t *rk); +rd_kafka_assignment_subtract(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); +void rd_kafka_assignment_partition_stopped(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp); +void rd_kafka_assignment_pause(rd_kafka_t *rk, const char *reason); +void 
rd_kafka_assignment_resume(rd_kafka_t *rk, const char *reason); +void rd_kafka_assignment_serve(rd_kafka_t *rk); +rd_bool_t rd_kafka_assignment_in_progress(rd_kafka_t *rk); +void rd_kafka_assignment_destroy(rd_kafka_t *rk); +void rd_kafka_assignment_init(rd_kafka_t *rk); #endif /* _RDKAFKA_ASSIGNMENT_H_ */ diff --git a/src/rdkafka_assignor.c b/src/rdkafka_assignor.c index b2b7705c39..25825dcb46 100644 --- a/src/rdkafka_assignor.c +++ b/src/rdkafka_assignor.c @@ -35,7 +35,7 @@ /** * Clear out and free any memory used by the member, but not the rkgm itself. */ -void rd_kafka_group_member_clear (rd_kafka_group_member_t *rkgm) { +void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm) { if (rkgm->rkgm_owned) rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned); @@ -66,11 +66,9 @@ void rd_kafka_group_member_clear (rd_kafka_group_member_t *rkgm) { /** * @brief Group member comparator (takes rd_kafka_group_member_t *) */ -int rd_kafka_group_member_cmp (const void *_a, const void *_b) { - const rd_kafka_group_member_t *a = - (const rd_kafka_group_member_t *)_a; - const rd_kafka_group_member_t *b = - (const rd_kafka_group_member_t *)_b; +int rd_kafka_group_member_cmp(const void *_a, const void *_b) { + const rd_kafka_group_member_t *a = (const rd_kafka_group_member_t *)_a; + const rd_kafka_group_member_t *b = (const rd_kafka_group_member_t *)_b; /* Use the group instance id to compare static group members */ if (!RD_KAFKAP_STR_IS_NULL(a->rkgm_group_instance_id) && @@ -85,37 +83,36 @@ int rd_kafka_group_member_cmp (const void *_a, const void *_b) { /** * Returns true if member subscribes to topic, else false. */ -int -rd_kafka_group_member_find_subscription (rd_kafka_t *rk, - const rd_kafka_group_member_t *rkgm, - const char *topic) { - int i; - - /* Match against member's subscription. 
*/ - for (i = 0 ; i < rkgm->rkgm_subscription->cnt ; i++) { +int rd_kafka_group_member_find_subscription(rd_kafka_t *rk, + const rd_kafka_group_member_t *rkgm, + const char *topic) { + int i; + + /* Match against member's subscription. */ + for (i = 0; i < rkgm->rkgm_subscription->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rkgm->rkgm_subscription->elems[i]; + &rkgm->rkgm_subscription->elems[i]; - if (rd_kafka_topic_partition_match(rk, rkgm, rktpar, - topic, NULL)) - return 1; - } + if (rd_kafka_topic_partition_match(rk, rkgm, rktpar, topic, + NULL)) + return 1; + } - return 0; + return 0; } -rd_kafkap_bytes_t * -rd_kafka_consumer_protocol_member_metadata_new ( - const rd_list_t *topics, - const void *userdata, size_t userdata_size, - const rd_kafka_topic_partition_list_t *owned_partitions) { +rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new( + const rd_list_t *topics, + const void *userdata, + size_t userdata_size, + const rd_kafka_topic_partition_list_t *owned_partitions) { rd_kafka_buf_t *rkbuf; rd_kafkap_bytes_t *kbytes; int i; - int topic_cnt = rd_list_cnt(topics); - const rd_kafka_topic_info_t *tinfo; + int topic_cnt = rd_list_cnt(topics); + const rd_kafka_topic_info_t *tinfo; size_t len; /* @@ -134,8 +131,8 @@ rd_kafka_consumer_protocol_member_metadata_new ( /* Version */ rd_kafka_buf_write_i16(rkbuf, 1); rd_kafka_buf_write_i32(rkbuf, topic_cnt); - RD_LIST_FOREACH(tinfo, topics, i) - rd_kafka_buf_write_str(rkbuf, tinfo->topic, -1); + RD_LIST_FOREACH(tinfo, topics, i) + rd_kafka_buf_write_str(rkbuf, tinfo->topic, -1); if (userdata) rd_kafka_buf_write_bytes(rkbuf, userdata, userdata_size); else /* Kafka 0.9.0.0 can't parse NULL bytes, so we provide empty, @@ -149,17 +146,15 @@ rd_kafka_consumer_protocol_member_metadata_new ( rd_kafka_buf_write_i32(rkbuf, 0); /* Topic count */ else rd_kafka_buf_write_topic_partitions( - rkbuf, - owned_partitions, - rd_false /*don't skip invalid offsets*/, - rd_false /*any offset*/, - rd_false 
/*don't write offsets*/, - rd_false /*don't write epoch*/, - rd_false /*don't write metadata*/); + rkbuf, owned_partitions, + rd_false /*don't skip invalid offsets*/, + rd_false /*any offset*/, rd_false /*don't write offsets*/, + rd_false /*don't write epoch*/, + rd_false /*don't write metadata*/); /* Get binary buffer and allocate a new Kafka Bytes with a copy. */ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf); - len = rd_slice_remains(&rkbuf->rkbuf_reader); + len = rd_slice_remains(&rkbuf->rkbuf_reader); kbytes = rd_kafkap_bytes_new(NULL, (int32_t)len); rd_slice_read(&rkbuf->rkbuf_reader, (void *)kbytes->data, len); rd_kafka_buf_destroy(rkbuf); @@ -169,15 +164,13 @@ rd_kafka_consumer_protocol_member_metadata_new ( - -rd_kafkap_bytes_t * -rd_kafka_assignor_get_metadata_with_empty_userdata (const rd_kafka_assignor_t *rkas, - void *assignor_state, - const rd_list_t *topics, - const rd_kafka_topic_partition_list_t - *owned_partitions) { - return rd_kafka_consumer_protocol_member_metadata_new( - topics, NULL, 0, owned_partitions); +rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata( + const rd_kafka_assignor_t *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions) { + return rd_kafka_consumer_protocol_member_metadata_new(topics, NULL, 0, + owned_partitions); } @@ -185,30 +178,30 @@ rd_kafka_assignor_get_metadata_with_empty_userdata (const rd_kafka_assignor_t *r /** * Returns 1 if all subscriptions are satifised for this member, else 0. 
*/ -static int rd_kafka_member_subscription_match ( - rd_kafka_cgrp_t *rkcg, - rd_kafka_group_member_t *rkgm, - const rd_kafka_metadata_topic_t *topic_metadata, - rd_kafka_assignor_topic_t *eligible_topic) { +static int rd_kafka_member_subscription_match( + rd_kafka_cgrp_t *rkcg, + rd_kafka_group_member_t *rkgm, + const rd_kafka_metadata_topic_t *topic_metadata, + rd_kafka_assignor_topic_t *eligible_topic) { int i; int has_regex = 0; - int matched = 0; + int matched = 0; /* Match against member's subscription. */ - for (i = 0 ; i < rkgm->rkgm_subscription->cnt ; i++) { + for (i = 0; i < rkgm->rkgm_subscription->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rkgm->rkgm_subscription->elems[i]; - int matched_by_regex = 0; - - if (rd_kafka_topic_partition_match(rkcg->rkcg_rk, rkgm, rktpar, - topic_metadata->topic, - &matched_by_regex)) { - rd_list_add(&rkgm->rkgm_eligible, - (void *)topic_metadata); - matched++; - has_regex += matched_by_regex; - } - } + &rkgm->rkgm_subscription->elems[i]; + int matched_by_regex = 0; + + if (rd_kafka_topic_partition_match(rkcg->rkcg_rk, rkgm, rktpar, + topic_metadata->topic, + &matched_by_regex)) { + rd_list_add(&rkgm->rkgm_eligible, + (void *)topic_metadata); + matched++; + has_regex += matched_by_regex; + } + } if (matched) rd_list_add(&eligible_topic->members, rkgm); @@ -221,17 +214,16 @@ static int rd_kafka_member_subscription_match ( } -static void -rd_kafka_assignor_topic_destroy (rd_kafka_assignor_topic_t *at) { +static void rd_kafka_assignor_topic_destroy(rd_kafka_assignor_topic_t *at) { rd_list_destroy(&at->members); rd_free(at); } -int rd_kafka_assignor_topic_cmp (const void *_a, const void *_b) { +int rd_kafka_assignor_topic_cmp(const void *_a, const void *_b) { const rd_kafka_assignor_topic_t *a = - *(const rd_kafka_assignor_topic_t * const *)_a; + *(const rd_kafka_assignor_topic_t *const *)_a; const rd_kafka_assignor_topic_t *b = - *(const rd_kafka_assignor_topic_t * const *)_b; + *(const 
rd_kafka_assignor_topic_t *const *)_b; return strcmp(a->metadata->topic, b->metadata->topic); } @@ -243,11 +235,11 @@ int rd_kafka_assignor_topic_cmp (const void *_a, const void *_b) { * returned in `eligible_topics`. */ static void -rd_kafka_member_subscriptions_map (rd_kafka_cgrp_t *rkcg, - rd_list_t *eligible_topics, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - int member_cnt) { +rd_kafka_member_subscriptions_map(rd_kafka_cgrp_t *rkcg, + rd_list_t *eligible_topics, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + int member_cnt) { int ti; rd_kafka_assignor_topic_t *eligible_topic = NULL; @@ -256,16 +248,16 @@ rd_kafka_member_subscriptions_map (rd_kafka_cgrp_t *rkcg, /* For each topic in the cluster, scan through the member list * to find matching subscriptions. */ - for (ti = 0 ; ti < metadata->topic_cnt ; ti++) { + for (ti = 0; ti < metadata->topic_cnt; ti++) { int i; /* Ignore topics in blacklist */ if (rkcg->rkcg_rk->rk_conf.topic_blacklist && - rd_kafka_pattern_match(rkcg->rkcg_rk->rk_conf. - topic_blacklist, - metadata->topics[ti].topic)) { - rd_kafka_dbg(rkcg->rkcg_rk, TOPIC|RD_KAFKA_DBG_ASSIGNOR, - "BLACKLIST", + rd_kafka_pattern_match( + rkcg->rkcg_rk->rk_conf.topic_blacklist, + metadata->topics[ti].topic)) { + rd_kafka_dbg(rkcg->rkcg_rk, + TOPIC | RD_KAFKA_DBG_ASSIGNOR, "BLACKLIST", "Assignor ignoring blacklisted " "topic \"%s\"", metadata->topics[ti].topic); @@ -278,12 +270,12 @@ rd_kafka_member_subscriptions_map (rd_kafka_cgrp_t *rkcg, rd_list_init(&eligible_topic->members, member_cnt, NULL); /* For each member: scan through its topic subscription */ - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { /* Match topic against existing metadata, incl regex matching. 
*/ rd_kafka_member_subscription_match( - rkcg, &members[i], &metadata->topics[ti], - eligible_topic); + rkcg, &members[i], &metadata->topics[ti], + eligible_topic); } if (rd_list_empty(&eligible_topic->members)) { @@ -301,13 +293,13 @@ rd_kafka_member_subscriptions_map (rd_kafka_cgrp_t *rkcg, } -rd_kafka_resp_err_t -rd_kafka_assignor_run (rd_kafka_cgrp_t *rkcg, - const rd_kafka_assignor_t *rkas, - rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - int member_cnt, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t rd_kafka_assignor_run(rd_kafka_cgrp_t *rkcg, + const rd_kafka_assignor_t *rkas, + rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + int member_cnt, + char *errstr, + size_t errstr_size) { rd_kafka_resp_err_t err; rd_ts_t ts_start = rd_clock(); int i; @@ -321,94 +313,82 @@ rd_kafka_assignor_run (rd_kafka_cgrp_t *rkcg, if (rkcg->rkcg_rk->rk_conf.debug & - (RD_KAFKA_DBG_CGRP|RD_KAFKA_DBG_ASSIGNOR)) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_ASSIGNOR, - "ASSIGN", - "Group \"%s\" running %s assignor for " - "%d member(s) and " - "%d eligible subscribed topic(s):", - rkcg->rkcg_group_id->str, - rkas->rkas_protocol_name->str, - member_cnt, - eligible_topics.rl_cnt); - - for (i = 0 ; i < member_cnt ; i++) { + (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_ASSIGNOR)) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN", + "Group \"%s\" running %s assignor for " + "%d member(s) and " + "%d eligible subscribed topic(s):", + rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str, + member_cnt, eligible_topics.rl_cnt); + + for (i = 0; i < member_cnt; i++) { const rd_kafka_group_member_t *member = &members[i]; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_ASSIGNOR, - "ASSIGN", - " Member \"%.*s\"%s with " - "%d owned partition(s) and " - "%d subscribed topic(s):", - RD_KAFKAP_STR_PR(member->rkgm_member_id), - !rd_kafkap_str_cmp(member->rkgm_member_id, - rkcg->rkcg_member_id) ? 
- " (me)":"", - member->rkgm_owned ? - member->rkgm_owned->cnt : 0, - member->rkgm_subscription->cnt); - for (j = 0 ; j < member->rkgm_subscription->cnt ; j++) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, + "ASSIGN", + " Member \"%.*s\"%s with " + "%d owned partition(s) and " + "%d subscribed topic(s):", + RD_KAFKAP_STR_PR(member->rkgm_member_id), + !rd_kafkap_str_cmp(member->rkgm_member_id, + rkcg->rkcg_member_id) + ? " (me)" + : "", + member->rkgm_owned ? member->rkgm_owned->cnt : 0, + member->rkgm_subscription->cnt); + for (j = 0; j < member->rkgm_subscription->cnt; j++) { const rd_kafka_topic_partition_t *p = - &member->rkgm_subscription->elems[j]; + &member->rkgm_subscription->elems[j]; rd_kafka_dbg(rkcg->rkcg_rk, - CGRP|RD_KAFKA_DBG_ASSIGNOR, - "ASSIGN", - " %s [%"PRId32"]", + CGRP | RD_KAFKA_DBG_ASSIGNOR, + "ASSIGN", " %s [%" PRId32 "]", p->topic, p->partition); } } - - } /* Call assignors assign callback */ - err = rkas->rkas_assign_cb(rkcg->rkcg_rk, rkas, - rkcg->rkcg_member_id->str, - metadata, - members, member_cnt, - (rd_kafka_assignor_topic_t **) - eligible_topics.rl_elems, - eligible_topics.rl_cnt, - errstr, errstr_size, - rkas->rkas_opaque); + err = rkas->rkas_assign_cb( + rkcg->rkcg_rk, rkas, rkcg->rkcg_member_id->str, metadata, members, + member_cnt, (rd_kafka_assignor_topic_t **)eligible_topics.rl_elems, + eligible_topics.rl_cnt, errstr, errstr_size, rkas->rkas_opaque); if (err) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_ASSIGNOR, - "ASSIGN", - "Group \"%s\" %s assignment failed " - "for %d member(s): %s", - rkcg->rkcg_group_id->str, - rkas->rkas_protocol_name->str, - (int)member_cnt, errstr); + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN", + "Group \"%s\" %s assignment failed " + "for %d member(s): %s", + rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str, + (int)member_cnt, errstr); } else if (rkcg->rkcg_rk->rk_conf.debug & - (RD_KAFKA_DBG_CGRP|RD_KAFKA_DBG_ASSIGNOR)) { - 
rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_ASSIGNOR, - "ASSIGN", - "Group \"%s\" %s assignment for %d member(s) " - "finished in %.3fms:", - rkcg->rkcg_group_id->str, - rkas->rkas_protocol_name->str, - (int)member_cnt, - (float)(rd_clock() - ts_start)/1000.0f); - for (i = 0 ; i < member_cnt ; i++) { + (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_ASSIGNOR)) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN", + "Group \"%s\" %s assignment for %d member(s) " + "finished in %.3fms:", + rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str, + (int)member_cnt, (float)(rd_clock() - ts_start) / 1000.0f); + for (i = 0; i < member_cnt; i++) { const rd_kafka_group_member_t *member = &members[i]; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_ASSIGNOR, - "ASSIGN", + rd_kafka_dbg(rkcg->rkcg_rk, + CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN", " Member \"%.*s\"%s assigned " "%d partition(s):", RD_KAFKAP_STR_PR(member->rkgm_member_id), !rd_kafkap_str_cmp(member->rkgm_member_id, - rkcg->rkcg_member_id) ? - " (me)":"", + rkcg->rkcg_member_id) + ? 
" (me)" + : "", member->rkgm_assignment->cnt); - for (j = 0 ; j < member->rkgm_assignment->cnt ; j++) { + for (j = 0; j < member->rkgm_assignment->cnt; j++) { const rd_kafka_topic_partition_t *p = - &member->rkgm_assignment->elems[j]; + &member->rkgm_assignment->elems[j]; rd_kafka_dbg(rkcg->rkcg_rk, - CGRP|RD_KAFKA_DBG_ASSIGNOR, - "ASSIGN", - " %s [%"PRId32"]", + CGRP | RD_KAFKA_DBG_ASSIGNOR, + "ASSIGN", " %s [%" PRId32 "]", p->topic, p->partition); } } @@ -423,8 +403,8 @@ rd_kafka_assignor_run (rd_kafka_cgrp_t *rkcg, /** * Assignor protocol string comparator */ -static int rd_kafka_assignor_cmp_str (const void *_a, const void *_b) { - const char *a = _a; +static int rd_kafka_assignor_cmp_str(const void *_a, const void *_b) { + const char *a = _a; const rd_kafka_assignor_t *b = _b; return rd_kafkap_str_cmp_str2(a, b->rkas_protocol_name); @@ -436,18 +416,18 @@ static int rd_kafka_assignor_cmp_str (const void *_a, const void *_b) { * Locality: any * Locks: none */ -rd_kafka_assignor_t * -rd_kafka_assignor_find (rd_kafka_t *rk, const char *protocol) { - return (rd_kafka_assignor_t *) - rd_list_find(&rk->rk_conf.partition_assignors, protocol, - rd_kafka_assignor_cmp_str); +rd_kafka_assignor_t *rd_kafka_assignor_find(rd_kafka_t *rk, + const char *protocol) { + return (rd_kafka_assignor_t *)rd_list_find( + &rk->rk_conf.partition_assignors, protocol, + rd_kafka_assignor_cmp_str); } /** * Destroys an assignor (but does not unlink). 
*/ -static void rd_kafka_assignor_destroy (rd_kafka_assignor_t *rkas) { +static void rd_kafka_assignor_destroy(rd_kafka_assignor_t *rkas) { rd_kafkap_str_destroy(rkas->rkas_protocol_type); rd_kafkap_str_destroy(rkas->rkas_protocol_name); rd_free(rkas); @@ -462,8 +442,8 @@ rd_kafka_resp_err_t rd_kafka_assignor_rebalance_protocol_check(const rd_kafka_conf_t *conf) { int i; rd_kafka_assignor_t *rkas; - rd_kafka_rebalance_protocol_t rebalance_protocol - = RD_KAFKA_REBALANCE_PROTOCOL_NONE; + rd_kafka_rebalance_protocol_t rebalance_protocol = + RD_KAFKA_REBALANCE_PROTOCOL_NONE; RD_LIST_FOREACH(rkas, &conf->partition_assignors, i) { if (!rkas->rkas_enabled) @@ -482,36 +462,36 @@ rd_kafka_assignor_rebalance_protocol_check(const rd_kafka_conf_t *conf) { /** * @brief Add an assignor. */ -rd_kafka_resp_err_t -rd_kafka_assignor_add (rd_kafka_t *rk, - const char *protocol_type, - const char *protocol_name, - rd_kafka_rebalance_protocol_t rebalance_protocol, - rd_kafka_resp_err_t (*assign_cb) ( - rd_kafka_t *rk, - const struct rd_kafka_assignor_s *rkas, - const char *member_id, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, size_t errstr_size, void *opaque), - rd_kafkap_bytes_t *(*get_metadata_cb) ( - const struct rd_kafka_assignor_s *rkas, - void *assignor_state, - const rd_list_t *topics, - const rd_kafka_topic_partition_list_t - *owned_partitions), - void (*on_assignment_cb) ( - const struct rd_kafka_assignor_s *rkas, - void **assignor_state, - const rd_kafka_topic_partition_list_t *assignment, - const rd_kafkap_bytes_t *userdata, - const rd_kafka_consumer_group_metadata_t *rkcgm), - void (*destroy_state_cb) (void *assignor_state), - int (*unittest_cb) (void), - void *opaque) { +rd_kafka_resp_err_t rd_kafka_assignor_add( + rd_kafka_t *rk, + const char *protocol_type, + const char *protocol_name, + rd_kafka_rebalance_protocol_t 
rebalance_protocol, + rd_kafka_resp_err_t (*assign_cb)( + rd_kafka_t *rk, + const struct rd_kafka_assignor_s *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque), + rd_kafkap_bytes_t *(*get_metadata_cb)( + const struct rd_kafka_assignor_s *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions), + void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas, + void **assignor_state, + const rd_kafka_topic_partition_list_t *assignment, + const rd_kafkap_bytes_t *userdata, + const rd_kafka_consumer_group_metadata_t *rkcgm), + void (*destroy_state_cb)(void *assignor_state), + int (*unittest_cb)(void), + void *opaque) { rd_kafka_assignor_t *rkas; if (rd_kafkap_str_cmp_str(rk->rk_conf.group_protocol_type, @@ -536,7 +516,7 @@ rd_kafka_assignor_add (rd_kafka_t *rk, rkas->rkas_on_assignment_cb = on_assignment_cb; rkas->rkas_destroy_state_cb = destroy_state_cb; rkas->rkas_unittest = unittest_cb; - rkas->rkas_opaque = opaque; + rkas->rkas_opaque = opaque; rd_list_add(&rk->rk_conf.partition_assignors, rkas); @@ -545,25 +525,25 @@ rd_kafka_assignor_add (rd_kafka_t *rk, /* Right trim string of whitespaces */ -static void rtrim (char *s) { - char *e = s + strlen(s); +static void rtrim(char *s) { + char *e = s + strlen(s); - if (e == s) - return; + if (e == s) + return; - while (e >= s && isspace(*e)) - e--; + while (e >= s && isspace(*e)) + e--; - *e = '\0'; + *e = '\0'; } /** * Initialize assignor list based on configuration. 
*/ -int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { - char *wanted; - char *s; +int rd_kafka_assignors_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) { + char *wanted; + char *s; rd_list_init(&rk->rk_conf.partition_assignors, 3, (void *)rd_kafka_assignor_destroy); @@ -573,32 +553,33 @@ int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { rd_kafka_roundrobin_assignor_init(rk); rd_kafka_sticky_assignor_init(rk); - rd_strdupa(&wanted, rk->rk_conf.partition_assignment_strategy); + rd_strdupa(&wanted, rk->rk_conf.partition_assignment_strategy); - s = wanted; - while (*s) { - rd_kafka_assignor_t *rkas = NULL; - char *t; + s = wanted; + while (*s) { + rd_kafka_assignor_t *rkas = NULL; + char *t; - /* Left trim */ - while (*s == ' ' || *s == ',') - s++; + /* Left trim */ + while (*s == ' ' || *s == ',') + s++; - if ((t = strchr(s, ','))) { - *t = '\0'; - t++; - } else { - t = s + strlen(s); - } + if ((t = strchr(s, ','))) { + *t = '\0'; + t++; + } else { + t = s + strlen(s); + } - /* Right trim */ - rtrim(s); + /* Right trim */ + rtrim(s); rkas = rd_kafka_assignor_find(rk, s); if (!rkas) { rd_snprintf(errstr, errstr_size, "Unsupported partition.assignment.strategy:" - " %s", s); + " %s", + s); return -1; } @@ -607,8 +588,8 @@ int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { rk->rk_conf.enabled_assignor_cnt++; } - s = t; - } + s = t; + } if (rd_kafka_assignor_rebalance_protocol_check(&rk->rk_conf)) { rd_snprintf(errstr, errstr_size, @@ -620,7 +601,7 @@ int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { return -1; } - return 0; + return 0; } @@ -628,7 +609,7 @@ int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { /** * Free assignors */ -void rd_kafka_assignors_term (rd_kafka_t *rk) { +void rd_kafka_assignors_term(rd_kafka_t *rk) { rd_list_destroy(&rk->rk_conf.partition_assignors); } @@ -637,7 +618,7 @@ void 
rd_kafka_assignors_term (rd_kafka_t *rk) { /** * @brief Unittest for assignors */ -static int ut_assignors (void) { +static int ut_assignors(void) { const struct { const char *name; int topic_cnt; @@ -648,7 +629,7 @@ static int ut_assignors (void) { int member_cnt; struct { const char *name; - int topic_cnt; + int topic_cnt; const char *topics[12]; } members[3]; int expect_cnt; @@ -660,204 +641,206 @@ static int ut_assignors (void) { } members[3]; } expect[2]; } tests[] = { - /* - * Test cases - */ - { - .name = "Symmetrical subscription", - .topic_cnt = 4, - .topics = { - { "a", 3 }, /* a:0 a:1 a:2 */ - { "b", 4, }, /* b:0 b:1 b:2 b:3 */ - { "c", 2 }, /* c:0 c:1 */ - { "d", 1 }, /* d:0 */ - }, - .member_cnt = 2, - .members = { - { .name = "consumer1", - .topic_cnt = 4, - .topics = { "d", "b", "a", "c" } }, - { .name = "consumer2", - .topic_cnt = 4, - .topics = { "a", "b", "c", "d" } }, - }, - .expect_cnt = 2, - .expect = { - { .protocol_name = "range", - .members = { - /* Consumer1 */ - { 6, - { "a:0", "a:1", - "b:0", "b:1", - "c:0", - "d:0" } }, - /* Consumer2 */ - { 4, - { "a:2", - "b:2" ,"b:3", - "c:1" } }, - }, + /* + * Test cases + */ + { + .name = "Symmetrical subscription", + .topic_cnt = 4, + .topics = + { + {"a", 3}, /* a:0 a:1 a:2 */ + { + "b", + 4, + }, /* b:0 b:1 b:2 b:3 */ + {"c", 2}, /* c:0 c:1 */ + {"d", 1}, /* d:0 */ + }, + .member_cnt = 2, + .members = + { + {.name = "consumer1", + .topic_cnt = 4, + .topics = {"d", "b", "a", "c"}}, + {.name = "consumer2", + .topic_cnt = 4, + .topics = {"a", "b", "c", "d"}}, + }, + .expect_cnt = 2, + .expect = + { + { + .protocol_name = "range", + .members = + { + /* Consumer1 */ + {6, + {"a:0", "a:1", "b:0", "b:1", "c:0", + "d:0"}}, + /* Consumer2 */ + {4, {"a:2", "b:2", "b:3", "c:1"}}, }, - { .protocol_name = "roundrobin", - .members = { - /* Consumer1 */ - { 5, - { "a:0", "a:2", - "b:1", "b:3", - "c:1" } }, - /* Consumer2 */ - { 5, - { "a:1", - "b:0" ,"b:2", - "c:0", - "d:0" } }, - }, - }, - }, - }, - { - .name 
= "1*3 partitions (asymmetrical)", - .topic_cnt = 1, - .topics = { - { "a", 3 }, }, - .member_cnt = 2, - .members = { - { .name = "consumer1", - .topic_cnt = 3, - .topics = { "a", "b", "c" } }, - { .name = "consumer2", - .topic_cnt = 1, - .topics = { "a" } }, - }, - .expect_cnt = 2, - .expect = { - { .protocol_name = "range", - .members = { - /* Consumer1. - * range assignor applies - * per topic. */ - { 2, - { "a:0", "a:1" } }, - /* Consumer2 */ - { 1, - { "a:2" } }, - }, + { + .protocol_name = "roundrobin", + .members = + { + /* Consumer1 */ + {5, {"a:0", "a:2", "b:1", "b:3", "c:1"}}, + /* Consumer2 */ + {5, {"a:1", "b:0", "b:2", "c:0", "d:0"}}, }, - { .protocol_name = "roundrobin", - .members = { - /* Consumer1 */ - { 2, - { "a:0", "a:2" } }, - /* Consumer2 */ - { 1, - { "a:1" } }, - }, + }, + }, + }, + { + .name = "1*3 partitions (asymmetrical)", + .topic_cnt = 1, + .topics = + { + {"a", 3}, + }, + .member_cnt = 2, + .members = + { + {.name = "consumer1", + .topic_cnt = 3, + .topics = {"a", "b", "c"}}, + {.name = "consumer2", .topic_cnt = 1, .topics = {"a"}}, + }, + .expect_cnt = 2, + .expect = + { + { + .protocol_name = "range", + .members = + { + /* Consumer1. + * range assignor applies + * per topic. 
*/ + {2, {"a:0", "a:1"}}, + /* Consumer2 */ + {1, {"a:2"}}, }, }, - }, - { - .name = "#2121 (asymmetrical)", - .topic_cnt = 12, - .topics = { - { "a", 1 }, - { "b", 1 }, - { "c", 1 }, - { "d", 1 }, - { "e", 1 }, - { "f", 1 }, - { "g", 1 }, - { "h", 1 }, - { "i", 1 }, - { "j", 1 }, - { "k", 1 }, - { "l", 1 }, + { + .protocol_name = "roundrobin", + .members = + { + /* Consumer1 */ + {2, {"a:0", "a:2"}}, + /* Consumer2 */ + {1, {"a:1"}}, + }, }, - .member_cnt = 2, - .members = { - { .name = "consumer1", - .topic_cnt = 12, - .topics = { - "a", - "b", - "c", - "d", - "e", - "f", - "g", - "h", - "i", - "j", - "k", - "l", - }, + }, + }, + { + .name = "#2121 (asymmetrical)", + .topic_cnt = 12, + .topics = + { + {"a", 1}, + {"b", 1}, + {"c", 1}, + {"d", 1}, + {"e", 1}, + {"f", 1}, + {"g", 1}, + {"h", 1}, + {"i", 1}, + {"j", 1}, + {"k", 1}, + {"l", 1}, + }, + .member_cnt = 2, + .members = + { + { + .name = "consumer1", + .topic_cnt = 12, + .topics = + { + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", }, - { .name = "consumer2", /* must be second */ - .topic_cnt = 5, - .topics = { - "b", - "d", - "f", - "h", - "l", - }, + }, + { + .name = "consumer2", /* must be second */ + .topic_cnt = 5, + .topics = + { + "b", + "d", + "f", + "h", + "l", }, }, - .expect_cnt = 2, - .expect = { - { .protocol_name = "range", - .members = { - /* Consumer1. - * All partitions. */ - { 12, - { - "a:0", - "b:0", - "c:0", - "d:0", - "e:0", - "f:0", - "g:0", - "h:0", - "i:0", - "j:0", - "k:0", - "l:0", - } - }, - /* Consumer2 */ - { 0 }, - }, + }, + .expect_cnt = 2, + .expect = + { + { + .protocol_name = "range", + .members = + { + /* Consumer1. + * All partitions. 
*/ + {12, + { + "a:0", + "b:0", + "c:0", + "d:0", + "e:0", + "f:0", + "g:0", + "h:0", + "i:0", + "j:0", + "k:0", + "l:0", + }}, + /* Consumer2 */ + {0}, }, - { .protocol_name = "roundrobin", - .members = { - /* Consumer1 */ - { 7, - { - "a:0", - "c:0", - "e:0", - "g:0", - "i:0", - "j:0", - "k:0", - }, - }, - /* Consumer2 */ - { 5, - { - "b:0", - "d:0", - "f:0", - "h:0", - "l:0" - } - }, + }, + { + .protocol_name = "roundrobin", + .members = + { + /* Consumer1 */ + { + 7, + { + "a:0", + "c:0", + "e:0", + "g:0", + "i:0", + "j:0", + "k:0", }, + }, + /* Consumer2 */ + {5, {"b:0", "d:0", "f:0", "h:0", "l:0"}}, }, }, - }, - { NULL }, + }, + }, + {NULL}, }; rd_kafka_conf_t *conf; rd_kafka_t *rk; @@ -867,28 +850,28 @@ static int ut_assignors (void) { conf = rd_kafka_conf_new(); rd_kafka_conf_set(conf, "group.id", "group", NULL, 0); - rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), - NULL, 0); + rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), NULL, + 0); rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, NULL, 0); RD_UT_ASSERT(rk != NULL, "Failed to create consumer"); /* Run through test cases */ - for (i = 0 ; tests[i].name ; i++) { + for (i = 0; tests[i].name; i++) { int ie, it, im; rd_kafka_metadata_t metadata; rd_kafka_group_member_t *members; /* Create topic metadata */ metadata.topic_cnt = tests[i].topic_cnt; - metadata.topics = rd_alloca(sizeof(*metadata.topics) * - metadata.topic_cnt); + metadata.topics = + rd_alloca(sizeof(*metadata.topics) * metadata.topic_cnt); memset(metadata.topics, 0, sizeof(*metadata.topics) * metadata.topic_cnt); - for (it = 0 ; it < metadata.topic_cnt ; it++) { + for (it = 0; it < metadata.topic_cnt; it++) { metadata.topics[it].topic = - (char *)tests[i].topics[it].name; + (char *)tests[i].topics[it].name; metadata.topics[it].partition_cnt = - tests[i].topics[it].partition_cnt; + tests[i].topics[it].partition_cnt; metadata.topics[it].partitions = NULL; /* Not used */ } @@ -896,98 +879,100 @@ static int ut_assignors 
(void) { members = rd_alloca(sizeof(*members) * tests[i].member_cnt); memset(members, 0, sizeof(*members) * tests[i].member_cnt); - for (im = 0 ; im < tests[i].member_cnt ; im++) { + for (im = 0; im < tests[i].member_cnt; im++) { rd_kafka_group_member_t *rkgm = &members[im]; rkgm->rkgm_member_id = - rd_kafkap_str_new(tests[i].members[im].name, - -1); + rd_kafkap_str_new(tests[i].members[im].name, -1); rkgm->rkgm_group_instance_id = - rd_kafkap_str_new(tests[i].members[im].name, - -1); + rd_kafkap_str_new(tests[i].members[im].name, -1); rd_list_init(&rkgm->rkgm_eligible, tests[i].members[im].topic_cnt, NULL); rkgm->rkgm_subscription = - rd_kafka_topic_partition_list_new( - tests[i].members[im].topic_cnt); + rd_kafka_topic_partition_list_new( + tests[i].members[im].topic_cnt); for (it = 0; it < tests[i].members[im].topic_cnt; it++) rd_kafka_topic_partition_list_add( - rkgm->rkgm_subscription, - tests[i].members[im].topics[it], - RD_KAFKA_PARTITION_UA); + rkgm->rkgm_subscription, + tests[i].members[im].topics[it], + RD_KAFKA_PARTITION_UA); rkgm->rkgm_userdata = NULL; rkgm->rkgm_assignment = - rd_kafka_topic_partition_list_new( - rkgm->rkgm_subscription->size); + rd_kafka_topic_partition_list_new( + rkgm->rkgm_subscription->size); } /* For each assignor verify that the assignment * matches the expection set out in the test case. 
*/ - for (ie = 0 ; ie < tests[i].expect_cnt ; ie++) { + for (ie = 0; ie < tests[i].expect_cnt; ie++) { rd_kafka_resp_err_t err; char errstr[256]; - RD_UT_SAY("Test case %s: %s assignor", - tests[i].name, + RD_UT_SAY("Test case %s: %s assignor", tests[i].name, tests[i].expect[ie].protocol_name); - if (!(rkas = rd_kafka_assignor_find(rk, - tests[i].expect[ie].protocol_name))) { - RD_UT_FAIL("Assignor test case %s for %s failed: " - "assignor not found", - tests[i].name, - tests[i].expect[ie].protocol_name); + if (!(rkas = rd_kafka_assignor_find( + rk, tests[i].expect[ie].protocol_name))) { + RD_UT_FAIL( + "Assignor test case %s for %s failed: " + "assignor not found", + tests[i].name, + tests[i].expect[ie].protocol_name); } /* Run assignor */ err = rd_kafka_assignor_run( - rk->rk_cgrp, rkas, - &metadata, - members, tests[i].member_cnt, - errstr, sizeof(errstr)); + rk->rk_cgrp, rkas, &metadata, members, + tests[i].member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "Assignor case %s for %s failed: %s", tests[i].name, - tests[i].expect[ie].protocol_name, - errstr); + tests[i].expect[ie].protocol_name, errstr); /* Verify assignments */ - for (im = 0 ; im < tests[i].member_cnt ; im++) { + for (im = 0; im < tests[i].member_cnt; im++) { rd_kafka_group_member_t *rkgm = &members[im]; int ia; if (rkgm->rkgm_assignment->cnt != - tests[i].expect[ie].members[im]. - partition_cnt) { + tests[i] + .expect[ie] + .members[im] + .partition_cnt) { RD_UT_WARN( - " Member %.*s assignment count " - "mismatch: %d != %d", - RD_KAFKAP_STR_PR( - rkgm->rkgm_member_id), - rkgm->rkgm_assignment->cnt, - tests[i].expect[ie].members[im]. 
- partition_cnt); + " Member %.*s assignment count " + "mismatch: %d != %d", + RD_KAFKAP_STR_PR( + rkgm->rkgm_member_id), + rkgm->rkgm_assignment->cnt, + tests[i] + .expect[ie] + .members[im] + .partition_cnt); fails++; } if (rkgm->rkgm_assignment->cnt > 0) rd_kafka_topic_partition_list_sort_by_topic( - rkgm->rkgm_assignment); + rkgm->rkgm_assignment); - for (ia = 0 ; - ia < rkgm->rkgm_assignment->cnt ; ia++) { + for (ia = 0; ia < rkgm->rkgm_assignment->cnt; + ia++) { rd_kafka_topic_partition_t *p = - &rkgm->rkgm_assignment-> - elems[ia]; + &rkgm->rkgm_assignment->elems[ia]; char part[64]; const char *exp = - ia < tests[i].expect[ie]. - members[im].partition_cnt ? - tests[i].expect[ie]. - members[im].partitions[ia] : - "(none)"; + ia < tests[i] + .expect[ie] + .members[im] + .partition_cnt + ? tests[i] + .expect[ie] + .members[im] + .partitions[ia] + : "(none)"; rd_snprintf(part, sizeof(part), "%s:%d", p->topic, @@ -1005,31 +990,29 @@ static int ut_assignors (void) { if (strcmp(part, exp)) { RD_UT_WARN( - " Member %.*s " - "assignment %d/%d " - "mismatch: %s != %s", - RD_KAFKAP_STR_PR( - rkgm-> - rkgm_member_id), - ia, - rkgm->rkgm_assignment-> - cnt-1, - part, exp); + " Member %.*s " + "assignment %d/%d " + "mismatch: %s != %s", + RD_KAFKAP_STR_PR( + rkgm->rkgm_member_id), + ia, + rkgm->rkgm_assignment->cnt - + 1, + part, exp); fails++; } } /* Reset assignment for next loop */ rd_kafka_topic_partition_list_destroy( - rkgm->rkgm_assignment); + rkgm->rkgm_assignment); rkgm->rkgm_assignment = - rd_kafka_topic_partition_list_new( - rkgm->rkgm_subscription->size); + rd_kafka_topic_partition_list_new( + rkgm->rkgm_subscription->size); } - } - for (im = 0 ; im < tests[i].member_cnt ; im++) { + for (im = 0; im < tests[i].member_cnt; im++) { rd_kafka_group_member_t *rkgm = &members[im]; rd_kafka_group_member_clear(rkgm); } @@ -1054,6 +1037,6 @@ static int ut_assignors (void) { /** * @brief Unit tests for assignors */ -int unittest_assignors (void) { +int 
unittest_assignors(void) { return ut_assignors(); } diff --git a/src/rdkafka_assignor.h b/src/rdkafka_assignor.h index 07d413c3e1..ad82be9b70 100644 --- a/src/rdkafka_assignor.h +++ b/src/rdkafka_assignor.h @@ -46,8 +46,6 @@ typedef enum rd_kafka_rebalance_protocol_t { - - typedef struct rd_kafka_group_member_s { /** Subscribed topics (partition field is ignored). */ rd_kafka_topic_partition_list_t *rkgm_subscription; @@ -60,26 +58,25 @@ typedef struct rd_kafka_group_member_s { rd_kafka_topic_partition_list_t *rkgm_owned; /** List of eligible topics in subscription. E.g., subscribed topics * that exist. */ - rd_list_t rkgm_eligible; + rd_list_t rkgm_eligible; /** Member id (e.g., client.id-some-uuid). */ - rd_kafkap_str_t *rkgm_member_id; + rd_kafkap_str_t *rkgm_member_id; /** Group instance id. */ - rd_kafkap_str_t *rkgm_group_instance_id; + rd_kafkap_str_t *rkgm_group_instance_id; /** Member-specific opaque userdata. */ - rd_kafkap_bytes_t *rkgm_userdata; + rd_kafkap_bytes_t *rkgm_userdata; /** Member metadata, e.g., the currently owned partitions. */ - rd_kafkap_bytes_t *rkgm_member_metadata; + rd_kafkap_bytes_t *rkgm_member_metadata; /** Group generation id. 
*/ - int rkgm_generation; + int rkgm_generation; } rd_kafka_group_member_t; -int rd_kafka_group_member_cmp (const void *_a, const void *_b); +int rd_kafka_group_member_cmp(const void *_a, const void *_b); -int -rd_kafka_group_member_find_subscription (rd_kafka_t *rk, - const rd_kafka_group_member_t *rkgm, - const char *topic); +int rd_kafka_group_member_find_subscription(rd_kafka_t *rk, + const rd_kafka_group_member_t *rkgm, + const char *topic); /** @@ -88,126 +85,125 @@ rd_kafka_group_member_find_subscription (rd_kafka_t *rk, */ typedef struct rd_kafka_assignor_topic_s { const rd_kafka_metadata_topic_t *metadata; - rd_list_t members; /* rd_kafka_group_member_t * */ + rd_list_t members; /* rd_kafka_group_member_t * */ } rd_kafka_assignor_topic_t; -int rd_kafka_assignor_topic_cmp (const void *_a, const void *_b); +int rd_kafka_assignor_topic_cmp(const void *_a, const void *_b); typedef struct rd_kafka_assignor_s { rd_kafkap_str_t *rkas_protocol_type; rd_kafkap_str_t *rkas_protocol_name; - int rkas_enabled; + int rkas_enabled; rd_kafka_rebalance_protocol_t rkas_protocol; - rd_kafka_resp_err_t (*rkas_assign_cb) ( - rd_kafka_t *rk, - const struct rd_kafka_assignor_s *rkas, - const char *member_id, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, - size_t errstr_size, - void *opaque); - - rd_kafkap_bytes_t *(*rkas_get_metadata_cb) ( - const struct rd_kafka_assignor_s *rkas, - void *assignor_state, - const rd_list_t *topics, - const rd_kafka_topic_partition_list_t *owned_partitions); - - void (*rkas_on_assignment_cb) ( - const struct rd_kafka_assignor_s *rkas, - void **assignor_state, - const rd_kafka_topic_partition_list_t *assignment, - const rd_kafkap_bytes_t *assignment_userdata, - const rd_kafka_consumer_group_metadata_t *rkcgm); - - void (*rkas_destroy_state_cb) (void *assignor_state); - - int (*rkas_unittest) (void); + 
rd_kafka_resp_err_t (*rkas_assign_cb)( + rd_kafka_t *rk, + const struct rd_kafka_assignor_s *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque); + + rd_kafkap_bytes_t *(*rkas_get_metadata_cb)( + const struct rd_kafka_assignor_s *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions); + + void (*rkas_on_assignment_cb)( + const struct rd_kafka_assignor_s *rkas, + void **assignor_state, + const rd_kafka_topic_partition_list_t *assignment, + const rd_kafkap_bytes_t *assignment_userdata, + const rd_kafka_consumer_group_metadata_t *rkcgm); + + void (*rkas_destroy_state_cb)(void *assignor_state); + + int (*rkas_unittest)(void); void *rkas_opaque; } rd_kafka_assignor_t; -rd_kafka_resp_err_t -rd_kafka_assignor_add (rd_kafka_t *rk, - const char *protocol_type, - const char *protocol_name, - rd_kafka_rebalance_protocol_t rebalance_protocol, - rd_kafka_resp_err_t (*assign_cb) ( - rd_kafka_t *rk, - const struct rd_kafka_assignor_s *rkas, - const char *member_id, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, size_t errstr_size, void *opaque), - rd_kafkap_bytes_t *(*get_metadata_cb) ( - const struct rd_kafka_assignor_s *rkas, - void *assignor_state, - const rd_list_t *topics, - const rd_kafka_topic_partition_list_t - *owned_partitions), - void (*on_assignment_cb) ( - const struct rd_kafka_assignor_s *rkas, - void **assignor_state, - const rd_kafka_topic_partition_list_t *assignment, - const rd_kafkap_bytes_t *userdata, - const rd_kafka_consumer_group_metadata_t *rkcgm), - void (*destroy_state_cb) (void *assignor_state), - int (*unittest_cb) (void), - void *opaque); - 
-rd_kafkap_bytes_t * -rd_kafka_consumer_protocol_member_metadata_new (const rd_list_t *topics, - const void *userdata, - size_t userdata_size, - const rd_kafka_topic_partition_list_t - *owned_partitions); - -rd_kafkap_bytes_t * -rd_kafka_assignor_get_metadata_with_empty_userdata (const rd_kafka_assignor_t *rkas, - void *assignor_state, - const rd_list_t *topics, - const rd_kafka_topic_partition_list_t - *owned_partitions); - - -void rd_kafka_assignor_update_subscription (const rd_kafka_assignor_t *rkas, - const rd_kafka_topic_partition_list_t - *subscription); - - -rd_kafka_resp_err_t -rd_kafka_assignor_run (struct rd_kafka_cgrp_s *rkcg, - const rd_kafka_assignor_t *rkas, - rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, int member_cnt, - char *errstr, size_t errstr_size); - -rd_kafka_assignor_t * -rd_kafka_assignor_find (rd_kafka_t *rk, const char *protocol); - -int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size); -void rd_kafka_assignors_term (rd_kafka_t *rk); - - - -void rd_kafka_group_member_clear (rd_kafka_group_member_t *rkgm); - - -rd_kafka_resp_err_t rd_kafka_range_assignor_init (rd_kafka_t *rk); -rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init (rd_kafka_t *rk); -rd_kafka_resp_err_t rd_kafka_sticky_assignor_init (rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_assignor_add( + rd_kafka_t *rk, + const char *protocol_type, + const char *protocol_name, + rd_kafka_rebalance_protocol_t rebalance_protocol, + rd_kafka_resp_err_t (*assign_cb)( + rd_kafka_t *rk, + const struct rd_kafka_assignor_s *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque), + rd_kafkap_bytes_t *(*get_metadata_cb)( + const struct rd_kafka_assignor_s *rkas, + void *assignor_state, + const rd_list_t *topics, + const 
rd_kafka_topic_partition_list_t *owned_partitions), + void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas, + void **assignor_state, + const rd_kafka_topic_partition_list_t *assignment, + const rd_kafkap_bytes_t *userdata, + const rd_kafka_consumer_group_metadata_t *rkcgm), + void (*destroy_state_cb)(void *assignor_state), + int (*unittest_cb)(void), + void *opaque); + +rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new( + const rd_list_t *topics, + const void *userdata, + size_t userdata_size, + const rd_kafka_topic_partition_list_t *owned_partitions); + +rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata( + const rd_kafka_assignor_t *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions); + + +void rd_kafka_assignor_update_subscription( + const rd_kafka_assignor_t *rkas, + const rd_kafka_topic_partition_list_t *subscription); + + +rd_kafka_resp_err_t rd_kafka_assignor_run(struct rd_kafka_cgrp_s *rkcg, + const rd_kafka_assignor_t *rkas, + rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + int member_cnt, + char *errstr, + size_t errstr_size); + +rd_kafka_assignor_t *rd_kafka_assignor_find(rd_kafka_t *rk, + const char *protocol); + +int rd_kafka_assignors_init(rd_kafka_t *rk, char *errstr, size_t errstr_size); +void rd_kafka_assignors_term(rd_kafka_t *rk); + + + +void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm); + + +rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk); #endif /* _RDKAFKA_ASSIGNOR_H_ */ diff --git a/src/rdkafka_aux.c b/src/rdkafka_aux.c index 37b149f795..44768fe0bd 100644 --- a/src/rdkafka_aux.c +++ b/src/rdkafka_aux.c @@ -32,17 +32,17 @@ #include "rdkafka_error.h" rd_kafka_resp_err_t -rd_kafka_topic_result_error (const rd_kafka_topic_result_t *topicres) { 
+rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres) { return topicres->err; } const char * -rd_kafka_topic_result_error_string (const rd_kafka_topic_result_t *topicres) { +rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres) { return topicres->errstr; } const char * -rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres) { +rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres) { return topicres->topic; } @@ -58,10 +58,10 @@ rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres) { * All input arguments are copied. */ -rd_kafka_topic_result_t * -rd_kafka_topic_result_new (const char *topic, ssize_t topic_size, - rd_kafka_resp_err_t err, - const char *errstr) { +rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic, + ssize_t topic_size, + rd_kafka_resp_err_t err, + const char *errstr) { size_t tlen = topic_size != -1 ? (size_t)topic_size : strlen(topic); size_t elen = errstr ? strlen(errstr) + 1 : 0; rd_kafka_topic_result_t *terr; @@ -88,50 +88,46 @@ rd_kafka_topic_result_new (const char *topic, ssize_t topic_size, /** * @brief Destroy topic_result */ -void rd_kafka_topic_result_destroy (rd_kafka_topic_result_t *terr) { +void rd_kafka_topic_result_destroy(rd_kafka_topic_result_t *terr) { rd_free(terr); } /** * @brief Destroy-variant suitable for rd_list free_cb use. 
*/ -void rd_kafka_topic_result_free (void *ptr) { +void rd_kafka_topic_result_free(void *ptr) { rd_kafka_topic_result_destroy((rd_kafka_topic_result_t *)ptr); } const rd_kafka_error_t * -rd_kafka_group_result_error (const rd_kafka_group_result_t *groupres) { +rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres) { return groupres->error; } const char * -rd_kafka_group_result_name (const rd_kafka_group_result_t *groupres) { +rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres) { return groupres->group; } const rd_kafka_topic_partition_list_t * -rd_kafka_group_result_partitions (const rd_kafka_group_result_t *groupres) { +rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres) { return groupres->partitions; } rd_kafka_group_result_t * -rd_kafka_group_result_copy (const rd_kafka_group_result_t *groupres) { - return rd_kafka_group_result_new(groupres->group, - -1, - groupres->partitions, - groupres->error ? - rd_kafka_error_copy(groupres->error) : - NULL); +rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres) { + return rd_kafka_group_result_new( + groupres->group, -1, groupres->partitions, + groupres->error ? rd_kafka_error_copy(groupres->error) : NULL); } /** * @brief Same as rd_kafka_group_result_copy() but suitable for * rd_list_copy(). The \p opaque is ignored. 
*/ -void * -rd_kafka_group_result_copy_opaque (const void *src_groupres, - void *opaque) { - return rd_kafka_group_result_copy(src_groupres); +void *rd_kafka_group_result_copy_opaque(const void *src_groupres, + void *opaque) { + return rd_kafka_group_result_copy(src_groupres); } @@ -147,9 +143,10 @@ rd_kafka_group_result_copy_opaque (const void *src_groupres, */ rd_kafka_group_result_t * -rd_kafka_group_result_new (const char *group, ssize_t group_size, - const rd_kafka_topic_partition_list_t *partitions, - rd_kafka_error_t *error) { +rd_kafka_group_result_new(const char *group, + ssize_t group_size, + const rd_kafka_topic_partition_list_t *partitions, + rd_kafka_error_t *error) { size_t glen = group_size != -1 ? (size_t)group_size : strlen(group); rd_kafka_group_result_t *groupres; @@ -161,8 +158,8 @@ rd_kafka_group_result_new (const char *group, ssize_t group_size, groupres->group[glen] = '\0'; if (partitions) - groupres->partitions = rd_kafka_topic_partition_list_copy( - partitions); + groupres->partitions = + rd_kafka_topic_partition_list_copy(partitions); groupres->error = error; @@ -170,10 +167,10 @@ rd_kafka_group_result_new (const char *group, ssize_t group_size, } - /** +/** * @brief Destroy group_result */ -void rd_kafka_group_result_destroy (rd_kafka_group_result_t *groupres) { +void rd_kafka_group_result_destroy(rd_kafka_group_result_t *groupres) { if (groupres->partitions) rd_kafka_topic_partition_list_destroy(groupres->partitions); if (groupres->error) @@ -181,9 +178,9 @@ void rd_kafka_group_result_destroy (rd_kafka_group_result_t *groupres) { rd_free(groupres); } - /** +/** * @brief Destroy-variant suitable for rd_list free_cb use. 
*/ -void rd_kafka_group_result_free (void *ptr) { +void rd_kafka_group_result_free(void *ptr) { rd_kafka_group_result_destroy((rd_kafka_group_result_t *)ptr); } diff --git a/src/rdkafka_aux.h b/src/rdkafka_aux.h index c4cea4d997..cdd2901bde 100644 --- a/src/rdkafka_aux.h +++ b/src/rdkafka_aux.h @@ -47,16 +47,16 @@ struct rd_kafka_topic_result_s { char *topic; /**< Points to data */ rd_kafka_resp_err_t err; /**< Error code */ char *errstr; /**< Points to data after topic, unless NULL */ - char data[1]; /**< topic followed by errstr */ + char data[1]; /**< topic followed by errstr */ }; -void rd_kafka_topic_result_destroy (rd_kafka_topic_result_t *terr); -void rd_kafka_topic_result_free (void *ptr); +void rd_kafka_topic_result_destroy(rd_kafka_topic_result_t *terr); +void rd_kafka_topic_result_free(void *ptr); -rd_kafka_topic_result_t * -rd_kafka_topic_result_new (const char *topic, ssize_t topic_size, - rd_kafka_resp_err_t err, - const char *errstr); +rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic, + ssize_t topic_size, + rd_kafka_resp_err_t err, + const char *errstr); /**@}*/ @@ -71,22 +71,21 @@ struct rd_kafka_group_result_s { rd_kafka_error_t *error; /**< Error object, or NULL on success */ /** Partitions, used by DeleteConsumerGroupOffsets. 
*/ rd_kafka_topic_partition_list_t *partitions; - char data[1]; /**< Group name */ + char data[1]; /**< Group name */ }; -void rd_kafka_group_result_destroy (rd_kafka_group_result_t *terr); -void rd_kafka_group_result_free (void *ptr); +void rd_kafka_group_result_destroy(rd_kafka_group_result_t *terr); +void rd_kafka_group_result_free(void *ptr); rd_kafka_group_result_t * -rd_kafka_group_result_new (const char *group, ssize_t group_size, - const rd_kafka_topic_partition_list_t *partitions, - rd_kafka_error_t *error); +rd_kafka_group_result_new(const char *group, + ssize_t group_size, + const rd_kafka_topic_partition_list_t *partitions, + rd_kafka_error_t *error); rd_kafka_group_result_t * -rd_kafka_group_result_copy (const rd_kafka_group_result_t *groupres); -void * -rd_kafka_group_result_copy_opaque (const void *src_groupres, - void *opaque); +rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres); +void *rd_kafka_group_result_copy_opaque(const void *src_groupres, void *opaque); /**@}*/ #endif /* _RDKAFKA_AUX_H_ */ diff --git a/src/rdkafka_background.c b/src/rdkafka_background.c index 178cb714a7..4bf0c9d1db 100644 --- a/src/rdkafka_background.c +++ b/src/rdkafka_background.c @@ -43,8 +43,8 @@ * @brief Call the registered background_event_cb. * @locality rdkafka background queue thread */ -static RD_INLINE void -rd_kafka_call_background_event_cb (rd_kafka_t *rk, rd_kafka_op_t *rko) { +static RD_INLINE void rd_kafka_call_background_event_cb(rd_kafka_t *rk, + rd_kafka_op_t *rko) { rd_assert(!rk->rk_background.calling); rk->rk_background.calling = 1; @@ -64,11 +64,11 @@ rd_kafka_call_background_event_cb (rd_kafka_t *rk, rd_kafka_op_t *rko) { * APIs to the background queue. 
*/ static rd_kafka_op_res_t -rd_kafka_background_queue_serve (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, - void *opaque) { +rd_kafka_background_queue_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { rd_kafka_op_res_t res; /* @@ -109,7 +109,7 @@ rd_kafka_background_queue_serve (rd_kafka_t *rk, /** * @brief Main loop for background queue thread. */ -int rd_kafka_background_thread_main (void *arg) { +int rd_kafka_background_thread_main(void *arg) { rd_kafka_t *rk = arg; rd_kafka_set_thread_name("background"); @@ -130,7 +130,7 @@ int rd_kafka_background_thread_main (void *arg) { mtx_unlock(&rk->rk_init_lock); while (likely(!rd_kafka_terminating(rk))) { - rd_kafka_q_serve(rk->rk_background.q, 10*1000, 0, + rd_kafka_q_serve(rk->rk_background.q, 10 * 1000, 0, RD_KAFKA_Q_CB_RETURN, rd_kafka_background_queue_serve, NULL); } @@ -144,8 +144,7 @@ int rd_kafka_background_thread_main (void *arg) { rd_kafka_q_disable(rk->rk_background.q); rd_kafka_q_purge(rk->rk_background.q); - rd_kafka_dbg(rk, GENERIC, "BGQUEUE", - "Background queue thread exiting"); + rd_kafka_dbg(rk, GENERIC, "BGQUEUE", "Background queue thread exiting"); rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_BACKGROUND); @@ -161,9 +160,9 @@ int rd_kafka_background_thread_main (void *arg) { * @locks_acquired rk_init_lock * @locks_required rd_kafka_wrlock() */ -rd_kafka_resp_err_t rd_kafka_background_thread_create (rd_kafka_t *rk, - char *errstr, - size_t errstr_size) { +rd_kafka_resp_err_t rd_kafka_background_thread_create(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { #ifndef _WIN32 sigset_t newset, oldset; #endif @@ -188,9 +187,8 @@ rd_kafka_resp_err_t rd_kafka_background_thread_create (rd_kafka_t *rk, sigemptyset(&oldset); sigfillset(&newset); if (rk->rk_conf.term_sig) { - struct sigaction sa_term = { - .sa_handler = rd_kafka_term_sig_handler - }; + struct sigaction sa_term = 
{.sa_handler = + rd_kafka_term_sig_handler}; sigaction(rk->rk_conf.term_sig, &sa_term, NULL); } pthread_sigmask(SIG_SETMASK, &newset, &oldset); diff --git a/src/rdkafka_broker.c b/src/rdkafka_broker.c index 7a3ca2fa21..7bc6b0e11a 100644 --- a/src/rdkafka_broker.c +++ b/src/rdkafka_broker.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2015, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -78,26 +78,16 @@ static const int rd_kafka_max_block_ms = 1000; const char *rd_kafka_broker_state_names[] = { - "INIT", - "DOWN", - "TRY_CONNECT", - "CONNECT", - "SSL_HANDSHAKE", - "AUTH_LEGACY", - "UP", - "UPDATE", - "APIVERSION_QUERY", - "AUTH_HANDSHAKE", - "AUTH_REQ" -}; + "INIT", "DOWN", "TRY_CONNECT", "CONNECT", "SSL_HANDSHAKE", + "AUTH_LEGACY", "UP", "UPDATE", "APIVERSION_QUERY", "AUTH_HANDSHAKE", + "AUTH_REQ"}; const char *rd_kafka_secproto_names[] = { - [RD_KAFKA_PROTO_PLAINTEXT] = "plaintext", - [RD_KAFKA_PROTO_SSL] = "ssl", - [RD_KAFKA_PROTO_SASL_PLAINTEXT] = "sasl_plaintext", - [RD_KAFKA_PROTO_SASL_SSL] = "sasl_ssl", - NULL -}; + [RD_KAFKA_PROTO_PLAINTEXT] = "plaintext", + [RD_KAFKA_PROTO_SSL] = "ssl", + [RD_KAFKA_PROTO_SASL_PLAINTEXT] = "sasl_plaintext", + [RD_KAFKA_PROTO_SASL_SSL] = "sasl_ssl", + NULL}; /** @@ -112,9 +102,9 @@ const char *rd_kafka_secproto_names[] = { * @locaility broker thread */ static RD_INLINE rd_bool_t -rd_kafka_broker_needs_persistent_connection (rd_kafka_broker_t *rkb) { +rd_kafka_broker_needs_persistent_connection(rd_kafka_broker_t *rkb) { return rkb->rkb_persistconn.internal || - rd_atomic32_get(&rkb->rkb_persistconn.coord); + rd_atomic32_get(&rkb->rkb_persistconn.coord); } @@ -123,63 +113,65 @@ rd_kafka_broker_needs_persistent_connection (rd_kafka_broker_t *rkb) { * @locality broker thread * @locks none */ -static RD_INLINE int -rd_kafka_broker_needs_connection (rd_kafka_broker_t *rkb) { +static 
RD_INLINE int rd_kafka_broker_needs_connection(rd_kafka_broker_t *rkb) { return rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT && - !rd_kafka_terminating(rkb->rkb_rk) && - !rd_kafka_fatal_error_code(rkb->rkb_rk) && - (!rkb->rkb_rk->rk_conf.sparse_connections || - rd_kafka_broker_needs_persistent_connection(rkb)); + !rd_kafka_terminating(rkb->rkb_rk) && + !rd_kafka_fatal_error_code(rkb->rkb_rk) && + (!rkb->rkb_rk->rk_conf.sparse_connections || + rd_kafka_broker_needs_persistent_connection(rkb)); } -static void rd_kafka_broker_handle_purge_queues (rd_kafka_broker_t *rkb, - rd_kafka_op_t *rko); -static void rd_kafka_broker_trigger_monitors (rd_kafka_broker_t *rkb); +static void rd_kafka_broker_handle_purge_queues(rd_kafka_broker_t *rkb, + rd_kafka_op_t *rko); +static void rd_kafka_broker_trigger_monitors(rd_kafka_broker_t *rkb); -#define rd_kafka_broker_terminating(rkb) \ +#define rd_kafka_broker_terminating(rkb) \ (rd_refcnt_get(&(rkb)->rkb_refcnt) <= 1) /** * Construct broker nodename. */ -static void rd_kafka_mk_nodename (char *dest, size_t dsize, - const char *name, uint16_t port) { +static void rd_kafka_mk_nodename(char *dest, + size_t dsize, + const char *name, + uint16_t port) { rd_snprintf(dest, dsize, "%s:%hu", name, port); } /** * Construct descriptive broker name */ -static void rd_kafka_mk_brokername (char *dest, size_t dsize, - rd_kafka_secproto_t proto, - const char *nodename, int32_t nodeid, - rd_kafka_confsource_t source) { +static void rd_kafka_mk_brokername(char *dest, + size_t dsize, + rd_kafka_secproto_t proto, + const char *nodename, + int32_t nodeid, + rd_kafka_confsource_t source) { /* Prepend protocol name to brokername, unless it is a * standard plaintext or logical broker in which case we * omit the protocol part. */ - if (proto != RD_KAFKA_PROTO_PLAINTEXT && - source != RD_KAFKA_LOGICAL) { - int r = rd_snprintf(dest, dsize, "%s://", - rd_kafka_secproto_names[proto]); - if (r >= (int)dsize) /* Skip proto name if it wont fit.. 
*/ - r = 0; - - dest += r; - dsize -= r; - } - - if (nodeid == RD_KAFKA_NODEID_UA) - rd_snprintf(dest, dsize, "%s%s", - nodename, - source == RD_KAFKA_LOGICAL ? "" : - (source == RD_KAFKA_INTERNAL ? - "/internal" : "/bootstrap")); - else - rd_snprintf(dest, dsize, "%s/%"PRId32, nodename, nodeid); + if (proto != RD_KAFKA_PROTO_PLAINTEXT && source != RD_KAFKA_LOGICAL) { + int r = rd_snprintf(dest, dsize, "%s://", + rd_kafka_secproto_names[proto]); + if (r >= (int)dsize) /* Skip proto name if it wont fit.. */ + r = 0; + + dest += r; + dsize -= r; + } + + if (nodeid == RD_KAFKA_NODEID_UA) + rd_snprintf(dest, dsize, "%s%s", nodename, + source == RD_KAFKA_LOGICAL + ? "" + : (source == RD_KAFKA_INTERNAL ? "/internal" + : "/bootstrap")); + else + rd_snprintf(dest, dsize, "%s/%" PRId32, nodename, nodeid); } @@ -189,17 +181,16 @@ static void rd_kafka_mk_brokername (char *dest, size_t dsize, * @locks broker_lock MUST be held * @locality broker thread */ -static void rd_kafka_broker_feature_enable (rd_kafka_broker_t *rkb, - int features) { - if (features & rkb->rkb_features) - return; - - rkb->rkb_features |= features; - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE, - "FEATURE", - "Updated enabled protocol features +%s to %s", - rd_kafka_features2str(features), - rd_kafka_features2str(rkb->rkb_features)); +static void rd_kafka_broker_feature_enable(rd_kafka_broker_t *rkb, + int features) { + if (features & rkb->rkb_features) + return; + + rkb->rkb_features |= features; + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE, + "FEATURE", "Updated enabled protocol features +%s to %s", + rd_kafka_features2str(features), + rd_kafka_features2str(rkb->rkb_features)); } @@ -209,17 +200,16 @@ static void rd_kafka_broker_feature_enable (rd_kafka_broker_t *rkb, * @locks broker_lock MUST be held * @locality broker thread */ -static void rd_kafka_broker_feature_disable (rd_kafka_broker_t *rkb, - int features) { - if (!(features & 
rkb->rkb_features)) - return; - - rkb->rkb_features &= ~features; - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE, - "FEATURE", - "Updated enabled protocol features -%s to %s", - rd_kafka_features2str(features), - rd_kafka_features2str(rkb->rkb_features)); +static void rd_kafka_broker_feature_disable(rd_kafka_broker_t *rkb, + int features) { + if (!(features & rkb->rkb_features)) + return; + + rkb->rkb_features &= ~features; + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE, + "FEATURE", "Updated enabled protocol features -%s to %s", + rd_kafka_features2str(features), + rd_kafka_features2str(rkb->rkb_features)); } @@ -231,14 +221,14 @@ static void rd_kafka_broker_feature_disable (rd_kafka_broker_t *rkb, * @locality broker thread * @locks rd_kafka_broker_lock() */ -static void rd_kafka_broker_features_set (rd_kafka_broker_t *rkb, int features) { - if (rkb->rkb_features == features) - return; - - rkb->rkb_features = features; - rd_rkb_dbg(rkb, BROKER, "FEATURE", - "Updated enabled protocol features to %s", - rd_kafka_features2str(rkb->rkb_features)); +static void rd_kafka_broker_features_set(rd_kafka_broker_t *rkb, int features) { + if (rkb->rkb_features == features) + return; + + rkb->rkb_features = features; + rd_rkb_dbg(rkb, BROKER, "FEATURE", + "Updated enabled protocol features to %s", + rd_kafka_features2str(rkb->rkb_features)); } @@ -251,12 +241,13 @@ static void rd_kafka_broker_features_set (rd_kafka_broker_t *rkb, int features) * @locks none * @locality any */ -int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb, - int16_t ApiKey, - int16_t minver, int16_t maxver, - int *featuresp) { - struct rd_kafka_ApiVersion skel = { .ApiKey = ApiKey }; - struct rd_kafka_ApiVersion ret = RD_ZERO_INIT, *retp; +int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int16_t minver, + int16_t maxver, + int *featuresp) { + struct rd_kafka_ApiVersion skel = {.ApiKey = ApiKey}; + 
struct rd_kafka_ApiVersion ret = RD_ZERO_INIT, *retp; rd_kafka_broker_lock(rkb); if (featuresp) @@ -268,9 +259,9 @@ int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb, return maxver; } - retp = bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt, - sizeof(*rkb->rkb_ApiVersions), - rd_kafka_ApiVersion_key_cmp); + retp = + bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt, + sizeof(*rkb->rkb_ApiVersions), rd_kafka_ApiVersion_key_cmp); if (retp) ret = *retp; rd_kafka_broker_unlock(rkb); @@ -299,45 +290,43 @@ int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb, * @locks rd_kafka_broker_lock() MUST be held. * @locality broker thread */ -void rd_kafka_broker_set_state (rd_kafka_broker_t *rkb, int state) { - if ((int)rkb->rkb_state == state) - return; - - rd_kafka_dbg(rkb->rkb_rk, BROKER, "STATE", - "%s: Broker changed state %s -> %s", - rkb->rkb_name, - rd_kafka_broker_state_names[rkb->rkb_state], - rd_kafka_broker_state_names[state]); - - if (rkb->rkb_source == RD_KAFKA_INTERNAL) { - /* no-op */ - } else if (state == RD_KAFKA_BROKER_STATE_DOWN && - !rkb->rkb_down_reported) { +void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state) { + if ((int)rkb->rkb_state == state) + return; + + rd_kafka_dbg(rkb->rkb_rk, BROKER, "STATE", + "%s: Broker changed state %s -> %s", rkb->rkb_name, + rd_kafka_broker_state_names[rkb->rkb_state], + rd_kafka_broker_state_names[state]); + + if (rkb->rkb_source == RD_KAFKA_INTERNAL) { + /* no-op */ + } else if (state == RD_KAFKA_BROKER_STATE_DOWN && + !rkb->rkb_down_reported) { /* Propagate ALL_BROKERS_DOWN event if all brokers are * now down, unless we're terminating. * Only trigger for brokers that has an address set, * e.g., not logical brokers that lost their address. 
*/ if (rd_atomic32_add(&rkb->rkb_rk->rk_broker_down_cnt, 1) == - rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) - - rd_atomic32_get(&rkb->rkb_rk->rk_broker_addrless_cnt) && + rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) - + rd_atomic32_get( + &rkb->rkb_rk->rk_broker_addrless_cnt) && !rd_kafka_broker_is_addrless(rkb) && !rd_kafka_terminating(rkb->rkb_rk)) - rd_kafka_op_err(rkb->rkb_rk, - RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, - "%i/%i brokers are down", - rd_atomic32_get(&rkb->rkb_rk-> - rk_broker_down_cnt), - rd_atomic32_get(&rkb->rkb_rk-> - rk_broker_cnt) - - rd_atomic32_get(&rkb->rkb_rk-> - rk_broker_addrless_cnt)); - rkb->rkb_down_reported = 1; + rd_kafka_op_err( + rkb->rkb_rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, + "%i/%i brokers are down", + rd_atomic32_get(&rkb->rkb_rk->rk_broker_down_cnt), + rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) - + rd_atomic32_get( + &rkb->rkb_rk->rk_broker_addrless_cnt)); + rkb->rkb_down_reported = 1; } else if (rd_kafka_broker_state_is_up(state) && - rkb->rkb_down_reported) { - rd_atomic32_sub(&rkb->rkb_rk->rk_broker_down_cnt, 1); - rkb->rkb_down_reported = 0; - } + rkb->rkb_down_reported) { + rd_atomic32_sub(&rkb->rkb_rk->rk_broker_down_cnt, 1); + rkb->rkb_down_reported = 0; + } if (rkb->rkb_source != RD_KAFKA_INTERNAL) { if (rd_kafka_broker_state_is_up(state) && @@ -348,8 +337,8 @@ void rd_kafka_broker_set_state (rd_kafka_broker_t *rkb, int state) { rd_kafka_broker_trigger_monitors(rkb); if (RD_KAFKA_BROKER_IS_LOGICAL(rkb)) - rd_atomic32_add(&rkb->rkb_rk-> - rk_logical_broker_up_cnt, 1); + rd_atomic32_add( + &rkb->rkb_rk->rk_logical_broker_up_cnt, 1); } else if (rd_kafka_broker_state_is_up(rkb->rkb_state) && !rd_kafka_broker_state_is_up(state)) { @@ -359,15 +348,15 @@ void rd_kafka_broker_set_state (rd_kafka_broker_t *rkb, int state) { rd_kafka_broker_trigger_monitors(rkb); if (RD_KAFKA_BROKER_IS_LOGICAL(rkb)) - rd_atomic32_sub(&rkb->rkb_rk-> - rk_logical_broker_up_cnt, 1); + rd_atomic32_sub( + &rkb->rkb_rk->rk_logical_broker_up_cnt, 
1); } } - rkb->rkb_state = state; + rkb->rkb_state = state; rkb->rkb_ts_state = rd_clock(); - rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); } @@ -384,14 +373,16 @@ void rd_kafka_broker_set_state (rd_kafka_broker_t *rkb, int state) { * @locks none * @locality broker thread */ -static void rd_kafka_broker_set_error (rd_kafka_broker_t *rkb, int level, - rd_kafka_resp_err_t err, - const char *fmt, va_list ap) { +static void rd_kafka_broker_set_error(rd_kafka_broker_t *rkb, + int level, + rd_kafka_resp_err_t err, + const char *fmt, + va_list ap) { char errstr[512]; char extra[128]; size_t of = 0, ofe; rd_bool_t identical, suppress; - int state_duration_ms = (int)((rd_clock() - rkb->rkb_ts_state)/1000); + int state_duration_ms = (int)((rd_clock() - rkb->rkb_ts_state) / 1000); /* If this is a logical broker we include its current nodename/address @@ -399,8 +390,8 @@ static void rd_kafka_broker_set_error (rd_kafka_broker_t *rkb, int level, rd_kafka_broker_lock(rkb); if (rkb->rkb_source == RD_KAFKA_LOGICAL && !rd_kafka_broker_is_addrless(rkb)) { - of = (size_t)rd_snprintf(errstr, sizeof(errstr), "%s: ", - rkb->rkb_nodename); + of = (size_t)rd_snprintf(errstr, sizeof(errstr), + "%s: ", rkb->rkb_nodename); if (of > sizeof(errstr)) of = 0; /* If nodename overflows the entire buffer we * skip it completely since the error message @@ -408,9 +399,9 @@ static void rd_kafka_broker_set_error (rd_kafka_broker_t *rkb, int level, } rd_kafka_broker_unlock(rkb); - ofe = (size_t)rd_vsnprintf(errstr+of, sizeof(errstr)-of, fmt, ap); - if (ofe > sizeof(errstr)-of) - ofe = sizeof(errstr)-of; + ofe = (size_t)rd_vsnprintf(errstr + of, sizeof(errstr) - of, fmt, ap); + if (ofe > sizeof(errstr) - of) + ofe = sizeof(errstr) - of; of += ofe; /* Provide more meaningful error messages in certain cases */ @@ -425,41 +416,41 @@ static void rd_kafka_broker_set_error (rd_kafka_broker_t *rkb, int level, if (rkb->rkb_proto != RD_KAFKA_PROTO_SSL 
&& rkb->rkb_proto != RD_KAFKA_PROTO_SASL_SSL) rd_kafka_broker_set_error( - rkb, level, err, - "Disconnected while requesting " - "ApiVersion: " - "might be caused by incorrect " - "security.protocol configuration " - "(connecting to a SSL listener?) or " - "broker version is < 0.10 " - "(see api.version.request)", - ap/*ignored*/); + rkb, level, err, + "Disconnected while requesting " + "ApiVersion: " + "might be caused by incorrect " + "security.protocol configuration " + "(connecting to a SSL listener?) or " + "broker version is < 0.10 " + "(see api.version.request)", + ap /*ignored*/); else rd_kafka_broker_set_error( - rkb, level, err, - "Disconnected while requesting " - "ApiVersion: " - "might be caused by broker version " - "< 0.10 (see api.version.request)", - ap/*ignored*/); + rkb, level, err, + "Disconnected while requesting " + "ApiVersion: " + "might be caused by broker version " + "< 0.10 (see api.version.request)", + ap /*ignored*/); return; } else if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP && - state_duration_ms < 2000/*2s*/ && + state_duration_ms < 2000 /*2s*/ && rkb->rkb_rk->rk_conf.security_protocol != - RD_KAFKA_PROTO_SASL_SSL && + RD_KAFKA_PROTO_SASL_SSL && rkb->rkb_rk->rk_conf.security_protocol != - RD_KAFKA_PROTO_SASL_PLAINTEXT) { + RD_KAFKA_PROTO_SASL_PLAINTEXT) { /* If disconnected shortly after transitioning to UP * state it typically means the broker listener is * configured for SASL authentication but the client * is not. 
*/ rd_kafka_broker_set_error( - rkb, level, err, - "Disconnected: verify that security.protocol " - "is correctly configured, broker might " - "require SASL authentication", - ap/*ignored*/); + rkb, level, err, + "Disconnected: verify that security.protocol " + "is correctly configured, broker might " + "require SASL authentication", + ap /*ignored*/); return; } } @@ -468,10 +459,9 @@ static void rd_kafka_broker_set_error (rd_kafka_broker_t *rkb, int level, * the variable suffix "after Xms in state Y"), if so we should * suppress it. */ identical = err == rkb->rkb_last_err.err && - !strcmp(rkb->rkb_last_err.errstr, errstr); - suppress = identical && - rd_interval(&rkb->rkb_suppress.fail_error, - 30 * 1000 * 1000 /*30s*/, 0) <= 0; + !strcmp(rkb->rkb_last_err.errstr, errstr); + suppress = identical && rd_interval(&rkb->rkb_suppress.fail_error, + 30 * 1000 * 1000 /*30s*/, 0) <= 0; /* Copy last error prior to adding extras */ rkb->rkb_last_err.err = err; @@ -479,19 +469,18 @@ static void rd_kafka_broker_set_error (rd_kafka_broker_t *rkb, int level, sizeof(rkb->rkb_last_err.errstr)); /* Time since last state change to help debug connection issues */ - ofe = rd_snprintf(extra, sizeof(extra), - "after %dms in state %s", + ofe = rd_snprintf(extra, sizeof(extra), "after %dms in state %s", state_duration_ms, rd_kafka_broker_state_names[rkb->rkb_state]); /* Number of suppressed identical logs */ if (identical && !suppress && rkb->rkb_last_err.cnt >= 1 && ofe + 30 < sizeof(extra)) { - size_t r = (size_t)rd_snprintf( - extra+ofe, sizeof(extra)-ofe, - ", %d identical error(s) suppressed", - rkb->rkb_last_err.cnt); - if (r < sizeof(extra)-ofe) + size_t r = + (size_t)rd_snprintf(extra + ofe, sizeof(extra) - ofe, + ", %d identical error(s) suppressed", + rkb->rkb_last_err.cnt); + if (r < sizeof(extra) - ofe) ofe += r; else ofe = sizeof(extra); @@ -499,12 +488,10 @@ static void rd_kafka_broker_set_error (rd_kafka_broker_t *rkb, int level, /* Append the extra info if there is enough 
room */ if (ofe > 0 && of + ofe + 4 < sizeof(errstr)) - rd_snprintf(errstr+of, sizeof(errstr)-of, - " (%s)", extra); + rd_snprintf(errstr + of, sizeof(errstr) - of, " (%s)", extra); /* Don't log interrupt-wakeups when terminating */ - if (err == RD_KAFKA_RESP_ERR__INTR && - rd_kafka_terminating(rkb->rkb_rk)) + if (err == RD_KAFKA_RESP_ERR__INTR && rd_kafka_terminating(rkb->rkb_rk)) suppress = rd_true; if (!suppress) @@ -512,14 +499,14 @@ static void rd_kafka_broker_set_error (rd_kafka_broker_t *rkb, int level, else rkb->rkb_last_err.cnt++; - rd_rkb_dbg(rkb, BROKER, "FAIL", "%s (%s)%s%s", - errstr, rd_kafka_err2name(err), + rd_rkb_dbg(rkb, BROKER, "FAIL", "%s (%s)%s%s", errstr, + rd_kafka_err2name(err), identical ? ": identical to last error" : "", suppress ? ": error log suppressed" : ""); if (level != LOG_DEBUG && (level <= LOG_CRIT || !suppress)) { - rd_kafka_log(rkb->rkb_rk, level, "FAIL", - "%s: %s", rkb->rkb_name, errstr); + rd_kafka_log(rkb->rkb_rk, level, "FAIL", "%s: %s", + rkb->rkb_name, errstr); /* Send ERR op to application for processing. */ rd_kafka_q_op_err(rkb->rkb_rk->rk_rep, err, "%s: %s", @@ -538,103 +525,107 @@ static void rd_kafka_broker_set_error (rd_kafka_broker_t *rkb, int level, * * @locality broker thread */ -void rd_kafka_broker_fail (rd_kafka_broker_t *rkb, - int level, rd_kafka_resp_err_t err, - const char *fmt, ...) { - va_list ap; - rd_kafka_bufq_t tmpq_waitresp, tmpq; +void rd_kafka_broker_fail(rd_kafka_broker_t *rkb, + int level, + rd_kafka_resp_err_t err, + const char *fmt, + ...) 
{ + va_list ap; + rd_kafka_bufq_t tmpq_waitresp, tmpq; int old_state; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - if (rkb->rkb_transport) { - rd_kafka_transport_close(rkb->rkb_transport); - rkb->rkb_transport = NULL; + if (rkb->rkb_transport) { + rd_kafka_transport_close(rkb->rkb_transport); + rkb->rkb_transport = NULL; if (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP) rd_atomic32_add(&rkb->rkb_c.disconnects, 1); - } + } - rkb->rkb_req_timeouts = 0; + rkb->rkb_req_timeouts = 0; - if (rkb->rkb_recv_buf) { - rd_kafka_buf_destroy(rkb->rkb_recv_buf); - rkb->rkb_recv_buf = NULL; - } + if (rkb->rkb_recv_buf) { + rd_kafka_buf_destroy(rkb->rkb_recv_buf); + rkb->rkb_recv_buf = NULL; + } va_start(ap, fmt); rd_kafka_broker_set_error(rkb, level, err, fmt, ap); va_end(ap); - rd_kafka_broker_lock(rkb); + rd_kafka_broker_lock(rkb); - /* If we're currently asking for ApiVersion and the connection - * went down it probably means the broker does not support that request - * and tore down the connection. In this case we disable that feature flag. */ - if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY) - rd_kafka_broker_feature_disable(rkb, RD_KAFKA_FEATURE_APIVERSION); + /* If we're currently asking for ApiVersion and the connection + * went down it probably means the broker does not support that request + * and tore down the connection. In this case we disable that feature + * flag. */ + if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY) + rd_kafka_broker_feature_disable(rkb, + RD_KAFKA_FEATURE_APIVERSION); - /* Set broker state */ + /* Set broker state */ old_state = rkb->rkb_state; - rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_DOWN); + rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_DOWN); - /* Unlock broker since a requeue will try to lock it. */ - rd_kafka_broker_unlock(rkb); + /* Unlock broker since a requeue will try to lock it. 
*/ + rd_kafka_broker_unlock(rkb); rd_atomic64_set(&rkb->rkb_c.ts_send, 0); rd_atomic64_set(&rkb->rkb_c.ts_recv, 0); - /* - * Purge all buffers - * (put bufs on a temporary queue since bufs may be requeued, - * make sure outstanding requests are re-enqueued before - * bufs on outbufs queue.) - */ - rd_kafka_bufq_init(&tmpq_waitresp); - rd_kafka_bufq_init(&tmpq); - rd_kafka_bufq_concat(&tmpq_waitresp, &rkb->rkb_waitresps); - rd_kafka_bufq_concat(&tmpq, &rkb->rkb_outbufs); + /* + * Purge all buffers + * (put bufs on a temporary queue since bufs may be requeued, + * make sure outstanding requests are re-enqueued before + * bufs on outbufs queue.) + */ + rd_kafka_bufq_init(&tmpq_waitresp); + rd_kafka_bufq_init(&tmpq); + rd_kafka_bufq_concat(&tmpq_waitresp, &rkb->rkb_waitresps); + rd_kafka_bufq_concat(&tmpq, &rkb->rkb_outbufs); rd_atomic32_init(&rkb->rkb_blocking_request_cnt, 0); /* Purge the in-flight buffers (might get re-enqueued in case * of retries). */ - rd_kafka_bufq_purge(rkb, &tmpq_waitresp, err); + rd_kafka_bufq_purge(rkb, &tmpq_waitresp, err); /* Purge the waiting-in-output-queue buffers, * might also get re-enqueued. */ rd_kafka_bufq_purge(rkb, &tmpq, /* If failure was caused by a timeout, * adjust the error code for in-queue requests. */ - err == RD_KAFKA_RESP_ERR__TIMED_OUT ? - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE : err); - - /* Update bufq for connection reset: - * - Purge connection-setup requests from outbufs since they will be - * reissued on the next connect. - * - Reset any partially sent buffer's offset. - */ - rd_kafka_bufq_connection_reset(rkb, &rkb->rkb_outbufs); - - /* Extra debugging for tracking termination-hang issues: - * show what is keeping this broker from decommissioning. 
*/ - if (rd_kafka_terminating(rkb->rkb_rk) && - !rd_kafka_broker_terminating(rkb)) { - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "BRKTERM", - "terminating: broker still has %d refcnt(s), " - "%"PRId32" buffer(s), %d partition(s)", - rd_refcnt_get(&rkb->rkb_refcnt), - rd_kafka_bufq_cnt(&rkb->rkb_outbufs), - rkb->rkb_toppar_cnt); - rd_kafka_bufq_dump(rkb, "BRKOUTBUFS", &rkb->rkb_outbufs); - } + err == RD_KAFKA_RESP_ERR__TIMED_OUT + ? RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE + : err); + + /* Update bufq for connection reset: + * - Purge connection-setup requests from outbufs since they will be + * reissued on the next connect. + * - Reset any partially sent buffer's offset. + */ + rd_kafka_bufq_connection_reset(rkb, &rkb->rkb_outbufs); + + /* Extra debugging for tracking termination-hang issues: + * show what is keeping this broker from decommissioning. */ + if (rd_kafka_terminating(rkb->rkb_rk) && + !rd_kafka_broker_terminating(rkb)) { + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "BRKTERM", + "terminating: broker still has %d refcnt(s), " + "%" PRId32 " buffer(s), %d partition(s)", + rd_refcnt_get(&rkb->rkb_refcnt), + rd_kafka_bufq_cnt(&rkb->rkb_outbufs), + rkb->rkb_toppar_cnt); + rd_kafka_bufq_dump(rkb, "BRKOUTBUFS", &rkb->rkb_outbufs); + } /* Query for topic leaders to quickly pick up on failover. 
*/ if (err != RD_KAFKA_RESP_ERR__DESTROY && old_state >= RD_KAFKA_BROKER_STATE_UP) - rd_kafka_metadata_refresh_known_topics(rkb->rkb_rk, NULL, - rd_true/*force*/, - "broker down"); + rd_kafka_metadata_refresh_known_topics( + rkb->rkb_rk, NULL, rd_true /*force*/, "broker down"); } @@ -644,9 +635,9 @@ void rd_kafka_broker_fail (rd_kafka_broker_t *rkb, * * @locality broker thread */ -void rd_kafka_broker_conn_closed (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const char *errstr) { +void rd_kafka_broker_conn_closed(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + const char *errstr) { int log_level = LOG_ERR; if (!rkb->rkb_rk->rk_conf.log_connection_close) { @@ -671,10 +662,11 @@ void rd_kafka_broker_conn_closed (rd_kafka_broker_t *rkb, */ rd_ts_t now = rd_clock(); rd_ts_t minidle = - RD_MAX(60*1000/*60s*/, - rkb->rkb_rk->rk_conf.socket_timeout_ms) * 1000; + RD_MAX(60 * 1000 /*60s*/, + rkb->rkb_rk->rk_conf.socket_timeout_ms) * + 1000; int inflight = rd_kafka_bufq_cnt(&rkb->rkb_waitresps); - int inqueue = rd_kafka_bufq_cnt(&rkb->rkb_outbufs); + int inqueue = rd_kafka_bufq_cnt(&rkb->rkb_outbufs); if (rkb->rkb_ts_state + minidle < now && rd_atomic64_get(&rkb->rkb_c.ts_send) + minidle < now && @@ -700,12 +692,11 @@ void rd_kafka_broker_conn_closed (rd_kafka_broker_t *rkb, * * @locality broker thread */ -static int -rd_kafka_broker_bufq_purge_by_toppar (rd_kafka_broker_t *rkb, - rd_kafka_bufq_t *rkbq, - int64_t ApiKey, - rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err) { +static int rd_kafka_broker_bufq_purge_by_toppar(rd_kafka_broker_t *rkb, + rd_kafka_bufq_t *rkbq, + int64_t ApiKey, + rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err) { rd_kafka_buf_t *rkbuf, *tmp; int cnt = 0; @@ -747,30 +738,30 @@ rd_kafka_broker_bufq_purge_by_toppar (rd_kafka_broker_t *rkb, * * @locality broker thread */ -static int rd_kafka_broker_bufq_timeout_scan (rd_kafka_broker_t *rkb, - int is_waitresp_q, - rd_kafka_bufq_t *rkbq, - int *partial_cntp, - int16_t ApiKey, - 
rd_kafka_resp_err_t err, - rd_ts_t now, - const char *description, - int log_first_n) { - rd_kafka_buf_t *rkbuf, *tmp; - int cnt = 0; +static int rd_kafka_broker_bufq_timeout_scan(rd_kafka_broker_t *rkb, + int is_waitresp_q, + rd_kafka_bufq_t *rkbq, + int *partial_cntp, + int16_t ApiKey, + rd_kafka_resp_err_t err, + rd_ts_t now, + const char *description, + int log_first_n) { + rd_kafka_buf_t *rkbuf, *tmp; + int cnt = 0; int idx = -1; const rd_kafka_buf_t *holb; - restart: +restart: holb = TAILQ_FIRST(&rkbq->rkbq_bufs); - TAILQ_FOREACH_SAFE(rkbuf, &rkbq->rkbq_bufs, rkbuf_link, tmp) { + TAILQ_FOREACH_SAFE(rkbuf, &rkbq->rkbq_bufs, rkbuf_link, tmp) { rd_kafka_broker_state_t pre_state, post_state; idx++; - if (likely(now && rkbuf->rkbuf_ts_timeout > now)) - continue; + if (likely(now && rkbuf->rkbuf_ts_timeout > now)) + continue; if (ApiKey != -1 && rkbuf->rkbuf_reqhdr.ApiKey != ApiKey) continue; @@ -778,13 +769,13 @@ static int rd_kafka_broker_bufq_timeout_scan (rd_kafka_broker_t *rkb, if (partial_cntp && rd_slice_offset(&rkbuf->rkbuf_reader) > 0) (*partial_cntp)++; - /* Convert rkbuf_ts_sent to elapsed time since request */ - if (rkbuf->rkbuf_ts_sent) - rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent; - else - rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_enq; + /* Convert rkbuf_ts_sent to elapsed time since request */ + if (rkbuf->rkbuf_ts_sent) + rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent; + else + rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_enq; - rd_kafka_bufq_deq(rkbq, rkbuf); + rd_kafka_bufq_deq(rkbq, rkbuf); if (now && cnt < log_first_n) { char holbstr[128]; @@ -797,36 +788,37 @@ static int rd_kafka_broker_bufq_timeout_scan (rd_kafka_broker_t *rkb, * In this case log what is likely holding up the * requests and what caused this request to time out. 
*/ if (holb && holb == TAILQ_FIRST(&rkbq->rkbq_bufs)) { - rd_snprintf(holbstr, sizeof(holbstr), - ": possibly held back by " - "preceeding%s %sRequest with " - "timeout in %dms", - (holb->rkbuf_flags & - RD_KAFKA_OP_F_BLOCKING) ? - " blocking" : "", - rd_kafka_ApiKey2str(holb-> - rkbuf_reqhdr. - ApiKey), - (int)((holb->rkbuf_ts_timeout - - now) / 1000)); + rd_snprintf( + holbstr, sizeof(holbstr), + ": possibly held back by " + "preceeding%s %sRequest with " + "timeout in %dms", + (holb->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING) + ? " blocking" + : "", + rd_kafka_ApiKey2str( + holb->rkbuf_reqhdr.ApiKey), + (int)((holb->rkbuf_ts_timeout - now) / + 1000)); /* Only log the HOLB once */ holb = NULL; } else { *holbstr = '\0'; } - rd_rkb_log(rkb, LOG_NOTICE, "REQTMOUT", - "Timed out %sRequest %s " - "(after %"PRId64"ms, timeout #%d)%s", - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr. - ApiKey), - description, rkbuf->rkbuf_ts_sent/1000, cnt, - holbstr); + rd_rkb_log( + rkb, LOG_NOTICE, "REQTMOUT", + "Timed out %sRequest %s " + "(after %" PRId64 "ms, timeout #%d)%s", + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + description, rkbuf->rkbuf_ts_sent / 1000, cnt, + holbstr); } - if (is_waitresp_q && rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING - && rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 0) - rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + if (is_waitresp_q && + rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING && + rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 0) + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); pre_state = rd_kafka_broker_get_state(rkb); @@ -848,9 +840,9 @@ static int rd_kafka_broker_bufq_timeout_scan (rd_kafka_broker_t *rkb, /* Else start scanning the queue from the beginning. 
*/ goto restart; } - } + } - return cnt; + return cnt; } @@ -859,26 +851,26 @@ static int rd_kafka_broker_bufq_timeout_scan (rd_kafka_broker_t *rkb, * * Locality: Broker thread */ -static void rd_kafka_broker_timeout_scan (rd_kafka_broker_t *rkb, rd_ts_t now) { +static void rd_kafka_broker_timeout_scan(rd_kafka_broker_t *rkb, rd_ts_t now) { int inflight_cnt, retry_cnt, outq_cnt; int partial_cnt = 0; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); /* In-flight requests waiting for response */ inflight_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 1, &rkb->rkb_waitresps, NULL, -1, - RD_KAFKA_RESP_ERR__TIMED_OUT, now, "in flight", 5); - /* Requests in retry queue */ - retry_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_retrybufs, NULL, -1, - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in retry queue", 0); + rkb, 1, &rkb->rkb_waitresps, NULL, -1, RD_KAFKA_RESP_ERR__TIMED_OUT, + now, "in flight", 5); + /* Requests in retry queue */ + retry_cnt = rd_kafka_broker_bufq_timeout_scan( + rkb, 0, &rkb->rkb_retrybufs, NULL, -1, + RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in retry queue", 0); /* Requests in local queue not sent yet. * partial_cnt is included in outq_cnt and denotes a request * that has been partially transmitted. 
*/ outq_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_outbufs, &partial_cnt, -1, - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in output queue", 0); + rkb, 0, &rkb->rkb_outbufs, &partial_cnt, -1, + RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in output queue", 0); if (inflight_cnt + retry_cnt + outq_cnt + partial_cnt > 0) { rd_rkb_log(rkb, LOG_WARNING, "REQTMOUT", @@ -897,7 +889,7 @@ static void rd_kafka_broker_timeout_scan (rd_kafka_broker_t *rkb, rd_ts_t now) { if (partial_cnt > 0 || (rkb->rkb_rk->rk_conf.socket_max_fails && rkb->rkb_req_timeouts >= - rkb->rkb_rk->rk_conf.socket_max_fails && + rkb->rkb_rk->rk_conf.socket_max_fails && rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP)) { char rttinfo[32]; /* Print average RTT (if avail) to help diagnose. */ @@ -905,7 +897,7 @@ static void rd_kafka_broker_timeout_scan (rd_kafka_broker_t *rkb, rd_ts_t now) { if (rkb->rkb_avg_rtt.ra_v.avg) rd_snprintf(rttinfo, sizeof(rttinfo), " (average rtt %.3fms)", - (float)(rkb->rkb_avg_rtt.ra_v.avg/ + (float)(rkb->rkb_avg_rtt.ra_v.avg / 1000.0f)); else rttinfo[0] = 0; @@ -920,92 +912,87 @@ static void rd_kafka_broker_timeout_scan (rd_kafka_broker_t *rkb, rd_ts_t now) { -static ssize_t -rd_kafka_broker_send (rd_kafka_broker_t *rkb, rd_slice_t *slice) { - ssize_t r; - char errstr[128]; +static ssize_t rd_kafka_broker_send(rd_kafka_broker_t *rkb, rd_slice_t *slice) { + ssize_t r; + char errstr[128]; - rd_kafka_assert(rkb->rkb_rk, rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP); - rd_kafka_assert(rkb->rkb_rk, rkb->rkb_transport); + rd_kafka_assert(rkb->rkb_rk, + rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP); + rd_kafka_assert(rkb->rkb_rk, rkb->rkb_transport); - r = rd_kafka_transport_send(rkb->rkb_transport, slice, - errstr, sizeof(errstr)); + r = rd_kafka_transport_send(rkb->rkb_transport, slice, errstr, + sizeof(errstr)); - if (r == -1) { - rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, + if (r == -1) { + rd_kafka_broker_fail(rkb, LOG_ERR, 
RD_KAFKA_RESP_ERR__TRANSPORT, "Send failed: %s", errstr); - rd_atomic64_add(&rkb->rkb_c.tx_err, 1); - return -1; - } + rd_atomic64_add(&rkb->rkb_c.tx_err, 1); + return -1; + } - rd_atomic64_add(&rkb->rkb_c.tx_bytes, r); - rd_atomic64_add(&rkb->rkb_c.tx, 1); - return r; + rd_atomic64_add(&rkb->rkb_c.tx_bytes, r); + rd_atomic64_add(&rkb->rkb_c.tx, 1); + return r; } - -static int rd_kafka_broker_resolve (rd_kafka_broker_t *rkb, - const char *nodename, - rd_bool_t reset_cached_addr) { - const char *errstr; +static int rd_kafka_broker_resolve(rd_kafka_broker_t *rkb, + const char *nodename, + rd_bool_t reset_cached_addr) { + const char *errstr; int save_idx = 0; if (!*nodename && rkb->rkb_source == RD_KAFKA_LOGICAL) { - rd_kafka_broker_fail(rkb, LOG_DEBUG, - RD_KAFKA_RESP_ERR__RESOLVE, + rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__RESOLVE, "Logical broker has no address yet"); return -1; } - if (rkb->rkb_rsal && - (reset_cached_addr || - rkb->rkb_ts_rsal_last + (rkb->rkb_rk->rk_conf.broker_addr_ttl*1000) - < rd_clock())) { - /* Address list has expired. */ + if (rkb->rkb_rsal && + (reset_cached_addr || + rkb->rkb_ts_rsal_last + + (rkb->rkb_rk->rk_conf.broker_addr_ttl * 1000) < + rd_clock())) { + /* Address list has expired. */ /* Save the address index to make sure we still round-robin * if we get the same address list back */ save_idx = rkb->rkb_rsal->rsal_curr; - rd_sockaddr_list_destroy(rkb->rkb_rsal); - rkb->rkb_rsal = NULL; - } - - if (!rkb->rkb_rsal) { - /* Resolve */ - rkb->rkb_rsal = rd_getaddrinfo(nodename, - RD_KAFKA_PORT_STR, - AI_ADDRCONFIG, - rkb->rkb_rk->rk_conf. 
- broker_addr_family, - SOCK_STREAM, - IPPROTO_TCP, &errstr); - - if (!rkb->rkb_rsal) { - rd_kafka_broker_fail(rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__RESOLVE, - "Failed to resolve '%s': %s", - nodename, errstr); - return -1; + rd_sockaddr_list_destroy(rkb->rkb_rsal); + rkb->rkb_rsal = NULL; + } + + if (!rkb->rkb_rsal) { + /* Resolve */ + rkb->rkb_rsal = + rd_getaddrinfo(nodename, RD_KAFKA_PORT_STR, AI_ADDRCONFIG, + rkb->rkb_rk->rk_conf.broker_addr_family, + SOCK_STREAM, IPPROTO_TCP, &errstr); + + if (!rkb->rkb_rsal) { + rd_kafka_broker_fail( + rkb, LOG_ERR, RD_KAFKA_RESP_ERR__RESOLVE, + "Failed to resolve '%s': %s", nodename, errstr); + return -1; } else { rkb->rkb_ts_rsal_last = rd_clock(); /* Continue at previous round-robin position */ if (rkb->rkb_rsal->rsal_cnt > save_idx) rkb->rkb_rsal->rsal_curr = save_idx; } - } + } - return 0; + return 0; } -static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf) { +static void rd_kafka_broker_buf_enq0(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf) { rd_ts_t now; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); if (rkb->rkb_rk->rk_conf.sparse_connections && rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT) { @@ -1018,7 +1005,7 @@ static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb, rd_kafka_broker_unlock(rkb); } - now = rd_clock(); + now = rd_clock(); rkbuf->rkbuf_ts_enq = now; rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_SENT; @@ -1027,8 +1014,8 @@ static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb, if (likely(rkbuf->rkbuf_prio == RD_KAFKA_PRIO_NORMAL)) { /* Insert request at tail of queue */ - TAILQ_INSERT_TAIL(&rkb->rkb_outbufs.rkbq_bufs, - rkbuf, rkbuf_link); + TAILQ_INSERT_TAIL(&rkb->rkb_outbufs.rkbq_bufs, rkbuf, + rkbuf_link); } else { /* Insert request after any requests with a higher or @@ -1048,11 +1035,11 @@ static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb, } if (after) - 
TAILQ_INSERT_AFTER(&rkb->rkb_outbufs.rkbq_bufs, - after, rkbuf, rkbuf_link); + TAILQ_INSERT_AFTER(&rkb->rkb_outbufs.rkbq_bufs, after, + rkbuf, rkbuf_link); else - TAILQ_INSERT_HEAD(&rkb->rkb_outbufs.rkbq_bufs, - rkbuf, rkbuf_link); + TAILQ_INSERT_HEAD(&rkb->rkb_outbufs.rkbq_bufs, rkbuf, + rkbuf_link); } rd_atomic32_add(&rkb->rkb_outbufs.rkbq_cnt, 1); @@ -1065,7 +1052,7 @@ static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb, /** * Finalize a stuffed rkbuf for sending to broker. */ -static void rd_kafka_buf_finalize (rd_kafka_t *rk, rd_kafka_buf_t *rkbuf) { +static void rd_kafka_buf_finalize(rd_kafka_t *rk, rd_kafka_buf_t *rkbuf) { size_t totsize; rd_assert(!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE)); @@ -1088,14 +1075,14 @@ static void rd_kafka_buf_finalize (rd_kafka_t *rk, rd_kafka_buf_t *rkbuf) { rd_kafka_buf_update_i32(rkbuf, 0, (int32_t)totsize); /* ApiVersion */ - rd_kafka_buf_update_i16(rkbuf, 4+2, rkbuf->rkbuf_reqhdr.ApiVersion); + rd_kafka_buf_update_i16(rkbuf, 4 + 2, rkbuf->rkbuf_reqhdr.ApiVersion); } -void rd_kafka_broker_buf_enq1 (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_broker_buf_enq1(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rkbuf->rkbuf_cb = resp_cb; @@ -1113,13 +1100,13 @@ void rd_kafka_broker_buf_enq1 (rd_kafka_broker_t *rkb, * * Locality: broker thread */ -static int rd_kafka_broker_buf_enq2 (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_broker_buf_enq2(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf) { if (unlikely(rkb->rkb_source == RD_KAFKA_INTERNAL)) { /* Fail request immediately if this is the internal broker. 
*/ rd_kafka_buf_callback(rkb->rkb_rk, rkb, - RD_KAFKA_RESP_ERR__TRANSPORT, - NULL, rkbuf); + RD_KAFKA_RESP_ERR__TRANSPORT, NULL, + rkbuf); return -1; } @@ -1136,11 +1123,11 @@ static int rd_kafka_broker_buf_enq2 (rd_kafka_broker_t *rkb, * * Locality: any thread */ -void rd_kafka_broker_buf_enq_replyq (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_broker_buf_enq_replyq(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { assert(rkbuf->rkbuf_rkb == rkb); if (resp_cb) { @@ -1148,26 +1135,25 @@ void rd_kafka_broker_buf_enq_replyq (rd_kafka_broker_t *rkb, rkbuf->rkbuf_cb = resp_cb; rkbuf->rkbuf_opaque = opaque; } else { - rd_dassert(!replyq.q); - } + rd_dassert(!replyq.q); + } /* Unmaked buffers will be finalized after the make callback. */ if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE)) rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf); - if (thrd_is_current(rkb->rkb_thread)) { - rd_kafka_broker_buf_enq2(rkb, rkbuf); + if (thrd_is_current(rkb->rkb_thread)) { + rd_kafka_broker_buf_enq2(rkb, rkbuf); - } else { - rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_BUF); - rko->rko_u.xbuf.rkbuf = rkbuf; - rd_kafka_q_enq(rkb->rkb_ops, rko); - } + } else { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_BUF); + rko->rko_u.xbuf.rkbuf = rkbuf; + rd_kafka_q_enq(rkb->rkb_ops, rko); + } } - /** * @returns the current broker state change version. * Pass this value to future rd_kafka_brokers_wait_state_change() calls @@ -1175,12 +1161,12 @@ void rd_kafka_broker_buf_enq_replyq (rd_kafka_broker_t *rkb, * an initial call to some API that fails and the sub-sequent * .._wait_state_change() call. 
*/ -int rd_kafka_brokers_get_state_version (rd_kafka_t *rk) { - int version; - mtx_lock(&rk->rk_broker_state_change_lock); - version = rk->rk_broker_state_change_version; - mtx_unlock(&rk->rk_broker_state_change_lock); - return version; +int rd_kafka_brokers_get_state_version(rd_kafka_t *rk) { + int version; + mtx_lock(&rk->rk_broker_state_change_lock); + version = rk->rk_broker_state_change_version; + mtx_unlock(&rk->rk_broker_state_change_lock); + return version; } /** @@ -1201,18 +1187,19 @@ int rd_kafka_brokers_get_state_version (rd_kafka_t *rk) { * * @locality any thread */ -int rd_kafka_brokers_wait_state_change (rd_kafka_t *rk, int stored_version, - int timeout_ms) { - int r; - mtx_lock(&rk->rk_broker_state_change_lock); - if (stored_version != rk->rk_broker_state_change_version) - r = 1; - else - r = cnd_timedwait_ms(&rk->rk_broker_state_change_cnd, - &rk->rk_broker_state_change_lock, - timeout_ms) == thrd_success; - mtx_unlock(&rk->rk_broker_state_change_lock); - return r; +int rd_kafka_brokers_wait_state_change(rd_kafka_t *rk, + int stored_version, + int timeout_ms) { + int r; + mtx_lock(&rk->rk_broker_state_change_lock); + if (stored_version != rk->rk_broker_state_change_version) + r = 1; + else + r = cnd_timedwait_ms(&rk->rk_broker_state_change_cnd, + &rk->rk_broker_state_change_lock, + timeout_ms) == thrd_success; + mtx_unlock(&rk->rk_broker_state_change_lock); + return r; } @@ -1228,9 +1215,9 @@ int rd_kafka_brokers_wait_state_change (rd_kafka_t *rk, int stored_version, * or 0 if the \p stored_version is outdated in which case the * caller should redo the broker lookup. 
*/ -int rd_kafka_brokers_wait_state_change_async (rd_kafka_t *rk, - int stored_version, - rd_kafka_enq_once_t *eonce) { +int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk, + int stored_version, + rd_kafka_enq_once_t *eonce) { int r = 1; mtx_lock(&rk->rk_broker_state_change_lock); @@ -1250,8 +1237,8 @@ int rd_kafka_brokers_wait_state_change_async (rd_kafka_t *rk, * @brief eonce trigger callback for rd_list_apply() call in * rd_kafka_brokers_broadcast_state_change() */ -static int -rd_kafka_broker_state_change_trigger_eonce (void *elem, void *opaque) { +static int rd_kafka_broker_state_change_trigger_eonce(void *elem, + void *opaque) { rd_kafka_enq_once_t *eonce = elem; rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR, "broker state change"); @@ -1264,10 +1251,9 @@ rd_kafka_broker_state_change_trigger_eonce (void *elem, void *opaque) { * * @locality any thread */ -void rd_kafka_brokers_broadcast_state_change (rd_kafka_t *rk) { +void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk) { - rd_kafka_dbg(rk, GENERIC, "BROADCAST", - "Broadcasting state change"); + rd_kafka_dbg(rk, GENERIC, "BROADCAST", "Broadcasting state change"); mtx_lock(&rk->rk_broker_state_change_lock); @@ -1305,15 +1291,16 @@ void rd_kafka_brokers_broadcast_state_change (rd_kafka_t *rk) { * @locality any */ static rd_kafka_broker_t * -rd_kafka_broker_random0 (const char *func, int line, - rd_kafka_t *rk, - rd_bool_t is_up, - int state, - int *filtered_cnt, - int (*filter) (rd_kafka_broker_t *rk, void *opaque), - void *opaque) { +rd_kafka_broker_random0(const char *func, + int line, + rd_kafka_t *rk, + rd_bool_t is_up, + int state, + int *filtered_cnt, + int (*filter)(rd_kafka_broker_t *rk, void *opaque), + void *opaque) { rd_kafka_broker_t *rkb, *good = NULL; - int cnt = 0; + int cnt = 0; int fcnt = 0; TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { @@ -1337,8 +1324,8 @@ rd_kafka_broker_random0 (const char *func, int line, cnt += 1; } } - rd_kafka_broker_unlock(rkb); - } + 
rd_kafka_broker_unlock(rkb); + } if (filtered_cnt) *filtered_cnt = fcnt; @@ -1346,9 +1333,9 @@ rd_kafka_broker_random0 (const char *func, int line, return good; } -#define rd_kafka_broker_random(rk,state,filter,opaque) \ - rd_kafka_broker_random0(__FUNCTION__, __LINE__, \ - rk, rd_false, state, NULL, filter, opaque) +#define rd_kafka_broker_random(rk, state, filter, opaque) \ + rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_false, state, \ + NULL, filter, opaque) /** @@ -1369,12 +1356,12 @@ rd_kafka_broker_random0 (const char *func, int line, * @locality any */ static rd_kafka_broker_t * -rd_kafka_broker_weighted (rd_kafka_t *rk, - int (*weight_cb) (rd_kafka_broker_t *rkb), - int features) { +rd_kafka_broker_weighted(rd_kafka_t *rk, + int (*weight_cb)(rd_kafka_broker_t *rkb), + int features) { rd_kafka_broker_t *rkb, *good = NULL; int highest = 0; - int cnt = 0; + int cnt = 0; TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { int weight; @@ -1391,7 +1378,7 @@ rd_kafka_broker_weighted (rd_kafka_t *rk, if (weight > highest) { highest = weight; - cnt = 0; + cnt = 0; } /* If same weight (cnt > 0), use reservoir sampling */ @@ -1428,28 +1415,28 @@ rd_kafka_broker_weighted (rd_kafka_t *rk, * * @locks_required rkb */ -static int rd_kafka_broker_weight_usable (rd_kafka_broker_t *rkb) { +static int rd_kafka_broker_weight_usable(rd_kafka_broker_t *rkb) { int weight = 0; if (!rd_kafka_broker_state_is_up(rkb->rkb_state)) return 0; - weight += 2000 * (rkb->rkb_nodeid != -1 && - !RD_KAFKA_BROKER_IS_LOGICAL(rkb)); + weight += + 2000 * (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb)); weight += 10 * !RD_KAFKA_BROKER_IS_LOGICAL(rkb); if (likely(!rd_atomic32_get(&rkb->rkb_blocking_request_cnt))) { rd_ts_t tx_last = rd_atomic64_get(&rkb->rkb_c.ts_send); - int idle = (int)((rd_clock() - - (tx_last > 0 ? tx_last : rkb->rkb_ts_state)) - / 1000000); + int idle = (int)((rd_clock() - + (tx_last > 0 ? 
tx_last : rkb->rkb_ts_state)) / + 1000000); weight += 1; /* is not blocking */ /* Prefer least idle broker (based on last 10 minutes use) */ if (idle < 0) ; /*clock going backwards? do nothing */ - else if (idle < 600/*10 minutes*/) + else if (idle < 600 /*10 minutes*/) weight += 1000 + (600 - idle); else /* Else least idle hours (capped to 100h) */ weight += 100 + (100 - RD_MIN((idle / 3600), 100)); @@ -1471,11 +1458,12 @@ static int rd_kafka_broker_weight_usable (rd_kafka_broker_t *rkb) { * @locks rd_kafka_*lock(rk) MUST be held. * @locality any thread */ -rd_kafka_broker_t *rd_kafka_broker_any (rd_kafka_t *rk, int state, - int (*filter) (rd_kafka_broker_t *rkb, - void *opaque), - void *opaque, - const char *reason) { +rd_kafka_broker_t *rd_kafka_broker_any(rd_kafka_t *rk, + int state, + int (*filter)(rd_kafka_broker_t *rkb, + void *opaque), + void *opaque, + const char *reason) { rd_kafka_broker_t *rkb; rkb = rd_kafka_broker_random(rk, state, filter, opaque); @@ -1502,17 +1490,17 @@ rd_kafka_broker_t *rd_kafka_broker_any (rd_kafka_t *rk, int state, * @locks rd_kafka_*lock(rk) MUST be held. 
* @locality any thread */ -rd_kafka_broker_t * -rd_kafka_broker_any_up (rd_kafka_t *rk, - int *filtered_cnt, - int (*filter) (rd_kafka_broker_t *rkb, - void *opaque), - void *opaque, const char *reason) { +rd_kafka_broker_t *rd_kafka_broker_any_up(rd_kafka_t *rk, + int *filtered_cnt, + int (*filter)(rd_kafka_broker_t *rkb, + void *opaque), + void *opaque, + const char *reason) { rd_kafka_broker_t *rkb; - rkb = rd_kafka_broker_random0(__FUNCTION__, __LINE__, - rk, rd_true/*is_up*/, -1, - filtered_cnt, filter, opaque); + rkb = rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, + rd_true /*is_up*/, -1, filtered_cnt, + filter, opaque); if (!rkb && rk->rk_conf.sparse_connections) { /* Sparse connections: @@ -1538,11 +1526,11 @@ rd_kafka_broker_any_up (rd_kafka_t *rk, * * @sa rd_kafka_broker_any_up() */ -rd_kafka_broker_t *rd_kafka_broker_any_usable (rd_kafka_t *rk, - int timeout_ms, - rd_dolock_t do_lock, - int features, - const char *reason) { +rd_kafka_broker_t *rd_kafka_broker_any_usable(rd_kafka_t *rk, + int timeout_ms, + rd_dolock_t do_lock, + int features, + const char *reason) { const rd_ts_t ts_end = rd_timeout_init(timeout_ms); while (1) { @@ -1553,9 +1541,8 @@ rd_kafka_broker_t *rd_kafka_broker_any_usable (rd_kafka_t *rk, if (do_lock) rd_kafka_rdlock(rk); - rkb = rd_kafka_broker_weighted(rk, - rd_kafka_broker_weight_usable, - features); + rkb = rd_kafka_broker_weighted( + rk, rd_kafka_broker_weight_usable, features); if (!rkb && rk->rk_conf.sparse_connections) { /* Sparse connections: @@ -1596,9 +1583,10 @@ rd_kafka_broker_t *rd_kafka_broker_any_usable (rd_kafka_t *rk, * @locks none * @locality any thread */ -rd_kafka_broker_t * -rd_kafka_broker_get_async (rd_kafka_t *rk, int32_t broker_id, int state, - rd_kafka_enq_once_t *eonce) { +rd_kafka_broker_t *rd_kafka_broker_get_async(rd_kafka_t *rk, + int32_t broker_id, + int state, + rd_kafka_enq_once_t *eonce) { int version; do { rd_kafka_broker_t *rkb; @@ -1628,8 +1616,8 @@ rd_kafka_broker_get_async (rd_kafka_t 
*rk, int32_t broker_id, int state, * @locality any thread */ -static rd_kafka_broker_t *rd_kafka_broker_controller_nowait (rd_kafka_t *rk, - int state) { +static rd_kafka_broker_t *rd_kafka_broker_controller_nowait(rd_kafka_t *rk, + int state) { rd_kafka_broker_t *rkb; rd_kafka_rdlock(rk); @@ -1664,8 +1652,9 @@ static rd_kafka_broker_t *rd_kafka_broker_controller_nowait (rd_kafka_t *rk, * @locality any thread */ rd_kafka_broker_t * -rd_kafka_broker_controller_async (rd_kafka_t *rk, int state, - rd_kafka_enq_once_t *eonce) { +rd_kafka_broker_controller_async(rd_kafka_t *rk, + int state, + rd_kafka_enq_once_t *eonce) { int version; do { rd_kafka_broker_t *rkb; @@ -1691,8 +1680,8 @@ rd_kafka_broker_controller_async (rd_kafka_t *rk, int state, * @locks none * @locality any thread */ -rd_kafka_broker_t *rd_kafka_broker_controller (rd_kafka_t *rk, int state, - rd_ts_t abs_timeout) { +rd_kafka_broker_t * +rd_kafka_broker_controller(rd_kafka_t *rk, int state, rd_ts_t abs_timeout) { while (1) { int version = rd_kafka_brokers_get_state_version(rk); @@ -1713,87 +1702,76 @@ rd_kafka_broker_t *rd_kafka_broker_controller (rd_kafka_t *rk, int state, - /** * Find a waitresp (rkbuf awaiting response) by the correlation id. 
*/ -static rd_kafka_buf_t *rd_kafka_waitresp_find (rd_kafka_broker_t *rkb, - int32_t corrid) { - rd_kafka_buf_t *rkbuf; - rd_ts_t now = rd_clock(); - - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - - TAILQ_FOREACH(rkbuf, &rkb->rkb_waitresps.rkbq_bufs, rkbuf_link) - if (rkbuf->rkbuf_corrid == corrid) { - /* Convert ts_sent to RTT */ - rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent; - rd_avg_add(&rkb->rkb_avg_rtt, rkbuf->rkbuf_ts_sent); - - if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING && - rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, - 1) == 1) - rd_kafka_brokers_broadcast_state_change( - rkb->rkb_rk); - - rd_kafka_bufq_deq(&rkb->rkb_waitresps, rkbuf); - return rkbuf; - } - return NULL; -} +static rd_kafka_buf_t *rd_kafka_waitresp_find(rd_kafka_broker_t *rkb, + int32_t corrid) { + rd_kafka_buf_t *rkbuf; + rd_ts_t now = rd_clock(); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + + TAILQ_FOREACH(rkbuf, &rkb->rkb_waitresps.rkbq_bufs, rkbuf_link) + if (rkbuf->rkbuf_corrid == corrid) { + /* Convert ts_sent to RTT */ + rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent; + rd_avg_add(&rkb->rkb_avg_rtt, rkbuf->rkbuf_ts_sent); + + if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING && + rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 1) + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + + rd_kafka_bufq_deq(&rkb->rkb_waitresps, rkbuf); + return rkbuf; + } + return NULL; +} /** * Map a response message to a request. 
*/ -static int rd_kafka_req_response (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf) { - rd_kafka_buf_t *req; +static int rd_kafka_req_response(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf) { + rd_kafka_buf_t *req; int log_decode_errors = LOG_ERR; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - /* Find corresponding request message by correlation id */ - if (unlikely(!(req = - rd_kafka_waitresp_find(rkb, - rkbuf->rkbuf_reshdr.CorrId)))) { - /* unknown response. probably due to request timeout */ + /* Find corresponding request message by correlation id */ + if (unlikely(!(req = rd_kafka_waitresp_find( + rkb, rkbuf->rkbuf_reshdr.CorrId)))) { + /* unknown response. probably due to request timeout */ rd_atomic64_add(&rkb->rkb_c.rx_corrid_err, 1); - rd_rkb_dbg(rkb, BROKER, "RESPONSE", - "Response for unknown CorrId %"PRId32" (timed out?)", - rkbuf->rkbuf_reshdr.CorrId); + rd_rkb_dbg(rkb, BROKER, "RESPONSE", + "Response for unknown CorrId %" PRId32 + " (timed out?)", + rkbuf->rkbuf_reshdr.CorrId); rd_kafka_interceptors_on_response_received( - rkb->rkb_rk, - -1, - rd_kafka_broker_name(rkb), - rkb->rkb_nodeid, - -1, - -1, - rkbuf->rkbuf_reshdr.CorrId, - rkbuf->rkbuf_totlen, - -1, - RD_KAFKA_RESP_ERR__NOENT); + rkb->rkb_rk, -1, rd_kafka_broker_name(rkb), rkb->rkb_nodeid, + -1, -1, rkbuf->rkbuf_reshdr.CorrId, rkbuf->rkbuf_totlen, -1, + RD_KAFKA_RESP_ERR__NOENT); rd_kafka_buf_destroy(rkbuf); return -1; - } + } - rd_rkb_dbg(rkb, PROTOCOL, "RECV", - "Received %sResponse (v%hd, %"PRIusz" bytes, CorrId %"PRId32 - ", rtt %.2fms)", - rd_kafka_ApiKey2str(req->rkbuf_reqhdr.ApiKey), - req->rkbuf_reqhdr.ApiVersion, - rkbuf->rkbuf_totlen, rkbuf->rkbuf_reshdr.CorrId, - (float)req->rkbuf_ts_sent / 1000.0f); + rd_rkb_dbg(rkb, PROTOCOL, "RECV", + "Received %sResponse (v%hd, %" PRIusz + " bytes, CorrId %" PRId32 ", rtt %.2fms)", + rd_kafka_ApiKey2str(req->rkbuf_reqhdr.ApiKey), + 
req->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_totlen, + rkbuf->rkbuf_reshdr.CorrId, + (float)req->rkbuf_ts_sent / 1000.0f); /* Copy request's header and certain flags to response object's * reqhdr for convenience. */ rkbuf->rkbuf_reqhdr = req->rkbuf_reqhdr; - rkbuf->rkbuf_flags |= (req->rkbuf_flags & - RD_KAFKA_BUF_FLAGS_RESP_COPY_MASK); - rkbuf->rkbuf_ts_sent = req->rkbuf_ts_sent; /* copy rtt */ + rkbuf->rkbuf_flags |= + (req->rkbuf_flags & RD_KAFKA_BUF_FLAGS_RESP_COPY_MASK); + rkbuf->rkbuf_ts_sent = req->rkbuf_ts_sent; /* copy rtt */ /* Set up response reader slice starting past the response header */ rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, @@ -1812,12 +1790,12 @@ static int rd_kafka_req_response (rd_kafka_broker_t *rkb, } else rd_assert(rkbuf->rkbuf_rkb == rkb); - /* Call callback. */ + /* Call callback. */ rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, rkbuf, req); - return 0; + return 0; - err_parse: +err_parse: rd_atomic64_add(&rkb->rkb_c.rx_err, 1); rd_kafka_buf_callback(rkb->rkb_rk, rkb, rkbuf->rkbuf_err, NULL, req); rd_kafka_buf_destroy(rkbuf); @@ -1826,14 +1804,13 @@ static int rd_kafka_req_response (rd_kafka_broker_t *rkb, - -int rd_kafka_recv (rd_kafka_broker_t *rkb) { - rd_kafka_buf_t *rkbuf; - ssize_t r; +int rd_kafka_recv(rd_kafka_broker_t *rkb) { + rd_kafka_buf_t *rkbuf; + ssize_t r; /* errstr is not set by buf_read errors, so default it here. */ - char errstr[512] = "Protocol parse failure"; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - const int log_decode_errors = LOG_ERR; + char errstr[512] = "Protocol parse failure"; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + const int log_decode_errors = LOG_ERR; /* It is impossible to estimate the correct size of the response @@ -1843,16 +1820,15 @@ int rd_kafka_recv (rd_kafka_broker_t *rkb) { * buffer and call receive again. * All this in an async fashion (e.g., partial reads). 
*/ - if (!(rkbuf = rkb->rkb_recv_buf)) { - /* No receive in progress: create new buffer */ + if (!(rkbuf = rkb->rkb_recv_buf)) { + /* No receive in progress: create new buffer */ rkbuf = rd_kafka_buf_new(2, RD_KAFKAP_RESHDR_SIZE); - rkb->rkb_recv_buf = rkbuf; + rkb->rkb_recv_buf = rkbuf; /* Set up buffer reader for the response header. */ - rd_buf_write_ensure(&rkbuf->rkbuf_buf, - RD_KAFKAP_RESHDR_SIZE, + rd_buf_write_ensure(&rkbuf->rkbuf_buf, RD_KAFKAP_RESHDR_SIZE, RD_KAFKAP_RESHDR_SIZE); } @@ -1870,15 +1846,15 @@ int rd_kafka_recv (rd_kafka_broker_t *rkb) { rd_atomic64_set(&rkb->rkb_c.ts_recv, rd_clock()); - if (rkbuf->rkbuf_totlen == 0) { - /* Packet length not known yet. */ + if (rkbuf->rkbuf_totlen == 0) { + /* Packet length not known yet. */ if (unlikely(rd_buf_write_pos(&rkbuf->rkbuf_buf) < RD_KAFKAP_RESHDR_SIZE)) { - /* Need response header for packet length and corrid. - * Wait for more data. */ - return 0; - } + /* Need response header for packet length and corrid. + * Wait for more data. */ + return 0; + } rd_assert(!rkbuf->rkbuf_rkb); rkbuf->rkbuf_rkb = rkb; /* Protocol parsing code needs @@ -1892,69 +1868,69 @@ int rd_kafka_recv (rd_kafka_broker_t *rkb) { rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0, RD_KAFKAP_RESHDR_SIZE); - /* Read protocol header */ - rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.Size); - rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.CorrId); + /* Read protocol header */ + rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.Size); + rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.CorrId); rkbuf->rkbuf_rkb = NULL; /* Reset */ - rkbuf->rkbuf_totlen = rkbuf->rkbuf_reshdr.Size; + rkbuf->rkbuf_totlen = rkbuf->rkbuf_reshdr.Size; - /* Make sure message size is within tolerable limits. */ - if (rkbuf->rkbuf_totlen < 4/*CorrId*/ || - rkbuf->rkbuf_totlen > - (size_t)rkb->rkb_rk->rk_conf.recv_max_msg_size) { + /* Make sure message size is within tolerable limits. 
*/ + if (rkbuf->rkbuf_totlen < 4 /*CorrId*/ || + rkbuf->rkbuf_totlen > + (size_t)rkb->rkb_rk->rk_conf.recv_max_msg_size) { rd_snprintf(errstr, sizeof(errstr), - "Invalid response size %"PRId32" (0..%i): " + "Invalid response size %" PRId32 + " (0..%i): " "increase receive.message.max.bytes", rkbuf->rkbuf_reshdr.Size, rkb->rkb_rk->rk_conf.recv_max_msg_size); err = RD_KAFKA_RESP_ERR__BAD_MSG; - rd_atomic64_add(&rkb->rkb_c.rx_err, 1); - goto err; - } + rd_atomic64_add(&rkb->rkb_c.rx_err, 1); + goto err; + } - rkbuf->rkbuf_totlen -= 4; /*CorrId*/ + rkbuf->rkbuf_totlen -= 4; /*CorrId*/ - if (rkbuf->rkbuf_totlen > 0) { - /* Allocate another buffer that fits all data (short of - * the common response header). We want all - * data to be in contigious memory. */ + if (rkbuf->rkbuf_totlen > 0) { + /* Allocate another buffer that fits all data (short of + * the common response header). We want all + * data to be in contigious memory. */ rd_buf_write_ensure_contig(&rkbuf->rkbuf_buf, rkbuf->rkbuf_totlen); - } - } + } + } if (rd_buf_write_pos(&rkbuf->rkbuf_buf) - RD_KAFKAP_RESHDR_SIZE == rkbuf->rkbuf_totlen) { - /* Message is complete, pass it on to the original requester. */ - rkb->rkb_recv_buf = NULL; + /* Message is complete, pass it on to the original requester. */ + rkb->rkb_recv_buf = NULL; rd_atomic64_add(&rkb->rkb_c.rx, 1); rd_atomic64_add(&rkb->rkb_c.rx_bytes, rd_buf_write_pos(&rkbuf->rkbuf_buf)); - rd_kafka_req_response(rkb, rkbuf); - } + rd_kafka_req_response(rkb, rkbuf); + } - return 1; + return 1; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: if (!strcmp(errstr, "Disconnected")) rd_kafka_broker_conn_closed(rkb, err, errstr); else - rd_kafka_broker_fail(rkb, LOG_ERR, err, - "Receive failed: %s", errstr); - return -1; + rd_kafka_broker_fail(rkb, LOG_ERR, err, "Receive failed: %s", + errstr); + return -1; } /** * Linux version of socket_cb providing racefree CLOEXEC. 
*/ -int rd_kafka_socket_cb_linux (int domain, int type, int protocol, - void *opaque) { +int rd_kafka_socket_cb_linux(int domain, int type, int protocol, void *opaque) { #ifdef SOCK_CLOEXEC return socket(domain, type | SOCK_CLOEXEC, protocol); #else @@ -1966,16 +1942,19 @@ int rd_kafka_socket_cb_linux (int domain, int type, int protocol, * Fallback version of socket_cb NOT providing racefree CLOEXEC, * but setting CLOEXEC after socket creation (if FD_CLOEXEC is defined). */ -int rd_kafka_socket_cb_generic (int domain, int type, int protocol, - void *opaque) { +int rd_kafka_socket_cb_generic(int domain, + int type, + int protocol, + void *opaque) { int s; int on = 1; - s = (int)socket(domain, type, protocol); + s = (int)socket(domain, type, protocol); if (s == -1) return -1; #ifdef FD_CLOEXEC if (fcntl(s, F_SETFD, FD_CLOEXEC, &on) == -1) - fprintf(stderr, "WARNING: librdkafka: %s: " + fprintf(stderr, + "WARNING: librdkafka: %s: " "fcntl(FD_CLOEXEC) failed: %s: ignoring\n", __FUNCTION__, rd_strerror(errno)); #endif @@ -1993,9 +1972,9 @@ int rd_kafka_socket_cb_generic (int domain, int type, int protocol, * @locks none */ static void -rd_kafka_broker_update_reconnect_backoff (rd_kafka_broker_t *rkb, - const rd_kafka_conf_t *conf, - rd_ts_t now) { +rd_kafka_broker_update_reconnect_backoff(rd_kafka_broker_t *rkb, + const rd_kafka_conf_t *conf, + rd_ts_t now) { int backoff; /* If last connection attempt was more than reconnect.backoff.max.ms @@ -2009,14 +1988,13 @@ rd_kafka_broker_update_reconnect_backoff (rd_kafka_broker_t *rkb, backoff = rd_jitter((int)((float)rkb->rkb_reconnect_backoff_ms * 0.75), (int)((float)rkb->rkb_reconnect_backoff_ms * 1.5)); - /* Cap to reconnect.backoff.max.ms. */ + /* Cap to reconnect.backoff.max.ms. 
*/ backoff = RD_MIN(backoff, conf->reconnect_backoff_max_ms); /* Set time of next reconnect */ - rkb->rkb_ts_reconnect = now + (backoff * 1000); - rkb->rkb_reconnect_backoff_ms = - RD_MIN(rkb->rkb_reconnect_backoff_ms * 2, - conf->reconnect_backoff_max_ms); + rkb->rkb_ts_reconnect = now + (backoff * 1000); + rkb->rkb_reconnect_backoff_ms = RD_MIN( + rkb->rkb_reconnect_backoff_ms * 2, conf->reconnect_backoff_max_ms); } @@ -2030,8 +2008,7 @@ rd_kafka_broker_update_reconnect_backoff (rd_kafka_broker_t *rkb, */ static RD_INLINE int -rd_kafka_broker_reconnect_backoff (const rd_kafka_broker_t *rkb, - rd_ts_t now) { +rd_kafka_broker_reconnect_backoff(const rd_kafka_broker_t *rkb, rd_ts_t now) { rd_ts_t remains; if (unlikely(rkb->rkb_ts_reconnect == 0)) @@ -2048,13 +2025,11 @@ rd_kafka_broker_reconnect_backoff (const rd_kafka_broker_t *rkb, /** * @brief Unittest for reconnect.backoff.ms */ -static int rd_ut_reconnect_backoff (void) { +static int rd_ut_reconnect_backoff(void) { rd_kafka_broker_t rkb = RD_ZERO_INIT; - rd_kafka_conf_t conf = { - .reconnect_backoff_ms = 10, - .reconnect_backoff_max_ms = 90 - }; - rd_ts_t now = 1000000; + rd_kafka_conf_t conf = {.reconnect_backoff_ms = 10, + .reconnect_backoff_max_ms = 90}; + rd_ts_t now = 1000000; int backoff; rkb.rkb_reconnect_backoff_ms = conf.reconnect_backoff_ms; @@ -2102,15 +2077,14 @@ static int rd_ut_reconnect_backoff (void) { * @returns -1 on error, 0 if broker does not have a hostname, or 1 * if the connection is now in progress. 
*/ -static int rd_kafka_broker_connect (rd_kafka_broker_t *rkb) { - const rd_sockaddr_inx_t *sinx; - char errstr[512]; +static int rd_kafka_broker_connect(rd_kafka_broker_t *rkb) { + const rd_sockaddr_inx_t *sinx; + char errstr[512]; char nodename[RD_KAFKA_NODENAME_SIZE]; rd_bool_t reset_cached_addr = rd_false; - rd_rkb_dbg(rkb, BROKER, "CONNECT", - "broker in state %s connecting", - rd_kafka_broker_state_names[rkb->rkb_state]); + rd_rkb_dbg(rkb, BROKER, "CONNECT", "broker in state %s connecting", + rd_kafka_broker_state_names[rkb->rkb_state]); rd_atomic32_add(&rkb->rkb_c.connects, 1); @@ -2139,17 +2113,16 @@ static int rd_kafka_broker_connect (rd_kafka_broker_t *rkb) { if (rd_kafka_broker_resolve(rkb, nodename, reset_cached_addr) == -1) return -1; - sinx = rd_sockaddr_list_next(rkb->rkb_rsal); + sinx = rd_sockaddr_list_next(rkb->rkb_rsal); - rd_kafka_assert(rkb->rkb_rk, !rkb->rkb_transport); + rd_kafka_assert(rkb->rkb_rk, !rkb->rkb_transport); - if (!(rkb->rkb_transport = - rd_kafka_transport_connect(rkb, sinx, errstr, sizeof(errstr)))) { - rd_kafka_broker_fail(rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__TRANSPORT, + if (!(rkb->rkb_transport = rd_kafka_transport_connect( + rkb, sinx, errstr, sizeof(errstr)))) { + rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, "%s", errstr); - return -1; - } + return -1; + } return 1; } @@ -2161,27 +2134,26 @@ static int rd_kafka_broker_connect (rd_kafka_broker_t *rkb) { * * @locality Broker thread */ -void rd_kafka_broker_connect_up (rd_kafka_broker_t *rkb) { +void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb) { - rkb->rkb_max_inflight = rkb->rkb_rk->rk_conf.max_inflight; + rkb->rkb_max_inflight = rkb->rkb_rk->rk_conf.max_inflight; - rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_UP); - rd_kafka_broker_unlock(rkb); + rd_kafka_broker_lock(rkb); + rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_UP); + rd_kafka_broker_unlock(rkb); /* Request metadata (async): * try locally known 
topics first and if there are none try * getting just the broker list. */ - if (rd_kafka_metadata_refresh_known_topics(NULL, rkb, - rd_false/*dont force*/, - "connected") == + if (rd_kafka_metadata_refresh_known_topics( + NULL, rkb, rd_false /*dont force*/, "connected") == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) rd_kafka_metadata_refresh_brokers(NULL, rkb, "connected"); } -static void rd_kafka_broker_connect_auth (rd_kafka_broker_t *rkb); +static void rd_kafka_broker_connect_auth(rd_kafka_broker_t *rkb); /** @@ -2189,73 +2161,68 @@ static void rd_kafka_broker_connect_auth (rd_kafka_broker_t *rkb); * the broker state. * */ -static void -rd_kafka_broker_handle_SaslHandshake (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_broker_handle_SaslHandshake(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; - int32_t MechCnt; - int16_t ErrorCode; - int i = 0; - char *mechs = "(n/a)"; - size_t msz, mof = 0; + int32_t MechCnt; + int16_t ErrorCode; + int i = 0; + char *mechs = "(n/a)"; + size_t msz, mof = 0; - if (err == RD_KAFKA_RESP_ERR__DESTROY) - return; + if (err == RD_KAFKA_RESP_ERR__DESTROY) + return; if (err) goto err; - rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); rd_kafka_buf_read_i32(rkbuf, &MechCnt); if (MechCnt < 0 || MechCnt > 100) - rd_kafka_buf_parse_fail(rkbuf, - "Invalid MechanismCount %"PRId32, - MechCnt); + rd_kafka_buf_parse_fail( + rkbuf, "Invalid MechanismCount %" PRId32, MechCnt); - /* Build a CSV string of supported mechanisms. */ - msz = RD_MIN(511, 1 + (MechCnt * 32)); - mechs = rd_alloca(msz); - *mechs = '\0'; + /* Build a CSV string of supported mechanisms. 
*/ + msz = RD_MIN(511, 1 + (MechCnt * 32)); + mechs = rd_alloca(msz); + *mechs = '\0'; - for (i = 0 ; i < MechCnt ; i++) { - rd_kafkap_str_t mech; - rd_kafka_buf_read_str(rkbuf, &mech); + for (i = 0; i < MechCnt; i++) { + rd_kafkap_str_t mech; + rd_kafka_buf_read_str(rkbuf, &mech); - mof += rd_snprintf(mechs+mof, msz-mof, "%s%.*s", - i ? ",":"", RD_KAFKAP_STR_PR(&mech)); + mof += rd_snprintf(mechs + mof, msz - mof, "%s%.*s", + i ? "," : "", RD_KAFKAP_STR_PR(&mech)); - if (mof >= msz) - break; + if (mof >= msz) + break; } - rd_rkb_dbg(rkb, - PROTOCOL | RD_KAFKA_DBG_SECURITY | RD_KAFKA_DBG_BROKER, - "SASLMECHS", "Broker supported SASL mechanisms: %s", - mechs); + rd_rkb_dbg(rkb, PROTOCOL | RD_KAFKA_DBG_SECURITY | RD_KAFKA_DBG_BROKER, + "SASLMECHS", "Broker supported SASL mechanisms: %s", mechs); if (ErrorCode) { - err = ErrorCode; - goto err; - } + err = ErrorCode; + goto err; + } - /* Circle back to connect_auth() to start proper AUTH state. */ - rd_kafka_broker_connect_auth(rkb); - return; + /* Circle back to connect_auth() to start proper AUTH state. 
*/ + rd_kafka_broker_connect_auth(rkb); + return; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: - rd_kafka_broker_fail(rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__AUTHENTICATION, - "SASL %s mechanism handshake failed: %s: " - "broker's supported mechanisms: %s", +err: + rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__AUTHENTICATION, + "SASL %s mechanism handshake failed: %s: " + "broker's supported mechanisms: %s", rkb->rkb_rk->rk_conf.sasl.mechanisms, - rd_kafka_err2str(err), mechs); + rd_kafka_err2str(err), mechs); } @@ -2268,64 +2235,63 @@ rd_kafka_broker_handle_SaslHandshake (rd_kafka_t *rk, * * @locks_acquired rkb */ -static void rd_kafka_broker_connect_auth (rd_kafka_broker_t *rkb) { - - if ((rkb->rkb_proto == RD_KAFKA_PROTO_SASL_PLAINTEXT || - rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL)) { - - rd_rkb_dbg(rkb, SECURITY | RD_KAFKA_DBG_BROKER, "AUTH", - "Auth in state %s (handshake %ssupported)", - rd_kafka_broker_state_names[rkb->rkb_state], - (rkb->rkb_features&RD_KAFKA_FEATURE_SASL_HANDSHAKE) - ? "" : "not "); - - /* Broker >= 0.10.0: send request to select mechanism */ - if (rkb->rkb_state != RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE && - (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)) { - - rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE); - rd_kafka_broker_unlock(rkb); - - rd_kafka_SaslHandshakeRequest( - rkb, rkb->rkb_rk->rk_conf.sasl.mechanisms, - RD_KAFKA_NO_REPLYQ, - rd_kafka_broker_handle_SaslHandshake, - NULL); - } else { - /* Either Handshake succeeded (protocol selected) - * or Handshakes were not supported. - * In both cases continue with authentication. 
*/ - char sasl_errstr[512]; - - rd_kafka_broker_lock(rkb); +static void rd_kafka_broker_connect_auth(rd_kafka_broker_t *rkb) { + + if ((rkb->rkb_proto == RD_KAFKA_PROTO_SASL_PLAINTEXT || + rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL)) { + + rd_rkb_dbg(rkb, SECURITY | RD_KAFKA_DBG_BROKER, "AUTH", + "Auth in state %s (handshake %ssupported)", + rd_kafka_broker_state_names[rkb->rkb_state], + (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE) + ? "" + : "not "); + + /* Broker >= 0.10.0: send request to select mechanism */ + if (rkb->rkb_state != RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE && + (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)) { + + rd_kafka_broker_lock(rkb); rd_kafka_broker_set_state( - rkb, - (rkb->rkb_features & - RD_KAFKA_FEATURE_SASL_AUTH_REQ) ? - RD_KAFKA_BROKER_STATE_AUTH_REQ : - RD_KAFKA_BROKER_STATE_AUTH_LEGACY); - rd_kafka_broker_unlock(rkb); - - if (rd_kafka_sasl_client_new( - rkb->rkb_transport, sasl_errstr, - sizeof(sasl_errstr)) == -1) { - rd_kafka_broker_fail( - rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__AUTHENTICATION, - "Failed to initialize " - "SASL authentication: %s", - sasl_errstr); - return; - } - } - - return; - } - - /* No authentication required. */ - rd_kafka_broker_connect_up(rkb); + rkb, RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE); + rd_kafka_broker_unlock(rkb); + + rd_kafka_SaslHandshakeRequest( + rkb, rkb->rkb_rk->rk_conf.sasl.mechanisms, + RD_KAFKA_NO_REPLYQ, + rd_kafka_broker_handle_SaslHandshake, NULL); + } else { + /* Either Handshake succeeded (protocol selected) + * or Handshakes were not supported. + * In both cases continue with authentication. */ + char sasl_errstr[512]; + + rd_kafka_broker_lock(rkb); + rd_kafka_broker_set_state( + rkb, + (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ) + ? 
RD_KAFKA_BROKER_STATE_AUTH_REQ + : RD_KAFKA_BROKER_STATE_AUTH_LEGACY); + rd_kafka_broker_unlock(rkb); + + if (rd_kafka_sasl_client_new( + rkb->rkb_transport, sasl_errstr, + sizeof(sasl_errstr)) == -1) { + rd_kafka_broker_fail( + rkb, LOG_ERR, + RD_KAFKA_RESP_ERR__AUTHENTICATION, + "Failed to initialize " + "SASL authentication: %s", + sasl_errstr); + return; + } + } + + return; + } + + /* No authentication required. */ + rd_kafka_broker_connect_up(rkb); } @@ -2342,57 +2308,56 @@ static void rd_kafka_broker_connect_auth (rd_kafka_broker_t *rkb) { * @locality Broker thread * @locks_required rkb */ -static void rd_kafka_broker_set_api_versions (rd_kafka_broker_t *rkb, - struct rd_kafka_ApiVersion *apis, - size_t api_cnt) { +static void rd_kafka_broker_set_api_versions(rd_kafka_broker_t *rkb, + struct rd_kafka_ApiVersion *apis, + size_t api_cnt) { - if (rkb->rkb_ApiVersions) - rd_free(rkb->rkb_ApiVersions); + if (rkb->rkb_ApiVersions) + rd_free(rkb->rkb_ApiVersions); - if (!apis) { - rd_rkb_dbg(rkb, PROTOCOL | RD_KAFKA_DBG_BROKER, "APIVERSION", - "Using (configuration fallback) %s protocol features", - rkb->rkb_rk->rk_conf.broker_version_fallback); + if (!apis) { + rd_rkb_dbg( + rkb, PROTOCOL | RD_KAFKA_DBG_BROKER, "APIVERSION", + "Using (configuration fallback) %s protocol features", + rkb->rkb_rk->rk_conf.broker_version_fallback); - rd_kafka_get_legacy_ApiVersions(rkb->rkb_rk->rk_conf. - broker_version_fallback, - &apis, &api_cnt, - rkb->rkb_rk->rk_conf. - broker_version_fallback); + rd_kafka_get_legacy_ApiVersions( + rkb->rkb_rk->rk_conf.broker_version_fallback, &apis, + &api_cnt, rkb->rkb_rk->rk_conf.broker_version_fallback); - /* Make a copy to store on broker. */ - rd_kafka_ApiVersions_copy(apis, api_cnt, &apis, &api_cnt); - } + /* Make a copy to store on broker. 
*/ + rd_kafka_ApiVersions_copy(apis, api_cnt, &apis, &api_cnt); + } - rkb->rkb_ApiVersions = apis; - rkb->rkb_ApiVersions_cnt = api_cnt; + rkb->rkb_ApiVersions = apis; + rkb->rkb_ApiVersions_cnt = api_cnt; - /* Update feature set based on supported broker APIs. */ - rd_kafka_broker_features_set(rkb, - rd_kafka_features_check(rkb, apis, api_cnt)); + /* Update feature set based on supported broker APIs. */ + rd_kafka_broker_features_set( + rkb, rd_kafka_features_check(rkb, apis, api_cnt)); } /** * Handler for ApiVersion response. */ -static void -rd_kafka_broker_handle_ApiVersion (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, void *opaque) { - struct rd_kafka_ApiVersion *apis = NULL; - size_t api_cnt = 0; - int16_t retry_ApiVersion = -1; +static void rd_kafka_broker_handle_ApiVersion(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + struct rd_kafka_ApiVersion *apis = NULL; + size_t api_cnt = 0; + int16_t retry_ApiVersion = -1; - if (err == RD_KAFKA_RESP_ERR__DESTROY) - return; + if (err == RD_KAFKA_RESP_ERR__DESTROY) + return; - err = rd_kafka_handle_ApiVersion(rk, rkb, err, rkbuf, request, - &apis, &api_cnt); + err = rd_kafka_handle_ApiVersion(rk, rkb, err, rkbuf, request, &apis, + &api_cnt); /* Broker does not support our ApiVersionRequest version, * see if we can downgrade to an older version. */ @@ -2401,11 +2366,11 @@ rd_kafka_broker_handle_ApiVersion (rd_kafka_t *rk, /* Find the broker's highest supported version for * ApiVersionRequest and use that to retry. 
*/ - for (i = 0 ; i < api_cnt ; i++) { + for (i = 0; i < api_cnt; i++) { if (apis[i].ApiKey == RD_KAFKAP_ApiVersion) { - retry_ApiVersion = RD_MIN( - request->rkbuf_reqhdr.ApiVersion - 1, - apis[i].MaxVer); + retry_ApiVersion = + RD_MIN(request->rkbuf_reqhdr.ApiVersion - 1, + apis[i].MaxVer); break; } } @@ -2433,38 +2398,36 @@ rd_kafka_broker_handle_ApiVersion (rd_kafka_t *rk, if (retry_ApiVersion != -1) { /* Retry request with a lower version */ - rd_rkb_dbg(rkb, - BROKER|RD_KAFKA_DBG_FEATURE|RD_KAFKA_DBG_PROTOCOL, - "APIVERSION", - "ApiVersionRequest v%hd failed due to %s: " - "retrying with v%hd", - request->rkbuf_reqhdr.ApiVersion, - rd_kafka_err2name(err), retry_ApiVersion); - rd_kafka_ApiVersionRequest(rkb, retry_ApiVersion, - RD_KAFKA_NO_REPLYQ, - rd_kafka_broker_handle_ApiVersion, - NULL); + rd_rkb_dbg( + rkb, BROKER | RD_KAFKA_DBG_FEATURE | RD_KAFKA_DBG_PROTOCOL, + "APIVERSION", + "ApiVersionRequest v%hd failed due to %s: " + "retrying with v%hd", + request->rkbuf_reqhdr.ApiVersion, rd_kafka_err2name(err), + retry_ApiVersion); + rd_kafka_ApiVersionRequest( + rkb, retry_ApiVersion, RD_KAFKA_NO_REPLYQ, + rd_kafka_broker_handle_ApiVersion, NULL); return; } - if (err) { + if (err) { if (rkb->rkb_transport) rd_kafka_broker_fail( - rkb, LOG_WARNING, - RD_KAFKA_RESP_ERR__TRANSPORT, - "ApiVersionRequest failed: %s: " - "probably due to broker version < 0.10 " - "(see api.version.request configuration)", - rd_kafka_err2str(err)); - return; - } + rkb, LOG_WARNING, RD_KAFKA_RESP_ERR__TRANSPORT, + "ApiVersionRequest failed: %s: " + "probably due to broker version < 0.10 " + "(see api.version.request configuration)", + rd_kafka_err2str(err)); + return; + } rd_kafka_broker_lock(rkb); rd_kafka_broker_set_api_versions(rkb, apis, api_cnt); rd_kafka_broker_unlock(rkb); - rd_kafka_broker_connect_auth(rkb); + rd_kafka_broker_connect_auth(rkb); } @@ -2475,32 +2438,33 @@ rd_kafka_broker_handle_ApiVersion (rd_kafka_t *rk, * @locks_acquired rkb * @locality broker thread */ -void 
rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr) { +void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr) { - if (errstr) { - /* Connect failed */ + if (errstr) { + /* Connect failed */ rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, "%s", errstr); - return; - } + return; + } - /* Connect succeeded */ - rkb->rkb_connid++; - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, - "CONNECTED", "Connected (#%d)", rkb->rkb_connid); - rkb->rkb_max_inflight = 1; /* Hold back other requests until - * ApiVersion, SaslHandshake, etc - * are done. */ + /* Connect succeeded */ + rkb->rkb_connid++; + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "CONNECTED", + "Connected (#%d)", rkb->rkb_connid); + rkb->rkb_max_inflight = 1; /* Hold back other requests until + * ApiVersion, SaslHandshake, etc + * are done. */ - rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN); + rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN); rd_kafka_broker_lock(rkb); - if (rkb->rkb_rk->rk_conf.api_version_request && - rd_interval_immediate(&rkb->rkb_ApiVersion_fail_intvl, 0, 0) > 0) { - /* Use ApiVersion to query broker for supported API versions. */ - rd_kafka_broker_feature_enable(rkb, RD_KAFKA_FEATURE_APIVERSION); - } + if (rkb->rkb_rk->rk_conf.api_version_request && + rd_interval_immediate(&rkb->rkb_ApiVersion_fail_intvl, 0, 0) > 0) { + /* Use ApiVersion to query broker for supported API versions. */ + rd_kafka_broker_feature_enable(rkb, + RD_KAFKA_FEATURE_APIVERSION); + } if (!(rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION)) { /* Use configured broker.version.fallback to @@ -2512,27 +2476,26 @@ void rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr) { rd_kafka_broker_set_api_versions(rkb, NULL, 0); } - if (rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION) { - /* Query broker for supported API versions. 
- * This may fail with a disconnect on non-supporting brokers - * so hold off any other requests until we get a response, - * and if the connection is torn down we disable this feature. + if (rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION) { + /* Query broker for supported API versions. + * This may fail with a disconnect on non-supporting brokers + * so hold off any other requests until we get a response, + * and if the connection is torn down we disable this feature. */ rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_APIVERSION_QUERY); + rkb, RD_KAFKA_BROKER_STATE_APIVERSION_QUERY); rd_kafka_broker_unlock(rkb); - rd_kafka_ApiVersionRequest( - rkb, -1 /* Use highest version we support */, - RD_KAFKA_NO_REPLYQ, - rd_kafka_broker_handle_ApiVersion, NULL); - } else { + rd_kafka_ApiVersionRequest( + rkb, -1 /* Use highest version we support */, + RD_KAFKA_NO_REPLYQ, rd_kafka_broker_handle_ApiVersion, + NULL); + } else { rd_kafka_broker_unlock(rkb); - /* Authenticate if necessary */ - rd_kafka_broker_connect_auth(rkb); - } - + /* Authenticate if necessary */ + rd_kafka_broker_connect_auth(rkb); + } } @@ -2543,12 +2506,10 @@ void rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr) { * @locality broker thread * @locks none */ -static RD_INLINE int -rd_kafka_broker_request_supported (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf) { - struct rd_kafka_ApiVersion skel = { - .ApiKey = rkbuf->rkbuf_reqhdr.ApiKey - }; +static RD_INLINE int rd_kafka_broker_request_supported(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf) { + struct rd_kafka_ApiVersion skel = {.ApiKey = + rkbuf->rkbuf_reqhdr.ApiKey}; struct rd_kafka_ApiVersion *ret; if (unlikely(rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_ApiVersion)) @@ -2560,17 +2521,17 @@ rd_kafka_broker_request_supported (rd_kafka_broker_t *rkb, * set of APIs. 
*/ if (rkbuf->rkbuf_features) return (rkb->rkb_features & rkbuf->rkbuf_features) == - rkbuf->rkbuf_features; + rkbuf->rkbuf_features; /* Then try the ApiVersion map. */ - ret = bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt, - sizeof(*rkb->rkb_ApiVersions), - rd_kafka_ApiVersion_key_cmp); + ret = + bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt, + sizeof(*rkb->rkb_ApiVersions), rd_kafka_ApiVersion_key_cmp); if (!ret) return 0; return ret->MinVer <= rkbuf->rkbuf_reqhdr.ApiVersion && - rkbuf->rkbuf_reqhdr.ApiVersion <= ret->MaxVer; + rkbuf->rkbuf_reqhdr.ApiVersion <= ret->MaxVer; } @@ -2579,16 +2540,16 @@ rd_kafka_broker_request_supported (rd_kafka_broker_t *rkb, * * Locality: io thread */ -int rd_kafka_send (rd_kafka_broker_t *rkb) { - rd_kafka_buf_t *rkbuf; - unsigned int cnt = 0; +int rd_kafka_send(rd_kafka_broker_t *rkb) { + rd_kafka_buf_t *rkbuf; + unsigned int cnt = 0; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP && - rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight && - (rkbuf = TAILQ_FIRST(&rkb->rkb_outbufs.rkbq_bufs))) { - ssize_t r; + while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP && + rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight && + (rkbuf = TAILQ_FIRST(&rkb->rkb_outbufs.rkbq_bufs))) { + ssize_t r; size_t pre_of = rd_slice_offset(&rkbuf->rkbuf_reader); rd_ts_t now; @@ -2598,7 +2559,7 @@ int rd_kafka_send (rd_kafka_broker_t *rkb) { rd_kafka_resp_err_t err; err = rkbuf->rkbuf_make_req_cb( - rkb, rkbuf, rkbuf->rkbuf_make_opaque); + rkb, rkbuf, rkbuf->rkbuf_make_opaque); rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_NEED_MAKE; @@ -2606,7 +2567,7 @@ int rd_kafka_send (rd_kafka_broker_t *rkb) { if (rkbuf->rkbuf_free_make_opaque_cb && rkbuf->rkbuf_make_opaque) { rkbuf->rkbuf_free_make_opaque_cb( - rkbuf->rkbuf_make_opaque); + rkbuf->rkbuf_make_opaque); 
rkbuf->rkbuf_make_opaque = NULL; } @@ -2615,9 +2576,8 @@ int rd_kafka_send (rd_kafka_broker_t *rkb) { rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "MAKEREQ", "Failed to make %sRequest: %s", - rd_kafka_ApiKey2str(rkbuf-> - rkbuf_reqhdr. - ApiKey), + rd_kafka_ApiKey2str( + rkbuf->rkbuf_reqhdr.ApiKey), rd_kafka_err2str(err)); rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf); @@ -2630,54 +2590,54 @@ int rd_kafka_send (rd_kafka_broker_t *rkb) { /* Check for broker support */ if (unlikely(!rd_kafka_broker_request_supported(rkb, rkbuf))) { rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf); - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, - "UNSUPPORTED", - "Failing %sResponse " - "(v%hd, %"PRIusz" bytes, CorrId %"PRId32"): " - "request not supported by broker " - "(missing api.version.request=false or " - "incorrect broker.version.fallback config?)", - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr. - ApiKey), - rkbuf->rkbuf_reqhdr.ApiVersion, - rkbuf->rkbuf_totlen, - rkbuf->rkbuf_reshdr.CorrId); + rd_rkb_dbg( + rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "UNSUPPORTED", + "Failing %sResponse " + "(v%hd, %" PRIusz " bytes, CorrId %" PRId32 + "): " + "request not supported by broker " + "(missing api.version.request=false or " + "incorrect broker.version.fallback config?)", + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_totlen, + rkbuf->rkbuf_reshdr.CorrId); rd_kafka_buf_callback( - rkb->rkb_rk, rkb, - RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, - NULL, rkbuf); + rkb->rkb_rk, rkb, + RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, NULL, + rkbuf); continue; } - /* Set CorrId header field, unless this is the latter part - * of a partial send in which case the corrid has already - * been set. - * Due to how SSL_write() will accept a buffer but still - * return 0 in some cases we can't rely on the buffer offset - * but need to use corrid to check this. SSL_write() expects - * us to send the same buffer again when 0 is returned. 
- */ - if (rkbuf->rkbuf_corrid == 0 || - rkbuf->rkbuf_connid != rkb->rkb_connid) { + /* Set CorrId header field, unless this is the latter part + * of a partial send in which case the corrid has already + * been set. + * Due to how SSL_write() will accept a buffer but still + * return 0 in some cases we can't rely on the buffer offset + * but need to use corrid to check this. SSL_write() expects + * us to send the same buffer again when 0 is returned. + */ + if (rkbuf->rkbuf_corrid == 0 || + rkbuf->rkbuf_connid != rkb->rkb_connid) { rd_assert(rd_slice_offset(&rkbuf->rkbuf_reader) == 0); - rkbuf->rkbuf_corrid = ++rkb->rkb_corrid; - rd_kafka_buf_update_i32(rkbuf, 4+2+2, - rkbuf->rkbuf_corrid); - rkbuf->rkbuf_connid = rkb->rkb_connid; - } else if (pre_of > RD_KAFKAP_REQHDR_SIZE) { - rd_kafka_assert(NULL, - rkbuf->rkbuf_connid == rkb->rkb_connid); + rkbuf->rkbuf_corrid = ++rkb->rkb_corrid; + rd_kafka_buf_update_i32(rkbuf, 4 + 2 + 2, + rkbuf->rkbuf_corrid); + rkbuf->rkbuf_connid = rkb->rkb_connid; + } else if (pre_of > RD_KAFKAP_REQHDR_SIZE) { + rd_kafka_assert(NULL, + rkbuf->rkbuf_connid == rkb->rkb_connid); } - if (0) { - rd_rkb_dbg(rkb, PROTOCOL, "SEND", - "Send %s corrid %"PRId32" at " - "offset %"PRIusz"/%"PRIusz, - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr. - ApiKey), - rkbuf->rkbuf_corrid, - pre_of, rd_slice_size(&rkbuf->rkbuf_reader)); - } + if (0) { + rd_rkb_dbg( + rkb, PROTOCOL, "SEND", + "Send %s corrid %" PRId32 + " at " + "offset %" PRIusz "/%" PRIusz, + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_corrid, pre_of, + rd_slice_size(&rkbuf->rkbuf_reader)); + } if ((r = rd_kafka_broker_send(rkb, &rkbuf->rkbuf_reader)) == -1) return -1; @@ -2687,27 +2647,28 @@ int rd_kafka_send (rd_kafka_broker_t *rkb) { /* Partial send? Continue next time. 
*/ if (rd_slice_remains(&rkbuf->rkbuf_reader) > 0) { - rd_rkb_dbg(rkb, PROTOCOL, "SEND", - "Sent partial %sRequest " - "(v%hd, " - "%"PRIdsz"+%"PRIdsz"/%"PRIusz" bytes, " - "CorrId %"PRId32")", - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr. - ApiKey), - rkbuf->rkbuf_reqhdr.ApiVersion, - (ssize_t)pre_of, r, - rd_slice_size(&rkbuf->rkbuf_reader), - rkbuf->rkbuf_corrid); + rd_rkb_dbg( + rkb, PROTOCOL, "SEND", + "Sent partial %sRequest " + "(v%hd, " + "%" PRIdsz "+%" PRIdsz "/%" PRIusz + " bytes, " + "CorrId %" PRId32 ")", + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_reqhdr.ApiVersion, (ssize_t)pre_of, r, + rd_slice_size(&rkbuf->rkbuf_reader), + rkbuf->rkbuf_corrid); return 0; } - rd_rkb_dbg(rkb, PROTOCOL, "SEND", - "Sent %sRequest (v%hd, %"PRIusz" bytes @ %"PRIusz", " - "CorrId %"PRId32")", - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rd_rkb_dbg(rkb, PROTOCOL, "SEND", + "Sent %sRequest (v%hd, %" PRIusz " bytes @ %" PRIusz + ", " + "CorrId %" PRId32 ")", + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), rkbuf->rkbuf_reqhdr.ApiVersion, - rd_slice_size(&rkbuf->rkbuf_reader), - pre_of, rkbuf->rkbuf_corrid); + rd_slice_size(&rkbuf->rkbuf_reader), pre_of, + rkbuf->rkbuf_corrid); rd_atomic64_add(&rkb->rkb_c.reqtype[rkbuf->rkbuf_reqhdr.ApiKey], 1); @@ -2716,40 +2677,40 @@ int rd_kafka_send (rd_kafka_broker_t *rkb) { if (likely(rkb->rkb_transport != NULL)) rd_kafka_transport_request_sent(rkb, rkbuf); - /* Entire buffer sent, unlink from outbuf */ - rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf); + /* Entire buffer sent, unlink from outbuf */ + rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf); rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_SENT; - /* Store time for RTT calculation */ - rkbuf->rkbuf_ts_sent = now; + /* Store time for RTT calculation */ + rkbuf->rkbuf_ts_sent = now; /* Add to outbuf_latency averager */ rd_avg_add(&rkb->rkb_avg_outbuf_latency, rkbuf->rkbuf_ts_sent - rkbuf->rkbuf_ts_enq); if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING && - 
rd_atomic32_add(&rkb->rkb_blocking_request_cnt, 1) == 1) - rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); - - /* Put buffer on response wait list unless we are not - * expecting a response (required_acks=0). */ - if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NO_RESPONSE)) - rd_kafka_bufq_enq(&rkb->rkb_waitresps, rkbuf); - else { /* Call buffer callback for delivery report. */ + rd_atomic32_add(&rkb->rkb_blocking_request_cnt, 1) == 1) + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + + /* Put buffer on response wait list unless we are not + * expecting a response (required_acks=0). */ + if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NO_RESPONSE)) + rd_kafka_bufq_enq(&rkb->rkb_waitresps, rkbuf); + else { /* Call buffer callback for delivery report. */ rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, NULL, rkbuf); } - cnt++; - } + cnt++; + } - return cnt; + return cnt; } /** * Add 'rkbuf' to broker 'rkb's retry queue. */ -void rd_kafka_broker_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { +void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { /* Restore original replyq since replyq.q will have been NULLed * by buf_callback()/replyq_enq(). */ @@ -2761,37 +2722,37 @@ void rd_kafka_broker_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { /* If called from another thread than rkb's broker thread * enqueue the buffer on the broker's op queue. 
*/ if (!thrd_is_current(rkb->rkb_thread)) { - rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_RETRY); + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_RETRY); rko->rko_u.xbuf.rkbuf = rkbuf; rd_kafka_q_enq(rkb->rkb_ops, rko); return; } rd_rkb_dbg(rkb, PROTOCOL, "RETRY", - "Retrying %sRequest (v%hd, %"PRIusz" bytes, retry %d/%d, " - "prev CorrId %"PRId32") in %dms", + "Retrying %sRequest (v%hd, %" PRIusz + " bytes, retry %d/%d, " + "prev CorrId %" PRId32 ") in %dms", rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), rkbuf->rkbuf_reqhdr.ApiVersion, - rd_slice_size(&rkbuf->rkbuf_reader), - rkbuf->rkbuf_retries, rkbuf->rkbuf_max_retries, - rkbuf->rkbuf_corrid, + rd_slice_size(&rkbuf->rkbuf_reader), rkbuf->rkbuf_retries, + rkbuf->rkbuf_max_retries, rkbuf->rkbuf_corrid, rkb->rkb_rk->rk_conf.retry_backoff_ms); - rd_atomic64_add(&rkb->rkb_c.tx_retries, 1); + rd_atomic64_add(&rkb->rkb_c.tx_retries, 1); - rkbuf->rkbuf_ts_retry = rd_clock() + - (rkb->rkb_rk->rk_conf.retry_backoff_ms * 1000); + rkbuf->rkbuf_ts_retry = + rd_clock() + (rkb->rkb_rk->rk_conf.retry_backoff_ms * 1000); /* Precaution: time out the request if it hasn't moved from the * retry queue within the retry interval (such as when the broker is * down). */ // FIXME: implememt this properly. - rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_ts_retry + (5*1000*1000); + rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_ts_retry + (5 * 1000 * 1000); /* Reset send offset */ rd_slice_seek(&rkbuf->rkbuf_reader, 0); - rkbuf->rkbuf_corrid = 0; + rkbuf->rkbuf_corrid = 0; - rd_kafka_bufq_enq(&rkb->rkb_retrybufs, rkbuf); + rd_kafka_bufq_enq(&rkb->rkb_retrybufs, rkbuf); } @@ -2799,24 +2760,24 @@ void rd_kafka_broker_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { * Move buffers that have expired their retry backoff time from the * retry queue to the outbuf. 
*/ -static void rd_kafka_broker_retry_bufs_move (rd_kafka_broker_t *rkb, - rd_ts_t *next_wakeup) { - rd_ts_t now = rd_clock(); - rd_kafka_buf_t *rkbuf; +static void rd_kafka_broker_retry_bufs_move(rd_kafka_broker_t *rkb, + rd_ts_t *next_wakeup) { + rd_ts_t now = rd_clock(); + rd_kafka_buf_t *rkbuf; int cnt = 0; - while ((rkbuf = TAILQ_FIRST(&rkb->rkb_retrybufs.rkbq_bufs))) { - if (rkbuf->rkbuf_ts_retry > now) { + while ((rkbuf = TAILQ_FIRST(&rkb->rkb_retrybufs.rkbq_bufs))) { + if (rkbuf->rkbuf_ts_retry > now) { if (rkbuf->rkbuf_ts_retry < *next_wakeup) *next_wakeup = rkbuf->rkbuf_ts_retry; - break; + break; } - rd_kafka_bufq_deq(&rkb->rkb_retrybufs, rkbuf); + rd_kafka_bufq_deq(&rkb->rkb_retrybufs, rkbuf); rd_kafka_broker_buf_enq0(rkb, rkbuf); cnt++; - } + } if (cnt > 0) rd_rkb_dbg(rkb, BROKER, "RETRY", @@ -2833,13 +2794,13 @@ static void rd_kafka_broker_retry_bufs_move (rd_kafka_broker_t *rkb, * To avoid extra iterations, the \p err and \p status are set on * the message as they are popped off the OP_DR msgq in rd_kafka_poll() et.al */ -void rd_kafka_dr_msgq (rd_kafka_topic_t *rkt, - rd_kafka_msgq_t *rkmq, - rd_kafka_resp_err_t err) { +void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt, + rd_kafka_msgq_t *rkmq, + rd_kafka_resp_err_t err) { rd_kafka_t *rk = rkt->rkt_rk; - if (unlikely(rd_kafka_msgq_len(rkmq) == 0)) - return; + if (unlikely(rd_kafka_msgq_len(rkmq) == 0)) + return; if (err && rd_kafka_is_transactional(rk)) rd_atomic64_add(&rk->rk_eos.txn_dr_fails, @@ -2849,26 +2810,26 @@ void rd_kafka_dr_msgq (rd_kafka_topic_t *rkt, rd_kafka_interceptors_on_acknowledgement_queue(rk, rkmq, err); if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE && - (!rk->rk_conf.dr_err_only || err)) { - /* Pass all messages to application thread in one op. */ - rd_kafka_op_t *rko; + (!rk->rk_conf.dr_err_only || err)) { + /* Pass all messages to application thread in one op. 
*/ + rd_kafka_op_t *rko; - rko = rd_kafka_op_new(RD_KAFKA_OP_DR); - rko->rko_err = err; - rko->rko_u.dr.rkt = rd_kafka_topic_keep(rkt); - rd_kafka_msgq_init(&rko->rko_u.dr.msgq); + rko = rd_kafka_op_new(RD_KAFKA_OP_DR); + rko->rko_err = err; + rko->rko_u.dr.rkt = rd_kafka_topic_keep(rkt); + rd_kafka_msgq_init(&rko->rko_u.dr.msgq); - /* Move all messages to op's msgq */ - rd_kafka_msgq_move(&rko->rko_u.dr.msgq, rkmq); + /* Move all messages to op's msgq */ + rd_kafka_msgq_move(&rko->rko_u.dr.msgq, rkmq); - rd_kafka_q_enq(rk->rk_rep, rko); + rd_kafka_q_enq(rk->rk_rep, rko); - } else { - /* No delivery report callback. */ + } else { + /* No delivery report callback. */ /* Destroy the messages right away. */ rd_kafka_msgq_purge(rk, rkmq); - } + } } @@ -2878,11 +2839,11 @@ void rd_kafka_dr_msgq (rd_kafka_topic_t *rkt, * @locks none * @locality broker thread - either last or current leader */ -void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - uint64_t last_msgid) { - rd_kafka_msgq_t acked = RD_KAFKA_MSGQ_INITIALIZER(acked); - rd_kafka_msgq_t acked2 = RD_KAFKA_MSGQ_INITIALIZER(acked2); +void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + uint64_t last_msgid) { + rd_kafka_msgq_t acked = RD_KAFKA_MSGQ_INITIALIZER(acked); + rd_kafka_msgq_t acked2 = RD_KAFKA_MSGQ_INITIALIZER(acked2); rd_kafka_msg_status_t status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; if (rktp->rktp_rkt->rkt_conf.required_acks != 0) @@ -2890,8 +2851,7 @@ void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, rd_kafka_msgq_move_acked(&acked, &rktp->rktp_xmit_msgq, last_msgid, status); - rd_kafka_msgq_move_acked(&acked2, &rktp->rktp_msgq, last_msgid, - status); + rd_kafka_msgq_move_acked(&acked2, &rktp->rktp_msgq, last_msgid, status); /* Insert acked2 into acked in correct order */ rd_kafka_msgq_insert_msgq(&acked, &acked2, @@ -2900,17 +2860,18 @@ void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, if (!rd_kafka_msgq_len(&acked)) return; - 
rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "IMPLICITACK", - "%.*s [%"PRId32"] %d message(s) implicitly acked " - "by subsequent batch success " - "(msgids %"PRIu64"..%"PRIu64", " - "last acked %"PRIu64")", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&acked), - rd_kafka_msgq_first(&acked)->rkm_u.producer.msgid, - rd_kafka_msgq_last(&acked)->rkm_u.producer.msgid, - last_msgid); + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "IMPLICITACK", + "%.*s [%" PRId32 + "] %d message(s) implicitly acked " + "by subsequent batch success " + "(msgids %" PRIu64 "..%" PRIu64 + ", " + "last acked %" PRIu64 ")", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_msgq_len(&acked), + rd_kafka_msgq_first(&acked)->rkm_u.producer.msgid, + rd_kafka_msgq_last(&acked)->rkm_u.producer.msgid, + last_msgid); /* Trigger delivery reports */ rd_kafka_dr_msgq(rktp->rktp_rkt, &acked, RD_KAFKA_RESP_ERR_NO_ERROR); @@ -2918,13 +2879,6 @@ void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, - - - - - - - /** * @brief Map existing partitions to this broker using the * toppar's leader_id. 
Only undelegated partitions @@ -2933,7 +2887,7 @@ void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, * @locks none * @locality any */ -static void rd_kafka_broker_map_partitions (rd_kafka_broker_t *rkb) { +static void rd_kafka_broker_map_partitions(rd_kafka_broker_t *rkb) { rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_topic_t *rkt; int cnt = 0; @@ -2946,16 +2900,17 @@ static void rd_kafka_broker_map_partitions (rd_kafka_broker_t *rkb) { int i; rd_kafka_topic_wrlock(rkt); - for (i = 0 ; i < rkt->rkt_partition_cnt ; i++) { + for (i = 0; i < rkt->rkt_partition_cnt; i++) { rd_kafka_toppar_t *rktp = rkt->rkt_p[i]; - /* Only map undelegated partitions matching this broker*/ + /* Only map undelegated partitions matching this + * broker*/ rd_kafka_toppar_lock(rktp); if (rktp->rktp_leader_id == rkb->rkb_nodeid && !(rktp->rktp_broker && rktp->rktp_next_broker)) { rd_kafka_toppar_broker_update( - rktp, rktp->rktp_leader_id, rkb, - "broker node information updated"); + rktp, rktp->rktp_leader_id, rkb, + "broker node information updated"); cnt++; } rd_kafka_toppar_unlock(rktp); @@ -2964,7 +2919,7 @@ static void rd_kafka_broker_map_partitions (rd_kafka_broker_t *rkb) { } rd_kafka_rdunlock(rk); - rd_rkb_dbg(rkb, TOPIC|RD_KAFKA_DBG_BROKER, "LEADER", + rd_rkb_dbg(rkb, TOPIC | RD_KAFKA_DBG_BROKER, "LEADER", "Mapped %d partition(s) to broker", cnt); } @@ -2972,7 +2927,7 @@ static void rd_kafka_broker_map_partitions (rd_kafka_broker_t *rkb) { /** * @brief Broker id comparator */ -static int rd_kafka_broker_cmp_by_id (const void *_a, const void *_b) { +static int rd_kafka_broker_cmp_by_id(const void *_a, const void *_b) { const rd_kafka_broker_t *a = _a, *b = _b; return RD_CMP(a->rkb_nodeid, b->rkb_nodeid); } @@ -2984,8 +2939,8 @@ static int rd_kafka_broker_cmp_by_id (const void *_a, const void *_b) { * @locality any * @locks none */ -static void rd_kafka_broker_set_logname (rd_kafka_broker_t *rkb, - const char *logname) { +static void rd_kafka_broker_set_logname(rd_kafka_broker_t *rkb, 
+ const char *logname) { mtx_lock(&rkb->rkb_logname_lock); if (rkb->rkb_logname) rd_free(rkb->rkb_logname); @@ -3005,7 +2960,7 @@ static void rd_kafka_broker_set_logname (rd_kafka_broker_t *rkb, * @locality broker thread * @locks none */ -static void rd_kafka_broker_prepare_destroy (rd_kafka_broker_t *rkb) { +static void rd_kafka_broker_prepare_destroy(rd_kafka_broker_t *rkb) { rd_kafka_broker_monitor_del(&rkb->rkb_coord_monitor); } @@ -3018,37 +2973,28 @@ static void rd_kafka_broker_prepare_destroy (rd_kafka_broker_t *rkb) { * @locality broker thread * @locks none */ -static RD_WARN_UNUSED_RESULT -rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, - rd_kafka_op_t *rko) { +static RD_WARN_UNUSED_RESULT rd_bool_t +rd_kafka_broker_op_serve(rd_kafka_broker_t *rkb, rd_kafka_op_t *rko) { rd_kafka_toppar_t *rktp; rd_kafka_resp_err_t topic_err; rd_bool_t wakeup = rd_false; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - switch (rko->rko_type) - { - case RD_KAFKA_OP_NODE_UPDATE: - { - enum { - _UPD_NAME = 0x1, - _UPD_ID = 0x2 - } updated = 0; + switch (rko->rko_type) { + case RD_KAFKA_OP_NODE_UPDATE: { + enum { _UPD_NAME = 0x1, _UPD_ID = 0x2 } updated = 0; char brokername[RD_KAFKA_NODENAME_SIZE]; /* Need kafka_wrlock for updating rk_broker_by_id */ rd_kafka_wrlock(rkb->rkb_rk); rd_kafka_broker_lock(rkb); - if (strcmp(rkb->rkb_nodename, - rko->rko_u.node.nodename)) { + if (strcmp(rkb->rkb_nodename, rko->rko_u.node.nodename)) { rd_rkb_dbg(rkb, BROKER, "UPDATE", "Nodename changed from %s to %s", - rkb->rkb_nodename, - rko->rko_u.node.nodename); - rd_strlcpy(rkb->rkb_nodename, - rko->rko_u.node.nodename, + rkb->rkb_nodename, rko->rko_u.node.nodename); + rd_strlcpy(rkb->rkb_nodename, rko->rko_u.node.nodename, sizeof(rkb->rkb_nodename)); rkb->rkb_nodename_epoch++; updated |= _UPD_NAME; @@ -3059,14 +3005,14 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, 
rko->rko_u.node.nodeid != rkb->rkb_nodeid) { int32_t old_nodeid = rkb->rkb_nodeid; rd_rkb_dbg(rkb, BROKER, "UPDATE", - "NodeId changed from %"PRId32" to %"PRId32, - rkb->rkb_nodeid, - rko->rko_u.node.nodeid); + "NodeId changed from %" PRId32 + " to %" PRId32, + rkb->rkb_nodeid, rko->rko_u.node.nodeid); rkb->rkb_nodeid = rko->rko_u.node.nodeid; /* Update system thread name */ - rd_kafka_set_thread_sysname("rdk:broker%"PRId32, + rd_kafka_set_thread_sysname("rdk:broker%" PRId32, rkb->rkb_nodeid); /* Update broker_by_id sorted list */ @@ -3079,16 +3025,15 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, } rd_kafka_mk_brokername(brokername, sizeof(brokername), - rkb->rkb_proto, - rkb->rkb_nodename, rkb->rkb_nodeid, - RD_KAFKA_LEARNED); + rkb->rkb_proto, rkb->rkb_nodename, + rkb->rkb_nodeid, RD_KAFKA_LEARNED); if (strcmp(rkb->rkb_name, brokername)) { /* Udate the name copy used for logging. */ rd_kafka_broker_set_logname(rkb, brokername); rd_rkb_dbg(rkb, BROKER, "UPDATE", - "Name changed from %s to %s", - rkb->rkb_name, brokername); + "Name changed from %s to %s", rkb->rkb_name, + brokername); rd_strlcpy(rkb->rkb_name, brokername, sizeof(rkb->rkb_name)); } @@ -3103,13 +3048,13 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, /* Map existing partitions to this broker. */ rd_kafka_broker_map_partitions(rkb); - /* If broker is currently in state up we need - * to trigger a state change so it exits its - * state&type based .._serve() loop. */ + /* If broker is currently in state up we need + * to trigger a state change so it exits its + * state&type based .._serve() loop. 
*/ rd_kafka_broker_lock(rkb); - if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP) - rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_UPDATE); + if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP) + rd_kafka_broker_set_state( + rkb, RD_KAFKA_BROKER_STATE_UPDATE); rd_kafka_broker_unlock(rkb); } @@ -3133,22 +3078,23 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, case RD_KAFKA_OP_PARTITION_JOIN: /* - * Add partition to broker toppars - */ + * Add partition to broker toppars + */ rktp = rko->rko_rktp; rd_kafka_toppar_lock(rktp); /* Abort join if instance is terminating */ if (rd_kafka_terminating(rkb->rkb_rk) || - (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE)) { + (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE)) { rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: not joining broker: " + "Topic %s [%" PRId32 + "]: not joining broker: " "%s", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rd_kafka_terminating(rkb->rkb_rk) ? - "instance is terminating" : - "partition removed"); + rd_kafka_terminating(rkb->rkb_rk) + ? "instance is terminating" + : "partition removed"); rd_kafka_broker_destroy(rktp->rktp_next_broker); rktp->rktp_next_broker = NULL; @@ -3158,14 +3104,16 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, /* See if we are still the next broker */ if (rktp->rktp_next_broker != rkb) { - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: not joining broker " - "(next broker %s)", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_next_broker ? - rd_kafka_broker_name(rktp->rktp_next_broker): - "(none)"); + rd_rkb_dbg( + rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", + "Topic %s [%" PRId32 + "]: not joining broker " + "(next broker %s)", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rktp->rktp_next_broker + ? rd_kafka_broker_name(rktp->rktp_next_broker) + : "(none)"); /* Need temporary refcount so we can safely unlock * after q_enq(). 
*/ @@ -3182,21 +3130,21 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, } rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: joining broker " + "Topic %s [%" PRId32 + "]: joining broker " "(rktp %p, %d message(s) queued)", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, rktp, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktp, rd_kafka_msgq_len(&rktp->rktp_msgq)); rd_kafka_assert(NULL, !(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_RKB)); rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_RKB; rd_kafka_toppar_keep(rktp); rd_kafka_broker_lock(rkb); - TAILQ_INSERT_TAIL(&rkb->rkb_toppars, rktp, rktp_rkblink); - rkb->rkb_toppar_cnt++; + TAILQ_INSERT_TAIL(&rkb->rkb_toppars, rktp, rktp_rkblink); + rkb->rkb_toppar_cnt++; rd_kafka_broker_unlock(rkb); - rktp->rktp_broker = rkb; + rktp->rktp_broker = rkb; rd_assert(!rktp->rktp_msgq_wakeup_q); rktp->rktp_msgq_wakeup_q = rd_kafka_q_keep(rkb->rkb_ops); rd_kafka_broker_keep(rkb); @@ -3209,10 +3157,10 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, * the previous leader to finish before * producing anything to this new leader. */ rd_kafka_idemp_drain_toppar( - rktp, - "wait for outstanding requests to " - "finish before producing to " - "new leader"); + rktp, + "wait for outstanding requests to " + "finish before producing to " + "new leader"); } } @@ -3221,83 +3169,83 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, rd_kafka_toppar_unlock(rktp); - rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); break; case RD_KAFKA_OP_PARTITION_LEAVE: /* - * Remove partition from broker toppars - */ + * Remove partition from broker toppars + */ rktp = rko->rko_rktp; /* If there is a topic-wide error, use it as error code * when failing messages below. 
*/ topic_err = rd_kafka_topic_get_error(rktp->rktp_rkt); - rd_kafka_toppar_lock(rktp); - - /* Multiple PARTITION_LEAVEs are possible during partition - * migration, make sure we're supposed to handle this one. */ - if (unlikely(rktp->rktp_broker != rkb)) { - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: " - "ignoring PARTITION_LEAVE: " - "not delegated to broker (%s)", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_broker ? - rd_kafka_broker_name(rktp->rktp_broker) : - "none"); - rd_kafka_toppar_unlock(rktp); - break; - } - rd_kafka_toppar_unlock(rktp); - - /* Remove from fetcher list */ - rd_kafka_toppar_fetch_decide(rktp, rkb, 1/*force remove*/); + rd_kafka_toppar_lock(rktp); + + /* Multiple PARTITION_LEAVEs are possible during partition + * migration, make sure we're supposed to handle this one. */ + if (unlikely(rktp->rktp_broker != rkb)) { + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", + "Topic %s [%" PRId32 + "]: " + "ignoring PARTITION_LEAVE: " + "not delegated to broker (%s)", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rktp->rktp_broker + ? rd_kafka_broker_name(rktp->rktp_broker) + : "none"); + rd_kafka_toppar_unlock(rktp); + break; + } + rd_kafka_toppar_unlock(rktp); + + /* Remove from fetcher list */ + rd_kafka_toppar_fetch_decide(rktp, rkb, 1 /*force remove*/); if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) { /* Purge any ProduceRequests for this toppar * in the output queue. 
*/ rd_kafka_broker_bufq_purge_by_toppar( - rkb, - &rkb->rkb_outbufs, - RD_KAFKAP_Produce, rktp, - RD_KAFKA_RESP_ERR__RETRY); + rkb, &rkb->rkb_outbufs, RD_KAFKAP_Produce, rktp, + RD_KAFKA_RESP_ERR__RETRY); } - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: leaving broker " - "(%d messages in xmitq, next broker %s, rktp %p)", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rd_kafka_msgq_len(&rktp->rktp_xmit_msgq), - rktp->rktp_next_broker ? - rd_kafka_broker_name(rktp->rktp_next_broker) : - "(none)", rktp); + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", + "Topic %s [%" PRId32 + "]: leaving broker " + "(%d messages in xmitq, next broker %s, rktp %p)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_msgq_len(&rktp->rktp_xmit_msgq), + rktp->rktp_next_broker + ? rd_kafka_broker_name(rktp->rktp_next_broker) + : "(none)", + rktp); /* Insert xmitq(broker-local) messages to the msgq(global) * at their sorted position to maintain ordering. */ - rd_kafka_msgq_insert_msgq(&rktp->rktp_msgq, - &rktp->rktp_xmit_msgq, - rktp->rktp_rkt->rkt_conf. 
- msg_order_cmp); + rd_kafka_msgq_insert_msgq( + &rktp->rktp_msgq, &rktp->rktp_xmit_msgq, + rktp->rktp_rkt->rkt_conf.msg_order_cmp); if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) rd_kafka_broker_active_toppar_del(rkb, rktp, "leaving"); rd_kafka_broker_lock(rkb); - TAILQ_REMOVE(&rkb->rkb_toppars, rktp, rktp_rkblink); - rkb->rkb_toppar_cnt--; + TAILQ_REMOVE(&rkb->rkb_toppars, rktp, rktp_rkblink); + rkb->rkb_toppar_cnt--; rd_kafka_broker_unlock(rkb); rd_kafka_broker_destroy(rktp->rktp_broker); if (rktp->rktp_msgq_wakeup_q) { rd_kafka_q_destroy(rktp->rktp_msgq_wakeup_q); rktp->rktp_msgq_wakeup_q = NULL; } - rktp->rktp_broker = NULL; + rktp->rktp_broker = NULL; rd_assert(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_RKB); rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_RKB; @@ -3308,25 +3256,28 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, rd_kafka_q_enq(rktp->rktp_next_broker->rkb_ops, rko); rko = NULL; } else { - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: no next broker, " - "failing %d message(s) in partition queue", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_msgq_len(&rktp->rktp_msgq)); - rd_kafka_assert(NULL, rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0); - rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq, - rd_kafka_terminating(rkb->rkb_rk) ? - RD_KAFKA_RESP_ERR__DESTROY : - (topic_err ? topic_err : - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)); - - } + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", + "Topic %s [%" PRId32 + "]: no next broker, " + "failing %d message(s) in partition queue", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_msgq_len(&rktp->rktp_msgq)); + rd_kafka_assert(NULL, rd_kafka_msgq_len( + &rktp->rktp_xmit_msgq) == 0); + rd_kafka_dr_msgq( + rktp->rktp_rkt, &rktp->rktp_msgq, + rd_kafka_terminating(rkb->rkb_rk) + ? RD_KAFKA_RESP_ERR__DESTROY + : (topic_err + ? 
topic_err + : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)); + } rd_kafka_toppar_unlock(rktp); rd_kafka_toppar_destroy(rktp); /* from JOIN */ - rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); break; case RD_KAFKA_OP_TERMINATE: @@ -3336,8 +3287,8 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, "%d refcnts, %d toppar(s), %d active toppar(s), " "%d outbufs, %d waitresps, %d retrybufs", rd_kafka_broker_state_names[rkb->rkb_state], - rd_refcnt_get(&rkb->rkb_refcnt), - rkb->rkb_toppar_cnt, rkb->rkb_active_toppar_cnt, + rd_refcnt_get(&rkb->rkb_refcnt), rkb->rkb_toppar_cnt, + rkb->rkb_active_toppar_cnt, (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs), (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps), (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs)); @@ -3345,8 +3296,7 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, * and trigger a state change. * This makes sure any eonce dependent on state changes * are triggered. */ - rd_kafka_broker_fail(rkb, LOG_DEBUG, - RD_KAFKA_RESP_ERR__DESTROY, + rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__DESTROY, "Client is terminating"); rd_kafka_broker_prepare_destroy(rkb); @@ -3371,7 +3321,7 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, rkb->rkb_persistconn.internal++; rd_kafka_broker_lock(rkb); rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); + rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); rd_kafka_broker_unlock(rkb); } else if (rkb->rkb_state >= @@ -3382,16 +3332,16 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, * close the current connection. 
*/ rd_kafka_broker_lock(rkb); - do_disconnect = (rkb->rkb_connect_epoch != - rkb->rkb_nodename_epoch); + do_disconnect = + (rkb->rkb_connect_epoch != rkb->rkb_nodename_epoch); rd_kafka_broker_unlock(rkb); if (do_disconnect) rd_kafka_broker_fail( - rkb, LOG_DEBUG, - RD_KAFKA_RESP_ERR__TRANSPORT, - "Closing connection due to " - "nodename change"); + rkb, LOG_DEBUG, + RD_KAFKA_RESP_ERR__TRANSPORT, + "Closing connection due to " + "nodename change"); } /* Expedite next reconnect */ @@ -3417,8 +3367,8 @@ rd_bool_t rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, * @brief Serve broker ops. * @returns the number of ops served */ -static RD_WARN_UNUSED_RESULT -int rd_kafka_broker_ops_serve (rd_kafka_broker_t *rkb, rd_ts_t timeout_us) { +static RD_WARN_UNUSED_RESULT int +rd_kafka_broker_ops_serve(rd_kafka_broker_t *rkb, rd_ts_t timeout_us) { rd_kafka_op_t *rko; int cnt = 0; @@ -3448,9 +3398,8 @@ int rd_kafka_broker_ops_serve (rd_kafka_broker_t *rkb, rd_ts_t timeout_us) { * @locality broker thread * @locks none */ -static RD_WARN_UNUSED_RESULT -rd_bool_t rd_kafka_broker_ops_io_serve (rd_kafka_broker_t *rkb, - rd_ts_t abs_timeout) { +static RD_WARN_UNUSED_RESULT rd_bool_t +rd_kafka_broker_ops_io_serve(rd_kafka_broker_t *rkb, rd_ts_t abs_timeout) { rd_ts_t now; rd_bool_t wakeup; @@ -3459,8 +3408,8 @@ rd_bool_t rd_kafka_broker_ops_io_serve (rd_kafka_broker_t *rkb, else if (unlikely(rd_kafka_broker_needs_connection(rkb))) abs_timeout = RD_POLL_NOWAIT; else if (unlikely(abs_timeout == RD_POLL_INFINITE)) - abs_timeout = rd_clock() + - ((rd_ts_t)rd_kafka_max_block_ms * 1000); + abs_timeout = + rd_clock() + ((rd_ts_t)rd_kafka_max_block_ms * 1000); if (likely(rkb->rkb_transport != NULL)) { @@ -3470,23 +3419,23 @@ rd_bool_t rd_kafka_broker_ops_io_serve (rd_kafka_broker_t *rkb, * use a timeout or not. 
*/ if (rd_kafka_transport_io_serve( - rkb->rkb_transport, rkb->rkb_ops, - rd_timeout_remains(abs_timeout))) + rkb->rkb_transport, rkb->rkb_ops, + rd_timeout_remains(abs_timeout))) abs_timeout = RD_POLL_NOWAIT; } /* Serve broker ops */ - wakeup = rd_kafka_broker_ops_serve(rkb, - rd_timeout_remains_us(abs_timeout)); + wakeup = + rd_kafka_broker_ops_serve(rkb, rd_timeout_remains_us(abs_timeout)); /* An op might have triggered the need for a connection, if so * transition to TRY_CONNECT state. */ if (unlikely(rd_kafka_broker_needs_connection(rkb) && rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT)) { rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); + rd_kafka_broker_set_state(rkb, + RD_KAFKA_BROKER_STATE_TRY_CONNECT); rd_kafka_broker_unlock(rkb); wakeup = rd_true; } @@ -3508,7 +3457,7 @@ rd_bool_t rd_kafka_broker_ops_io_serve (rd_kafka_broker_t *rkb, * * @locality broker thread */ -static rd_ts_t rd_kafka_broker_consumer_toppars_serve (rd_kafka_broker_t *rkb) { +static rd_ts_t rd_kafka_broker_consumer_toppars_serve(rd_kafka_broker_t *rkb) { rd_kafka_toppar_t *rktp, *rktp_tmp; rd_ts_t min_backoff = RD_TS_MAX; @@ -3537,10 +3486,10 @@ static rd_ts_t rd_kafka_broker_consumer_toppars_serve (rd_kafka_broker_t *rkb) { * @locality toppar's broker handler thread * @locks toppar_lock MUST be held */ -static int rd_kafka_broker_toppar_msgq_scan (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_ts_t now, - rd_ts_t *abs_next_timeout) { +static int rd_kafka_broker_toppar_msgq_scan(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_ts_t now, + rd_ts_t *abs_next_timeout) { rd_kafka_msgq_t xtimedout = RD_KAFKA_MSGQ_INITIALIZER(xtimedout); rd_kafka_msgq_t qtimedout = RD_KAFKA_MSGQ_INITIALIZER(qtimedout); int xcnt, qcnt, cnt; @@ -3549,13 +3498,13 @@ static int rd_kafka_broker_toppar_msgq_scan (rd_kafka_broker_t *rkb, *abs_next_timeout = 0; - xcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_xmit_msgq, - &xtimedout, now, &next); + 
xcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_xmit_msgq, &xtimedout, + now, &next); if (next && next < *abs_next_timeout) *abs_next_timeout = next; - qcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_msgq, - &qtimedout, now, &next); + qcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_msgq, &qtimedout, now, + &next); if (next && (!*abs_next_timeout || next < *abs_next_timeout)) *abs_next_timeout = next; @@ -3568,13 +3517,15 @@ static int rd_kafka_broker_toppar_msgq_scan (rd_kafka_broker_t *rkb, rktp->rktp_rkt->rkt_conf.msg_order_cmp); first = rd_kafka_msgq_first(&xtimedout)->rkm_u.producer.msgid; - last = rd_kafka_msgq_last(&xtimedout)->rkm_u.producer.msgid; + last = rd_kafka_msgq_last(&xtimedout)->rkm_u.producer.msgid; rd_rkb_dbg(rkb, MSG, "TIMEOUT", - "%s [%"PRId32"]: timed out %d+%d message(s) " - "(MsgId %"PRIu64"..%"PRIu64"): message.timeout.ms exceeded", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - xcnt, qcnt, first, last); + "%s [%" PRId32 + "]: timed out %d+%d message(s) " + "(MsgId %" PRIu64 "..%" PRIu64 + "): message.timeout.ms exceeded", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, xcnt, + qcnt, first, last); /* Trigger delivery report for timed out messages */ rd_kafka_dr_msgq(rktp->rktp_rkt, &xtimedout, @@ -3593,10 +3544,10 @@ static int rd_kafka_broker_toppar_msgq_scan (rd_kafka_broker_t *rkb, * * @locality internal broker thread. */ -static rd_ts_t -rd_kafka_broker_toppars_timeout_scan (rd_kafka_broker_t *rkb, rd_ts_t now) { +static rd_ts_t rd_kafka_broker_toppars_timeout_scan(rd_kafka_broker_t *rkb, + rd_ts_t now) { rd_kafka_toppar_t *rktp; - rd_ts_t next = now + (1000*1000); + rd_ts_t next = now + (1000 * 1000); TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) { rd_ts_t this_next; @@ -3626,8 +3577,8 @@ rd_kafka_broker_toppars_timeout_scan (rd_kafka_broker_t *rkb, rd_ts_t now) { /** * @brief Idle function for the internal broker handle. 
*/ -static void rd_kafka_broker_internal_serve (rd_kafka_broker_t *rkb, - rd_ts_t abs_timeout) { +static void rd_kafka_broker_internal_serve(rd_kafka_broker_t *rkb, + rd_ts_t abs_timeout) { int initial_state = rkb->rkb_state; rd_bool_t wakeup; @@ -3639,8 +3590,7 @@ static void rd_kafka_broker_internal_serve (rd_kafka_broker_t *rkb, wakeup = rd_kafka_broker_ops_io_serve(rkb, abs_timeout); } while (!rd_kafka_broker_terminating(rkb) && - (int)rkb->rkb_state == initial_state && - !wakeup && + (int)rkb->rkb_state == initial_state && !wakeup && !rd_timeout_expired(rd_timeout_remains(abs_timeout))); } else { /* Producer */ @@ -3651,15 +3601,14 @@ static void rd_kafka_broker_internal_serve (rd_kafka_broker_t *rkb, if (now >= next_timeout_scan) next_timeout_scan = - rd_kafka_broker_toppars_timeout_scan( - rkb, now); + rd_kafka_broker_toppars_timeout_scan(rkb, + now); wakeup = rd_kafka_broker_ops_io_serve( - rkb, RD_MIN(abs_timeout, next_timeout_scan)); + rkb, RD_MIN(abs_timeout, next_timeout_scan)); } while (!rd_kafka_broker_terminating(rkb) && - (int)rkb->rkb_state == initial_state && - !wakeup && + (int)rkb->rkb_state == initial_state && !wakeup && !rd_timeout_expired(rd_timeout_remains(abs_timeout))); } } @@ -3671,7 +3620,7 @@ static void rd_kafka_broker_internal_serve (rd_kafka_broker_t *rkb, */ static RD_INLINE unsigned int -rd_kafka_broker_outbufs_space (rd_kafka_broker_t *rkb) { +rd_kafka_broker_outbufs_space(rd_kafka_broker_t *rkb) { int r = rkb->rkb_rk->rk_conf.queue_backpressure_thres - rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt); return r < 0 ? 
0 : (unsigned int)r; @@ -3694,21 +3643,21 @@ rd_kafka_broker_outbufs_space (rd_kafka_broker_t *rkb) { * @locks none * @locality broker thread */ -static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - const rd_kafka_pid_t pid, - rd_ts_t now, - rd_ts_t *next_wakeup, - rd_bool_t do_timeout_scan, - rd_bool_t may_send, - rd_bool_t flushing) { +static int rd_kafka_toppar_producer_serve(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const rd_kafka_pid_t pid, + rd_ts_t now, + rd_ts_t *next_wakeup, + rd_bool_t do_timeout_scan, + rd_bool_t may_send, + rd_bool_t flushing) { int cnt = 0; int r; rd_kafka_msg_t *rkm; int move_cnt = 0; int max_requests; int reqcnt; - int inflight = 0; + int inflight = 0; uint64_t epoch_base_msgid = 0; /* By limiting the number of not-yet-sent buffers (rkb_outbufs) we @@ -3733,8 +3682,8 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, rd_ts_t next; /* Scan queues for msg timeouts */ - timeoutcnt = rd_kafka_broker_toppar_msgq_scan(rkb, rktp, now, - &next); + timeoutcnt = + rd_kafka_broker_toppar_msgq_scan(rkb, rktp, now, &next); if (next && next < *next_wakeup) *next_wakeup = next; @@ -3757,13 +3706,11 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, rd_kafka_toppar_unlock(rktp); rd_kafka_idemp_drain_epoch_bump( - rkb->rkb_rk, - RD_KAFKA_RESP_ERR__TIMED_OUT, - "%d message(s) timed out " - "on %s [%"PRId32"]", - timeoutcnt, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition); + rkb->rkb_rk, RD_KAFKA_RESP_ERR__TIMED_OUT, + "%d message(s) timed out " + "on %s [%" PRId32 "]", + timeoutcnt, rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition); return 0; } } @@ -3786,10 +3733,9 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, /* Move messages from locked partition produce queue * to broker-local xmit queue. 
*/ if ((move_cnt = rktp->rktp_msgq.rkmq_msg_cnt) > 0) - rd_kafka_msgq_insert_msgq(&rktp->rktp_xmit_msgq, - &rktp->rktp_msgq, - rktp->rktp_rkt->rkt_conf. - msg_order_cmp); + rd_kafka_msgq_insert_msgq( + &rktp->rktp_xmit_msgq, &rktp->rktp_msgq, + rktp->rktp_rkt->rkt_conf.msg_order_cmp); } rd_kafka_toppar_unlock(rktp); @@ -3804,21 +3750,20 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, /* Flush any ProduceRequests for this partition in the * output buffer queue to speed up recovery. */ rd_kafka_broker_bufq_purge_by_toppar( - rkb, - &rkb->rkb_outbufs, - RD_KAFKAP_Produce, rktp, - RD_KAFKA_RESP_ERR__RETRY); + rkb, &rkb->rkb_outbufs, RD_KAFKAP_Produce, rktp, + RD_KAFKA_RESP_ERR__RETRY); did_purge = rd_true; if (rd_kafka_pid_valid(rktp->rktp_eos.pid)) - rd_rkb_dbg(rkb, QUEUE, "TOPPAR", - "%.*s [%"PRId32"] PID has changed: " - "must drain requests for all " - "partitions before resuming reset " - "of PID", - RD_KAFKAP_STR_PR(rktp->rktp_rkt-> - rkt_topic), - rktp->rktp_partition); + rd_rkb_dbg( + rkb, QUEUE, "TOPPAR", + "%.*s [%" PRId32 + "] PID has changed: " + "must drain requests for all " + "partitions before resuming reset " + "of PID", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); } inflight = rd_atomic32_get(&rktp->rktp_msgs_inflight); @@ -3832,31 +3777,31 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, * has changed, or timed out messages * have been removed from the queue. 
*/ - rd_rkb_dbg(rkb, QUEUE, "TOPPAR", - "%.*s [%"PRId32"] waiting for " - "%d in-flight request(s) to drain " - "from queue before continuing " - "to produce", - RD_KAFKAP_STR_PR(rktp->rktp_rkt-> - rkt_topic), - rktp->rktp_partition, - inflight); + rd_rkb_dbg( + rkb, QUEUE, "TOPPAR", + "%.*s [%" PRId32 + "] waiting for " + "%d in-flight request(s) to drain " + "from queue before continuing " + "to produce", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, inflight); /* Flush any ProduceRequests for this * partition in the output buffer queue to * speed up draining. */ if (!did_purge) rd_kafka_broker_bufq_purge_by_toppar( - rkb, - &rkb->rkb_outbufs, - RD_KAFKAP_Produce, rktp, - RD_KAFKA_RESP_ERR__RETRY); + rkb, &rkb->rkb_outbufs, + RD_KAFKAP_Produce, rktp, + RD_KAFKA_RESP_ERR__RETRY); return 0; } rd_rkb_dbg(rkb, QUEUE, "TOPPAR", - "%.*s [%"PRId32"] all in-flight requests " + "%.*s [%" PRId32 + "] all in-flight requests " "drained from queue", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition); @@ -3882,11 +3827,11 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, rd_kafka_msgq_verify_order(rktp, &rktp->rktp_xmit_msgq, 0, rd_false); rd_rkb_dbg(rkb, QUEUE, "TOPPAR", - "%.*s [%"PRId32"] %d message(s) in " + "%.*s [%" PRId32 + "] %d message(s) in " "xmit queue (%d added from partition queue)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - r, move_cnt); + rktp->rktp_partition, r, move_cnt); rkm = TAILQ_FIRST(&rktp->rktp_xmit_msgq.rkmq_msgs); rd_dassert(rkm != NULL); @@ -3899,7 +3844,7 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, * are outstanding messages in-flight, in which case * we eventually come back here to retry. 
*/ if (!rd_kafka_toppar_pid_change( - rktp, pid, rkm->rkm_u.producer.msgid)) + rktp, pid, rkm->rkm_u.producer.msgid)) return 0; } @@ -3914,27 +3859,26 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, if (unlikely(rkb->rkb_state != RD_KAFKA_BROKER_STATE_UP)) { /* There are messages to send but connection is not up. */ rd_rkb_dbg(rkb, BROKER, "TOPPAR", - "%.*s [%"PRId32"] " + "%.*s [%" PRId32 + "] " "%d message(s) queued but broker not up", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - r); + rktp->rktp_partition, r); rkb->rkb_persistconn.internal++; return 0; } /* Attempt to fill the batch size, but limit our waiting * to queue.buffering.max.ms, batch.num.messages, and batch.size. */ - if (!flushing && - r < rkb->rkb_rk->rk_conf.batch_num_messages && + if (!flushing && r < rkb->rkb_rk->rk_conf.batch_num_messages && rktp->rktp_xmit_msgq.rkmq_msg_bytes < - (int64_t)rkb->rkb_rk->rk_conf.batch_size) { + (int64_t)rkb->rkb_rk->rk_conf.batch_size) { rd_ts_t wait_max; /* Calculate maximum wait-time to honour * queue.buffering.max.ms contract. */ wait_max = rd_kafka_msg_enq_time(rkm) + - rkb->rkb_rk->rk_conf.buffering_max_us; + rkb->rkb_rk->rk_conf.buffering_max_us; if (wait_max > now) { /* Wait for more messages or queue.buffering.max.ms @@ -3955,7 +3899,7 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, /* Send Produce requests for this toppar, honouring the * queue backpressure threshold. */ - for (reqcnt = 0 ; reqcnt < max_requests ; reqcnt++) { + for (reqcnt = 0; reqcnt < max_requests; reqcnt++) { r = rd_kafka_ProduceRequest(rkb, rktp, pid, epoch_base_msgid); if (likely(r > 0)) cnt += r; @@ -3981,16 +3925,16 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, * * @returns the total number of messages produced. 
*/ -static int rd_kafka_broker_produce_toppars (rd_kafka_broker_t *rkb, - rd_ts_t now, - rd_ts_t *next_wakeup, - rd_bool_t do_timeout_scan) { +static int rd_kafka_broker_produce_toppars(rd_kafka_broker_t *rkb, + rd_ts_t now, + rd_ts_t *next_wakeup, + rd_bool_t do_timeout_scan) { rd_kafka_toppar_t *rktp; - int cnt = 0; + int cnt = 0; rd_ts_t ret_next_wakeup = *next_wakeup; - rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER; - rd_bool_t may_send = rd_true; - rd_bool_t flushing = rd_false; + rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER; + rd_bool_t may_send = rd_true; + rd_bool_t flushing = rd_false; /* Round-robin serve each toppar. */ rktp = rkb->rkb_active_toppar_next; @@ -4023,22 +3967,20 @@ static int rd_kafka_broker_produce_toppars (rd_kafka_broker_t *rkb, /* Try producing toppar */ cnt += rd_kafka_toppar_producer_serve( - rkb, rktp, pid, now, &this_next_wakeup, - do_timeout_scan, may_send, flushing); + rkb, rktp, pid, now, &this_next_wakeup, do_timeout_scan, + may_send, flushing); if (this_next_wakeup < ret_next_wakeup) ret_next_wakeup = this_next_wakeup; - } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb-> - rkb_active_toppars, - rktp, rktp_activelink)) != + } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, + rktp_activelink)) != rkb->rkb_active_toppar_next); /* Update next starting toppar to produce in round-robin list. 
*/ rd_kafka_broker_active_toppar_next( - rkb, - CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, - rktp, rktp_activelink)); + rkb, + CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, rktp_activelink)); *next_wakeup = ret_next_wakeup; @@ -4048,8 +3990,8 @@ static int rd_kafka_broker_produce_toppars (rd_kafka_broker_t *rkb, /** * @brief Producer serving */ -static void rd_kafka_broker_producer_serve (rd_kafka_broker_t *rkb, - rd_ts_t abs_timeout) { +static void rd_kafka_broker_producer_serve(rd_kafka_broker_t *rkb, + rd_ts_t abs_timeout) { rd_interval_t timeout_scan; unsigned int initial_state = rkb->rkb_state; rd_ts_t now; @@ -4059,7 +4001,7 @@ static void rd_kafka_broker_producer_serve (rd_kafka_broker_t *rkb, rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - rd_kafka_broker_lock(rkb); + rd_kafka_broker_lock(rkb); while (!rd_kafka_broker_terminating(rkb) && rkb->rkb_state == initial_state && @@ -4067,55 +4009,51 @@ static void rd_kafka_broker_producer_serve (rd_kafka_broker_t *rkb, rd_bool_t do_timeout_scan; rd_ts_t next_wakeup = abs_timeout; - rd_kafka_broker_unlock(rkb); + rd_kafka_broker_unlock(rkb); /* Perform timeout scan on first iteration, thus * on each state change, to make sure messages in * partition rktp_xmit_msgq are timed out before * being attempted to re-transmit. 
*/ - do_timeout_scan = cnt++ == 0 || - rd_interval(&timeout_scan, 1000*1000, now) >= 0; + do_timeout_scan = + cnt++ == 0 || + rd_interval(&timeout_scan, 1000 * 1000, now) >= 0; rd_kafka_broker_produce_toppars(rkb, now, &next_wakeup, do_timeout_scan); - /* Check and move retry buffers */ - if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0)) - rd_kafka_broker_retry_bufs_move(rkb, &next_wakeup); + /* Check and move retry buffers */ + if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0)) + rd_kafka_broker_retry_bufs_move(rkb, &next_wakeup); if (rd_kafka_broker_ops_io_serve(rkb, next_wakeup)) return; /* Wakeup */ - rd_kafka_broker_lock(rkb); - } + rd_kafka_broker_lock(rkb); + } - rd_kafka_broker_unlock(rkb); + rd_kafka_broker_unlock(rkb); } - - - - /** * Backoff the next Fetch request (due to error). */ -static void rd_kafka_broker_fetch_backoff (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err) { - int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; +static void rd_kafka_broker_fetch_backoff(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err) { + int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; rkb->rkb_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000); - rd_rkb_dbg(rkb, FETCH, "BACKOFF", - "Fetch backoff for %dms: %s", + rd_rkb_dbg(rkb, FETCH, "BACKOFF", "Fetch backoff for %dms: %s", backoff_ms, rd_kafka_err2str(err)); } /** * @brief Backoff the next Fetch for specific partition */ -static void rd_kafka_toppar_fetch_backoff (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err) { +static void rd_kafka_toppar_fetch_backoff(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err) { int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; /* Don't back off on reaching end of partition */ @@ -4130,10 +4068,9 @@ static void rd_kafka_toppar_fetch_backoff (rd_kafka_broker_t *rkb, rktp->rktp_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000); rd_rkb_dbg(rkb, FETCH, "BACKOFF", - 
"%s [%"PRId32"]: Fetch backoff for %dms%s%s", + "%s [%" PRId32 "]: Fetch backoff for %dms%s%s", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - backoff_ms, - err ? ": " : "", + backoff_ms, err ? ": " : "", err ? rd_kafka_err2str(err) : ""); } @@ -4146,17 +4083,16 @@ static void rd_kafka_toppar_fetch_backoff (rd_kafka_broker_t *rkb, * * @locality broker thread */ -static void -rd_kafka_fetch_preferred_replica_handle (rd_kafka_toppar_t *rktp, - rd_kafka_buf_t *rkbuf, - rd_kafka_broker_t *rkb, - int32_t preferred_id) { - const rd_ts_t one_minute = 60*1000*1000; - const rd_ts_t five_seconds = 5*1000*1000; +static void rd_kafka_fetch_preferred_replica_handle(rd_kafka_toppar_t *rktp, + rd_kafka_buf_t *rkbuf, + rd_kafka_broker_t *rkb, + int32_t preferred_id) { + const rd_ts_t one_minute = 60 * 1000 * 1000; + const rd_ts_t five_seconds = 5 * 1000 * 1000; rd_kafka_broker_t *preferred_rkb; rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; - rd_ts_t new_intvl = rd_interval_immediate(&rktp->rktp_new_lease_intvl, - one_minute, 0); + rd_ts_t new_intvl = + rd_interval_immediate(&rktp->rktp_new_lease_intvl, one_minute, 0); if (new_intvl < 0) { /* In lieu of KIP-320, the toppar is delegated back to @@ -4175,69 +4111,68 @@ rd_kafka_fetch_preferred_replica_handle (rd_kafka_toppar_t *rktp, if (rd_interval_immediate(&rktp->rktp_new_lease_log_intvl, one_minute, 0) > 0) rd_rkb_log(rkb, LOG_NOTICE, "FETCH", - "%.*s [%"PRId32"]: preferred replica " - "(%"PRId32") lease changing too quickly " - "(%"PRId64"s < 60s): possibly due to " + "%.*s [%" PRId32 + "]: preferred replica " + "(%" PRId32 + ") lease changing too quickly " + "(%" PRId64 + "s < 60s): possibly due to " "unavailable replica or stale cluster " "state: backing off next fetch", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - preferred_id, - (one_minute - -new_intvl)/(1000*1000)); + rktp->rktp_partition, preferred_id, + (one_minute - -new_intvl) / (1000 * 1000)); - rd_kafka_toppar_fetch_backoff(rkb, - rktp, 
RD_KAFKA_RESP_ERR_NO_ERROR); + rd_kafka_toppar_fetch_backoff(rkb, rktp, + RD_KAFKA_RESP_ERR_NO_ERROR); } rd_kafka_rdlock(rk); - preferred_rkb = rd_kafka_broker_find_by_nodeid(rk, - preferred_id); + preferred_rkb = rd_kafka_broker_find_by_nodeid(rk, preferred_id); rd_kafka_rdunlock(rk); if (preferred_rkb) { rd_interval_reset_to_now(&rktp->rktp_lease_intvl, 0); rd_kafka_toppar_lock(rktp); - rd_kafka_toppar_broker_update(rktp, preferred_id, - preferred_rkb, + rd_kafka_toppar_broker_update(rktp, preferred_id, preferred_rkb, "preferred replica updated"); rd_kafka_toppar_unlock(rktp); rd_kafka_broker_destroy(preferred_rkb); return; } - if (rd_interval_immediate(&rktp->rktp_metadata_intvl, - five_seconds, 0) > 0) { + if (rd_interval_immediate(&rktp->rktp_metadata_intvl, five_seconds, 0) > + 0) { rd_rkb_log(rkb, LOG_NOTICE, "FETCH", - "%.*s [%"PRId32"]: preferred replica (%"PRId32") " + "%.*s [%" PRId32 "]: preferred replica (%" PRId32 + ") " "is unknown: refreshing metadata", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - preferred_id); + rktp->rktp_partition, preferred_id); rd_kafka_metadata_refresh_brokers( - rktp->rktp_rkt->rkt_rk, NULL, - "preferred replica unavailable"); + rktp->rktp_rkt->rkt_rk, NULL, + "preferred replica unavailable"); } - rd_kafka_toppar_fetch_backoff( - rkb, rktp, RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE); + rd_kafka_toppar_fetch_backoff(rkb, rktp, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE); } /** * @brief Handle partition-specific Fetch error. 
*/ -static void rd_kafka_fetch_reply_handle_partition_error ( - rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - const struct rd_kafka_toppar_ver *tver, - rd_kafka_resp_err_t err, - int64_t HighwaterMarkOffset) { +static void rd_kafka_fetch_reply_handle_partition_error( + rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const struct rd_kafka_toppar_ver *tver, + rd_kafka_resp_err_t err, + int64_t HighwaterMarkOffset) { /* Some errors should be passed to the * application while some handled by rdkafka */ - switch (err) - { + switch (err) { /* Errors handled by rdkafka */ case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: @@ -4261,34 +4196,32 @@ static void rd_kafka_fetch_reply_handle_partition_error ( * Handle by retrying FETCH (with backoff). */ rd_rkb_dbg(rkb, MSG, "FETCH", - "Topic %s [%"PRId32"]: Offset %"PRId64" not " - "available on broker %"PRId32" (leader %"PRId32"): " + "Topic %s [%" PRId32 "]: Offset %" PRId64 + " not " + "available on broker %" PRId32 " (leader %" PRId32 + "): " "retrying", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rktp->rktp_offsets. 
- fetch_offset, - rktp->rktp_broker_id, - rktp->rktp_leader_id); + rktp->rktp_offsets.fetch_offset, + rktp->rktp_broker_id, rktp->rktp_leader_id); break; - case RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE: - { + case RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE: { int64_t err_offset; if (rktp->rktp_broker_id != rktp->rktp_leader_id && rktp->rktp_offsets.fetch_offset > HighwaterMarkOffset) { - rd_kafka_log(rkb->rkb_rk, - LOG_WARNING, "FETCH", - "Topic %s [%"PRId32"]: Offset %"PRId64 - " out of range (HighwaterMark %"PRId64 + rd_kafka_log(rkb->rkb_rk, LOG_WARNING, "FETCH", + "Topic %s [%" PRId32 "]: Offset %" PRId64 + " out of range (HighwaterMark %" PRId64 " fetching from " - "broker %"PRId32" (leader %"PRId32"): " + "broker %" PRId32 " (leader %" PRId32 + "): " "reverting to leader", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, rktp->rktp_offsets.fetch_offset, - HighwaterMarkOffset, - rktp->rktp_broker_id, + HighwaterMarkOffset, rktp->rktp_broker_id, rktp->rktp_leader_id); /* Out of range error cannot be taken as definitive @@ -4305,8 +4238,7 @@ static void rd_kafka_fetch_reply_handle_partition_error ( rd_kafka_offset_reset(rktp, err_offset, err, "fetch failed due to requested offset " "not available on the broker"); - } - break; + } break; case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED: /* If we're not authorized to access the @@ -4314,15 +4246,11 @@ static void rd_kafka_fetch_reply_handle_partition_error ( * further Fetch requests. 
*/ if (rktp->rktp_last_error != err) { rd_kafka_consumer_err( - rktp->rktp_fetchq, - rd_kafka_broker_id(rkb), - err, - tver->version, - NULL, rktp, - rktp->rktp_offsets.fetch_offset, - "Fetch from broker %"PRId32" failed: %s", - rd_kafka_broker_id(rkb), - rd_kafka_err2str(err)); + rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err, + tver->version, NULL, rktp, + rktp->rktp_offsets.fetch_offset, + "Fetch from broker %" PRId32 " failed: %s", + rd_kafka_broker_id(rkb), rd_kafka_err2str(err)); rktp->rktp_last_error = err; } break; @@ -4331,32 +4259,27 @@ static void rd_kafka_fetch_reply_handle_partition_error ( /* Application errors */ case RD_KAFKA_RESP_ERR__PARTITION_EOF: if (rkb->rkb_rk->rk_conf.enable_partition_eof) - rd_kafka_consumer_err( - rktp->rktp_fetchq, - rd_kafka_broker_id(rkb), - err, tver->version, - NULL, rktp, - rktp->rktp_offsets.fetch_offset, - "Fetch from broker %"PRId32" reached end of " - "partition at offset %"PRId64 - " (HighwaterMark %"PRId64")", - rd_kafka_broker_id(rkb), - rktp->rktp_offsets.fetch_offset, - HighwaterMarkOffset); + rd_kafka_consumer_err(rktp->rktp_fetchq, + rd_kafka_broker_id(rkb), err, + tver->version, NULL, rktp, + rktp->rktp_offsets.fetch_offset, + "Fetch from broker %" PRId32 + " reached end of " + "partition at offset %" PRId64 + " (HighwaterMark %" PRId64 ")", + rd_kafka_broker_id(rkb), + rktp->rktp_offsets.fetch_offset, + HighwaterMarkOffset); break; case RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE: default: /* and all other errors */ rd_dassert(tver->version > 0); rd_kafka_consumer_err( - rktp->rktp_fetchq, - rd_kafka_broker_id(rkb), - err, tver->version, - NULL, rktp, - rktp->rktp_offsets.fetch_offset, - "Fetch from broker %"PRId32" failed: %s", - rd_kafka_broker_id(rkb), - rd_kafka_err2str(err)); + rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err, + tver->version, NULL, rktp, rktp->rktp_offsets.fetch_offset, + "Fetch from broker %" PRId32 " failed: %s", + rd_kafka_broker_id(rkb), rd_kafka_err2str(err)); break; } @@ -4371,21 
+4294,22 @@ static void rd_kafka_fetch_reply_handle_partition_error ( * Returns 0 on success or an error code on failure. */ static rd_kafka_resp_err_t -rd_kafka_fetch_reply_handle (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, rd_kafka_buf_t *request) { - int32_t TopicArrayCnt; - int i; +rd_kafka_fetch_reply_handle(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request) { + int32_t TopicArrayCnt; + int i; const int log_decode_errors = LOG_ERR; - rd_kafka_topic_t *rkt = NULL; - int16_t ErrorCode = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_topic_t *rkt = NULL; + int16_t ErrorCode = RD_KAFKA_RESP_ERR_NO_ERROR; - if (rd_kafka_buf_ApiVersion(request) >= 1) { - int32_t Throttle_Time; - rd_kafka_buf_read_i32(rkbuf, &Throttle_Time); + if (rd_kafka_buf_ApiVersion(request) >= 1) { + int32_t Throttle_Time; + rd_kafka_buf_read_i32(rkbuf, &Throttle_Time); - rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep, - Throttle_Time); - } + rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep, + Throttle_Time); + } if (rd_kafka_buf_ApiVersion(request) >= 7) { int32_t SessionId; @@ -4393,118 +4317,133 @@ rd_kafka_fetch_reply_handle (rd_kafka_broker_t *rkb, rd_kafka_buf_read_i32(rkbuf, &SessionId); } - rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); - /* Verify that TopicArrayCnt seems to be in line with remaining size */ - rd_kafka_buf_check_len(rkbuf, - TopicArrayCnt * (3/*topic min size*/ + - 4/*PartitionArrayCnt*/ + - 4+2+8+4/*inner header*/)); + rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); + /* Verify that TopicArrayCnt seems to be in line with remaining size */ + rd_kafka_buf_check_len(rkbuf, + TopicArrayCnt * (3 /*topic min size*/ + + 4 /*PartitionArrayCnt*/ + 4 + + 2 + 8 + 4 /*inner header*/)); - for (i = 0 ; i < TopicArrayCnt ; i++) { - rd_kafkap_str_t topic; - int32_t fetch_version; - int32_t PartitionArrayCnt; - int j; + for (i = 0; i < TopicArrayCnt; i++) { + rd_kafkap_str_t topic; + int32_t fetch_version; + int32_t PartitionArrayCnt; + int j; - 
rd_kafka_buf_read_str(rkbuf, &topic); - rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt); + rd_kafka_buf_read_str(rkbuf, &topic); + rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt); rkt = rd_kafka_topic_find0(rkb->rkb_rk, &topic); - for (j = 0 ; j < PartitionArrayCnt ; j++) { - struct rd_kafka_toppar_ver *tver, tver_skel; - rd_kafka_toppar_t *rktp = NULL; + for (j = 0; j < PartitionArrayCnt; j++) { + struct rd_kafka_toppar_ver *tver, tver_skel; + rd_kafka_toppar_t *rktp = NULL; rd_kafka_aborted_txns_t *aborted_txns = NULL; rd_slice_t save_slice; struct { int32_t Partition; int16_t ErrorCode; int64_t HighwaterMarkOffset; - int64_t LastStableOffset; /* v4 */ - int64_t LogStartOffset; /* v5 */ + int64_t LastStableOffset; /* v4 */ + int64_t LogStartOffset; /* v5 */ int32_t MessageSetSize; - int32_t PreferredReadReplica; /* v11 */ + int32_t PreferredReadReplica; /* v11 */ } hdr; rd_kafka_resp_err_t err; int64_t end_offset; - rd_kafka_buf_read_i32(rkbuf, &hdr.Partition); - rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode); + rd_kafka_buf_read_i32(rkbuf, &hdr.Partition); + rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode); if (ErrorCode) hdr.ErrorCode = ErrorCode; - rd_kafka_buf_read_i64(rkbuf, &hdr.HighwaterMarkOffset); + rd_kafka_buf_read_i64(rkbuf, &hdr.HighwaterMarkOffset); end_offset = hdr.HighwaterMarkOffset; hdr.LastStableOffset = RD_KAFKA_OFFSET_INVALID; - hdr.LogStartOffset = RD_KAFKA_OFFSET_INVALID; + hdr.LogStartOffset = RD_KAFKA_OFFSET_INVALID; if (rd_kafka_buf_ApiVersion(request) >= 4) { int32_t AbortedTxnCnt; rd_kafka_buf_read_i64(rkbuf, &hdr.LastStableOffset); if (rd_kafka_buf_ApiVersion(request) >= 5) - rd_kafka_buf_read_i64(rkbuf, - &hdr.LogStartOffset); + rd_kafka_buf_read_i64( + rkbuf, &hdr.LogStartOffset); - rd_kafka_buf_read_i32(rkbuf, - &AbortedTxnCnt); + rd_kafka_buf_read_i32(rkbuf, &AbortedTxnCnt); if (rkb->rkb_rk->rk_conf.isolation_level == - RD_KAFKA_READ_UNCOMMITTED) { + RD_KAFKA_READ_UNCOMMITTED) { if (unlikely(AbortedTxnCnt > 0)) { - rd_rkb_log(rkb, 
LOG_ERR, - "FETCH", - "%.*s [%"PRId32"]: " - "%"PRId32" aborted " - "transaction(s) " - "encountered in " - "READ_UNCOMMITTED " - "fetch response: " - "ignoring.", - RD_KAFKAP_STR_PR( - &topic), - hdr.Partition, - AbortedTxnCnt); - - rd_kafka_buf_skip(rkbuf, - AbortedTxnCnt - * (8+8)); + rd_rkb_log( + rkb, LOG_ERR, "FETCH", + "%.*s [%" PRId32 + "]: " + "%" PRId32 + " aborted " + "transaction(s) " + "encountered in " + "READ_UNCOMMITTED " + "fetch response: " + "ignoring.", + RD_KAFKAP_STR_PR(&topic), + hdr.Partition, + AbortedTxnCnt); + + rd_kafka_buf_skip( + rkbuf, + AbortedTxnCnt * (8 + 8)); } } else { - /* Older brokers may return LSO -1, - * in which case we use the HWM. */ + /* Older brokers may return LSO -1, + * in which case we use the HWM. */ if (hdr.LastStableOffset >= 0) - end_offset = hdr.LastStableOffset; + end_offset = + hdr.LastStableOffset; if (AbortedTxnCnt > 0) { int k; - if (unlikely(AbortedTxnCnt > 1000000)) + if (unlikely(AbortedTxnCnt > + 1000000)) rd_kafka_buf_parse_fail( - rkbuf, - "%.*s [%"PRId32"]: " - "invalid AbortedTxnCnt %"PRId32, - RD_KAFKAP_STR_PR(&topic), - hdr.Partition, - AbortedTxnCnt); - - aborted_txns = rd_kafka_aborted_txns_new(AbortedTxnCnt); - for (k = 0 ; k < AbortedTxnCnt; k++) { + rkbuf, + "%.*s [%" PRId32 + "]: " + "invalid " + "AbortedTxnCnt " + "%" PRId32, + RD_KAFKAP_STR_PR( + &topic), + hdr.Partition, + AbortedTxnCnt); + + aborted_txns = + rd_kafka_aborted_txns_new( + AbortedTxnCnt); + for (k = 0; k < AbortedTxnCnt; + k++) { int64_t PID; int64_t FirstOffset; - rd_kafka_buf_read_i64(rkbuf, &PID); - rd_kafka_buf_read_i64(rkbuf, &FirstOffset); - rd_kafka_aborted_txns_add(aborted_txns, PID, FirstOffset); + rd_kafka_buf_read_i64( + rkbuf, &PID); + rd_kafka_buf_read_i64( + rkbuf, + &FirstOffset); + rd_kafka_aborted_txns_add( + aborted_txns, PID, + FirstOffset); } - rd_kafka_aborted_txns_sort(aborted_txns); + rd_kafka_aborted_txns_sort( + aborted_txns); } } } if (rd_kafka_buf_ApiVersion(request) >= 11) - 
rd_kafka_buf_read_i32(rkbuf, - &hdr.PreferredReadReplica); + rd_kafka_buf_read_i32( + rkbuf, &hdr.PreferredReadReplica); else hdr.PreferredReadReplica = -1; @@ -4512,35 +4451,35 @@ rd_kafka_fetch_reply_handle (rd_kafka_broker_t *rkb, if (unlikely(hdr.MessageSetSize < 0)) rd_kafka_buf_parse_fail( - rkbuf, - "%.*s [%"PRId32"]: " - "invalid MessageSetSize %"PRId32, - RD_KAFKAP_STR_PR(&topic), - hdr.Partition, - hdr.MessageSetSize); - - /* Look up topic+partition */ + rkbuf, + "%.*s [%" PRId32 + "]: " + "invalid MessageSetSize %" PRId32, + RD_KAFKAP_STR_PR(&topic), hdr.Partition, + hdr.MessageSetSize); + + /* Look up topic+partition */ if (likely(rkt != NULL)) { rd_kafka_topic_rdlock(rkt); - rktp = rd_kafka_toppar_get( - rkt, hdr.Partition, 0/*no ua-on-miss*/); + rktp = rd_kafka_toppar_get(rkt, hdr.Partition, + 0 /*no ua-on-miss*/); rd_kafka_topic_rdunlock(rkt); } - if (unlikely(!rkt || !rktp)) { - rd_rkb_dbg(rkb, TOPIC, "UNKTOPIC", - "Received Fetch response " - "(error %hu) for unknown topic " - "%.*s [%"PRId32"]: ignoring", - hdr.ErrorCode, - RD_KAFKAP_STR_PR(&topic), - hdr.Partition); - rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + if (unlikely(!rkt || !rktp)) { + rd_rkb_dbg(rkb, TOPIC, "UNKTOPIC", + "Received Fetch response " + "(error %hu) for unknown topic " + "%.*s [%" PRId32 "]: ignoring", + hdr.ErrorCode, + RD_KAFKAP_STR_PR(&topic), + hdr.Partition); + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); if (aborted_txns) rd_kafka_aborted_txns_destroy( - aborted_txns); - continue; - } + aborted_txns); + continue; + } rd_kafka_toppar_lock(rktp); rktp->rktp_lo_offset = hdr.LogStartOffset; @@ -4555,28 +4494,30 @@ rd_kafka_fetch_reply_handle (rd_kafka_broker_t *rkb, if (hdr.PreferredReadReplica != -1) { rd_kafka_fetch_preferred_replica_handle( - rktp, rkbuf, rkb, - hdr.PreferredReadReplica); + rktp, rkbuf, rkb, hdr.PreferredReadReplica); if (unlikely(hdr.MessageSetSize != 0)) { - rd_rkb_log(rkb, LOG_WARNING, "FETCH", - "%.*s [%"PRId32"]: Fetch " - "response has both " 
- "preferred read replica " - "and non-zero message set " - "size: %"PRId32": " - "skipping messages", - RD_KAFKAP_STR_PR(rktp-> - rktp_rkt->rkt_topic), - rktp->rktp_partition, - hdr.MessageSetSize); + rd_rkb_log( + rkb, LOG_WARNING, "FETCH", + "%.*s [%" PRId32 + "]: Fetch " + "response has both " + "preferred read replica " + "and non-zero message set " + "size: %" PRId32 + ": " + "skipping messages", + RD_KAFKAP_STR_PR( + rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + hdr.MessageSetSize); rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); } if (aborted_txns) rd_kafka_aborted_txns_destroy( - aborted_txns); + aborted_txns); rd_kafka_toppar_destroy(rktp); /* from get */ continue; } @@ -4588,7 +4529,8 @@ rd_kafka_fetch_reply_handle (rd_kafka_broker_t *rkb, if (unlikely(rktp->rktp_broker != rkb)) { rd_kafka_toppar_unlock(rktp); rd_rkb_dbg(rkb, MSG, "FETCH", - "%.*s [%"PRId32"]: " + "%.*s [%" PRId32 + "]: " "partition broker has changed: " "discarding fetch response", RD_KAFKAP_STR_PR(&topic), @@ -4597,72 +4539,74 @@ rd_kafka_fetch_reply_handle (rd_kafka_broker_t *rkb, rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); if (aborted_txns) rd_kafka_aborted_txns_destroy( - aborted_txns); + aborted_txns); continue; } - fetch_version = rktp->rktp_fetch_version; + fetch_version = rktp->rktp_fetch_version; rd_kafka_toppar_unlock(rktp); - /* Check if this Fetch is for an outdated fetch version, + /* Check if this Fetch is for an outdated fetch version, * or the original rktp was removed and a new one * created (due to partition count decreasing and * then increasing again, which can happen in * desynchronized clusters): if so ignore it. 
*/ - tver_skel.rktp = rktp; - tver = rd_list_find(request->rkbuf_rktp_vers, - &tver_skel, - rd_kafka_toppar_ver_cmp); - rd_kafka_assert(NULL, tver); + tver_skel.rktp = rktp; + tver = + rd_list_find(request->rkbuf_rktp_vers, &tver_skel, + rd_kafka_toppar_ver_cmp); + rd_kafka_assert(NULL, tver); if (tver->rktp != rktp || tver->version < fetch_version) { rd_rkb_dbg(rkb, MSG, "DROP", - "%s [%"PRId32"]: " + "%s [%" PRId32 + "]: " "dropping outdated fetch response " "(v%d < %d or old rktp)", rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - tver->version, fetch_version); - rd_atomic64_add(&rktp->rktp_c. rx_ver_drops, 1); + rktp->rktp_partition, tver->version, + fetch_version); + rd_atomic64_add(&rktp->rktp_c.rx_ver_drops, 1); rd_kafka_toppar_destroy(rktp); /* from get */ rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); if (aborted_txns) rd_kafka_aborted_txns_destroy( - aborted_txns); + aborted_txns); continue; } - rd_rkb_dbg(rkb, MSG, "FETCH", - "Topic %.*s [%"PRId32"] MessageSet " - "size %"PRId32", error \"%s\", " - "MaxOffset %"PRId64", " - "LSO %"PRId64", " - "Ver %"PRId32"/%"PRId32, - RD_KAFKAP_STR_PR(&topic), hdr.Partition, - hdr.MessageSetSize, - rd_kafka_err2str(hdr.ErrorCode), - hdr.HighwaterMarkOffset, - hdr.LastStableOffset, - tver->version, fetch_version); - - /* If this is the last message of the queue, - * signal EOF back to the application. 
*/ - if (end_offset == - rktp->rktp_offsets.fetch_offset - && - rktp->rktp_offsets.eof_offset != - rktp->rktp_offsets.fetch_offset) { - hdr.ErrorCode = - RD_KAFKA_RESP_ERR__PARTITION_EOF; - rktp->rktp_offsets.eof_offset = - rktp->rktp_offsets.fetch_offset; - } + rd_rkb_dbg( + rkb, MSG, "FETCH", + "Topic %.*s [%" PRId32 + "] MessageSet " + "size %" PRId32 + ", error \"%s\", " + "MaxOffset %" PRId64 + ", " + "LSO %" PRId64 + ", " + "Ver %" PRId32 "/%" PRId32, + RD_KAFKAP_STR_PR(&topic), hdr.Partition, + hdr.MessageSetSize, rd_kafka_err2str(hdr.ErrorCode), + hdr.HighwaterMarkOffset, hdr.LastStableOffset, + tver->version, fetch_version); + + /* If this is the last message of the queue, + * signal EOF back to the application. */ + if (end_offset == rktp->rktp_offsets.fetch_offset && + rktp->rktp_offsets.eof_offset != + rktp->rktp_offsets.fetch_offset) { + hdr.ErrorCode = + RD_KAFKA_RESP_ERR__PARTITION_EOF; + rktp->rktp_offsets.eof_offset = + rktp->rktp_offsets.fetch_offset; + } if (unlikely(hdr.ErrorCode != RD_KAFKA_RESP_ERR_NO_ERROR)) { /* Handle partition-level errors. */ rd_kafka_fetch_reply_handle_partition_error( - rkb, rktp, tver, hdr.ErrorCode, - hdr.HighwaterMarkOffset); + rkb, rktp, tver, hdr.ErrorCode, + hdr.HighwaterMarkOffset); rd_kafka_toppar_destroy(rktp); /* from get()*/ @@ -4670,38 +4614,36 @@ rd_kafka_fetch_reply_handle (rd_kafka_broker_t *rkb, if (aborted_txns) rd_kafka_aborted_txns_destroy( - aborted_txns); + aborted_txns); continue; } /* No error, clear any previous fetch error. 
*/ rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR; - if (unlikely(hdr.MessageSetSize <= 0)) { - rd_kafka_toppar_destroy(rktp); /*from get()*/ + if (unlikely(hdr.MessageSetSize <= 0)) { + rd_kafka_toppar_destroy(rktp); /*from get()*/ if (aborted_txns) rd_kafka_aborted_txns_destroy( - aborted_txns); - continue; - } + aborted_txns); + continue; + } /** * Parse MessageSet */ if (!rd_slice_narrow_relative( - &rkbuf->rkbuf_reader, - &save_slice, - (size_t)hdr.MessageSetSize)) + &rkbuf->rkbuf_reader, &save_slice, + (size_t)hdr.MessageSetSize)) rd_kafka_buf_check_len(rkbuf, hdr.MessageSetSize); /* Parse messages */ - err = rd_kafka_msgset_parse( - rkbuf, request, rktp, aborted_txns, tver); + err = rd_kafka_msgset_parse(rkbuf, request, rktp, + aborted_txns, tver); if (aborted_txns) - rd_kafka_aborted_txns_destroy( - aborted_txns); + rd_kafka_aborted_txns_destroy(aborted_txns); rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice); /* Continue with next partition regardless of @@ -4718,135 +4660,126 @@ rd_kafka_fetch_reply_handle (rd_kafka_broker_t *rkb, rd_kafka_topic_destroy0(rkt); rkt = NULL; } - } + } - if (rd_kafka_buf_read_remain(rkbuf) != 0) { - rd_kafka_buf_parse_fail(rkbuf, - "Remaining data after message set " - "parse: %"PRIusz" bytes", - rd_kafka_buf_read_remain(rkbuf)); - RD_NOTREACHED(); - } + if (rd_kafka_buf_read_remain(rkbuf) != 0) { + rd_kafka_buf_parse_fail(rkbuf, + "Remaining data after message set " + "parse: %" PRIusz " bytes", + rd_kafka_buf_read_remain(rkbuf)); + RD_NOTREACHED(); + } - return 0; + return 0; err_parse: if (rkt) rd_kafka_topic_destroy0(rkt); - rd_rkb_dbg(rkb, MSG, "BADMSG", "Bad message (Fetch v%d): " - "is broker.version.fallback incorrectly set?", - (int)request->rkbuf_reqhdr.ApiVersion); - return rkbuf->rkbuf_err; + rd_rkb_dbg(rkb, MSG, "BADMSG", + "Bad message (Fetch v%d): " + "is broker.version.fallback incorrectly set?", + (int)request->rkbuf_reqhdr.ApiVersion); + return rkbuf->rkbuf_err; } -static void rd_kafka_broker_fetch_reply 
(rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_broker_fetch_reply(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { if (err == RD_KAFKA_RESP_ERR__DESTROY) return; /* Terminating */ - rd_kafka_assert(rkb->rkb_rk, rkb->rkb_fetching > 0); - rkb->rkb_fetching = 0; + rd_kafka_assert(rkb->rkb_rk, rkb->rkb_fetching > 0); + rkb->rkb_fetching = 0; - /* Parse and handle the messages (unless the request errored) */ - if (!err && reply) - err = rd_kafka_fetch_reply_handle(rkb, reply, request); + /* Parse and handle the messages (unless the request errored) */ + if (!err && reply) + err = rd_kafka_fetch_reply_handle(rkb, reply, request); - if (unlikely(err)) { + if (unlikely(err)) { char tmp[128]; rd_rkb_dbg(rkb, MSG, "FETCH", "Fetch reply: %s", rd_kafka_err2str(err)); - switch (err) - { - case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: - case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION: - case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE: + switch (err) { + case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: + case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION: + case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE: /* Request metadata information update */ - rd_snprintf(tmp, sizeof(tmp), - "FetchRequest failed: %s", + rd_snprintf(tmp, sizeof(tmp), "FetchRequest failed: %s", rd_kafka_err2str(err)); rd_kafka_metadata_refresh_known_topics( - rkb->rkb_rk, NULL, rd_true/*force*/, tmp); + rkb->rkb_rk, NULL, rd_true /*force*/, tmp); /* FALLTHRU */ - case RD_KAFKA_RESP_ERR__TRANSPORT: - case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT: + case RD_KAFKA_RESP_ERR__TRANSPORT: + case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT: case 
RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: - /* The fetch is already intervalled from + /* The fetch is already intervalled from * consumer_serve() so dont retry. */ - break; + break; - default: - break; - } + default: + break; + } - rd_kafka_broker_fetch_backoff(rkb, err); - /* FALLTHRU */ - } + rd_kafka_broker_fetch_backoff(rkb, err); + /* FALLTHRU */ + } } - - - - - - - - /** * Build and send a Fetch request message for all underflowed toppars * for a specific broker. */ -static int rd_kafka_broker_fetch_toppars (rd_kafka_broker_t *rkb, rd_ts_t now) { - rd_kafka_toppar_t *rktp; - rd_kafka_buf_t *rkbuf; - int cnt = 0; - size_t of_TopicArrayCnt = 0; - int TopicArrayCnt = 0; - size_t of_PartitionArrayCnt = 0; - int PartitionArrayCnt = 0; - rd_kafka_topic_t *rkt_last = NULL; - int16_t ApiVersion = 0; - - /* Create buffer and segments: - * 1 x ReplicaId MaxWaitTime MinBytes TopicArrayCnt - * N x topic name - * N x PartitionArrayCnt Partition FetchOffset MaxBytes - * where N = number of toppars. - * Since we dont keep track of the number of topics served by - * this broker, only the partition count, we do a worst-case calc - * when allocating and assume each partition is on its own topic - */ +static int rd_kafka_broker_fetch_toppars(rd_kafka_broker_t *rkb, rd_ts_t now) { + rd_kafka_toppar_t *rktp; + rd_kafka_buf_t *rkbuf; + int cnt = 0; + size_t of_TopicArrayCnt = 0; + int TopicArrayCnt = 0; + size_t of_PartitionArrayCnt = 0; + int PartitionArrayCnt = 0; + rd_kafka_topic_t *rkt_last = NULL; + int16_t ApiVersion = 0; + + /* Create buffer and segments: + * 1 x ReplicaId MaxWaitTime MinBytes TopicArrayCnt + * N x topic name + * N x PartitionArrayCnt Partition FetchOffset MaxBytes + * where N = number of toppars. 
+ * Since we dont keep track of the number of topics served by + * this broker, only the partition count, we do a worst-case calc + * when allocating and assume each partition is on its own topic + */ if (unlikely(rkb->rkb_active_toppar_cnt == 0)) return 0; - rkbuf = rd_kafka_buf_new_request( - rkb, RD_KAFKAP_Fetch, 1, - /* ReplicaId+MaxWaitTime+MinBytes+MaxBytes+IsolationLevel+ - * SessionId+Epoch+TopicCnt */ - 4+4+4+4+1+4+4+4+ + rkbuf = rd_kafka_buf_new_request( + rkb, RD_KAFKAP_Fetch, 1, + /* ReplicaId+MaxWaitTime+MinBytes+MaxBytes+IsolationLevel+ + * SessionId+Epoch+TopicCnt */ + 4 + 4 + 4 + 4 + 1 + 4 + 4 + 4 + /* N x PartCnt+Partition+CurrentLeaderEpoch+FetchOffset+ * LogStartOffset+MaxBytes+?TopicNameLen?*/ - (rkb->rkb_active_toppar_cnt * (4+4+4+8+8+4+40)) + + (rkb->rkb_active_toppar_cnt * (4 + 4 + 4 + 8 + 8 + 4 + 40)) + /* ForgottenTopicsCnt */ - 4+ + 4 + /* N x ForgottenTopicsData */ 0); - ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_Fetch, 0, 11, NULL); + ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, RD_KAFKAP_Fetch, + 0, 11, NULL); if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2) rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, @@ -4859,13 +4792,13 @@ static int rd_kafka_broker_fetch_toppars (rd_kafka_broker_t *rkb, rd_ts_t now) { RD_KAFKA_FEATURE_THROTTLETIME); - /* FetchRequest header */ - /* ReplicaId */ - rd_kafka_buf_write_i32(rkbuf, -1); - /* MaxWaitTime */ - rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_wait_max_ms); - /* MinBytes */ - rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_min_bytes); + /* FetchRequest header */ + /* ReplicaId */ + rd_kafka_buf_write_i32(rkbuf, -1); + /* MaxWaitTime */ + rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_wait_max_ms); + /* MinBytes */ + rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_min_bytes); if (rd_kafka_buf_ApiVersion(rkbuf) >= 3) /* MaxBytes */ @@ -4884,104 +4817,102 @@ static int rd_kafka_broker_fetch_toppars (rd_kafka_broker_t *rkb, 
rd_ts_t now) { rd_kafka_buf_write_i32(rkbuf, -1); } - /* Write zero TopicArrayCnt but store pointer for later update */ - of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); + /* Write zero TopicArrayCnt but store pointer for later update */ + of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); /* Prepare map for storing the fetch version for each partition, * this will later be checked in Fetch response to purge outdated * responses (e.g., after a seek). */ - rkbuf->rkbuf_rktp_vers = rd_list_new( - 0, (void *)rd_kafka_toppar_ver_destroy); + rkbuf->rkbuf_rktp_vers = + rd_list_new(0, (void *)rd_kafka_toppar_ver_destroy); rd_list_prealloc_elems(rkbuf->rkbuf_rktp_vers, sizeof(struct rd_kafka_toppar_ver), rkb->rkb_active_toppar_cnt, 0); - /* Round-robin start of the list. */ + /* Round-robin start of the list. */ rktp = rkb->rkb_active_toppar_next; do { - struct rd_kafka_toppar_ver *tver; - - if (rkt_last != rktp->rktp_rkt) { - if (rkt_last != NULL) { - /* Update PartitionArrayCnt */ - rd_kafka_buf_update_i32(rkbuf, - of_PartitionArrayCnt, - PartitionArrayCnt); - } + struct rd_kafka_toppar_ver *tver; + + if (rkt_last != rktp->rktp_rkt) { + if (rkt_last != NULL) { + /* Update PartitionArrayCnt */ + rd_kafka_buf_update_i32(rkbuf, + of_PartitionArrayCnt, + PartitionArrayCnt); + } /* Topic name */ - rd_kafka_buf_write_kstr(rkbuf, + rd_kafka_buf_write_kstr(rkbuf, rktp->rktp_rkt->rkt_topic); - TopicArrayCnt++; - rkt_last = rktp->rktp_rkt; + TopicArrayCnt++; + rkt_last = rktp->rktp_rkt; /* Partition count */ - of_PartitionArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); - PartitionArrayCnt = 0; - } + of_PartitionArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); + PartitionArrayCnt = 0; + } - PartitionArrayCnt++; + PartitionArrayCnt++; - /* Partition */ - rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition); + /* Partition */ + rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition); if (rd_kafka_buf_ApiVersion(rkbuf) >= 9) /* CurrentLeaderEpoch */ rd_kafka_buf_write_i32(rkbuf, -1); - /* 
FetchOffset */ - rd_kafka_buf_write_i64(rkbuf, rktp->rktp_offsets.fetch_offset); + /* FetchOffset */ + rd_kafka_buf_write_i64(rkbuf, rktp->rktp_offsets.fetch_offset); if (rd_kafka_buf_ApiVersion(rkbuf) >= 5) /* LogStartOffset - only used by follower replica */ rd_kafka_buf_write_i64(rkbuf, -1); - /* MaxBytes */ - rd_kafka_buf_write_i32(rkbuf, rktp->rktp_fetch_msg_max_bytes); + /* MaxBytes */ + rd_kafka_buf_write_i32(rkbuf, rktp->rktp_fetch_msg_max_bytes); - rd_rkb_dbg(rkb, FETCH, "FETCH", - "Fetch topic %.*s [%"PRId32"] at offset %"PRId64 - " (v%d)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, + rd_rkb_dbg(rkb, FETCH, "FETCH", + "Fetch topic %.*s [%" PRId32 "] at offset %" PRId64 + " (v%d)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rktp->rktp_offsets.fetch_offset, - rktp->rktp_fetch_version); + rktp->rktp_fetch_version); /* We must have a valid fetch offset when we get here */ rd_dassert(rktp->rktp_offsets.fetch_offset >= 0); - /* Add toppar + op version mapping. */ - tver = rd_list_add(rkbuf->rkbuf_rktp_vers, NULL); - tver->rktp = rd_kafka_toppar_keep(rktp); - tver->version = rktp->rktp_fetch_version; + /* Add toppar + op version mapping. */ + tver = rd_list_add(rkbuf->rkbuf_rktp_vers, NULL); + tver->rktp = rd_kafka_toppar_keep(rktp); + tver->version = rktp->rktp_fetch_version; - cnt++; - } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, - rktp, rktp_activelink)) != + cnt++; + } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, + rktp_activelink)) != rkb->rkb_active_toppar_next); /* Update next toppar to fetch in round-robin list. */ rd_kafka_broker_active_toppar_next( - rkb, - rktp ? - CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, - rktp, rktp_activelink) : NULL); + rkb, rktp ? 
CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, + rktp_activelink) + : NULL); - rd_rkb_dbg(rkb, FETCH, "FETCH", "Fetch %i/%i/%i toppar(s)", - cnt, rkb->rkb_active_toppar_cnt, rkb->rkb_toppar_cnt); - if (!cnt) { - rd_kafka_buf_destroy(rkbuf); - return cnt; - } + rd_rkb_dbg(rkb, FETCH, "FETCH", "Fetch %i/%i/%i toppar(s)", cnt, + rkb->rkb_active_toppar_cnt, rkb->rkb_toppar_cnt); + if (!cnt) { + rd_kafka_buf_destroy(rkbuf); + return cnt; + } - if (rkt_last != NULL) { - /* Update last topic's PartitionArrayCnt */ - rd_kafka_buf_update_i32(rkbuf, - of_PartitionArrayCnt, - PartitionArrayCnt); - } + if (rkt_last != NULL) { + /* Update last topic's PartitionArrayCnt */ + rd_kafka_buf_update_i32(rkbuf, of_PartitionArrayCnt, + PartitionArrayCnt); + } - /* Update TopicArrayCnt */ - rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, TopicArrayCnt); + /* Update TopicArrayCnt */ + rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, TopicArrayCnt); if (rd_kafka_buf_ApiVersion(rkbuf) >= 7) @@ -4992,7 +4923,7 @@ static int rd_kafka_broker_fetch_toppars (rd_kafka_broker_t *rkb, rd_ts_t now) { if (rd_kafka_buf_ApiVersion(rkbuf) >= 11) /* RackId */ rd_kafka_buf_write_kstr(rkbuf, - rkb->rkb_rk->rk_conf.client_rack); + rkb->rkb_rk->rk_conf.client_rack); /* Consider Fetch requests blocking if fetch.wait.max.ms >= 1s */ if (rkb->rkb_rk->rk_conf.fetch_wait_max_ms >= 1000) @@ -5001,39 +4932,38 @@ static int rd_kafka_broker_fetch_toppars (rd_kafka_broker_t *rkb, rd_ts_t now) { /* Use configured timeout */ rd_kafka_buf_set_timeout(rkbuf, rkb->rkb_rk->rk_conf.socket_timeout_ms + - rkb->rkb_rk->rk_conf.fetch_wait_max_ms, + rkb->rkb_rk->rk_conf.fetch_wait_max_ms, now); - /* Sort toppar versions for quicker lookups in Fetch response. */ - rd_list_sort(rkbuf->rkbuf_rktp_vers, rd_kafka_toppar_ver_cmp); + /* Sort toppar versions for quicker lookups in Fetch response. 
*/ + rd_list_sort(rkbuf->rkbuf_rktp_vers, rd_kafka_toppar_ver_cmp); - rkb->rkb_fetching = 1; + rkb->rkb_fetching = 1; rd_kafka_broker_buf_enq1(rkb, rkbuf, rd_kafka_broker_fetch_reply, NULL); - return cnt; + return cnt; } - /** * Consumer serving */ -static void rd_kafka_broker_consumer_serve (rd_kafka_broker_t *rkb, - rd_ts_t abs_timeout) { +static void rd_kafka_broker_consumer_serve(rd_kafka_broker_t *rkb, + rd_ts_t abs_timeout) { unsigned int initial_state = rkb->rkb_state; rd_ts_t now; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - rd_kafka_broker_lock(rkb); + rd_kafka_broker_lock(rkb); while (!rd_kafka_broker_terminating(rkb) && rkb->rkb_state == initial_state && abs_timeout > (now = rd_clock())) { rd_ts_t min_backoff; - rd_kafka_broker_unlock(rkb); + rd_kafka_broker_unlock(rkb); /* Serve toppars */ min_backoff = rd_kafka_broker_consumer_toppars_serve(rkb); @@ -5058,18 +4988,17 @@ static void rd_kafka_broker_consumer_serve (rd_kafka_broker_t *rkb, min_backoff = abs_timeout; } else if (min_backoff < RD_TS_MAX) rd_rkb_dbg(rkb, FETCH, "FETCH", - "Fetch backoff for %"PRId64 - "ms", - (min_backoff-now)/1000); + "Fetch backoff for %" PRId64 "ms", + (min_backoff - now) / 1000); } else { /* Nothing needs to be done, next wakeup * is from ops, state change, IO, or this timeout */ min_backoff = abs_timeout; } - /* Check and move retry buffers */ - if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0)) - rd_kafka_broker_retry_bufs_move(rkb, &min_backoff); + /* Check and move retry buffers */ + if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0)) + rd_kafka_broker_retry_bufs_move(rkb, &min_backoff); if (min_backoff > abs_timeout) min_backoff = abs_timeout; @@ -5077,10 +5006,10 @@ static void rd_kafka_broker_consumer_serve (rd_kafka_broker_t *rkb, if (rd_kafka_broker_ops_io_serve(rkb, min_backoff)) return; /* Wakeup */ - rd_kafka_broker_lock(rkb); - } + 
rd_kafka_broker_lock(rkb); + } - rd_kafka_broker_unlock(rkb); + rd_kafka_broker_unlock(rkb); } @@ -5094,9 +5023,9 @@ static void rd_kafka_broker_consumer_serve (rd_kafka_broker_t *rkb, * * @locality broker thread */ -static RD_INLINE void rd_kafka_broker_idle_check (rd_kafka_broker_t *rkb) { - rd_ts_t ts_send = rd_atomic64_get(&rkb->rkb_c.ts_send); - rd_ts_t ts_recv = rd_atomic64_get(&rkb->rkb_c.ts_recv); +static RD_INLINE void rd_kafka_broker_idle_check(rd_kafka_broker_t *rkb) { + rd_ts_t ts_send = rd_atomic64_get(&rkb->rkb_c.ts_send); + rd_ts_t ts_recv = rd_atomic64_get(&rkb->rkb_c.ts_recv); rd_ts_t ts_last_activity = RD_MAX(ts_send, ts_recv); int idle_ms; @@ -5110,8 +5039,7 @@ static RD_INLINE void rd_kafka_broker_idle_check (rd_kafka_broker_t *rkb) { if (likely(idle_ms < rkb->rkb_rk->rk_conf.connections_max_idle_ms)) return; - rd_kafka_broker_fail(rkb, LOG_DEBUG, - RD_KAFKA_RESP_ERR__TRANSPORT, + rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__TRANSPORT, "Connection max idle time exceeded " "(%dms since last activity)", idle_ms); @@ -5187,7 +5115,7 @@ static RD_INLINE void rd_kafka_broker_idle_check (rd_kafka_broker_t *rkb) { * @locality broker thread * @locks none */ -static void rd_kafka_broker_serve (rd_kafka_broker_t *rkb, int timeout_ms) { +static void rd_kafka_broker_serve(rd_kafka_broker_t *rkb, int timeout_ms) { rd_ts_t abs_timeout; if (unlikely(rd_kafka_terminating(rkb->rkb_rk) || @@ -5207,7 +5135,7 @@ static void rd_kafka_broker_serve (rd_kafka_broker_t *rkb, int timeout_ms) { * The value is reset here on each serve(). If there are queued * requests we know right away that a connection is needed. 
*/ rkb->rkb_persistconn.internal = - rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt) > 0; + rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt) > 0; if (rkb->rkb_source == RD_KAFKA_INTERNAL) { rd_kafka_broker_internal_serve(rkb, abs_timeout); @@ -5221,7 +5149,7 @@ static void rd_kafka_broker_serve (rd_kafka_broker_t *rkb, int timeout_ms) { if (rkb->rkb_rk->rk_conf.connections_max_idle_ms && rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP) - rd_kafka_broker_idle_check(rkb); + rd_kafka_broker_idle_check(rkb); } @@ -5233,42 +5161,40 @@ static void rd_kafka_broker_serve (rd_kafka_broker_t *rkb, int timeout_ms) { * @locks_acquired none */ static rd_bool_t -rd_kafka_broker_addresses_exhausted (const rd_kafka_broker_t *rkb) { - return !rkb->rkb_rsal || - rkb->rkb_rsal->rsal_cnt == 0 || - rkb->rkb_rsal->rsal_curr + 1 == rkb->rkb_rsal->rsal_cnt; +rd_kafka_broker_addresses_exhausted(const rd_kafka_broker_t *rkb) { + return !rkb->rkb_rsal || rkb->rkb_rsal->rsal_cnt == 0 || + rkb->rkb_rsal->rsal_curr + 1 == rkb->rkb_rsal->rsal_cnt; } -static int rd_kafka_broker_thread_main (void *arg) { - rd_kafka_broker_t *rkb = arg; - rd_kafka_t *rk = rkb->rkb_rk; +static int rd_kafka_broker_thread_main(void *arg) { + rd_kafka_broker_t *rkb = arg; + rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_set_thread_name("%s", rkb->rkb_name); - rd_kafka_set_thread_sysname("rdk:broker%"PRId32, rkb->rkb_nodeid); + rd_kafka_set_thread_sysname("rdk:broker%" PRId32, rkb->rkb_nodeid); rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_BROKER); - (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); + (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); /* Our own refcount was increased just prior to thread creation, * when refcount drops to 1 it is just us left and the broker * thread should terminate. */ - /* Acquire lock (which was held by thread creator during creation) - * to synchronise state. 
*/ - rd_kafka_broker_lock(rkb); - rd_kafka_broker_unlock(rkb); + /* Acquire lock (which was held by thread creator during creation) + * to synchronise state. */ + rd_kafka_broker_lock(rkb); + rd_kafka_broker_unlock(rkb); - rd_rkb_dbg(rkb, BROKER, "BRKMAIN", "Enter main broker thread"); + rd_rkb_dbg(rkb, BROKER, "BRKMAIN", "Enter main broker thread"); - while (!rd_kafka_broker_terminating(rkb)) { + while (!rd_kafka_broker_terminating(rkb)) { int backoff; int r; redo: - switch (rkb->rkb_state) - { + switch (rkb->rkb_state) { case RD_KAFKA_BROKER_STATE_INIT: /* Check if there is demand for a connection * to this broker, if so jump to TRY_CONNECT state. */ @@ -5283,7 +5209,7 @@ static int rd_kafka_broker_thread_main (void *arg) { * which might trigger a ALL_BROKERS_DOWN error. */ rd_kafka_broker_lock(rkb); rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); + rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); rd_kafka_broker_unlock(rkb); goto redo; /* effectively a fallthru to TRY_CONNECT */ @@ -5291,18 +5217,18 @@ static int rd_kafka_broker_thread_main (void *arg) { rd_kafka_broker_lock(rkb); if (rkb->rkb_rk->rk_conf.sparse_connections) rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_INIT); + rkb, RD_KAFKA_BROKER_STATE_INIT); else rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); + rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); rd_kafka_broker_unlock(rkb); goto redo; /* effectively a fallthru to TRY_CONNECT */ case RD_KAFKA_BROKER_STATE_TRY_CONNECT: if (rkb->rkb_source == RD_KAFKA_INTERNAL) { rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state(rkb, - RD_KAFKA_BROKER_STATE_UP); + rd_kafka_broker_set_state( + rkb, RD_KAFKA_BROKER_STATE_UP); rd_kafka_broker_unlock(rkb); break; } @@ -5322,8 +5248,8 @@ static int rd_kafka_broker_thread_main (void *arg) { /* Throttle & jitter reconnects to avoid * thundering horde of reconnecting clients after * a broker / network outage. 
Issue #403 */ - backoff = rd_kafka_broker_reconnect_backoff(rkb, - rd_clock()); + backoff = + rd_kafka_broker_reconnect_backoff(rkb, rd_clock()); if (backoff > 0) { rd_rkb_dbg(rkb, BROKER, "RECONNECT", "Delaying next reconnect by %dms", @@ -5332,8 +5258,8 @@ static int rd_kafka_broker_thread_main (void *arg) { continue; } - /* Initiate asynchronous connection attempt. - * Only the host lookup is blocking here. */ + /* Initiate asynchronous connection attempt. + * Only the host lookup is blocking here. */ r = rd_kafka_broker_connect(rkb); if (r == -1) { /* Immediate failure, most likely host @@ -5343,8 +5269,8 @@ static int rd_kafka_broker_thread_main (void *arg) { * short while to avoid busy looping. */ if (rd_kafka_broker_addresses_exhausted(rkb)) rd_kafka_broker_serve( - rkb, rd_kafka_max_block_ms); - } else if (r == 0) { + rkb, rd_kafka_max_block_ms); + } else if (r == 0) { /* Broker has no hostname yet, wait * for hostname to be set and connection * triggered by received OP_CONNECT. */ @@ -5355,14 +5281,14 @@ static int rd_kafka_broker_thread_main (void *arg) { * have changed to STATE_CONNECT. */ } - break; + break; - case RD_KAFKA_BROKER_STATE_CONNECT: + case RD_KAFKA_BROKER_STATE_CONNECT: case RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE: - case RD_KAFKA_BROKER_STATE_AUTH_LEGACY: + case RD_KAFKA_BROKER_STATE_AUTH_LEGACY: case RD_KAFKA_BROKER_STATE_AUTH_REQ: - case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE: - case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY: + case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE: + case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY: /* Asynchronous connect in progress. 
*/ rd_kafka_broker_serve(rkb, rd_kafka_max_block_ms); @@ -5373,60 +5299,59 @@ static int rd_kafka_broker_thread_main (void *arg) { if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_DOWN && rd_kafka_broker_addresses_exhausted(rkb)) rd_kafka_broker_update_reconnect_backoff( - rkb, &rkb->rkb_rk->rk_conf, rd_clock()); - break; + rkb, &rkb->rkb_rk->rk_conf, rd_clock()); + break; case RD_KAFKA_BROKER_STATE_UPDATE: /* FALLTHRU */ - case RD_KAFKA_BROKER_STATE_UP: + case RD_KAFKA_BROKER_STATE_UP: rd_kafka_broker_serve(rkb, rd_kafka_max_block_ms); - if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UPDATE) { + if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UPDATE) { rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_UP); + rd_kafka_broker_set_state( + rkb, RD_KAFKA_BROKER_STATE_UP); rd_kafka_broker_unlock(rkb); - } - break; - } + } + break; + } if (rd_kafka_terminating(rkb->rkb_rk)) { /* Handle is terminating: fail the send+retry queue * to speed up termination, otherwise we'll * need to wait for request timeouts. 
*/ r = rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_outbufs, NULL, -1, - RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0); + rkb, 0, &rkb->rkb_outbufs, NULL, -1, + RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0); r += rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_retrybufs, NULL, -1, - RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0); - rd_rkb_dbg(rkb, BROKER, "TERMINATE", - "Handle is terminating in state %s: " - "%d refcnts (%p), %d toppar(s), " - "%d active toppar(s), " - "%d outbufs, %d waitresps, %d retrybufs: " - "failed %d request(s) in retry+outbuf", - rd_kafka_broker_state_names[rkb->rkb_state], - rd_refcnt_get(&rkb->rkb_refcnt), - &rkb->rkb_refcnt, - rkb->rkb_toppar_cnt, - rkb->rkb_active_toppar_cnt, - (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs), - (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps), - (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs), - r); + rkb, 0, &rkb->rkb_retrybufs, NULL, -1, + RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0); + rd_rkb_dbg( + rkb, BROKER, "TERMINATE", + "Handle is terminating in state %s: " + "%d refcnts (%p), %d toppar(s), " + "%d active toppar(s), " + "%d outbufs, %d waitresps, %d retrybufs: " + "failed %d request(s) in retry+outbuf", + rd_kafka_broker_state_names[rkb->rkb_state], + rd_refcnt_get(&rkb->rkb_refcnt), &rkb->rkb_refcnt, + rkb->rkb_toppar_cnt, rkb->rkb_active_toppar_cnt, + (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs), + (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps), + (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs), r); } - } + } - if (rkb->rkb_source != RD_KAFKA_INTERNAL) { - rd_kafka_wrlock(rkb->rkb_rk); - TAILQ_REMOVE(&rkb->rkb_rk->rk_brokers, rkb, rkb_link); + if (rkb->rkb_source != RD_KAFKA_INTERNAL) { + rd_kafka_wrlock(rkb->rkb_rk); + TAILQ_REMOVE(&rkb->rkb_rk->rk_brokers, rkb, rkb_link); if (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb)) rd_list_remove(&rkb->rkb_rk->rk_broker_by_id, rkb); - (void)rd_atomic32_sub(&rkb->rkb_rk->rk_broker_cnt, 1); - rd_kafka_wrunlock(rkb->rkb_rk); - } + 
(void)rd_atomic32_sub(&rkb->rkb_rk->rk_broker_cnt, 1); + rd_kafka_wrunlock(rkb->rkb_rk); + } - rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__DESTROY, + rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__DESTROY, "Broker handle is terminating"); /* Disable and drain ops queue. @@ -5438,7 +5363,7 @@ static int rd_kafka_broker_thread_main (void *arg) { while (rd_kafka_broker_ops_serve(rkb, RD_POLL_NOWAIT)) ; - rd_kafka_broker_destroy(rkb); + rd_kafka_broker_destroy(rkb); #if WITH_SSL /* Remove OpenSSL per-thread error state to avoid memory leaks */ @@ -5452,16 +5377,16 @@ static int rd_kafka_broker_thread_main (void *arg) { rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_BROKER); - rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1); + rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1); - return 0; + return 0; } /** * Final destructor. Refcnt must be 0. */ -void rd_kafka_broker_destroy_final (rd_kafka_broker_t *rkb) { +void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb) { rd_assert(thrd_is_current(rkb->rkb_thread)); rd_assert(TAILQ_EMPTY(&rkb->rkb_monitors)); @@ -5472,9 +5397,8 @@ void rd_kafka_broker_destroy_final (rd_kafka_broker_t *rkb) { if (rkb->rkb_source != RD_KAFKA_INTERNAL && (rkb->rkb_rk->rk_conf.security_protocol == - RD_KAFKA_PROTO_SASL_PLAINTEXT || - rkb->rkb_rk->rk_conf.security_protocol == - RD_KAFKA_PROTO_SASL_SSL)) + RD_KAFKA_PROTO_SASL_PLAINTEXT || + rkb->rkb_rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL)) rd_kafka_sasl_broker_term(rkb); if (rkb->rkb_wakeup_fd[0] != -1) @@ -5482,23 +5406,23 @@ void rd_kafka_broker_destroy_final (rd_kafka_broker_t *rkb) { if (rkb->rkb_wakeup_fd[1] != -1) rd_close(rkb->rkb_wakeup_fd[1]); - if (rkb->rkb_recv_buf) - rd_kafka_buf_destroy(rkb->rkb_recv_buf); + if (rkb->rkb_recv_buf) + rd_kafka_buf_destroy(rkb->rkb_recv_buf); - if (rkb->rkb_rsal) - rd_sockaddr_list_destroy(rkb->rkb_rsal); + if (rkb->rkb_rsal) + rd_sockaddr_list_destroy(rkb->rkb_rsal); - if (rkb->rkb_ApiVersions) - 
rd_free(rkb->rkb_ApiVersions); + if (rkb->rkb_ApiVersions) + rd_free(rkb->rkb_ApiVersions); rd_free(rkb->rkb_origname); - rd_kafka_q_purge(rkb->rkb_ops); + rd_kafka_q_purge(rkb->rkb_ops); rd_kafka_q_destroy_owner(rkb->rkb_ops); rd_avg_destroy(&rkb->rkb_avg_int_latency); rd_avg_destroy(&rkb->rkb_avg_outbuf_latency); rd_avg_destroy(&rkb->rkb_avg_rtt); - rd_avg_destroy(&rkb->rkb_avg_throttle); + rd_avg_destroy(&rkb->rkb_avg_throttle); mtx_lock(&rkb->rkb_logname_lock); rd_free(rkb->rkb_logname); @@ -5506,27 +5430,27 @@ void rd_kafka_broker_destroy_final (rd_kafka_broker_t *rkb) { mtx_unlock(&rkb->rkb_logname_lock); mtx_destroy(&rkb->rkb_logname_lock); - mtx_destroy(&rkb->rkb_lock); + mtx_destroy(&rkb->rkb_lock); rd_refcnt_destroy(&rkb->rkb_refcnt); - rd_free(rkb); + rd_free(rkb); } /** * Returns the internal broker with refcnt increased. */ -rd_kafka_broker_t *rd_kafka_broker_internal (rd_kafka_t *rk) { - rd_kafka_broker_t *rkb; +rd_kafka_broker_t *rd_kafka_broker_internal(rd_kafka_t *rk) { + rd_kafka_broker_t *rkb; mtx_lock(&rk->rk_internal_rkb_lock); - rkb = rk->rk_internal_rkb; - if (rkb) - rd_kafka_broker_keep(rkb); + rkb = rk->rk_internal_rkb; + if (rkb) + rd_kafka_broker_keep(rkb); mtx_unlock(&rk->rk_internal_rkb_lock); - return rkb; + return rkb; } @@ -5538,57 +5462,57 @@ rd_kafka_broker_t *rd_kafka_broker_internal (rd_kafka_t *rk) { * * Locks: rd_kafka_wrlock(rk) must be held */ -rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, - rd_kafka_confsource_t source, - rd_kafka_secproto_t proto, - const char *name, uint16_t port, - int32_t nodeid) { - rd_kafka_broker_t *rkb; +rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk, + rd_kafka_confsource_t source, + rd_kafka_secproto_t proto, + const char *name, + uint16_t port, + int32_t nodeid) { + rd_kafka_broker_t *rkb; #ifndef _WIN32 int r; sigset_t newset, oldset; #endif - rkb = rd_calloc(1, sizeof(*rkb)); + rkb = rd_calloc(1, sizeof(*rkb)); if (source != RD_KAFKA_LOGICAL) { 
rd_kafka_mk_nodename(rkb->rkb_nodename, - sizeof(rkb->rkb_nodename), - name, port); + sizeof(rkb->rkb_nodename), name, port); rd_kafka_mk_brokername(rkb->rkb_name, sizeof(rkb->rkb_name), - proto, rkb->rkb_nodename, - nodeid, source); + proto, rkb->rkb_nodename, nodeid, + source); } else { /* Logical broker does not have a nodename (address) or port * at initialization. */ rd_snprintf(rkb->rkb_name, sizeof(rkb->rkb_name), "%s", name); } - rkb->rkb_source = source; - rkb->rkb_rk = rk; + rkb->rkb_source = source; + rkb->rkb_rk = rk; rkb->rkb_ts_state = rd_clock(); - rkb->rkb_nodeid = nodeid; - rkb->rkb_proto = proto; - rkb->rkb_port = port; + rkb->rkb_nodeid = nodeid; + rkb->rkb_proto = proto; + rkb->rkb_port = port; rkb->rkb_origname = rd_strdup(name); - mtx_init(&rkb->rkb_lock, mtx_plain); + mtx_init(&rkb->rkb_lock, mtx_plain); mtx_init(&rkb->rkb_logname_lock, mtx_plain); rkb->rkb_logname = rd_strdup(rkb->rkb_name); - TAILQ_INIT(&rkb->rkb_toppars); + TAILQ_INIT(&rkb->rkb_toppars); CIRCLEQ_INIT(&rkb->rkb_active_toppars); TAILQ_INIT(&rkb->rkb_monitors); - rd_kafka_bufq_init(&rkb->rkb_outbufs); - rd_kafka_bufq_init(&rkb->rkb_waitresps); - rd_kafka_bufq_init(&rkb->rkb_retrybufs); - rkb->rkb_ops = rd_kafka_q_new(rk); - rd_avg_init(&rkb->rkb_avg_int_latency, RD_AVG_GAUGE, 0, 100*1000, 2, - rk->rk_conf.stats_interval_ms ? 1 : 0); - rd_avg_init(&rkb->rkb_avg_outbuf_latency, RD_AVG_GAUGE, 0, 100*1000, 2, + rd_kafka_bufq_init(&rkb->rkb_outbufs); + rd_kafka_bufq_init(&rkb->rkb_waitresps); + rd_kafka_bufq_init(&rkb->rkb_retrybufs); + rkb->rkb_ops = rd_kafka_q_new(rk); + rd_avg_init(&rkb->rkb_avg_int_latency, RD_AVG_GAUGE, 0, 100 * 1000, 2, rk->rk_conf.stats_interval_ms ? 1 : 0); - rd_avg_init(&rkb->rkb_avg_rtt, RD_AVG_GAUGE, 0, 500*1000, 2, + rd_avg_init(&rkb->rkb_avg_outbuf_latency, RD_AVG_GAUGE, 0, 100 * 1000, + 2, rk->rk_conf.stats_interval_ms ? 1 : 0); + rd_avg_init(&rkb->rkb_avg_rtt, RD_AVG_GAUGE, 0, 500 * 1000, 2, rk->rk_conf.stats_interval_ms ? 
1 : 0); - rd_avg_init(&rkb->rkb_avg_throttle, RD_AVG_GAUGE, 0, 5000*1000, 2, + rd_avg_init(&rkb->rkb_avg_throttle, RD_AVG_GAUGE, 0, 5000 * 1000, 2, rk->rk_conf.stats_interval_ms ? 1 : 0); rd_refcnt_init(&rkb->rkb_refcnt, 0); rd_kafka_broker_keep(rkb); /* rk_broker's refcount */ @@ -5603,8 +5527,8 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, if (rkb->rkb_rk->rk_conf.api_version_request) { rd_interval_init(&rkb->rkb_ApiVersion_fail_intvl); rd_interval_fixed( - &rkb->rkb_ApiVersion_fail_intvl, - (rd_ts_t)rkb->rkb_rk->rk_conf.api_version_fallback_ms * + &rkb->rkb_ApiVersion_fail_intvl, + (rd_ts_t)rkb->rkb_rk->rk_conf.api_version_fallback_ms * 1000); } @@ -5618,12 +5542,12 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, * thread, which the new thread will inherit its sigmask from, * and then restore the original sigmask of the calling thread when * we're done creating the thread. - * NOTE: term_sig remains unblocked since we use it on termination - * to quickly interrupt system calls. */ + * NOTE: term_sig remains unblocked since we use it on termination + * to quickly interrupt system calls. */ sigemptyset(&oldset); sigfillset(&newset); - if (rkb->rkb_rk->rk_conf.term_sig) - sigdelset(&newset, rkb->rkb_rk->rk_conf.term_sig); + if (rkb->rkb_rk->rk_conf.term_sig) + sigdelset(&newset, rkb->rkb_rk->rk_conf.term_sig); pthread_sigmask(SIG_SETMASK, &newset, &oldset); #endif @@ -5633,8 +5557,8 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, * the write fails (silently) but this has no effect on latency * since the POLLIN flag will already have been raised for fd. 
*/ - rkb->rkb_wakeup_fd[0] = -1; - rkb->rkb_wakeup_fd[1] = -1; + rkb->rkb_wakeup_fd[0] = -1; + rkb->rkb_wakeup_fd[1] = -1; #ifndef _WIN32 if ((r = rd_pipe_nonblocking(rkb->rkb_wakeup_fd)) == -1) { @@ -5657,33 +5581,33 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, #endif /* Lock broker's lock here to synchronise state, i.e., hold off - * the broker thread until we've finalized the rkb. */ - rd_kafka_broker_lock(rkb); + * the broker thread until we've finalized the rkb. */ + rd_kafka_broker_lock(rkb); rd_kafka_broker_keep(rkb); /* broker thread's refcnt */ - if (thrd_create(&rkb->rkb_thread, - rd_kafka_broker_thread_main, rkb) != thrd_success) { - rd_kafka_broker_unlock(rkb); + if (thrd_create(&rkb->rkb_thread, rd_kafka_broker_thread_main, rkb) != + thrd_success) { + rd_kafka_broker_unlock(rkb); rd_kafka_log(rk, LOG_CRIT, "THREAD", "Unable to create broker thread"); - /* Send ERR op back to application for processing. */ - rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, - "Unable to create broker thread"); + /* Send ERR op back to application for processing. */ + rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, + "Unable to create broker thread"); - rd_free(rkb); + rd_free(rkb); #ifndef _WIN32 - /* Restore sigmask of caller */ - pthread_sigmask(SIG_SETMASK, &oldset, NULL); + /* Restore sigmask of caller */ + pthread_sigmask(SIG_SETMASK, &oldset, NULL); #endif - return NULL; - } + return NULL; + } if (rkb->rkb_source != RD_KAFKA_INTERNAL) { if (rk->rk_conf.security_protocol == - RD_KAFKA_PROTO_SASL_PLAINTEXT || + RD_KAFKA_PROTO_SASL_PLAINTEXT || rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL) rd_kafka_sasl_broker_init(rkb); @@ -5691,8 +5615,8 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, * newer brokers are more relevant than old ones, * and in particular LEARNED brokers are more relevant * than CONFIGURED (bootstrap) and LOGICAL brokers. 
*/ - TAILQ_INSERT_HEAD(&rkb->rkb_rk->rk_brokers, rkb, rkb_link); - (void)rd_atomic32_add(&rkb->rkb_rk->rk_broker_cnt, 1); + TAILQ_INSERT_HEAD(&rkb->rkb_rk->rk_brokers, rkb, rkb_link); + (void)rd_atomic32_add(&rkb->rkb_rk->rk_broker_cnt, 1); if (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb)) { rd_list_add(&rkb->rkb_rk->rk_broker_by_id, rkb); @@ -5700,26 +5624,26 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, rd_kafka_broker_cmp_by_id); } - rd_rkb_dbg(rkb, BROKER, "BROKER", - "Added new broker with NodeId %"PRId32, - rkb->rkb_nodeid); - } + rd_rkb_dbg(rkb, BROKER, "BROKER", + "Added new broker with NodeId %" PRId32, + rkb->rkb_nodeid); + } - rd_kafka_broker_unlock(rkb); + rd_kafka_broker_unlock(rkb); /* Add broker state monitor for the coordinator request to use. - * This is needed by the transactions implementation and DeleteGroups. */ - rd_kafka_broker_monitor_add(&rkb->rkb_coord_monitor, rkb, - rk->rk_ops, + * This is needed by the transactions implementation and DeleteGroups. 
+ */ + rd_kafka_broker_monitor_add(&rkb->rkb_coord_monitor, rkb, rk->rk_ops, rd_kafka_coord_rkb_monitor_cb); #ifndef _WIN32 - /* Restore sigmask of caller */ - pthread_sigmask(SIG_SETMASK, &oldset, NULL); + /* Restore sigmask of caller */ + pthread_sigmask(SIG_SETMASK, &oldset, NULL); #endif - return rkb; + return rkb; } @@ -5746,14 +5670,14 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, * @locality any rdkafka thread * @locks none */ -rd_kafka_broker_t *rd_kafka_broker_add_logical (rd_kafka_t *rk, - const char *name) { +rd_kafka_broker_t *rd_kafka_broker_add_logical(rd_kafka_t *rk, + const char *name) { rd_kafka_broker_t *rkb; rd_kafka_wrlock(rk); rkb = rd_kafka_broker_add(rk, RD_KAFKA_LOGICAL, - rk->rk_conf.security_protocol, - name, 0/*port*/, -1/*brokerid*/); + rk->rk_conf.security_protocol, name, + 0 /*port*/, -1 /*brokerid*/); rd_assert(rkb && *"failed to create broker thread"); rd_kafka_wrunlock(rk); @@ -5783,8 +5707,8 @@ rd_kafka_broker_t *rd_kafka_broker_add_logical (rd_kafka_t *rk, * * @locks none */ -void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, - rd_kafka_broker_t *from_rkb) { +void rd_kafka_broker_set_nodename(rd_kafka_broker_t *rkb, + rd_kafka_broker_t *from_rkb) { char nodename[RD_KAFKA_NODENAME_SIZE]; char brokername[RD_KAFKA_NODENAME_SIZE]; int32_t nodeid; @@ -5802,7 +5726,7 @@ void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, rd_kafka_broker_unlock(from_rkb); } else { *nodename = '\0'; - nodeid = -1; + nodeid = -1; } /* Set nodename on rkb */ @@ -5819,7 +5743,7 @@ void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, if (rkb->rkb_nodeid != nodeid) { rd_rkb_dbg(rkb, BROKER, "NODEID", - "Broker nodeid changed from %"PRId32" to %"PRId32, + "Broker nodeid changed from %" PRId32 " to %" PRId32, rkb->rkb_nodeid, nodeid); rkb->rkb_nodeid = nodeid; } @@ -5828,10 +5752,8 @@ void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, /* Update the log name to include (or exclude) the nodeid. 
* The nodeid is appended as "..logname../nodeid" */ - rd_kafka_mk_brokername(brokername, sizeof(brokername), - rkb->rkb_proto, - rkb->rkb_name, nodeid, - rkb->rkb_source); + rd_kafka_mk_brokername(brokername, sizeof(brokername), rkb->rkb_proto, + rkb->rkb_name, nodeid, rkb->rkb_source); rd_kafka_broker_set_logname(rkb, brokername); @@ -5858,14 +5780,14 @@ void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, * @locks: rd_kafka_*lock() MUST be held * @remark caller must release rkb reference by rd_kafka_broker_destroy() */ -rd_kafka_broker_t * -rd_kafka_broker_find_by_nodeid0_fl (const char *func, int line, - rd_kafka_t *rk, - int32_t nodeid, - int state, - rd_bool_t do_connect) { +rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func, + int line, + rd_kafka_t *rk, + int32_t nodeid, + int state, + rd_bool_t do_connect) { rd_kafka_broker_t *rkb; - rd_kafka_broker_t skel = { .rkb_nodeid = nodeid }; + rd_kafka_broker_t skel = {.rkb_nodeid = nodeid}; if (rd_kafka_terminating(rk)) return NULL; @@ -5899,31 +5821,30 @@ rd_kafka_broker_find_by_nodeid0_fl (const char *func, int line, * Locks: rd_kafka_rdlock(rk) must be held * NOTE: caller must release rkb reference by rd_kafka_broker_destroy() */ -static rd_kafka_broker_t *rd_kafka_broker_find (rd_kafka_t *rk, - rd_kafka_secproto_t proto, - const char *name, - uint16_t port) { - rd_kafka_broker_t *rkb; - char nodename[RD_KAFKA_NODENAME_SIZE]; +static rd_kafka_broker_t *rd_kafka_broker_find(rd_kafka_t *rk, + rd_kafka_secproto_t proto, + const char *name, + uint16_t port) { + rd_kafka_broker_t *rkb; + char nodename[RD_KAFKA_NODENAME_SIZE]; rd_kafka_mk_nodename(nodename, sizeof(nodename), name, port); - TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { if (RD_KAFKA_BROKER_IS_LOGICAL(rkb)) continue; - rd_kafka_broker_lock(rkb); - if (!rd_kafka_terminating(rk) && - rkb->rkb_proto == proto && - !strcmp(rkb->rkb_nodename, nodename)) { - 
rd_kafka_broker_keep(rkb); - rd_kafka_broker_unlock(rkb); - return rkb; - } - rd_kafka_broker_unlock(rkb); - } - - return NULL; + rd_kafka_broker_lock(rkb); + if (!rd_kafka_terminating(rk) && rkb->rkb_proto == proto && + !strcmp(rkb->rkb_nodename, nodename)) { + rd_kafka_broker_keep(rkb); + rd_kafka_broker_unlock(rkb); + return rkb; + } + rd_kafka_broker_unlock(rkb); + } + + return NULL; } @@ -5934,106 +5855,109 @@ static rd_kafka_broker_t *rd_kafka_broker_find (rd_kafka_t *rk, * * Returns 0 on success or -1 on parse error. */ -static int rd_kafka_broker_name_parse (rd_kafka_t *rk, - char **name, - rd_kafka_secproto_t *proto, - const char **host, - uint16_t *port) { - char *s = *name; - char *orig; - char *n, *t, *t2; - - /* Save a temporary copy of the original name for logging purposes */ - rd_strdupa(&orig, *name); - - /* Find end of this name (either by delimiter or end of string */ - if ((n = strchr(s, ','))) - *n = '\0'; - else - n = s + strlen(s)-1; - - - /* Check if this looks like an url. */ - if ((t = strstr(s, "://"))) { - int i; - /* "proto://host[:port]" */ - - if (t == s) { - rd_kafka_log(rk, LOG_WARNING, "BROKER", - "Broker name \"%s\" parse error: " - "empty protocol name", orig); - return -1; - } - - /* Make protocol uppercase */ - for (t2 = s ; t2 < t ; t2++) - *t2 = toupper(*t2); - - *t = '\0'; - - /* Find matching protocol by name. 
*/ - for (i = 0 ; i < RD_KAFKA_PROTO_NUM ; i++) - if (!rd_strcasecmp(s, rd_kafka_secproto_names[i])) - break; - - /* Unsupported protocol */ - if (i == RD_KAFKA_PROTO_NUM) { - rd_kafka_log(rk, LOG_WARNING, "BROKER", - "Broker name \"%s\" parse error: " - "unsupported protocol \"%s\"", orig, s); - - return -1; - } - - *proto = i; +static int rd_kafka_broker_name_parse(rd_kafka_t *rk, + char **name, + rd_kafka_secproto_t *proto, + const char **host, + uint16_t *port) { + char *s = *name; + char *orig; + char *n, *t, *t2; + + /* Save a temporary copy of the original name for logging purposes */ + rd_strdupa(&orig, *name); + + /* Find end of this name (either by delimiter or end of string */ + if ((n = strchr(s, ','))) + *n = '\0'; + else + n = s + strlen(s) - 1; + + + /* Check if this looks like an url. */ + if ((t = strstr(s, "://"))) { + int i; + /* "proto://host[:port]" */ + + if (t == s) { + rd_kafka_log(rk, LOG_WARNING, "BROKER", + "Broker name \"%s\" parse error: " + "empty protocol name", + orig); + return -1; + } + + /* Make protocol uppercase */ + for (t2 = s; t2 < t; t2++) + *t2 = toupper(*t2); + + *t = '\0'; + + /* Find matching protocol by name. 
*/ + for (i = 0; i < RD_KAFKA_PROTO_NUM; i++) + if (!rd_strcasecmp(s, rd_kafka_secproto_names[i])) + break; + + /* Unsupported protocol */ + if (i == RD_KAFKA_PROTO_NUM) { + rd_kafka_log(rk, LOG_WARNING, "BROKER", + "Broker name \"%s\" parse error: " + "unsupported protocol \"%s\"", + orig, s); + + return -1; + } + + *proto = i; /* Enforce protocol */ - if (rk->rk_conf.security_protocol != *proto) { - rd_kafka_log(rk, LOG_WARNING, "BROKER", - "Broker name \"%s\" parse error: " - "protocol \"%s\" does not match " - "security.protocol setting \"%s\"", - orig, s, - rd_kafka_secproto_names[ - rk->rk_conf.security_protocol]); - return -1; - } - - /* Hostname starts here */ - s = t+3; - - /* Ignore anything that looks like the path part of an URL */ - if ((t = strchr(s, '/'))) - *t = '\0'; - - } else - *proto = rk->rk_conf.security_protocol; /* Default protocol */ - - - *port = RD_KAFKA_PORT; - /* Check if port has been specified, but try to identify IPv6 - * addresses first: - * t = last ':' in string - * t2 = first ':' in string - * If t and t2 are equal then only one ":" exists in name - * and thus an IPv4 address with port specified. - * Else if not equal and t is prefixed with "]" then it's an - * IPv6 address with port specified. - * Else no port specified. */ - if ((t = strrchr(s, ':')) && - ((t2 = strchr(s, ':')) == t || *(t-1) == ']')) { - *t = '\0'; - *port = atoi(t+1); - } - - /* Empty host name -> localhost */ - if (!*s) - s = "localhost"; - - *host = s; - *name = n+1; /* past this name. 
e.g., next name/delimiter to parse */ - - return 0; + if (rk->rk_conf.security_protocol != *proto) { + rd_kafka_log( + rk, LOG_WARNING, "BROKER", + "Broker name \"%s\" parse error: " + "protocol \"%s\" does not match " + "security.protocol setting \"%s\"", + orig, s, + rd_kafka_secproto_names[rk->rk_conf + .security_protocol]); + return -1; + } + + /* Hostname starts here */ + s = t + 3; + + /* Ignore anything that looks like the path part of an URL */ + if ((t = strchr(s, '/'))) + *t = '\0'; + + } else + *proto = rk->rk_conf.security_protocol; /* Default protocol */ + + + *port = RD_KAFKA_PORT; + /* Check if port has been specified, but try to identify IPv6 + * addresses first: + * t = last ':' in string + * t2 = first ':' in string + * If t and t2 are equal then only one ":" exists in name + * and thus an IPv4 address with port specified. + * Else if not equal and t is prefixed with "]" then it's an + * IPv6 address with port specified. + * Else no port specified. */ + if ((t = strrchr(s, ':')) && + ((t2 = strchr(s, ':')) == t || *(t - 1) == ']')) { + *t = '\0'; + *port = atoi(t + 1); + } + + /* Empty host name -> localhost */ + if (!*s) + s = "localhost"; + + *host = s; + *name = n + 1; /* past this name. e.g., next name/delimiter to parse */ + + return 0; } /** @@ -6043,48 +5967,48 @@ static int rd_kafka_broker_name_parse (rd_kafka_t *rk, * @locality any thread * @locks none */ -int rd_kafka_brokers_add0 (rd_kafka_t *rk, const char *brokerlist) { - char *s_copy = rd_strdup(brokerlist); - char *s = s_copy; - int cnt = 0; - rd_kafka_broker_t *rkb; +int rd_kafka_brokers_add0(rd_kafka_t *rk, const char *brokerlist) { + char *s_copy = rd_strdup(brokerlist); + char *s = s_copy; + int cnt = 0; + rd_kafka_broker_t *rkb; int pre_cnt = rd_atomic32_get(&rk->rk_broker_cnt); - /* Parse comma-separated list of brokers. */ - while (*s) { - uint16_t port; - const char *host; - rd_kafka_secproto_t proto; + /* Parse comma-separated list of brokers. 
*/ + while (*s) { + uint16_t port; + const char *host; + rd_kafka_secproto_t proto; - if (*s == ',' || *s == ' ') { - s++; - continue; - } + if (*s == ',' || *s == ' ') { + s++; + continue; + } - if (rd_kafka_broker_name_parse(rk, &s, &proto, - &host, &port) == -1) - break; + if (rd_kafka_broker_name_parse(rk, &s, &proto, &host, &port) == + -1) + break; - rd_kafka_wrlock(rk); + rd_kafka_wrlock(rk); - if ((rkb = rd_kafka_broker_find(rk, proto, host, port)) && - rkb->rkb_source == RD_KAFKA_CONFIGURED) { - cnt++; - } else if (rd_kafka_broker_add(rk, RD_KAFKA_CONFIGURED, - proto, host, port, - RD_KAFKA_NODEID_UA) != NULL) - cnt++; + if ((rkb = rd_kafka_broker_find(rk, proto, host, port)) && + rkb->rkb_source == RD_KAFKA_CONFIGURED) { + cnt++; + } else if (rd_kafka_broker_add(rk, RD_KAFKA_CONFIGURED, proto, + host, port, + RD_KAFKA_NODEID_UA) != NULL) + cnt++; - /* If rd_kafka_broker_find returned a broker its - * reference needs to be released - * See issue #193 */ - if (rkb) - rd_kafka_broker_destroy(rkb); + /* If rd_kafka_broker_find returned a broker its + * reference needs to be released + * See issue #193 */ + if (rkb) + rd_kafka_broker_destroy(rkb); - rd_kafka_wrunlock(rk); - } + rd_kafka_wrunlock(rk); + } - rd_free(s_copy); + rd_free(s_copy); if (rk->rk_conf.sparse_connections && cnt > 0 && pre_cnt == 0) { /* Sparse connections: @@ -6096,11 +6020,11 @@ int rd_kafka_brokers_add0 (rd_kafka_t *rk, const char *brokerlist) { rd_kafka_rdunlock(rk); } - return cnt; + return cnt; } -int rd_kafka_brokers_add (rd_kafka_t *rk, const char *brokerlist) { +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist) { return rd_kafka_brokers_add0(rk, brokerlist); } @@ -6114,43 +6038,43 @@ int rd_kafka_brokers_add (rd_kafka_t *rk, const char *brokerlist) { * @locks none * @locality any */ -void -rd_kafka_broker_update (rd_kafka_t *rk, rd_kafka_secproto_t proto, - const struct rd_kafka_metadata_broker *mdb, - rd_kafka_broker_t **rkbp) { - rd_kafka_broker_t *rkb; +void 
rd_kafka_broker_update(rd_kafka_t *rk, + rd_kafka_secproto_t proto, + const struct rd_kafka_metadata_broker *mdb, + rd_kafka_broker_t **rkbp) { + rd_kafka_broker_t *rkb; char nodename[RD_KAFKA_NODENAME_SIZE]; int needs_update = 0; rd_kafka_mk_nodename(nodename, sizeof(nodename), mdb->host, mdb->port); - rd_kafka_wrlock(rk); - if (unlikely(rd_kafka_terminating(rk))) { - /* Dont update metadata while terminating, do this - * after acquiring lock for proper synchronisation */ - rd_kafka_wrunlock(rk); + rd_kafka_wrlock(rk); + if (unlikely(rd_kafka_terminating(rk))) { + /* Dont update metadata while terminating, do this + * after acquiring lock for proper synchronisation */ + rd_kafka_wrunlock(rk); if (rkbp) *rkbp = NULL; return; - } + } - if ((rkb = rd_kafka_broker_find_by_nodeid(rk, mdb->id))) { + if ((rkb = rd_kafka_broker_find_by_nodeid(rk, mdb->id))) { /* Broker matched by nodeid, see if we need to update * the hostname. */ if (strcmp(rkb->rkb_nodename, nodename)) needs_update = 1; - } else if ((rkb = rd_kafka_broker_find(rk, proto, - mdb->host, mdb->port))) { + } else if ((rkb = rd_kafka_broker_find(rk, proto, mdb->host, + mdb->port))) { /* Broker matched by hostname (but not by nodeid), * update the nodeid. */ needs_update = 1; } else if ((rkb = rd_kafka_broker_add(rk, RD_KAFKA_LEARNED, proto, - mdb->host, mdb->port, mdb->id))){ - rd_kafka_broker_keep(rkb); - } + mdb->host, mdb->port, mdb->id))) { + rd_kafka_broker_keep(rkb); + } - rd_kafka_wrunlock(rk); + rd_kafka_wrunlock(rk); if (rkb) { /* Existing broker */ @@ -6159,13 +6083,13 @@ rd_kafka_broker_update (rd_kafka_t *rk, rd_kafka_secproto_t proto, rko = rd_kafka_op_new(RD_KAFKA_OP_NODE_UPDATE); rd_strlcpy(rko->rko_u.node.nodename, nodename, sizeof(rko->rko_u.node.nodename)); - rko->rko_u.node.nodeid = mdb->id; + rko->rko_u.node.nodeid = mdb->id; /* Perform a blocking op request so that all * broker-related state, such as the rk broker list, * is up to date by the time this call returns. 
* Ignore&destroy the response. */ rd_kafka_op_err_destroy( - rd_kafka_op_req(rkb->rkb_ops, rko, -1)); + rd_kafka_op_req(rkb->rkb_ops, rko, -1)); } } @@ -6183,7 +6107,7 @@ rd_kafka_broker_update (rd_kafka_t *rk, rd_kafka_secproto_t proto, * @locks_required none * @locks_acquired rkb_lock */ -int32_t rd_kafka_broker_id (rd_kafka_broker_t *rkb) { +int32_t rd_kafka_broker_id(rd_kafka_broker_t *rkb) { int32_t broker_id; if (unlikely(!rkb)) @@ -6208,7 +6132,7 @@ int32_t rd_kafka_broker_id (rd_kafka_broker_t *rkb) { * Locks: none * Locality: any thread */ -const char *rd_kafka_broker_name (rd_kafka_broker_t *rkb) { +const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb) { static RD_TLS char ret[4][RD_KAFKA_NODENAME_SIZE]; static RD_TLS int reti = 0; @@ -6228,7 +6152,7 @@ const char *rd_kafka_broker_name (rd_kafka_broker_t *rkb) { * @locality any * @locks any */ -void rd_kafka_broker_wakeup (rd_kafka_broker_t *rkb) { +void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_WAKEUP); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH); rd_kafka_q_enq(rkb->rkb_ops, rko); @@ -6243,7 +6167,7 @@ void rd_kafka_broker_wakeup (rd_kafka_broker_t *rkb) { * * @returns the number of broker threads woken up */ -int rd_kafka_all_brokers_wakeup (rd_kafka_t *rk, int min_state) { +int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk, int min_state) { int cnt = 0; rd_kafka_broker_t *rkb; @@ -6268,8 +6192,8 @@ int rd_kafka_all_brokers_wakeup (rd_kafka_t *rk, int min_state) { /** * @brief Filter out brokers that have at least one connection attempt. 
*/ -static int rd_kafka_broker_filter_never_connected (rd_kafka_broker_t *rkb, - void *opaque) { +static int rd_kafka_broker_filter_never_connected(rd_kafka_broker_t *rkb, + void *opaque) { return rd_atomic32_get(&rkb->rkb_c.connects); } @@ -6284,7 +6208,7 @@ static int rd_kafka_broker_filter_never_connected (rd_kafka_broker_t *rkb, * @locality any * @locks rd_kafka_rdlock() MUST be held */ -void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason) { +void rd_kafka_connect_any(rd_kafka_t *rk, const char *reason) { rd_kafka_broker_t *rkb; rd_ts_t suppr; @@ -6293,21 +6217,23 @@ void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason) { * should not be reused for other purposes. * rd_kafka_broker_random() will not return LOGICAL brokers. */ if (rd_atomic32_get(&rk->rk_broker_up_cnt) - - rd_atomic32_get(&rk->rk_logical_broker_up_cnt) > 0 || + rd_atomic32_get(&rk->rk_logical_broker_up_cnt) > + 0 || rd_atomic32_get(&rk->rk_broker_cnt) - - rd_atomic32_get(&rk->rk_broker_addrless_cnt) == 0) + rd_atomic32_get(&rk->rk_broker_addrless_cnt) == + 0) return; mtx_lock(&rk->rk_suppress.sparse_connect_lock); suppr = rd_interval(&rk->rk_suppress.sparse_connect_random, - rk->rk_conf.sparse_connect_intvl*1000, 0); + rk->rk_conf.sparse_connect_intvl * 1000, 0); mtx_unlock(&rk->rk_suppress.sparse_connect_lock); if (suppr <= 0) { - rd_kafka_dbg(rk, BROKER|RD_KAFKA_DBG_GENERIC, "CONNECT", + rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT", "Not selecting any broker for cluster connection: " - "still suppressed for %"PRId64"ms: %s", - -suppr/1000, reason); + "still suppressed for %" PRId64 "ms: %s", + -suppr / 1000, reason); return; } @@ -6327,13 +6253,13 @@ void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason) { * this happens if there are brokers in > INIT state, * in which case they're already connecting. 
*/ - rd_kafka_dbg(rk, BROKER|RD_KAFKA_DBG_GENERIC, "CONNECT", + rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT", "Cluster connection already in progress: %s", reason); return; } - rd_rkb_dbg(rkb, BROKER|RD_KAFKA_DBG_GENERIC, "CONNECT", + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT", "Selected for cluster connection: " "%s (broker has %d connection attempt(s))", reason, rd_atomic32_get(&rkb->rkb_c.connects)); @@ -6351,11 +6277,12 @@ void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason) { * @locality any * @locks none */ -void rd_kafka_broker_purge_queues (rd_kafka_broker_t *rkb, int purge_flags, - rd_kafka_replyq_t replyq) { +void rd_kafka_broker_purge_queues(rd_kafka_broker_t *rkb, + int purge_flags, + rd_kafka_replyq_t replyq) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PURGE); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH); - rko->rko_replyq = replyq; + rko->rko_replyq = replyq; rko->rko_u.purge.flags = purge_flags; rd_kafka_q_enq(rkb->rkb_ops, rko); } @@ -6367,12 +6294,12 @@ void rd_kafka_broker_purge_queues (rd_kafka_broker_t *rkb, int purge_flags, * @locality broker thread * @locks none */ -static void rd_kafka_broker_handle_purge_queues (rd_kafka_broker_t *rkb, - rd_kafka_op_t *rko) { - int purge_flags = rko->rko_u.purge.flags; +static void rd_kafka_broker_handle_purge_queues(rd_kafka_broker_t *rkb, + rd_kafka_op_t *rko) { + int purge_flags = rko->rko_u.purge.flags; int inflight_cnt = 0, retry_cnt = 0, outq_cnt = 0, partial_cnt = 0; - rd_rkb_dbg(rkb, QUEUE|RD_KAFKA_DBG_TOPIC, "PURGE", + rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGE", "Purging queues with flags %s", rd_kafka_purge_flags2str(purge_flags)); @@ -6385,36 +6312,34 @@ static void rd_kafka_broker_handle_purge_queues (rd_kafka_broker_t *rkb, /* Purge in-flight ProduceRequests */ if (purge_flags & RD_KAFKA_PURGE_F_INFLIGHT) inflight_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 1, &rkb->rkb_waitresps, NULL, RD_KAFKAP_Produce, - 
RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, 0, NULL, 0); + rkb, 1, &rkb->rkb_waitresps, NULL, RD_KAFKAP_Produce, + RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, 0, NULL, 0); if (purge_flags & RD_KAFKA_PURGE_F_QUEUE) { /* Requests in retry queue */ retry_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_retrybufs, NULL, RD_KAFKAP_Produce, - RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, NULL, 0); + rkb, 0, &rkb->rkb_retrybufs, NULL, RD_KAFKAP_Produce, + RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, NULL, 0); /* Requests in transmit queue not completely sent yet. * partial_cnt is included in outq_cnt and denotes a request * that has been partially transmitted. */ outq_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_outbufs, &partial_cnt, - RD_KAFKAP_Produce, RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, - NULL, 0); + rkb, 0, &rkb->rkb_outbufs, &partial_cnt, RD_KAFKAP_Produce, + RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, NULL, 0); /* Purging a partially transmitted request will mess up * the protocol stream, so we need to disconnect from the broker * to get a clean protocol socket. 
*/ if (partial_cnt) rd_kafka_broker_fail( - rkb, - LOG_DEBUG, - RD_KAFKA_RESP_ERR__PURGE_QUEUE, - "Purged %d partially sent request: " - "forcing disconnect", partial_cnt); + rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__PURGE_QUEUE, + "Purged %d partially sent request: " + "forcing disconnect", + partial_cnt); } - rd_rkb_dbg(rkb, QUEUE|RD_KAFKA_DBG_TOPIC, "PURGEQ", + rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ", "Purged %i in-flight, %i retry-queued, " "%i out-queue, %i partially-sent requests", inflight_cnt, retry_cnt, outq_cnt, partial_cnt); @@ -6422,24 +6347,23 @@ static void rd_kafka_broker_handle_purge_queues (rd_kafka_broker_t *rkb, /* Purge partition queues */ if (purge_flags & RD_KAFKA_PURGE_F_QUEUE) { rd_kafka_toppar_t *rktp; - int msg_cnt = 0; + int msg_cnt = 0; int part_cnt = 0; TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) { int r; r = rd_kafka_toppar_purge_queues( - rktp, purge_flags, - rd_true/*include xmit msgq*/); + rktp, purge_flags, rd_true /*include xmit msgq*/); if (r > 0) { msg_cnt += r; part_cnt++; } } - rd_rkb_dbg(rkb, QUEUE|RD_KAFKA_DBG_TOPIC, "PURGEQ", - "Purged %i message(s) from %d partition(s)", - msg_cnt, part_cnt); + rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ", + "Purged %i message(s) from %d partition(s)", msg_cnt, + part_cnt); } rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); @@ -6455,9 +6379,9 @@ static void rd_kafka_broker_handle_purge_queues (rd_kafka_broker_t *rkb, * @locality broker thread * @locks rktp_lock MUST be held */ -void rd_kafka_broker_active_toppar_add (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - const char *reason) { +void rd_kafka_broker_active_toppar_add(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const char *reason) { int is_consumer = rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER; if (is_consumer && rktp->rktp_fetch) @@ -6473,14 +6397,13 @@ void rd_kafka_broker_active_toppar_add (rd_kafka_broker_t *rkb, rd_kafka_broker_active_toppar_next(rkb, rktp); rd_rkb_dbg(rkb, TOPIC, 
"FETCHADD", - "Added %.*s [%"PRId32"] to %s list (%d entries, opv %d, " + "Added %.*s [%" PRId32 + "] to %s list (%d entries, opv %d, " "%d messages queued): %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - is_consumer ? "fetch" : "active", + rktp->rktp_partition, is_consumer ? "fetch" : "active", rkb->rkb_active_toppar_cnt, rktp->rktp_fetch_version, - rd_kafka_msgq_len(&rktp->rktp_msgq), - reason); + rd_kafka_msgq_len(&rktp->rktp_msgq), reason); } @@ -6490,9 +6413,9 @@ void rd_kafka_broker_active_toppar_add (rd_kafka_broker_t *rkb, * Locality: broker thread * Locks: none */ -void rd_kafka_broker_active_toppar_del (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - const char *reason) { +void rd_kafka_broker_active_toppar_del(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const char *reason) { int is_consumer = rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER; if (is_consumer && !rktp->rktp_fetch) @@ -6508,19 +6431,18 @@ void rd_kafka_broker_active_toppar_del (rd_kafka_broker_t *rkb, if (rkb->rkb_active_toppar_next == rktp) { /* Update next pointer */ rd_kafka_broker_active_toppar_next( - rkb, CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, - rktp, rktp_activelink)); + rkb, CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, + rktp_activelink)); } rd_rkb_dbg(rkb, TOPIC, "FETCHADD", - "Removed %.*s [%"PRId32"] from %s list " + "Removed %.*s [%" PRId32 + "] from %s list " "(%d entries, opv %d): %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - is_consumer ? "fetch" : "active", + rktp->rktp_partition, is_consumer ? 
"fetch" : "active", rkb->rkb_active_toppar_cnt, rktp->rktp_fetch_version, reason); - } @@ -6532,7 +6454,7 @@ void rd_kafka_broker_active_toppar_del (rd_kafka_broker_t *rkb, * @locality any * @locks none */ -void rd_kafka_broker_schedule_connection (rd_kafka_broker_t *rkb) { +void rd_kafka_broker_schedule_connection(rd_kafka_broker_t *rkb) { rd_kafka_op_t *rko; rko = rd_kafka_op_new(RD_KAFKA_OP_CONNECT); @@ -6548,9 +6470,8 @@ void rd_kafka_broker_schedule_connection (rd_kafka_broker_t *rkb) { * @locality any * @locks none */ -void -rd_kafka_broker_persistent_connection_add (rd_kafka_broker_t *rkb, - rd_atomic32_t *acntp) { +void rd_kafka_broker_persistent_connection_add(rd_kafka_broker_t *rkb, + rd_atomic32_t *acntp) { if (rd_atomic32_add(acntp, 1) == 1) { /* First one, trigger event. */ @@ -6566,9 +6487,8 @@ rd_kafka_broker_persistent_connection_add (rd_kafka_broker_t *rkb, * @locality any * @locks none */ -void -rd_kafka_broker_persistent_connection_del (rd_kafka_broker_t *rkb, - rd_atomic32_t *acntp) { +void rd_kafka_broker_persistent_connection_del(rd_kafka_broker_t *rkb, + rd_atomic32_t *acntp) { int32_t r = rd_atomic32_sub(acntp, 1); rd_assert(r >= 0); } @@ -6582,9 +6502,9 @@ rd_kafka_broker_persistent_connection_del (rd_kafka_broker_t *rkb, * @locality monitoree's op handler thread * @locks none */ -static rd_kafka_op_res_t rd_kafka_broker_monitor_op_cb (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_broker_monitor_op_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { if (rko->rko_err != RD_KAFKA_RESP_ERR__DESTROY) rko->rko_u.broker_monitor.cb(rko->rko_u.broker_monitor.rkb); return RD_KAFKA_OP_RES_HANDLED; @@ -6597,14 +6517,13 @@ static rd_kafka_op_res_t rd_kafka_broker_monitor_op_cb (rd_kafka_t *rk, * @locality broker thread * @locks rkb_lock MUST be held */ -static void rd_kafka_broker_trigger_monitors (rd_kafka_broker_t *rkb) { +static void rd_kafka_broker_trigger_monitors(rd_kafka_broker_t 
*rkb) { rd_kafka_broker_monitor_t *rkbmon; TAILQ_FOREACH(rkbmon, &rkb->rkb_monitors, rkbmon_link) { - rd_kafka_op_t *rko = rd_kafka_op_new_cb( - rkb->rkb_rk, - RD_KAFKA_OP_BROKER_MONITOR, - rd_kafka_broker_monitor_op_cb); + rd_kafka_op_t *rko = + rd_kafka_op_new_cb(rkb->rkb_rk, RD_KAFKA_OP_BROKER_MONITOR, + rd_kafka_broker_monitor_op_cb); rd_kafka_broker_keep(rkb); rko->rko_u.broker_monitor.rkb = rkb; rko->rko_u.broker_monitor.cb = rkbmon->rkbmon_cb; @@ -6631,15 +6550,15 @@ static void rd_kafka_broker_trigger_monitors (rd_kafka_broker_t *rkb) { * @locks none * @locality any */ -void rd_kafka_broker_monitor_add (rd_kafka_broker_monitor_t *rkbmon, - rd_kafka_broker_t *rkb, - rd_kafka_q_t *rkq, - void (*callback) (rd_kafka_broker_t *rkb)) { +void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon, + rd_kafka_broker_t *rkb, + rd_kafka_q_t *rkq, + void (*callback)(rd_kafka_broker_t *rkb)) { rd_assert(!rkbmon->rkbmon_rkb); - rkbmon->rkbmon_rkb = rkb; - rkbmon->rkbmon_q = rkq; + rkbmon->rkbmon_rkb = rkb; + rkbmon->rkbmon_q = rkq; rd_kafka_q_keep(rkbmon->rkbmon_q); - rkbmon->rkbmon_cb = callback; + rkbmon->rkbmon_cb = callback; rd_kafka_broker_keep(rkb); @@ -6660,7 +6579,7 @@ void rd_kafka_broker_monitor_add (rd_kafka_broker_monitor_t *rkbmon, * @locks none * @locality any */ -void rd_kafka_broker_monitor_del (rd_kafka_broker_monitor_t *rkbmon) { +void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon) { rd_kafka_broker_t *rkb = rkbmon->rkbmon_rkb; if (!rkb) @@ -6682,7 +6601,7 @@ void rd_kafka_broker_monitor_del (rd_kafka_broker_monitor_t *rkbmon) { * @{ * */ -int unittest_broker (void) { +int unittest_broker(void) { int fails = 0; fails += rd_ut_reconnect_backoff(); diff --git a/src/rdkafka_broker.h b/src/rdkafka_broker.h index 936607705a..1ee7a04f48 100644 --- a/src/rdkafka_broker.h +++ b/src/rdkafka_broker.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012,2013 Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -39,7 +39,7 @@ extern const char *rd_kafka_secproto_names[]; /** * @enum Broker states */ -typedef enum { +typedef enum { RD_KAFKA_BROKER_STATE_INIT, RD_KAFKA_BROKER_STATE_DOWN, RD_KAFKA_BROKER_STATE_TRY_CONNECT, @@ -70,7 +70,7 @@ typedef struct rd_kafka_broker_monitor_s { /**< Callback triggered on the monitoree's op handler thread. * Do note that the callback might be triggered even after * it has been deleted due to the queueing nature of op queues. */ - void (*rkbmon_cb) (rd_kafka_broker_t *rkb); + void (*rkbmon_cb)(rd_kafka_broker_t *rkb); } rd_kafka_broker_monitor_t; @@ -78,35 +78,35 @@ typedef struct rd_kafka_broker_monitor_s { * @struct Broker instance */ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ - TAILQ_ENTRY(rd_kafka_broker_s) rkb_link; + TAILQ_ENTRY(rd_kafka_broker_s) rkb_link; - int32_t rkb_nodeid; /**< Broker Node Id. - * @locks rkb_lock */ + int32_t rkb_nodeid; /**< Broker Node Id. + * @locks rkb_lock */ #define RD_KAFKA_NODEID_UA -1 - rd_sockaddr_list_t *rkb_rsal; - rd_ts_t rkb_ts_rsal_last; - const rd_sockaddr_inx_t *rkb_addr_last; /* Last used connect address */ + rd_sockaddr_list_t *rkb_rsal; + rd_ts_t rkb_ts_rsal_last; + const rd_sockaddr_inx_t *rkb_addr_last; /* Last used connect address */ - rd_kafka_transport_t *rkb_transport; + rd_kafka_transport_t *rkb_transport; - uint32_t rkb_corrid; - int rkb_connid; /* Connection id, increased by - * one for each connection by - * this broker. 
Used as a safe-guard - * to help troubleshooting buffer - * problems across disconnects. */ + uint32_t rkb_corrid; + int rkb_connid; /* Connection id, increased by + * one for each connection by + * this broker. Used as a safe-guard + * to help troubleshooting buffer + * problems across disconnects. */ - rd_kafka_q_t *rkb_ops; + rd_kafka_q_t *rkb_ops; - mtx_t rkb_lock; + mtx_t rkb_lock; - int rkb_blocking_max_ms; /* Maximum IO poll blocking - * time. */ + int rkb_blocking_max_ms; /* Maximum IO poll blocking + * time. */ /* Toppars handled by this broker */ - TAILQ_HEAD(, rd_kafka_toppar_s) rkb_toppars; - int rkb_toppar_cnt; + TAILQ_HEAD(, rd_kafka_toppar_s) rkb_toppars; + int rkb_toppar_cnt; /* Active toppars that are eligible for: * - (consumer) fetching due to underflow @@ -115,138 +115,139 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ * The circleq provides round-robin scheduling for both cases. */ CIRCLEQ_HEAD(, rd_kafka_toppar_s) rkb_active_toppars; - int rkb_active_toppar_cnt; - rd_kafka_toppar_t *rkb_active_toppar_next; /* Next 'first' toppar - * in fetch list. - * This is used for - * round-robin. */ + int rkb_active_toppar_cnt; + rd_kafka_toppar_t *rkb_active_toppar_next; /* Next 'first' toppar + * in fetch list. + * This is used for + * round-robin. */ - rd_kafka_cgrp_t *rkb_cgrp; + rd_kafka_cgrp_t *rkb_cgrp; - rd_ts_t rkb_ts_fetch_backoff; - int rkb_fetching; + rd_ts_t rkb_ts_fetch_backoff; + int rkb_fetching; - rd_kafka_broker_state_t rkb_state; /**< Current broker state */ + rd_kafka_broker_state_t rkb_state; /**< Current broker state */ - rd_ts_t rkb_ts_state; /* Timestamp of last - * state change */ - rd_interval_t rkb_timeout_scan_intvl; /* Waitresp timeout scan - * interval. */ + rd_ts_t rkb_ts_state; /* Timestamp of last + * state change */ + rd_interval_t rkb_timeout_scan_intvl; /* Waitresp timeout scan + * interval. */ - rd_atomic32_t rkb_blocking_request_cnt; /* The number of - * in-flight blocking - * requests. 
- * A blocking request is - * one that is known to - * possibly block on the - * broker for longer than - * the typical processing - * time, e.g.: - * JoinGroup, SyncGroup */ + rd_atomic32_t rkb_blocking_request_cnt; /* The number of + * in-flight blocking + * requests. + * A blocking request is + * one that is known to + * possibly block on the + * broker for longer than + * the typical processing + * time, e.g.: + * JoinGroup, SyncGroup */ - int rkb_features; /* Protocol features supported - * by this broker. - * See RD_KAFKA_FEATURE_* in - * rdkafka_proto.h */ + int rkb_features; /* Protocol features supported + * by this broker. + * See RD_KAFKA_FEATURE_* in + * rdkafka_proto.h */ struct rd_kafka_ApiVersion *rkb_ApiVersions; /* Broker's supported APIs * (MUST be sorted) */ - size_t rkb_ApiVersions_cnt; - rd_interval_t rkb_ApiVersion_fail_intvl; /* Controls how long - * the fallback proto - * will be used after - * ApiVersionRequest - * failure. */ - - rd_kafka_confsource_t rkb_source; - struct { - rd_atomic64_t tx_bytes; - rd_atomic64_t tx; /**< Kafka requests */ - rd_atomic64_t tx_err; - rd_atomic64_t tx_retries; - rd_atomic64_t req_timeouts; /* Accumulated value */ - - rd_atomic64_t rx_bytes; - rd_atomic64_t rx; /**< Kafka responses */ - rd_atomic64_t rx_err; - rd_atomic64_t rx_corrid_err; /* CorrId misses */ - rd_atomic64_t rx_partial; /* Partial messages received + size_t rkb_ApiVersions_cnt; + rd_interval_t rkb_ApiVersion_fail_intvl; /* Controls how long + * the fallback proto + * will be used after + * ApiVersionRequest + * failure. 
*/ + + rd_kafka_confsource_t rkb_source; + struct { + rd_atomic64_t tx_bytes; + rd_atomic64_t tx; /**< Kafka requests */ + rd_atomic64_t tx_err; + rd_atomic64_t tx_retries; + rd_atomic64_t req_timeouts; /* Accumulated value */ + + rd_atomic64_t rx_bytes; + rd_atomic64_t rx; /**< Kafka responses */ + rd_atomic64_t rx_err; + rd_atomic64_t rx_corrid_err; /* CorrId misses */ + rd_atomic64_t rx_partial; /* Partial messages received * and dropped. */ - rd_atomic64_t zbuf_grow; /* Compression/decompression buffer grows needed */ + rd_atomic64_t zbuf_grow; /* Compression/decompression buffer + grows needed */ rd_atomic64_t buf_grow; /* rkbuf grows needed */ rd_atomic64_t wakeups; /* Poll wakeups */ - rd_atomic32_t connects; /**< Connection attempts, - * successful or not. */ + rd_atomic32_t connects; /**< Connection attempts, + * successful or not. */ - rd_atomic32_t disconnects; /**< Disconnects. - * Always peer-triggered. */ + rd_atomic32_t disconnects; /**< Disconnects. + * Always peer-triggered. */ rd_atomic64_t reqtype[RD_KAFKAP__NUM]; /**< Per request-type * counter */ - rd_atomic64_t ts_send; /**< Timestamp of last send */ - rd_atomic64_t ts_recv; /**< Timestamp of last receive */ - } rkb_c; + rd_atomic64_t ts_send; /**< Timestamp of last send */ + rd_atomic64_t ts_recv; /**< Timestamp of last receive */ + } rkb_c; - int rkb_req_timeouts; /* Current value */ + int rkb_req_timeouts; /* Current value */ - thrd_t rkb_thread; + thrd_t rkb_thread; - rd_refcnt_t rkb_refcnt; + rd_refcnt_t rkb_refcnt; - rd_kafka_t *rkb_rk; + rd_kafka_t *rkb_rk; - rd_kafka_buf_t *rkb_recv_buf; + rd_kafka_buf_t *rkb_recv_buf; - int rkb_max_inflight; /* Maximum number of in-flight - * requests to broker. - * Compared to rkb_waitresps length.*/ - rd_kafka_bufq_t rkb_outbufs; - rd_kafka_bufq_t rkb_waitresps; - rd_kafka_bufq_t rkb_retrybufs; + int rkb_max_inflight; /* Maximum number of in-flight + * requests to broker. 
+ * Compared to rkb_waitresps length.*/ + rd_kafka_bufq_t rkb_outbufs; + rd_kafka_bufq_t rkb_waitresps; + rd_kafka_bufq_t rkb_retrybufs; - rd_avg_t rkb_avg_int_latency;/* Current internal latency period*/ - rd_avg_t rkb_avg_outbuf_latency; /**< Current latency - * between buf_enq0 - * and writing to socket - */ - rd_avg_t rkb_avg_rtt; /* Current RTT period */ - rd_avg_t rkb_avg_throttle; /* Current throttle period */ + rd_avg_t rkb_avg_int_latency; /* Current internal latency period*/ + rd_avg_t rkb_avg_outbuf_latency; /**< Current latency + * between buf_enq0 + * and writing to socket + */ + rd_avg_t rkb_avg_rtt; /* Current RTT period */ + rd_avg_t rkb_avg_throttle; /* Current throttle period */ /* These are all protected by rkb_lock */ - char rkb_name[RD_KAFKA_NODENAME_SIZE]; /* Displ name */ - char rkb_nodename[RD_KAFKA_NODENAME_SIZE]; /* host:port*/ - uint16_t rkb_port; /* TCP port */ - char *rkb_origname; /* Original - * host name */ - int rkb_nodename_epoch; /**< Bumped each time - * the nodename is changed. - * Compared to - * rkb_connect_epoch - * to trigger a reconnect - * for logical broker - * when the nodename is - * updated. */ - int rkb_connect_epoch; /**< The value of - * rkb_nodename_epoch at the - * last connection attempt. - */ + char rkb_name[RD_KAFKA_NODENAME_SIZE]; /* Displ name */ + char rkb_nodename[RD_KAFKA_NODENAME_SIZE]; /* host:port*/ + uint16_t rkb_port; /* TCP port */ + char *rkb_origname; /* Original + * host name */ + int rkb_nodename_epoch; /**< Bumped each time + * the nodename is changed. + * Compared to + * rkb_connect_epoch + * to trigger a reconnect + * for logical broker + * when the nodename is + * updated. */ + int rkb_connect_epoch; /**< The value of + * rkb_nodename_epoch at the + * last connection attempt. 
+ */ /* Logging name is a copy of rkb_name, protected by its own mutex */ - char *rkb_logname; - mtx_t rkb_logname_lock; + char *rkb_logname; + mtx_t rkb_logname_lock; - rd_socket_t rkb_wakeup_fd[2]; /* Wake-up fds (r/w) to wake - * up from IO-wait when - * queues have content. */ + rd_socket_t rkb_wakeup_fd[2]; /* Wake-up fds (r/w) to wake + * up from IO-wait when + * queues have content. */ /**< Current, exponentially increased, reconnect backoff. */ - int rkb_reconnect_backoff_ms; + int rkb_reconnect_backoff_ms; /**< Absolute timestamp of next allowed reconnect. */ - rd_ts_t rkb_ts_reconnect; + rd_ts_t rkb_ts_reconnect; /**< Persistent connection demand is tracked by * an counter for each type of demand. @@ -284,11 +285,11 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ * Will trigger the coord_req fsm on broker state change. */ rd_kafka_broker_monitor_t rkb_coord_monitor; - rd_kafka_secproto_t rkb_proto; + rd_kafka_secproto_t rkb_proto; - int rkb_down_reported; /* Down event reported */ + int rkb_down_reported; /* Down event reported */ #if WITH_SASL_CYRUS - rd_kafka_timer_t rkb_sasl_kinit_refresh_tmr; + rd_kafka_timer_t rkb_sasl_kinit_refresh_tmr; #endif @@ -313,12 +314,12 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ struct { char errstr[512]; /**< Last error string */ rd_kafka_resp_err_t err; /**< Last error code */ - int cnt; /**< Number of identical errors */ + int cnt; /**< Number of identical errors */ } rkb_last_err; }; -#define rd_kafka_broker_keep(rkb) rd_refcnt_add(&(rkb)->rkb_refcnt) -#define rd_kafka_broker_keep_fl(FUNC,LINE,RKB) \ +#define rd_kafka_broker_keep(rkb) rd_refcnt_add(&(rkb)->rkb_refcnt) +#define rd_kafka_broker_keep_fl(FUNC, LINE, RKB) \ rd_refcnt_add_fl(FUNC, LINE, &(RKB)->rkb_refcnt) #define rd_kafka_broker_lock(rkb) mtx_lock(&(rkb)->rkb_lock) #define rd_kafka_broker_unlock(rkb) mtx_unlock(&(rkb)->rkb_lock) @@ -330,8 +331,8 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ * @locks broker_lock MUST NOT be held. 
* @locality any */ -static RD_INLINE RD_UNUSED -rd_kafka_broker_state_t rd_kafka_broker_get_state (rd_kafka_broker_t *rkb) { +static RD_INLINE RD_UNUSED rd_kafka_broker_state_t +rd_kafka_broker_get_state(rd_kafka_broker_t *rkb) { rd_kafka_broker_state_t state; rd_kafka_broker_lock(rkb); state = rkb->rkb_state; @@ -341,12 +342,11 @@ rd_kafka_broker_state_t rd_kafka_broker_get_state (rd_kafka_broker_t *rkb) { - /** * @returns true if the broker state is UP or UPDATE */ -#define rd_kafka_broker_state_is_up(state) \ - ((state) == RD_KAFKA_BROKER_STATE_UP || \ +#define rd_kafka_broker_state_is_up(state) \ + ((state) == RD_KAFKA_BROKER_STATE_UP || \ (state) == RD_KAFKA_BROKER_STATE_UPDATE) @@ -356,7 +356,7 @@ rd_kafka_broker_state_t rd_kafka_broker_get_state (rd_kafka_broker_t *rkb) { * @locality any */ static RD_UNUSED RD_INLINE rd_bool_t -rd_kafka_broker_is_up (rd_kafka_broker_t *rkb) { +rd_kafka_broker_is_up(rd_kafka_broker_t *rkb) { rd_kafka_broker_state_t state = rd_kafka_broker_get_state(rkb); return rd_kafka_broker_state_is_up(state); } @@ -365,8 +365,8 @@ rd_kafka_broker_is_up (rd_kafka_broker_t *rkb) { /** * @brief Broker comparator */ -static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp (const void *_a, - const void *_b) { +static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp(const void *_a, + const void *_b) { const rd_kafka_broker_t *a = _a, *b = _b; return RD_CMP(a, b); } @@ -375,174 +375,185 @@ static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp (const void *_a, /** * @returns true if broker supports \p features, else false. 
*/ -static RD_UNUSED -int rd_kafka_broker_supports (rd_kafka_broker_t *rkb, int features) { +static RD_UNUSED int rd_kafka_broker_supports(rd_kafka_broker_t *rkb, + int features) { const rd_bool_t do_lock = !thrd_is_current(rkb->rkb_thread); - int r; + int r; if (do_lock) rd_kafka_broker_lock(rkb); - r = (rkb->rkb_features & features) == features; + r = (rkb->rkb_features & features) == features; if (do_lock) rd_kafka_broker_unlock(rkb); - return r; + return r; } -int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb, - int16_t ApiKey, - int16_t minver, int16_t maxver, - int *featuresp); +int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int16_t minver, + int16_t maxver, + int *featuresp); -rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl (const char *func, - int line, - rd_kafka_t *rk, - int32_t nodeid, - int state, - rd_bool_t do_connect); +rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func, + int line, + rd_kafka_t *rk, + int32_t nodeid, + int state, + rd_bool_t do_connect); -#define rd_kafka_broker_find_by_nodeid0(rk,nodeid,state,do_connect) \ - rd_kafka_broker_find_by_nodeid0_fl(__FUNCTION__,__LINE__, \ - rk,nodeid,state,do_connect) -#define rd_kafka_broker_find_by_nodeid(rk,nodeid) \ - rd_kafka_broker_find_by_nodeid0(rk,nodeid,-1,rd_false) +#define rd_kafka_broker_find_by_nodeid0(rk, nodeid, state, do_connect) \ + rd_kafka_broker_find_by_nodeid0_fl(__FUNCTION__, __LINE__, rk, nodeid, \ + state, do_connect) +#define rd_kafka_broker_find_by_nodeid(rk, nodeid) \ + rd_kafka_broker_find_by_nodeid0(rk, nodeid, -1, rd_false) /** * Filter out brokers that don't support Idempotent Producer. 
*/ static RD_INLINE RD_UNUSED int -rd_kafka_broker_filter_non_idempotent (rd_kafka_broker_t *rkb, void *opaque) { +rd_kafka_broker_filter_non_idempotent(rd_kafka_broker_t *rkb, void *opaque) { return !(rkb->rkb_features & RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER); } -rd_kafka_broker_t *rd_kafka_broker_any (rd_kafka_t *rk, int state, - int (*filter) (rd_kafka_broker_t *rkb, - void *opaque), - void *opaque, const char *reason); -rd_kafka_broker_t * -rd_kafka_broker_any_up (rd_kafka_t *rk, - int *filtered_cnt, - int (*filter) (rd_kafka_broker_t *rkb, - void *opaque), - void *opaque, const char *reason); -rd_kafka_broker_t *rd_kafka_broker_any_usable (rd_kafka_t *rk, int timeout_ms, - rd_dolock_t do_lock, - int features, - const char *reason); - -rd_kafka_broker_t *rd_kafka_broker_prefer (rd_kafka_t *rk, int32_t broker_id, - int state); +rd_kafka_broker_t *rd_kafka_broker_any(rd_kafka_t *rk, + int state, + int (*filter)(rd_kafka_broker_t *rkb, + void *opaque), + void *opaque, + const char *reason); +rd_kafka_broker_t *rd_kafka_broker_any_up(rd_kafka_t *rk, + int *filtered_cnt, + int (*filter)(rd_kafka_broker_t *rkb, + void *opaque), + void *opaque, + const char *reason); +rd_kafka_broker_t *rd_kafka_broker_any_usable(rd_kafka_t *rk, + int timeout_ms, + rd_dolock_t do_lock, + int features, + const char *reason); rd_kafka_broker_t * -rd_kafka_broker_get_async (rd_kafka_t *rk, int32_t broker_id, int state, - rd_kafka_enq_once_t *eonce); +rd_kafka_broker_prefer(rd_kafka_t *rk, int32_t broker_id, int state); + +rd_kafka_broker_t *rd_kafka_broker_get_async(rd_kafka_t *rk, + int32_t broker_id, + int state, + rd_kafka_enq_once_t *eonce); -rd_kafka_broker_t *rd_kafka_broker_controller (rd_kafka_t *rk, int state, - rd_ts_t abs_timeout); rd_kafka_broker_t * -rd_kafka_broker_controller_async (rd_kafka_t *rk, int state, - rd_kafka_enq_once_t *eonce); +rd_kafka_broker_controller(rd_kafka_t *rk, int state, rd_ts_t abs_timeout); +rd_kafka_broker_t 
*rd_kafka_broker_controller_async(rd_kafka_t *rk, + int state, + rd_kafka_enq_once_t *eonce); -int rd_kafka_brokers_add0 (rd_kafka_t *rk, const char *brokerlist); -void rd_kafka_broker_set_state (rd_kafka_broker_t *rkb, int state); +int rd_kafka_brokers_add0(rd_kafka_t *rk, const char *brokerlist); +void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state); -void rd_kafka_broker_fail (rd_kafka_broker_t *rkb, - int level, rd_kafka_resp_err_t err, - const char *fmt, ...) RD_FORMAT(printf, 4, 5); +void rd_kafka_broker_fail(rd_kafka_broker_t *rkb, + int level, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 4, 5); -void rd_kafka_broker_conn_closed (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const char *errstr); +void rd_kafka_broker_conn_closed(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + const char *errstr); -void rd_kafka_broker_destroy_final (rd_kafka_broker_t *rkb); +void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb); -#define rd_kafka_broker_destroy(rkb) \ - rd_refcnt_destroywrapper(&(rkb)->rkb_refcnt, \ +#define rd_kafka_broker_destroy(rkb) \ + rd_refcnt_destroywrapper(&(rkb)->rkb_refcnt, \ rd_kafka_broker_destroy_final(rkb)) -void -rd_kafka_broker_update (rd_kafka_t *rk, rd_kafka_secproto_t proto, - const struct rd_kafka_metadata_broker *mdb, - rd_kafka_broker_t **rkbp); -rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, - rd_kafka_confsource_t source, - rd_kafka_secproto_t proto, - const char *name, uint16_t port, - int32_t nodeid); +void rd_kafka_broker_update(rd_kafka_t *rk, + rd_kafka_secproto_t proto, + const struct rd_kafka_metadata_broker *mdb, + rd_kafka_broker_t **rkbp); +rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk, + rd_kafka_confsource_t source, + rd_kafka_secproto_t proto, + const char *name, + uint16_t port, + int32_t nodeid); -rd_kafka_broker_t *rd_kafka_broker_add_logical (rd_kafka_t *rk, - const char *name); +rd_kafka_broker_t *rd_kafka_broker_add_logical(rd_kafka_t *rk, + 
const char *name); /** @define returns true if broker is logical. No locking is needed. */ #define RD_KAFKA_BROKER_IS_LOGICAL(rkb) ((rkb)->rkb_source == RD_KAFKA_LOGICAL) -void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, - rd_kafka_broker_t *from_rkb); +void rd_kafka_broker_set_nodename(rd_kafka_broker_t *rkb, + rd_kafka_broker_t *from_rkb); -void rd_kafka_broker_connect_up (rd_kafka_broker_t *rkb); -void rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr); +void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb); +void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr); -int rd_kafka_send (rd_kafka_broker_t *rkb); -int rd_kafka_recv (rd_kafka_broker_t *rkb); +int rd_kafka_send(rd_kafka_broker_t *rkb); +int rd_kafka_recv(rd_kafka_broker_t *rkb); -void rd_kafka_dr_msgq (rd_kafka_topic_t *rkt, - rd_kafka_msgq_t *rkmq, rd_kafka_resp_err_t err); +void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt, + rd_kafka_msgq_t *rkmq, + rd_kafka_resp_err_t err); -void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - uint64_t last_msgid); +void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + uint64_t last_msgid); -void rd_kafka_broker_buf_enq1 (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +void rd_kafka_broker_buf_enq1(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); -void rd_kafka_broker_buf_enq_replyq (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +void rd_kafka_broker_buf_enq_replyq(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); -void rd_kafka_broker_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf); +void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf); -rd_kafka_broker_t 
*rd_kafka_broker_internal (rd_kafka_t *rk); +rd_kafka_broker_t *rd_kafka_broker_internal(rd_kafka_t *rk); -void msghdr_print (rd_kafka_t *rk, - const char *what, const struct msghdr *msg, - int hexdump); +void msghdr_print(rd_kafka_t *rk, + const char *what, + const struct msghdr *msg, + int hexdump); -int32_t rd_kafka_broker_id (rd_kafka_broker_t *rkb); -const char *rd_kafka_broker_name (rd_kafka_broker_t *rkb); -void rd_kafka_broker_wakeup (rd_kafka_broker_t *rkb); -int rd_kafka_all_brokers_wakeup (rd_kafka_t *rk, - int min_state); +int32_t rd_kafka_broker_id(rd_kafka_broker_t *rkb); +const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb); +void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb); +int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk, int min_state); -void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason); +void rd_kafka_connect_any(rd_kafka_t *rk, const char *reason); -void rd_kafka_broker_purge_queues (rd_kafka_broker_t *rkb, int purge_flags, - rd_kafka_replyq_t replyq); +void rd_kafka_broker_purge_queues(rd_kafka_broker_t *rkb, + int purge_flags, + rd_kafka_replyq_t replyq); -int rd_kafka_brokers_get_state_version (rd_kafka_t *rk); -int rd_kafka_brokers_wait_state_change (rd_kafka_t *rk, int stored_version, - int timeout_ms); -int rd_kafka_brokers_wait_state_change_async (rd_kafka_t *rk, - int stored_version, - rd_kafka_enq_once_t *eonce); -void rd_kafka_brokers_broadcast_state_change (rd_kafka_t *rk); +int rd_kafka_brokers_get_state_version(rd_kafka_t *rk); +int rd_kafka_brokers_wait_state_change(rd_kafka_t *rk, + int stored_version, + int timeout_ms); +int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk, + int stored_version, + rd_kafka_enq_once_t *eonce); +void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk); /** * Updates the current toppar active round-robin next pointer. 
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_broker_active_toppar_next (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *sugg_next) { +static RD_INLINE RD_UNUSED void +rd_kafka_broker_active_toppar_next(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *sugg_next) { if (CIRCLEQ_EMPTY(&rkb->rkb_active_toppars) || (void *)sugg_next == CIRCLEQ_ENDC(&rkb->rkb_active_toppars)) rkb->rkb_active_toppar_next = NULL; @@ -550,37 +561,35 @@ void rd_kafka_broker_active_toppar_next (rd_kafka_broker_t *rkb, rkb->rkb_active_toppar_next = sugg_next; else rkb->rkb_active_toppar_next = - CIRCLEQ_FIRST(&rkb->rkb_active_toppars); + CIRCLEQ_FIRST(&rkb->rkb_active_toppars); } -void rd_kafka_broker_active_toppar_add (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - const char *reason); +void rd_kafka_broker_active_toppar_add(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const char *reason); -void rd_kafka_broker_active_toppar_del (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - const char *reason); +void rd_kafka_broker_active_toppar_del(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const char *reason); -void rd_kafka_broker_schedule_connection (rd_kafka_broker_t *rkb); +void rd_kafka_broker_schedule_connection(rd_kafka_broker_t *rkb); -void -rd_kafka_broker_persistent_connection_add (rd_kafka_broker_t *rkb, - rd_atomic32_t *acntp); +void rd_kafka_broker_persistent_connection_add(rd_kafka_broker_t *rkb, + rd_atomic32_t *acntp); -void -rd_kafka_broker_persistent_connection_del (rd_kafka_broker_t *rkb, - rd_atomic32_t *acntp); +void rd_kafka_broker_persistent_connection_del(rd_kafka_broker_t *rkb, + rd_atomic32_t *acntp); -void rd_kafka_broker_monitor_add (rd_kafka_broker_monitor_t *rkbmon, - rd_kafka_broker_t *rkb, - rd_kafka_q_t *rkq, - void (*callback) (rd_kafka_broker_t *rkb)); +void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon, + rd_kafka_broker_t *rkb, + rd_kafka_q_t *rkq, + void (*callback)(rd_kafka_broker_t *rkb)); -void rd_kafka_broker_monitor_del 
(rd_kafka_broker_monitor_t *rkbmon); +void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon); -int unittest_broker (void); +int unittest_broker(void); #endif /* _RDKAFKA_BROKER_H_ */ diff --git a/src/rdkafka_buf.c b/src/rdkafka_buf.c index 27e67a2de9..3da0fa50cd 100644 --- a/src/rdkafka_buf.c +++ b/src/rdkafka_buf.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2015, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -31,10 +31,9 @@ #include "rdkafka_broker.h" #include "rdkafka_interceptor.h" -void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) { +void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf) { - switch (rkbuf->rkbuf_reqhdr.ApiKey) - { + switch (rkbuf->rkbuf_reqhdr.ApiKey) { case RD_KAFKAP_Metadata: if (rkbuf->rkbuf_u.Metadata.topics) rd_list_destroy(rkbuf->rkbuf_u.Metadata.topics); @@ -77,7 +76,7 @@ void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) { rd_refcnt_destroy(&rkbuf->rkbuf_refcnt); - rd_free(rkbuf); + rd_free(rkbuf); } @@ -87,8 +86,11 @@ void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) { * * \p buf will NOT be freed by the buffer. */ -void rd_kafka_buf_push0 (rd_kafka_buf_t *rkbuf, const void *buf, size_t len, - int allow_crc_calc, void (*free_cb) (void *)) { +void rd_kafka_buf_push0(rd_kafka_buf_t *rkbuf, + const void *buf, + size_t len, + int allow_crc_calc, + void (*free_cb)(void *)) { rd_buf_push(&rkbuf->rkbuf_buf, buf, len, free_cb); if (allow_crc_calc && (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)) @@ -105,7 +107,7 @@ void rd_kafka_buf_push0 (rd_kafka_buf_t *rkbuf, const void *buf, size_t len, * If \p rk is non-NULL (typical case): * Additional space for the Kafka protocol headers is inserted automatically. 
*/ -rd_kafka_buf_t *rd_kafka_buf_new0 (int segcnt, size_t size, int flags) { +rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags) { rd_kafka_buf_t *rkbuf; rkbuf = rd_calloc(1, sizeof(*rkbuf)); @@ -123,10 +125,11 @@ rd_kafka_buf_t *rd_kafka_buf_new0 (int segcnt, size_t size, int flags) { * @brief Create new request buffer with the request-header written (will * need to be updated with Length, etc, later) */ -rd_kafka_buf_t *rd_kafka_buf_new_request0 (rd_kafka_broker_t *rkb, - int16_t ApiKey, - int segcnt, size_t size, - rd_bool_t is_flexver) { +rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int segcnt, + size_t size, + rd_bool_t is_flexver) { rd_kafka_buf_t *rkbuf; /* Make room for common protocol request headers */ @@ -175,7 +178,6 @@ rd_kafka_buf_t *rd_kafka_buf_new_request0 (rd_kafka_broker_t *rkb, - /** * @brief Create new read-only rkbuf shadowing a memory region. * @@ -183,60 +185,61 @@ rd_kafka_buf_t *rd_kafka_buf_new_request0 (rd_kafka_broker_t *rkb, * buffer refcount reaches 0. * @remark the buffer may only be read from, not written to. 
*/ -rd_kafka_buf_t *rd_kafka_buf_new_shadow (const void *ptr, size_t size, - void (*free_cb) (void *)) { - rd_kafka_buf_t *rkbuf; +rd_kafka_buf_t * +rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *)) { + rd_kafka_buf_t *rkbuf; - rkbuf = rd_calloc(1, sizeof(*rkbuf)); + rkbuf = rd_calloc(1, sizeof(*rkbuf)); rkbuf->rkbuf_reqhdr.ApiKey = RD_KAFKAP_None; rd_buf_init(&rkbuf->rkbuf_buf, 1, 0); rd_buf_push(&rkbuf->rkbuf_buf, ptr, size, free_cb); - rkbuf->rkbuf_totlen = size; + rkbuf->rkbuf_totlen = size; /* Initialize reader slice */ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf); rd_refcnt_init(&rkbuf->rkbuf_refcnt, 1); - return rkbuf; + return rkbuf; } -void rd_kafka_bufq_enq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) { - TAILQ_INSERT_TAIL(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link); +void rd_kafka_bufq_enq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) { + TAILQ_INSERT_TAIL(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link); rd_atomic32_add(&rkbufq->rkbq_cnt, 1); if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce) rd_atomic32_add(&rkbufq->rkbq_msg_cnt, rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq)); } -void rd_kafka_bufq_deq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) { - TAILQ_REMOVE(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link); - rd_kafka_assert(NULL, rd_atomic32_get(&rkbufq->rkbq_cnt) > 0); - rd_atomic32_sub(&rkbufq->rkbq_cnt, 1); +void rd_kafka_bufq_deq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) { + TAILQ_REMOVE(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link); + rd_kafka_assert(NULL, rd_atomic32_get(&rkbufq->rkbq_cnt) > 0); + rd_atomic32_sub(&rkbufq->rkbq_cnt, 1); if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce) rd_atomic32_sub(&rkbufq->rkbq_msg_cnt, rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq)); } void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq) { - TAILQ_INIT(&rkbufq->rkbq_bufs); - rd_atomic32_init(&rkbufq->rkbq_cnt, 0); - rd_atomic32_init(&rkbufq->rkbq_msg_cnt, 0); + TAILQ_INIT(&rkbufq->rkbq_bufs); + rd_atomic32_init(&rkbufq->rkbq_cnt, 
0); + rd_atomic32_init(&rkbufq->rkbq_msg_cnt, 0); } /** * Concat all buffers from 'src' to tail of 'dst' */ -void rd_kafka_bufq_concat (rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) { - TAILQ_CONCAT(&dst->rkbq_bufs, &src->rkbq_bufs, rkbuf_link); - (void)rd_atomic32_add(&dst->rkbq_cnt, rd_atomic32_get(&src->rkbq_cnt)); - (void)rd_atomic32_add(&dst->rkbq_msg_cnt, rd_atomic32_get(&src->rkbq_msg_cnt)); - rd_kafka_bufq_init(src); +void rd_kafka_bufq_concat(rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) { + TAILQ_CONCAT(&dst->rkbq_bufs, &src->rkbq_bufs, rkbuf_link); + (void)rd_atomic32_add(&dst->rkbq_cnt, rd_atomic32_get(&src->rkbq_cnt)); + (void)rd_atomic32_add(&dst->rkbq_msg_cnt, + rd_atomic32_get(&src->rkbq_msg_cnt)); + rd_kafka_bufq_init(src); } /** @@ -245,17 +248,17 @@ void rd_kafka_bufq_concat (rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) { * or rkb_outbufs since buffers may be re-enqueued on those queues. * 'rkbufq' needs to be bufq_init():ed before reuse after this call. */ -void rd_kafka_bufq_purge (rd_kafka_broker_t *rkb, - rd_kafka_bufq_t *rkbufq, - rd_kafka_resp_err_t err) { - rd_kafka_buf_t *rkbuf, *tmp; +void rd_kafka_bufq_purge(rd_kafka_broker_t *rkb, + rd_kafka_bufq_t *rkbufq, + rd_kafka_resp_err_t err) { + rd_kafka_buf_t *rkbuf, *tmp; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - rd_rkb_dbg(rkb, QUEUE, "BUFQ", "Purging bufq with %i buffers", - rd_atomic32_get(&rkbufq->rkbq_cnt)); + rd_rkb_dbg(rkb, QUEUE, "BUFQ", "Purging bufq with %i buffers", + rd_atomic32_get(&rkbufq->rkbq_cnt)); - TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) { + TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) { rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf); } } @@ -271,27 +274,26 @@ void rd_kafka_bufq_purge (rd_kafka_broker_t *rkb, * ApiVersion * SaslHandshake */ -void rd_kafka_bufq_connection_reset (rd_kafka_broker_t *rkb, - rd_kafka_bufq_t *rkbufq) { - 
rd_kafka_buf_t *rkbuf, *tmp; +void rd_kafka_bufq_connection_reset(rd_kafka_broker_t *rkb, + rd_kafka_bufq_t *rkbufq) { + rd_kafka_buf_t *rkbuf, *tmp; rd_ts_t now = rd_clock(); - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - - rd_rkb_dbg(rkb, QUEUE, "BUFQ", - "Updating %d buffers on connection reset", - rd_atomic32_get(&rkbufq->rkbq_cnt)); - - TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) { - switch (rkbuf->rkbuf_reqhdr.ApiKey) - { - case RD_KAFKAP_ApiVersion: - case RD_KAFKAP_SaslHandshake: - rd_kafka_bufq_deq(rkbufq, rkbuf); - rd_kafka_buf_callback(rkb->rkb_rk, rkb, - RD_KAFKA_RESP_ERR__DESTROY, - NULL, rkbuf); - break; + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + + rd_rkb_dbg(rkb, QUEUE, "BUFQ", + "Updating %d buffers on connection reset", + rd_atomic32_get(&rkbufq->rkbq_cnt)); + + TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) { + switch (rkbuf->rkbuf_reqhdr.ApiKey) { + case RD_KAFKAP_ApiVersion: + case RD_KAFKAP_SaslHandshake: + rd_kafka_bufq_deq(rkbufq, rkbuf); + rd_kafka_buf_callback(rkb->rkb_rk, rkb, + RD_KAFKA_RESP_ERR__DESTROY, NULL, + rkbuf); + break; default: /* Reset buffer send position and corrid */ rd_slice_seek(&rkbuf->rkbuf_reader, 0); @@ -299,13 +301,14 @@ void rd_kafka_bufq_connection_reset (rd_kafka_broker_t *rkb, /* Reset timeout */ rd_kafka_buf_calc_timeout(rkb->rkb_rk, rkbuf, now); break; - } + } } } -void rd_kafka_bufq_dump (rd_kafka_broker_t *rkb, const char *fac, - rd_kafka_bufq_t *rkbq) { +void rd_kafka_bufq_dump(rd_kafka_broker_t *rkb, + const char *fac, + rd_kafka_bufq_t *rkbq) { rd_kafka_buf_t *rkbuf; int cnt = rd_kafka_bufq_cnt(rkbq); rd_ts_t now; @@ -319,28 +322,31 @@ void rd_kafka_bufq_dump (rd_kafka_broker_t *rkb, const char *fac, TAILQ_FOREACH(rkbuf, &rkbq->rkbq_bufs, rkbuf_link) { rd_rkb_dbg(rkb, BROKER, fac, - " Buffer %s (%"PRIusz" bytes, corrid %"PRId32", " + " Buffer %s (%" PRIusz " bytes, corrid %" PRId32 + ", " "connid %d, prio %d, retry %d in 
%lldms, " "timeout in %lldms)", rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), rkbuf->rkbuf_totlen, rkbuf->rkbuf_corrid, rkbuf->rkbuf_connid, rkbuf->rkbuf_prio, rkbuf->rkbuf_retries, - rkbuf->rkbuf_ts_retry ? - (rkbuf->rkbuf_ts_retry - now) / 1000LL : 0, - rkbuf->rkbuf_ts_timeout ? - (rkbuf->rkbuf_ts_timeout - now) / 1000LL : 0); + rkbuf->rkbuf_ts_retry + ? (rkbuf->rkbuf_ts_retry - now) / 1000LL + : 0, + rkbuf->rkbuf_ts_timeout + ? (rkbuf->rkbuf_ts_timeout - now) / 1000LL + : 0); } } - /** * @brief Calculate the effective timeout for a request attempt */ -void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf, - rd_ts_t now) { +void rd_kafka_buf_calc_timeout(const rd_kafka_t *rk, + rd_kafka_buf_t *rkbuf, + rd_ts_t now) { if (likely(rkbuf->rkbuf_rel_timeout)) { /* Default: * Relative timeout, set request timeout to @@ -348,11 +354,11 @@ void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf, rkbuf->rkbuf_ts_timeout = now + rkbuf->rkbuf_rel_timeout * 1000; } else if (!rkbuf->rkbuf_force_timeout) { /* Use absolute timeout, limited by socket.timeout.ms */ - rd_ts_t sock_timeout = now + - rk->rk_conf.socket_timeout_ms * 1000; + rd_ts_t sock_timeout = + now + rk->rk_conf.socket_timeout_ms * 1000; rkbuf->rkbuf_ts_timeout = - RD_MIN(sock_timeout, rkbuf->rkbuf_abs_timeout); + RD_MIN(sock_timeout, rkbuf->rkbuf_abs_timeout); } else { /* Use absolue timeout without limit. */ rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_abs_timeout; @@ -367,64 +373,62 @@ void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf, * (rkb_outbufs) then the retry counter is not increased. * Returns 1 if the request was scheduled for retry, else 0. */ -int rd_kafka_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { +int rd_kafka_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { int incr_retry = rd_kafka_buf_was_sent(rkbuf) ? 
1 : 0; /* Don't allow retries of dummy/empty buffers */ rd_assert(rd_buf_len(&rkbuf->rkbuf_buf) > 0); - if (unlikely(!rkb || - rkb->rkb_source == RD_KAFKA_INTERNAL || - rd_kafka_terminating(rkb->rkb_rk) || - rkbuf->rkbuf_retries + incr_retry > - rkbuf->rkbuf_max_retries)) + if (unlikely(!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL || + rd_kafka_terminating(rkb->rkb_rk) || + rkbuf->rkbuf_retries + incr_retry > + rkbuf->rkbuf_max_retries)) return 0; /* Absolute timeout, check for expiry. */ - if (rkbuf->rkbuf_abs_timeout && - rkbuf->rkbuf_abs_timeout < rd_clock()) + if (rkbuf->rkbuf_abs_timeout && rkbuf->rkbuf_abs_timeout < rd_clock()) return 0; /* Expired */ - /* Try again */ - rkbuf->rkbuf_ts_sent = 0; + /* Try again */ + rkbuf->rkbuf_ts_sent = 0; rkbuf->rkbuf_ts_timeout = 0; /* Will be updated in calc_timeout() */ - rkbuf->rkbuf_retries += incr_retry; - rd_kafka_buf_keep(rkbuf); - rd_kafka_broker_buf_retry(rkb, rkbuf); - return 1; + rkbuf->rkbuf_retries += incr_retry; + rd_kafka_buf_keep(rkbuf); + rd_kafka_broker_buf_retry(rkb, rkbuf); + return 1; } /** * @brief Handle RD_KAFKA_OP_RECV_BUF. */ -void rd_kafka_buf_handle_op (rd_kafka_op_t *rko, rd_kafka_resp_err_t err) { +void rd_kafka_buf_handle_op(rd_kafka_op_t *rko, rd_kafka_resp_err_t err) { rd_kafka_buf_t *request, *response; rd_kafka_t *rk; - request = rko->rko_u.xbuf.rkbuf; + request = rko->rko_u.xbuf.rkbuf; rko->rko_u.xbuf.rkbuf = NULL; /* NULL on op_destroy() */ - if (request->rkbuf_replyq.q) { - int32_t version = request->rkbuf_replyq.version; + if (request->rkbuf_replyq.q) { + int32_t version = request->rkbuf_replyq.version; /* Current queue usage is done, but retain original replyq for * future retries, stealing * the current reference. */ request->rkbuf_orig_replyq = request->rkbuf_replyq; rd_kafka_replyq_clear(&request->rkbuf_replyq); - /* Callback might need to version check so we retain the - * version across the clear() call which clears it. 
*/ - request->rkbuf_replyq.version = version; - } + /* Callback might need to version check so we retain the + * version across the clear() call which clears it. */ + request->rkbuf_replyq.version = version; + } - if (!request->rkbuf_cb) { - rd_kafka_buf_destroy(request); - return; - } + if (!request->rkbuf_cb) { + rd_kafka_buf_destroy(request); + return; + } /* Let buf_callback() do destroy()s */ - response = request->rkbuf_response; /* May be NULL */ + response = request->rkbuf_response; /* May be NULL */ request->rkbuf_response = NULL; if (!(rk = rko->rko_rk)) { @@ -450,27 +454,24 @@ void rd_kafka_buf_handle_op (rd_kafka_op_t *rko, rd_kafka_resp_err_t err) { * The decision to retry, and the call to buf_retry(), is delegated * to the buffer's response callback. */ -void rd_kafka_buf_callback (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err, - rd_kafka_buf_t *response, rd_kafka_buf_t *request){ +void rd_kafka_buf_callback(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *response, + rd_kafka_buf_t *request) { rd_kafka_interceptors_on_response_received( - rk, - -1, - rkb ? rd_kafka_broker_name(rkb) : "", - rkb ? rd_kafka_broker_id(rkb) : -1, - request->rkbuf_reqhdr.ApiKey, - request->rkbuf_reqhdr.ApiVersion, - request->rkbuf_reshdr.CorrId, - response ? response->rkbuf_totlen : 0, - response ? response->rkbuf_ts_sent : -1, - err); + rk, -1, rkb ? rd_kafka_broker_name(rkb) : "", + rkb ? rd_kafka_broker_id(rkb) : -1, request->rkbuf_reqhdr.ApiKey, + request->rkbuf_reqhdr.ApiVersion, request->rkbuf_reshdr.CorrId, + response ? response->rkbuf_totlen : 0, + response ? 
response->rkbuf_ts_sent : -1, err); if (err != RD_KAFKA_RESP_ERR__DESTROY && request->rkbuf_replyq.q) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF); - rd_kafka_assert(NULL, !request->rkbuf_response); - request->rkbuf_response = response; + rd_kafka_assert(NULL, !request->rkbuf_response); + request->rkbuf_response = response; /* Increment refcnt since rko_rkbuf will be decref:ed * if replyq_enq() fails and we dont want the rkbuf gone in that @@ -485,10 +486,10 @@ void rd_kafka_buf_callback (rd_kafka_t *rk, rd_kafka_replyq_copy(&request->rkbuf_orig_replyq, &request->rkbuf_replyq); - rd_kafka_replyq_enq(&request->rkbuf_replyq, rko, 0); + rd_kafka_replyq_enq(&request->rkbuf_replyq, rko, 0); - rd_kafka_buf_destroy(request); /* from keep above */ - return; + rd_kafka_buf_destroy(request); /* from keep above */ + return; } if (request->rkbuf_cb) @@ -496,8 +497,8 @@ void rd_kafka_buf_callback (rd_kafka_t *rk, request->rkbuf_opaque); rd_kafka_buf_destroy(request); - if (response) - rd_kafka_buf_destroy(response); + if (response) + rd_kafka_buf_destroy(response); } @@ -511,15 +512,15 @@ void rd_kafka_buf_callback (rd_kafka_t *rk, * * See rd_kafka_make_req_cb_t documentation for more info. 
*/ -void rd_kafka_buf_set_maker (rd_kafka_buf_t *rkbuf, - rd_kafka_make_req_cb_t *make_cb, - void *make_opaque, - void (*free_make_opaque_cb) (void *make_opaque)) { +void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf, + rd_kafka_make_req_cb_t *make_cb, + void *make_opaque, + void (*free_make_opaque_cb)(void *make_opaque)) { rd_assert(!rkbuf->rkbuf_make_req_cb && !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE)); - rkbuf->rkbuf_make_req_cb = make_cb; - rkbuf->rkbuf_make_opaque = make_opaque; + rkbuf->rkbuf_make_req_cb = make_cb; + rkbuf->rkbuf_make_opaque = make_opaque; rkbuf->rkbuf_free_make_opaque_cb = free_make_opaque_cb; rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_NEED_MAKE; diff --git a/src/rdkafka_buf.h b/src/rdkafka_buf.h index 2798adf4fd..78762036b0 100644 --- a/src/rdkafka_buf.h +++ b/src/rdkafka_buf.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2015, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -36,7 +36,7 @@ typedef struct rd_kafka_broker_s rd_kafka_broker_t; -#define RD_KAFKA_HEADERS_IOV_CNT 2 +#define RD_KAFKA_HEADERS_IOV_CNT 2 /** @@ -44,39 +44,37 @@ typedef struct rd_kafka_broker_s rd_kafka_broker_t; * effective and platform safe struct writes. */ typedef struct rd_tmpabuf_s { - size_t size; - size_t of; - char *buf; - int failed; - int assert_on_fail; + size_t size; + size_t of; + char *buf; + int failed; + int assert_on_fail; } rd_tmpabuf_t; /** * @brief Allocate new tmpabuf with \p size bytes pre-allocated. 
*/ static RD_UNUSED void -rd_tmpabuf_new (rd_tmpabuf_t *tab, size_t size, int assert_on_fail) { - tab->buf = rd_malloc(size); - tab->size = size; - tab->of = 0; - tab->failed = 0; - tab->assert_on_fail = assert_on_fail; +rd_tmpabuf_new(rd_tmpabuf_t *tab, size_t size, int assert_on_fail) { + tab->buf = rd_malloc(size); + tab->size = size; + tab->of = 0; + tab->failed = 0; + tab->assert_on_fail = assert_on_fail; } /** * @brief Free memory allocated by tmpabuf */ -static RD_UNUSED void -rd_tmpabuf_destroy (rd_tmpabuf_t *tab) { - rd_free(tab->buf); +static RD_UNUSED void rd_tmpabuf_destroy(rd_tmpabuf_t *tab) { + rd_free(tab->buf); } /** * @returns 1 if a previous operation failed. */ -static RD_UNUSED RD_INLINE int -rd_tmpabuf_failed (rd_tmpabuf_t *tab) { - return tab->failed; +static RD_UNUSED RD_INLINE int rd_tmpabuf_failed(rd_tmpabuf_t *tab) { + return tab->failed; } /** @@ -87,65 +85,67 @@ rd_tmpabuf_failed (rd_tmpabuf_t *tab) { * in the tmpabuf. */ static RD_UNUSED void * -rd_tmpabuf_alloc0 (const char *func, int line, rd_tmpabuf_t *tab, size_t size) { - void *ptr; - - if (unlikely(tab->failed)) - return NULL; - - if (unlikely(tab->of + size > tab->size)) { - if (tab->assert_on_fail) { - fprintf(stderr, - "%s: %s:%d: requested size %"PRIusz" + %"PRIusz" > %"PRIusz"\n", - __FUNCTION__, func, line, tab->of, size, - tab->size); - assert(!*"rd_tmpabuf_alloc: not enough size in buffer"); - } - return NULL; - } +rd_tmpabuf_alloc0(const char *func, int line, rd_tmpabuf_t *tab, size_t size) { + void *ptr; + + if (unlikely(tab->failed)) + return NULL; + + if (unlikely(tab->of + size > tab->size)) { + if (tab->assert_on_fail) { + fprintf(stderr, + "%s: %s:%d: requested size %" PRIusz + " + %" PRIusz " > %" PRIusz "\n", + __FUNCTION__, func, line, tab->of, size, + tab->size); + assert(!*"rd_tmpabuf_alloc: not enough size in buffer"); + } + return NULL; + } ptr = (void *)(tab->buf + tab->of); - tab->of += RD_ROUNDUP(size, 8); + tab->of += RD_ROUNDUP(size, 8); - return ptr; + 
return ptr; } -#define rd_tmpabuf_alloc(tab,size) \ - rd_tmpabuf_alloc0(__FUNCTION__,__LINE__,tab,size) +#define rd_tmpabuf_alloc(tab, size) \ + rd_tmpabuf_alloc0(__FUNCTION__, __LINE__, tab, size) /** * @brief Write \p buf of \p size bytes to tmpabuf memory in an aligned fashion. * * @returns the allocated and written-to pointer (within the tmpabuf) on success - * or NULL if the requested number of bytes + alignment is not available - * in the tmpabuf. + * or NULL if the requested number of bytes + alignment is not + * available in the tmpabuf. */ -static RD_UNUSED void * -rd_tmpabuf_write0 (const char *func, int line, - rd_tmpabuf_t *tab, const void *buf, size_t size) { - void *ptr = rd_tmpabuf_alloc0(func, line, tab, size); +static RD_UNUSED void *rd_tmpabuf_write0(const char *func, + int line, + rd_tmpabuf_t *tab, + const void *buf, + size_t size) { + void *ptr = rd_tmpabuf_alloc0(func, line, tab, size); if (likely(ptr && size)) memcpy(ptr, buf, size); - return ptr; + return ptr; } -#define rd_tmpabuf_write(tab,buf,size) \ - rd_tmpabuf_write0(__FUNCTION__, __LINE__, tab, buf, size) +#define rd_tmpabuf_write(tab, buf, size) \ + rd_tmpabuf_write0(__FUNCTION__, __LINE__, tab, buf, size) /** * @brief Wrapper for rd_tmpabuf_write() that takes a nul-terminated string. 
*/ -static RD_UNUSED char * -rd_tmpabuf_write_str0 (const char *func, int line, - rd_tmpabuf_t *tab, const char *str) { - return rd_tmpabuf_write0(func, line, tab, str, strlen(str)+1); +static RD_UNUSED char *rd_tmpabuf_write_str0(const char *func, + int line, + rd_tmpabuf_t *tab, + const char *str) { + return rd_tmpabuf_write0(func, line, tab, str, strlen(str) + 1); } -#define rd_tmpabuf_write_str(tab,str) \ - rd_tmpabuf_write_str0(__FUNCTION__, __LINE__, tab, str) - - +#define rd_tmpabuf_write_str(tab, str) \ + rd_tmpabuf_write_str0(__FUNCTION__, __LINE__, tab, str) @@ -160,24 +160,23 @@ rd_tmpabuf_write_str0 (const char *func, int line, * * NOTE: rkb, reply and request may be NULL, depending on error situation. */ -typedef void (rd_kafka_resp_cb_t) (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque); +typedef void(rd_kafka_resp_cb_t)(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque); /** * @brief Sender callback. This callback is used to construct and send (enq) * a rkbuf on a particular broker. */ -typedef rd_kafka_resp_err_t (rd_kafka_send_req_cb_t) ( - rd_kafka_broker_t *rkb, - rd_kafka_op_t *rko, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *reply_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_send_req_cb_t)(rd_kafka_broker_t *rkb, + rd_kafka_op_t *rko, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *reply_opaque); /** @@ -217,10 +216,9 @@ typedef rd_kafka_resp_err_t (rd_kafka_send_req_cb_t) ( * See rd_kafka_ListOffsetsRequest() in rdkafka_request.c for an example. 
* */ -typedef rd_kafka_resp_err_t (rd_kafka_make_req_cb_t) ( - rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - void *make_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_make_req_cb_t)(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + void *make_opaque); /** * @struct Request and response buffer @@ -231,78 +229,78 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ int32_t rkbuf_corrid; - rd_ts_t rkbuf_ts_retry; /* Absolute send retry time */ + rd_ts_t rkbuf_ts_retry; /* Absolute send retry time */ - int rkbuf_flags; /* RD_KAFKA_OP_F */ + int rkbuf_flags; /* RD_KAFKA_OP_F */ /** What convenience flags to copy from request to response along * with the reqhdr. */ -#define RD_KAFKA_BUF_FLAGS_RESP_COPY_MASK (RD_KAFKA_OP_F_FLEXVER) +#define RD_KAFKA_BUF_FLAGS_RESP_COPY_MASK (RD_KAFKA_OP_F_FLEXVER) rd_kafka_prio_t rkbuf_prio; /**< Request priority */ - rd_buf_t rkbuf_buf; /**< Send/Recv byte buffer */ - rd_slice_t rkbuf_reader; /**< Buffer slice reader for rkbuf_buf */ - - int rkbuf_connid; /* broker connection id (used when buffer - * was partially sent). */ - size_t rkbuf_totlen; /* recv: total expected length, - * send: not used */ - - rd_crc32_t rkbuf_crc; /* Current CRC calculation */ - - struct rd_kafkap_reqhdr rkbuf_reqhdr; /* Request header. - * These fields are encoded - * and written to output buffer - * on buffer finalization. - * Note: - * The request's - * reqhdr is copied to the - * response's reqhdr as a - * convenience. */ - struct rd_kafkap_reshdr rkbuf_reshdr; /* Response header. - * Decoded fields are copied - * here from the buffer - * to provide an ease-of-use - * interface to the header */ - - int32_t rkbuf_expected_size; /* expected size of message */ - - rd_kafka_replyq_t rkbuf_replyq; /* Enqueue response on replyq */ - rd_kafka_replyq_t rkbuf_orig_replyq; /* Original replyq to be used - * for retries from inside - * the rkbuf_cb() callback - * since rkbuf_replyq will - * have been reset. 
*/ - rd_kafka_resp_cb_t *rkbuf_cb; /* Response callback */ - struct rd_kafka_buf_s *rkbuf_response; /* Response buffer */ + rd_buf_t rkbuf_buf; /**< Send/Recv byte buffer */ + rd_slice_t rkbuf_reader; /**< Buffer slice reader for rkbuf_buf */ + + int rkbuf_connid; /* broker connection id (used when buffer + * was partially sent). */ + size_t rkbuf_totlen; /* recv: total expected length, + * send: not used */ + + rd_crc32_t rkbuf_crc; /* Current CRC calculation */ + + struct rd_kafkap_reqhdr rkbuf_reqhdr; /* Request header. + * These fields are encoded + * and written to output buffer + * on buffer finalization. + * Note: + * The request's + * reqhdr is copied to the + * response's reqhdr as a + * convenience. */ + struct rd_kafkap_reshdr rkbuf_reshdr; /* Response header. + * Decoded fields are copied + * here from the buffer + * to provide an ease-of-use + * interface to the header */ + + int32_t rkbuf_expected_size; /* expected size of message */ + + rd_kafka_replyq_t rkbuf_replyq; /* Enqueue response on replyq */ + rd_kafka_replyq_t rkbuf_orig_replyq; /* Original replyq to be used + * for retries from inside + * the rkbuf_cb() callback + * since rkbuf_replyq will + * have been reset. */ + rd_kafka_resp_cb_t *rkbuf_cb; /* Response callback */ + struct rd_kafka_buf_s *rkbuf_response; /* Response buffer */ rd_kafka_make_req_cb_t *rkbuf_make_req_cb; /**< Callback to construct * the request itself. * Will be used if * RD_KAFKA_OP_F_NEED_MAKE * is set. */ - void *rkbuf_make_opaque; /**< Opaque passed to rkbuf_make_req_cb. - * Will be freed automatically after use - * by the rkbuf code. */ - void (*rkbuf_free_make_opaque_cb) (void *); /**< Free function for - * rkbuf_make_opaque. */ + void *rkbuf_make_opaque; /**< Opaque passed to rkbuf_make_req_cb. + * Will be freed automatically after use + * by the rkbuf code. */ + void (*rkbuf_free_make_opaque_cb)(void *); /**< Free function for + * rkbuf_make_opaque. 
*/ struct rd_kafka_broker_s *rkbuf_rkb; rd_refcnt_t rkbuf_refcnt; - void *rkbuf_opaque; + void *rkbuf_opaque; - int rkbuf_max_retries; /**< Maximum retries to attempt. */ - int rkbuf_retries; /**< Retries so far. */ + int rkbuf_max_retries; /**< Maximum retries to attempt. */ + int rkbuf_retries; /**< Retries so far. */ - int rkbuf_features; /* Required feature(s) that must be - * supported by broker. */ + int rkbuf_features; /* Required feature(s) that must be + * supported by broker. */ rd_ts_t rkbuf_ts_enq; - rd_ts_t rkbuf_ts_sent; /* Initially: Absolute time of transmission, - * after response: RTT. */ + rd_ts_t rkbuf_ts_sent; /* Initially: Absolute time of transmission, + * after response: RTT. */ /* Request timeouts: * rkbuf_ts_timeout is the effective absolute request timeout used @@ -337,40 +335,41 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ * @warning rkb_retrybufs is NOT purged on broker down. */ rd_ts_t rkbuf_ts_timeout; /* Request timeout (absolute time). */ - rd_ts_t rkbuf_abs_timeout;/* Absolute timeout for request, including - * retries. - * Mutually exclusive with rkbuf_rel_timeout*/ - int rkbuf_rel_timeout;/* Relative timeout (ms), used for retries. - * Defaults to socket.timeout.ms. - * Mutually exclusive with rkbuf_abs_timeout*/ + rd_ts_t + rkbuf_abs_timeout; /* Absolute timeout for request, including + * retries. + * Mutually exclusive with rkbuf_rel_timeout*/ + int rkbuf_rel_timeout; /* Relative timeout (ms), used for retries. + * Defaults to socket.timeout.ms. + * Mutually exclusive with rkbuf_abs_timeout*/ rd_bool_t rkbuf_force_timeout; /**< Force request timeout to be * remaining abs_timeout regardless * of socket.timeout.ms. */ - int64_t rkbuf_offset; /* Used by OffsetCommit */ + int64_t rkbuf_offset; /* Used by OffsetCommit */ - rd_list_t *rkbuf_rktp_vers; /* Toppar + Op Version map. - * Used by FetchRequest. */ + rd_list_t *rkbuf_rktp_vers; /* Toppar + Op Version map. + * Used by FetchRequest. 
*/ - rd_kafka_resp_err_t rkbuf_err; /* Buffer parsing error code */ + rd_kafka_resp_err_t rkbuf_err; /* Buffer parsing error code */ union { struct { - rd_list_t *topics; /* Requested topics (char *) */ - char *reason; /* Textual reason */ - rd_kafka_op_t *rko; /* Originating rko with replyq - * (if any) */ - rd_bool_t all_topics; /**< Full/All topics requested */ + rd_list_t *topics; /* Requested topics (char *) */ + char *reason; /* Textual reason */ + rd_kafka_op_t *rko; /* Originating rko with replyq + * (if any) */ + rd_bool_t all_topics; /**< Full/All topics requested */ rd_bool_t cgrp_update; /**< Update cgrp with topic * status from response. */ - int *decr; /* Decrement this integer by one - * when request is complete: - * typically points to metadata - * cache's full_.._sent. - * Will be performed with - * decr_lock held. */ + int *decr; /* Decrement this integer by one + * when request is complete: + * typically points to metadata + * cache's full_.._sent. + * Will be performed with + * decr_lock held. */ mtx_t *decr_lock; } Metadata; @@ -378,8 +377,8 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ rd_kafka_msgbatch_t batch; /**< MessageSet/batch */ } Produce; struct { - rd_bool_t commit; /**< true = txn commit, - * false = txn abort */ + rd_bool_t commit; /**< true = txn commit, + * false = txn abort */ } EndTxn; } rkbuf_u; @@ -396,7 +395,6 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ - /** * @name Read buffer interface * @@ -409,99 +407,105 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ * to log parse errors (or 0 to turn off logging). */ -#define rd_kafka_buf_parse_fail(rkbuf,...) do { \ - if (log_decode_errors > 0) { \ - rd_kafka_assert(NULL, rkbuf->rkbuf_rkb); \ - rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \ - "PROTOERR", \ - "Protocol parse failure for %s v%hd%s " \ - "at %"PRIusz"/%"PRIusz" (%s:%i) " \ - "(incorrect broker.version.fallback?)", \ - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr. 
\ - ApiKey), \ - rkbuf->rkbuf_reqhdr.ApiVersion, \ - (rkbuf->rkbuf_flags&RD_KAFKA_OP_F_FLEXVER? \ - "(flex)":""), \ - rd_slice_offset(&rkbuf->rkbuf_reader), \ - rd_slice_size(&rkbuf->rkbuf_reader), \ - __FUNCTION__, __LINE__); \ - rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \ - "PROTOERR", __VA_ARGS__); \ - } \ - (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__BAD_MSG; \ - goto err_parse; \ - } while (0) +#define rd_kafka_buf_parse_fail(rkbuf, ...) \ + do { \ + if (log_decode_errors > 0) { \ + rd_kafka_assert(NULL, rkbuf->rkbuf_rkb); \ + rd_rkb_log( \ + rkbuf->rkbuf_rkb, log_decode_errors, "PROTOERR", \ + "Protocol parse failure for %s v%hd%s " \ + "at %" PRIusz "/%" PRIusz \ + " (%s:%i) " \ + "(incorrect broker.version.fallback?)", \ + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), \ + rkbuf->rkbuf_reqhdr.ApiVersion, \ + (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER \ + ? "(flex)" \ + : ""), \ + rd_slice_offset(&rkbuf->rkbuf_reader), \ + rd_slice_size(&rkbuf->rkbuf_reader), __FUNCTION__, \ + __LINE__); \ + rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \ + "PROTOERR", __VA_ARGS__); \ + } \ + (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__BAD_MSG; \ + goto err_parse; \ + } while (0) /** * @name Fail buffer reading due to buffer underflow. */ -#define rd_kafka_buf_underflow_fail(rkbuf,wantedlen,...) do { \ - if (log_decode_errors > 0) { \ - rd_kafka_assert(NULL, rkbuf->rkbuf_rkb); \ - char __tmpstr[256]; \ - rd_snprintf(__tmpstr, sizeof(__tmpstr), \ - ": " __VA_ARGS__); \ - if (strlen(__tmpstr) == 2) __tmpstr[0] = '\0'; \ - rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \ - "PROTOUFLOW", \ - "Protocol read buffer underflow " \ - "for %s v%hd " \ - "at %"PRIusz"/%"PRIusz" (%s:%i): " \ - "expected %"PRIusz" bytes > " \ - "%"PRIusz" remaining bytes (%s)%s", \ - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr. 
\ - ApiKey), \ - rkbuf->rkbuf_reqhdr.ApiVersion, \ - rd_slice_offset(&rkbuf->rkbuf_reader), \ - rd_slice_size(&rkbuf->rkbuf_reader), \ - __FUNCTION__, __LINE__, \ - wantedlen, \ - rd_slice_remains(&rkbuf->rkbuf_reader), \ - rkbuf->rkbuf_uflow_mitigation ? \ - rkbuf->rkbuf_uflow_mitigation : \ - "incorrect broker.version.fallback?", \ - __tmpstr); \ - } \ - (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__UNDERFLOW; \ - goto err_parse; \ +#define rd_kafka_buf_underflow_fail(rkbuf, wantedlen, ...) \ + do { \ + if (log_decode_errors > 0) { \ + rd_kafka_assert(NULL, rkbuf->rkbuf_rkb); \ + char __tmpstr[256]; \ + rd_snprintf(__tmpstr, sizeof(__tmpstr), \ + ": " __VA_ARGS__); \ + if (strlen(__tmpstr) == 2) \ + __tmpstr[0] = '\0'; \ + rd_rkb_log( \ + rkbuf->rkbuf_rkb, log_decode_errors, "PROTOUFLOW", \ + "Protocol read buffer underflow " \ + "for %s v%hd " \ + "at %" PRIusz "/%" PRIusz \ + " (%s:%i): " \ + "expected %" PRIusz \ + " bytes > " \ + "%" PRIusz " remaining bytes (%s)%s", \ + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), \ + rkbuf->rkbuf_reqhdr.ApiVersion, \ + rd_slice_offset(&rkbuf->rkbuf_reader), \ + rd_slice_size(&rkbuf->rkbuf_reader), __FUNCTION__, \ + __LINE__, wantedlen, \ + rd_slice_remains(&rkbuf->rkbuf_reader), \ + rkbuf->rkbuf_uflow_mitigation \ + ? rkbuf->rkbuf_uflow_mitigation \ + : "incorrect broker.version.fallback?", \ + __tmpstr); \ + } \ + (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__UNDERFLOW; \ + goto err_parse; \ } while (0) /** * Returns the number of remaining bytes available to read. */ -#define rd_kafka_buf_read_remain(rkbuf) \ - rd_slice_remains(&(rkbuf)->rkbuf_reader) +#define rd_kafka_buf_read_remain(rkbuf) rd_slice_remains(&(rkbuf)->rkbuf_reader) /** * Checks that at least 'len' bytes remain to be read in buffer, else fails. 
*/ -#define rd_kafka_buf_check_len(rkbuf,len) do { \ - size_t __len0 = (size_t)(len); \ - if (unlikely(__len0 > rd_kafka_buf_read_remain(rkbuf))) { \ - rd_kafka_buf_underflow_fail(rkbuf, __len0); \ - } \ +#define rd_kafka_buf_check_len(rkbuf, len) \ + do { \ + size_t __len0 = (size_t)(len); \ + if (unlikely(__len0 > rd_kafka_buf_read_remain(rkbuf))) { \ + rd_kafka_buf_underflow_fail(rkbuf, __len0); \ + } \ } while (0) /** * Skip (as in read and ignore) the next 'len' bytes. */ -#define rd_kafka_buf_skip(rkbuf, len) do { \ - size_t __len1 = (size_t)(len); \ - if (__len1 && \ - !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \ - rd_kafka_buf_check_len(rkbuf, __len1); \ +#define rd_kafka_buf_skip(rkbuf, len) \ + do { \ + size_t __len1 = (size_t)(len); \ + if (__len1 && \ + !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \ + rd_kafka_buf_check_len(rkbuf, __len1); \ } while (0) /** * Skip (as in read and ignore) up to fixed position \p pos. */ -#define rd_kafka_buf_skip_to(rkbuf, pos) do { \ - size_t __len1 = (size_t)(pos) - \ - rd_slice_offset(&(rkbuf)->rkbuf_reader); \ - if (__len1 && \ - !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \ - rd_kafka_buf_check_len(rkbuf, __len1); \ +#define rd_kafka_buf_skip_to(rkbuf, pos) \ + do { \ + size_t __len1 = \ + (size_t)(pos)-rd_slice_offset(&(rkbuf)->rkbuf_reader); \ + if (__len1 && \ + !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \ + rd_kafka_buf_check_len(rkbuf, __len1); \ } while (0) @@ -509,10 +513,11 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ /** * Read 'len' bytes and copy to 'dstptr' */ -#define rd_kafka_buf_read(rkbuf,dstptr,len) do { \ - size_t __len2 = (size_t)(len); \ - if (!rd_slice_read(&(rkbuf)->rkbuf_reader, dstptr, __len2)) \ - rd_kafka_buf_check_len(rkbuf, __len2); \ +#define rd_kafka_buf_read(rkbuf, dstptr, len) \ + do { \ + size_t __len2 = (size_t)(len); \ + if (!rd_slice_read(&(rkbuf)->rkbuf_reader, dstptr, __len2)) \ + rd_kafka_buf_check_len(rkbuf, __len2); \ } while (0) @@ 
-520,99 +525,111 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ * @brief Read \p len bytes at slice offset \p offset and copy to \p dstptr * without affecting the current reader position. */ -#define rd_kafka_buf_peek(rkbuf,offset,dstptr,len) do { \ - size_t __len2 = (size_t)(len); \ - if (!rd_slice_peek(&(rkbuf)->rkbuf_reader, offset, \ - dstptr, __len2)) \ - rd_kafka_buf_check_len(rkbuf, (offset)+(__len2)); \ +#define rd_kafka_buf_peek(rkbuf, offset, dstptr, len) \ + do { \ + size_t __len2 = (size_t)(len); \ + if (!rd_slice_peek(&(rkbuf)->rkbuf_reader, offset, dstptr, \ + __len2)) \ + rd_kafka_buf_check_len(rkbuf, (offset) + (__len2)); \ } while (0) /** * Read a 16,32,64-bit integer and store it in 'dstptr' */ -#define rd_kafka_buf_read_i64(rkbuf,dstptr) do { \ - int64_t _v; \ - rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ - *(dstptr) = be64toh(_v); \ +#define rd_kafka_buf_read_i64(rkbuf, dstptr) \ + do { \ + int64_t _v; \ + rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ + *(dstptr) = be64toh(_v); \ } while (0) -#define rd_kafka_buf_peek_i64(rkbuf,of,dstptr) do { \ - int64_t _v; \ - rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \ - *(dstptr) = be64toh(_v); \ +#define rd_kafka_buf_peek_i64(rkbuf, of, dstptr) \ + do { \ + int64_t _v; \ + rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \ + *(dstptr) = be64toh(_v); \ } while (0) -#define rd_kafka_buf_read_i32(rkbuf,dstptr) do { \ - int32_t _v; \ - rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ - *(dstptr) = be32toh(_v); \ +#define rd_kafka_buf_read_i32(rkbuf, dstptr) \ + do { \ + int32_t _v; \ + rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ + *(dstptr) = be32toh(_v); \ } while (0) -#define rd_kafka_buf_peek_i32(rkbuf,of,dstptr) do { \ - int32_t _v; \ - rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \ - *(dstptr) = be32toh(_v); \ +#define rd_kafka_buf_peek_i32(rkbuf, of, dstptr) \ + do { \ + int32_t _v; \ + rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \ + *(dstptr) = be32toh(_v); \ } while (0) /* Same as .._read_i32 but 
does a direct assignment. * dst is assumed to be a scalar, not pointer. */ -#define rd_kafka_buf_read_i32a(rkbuf, dst) do { \ - int32_t _v; \ - rd_kafka_buf_read(rkbuf, &_v, 4); \ - dst = (int32_t) be32toh(_v); \ - } while (0) - -#define rd_kafka_buf_read_i16(rkbuf,dstptr) do { \ - int16_t _v; \ - rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ - *(dstptr) = (int16_t)be16toh(_v); \ +#define rd_kafka_buf_read_i32a(rkbuf, dst) \ + do { \ + int32_t _v; \ + rd_kafka_buf_read(rkbuf, &_v, 4); \ + dst = (int32_t)be32toh(_v); \ + } while (0) + +#define rd_kafka_buf_read_i16(rkbuf, dstptr) \ + do { \ + int16_t _v; \ + rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ + *(dstptr) = (int16_t)be16toh(_v); \ } while (0) -#define rd_kafka_buf_read_i16a(rkbuf, dst) do { \ - int16_t _v; \ - rd_kafka_buf_read(rkbuf, &_v, 2); \ - dst = (int16_t)be16toh(_v); \ - } while (0) +#define rd_kafka_buf_read_i16a(rkbuf, dst) \ + do { \ + int16_t _v; \ + rd_kafka_buf_read(rkbuf, &_v, 2); \ + dst = (int16_t)be16toh(_v); \ + } while (0) #define rd_kafka_buf_read_i8(rkbuf, dst) rd_kafka_buf_read(rkbuf, dst, 1) -#define rd_kafka_buf_peek_i8(rkbuf,of,dst) rd_kafka_buf_peek(rkbuf,of,dst,1) +#define rd_kafka_buf_peek_i8(rkbuf, of, dst) \ + rd_kafka_buf_peek(rkbuf, of, dst, 1) -#define rd_kafka_buf_read_bool(rkbuf, dstptr) do { \ - int8_t _v; \ - rd_bool_t *_dst = dstptr; \ - rd_kafka_buf_read(rkbuf, &_v, 1); \ - *_dst = (rd_bool_t)_v; \ +#define rd_kafka_buf_read_bool(rkbuf, dstptr) \ + do { \ + int8_t _v; \ + rd_bool_t *_dst = dstptr; \ + rd_kafka_buf_read(rkbuf, &_v, 1); \ + *_dst = (rd_bool_t)_v; \ } while (0) /** * @brief Read varint and store in int64_t \p dst */ -#define rd_kafka_buf_read_varint(rkbuf,dst) do { \ - int64_t _v; \ - size_t _r = rd_slice_read_varint(&(rkbuf)->rkbuf_reader, &_v);\ - if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ - rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ - "varint parsing failed");\ - *(dst) = _v; \ +#define rd_kafka_buf_read_varint(rkbuf, dst) \ + do { \ + int64_t _v; 
\ + size_t _r = rd_slice_read_varint(&(rkbuf)->rkbuf_reader, &_v); \ + if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ + rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ + "varint parsing failed"); \ + *(dst) = _v; \ } while (0) /** * @brief Read unsigned varint and store in uint64_t \p dst */ -#define rd_kafka_buf_read_uvarint(rkbuf,dst) do { \ - uint64_t _v; \ - size_t _r = rd_slice_read_uvarint(&(rkbuf)->rkbuf_reader, \ - &_v); \ - if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ - rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ +#define rd_kafka_buf_read_uvarint(rkbuf, dst) \ + do { \ + uint64_t _v; \ + size_t _r = \ + rd_slice_read_uvarint(&(rkbuf)->rkbuf_reader, &_v); \ + if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ + rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ "uvarint parsing failed"); \ - *(dst) = _v; \ + *(dst) = _v; \ } while (0) @@ -621,71 +638,71 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ * standard String representation (2+N). * * The kstr data will be updated to point to the rkbuf. 
*/ -#define rd_kafka_buf_read_str(rkbuf, kstr) do { \ - int _klen; \ - if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \ - uint64_t _uva; \ - rd_kafka_buf_read_uvarint(rkbuf, &_uva); \ - (kstr)->len = ((int32_t)_uva) - 1; \ - _klen = (kstr)->len; \ - } else { \ - rd_kafka_buf_read_i16a(rkbuf, (kstr)->len); \ - _klen = RD_KAFKAP_STR_LEN(kstr); \ - } \ - if (RD_KAFKAP_STR_IS_NULL(kstr)) \ - (kstr)->str = NULL; \ - else if (RD_KAFKAP_STR_LEN(kstr) == 0) \ - (kstr)->str = ""; \ - else if (!((kstr)->str = \ - rd_slice_ensure_contig(&rkbuf->rkbuf_reader, \ - _klen))) \ - rd_kafka_buf_check_len(rkbuf, _klen); \ +#define rd_kafka_buf_read_str(rkbuf, kstr) \ + do { \ + int _klen; \ + if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \ + uint64_t _uva; \ + rd_kafka_buf_read_uvarint(rkbuf, &_uva); \ + (kstr)->len = ((int32_t)_uva) - 1; \ + _klen = (kstr)->len; \ + } else { \ + rd_kafka_buf_read_i16a(rkbuf, (kstr)->len); \ + _klen = RD_KAFKAP_STR_LEN(kstr); \ + } \ + if (RD_KAFKAP_STR_IS_NULL(kstr)) \ + (kstr)->str = NULL; \ + else if (RD_KAFKAP_STR_LEN(kstr) == 0) \ + (kstr)->str = ""; \ + else if (!((kstr)->str = rd_slice_ensure_contig( \ + &rkbuf->rkbuf_reader, _klen))) \ + rd_kafka_buf_check_len(rkbuf, _klen); \ } while (0) /* Read Kafka String representation (2+N) and write it to the \p tmpabuf * with a trailing nul byte. 
*/ -#define rd_kafka_buf_read_str_tmpabuf(rkbuf, tmpabuf, dst) do { \ - rd_kafkap_str_t _kstr; \ - size_t _slen; \ - char *_dst; \ - rd_kafka_buf_read_str(rkbuf, &_kstr); \ - _slen = RD_KAFKAP_STR_LEN(&_kstr); \ - if (!(_dst = \ - rd_tmpabuf_write(tmpabuf, _kstr.str, _slen+1))) \ - rd_kafka_buf_parse_fail( \ - rkbuf, \ - "Not enough room in tmpabuf: " \ - "%"PRIusz"+%"PRIusz \ - " > %"PRIusz, \ - (tmpabuf)->of, _slen+1, (tmpabuf)->size); \ - _dst[_slen] = '\0'; \ - dst = (void *)_dst; \ - } while (0) +#define rd_kafka_buf_read_str_tmpabuf(rkbuf, tmpabuf, dst) \ + do { \ + rd_kafkap_str_t _kstr; \ + size_t _slen; \ + char *_dst; \ + rd_kafka_buf_read_str(rkbuf, &_kstr); \ + _slen = RD_KAFKAP_STR_LEN(&_kstr); \ + if (!(_dst = rd_tmpabuf_write(tmpabuf, _kstr.str, _slen + 1))) \ + rd_kafka_buf_parse_fail( \ + rkbuf, \ + "Not enough room in tmpabuf: " \ + "%" PRIusz "+%" PRIusz " > %" PRIusz, \ + (tmpabuf)->of, _slen + 1, (tmpabuf)->size); \ + _dst[_slen] = '\0'; \ + dst = (void *)_dst; \ + } while (0) /** * Skip a string. */ -#define rd_kafka_buf_skip_str(rkbuf) do { \ - int16_t _slen; \ - rd_kafka_buf_read_i16(rkbuf, &_slen); \ - rd_kafka_buf_skip(rkbuf, RD_KAFKAP_STR_LEN0(_slen)); \ - } while (0) +#define rd_kafka_buf_skip_str(rkbuf) \ + do { \ + int16_t _slen; \ + rd_kafka_buf_read_i16(rkbuf, &_slen); \ + rd_kafka_buf_skip(rkbuf, RD_KAFKAP_STR_LEN0(_slen)); \ + } while (0) /* Read Kafka Bytes representation (4+N). 
* The 'kbytes' will be updated to point to rkbuf data */ -#define rd_kafka_buf_read_bytes(rkbuf, kbytes) do { \ - int _klen; \ - rd_kafka_buf_read_i32a(rkbuf, _klen); \ - (kbytes)->len = _klen; \ - if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \ - (kbytes)->data = NULL; \ - (kbytes)->len = 0; \ - } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \ - (kbytes)->data = ""; \ - else if (!((kbytes)->data = \ - rd_slice_ensure_contig(&(rkbuf)->rkbuf_reader, \ - _klen))) \ - rd_kafka_buf_check_len(rkbuf, _klen); \ +#define rd_kafka_buf_read_bytes(rkbuf, kbytes) \ + do { \ + int _klen; \ + rd_kafka_buf_read_i32a(rkbuf, _klen); \ + (kbytes)->len = _klen; \ + if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \ + (kbytes)->data = NULL; \ + (kbytes)->len = 0; \ + } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \ + (kbytes)->data = ""; \ + else if (!((kbytes)->data = rd_slice_ensure_contig( \ + &(rkbuf)->rkbuf_reader, _klen))) \ + rd_kafka_buf_check_len(rkbuf, _klen); \ } while (0) @@ -693,34 +710,35 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ * @brief Read \p size bytes from buffer, setting \p *ptr to the start * of the memory region. 
*/ -#define rd_kafka_buf_read_ptr(rkbuf,ptr,size) do { \ - size_t _klen = size; \ - if (!(*(ptr) = (void *) \ - rd_slice_ensure_contig(&(rkbuf)->rkbuf_reader, _klen))) \ - rd_kafka_buf_check_len(rkbuf, _klen); \ +#define rd_kafka_buf_read_ptr(rkbuf, ptr, size) \ + do { \ + size_t _klen = size; \ + if (!(*(ptr) = (void *)rd_slice_ensure_contig( \ + &(rkbuf)->rkbuf_reader, _klen))) \ + rd_kafka_buf_check_len(rkbuf, _klen); \ } while (0) /** * @brief Read varint-lengted Kafka Bytes representation */ -#define rd_kafka_buf_read_bytes_varint(rkbuf,kbytes) do { \ - int64_t _len2; \ - size_t _r = rd_slice_read_varint(&(rkbuf)->rkbuf_reader, \ - &_len2); \ - if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ - rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ - "varint parsing failed"); \ - (kbytes)->len = (int32_t)_len2; \ - if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \ - (kbytes)->data = NULL; \ - (kbytes)->len = 0; \ - } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \ - (kbytes)->data = ""; \ - else if (!((kbytes)->data = \ - rd_slice_ensure_contig(&(rkbuf)->rkbuf_reader, \ - (size_t)_len2))) \ - rd_kafka_buf_check_len(rkbuf, _len2); \ +#define rd_kafka_buf_read_bytes_varint(rkbuf, kbytes) \ + do { \ + int64_t _len2; \ + size_t _r = \ + rd_slice_read_varint(&(rkbuf)->rkbuf_reader, &_len2); \ + if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ + rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ + "varint parsing failed"); \ + (kbytes)->len = (int32_t)_len2; \ + if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \ + (kbytes)->data = NULL; \ + (kbytes)->len = 0; \ + } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \ + (kbytes)->data = ""; \ + else if (!((kbytes)->data = rd_slice_ensure_contig( \ + &(rkbuf)->rkbuf_reader, (size_t)_len2))) \ + rd_kafka_buf_check_len(rkbuf, _len2); \ } while (0) @@ -728,30 +746,33 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ * @brief Read throttle_time_ms (i32) from response and pass the value * to the throttle handling code. 
*/ -#define rd_kafka_buf_read_throttle_time(rkbuf) do { \ - int32_t _throttle_time_ms; \ - rd_kafka_buf_read_i32(rkbuf, &_throttle_time_ms); \ - rd_kafka_op_throttle_time((rkbuf)->rkbuf_rkb, \ - (rkbuf)->rkbuf_rkb->rkb_rk->rk_rep, \ - _throttle_time_ms); \ +#define rd_kafka_buf_read_throttle_time(rkbuf) \ + do { \ + int32_t _throttle_time_ms; \ + rd_kafka_buf_read_i32(rkbuf, &_throttle_time_ms); \ + rd_kafka_op_throttle_time((rkbuf)->rkbuf_rkb, \ + (rkbuf)->rkbuf_rkb->rkb_rk->rk_rep, \ + _throttle_time_ms); \ } while (0) /** * @brief Discard all KIP-482 Tags at the current position in the buffer. */ -#define rd_kafka_buf_skip_tags(rkbuf) do { \ - uint64_t _tagcnt; \ - if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \ - break; \ - rd_kafka_buf_read_uvarint(rkbuf, &_tagcnt); \ - while (_tagcnt-- > 0) { \ - uint64_t _tagtype, _taglen; \ - rd_kafka_buf_read_uvarint(rkbuf, &_tagtype); \ - rd_kafka_buf_read_uvarint(rkbuf, &_taglen); \ - if (_taglen > 1) \ - rd_kafka_buf_skip(rkbuf, (size_t)(_taglen - 1)); \ - } \ +#define rd_kafka_buf_skip_tags(rkbuf) \ + do { \ + uint64_t _tagcnt; \ + if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \ + break; \ + rd_kafka_buf_read_uvarint(rkbuf, &_tagcnt); \ + while (_tagcnt-- > 0) { \ + uint64_t _tagtype, _taglen; \ + rd_kafka_buf_read_uvarint(rkbuf, &_tagtype); \ + rd_kafka_buf_read_uvarint(rkbuf, &_taglen); \ + if (_taglen > 1) \ + rd_kafka_buf_skip(rkbuf, \ + (size_t)(_taglen - 1)); \ + } \ } while (0) /** @@ -759,43 +780,43 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */ * @remark Currently always writes empty tags. * @remark Change to ..write_uvarint() when actual tags are supported. 
*/ -#define rd_kafka_buf_write_tags(rkbuf) do { \ - if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \ - break; \ - rd_kafka_buf_write_i8(rkbuf, 0); \ +#define rd_kafka_buf_write_tags(rkbuf) \ + do { \ + if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \ + break; \ + rd_kafka_buf_write_i8(rkbuf, 0); \ } while (0) /** * @brief Reads an ARRAY or COMPACT_ARRAY count depending on buffer type. */ -#define rd_kafka_buf_read_arraycnt(rkbuf,arrcnt,maxval) do { \ - if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \ - uint64_t _uva; \ - rd_kafka_buf_read_uvarint(rkbuf, &_uva); \ - *(arrcnt) = (int32_t)_uva - 1; \ - } else { \ - rd_kafka_buf_read_i32(rkbuf, arrcnt); \ - } \ - if (*(arrcnt) < 0 || ((maxval) != -1 && *(arrcnt) > (maxval))) \ - rd_kafka_buf_parse_fail(rkbuf, \ - "ApiArrayCnt %"PRId32" out of range", \ - *(arrcnt)); \ +#define rd_kafka_buf_read_arraycnt(rkbuf, arrcnt, maxval) \ + do { \ + if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \ + uint64_t _uva; \ + rd_kafka_buf_read_uvarint(rkbuf, &_uva); \ + *(arrcnt) = (int32_t)_uva - 1; \ + } else { \ + rd_kafka_buf_read_i32(rkbuf, arrcnt); \ + } \ + if (*(arrcnt) < 0 || ((maxval) != -1 && *(arrcnt) > (maxval))) \ + rd_kafka_buf_parse_fail( \ + rkbuf, "ApiArrayCnt %" PRId32 " out of range", \ + *(arrcnt)); \ } while (0) - /** * @returns true if buffer has been sent on wire, else 0. */ -#define rd_kafka_buf_was_sent(rkbuf) \ - ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_SENT) +#define rd_kafka_buf_was_sent(rkbuf) ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_SENT) typedef struct rd_kafka_bufq_s { - TAILQ_HEAD(, rd_kafka_buf_s) rkbq_bufs; - rd_atomic32_t rkbq_cnt; - rd_atomic32_t rkbq_msg_cnt; + TAILQ_HEAD(, rd_kafka_buf_s) rkbq_bufs; + rd_atomic32_t rkbq_cnt; + rd_atomic32_t rkbq_msg_cnt; } rd_kafka_bufq_t; #define rd_kafka_bufq_cnt(rkbq) rd_atomic32_get(&(rkbq)->rkbq_cnt) @@ -809,7 +830,7 @@ typedef struct rd_kafka_bufq_s { * The relative timeout value is reused upon request retry. 
*/ static RD_INLINE void -rd_kafka_buf_set_timeout (rd_kafka_buf_t *rkbuf, int timeout_ms, rd_ts_t now) { +rd_kafka_buf_set_timeout(rd_kafka_buf_t *rkbuf, int timeout_ms, rd_ts_t now) { if (!now) now = rd_clock(); rkbuf->rkbuf_rel_timeout = timeout_ms; @@ -820,8 +841,9 @@ rd_kafka_buf_set_timeout (rd_kafka_buf_t *rkbuf, int timeout_ms, rd_ts_t now) { /** * @brief Calculate the effective timeout for a request attempt */ -void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf, - rd_ts_t now); +void rd_kafka_buf_calc_timeout(const rd_kafka_t *rk, + rd_kafka_buf_t *rkbuf, + rd_ts_t now); /** @@ -835,67 +857,75 @@ void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf, * * The remaining time is used as timeout for request retries. */ -static RD_INLINE void -rd_kafka_buf_set_abs_timeout0 (rd_kafka_buf_t *rkbuf, int timeout_ms, - rd_ts_t now, rd_bool_t force) { +static RD_INLINE void rd_kafka_buf_set_abs_timeout0(rd_kafka_buf_t *rkbuf, + int timeout_ms, + rd_ts_t now, + rd_bool_t force) { if (!now) now = rd_clock(); - rkbuf->rkbuf_rel_timeout = 0; - rkbuf->rkbuf_abs_timeout = now + ((rd_ts_t)timeout_ms * 1000); + rkbuf->rkbuf_rel_timeout = 0; + rkbuf->rkbuf_abs_timeout = now + ((rd_ts_t)timeout_ms * 1000); rkbuf->rkbuf_force_timeout = force; } -#define rd_kafka_buf_set_abs_timeout(rkbuf,timeout_ms,now) \ - rd_kafka_buf_set_abs_timeout0(rkbuf,timeout_ms,now,rd_false) +#define rd_kafka_buf_set_abs_timeout(rkbuf, timeout_ms, now) \ + rd_kafka_buf_set_abs_timeout0(rkbuf, timeout_ms, now, rd_false) -#define rd_kafka_buf_set_abs_timeout_force(rkbuf,timeout_ms,now) \ - rd_kafka_buf_set_abs_timeout0(rkbuf,timeout_ms,now,rd_true) +#define rd_kafka_buf_set_abs_timeout_force(rkbuf, timeout_ms, now) \ + rd_kafka_buf_set_abs_timeout0(rkbuf, timeout_ms, now, rd_true) #define rd_kafka_buf_keep(rkbuf) rd_refcnt_add(&(rkbuf)->rkbuf_refcnt) -#define rd_kafka_buf_destroy(rkbuf) \ - rd_refcnt_destroywrapper(&(rkbuf)->rkbuf_refcnt, \ +#define 
rd_kafka_buf_destroy(rkbuf) \ + rd_refcnt_destroywrapper(&(rkbuf)->rkbuf_refcnt, \ rd_kafka_buf_destroy_final(rkbuf)) -void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf); -void rd_kafka_buf_push0 (rd_kafka_buf_t *rkbuf, const void *buf, size_t len, - int allow_crc_calc, void (*free_cb) (void *)); -#define rd_kafka_buf_push(rkbuf,buf,len,free_cb) \ - rd_kafka_buf_push0(rkbuf,buf,len,1/*allow_crc*/,free_cb) -rd_kafka_buf_t *rd_kafka_buf_new0 (int segcnt, size_t size, int flags); -#define rd_kafka_buf_new(segcnt,size) \ - rd_kafka_buf_new0(segcnt,size,0) -rd_kafka_buf_t *rd_kafka_buf_new_request0 (rd_kafka_broker_t *rkb, - int16_t ApiKey, - int segcnt, size_t size, - rd_bool_t is_flexver); -#define rd_kafka_buf_new_request(rkb,ApiKey,segcnt,size) \ - rd_kafka_buf_new_request0(rkb,ApiKey,segcnt,size,rd_false) \ - -#define rd_kafka_buf_new_flexver_request(rkb,ApiKey,segcnt,size,is_flexver) \ - rd_kafka_buf_new_request0(rkb,ApiKey,segcnt,size,is_flexver) \ - -rd_kafka_buf_t *rd_kafka_buf_new_shadow (const void *ptr, size_t size, - void (*free_cb) (void *)); -void rd_kafka_bufq_enq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf); -void rd_kafka_bufq_deq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf); +void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf); +void rd_kafka_buf_push0(rd_kafka_buf_t *rkbuf, + const void *buf, + size_t len, + int allow_crc_calc, + void (*free_cb)(void *)); +#define rd_kafka_buf_push(rkbuf, buf, len, free_cb) \ + rd_kafka_buf_push0(rkbuf, buf, len, 1 /*allow_crc*/, free_cb) +rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags); +#define rd_kafka_buf_new(segcnt, size) rd_kafka_buf_new0(segcnt, size, 0) +rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int segcnt, + size_t size, + rd_bool_t is_flexver); +#define rd_kafka_buf_new_request(rkb, ApiKey, segcnt, size) \ + rd_kafka_buf_new_request0(rkb, ApiKey, segcnt, size, rd_false) + +#define rd_kafka_buf_new_flexver_request(rkb, ApiKey, 
segcnt, size, \ + is_flexver) \ + rd_kafka_buf_new_request0(rkb, ApiKey, segcnt, size, is_flexver) + +rd_kafka_buf_t * +rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *)); +void rd_kafka_bufq_enq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf); +void rd_kafka_bufq_deq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf); void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq); -void rd_kafka_bufq_concat (rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src); -void rd_kafka_bufq_purge (rd_kafka_broker_t *rkb, - rd_kafka_bufq_t *rkbufq, - rd_kafka_resp_err_t err); -void rd_kafka_bufq_connection_reset (rd_kafka_broker_t *rkb, - rd_kafka_bufq_t *rkbufq); -void rd_kafka_bufq_dump (rd_kafka_broker_t *rkb, const char *fac, - rd_kafka_bufq_t *rkbq); +void rd_kafka_bufq_concat(rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src); +void rd_kafka_bufq_purge(rd_kafka_broker_t *rkb, + rd_kafka_bufq_t *rkbufq, + rd_kafka_resp_err_t err); +void rd_kafka_bufq_connection_reset(rd_kafka_broker_t *rkb, + rd_kafka_bufq_t *rkbufq); +void rd_kafka_bufq_dump(rd_kafka_broker_t *rkb, + const char *fac, + rd_kafka_bufq_t *rkbq); -int rd_kafka_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf); +int rd_kafka_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf); -void rd_kafka_buf_handle_op (rd_kafka_op_t *rko, rd_kafka_resp_err_t err); -void rd_kafka_buf_callback (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err, - rd_kafka_buf_t *response, rd_kafka_buf_t *request); +void rd_kafka_buf_handle_op(rd_kafka_op_t *rko, rd_kafka_resp_err_t err); +void rd_kafka_buf_callback(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *response, + rd_kafka_buf_t *request); @@ -909,10 +939,11 @@ void rd_kafka_buf_callback (rd_kafka_t *rk, * Set request API type version */ static RD_UNUSED RD_INLINE void -rd_kafka_buf_ApiVersion_set (rd_kafka_buf_t *rkbuf, - int16_t version, int features) { +rd_kafka_buf_ApiVersion_set(rd_kafka_buf_t *rkbuf, + int16_t 
version, + int features) { rkbuf->rkbuf_reqhdr.ApiVersion = version; - rkbuf->rkbuf_features = features; + rkbuf->rkbuf_features = features; } @@ -928,8 +959,9 @@ rd_kafka_buf_ApiVersion_set (rd_kafka_buf_t *rkbuf, * There must be enough space allocated in the rkbuf. * Returns offset to written destination buffer. */ -static RD_INLINE size_t rd_kafka_buf_write (rd_kafka_buf_t *rkbuf, - const void *data, size_t len) { +static RD_INLINE size_t rd_kafka_buf_write(rd_kafka_buf_t *rkbuf, + const void *data, + size_t len) { size_t r; r = rd_buf_write(&rkbuf->rkbuf_buf, data, len); @@ -950,8 +982,10 @@ static RD_INLINE size_t rd_kafka_buf_write (rd_kafka_buf_t *rkbuf, * NOTE: rd_kafka_buf_update() MUST NOT be called when a CRC calculation * is in progress (between rd_kafka_buf_crc_init() & .._crc_finalize()) */ -static RD_INLINE void rd_kafka_buf_update (rd_kafka_buf_t *rkbuf, size_t of, - const void *data, size_t len) { +static RD_INLINE void rd_kafka_buf_update(rd_kafka_buf_t *rkbuf, + size_t of, + const void *data, + size_t len) { rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)); rd_buf_write_update(&rkbuf->rkbuf_buf, of, data, len); } @@ -959,8 +993,7 @@ static RD_INLINE void rd_kafka_buf_update (rd_kafka_buf_t *rkbuf, size_t of, /** * Write int8_t to buffer. */ -static RD_INLINE size_t rd_kafka_buf_write_i8 (rd_kafka_buf_t *rkbuf, - int8_t v) { +static RD_INLINE size_t rd_kafka_buf_write_i8(rd_kafka_buf_t *rkbuf, int8_t v) { return rd_kafka_buf_write(rkbuf, &v, sizeof(v)); } @@ -968,8 +1001,8 @@ static RD_INLINE size_t rd_kafka_buf_write_i8 (rd_kafka_buf_t *rkbuf, * Update int8_t in buffer at offset 'of'. * 'of' should have been previously returned by `.._buf_write_i8()`. 
*/ -static RD_INLINE void rd_kafka_buf_update_i8 (rd_kafka_buf_t *rkbuf, - size_t of, int8_t v) { +static RD_INLINE void +rd_kafka_buf_update_i8(rd_kafka_buf_t *rkbuf, size_t of, int8_t v) { rd_kafka_buf_update(rkbuf, of, &v, sizeof(v)); } @@ -977,8 +1010,8 @@ static RD_INLINE void rd_kafka_buf_update_i8 (rd_kafka_buf_t *rkbuf, * Write int16_t to buffer. * The value will be endian-swapped before write. */ -static RD_INLINE size_t rd_kafka_buf_write_i16 (rd_kafka_buf_t *rkbuf, - int16_t v) { +static RD_INLINE size_t rd_kafka_buf_write_i16(rd_kafka_buf_t *rkbuf, + int16_t v) { v = htobe16(v); return rd_kafka_buf_write(rkbuf, &v, sizeof(v)); } @@ -987,8 +1020,8 @@ static RD_INLINE size_t rd_kafka_buf_write_i16 (rd_kafka_buf_t *rkbuf, * Update int16_t in buffer at offset 'of'. * 'of' should have been previously returned by `.._buf_write_i16()`. */ -static RD_INLINE void rd_kafka_buf_update_i16 (rd_kafka_buf_t *rkbuf, - size_t of, int16_t v) { +static RD_INLINE void +rd_kafka_buf_update_i16(rd_kafka_buf_t *rkbuf, size_t of, int16_t v) { v = htobe16(v); rd_kafka_buf_update(rkbuf, of, &v, sizeof(v)); } @@ -997,7 +1030,7 @@ static RD_INLINE void rd_kafka_buf_update_i16 (rd_kafka_buf_t *rkbuf, * Write int32_t to buffer. * The value will be endian-swapped before write. */ -static RD_INLINE size_t rd_kafka_buf_write_i32 (rd_kafka_buf_t *rkbuf, +static RD_INLINE size_t rd_kafka_buf_write_i32(rd_kafka_buf_t *rkbuf, int32_t v) { v = (int32_t)htobe32(v); return rd_kafka_buf_write(rkbuf, &v, sizeof(v)); @@ -1007,8 +1040,8 @@ static RD_INLINE size_t rd_kafka_buf_write_i32 (rd_kafka_buf_t *rkbuf, * Update int32_t in buffer at offset 'of'. * 'of' should have been previously returned by `.._buf_write_i32()`. 
*/ -static RD_INLINE void rd_kafka_buf_update_i32 (rd_kafka_buf_t *rkbuf, - size_t of, int32_t v) { +static RD_INLINE void +rd_kafka_buf_update_i32(rd_kafka_buf_t *rkbuf, size_t of, int32_t v) { v = htobe32(v); rd_kafka_buf_update(rkbuf, of, &v, sizeof(v)); } @@ -1017,8 +1050,8 @@ static RD_INLINE void rd_kafka_buf_update_i32 (rd_kafka_buf_t *rkbuf, * Update int32_t in buffer at offset 'of'. * 'of' should have been previously returned by `.._buf_write_i32()`. */ -static RD_INLINE void rd_kafka_buf_update_u32 (rd_kafka_buf_t *rkbuf, - size_t of, uint32_t v) { +static RD_INLINE void +rd_kafka_buf_update_u32(rd_kafka_buf_t *rkbuf, size_t of, uint32_t v) { v = htobe32(v); rd_kafka_buf_update(rkbuf, of, &v, sizeof(v)); } @@ -1043,8 +1076,8 @@ static RD_INLINE void rd_kafka_buf_update_u32 (rd_kafka_buf_t *rkbuf, * @remark For flexibleVersions this will shrink the buffer and move data * and may thus be costly. */ -static RD_INLINE void rd_kafka_buf_finalize_arraycnt (rd_kafka_buf_t *rkbuf, - size_t of, int cnt) { +static RD_INLINE void +rd_kafka_buf_finalize_arraycnt(rd_kafka_buf_t *rkbuf, size_t of, int cnt) { char buf[sizeof(int32_t)]; size_t sz, r; @@ -1066,7 +1099,8 @@ static RD_INLINE void rd_kafka_buf_finalize_arraycnt (rd_kafka_buf_t *rkbuf, if (sz < sizeof(int32_t)) { /* Varint occupies less space than the allotted 4 bytes, erase * the remaining bytes. */ - r = rd_buf_erase(&rkbuf->rkbuf_buf, of+sz, sizeof(int32_t)-sz); + r = rd_buf_erase(&rkbuf->rkbuf_buf, of + sz, + sizeof(int32_t) - sz); rd_assert(r == sizeof(int32_t) - sz); } } @@ -1076,8 +1110,8 @@ static RD_INLINE void rd_kafka_buf_finalize_arraycnt (rd_kafka_buf_t *rkbuf, * Write int64_t to buffer. * The value will be endian-swapped before write. 
*/ -static RD_INLINE size_t rd_kafka_buf_write_i64 (rd_kafka_buf_t *rkbuf, - int64_t v) { +static RD_INLINE size_t rd_kafka_buf_write_i64(rd_kafka_buf_t *rkbuf, + int64_t v) { v = htobe64(v); return rd_kafka_buf_write(rkbuf, &v, sizeof(v)); } @@ -1086,8 +1120,8 @@ static RD_INLINE size_t rd_kafka_buf_write_i64 (rd_kafka_buf_t *rkbuf, * Update int64_t in buffer at address 'ptr'. * 'of' should have been previously returned by `.._buf_write_i64()`. */ -static RD_INLINE void rd_kafka_buf_update_i64 (rd_kafka_buf_t *rkbuf, - size_t of, int64_t v) { +static RD_INLINE void +rd_kafka_buf_update_i64(rd_kafka_buf_t *rkbuf, size_t of, int64_t v) { v = htobe64(v); rd_kafka_buf_update(rkbuf, of, &v, sizeof(v)); } @@ -1096,8 +1130,8 @@ static RD_INLINE void rd_kafka_buf_update_i64 (rd_kafka_buf_t *rkbuf, /** * @brief Write varint-encoded signed value to buffer. */ -static RD_INLINE size_t -rd_kafka_buf_write_varint (rd_kafka_buf_t *rkbuf, int64_t v) { +static RD_INLINE size_t rd_kafka_buf_write_varint(rd_kafka_buf_t *rkbuf, + int64_t v) { char varint[RD_UVARINT_ENC_SIZEOF(v)]; size_t sz; @@ -1109,8 +1143,8 @@ rd_kafka_buf_write_varint (rd_kafka_buf_t *rkbuf, int64_t v) { /** * @brief Write varint-encoded unsigned value to buffer. */ -static RD_INLINE size_t -rd_kafka_buf_write_uvarint (rd_kafka_buf_t *rkbuf, uint64_t v) { +static RD_INLINE size_t rd_kafka_buf_write_uvarint(rd_kafka_buf_t *rkbuf, + uint64_t v) { char varint[RD_UVARINT_ENC_SIZEOF(v)]; size_t sz; @@ -1127,7 +1161,7 @@ rd_kafka_buf_write_uvarint (rd_kafka_buf_t *rkbuf, uint64_t v) { * * @returns the offset in \p rkbuf where the string was written. 
*/ -static RD_INLINE size_t rd_kafka_buf_write_kstr (rd_kafka_buf_t *rkbuf, +static RD_INLINE size_t rd_kafka_buf_write_kstr(rd_kafka_buf_t *rkbuf, const rd_kafkap_str_t *kstr) { size_t len, r; @@ -1142,7 +1176,7 @@ static RD_INLINE size_t rd_kafka_buf_write_kstr (rd_kafka_buf_t *rkbuf, RD_KAFKAP_STR_SIZE(kstr)); len = RD_KAFKAP_STR_LEN(kstr); - r = rd_kafka_buf_write_i16(rkbuf, (int16_t)len); + r = rd_kafka_buf_write_i16(rkbuf, (int16_t)len); rd_kafka_buf_write(rkbuf, kstr->str, len); return r; @@ -1160,7 +1194,7 @@ static RD_INLINE size_t rd_kafka_buf_write_kstr (rd_kafka_buf_t *rkbuf, r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len); if (len > 1) - rd_kafka_buf_write(rkbuf, kstr->str, len-1); + rd_kafka_buf_write(rkbuf, kstr->str, len - 1); return r; } @@ -1171,9 +1205,9 @@ static RD_INLINE size_t rd_kafka_buf_write_kstr (rd_kafka_buf_t *rkbuf, * * @remark Copies the string. */ -static RD_INLINE size_t -rd_kafka_buf_write_str (rd_kafka_buf_t *rkbuf, - const char *str, size_t len) { +static RD_INLINE size_t rd_kafka_buf_write_str(rd_kafka_buf_t *rkbuf, + const char *str, + size_t len) { size_t r; if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) { @@ -1182,7 +1216,7 @@ rd_kafka_buf_write_str (rd_kafka_buf_t *rkbuf, len = RD_KAFKAP_STR_LEN_NULL; else if (len == (size_t)-1) len = strlen(str); - r = rd_kafka_buf_write_i16(rkbuf, (int16_t) len); + r = rd_kafka_buf_write_i16(rkbuf, (int16_t)len); if (str) rd_kafka_buf_write(rkbuf, str, len); return r; @@ -1202,7 +1236,7 @@ rd_kafka_buf_write_str (rd_kafka_buf_t *rkbuf, r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len); if (len > 1) - rd_kafka_buf_write(rkbuf, str, len-1); + rd_kafka_buf_write(rkbuf, str, len - 1); return r; } @@ -1211,10 +1245,10 @@ rd_kafka_buf_write_str (rd_kafka_buf_t *rkbuf, /** * Push (i.e., no copy) Kafka string to buffer iovec */ -static RD_INLINE void rd_kafka_buf_push_kstr (rd_kafka_buf_t *rkbuf, +static RD_INLINE void rd_kafka_buf_push_kstr(rd_kafka_buf_t *rkbuf, const 
rd_kafkap_str_t *kstr) { - rd_kafka_buf_push(rkbuf, RD_KAFKAP_STR_SER(kstr), - RD_KAFKAP_STR_SIZE(kstr), NULL); + rd_kafka_buf_push(rkbuf, RD_KAFKAP_STR_SER(kstr), + RD_KAFKAP_STR_SIZE(kstr), NULL); } @@ -1223,8 +1257,8 @@ static RD_INLINE void rd_kafka_buf_push_kstr (rd_kafka_buf_t *rkbuf, * Write (copy) Kafka bytes to buffer. */ static RD_INLINE size_t -rd_kafka_buf_write_kbytes (rd_kafka_buf_t *rkbuf, - const rd_kafkap_bytes_t *kbytes) { +rd_kafka_buf_write_kbytes(rd_kafka_buf_t *rkbuf, + const rd_kafkap_bytes_t *kbytes) { size_t len; if (!kbytes || RD_KAFKAP_BYTES_IS_NULL(kbytes)) @@ -1244,21 +1278,23 @@ rd_kafka_buf_write_kbytes (rd_kafka_buf_t *rkbuf, /** * Push (i.e., no copy) Kafka bytes to buffer iovec */ -static RD_INLINE void rd_kafka_buf_push_kbytes (rd_kafka_buf_t *rkbuf, - const rd_kafkap_bytes_t *kbytes){ - rd_kafka_buf_push(rkbuf, RD_KAFKAP_BYTES_SER(kbytes), - RD_KAFKAP_BYTES_SIZE(kbytes), NULL); +static RD_INLINE void +rd_kafka_buf_push_kbytes(rd_kafka_buf_t *rkbuf, + const rd_kafkap_bytes_t *kbytes) { + rd_kafka_buf_push(rkbuf, RD_KAFKAP_BYTES_SER(kbytes), + RD_KAFKAP_BYTES_SIZE(kbytes), NULL); } /** * Write (copy) binary bytes to buffer as Kafka bytes encapsulate data. */ -static RD_INLINE size_t rd_kafka_buf_write_bytes (rd_kafka_buf_t *rkbuf, - const void *payload, size_t size) { +static RD_INLINE size_t rd_kafka_buf_write_bytes(rd_kafka_buf_t *rkbuf, + const void *payload, + size_t size) { size_t r; if (!payload) size = RD_KAFKAP_BYTES_LEN_NULL; - r = rd_kafka_buf_write_i32(rkbuf, (int32_t) size); + r = rd_kafka_buf_write_i32(rkbuf, (int32_t)size); if (payload) rd_kafka_buf_write(rkbuf, payload, size); return r; @@ -1268,8 +1304,8 @@ static RD_INLINE size_t rd_kafka_buf_write_bytes (rd_kafka_buf_t *rkbuf, /** * @brief Write bool to buffer. 
*/ -static RD_INLINE size_t rd_kafka_buf_write_bool (rd_kafka_buf_t *rkbuf, - rd_bool_t v) { +static RD_INLINE size_t rd_kafka_buf_write_bool(rd_kafka_buf_t *rkbuf, + rd_bool_t v) { return rd_kafka_buf_write_i8(rkbuf, (int8_t)v); } @@ -1280,36 +1316,38 @@ static RD_INLINE size_t rd_kafka_buf_write_bool (rd_kafka_buf_t *rkbuf, * * Returns the buffer offset of the first byte. */ -size_t rd_kafka_buf_write_Message (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - int64_t Offset, int8_t MagicByte, - int8_t Attributes, int64_t Timestamp, - const void *key, int32_t key_len, - const void *payload, int32_t len, - int *outlenp); +size_t rd_kafka_buf_write_Message(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + int64_t Offset, + int8_t MagicByte, + int8_t Attributes, + int64_t Timestamp, + const void *key, + int32_t key_len, + const void *payload, + int32_t len, + int *outlenp); /** * Start calculating CRC from now and track it in '*crcp'. */ -static RD_INLINE RD_UNUSED void rd_kafka_buf_crc_init (rd_kafka_buf_t *rkbuf) { - rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)); - rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_CRC; - rkbuf->rkbuf_crc = rd_crc32_init(); +static RD_INLINE RD_UNUSED void rd_kafka_buf_crc_init(rd_kafka_buf_t *rkbuf) { + rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)); + rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_CRC; + rkbuf->rkbuf_crc = rd_crc32_init(); } /** * Finalizes CRC calculation and returns the calculated checksum. */ -static RD_INLINE RD_UNUSED -rd_crc32_t rd_kafka_buf_crc_finalize (rd_kafka_buf_t *rkbuf) { - rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_CRC; - return rd_crc32_finalize(rkbuf->rkbuf_crc); +static RD_INLINE RD_UNUSED rd_crc32_t +rd_kafka_buf_crc_finalize(rd_kafka_buf_t *rkbuf) { + rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_CRC; + return rd_crc32_finalize(rkbuf->rkbuf_crc); } - - /** * @brief Check if buffer's replyq.version is outdated. * @param rkbuf: may be NULL, for convenience. 
@@ -1317,15 +1355,15 @@ rd_crc32_t rd_kafka_buf_crc_finalize (rd_kafka_buf_t *rkbuf) { * @returns 1 if this is an outdated buffer, else 0. */ static RD_UNUSED RD_INLINE int -rd_kafka_buf_version_outdated (const rd_kafka_buf_t *rkbuf, int version) { +rd_kafka_buf_version_outdated(const rd_kafka_buf_t *rkbuf, int version) { return rkbuf && rkbuf->rkbuf_replyq.version && - rkbuf->rkbuf_replyq.version < version; + rkbuf->rkbuf_replyq.version < version; } -void rd_kafka_buf_set_maker (rd_kafka_buf_t *rkbuf, - rd_kafka_make_req_cb_t *make_cb, - void *make_opaque, - void (*free_make_opaque_cb) (void *make_opaque)); +void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf, + rd_kafka_make_req_cb_t *make_cb, + void *make_opaque, + void (*free_make_opaque_cb)(void *make_opaque)); #endif /* _RDKAFKA_BUF_H_ */ diff --git a/src/rdkafka_cert.c b/src/rdkafka_cert.c index d4401b3ea5..dc51708738 100644 --- a/src/rdkafka_cert.c +++ b/src/rdkafka_cert.c @@ -47,15 +47,15 @@ * * @locality application thread */ -static int rd_kafka_conf_ssl_passwd_cb (char *buf, int size, int rwflag, - void *userdata) { +static int +rd_kafka_conf_ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata) { const rd_kafka_conf_t *conf = userdata; int pwlen; if (!conf->ssl.key_password) return -1; - pwlen = (int) strlen(conf->ssl.key_password); + pwlen = (int)strlen(conf->ssl.key_password); memcpy(buf, conf->ssl.key_password, RD_MIN(pwlen, size)); return pwlen; @@ -63,23 +63,16 @@ static int rd_kafka_conf_ssl_passwd_cb (char *buf, int size, int rwflag, -static const char *rd_kafka_cert_type_names[] = { - "public-key", - "private-key", - "CA" -}; +static const char *rd_kafka_cert_type_names[] = {"public-key", "private-key", + "CA"}; -static const char *rd_kafka_cert_enc_names[] = { - "PKCS#12", - "DER", - "PEM" -}; +static const char *rd_kafka_cert_enc_names[] = {"PKCS#12", "DER", "PEM"}; /** * @brief Destroy a certificate */ -static void rd_kafka_cert_destroy (rd_kafka_cert_t *cert) { +static void 
rd_kafka_cert_destroy(rd_kafka_cert_t *cert) { if (rd_refcnt_sub(&cert->refcnt) > 0) return; @@ -97,7 +90,7 @@ static void rd_kafka_cert_destroy (rd_kafka_cert_t *cert) { /** * @brief Create a copy of a cert */ -static rd_kafka_cert_t *rd_kafka_cert_dup (rd_kafka_cert_t *src) { +static rd_kafka_cert_t *rd_kafka_cert_dup(rd_kafka_cert_t *src) { rd_refcnt_add(&src->refcnt); return src; } @@ -105,29 +98,27 @@ static rd_kafka_cert_t *rd_kafka_cert_dup (rd_kafka_cert_t *src) { /** * @brief Print the OpenSSL error stack do stdout, for development use. */ -static RD_UNUSED void rd_kafka_print_ssl_errors (void) { +static RD_UNUSED void rd_kafka_print_ssl_errors(void) { unsigned long l; const char *file, *data; int line, flags; - while ((l = ERR_get_error_line_data(&file, &line, - &data, &flags)) != 0) { + while ((l = ERR_get_error_line_data(&file, &line, &data, &flags)) != + 0) { char buf[256]; ERR_error_string_n(l, buf, sizeof(buf)); - printf("ERR: %s:%d: %s: %s:\n", - file, line, buf, (flags & ERR_TXT_STRING) ? data : ""); - printf(" %lu:%s : %s : %s : %d : %s (%p, %d, fl 0x%x)\n", - l, - ERR_lib_error_string(l), - ERR_func_error_string(l), - file, line, - (flags & ERR_TXT_STRING) && data && *data ? - data : ERR_reason_error_string(l), + printf("ERR: %s:%d: %s: %s:\n", file, line, buf, + (flags & ERR_TXT_STRING) ? data : ""); + printf(" %lu:%s : %s : %s : %d : %s (%p, %d, fl 0x%x)\n", l, + ERR_lib_error_string(l), ERR_func_error_string(l), file, + line, + (flags & ERR_TXT_STRING) && data && *data + ? data + : ERR_reason_error_string(l), data, data ? (int)strlen(data) : -1, flags & ERR_TXT_STRING); - } } @@ -136,38 +127,37 @@ static RD_UNUSED void rd_kafka_print_ssl_errors (void) { * or NULL on failure in which case errstr will have a human-readable * error string written to it. 
*/ -static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, - rd_kafka_cert_type_t type, - rd_kafka_cert_enc_t encoding, - const void *buffer, size_t size, - char *errstr, size_t errstr_size) { +static rd_kafka_cert_t *rd_kafka_cert_new(const rd_kafka_conf_t *conf, + rd_kafka_cert_type_t type, + rd_kafka_cert_enc_t encoding, + const void *buffer, + size_t size, + char *errstr, + size_t errstr_size) { static const rd_bool_t - valid[RD_KAFKA_CERT__CNT][RD_KAFKA_CERT_ENC__CNT] = { + valid[RD_KAFKA_CERT__CNT][RD_KAFKA_CERT_ENC__CNT] = { /* Valid encodings per certificate type */ - [RD_KAFKA_CERT_PUBLIC_KEY] = { - [RD_KAFKA_CERT_ENC_PKCS12] = rd_true, - [RD_KAFKA_CERT_ENC_DER] = rd_true, - [RD_KAFKA_CERT_ENC_PEM] = rd_true - }, - [RD_KAFKA_CERT_PRIVATE_KEY] = { - [RD_KAFKA_CERT_ENC_PKCS12] = rd_true, - [RD_KAFKA_CERT_ENC_DER] = rd_true, - [RD_KAFKA_CERT_ENC_PEM] = rd_true - }, - [RD_KAFKA_CERT_CA] = { - [RD_KAFKA_CERT_ENC_PKCS12] = rd_true, - [RD_KAFKA_CERT_ENC_DER] = rd_true, - [RD_KAFKA_CERT_ENC_PEM] = rd_true - }, - }; + [RD_KAFKA_CERT_PUBLIC_KEY] = {[RD_KAFKA_CERT_ENC_PKCS12] = + rd_true, + [RD_KAFKA_CERT_ENC_DER] = rd_true, + [RD_KAFKA_CERT_ENC_PEM] = + rd_true}, + [RD_KAFKA_CERT_PRIVATE_KEY] = + {[RD_KAFKA_CERT_ENC_PKCS12] = rd_true, + [RD_KAFKA_CERT_ENC_DER] = rd_true, + [RD_KAFKA_CERT_ENC_PEM] = rd_true}, + [RD_KAFKA_CERT_CA] = {[RD_KAFKA_CERT_ENC_PKCS12] = rd_true, + [RD_KAFKA_CERT_ENC_DER] = rd_true, + [RD_KAFKA_CERT_ENC_PEM] = rd_true}, + }; const char *action = ""; BIO *bio; rd_kafka_cert_t *cert = NULL; - PKCS12 *p12 = NULL; + PKCS12 *p12 = NULL; if ((int)type < 0 || type >= RD_KAFKA_CERT__CNT) { - rd_snprintf(errstr, errstr_size, - "Invalid certificate type %d", (int)type); + rd_snprintf(errstr, errstr_size, "Invalid certificate type %d", + (int)type); return NULL; } @@ -186,148 +176,136 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, } action = "read memory"; - bio = BIO_new_mem_buf((void *)buffer, (long)size); + 
bio = BIO_new_mem_buf((void *)buffer, (long)size); if (!bio) goto fail; if (encoding == RD_KAFKA_CERT_ENC_PKCS12) { action = "read PKCS#12"; - p12 = d2i_PKCS12_bio(bio, NULL); + p12 = d2i_PKCS12_bio(bio, NULL); if (!p12) goto fail; } - cert = rd_calloc(1, sizeof(*cert)); - cert->type = type; + cert = rd_calloc(1, sizeof(*cert)); + cert->type = type; cert->encoding = encoding; rd_refcnt_init(&cert->refcnt, 1); - switch (type) - { + switch (type) { case RD_KAFKA_CERT_CA: cert->store = X509_STORE_new(); - switch (encoding) - { - case RD_KAFKA_CERT_ENC_PKCS12: - { - EVP_PKEY *ign_pkey; - X509 *ign_cert; - STACK_OF(X509) *cas = NULL; - int i; - - action = "parse PKCS#12"; - if (!PKCS12_parse(p12, conf->ssl.key_password, - &ign_pkey, &ign_cert, - &cas)) - goto fail; + switch (encoding) { + case RD_KAFKA_CERT_ENC_PKCS12: { + EVP_PKEY *ign_pkey; + X509 *ign_cert; + STACK_OF(X509) *cas = NULL; + int i; + + action = "parse PKCS#12"; + if (!PKCS12_parse(p12, conf->ssl.key_password, + &ign_pkey, &ign_cert, &cas)) + goto fail; - EVP_PKEY_free(ign_pkey); - X509_free(ign_cert); + EVP_PKEY_free(ign_pkey); + X509_free(ign_cert); + + if (!cas || sk_X509_num(cas) < 1) { + action = + "retrieve at least one CA " + "cert from PKCS#12"; + if (cas) + sk_X509_pop_free(cas, X509_free); + goto fail; + } - if (!cas || sk_X509_num(cas) < 1) { - action = "retrieve at least one CA " - "cert from PKCS#12"; - if (cas) - sk_X509_pop_free(cas, - X509_free); + for (i = 0; i < sk_X509_num(cas); i++) { + if (!X509_STORE_add_cert( + cert->store, sk_X509_value(cas, i))) { + action = + "add certificate to " + "X.509 store"; + sk_X509_pop_free(cas, X509_free); goto fail; } + } - for (i = 0 ; i < sk_X509_num(cas) ; i++) { - if (!X509_STORE_add_cert( - cert->store, - sk_X509_value(cas, i))) { - action = "add certificate to " - "X.509 store"; - sk_X509_pop_free(cas, - X509_free); - goto fail; - } - } + sk_X509_pop_free(cas, X509_free); + } break; + + case RD_KAFKA_CERT_ENC_DER: { + X509 *x509; + + action = 
"read DER / X.509 ASN.1"; + if (!(x509 = d2i_X509_bio(bio, NULL))) + goto fail; - sk_X509_pop_free(cas, X509_free); + if (!X509_STORE_add_cert(cert->store, x509)) { + action = + "add certificate to " + "X.509 store"; + X509_free(x509); + goto fail; } - break; + } break; + + case RD_KAFKA_CERT_ENC_PEM: { + X509 *x509; + int cnt = 0; - case RD_KAFKA_CERT_ENC_DER: - { - X509 *x509; + action = "read PEM"; - action = "read DER / X.509 ASN.1"; - if (!(x509 = d2i_X509_bio(bio, NULL))) - goto fail; + /* This will read one certificate per call + * until an error occurs or the end of the + * buffer is reached (which is an error + * we'll need to clear). */ + while ((x509 = PEM_read_bio_X509( + bio, NULL, rd_kafka_conf_ssl_passwd_cb, + (void *)conf))) { if (!X509_STORE_add_cert(cert->store, x509)) { - action = "add certificate to " - "X.509 store"; + action = + "add certificate to " + "X.509 store"; X509_free(x509); goto fail; } + + cnt++; } - break; - case RD_KAFKA_CERT_ENC_PEM: - { - X509 *x509; - int cnt = 0; - - action = "read PEM"; - - /* This will read one certificate per call - * until an error occurs or the end of the - * buffer is reached (which is an error - * we'll need to clear). */ - while ((x509 = - PEM_read_bio_X509( - bio, NULL, - rd_kafka_conf_ssl_passwd_cb, - (void *)conf))) { - - if (!X509_STORE_add_cert(cert->store, - x509)) { - action = "add certificate to " - "X.509 store"; - X509_free(x509); - goto fail; - } - - cnt++; - } + if (!BIO_eof(bio)) { + /* Encountered parse error before + * reaching end, propagate error and + * fail. */ + goto fail; + } - if (!BIO_eof(bio)) { - /* Encountered parse error before - * reaching end, propagate error and - * fail. */ - goto fail; - } + if (!cnt) { + action = + "retrieve at least one " + "CA cert from PEM"; - if (!cnt) { - action = "retrieve at least one " - "CA cert from PEM"; + goto fail; + } - goto fail; - } + /* Reached end, which is raised as an error, + * so clear it since it is not. 
*/ + ERR_clear_error(); + } break; - /* Reached end, which is raised as an error, - * so clear it since it is not. */ - ERR_clear_error(); - } + default: + RD_NOTREACHED(); break; - - default: - RD_NOTREACHED(); - break; } break; case RD_KAFKA_CERT_PUBLIC_KEY: - switch (encoding) - { - case RD_KAFKA_CERT_ENC_PKCS12: - { + switch (encoding) { + case RD_KAFKA_CERT_ENC_PKCS12: { EVP_PKEY *ign_pkey; action = "parse PKCS#12"; @@ -340,21 +318,20 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, action = "retrieve public key"; if (!cert->x509) goto fail; - } - break; + } break; case RD_KAFKA_CERT_ENC_DER: - action = "read DER / X.509 ASN.1"; + action = "read DER / X.509 ASN.1"; cert->x509 = d2i_X509_bio(bio, NULL); if (!cert->x509) goto fail; break; case RD_KAFKA_CERT_ENC_PEM: - action = "read PEM"; + action = "read PEM"; cert->x509 = PEM_read_bio_X509( - bio, NULL, rd_kafka_conf_ssl_passwd_cb, - (void *)conf); + bio, NULL, rd_kafka_conf_ssl_passwd_cb, + (void *)conf); if (!cert->x509) goto fail; break; @@ -367,10 +344,8 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, case RD_KAFKA_CERT_PRIVATE_KEY: - switch (encoding) - { - case RD_KAFKA_CERT_ENC_PKCS12: - { + switch (encoding) { + case RD_KAFKA_CERT_ENC_PKCS12: { X509 *x509; action = "parse PKCS#12"; @@ -383,22 +358,22 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, action = "retrieve private key"; if (!cert->pkey) goto fail; - } - break; + } break; case RD_KAFKA_CERT_ENC_DER: - action = "read DER / X.509 ASN.1 and " - "convert to EVP_PKEY"; + action = + "read DER / X.509 ASN.1 and " + "convert to EVP_PKEY"; cert->pkey = d2i_PrivateKey_bio(bio, NULL); if (!cert->pkey) goto fail; break; case RD_KAFKA_CERT_ENC_PEM: - action = "read PEM"; + action = "read PEM"; cert->pkey = PEM_read_bio_PrivateKey( - bio, NULL, rd_kafka_conf_ssl_passwd_cb, - (void *)conf); + bio, NULL, rd_kafka_conf_ssl_passwd_cb, + (void *)conf); if (!cert->pkey) goto fail; 
break; @@ -421,11 +396,9 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, return cert; - fail: - rd_snprintf(errstr, errstr_size, - "Failed to %s %s (encoding %s): %s", - action, - rd_kafka_cert_type_names[type], +fail: + rd_snprintf(errstr, errstr_size, "Failed to %s %s (encoding %s): %s", + action, rd_kafka_cert_type_names[type], rd_kafka_cert_enc_names[encoding], rd_kafka_ssl_last_error_str()); @@ -448,12 +421,13 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, * @{ */ -rd_kafka_conf_res_t -rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, - rd_kafka_cert_type_t cert_type, - rd_kafka_cert_enc_t cert_enc, - const void *buffer, size_t size, - char *errstr, size_t errstr_size) { +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, + rd_kafka_cert_type_t cert_type, + rd_kafka_cert_enc_t cert_enc, + const void *buffer, + size_t size, + char *errstr, + size_t errstr_size) { #if !WITH_SSL rd_snprintf(errstr, errstr_size, "librdkafka not built with OpenSSL support"); @@ -461,15 +435,14 @@ rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, #else rd_kafka_cert_t *cert; rd_kafka_cert_t **cert_map[RD_KAFKA_CERT__CNT] = { - [RD_KAFKA_CERT_PUBLIC_KEY] = &conf->ssl.cert, - [RD_KAFKA_CERT_PRIVATE_KEY] = &conf->ssl.key, - [RD_KAFKA_CERT_CA] = &conf->ssl.ca - }; + [RD_KAFKA_CERT_PUBLIC_KEY] = &conf->ssl.cert, + [RD_KAFKA_CERT_PRIVATE_KEY] = &conf->ssl.key, + [RD_KAFKA_CERT_CA] = &conf->ssl.ca}; rd_kafka_cert_t **certp; if ((int)cert_type < 0 || cert_type >= RD_KAFKA_CERT__CNT) { - rd_snprintf(errstr, errstr_size, - "Invalid certificate type %d", (int)cert_type); + rd_snprintf(errstr, errstr_size, "Invalid certificate type %d", + (int)cert_type); return RD_KAFKA_CONF_INVALID; } @@ -506,7 +479,7 @@ rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, /** * @brief Destructor called when configuration object is destroyed. 
*/ -void rd_kafka_conf_cert_dtor (int scope, void *pconf) { +void rd_kafka_conf_cert_dtor(int scope, void *pconf) { #if WITH_SSL rd_kafka_conf_t *conf = pconf; assert(scope == _RK_GLOBAL); @@ -529,11 +502,15 @@ void rd_kafka_conf_cert_dtor (int scope, void *pconf) { * @brief Copy-constructor called when configuration object \p psrcp is * duplicated to \p dstp. */ -void rd_kafka_conf_cert_copy (int scope, void *pdst, const void *psrc, - void *dstptr, const void *srcptr, - size_t filter_cnt, const char **filter) { +void rd_kafka_conf_cert_copy(int scope, + void *pdst, + const void *psrc, + void *dstptr, + const void *srcptr, + size_t filter_cnt, + const char **filter) { #if WITH_SSL - rd_kafka_conf_t *dconf = pdst; + rd_kafka_conf_t *dconf = pdst; const rd_kafka_conf_t *sconf = psrc; assert(scope == _RK_GLOBAL); diff --git a/src/rdkafka_cert.h b/src/rdkafka_cert.h index 756fb01d14..b53f46c010 100644 --- a/src/rdkafka_cert.h +++ b/src/rdkafka_cert.h @@ -40,18 +40,22 @@ */ typedef struct rd_kafka_cert_s { rd_kafka_cert_type_t type; - rd_kafka_cert_enc_t encoding; - rd_refcnt_t refcnt; + rd_kafka_cert_enc_t encoding; + rd_refcnt_t refcnt; #if WITH_SSL - X509 *x509; /**< Certificate (public key) */ - EVP_PKEY *pkey; /**< Private key */ - X509_STORE *store; /**< CA certificate chain store */ + X509 *x509; /**< Certificate (public key) */ + EVP_PKEY *pkey; /**< Private key */ + X509_STORE *store; /**< CA certificate chain store */ #endif } rd_kafka_cert_t; -void rd_kafka_conf_cert_dtor (int scope, void *pconf); -void rd_kafka_conf_cert_copy (int scope, void *pdst, const void *psrc, - void *dstptr, const void *srcptr, - size_t filter_cnt, const char **filter); +void rd_kafka_conf_cert_dtor(int scope, void *pconf); +void rd_kafka_conf_cert_copy(int scope, + void *pdst, + const void *psrc, + void *dstptr, + const void *srcptr, + size_t filter_cnt, + const char **filter); #endif /* _RDKAFKA_CERT_H_ */ diff --git a/src/rdkafka_cgrp.c b/src/rdkafka_cgrp.c index 
22fe361deb..7830d1c65d 100644 --- a/src/rdkafka_cgrp.c +++ b/src/rdkafka_cgrp.c @@ -43,67 +43,65 @@ #include #include -static void rd_kafka_cgrp_offset_commit_tmr_cb (rd_kafka_timers_t *rkts, - void *arg); +static void rd_kafka_cgrp_offset_commit_tmr_cb(rd_kafka_timers_t *rkts, + void *arg); static rd_kafka_error_t * -rd_kafka_cgrp_assign (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *assignment); -static rd_kafka_error_t *rd_kafka_cgrp_unassign (rd_kafka_cgrp_t *rkcg); +rd_kafka_cgrp_assign(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment); +static rd_kafka_error_t *rd_kafka_cgrp_unassign(rd_kafka_cgrp_t *rkcg); static rd_kafka_error_t * -rd_kafka_cgrp_incremental_assign (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t - *partitions); +rd_kafka_cgrp_incremental_assign(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *partitions); static rd_kafka_error_t * -rd_kafka_cgrp_incremental_unassign (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t - *partitions); +rd_kafka_cgrp_incremental_unassign(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *partitions); -static rd_kafka_op_res_t -rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, rd_kafka_q_cb_type_t cb_type, - void *opaque); +static rd_kafka_op_res_t rd_kafka_cgrp_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque); -static void rd_kafka_cgrp_group_leader_reset (rd_kafka_cgrp_t *rkcg, - const char *reason); +static void rd_kafka_cgrp_group_leader_reset(rd_kafka_cgrp_t *rkcg, + const char *reason); -static RD_INLINE int rd_kafka_cgrp_try_terminate (rd_kafka_cgrp_t *rkcg); +static RD_INLINE int rd_kafka_cgrp_try_terminate(rd_kafka_cgrp_t *rkcg); -static void rd_kafka_cgrp_revoke_all_rejoin (rd_kafka_cgrp_t *rkcg, - rd_bool_t assignment_lost, - rd_bool_t initiating, - const char *reason); -static void rd_kafka_cgrp_revoke_all_rejoin_maybe (rd_kafka_cgrp_t 
*rkcg, - rd_bool_t - assignment_lost, - rd_bool_t initiating, - const char *reason); +static void rd_kafka_cgrp_revoke_all_rejoin(rd_kafka_cgrp_t *rkcg, + rd_bool_t assignment_lost, + rd_bool_t initiating, + const char *reason); +static void rd_kafka_cgrp_revoke_all_rejoin_maybe(rd_kafka_cgrp_t *rkcg, + rd_bool_t assignment_lost, + rd_bool_t initiating, + const char *reason); -static void rd_kafka_cgrp_group_is_rebalancing (rd_kafka_cgrp_t *rkcg); +static void rd_kafka_cgrp_group_is_rebalancing(rd_kafka_cgrp_t *rkcg); static void -rd_kafka_cgrp_max_poll_interval_check_tmr_cb (rd_kafka_timers_t *rkts, - void *arg); +rd_kafka_cgrp_max_poll_interval_check_tmr_cb(rd_kafka_timers_t *rkts, + void *arg); static rd_kafka_resp_err_t -rd_kafka_cgrp_subscribe (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *rktparlist); +rd_kafka_cgrp_subscribe(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *rktparlist); -static void rd_kafka_cgrp_group_assignment_set ( - rd_kafka_cgrp_t *rkcg, - const rd_kafka_topic_partition_list_t *partitions); -static void rd_kafka_cgrp_group_assignment_modify ( - rd_kafka_cgrp_t *rkcg, - rd_bool_t add, - const rd_kafka_topic_partition_list_t *partitions); +static void rd_kafka_cgrp_group_assignment_set( + rd_kafka_cgrp_t *rkcg, + const rd_kafka_topic_partition_list_t *partitions); +static void rd_kafka_cgrp_group_assignment_modify( + rd_kafka_cgrp_t *rkcg, + rd_bool_t add, + const rd_kafka_topic_partition_list_t *partitions); static void -rd_kafka_cgrp_handle_assignment (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *assignment); +rd_kafka_cgrp_handle_assignment(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment); /** * @returns true if the current assignment is lost. 
*/ -rd_bool_t rd_kafka_cgrp_assignment_is_lost (rd_kafka_cgrp_t *rkcg) { +rd_bool_t rd_kafka_cgrp_assignment_is_lost(rd_kafka_cgrp_t *rkcg) { return rd_atomic32_get(&rkcg->rkcg_assignment_lost) != 0; } @@ -112,11 +110,11 @@ rd_bool_t rd_kafka_cgrp_assignment_is_lost (rd_kafka_cgrp_t *rkcg) { * @brief Call when the current assignment has been lost, with a * human-readable reason. */ -static void rd_kafka_cgrp_assignment_set_lost (rd_kafka_cgrp_t *rkcg, - char *fmt, ...) - RD_FORMAT(printf, 2, 3); -static void rd_kafka_cgrp_assignment_set_lost (rd_kafka_cgrp_t *rkcg, - char *fmt, ...) { +static void rd_kafka_cgrp_assignment_set_lost(rd_kafka_cgrp_t *rkcg, + char *fmt, + ...) RD_FORMAT(printf, 2, 3); +static void +rd_kafka_cgrp_assignment_set_lost(rd_kafka_cgrp_t *rkcg, char *fmt, ...) { va_list ap; char reason[256]; @@ -127,11 +125,10 @@ static void rd_kafka_cgrp_assignment_set_lost (rd_kafka_cgrp_t *rkcg, rd_vsnprintf(reason, sizeof(reason), fmt, ap); va_end(ap); - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER|RD_KAFKA_DBG_CGRP, "LOST", + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "LOST", "Group \"%s\": " "current assignment of %d partition(s) lost: %s", - rkcg->rkcg_group_id->str, - rkcg->rkcg_group_assignment->cnt, + rkcg->rkcg_group_id->str, rkcg->rkcg_group_assignment->cnt, reason); rd_atomic32_set(&rkcg->rkcg_assignment_lost, rd_true); @@ -142,8 +139,8 @@ static void rd_kafka_cgrp_assignment_set_lost (rd_kafka_cgrp_t *rkcg, * @brief Call when the current assignment is no longer considered lost, with a * human-readable reason. */ -static void rd_kafka_cgrp_assignment_clear_lost (rd_kafka_cgrp_t *rkcg, - char *fmt, ...) { +static void +rd_kafka_cgrp_assignment_clear_lost(rd_kafka_cgrp_t *rkcg, char *fmt, ...) 
{ va_list ap; char reason[256]; @@ -154,7 +151,7 @@ static void rd_kafka_cgrp_assignment_clear_lost (rd_kafka_cgrp_t *rkcg, rd_vsnprintf(reason, sizeof(reason), fmt, ap); va_end(ap); - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER|RD_KAFKA_DBG_CGRP, "LOST", + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "LOST", "Group \"%s\": " "current assignment no longer considered lost: %s", rkcg->rkcg_group_id->str, reason); @@ -172,7 +169,7 @@ static void rd_kafka_cgrp_assignment_clear_lost (rd_kafka_cgrp_t *rkcg, * @locality main thread */ rd_kafka_rebalance_protocol_t -rd_kafka_cgrp_rebalance_protocol (rd_kafka_cgrp_t *rkcg) { +rd_kafka_cgrp_rebalance_protocol(rd_kafka_cgrp_t *rkcg) { if (!rkcg->rkcg_assignor) return RD_KAFKA_REBALANCE_PROTOCOL_NONE; return rkcg->rkcg_assignor->rkas_protocol; @@ -185,7 +182,7 @@ rd_kafka_cgrp_rebalance_protocol (rd_kafka_cgrp_t *rkcg) { * the join-state machine to proceed before the current state * is done. */ -static rd_bool_t rd_kafka_cgrp_awaiting_response (rd_kafka_cgrp_t *rkcg) { +static rd_bool_t rd_kafka_cgrp_awaiting_response(rd_kafka_cgrp_t *rkcg) { return rkcg->rkcg_wait_resp != -1; } @@ -199,8 +196,7 @@ static rd_bool_t rd_kafka_cgrp_awaiting_response (rd_kafka_cgrp_t *rkcg) { * * @locality main thread */ -static void rd_kafka_cgrp_set_wait_resp (rd_kafka_cgrp_t *rkcg, - int16_t ApiKey) { +static void rd_kafka_cgrp_set_wait_resp(rd_kafka_cgrp_t *rkcg, int16_t ApiKey) { rd_assert(rkcg->rkcg_wait_resp == -1); rkcg->rkcg_wait_resp = ApiKey; } @@ -213,15 +209,14 @@ static void rd_kafka_cgrp_set_wait_resp (rd_kafka_cgrp_t *rkcg, * * @locality main thread */ -static void rd_kafka_cgrp_clear_wait_resp (rd_kafka_cgrp_t *rkcg, - int16_t ApiKey) { +static void rd_kafka_cgrp_clear_wait_resp(rd_kafka_cgrp_t *rkcg, + int16_t ApiKey) { rd_assert(rkcg->rkcg_wait_resp == ApiKey); rkcg->rkcg_wait_resp = -1; } - /** * @struct Auxillary glue type used for COOPERATIVE rebalance set operations. 
*/ @@ -230,19 +225,19 @@ typedef struct PartitionMemberInfo_s { rd_bool_t members_match; } PartitionMemberInfo_t; -static PartitionMemberInfo_t *PartitionMemberInfo_new ( - const rd_kafka_group_member_t *member, - rd_bool_t members_match) { +static PartitionMemberInfo_t * +PartitionMemberInfo_new(const rd_kafka_group_member_t *member, + rd_bool_t members_match) { PartitionMemberInfo_t *pmi; - pmi = rd_calloc(1, sizeof(*pmi)); - pmi->member = member; + pmi = rd_calloc(1, sizeof(*pmi)); + pmi->member = member; pmi->members_match = members_match; return pmi; } -static void PartitionMemberInfo_free (void *p) { +static void PartitionMemberInfo_free(void *p) { PartitionMemberInfo_t *pmi = p; rd_free(pmi); } @@ -254,8 +249,8 @@ typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, /** * @returns true if consumer has joined the group and thus requires a leave. */ -#define RD_KAFKA_CGRP_HAS_JOINED(rkcg) \ - (rkcg->rkcg_member_id != NULL && \ +#define RD_KAFKA_CGRP_HAS_JOINED(rkcg) \ + (rkcg->rkcg_member_id != NULL && \ RD_KAFKAP_STR_LEN((rkcg)->rkcg_member_id) > 0) @@ -263,11 +258,11 @@ typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, * @returns true if cgrp is waiting for a rebalance_cb to be handled by * the application. */ -#define RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) \ - ((rkcg)->rkcg_join_state == \ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL || \ - (rkcg)->rkcg_join_state == \ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL) +#define RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) \ + ((rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL) /** * @returns true if a rebalance is in progress. @@ -282,46 +277,40 @@ typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, * 5. An incremental rebalancing is in progress. * 6. A rebalance-induced rejoin is in progress. 
*/ -#define RD_KAFKA_CGRP_REBALANCING(rkcg) \ - ((RD_KAFKA_CGRP_HAS_JOINED(rkcg) && \ - ((rkcg)->rkcg_join_state == \ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN || \ - (rkcg)->rkcg_join_state == \ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA)) || \ - (rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC || \ - (rkcg)->rkcg_join_state == \ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE || \ - (rkcg)->rkcg_join_state == \ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE || \ - (rkcg)->rkcg_join_state == \ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL || \ - (rkcg)->rkcg_join_state == \ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL || \ - (rkcg)->rkcg_rebalance_incr_assignment != NULL || \ +#define RD_KAFKA_CGRP_REBALANCING(rkcg) \ + ((RD_KAFKA_CGRP_HAS_JOINED(rkcg) && \ + ((rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA)) || \ + (rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL || \ + (rkcg)->rkcg_rebalance_incr_assignment != NULL || \ (rkcg)->rkcg_rebalance_rejoin) const char *rd_kafka_cgrp_state_names[] = { - "init", - "term", - "query-coord", - "wait-coord", - "wait-broker", - "wait-broker-transport", - "up" -}; + "init", "term", "query-coord", + "wait-coord", "wait-broker", "wait-broker-transport", + "up"}; const char *rd_kafka_cgrp_join_state_names[] = { - "init", - "wait-join", - "wait-metadata", - "wait-sync", - "wait-assign-call", - "wait-unassign-call", - "wait-unassign-to-complete", - "wait-incr-unassign-to-complete", - "steady", + "init", + "wait-join", + "wait-metadata", + "wait-sync", + 
"wait-assign-call", + "wait-unassign-call", + "wait-unassign-to-complete", + "wait-incr-unassign-to-complete", + "steady", }; @@ -330,7 +319,7 @@ const char *rd_kafka_cgrp_join_state_names[] = { * * @returns 1 if the state was changed, else 0. */ -static int rd_kafka_cgrp_set_state (rd_kafka_cgrp_t *rkcg, int state) { +static int rd_kafka_cgrp_set_state(rd_kafka_cgrp_t *rkcg, int state) { if ((int)rkcg->rkcg_state == state) return 0; @@ -341,16 +330,16 @@ static int rd_kafka_cgrp_set_state (rd_kafka_cgrp_t *rkcg, int state) { rd_kafka_cgrp_state_names[rkcg->rkcg_state], rd_kafka_cgrp_state_names[state], rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); - rkcg->rkcg_state = state; + rkcg->rkcg_state = state; rkcg->rkcg_ts_statechange = rd_clock(); - rd_kafka_brokers_broadcast_state_change(rkcg->rkcg_rk); + rd_kafka_brokers_broadcast_state_change(rkcg->rkcg_rk); return 1; } -void rd_kafka_cgrp_set_join_state (rd_kafka_cgrp_t *rkcg, int join_state) { +void rd_kafka_cgrp_set_join_state(rd_kafka_cgrp_t *rkcg, int join_state) { if ((int)rkcg->rkcg_join_state == join_state) return; @@ -365,16 +354,16 @@ void rd_kafka_cgrp_set_join_state (rd_kafka_cgrp_t *rkcg, int join_state) { } -void rd_kafka_cgrp_destroy_final (rd_kafka_cgrp_t *rkcg) { +void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg) { rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_subscription); rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_group_leader.members); rd_kafka_cgrp_set_member_id(rkcg, NULL); if (rkcg->rkcg_group_instance_id) - rd_kafkap_str_destroy(rkcg->rkcg_group_instance_id); + rd_kafkap_str_destroy(rkcg->rkcg_group_instance_id); rd_kafka_q_destroy_owner(rkcg->rkcg_q); rd_kafka_q_destroy_owner(rkcg->rkcg_ops); - rd_kafka_q_destroy_owner(rkcg->rkcg_wait_coord_q); + rd_kafka_q_destroy_owner(rkcg->rkcg_wait_coord_q); rd_kafka_assert(rkcg->rkcg_rk, TAILQ_EMPTY(&rkcg->rkcg_topics)); rd_kafka_assert(rkcg->rkcg_rk, rd_list_empty(&rkcg->rkcg_toppars)); rd_list_destroy(&rkcg->rkcg_toppars); @@ -382,7 
+371,7 @@ void rd_kafka_cgrp_destroy_final (rd_kafka_cgrp_t *rkcg) { rd_kafka_topic_partition_list_destroy(rkcg->rkcg_errored_topics); if (rkcg->rkcg_assignor && rkcg->rkcg_assignor->rkas_destroy_state_cb) rkcg->rkcg_assignor->rkas_destroy_state_cb( - rkcg->rkcg_assignor_state); + rkcg->rkcg_assignor_state); rd_free(rkcg); } @@ -397,43 +386,44 @@ void rd_kafka_cgrp_destroy_final (rd_kafka_cgrp_t *rkcg) { * @param reset if true the timeout is updated even if the session has expired. */ static RD_INLINE void -rd_kafka_cgrp_update_session_timeout (rd_kafka_cgrp_t *rkcg, rd_bool_t reset) { +rd_kafka_cgrp_update_session_timeout(rd_kafka_cgrp_t *rkcg, rd_bool_t reset) { if (reset || rkcg->rkcg_ts_session_timeout != 0) - rkcg->rkcg_ts_session_timeout = rd_clock() + - (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms*1000); + rkcg->rkcg_ts_session_timeout = + rd_clock() + + (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000); } -rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk, - const rd_kafkap_str_t *group_id, - const rd_kafkap_str_t *client_id) { +rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *client_id) { rd_kafka_cgrp_t *rkcg; rkcg = rd_calloc(1, sizeof(*rkcg)); - rkcg->rkcg_rk = rk; - rkcg->rkcg_group_id = group_id; - rkcg->rkcg_client_id = client_id; - rkcg->rkcg_coord_id = -1; + rkcg->rkcg_rk = rk; + rkcg->rkcg_group_id = group_id; + rkcg->rkcg_client_id = client_id; + rkcg->rkcg_coord_id = -1; rkcg->rkcg_generation_id = -1; - rkcg->rkcg_wait_resp = -1; + rkcg->rkcg_wait_resp = -1; - rkcg->rkcg_ops = rd_kafka_q_new(rk); - rkcg->rkcg_ops->rkq_serve = rd_kafka_cgrp_op_serve; - rkcg->rkcg_ops->rkq_opaque = rkcg; - rkcg->rkcg_wait_coord_q = rd_kafka_q_new(rk); - rkcg->rkcg_wait_coord_q->rkq_serve = rkcg->rkcg_ops->rkq_serve; + rkcg->rkcg_ops = rd_kafka_q_new(rk); + rkcg->rkcg_ops->rkq_serve = rd_kafka_cgrp_op_serve; + rkcg->rkcg_ops->rkq_opaque = rkcg; + rkcg->rkcg_wait_coord_q = rd_kafka_q_new(rk); 
+ rkcg->rkcg_wait_coord_q->rkq_serve = rkcg->rkcg_ops->rkq_serve; rkcg->rkcg_wait_coord_q->rkq_opaque = rkcg->rkcg_ops->rkq_opaque; - rkcg->rkcg_q = rd_kafka_q_new(rk); + rkcg->rkcg_q = rd_kafka_q_new(rk); rkcg->rkcg_group_instance_id = - rd_kafkap_str_new(rk->rk_conf.group_instance_id, -1); + rd_kafkap_str_new(rk->rk_conf.group_instance_id, -1); TAILQ_INIT(&rkcg->rkcg_topics); rd_list_init(&rkcg->rkcg_toppars, 32, NULL); rd_kafka_cgrp_set_member_id(rkcg, ""); rkcg->rkcg_subscribed_topics = - rd_list_new(0, (void *)rd_kafka_topic_info_destroy); + rd_list_new(0, (void *)rd_kafka_topic_info_destroy); rd_interval_init(&rkcg->rkcg_coord_query_intvl); rd_interval_init(&rkcg->rkcg_heartbeat_intvl); rd_interval_init(&rkcg->rkcg_join_intvl); @@ -453,12 +443,10 @@ rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk, if (rk->rk_conf.enable_auto_commit && rk->rk_conf.auto_commit_interval_ms > 0) - rd_kafka_timer_start(&rk->rk_timers, - &rkcg->rkcg_offset_commit_tmr, - rk->rk_conf. - auto_commit_interval_ms * 1000ll, - rd_kafka_cgrp_offset_commit_tmr_cb, - rkcg); + rd_kafka_timer_start( + &rk->rk_timers, &rkcg->rkcg_offset_commit_tmr, + rk->rk_conf.auto_commit_interval_ms * 1000ll, + rd_kafka_cgrp_offset_commit_tmr_cb, rkcg); return rkcg; } @@ -467,8 +455,8 @@ rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk, /** * @brief Set the group coordinator broker. 
*/ -static void rd_kafka_cgrp_coord_set_broker (rd_kafka_cgrp_t *rkcg, - rd_kafka_broker_t *rkb) { +static void rd_kafka_cgrp_coord_set_broker(rd_kafka_cgrp_t *rkcg, + rd_kafka_broker_t *rkb) { rd_assert(rkcg->rkcg_curr_coord == NULL); @@ -491,7 +479,7 @@ static void rd_kafka_cgrp_coord_set_broker (rd_kafka_cgrp_t *rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT); rd_kafka_broker_persistent_connection_add( - rkcg->rkcg_coord, &rkcg->rkcg_coord->rkb_persistconn.coord); + rkcg->rkcg_coord, &rkcg->rkcg_coord->rkb_persistconn.coord); /* Set the logical coordinator's nodename to the * proper broker's nodename, this will trigger a (re)connect @@ -503,7 +491,7 @@ static void rd_kafka_cgrp_coord_set_broker (rd_kafka_cgrp_t *rkcg, /** * @brief Reset/clear the group coordinator broker. */ -static void rd_kafka_cgrp_coord_clear_broker (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_coord_clear_broker(rd_kafka_cgrp_t *rkcg) { rd_kafka_broker_t *rkb = rkcg->rkcg_curr_coord; rd_assert(rkcg->rkcg_curr_coord); @@ -515,8 +503,7 @@ static void rd_kafka_cgrp_coord_clear_broker (rd_kafka_cgrp_t *rkcg) { rd_assert(rkcg->rkcg_coord); rd_kafka_broker_persistent_connection_del( - rkcg->rkcg_coord, - &rkcg->rkcg_coord->rkb_persistconn.coord); + rkcg->rkcg_coord, &rkcg->rkcg_coord->rkb_persistconn.coord); /* Clear the ephemeral broker's nodename. * This will also trigger a disconnect. */ @@ -534,8 +521,7 @@ static void rd_kafka_cgrp_coord_clear_broker (rd_kafka_cgrp_t *rkcg) { * * @returns 1 if the coordinator, or state, was updated, else 0. 
*/ -static int rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, - int32_t coord_id) { +static int rd_kafka_cgrp_coord_update(rd_kafka_cgrp_t *rkcg, int32_t coord_id) { /* Don't do anything while terminating */ if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM) @@ -544,8 +530,8 @@ static int rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, /* Check if coordinator changed */ if (rkcg->rkcg_coord_id != coord_id) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPCOORD", - "Group \"%.*s\" changing coordinator %"PRId32 - " -> %"PRId32, + "Group \"%.*s\" changing coordinator %" PRId32 + " -> %" PRId32, RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rkcg->rkcg_coord_id, coord_id); @@ -563,8 +549,7 @@ static int rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, * corresponding broker handle. */ if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP) return rd_kafka_cgrp_set_state( - rkcg, - RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT); + rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT); } else if (rkcg->rkcg_coord_id != -1) { rd_kafka_broker_t *rkb; @@ -590,15 +575,14 @@ static int rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, /* Coordinator is known but no corresponding * broker handle. 
*/ return rd_kafka_cgrp_set_state( - rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER); - + rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER); } } else { /* Coordinator still not known, re-query */ if (rkcg->rkcg_state >= RD_KAFKA_CGRP_STATE_WAIT_COORD) return rd_kafka_cgrp_set_state( - rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); + rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); } return 0; /* no change */ @@ -606,24 +590,23 @@ static int rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, - /** * Handle FindCoordinator response */ -static void rd_kafka_cgrp_handle_FindCoordinator (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_cgrp_handle_FindCoordinator(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; + int16_t ErrorCode = 0; int32_t CoordId; rd_kafkap_str_t CoordHost = RD_ZERO_INIT; int32_t CoordPort; - rd_kafka_cgrp_t *rkcg = opaque; + rd_kafka_cgrp_t *rkcg = opaque; struct rd_kafka_metadata_broker mdb = RD_ZERO_INIT; - char *errstr = NULL; + char *errstr = NULL; int actions; if (likely(!(ErrorCode = err))) { @@ -651,13 +634,13 @@ static void rd_kafka_cgrp_handle_FindCoordinator (rd_kafka_t *rk, mdb.id = CoordId; - RD_KAFKAP_STR_DUPA(&mdb.host, &CoordHost); - mdb.port = CoordPort; + RD_KAFKAP_STR_DUPA(&mdb.host, &CoordHost); + mdb.port = CoordPort; rd_rkb_dbg(rkb, CGRP, "CGRPCOORD", - "Group \"%.*s\" coordinator is %s:%i id %"PRId32, - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - mdb.host, mdb.port, mdb.id); + "Group \"%.*s\" coordinator is %s:%i id %" PRId32, + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), mdb.host, mdb.port, + mdb.id); rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto, &mdb, NULL); rd_kafka_cgrp_coord_update(rkcg, CoordId); @@ -681,21 +664,18 @@ static void rd_kafka_cgrp_handle_FindCoordinator (rd_kafka_t *rk, return; 
actions = rd_kafka_err_action( - rkb, ErrorCode, request, + rkb, ErrorCode, request, - RD_KAFKA_ERR_ACTION_RETRY|RD_KAFKA_ERR_ACTION_REFRESH, - RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR__TRANSPORT, + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TRANSPORT, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR__TIMED_OUT, + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TIMED_OUT, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); @@ -706,10 +686,9 @@ static void rd_kafka_cgrp_handle_FindCoordinator (rd_kafka_t *rk, rkcg->rkcg_last_err != ErrorCode) { /* Propagate non-retriable errors to the application */ rd_kafka_consumer_err( - rkcg->rkcg_q, rd_kafka_broker_id(rkb), - ErrorCode, 0, NULL, NULL, - RD_KAFKA_OFFSET_INVALID, - "FindCoordinator response error: %s", errstr); + rkcg->rkcg_q, rd_kafka_broker_id(rkb), ErrorCode, 0, + NULL, NULL, RD_KAFKA_OFFSET_INVALID, + "FindCoordinator response error: %s", errstr); /* Suppress repeated errors */ rkcg->rkcg_last_err = ErrorCode; @@ -717,8 +696,7 @@ static void rd_kafka_cgrp_handle_FindCoordinator (rd_kafka_t *rk, /* Retries are performed by the timer-intervalled * coord queries, continue querying */ - rd_kafka_cgrp_set_state( - rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); + rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); } rd_kafka_cgrp_serve(rkcg); /* Serve updated state, if possible */ @@ -731,36 +709,33 @@ static void rd_kafka_cgrp_handle_FindCoordinator (rd_kafka_t *rk, * * Locality: main thread */ -void rd_kafka_cgrp_coord_query (rd_kafka_cgrp_t *rkcg, - const char *reason) { - rd_kafka_broker_t *rkb; +void rd_kafka_cgrp_coord_query(rd_kafka_cgrp_t *rkcg, const char *reason) { + rd_kafka_broker_t *rkb; 
rd_kafka_resp_err_t err; - rkb = rd_kafka_broker_any_usable(rkcg->rkcg_rk, - RD_POLL_NOWAIT, - RD_DO_LOCK, - RD_KAFKA_FEATURE_BROKER_GROUP_COORD, - "coordinator query"); - - if (!rkb) { - /* Reset the interval because there were no brokers. When a - * broker becomes available, we want to query it immediately. */ - rd_interval_reset(&rkcg->rkcg_coord_query_intvl); - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPQUERY", - "Group \"%.*s\": " - "no broker available for coordinator query: %s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason); - return; - } + rkb = rd_kafka_broker_any_usable( + rkcg->rkcg_rk, RD_POLL_NOWAIT, RD_DO_LOCK, + RD_KAFKA_FEATURE_BROKER_GROUP_COORD, "coordinator query"); + + if (!rkb) { + /* Reset the interval because there were no brokers. When a + * broker becomes available, we want to query it immediately. */ + rd_interval_reset(&rkcg->rkcg_coord_query_intvl); + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPQUERY", + "Group \"%.*s\": " + "no broker available for coordinator query: %s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason); + return; + } rd_rkb_dbg(rkb, CGRP, "CGRPQUERY", "Group \"%.*s\": querying for coordinator: %s", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason); err = rd_kafka_FindCoordinatorRequest( - rkb, RD_KAFKA_COORD_GROUP, rkcg->rkcg_group_id->str, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_handle_FindCoordinator, rkcg); + rkb, RD_KAFKA_COORD_GROUP, rkcg->rkcg_group_id->str, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_FindCoordinator, rkcg); if (err) { rd_rkb_dbg(rkb, CGRP, "CGRPQUERY", @@ -775,7 +750,7 @@ void rd_kafka_cgrp_coord_query (rd_kafka_cgrp_t *rkcg, if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_QUERY_COORD) rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_WAIT_COORD); - rd_kafka_broker_destroy(rkb); + rd_kafka_broker_destroy(rkb); /* Back off the next intervalled query since we just sent one. 
*/ rd_interval_reset_to_now(&rkcg->rkcg_coord_query_intvl, 0); @@ -786,19 +761,20 @@ void rd_kafka_cgrp_coord_query (rd_kafka_cgrp_t *rkcg, * * @locality main thread */ -void rd_kafka_cgrp_coord_dead (rd_kafka_cgrp_t *rkcg, rd_kafka_resp_err_t err, - const char *reason) { +void rd_kafka_cgrp_coord_dead(rd_kafka_cgrp_t *rkcg, + rd_kafka_resp_err_t err, + const char *reason) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COORD", "Group \"%.*s\": " - "marking the coordinator (%"PRId32") dead: %s: %s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rkcg->rkcg_coord_id, rd_kafka_err2str(err), reason); + "marking the coordinator (%" PRId32 ") dead: %s: %s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rkcg->rkcg_coord_id, + rd_kafka_err2str(err), reason); - rd_kafka_cgrp_coord_update(rkcg, -1); + rd_kafka_cgrp_coord_update(rkcg, -1); - /* Re-query for coordinator */ - rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); - rd_kafka_cgrp_coord_query(rkcg, reason); + /* Re-query for coordinator */ + rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); + rd_kafka_cgrp_coord_query(rkcg, reason); } @@ -809,7 +785,7 @@ void rd_kafka_cgrp_coord_dead (rd_kafka_cgrp_t *rkcg, rd_kafka_resp_err_t err, * @locks_required none * @locks_acquired none */ -rd_kafka_broker_t *rd_kafka_cgrp_get_coord (rd_kafka_cgrp_t *rkcg) { +rd_kafka_broker_t *rd_kafka_cgrp_get_coord(rd_kafka_cgrp_t *rkcg) { if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || !rkcg->rkcg_coord) return NULL; @@ -824,15 +800,15 @@ rd_kafka_broker_t *rd_kafka_cgrp_get_coord (rd_kafka_cgrp_t *rkcg) { * @param opaque must be the cgrp handle. 
* @locality rdkafka main thread (unless err==ERR__DESTROY) */ -static void rd_kafka_cgrp_handle_LeaveGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_cgrp_t *rkcg = opaque; +static void rd_kafka_cgrp_handle_LeaveGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; + int16_t ErrorCode = 0; if (err) { ErrorCode = err; @@ -865,13 +841,13 @@ static void rd_kafka_cgrp_handle_LeaveGroup (rd_kafka_t *rk, return; - err_parse: +err_parse: ErrorCode = rkbuf->rkbuf_err; goto err; } -static void rd_kafka_cgrp_leave (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_leave(rd_kafka_cgrp_t *rkcg) { char *member_id; RD_KAFKAP_STR_DUPA(&member_id, rkcg->rkcg_member_id); @@ -899,15 +875,12 @@ static void rd_kafka_cgrp_leave (rd_kafka_cgrp_t *rkcg) { if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP) { rd_rkb_dbg(rkcg->rkcg_curr_coord, CONSUMER, "LEAVE", "Leaving group"); - rd_kafka_LeaveGroupRequest(rkcg->rkcg_coord, - rkcg->rkcg_group_id->str, - member_id, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_handle_LeaveGroup, - rkcg); + rd_kafka_LeaveGroupRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id->str, member_id, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_LeaveGroup, rkcg); } else - rd_kafka_cgrp_handle_LeaveGroup(rkcg->rkcg_rk, - rkcg->rkcg_coord, + rd_kafka_cgrp_handle_LeaveGroup(rkcg->rkcg_rk, rkcg->rkcg_coord, RD_KAFKA_RESP_ERR__WAIT_COORD, NULL, NULL, rkcg); } @@ -918,7 +891,7 @@ static void rd_kafka_cgrp_leave (rd_kafka_cgrp_t *rkcg) { * * @returns true if a LeaveGroup was issued, else false. 
*/ -static rd_bool_t rd_kafka_cgrp_leave_maybe (rd_kafka_cgrp_t *rkcg) { +static rd_bool_t rd_kafka_cgrp_leave_maybe(rd_kafka_cgrp_t *rkcg) { /* We were not instructed to leave in the first place. */ if (!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE)) @@ -955,12 +928,11 @@ static rd_bool_t rd_kafka_cgrp_leave_maybe (rd_kafka_cgrp_t *rkcg) { * * @remarks does not take ownership of \p partitions. */ -void -rd_kafka_rebalance_op_incr (rd_kafka_cgrp_t *rkcg, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - rd_bool_t rejoin, - const char *reason) { +void rd_kafka_rebalance_op_incr(rd_kafka_cgrp_t *rkcg, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + rd_bool_t rejoin, + const char *reason) { rd_kafka_error_t *error; /* Flag to rejoin after completion of the incr_assign or incr_unassign, @@ -983,10 +955,9 @@ rd_kafka_rebalance_op_incr (rd_kafka_cgrp_t *rkcg, } rd_kafka_cgrp_set_join_state( - rkcg, - err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ? - RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL : - RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL); + rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS + ? RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL + : RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL); /* Schedule application rebalance callback/event if enabled */ if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_REBALANCE) { @@ -996,8 +967,10 @@ rd_kafka_rebalance_op_incr (rd_kafka_cgrp_t *rkcg, "Group \"%s\": delegating incremental %s of %d " "partition(s) to application on queue %s: %s", rkcg->rkcg_group_id->str, - err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS ? - "revoke" : "assign", partitions->cnt, + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? 
"revoke" + : "assign", + partitions->cnt, rd_kafka_q_dest_name(rkcg->rkcg_q), reason); /* Pause currently assigned partitions while waiting for @@ -1009,10 +982,10 @@ rd_kafka_rebalance_op_incr (rd_kafka_cgrp_t *rkcg, rd_kafka_assignment_pause(rkcg->rkcg_rk, "incremental rebalance"); - rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE); + rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE); rko->rko_err = err; rko->rko_u.rebalance.partitions = - rd_kafka_topic_partition_list_copy(partitions); + rd_kafka_topic_partition_list_copy(partitions); if (rd_kafka_q_enq(rkcg->rkcg_q, rko)) goto done; /* Rebalance op successfully enqueued */ @@ -1021,8 +994,9 @@ rd_kafka_rebalance_op_incr (rd_kafka_cgrp_t *rkcg, "Group \"%s\": ops queue is disabled, not " "delegating partition %s to application", rkcg->rkcg_group_id->str, - err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS ? - "unassign" : "assign"); + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? "unassign" + : "assign"); /* FALLTHRU */ } @@ -1043,10 +1017,10 @@ rd_kafka_rebalance_op_incr (rd_kafka_cgrp_t *rkcg, "of %d partition(s) failed: %s: " "unassigning all partitions and rejoining", rkcg->rkcg_group_id->str, - err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS ? - "unassign" : "assign", - partitions->cnt, - rd_kafka_error_string(error)); + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? "unassign" + : "assign", + partitions->cnt, rd_kafka_error_string(error)); rd_kafka_error_destroy(error); rd_kafka_cgrp_set_join_state(rkcg, @@ -1060,13 +1034,11 @@ rd_kafka_rebalance_op_incr (rd_kafka_cgrp_t *rkcg, /* Now serve the assignment to make updates */ rd_kafka_assignment_serve(rkcg->rkcg_rk); - done: +done: /* Update the current group assignment based on the * added/removed partitions. 
*/ rd_kafka_cgrp_group_assignment_modify( - rkcg, - err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - partitions); + rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, partitions); } @@ -1079,11 +1051,10 @@ rd_kafka_rebalance_op_incr (rd_kafka_cgrp_t *rkcg, * * @remarks \p partitions is copied. */ -static void -rd_kafka_rebalance_op (rd_kafka_cgrp_t *rkcg, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *assignment, - const char *reason) { +static void rd_kafka_rebalance_op(rd_kafka_cgrp_t *rkcg, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *assignment, + const char *reason) { rd_kafka_error_t *error; rd_kafka_wrlock(rkcg->rkcg_rk); @@ -1104,10 +1075,9 @@ rd_kafka_rebalance_op (rd_kafka_cgrp_t *rkcg, rd_assert(assignment != NULL); rd_kafka_cgrp_set_join_state( - rkcg, - err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ? - RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL : - RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL); + rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS + ? RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL + : RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL); /* Schedule application rebalance callback/event if enabled */ if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_REBALANCE) { @@ -1117,8 +1087,10 @@ rd_kafka_rebalance_op (rd_kafka_cgrp_t *rkcg, "Group \"%s\": delegating %s of %d partition(s) " "to application on queue %s: %s", rkcg->rkcg_group_id->str, - err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS ? - "revoke":"assign", assignment->cnt, + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? "revoke" + : "assign", + assignment->cnt, rd_kafka_q_dest_name(rkcg->rkcg_q), reason); /* Pause currently assigned partitions while waiting for @@ -1129,10 +1101,10 @@ rd_kafka_rebalance_op (rd_kafka_cgrp_t *rkcg, * might have lost in the rebalance. 
*/ rd_kafka_assignment_pause(rkcg->rkcg_rk, "rebalance"); - rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE); + rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE); rko->rko_err = err; rko->rko_u.rebalance.partitions = - rd_kafka_topic_partition_list_copy(assignment); + rd_kafka_topic_partition_list_copy(assignment); if (rd_kafka_q_enq(rkcg->rkcg_q, rko)) goto done; /* Rebalance op successfully enqueued */ @@ -1141,8 +1113,9 @@ rd_kafka_rebalance_op (rd_kafka_cgrp_t *rkcg, "Group \"%s\": ops queue is disabled, not " "delegating partition %s to application", rkcg->rkcg_group_id->str, - err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS ? - "unassign" : "assign"); + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? "unassign" + : "assign"); /* FALLTHRU */ } @@ -1164,8 +1137,9 @@ rd_kafka_rebalance_op (rd_kafka_cgrp_t *rkcg, "of %d partition(s) failed: %s: " "unassigning all partitions and rejoining", rkcg->rkcg_group_id->str, - err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS ? - "unassign" : "assign", + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? "unassign" + : "assign", rkcg->rkcg_group_assignment->cnt, rd_kafka_error_string(error)); rd_kafka_error_destroy(error); @@ -1181,7 +1155,7 @@ rd_kafka_rebalance_op (rd_kafka_cgrp_t *rkcg, /* Now serve the assignment to make updates */ rd_kafka_assignment_serve(rkcg->rkcg_rk); - done: +done: if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) rd_kafka_cgrp_group_assignment_set(rkcg, assignment); else @@ -1195,10 +1169,10 @@ rd_kafka_rebalance_op (rd_kafka_cgrp_t *rkcg, * @remark This function must not have any side-effects but setting the * join state. */ -static void rd_kafka_cgrp_rejoin (rd_kafka_cgrp_t *rkcg, const char *fmt, ...) - RD_FORMAT(printf, 2, 3); +static void rd_kafka_cgrp_rejoin(rd_kafka_cgrp_t *rkcg, const char *fmt, ...) + RD_FORMAT(printf, 2, 3); -static void rd_kafka_cgrp_rejoin (rd_kafka_cgrp_t *rkcg, const char *fmt, ...) { +static void rd_kafka_cgrp_rejoin(rd_kafka_cgrp_t *rkcg, const char *fmt, ...) 
{ char reason[512]; va_list ap; char astr[128]; @@ -1214,24 +1188,23 @@ static void rd_kafka_cgrp_rejoin (rd_kafka_cgrp_t *rkcg, const char *fmt, ...) { rd_snprintf(astr, sizeof(astr), " without an assignment"); if (rkcg->rkcg_subscription || rkcg->rkcg_next_subscription) { - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER|RD_KAFKA_DBG_CGRP, - "REJOIN", - "Group \"%s\": %s group%s: %s", - rkcg->rkcg_group_id->str, - rkcg->rkcg_join_state == - RD_KAFKA_CGRP_JOIN_STATE_INIT ? - "Joining" : "Rejoining", - astr, reason); + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REJOIN", + "Group \"%s\": %s group%s: %s", rkcg->rkcg_group_id->str, + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT + ? "Joining" + : "Rejoining", + astr, reason); } else { - rd_kafka_dbg(rkcg->rkcg_rk,CONSUMER|RD_KAFKA_DBG_CGRP, - "NOREJOIN", - "Group \"%s\": Not %s group%s: %s: " - "no subscribed topics", - rkcg->rkcg_group_id->str, - rkcg->rkcg_join_state == - RD_KAFKA_CGRP_JOIN_STATE_INIT ? - "joining" : "rejoining", - astr, reason); + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "NOREJOIN", + "Group \"%s\": Not %s group%s: %s: " + "no subscribed topics", + rkcg->rkcg_group_id->str, + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT + ? "joining" + : "rejoining", + astr, reason); rd_kafka_cgrp_leave_maybe(rkcg); } @@ -1252,34 +1225,29 @@ static void rd_kafka_cgrp_rejoin (rd_kafka_cgrp_t *rkcg, const char *fmt, ...) { * else rkgm_assignment partitions will be collected. 
*/ static map_toppar_member_info_t * -rd_kafka_collect_partitions (const rd_kafka_group_member_t *members, - size_t member_cnt, - size_t par_cnt, - rd_bool_t collect_owned) { +rd_kafka_collect_partitions(const rd_kafka_group_member_t *members, + size_t member_cnt, + size_t par_cnt, + rd_bool_t collect_owned) { size_t i; map_toppar_member_info_t *collected = rd_calloc(1, sizeof(*collected)); - RD_MAP_INIT( - collected, - par_cnt, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - PartitionMemberInfo_free); - - for (i = 0 ; irkgm_owned - : rkgm->rkgm_assignment; + const rd_kafka_topic_partition_list_t *toppars = + collect_owned ? rkgm->rkgm_owned : rkgm->rkgm_assignment; - for (j = 0; j<(size_t)toppars->cnt; j++) { + for (j = 0; j < (size_t)toppars->cnt; j++) { rd_kafka_topic_partition_t *rktpar = - rd_kafka_topic_partition_copy( - &toppars->elems[j]); + rd_kafka_topic_partition_copy(&toppars->elems[j]); PartitionMemberInfo_t *pmi = - PartitionMemberInfo_new(rkgm, rd_false); + PartitionMemberInfo_new(rkgm, rd_false); RD_MAP_SET(collected, rktpar, pmi); } } @@ -1298,21 +1266,17 @@ rd_kafka_collect_partitions (const rd_kafka_group_member_t *members, * to NULL. */ static map_toppar_member_info_t * -rd_kafka_member_partitions_intersect ( - map_toppar_member_info_t *a, - map_toppar_member_info_t *b) { +rd_kafka_member_partitions_intersect(map_toppar_member_info_t *a, + map_toppar_member_info_t *b) { const rd_kafka_topic_partition_t *key; const PartitionMemberInfo_t *a_v; map_toppar_member_info_t *intersection = - rd_calloc(1, sizeof(*intersection)); + rd_calloc(1, sizeof(*intersection)); RD_MAP_INIT( - intersection, - RD_MIN(a ? RD_MAP_CNT(a) : 1, b ? RD_MAP_CNT(b) : 1), - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - PartitionMemberInfo_free); + intersection, RD_MIN(a ? RD_MAP_CNT(a) : 1, b ? 
RD_MAP_CNT(b) : 1), + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); if (!a || !b) return intersection; @@ -1325,16 +1289,11 @@ rd_kafka_member_partitions_intersect ( continue; members_match = - a_v->member && - b_v->member && - rd_kafka_group_member_cmp(a_v->member, - b_v->member) == 0; - - RD_MAP_SET(intersection, - rd_kafka_topic_partition_copy(key), - PartitionMemberInfo_new( - b_v->member, - members_match)); + a_v->member && b_v->member && + rd_kafka_group_member_cmp(a_v->member, b_v->member) == 0; + + RD_MAP_SET(intersection, rd_kafka_topic_partition_copy(key), + PartitionMemberInfo_new(b_v->member, members_match)); } return intersection; @@ -1348,34 +1307,29 @@ rd_kafka_member_partitions_intersect ( * corresponding element in \p a */ static map_toppar_member_info_t * -rd_kafka_member_partitions_subtract ( - map_toppar_member_info_t *a, - map_toppar_member_info_t *b) { +rd_kafka_member_partitions_subtract(map_toppar_member_info_t *a, + map_toppar_member_info_t *b) { const rd_kafka_topic_partition_t *key; const PartitionMemberInfo_t *a_v; map_toppar_member_info_t *difference = - rd_calloc(1, sizeof(*difference)); + rd_calloc(1, sizeof(*difference)); - RD_MAP_INIT( - difference, - a ? RD_MAP_CNT(a) : 1, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - PartitionMemberInfo_free); + RD_MAP_INIT(difference, a ? RD_MAP_CNT(a) : 1, + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, + PartitionMemberInfo_free); if (!a) return difference; RD_MAP_FOREACH(key, a_v, a) { - const PartitionMemberInfo_t *b_v = b ? RD_MAP_GET(b, key) - : NULL; + const PartitionMemberInfo_t *b_v = + b ? 
RD_MAP_GET(b, key) : NULL; if (!b_v) - RD_MAP_SET(difference, - rd_kafka_topic_partition_copy(key), - PartitionMemberInfo_new(a_v->member, - rd_false)); + RD_MAP_SET( + difference, rd_kafka_topic_partition_copy(key), + PartitionMemberInfo_new(a_v->member, rd_false)); } return difference; @@ -1386,10 +1340,10 @@ rd_kafka_member_partitions_subtract ( * @brief Adjust the partition assignment as provided by the assignor * according to the COOPERATIVE protocol. */ -static void rd_kafka_cooperative_protocol_adjust_assignment ( - rd_kafka_cgrp_t *rkcg, - rd_kafka_group_member_t *members, - int member_cnt) { +static void rd_kafka_cooperative_protocol_adjust_assignment( + rd_kafka_cgrp_t *rkcg, + rd_kafka_group_member_t *members, + int member_cnt) { /* https://cwiki.apache.org/confluence/display/KAFKA/KIP-429%3A+Kafk\ a+Consumer+Incremental+Rebalance+Protocol */ @@ -1397,8 +1351,8 @@ static void rd_kafka_cooperative_protocol_adjust_assignment ( int i; int expected_max_assignment_size; int total_assigned = 0; - int not_revoking = 0; - size_t par_cnt = 0; + int not_revoking = 0; + size_t par_cnt = 0; const rd_kafka_topic_partition_t *toppar; const PartitionMemberInfo_t *pmi; map_toppar_member_info_t *assigned; @@ -1407,54 +1361,43 @@ static void rd_kafka_cooperative_protocol_adjust_assignment ( map_toppar_member_info_t *ready_to_migrate; map_toppar_member_info_t *unknown_but_owned; - for (i = 0 ; icnt; - assigned = rd_kafka_collect_partitions(members, - member_cnt, - par_cnt, - rd_false/*assigned*/); + assigned = rd_kafka_collect_partitions(members, member_cnt, par_cnt, + rd_false /*assigned*/); - owned = rd_kafka_collect_partitions(members, - member_cnt, - par_cnt, - rd_true/*owned*/); + owned = rd_kafka_collect_partitions(members, member_cnt, par_cnt, + rd_true /*owned*/); rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP", "Group \"%s\": Partitions owned by members: %d, " "partitions assigned by assignor: %d", - rkcg->rkcg_group_id->str, - (int)RD_MAP_CNT(owned), 
(int)RD_MAP_CNT(assigned)); + rkcg->rkcg_group_id->str, (int)RD_MAP_CNT(owned), + (int)RD_MAP_CNT(assigned)); /* Still owned by some members */ - maybe_revoking = - rd_kafka_member_partitions_intersect(assigned, - owned); + maybe_revoking = rd_kafka_member_partitions_intersect(assigned, owned); /* Not previously owned by anyone */ - ready_to_migrate = - rd_kafka_member_partitions_subtract(assigned, - owned); + ready_to_migrate = rd_kafka_member_partitions_subtract(assigned, owned); /* Don't exist in assigned partitions */ unknown_but_owned = - rd_kafka_member_partitions_subtract(owned, - assigned); + rd_kafka_member_partitions_subtract(owned, assigned); /* Rough guess at a size that is a bit higher than * the maximum number of partitions likely to be * assigned to any partition. */ expected_max_assignment_size = - (int)(RD_MAP_CNT(assigned) / member_cnt) + 4; + (int)(RD_MAP_CNT(assigned) / member_cnt) + 4; - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { rd_kafka_group_member_t *rkgm = &members[i]; - rd_kafka_topic_partition_list_destroy( - rkgm->rkgm_assignment); + rd_kafka_topic_partition_list_destroy(rkgm->rkgm_assignment); - rkgm->rkgm_assignment = - rd_kafka_topic_partition_list_new( - expected_max_assignment_size); + rkgm->rkgm_assignment = rd_kafka_topic_partition_list_new( + expected_max_assignment_size); } /* For maybe-revoking-partitions, check if the owner has @@ -1469,10 +1412,9 @@ static void rd_kafka_cooperative_protocol_adjust_assignment ( continue; /* Owner hasn't changed. */ - rd_kafka_topic_partition_list_add( - pmi->member->rkgm_assignment, - toppar->topic, - toppar->partition); + rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment, + toppar->topic, + toppar->partition); total_assigned++; not_revoking++; @@ -1484,10 +1426,9 @@ static void rd_kafka_cooperative_protocol_adjust_assignment ( * newly-assigned-partitions directly. 
*/ RD_MAP_FOREACH(toppar, pmi, ready_to_migrate) { - rd_kafka_topic_partition_list_add( - pmi->member->rkgm_assignment, - toppar->topic, - toppar->partition); + rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment, + toppar->topic, + toppar->partition); total_assigned++; } @@ -1498,24 +1439,23 @@ static void rd_kafka_cooperative_protocol_adjust_assignment ( * anyway. */ RD_MAP_FOREACH(toppar, pmi, unknown_but_owned) { - rd_kafka_topic_partition_list_add( - pmi->member->rkgm_assignment, - toppar->topic, - toppar->partition); + rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment, + toppar->topic, + toppar->partition); total_assigned++; } rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP", - "Group \"%s\": COOPERATIVE protocol collection sizes: " - "maybe revoking: %d, ready to migrate: %d, unknown but " - "owned: %d", rkcg->rkcg_group_id->str, - (int)RD_MAP_CNT(maybe_revoking), - (int)RD_MAP_CNT(ready_to_migrate), - (int)RD_MAP_CNT(unknown_but_owned)); + "Group \"%s\": COOPERATIVE protocol collection sizes: " + "maybe revoking: %d, ready to migrate: %d, unknown but " + "owned: %d", + rkcg->rkcg_group_id->str, (int)RD_MAP_CNT(maybe_revoking), + (int)RD_MAP_CNT(ready_to_migrate), + (int)RD_MAP_CNT(unknown_but_owned)); rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP", - "Group \"%s\": %d partitions assigned to consumers", - rkcg->rkcg_group_id->str, total_assigned); + "Group \"%s\": %d partitions assigned to consumers", + rkcg->rkcg_group_id->str, total_assigned); RD_MAP_DESTROY_AND_FREE(maybe_revoking); RD_MAP_DESTROY_AND_FREE(ready_to_migrate); @@ -1528,73 +1468,68 @@ static void rd_kafka_cooperative_protocol_adjust_assignment ( /** * @brief Parses and handles the MemberState from a SyncGroupResponse. 
*/ -static void -rd_kafka_cgrp_handle_SyncGroup_memberstate (rd_kafka_cgrp_t *rkcg, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const rd_kafkap_bytes_t - *member_state) { - rd_kafka_buf_t *rkbuf = NULL; +static void rd_kafka_cgrp_handle_SyncGroup_memberstate( + rd_kafka_cgrp_t *rkcg, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + const rd_kafkap_bytes_t *member_state) { + rd_kafka_buf_t *rkbuf = NULL; rd_kafka_topic_partition_list_t *assignment = NULL; - const int log_decode_errors = LOG_ERR; + const int log_decode_errors = LOG_ERR; int16_t Version; rd_kafkap_bytes_t UserData; - /* Dont handle new assignments when terminating */ - if (!err && rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) - err = RD_KAFKA_RESP_ERR__DESTROY; + /* Dont handle new assignments when terminating */ + if (!err && rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) + err = RD_KAFKA_RESP_ERR__DESTROY; if (err) goto err; - if (RD_KAFKAP_BYTES_LEN(member_state) == 0) { - /* Empty assignment. */ - assignment = rd_kafka_topic_partition_list_new(0); - memset(&UserData, 0, sizeof(UserData)); - goto done; - } + if (RD_KAFKAP_BYTES_LEN(member_state) == 0) { + /* Empty assignment. */ + assignment = rd_kafka_topic_partition_list_new(0); + memset(&UserData, 0, sizeof(UserData)); + goto done; + } /* Parse assignment from MemberState */ - rkbuf = rd_kafka_buf_new_shadow(member_state->data, - RD_KAFKAP_BYTES_LEN(member_state), - NULL); - /* Protocol parser needs a broker handle to log errors on. */ - if (rkb) { - rkbuf->rkbuf_rkb = rkb; - rd_kafka_broker_keep(rkb); - } else - rkbuf->rkbuf_rkb = rd_kafka_broker_internal(rkcg->rkcg_rk); + rkbuf = rd_kafka_buf_new_shadow( + member_state->data, RD_KAFKAP_BYTES_LEN(member_state), NULL); + /* Protocol parser needs a broker handle to log errors on. 
*/ + if (rkb) { + rkbuf->rkbuf_rkb = rkb; + rd_kafka_broker_keep(rkb); + } else + rkbuf->rkbuf_rkb = rd_kafka_broker_internal(rkcg->rkcg_rk); rd_kafka_buf_read_i16(rkbuf, &Version); - if (!(assignment = rd_kafka_buf_read_topic_partitions(rkbuf, 0, - rd_false, - rd_false))) + if (!(assignment = rd_kafka_buf_read_topic_partitions( + rkbuf, 0, rd_false, rd_false))) goto err_parse; rd_kafka_buf_read_bytes(rkbuf, &UserData); - done: - rd_kafka_cgrp_update_session_timeout(rkcg, rd_true/*reset timeout*/); +done: + rd_kafka_cgrp_update_session_timeout(rkcg, rd_true /*reset timeout*/); rd_assert(rkcg->rkcg_assignor); if (rkcg->rkcg_assignor->rkas_on_assignment_cb) { char *member_id; RD_KAFKAP_STR_DUPA(&member_id, rkcg->rkcg_member_id); rd_kafka_consumer_group_metadata_t *cgmd = - rd_kafka_consumer_group_metadata_new_with_genid( - rkcg->rkcg_rk->rk_conf.group_id_str, - rkcg->rkcg_generation_id, member_id, - rkcg->rkcg_rk->rk_conf.group_instance_id); + rd_kafka_consumer_group_metadata_new_with_genid( + rkcg->rkcg_rk->rk_conf.group_id_str, + rkcg->rkcg_generation_id, member_id, + rkcg->rkcg_rk->rk_conf.group_instance_id); rkcg->rkcg_assignor->rkas_on_assignment_cb( - rkcg->rkcg_assignor, - &(rkcg->rkcg_assignor_state), - assignment, &UserData, cgmd); + rkcg->rkcg_assignor, &(rkcg->rkcg_assignor_state), + assignment, &UserData, cgmd); rd_kafka_consumer_group_metadata_destroy(cgmd); } // FIXME: Remove when we're done debugging. 
rd_kafka_topic_partition_list_log(rkcg->rkcg_rk, "ASSIGNMENT", - RD_KAFKA_DBG_CGRP, - assignment); + RD_KAFKA_DBG_CGRP, assignment); /* Set the new assignment */ rd_kafka_cgrp_handle_assignment(rkcg, assignment); @@ -1606,10 +1541,10 @@ rd_kafka_cgrp_handle_SyncGroup_memberstate (rd_kafka_cgrp_t *rkcg, return; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: if (rkbuf) rd_kafka_buf_destroy(rkbuf); @@ -1630,14 +1565,12 @@ rd_kafka_cgrp_handle_SyncGroup_memberstate (rd_kafka_cgrp_t *rkcg, rd_kafka_cgrp_set_member_id(rkcg, ""); if (rd_kafka_cgrp_rebalance_protocol(rkcg) == - RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && (err == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION || err == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)) rd_kafka_cgrp_revoke_all_rejoin( - rkcg, - rd_true/*assignment is lost*/, - rd_true/*this consumer is initiating*/, - "SyncGroup error"); + rkcg, rd_true /*assignment is lost*/, + rd_true /*this consumer is initiating*/, "SyncGroup error"); else rd_kafka_cgrp_rejoin(rkcg, "SyncGroup error: %s", rd_kafka_err2str(err)); @@ -1648,24 +1581,24 @@ rd_kafka_cgrp_handle_SyncGroup_memberstate (rd_kafka_cgrp_t *rkcg, /** * @brief Cgrp handler for SyncGroup responses. opaque must be the cgrp handle. 
*/ -static void rd_kafka_cgrp_handle_SyncGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_cgrp_t *rkcg = opaque; - const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; +static void rd_kafka_cgrp_handle_SyncGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; + const int log_decode_errors = LOG_ERR; + int16_t ErrorCode = 0; rd_kafkap_bytes_t MemberState = RD_ZERO_INIT; int actions; if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) { - rd_kafka_dbg(rkb->rkb_rk, CGRP, "SYNCGROUP", - "SyncGroup response: discarding outdated request " - "(now in join-state %s)", - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state]); + rd_kafka_dbg( + rkb->rkb_rk, CGRP, "SYNCGROUP", + "SyncGroup response: discarding outdated request " + "(now in join-state %s)", + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_SyncGroup); return; } @@ -1688,8 +1621,7 @@ static void rd_kafka_cgrp_handle_SyncGroup (rd_kafka_t *rk, if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { /* Re-query for coordinator */ rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ, - RD_KAFKA_OP_COORD_QUERY, - ErrorCode); + RD_KAFKA_OP_COORD_QUERY, ErrorCode); /* FALLTHRU */ } @@ -1714,7 +1646,7 @@ static void rd_kafka_cgrp_handle_SyncGroup (rd_kafka_t *rk, return; - err_parse: +err_parse: ErrorCode = rkbuf->rkbuf_err; goto err; } @@ -1723,13 +1655,12 @@ static void rd_kafka_cgrp_handle_SyncGroup (rd_kafka_t *rk, /** * @brief Run group assignment. 
*/ -static void -rd_kafka_cgrp_assignor_run (rd_kafka_cgrp_t *rkcg, - rd_kafka_assignor_t *rkas, - rd_kafka_resp_err_t err, - rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - int member_cnt) { +static void rd_kafka_cgrp_assignor_run(rd_kafka_cgrp_t *rkcg, + rd_kafka_assignor_t *rkas, + rd_kafka_resp_err_t err, + rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + int member_cnt) { char errstr[512]; if (err) { @@ -1742,8 +1673,7 @@ rd_kafka_cgrp_assignor_run (rd_kafka_cgrp_t *rkcg, *errstr = '\0'; /* Run assignor */ - err = rd_kafka_assignor_run(rkcg, rkas, metadata, - members, member_cnt, + err = rd_kafka_assignor_run(rkcg, rkas, metadata, members, member_cnt, errstr, sizeof(errstr)); if (err) { @@ -1753,15 +1683,13 @@ rd_kafka_cgrp_assignor_run (rd_kafka_cgrp_t *rkcg, goto err; } - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "ASSIGNOR", + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "ASSIGNOR", "Group \"%s\": \"%s\" assignor run for %d member(s)", - rkcg->rkcg_group_id->str, - rkas->rkas_protocol_name->str, + rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str, member_cnt); if (rkas->rkas_protocol == RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE) - rd_kafka_cooperative_protocol_adjust_assignment(rkcg, - members, + rd_kafka_cooperative_protocol_adjust_assignment(rkcg, members, member_cnt); rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC); @@ -1769,22 +1697,18 @@ rd_kafka_cgrp_assignor_run (rd_kafka_cgrp_t *rkcg, rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_SyncGroup); /* Respond to broker with assignment set or error */ - rd_kafka_SyncGroupRequest(rkcg->rkcg_coord, - rkcg->rkcg_group_id, - rkcg->rkcg_generation_id, - rkcg->rkcg_member_id, - rkcg->rkcg_group_instance_id, - members, err ? 
0 : member_cnt, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_handle_SyncGroup, rkcg); + rd_kafka_SyncGroupRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_generation_id, + rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id, members, + err ? 0 : member_cnt, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_SyncGroup, rkcg); return; err: rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "ASSIGNOR", "Group \"%s\": failed to run assignor \"%s\" for " "%d member(s): %s", - rkcg->rkcg_group_id->str, - rkas->rkas_protocol_name->str, + rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str, member_cnt, errstr); rd_kafka_cgrp_rejoin(rkcg, "%s assignor failed: %s", @@ -1797,9 +1721,9 @@ rd_kafka_cgrp_assignor_run (rd_kafka_cgrp_t *rkcg, * @brief Op callback from handle_JoinGroup */ static rd_kafka_op_res_t -rd_kafka_cgrp_assignor_handle_Metadata_op (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +rd_kafka_cgrp_assignor_handle_Metadata_op(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) @@ -1816,9 +1740,8 @@ rd_kafka_cgrp_assignor_handle_Metadata_op (rd_kafka_t *rk, return RD_KAFKA_OP_RES_HANDLED; } - rd_kafka_cgrp_assignor_run(rkcg, - rkcg->rkcg_assignor, - rko->rko_err, rko->rko_u.metadata.md, + rd_kafka_cgrp_assignor_run(rkcg, rkcg->rkcg_assignor, rko->rko_err, + rko->rko_u.metadata.md, rkcg->rkcg_group_leader.members, rkcg->rkcg_group_leader.member_cnt); @@ -1834,22 +1757,21 @@ rd_kafka_cgrp_assignor_handle_Metadata_op (rd_kafka_t *rk, * * Returns 0 on success or -1 on error. 
*/ -static int -rd_kafka_group_MemberMetadata_consumer_read ( - rd_kafka_broker_t *rkb, rd_kafka_group_member_t *rkgm, - const rd_kafkap_bytes_t *MemberMetadata) { +static int rd_kafka_group_MemberMetadata_consumer_read( + rd_kafka_broker_t *rkb, + rd_kafka_group_member_t *rkgm, + const rd_kafkap_bytes_t *MemberMetadata) { rd_kafka_buf_t *rkbuf; int16_t Version; int32_t subscription_cnt; rd_kafkap_bytes_t UserData; const int log_decode_errors = LOG_ERR; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG; /* Create a shadow-buffer pointing to the metadata to ease parsing. */ - rkbuf = rd_kafka_buf_new_shadow(MemberMetadata->data, - RD_KAFKAP_BYTES_LEN(MemberMetadata), - NULL); + rkbuf = rd_kafka_buf_new_shadow( + MemberMetadata->data, RD_KAFKAP_BYTES_LEN(MemberMetadata), NULL); rd_kafka_buf_read_i16(rkbuf, &Version); rd_kafka_buf_read_i32(rkbuf, &subscription_cnt); @@ -1858,16 +1780,15 @@ rd_kafka_group_MemberMetadata_consumer_read ( goto err; rkgm->rkgm_subscription = - rd_kafka_topic_partition_list_new(subscription_cnt); + rd_kafka_topic_partition_list_new(subscription_cnt); while (subscription_cnt-- > 0) { rd_kafkap_str_t Topic; char *topic_name; rd_kafka_buf_read_str(rkbuf, &Topic); RD_KAFKAP_STR_DUPA(&topic_name, &Topic); - rd_kafka_topic_partition_list_add(rkgm->rkgm_subscription, - topic_name, - RD_KAFKA_PARTITION_UA); + rd_kafka_topic_partition_list_add( + rkgm->rkgm_subscription, topic_name, RD_KAFKA_PARTITION_UA); } rd_kafka_buf_read_bytes(rkbuf, &UserData); @@ -1875,24 +1796,23 @@ rd_kafka_group_MemberMetadata_consumer_read ( if (Version >= 1 && !(rkgm->rkgm_owned = rd_kafka_buf_read_topic_partitions( - rkbuf, 0, rd_false, rd_false))) + rkbuf, 0, rd_false, rd_false))) goto err; rd_kafka_buf_destroy(rkbuf); return 0; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: rd_rkb_dbg(rkb, CGRP, "MEMBERMETA", "Failed to parse MemberMetadata for \"%.*s\": %s", 
RD_KAFKAP_STR_PR(rkgm->rkgm_member_id), rd_kafka_err2str(err)); if (rkgm->rkgm_subscription) { - rd_kafka_topic_partition_list_destroy(rkgm-> - rkgm_subscription); + rd_kafka_topic_partition_list_destroy(rkgm->rkgm_subscription); rkgm->rkgm_subscription = NULL; } @@ -1907,21 +1827,21 @@ rd_kafka_group_MemberMetadata_consumer_read ( * * @locality rdkafka main thread (unless ERR__DESTROY: arbitrary thread) */ -static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_cgrp_t *rkcg = opaque; +static void rd_kafka_cgrp_handle_JoinGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; + int16_t ErrorCode = 0; int32_t GenerationId; rd_kafkap_str_t Protocol, LeaderId; rd_kafkap_str_t MyMemberId = RD_KAFKAP_STR_INITIALIZER; int32_t member_cnt; int actions; - int i_am_leader = 0; + int i_am_leader = 0; rd_kafka_assignor_t *rkas = NULL; rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_JoinGroup); @@ -1931,11 +1851,11 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, return; /* Terminating */ if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN) { - rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP", - "JoinGroup response: discarding outdated request " - "(now in join-state %s)", - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state]); + rd_kafka_dbg( + rkb->rkb_rk, CGRP, "JOINGROUP", + "JoinGroup response: discarding outdated request " + "(now in join-state %s)", + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); return; } @@ -1965,30 +1885,33 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, protocol_name)) || !rkas->rkas_enabled) { rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP", - "Unsupported assignment strategy \"%s\"", 
- protocol_name); + "Unsupported assignment strategy \"%s\"", + protocol_name); if (rkcg->rkcg_assignor) { if (rkcg->rkcg_assignor->rkas_destroy_state_cb) - rkcg->rkcg_assignor->rkas_destroy_state_cb( + rkcg->rkcg_assignor + ->rkas_destroy_state_cb( rkcg->rkcg_assignor_state); rkcg->rkcg_assignor_state = NULL; - rkcg->rkcg_assignor = NULL; + rkcg->rkcg_assignor = NULL; } ErrorCode = RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL; } - } + } rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP", - "JoinGroup response: GenerationId %"PRId32", " + "JoinGroup response: GenerationId %" PRId32 + ", " "Protocol %.*s, LeaderId %.*s%s, my MemberId %.*s, " - "member metadata count ""%"PRId32": %s", - GenerationId, - RD_KAFKAP_STR_PR(&Protocol), + "member metadata count " + "%" PRId32 ": %s", + GenerationId, RD_KAFKAP_STR_PR(&Protocol), RD_KAFKAP_STR_PR(&LeaderId), RD_KAFKAP_STR_LEN(&MyMemberId) && - !rd_kafkap_str_cmp(&LeaderId, &MyMemberId) ? " (me)" : "", - RD_KAFKAP_STR_PR(&MyMemberId), - member_cnt, + !rd_kafkap_str_cmp(&LeaderId, &MyMemberId) + ? " (me)" + : "", + RD_KAFKAP_STR_PR(&MyMemberId), member_cnt, ErrorCode ? 
rd_kafka_err2str(ErrorCode) : "(no error)"); if (!ErrorCode) { @@ -1998,14 +1921,14 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, rkcg->rkcg_generation_id = GenerationId; i_am_leader = !rd_kafkap_str_cmp(&LeaderId, &MyMemberId); } else { - rd_interval_backoff(&rkcg->rkcg_join_intvl, 1000*1000); + rd_interval_backoff(&rkcg->rkcg_join_intvl, 1000 * 1000); goto err; } if (rkcg->rkcg_assignor && rkcg->rkcg_assignor != rkas) { if (rkcg->rkcg_assignor->rkas_destroy_state_cb) rkcg->rkcg_assignor->rkas_destroy_state_cb( - rkcg->rkcg_assignor_state); + rkcg->rkcg_assignor_state); rkcg->rkcg_assignor_state = NULL; } rkcg->rkcg_assignor = rkas; @@ -2018,7 +1941,7 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, rd_kafka_op_t *rko; rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP", "I am elected leader for group \"%s\" " - "with %"PRId32" member(s)", + "with %" PRId32 " member(s)", rkcg->rkcg_group_id->str, member_cnt); if (member_cnt > 100000) { @@ -2030,38 +1953,38 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, members = rd_calloc(member_cnt, sizeof(*members)); - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { rd_kafkap_str_t MemberId; rd_kafkap_bytes_t MemberMetadata; rd_kafka_group_member_t *rkgm; - rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER; + rd_kafkap_str_t GroupInstanceId = + RD_KAFKAP_STR_INITIALIZER; rd_kafka_buf_read_str(rkbuf, &MemberId); if (request->rkbuf_reqhdr.ApiVersion >= 5) rd_kafka_buf_read_str(rkbuf, &GroupInstanceId); rd_kafka_buf_read_bytes(rkbuf, &MemberMetadata); - rkgm = &members[sub_cnt]; + rkgm = &members[sub_cnt]; rkgm->rkgm_member_id = rd_kafkap_str_copy(&MemberId); rkgm->rkgm_group_instance_id = - rd_kafkap_str_copy(&GroupInstanceId); + rd_kafkap_str_copy(&GroupInstanceId); rd_list_init(&rkgm->rkgm_eligible, 0, NULL); rkgm->rkgm_generation = -1; if (rd_kafka_group_MemberMetadata_consumer_read( - rkb, rkgm, &MemberMetadata)) { + rkb, rkgm, &MemberMetadata)) { /* 
Failed to parse this member's metadata, * ignore it. */ } else { sub_cnt++; rkgm->rkgm_assignment = - rd_kafka_topic_partition_list_new( - rkgm->rkgm_subscription->cnt); + rd_kafka_topic_partition_list_new( + rkgm->rkgm_subscription->cnt); rd_kafka_topic_partition_list_get_topic_names( - rkgm->rkgm_subscription, &topics, - 0/*dont include regex*/); + rkgm->rkgm_subscription, &topics, + 0 /*dont include regex*/); } - } /* FIXME: What to do if parsing failed for some/all members? @@ -2076,64 +1999,56 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, rkcg->rkcg_group_leader.member_cnt = sub_cnt; rd_kafka_cgrp_set_join_state( - rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); /* The assignor will need metadata so fetch it asynchronously * and run the assignor when we get a reply. * Create a callback op that the generic metadata code * will trigger when metadata has been parsed. */ rko = rd_kafka_op_new_cb( - rkcg->rkcg_rk, RD_KAFKA_OP_METADATA, - rd_kafka_cgrp_assignor_handle_Metadata_op); + rkcg->rkcg_rk, RD_KAFKA_OP_METADATA, + rd_kafka_cgrp_assignor_handle_Metadata_op); rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, NULL); rd_kafka_MetadataRequest( - rkb, &topics, - "partition assignor", - rd_false/*!allow_auto_create*/, - /* cgrp_update=false: - * Since the subscription list may not be identical - * across all members of the group and thus the - * Metadata response may not be identical to this - * consumer's subscription list, we want to - * avoid triggering a rejoin or error propagation - * on receiving the response since some topics - * may be missing. 
*/ - rd_false, - rko); + rkb, &topics, "partition assignor", + rd_false /*!allow_auto_create*/, + /* cgrp_update=false: + * Since the subscription list may not be identical + * across all members of the group and thus the + * Metadata response may not be identical to this + * consumer's subscription list, we want to + * avoid triggering a rejoin or error propagation + * on receiving the response since some topics + * may be missing. */ + rd_false, rko); rd_list_destroy(&topics); } else { rd_kafka_cgrp_set_join_state( - rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC); + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC); rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_SyncGroup); - rd_kafka_SyncGroupRequest(rkb, rkcg->rkcg_group_id, - rkcg->rkcg_generation_id, - rkcg->rkcg_member_id, - rkcg->rkcg_group_instance_id, - NULL, 0, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_handle_SyncGroup, rkcg); - + rd_kafka_SyncGroupRequest( + rkb, rkcg->rkcg_group_id, rkcg->rkcg_generation_id, + rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id, NULL, 0, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_SyncGroup, rkcg); } err: - actions = rd_kafka_err_action(rkb, ErrorCode, request, - RD_KAFKA_ERR_ACTION_IGNORE, - RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, + actions = rd_kafka_err_action( + rkb, ErrorCode, request, RD_KAFKA_ERR_ACTION_IGNORE, + RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, - RD_KAFKA_ERR_ACTION_IGNORE, - RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED, + RD_KAFKA_ERR_ACTION_IGNORE, RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED, - RD_KAFKA_ERR_ACTION_IGNORE, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_ERR_ACTION_IGNORE, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, + RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { /* Re-query for coordinator */ @@ -2155,12 +2070,11 @@ static void 
rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, ErrorCode = RD_KAFKA_RESP_ERR__FATAL; } else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) - rd_kafka_consumer_err(rkcg->rkcg_q, - rd_kafka_broker_id(rkb), - ErrorCode, 0, NULL, NULL, - RD_KAFKA_OFFSET_INVALID, - "JoinGroup failed: %s", - rd_kafka_err2str(ErrorCode)); + rd_kafka_consumer_err( + rkcg->rkcg_q, rd_kafka_broker_id(rkb), ErrorCode, 0, + NULL, NULL, RD_KAFKA_OFFSET_INVALID, + "JoinGroup failed: %s", + rd_kafka_err2str(ErrorCode)); if (ErrorCode == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID) rd_kafka_cgrp_set_member_id(rkcg, ""); @@ -2177,24 +2091,21 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, } if (rd_kafka_cgrp_rebalance_protocol(rkcg) == - RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && (ErrorCode == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION || ErrorCode == RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED)) rd_kafka_cgrp_revoke_all_rejoin( - rkcg, - rd_true/*assignment is lost*/, - rd_true/*this consumer is initiating*/, - "JoinGroup error"); + rkcg, rd_true /*assignment is lost*/, + rd_true /*this consumer is initiating*/, + "JoinGroup error"); else - rd_kafka_cgrp_rejoin(rkcg, - "JoinGroup error: %s", + rd_kafka_cgrp_rejoin(rkcg, "JoinGroup error: %s", rd_kafka_err2str(ErrorCode)); - } return; - err_parse: +err_parse: ErrorCode = rkbuf->rkbuf_err; goto err; } @@ -2203,15 +2114,15 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, /** * @brief Check subscription against requested Metadata. 
*/ -static rd_kafka_op_res_t -rd_kafka_cgrp_handle_Metadata_op (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_cgrp_handle_Metadata_op(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) return RD_KAFKA_OP_RES_HANDLED; /* Terminating */ - rd_kafka_cgrp_metadata_update_check(rkcg, rd_false/*dont rejoin*/); + rd_kafka_cgrp_metadata_update_check(rkcg, rd_false /*dont rejoin*/); return RD_KAFKA_OP_RES_HANDLED; } @@ -2226,9 +2137,9 @@ rd_kafka_cgrp_handle_Metadata_op (rd_kafka_t *rk, rd_kafka_q_t *rkq, * @locks none * @locality rdkafka main thread */ -static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, - int *metadata_agep, - const char *reason) { +static int rd_kafka_cgrp_metadata_refresh(rd_kafka_cgrp_t *rkcg, + int *metadata_agep, + const char *reason) { rd_kafka_t *rk = rkcg->rkcg_rk; rd_kafka_op_t *rko; rd_list_t topics; @@ -2237,9 +2148,8 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, rd_list_init(&topics, 8, rd_free); /* Insert all non-wildcard topics in cache. 
*/ - rd_kafka_metadata_cache_hint_rktparlist(rkcg->rkcg_rk, - rkcg->rkcg_subscription, - NULL, 0/*dont replace*/); + rd_kafka_metadata_cache_hint_rktparlist( + rkcg->rkcg_rk, rkcg->rkcg_subscription, NULL, 0 /*dont replace*/); if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) { /* For wildcard subscriptions make sure the @@ -2247,14 +2157,14 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, int metadata_age = -1; if (rk->rk_ts_full_metadata) - metadata_age = (int)(rd_clock() - - rk->rk_ts_full_metadata)/1000; + metadata_age = + (int)(rd_clock() - rk->rk_ts_full_metadata) / 1000; *metadata_agep = metadata_age; if (metadata_age != -1 && metadata_age <= rk->rk_conf.metadata_max_age_ms) { - rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA, + rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA", "%s: metadata for wildcard subscription " "is up to date (%dms old)", @@ -2268,7 +2178,7 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, int r; rd_kafka_topic_partition_list_get_topic_names( - rkcg->rkcg_subscription, &topics, 0/*no regexps*/); + rkcg->rkcg_subscription, &topics, 0 /*no regexps*/); rd_kafka_rdlock(rk); r = rd_kafka_metadata_cache_topics_count_exists(rk, &topics, @@ -2276,17 +2186,16 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, rd_kafka_rdunlock(rk); if (r == rd_list_cnt(&topics)) { - rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA, + rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA", "%s: metadata for subscription " - "is up to date (%dms old)", reason, - *metadata_agep); + "is up to date (%dms old)", + reason, *metadata_agep); rd_list_destroy(&topics); return 0; /* Up-to-date and all topics exist. 
*/ } - rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA, - "CGRPMETADATA", + rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA", "%s: metadata for subscription " "only available for %d/%d topics (%dms old)", reason, r, rd_list_cnt(&topics), *metadata_agep); @@ -2299,12 +2208,10 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, 0); err = rd_kafka_metadata_request(rkcg->rkcg_rk, NULL, &topics, - rd_false/*!allow auto create */, - rd_true/*cgrp_update*/, - reason, rko); + rd_false /*!allow auto create */, + rd_true /*cgrp_update*/, reason, rko); if (err) { - rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA, - "CGRPMETADATA", + rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA", "%s: need to refresh metadata (%dms old) " "but no usable brokers available: %s", reason, *metadata_agep, rd_kafka_err2str(err)); @@ -2318,7 +2225,7 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, -static void rd_kafka_cgrp_join (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_join(rd_kafka_cgrp_t *rkcg) { int metadata_age; if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || @@ -2355,86 +2262,83 @@ static void rd_kafka_cgrp_join (rd_kafka_cgrp_t *rkcg) { * refresh metadata if necessary. 
*/ if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age, "consumer join") == 1) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "JOIN", + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, + "JOIN", "Group \"%.*s\": " "postponing join until up-to-date " "metadata is available", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id)); - rd_assert(rkcg->rkcg_join_state == - RD_KAFKA_CGRP_JOIN_STATE_INIT || - /* Possible via rd_kafka_cgrp_modify_subscription */ - rkcg->rkcg_join_state == - RD_KAFKA_CGRP_JOIN_STATE_STEADY); + rd_assert( + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT || + /* Possible via rd_kafka_cgrp_modify_subscription */ + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY); rd_kafka_cgrp_set_join_state( - rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); return; /* ^ async call */ } if (rd_list_empty(rkcg->rkcg_subscribed_topics)) rd_kafka_cgrp_metadata_update_check(rkcg, - rd_false/*dont join*/); + rd_false /*dont join*/); if (rd_list_empty(rkcg->rkcg_subscribed_topics)) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "JOIN", - "Group \"%.*s\": " - "no matching topics based on %dms old metadata: " - "next metadata refresh in %dms", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - metadata_age, - rkcg->rkcg_rk->rk_conf. - metadata_refresh_interval_ms - metadata_age); + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "JOIN", + "Group \"%.*s\": " + "no matching topics based on %dms old metadata: " + "next metadata refresh in %dms", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), metadata_age, + rkcg->rkcg_rk->rk_conf.metadata_refresh_interval_ms - + metadata_age); return; } - rd_rkb_dbg(rkcg->rkcg_curr_coord, CONSUMER|RD_KAFKA_DBG_CGRP, "JOIN", - "Joining group \"%.*s\" with %d subscribed topic(s) and " - "member id \"%.*s\"", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_list_cnt(rkcg->rkcg_subscribed_topics), - rkcg->rkcg_member_id ? 
- RD_KAFKAP_STR_LEN(rkcg->rkcg_member_id) : 0, - rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str : ""); + rd_rkb_dbg( + rkcg->rkcg_curr_coord, CONSUMER | RD_KAFKA_DBG_CGRP, "JOIN", + "Joining group \"%.*s\" with %d subscribed topic(s) and " + "member id \"%.*s\"", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_list_cnt(rkcg->rkcg_subscribed_topics), + rkcg->rkcg_member_id ? RD_KAFKAP_STR_LEN(rkcg->rkcg_member_id) : 0, + rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str : ""); rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN); rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_JoinGroup); - rd_kafka_JoinGroupRequest(rkcg->rkcg_coord, rkcg->rkcg_group_id, - rkcg->rkcg_member_id, - rkcg->rkcg_group_instance_id, - rkcg->rkcg_rk->rk_conf.group_protocol_type, - rkcg->rkcg_subscribed_topics, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_handle_JoinGroup, rkcg); + rd_kafka_JoinGroupRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_member_id, + rkcg->rkcg_group_instance_id, + rkcg->rkcg_rk->rk_conf.group_protocol_type, + rkcg->rkcg_subscribed_topics, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_JoinGroup, rkcg); } /** * Rejoin group on update to effective subscribed topics list */ -static void rd_kafka_cgrp_revoke_rejoin (rd_kafka_cgrp_t *rkcg, - const char *reason) { +static void rd_kafka_cgrp_revoke_rejoin(rd_kafka_cgrp_t *rkcg, + const char *reason) { /* * Clean-up group leader duties, if any. */ rd_kafka_cgrp_group_leader_reset(rkcg, "group (re)join"); - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "REJOIN", - "Group \"%.*s\" (re)joining in join-state %s " - "with %d assigned partition(s): %s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_group_assignment ? 
- rkcg->rkcg_group_assignment->cnt : 0, - reason); - - rd_kafka_cgrp_revoke_all_rejoin(rkcg, - rd_false/*not lost*/, - rd_true/*initiating*/, - reason); + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "REJOIN", + "Group \"%.*s\" (re)joining in join-state %s " + "with %d assigned partition(s): %s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0, + reason); + + rd_kafka_cgrp_revoke_all_rejoin(rkcg, rd_false /*not lost*/, + rd_true /*initiating*/, reason); } /** @@ -2448,9 +2352,8 @@ static void rd_kafka_cgrp_revoke_rejoin (rd_kafka_cgrp_t *rkcg, * * @remark Takes ownership of \p tinfos */ -static rd_bool_t -rd_kafka_cgrp_update_subscribed_topics (rd_kafka_cgrp_t *rkcg, - rd_list_t *tinfos) { +static rd_bool_t rd_kafka_cgrp_update_subscribed_topics(rd_kafka_cgrp_t *rkcg, + rd_list_t *tinfos) { rd_kafka_topic_info_t *tinfo; int i; @@ -2483,18 +2386,17 @@ rd_kafka_cgrp_update_subscribed_topics (rd_kafka_cgrp_t *rkcg, return rd_false; } - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_METADATA, "SUBSCRIPTION", - "Group \"%.*s\": effective subscription list changed " - "from %d to %d topic(s):", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_list_cnt(rkcg->rkcg_subscribed_topics), - rd_list_cnt(tinfos)); + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_METADATA, "SUBSCRIPTION", + "Group \"%.*s\": effective subscription list changed " + "from %d to %d topic(s):", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_list_cnt(rkcg->rkcg_subscribed_topics), rd_list_cnt(tinfos)); RD_LIST_FOREACH(tinfo, tinfos, i) - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_METADATA, - "SUBSCRIPTION", - " Topic %s with %d partition(s)", - tinfo->topic, tinfo->partition_cnt); + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_METADATA, + "SUBSCRIPTION", " Topic %s with %d partition(s)", + tinfo->topic, tinfo->partition_cnt); rd_list_destroy(rkcg->rkcg_subscribed_topics); @@ -2507,16 
+2409,16 @@ rd_kafka_cgrp_update_subscribed_topics (rd_kafka_cgrp_t *rkcg, /** * @brief Handle Heartbeat response. */ -void rd_kafka_cgrp_handle_Heartbeat (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; +void rd_kafka_cgrp_handle_Heartbeat(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; - int actions = 0; + int16_t ErrorCode = 0; + int actions = 0; if (err == RD_KAFKA_RESP_ERR__DESTROY) return; @@ -2539,58 +2441,57 @@ void rd_kafka_cgrp_handle_Heartbeat (rd_kafka_t *rk, } rd_kafka_cgrp_update_session_timeout( - rkcg, rd_false/*don't update if session has expired*/); + rkcg, rd_false /*don't update if session has expired*/); return; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: rkcg->rkcg_last_heartbeat_err = err; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", - "Group \"%s\" heartbeat error response in " - "state %s (join-state %s, %d partition(s) assigned): %s", - rkcg->rkcg_group_id->str, - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_group_assignment ? 
- rkcg->rkcg_group_assignment->cnt : 0, - rd_kafka_err2str(err)); - - if (rkcg->rkcg_join_state <= RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", - "Heartbeat response: discarding outdated " - "request (now in join-state %s)", - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state]); - return; - } - - switch (err) - { - case RD_KAFKA_RESP_ERR__DESTROY: - /* quick cleanup */ + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Group \"%s\" heartbeat error response in " + "state %s (join-state %s, %d partition(s) assigned): %s", + rkcg->rkcg_group_id->str, + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0, + rd_kafka_err2str(err)); + + if (rkcg->rkcg_join_state <= RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Heartbeat response: discarding outdated " + "request (now in join-state %s)", + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + return; + } + + switch (err) { + case RD_KAFKA_RESP_ERR__DESTROY: + /* quick cleanup */ return; - case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP: - case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR__TRANSPORT: + case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP: + case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR__TRANSPORT: rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER, "HEARTBEAT", "Heartbeat failed due to coordinator (%s) " "no longer available: %s: " "re-querying for coordinator", - rkcg->rkcg_curr_coord ? - rd_kafka_broker_name(rkcg->rkcg_curr_coord) : - "none", + rkcg->rkcg_curr_coord + ? 
rd_kafka_broker_name(rkcg->rkcg_curr_coord) + : "none", rd_kafka_err2str(err)); - /* Remain in joined state and keep querying for coordinator */ + /* Remain in joined state and keep querying for coordinator */ actions = RD_KAFKA_ERR_ACTION_REFRESH; break; case RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS: rd_kafka_cgrp_update_session_timeout( - rkcg, rd_false/*don't update if session has expired*/); + rkcg, rd_false /*don't update if session has expired*/); /* No further action if already rebalancing */ if (RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg)) return; @@ -2599,17 +2500,15 @@ void rd_kafka_cgrp_handle_Heartbeat (rd_kafka_t *rk, case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID: rd_kafka_cgrp_set_member_id(rkcg, ""); - rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, - rd_true/*lost*/, - rd_true/*initiating*/, + rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/, + rd_true /*initiating*/, "resetting member-id"); return; case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION: rkcg->rkcg_generation_id = -1; - rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, - rd_true/*lost*/, - rd_true/*initiating*/, + rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/, + rd_true /*initiating*/, "illegal generation"); return; @@ -2617,11 +2516,11 @@ void rd_kafka_cgrp_handle_Heartbeat (rd_kafka_t *rk, rd_kafka_set_fatal_error(rkcg->rkcg_rk, err, "Fatal consumer error: %s", rd_kafka_err2str(err)); - rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, - rd_true,/*assignment lost*/ - rd_true,/*initiating*/ - "consumer fenced by " - "newer instance"); + rd_kafka_cgrp_revoke_all_rejoin_maybe( + rkcg, rd_true, /*assignment lost*/ + rd_true, /*initiating*/ + "consumer fenced by " + "newer instance"); return; default: @@ -2649,7 +2548,7 @@ void rd_kafka_cgrp_handle_Heartbeat (rd_kafka_t *rk, /** * @brief Send Heartbeat */ -static void rd_kafka_cgrp_heartbeat (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_heartbeat(rd_kafka_cgrp_t *rkcg) { /* Don't send heartbeats if max.poll.interval.ms was exceeded */ if 
(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED) return; @@ -2659,18 +2558,17 @@ static void rd_kafka_cgrp_heartbeat (rd_kafka_cgrp_t *rkcg) { return; rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; - rd_kafka_HeartbeatRequest(rkcg->rkcg_coord, rkcg->rkcg_group_id, - rkcg->rkcg_generation_id, - rkcg->rkcg_member_id, - rkcg->rkcg_group_instance_id, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_handle_Heartbeat, NULL); + rd_kafka_HeartbeatRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_generation_id, + rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), rd_kafka_cgrp_handle_Heartbeat, + NULL); } /** * Cgrp is now terminated: decommission it and signal back to application. */ -static void rd_kafka_cgrp_terminated (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_terminated(rd_kafka_cgrp_t *rkcg) { if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATED) return; /* terminated() may be called multiple times, * make sure to only terminate once. */ @@ -2683,20 +2581,20 @@ static void rd_kafka_cgrp_terminated (rd_kafka_cgrp_t *rkcg) { rd_kafka_assert(NULL, rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM); rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers, - &rkcg->rkcg_offset_commit_tmr, 1/*lock*/); + &rkcg->rkcg_offset_commit_tmr, 1 /*lock*/); - rd_kafka_q_purge(rkcg->rkcg_wait_coord_q); + rd_kafka_q_purge(rkcg->rkcg_wait_coord_q); - /* Disable and empty ops queue since there will be no - * (broker) thread serving it anymore after the unassign_broker - * below. - * This prevents hang on destroy where responses are enqueued on rkcg_ops - * without anything serving the queue. */ - rd_kafka_q_disable(rkcg->rkcg_ops); - rd_kafka_q_purge(rkcg->rkcg_ops); + /* Disable and empty ops queue since there will be no + * (broker) thread serving it anymore after the unassign_broker + * below. + * This prevents hang on destroy where responses are enqueued on + * rkcg_ops without anything serving the queue. 
*/ + rd_kafka_q_disable(rkcg->rkcg_ops); + rd_kafka_q_purge(rkcg->rkcg_ops); - if (rkcg->rkcg_curr_coord) - rd_kafka_cgrp_coord_clear_broker(rkcg); + if (rkcg->rkcg_curr_coord) + rd_kafka_cgrp_coord_clear_broker(rkcg); if (rkcg->rkcg_coord) { rd_kafka_broker_destroy(rkcg->rkcg_coord); @@ -2706,7 +2604,7 @@ static void rd_kafka_cgrp_terminated (rd_kafka_cgrp_t *rkcg) { if (rkcg->rkcg_reply_rko) { /* Signal back to application. */ rd_kafka_replyq_enq(&rkcg->rkcg_reply_rko->rko_replyq, - rkcg->rkcg_reply_rko, 0); + rkcg->rkcg_reply_rko, 0); rkcg->rkcg_reply_rko = NULL; } @@ -2719,31 +2617,31 @@ static void rd_kafka_cgrp_terminated (rd_kafka_cgrp_t *rkcg) { * then progress to final termination and return 1. * Else returns 0. */ -static RD_INLINE int rd_kafka_cgrp_try_terminate (rd_kafka_cgrp_t *rkcg) { +static RD_INLINE int rd_kafka_cgrp_try_terminate(rd_kafka_cgrp_t *rkcg) { if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM) return 1; - if (likely(!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE))) - return 0; - - /* Check if wait-coord queue has timed out. */ - if (rd_kafka_q_len(rkcg->rkcg_wait_coord_q) > 0 && - rkcg->rkcg_ts_terminate + - (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000) < - rd_clock()) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM", - "Group \"%s\": timing out %d op(s) in " - "wait-for-coordinator queue", - rkcg->rkcg_group_id->str, - rd_kafka_q_len(rkcg->rkcg_wait_coord_q)); - rd_kafka_q_disable(rkcg->rkcg_wait_coord_q); - if (rd_kafka_q_concat(rkcg->rkcg_ops, - rkcg->rkcg_wait_coord_q) == -1) { - /* ops queue shut down, purge coord queue */ - rd_kafka_q_purge(rkcg->rkcg_wait_coord_q); - } - } + if (likely(!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE))) + return 0; + + /* Check if wait-coord queue has timed out. 
*/ + if (rd_kafka_q_len(rkcg->rkcg_wait_coord_q) > 0 && + rkcg->rkcg_ts_terminate + + (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000) < + rd_clock()) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM", + "Group \"%s\": timing out %d op(s) in " + "wait-for-coordinator queue", + rkcg->rkcg_group_id->str, + rd_kafka_q_len(rkcg->rkcg_wait_coord_q)); + rd_kafka_q_disable(rkcg->rkcg_wait_coord_q); + if (rd_kafka_q_concat(rkcg->rkcg_ops, + rkcg->rkcg_wait_coord_q) == -1) { + /* ops queue shut down, purge coord queue */ + rd_kafka_q_purge(rkcg->rkcg_wait_coord_q); + } + } if (!RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) && rd_list_empty(&rkcg->rkcg_toppars) && @@ -2760,28 +2658,29 @@ static RD_INLINE int rd_kafka_cgrp_try_terminate (rd_kafka_cgrp_t *rkcg) { return 1; } else { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM", - "Group \"%s\": " - "waiting for %s%d toppar(s), " - "%s" - "%d commit(s)%s%s%s (state %s, join-state %s) " - "before terminating", - rkcg->rkcg_group_id->str, - RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) ? - "assign call, ": "", - rd_list_cnt(&rkcg->rkcg_toppars), - rd_kafka_assignment_in_progress(rkcg->rkcg_rk) ? - "assignment in progress, " : "", - rkcg->rkcg_rk->rk_consumer.wait_commit_cnt, - (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE)? - ", wait-leave," : "", - rkcg->rkcg_rebalance_rejoin ? - ", rebalance_rejoin,": "", - (rkcg->rkcg_rebalance_incr_assignment != NULL)? - ", rebalance_incr_assignment,": "", - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[ - rkcg->rkcg_join_state]); + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "CGRPTERM", + "Group \"%s\": " + "waiting for %s%d toppar(s), " + "%s" + "%d commit(s)%s%s%s (state %s, join-state %s) " + "before terminating", + rkcg->rkcg_group_id->str, + RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) ? "assign call, " : "", + rd_list_cnt(&rkcg->rkcg_toppars), + rd_kafka_assignment_in_progress(rkcg->rkcg_rk) + ? 
"assignment in progress, " + : "", + rkcg->rkcg_rk->rk_consumer.wait_commit_cnt, + (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE) + ? ", wait-leave," + : "", + rkcg->rkcg_rebalance_rejoin ? ", rebalance_rejoin," : "", + (rkcg->rkcg_rebalance_incr_assignment != NULL) + ? ", rebalance_incr_assignment," + : "", + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); return 0; } } @@ -2792,12 +2691,11 @@ static RD_INLINE int rd_kafka_cgrp_try_terminate (rd_kafka_cgrp_t *rkcg) { * * @locks none */ -static void rd_kafka_cgrp_partition_add (rd_kafka_cgrp_t *rkcg, - rd_kafka_toppar_t *rktp) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP,"PARTADD", - "Group \"%s\": add %s [%"PRId32"]", - rkcg->rkcg_group_id->str, - rktp->rktp_rkt->rkt_topic->str, +static void rd_kafka_cgrp_partition_add(rd_kafka_cgrp_t *rkcg, + rd_kafka_toppar_t *rktp) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTADD", + "Group \"%s\": add %s [%" PRId32 "]", + rkcg->rkcg_group_id->str, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); rd_kafka_toppar_lock(rktp); @@ -2814,12 +2712,11 @@ static void rd_kafka_cgrp_partition_add (rd_kafka_cgrp_t *rkcg, * * @locks none */ -static void rd_kafka_cgrp_partition_del (rd_kafka_cgrp_t *rkcg, - rd_kafka_toppar_t *rktp) { +static void rd_kafka_cgrp_partition_del(rd_kafka_cgrp_t *rkcg, + rd_kafka_toppar_t *rktp) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL", - "Group \"%s\": delete %s [%"PRId32"]", - rkcg->rkcg_group_id->str, - rktp->rktp_rkt->rkt_topic->str, + "Group \"%s\": delete %s [%" PRId32 "]", + rkcg->rkcg_group_id->str, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); rd_kafka_toppar_lock(rktp); @@ -2836,16 +2733,15 @@ static void rd_kafka_cgrp_partition_del (rd_kafka_cgrp_t *rkcg, - /** * @brief Defer offset commit (rko) until coordinator is available. * * @returns 1 if the rko was deferred or 0 if the defer queue is disabled * or rko already deferred. 
*/ -static int rd_kafka_cgrp_defer_offset_commit (rd_kafka_cgrp_t *rkcg, - rd_kafka_op_t *rko, - const char *reason) { +static int rd_kafka_cgrp_defer_offset_commit(rd_kafka_cgrp_t *rkcg, + rd_kafka_op_t *rko, + const char *reason) { /* wait_coord_q is disabled session.timeout.ms after * group close() has been initated. */ @@ -2859,16 +2755,15 @@ static int rd_kafka_cgrp_defer_offset_commit (rd_kafka_cgrp_t *rkcg, "coordinator (%s) is unavailable: " "retrying later", rkcg->rkcg_group_id->str, - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - reason, - rkcg->rkcg_curr_coord ? - rd_kafka_broker_name(rkcg->rkcg_curr_coord) : - "none"); + rd_kafka_cgrp_state_names[rkcg->rkcg_state], reason, + rkcg->rkcg_curr_coord + ? rd_kafka_broker_name(rkcg->rkcg_curr_coord) + : "none"); rko->rko_flags |= RD_KAFKA_OP_F_REPROCESS; - rko->rko_u.offset_commit.ts_timeout = rd_clock() + - (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms - * 1000); + rko->rko_u.offset_commit.ts_timeout = + rd_clock() + + (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000); rd_kafka_q_enq(rkcg->rkcg_wait_coord_q, rko); return 1; @@ -2881,16 +2776,15 @@ static int rd_kafka_cgrp_defer_offset_commit (rd_kafka_cgrp_t *rkcg, * @remark \p offsets may be NULL if \p err is set * @returns the number of partitions with errors encountered */ -static int -rd_kafka_cgrp_update_committed_offsets (rd_kafka_cgrp_t *rkcg, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t - *offsets) { +static int rd_kafka_cgrp_update_committed_offsets( + rd_kafka_cgrp_t *rkcg, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets) { int i; int errcnt = 0; /* Update toppars' committed offset or global error */ - for (i = 0 ; offsets && i < offsets->cnt ; i++) { + for (i = 0; offsets && i < offsets->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &offsets->elems[i]; rd_kafka_toppar_t *rktp; @@ -2905,15 +2799,15 @@ rd_kafka_cgrp_update_committed_offsets (rd_kafka_cgrp_t *rkcg, rktpar->err = err; if 
(rktpar->err) { - rd_kafka_dbg(rkcg->rkcg_rk, TOPIC, - "OFFSET", + rd_kafka_dbg(rkcg->rkcg_rk, TOPIC, "OFFSET", "OffsetCommit failed for " - "%s [%"PRId32"] at offset " - "%"PRId64" in join-state %s: %s", + "%s [%" PRId32 + "] at offset " + "%" PRId64 " in join-state %s: %s", rktpar->topic, rktpar->partition, rktpar->offset, - rd_kafka_cgrp_join_state_names[ - rkcg->rkcg_join_state], + rd_kafka_cgrp_join_state_names + [rkcg->rkcg_join_state], rd_kafka_err2str(rktpar->err)); errcnt++; @@ -2945,15 +2839,14 @@ rd_kafka_cgrp_update_committed_offsets (rd_kafka_cgrp_t *rkcg, * @param errcnt Are the number of partitions in \p offsets that failed * offset commit. */ -static void -rd_kafka_cgrp_propagate_commit_result ( - rd_kafka_cgrp_t *rkcg, - rd_kafka_op_t *rko_orig, - rd_kafka_resp_err_t err, - int errcnt, - rd_kafka_topic_partition_list_t *offsets) { - - const rd_kafka_t *rk = rkcg->rkcg_rk; +static void rd_kafka_cgrp_propagate_commit_result( + rd_kafka_cgrp_t *rkcg, + rd_kafka_op_t *rko_orig, + rd_kafka_resp_err_t err, + int errcnt, + rd_kafka_topic_partition_list_t *offsets) { + + const rd_kafka_t *rk = rkcg->rkcg_rk; int offset_commit_cb_served = 0; /* If no special callback is set but a offset_commit_cb has @@ -2965,10 +2858,10 @@ rd_kafka_cgrp_propagate_commit_result ( if (offsets) rko_reply->rko_u.offset_commit.partitions = - rd_kafka_topic_partition_list_copy(offsets); + rd_kafka_topic_partition_list_copy(offsets); rko_reply->rko_u.offset_commit.cb = - rk->rk_conf.offset_commit_cb; + rk->rk_conf.offset_commit_cb; rko_reply->rko_u.offset_commit.opaque = rk->rk_conf.opaque; rd_kafka_q_enq(rk->rk_rep, rko_reply); @@ -2986,43 +2879,39 @@ rd_kafka_cgrp_propagate_commit_result ( rko_reply->rko_u.offset_commit = rko_orig->rko_u.offset_commit; if (offsets) rko_reply->rko_u.offset_commit.partitions = - rd_kafka_topic_partition_list_copy(offsets); + rd_kafka_topic_partition_list_copy(offsets); if (rko_reply->rko_u.offset_commit.reason) 
rko_reply->rko_u.offset_commit.reason = - rd_strdup(rko_reply->rko_u. - offset_commit.reason); + rd_strdup(rko_reply->rko_u.offset_commit.reason); rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko_reply, 0); offset_commit_cb_served++; } - if (!offset_commit_cb_served && - offsets && - (errcnt > 0 || - (err != RD_KAFKA_RESP_ERR_NO_ERROR && - err != RD_KAFKA_RESP_ERR__NO_OFFSET))) { + if (!offset_commit_cb_served && offsets && + (errcnt > 0 || (err != RD_KAFKA_RESP_ERR_NO_ERROR && + err != RD_KAFKA_RESP_ERR__NO_OFFSET))) { /* If there is no callback or handler for this (auto) * commit then log an error (#1043) */ char tmp[512]; rd_kafka_topic_partition_list_str( - offsets, tmp, sizeof(tmp), - /* Print per-partition errors unless there was a - * request-level error. */ - RD_KAFKA_FMT_F_OFFSET | + offsets, tmp, sizeof(tmp), + /* Print per-partition errors unless there was a + * request-level error. */ + RD_KAFKA_FMT_F_OFFSET | (errcnt ? RD_KAFKA_FMT_F_ONLY_ERR : 0)); - rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "COMMITFAIL", - "Offset commit (%s) failed " - "for %d/%d partition(s) in join-state %s: " - "%s%s%s", - rko_orig->rko_u.offset_commit.reason, - errcnt ? errcnt : offsets->cnt, offsets->cnt, - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state], - errcnt ? rd_kafka_err2str(err) : "", - errcnt ? ": " : "", - tmp); + rd_kafka_log( + rkcg->rkcg_rk, LOG_WARNING, "COMMITFAIL", + "Offset commit (%s) failed " + "for %d/%d partition(s) in join-state %s: " + "%s%s%s", + rko_orig->rko_u.offset_commit.reason, + errcnt ? errcnt : offsets->cnt, offsets->cnt, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + errcnt ? rd_kafka_err2str(err) : "", errcnt ? 
": " : "", + tmp); } } @@ -3034,22 +2923,22 @@ rd_kafka_cgrp_propagate_commit_result ( * @remark \p rkb, rkbuf, and request may be NULL in a number of * error cases (e.g., _NO_OFFSET, _WAIT_COORD) */ -static void rd_kafka_cgrp_op_handle_OffsetCommit (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; +static void rd_kafka_cgrp_op_handle_OffsetCommit(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; rd_kafka_op_t *rko_orig = opaque; rd_kafka_topic_partition_list_t *offsets = - rko_orig->rko_u.offset_commit.partitions; /* maybe NULL */ + rko_orig->rko_u.offset_commit.partitions; /* maybe NULL */ int errcnt; RD_KAFKA_OP_TYPE_ASSERT(rko_orig, RD_KAFKA_OP_OFFSET_COMMIT); - err = rd_kafka_handle_OffsetCommit(rk, rkb, err, rkbuf, - request, offsets); + err = + rd_kafka_handle_OffsetCommit(rk, rkb, err, rkbuf, request, offsets); /* Suppress empty commit debug logs if allowed */ if (err != RD_KAFKA_RESP_ERR__NO_OFFSET || @@ -3060,8 +2949,8 @@ static void rd_kafka_cgrp_op_handle_OffsetCommit (rd_kafka_t *rk, "join-state %s: " "%s: returned: %s", offsets ? offsets->cnt : -1, - rd_kafka_cgrp_join_state_names[ - rkcg->rkcg_join_state], + rd_kafka_cgrp_join_state_names + [rkcg->rkcg_join_state], rko_orig->rko_u.offset_commit.reason, rd_kafka_err2str(err)); else @@ -3071,8 +2960,8 @@ static void rd_kafka_cgrp_op_handle_OffsetCommit (rd_kafka_t *rk, "%s: %s: " "returned: %s", offsets ? 
offsets->cnt : -1, - rd_kafka_cgrp_join_state_names[ - rkcg->rkcg_join_state], + rd_kafka_cgrp_join_state_names + [rkcg->rkcg_join_state], rko_orig->rko_u.offset_commit.reason, rd_kafka_err2str(err)); } @@ -3081,26 +2970,23 @@ static void rd_kafka_cgrp_op_handle_OffsetCommit (rd_kafka_t *rk, /* * Error handling */ - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID: /* Revoke assignment and rebalance on unknown member */ rd_kafka_cgrp_set_member_id(rk->rk_cgrp, ""); rd_kafka_cgrp_revoke_all_rejoin_maybe( - rkcg, - rd_true/*assignment is lost*/, - rd_true/*this consumer is initiating*/, - "OffsetCommit error: Unknown member"); + rkcg, rd_true /*assignment is lost*/, + rd_true /*this consumer is initiating*/, + "OffsetCommit error: Unknown member"); break; case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION: /* Revoke assignment and rebalance on illegal generation */ rk->rk_cgrp->rkcg_generation_id = -1; rd_kafka_cgrp_revoke_all_rejoin_maybe( - rkcg, - rd_true/*assignment is lost*/, - rd_true/*this consumer is initiating*/, - "OffsetCommit error: Illegal generation"); + rkcg, rd_true /*assignment is lost*/, + rd_true /*this consumer is initiating*/, + "OffsetCommit error: Illegal generation"); break; case RD_KAFKA_RESP_ERR__IN_PROGRESS: @@ -3126,8 +3012,7 @@ static void rd_kafka_cgrp_op_handle_OffsetCommit (rd_kafka_t *rk, /* Call on_commit interceptors */ if (err != RD_KAFKA_RESP_ERR__NO_OFFSET && - err != RD_KAFKA_RESP_ERR__DESTROY && - offsets && offsets->cnt > 0) + err != RD_KAFKA_RESP_ERR__DESTROY && offsets && offsets->cnt > 0) rd_kafka_interceptors_on_commit(rk, offsets, err); /* Keep track of outstanding commits */ @@ -3150,8 +3035,8 @@ static void rd_kafka_cgrp_op_handle_OffsetCommit (rd_kafka_t *rk, rko_orig->rko_u.offset_commit.silent_empty)) { /* Propagate commit results (success or permanent error) * unless we're shutting down or commit was empty. 
*/ - rd_kafka_cgrp_propagate_commit_result(rkcg, rko_orig, - err, errcnt, offsets); + rd_kafka_cgrp_propagate_commit_result(rkcg, rko_orig, err, + errcnt, offsets); } rd_kafka_op_destroy(rko_orig); @@ -3160,13 +3045,12 @@ static void rd_kafka_cgrp_op_handle_OffsetCommit (rd_kafka_t *rk, * transition to the next state. */ if (rk->rk_consumer.wait_commit_cnt == 0) rd_kafka_assignment_serve(rk); - - } -static size_t rd_kafka_topic_partition_has_absolute_offset ( - const rd_kafka_topic_partition_t *rktpar, void *opaque) { +static size_t rd_kafka_topic_partition_has_absolute_offset( + const rd_kafka_topic_partition_t *rktpar, + void *opaque) { return rktpar->offset >= 0 ? 1 : 0; } @@ -3185,12 +3069,12 @@ static size_t rd_kafka_topic_partition_has_absolute_offset ( * * Locality: cgrp thread */ -static void rd_kafka_cgrp_offsets_commit (rd_kafka_cgrp_t *rkcg, - rd_kafka_op_t *rko, - rd_bool_t set_offsets, - const char *reason) { - rd_kafka_topic_partition_list_t *offsets; - rd_kafka_resp_err_t err; +static void rd_kafka_cgrp_offsets_commit(rd_kafka_cgrp_t *rkcg, + rd_kafka_op_t *rko, + rd_bool_t set_offsets, + const char *reason) { + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_resp_err_t err; int valid_offsets = 0; int r; rd_kafka_buf_t *rkbuf; @@ -3213,24 +3097,24 @@ static void rd_kafka_cgrp_offsets_commit (rd_kafka_cgrp_t *rkcg, } rko->rko_u.offset_commit.partitions = - rd_kafka_topic_partition_list_copy( - rkcg->rkcg_rk->rk_consumer.assignment.all); + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_rk->rk_consumer.assignment.all); } - offsets = rko->rko_u.offset_commit.partitions; + offsets = rko->rko_u.offset_commit.partitions; if (offsets) { /* Set offsets to commits */ if (set_offsets) rd_kafka_topic_partition_list_set_offsets( - rkcg->rkcg_rk, rko->rko_u.offset_commit.partitions, 1, - RD_KAFKA_OFFSET_INVALID/* def */, - 1 /* is commit */); + rkcg->rkcg_rk, rko->rko_u.offset_commit.partitions, + 1, RD_KAFKA_OFFSET_INVALID /* def */, + 1 /* is commit */); 
/* Check the number of valid offsets to commit. */ valid_offsets = (int)rd_kafka_topic_partition_list_sum( - offsets, - rd_kafka_topic_partition_has_absolute_offset, NULL); + offsets, rd_kafka_topic_partition_has_absolute_offset, + NULL); } if (rd_kafka_fatal_error_code(rkcg->rkcg_rk)) { @@ -3239,14 +3123,14 @@ static void rd_kafka_cgrp_offsets_commit (rd_kafka_cgrp_t *rkcg, goto err; } - if (!valid_offsets) { + if (!valid_offsets) { /* No valid offsets */ err = RD_KAFKA_RESP_ERR__NO_OFFSET; goto err; - } + } if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP) { - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER|RD_KAFKA_DBG_CGRP, + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "COMMIT", "Deferring \"%s\" offset commit " "for %d partition(s) in state %s: " @@ -3254,15 +3138,15 @@ static void rd_kafka_cgrp_offsets_commit (rd_kafka_cgrp_t *rkcg, reason, valid_offsets, rd_kafka_cgrp_state_names[rkcg->rkcg_state]); - if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko, reason)) - return; + if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko, reason)) + return; - err = RD_KAFKA_RESP_ERR__WAIT_COORD; + err = RD_KAFKA_RESP_ERR__WAIT_COORD; goto err; } - rd_rkb_dbg(rkcg->rkcg_coord, CONSUMER|RD_KAFKA_DBG_CGRP, "COMMIT", + rd_rkb_dbg(rkcg->rkcg_coord, CONSUMER | RD_KAFKA_DBG_CGRP, "COMMIT", "Committing offsets for %d partition(s) with " "generation-id %" PRId32 " in join-state %s: %s", valid_offsets, rkcg->rkcg_generation_id, @@ -3271,21 +3155,18 @@ static void rd_kafka_cgrp_offsets_commit (rd_kafka_cgrp_t *rkcg, /* Send OffsetCommit */ r = rd_kafka_OffsetCommitRequest( - rkcg->rkcg_coord, rkcg, offsets, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_op_handle_OffsetCommit, rko, - reason); + rkcg->rkcg_coord, rkcg, offsets, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_op_handle_OffsetCommit, rko, reason); /* Must have valid offsets to commit if we get here */ rd_kafka_assert(NULL, r != 0); return; - err: +err: if (err != RD_KAFKA_RESP_ERR__NO_OFFSET) - 
rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER|RD_KAFKA_DBG_CGRP, - "COMMIT", - "OffsetCommit internal error: %s", + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, + "COMMIT", "OffsetCommit internal error: %s", rd_kafka_err2str(err)); /* Propagate error through dummy buffer object that will @@ -3293,18 +3174,17 @@ static void rd_kafka_cgrp_offsets_commit (rd_kafka_cgrp_t *rkcg, * any recursive calls from op_handle_OffsetCommit -> * assignment_serve() and then back to cgrp_assigned_offsets_commit() */ - reply = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF); + reply = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF); reply->rko_rk = rkcg->rkcg_rk; /* Set rk since the rkbuf will not * have a rkb to reach it. */ reply->rko_err = err; - rkbuf = rd_kafka_buf_new(0, 0); - rkbuf->rkbuf_cb = rd_kafka_cgrp_op_handle_OffsetCommit; - rkbuf->rkbuf_opaque = rko; + rkbuf = rd_kafka_buf_new(0, 0); + rkbuf->rkbuf_cb = rd_kafka_cgrp_op_handle_OffsetCommit; + rkbuf->rkbuf_opaque = rko; reply->rko_u.xbuf.rkbuf = rkbuf; rd_kafka_q_enq(rkcg->rkcg_ops, reply); - } @@ -3317,12 +3197,11 @@ static void rd_kafka_cgrp_offsets_commit (rd_kafka_cgrp_t *rkcg, * * rkcg_wait_commit_cnt will be increased accordingly. 
*/ -void -rd_kafka_cgrp_assigned_offsets_commit ( - rd_kafka_cgrp_t *rkcg, - const rd_kafka_topic_partition_list_t *offsets, - rd_bool_t set_offsets, - const char *reason) { +void rd_kafka_cgrp_assigned_offsets_commit( + rd_kafka_cgrp_t *rkcg, + const rd_kafka_topic_partition_list_t *offsets, + rd_bool_t set_offsets, + const char *reason) { rd_kafka_op_t *rko; if (rd_kafka_cgrp_assignment_is_lost(rkcg)) { @@ -3333,21 +3212,21 @@ rd_kafka_cgrp_assigned_offsets_commit ( return; } - rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT); + rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT); rko->rko_u.offset_commit.reason = rd_strdup(reason); if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_OFFSET_COMMIT) { /* Send results to application */ - rd_kafka_op_set_replyq(rko, rkcg->rkcg_rk->rk_rep, 0); - rko->rko_u.offset_commit.cb = - rkcg->rkcg_rk->rk_conf.offset_commit_cb; /*maybe NULL*/ - rko->rko_u.offset_commit.opaque = rkcg->rkcg_rk->rk_conf.opaque; - } + rd_kafka_op_set_replyq(rko, rkcg->rkcg_rk->rk_rep, 0); + rko->rko_u.offset_commit.cb = + rkcg->rkcg_rk->rk_conf.offset_commit_cb; /*maybe NULL*/ + rko->rko_u.offset_commit.opaque = rkcg->rkcg_rk->rk_conf.opaque; + } /* NULL partitions means current assignment */ if (offsets) rko->rko_u.offset_commit.partitions = - rd_kafka_topic_partition_list_copy(offsets); - rko->rko_u.offset_commit.silent_empty = 1; + rd_kafka_topic_partition_list_copy(offsets); + rko->rko_u.offset_commit.silent_empty = 1; rd_kafka_cgrp_offsets_commit(rkcg, rko, set_offsets, reason); } @@ -3359,8 +3238,8 @@ rd_kafka_cgrp_assigned_offsets_commit ( * * Locality: rdkafka main thread */ -static void rd_kafka_cgrp_offset_commit_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_cgrp_offset_commit_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_cgrp_t *rkcg = arg; /* Don't attempt auto commit when rebalancing or initializing since @@ -3369,9 +3248,8 @@ static void rd_kafka_cgrp_offset_commit_tmr_cb (rd_kafka_timers_t *rkts, 
rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_STEADY) return; - rd_kafka_cgrp_assigned_offsets_commit(rkcg, NULL, - rd_true/*set offsets*/, - "cgrp auto commit timer"); + rd_kafka_cgrp_assigned_offsets_commit( + rkcg, NULL, rd_true /*set offsets*/, "cgrp auto commit timer"); } @@ -3383,7 +3261,7 @@ static void rd_kafka_cgrp_offset_commit_tmr_cb (rd_kafka_timers_t *rkts, * @returns rd_true if a subscribe was scheduled, else false. */ static rd_bool_t -rd_kafka_trigger_waiting_subscribe_maybe (rd_kafka_cgrp_t *rkcg) { +rd_kafka_trigger_waiting_subscribe_maybe(rd_kafka_cgrp_t *rkcg) { if (rkcg->rkcg_next_subscription || rkcg->rkcg_next_unsubscribe) { /* Skip the join backoff */ @@ -3403,9 +3281,8 @@ rd_kafka_trigger_waiting_subscribe_maybe (rd_kafka_cgrp_t *rkcg) { * @returns an error object or NULL on success. */ static rd_kafka_error_t * -rd_kafka_cgrp_incremental_assign (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t - *partitions) { +rd_kafka_cgrp_incremental_assign(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *partitions) { rd_kafka_error_t *error; error = rd_kafka_assignment_add(rkcg->rkcg_rk, partitions); @@ -3416,9 +3293,8 @@ rd_kafka_cgrp_incremental_assign (rd_kafka_cgrp_t *rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL) { rd_kafka_assignment_resume(rkcg->rkcg_rk, "incremental assign called"); - rd_kafka_cgrp_set_join_state( - rkcg, - RD_KAFKA_CGRP_JOIN_STATE_STEADY); + rd_kafka_cgrp_set_join_state(rkcg, + RD_KAFKA_CGRP_JOIN_STATE_STEADY); if (rkcg->rkcg_subscription) { /* If using subscribe(), start a timer to enforce @@ -3429,11 +3305,10 @@ rd_kafka_cgrp_incremental_assign (rd_kafka_cgrp_t *rkcg, * (that is updated on ..poll()). * The timer interval is 2 hz. 
*/ rd_kafka_timer_start( - &rkcg->rkcg_rk->rk_timers, - &rkcg->rkcg_max_poll_interval_tmr, - 500 * 1000ll /* 500ms */, - rd_kafka_cgrp_max_poll_interval_check_tmr_cb, - rkcg); + &rkcg->rkcg_rk->rk_timers, + &rkcg->rkcg_max_poll_interval_tmr, + 500 * 1000ll /* 500ms */, + rd_kafka_cgrp_max_poll_interval_check_tmr_cb, rkcg); } } @@ -3455,10 +3330,9 @@ rd_kafka_cgrp_incremental_assign (rd_kafka_cgrp_t *rkcg, * * @returns An error object or NULL on success. */ -static rd_kafka_error_t * -rd_kafka_cgrp_incremental_unassign (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t - *partitions) { +static rd_kafka_error_t *rd_kafka_cgrp_incremental_unassign( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *partitions) { rd_kafka_error_t *error; error = rd_kafka_assignment_subtract(rkcg->rkcg_rk, partitions); @@ -3470,8 +3344,8 @@ rd_kafka_cgrp_incremental_unassign (rd_kafka_cgrp_t *rkcg, rd_kafka_assignment_resume(rkcg->rkcg_rk, "incremental unassign called"); rd_kafka_cgrp_set_join_state( - rkcg, - RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE); + rkcg, + RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE); } rd_kafka_cgrp_assignment_clear_lost(rkcg, @@ -3485,14 +3359,15 @@ rd_kafka_cgrp_incremental_unassign (rd_kafka_cgrp_t *rkcg, * @brief Call when all incremental unassign operations are done to transition * to the next state. */ -static void rd_kafka_cgrp_incr_unassign_done (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_incr_unassign_done(rd_kafka_cgrp_t *rkcg) { /* If this action was underway when a terminate was initiated, it will * be left to complete. 
Now that's done, unassign all partitions */ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN", "Group \"%s\" is terminating, initiating full " - "unassign", rkcg->rkcg_group_id->str); + "unassign", + rkcg->rkcg_group_id->str); rd_kafka_cgrp_unassign(rkcg); return; } @@ -3509,15 +3384,14 @@ static void rd_kafka_cgrp_incr_unassign_done (rd_kafka_cgrp_t *rkcg) { * a re-join should occur following the assign. */ - rd_kafka_rebalance_op_incr( - rkcg, - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - rkcg->rkcg_rebalance_incr_assignment, - rd_true/*rejoin following assign*/, - "cooperative assign after revoke"); + rd_kafka_rebalance_op_incr(rkcg, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rkcg->rkcg_rebalance_incr_assignment, + rd_true /*rejoin following assign*/, + "cooperative assign after revoke"); rd_kafka_topic_partition_list_destroy( - rkcg->rkcg_rebalance_incr_assignment); + rkcg->rkcg_rebalance_incr_assignment); rkcg->rkcg_rebalance_incr_assignment = NULL; /* Note: rkcg_rebalance_rejoin is actioned / reset in @@ -3541,7 +3415,6 @@ static void rd_kafka_cgrp_incr_unassign_done (rd_kafka_cgrp_t *rkcg) { * a steady state. */ rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_STEADY); - } } @@ -3550,7 +3423,7 @@ static void rd_kafka_cgrp_incr_unassign_done (rd_kafka_cgrp_t *rkcg) { * @brief Call when all absolute (non-incremental) unassign operations are done * to transition to the next state. */ -static void rd_kafka_cgrp_unassign_done (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_unassign_done(rd_kafka_cgrp_t *rkcg) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN", "Group \"%s\": unassign done in state %s " "(join-state %s)", @@ -3583,7 +3456,7 @@ static void rd_kafka_cgrp_unassign_done (rd_kafka_cgrp_t *rkcg) { * @remark This may be called spontaneously without any need for a state * change in the rkcg. 
*/ -void rd_kafka_cgrp_assignment_done (rd_kafka_cgrp_t *rkcg) { +void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNDONE", "Group \"%s\": " "assignment operations done in join-state %s " @@ -3592,8 +3465,7 @@ void rd_kafka_cgrp_assignment_done (rd_kafka_cgrp_t *rkcg) { rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], RD_STR_ToF(rkcg->rkcg_rebalance_rejoin)); - switch (rkcg->rkcg_join_state) - { + switch (rkcg->rkcg_join_state) { case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE: rd_kafka_cgrp_unassign_done(rkcg); break; @@ -3614,10 +3486,10 @@ void rd_kafka_cgrp_assignment_done (rd_kafka_cgrp_t *rkcg) { rd_interval_reset(&rkcg->rkcg_join_intvl); rd_kafka_cgrp_rejoin( - rkcg, - "rejoining group to redistribute " - "previously owned partitions to other " - "group members"); + rkcg, + "rejoining group to redistribute " + "previously owned partitions to other " + "group members"); break; } @@ -3640,8 +3512,7 @@ void rd_kafka_cgrp_assignment_done (rd_kafka_cgrp_t *rkcg) { /** * @brief Remove existing assignment. */ -static rd_kafka_error_t * -rd_kafka_cgrp_unassign (rd_kafka_cgrp_t *rkcg) { +static rd_kafka_error_t *rd_kafka_cgrp_unassign(rd_kafka_cgrp_t *rkcg) { rd_kafka_assignment_clear(rkcg->rkcg_rk); @@ -3649,8 +3520,7 @@ rd_kafka_cgrp_unassign (rd_kafka_cgrp_t *rkcg) { RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL) { rd_kafka_assignment_resume(rkcg->rkcg_rk, "unassign called"); rd_kafka_cgrp_set_join_state( - rkcg, - RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE); + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE); } rd_kafka_cgrp_assignment_clear_lost(rkcg, "unassign() called"); @@ -3666,15 +3536,14 @@ rd_kafka_cgrp_unassign (rd_kafka_cgrp_t *rkcg) { * @returns NULL on success or an error if a fatal error has been raised. 
*/ static rd_kafka_error_t * -rd_kafka_cgrp_assign (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *assignment) { +rd_kafka_cgrp_assign(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment) { rd_kafka_error_t *error; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "ASSIGN", + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "ASSIGN", "Group \"%s\": new assignment of %d partition(s) " "in join-state %s", - rkcg->rkcg_group_id->str, - assignment ? assignment->cnt : 0, + rkcg->rkcg_group_id->str, assignment ? assignment->cnt : 0, rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); /* Clear existing assignment, if any, and serve its removals. */ @@ -3690,9 +3559,8 @@ rd_kafka_cgrp_assign (rd_kafka_cgrp_t *rkcg, if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL) { rd_kafka_assignment_resume(rkcg->rkcg_rk, "assign called"); - rd_kafka_cgrp_set_join_state( - rkcg, - RD_KAFKA_CGRP_JOIN_STATE_STEADY); + rd_kafka_cgrp_set_join_state(rkcg, + RD_KAFKA_CGRP_JOIN_STATE_STEADY); if (rkcg->rkcg_subscription) { /* If using subscribe(), start a timer to enforce @@ -3703,11 +3571,10 @@ rd_kafka_cgrp_assign (rd_kafka_cgrp_t *rkcg, * (that is updated on ..poll()). * The timer interval is 2 hz. */ rd_kafka_timer_start( - &rkcg->rkcg_rk->rk_timers, - &rkcg->rkcg_max_poll_interval_tmr, - 500 * 1000ll /* 500ms */, - rd_kafka_cgrp_max_poll_interval_check_tmr_cb, - rkcg); + &rkcg->rkcg_rk->rk_timers, + &rkcg->rkcg_max_poll_interval_tmr, + 500 * 1000ll /* 500ms */, + rd_kafka_cgrp_max_poll_interval_check_tmr_cb, rkcg); } } @@ -3722,27 +3589,22 @@ rd_kafka_cgrp_assign (rd_kafka_cgrp_t *rkcg, * * @remark \p rktparlist may be NULL. 
*/ -static map_toppar_member_info_t * -rd_kafka_toppar_list_to_toppar_member_info_map (rd_kafka_topic_partition_list_t - *rktparlist) { +static map_toppar_member_info_t *rd_kafka_toppar_list_to_toppar_member_info_map( + rd_kafka_topic_partition_list_t *rktparlist) { map_toppar_member_info_t *map = rd_calloc(1, sizeof(*map)); const rd_kafka_topic_partition_t *rktpar; - RD_MAP_INIT( - map, - rktparlist ? rktparlist->cnt : 0, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - PartitionMemberInfo_free); + RD_MAP_INIT(map, rktparlist ? rktparlist->cnt : 0, + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, + PartitionMemberInfo_free); if (!rktparlist) return map; RD_KAFKA_TPLIST_FOREACH(rktpar, rktparlist) - RD_MAP_SET(map, - rd_kafka_topic_partition_copy(rktpar), - PartitionMemberInfo_new(NULL, rd_false)); + RD_MAP_SET(map, rd_kafka_topic_partition_copy(rktpar), + PartitionMemberInfo_new(NULL, rd_false)); return map; } @@ -3753,15 +3615,13 @@ rd_kafka_toppar_list_to_toppar_member_info_map (rd_kafka_topic_partition_list_t * to the keys of \p map. */ static rd_kafka_topic_partition_list_t * -rd_kafka_toppar_member_info_map_to_list (map_toppar_member_info_t *map) { +rd_kafka_toppar_member_info_map_to_list(map_toppar_member_info_t *map) { const rd_kafka_topic_partition_t *k; rd_kafka_topic_partition_list_t *list = - rd_kafka_topic_partition_list_new((int)RD_MAP_CNT(map)); + rd_kafka_topic_partition_list_new((int)RD_MAP_CNT(map)); RD_MAP_FOREACH_KEY(k, map) { - rd_kafka_topic_partition_list_add(list, - k->topic, - k->partition); + rd_kafka_topic_partition_list_add(list, k->topic, k->partition); } return list; @@ -3772,10 +3632,9 @@ rd_kafka_toppar_member_info_map_to_list (map_toppar_member_info_t *map) { * @brief Handle a rebalance-triggered partition assignment * (COOPERATIVE case). 
*/ -static void -rd_kafka_cgrp_handle_assignment_cooperative (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t - *assignment) { +static void rd_kafka_cgrp_handle_assignment_cooperative( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment) { map_toppar_member_info_t *new_assignment_set; map_toppar_member_info_t *old_assignment_set; map_toppar_member_info_t *newly_added_set; @@ -3784,29 +3643,24 @@ rd_kafka_cgrp_handle_assignment_cooperative (rd_kafka_cgrp_t *rkcg, rd_kafka_topic_partition_list_t *revoked; new_assignment_set = - rd_kafka_toppar_list_to_toppar_member_info_map(assignment); + rd_kafka_toppar_list_to_toppar_member_info_map(assignment); - old_assignment_set = - rd_kafka_toppar_list_to_toppar_member_info_map( - rkcg->rkcg_group_assignment); + old_assignment_set = rd_kafka_toppar_list_to_toppar_member_info_map( + rkcg->rkcg_group_assignment); - newly_added_set = - rd_kafka_member_partitions_subtract( - new_assignment_set, old_assignment_set); - revoked_set = - rd_kafka_member_partitions_subtract( - old_assignment_set, new_assignment_set); + newly_added_set = rd_kafka_member_partitions_subtract( + new_assignment_set, old_assignment_set); + revoked_set = rd_kafka_member_partitions_subtract(old_assignment_set, + new_assignment_set); newly_added = rd_kafka_toppar_member_info_map_to_list(newly_added_set); - revoked = rd_kafka_toppar_member_info_map_to_list(revoked_set); + revoked = rd_kafka_toppar_member_info_map_to_list(revoked_set); rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COOPASSIGN", "Group \"%s\": incremental assignment: %d newly added, " "%d revoked partitions based on assignment of %d " "partitions", - rkcg->rkcg_group_id->str, - newly_added->cnt, - revoked->cnt, + rkcg->rkcg_group_id->str, newly_added->cnt, revoked->cnt, assignment->cnt); if (revoked->cnt > 0) { @@ -3815,24 +3669,24 @@ rd_kafka_cgrp_handle_assignment_cooperative (rd_kafka_cgrp_t *rkcg, * unassign op. 
*/ rkcg->rkcg_rebalance_incr_assignment = newly_added; - newly_added = NULL; + newly_added = NULL; - rd_kafka_rebalance_op_incr( - rkcg, - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - revoked, rd_false/*no rejoin following - unassign*/, "sync group revoke"); + rd_kafka_rebalance_op_incr(rkcg, + RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + revoked, rd_false /*no rejoin + following unassign*/ + , + "sync group revoke"); } else { /* There are no revoked partitions - trigger the assign * rebalance op, and flag that the group does not need * to be re-joined */ - rd_kafka_rebalance_op_incr(rkcg, - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - newly_added, - rd_false/*no rejoin following assign*/, - "sync group assign"); + rd_kafka_rebalance_op_incr( + rkcg, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, newly_added, + rd_false /*no rejoin following assign*/, + "sync group assign"); } if (newly_added) @@ -3850,19 +3704,19 @@ rd_kafka_cgrp_handle_assignment_cooperative (rd_kafka_cgrp_t *rkcg, * * Will replace the current group assignment, if any. */ -static void rd_kafka_cgrp_group_assignment_set ( - rd_kafka_cgrp_t *rkcg, - const rd_kafka_topic_partition_list_t *partitions) { +static void rd_kafka_cgrp_group_assignment_set( + rd_kafka_cgrp_t *rkcg, + const rd_kafka_topic_partition_list_t *partitions) { if (rkcg->rkcg_group_assignment) rd_kafka_topic_partition_list_destroy( - rkcg->rkcg_group_assignment); + rkcg->rkcg_group_assignment); if (partitions) { rkcg->rkcg_group_assignment = - rd_kafka_topic_partition_list_copy(partitions); + rd_kafka_topic_partition_list_copy(partitions); rd_kafka_topic_partition_list_sort_by_topic( - rkcg->rkcg_group_assignment); + rkcg->rkcg_group_assignment); rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNMENT", "Group \"%s\": setting group assignment to %d " "partition(s)", @@ -3877,14 +3731,14 @@ static void rd_kafka_cgrp_group_assignment_set ( } rd_kafka_wrlock(rkcg->rkcg_rk); - rkcg->rkcg_c.assignment_size = rkcg->rkcg_group_assignment ? 
- rkcg->rkcg_group_assignment->cnt : 0; + rkcg->rkcg_c.assignment_size = + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0; rd_kafka_wrunlock(rkcg->rkcg_rk); if (rkcg->rkcg_group_assignment) rd_kafka_topic_partition_list_log( - rkcg->rkcg_rk, "GRPASSIGNMENT", RD_KAFKA_DBG_CGRP, - rkcg->rkcg_group_assignment); + rkcg->rkcg_rk, "GRPASSIGNMENT", RD_KAFKA_DBG_CGRP, + rkcg->rkcg_group_assignment); } @@ -3899,20 +3753,19 @@ static void rd_kafka_cgrp_group_assignment_set ( * To be used with incremental rebalancing. * */ -static void rd_kafka_cgrp_group_assignment_modify ( - rd_kafka_cgrp_t *rkcg, - rd_bool_t add, - const rd_kafka_topic_partition_list_t *partitions) { +static void rd_kafka_cgrp_group_assignment_modify( + rd_kafka_cgrp_t *rkcg, + rd_bool_t add, + const rd_kafka_topic_partition_list_t *partitions) { const rd_kafka_topic_partition_t *rktpar; int precnt; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNMENT", - "Group \"%s\": %d partition(s) being %s group assignment " - "of %d partition(s)", - rkcg->rkcg_group_id->str, - partitions->cnt, - add ? "added to" : "removed from", - rkcg->rkcg_group_assignment ? - rkcg->rkcg_group_assignment->cnt : 0); + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "ASSIGNMENT", + "Group \"%s\": %d partition(s) being %s group assignment " + "of %d partition(s)", + rkcg->rkcg_group_id->str, partitions->cnt, + add ? "added to" : "removed from", + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0); if (partitions == rkcg->rkcg_group_assignment) { /* \p partitions is the actual assignment, which @@ -3923,9 +3776,8 @@ static void rd_kafka_cgrp_group_assignment_modify ( return; } - if (add && - (!rkcg->rkcg_group_assignment || - rkcg->rkcg_group_assignment->cnt == 0)) { + if (add && (!rkcg->rkcg_group_assignment || + rkcg->rkcg_group_assignment->cnt == 0)) { /* Adding to an empty assignment is a set operation. 
*/ rd_kafka_cgrp_group_assignment_set(rkcg, partitions); return; @@ -3943,22 +3795,20 @@ static void rd_kafka_cgrp_group_assignment_modify ( int idx; idx = rd_kafka_topic_partition_list_find_idx( - rkcg->rkcg_group_assignment, - rktpar->topic, - rktpar->partition); + rkcg->rkcg_group_assignment, rktpar->topic, + rktpar->partition); if (add) { rd_assert(idx == -1); rd_kafka_topic_partition_list_add_copy( - rkcg->rkcg_group_assignment, rktpar); + rkcg->rkcg_group_assignment, rktpar); } else { rd_assert(idx != -1); rd_kafka_topic_partition_list_del_by_idx( - rkcg->rkcg_group_assignment, idx); - + rkcg->rkcg_group_assignment, idx); } } @@ -3971,22 +3821,22 @@ static void rd_kafka_cgrp_group_assignment_modify ( if (rkcg->rkcg_group_assignment->cnt == 0) { rd_kafka_topic_partition_list_destroy( - rkcg->rkcg_group_assignment); + rkcg->rkcg_group_assignment); rkcg->rkcg_group_assignment = NULL; } else if (add) rd_kafka_topic_partition_list_sort_by_topic( - rkcg->rkcg_group_assignment); + rkcg->rkcg_group_assignment); rd_kafka_wrlock(rkcg->rkcg_rk); - rkcg->rkcg_c.assignment_size = rkcg->rkcg_group_assignment ? - rkcg->rkcg_group_assignment->cnt : 0; + rkcg->rkcg_c.assignment_size = + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0; rd_kafka_wrunlock(rkcg->rkcg_rk); if (rkcg->rkcg_group_assignment) rd_kafka_topic_partition_list_log( - rkcg->rkcg_rk, "GRPASSIGNMENT", RD_KAFKA_DBG_CGRP, - rkcg->rkcg_group_assignment); + rkcg->rkcg_rk, "GRPASSIGNMENT", RD_KAFKA_DBG_CGRP, + rkcg->rkcg_group_assignment); } @@ -4002,13 +3852,12 @@ static void rd_kafka_cgrp_group_assignment_modify ( * updating the assign():ment. 
*/ static void -rd_kafka_cgrp_handle_assignment (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *assignment) { +rd_kafka_cgrp_handle_assignment(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment) { if (rd_kafka_cgrp_rebalance_protocol(rkcg) == RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE) { - rd_kafka_cgrp_handle_assignment_cooperative(rkcg, - assignment); + rd_kafka_cgrp_handle_assignment_cooperative(rkcg, assignment); } else { rd_kafka_rebalance_op(rkcg, @@ -4023,8 +3872,8 @@ rd_kafka_cgrp_handle_assignment (rd_kafka_cgrp_t *rkcg, * * Locality: cgrp thread */ -static void rd_kafka_cgrp_group_leader_reset (rd_kafka_cgrp_t *rkcg, - const char *reason) { +static void rd_kafka_cgrp_group_leader_reset(rd_kafka_cgrp_t *rkcg, + const char *reason) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "GRPLEADER", "Group \"%.*s\": resetting group leader info: %s", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason); @@ -4032,9 +3881,9 @@ static void rd_kafka_cgrp_group_leader_reset (rd_kafka_cgrp_t *rkcg, if (rkcg->rkcg_group_leader.members) { int i; - for (i = 0 ; i < rkcg->rkcg_group_leader.member_cnt ; i++) - rd_kafka_group_member_clear(&rkcg->rkcg_group_leader. - members[i]); + for (i = 0; i < rkcg->rkcg_group_leader.member_cnt; i++) + rd_kafka_group_member_clear( + &rkcg->rkcg_group_leader.members[i]); rkcg->rkcg_group_leader.member_cnt = 0; rd_free(rkcg->rkcg_group_leader.members); rkcg->rkcg_group_leader.members = NULL; @@ -4045,13 +3894,12 @@ static void rd_kafka_cgrp_group_leader_reset (rd_kafka_cgrp_t *rkcg, /** * @brief React to a RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS broker response. 
*/ -static void rd_kafka_cgrp_group_is_rebalancing (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_group_is_rebalancing(rd_kafka_cgrp_t *rkcg) { if (rd_kafka_cgrp_rebalance_protocol(rkcg) == RD_KAFKA_REBALANCE_PROTOCOL_EAGER) { - rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, - rd_false/*lost*/, - rd_false/*initiating*/, + rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_false /*lost*/, + rd_false /*initiating*/, "rebalance in progress"); return; } @@ -4062,20 +3910,21 @@ static void rd_kafka_cgrp_group_is_rebalancing (rd_kafka_cgrp_t *rkcg) { * not prior to JoinGroup as with the EAGER case. */ if (RD_KAFKA_CGRP_REBALANCING(rkcg)) { - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER|RD_KAFKA_DBG_CGRP, - "REBALANCE", "Group \"%.*s\": skipping " - "COOPERATIVE rebalance in state %s " - "(join-state %s)%s%s%s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[ - rkcg->rkcg_join_state], - RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) - ? " (awaiting assign call)" : "", - (rkcg->rkcg_rebalance_incr_assignment != NULL) - ? " (incremental assignment pending)": "", - rkcg->rkcg_rebalance_rejoin - ? " (rebalance rejoin)": ""); + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE", + "Group \"%.*s\": skipping " + "COOPERATIVE rebalance in state %s " + "(join-state %s)%s%s%s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) + ? " (awaiting assign call)" + : "", + (rkcg->rkcg_rebalance_incr_assignment != NULL) + ? " (incremental assignment pending)" + : "", + rkcg->rkcg_rebalance_rejoin ? " (rebalance rejoin)" : ""); return; } @@ -4090,35 +3939,36 @@ static void rd_kafka_cgrp_group_is_rebalancing (rd_kafka_cgrp_t *rkcg) { * rejoin. 
Does nothing if a rebalance workflow is already in * progress */ -static void rd_kafka_cgrp_revoke_all_rejoin_maybe (rd_kafka_cgrp_t *rkcg, - rd_bool_t assignment_lost, - rd_bool_t initiating, - const char *reason) { +static void rd_kafka_cgrp_revoke_all_rejoin_maybe(rd_kafka_cgrp_t *rkcg, + rd_bool_t assignment_lost, + rd_bool_t initiating, + const char *reason) { if (RD_KAFKA_CGRP_REBALANCING(rkcg)) { rd_kafka_dbg( - rkcg->rkcg_rk, CONSUMER|RD_KAFKA_DBG_CGRP, - "REBALANCE", "Group \"%.*s\": rebalance (%s) " - "already in progress, skipping in state %s " - "(join-state %s) with %d assigned partition(s)%s%s%s: " - "%s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_rebalance_protocol2str( - rd_kafka_cgrp_rebalance_protocol(rkcg)), - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_group_assignment ? - rkcg->rkcg_group_assignment->cnt : 0, - assignment_lost ? " (lost)" : "", - rkcg->rkcg_rebalance_incr_assignment ? - ", incremental assignment in progress" : "", - rkcg->rkcg_rebalance_rejoin ? - ", rejoin on rebalance" : "", - reason); + rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE", + "Group \"%.*s\": rebalance (%s) " + "already in progress, skipping in state %s " + "(join-state %s) with %d assigned partition(s)%s%s%s: " + "%s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_rebalance_protocol2str( + rd_kafka_cgrp_rebalance_protocol(rkcg)), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_group_assignment + ? rkcg->rkcg_group_assignment->cnt + : 0, + assignment_lost ? " (lost)" : "", + rkcg->rkcg_rebalance_incr_assignment + ? ", incremental assignment in progress" + : "", + rkcg->rkcg_rebalance_rejoin ? 
", rejoin on rebalance" : "", + reason); return; } - rd_kafka_cgrp_revoke_all_rejoin(rkcg, assignment_lost, - initiating, reason); + rd_kafka_cgrp_revoke_all_rejoin(rkcg, assignment_lost, initiating, + reason); } @@ -4127,30 +3977,29 @@ static void rd_kafka_cgrp_revoke_all_rejoin_maybe (rd_kafka_cgrp_t *rkcg, * revoke partitions, and transition to INIT state for (eventual) * rejoin. */ -static void rd_kafka_cgrp_revoke_all_rejoin (rd_kafka_cgrp_t *rkcg, - rd_bool_t assignment_lost, - rd_bool_t initiating, - const char *reason) { +static void rd_kafka_cgrp_revoke_all_rejoin(rd_kafka_cgrp_t *rkcg, + rd_bool_t assignment_lost, + rd_bool_t initiating, + const char *reason) { rd_kafka_rebalance_protocol_t protocol = - rd_kafka_cgrp_rebalance_protocol(rkcg); + rd_kafka_cgrp_rebalance_protocol(rkcg); rd_bool_t terminating = - unlikely(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE); + unlikely(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE); - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER|RD_KAFKA_DBG_CGRP, "REBALANCE", - "Group \"%.*s\" %s (%s) in state %s (join-state %s) " - "with %d assigned partition(s)%s: %s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - initiating ? "initiating rebalance" : "is rebalancing", - rd_kafka_rebalance_protocol2str(protocol), - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_group_assignment ? - rkcg->rkcg_group_assignment->cnt : 0, - assignment_lost ? " (lost)" : "", - reason); + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE", + "Group \"%.*s\" %s (%s) in state %s (join-state %s) " + "with %d assigned partition(s)%s: %s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + initiating ? "initiating rebalance" : "is rebalancing", + rd_kafka_rebalance_protocol2str(protocol), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0, + assignment_lost ? 
" (lost)" : "", reason); rd_snprintf(rkcg->rkcg_c.rebalance_reason, sizeof(rkcg->rkcg_c.rebalance_reason), "%s", reason); @@ -4163,8 +4012,8 @@ static void rd_kafka_cgrp_revoke_all_rejoin (rd_kafka_cgrp_t *rkcg, if (assignment_lost) rd_kafka_cgrp_assignment_set_lost( - rkcg, "%s: revoking assignment and rejoining", - reason); + rkcg, "%s: revoking assignment and rejoining", + reason); /* Schedule application rebalance op if there is an existing * assignment (albeit perhaps empty) and there is no @@ -4172,9 +4021,8 @@ static void rd_kafka_cgrp_revoke_all_rejoin (rd_kafka_cgrp_t *rkcg, if (rkcg->rkcg_group_assignment && !RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg)) { rd_kafka_rebalance_op( - rkcg, - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - rkcg->rkcg_group_assignment, reason); + rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + rkcg->rkcg_group_assignment, reason); } else { /* Skip the join backoff */ rd_interval_reset(&rkcg->rkcg_join_intvl); @@ -4191,16 +4039,15 @@ static void rd_kafka_cgrp_revoke_all_rejoin (rd_kafka_cgrp_t *rkcg, /* All partitions should never be revoked unless terminating, leaving * the group, or on assignment lost. Another scenario represents a * logic error. Fail fast in this case. 
*/ - if (!(terminating || - assignment_lost || + if (!(terminating || assignment_lost || (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE))) { rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "REBALANCE", "Group \"%s\": unexpected instruction to revoke " "current assignment and rebalance " "(terminating=%d, assignment_lost=%d, " "LEAVE_ON_UNASSIGN_DONE=%d)", - rkcg->rkcg_group_id->str, - terminating, assignment_lost, + rkcg->rkcg_group_id->str, terminating, + assignment_lost, (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE)); rd_dassert(!*"BUG: unexpected instruction to revoke " @@ -4211,35 +4058,35 @@ static void rd_kafka_cgrp_revoke_all_rejoin (rd_kafka_cgrp_t *rkcg, rkcg->rkcg_group_assignment->cnt > 0) { if (assignment_lost) rd_kafka_cgrp_assignment_set_lost( - rkcg, - "%s: revoking incremental assignment " - "and rejoining", reason); - - rd_kafka_dbg(rkcg->rkcg_rk, - CONSUMER|RD_KAFKA_DBG_CGRP, - "REBALANCE", "Group \"%.*s\": revoking " - "all %d partition(s)%s%s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rkcg, + "%s: revoking incremental assignment " + "and rejoining", + reason); + + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, + "REBALANCE", + "Group \"%.*s\": revoking " + "all %d partition(s)%s%s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rkcg->rkcg_group_assignment->cnt, - terminating ? " (terminating)" : "", - assignment_lost ? " (assignment lost)" : ""); + terminating ? " (terminating)" : "", + assignment_lost ? " (assignment lost)" : ""); rd_kafka_rebalance_op_incr( - rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - rkcg->rkcg_group_assignment, - terminating ? rd_false : rd_true /*rejoin*/, - reason); + rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + rkcg->rkcg_group_assignment, + terminating ? rd_false : rd_true /*rejoin*/, reason); return; } if (terminating) { /* If terminating, then don't rejoin group. 
*/ - rd_kafka_dbg(rkcg->rkcg_rk, - CONSUMER|RD_KAFKA_DBG_CGRP, - "REBALANCE", "Group \"%.*s\": consumer is " - "terminating, skipping rejoin", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id)); + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, + "REBALANCE", + "Group \"%.*s\": consumer is " + "terminating, skipping rejoin", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id)); return; } @@ -4254,10 +4101,10 @@ static void rd_kafka_cgrp_revoke_all_rejoin (rd_kafka_cgrp_t *rkcg, * @locks none */ static void -rd_kafka_cgrp_max_poll_interval_check_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +rd_kafka_cgrp_max_poll_interval_check_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_cgrp_t *rkcg = arg; - rd_kafka_t *rk = rkcg->rkcg_rk; + rd_kafka_t *rk = rkcg->rkcg_rk; int exceeded; exceeded = rd_kafka_max_poll_exceeded(rk); @@ -4274,8 +4121,8 @@ rd_kafka_cgrp_max_poll_interval_check_tmr_cb (rd_kafka_timers_t *rkts, rk->rk_conf.max_poll_interval_ms, exceeded); rd_kafka_consumer_err(rkcg->rkcg_q, RD_KAFKA_NODEID_UA, - RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED, - 0, NULL, NULL, RD_KAFKA_OFFSET_INVALID, + RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED, 0, NULL, + NULL, RD_KAFKA_OFFSET_INVALID, "Application maximum poll interval (%dms) " "exceeded by %dms", rk->rk_conf.max_poll_interval_ms, exceeded); @@ -4283,7 +4130,7 @@ rd_kafka_cgrp_max_poll_interval_check_tmr_cb (rd_kafka_timers_t *rkts, rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED; rd_kafka_timer_stop(rkts, &rkcg->rkcg_max_poll_interval_tmr, - 1/*lock*/); + 1 /*lock*/); /* Leave the group before calling rebalance since the standard leave * will be triggered first after the rebalance callback has been served. 
@@ -4301,9 +4148,8 @@ rd_kafka_cgrp_max_poll_interval_check_tmr_cb (rd_kafka_timers_t *rkts, rd_kafka_cgrp_set_member_id(rkcg, ""); /* Trigger rebalance */ - rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, - rd_true/*lost*/, - rd_true/*initiating*/, + rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/, + rd_true /*initiating*/, "max.poll.interval.ms exceeded"); } @@ -4319,13 +4165,13 @@ rd_kafka_cgrp_max_poll_interval_check_tmr_cb (rd_kafka_timers_t *rkts, * * @remark Assumes ownership of \p errored. */ -static void -rd_kafka_propagate_consumer_topic_errors ( - rd_kafka_cgrp_t *rkcg, rd_kafka_topic_partition_list_t *errored, - const char *error_prefix) { +static void rd_kafka_propagate_consumer_topic_errors( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *errored, + const char *error_prefix) { int i; - for (i = 0 ; i < errored->cnt ; i++) { + for (i = 0; i < errored->cnt; i++) { rd_kafka_topic_partition_t *topic = &errored->elems[i]; rd_kafka_topic_partition_t *prev; @@ -4341,26 +4187,21 @@ rd_kafka_propagate_consumer_topic_errors ( /* Check if this topic errored previously */ prev = rd_kafka_topic_partition_list_find( - rkcg->rkcg_errored_topics, topic->topic, - RD_KAFKA_PARTITION_UA); + rkcg->rkcg_errored_topics, topic->topic, + RD_KAFKA_PARTITION_UA); if (prev && prev->err == topic->err) continue; /* This topic already reported same error */ - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER|RD_KAFKA_DBG_TOPIC, - "TOPICERR", - "%s: %s: %s", - error_prefix, topic->topic, - rd_kafka_err2str(topic->err)); + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_TOPIC, + "TOPICERR", "%s: %s: %s", error_prefix, + topic->topic, rd_kafka_err2str(topic->err)); /* Send consumer error to application */ - rd_kafka_consumer_err(rkcg->rkcg_q, RD_KAFKA_NODEID_UA, - topic->err, 0, - topic->topic, NULL, - RD_KAFKA_OFFSET_INVALID, - "%s: %s: %s", - error_prefix, topic->topic, - rd_kafka_err2str(topic->err)); + rd_kafka_consumer_err( + rkcg->rkcg_q, RD_KAFKA_NODEID_UA, 
topic->err, 0, + topic->topic, NULL, RD_KAFKA_OFFSET_INVALID, "%s: %s: %s", + error_prefix, topic->topic, rd_kafka_err2str(topic->err)); } rd_kafka_topic_partition_list_destroy(rkcg->rkcg_errored_topics); @@ -4372,26 +4213,26 @@ rd_kafka_propagate_consumer_topic_errors ( * @brief Work out the topics currently subscribed to that do not * match any pattern in \p subscription. */ -static rd_kafka_topic_partition_list_t * -rd_kafka_cgrp_get_unsubscribing_topics (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t - *subscription) { +static rd_kafka_topic_partition_list_t *rd_kafka_cgrp_get_unsubscribing_topics( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *subscription) { int i; rd_kafka_topic_partition_list_t *result; result = rd_kafka_topic_partition_list_new( - rkcg->rkcg_subscribed_topics->rl_cnt); + rkcg->rkcg_subscribed_topics->rl_cnt); /* TODO: Something that isn't O(N*M) */ - for (i=0; irkcg_subscribed_topics->rl_cnt; i++) { + for (i = 0; i < rkcg->rkcg_subscribed_topics->rl_cnt; i++) { int j; - const char *topic = ((rd_kafka_topic_info_t *) - rkcg->rkcg_subscribed_topics->rl_elems[i])->topic; + const char *topic = + ((rd_kafka_topic_info_t *) + rkcg->rkcg_subscribed_topics->rl_elems[i]) + ->topic; - for (j=0; jcnt; j++) { + for (j = 0; j < subscription->cnt; j++) { const char *pattern = subscription->elems[j].topic; - if (rd_kafka_topic_match(rkcg->rkcg_rk, - pattern, + if (rd_kafka_topic_match(rkcg->rkcg_rk, pattern, topic)) { break; } @@ -4399,8 +4240,7 @@ rd_kafka_cgrp_get_unsubscribing_topics (rd_kafka_cgrp_t *rkcg, if (j == subscription->cnt) rd_kafka_topic_partition_list_add( - result, topic, - RD_KAFKA_PARTITION_UA); + result, topic, RD_KAFKA_PARTITION_UA); } if (result->cnt == 0) { @@ -4418,8 +4258,8 @@ rd_kafka_cgrp_get_unsubscribing_topics (rd_kafka_cgrp_t *rkcg, */ static rd_kafka_topic_partition_list_t * rd_kafka_cgrp_calculate_subscribe_revoking_partitions( - rd_kafka_cgrp_t *rkcg, - const rd_kafka_topic_partition_list_t 
*unsubscribing) { + rd_kafka_cgrp_t *rkcg, + const rd_kafka_topic_partition_list_t *unsubscribing) { rd_kafka_topic_partition_list_t *revoking; const rd_kafka_topic_partition_t *rktpar; @@ -4430,8 +4270,8 @@ rd_kafka_cgrp_calculate_subscribe_revoking_partitions( rkcg->rkcg_group_assignment->cnt == 0) return NULL; - revoking = rd_kafka_topic_partition_list_new( - rkcg->rkcg_group_assignment->cnt); + revoking = + rd_kafka_topic_partition_list_new(rkcg->rkcg_group_assignment->cnt); /* TODO: Something that isn't O(N*M). */ RD_KAFKA_TPLIST_FOREACH(rktpar, unsubscribing) { @@ -4440,9 +4280,8 @@ rd_kafka_cgrp_calculate_subscribe_revoking_partitions( RD_KAFKA_TPLIST_FOREACH(assigned, rkcg->rkcg_group_assignment) { if (!strcmp(assigned->topic, rktpar->topic)) { rd_kafka_topic_partition_list_add( - revoking, - assigned->topic, - assigned->partition); + revoking, assigned->topic, + assigned->partition); continue; } } @@ -4464,9 +4303,8 @@ rd_kafka_cgrp_calculate_subscribe_revoking_partitions( * @remark Assumes ownership of \p rktparlist. */ static rd_kafka_resp_err_t -rd_kafka_cgrp_modify_subscription (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t - *rktparlist) { +rd_kafka_cgrp_modify_subscription(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *rktparlist) { rd_kafka_topic_partition_list_t *unsubscribing_topics; rd_kafka_topic_partition_list_t *revoking; rd_list_t *tinfos; @@ -4481,32 +4319,31 @@ rd_kafka_cgrp_modify_subscription (rd_kafka_cgrp_t *rkcg, /* Topics in rkcg_subscribed_topics that don't match any pattern in the new subscription. */ - unsubscribing_topics = rd_kafka_cgrp_get_unsubscribing_topics( - rkcg, rktparlist); + unsubscribing_topics = + rd_kafka_cgrp_get_unsubscribing_topics(rkcg, rktparlist); /* Currently assigned topic partitions that are no longer desired. 
*/ revoking = rd_kafka_cgrp_calculate_subscribe_revoking_partitions( - rkcg, unsubscribing_topics); + rkcg, unsubscribing_topics); rd_kafka_topic_partition_list_destroy(rkcg->rkcg_subscription); rkcg->rkcg_subscription = rktparlist; if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age, "modify subscription") == 1) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "MODSUB", "Group \"%.*s\": postponing join until " "up-to-date metadata is available", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id)); - rd_assert(rkcg->rkcg_join_state == - RD_KAFKA_CGRP_JOIN_STATE_INIT || - /* Possible via rd_kafka_cgrp_modify_subscription */ - rkcg->rkcg_join_state == - RD_KAFKA_CGRP_JOIN_STATE_STEADY); + rd_assert( + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT || + /* Possible via rd_kafka_cgrp_modify_subscription */ + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY); rd_kafka_cgrp_set_join_state( - rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); /* Revoke/join will occur after metadata refresh completes */ @@ -4514,19 +4351,18 @@ rd_kafka_cgrp_modify_subscription (rd_kafka_cgrp_t *rkcg, rd_kafka_topic_partition_list_destroy(revoking); if (unsubscribing_topics) rd_kafka_topic_partition_list_destroy( - unsubscribing_topics); + unsubscribing_topics); return RD_KAFKA_RESP_ERR_NO_ERROR; } - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", "Group \"%.*s\": modifying subscription of size %d to " "new subscription of size %d, removing %d topic(s), " "revoking %d partition(s) (join-state %s)", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - old_cnt, rkcg->rkcg_subscription->cnt, - unsubscribing_topics ? - unsubscribing_topics->cnt : 0, + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), old_cnt, + rkcg->rkcg_subscription->cnt, + unsubscribing_topics ? 
unsubscribing_topics->cnt : 0, revoking ? revoking->cnt : 0, rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); @@ -4542,38 +4378,33 @@ rd_kafka_cgrp_modify_subscription (rd_kafka_cgrp_t *rkcg, errored = rd_kafka_topic_partition_list_new(0); if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) - rd_kafka_metadata_topic_match(rkcg->rkcg_rk, - tinfos, rkcg->rkcg_subscription, - errored); + rd_kafka_metadata_topic_match(rkcg->rkcg_rk, tinfos, + rkcg->rkcg_subscription, errored); else - rd_kafka_metadata_topic_filter(rkcg->rkcg_rk, - tinfos, - rkcg->rkcg_subscription, - errored); + rd_kafka_metadata_topic_filter( + rkcg->rkcg_rk, tinfos, rkcg->rkcg_subscription, errored); /* Propagate consumer errors for any non-existent or errored topics. * The function takes ownership of errored. */ rd_kafka_propagate_consumer_topic_errors( - rkcg, errored, "Subscribed topic not available"); + rkcg, errored, "Subscribed topic not available"); - if (rd_kafka_cgrp_update_subscribed_topics(rkcg, tinfos) && - !revoking) { + if (rd_kafka_cgrp_update_subscribed_topics(rkcg, tinfos) && !revoking) { rd_kafka_cgrp_rejoin(rkcg, "Subscription modified"); return RD_KAFKA_RESP_ERR_NO_ERROR; } if (revoking) { - rd_kafka_dbg(rkcg->rkcg_rk, - CONSUMER|RD_KAFKA_DBG_CGRP, - "REBALANCE", "Group \"%.*s\" revoking " + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, + "REBALANCE", + "Group \"%.*s\" revoking " "%d of %d partition(s)", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - revoking->cnt, - rkcg->rkcg_group_assignment->cnt); + revoking->cnt, rkcg->rkcg_group_assignment->cnt); - rd_kafka_rebalance_op_incr(rkcg, - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - revoking, rd_true/*rejoin*/, "subscribe"); + rd_kafka_rebalance_op_incr( + rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, revoking, + rd_true /*rejoin*/, "subscribe"); rd_kafka_topic_partition_list_destroy(revoking); } @@ -4585,8 +4416,8 @@ rd_kafka_cgrp_modify_subscription (rd_kafka_cgrp_t *rkcg, /** * Remove existing topic 
subscription. */ -static rd_kafka_resp_err_t -rd_kafka_cgrp_unsubscribe (rd_kafka_cgrp_t *rkcg, rd_bool_t leave_group) { +static rd_kafka_resp_err_t rd_kafka_cgrp_unsubscribe(rd_kafka_cgrp_t *rkcg, + rd_bool_t leave_group) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNSUBSCRIBE", "Group \"%.*s\": unsubscribe from current %ssubscription " @@ -4597,12 +4428,11 @@ rd_kafka_cgrp_unsubscribe (rd_kafka_cgrp_t *rkcg, rd_bool_t leave_group) { rkcg->rkcg_subscription ? rkcg->rkcg_subscription->cnt : 0, RD_STR_ToF(leave_group), RD_STR_ToF(RD_KAFKA_CGRP_HAS_JOINED(rkcg)), - rkcg->rkcg_member_id ? - rkcg->rkcg_member_id->str : "n/a", + rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str : "n/a", rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers, - &rkcg->rkcg_max_poll_interval_tmr, 1/*lock*/); + &rkcg->rkcg_max_poll_interval_tmr, 1 /*lock*/); if (rkcg->rkcg_subscription) { rd_kafka_topic_partition_list_destroy(rkcg->rkcg_subscription); @@ -4621,9 +4451,8 @@ rd_kafka_cgrp_unsubscribe (rd_kafka_cgrp_t *rkcg, rd_bool_t leave_group) { /* FIXME: Why are we only revoking if !assignment_lost ? */ if (!rd_kafka_cgrp_assignment_is_lost(rkcg)) - rd_kafka_cgrp_revoke_all_rejoin(rkcg, - rd_false/*not lost*/, - rd_true/*initiating*/, + rd_kafka_cgrp_revoke_all_rejoin(rkcg, rd_false /*not lost*/, + rd_true /*initiating*/, "unsubscribe"); rkcg->rkcg_flags &= ~(RD_KAFKA_CGRP_F_SUBSCRIPTION | @@ -4637,10 +4466,10 @@ rd_kafka_cgrp_unsubscribe (rd_kafka_cgrp_t *rkcg, rd_bool_t leave_group) { * Set new atomic topic subscription. 
*/ static rd_kafka_resp_err_t -rd_kafka_cgrp_subscribe (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *rktparlist) { +rd_kafka_cgrp_subscribe(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *rktparlist) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", "Group \"%.*s\": subscribe to new %ssubscription " "of %d topics (join-state %s)", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), @@ -4656,25 +4485,25 @@ rd_kafka_cgrp_subscribe (rd_kafka_cgrp_t *rkcg, if (rd_kafka_fatal_error_code(rkcg->rkcg_rk)) { if (rkcg->rkcg_subscription) rd_kafka_cgrp_unsubscribe(rkcg, - rd_true/*leave group*/); + rd_true /*leave group*/); return RD_KAFKA_RESP_ERR__FATAL; } /* Clear any existing postponed subscribe. */ if (rkcg->rkcg_next_subscription) rd_kafka_topic_partition_list_destroy_free( - rkcg->rkcg_next_subscription); + rkcg->rkcg_next_subscription); rkcg->rkcg_next_subscription = NULL; - rkcg->rkcg_next_unsubscribe = rd_false; + rkcg->rkcg_next_unsubscribe = rd_false; if (RD_KAFKA_CGRP_REBALANCING(rkcg)) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, - "SUBSCRIBE", "Group \"%.*s\": postponing " - "subscribe until previous rebalance " - "completes (join-state %s)", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_cgrp_join_state_names[ - rkcg->rkcg_join_state]); + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", + "Group \"%.*s\": postponing " + "subscribe until previous rebalance " + "completes (join-state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); if (!rktparlist) rkcg->rkcg_next_unsubscribe = rd_true; @@ -4685,18 +4514,17 @@ rd_kafka_cgrp_subscribe (rd_kafka_cgrp_t *rkcg, } if (rd_kafka_cgrp_rebalance_protocol(rkcg) == - RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && - rktparlist && - rkcg->rkcg_subscription) + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && + rktparlist && 
rkcg->rkcg_subscription) return rd_kafka_cgrp_modify_subscription(rkcg, rktparlist); /* Remove existing subscription first */ if (rkcg->rkcg_subscription) rd_kafka_cgrp_unsubscribe( - rkcg, - rktparlist ? - rd_false/* don't leave group if new subscription */ : - rd_true/* leave group if no new subscription */); + rkcg, + rktparlist + ? rd_false /* don't leave group if new subscription */ + : rd_true /* leave group if no new subscription */); if (!rktparlist) return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -4715,9 +4543,6 @@ rd_kafka_cgrp_subscribe (rd_kafka_cgrp_t *rkcg, - - - /** * Same as cgrp_terminate() but called from the cgrp/main thread upon receiving * the op 'rko' from cgrp_terminate(). @@ -4726,10 +4551,9 @@ rd_kafka_cgrp_subscribe (rd_kafka_cgrp_t *rkcg, * * Locality: main thread */ -void -rd_kafka_cgrp_terminate0 (rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko) { +void rd_kafka_cgrp_terminate0(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko) { - rd_kafka_assert(NULL, thrd_is_current(rkcg->rkcg_rk->rk_thread)); + rd_kafka_assert(NULL, thrd_is_current(rkcg->rkcg_rk->rk_thread)); rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM", "Terminating group \"%.*s\" in state %s " @@ -4739,38 +4563,36 @@ rd_kafka_cgrp_terminate0 (rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko) { rd_list_cnt(&rkcg->rkcg_toppars)); if (unlikely(rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM || - (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) || - rkcg->rkcg_reply_rko != NULL)) { + (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) || + rkcg->rkcg_reply_rko != NULL)) { /* Already terminating or handling a previous terminate */ - if (rko) { - rd_kafka_q_t *rkq = rko->rko_replyq.q; - rko->rko_replyq.q = NULL; - rd_kafka_consumer_err(rkq, RD_KAFKA_NODEID_UA, - RD_KAFKA_RESP_ERR__IN_PROGRESS, - rko->rko_replyq.version, - NULL, NULL, - RD_KAFKA_OFFSET_INVALID, - "Group is %s", - rkcg->rkcg_reply_rko ? 
- "terminating":"terminated"); - rd_kafka_q_destroy(rkq); - rd_kafka_op_destroy(rko); - } + if (rko) { + rd_kafka_q_t *rkq = rko->rko_replyq.q; + rko->rko_replyq.q = NULL; + rd_kafka_consumer_err( + rkq, RD_KAFKA_NODEID_UA, + RD_KAFKA_RESP_ERR__IN_PROGRESS, + rko->rko_replyq.version, NULL, NULL, + RD_KAFKA_OFFSET_INVALID, "Group is %s", + rkcg->rkcg_reply_rko ? "terminating" + : "terminated"); + rd_kafka_q_destroy(rkq); + rd_kafka_op_destroy(rko); + } return; } /* Mark for stopping, the actual state transition * is performed when all toppars have left. */ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_TERMINATE; - rkcg->rkcg_ts_terminate = rd_clock(); - rkcg->rkcg_reply_rko = rko; + rkcg->rkcg_ts_terminate = rd_clock(); + rkcg->rkcg_reply_rko = rko; if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION) rd_kafka_cgrp_unsubscribe( - rkcg, - /* Leave group if this is a controlled shutdown */ - !rd_kafka_destroy_flags_no_consumer_close( - rkcg->rkcg_rk)); + rkcg, + /* Leave group if this is a controlled shutdown */ + !rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)); /* Reset the wait-for-LeaveGroup flag if there is an outstanding * LeaveGroupRequest being waited on (from a prior unsubscribe), but @@ -4801,8 +4623,8 @@ rd_kafka_cgrp_terminate0 (rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko) { * * Locality: any thread */ -void rd_kafka_cgrp_terminate (rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq) { - rd_kafka_assert(NULL, !thrd_is_current(rkcg->rkcg_rk->rk_thread)); +void rd_kafka_cgrp_terminate(rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq) { + rd_kafka_assert(NULL, !thrd_is_current(rkcg->rkcg_rk->rk_thread)); rd_kafka_cgrp_op(rkcg, NULL, replyq, RD_KAFKA_OP_TERMINATE, 0); } @@ -4816,11 +4638,11 @@ struct _op_timeout_offset_commit { /** * q_filter callback for expiring OFFSET_COMMIT timeouts. 
*/ -static int rd_kafka_op_offset_commit_timeout_check (rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, - void *opaque) { +static int rd_kafka_op_offset_commit_timeout_check(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + void *opaque) { struct _op_timeout_offset_commit *state = - (struct _op_timeout_offset_commit*)opaque; + (struct _op_timeout_offset_commit *)opaque; if (likely(rko->rko_type != RD_KAFKA_OP_OFFSET_COMMIT || rko->rko_u.offset_commit.ts_timeout == 0 || @@ -4840,13 +4662,13 @@ static int rd_kafka_op_offset_commit_timeout_check (rd_kafka_q_t *rkq, /** * Scan for various timeouts. */ -static void rd_kafka_cgrp_timeout_scan (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { +static void rd_kafka_cgrp_timeout_scan(rd_kafka_cgrp_t *rkcg, rd_ts_t now) { struct _op_timeout_offset_commit ofc_state; int i, cnt = 0; rd_kafka_op_t *rko; ofc_state.now = now; - ofc_state.rk = rkcg->rkcg_rk; + ofc_state.rk = rkcg->rkcg_rk; rd_list_init(&ofc_state.expired, 0, NULL); cnt += rd_kafka_q_apply(rkcg->rkcg_wait_coord_q, @@ -4854,10 +4676,9 @@ static void rd_kafka_cgrp_timeout_scan (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { &ofc_state); RD_LIST_FOREACH(rko, &ofc_state.expired, i) - rd_kafka_cgrp_op_handle_OffsetCommit( - rkcg->rkcg_rk, NULL, - RD_KAFKA_RESP_ERR__WAIT_COORD, - NULL, NULL, rko); + rd_kafka_cgrp_op_handle_OffsetCommit(rkcg->rkcg_rk, NULL, + RD_KAFKA_RESP_ERR__WAIT_COORD, + NULL, NULL, rko); rd_list_destroy(&ofc_state.expired); @@ -4866,8 +4687,6 @@ static void rd_kafka_cgrp_timeout_scan (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { "Group \"%.*s\": timed out %d op(s), %d remain", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), cnt, rd_kafka_q_len(rkcg->rkcg_wait_coord_q)); - - } @@ -4876,12 +4695,12 @@ static void rd_kafka_cgrp_timeout_scan (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { * @locality rdkafka main thread * @locks none */ -static void rd_kafka_cgrp_handle_assign_op (rd_kafka_cgrp_t *rkcg, - rd_kafka_op_t *rko) { +static void rd_kafka_cgrp_handle_assign_op(rd_kafka_cgrp_t *rkcg, + rd_kafka_op_t 
*rko) { rd_kafka_error_t *error = NULL; if (rd_kafka_cgrp_rebalance_protocol(rkcg) == - RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && !(rko->rko_u.assign.method == RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN || rko->rko_u.assign.method == RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN)) error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__STATE, @@ -4893,7 +4712,7 @@ static void rd_kafka_cgrp_handle_assign_op (rd_kafka_cgrp_t *rkcg, "COOPERATIVE"); else if (rd_kafka_cgrp_rebalance_protocol(rkcg) == - RD_KAFKA_REBALANCE_PROTOCOL_EAGER && + RD_KAFKA_REBALANCE_PROTOCOL_EAGER && !(rko->rko_u.assign.method == RD_KAFKA_ASSIGN_METHOD_ASSIGN)) error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__STATE, "Changes to the current assignment " @@ -4906,43 +4725,41 @@ static void rd_kafka_cgrp_handle_assign_op (rd_kafka_cgrp_t *rkcg, /* Treat all assignments as unassign when a fatal error is * raised or the cgrp is terminating. */ - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, - "ASSIGN", "Group \"%s\": Consumer %s: " + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, + "ASSIGN", + "Group \"%s\": Consumer %s: " "treating assign as unassign", rkcg->rkcg_group_id->str, - rd_kafka_fatal_error_code(rkcg->rkcg_rk) ? - "has raised a fatal error" : "is terminating"); + rd_kafka_fatal_error_code(rkcg->rkcg_rk) + ? 
"has raised a fatal error" + : "is terminating"); if (rko->rko_u.assign.partitions) { rd_kafka_topic_partition_list_destroy( - rko->rko_u.assign.partitions); + rko->rko_u.assign.partitions); rko->rko_u.assign.partitions = NULL; } rko->rko_u.assign.method = RD_KAFKA_ASSIGN_METHOD_ASSIGN; } if (!error) { - switch (rko->rko_u.assign.method) - { + switch (rko->rko_u.assign.method) { case RD_KAFKA_ASSIGN_METHOD_ASSIGN: /* New atomic assignment (partitions != NULL), * or unassignment (partitions == NULL) */ if (rko->rko_u.assign.partitions) error = rd_kafka_cgrp_assign( - rkcg, - rko->rko_u.assign.partitions); + rkcg, rko->rko_u.assign.partitions); else error = rd_kafka_cgrp_unassign(rkcg); break; case RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN: error = rd_kafka_cgrp_incremental_assign( - rkcg, - rko->rko_u.assign.partitions); + rkcg, rko->rko_u.assign.partitions); break; case RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN: error = rd_kafka_cgrp_incremental_unassign( - rkcg, - rko->rko_u.assign.partitions); + rkcg, rko->rko_u.assign.partitions); break; default: RD_NOTREACHED(); @@ -4952,8 +4769,6 @@ static void rd_kafka_cgrp_handle_assign_op (rd_kafka_cgrp_t *rkcg, /* If call succeeded serve the assignment */ if (!error) rd_kafka_assignment_serve(rkcg->rkcg_rk); - - } if (error) { @@ -4975,10 +4790,11 @@ static void rd_kafka_cgrp_handle_assign_op (rd_kafka_cgrp_t *rkcg, * @locality rdkafka main thread * @locks none */ -static rd_kafka_op_res_t -rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, rd_kafka_q_cb_type_t cb_type, - void *opaque) { +static rd_kafka_op_res_t rd_kafka_cgrp_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { rd_kafka_cgrp_t *rkcg = opaque; rd_kafka_toppar_t *rktp; rd_kafka_resp_err_t err; @@ -4987,45 +4803,45 @@ rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, rktp = rko->rko_rktp; if (rktp && !silent_op) - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPOP", - "Group 
\"%.*s\" received op %s in state %s " - "(join-state %s) for %.*s [%"PRId32"]", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_op2str(rko->rko_type), - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state], - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition); + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "CGRPOP", + "Group \"%.*s\" received op %s in state %s " + "(join-state %s) for %.*s [%" PRId32 "]", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_op2str(rko->rko_type), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); else if (!silent_op) - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPOP", - "Group \"%.*s\" received op %s in state %s " - "(join-state %s)", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_op2str(rko->rko_type), - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state]); - - switch ((int)rko->rko_type) - { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "CGRPOP", + "Group \"%.*s\" received op %s in state %s " + "(join-state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_op2str(rko->rko_type), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + switch ((int)rko->rko_type) { case RD_KAFKA_OP_NAME: /* Return the currently assigned member id. */ if (rkcg->rkcg_member_id) rko->rko_u.name.str = - RD_KAFKAP_STR_DUP(rkcg->rkcg_member_id); + RD_KAFKAP_STR_DUP(rkcg->rkcg_member_id); rd_kafka_op_reply(rko, 0); rko = NULL; break; case RD_KAFKA_OP_CG_METADATA: /* Return the current consumer group metadata. */ - rko->rko_u.cg_metadata = rkcg->rkcg_member_id + rko->rko_u.cg_metadata = + rkcg->rkcg_member_id ? 
rd_kafka_consumer_group_metadata_new_with_genid( - rkcg->rkcg_rk->rk_conf.group_id_str, - rkcg->rkcg_generation_id, - rkcg->rkcg_member_id->str, - rkcg->rkcg_rk->rk_conf.group_instance_id) + rkcg->rkcg_rk->rk_conf.group_id_str, + rkcg->rkcg_generation_id, + rkcg->rkcg_member_id->str, + rkcg->rkcg_rk->rk_conf.group_instance_id) : NULL; rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); rko = NULL; @@ -5035,19 +4851,17 @@ rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)) { rd_kafka_op_handle_OffsetFetch( - rkcg->rkcg_rk, NULL, - RD_KAFKA_RESP_ERR__WAIT_COORD, - NULL, NULL, rko); + rkcg->rkcg_rk, NULL, RD_KAFKA_RESP_ERR__WAIT_COORD, + NULL, NULL, rko); rko = NULL; /* rko freed by handler */ break; } rd_kafka_OffsetFetchRequest( - rkcg->rkcg_coord, - rko->rko_u.offset_fetch.partitions, - rko->rko_u.offset_fetch.require_stable, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_op_handle_OffsetFetch, rko); + rkcg->rkcg_coord, rko->rko_u.offset_fetch.partitions, + rko->rko_u.offset_fetch.require_stable, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_op_handle_OffsetFetch, rko); rko = NULL; /* rko now owned by request */ break; @@ -5056,8 +4870,7 @@ rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, /* If terminating tell the partition to leave */ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) - rd_kafka_toppar_op_fetch_stop( - rktp, RD_KAFKA_NO_REPLYQ); + rd_kafka_toppar_op_fetch_stop(rktp, RD_KAFKA_NO_REPLYQ); break; case RD_KAFKA_OP_PARTITION_LEAVE: @@ -5070,27 +4883,25 @@ rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, /* only set offsets * if no partitions were * specified. */ - rko->rko_u.offset_commit. - partitions ? - 0 : 1 /* set_offsets*/, + rko->rko_u.offset_commit.partitions + ? 
0 + : 1 /* set_offsets*/, rko->rko_u.offset_commit.reason); rko = NULL; /* rko now owned by request */ break; case RD_KAFKA_OP_COORD_QUERY: - rd_kafka_cgrp_coord_query(rkcg, - rko->rko_err ? - rd_kafka_err2str(rko-> - rko_err): - "from op"); + rd_kafka_cgrp_coord_query( + rkcg, + rko->rko_err ? rd_kafka_err2str(rko->rko_err) : "from op"); break; case RD_KAFKA_OP_SUBSCRIBE: rd_kafka_app_polled(rk); /* New atomic subscription (may be NULL) */ - err = rd_kafka_cgrp_subscribe( - rkcg, rko->rko_u.subscribe.topics); + err = + rd_kafka_cgrp_subscribe(rkcg, rko->rko_u.subscribe.topics); if (!err) /* now owned by rkcg */ rko->rko_u.subscribe.topics = NULL; @@ -5107,14 +4918,14 @@ rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, case RD_KAFKA_OP_GET_SUBSCRIPTION: if (rkcg->rkcg_next_subscription) rko->rko_u.subscribe.topics = - rd_kafka_topic_partition_list_copy( - rkcg->rkcg_next_subscription); + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_next_subscription); else if (rkcg->rkcg_next_unsubscribe) rko->rko_u.subscribe.topics = NULL; else if (rkcg->rkcg_subscription) rko->rko_u.subscribe.topics = - rd_kafka_topic_partition_list_copy( - rkcg->rkcg_subscription); + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_subscription); rd_kafka_op_reply(rko, 0); rko = NULL; break; @@ -5122,8 +4933,8 @@ rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, case RD_KAFKA_OP_GET_ASSIGNMENT: /* This is the consumer assignment, not the group assignment. 
*/ rko->rko_u.assign.partitions = - rd_kafka_topic_partition_list_copy( - rkcg->rkcg_rk->rk_consumer.assignment.all); + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_rk->rk_consumer.assignment.all); rd_kafka_op_reply(rko, 0); rko = NULL; @@ -5131,8 +4942,8 @@ rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, case RD_KAFKA_OP_GET_REBALANCE_PROTOCOL: rko->rko_u.rebalance_protocol.str = - rd_kafka_rebalance_protocol2str( - rd_kafka_cgrp_rebalance_protocol(rkcg)); + rd_kafka_rebalance_protocol2str( + rd_kafka_cgrp_rebalance_protocol(rkcg)); rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); rko = NULL; break; @@ -5158,8 +4969,8 @@ rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, * @returns true if the session timeout has expired (due to no successful * Heartbeats in session.timeout.ms) and triggers a rebalance. */ -static rd_bool_t -rd_kafka_cgrp_session_timeout_check (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { +static rd_bool_t rd_kafka_cgrp_session_timeout_check(rd_kafka_cgrp_t *rkcg, + rd_ts_t now) { rd_ts_t delta; char buf[256]; @@ -5174,10 +4985,11 @@ rd_kafka_cgrp_session_timeout_check (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { rd_snprintf(buf, sizeof(buf), "Consumer group session timed out (in join-state %s) after " - "%"PRId64" ms without a successful response from the " - "group coordinator (broker %"PRId32", last error was %s)", + "%" PRId64 + " ms without a successful response from the " + "group coordinator (broker %" PRId32 ", last error was %s)", rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - delta/1000, rkcg->rkcg_coord_id, + delta / 1000, rkcg->rkcg_coord_id, rd_kafka_err2str(rkcg->rkcg_last_heartbeat_err)); rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR; @@ -5193,10 +5005,8 @@ rd_kafka_cgrp_session_timeout_check (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { rd_kafka_cgrp_set_member_id(rkcg, ""); /* Revoke and rebalance */ - rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, - rd_true/*lost*/, - rd_true/*initiating*/, - buf); + 
rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/, + rd_true /*initiating*/, buf); return rd_true; } @@ -5205,38 +5015,39 @@ rd_kafka_cgrp_session_timeout_check (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { /** * @brief Apply the next waiting subscribe/unsubscribe, if any. */ -static void rd_kafka_cgrp_apply_next_subscribe (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_apply_next_subscribe(rd_kafka_cgrp_t *rkcg) { rd_assert(rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT); if (rkcg->rkcg_next_subscription) { rd_kafka_topic_partition_list_t *next_subscription = - rkcg->rkcg_next_subscription; + rkcg->rkcg_next_subscription; rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE", "Group \"%s\": invoking waiting postponed " - "subscribe", rkcg->rkcg_group_id->str); + "subscribe", + rkcg->rkcg_group_id->str); rkcg->rkcg_next_subscription = NULL; rd_kafka_cgrp_subscribe(rkcg, next_subscription); } else if (rkcg->rkcg_next_unsubscribe) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE", "Group \"%s\": invoking waiting postponed " - "unsubscribe", rkcg->rkcg_group_id->str); + "unsubscribe", + rkcg->rkcg_group_id->str); rkcg->rkcg_next_unsubscribe = rd_false; - rd_kafka_cgrp_unsubscribe(rkcg, rd_true/*Leave*/); + rd_kafka_cgrp_unsubscribe(rkcg, rd_true /*Leave*/); } } /** * Client group's join state handling */ -static void rd_kafka_cgrp_join_state_serve (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_join_state_serve(rd_kafka_cgrp_t *rkcg) { rd_ts_t now = rd_clock(); if (unlikely(rd_kafka_fatal_error_code(rkcg->rkcg_rk))) return; - switch (rkcg->rkcg_join_state) - { + switch (rkcg->rkcg_join_state) { case RD_KAFKA_CGRP_JOIN_STATE_INIT: if (unlikely(rd_kafka_cgrp_awaiting_response(rkcg))) break; @@ -5248,8 +5059,8 @@ static void rd_kafka_cgrp_join_state_serve (rd_kafka_cgrp_t *rkcg) { if (!rkcg->rkcg_subscription) break; - if (rd_interval_immediate(&rkcg->rkcg_join_intvl, - 1000*1000, now) > 0) + if (rd_interval_immediate(&rkcg->rkcg_join_intvl, 1000 * 1000, + 
now) > 0) rd_kafka_cgrp_join(rkcg); break; @@ -5260,46 +5071,46 @@ static void rd_kafka_cgrp_join_state_serve (rd_kafka_cgrp_t *rkcg) { /* FIXME: I think we might have to send heartbeats in * in WAIT_INCR_UNASSIGN, yes-no? */ case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE: - break; + break; case RD_KAFKA_CGRP_JOIN_STATE_STEADY: case RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL: case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL: if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION && - rd_interval(&rkcg->rkcg_heartbeat_intvl, - rkcg->rkcg_rk->rk_conf. - group_heartbeat_intvl_ms * 1000, now) > 0) + rd_interval( + &rkcg->rkcg_heartbeat_intvl, + rkcg->rkcg_rk->rk_conf.group_heartbeat_intvl_ms * 1000, + now) > 0) rd_kafka_cgrp_heartbeat(rkcg); break; } - } /** * Client group handling. * Called from main thread to serve the operational aspects of a cgrp. */ -void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { - rd_kafka_broker_t *rkb = rkcg->rkcg_coord; - int rkb_state = RD_KAFKA_BROKER_STATE_INIT; +void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg) { + rd_kafka_broker_t *rkb = rkcg->rkcg_coord; + int rkb_state = RD_KAFKA_BROKER_STATE_INIT; rd_ts_t now; - if (rkb) { - rd_kafka_broker_lock(rkb); - rkb_state = rkb->rkb_state; - rd_kafka_broker_unlock(rkb); - - /* Go back to querying state if we lost the current coordinator - * connection. */ - if (rkb_state < RD_KAFKA_BROKER_STATE_UP && - rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP) - rd_kafka_cgrp_set_state(rkcg, - RD_KAFKA_CGRP_STATE_QUERY_COORD); - } + if (rkb) { + rd_kafka_broker_lock(rkb); + rkb_state = rkb->rkb_state; + rd_kafka_broker_unlock(rkb); + + /* Go back to querying state if we lost the current coordinator + * connection. 
*/ + if (rkb_state < RD_KAFKA_BROKER_STATE_UP && + rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP) + rd_kafka_cgrp_set_state( + rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); + } now = rd_clock(); - /* Check for cgrp termination */ - if (unlikely(rd_kafka_cgrp_try_terminate(rkcg))) { + /* Check for cgrp termination */ + if (unlikely(rd_kafka_cgrp_try_terminate(rkcg))) { rd_kafka_cgrp_terminated(rkcg); return; /* cgrp terminated */ } @@ -5313,9 +5124,8 @@ void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY) rd_kafka_cgrp_session_timeout_check(rkcg, now); - retry: - switch (rkcg->rkcg_state) - { +retry: + switch (rkcg->rkcg_state) { case RD_KAFKA_CGRP_STATE_TERM: break; @@ -5326,7 +5136,7 @@ void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { case RD_KAFKA_CGRP_STATE_QUERY_COORD: /* Query for coordinator. */ if (rd_interval_immediate(&rkcg->rkcg_coord_query_intvl, - 500*1000, now) > 0) + 500 * 1000, now) > 0) rd_kafka_cgrp_coord_query(rkcg, "intervaled in " "state query-coord"); @@ -5343,8 +5153,8 @@ void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { * to speed up next transition. */ /* Coordinator query */ - if (rd_interval(&rkcg->rkcg_coord_query_intvl, - 1000*1000, now) > 0) + if (rd_interval(&rkcg->rkcg_coord_query_intvl, 1000 * 1000, + now) > 0) rd_kafka_cgrp_coord_query(rkcg, "intervaled in " "state wait-broker"); @@ -5352,17 +5162,17 @@ void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { case RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT: /* Waiting for broker transport to come up. - * Also make sure broker supports groups. */ + * Also make sure broker supports groups. 
*/ if (rkb_state < RD_KAFKA_BROKER_STATE_UP || !rkb || - !rd_kafka_broker_supports( - rkb, RD_KAFKA_FEATURE_BROKER_GROUP_COORD)) { - /* Coordinator query */ - if (rd_interval(&rkcg->rkcg_coord_query_intvl, - 1000*1000, now) > 0) - rd_kafka_cgrp_coord_query( - rkcg, - "intervaled in state " - "wait-broker-transport"); + !rd_kafka_broker_supports( + rkb, RD_KAFKA_FEATURE_BROKER_GROUP_COORD)) { + /* Coordinator query */ + if (rd_interval(&rkcg->rkcg_coord_query_intvl, + 1000 * 1000, now) > 0) + rd_kafka_cgrp_coord_query( + rkcg, + "intervaled in state " + "wait-broker-transport"); } else { rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_UP); @@ -5376,47 +5186,47 @@ void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { break; case RD_KAFKA_CGRP_STATE_UP: - /* Move any ops awaiting the coordinator to the ops queue - * for reprocessing. */ - rd_kafka_q_concat(rkcg->rkcg_ops, rkcg->rkcg_wait_coord_q); + /* Move any ops awaiting the coordinator to the ops queue + * for reprocessing. */ + rd_kafka_q_concat(rkcg->rkcg_ops, rkcg->rkcg_wait_coord_q); /* Relaxed coordinator queries. */ if (rd_interval(&rkcg->rkcg_coord_query_intvl, - rkcg->rkcg_rk->rk_conf. - coord_query_intvl_ms * 1000, now) > 0) + rkcg->rkcg_rk->rk_conf.coord_query_intvl_ms * + 1000, + now) > 0) rd_kafka_cgrp_coord_query(rkcg, "intervaled in state up"); rd_kafka_cgrp_join_state_serve(rkcg); break; - } if (unlikely(rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP && - rd_interval(&rkcg->rkcg_timeout_scan_intvl, - 1000*1000, now) > 0)) + rd_interval(&rkcg->rkcg_timeout_scan_intvl, 1000 * 1000, + now) > 0)) rd_kafka_cgrp_timeout_scan(rkcg, now); } - - /** * Send an op to a cgrp. 
* * Locality: any thread */ -void rd_kafka_cgrp_op (rd_kafka_cgrp_t *rkcg, rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq, rd_kafka_op_type_t type, - rd_kafka_resp_err_t err) { +void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg, + rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq, + rd_kafka_op_type_t type, + rd_kafka_resp_err_t err) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(type); - rko->rko_err = err; - rko->rko_replyq = replyq; + rko = rd_kafka_op_new(type); + rko->rko_err = err; + rko->rko_replyq = replyq; - if (rktp) + if (rktp) rko->rko_rktp = rd_kafka_toppar_keep(rktp); rd_kafka_q_enq(rkcg->rkcg_ops, rko); @@ -5424,11 +5234,7 @@ void rd_kafka_cgrp_op (rd_kafka_cgrp_t *rkcg, rd_kafka_toppar_t *rktp, - - - - -void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id){ +void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id) { if (rkcg->rkcg_member_id && member_id && !rd_kafkap_str_cmp_str(rkcg->rkcg_member_id, member_id)) return; /* No change */ @@ -5436,8 +5242,8 @@ void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id){ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "MEMBERID", "Group \"%.*s\": updating member id \"%s\" -> \"%s\"", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rkcg->rkcg_member_id ? - rkcg->rkcg_member_id->str : "(not-set)", + rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str + : "(not-set)", member_id ? member_id : "(not-set)"); if (rkcg->rkcg_member_id) { @@ -5455,7 +5261,7 @@ void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id){ * deleted or re-created topics). 
*/ static rd_kafka_topic_partition_list_t * -rd_kafka_cgrp_owned_but_not_exist_partitions (rd_kafka_cgrp_t *rkcg) { +rd_kafka_cgrp_owned_but_not_exist_partitions(rd_kafka_cgrp_t *rkcg) { rd_kafka_topic_partition_list_t *result = NULL; const rd_kafka_topic_partition_t *curr; @@ -5463,19 +5269,17 @@ rd_kafka_cgrp_owned_but_not_exist_partitions (rd_kafka_cgrp_t *rkcg) { return NULL; RD_KAFKA_TPLIST_FOREACH(curr, rkcg->rkcg_group_assignment) { - if (rd_list_find(rkcg->rkcg_subscribed_topics, - curr->topic, rd_kafka_topic_info_topic_cmp)) + if (rd_list_find(rkcg->rkcg_subscribed_topics, curr->topic, + rd_kafka_topic_info_topic_cmp)) continue; if (!result) result = rd_kafka_topic_partition_list_new( - rkcg->rkcg_group_assignment->cnt); + rkcg->rkcg_group_assignment->cnt); - rd_kafka_topic_partition_list_add0(__FUNCTION__,__LINE__, - result, - curr->topic, - curr->partition, - curr->_private); + rd_kafka_topic_partition_list_add0( + __FUNCTION__, __LINE__, result, curr->topic, + curr->partition, curr->_private); } return result; @@ -5491,8 +5295,8 @@ rd_kafka_cgrp_owned_but_not_exist_partitions (rd_kafka_cgrp_t *rkcg) { * @locks none * @locality rdkafka main thread */ -void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, - rd_bool_t do_join) { +void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg, + rd_bool_t do_join) { rd_list_t *tinfos; rd_kafka_topic_partition_list_t *errored; rd_bool_t changed; @@ -5514,14 +5318,11 @@ void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, (void *)rd_kafka_topic_info_destroy); if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) - rd_kafka_metadata_topic_match(rkcg->rkcg_rk, - tinfos, rkcg->rkcg_subscription, - errored); + rd_kafka_metadata_topic_match(rkcg->rkcg_rk, tinfos, + rkcg->rkcg_subscription, errored); else - rd_kafka_metadata_topic_filter(rkcg->rkcg_rk, - tinfos, - rkcg->rkcg_subscription, - errored); + rd_kafka_metadata_topic_filter( + rkcg->rkcg_rk, tinfos, rkcg->rkcg_subscription, 
errored); /* @@ -5529,7 +5330,7 @@ void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, * The function takes ownership of errored. */ rd_kafka_propagate_consumer_topic_errors( - rkcg, errored, "Subscribed topic not available"); + rkcg, errored, "Subscribed topic not available"); /* * Update effective list of topics (takes ownership of \c tinfos) @@ -5547,7 +5348,7 @@ void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, /* List of subscribed topics changed, trigger rejoin. */ rd_kafka_dbg(rkcg->rkcg_rk, - CGRP|RD_KAFKA_DBG_METADATA|RD_KAFKA_DBG_CONSUMER, + CGRP | RD_KAFKA_DBG_METADATA | RD_KAFKA_DBG_CONSUMER, "REJOIN", "Group \"%.*s\": " "subscription updated from metadata change: " @@ -5560,25 +5361,23 @@ void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, /* Partitions from deleted topics */ rd_kafka_topic_partition_list_t *owned_but_not_exist = - rd_kafka_cgrp_owned_but_not_exist_partitions( - rkcg); + rd_kafka_cgrp_owned_but_not_exist_partitions(rkcg); if (owned_but_not_exist) { rd_kafka_cgrp_assignment_set_lost( - rkcg, - "%d subscribed topic(s) no longer exist", - owned_but_not_exist->cnt); + rkcg, "%d subscribed topic(s) no longer exist", + owned_but_not_exist->cnt); rd_kafka_rebalance_op_incr( - rkcg, - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - owned_but_not_exist, - rkcg->rkcg_group_leader.members != NULL - /* Rejoin group following revoke's - * unassign if we are leader */, - "topics not available"); + rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + owned_but_not_exist, + rkcg->rkcg_group_leader.members != NULL + /* Rejoin group following revoke's + * unassign if we are leader */ + , + "topics not available"); rd_kafka_topic_partition_list_destroy( - owned_but_not_exist); + owned_but_not_exist); } else { /* Nothing to revoke, rejoin group if we are the @@ -5589,10 +5388,9 @@ void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, * aren't? * Going against the KIP and rejoining here. 
*/ rd_kafka_cgrp_rejoin( - rkcg, - "Metadata for subscribed topic(s) has " - "changed"); - + rkcg, + "Metadata for subscribed topic(s) has " + "changed"); } } else { @@ -5609,28 +5407,26 @@ void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata_new (const char *group_id) { +rd_kafka_consumer_group_metadata_new(const char *group_id) { rd_kafka_consumer_group_metadata_t *cgmetadata; - cgmetadata = rd_kafka_consumer_group_metadata_new_with_genid(group_id, - -1, "", - NULL); + cgmetadata = rd_kafka_consumer_group_metadata_new_with_genid( + group_id, -1, "", NULL); return cgmetadata; } rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata_new_with_genid (const char *group_id, - int32_t generation_id, - const char *member_id, - const char - *group_instance_id) { +rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, + int32_t generation_id, + const char *member_id, + const char *group_instance_id) { rd_kafka_consumer_group_metadata_t *cgmetadata; - cgmetadata = rd_calloc(1, sizeof(*cgmetadata)); - cgmetadata->group_id = rd_strdup(group_id); + cgmetadata = rd_calloc(1, sizeof(*cgmetadata)); + cgmetadata->group_id = rd_strdup(group_id); cgmetadata->generation_id = generation_id; - cgmetadata->member_id = rd_strdup(member_id); + cgmetadata->member_id = rd_strdup(member_id); if (group_instance_id) cgmetadata->group_instance_id = rd_strdup(group_instance_id); @@ -5638,7 +5434,7 @@ rd_kafka_consumer_group_metadata_new_with_genid (const char *group_id, } rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata (rd_kafka_t *rk) { +rd_kafka_consumer_group_metadata(rd_kafka_t *rk) { rd_kafka_consumer_group_metadata_t *cgmetadata; rd_kafka_op_t *rko; rd_kafka_cgrp_t *rkcg; @@ -5650,16 +5446,15 @@ rd_kafka_consumer_group_metadata (rd_kafka_t *rk) { if (!rko) return NULL; - cgmetadata = rko->rko_u.cg_metadata; + cgmetadata = rko->rko_u.cg_metadata; 
rko->rko_u.cg_metadata = NULL; rd_kafka_op_destroy(rko); return cgmetadata; } -void -rd_kafka_consumer_group_metadata_destroy ( - rd_kafka_consumer_group_metadata_t *cgmetadata) { +void rd_kafka_consumer_group_metadata_destroy( + rd_kafka_consumer_group_metadata_t *cgmetadata) { rd_free(cgmetadata->group_id); rd_free(cgmetadata->member_id); if (cgmetadata->group_instance_id) @@ -5667,18 +5462,17 @@ rd_kafka_consumer_group_metadata_destroy ( rd_free(cgmetadata); } -rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata_dup ( - const rd_kafka_consumer_group_metadata_t *cgmetadata) { +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup( + const rd_kafka_consumer_group_metadata_t *cgmetadata) { rd_kafka_consumer_group_metadata_t *ret; - ret = rd_calloc(1, sizeof(*cgmetadata)); - ret->group_id = rd_strdup(cgmetadata->group_id); + ret = rd_calloc(1, sizeof(*cgmetadata)); + ret->group_id = rd_strdup(cgmetadata->group_id); ret->generation_id = cgmetadata->generation_id; - ret->member_id = rd_strdup(cgmetadata->member_id); + ret->member_id = rd_strdup(cgmetadata->member_id); if (cgmetadata->group_instance_id) - ret->group_instance_id = rd_strdup( - cgmetadata->group_instance_id); + ret->group_instance_id = + rd_strdup(cgmetadata->group_instance_id); return ret; } @@ -5691,20 +5485,22 @@ rd_kafka_consumer_group_metadata_dup ( */ static const char rd_kafka_consumer_group_metadata_magic[7] = "CGMDv2:"; -rd_kafka_error_t *rd_kafka_consumer_group_metadata_write ( - const rd_kafka_consumer_group_metadata_t *cgmd, - void **bufferp, size_t *sizep) { +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write( + const rd_kafka_consumer_group_metadata_t *cgmd, + void **bufferp, + size_t *sizep) { char *buf; size_t size; - size_t of = 0; - size_t magic_len = sizeof(rd_kafka_consumer_group_metadata_magic); + size_t of = 0; + size_t magic_len = sizeof(rd_kafka_consumer_group_metadata_magic); size_t groupid_len = strlen(cgmd->group_id) + 1; - size_t 
generationid_len = sizeof(cgmd->generation_id); - size_t member_id_len = strlen(cgmd->member_id) + 1; + size_t generationid_len = sizeof(cgmd->generation_id); + size_t member_id_len = strlen(cgmd->member_id) + 1; int8_t group_instance_id_is_null = cgmd->group_instance_id ? 0 : 1; - size_t group_instance_id_is_null_len = sizeof(group_instance_id_is_null); - size_t group_instance_id_len = cgmd->group_instance_id - ? strlen(cgmd->group_instance_id) + 1 : 0; + size_t group_instance_id_is_null_len = + sizeof(group_instance_id_is_null); + size_t group_instance_id_len = + cgmd->group_instance_id ? strlen(cgmd->group_instance_id) + 1 : 0; size = magic_len + groupid_len + generationid_len + member_id_len + group_instance_id_is_null_len + group_instance_id_len; @@ -5714,26 +5510,28 @@ rd_kafka_error_t *rd_kafka_consumer_group_metadata_write ( memcpy(buf, rd_kafka_consumer_group_metadata_magic, magic_len); of += magic_len; - memcpy(buf+of, &cgmd->generation_id, generationid_len); + memcpy(buf + of, &cgmd->generation_id, generationid_len); of += generationid_len; - memcpy(buf+of, cgmd->group_id, groupid_len); + memcpy(buf + of, cgmd->group_id, groupid_len); of += groupid_len; - memcpy(buf+of, cgmd->member_id, member_id_len); + memcpy(buf + of, cgmd->member_id, member_id_len); of += member_id_len; - memcpy(buf+of, &group_instance_id_is_null, group_instance_id_is_null_len); + memcpy(buf + of, &group_instance_id_is_null, + group_instance_id_is_null_len); of += group_instance_id_is_null_len; if (!group_instance_id_is_null) - memcpy(buf+of, cgmd->group_instance_id, group_instance_id_len); + memcpy(buf + of, cgmd->group_instance_id, + group_instance_id_len); of += group_instance_id_len; rd_assert(of == size); *bufferp = buf; - *sizep = size; + *sizep = size; return NULL; } @@ -5746,16 +5544,17 @@ rd_kafka_error_t *rd_kafka_consumer_group_metadata_write ( **/ static const char *str_is_printable(const char *s, const char *end) { const char *c; - for (c = s ; *c && c != end ; c++) + 
for (c = s; *c && c != end; c++) if (!isprint((int)*c)) return NULL; return c + 1; } -rd_kafka_error_t *rd_kafka_consumer_group_metadata_read ( - rd_kafka_consumer_group_metadata_t **cgmdp, - const void *buffer, size_t size) { +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read( + rd_kafka_consumer_group_metadata_t **cgmdp, + const void *buffer, + size_t size) { const char *buf = (const char *)buffer; const char *end = buf + size; const char *next; @@ -5775,25 +5574,25 @@ rd_kafka_error_t *rd_kafka_consumer_group_metadata_read ( return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG, "Input buffer is not a serialized " "consumer group metadata object"); - memcpy(&generation_id, buf+magic_len, generationid_len); + memcpy(&generation_id, buf + magic_len, generationid_len); group_id = buf + magic_len + generationid_len; - next = str_is_printable(group_id, end); + next = str_is_printable(group_id, end); if (!next) return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG, "Input buffer group id is not safe"); member_id = next; - next = str_is_printable(member_id, end); + next = str_is_printable(member_id, end); if (!next) return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG, "Input buffer member id is not " "safe"); - group_instance_id_is_null = (int8_t)*(next++); + group_instance_id_is_null = (int8_t) * (next++); if (!group_instance_id_is_null) { group_instance_id = next; - next = str_is_printable(group_instance_id, end); + next = str_is_printable(group_instance_id, end); if (!next) return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG, "Input buffer group " @@ -5805,54 +5604,46 @@ rd_kafka_error_t *rd_kafka_consumer_group_metadata_read ( "Input buffer bad length"); *cgmdp = rd_kafka_consumer_group_metadata_new_with_genid( - group_id, - generation_id, - member_id, - group_instance_id); + group_id, generation_id, member_id, group_instance_id); return NULL; } -static int unittest_consumer_group_metadata_iteration(const char *group_id, - int32_t generation_id, - const char 
*member_id, - const char *group_instance_id) { +static int +unittest_consumer_group_metadata_iteration(const char *group_id, + int32_t generation_id, + const char *member_id, + const char *group_instance_id) { rd_kafka_consumer_group_metadata_t *cgmd; void *buffer, *buffer2; size_t size, size2; rd_kafka_error_t *error; cgmd = rd_kafka_consumer_group_metadata_new_with_genid( - group_id, - generation_id, - member_id, - group_instance_id); + group_id, generation_id, member_id, group_instance_id); RD_UT_ASSERT(cgmd != NULL, "failed to create metadata"); - error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer, - &size); + error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer, &size); RD_UT_ASSERT(!error, "metadata_write failed: %s", - rd_kafka_error_string(error)); + rd_kafka_error_string(error)); rd_kafka_consumer_group_metadata_destroy(cgmd); - cgmd = NULL; - error = rd_kafka_consumer_group_metadata_read(&cgmd, buffer, - size); + cgmd = NULL; + error = rd_kafka_consumer_group_metadata_read(&cgmd, buffer, size); RD_UT_ASSERT(!error, "metadata_read failed: %s", - rd_kafka_error_string(error)); + rd_kafka_error_string(error)); /* Serialize again and compare buffers */ - error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer2, - &size2); + error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer2, &size2); RD_UT_ASSERT(!error, "metadata_write failed: %s", - rd_kafka_error_string(error)); + rd_kafka_error_string(error)); RD_UT_ASSERT(size == size2 && !memcmp(buffer, buffer2, size), - "metadata_read/write size or content mismatch: " - "size %"PRIusz", size2 %"PRIusz, - size, size2); + "metadata_read/write size or content mismatch: " + "size %" PRIusz ", size2 %" PRIusz, + size, size2); rd_kafka_consumer_group_metadata_destroy(cgmd); rd_free(buffer); @@ -5862,14 +5653,14 @@ static int unittest_consumer_group_metadata_iteration(const char *group_id, } -static int unittest_consumer_group_metadata (void) { +static int 
unittest_consumer_group_metadata(void) { const char *ids[] = { - "mY. random id:.", - "0", - "2222222222222222222222221111111111111111111111111111112222", - "", - "NULL", - NULL, + "mY. random id:.", + "0", + "2222222222222222222222221111111111111111111111111111112222", + "", + "NULL", + NULL, }; int i, j, k, gen_id; int ret; @@ -5877,21 +5668,20 @@ static int unittest_consumer_group_metadata (void) { const char *member_id; const char *group_instance_id; - for (i = 0 ; ids[i] ; i++) { - for (j = 0; ids[j] ; j++) { + for (i = 0; ids[i]; i++) { + for (j = 0; ids[j]; j++) { for (k = 0; ids[k]; k++) { - for (gen_id = -1; gen_id<1; gen_id++) { - group_id = ids[i]; - member_id = ids[j]; + for (gen_id = -1; gen_id < 1; gen_id++) { + group_id = ids[i]; + member_id = ids[j]; group_instance_id = ids[k]; - if (strcmp(group_instance_id, - "NULL") == 0) + if (strcmp(group_instance_id, "NULL") == + 0) group_instance_id = NULL; - ret = unittest_consumer_group_metadata_iteration( - group_id, - gen_id, - member_id, - group_instance_id); + ret = + unittest_consumer_group_metadata_iteration( + group_id, gen_id, member_id, + group_instance_id); if (ret) return ret; } @@ -5903,12 +5693,12 @@ static int unittest_consumer_group_metadata (void) { } -static int unittest_set_intersect (void) { +static int unittest_set_intersect(void) { size_t par_cnt = 10; map_toppar_member_info_t *dst; rd_kafka_topic_partition_t *toppar; PartitionMemberInfo_t *v; - char *id = "id"; + char *id = "id"; rd_kafkap_str_t id1 = RD_KAFKAP_STR_INITIALIZER; rd_kafkap_str_t id2 = RD_KAFKAP_STR_INITIALIZER; rd_kafka_group_member_t *gm1; @@ -5920,51 +5710,42 @@ static int unittest_set_intersect (void) { id2.str = id; map_toppar_member_info_t a = RD_MAP_INITIALIZER( - par_cnt, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - PartitionMemberInfo_free); + par_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + 
rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); map_toppar_member_info_t b = RD_MAP_INITIALIZER( - par_cnt, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - PartitionMemberInfo_free); - - gm1 = rd_calloc(1, sizeof(*gm1)); - gm1->rkgm_member_id = &id1; + par_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); + + gm1 = rd_calloc(1, sizeof(*gm1)); + gm1->rkgm_member_id = &id1; gm1->rkgm_group_instance_id = &id1; - gm2 = rd_calloc(1, sizeof(*gm2)); - gm2->rkgm_member_id = &id2; + gm2 = rd_calloc(1, sizeof(*gm2)); + gm2->rkgm_member_id = &id2; gm2->rkgm_group_instance_id = &id2; - RD_MAP_SET(&a, - rd_kafka_topic_partition_new("t1", 4), + RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 4), PartitionMemberInfo_new(gm1, rd_false)); - RD_MAP_SET(&a, - rd_kafka_topic_partition_new("t2", 4), + RD_MAP_SET(&a, rd_kafka_topic_partition_new("t2", 4), PartitionMemberInfo_new(gm1, rd_false)); - RD_MAP_SET(&a, - rd_kafka_topic_partition_new("t1", 7), + RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 7), PartitionMemberInfo_new(gm1, rd_false)); - RD_MAP_SET(&b, - rd_kafka_topic_partition_new("t2", 7), + RD_MAP_SET(&b, rd_kafka_topic_partition_new("t2", 7), PartitionMemberInfo_new(gm1, rd_false)); - RD_MAP_SET(&b, - rd_kafka_topic_partition_new("t1", 4), + RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 4), PartitionMemberInfo_new(gm2, rd_false)); dst = rd_kafka_member_partitions_intersect(&a, &b); - RD_UT_ASSERT(RD_MAP_CNT(&a) == 3, - "expected a cnt to be 3 not %d", (int)RD_MAP_CNT(&a)); - RD_UT_ASSERT(RD_MAP_CNT(&b) == 2, - "expected b cnt to be 2 not %d", (int)RD_MAP_CNT(&b)); - RD_UT_ASSERT(RD_MAP_CNT(dst) == 1, - "expected dst cnt to be 1 not %d", (int)RD_MAP_CNT(dst)); + RD_UT_ASSERT(RD_MAP_CNT(&a) == 3, "expected a cnt to be 3 not %d", + (int)RD_MAP_CNT(&a)); + RD_UT_ASSERT(RD_MAP_CNT(&b) == 2, "expected b cnt 
to be 2 not %d", + (int)RD_MAP_CNT(&b)); + RD_UT_ASSERT(RD_MAP_CNT(dst) == 1, "expected dst cnt to be 1 not %d", + (int)RD_MAP_CNT(dst)); toppar = rd_kafka_topic_partition_new("t1", 4); RD_UT_ASSERT((v = RD_MAP_GET(dst, toppar)), "unexpected element"); @@ -5983,50 +5764,41 @@ static int unittest_set_intersect (void) { } -static int unittest_set_subtract (void) { +static int unittest_set_subtract(void) { size_t par_cnt = 10; rd_kafka_topic_partition_t *toppar; map_toppar_member_info_t *dst; map_toppar_member_info_t a = RD_MAP_INITIALIZER( - par_cnt, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - PartitionMemberInfo_free); + par_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); map_toppar_member_info_t b = RD_MAP_INITIALIZER( - par_cnt, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - PartitionMemberInfo_free); - - RD_MAP_SET(&a, - rd_kafka_topic_partition_new("t1", 4), + par_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); + + RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 4), PartitionMemberInfo_new(NULL, rd_false)); - RD_MAP_SET(&a, - rd_kafka_topic_partition_new("t2", 7), + RD_MAP_SET(&a, rd_kafka_topic_partition_new("t2", 7), PartitionMemberInfo_new(NULL, rd_false)); - RD_MAP_SET(&b, - rd_kafka_topic_partition_new("t2", 4), + RD_MAP_SET(&b, rd_kafka_topic_partition_new("t2", 4), PartitionMemberInfo_new(NULL, rd_false)); - RD_MAP_SET(&b, - rd_kafka_topic_partition_new("t1", 4), + RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 4), PartitionMemberInfo_new(NULL, rd_false)); - RD_MAP_SET(&b, - rd_kafka_topic_partition_new("t1", 7), + RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 7), PartitionMemberInfo_new(NULL, rd_false)); dst = rd_kafka_member_partitions_subtract(&a, 
&b); - RD_UT_ASSERT(RD_MAP_CNT(&a) == 2, - "expected a cnt to be 2 not %d", (int)RD_MAP_CNT(&a)); - RD_UT_ASSERT(RD_MAP_CNT(&b) == 3, - "expected b cnt to be 3 not %d", (int)RD_MAP_CNT(&b)); - RD_UT_ASSERT(RD_MAP_CNT(dst) == 1, - "expected dst cnt to be 1 not %d", (int)RD_MAP_CNT(dst)); + RD_UT_ASSERT(RD_MAP_CNT(&a) == 2, "expected a cnt to be 2 not %d", + (int)RD_MAP_CNT(&a)); + RD_UT_ASSERT(RD_MAP_CNT(&b) == 3, "expected b cnt to be 3 not %d", + (int)RD_MAP_CNT(&b)); + RD_UT_ASSERT(RD_MAP_CNT(dst) == 1, "expected dst cnt to be 1 not %d", + (int)RD_MAP_CNT(dst)); toppar = rd_kafka_topic_partition_new("t2", 7); RD_UT_ASSERT(RD_MAP_GET(dst, toppar), "unexpected element"); @@ -6041,24 +5813,20 @@ static int unittest_set_subtract (void) { } -static int unittest_map_to_list (void) { +static int unittest_map_to_list(void) { rd_kafka_topic_partition_list_t *list; map_toppar_member_info_t map = RD_MAP_INITIALIZER( - 10, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - PartitionMemberInfo_free); - - RD_MAP_SET(&map, - rd_kafka_topic_partition_new("t1", 101), + 10, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); + + RD_MAP_SET(&map, rd_kafka_topic_partition_new("t1", 101), PartitionMemberInfo_new(NULL, rd_false)); list = rd_kafka_toppar_member_info_map_to_list(&map); - RD_UT_ASSERT(list->cnt == 1, - "expecting list size of 1 not %d.", list->cnt); + RD_UT_ASSERT(list->cnt == 1, "expecting list size of 1 not %d.", + list->cnt); RD_UT_ASSERT(list->elems[0].partition == 101, "expecting partition 101 not %d", list->elems[0].partition); @@ -6072,19 +5840,19 @@ static int unittest_map_to_list (void) { } -static int unittest_list_to_map (void) { +static int unittest_list_to_map(void) { rd_kafka_topic_partition_t *toppar; map_toppar_member_info_t *map; rd_kafka_topic_partition_list_t *list = - rd_kafka_topic_partition_list_new(1); + 
rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(list, "topic1", 201); rd_kafka_topic_partition_list_add(list, "topic2", 202); map = rd_kafka_toppar_list_to_toppar_member_info_map(list); - RD_UT_ASSERT(RD_MAP_CNT(map) == 2, - "expected map cnt to be 2 not %d", (int)RD_MAP_CNT(map)); + RD_UT_ASSERT(RD_MAP_CNT(map) == 2, "expected map cnt to be 2 not %d", + (int)RD_MAP_CNT(map)); toppar = rd_kafka_topic_partition_new("topic1", 201); RD_UT_ASSERT(RD_MAP_GET(map, toppar), "expected topic1 [201] to exist in map"); @@ -6105,7 +5873,7 @@ static int unittest_list_to_map (void) { /** * @brief Consumer group unit tests */ -int unittest_cgrp (void) { +int unittest_cgrp(void) { int fails = 0; fails += unittest_consumer_group_metadata(); diff --git a/src/rdkafka_cgrp.h b/src/rdkafka_cgrp.h index 783b3ef802..b1d09de343 100644 --- a/src/rdkafka_cgrp.h +++ b/src/rdkafka_cgrp.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2015, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -52,10 +52,10 @@ extern const char *rd_kafka_cgrp_join_state_names[]; * Client group */ typedef struct rd_kafka_cgrp_s { - const rd_kafkap_str_t *rkcg_group_id; - rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */ - rd_kafkap_str_t *rkcg_group_instance_id; - const rd_kafkap_str_t *rkcg_client_id; + const rd_kafkap_str_t *rkcg_group_id; + rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */ + rd_kafkap_str_t *rkcg_group_instance_id; + const rd_kafkap_str_t *rkcg_client_id; enum { /* Init state */ @@ -79,8 +79,8 @@ typedef struct rd_kafka_cgrp_s { /* Coordinator is up and manager is assigned. */ RD_KAFKA_CGRP_STATE_UP, } rkcg_state; - rd_ts_t rkcg_ts_statechange; /* Timestamp of last - * state change. */ + rd_ts_t rkcg_ts_statechange; /* Timestamp of last + * state change. 
*/ enum { @@ -121,87 +121,93 @@ typedef struct rd_kafka_cgrp_s { int member_cnt; } rkcg_group_leader; - rd_kafka_q_t *rkcg_q; /* Application poll queue */ - rd_kafka_q_t *rkcg_ops; /* Manager ops queue */ - rd_kafka_q_t *rkcg_wait_coord_q; /* Ops awaiting coord */ - int rkcg_flags; -#define RD_KAFKA_CGRP_F_TERMINATE 0x1 /* Terminate cgrp (async) */ -#define RD_KAFKA_CGRP_F_TERMINATED 0x2 /* Cgrp terminated */ -#define RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE 0x8 /* Send LeaveGroup when - * unassign is done */ -#define RD_KAFKA_CGRP_F_SUBSCRIPTION 0x10 /* If set: - * subscription - * else: - * static assignment */ -#define RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT 0x20 /* A Heartbeat request - * is in transit, dont - * send a new one. */ -#define RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION 0x40 /* Subscription contains - * wildcards. */ -#define RD_KAFKA_CGRP_F_WAIT_LEAVE 0x80 /* Wait for LeaveGroup - * to be sent. - * This is used to stall - * termination until - * the LeaveGroupRequest - * is responded to, - * otherwise it risks - * being dropped in the - * output queue when - * the broker is destroyed. - */ -#define RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED 0x100 /**< max.poll.interval.ms - * was exceeded and we - * left the group. - * Do not rejoin until - * the application has - * polled again. */ - - rd_interval_t rkcg_coord_query_intvl; /* Coordinator query intvl*/ - rd_interval_t rkcg_heartbeat_intvl; /* Heartbeat intvl */ - rd_interval_t rkcg_join_intvl; /* JoinGroup interval */ - rd_interval_t rkcg_timeout_scan_intvl; /* Timeout scanner */ - - rd_ts_t rkcg_ts_session_timeout; /**< Absolute session - * timeout enforced by - * the consumer, this - * value is updated on - * Heartbeat success, - * etc. 
*/ + rd_kafka_q_t *rkcg_q; /* Application poll queue */ + rd_kafka_q_t *rkcg_ops; /* Manager ops queue */ + rd_kafka_q_t *rkcg_wait_coord_q; /* Ops awaiting coord */ + int rkcg_flags; +#define RD_KAFKA_CGRP_F_TERMINATE 0x1 /* Terminate cgrp (async) */ +#define RD_KAFKA_CGRP_F_TERMINATED 0x2 /* Cgrp terminated */ +#define RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE \ + 0x8 /* Send LeaveGroup when \ + * unassign is done */ +#define RD_KAFKA_CGRP_F_SUBSCRIPTION \ + 0x10 /* If set: \ + * subscription \ + * else: \ + * static assignment */ +#define RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT \ + 0x20 /* A Heartbeat request \ + * is in transit, dont \ + * send a new one. */ +#define RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION \ + 0x40 /* Subscription contains \ + * wildcards. */ +#define RD_KAFKA_CGRP_F_WAIT_LEAVE \ + 0x80 /* Wait for LeaveGroup \ + * to be sent. \ + * This is used to stall \ + * termination until \ + * the LeaveGroupRequest \ + * is responded to, \ + * otherwise it risks \ + * being dropped in the \ + * output queue when \ + * the broker is destroyed. \ + */ +#define RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED \ + 0x100 /**< max.poll.interval.ms \ + * was exceeded and we \ + * left the group. \ + * Do not rejoin until \ + * the application has \ + * polled again. */ + + rd_interval_t rkcg_coord_query_intvl; /* Coordinator query intvl*/ + rd_interval_t rkcg_heartbeat_intvl; /* Heartbeat intvl */ + rd_interval_t rkcg_join_intvl; /* JoinGroup interval */ + rd_interval_t rkcg_timeout_scan_intvl; /* Timeout scanner */ + + rd_ts_t rkcg_ts_session_timeout; /**< Absolute session + * timeout enforced by + * the consumer, this + * value is updated on + * Heartbeat success, + * etc. */ rd_kafka_resp_err_t rkcg_last_heartbeat_err; /**< Last Heartbeat error, * used for logging. 
*/ - TAILQ_HEAD(, rd_kafka_topic_s) rkcg_topics;/* Topics subscribed to */ + TAILQ_HEAD(, rd_kafka_topic_s) rkcg_topics; /* Topics subscribed to */ - rd_list_t rkcg_toppars; /* Toppars subscribed to*/ + rd_list_t rkcg_toppars; /* Toppars subscribed to*/ - int32_t rkcg_generation_id; /* Current generation id */ + int32_t rkcg_generation_id; /* Current generation id */ - rd_kafka_assignor_t *rkcg_assignor; /**< The current partition - * assignor. used by both - * leader and members. */ - void *rkcg_assignor_state; /**< current partition - * assignor state */ + rd_kafka_assignor_t *rkcg_assignor; /**< The current partition + * assignor. used by both + * leader and members. */ + void *rkcg_assignor_state; /**< current partition + * assignor state */ - int32_t rkcg_coord_id; /**< Current coordinator id, - * or -1 if not known. */ + int32_t rkcg_coord_id; /**< Current coordinator id, + * or -1 if not known. */ - rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator - * broker handle, or NULL. - * rkcg_coord's nodename is - * updated to this broker's - * nodename when there is a - * coordinator change. */ - rd_kafka_broker_t *rkcg_coord; /**< The dedicated coordinator - * broker handle. - * Will be updated when the - * coordinator changes. */ + rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator + * broker handle, or NULL. + * rkcg_coord's nodename is + * updated to this broker's + * nodename when there is a + * coordinator change. */ + rd_kafka_broker_t *rkcg_coord; /**< The dedicated coordinator + * broker handle. + * Will be updated when the + * coordinator changes. */ - int16_t rkcg_wait_resp; /**< Awaiting response for this - * ApiKey. - * Makes sure only one - * JoinGroup or SyncGroup - * request is outstanding. - * Unset value is -1. */ + int16_t rkcg_wait_resp; /**< Awaiting response for this + * ApiKey. + * Makes sure only one + * JoinGroup or SyncGroup + * request is outstanding. + * Unset value is -1. 
*/ /** Current subscription */ rd_kafka_topic_partition_list_t *rkcg_subscription; @@ -254,119 +260,117 @@ typedef struct rd_kafka_cgrp_s { * incremental unassign. */ rd_bool_t rkcg_rebalance_rejoin; - rd_kafka_resp_err_t rkcg_last_err; /* Last error propagated to - * application. - * This is for silencing - * same errors. */ + rd_kafka_resp_err_t rkcg_last_err; /* Last error propagated to + * application. + * This is for silencing + * same errors. */ - rd_kafka_timer_t rkcg_offset_commit_tmr; /* Offset commit timer */ - rd_kafka_timer_t rkcg_max_poll_interval_tmr; /**< Enforce the max - * poll interval. */ + rd_kafka_timer_t rkcg_offset_commit_tmr; /* Offset commit timer */ + rd_kafka_timer_t rkcg_max_poll_interval_tmr; /**< Enforce the max + * poll interval. */ - rd_kafka_t *rkcg_rk; + rd_kafka_t *rkcg_rk; - rd_kafka_op_t *rkcg_reply_rko; /* Send reply for op - * (OP_TERMINATE) - * to this rko's queue. */ + rd_kafka_op_t *rkcg_reply_rko; /* Send reply for op + * (OP_TERMINATE) + * to this rko's queue. */ - rd_ts_t rkcg_ts_terminate; /* Timestamp of when - * cgrp termination was - * initiated. */ + rd_ts_t rkcg_ts_terminate; /* Timestamp of when + * cgrp termination was + * initiated. 
*/ /* Protected by rd_kafka_*lock() */ struct { - rd_ts_t ts_rebalance; /* Timestamp of - * last rebalance */ - int rebalance_cnt; /* Number of - rebalances */ - char rebalance_reason[256]; /**< Last rebalance - * reason */ - int assignment_size; /* Partition count - * of last rebalance - * assignment */ + rd_ts_t ts_rebalance; /* Timestamp of + * last rebalance */ + int rebalance_cnt; /* Number of + rebalances */ + char rebalance_reason[256]; /**< Last rebalance + * reason */ + int assignment_size; /* Partition count + * of last rebalance + * assignment */ } rkcg_c; } rd_kafka_cgrp_t; - - /* Check if broker is the coordinator */ -#define RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg,rkb) \ - ((rkcg)->rkcg_coord_id != -1 && \ +#define RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb) \ + ((rkcg)->rkcg_coord_id != -1 && \ (rkcg)->rkcg_coord_id == (rkb)->rkb_nodeid) /** * @returns true if cgrp is using static group membership */ -#define RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) \ +#define RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) \ !RD_KAFKAP_STR_IS_NULL((rkcg)->rkcg_group_instance_id) extern const char *rd_kafka_cgrp_state_names[]; extern const char *rd_kafka_cgrp_join_state_names[]; -void rd_kafka_cgrp_destroy_final (rd_kafka_cgrp_t *rkcg); -rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk, - const rd_kafkap_str_t *group_id, - const rd_kafkap_str_t *client_id); -void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg); +void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg); +rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *client_id); +void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg); -void rd_kafka_cgrp_op (rd_kafka_cgrp_t *rkcg, rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq, rd_kafka_op_type_t type, - rd_kafka_resp_err_t err); -void rd_kafka_cgrp_terminate0 (rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko); -void rd_kafka_cgrp_terminate (rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq); +void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg, + 
rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq, + rd_kafka_op_type_t type, + rd_kafka_resp_err_t err); +void rd_kafka_cgrp_terminate0(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko); +void rd_kafka_cgrp_terminate(rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq); -rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_del (rd_kafka_cgrp_t *rkcg, - const char *pattern); -rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_add (rd_kafka_cgrp_t *rkcg, - const char *pattern); +rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_del(rd_kafka_cgrp_t *rkcg, + const char *pattern); +rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_add(rd_kafka_cgrp_t *rkcg, + const char *pattern); -int rd_kafka_cgrp_topic_check (rd_kafka_cgrp_t *rkcg, const char *topic); +int rd_kafka_cgrp_topic_check(rd_kafka_cgrp_t *rkcg, const char *topic); -void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id); +void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id); -void rd_kafka_cgrp_set_join_state (rd_kafka_cgrp_t *rkcg, int join_state); +void rd_kafka_cgrp_set_join_state(rd_kafka_cgrp_t *rkcg, int join_state); -rd_kafka_broker_t *rd_kafka_cgrp_get_coord (rd_kafka_cgrp_t *rkcg); -void rd_kafka_cgrp_coord_query (rd_kafka_cgrp_t *rkcg, - const char *reason); -void rd_kafka_cgrp_coord_dead (rd_kafka_cgrp_t *rkcg, rd_kafka_resp_err_t err, - const char *reason); -void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, - rd_bool_t do_join); +rd_kafka_broker_t *rd_kafka_cgrp_get_coord(rd_kafka_cgrp_t *rkcg); +void rd_kafka_cgrp_coord_query(rd_kafka_cgrp_t *rkcg, const char *reason); +void rd_kafka_cgrp_coord_dead(rd_kafka_cgrp_t *rkcg, + rd_kafka_resp_err_t err, + const char *reason); +void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg, + rd_bool_t do_join); #define rd_kafka_cgrp_get(rk) ((rk)->rk_cgrp) -void -rd_kafka_cgrp_assigned_offsets_commit (rd_kafka_cgrp_t *rkcg, - const rd_kafka_topic_partition_list_t - *offsets, rd_bool_t set_offsets, - const 
char *reason); +void rd_kafka_cgrp_assigned_offsets_commit( + rd_kafka_cgrp_t *rkcg, + const rd_kafka_topic_partition_list_t *offsets, + rd_bool_t set_offsets, + const char *reason); -void rd_kafka_cgrp_assignment_done (rd_kafka_cgrp_t *rkcg); +void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg); -rd_bool_t rd_kafka_cgrp_assignment_is_lost (rd_kafka_cgrp_t *rkcg); +rd_bool_t rd_kafka_cgrp_assignment_is_lost(rd_kafka_cgrp_t *rkcg); struct rd_kafka_consumer_group_metadata_s { char *group_id; int32_t generation_id; char *member_id; - char *group_instance_id; /**< Optional (NULL) */ + char *group_instance_id; /**< Optional (NULL) */ }; -rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata_dup ( - const rd_kafka_consumer_group_metadata_t *cgmetadata); +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup( + const rd_kafka_consumer_group_metadata_t *cgmetadata); static RD_UNUSED const char * -rd_kafka_rebalance_protocol2str (rd_kafka_rebalance_protocol_t protocol) { - switch (protocol) - { +rd_kafka_rebalance_protocol2str(rd_kafka_rebalance_protocol_t protocol) { + switch (protocol) { case RD_KAFKA_REBALANCE_PROTOCOL_EAGER: return "EAGER"; case RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE: diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index 35592cf2c0..0172cfa766 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012,2013 Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -56,64 +56,71 @@ #endif struct rd_kafka_property { - rd_kafka_conf_scope_t scope; - const char *name; - enum { - _RK_C_STR, - _RK_C_INT, - _RK_C_DBL, /* Double */ - _RK_C_S2I, /* String to Integer mapping. 
- * Supports limited canonical str->int mappings - * using s2i[] */ - _RK_C_S2F, /* CSV String to Integer flag mapping (OR:ed) */ - _RK_C_BOOL, - _RK_C_PTR, /* Only settable through special set functions */ - _RK_C_PATLIST, /* Pattern list */ - _RK_C_KSTR, /* Kafka string */ - _RK_C_ALIAS, /* Alias: points to other property through .sdef */ - _RK_C_INTERNAL, /* Internal, don't expose to application */ - _RK_C_INVALID, /* Invalid property, used to catch known - * but unsupported Java properties. */ - } type; - int offset; - const char *desc; - int vmin; - int vmax; - int vdef; /* Default value (int) */ - const char *sdef; /* Default value (string) */ - void *pdef; /* Default value (pointer) */ - double ddef; /* Default value (double) */ + rd_kafka_conf_scope_t scope; + const char *name; + enum { _RK_C_STR, + _RK_C_INT, + _RK_C_DBL, /* Double */ + _RK_C_S2I, /* String to Integer mapping. + * Supports limited canonical str->int mappings + * using s2i[] */ + _RK_C_S2F, /* CSV String to Integer flag mapping (OR:ed) */ + _RK_C_BOOL, + _RK_C_PTR, /* Only settable through special set functions */ + _RK_C_PATLIST, /* Pattern list */ + _RK_C_KSTR, /* Kafka string */ + _RK_C_ALIAS, /* Alias: points to other property through .sdef */ + _RK_C_INTERNAL, /* Internal, don't expose to application */ + _RK_C_INVALID, /* Invalid property, used to catch known + * but unsupported Java properties. */ + } type; + int offset; + const char *desc; + int vmin; + int vmax; + int vdef; /* Default value (int) */ + const char *sdef; /* Default value (string) */ + void *pdef; /* Default value (pointer) */ + double ddef; /* Default value (double) */ double dmin; double dmax; - struct { - int val; - const char *str; + struct { + int val; + const char *str; const char *unsupported; /**< Reason for value not being * supported in this build. 
*/ - } s2i[20]; /* _RK_C_S2I and _RK_C_S2F */ + } s2i[20]; /* _RK_C_S2I and _RK_C_S2F */ const char *unsupported; /**< Reason for propery not being supported * in this build. * Will be included in the conf_set() * error string. */ - /* Value validator (STR) */ - int (*validate) (const struct rd_kafka_property *prop, - const char *val, int ival); + /* Value validator (STR) */ + int (*validate)(const struct rd_kafka_property *prop, + const char *val, + int ival); /* Configuration object constructors and destructor for use when * the property value itself is not used, or needs extra care. */ - void (*ctor) (int scope, void *pconf); - void (*dtor) (int scope, void *pconf); - void (*copy) (int scope, void *pdst, const void *psrc, - void *dstptr, const void *srcptr, - size_t filter_cnt, const char **filter); - - rd_kafka_conf_res_t (*set) (int scope, void *pconf, - const char *name, const char *value, - void *dstptr, - rd_kafka_conf_set_mode_t set_mode, - char *errstr, size_t errstr_size); + void (*ctor)(int scope, void *pconf); + void (*dtor)(int scope, void *pconf); + void (*copy)(int scope, + void *pdst, + const void *psrc, + void *dstptr, + const void *srcptr, + size_t filter_cnt, + const char **filter); + + rd_kafka_conf_res_t (*set)(int scope, + void *pconf, + const char *name, + const char *value, + void *dstptr, + rd_kafka_conf_set_mode_t set_mode, + char *errstr, + size_t errstr_size); }; @@ -126,18 +133,20 @@ struct rd_kafka_property { #define _UNSUPPORTED_SSL .unsupported = "OpenSSL not available at build time" #endif -#if OPENSSL_VERSION_NUMBER >= 0x1000200fL && defined(WITH_SSL) && !defined(LIBRESSL_VERSION_NUMBER) +#if OPENSSL_VERSION_NUMBER >= 0x1000200fL && defined(WITH_SSL) && \ + !defined(LIBRESSL_VERSION_NUMBER) #define _UNSUPPORTED_OPENSSL_1_0_2 .unsupported = NULL #else -#define _UNSUPPORTED_OPENSSL_1_0_2 .unsupported = \ - "OpenSSL >= 1.0.2 not available at build time" +#define _UNSUPPORTED_OPENSSL_1_0_2 \ + .unsupported = "OpenSSL >= 1.0.2 not 
available at build time" #endif -#if OPENSSL_VERSION_NUMBER >= 0x10100000 && defined(WITH_SSL) && !defined(LIBRESSL_VERSION_NUMBER) +#if OPENSSL_VERSION_NUMBER >= 0x10100000 && defined(WITH_SSL) && \ + !defined(LIBRESSL_VERSION_NUMBER) #define _UNSUPPORTED_OPENSSL_1_1_0 .unsupported = NULL #else -#define _UNSUPPORTED_OPENSSL_1_1_0 .unsupported = \ - "OpenSSL >= 1.1.0 not available at build time" +#define _UNSUPPORTED_OPENSSL_1_1_0 \ + .unsupported = "OpenSSL >= 1.1.0 not available at build time" #endif @@ -164,34 +173,37 @@ struct rd_kafka_property { #define _UNSUPPORTED_OIDC .unsupported = NULL #else #define _UNSUPPORTED_HTTP .unsupported = "libcurl not available at build time" -#define _UNSUPPORTED_OIDC .unsupported = \ - "OAuth/OIDC depends on libcurl which was not available " \ - "at build time" +#define _UNSUPPORTED_OIDC \ + .unsupported = \ + "OAuth/OIDC depends on libcurl which was not available " \ + "at build time" #endif #ifdef _WIN32 -#define _UNSUPPORTED_WIN32_GSSAPI .unsupported = \ - "Kerberos keytabs are not supported on Windows, " \ - "instead the logged on " \ - "user's credentials are used through native SSPI" +#define _UNSUPPORTED_WIN32_GSSAPI \ + .unsupported = \ + "Kerberos keytabs are not supported on Windows, " \ + "instead the logged on " \ + "user's credentials are used through native SSPI" #else - #define _UNSUPPORTED_WIN32_GSSAPI .unsupported = NULL +#define _UNSUPPORTED_WIN32_GSSAPI .unsupported = NULL #endif #if defined(_WIN32) || defined(WITH_SASL_CYRUS) #define _UNSUPPORTED_GSSAPI .unsupported = NULL #else -#define _UNSUPPORTED_GSSAPI .unsupported = \ - "cyrus-sasl/libsasl2 not available at build time" +#define _UNSUPPORTED_GSSAPI \ + .unsupported = "cyrus-sasl/libsasl2 not available at build time" #endif #define _UNSUPPORTED_OAUTHBEARER _UNSUPPORTED_SSL static rd_kafka_conf_res_t -rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, - char *dest, size_t *dest_size); - +rd_kafka_anyconf_get0(const void *conf, 
+ const struct rd_kafka_property *prop, + char *dest, + size_t *dest_size); @@ -199,7 +211,7 @@ rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, * @returns a unique index for property \p prop, using the byte position * of the field. */ -static RD_INLINE int rd_kafka_prop2idx (const struct rd_kafka_property *prop) { +static RD_INLINE int rd_kafka_prop2idx(const struct rd_kafka_property *prop) { return prop->offset; } @@ -215,12 +227,12 @@ static RD_INLINE int rd_kafka_prop2idx (const struct rd_kafka_property *prop) { * * \p is_modified 1: set as modified, 0: clear modified */ -static void rd_kafka_anyconf_set_modified (void *conf, - const struct rd_kafka_property *prop, - int is_modified) { - int idx = rd_kafka_prop2idx(prop); - int bkt = idx / 64; - uint64_t bit = (uint64_t)1 << (idx % 64); +static void rd_kafka_anyconf_set_modified(void *conf, + const struct rd_kafka_property *prop, + int is_modified) { + int idx = rd_kafka_prop2idx(prop); + int bkt = idx / 64; + uint64_t bit = (uint64_t)1 << (idx % 64); struct rd_kafka_anyconf_hdr *confhdr = conf; rd_assert(idx < RD_KAFKA_CONF_PROPS_IDX_MAX && @@ -236,7 +248,7 @@ static void rd_kafka_anyconf_set_modified (void *conf, * @brief Clear is_modified for all properties. * @warning Does NOT clear/reset the value. */ -static void rd_kafka_anyconf_clear_all_is_modified (void *conf) { +static void rd_kafka_anyconf_clear_all_is_modified(void *conf) { struct rd_kafka_anyconf_hdr *confhdr = conf; memset(confhdr, 0, sizeof(*confhdr)); @@ -247,11 +259,11 @@ static void rd_kafka_anyconf_clear_all_is_modified (void *conf) { * @returns true of the property has been set/modified, else false. 
*/ static rd_bool_t -rd_kafka_anyconf_is_modified (const void *conf, - const struct rd_kafka_property *prop) { - int idx = rd_kafka_prop2idx(prop); - int bkt = idx / 64; - uint64_t bit = (uint64_t)1 << (idx % 64); +rd_kafka_anyconf_is_modified(const void *conf, + const struct rd_kafka_property *prop) { + int idx = rd_kafka_prop2idx(prop); + int bkt = idx / 64; + uint64_t bit = (uint64_t)1 << (idx % 64); const struct rd_kafka_anyconf_hdr *confhdr = conf; return !!(confhdr->modified[bkt] & bit); @@ -260,12 +272,11 @@ rd_kafka_anyconf_is_modified (const void *conf, /** * @returns true if any property in \p conf has been set/modified. */ -static rd_bool_t -rd_kafka_anyconf_is_any_modified (const void *conf) { +static rd_bool_t rd_kafka_anyconf_is_any_modified(const void *conf) { const struct rd_kafka_anyconf_hdr *confhdr = conf; int i; - for (i = 0 ; i < (int)RD_ARRAYSIZE(confhdr->modified) ; i++) + for (i = 0; i < (int)RD_ARRAYSIZE(confhdr->modified); i++) if (confhdr->modified[i]) return rd_true; @@ -278,35 +289,35 @@ rd_kafka_anyconf_is_any_modified (const void *conf) { * @brief Validate \p broker.version.fallback property. */ static int -rd_kafka_conf_validate_broker_version (const struct rd_kafka_property *prop, - const char *val, int ival) { - struct rd_kafka_ApiVersion *apis; - size_t api_cnt; - return rd_kafka_get_legacy_ApiVersions(val, &apis, &api_cnt, NULL); +rd_kafka_conf_validate_broker_version(const struct rd_kafka_property *prop, + const char *val, + int ival) { + struct rd_kafka_ApiVersion *apis; + size_t api_cnt; + return rd_kafka_get_legacy_ApiVersions(val, &apis, &api_cnt, NULL); } /** * @brief Validate that string is a single item, without delimters (, space). 
*/ static RD_UNUSED int -rd_kafka_conf_validate_single (const struct rd_kafka_property *prop, - const char *val, int ival) { - return !strchr(val, ',') && !strchr(val, ' '); +rd_kafka_conf_validate_single(const struct rd_kafka_property *prop, + const char *val, + int ival) { + return !strchr(val, ',') && !strchr(val, ' '); } /** * @brief Validate builtin partitioner string */ static RD_UNUSED int -rd_kafka_conf_validate_partitioner (const struct rd_kafka_property *prop, - const char *val, int ival) { - return !strcmp(val, "random") || - !strcmp(val, "consistent") || - !strcmp(val, "consistent_random") || - !strcmp(val, "murmur2") || - !strcmp(val, "murmur2_random") || - !strcmp(val, "fnv1a") || - !strcmp(val, "fnv1a_random"); +rd_kafka_conf_validate_partitioner(const struct rd_kafka_property *prop, + const char *val, + int ival) { + return !strcmp(val, "random") || !strcmp(val, "consistent") || + !strcmp(val, "consistent_random") || !strcmp(val, "murmur2") || + !strcmp(val, "murmur2_random") || !strcmp(val, "fnv1a") || + !strcmp(val, "fnv1a_random"); } @@ -314,1430 +325,1269 @@ rd_kafka_conf_validate_partitioner (const struct rd_kafka_property *prop, * librdkafka configuration property definitions. */ static const struct rd_kafka_property rd_kafka_properties[] = { - /* Global properties */ - { _RK_GLOBAL, "builtin.features", _RK_C_S2F, _RK(builtin_features), - "Indicates the builtin features for this build of librdkafka. 
" - "An application can either query this value or attempt to set it " - "with its list of required features to check for library support.", - 0, 0x7fffffff, 0xffff, - .s2i = { - { 0x1, "gzip", _UNSUPPORTED_ZLIB }, - { 0x2, "snappy", _UNSUPPORTED_SNAPPY }, - { 0x4, "ssl", _UNSUPPORTED_SSL }, - { 0x8, "sasl" }, - { 0x10, "regex" }, - { 0x20, "lz4" }, - { 0x40, "sasl_gssapi", _UNSUPPORTED_GSSAPI }, - { 0x80, "sasl_plain" }, - { 0x100, "sasl_scram", _UNSUPPORTED_SSL }, - { 0x200, "plugins" + /* Global properties */ + {_RK_GLOBAL, "builtin.features", _RK_C_S2F, _RK(builtin_features), + "Indicates the builtin features for this build of librdkafka. " + "An application can either query this value or attempt to set it " + "with its list of required features to check for library support.", + 0, 0x7fffffff, 0xffff, + .s2i = {{0x1, "gzip", _UNSUPPORTED_ZLIB}, + {0x2, "snappy", _UNSUPPORTED_SNAPPY}, + {0x4, "ssl", _UNSUPPORTED_SSL}, + {0x8, "sasl"}, + {0x10, "regex"}, + {0x20, "lz4"}, + {0x40, "sasl_gssapi", _UNSUPPORTED_GSSAPI}, + {0x80, "sasl_plain"}, + {0x100, "sasl_scram", _UNSUPPORTED_SSL}, + {0x200, "plugins" #if !WITH_PLUGINS - , .unsupported = "libdl/dlopen(3) not available at " - "build time" + , + .unsupported = "libdl/dlopen(3) not available at " + "build time" #endif - }, - { 0x400, "zstd", _UNSUPPORTED_ZSTD }, - { 0x800, "sasl_oauthbearer", _UNSUPPORTED_SSL }, - { 0x1000, "http", _UNSUPPORTED_HTTP }, - { 0x2000, "oidc", _UNSUPPORTED_OIDC }, - { 0, NULL } - } - }, - { _RK_GLOBAL, "client.id", _RK_C_STR, _RK(client_id_str), - "Client identifier.", - .sdef = "rdkafka" }, - { _RK_GLOBAL|_RK_HIDDEN, "client.software.name", _RK_C_STR, - _RK(sw_name), - "Client software name as reported to broker version >= v2.4.0. " - "Broker-side character restrictions apply, as of broker version " - "v2.4.0 the allowed characters are `a-zA-Z0-9.-`. 
The local client " - "will replace any other character with `-` and strip leading and " - "trailing non-alphanumeric characters before tranmission to " - "the broker. " - "This property should only be set by high-level language " - "librdkafka client bindings.", - .sdef = "librdkafka" - }, - { _RK_GLOBAL|_RK_HIDDEN, "client.software.version", _RK_C_STR, - _RK(sw_version), - "Client software version as reported to broker version >= v2.4.0. " - "Broker-side character restrictions apply, as of broker version " - "v2.4.0 the allowed characters are `a-zA-Z0-9.-`. The local client " - "will replace any other character with `-` and strip leading and " - "trailing non-alphanumeric characters before tranmission to " - "the broker. " - "This property should only be set by high-level language " - "librdkafka client bindings." - "If changing this property it is highly recommended to append the " - "librdkafka version.", - }, - { _RK_GLOBAL|_RK_HIGH, "metadata.broker.list", _RK_C_STR, - _RK(brokerlist), - "Initial list of brokers as a CSV list of broker host or host:port. " - "The application may also use `rd_kafka_brokers_add()` to add " - "brokers during runtime." }, - { _RK_GLOBAL|_RK_HIGH, "bootstrap.servers", _RK_C_ALIAS, 0, - "See metadata.broker.list", - .sdef = "metadata.broker.list" }, - { _RK_GLOBAL|_RK_MED, "message.max.bytes", _RK_C_INT, _RK(max_msg_size), - "Maximum Kafka protocol request message size. " - "Due to differing framing overhead between protocol versions the " - "producer is unable to reliably enforce a strict max message limit " - "at produce time and may exceed the maximum size by one message in " - "protocol ProduceRequests, the broker will enforce the the topic's " - "`max.message.bytes` limit (see Apache Kafka documentation).", - 1000, 1000000000, 1000000 }, - { _RK_GLOBAL, "message.copy.max.bytes", _RK_C_INT, - _RK(msg_copy_max_size), - "Maximum size for message to be copied to buffer. 
" - "Messages larger than this will be passed by reference (zero-copy) " - "at the expense of larger iovecs.", - 0, 1000000000, 0xffff }, - { _RK_GLOBAL|_RK_MED, "receive.message.max.bytes", _RK_C_INT, - _RK(recv_max_msg_size), - "Maximum Kafka protocol response message size. " - "This serves as a safety precaution to avoid memory exhaustion in " - "case of protocol hickups. " - "This value must be at least `fetch.max.bytes` + 512 to allow " - "for protocol overhead; the value is adjusted automatically " - "unless the configuration property is explicitly set.", - 1000, INT_MAX, 100000000 }, - { _RK_GLOBAL, "max.in.flight.requests.per.connection", _RK_C_INT, - _RK(max_inflight), - "Maximum number of in-flight requests per broker connection. " - "This is a generic property applied to all broker communication, " - "however it is primarily relevant to produce requests. " - "In particular, note that other mechanisms limit the number " - "of outstanding consumer fetch request per broker to one.", - 1, 1000000, 1000000 }, - { _RK_GLOBAL, "max.in.flight", _RK_C_ALIAS, - .sdef = "max.in.flight.requests.per.connection" }, - { _RK_GLOBAL|_RK_DEPRECATED|_RK_HIDDEN, - "metadata.request.timeout.ms", _RK_C_INT, - _RK(metadata_request_timeout_ms), - "Not used.", - 10, 900*1000, 10 }, - { _RK_GLOBAL, "topic.metadata.refresh.interval.ms", _RK_C_INT, - _RK(metadata_refresh_interval_ms), - "Period of time in milliseconds at which topic and broker " - "metadata is refreshed in order to proactively discover any new " - "brokers, topics, partitions or partition leader changes. " - "Use -1 to disable the intervalled refresh (not recommended). 
" - "If there are no locally referenced topics " - "(no topic objects created, no messages produced, " - "no subscription or no assignment) then only the broker list will " - "be refreshed every interval but no more often than every 10s.", - -1, 3600*1000, 5*60*1000 }, - { _RK_GLOBAL, "metadata.max.age.ms", _RK_C_INT, - _RK(metadata_max_age_ms), - "Metadata cache max age. " - "Defaults to topic.metadata.refresh.interval.ms * 3", - 1, 24*3600*1000, 5*60*1000 * 3 }, - { _RK_GLOBAL, "topic.metadata.refresh.fast.interval.ms", _RK_C_INT, - _RK(metadata_refresh_fast_interval_ms), - "When a topic loses its leader a new metadata request will be " - "enqueued with this initial interval, exponentially increasing " - "until the topic metadata has been refreshed. " - "This is used to recover quickly from transitioning leader brokers.", - 1, 60*1000, 250 }, - { _RK_GLOBAL|_RK_DEPRECATED, - "topic.metadata.refresh.fast.cnt", _RK_C_INT, - _RK(metadata_refresh_fast_cnt), - "No longer used.", - 0, 1000, 10 }, - { _RK_GLOBAL, "topic.metadata.refresh.sparse", _RK_C_BOOL, - _RK(metadata_refresh_sparse), - "Sparse metadata requests (consumes less network bandwidth)", - 0, 1, 1 }, - { _RK_GLOBAL, "topic.metadata.propagation.max.ms", _RK_C_INT, - _RK(metadata_propagation_max_ms), - "Apache Kafka topic creation is asynchronous and it takes some " - "time for a new topic to propagate throughout the cluster to all " - "brokers. " - "If a client requests topic metadata after manual topic creation but " - "before the topic has been fully propagated to the broker the " - "client is requesting metadata from, the topic will seem to be " - "non-existent and the client will mark the topic as such, " - "failing queued produced messages with `ERR__UNKNOWN_TOPIC`. " - "This setting delays marking a topic as non-existent until the " - "configured propagation max time has passed. 
" - "The maximum propagation time is calculated from the time the " - "topic is first referenced in the client, e.g., on produce().", - 0, 60*60*1000, 30*1000 }, - { _RK_GLOBAL, "topic.blacklist", _RK_C_PATLIST, - _RK(topic_blacklist), - "Topic blacklist, a comma-separated list of regular expressions " - "for matching topic names that should be ignored in " - "broker metadata information as if the topics did not exist." }, - { _RK_GLOBAL|_RK_MED, "debug", _RK_C_S2F, _RK(debug), - "A comma-separated list of debug contexts to enable. " - "Detailed Producer debugging: broker,topic,msg. " - "Consumer: consumer,cgrp,topic,fetch", - .s2i = { - { RD_KAFKA_DBG_GENERIC, "generic" }, - { RD_KAFKA_DBG_BROKER, "broker" }, - { RD_KAFKA_DBG_TOPIC, "topic" }, - { RD_KAFKA_DBG_METADATA, "metadata" }, - { RD_KAFKA_DBG_FEATURE, "feature" }, - { RD_KAFKA_DBG_QUEUE, "queue" }, - { RD_KAFKA_DBG_MSG, "msg" }, - { RD_KAFKA_DBG_PROTOCOL, "protocol" }, - { RD_KAFKA_DBG_CGRP, "cgrp" }, - { RD_KAFKA_DBG_SECURITY, "security" }, - { RD_KAFKA_DBG_FETCH, "fetch" }, - { RD_KAFKA_DBG_INTERCEPTOR, "interceptor" }, - { RD_KAFKA_DBG_PLUGIN, "plugin" }, - { RD_KAFKA_DBG_CONSUMER, "consumer" }, - { RD_KAFKA_DBG_ADMIN, "admin" }, - { RD_KAFKA_DBG_EOS, "eos" }, - { RD_KAFKA_DBG_MOCK, "mock" }, - { RD_KAFKA_DBG_ASSIGNOR, "assignor" }, - { RD_KAFKA_DBG_CONF, "conf" }, - { RD_KAFKA_DBG_ALL, "all" } - } }, - { _RK_GLOBAL, "socket.timeout.ms", _RK_C_INT, _RK(socket_timeout_ms), - "Default timeout for network requests. " - "Producer: ProduceRequests will use the lesser value of " - "`socket.timeout.ms` and remaining `message.timeout.ms` for the " - "first message in the batch. " - "Consumer: FetchRequests will use " - "`fetch.wait.max.ms` + `socket.timeout.ms`. 
" - "Admin: Admin requests will use `socket.timeout.ms` or explicitly " - "set `rd_kafka_AdminOptions_set_operation_timeout()` value.", - 10, 300*1000, 60*1000 }, - { _RK_GLOBAL|_RK_DEPRECATED, "socket.blocking.max.ms", _RK_C_INT, - _RK(socket_blocking_max_ms), - "No longer used.", - 1, 60*1000, 1000 }, - { _RK_GLOBAL, "socket.send.buffer.bytes", _RK_C_INT, - _RK(socket_sndbuf_size), - "Broker socket send buffer size. System default is used if 0.", - 0, 100000000, 0 }, - { _RK_GLOBAL, "socket.receive.buffer.bytes", _RK_C_INT, - _RK(socket_rcvbuf_size), - "Broker socket receive buffer size. System default is used if 0.", - 0, 100000000, 0 }, - { _RK_GLOBAL, "socket.keepalive.enable", _RK_C_BOOL, - _RK(socket_keepalive), - "Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets", - 0, 1, 0 + }, + {0x400, "zstd", _UNSUPPORTED_ZSTD}, + {0x800, "sasl_oauthbearer", _UNSUPPORTED_SSL}, + {0x1000, "http", _UNSUPPORTED_HTTP}, + {0x2000, "oidc", _UNSUPPORTED_OIDC}, + {0, NULL}}}, + {_RK_GLOBAL, "client.id", _RK_C_STR, _RK(client_id_str), + "Client identifier.", .sdef = "rdkafka"}, + {_RK_GLOBAL | _RK_HIDDEN, "client.software.name", _RK_C_STR, _RK(sw_name), + "Client software name as reported to broker version >= v2.4.0. " + "Broker-side character restrictions apply, as of broker version " + "v2.4.0 the allowed characters are `a-zA-Z0-9.-`. The local client " + "will replace any other character with `-` and strip leading and " + "trailing non-alphanumeric characters before tranmission to " + "the broker. " + "This property should only be set by high-level language " + "librdkafka client bindings.", + .sdef = "librdkafka"}, + { + _RK_GLOBAL | _RK_HIDDEN, + "client.software.version", + _RK_C_STR, + _RK(sw_version), + "Client software version as reported to broker version >= v2.4.0. " + "Broker-side character restrictions apply, as of broker version " + "v2.4.0 the allowed characters are `a-zA-Z0-9.-`. 
The local client " + "will replace any other character with `-` and strip leading and " + "trailing non-alphanumeric characters before tranmission to " + "the broker. " + "This property should only be set by high-level language " + "librdkafka client bindings." + "If changing this property it is highly recommended to append the " + "librdkafka version.", + }, + {_RK_GLOBAL | _RK_HIGH, "metadata.broker.list", _RK_C_STR, _RK(brokerlist), + "Initial list of brokers as a CSV list of broker host or host:port. " + "The application may also use `rd_kafka_brokers_add()` to add " + "brokers during runtime."}, + {_RK_GLOBAL | _RK_HIGH, "bootstrap.servers", _RK_C_ALIAS, 0, + "See metadata.broker.list", .sdef = "metadata.broker.list"}, + {_RK_GLOBAL | _RK_MED, "message.max.bytes", _RK_C_INT, _RK(max_msg_size), + "Maximum Kafka protocol request message size. " + "Due to differing framing overhead between protocol versions the " + "producer is unable to reliably enforce a strict max message limit " + "at produce time and may exceed the maximum size by one message in " + "protocol ProduceRequests, the broker will enforce the the topic's " + "`max.message.bytes` limit (see Apache Kafka documentation).", + 1000, 1000000000, 1000000}, + {_RK_GLOBAL, "message.copy.max.bytes", _RK_C_INT, _RK(msg_copy_max_size), + "Maximum size for message to be copied to buffer. " + "Messages larger than this will be passed by reference (zero-copy) " + "at the expense of larger iovecs.", + 0, 1000000000, 0xffff}, + {_RK_GLOBAL | _RK_MED, "receive.message.max.bytes", _RK_C_INT, + _RK(recv_max_msg_size), + "Maximum Kafka protocol response message size. " + "This serves as a safety precaution to avoid memory exhaustion in " + "case of protocol hickups. 
" + "This value must be at least `fetch.max.bytes` + 512 to allow " + "for protocol overhead; the value is adjusted automatically " + "unless the configuration property is explicitly set.", + 1000, INT_MAX, 100000000}, + {_RK_GLOBAL, "max.in.flight.requests.per.connection", _RK_C_INT, + _RK(max_inflight), + "Maximum number of in-flight requests per broker connection. " + "This is a generic property applied to all broker communication, " + "however it is primarily relevant to produce requests. " + "In particular, note that other mechanisms limit the number " + "of outstanding consumer fetch request per broker to one.", + 1, 1000000, 1000000}, + {_RK_GLOBAL, "max.in.flight", _RK_C_ALIAS, + .sdef = "max.in.flight.requests.per.connection"}, + {_RK_GLOBAL | _RK_DEPRECATED | _RK_HIDDEN, "metadata.request.timeout.ms", + _RK_C_INT, _RK(metadata_request_timeout_ms), "Not used.", 10, 900 * 1000, + 10}, + {_RK_GLOBAL, "topic.metadata.refresh.interval.ms", _RK_C_INT, + _RK(metadata_refresh_interval_ms), + "Period of time in milliseconds at which topic and broker " + "metadata is refreshed in order to proactively discover any new " + "brokers, topics, partitions or partition leader changes. " + "Use -1 to disable the intervalled refresh (not recommended). " + "If there are no locally referenced topics " + "(no topic objects created, no messages produced, " + "no subscription or no assignment) then only the broker list will " + "be refreshed every interval but no more often than every 10s.", + -1, 3600 * 1000, 5 * 60 * 1000}, + {_RK_GLOBAL, "metadata.max.age.ms", _RK_C_INT, _RK(metadata_max_age_ms), + "Metadata cache max age. 
" + "Defaults to topic.metadata.refresh.interval.ms * 3", + 1, 24 * 3600 * 1000, 5 * 60 * 1000 * 3}, + {_RK_GLOBAL, "topic.metadata.refresh.fast.interval.ms", _RK_C_INT, + _RK(metadata_refresh_fast_interval_ms), + "When a topic loses its leader a new metadata request will be " + "enqueued with this initial interval, exponentially increasing " + "until the topic metadata has been refreshed. " + "This is used to recover quickly from transitioning leader brokers.", + 1, 60 * 1000, 250}, + {_RK_GLOBAL | _RK_DEPRECATED, "topic.metadata.refresh.fast.cnt", _RK_C_INT, + _RK(metadata_refresh_fast_cnt), "No longer used.", 0, 1000, 10}, + {_RK_GLOBAL, "topic.metadata.refresh.sparse", _RK_C_BOOL, + _RK(metadata_refresh_sparse), + "Sparse metadata requests (consumes less network bandwidth)", 0, 1, 1}, + {_RK_GLOBAL, "topic.metadata.propagation.max.ms", _RK_C_INT, + _RK(metadata_propagation_max_ms), + "Apache Kafka topic creation is asynchronous and it takes some " + "time for a new topic to propagate throughout the cluster to all " + "brokers. " + "If a client requests topic metadata after manual topic creation but " + "before the topic has been fully propagated to the broker the " + "client is requesting metadata from, the topic will seem to be " + "non-existent and the client will mark the topic as such, " + "failing queued produced messages with `ERR__UNKNOWN_TOPIC`. " + "This setting delays marking a topic as non-existent until the " + "configured propagation max time has passed. 
" + "The maximum propagation time is calculated from the time the " + "topic is first referenced in the client, e.g., on produce().", + 0, 60 * 60 * 1000, 30 * 1000}, + {_RK_GLOBAL, "topic.blacklist", _RK_C_PATLIST, _RK(topic_blacklist), + "Topic blacklist, a comma-separated list of regular expressions " + "for matching topic names that should be ignored in " + "broker metadata information as if the topics did not exist."}, + {_RK_GLOBAL | _RK_MED, "debug", _RK_C_S2F, _RK(debug), + "A comma-separated list of debug contexts to enable. " + "Detailed Producer debugging: broker,topic,msg. " + "Consumer: consumer,cgrp,topic,fetch", + .s2i = {{RD_KAFKA_DBG_GENERIC, "generic"}, + {RD_KAFKA_DBG_BROKER, "broker"}, + {RD_KAFKA_DBG_TOPIC, "topic"}, + {RD_KAFKA_DBG_METADATA, "metadata"}, + {RD_KAFKA_DBG_FEATURE, "feature"}, + {RD_KAFKA_DBG_QUEUE, "queue"}, + {RD_KAFKA_DBG_MSG, "msg"}, + {RD_KAFKA_DBG_PROTOCOL, "protocol"}, + {RD_KAFKA_DBG_CGRP, "cgrp"}, + {RD_KAFKA_DBG_SECURITY, "security"}, + {RD_KAFKA_DBG_FETCH, "fetch"}, + {RD_KAFKA_DBG_INTERCEPTOR, "interceptor"}, + {RD_KAFKA_DBG_PLUGIN, "plugin"}, + {RD_KAFKA_DBG_CONSUMER, "consumer"}, + {RD_KAFKA_DBG_ADMIN, "admin"}, + {RD_KAFKA_DBG_EOS, "eos"}, + {RD_KAFKA_DBG_MOCK, "mock"}, + {RD_KAFKA_DBG_ASSIGNOR, "assignor"}, + {RD_KAFKA_DBG_CONF, "conf"}, + {RD_KAFKA_DBG_ALL, "all"}}}, + {_RK_GLOBAL, "socket.timeout.ms", _RK_C_INT, _RK(socket_timeout_ms), + "Default timeout for network requests. " + "Producer: ProduceRequests will use the lesser value of " + "`socket.timeout.ms` and remaining `message.timeout.ms` for the " + "first message in the batch. " + "Consumer: FetchRequests will use " + "`fetch.wait.max.ms` + `socket.timeout.ms`. 
" + "Admin: Admin requests will use `socket.timeout.ms` or explicitly " + "set `rd_kafka_AdminOptions_set_operation_timeout()` value.", + 10, 300 * 1000, 60 * 1000}, + {_RK_GLOBAL | _RK_DEPRECATED, "socket.blocking.max.ms", _RK_C_INT, + _RK(socket_blocking_max_ms), "No longer used.", 1, 60 * 1000, 1000}, + {_RK_GLOBAL, "socket.send.buffer.bytes", _RK_C_INT, _RK(socket_sndbuf_size), + "Broker socket send buffer size. System default is used if 0.", 0, + 100000000, 0}, + {_RK_GLOBAL, "socket.receive.buffer.bytes", _RK_C_INT, + _RK(socket_rcvbuf_size), + "Broker socket receive buffer size. System default is used if 0.", 0, + 100000000, 0}, + {_RK_GLOBAL, "socket.keepalive.enable", _RK_C_BOOL, _RK(socket_keepalive), + "Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets", 0, 1, 0 #ifndef SO_KEEPALIVE - , .unsupported = "SO_KEEPALIVE not available at build time" + , + .unsupported = "SO_KEEPALIVE not available at build time" #endif - }, - { _RK_GLOBAL, "socket.nagle.disable", _RK_C_BOOL, - _RK(socket_nagle_disable), - "Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.", - 0, 1, 0 + }, + {_RK_GLOBAL, "socket.nagle.disable", _RK_C_BOOL, _RK(socket_nagle_disable), + "Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.", 0, 1, 0 #ifndef TCP_NODELAY - , .unsupported = "TCP_NODELAY not available at build time" + , + .unsupported = "TCP_NODELAY not available at build time" #endif - }, - { _RK_GLOBAL, "socket.max.fails", _RK_C_INT, - _RK(socket_max_fails), - "Disconnect from broker when this number of send failures " - "(e.g., timed out requests) is reached. Disable with 0. " - "WARNING: It is highly recommended to leave this setting at " - "its default value of 1 to avoid the client and broker to " - "become desynchronized in case of request timeouts. 
" - "NOTE: The connection is automatically re-established.", - 0, 1000000, 1 }, - { _RK_GLOBAL, "broker.address.ttl", _RK_C_INT, - _RK(broker_addr_ttl), - "How long to cache the broker address resolving " - "results (milliseconds).", - 0, 86400*1000, 1*1000 }, - { _RK_GLOBAL, "broker.address.family", _RK_C_S2I, - _RK(broker_addr_family), - "Allowed broker IP address families: any, v4, v6", - .vdef = AF_UNSPEC, - .s2i = { - { AF_UNSPEC, "any" }, - { AF_INET, "v4" }, - { AF_INET6, "v6" }, - } }, - { _RK_GLOBAL|_RK_MED, "connections.max.idle.ms", - _RK_C_INT, - _RK(connections_max_idle_ms), - "Close broker connections after the specified time of " - "inactivity. " - "Disable with 0. " - "If this property is left at its default value some heuristics are " - "performed to determine a suitable default value, this is currently " - "limited to identifying brokers on Azure " - "(see librdkafka issue #3109 for more info).", - 0, INT_MAX, 0 }, - { _RK_GLOBAL|_RK_MED|_RK_HIDDEN, "enable.sparse.connections", - _RK_C_BOOL, - _RK(sparse_connections), - "When enabled the client will only connect to brokers " - "it needs to communicate with. When disabled the client " - "will maintain connections to all brokers in the cluster.", - 0, 1, 1 }, - { _RK_GLOBAL|_RK_DEPRECATED, "reconnect.backoff.jitter.ms", _RK_C_INT, - _RK(reconnect_jitter_ms), - "No longer used. See `reconnect.backoff.ms` and " - "`reconnect.backoff.max.ms`.", - 0, 60*60*1000, 0 }, - { _RK_GLOBAL|_RK_MED, "reconnect.backoff.ms", _RK_C_INT, - _RK(reconnect_backoff_ms), - "The initial time to wait before reconnecting to a broker " - "after the connection has been closed. " - "The time is increased exponentially until " - "`reconnect.backoff.max.ms` is reached. " - "-25% to +50% jitter is applied to each reconnect backoff. 
" - "A value of 0 disables the backoff and reconnects immediately.", - 0, 60*60*1000, 100 }, - { _RK_GLOBAL|_RK_MED, "reconnect.backoff.max.ms", _RK_C_INT, - _RK(reconnect_backoff_max_ms), - "The maximum time to wait before reconnecting to a broker " - "after the connection has been closed.", - 0, 60*60*1000, 10*1000 }, - { _RK_GLOBAL|_RK_HIGH, "statistics.interval.ms", _RK_C_INT, - _RK(stats_interval_ms), - "librdkafka statistics emit interval. The application also needs to " - "register a stats callback using `rd_kafka_conf_set_stats_cb()`. " - "The granularity is 1000ms. A value of 0 disables statistics.", - 0, 86400*1000, 0 }, - { _RK_GLOBAL, "enabled_events", _RK_C_INT, - _RK(enabled_events), - "See `rd_kafka_conf_set_events()`", - 0, 0x7fffffff, 0 }, - { _RK_GLOBAL, "error_cb", _RK_C_PTR, - _RK(error_cb), - "Error callback (set with rd_kafka_conf_set_error_cb())" }, - { _RK_GLOBAL, "throttle_cb", _RK_C_PTR, - _RK(throttle_cb), - "Throttle callback (set with rd_kafka_conf_set_throttle_cb())" }, - { _RK_GLOBAL, "stats_cb", _RK_C_PTR, - _RK(stats_cb), - "Statistics callback (set with rd_kafka_conf_set_stats_cb())" }, - { _RK_GLOBAL, "log_cb", _RK_C_PTR, - _RK(log_cb), - "Log callback (set with rd_kafka_conf_set_log_cb())", - .pdef = rd_kafka_log_print }, - { _RK_GLOBAL, "log_level", _RK_C_INT, - _RK(log_level), - "Logging level (syslog(3) levels)", - 0, 7, 6 }, - { _RK_GLOBAL, "log.queue", _RK_C_BOOL, _RK(log_queue), - "Disable spontaneous log_cb from internal librdkafka " - "threads, instead enqueue log messages on queue set with " - "`rd_kafka_set_log_queue()` and serve log callbacks or " - "events through the standard poll APIs. 
" - "**NOTE**: Log messages will linger in a temporary queue " - "until the log queue has been set.", - 0, 1, 0 }, - { _RK_GLOBAL, "log.thread.name", _RK_C_BOOL, - _RK(log_thread_name), - "Print internal thread name in log messages " - "(useful for debugging librdkafka internals)", - 0, 1, 1 }, - { _RK_GLOBAL, "enable.random.seed", _RK_C_BOOL, - _RK(enable_random_seed), - "If enabled librdkafka will initialize the PRNG " - "with srand(current_time.milliseconds) on the first invocation of " - "rd_kafka_new() (required only if rand_r() is not available on your platform). " - "If disabled the application must call srand() prior to calling rd_kafka_new().", - 0, 1, 1 }, - { _RK_GLOBAL, "log.connection.close", _RK_C_BOOL, - _RK(log_connection_close), - "Log broker disconnects. " - "It might be useful to turn this off when interacting with " - "0.9 brokers with an aggressive `connection.max.idle.ms` value.", - 0, 1, 1 }, - { _RK_GLOBAL, "background_event_cb", _RK_C_PTR, - _RK(background_event_cb), - "Background queue event callback " - "(set with rd_kafka_conf_set_background_event_cb())" }, - { _RK_GLOBAL, "socket_cb", _RK_C_PTR, - _RK(socket_cb), - "Socket creation callback to provide race-free CLOEXEC", - .pdef = + }, + {_RK_GLOBAL, "socket.max.fails", _RK_C_INT, _RK(socket_max_fails), + "Disconnect from broker when this number of send failures " + "(e.g., timed out requests) is reached. Disable with 0. " + "WARNING: It is highly recommended to leave this setting at " + "its default value of 1 to avoid the client and broker to " + "become desynchronized in case of request timeouts. 
" + "NOTE: The connection is automatically re-established.", + 0, 1000000, 1}, + {_RK_GLOBAL, "broker.address.ttl", _RK_C_INT, _RK(broker_addr_ttl), + "How long to cache the broker address resolving " + "results (milliseconds).", + 0, 86400 * 1000, 1 * 1000}, + {_RK_GLOBAL, "broker.address.family", _RK_C_S2I, _RK(broker_addr_family), + "Allowed broker IP address families: any, v4, v6", .vdef = AF_UNSPEC, + .s2i = + { + {AF_UNSPEC, "any"}, + {AF_INET, "v4"}, + {AF_INET6, "v6"}, + }}, + {_RK_GLOBAL | _RK_MED, "connections.max.idle.ms", _RK_C_INT, + _RK(connections_max_idle_ms), + "Close broker connections after the specified time of " + "inactivity. " + "Disable with 0. " + "If this property is left at its default value some heuristics are " + "performed to determine a suitable default value, this is currently " + "limited to identifying brokers on Azure " + "(see librdkafka issue #3109 for more info).", + 0, INT_MAX, 0}, + {_RK_GLOBAL | _RK_MED | _RK_HIDDEN, "enable.sparse.connections", _RK_C_BOOL, + _RK(sparse_connections), + "When enabled the client will only connect to brokers " + "it needs to communicate with. When disabled the client " + "will maintain connections to all brokers in the cluster.", + 0, 1, 1}, + {_RK_GLOBAL | _RK_DEPRECATED, "reconnect.backoff.jitter.ms", _RK_C_INT, + _RK(reconnect_jitter_ms), + "No longer used. See `reconnect.backoff.ms` and " + "`reconnect.backoff.max.ms`.", + 0, 60 * 60 * 1000, 0}, + {_RK_GLOBAL | _RK_MED, "reconnect.backoff.ms", _RK_C_INT, + _RK(reconnect_backoff_ms), + "The initial time to wait before reconnecting to a broker " + "after the connection has been closed. " + "The time is increased exponentially until " + "`reconnect.backoff.max.ms` is reached. " + "-25% to +50% jitter is applied to each reconnect backoff. 
" + "A value of 0 disables the backoff and reconnects immediately.", + 0, 60 * 60 * 1000, 100}, + {_RK_GLOBAL | _RK_MED, "reconnect.backoff.max.ms", _RK_C_INT, + _RK(reconnect_backoff_max_ms), + "The maximum time to wait before reconnecting to a broker " + "after the connection has been closed.", + 0, 60 * 60 * 1000, 10 * 1000}, + {_RK_GLOBAL | _RK_HIGH, "statistics.interval.ms", _RK_C_INT, + _RK(stats_interval_ms), + "librdkafka statistics emit interval. The application also needs to " + "register a stats callback using `rd_kafka_conf_set_stats_cb()`. " + "The granularity is 1000ms. A value of 0 disables statistics.", + 0, 86400 * 1000, 0}, + {_RK_GLOBAL, "enabled_events", _RK_C_INT, _RK(enabled_events), + "See `rd_kafka_conf_set_events()`", 0, 0x7fffffff, 0}, + {_RK_GLOBAL, "error_cb", _RK_C_PTR, _RK(error_cb), + "Error callback (set with rd_kafka_conf_set_error_cb())"}, + {_RK_GLOBAL, "throttle_cb", _RK_C_PTR, _RK(throttle_cb), + "Throttle callback (set with rd_kafka_conf_set_throttle_cb())"}, + {_RK_GLOBAL, "stats_cb", _RK_C_PTR, _RK(stats_cb), + "Statistics callback (set with rd_kafka_conf_set_stats_cb())"}, + {_RK_GLOBAL, "log_cb", _RK_C_PTR, _RK(log_cb), + "Log callback (set with rd_kafka_conf_set_log_cb())", + .pdef = rd_kafka_log_print}, + {_RK_GLOBAL, "log_level", _RK_C_INT, _RK(log_level), + "Logging level (syslog(3) levels)", 0, 7, 6}, + {_RK_GLOBAL, "log.queue", _RK_C_BOOL, _RK(log_queue), + "Disable spontaneous log_cb from internal librdkafka " + "threads, instead enqueue log messages on queue set with " + "`rd_kafka_set_log_queue()` and serve log callbacks or " + "events through the standard poll APIs. 
" + "**NOTE**: Log messages will linger in a temporary queue " + "until the log queue has been set.", + 0, 1, 0}, + {_RK_GLOBAL, "log.thread.name", _RK_C_BOOL, _RK(log_thread_name), + "Print internal thread name in log messages " + "(useful for debugging librdkafka internals)", + 0, 1, 1}, + {_RK_GLOBAL, "enable.random.seed", _RK_C_BOOL, _RK(enable_random_seed), + "If enabled librdkafka will initialize the PRNG " + "with srand(current_time.milliseconds) on the first invocation of " + "rd_kafka_new() (required only if rand_r() is not available on your " + "platform). " + "If disabled the application must call srand() prior to calling " + "rd_kafka_new().", + 0, 1, 1}, + {_RK_GLOBAL, "log.connection.close", _RK_C_BOOL, _RK(log_connection_close), + "Log broker disconnects. " + "It might be useful to turn this off when interacting with " + "0.9 brokers with an aggressive `connection.max.idle.ms` value.", + 0, 1, 1}, + {_RK_GLOBAL, "background_event_cb", _RK_C_PTR, _RK(background_event_cb), + "Background queue event callback " + "(set with rd_kafka_conf_set_background_event_cb())"}, + {_RK_GLOBAL, "socket_cb", _RK_C_PTR, _RK(socket_cb), + "Socket creation callback to provide race-free CLOEXEC", + .pdef = #ifdef __linux__ - rd_kafka_socket_cb_linux + rd_kafka_socket_cb_linux #else rd_kafka_socket_cb_generic #endif - }, - { _RK_GLOBAL, "connect_cb", _RK_C_PTR, - _RK(connect_cb), - "Socket connect callback", - }, - { _RK_GLOBAL, "closesocket_cb", _RK_C_PTR, - _RK(closesocket_cb), - "Socket close callback", - }, - { _RK_GLOBAL, "open_cb", _RK_C_PTR, - _RK(open_cb), - "File open callback to provide race-free CLOEXEC", - .pdef = + }, + { + _RK_GLOBAL, + "connect_cb", + _RK_C_PTR, + _RK(connect_cb), + "Socket connect callback", + }, + { + _RK_GLOBAL, + "closesocket_cb", + _RK_C_PTR, + _RK(closesocket_cb), + "Socket close callback", + }, + {_RK_GLOBAL, "open_cb", _RK_C_PTR, _RK(open_cb), + "File open callback to provide race-free CLOEXEC", + .pdef = #ifdef __linux__ - 
rd_kafka_open_cb_linux + rd_kafka_open_cb_linux #else rd_kafka_open_cb_generic #endif - }, - { _RK_GLOBAL, "opaque", _RK_C_PTR, - _RK(opaque), - "Application opaque (set with rd_kafka_conf_set_opaque())" }, - { _RK_GLOBAL, "default_topic_conf", _RK_C_PTR, - _RK(topic_conf), - "Default topic configuration for automatically subscribed topics" }, - { _RK_GLOBAL, "internal.termination.signal", _RK_C_INT, - _RK(term_sig), - "Signal that librdkafka will use to quickly terminate on " - "rd_kafka_destroy(). If this signal is not set then there will be a " - "delay before rd_kafka_wait_destroyed() returns true " - "as internal threads are timing out their system calls. " - "If this signal is set however the delay will be minimal. " - "The application should mask this signal as an internal " - "signal handler is installed.", - 0, 128, 0 }, - { _RK_GLOBAL|_RK_HIGH, "api.version.request", _RK_C_BOOL, - _RK(api_version_request), - "Request broker's supported API versions to adjust functionality to " - "available protocol features. If set to false, or the " - "ApiVersionRequest fails, the fallback version " - "`broker.version.fallback` will be used. " - "**NOTE**: Depends on broker version >=0.10.0. If the request is not " - "supported by (an older) broker the `broker.version.fallback` fallback is used.", - 0, 1, 1 }, - { _RK_GLOBAL, "api.version.request.timeout.ms", _RK_C_INT, - _RK(api_version_request_timeout_ms), - "Timeout for broker API version requests.", - 1, 5*60*1000, 10*1000 }, - { _RK_GLOBAL|_RK_MED, "api.version.fallback.ms", _RK_C_INT, - _RK(api_version_fallback_ms), - "Dictates how long the `broker.version.fallback` fallback is used " - "in the case the ApiVersionRequest fails. 
" - "**NOTE**: The ApiVersionRequest is only issued when a new connection " - "to the broker is made (such as after an upgrade).", - 0, 86400*7*1000, 0 }, - - { _RK_GLOBAL|_RK_MED, "broker.version.fallback", _RK_C_STR, - _RK(broker_version_fallback), - "Older broker versions (before 0.10.0) provide no way for a client to query " - "for supported protocol features " - "(ApiVersionRequest, see `api.version.request`) making it impossible " - "for the client to know what features it may use. " - "As a workaround a user may set this property to the expected broker " - "version and the client will automatically adjust its feature set " - "accordingly if the ApiVersionRequest fails (or is disabled). " - "The fallback broker version will be used for `api.version.fallback.ms`. " - "Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. " - "Any other value >= 0.10, such as 0.10.2.1, " - "enables ApiVersionRequests.", - .sdef = "0.10.0", - .validate = rd_kafka_conf_validate_broker_version }, - - /* Security related global properties */ - { _RK_GLOBAL|_RK_HIGH, "security.protocol", _RK_C_S2I, - _RK(security_protocol), - "Protocol used to communicate with brokers.", - .vdef = RD_KAFKA_PROTO_PLAINTEXT, - .s2i = { - { RD_KAFKA_PROTO_PLAINTEXT, "plaintext" }, - { RD_KAFKA_PROTO_SSL, "ssl", _UNSUPPORTED_SSL }, - { RD_KAFKA_PROTO_SASL_PLAINTEXT, "sasl_plaintext" }, - { RD_KAFKA_PROTO_SASL_SSL, "sasl_ssl", - _UNSUPPORTED_SSL }, - { 0, NULL } - } }, - - { _RK_GLOBAL, "ssl.cipher.suites", _RK_C_STR, - _RK(ssl.cipher_suites), - "A cipher suite is a named combination of authentication, " - "encryption, MAC and key exchange algorithm used to negotiate the " - "security settings for a network connection using TLS or SSL network " - "protocol. 
See manual page for `ciphers(1)` and " - "`SSL_CTX_set_cipher_list(3).", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL, "ssl.curves.list", _RK_C_STR, - _RK(ssl.curves_list), - "The supported-curves extension in the TLS ClientHello message specifies " - "the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client " - "is willing to have the server use. See manual page for " - "`SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.", - _UNSUPPORTED_OPENSSL_1_0_2 - }, - { _RK_GLOBAL, "ssl.sigalgs.list", _RK_C_STR, - _RK(ssl.sigalgs_list), - "The client uses the TLS ClientHello signature_algorithms extension " - "to indicate to the server which signature/hash algorithm pairs " - "may be used in digital signatures. See manual page for " - "`SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required.", - _UNSUPPORTED_OPENSSL_1_0_2 - }, - { _RK_GLOBAL|_RK_SENSITIVE, "ssl.key.location", _RK_C_STR, - _RK(ssl.key_location), - "Path to client's private key (PEM) used for authentication.", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL|_RK_SENSITIVE, "ssl.key.password", _RK_C_STR, - _RK(ssl.key_password), - "Private key passphrase (for use with `ssl.key.location` " - "and `set_ssl_cert()`)", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL|_RK_SENSITIVE, "ssl.key.pem", _RK_C_STR, - _RK(ssl.key_pem), - "Client's private key string (PEM format) used for authentication.", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL|_RK_SENSITIVE, "ssl_key", _RK_C_INTERNAL, - _RK(ssl.key), - "Client's private key as set by rd_kafka_conf_set_ssl_cert()", - .dtor = rd_kafka_conf_cert_dtor, - .copy = rd_kafka_conf_cert_copy, - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL, "ssl.certificate.location", _RK_C_STR, - _RK(ssl.cert_location), - "Path to client's public key (PEM) used for authentication.", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL, "ssl.certificate.pem", _RK_C_STR, - _RK(ssl.cert_pem), - "Client's public key string (PEM format) used for authentication.", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL, "ssl_certificate", 
_RK_C_INTERNAL, - _RK(ssl.key), - "Client's public key as set by rd_kafka_conf_set_ssl_cert()", - .dtor = rd_kafka_conf_cert_dtor, - .copy = rd_kafka_conf_cert_copy, - _UNSUPPORTED_SSL - }, - - { _RK_GLOBAL, "ssl.ca.location", _RK_C_STR, - _RK(ssl.ca_location), - "File or directory path to CA certificate(s) for verifying " - "the broker's key. " - "Defaults: " - "On Windows the system's CA certificates are automatically looked " - "up in the Windows Root certificate store. " - "On Mac OSX this configuration defaults to `probe`. " - "It is recommended to install openssl using Homebrew, " - "to provide CA certificates. " - "On Linux install the distribution's ca-certificates package. " - "If OpenSSL is statically linked or `ssl.ca.location` is set to " - "`probe` a list of standard paths will be probed and the first one " - "found will be used as the default CA certificate location path. " - "If OpenSSL is dynamically linked the OpenSSL library's default " - "path will be used (see `OPENSSLDIR` in `openssl version -a`).", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL|_RK_SENSITIVE, "ssl.ca.pem", _RK_C_STR, - _RK(ssl.ca_pem), - "CA certificate string (PEM format) for verifying the broker's key.", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL, "ssl_ca", _RK_C_INTERNAL, - _RK(ssl.ca), - "CA certificate as set by rd_kafka_conf_set_ssl_cert()", - .dtor = rd_kafka_conf_cert_dtor, - .copy = rd_kafka_conf_cert_copy, - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL, "ssl.ca.certificate.stores", _RK_C_STR, - _RK(ssl.ca_cert_stores), - "Comma-separated list of Windows Certificate stores to load " - "CA certificates from. Certificates will be loaded in the same " - "order as stores are specified. If no certificates can be loaded " - "from any of the specified stores an error is logged and the " - "OpenSSL library's default CA location is used instead. 
" - "Store names are typically one or more of: MY, Root, Trust, CA.", - .sdef = "Root", + }, + {_RK_GLOBAL, "opaque", _RK_C_PTR, _RK(opaque), + "Application opaque (set with rd_kafka_conf_set_opaque())"}, + {_RK_GLOBAL, "default_topic_conf", _RK_C_PTR, _RK(topic_conf), + "Default topic configuration for automatically subscribed topics"}, + {_RK_GLOBAL, "internal.termination.signal", _RK_C_INT, _RK(term_sig), + "Signal that librdkafka will use to quickly terminate on " + "rd_kafka_destroy(). If this signal is not set then there will be a " + "delay before rd_kafka_wait_destroyed() returns true " + "as internal threads are timing out their system calls. " + "If this signal is set however the delay will be minimal. " + "The application should mask this signal as an internal " + "signal handler is installed.", + 0, 128, 0}, + {_RK_GLOBAL | _RK_HIGH, "api.version.request", _RK_C_BOOL, + _RK(api_version_request), + "Request broker's supported API versions to adjust functionality to " + "available protocol features. If set to false, or the " + "ApiVersionRequest fails, the fallback version " + "`broker.version.fallback` will be used. " + "**NOTE**: Depends on broker version >=0.10.0. If the request is not " + "supported by (an older) broker the `broker.version.fallback` fallback is " + "used.", + 0, 1, 1}, + {_RK_GLOBAL, "api.version.request.timeout.ms", _RK_C_INT, + _RK(api_version_request_timeout_ms), + "Timeout for broker API version requests.", 1, 5 * 60 * 1000, 10 * 1000}, + {_RK_GLOBAL | _RK_MED, "api.version.fallback.ms", _RK_C_INT, + _RK(api_version_fallback_ms), + "Dictates how long the `broker.version.fallback` fallback is used " + "in the case the ApiVersionRequest fails. 
" + "**NOTE**: The ApiVersionRequest is only issued when a new connection " + "to the broker is made (such as after an upgrade).", + 0, 86400 * 7 * 1000, 0}, + + {_RK_GLOBAL | _RK_MED, "broker.version.fallback", _RK_C_STR, + _RK(broker_version_fallback), + "Older broker versions (before 0.10.0) provide no way for a client to " + "query " + "for supported protocol features " + "(ApiVersionRequest, see `api.version.request`) making it impossible " + "for the client to know what features it may use. " + "As a workaround a user may set this property to the expected broker " + "version and the client will automatically adjust its feature set " + "accordingly if the ApiVersionRequest fails (or is disabled). " + "The fallback broker version will be used for `api.version.fallback.ms`. " + "Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. " + "Any other value >= 0.10, such as 0.10.2.1, " + "enables ApiVersionRequests.", + .sdef = "0.10.0", .validate = rd_kafka_conf_validate_broker_version}, + + /* Security related global properties */ + {_RK_GLOBAL | _RK_HIGH, "security.protocol", _RK_C_S2I, + _RK(security_protocol), "Protocol used to communicate with brokers.", + .vdef = RD_KAFKA_PROTO_PLAINTEXT, + .s2i = {{RD_KAFKA_PROTO_PLAINTEXT, "plaintext"}, + {RD_KAFKA_PROTO_SSL, "ssl", _UNSUPPORTED_SSL}, + {RD_KAFKA_PROTO_SASL_PLAINTEXT, "sasl_plaintext"}, + {RD_KAFKA_PROTO_SASL_SSL, "sasl_ssl", _UNSUPPORTED_SSL}, + {0, NULL}}}, + + {_RK_GLOBAL, "ssl.cipher.suites", _RK_C_STR, _RK(ssl.cipher_suites), + "A cipher suite is a named combination of authentication, " + "encryption, MAC and key exchange algorithm used to negotiate the " + "security settings for a network connection using TLS or SSL network " + "protocol. 
See manual page for `ciphers(1)` and " + "`SSL_CTX_set_cipher_list(3).", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.curves.list", _RK_C_STR, _RK(ssl.curves_list), + "The supported-curves extension in the TLS ClientHello message specifies " + "the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client " + "is willing to have the server use. See manual page for " + "`SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.", + _UNSUPPORTED_OPENSSL_1_0_2}, + {_RK_GLOBAL, "ssl.sigalgs.list", _RK_C_STR, _RK(ssl.sigalgs_list), + "The client uses the TLS ClientHello signature_algorithms extension " + "to indicate to the server which signature/hash algorithm pairs " + "may be used in digital signatures. See manual page for " + "`SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required.", + _UNSUPPORTED_OPENSSL_1_0_2}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.location", _RK_C_STR, + _RK(ssl.key_location), + "Path to client's private key (PEM) used for authentication.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.password", _RK_C_STR, + _RK(ssl.key_password), + "Private key passphrase (for use with `ssl.key.location` " + "and `set_ssl_cert()`)", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.pem", _RK_C_STR, _RK(ssl.key_pem), + "Client's private key string (PEM format) used for authentication.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl_key", _RK_C_INTERNAL, _RK(ssl.key), + "Client's private key as set by rd_kafka_conf_set_ssl_cert()", + .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy, + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.certificate.location", _RK_C_STR, _RK(ssl.cert_location), + "Path to client's public key (PEM) used for authentication.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.certificate.pem", _RK_C_STR, _RK(ssl.cert_pem), + "Client's public key string (PEM format) used for authentication.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl_certificate", _RK_C_INTERNAL, _RK(ssl.key), + "Client's public 
key as set by rd_kafka_conf_set_ssl_cert()", + .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy, + _UNSUPPORTED_SSL}, + + {_RK_GLOBAL, "ssl.ca.location", _RK_C_STR, _RK(ssl.ca_location), + "File or directory path to CA certificate(s) for verifying " + "the broker's key. " + "Defaults: " + "On Windows the system's CA certificates are automatically looked " + "up in the Windows Root certificate store. " + "On Mac OSX this configuration defaults to `probe`. " + "It is recommended to install openssl using Homebrew, " + "to provide CA certificates. " + "On Linux install the distribution's ca-certificates package. " + "If OpenSSL is statically linked or `ssl.ca.location` is set to " + "`probe` a list of standard paths will be probed and the first one " + "found will be used as the default CA certificate location path. " + "If OpenSSL is dynamically linked the OpenSSL library's default " + "path will be used (see `OPENSSLDIR` in `openssl version -a`).", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl.ca.pem", _RK_C_STR, _RK(ssl.ca_pem), + "CA certificate string (PEM format) for verifying the broker's key.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl_ca", _RK_C_INTERNAL, _RK(ssl.ca), + "CA certificate as set by rd_kafka_conf_set_ssl_cert()", + .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy, + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.ca.certificate.stores", _RK_C_STR, + _RK(ssl.ca_cert_stores), + "Comma-separated list of Windows Certificate stores to load " + "CA certificates from. Certificates will be loaded in the same " + "order as stores are specified. If no certificates can be loaded " + "from any of the specified stores an error is logged and the " + "OpenSSL library's default CA location is used instead. 
" + "Store names are typically one or more of: MY, Root, Trust, CA.", + .sdef = "Root", #if !defined(_WIN32) - .unsupported = "configuration only valid on Windows" + .unsupported = "configuration only valid on Windows" #endif - }, - - { _RK_GLOBAL, "ssl.crl.location", _RK_C_STR, - _RK(ssl.crl_location), - "Path to CRL for verifying broker's certificate validity.", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL, "ssl.keystore.location", _RK_C_STR, - _RK(ssl.keystore_location), - "Path to client's keystore (PKCS#12) used for authentication.", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL|_RK_SENSITIVE, "ssl.keystore.password", _RK_C_STR, - _RK(ssl.keystore_password), - "Client's keystore (PKCS#12) password.", - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL, "ssl.engine.location", _RK_C_STR, - _RK(ssl.engine_location), - "Path to OpenSSL engine library. OpenSSL >= 1.1.0 required.", - _UNSUPPORTED_OPENSSL_1_1_0 - }, - { _RK_GLOBAL, "ssl.engine.id", _RK_C_STR, - _RK(ssl.engine_id), - "OpenSSL engine id is the name used for loading engine.", - .sdef = "dynamic", - _UNSUPPORTED_OPENSSL_1_1_0 - }, - { _RK_GLOBAL, "ssl_engine_callback_data", _RK_C_PTR, - _RK(ssl.engine_callback_data), - "OpenSSL engine callback data (set " - "with rd_kafka_conf_set_engine_callback_data()).", - _UNSUPPORTED_OPENSSL_1_1_0 - }, - { _RK_GLOBAL, "enable.ssl.certificate.verification", _RK_C_BOOL, - _RK(ssl.enable_verify), - "Enable OpenSSL's builtin broker (server) certificate verification. " - "This verification can be extended by the application by " - "implementing a certificate_verify_cb.", - 0, 1, 1, - _UNSUPPORTED_SSL - }, - { _RK_GLOBAL, "ssl.endpoint.identification.algorithm", _RK_C_S2I, - _RK(ssl.endpoint_identification), - "Endpoint identification algorithm to validate broker " - "hostname using broker certificate. " - "https - Server (broker) hostname verification as " - "specified in RFC2818. " - "none - No endpoint verification. 
" - "OpenSSL >= 1.0.2 required.", - .vdef = RD_KAFKA_SSL_ENDPOINT_ID_NONE, - .s2i = { - { RD_KAFKA_SSL_ENDPOINT_ID_NONE, "none" }, - { RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, "https" } - }, - _UNSUPPORTED_OPENSSL_1_0_2 - }, - { _RK_GLOBAL, "ssl.certificate.verify_cb", _RK_C_PTR, - _RK(ssl.cert_verify_cb), - "Callback to verify the broker certificate chain.", - _UNSUPPORTED_SSL - }, - - /* Point user in the right direction if they try to apply - * Java client SSL / JAAS properties. */ - { _RK_GLOBAL, "ssl.truststore.location", _RK_C_INVALID, - _RK(dummy), - "Java TrustStores are not supported, use `ssl.ca.location` " - "and a certificate file instead. " - "See https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka " - "for more information." - }, - { _RK_GLOBAL, "sasl.jaas.config", _RK_C_INVALID, - _RK(dummy), - "Java JAAS configuration is not supported, see " - "https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka " - "for more information." - }, - - {_RK_GLOBAL|_RK_HIGH, "sasl.mechanisms", _RK_C_STR, - _RK(sasl.mechanisms), - "SASL mechanism to use for authentication. " - "Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. " - "**NOTE**: Despite the name only one mechanism must be configured.", - .sdef = "GSSAPI", - .validate = rd_kafka_conf_validate_single }, - {_RK_GLOBAL|_RK_HIGH, "sasl.mechanism", _RK_C_ALIAS, - .sdef = "sasl.mechanisms" }, - { _RK_GLOBAL, "sasl.kerberos.service.name", _RK_C_STR, - _RK(sasl.service_name), - "Kerberos principal name that Kafka runs as, " - "not including /hostname@REALM", - .sdef = "kafka" }, - { _RK_GLOBAL, "sasl.kerberos.principal", _RK_C_STR, - _RK(sasl.principal), - "This client's Kerberos principal name. " - "(Not supported on Windows, will use the logon user's principal).", - .sdef = "kafkaclient" }, - { _RK_GLOBAL, "sasl.kerberos.kinit.cmd", _RK_C_STR, - _RK(sasl.kinit_cmd), - "Shell command to refresh or acquire the client's Kerberos ticket. 
" - "This command is executed on client creation and every " - "sasl.kerberos.min.time.before.relogin (0=disable). " - "%{config.prop.name} is replaced by corresponding config " - "object value.", - .sdef = - /* First attempt to refresh, else acquire. */ - "kinit -R -t \"%{sasl.kerberos.keytab}\" " - "-k %{sasl.kerberos.principal} || " - "kinit -t \"%{sasl.kerberos.keytab}\" -k %{sasl.kerberos.principal}", - _UNSUPPORTED_WIN32_GSSAPI - }, - { _RK_GLOBAL, "sasl.kerberos.keytab", _RK_C_STR, - _RK(sasl.keytab), - "Path to Kerberos keytab file. " - "This configuration property is only used as a variable in " - "`sasl.kerberos.kinit.cmd` as " - "` ... -t \"%{sasl.kerberos.keytab}\"`.", - _UNSUPPORTED_WIN32_GSSAPI - }, - { _RK_GLOBAL, "sasl.kerberos.min.time.before.relogin", _RK_C_INT, - _RK(sasl.relogin_min_time), - "Minimum time in milliseconds between key refresh attempts. " - "Disable automatic key refresh by setting this property to 0.", - 0, 86400*1000, 60*1000, - _UNSUPPORTED_WIN32_GSSAPI - }, - { _RK_GLOBAL|_RK_HIGH|_RK_SENSITIVE, "sasl.username", _RK_C_STR, - _RK(sasl.username), - "SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms" }, - { _RK_GLOBAL|_RK_HIGH|_RK_SENSITIVE, "sasl.password", _RK_C_STR, - _RK(sasl.password), - "SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism" }, - { _RK_GLOBAL|_RK_SENSITIVE, "sasl.oauthbearer.config", _RK_C_STR, - _RK(sasl.oauthbearer_config), - "SASL/OAUTHBEARER configuration. The format is " - "implementation-dependent and must be parsed accordingly. The " - "default unsecured token implementation (see " - "https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes " - "space-separated name=value pairs with valid names including " - "principalClaimName, principal, scopeClaimName, scope, and " - "lifeSeconds. The default value for principalClaimName is \"sub\", " - "the default value for scopeClaimName is \"scope\", and the default " - "value for lifeSeconds is 3600. 
The scope value is CSV format with " - "the default value being no/empty scope. For example: " - "`principalClaimName=azp principal=admin scopeClaimName=roles " - "scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions " - "can be communicated to the broker via " - "`extension_NAME=value`. For example: " - "`principal=admin extension_traceId=123`", - _UNSUPPORTED_OAUTHBEARER - }, - { _RK_GLOBAL, "enable.sasl.oauthbearer.unsecure.jwt", _RK_C_BOOL, - _RK(sasl.enable_oauthbearer_unsecure_jwt), - "Enable the builtin unsecure JWT OAUTHBEARER token handler " - "if no oauthbearer_refresh_cb has been set. " - "This builtin handler should only be used for development " - "or testing, and not in production.", - 0, 1, 0, - _UNSUPPORTED_OAUTHBEARER - }, - { _RK_GLOBAL, "oauthbearer_token_refresh_cb", _RK_C_PTR, - _RK(sasl.oauthbearer.token_refresh_cb), - "SASL/OAUTHBEARER token refresh callback (set with " - "rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by " - "rd_kafka_poll(), et.al. " - "This callback will be triggered when it is time to refresh " - "the client's OAUTHBEARER token. " - "Also see `rd_kafka_conf_enable_sasl_queue()`.", - _UNSUPPORTED_OAUTHBEARER - }, - { _RK_GLOBAL|_RK_HIDDEN, "enable_sasl_queue", _RK_C_BOOL, - _RK(sasl.enable_callback_queue), - "Enable the SASL callback queue " - "(set with rd_kafka_conf_enable_sasl_queue()).", - 0, 1, 0, - }, - { _RK_GLOBAL, "sasl.oauthbearer.method", _RK_C_S2I, - _RK(sasl.oauthbearer.method), - "Set to \"default\" or \"oidc\" to control which login method " - "is used. If set it to \"oidc\", OAuth/OIDC login method will " - "be used. 
" - "sasl.oauthbearer.client.id, sasl.oauthbearer.client.secret, " - "sasl.oauthbearer.scope, sasl.oauthbearer.extensions, " - "and sasl.oauthbearer.token.endpoint.url are needed if " - "sasl.oauthbearer.method is set to \"oidc\".", - .vdef = RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, - .s2i = { - { RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, "default" }, - { RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC, "oidc" } - }, - _UNSUPPORTED_OIDC - }, - { _RK_GLOBAL, "sasl.oauthbearer.client.id", _RK_C_STR, - _RK(sasl.oauthbearer.client_id), - "It's a public identifier for the application. " - "It must be unique across all clients that the " - "authorization server handles. " - "This is only used when sasl.oauthbearer.method is set to oidc.", - _UNSUPPORTED_OIDC - }, - { _RK_GLOBAL, "sasl.oauthbearer.client.secret", _RK_C_STR, - _RK(sasl.oauthbearer.client_secret), - "A client secret only known to the application and the " - "authorization server. This should be a sufficiently random string " - "that are not guessable. " - "This is only used when sasl.oauthbearer.method is set to \"oidc\".", - _UNSUPPORTED_OIDC - }, - { _RK_GLOBAL, "sasl.oauthbearer.scope", _RK_C_STR, - _RK(sasl.oauthbearer.scope), - "Client use this to specify the scope of the access request to the " - "broker. " - "This is only used when sasl.oauthbearer.method is set to \"oidc\".", - _UNSUPPORTED_OIDC - }, - { _RK_GLOBAL, "sasl.oauthbearer.extensions", _RK_C_STR, - _RK(sasl.oauthbearer.extensions_str), - "Allow additional information to be provided to the broker. " - "It's comma-separated list of key=value pairs. " - "The example of the input is " - "\"supportFeatureX=true,organizationId=sales-emea\"." - " This is only used when sasl.oauthbearer.method is set " - "to \"oidc\".", - _UNSUPPORTED_OIDC - }, - { _RK_GLOBAL, "sasl.oauthbearer.token.endpoint.url", _RK_C_STR, - _RK(sasl.oauthbearer.token_endpoint_url), - "OAUTH issuer token endpoint HTTP(S) URI used to retrieve the " - "token. 
" - "This is only used when sasl.oauthbearer.method is set to \"oidc\".", - _UNSUPPORTED_OIDC - }, - - /* Plugins */ - { _RK_GLOBAL, "plugin.library.paths", _RK_C_STR, - _RK(plugin_paths), - "List of plugin libraries to load (; separated). " - "The library search path is platform dependent (see dlopen(3) for " - "Unix and LoadLibrary() for Windows). If no filename extension is " - "specified the platform-specific extension (such as .dll or .so) " - "will be appended automatically.", + }, + + {_RK_GLOBAL, "ssl.crl.location", _RK_C_STR, _RK(ssl.crl_location), + "Path to CRL for verifying broker's certificate validity.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.keystore.location", _RK_C_STR, _RK(ssl.keystore_location), + "Path to client's keystore (PKCS#12) used for authentication.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl.keystore.password", _RK_C_STR, + _RK(ssl.keystore_password), "Client's keystore (PKCS#12) password.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.engine.location", _RK_C_STR, _RK(ssl.engine_location), + "Path to OpenSSL engine library. OpenSSL >= 1.1.0 required.", + _UNSUPPORTED_OPENSSL_1_1_0}, + {_RK_GLOBAL, "ssl.engine.id", _RK_C_STR, _RK(ssl.engine_id), + "OpenSSL engine id is the name used for loading engine.", + .sdef = "dynamic", _UNSUPPORTED_OPENSSL_1_1_0}, + {_RK_GLOBAL, "ssl_engine_callback_data", _RK_C_PTR, + _RK(ssl.engine_callback_data), + "OpenSSL engine callback data (set " + "with rd_kafka_conf_set_engine_callback_data()).", + _UNSUPPORTED_OPENSSL_1_1_0}, + {_RK_GLOBAL, "enable.ssl.certificate.verification", _RK_C_BOOL, + _RK(ssl.enable_verify), + "Enable OpenSSL's builtin broker (server) certificate verification. 
" + "This verification can be extended by the application by " + "implementing a certificate_verify_cb.", + 0, 1, 1, _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.endpoint.identification.algorithm", _RK_C_S2I, + _RK(ssl.endpoint_identification), + "Endpoint identification algorithm to validate broker " + "hostname using broker certificate. " + "https - Server (broker) hostname verification as " + "specified in RFC2818. " + "none - No endpoint verification. " + "OpenSSL >= 1.0.2 required.", + .vdef = RD_KAFKA_SSL_ENDPOINT_ID_NONE, + .s2i = {{RD_KAFKA_SSL_ENDPOINT_ID_NONE, "none"}, + {RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, "https"}}, + _UNSUPPORTED_OPENSSL_1_0_2}, + {_RK_GLOBAL, "ssl.certificate.verify_cb", _RK_C_PTR, + _RK(ssl.cert_verify_cb), + "Callback to verify the broker certificate chain.", _UNSUPPORTED_SSL}, + + /* Point user in the right direction if they try to apply + * Java client SSL / JAAS properties. */ + {_RK_GLOBAL, "ssl.truststore.location", _RK_C_INVALID, _RK(dummy), + "Java TrustStores are not supported, use `ssl.ca.location` " + "and a certificate file instead. " + "See " + "https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka " + "for more information."}, + {_RK_GLOBAL, "sasl.jaas.config", _RK_C_INVALID, _RK(dummy), + "Java JAAS configuration is not supported, see " + "https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka " + "for more information."}, + + {_RK_GLOBAL | _RK_HIGH, "sasl.mechanisms", _RK_C_STR, _RK(sasl.mechanisms), + "SASL mechanism to use for authentication. " + "Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. 
" + "**NOTE**: Despite the name only one mechanism must be configured.", + .sdef = "GSSAPI", .validate = rd_kafka_conf_validate_single}, + {_RK_GLOBAL | _RK_HIGH, "sasl.mechanism", _RK_C_ALIAS, + .sdef = "sasl.mechanisms"}, + {_RK_GLOBAL, "sasl.kerberos.service.name", _RK_C_STR, + _RK(sasl.service_name), + "Kerberos principal name that Kafka runs as, " + "not including /hostname@REALM", + .sdef = "kafka"}, + {_RK_GLOBAL, "sasl.kerberos.principal", _RK_C_STR, _RK(sasl.principal), + "This client's Kerberos principal name. " + "(Not supported on Windows, will use the logon user's principal).", + .sdef = "kafkaclient"}, + {_RK_GLOBAL, "sasl.kerberos.kinit.cmd", _RK_C_STR, _RK(sasl.kinit_cmd), + "Shell command to refresh or acquire the client's Kerberos ticket. " + "This command is executed on client creation and every " + "sasl.kerberos.min.time.before.relogin (0=disable). " + "%{config.prop.name} is replaced by corresponding config " + "object value.", + .sdef = + /* First attempt to refresh, else acquire. */ + "kinit -R -t \"%{sasl.kerberos.keytab}\" " + "-k %{sasl.kerberos.principal} || " + "kinit -t \"%{sasl.kerberos.keytab}\" -k %{sasl.kerberos.principal}", + _UNSUPPORTED_WIN32_GSSAPI}, + {_RK_GLOBAL, "sasl.kerberos.keytab", _RK_C_STR, _RK(sasl.keytab), + "Path to Kerberos keytab file. " + "This configuration property is only used as a variable in " + "`sasl.kerberos.kinit.cmd` as " + "` ... -t \"%{sasl.kerberos.keytab}\"`.", + _UNSUPPORTED_WIN32_GSSAPI}, + {_RK_GLOBAL, "sasl.kerberos.min.time.before.relogin", _RK_C_INT, + _RK(sasl.relogin_min_time), + "Minimum time in milliseconds between key refresh attempts. " + "Disable automatic key refresh by setting this property to 0.", + 0, 86400 * 1000, 60 * 1000, _UNSUPPORTED_WIN32_GSSAPI}, + {_RK_GLOBAL | _RK_HIGH | _RK_SENSITIVE, "sasl.username", _RK_C_STR, + _RK(sasl.username), + "SASL username for use with the PLAIN and SASL-SCRAM-.. 
mechanisms"}, + {_RK_GLOBAL | _RK_HIGH | _RK_SENSITIVE, "sasl.password", _RK_C_STR, + _RK(sasl.password), + "SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism"}, + {_RK_GLOBAL | _RK_SENSITIVE, "sasl.oauthbearer.config", _RK_C_STR, + _RK(sasl.oauthbearer_config), + "SASL/OAUTHBEARER configuration. The format is " + "implementation-dependent and must be parsed accordingly. The " + "default unsecured token implementation (see " + "https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes " + "space-separated name=value pairs with valid names including " + "principalClaimName, principal, scopeClaimName, scope, and " + "lifeSeconds. The default value for principalClaimName is \"sub\", " + "the default value for scopeClaimName is \"scope\", and the default " + "value for lifeSeconds is 3600. The scope value is CSV format with " + "the default value being no/empty scope. For example: " + "`principalClaimName=azp principal=admin scopeClaimName=roles " + "scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions " + "can be communicated to the broker via " + "`extension_NAME=value`. For example: " + "`principal=admin extension_traceId=123`", + _UNSUPPORTED_OAUTHBEARER}, + {_RK_GLOBAL, "enable.sasl.oauthbearer.unsecure.jwt", _RK_C_BOOL, + _RK(sasl.enable_oauthbearer_unsecure_jwt), + "Enable the builtin unsecure JWT OAUTHBEARER token handler " + "if no oauthbearer_refresh_cb has been set. " + "This builtin handler should only be used for development " + "or testing, and not in production.", + 0, 1, 0, _UNSUPPORTED_OAUTHBEARER}, + {_RK_GLOBAL, "oauthbearer_token_refresh_cb", _RK_C_PTR, + _RK(sasl.oauthbearer.token_refresh_cb), + "SASL/OAUTHBEARER token refresh callback (set with " + "rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by " + "rd_kafka_poll(), et.al. " + "This callback will be triggered when it is time to refresh " + "the client's OAUTHBEARER token. 
" + "Also see `rd_kafka_conf_enable_sasl_queue()`.", + _UNSUPPORTED_OAUTHBEARER}, + { + _RK_GLOBAL | _RK_HIDDEN, + "enable_sasl_queue", + _RK_C_BOOL, + _RK(sasl.enable_callback_queue), + "Enable the SASL callback queue " + "(set with rd_kafka_conf_enable_sasl_queue()).", + 0, + 1, + 0, + }, + {_RK_GLOBAL, "sasl.oauthbearer.method", _RK_C_S2I, + _RK(sasl.oauthbearer.method), + "Set to \"default\" or \"oidc\" to control which login method " + "is used. If set it to \"oidc\", OAuth/OIDC login method will " + "be used. " + "sasl.oauthbearer.client.id, sasl.oauthbearer.client.secret, " + "sasl.oauthbearer.scope, sasl.oauthbearer.extensions, " + "and sasl.oauthbearer.token.endpoint.url are needed if " + "sasl.oauthbearer.method is set to \"oidc\".", + .vdef = RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, + .s2i = {{RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, "default"}, + {RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC, "oidc"}}, + _UNSUPPORTED_OIDC}, + {_RK_GLOBAL, "sasl.oauthbearer.client.id", _RK_C_STR, + _RK(sasl.oauthbearer.client_id), + "It's a public identifier for the application. " + "It must be unique across all clients that the " + "authorization server handles. " + "This is only used when sasl.oauthbearer.method is set to oidc.", + _UNSUPPORTED_OIDC}, + {_RK_GLOBAL, "sasl.oauthbearer.client.secret", _RK_C_STR, + _RK(sasl.oauthbearer.client_secret), + "A client secret only known to the application and the " + "authorization server. This should be a sufficiently random string " + "that are not guessable. " + "This is only used when sasl.oauthbearer.method is set to \"oidc\".", + _UNSUPPORTED_OIDC}, + {_RK_GLOBAL, "sasl.oauthbearer.scope", _RK_C_STR, + _RK(sasl.oauthbearer.scope), + "Client use this to specify the scope of the access request to the " + "broker. 
" + "This is only used when sasl.oauthbearer.method is set to \"oidc\".", + _UNSUPPORTED_OIDC}, + {_RK_GLOBAL, "sasl.oauthbearer.extensions", _RK_C_STR, + _RK(sasl.oauthbearer.extensions_str), + "Allow additional information to be provided to the broker. " + "It's comma-separated list of key=value pairs. " + "The example of the input is " + "\"supportFeatureX=true,organizationId=sales-emea\"." + " This is only used when sasl.oauthbearer.method is set " + "to \"oidc\".", + _UNSUPPORTED_OIDC}, + {_RK_GLOBAL, "sasl.oauthbearer.token.endpoint.url", _RK_C_STR, + _RK(sasl.oauthbearer.token_endpoint_url), + "OAUTH issuer token endpoint HTTP(S) URI used to retrieve the " + "token. " + "This is only used when sasl.oauthbearer.method is set to \"oidc\".", + _UNSUPPORTED_OIDC}, + + /* Plugins */ + {_RK_GLOBAL, "plugin.library.paths", _RK_C_STR, _RK(plugin_paths), + "List of plugin libraries to load (; separated). " + "The library search path is platform dependent (see dlopen(3) for " + "Unix and LoadLibrary() for Windows). If no filename extension is " + "specified the platform-specific extension (such as .dll or .so) " + "will be appended automatically.", #if WITH_PLUGINS - .set = rd_kafka_plugins_conf_set + .set = rd_kafka_plugins_conf_set #else .unsupported = "libdl/dlopen(3) not available at build time" #endif - }, - - /* Interceptors are added through specific API and not exposed - * as configuration properties. - * The interceptor property must be defined after plugin.library.paths - * so that the plugin libraries are properly loaded before - * interceptors are configured when duplicating configuration objects.*/ - { _RK_GLOBAL, "interceptors", _RK_C_INTERNAL, - _RK(interceptors), - "Interceptors added through rd_kafka_conf_interceptor_add_..() " - "and any configuration handled by interceptors.", - .ctor = rd_kafka_conf_interceptor_ctor, - .dtor = rd_kafka_conf_interceptor_dtor, - .copy = rd_kafka_conf_interceptor_copy }, - - /* Test mocks. 
*/ - { _RK_GLOBAL|_RK_HIDDEN, "test.mock.num.brokers", _RK_C_INT, - _RK(mock.broker_cnt), - "Number of mock brokers to create. " - "This will automatically overwrite `bootstrap.servers` with the " - "mock broker list.", - 0, 10000, 0 }, - - /* Unit test interfaces. - * These are not part of the public API and may change at any time. - * Only to be used by the librdkafka tests. */ - { _RK_GLOBAL|_RK_HIDDEN, "ut_handle_ProduceResponse", _RK_C_PTR, - _RK(ut.handle_ProduceResponse), - "ProduceResponse handler: " - "rd_kafka_resp_err_t (*cb) (rd_kafka_t *rk, " - "int32_t brokerid, uint64_t msgid, rd_kafka_resp_err_t err)" }, - - /* Global consumer group properties */ - { _RK_GLOBAL|_RK_CGRP|_RK_HIGH, "group.id", _RK_C_STR, - _RK(group_id_str), - "Client group id string. All clients sharing the same group.id " - "belong to the same group." }, - { _RK_GLOBAL|_RK_CGRP|_RK_MED, - "group.instance.id", _RK_C_STR, - _RK(group_instance_id), - "Enable static group membership. " - "Static group members are able to leave and rejoin a group " - "within the configured `session.timeout.ms` without prompting a " - "group rebalance. This should be used in combination with a larger " - "`session.timeout.ms` to avoid group rebalances caused by transient " - "unavailability (e.g. process restarts). " - "Requires broker version >= 2.3.0."}, - { _RK_GLOBAL|_RK_CGRP|_RK_MED, "partition.assignment.strategy", - _RK_C_STR, - _RK(partition_assignment_strategy), - "The name of one or more partition assignment strategies. The " - "elected group leader will use a strategy supported by all " - "members of the group to assign partitions to group members. If " - "there is more than one eligible strategy, preference is " - "determined by the order of this list (strategies earlier in the " - "list have higher priority). " - "Cooperative and non-cooperative (eager) strategies must not be " - "mixed. 
" - "Available strategies: range, roundrobin, cooperative-sticky.", - .sdef = "range,roundrobin" }, - { _RK_GLOBAL|_RK_CGRP|_RK_HIGH, "session.timeout.ms", _RK_C_INT, - _RK(group_session_timeout_ms), - "Client group session and failure detection timeout. " - "The consumer sends periodic heartbeats (heartbeat.interval.ms) " - "to indicate its liveness to the broker. If no hearts are " - "received by the broker for a group member within the " - "session timeout, the broker will remove the consumer from " - "the group and trigger a rebalance. " - "The allowed range is configured with the **broker** configuration " - "properties `group.min.session.timeout.ms` and " - "`group.max.session.timeout.ms`. " - "Also see `max.poll.interval.ms`.", - 1, 3600*1000, 45*1000 }, - { _RK_GLOBAL|_RK_CGRP, "heartbeat.interval.ms", _RK_C_INT, - _RK(group_heartbeat_intvl_ms), - "Group session keepalive heartbeat interval.", - 1, 3600*1000, 3*1000 }, - { _RK_GLOBAL|_RK_CGRP, "group.protocol.type", _RK_C_KSTR, - _RK(group_protocol_type), - "Group protocol type. NOTE: Currently, the only supported group " - "protocol type is `consumer`.", - .sdef = "consumer" }, - { _RK_GLOBAL|_RK_CGRP, "coordinator.query.interval.ms", _RK_C_INT, - _RK(coord_query_intvl_ms), - "How often to query for the current client group coordinator. " - "If the currently assigned coordinator is down the configured " - "query interval will be divided by ten to more quickly recover " - "in case of coordinator reassignment.", - 1, 3600*1000, 10*60*1000 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_HIGH, "max.poll.interval.ms", _RK_C_INT, - _RK(max_poll_interval_ms), - "Maximum allowed time between calls to consume messages " - "(e.g., rd_kafka_consumer_poll()) for high-level consumers. " - "If this interval is exceeded the consumer is considered failed " - "and the group will rebalance in order to reassign the " - "partitions to another consumer group member. " - "Warning: Offset commits may be not possible at this point. 
" - "Note: It is recommended to set `enable.auto.offset.store=false` " - "for long-time processing applications and then explicitly store " - "offsets (using offsets_store()) *after* message processing, to " - "make sure offsets are not auto-committed prior to processing " - "has finished. " - "The interval is checked two times per second. " - "See KIP-62 for more information.", - 1, 86400*1000, 300000 - }, - - /* Global consumer properties */ - { _RK_GLOBAL|_RK_CONSUMER|_RK_HIGH, "enable.auto.commit", _RK_C_BOOL, - _RK(enable_auto_commit), - "Automatically and periodically commit offsets in the background. " - "Note: setting this to false does not prevent the consumer from " - "fetching previously committed start offsets. To circumvent this " - "behaviour set specific start offsets per partition in the call " - "to assign().", - 0, 1, 1 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "auto.commit.interval.ms", - _RK_C_INT, - _RK(auto_commit_interval_ms), - "The frequency in milliseconds that the consumer offsets " - "are committed (written) to offset storage. (0 = disable). " - "This setting is used by the high-level consumer.", - 0, 86400*1000, 5*1000 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_HIGH, "enable.auto.offset.store", - _RK_C_BOOL, - _RK(enable_auto_offset_store), - "Automatically store offset of last message provided to " - "application. " - "The offset store is an in-memory store of the next offset to " - "(auto-)commit for each partition.", - 0, 1, 1 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "queued.min.messages", _RK_C_INT, - _RK(queued_min_msgs), - "Minimum number of messages per topic+partition " - "librdkafka tries to maintain in the local consumer queue.", - 1, 10000000, 100000 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "queued.max.messages.kbytes", - _RK_C_INT, - _RK(queued_max_msg_kbytes), - "Maximum number of kilobytes of queued pre-fetched messages " - "in the local consumer queue. 
" - "If using the high-level consumer this setting applies to the " - "single consumer queue, regardless of the number of partitions. " - "When using the legacy simple consumer or when separate " - "partition queues are used this setting applies per partition. " - "This value may be overshot by fetch.message.max.bytes. " - "This property has higher priority than queued.min.messages.", - 1, INT_MAX/1024, 0x10000/*64MB*/ }, - { _RK_GLOBAL|_RK_CONSUMER, "fetch.wait.max.ms", _RK_C_INT, - _RK(fetch_wait_max_ms), - "Maximum time the broker may wait to fill the Fetch response " - "with fetch.min.bytes of messages.", - 0, 300*1000, 500 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "fetch.message.max.bytes", - _RK_C_INT, - _RK(fetch_msg_max_bytes), - "Initial maximum number of bytes per topic+partition to request when " - "fetching messages from the broker. " - "If the client encounters a message larger than this value " - "it will gradually try to increase it until the " - "entire message can be fetched.", - 1, 1000000000, 1024*1024 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "max.partition.fetch.bytes", - _RK_C_ALIAS, - .sdef = "fetch.message.max.bytes" }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "fetch.max.bytes", _RK_C_INT, - _RK(fetch_max_bytes), - "Maximum amount of data the broker shall return for a Fetch request. " - "Messages are fetched in batches by the consumer and if the first " - "message batch in the first non-empty partition of the Fetch request " - "is larger than this value, then the message batch will still be " - "returned to ensure the consumer can make progress. " - "The maximum message batch size accepted by the broker is defined " - "via `message.max.bytes` (broker config) or " - "`max.message.bytes` (broker topic config). 
" - "`fetch.max.bytes` is automatically adjusted upwards to be " - "at least `message.max.bytes` (consumer config).", - 0, INT_MAX-512, 50*1024*1024 /* 50MB */ }, - { _RK_GLOBAL|_RK_CONSUMER, "fetch.min.bytes", _RK_C_INT, - _RK(fetch_min_bytes), - "Minimum number of bytes the broker responds with. " - "If fetch.wait.max.ms expires the accumulated data will " - "be sent to the client regardless of this setting.", - 1, 100000000, 1 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "fetch.error.backoff.ms", _RK_C_INT, - _RK(fetch_error_backoff_ms), - "How long to postpone the next fetch request for a " - "topic+partition in case of a fetch error.", - 0, 300*1000, 500 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_DEPRECATED, "offset.store.method", - _RK_C_S2I, - _RK(offset_store_method), - "Offset commit store method: " - "'file' - DEPRECATED: local file store (offset.store.path, et.al), " - "'broker' - broker commit store " - "(requires Apache Kafka 0.8.2 or later on the broker).", - .vdef = RD_KAFKA_OFFSET_METHOD_BROKER, - .s2i = { - { RD_KAFKA_OFFSET_METHOD_NONE, "none" }, - { RD_KAFKA_OFFSET_METHOD_FILE, "file" }, - { RD_KAFKA_OFFSET_METHOD_BROKER, "broker" } - } - }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_HIGH, "isolation.level", - _RK_C_S2I, - _RK(isolation_level), - "Controls how to read messages written transactionally: " - "`read_committed` - only return transactional messages which have " - "been committed. 
`read_uncommitted` - return all messages, even " - "transactional messages which have been aborted.", - .vdef = RD_KAFKA_READ_COMMITTED, - .s2i = { - { RD_KAFKA_READ_UNCOMMITTED, "read_uncommitted" }, - { RD_KAFKA_READ_COMMITTED, "read_committed" } - } - }, - { _RK_GLOBAL|_RK_CONSUMER, "consume_cb", _RK_C_PTR, - _RK(consume_cb), - "Message consume callback (set with rd_kafka_conf_set_consume_cb())"}, - { _RK_GLOBAL|_RK_CONSUMER, "rebalance_cb", _RK_C_PTR, - _RK(rebalance_cb), - "Called after consumer group has been rebalanced " - "(set with rd_kafka_conf_set_rebalance_cb())" }, - { _RK_GLOBAL|_RK_CONSUMER, "offset_commit_cb", _RK_C_PTR, - _RK(offset_commit_cb), - "Offset commit result propagation callback. " - "(set with rd_kafka_conf_set_offset_commit_cb())" }, - { _RK_GLOBAL|_RK_CONSUMER, "enable.partition.eof", _RK_C_BOOL, - _RK(enable_partition_eof), - "Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the " - "consumer reaches the end of a partition.", - 0, 1, 0 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "check.crcs", _RK_C_BOOL, - _RK(check_crcs), - "Verify CRC32 of consumed messages, ensuring no on-the-wire or " - "on-disk corruption to the messages occurred. This check comes " - "at slightly increased CPU usage.", - 0, 1, 0 }, - { _RK_GLOBAL|_RK_CONSUMER, "allow.auto.create.topics", _RK_C_BOOL, - _RK(allow_auto_create_topics), - "Allow automatic topic creation on the broker when subscribing to " - "or assigning non-existent topics. " - "The broker must also be configured with " - "`auto.create.topics.enable=true` for this configuraiton to " - "take effect. " - "Note: The default value (false) is different from the " - "Java consumer (true). " - "Requires broker version >= 0.11.0.0, for older broker versions " - "only the broker configuration applies.", - 0, 1, 0 }, - { _RK_GLOBAL, "client.rack", _RK_C_KSTR, - _RK(client_rack), - "A rack identifier for this client. This can be any string value " - "which indicates where this client is physically located. 
It " - "corresponds with the broker config `broker.rack`.", - .sdef = "" }, - - /* Global producer properties */ - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "transactional.id", _RK_C_STR, - _RK(eos.transactional_id), - "Enables the transactional producer. " - "The transactional.id is used to identify the same transactional " - "producer instance across process restarts. " - "It allows the producer to guarantee that transactions corresponding " - "to earlier instances of the same producer have been finalized " - "prior to starting any new transactions, and that any " - "zombie instances are fenced off. " - "If no transactional.id is provided, then the producer is limited " - "to idempotent delivery (if enable.idempotence is set). " - "Requires broker version >= 0.11.0." }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_MED, "transaction.timeout.ms", _RK_C_INT, - _RK(eos.transaction_timeout_ms), - "The maximum amount of time in milliseconds that the transaction " - "coordinator will wait for a transaction status update from the " - "producer before proactively aborting the ongoing transaction. " - "If this value is larger than the `transaction.max.timeout.ms` " - "setting in the broker, the init_transactions() call will fail with " - "ERR_INVALID_TRANSACTION_TIMEOUT. " - "The transaction timeout automatically adjusts " - "`message.timeout.ms` and `socket.timeout.ms`, unless explicitly " - "configured in which case they must not exceed the " - "transaction timeout (`socket.timeout.ms` must be at least 100ms " - "lower than `transaction.timeout.ms`). " - "This is also the default timeout value if no timeout (-1) is " - "supplied to the transactional API methods.", - 1000, INT_MAX, 60000 }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "enable.idempotence", _RK_C_BOOL, - _RK(eos.idempotence), - "When set to `true`, the producer will ensure that messages are " - "successfully produced exactly once and in the original produce " - "order. 
" - "The following configuration properties are adjusted automatically " - "(if not modified by the user) when idempotence is enabled: " - "`max.in.flight.requests.per.connection=" - RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "` (must be less than or " - "equal to " RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "), `retries=INT32_MAX` " - "(must be greater than 0), `acks=all`, `queuing.strategy=fifo`. " - "Producer instantation will fail if user-supplied configuration " - "is incompatible.", - 0, 1, 0 }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_EXPERIMENTAL, "enable.gapless.guarantee", - _RK_C_BOOL, - _RK(eos.gapless), - "When set to `true`, any error that could result in a gap " - "in the produced message series when a batch of messages fails, " - "will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop " - "the producer. " - "Messages failing due to `message.timeout.ms` are not covered " - "by this guarantee. " - "Requires `enable.idempotence=true`.", - 0, 1, 0 }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "queue.buffering.max.messages", - _RK_C_INT, - _RK(queue_buffering_max_msgs), - "Maximum number of messages allowed on the producer queue. " - "This queue is shared by all topics and partitions.", - 1, 10000000, 100000 }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "queue.buffering.max.kbytes", - _RK_C_INT, - _RK(queue_buffering_max_kbytes), - "Maximum total message size sum allowed on the producer queue. " - "This queue is shared by all topics and partitions. " - "This property has higher priority than queue.buffering.max.messages.", - 1, INT_MAX, 0x100000/*1GB*/ }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "queue.buffering.max.ms", - _RK_C_DBL, - _RK(buffering_max_ms_dbl), - "Delay in milliseconds to wait for messages in the producer queue " - "to accumulate before constructing message batches (MessageSets) to " - "transmit to brokers. 
" - "A higher value allows larger and more effective " - "(less overhead, improved compression) batches of messages to " - "accumulate at the expense of increased message delivery latency.", - .dmin = 0, .dmax = 900.0*1000.0, .ddef = 5.0 }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "linger.ms", _RK_C_ALIAS, - .sdef = "queue.buffering.max.ms" }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "message.send.max.retries", - _RK_C_INT, - _RK(max_retries), - "How many times to retry sending a failing Message. " - "**Note:** retrying may cause reordering unless " - "`enable.idempotence` is set to true.", - 0, INT32_MAX, INT32_MAX }, - { _RK_GLOBAL | _RK_PRODUCER, "retries", _RK_C_ALIAS, - .sdef = "message.send.max.retries" }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_MED, "retry.backoff.ms", _RK_C_INT, - _RK(retry_backoff_ms), - "The backoff time in milliseconds before retrying a protocol request.", - 1, 300*1000, 100 }, - - { _RK_GLOBAL|_RK_PRODUCER, "queue.buffering.backpressure.threshold", - _RK_C_INT, _RK(queue_backpressure_thres), - "The threshold of outstanding not yet transmitted broker requests " - "needed to backpressure the producer's message accumulator. " - "If the number of not yet transmitted requests equals or exceeds " - "this number, produce request creation that would have otherwise " - "been triggered (for example, in accordance with linger.ms) will be " - "delayed. A lower number yields larger and more effective batches. " - "A higher value can improve latency when using compression on slow " - "machines.", - 1, 1000000, 1 }, - - { _RK_GLOBAL|_RK_PRODUCER|_RK_MED, "compression.codec", _RK_C_S2I, - _RK(compression_codec), - "compression codec to use for compressing message sets. " - "This is the default value for all topics, may be overridden by " - "the topic configuration property `compression.codec`. 
", - .vdef = RD_KAFKA_COMPRESSION_NONE, - .s2i = { - { RD_KAFKA_COMPRESSION_NONE, "none" }, - { RD_KAFKA_COMPRESSION_GZIP, "gzip", - _UNSUPPORTED_ZLIB }, - { RD_KAFKA_COMPRESSION_SNAPPY, "snappy", - _UNSUPPORTED_SNAPPY }, - { RD_KAFKA_COMPRESSION_LZ4, "lz4" }, - { RD_KAFKA_COMPRESSION_ZSTD, "zstd", - _UNSUPPORTED_ZSTD }, - { 0 } - } - }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_MED, "compression.type", _RK_C_ALIAS, - .sdef = "compression.codec" }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_MED, "batch.num.messages", _RK_C_INT, - _RK(batch_num_messages), - "Maximum number of messages batched in one MessageSet. " - "The total MessageSet size is also limited by batch.size and " - "message.max.bytes.", - 1, 1000000, 10000 }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_MED, "batch.size", _RK_C_INT, - _RK(batch_size), - "Maximum size (in bytes) of all messages batched in one MessageSet, " - "including protocol framing overhead. " - "This limit is applied after the first message has been added " - "to the batch, regardless of the first message's size, this is to " - "ensure that messages that exceed batch.size are produced. " - "The total MessageSet size is also limited by batch.num.messages and " - "message.max.bytes.", - 1, INT_MAX, 1000000 }, - { _RK_GLOBAL|_RK_PRODUCER, "delivery.report.only.error", _RK_C_BOOL, - _RK(dr_err_only), - "Only provide delivery reports for failed messages.", - 0, 1, 0 }, - { _RK_GLOBAL|_RK_PRODUCER, "dr_cb", _RK_C_PTR, - _RK(dr_cb), - "Delivery report callback (set with rd_kafka_conf_set_dr_cb())" }, - { _RK_GLOBAL|_RK_PRODUCER, "dr_msg_cb", _RK_C_PTR, - _RK(dr_msg_cb), - "Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())" }, - { _RK_GLOBAL|_RK_PRODUCER, "sticky.partitioning.linger.ms", _RK_C_INT, - _RK(sticky_partition_linger_ms), - "Delay in milliseconds to wait to assign new sticky partitions for " - "each topic. " - "By default, set to double the time of linger.ms. To disable sticky " - "behavior, set to 0. 
" - "This behavior affects messages with the key NULL in all cases, and " - "messages with key lengths of zero when the consistent_random " - "partitioner is in use. " - "These messages would otherwise be assigned randomly. " - "A higher value allows for more effective batching of these " - "messages.", - 0, 900000, 10 }, - - - /* - * Topic properties - */ - - /* Topic producer properties */ - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "request.required.acks", _RK_C_INT, - _RKT(required_acks), - "This field indicates the number of acknowledgements the leader " - "broker must receive from ISR brokers before responding to the " - "request: " - "*0*=Broker does not send any response/ack to client, " - "*-1* or *all*=Broker will block until message is committed by all " - "in sync replicas (ISRs). If there are less than " - "`min.insync.replicas` (broker configuration) in the ISR set the " - "produce request will fail.", - -1, 1000, -1, - .s2i = { - { -1, "all" }, - } - }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "acks", _RK_C_ALIAS, - .sdef = "request.required.acks" }, - - { _RK_TOPIC|_RK_PRODUCER|_RK_MED, "request.timeout.ms", _RK_C_INT, - _RKT(request_timeout_ms), - "The ack timeout of the producer request in milliseconds. " - "This value is only enforced by the broker and relies " - "on `request.required.acks` being != 0.", - 1, 900*1000, 30*1000 }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "message.timeout.ms", _RK_C_INT, - _RKT(message_timeout_ms), - "Local message timeout. " - "This value is only enforced locally and limits the time a " - "produced message waits for successful delivery. " - "A time of 0 is infinite. " - "This is the maximum time librdkafka may use to deliver a message " - "(including retries). Delivery error occurs when either the retry " - "count or the message timeout are exceeded. 
" - "The message timeout is automatically adjusted to " - "`transaction.timeout.ms` if `transactional.id` is configured.", - 0, INT32_MAX, 300*1000 }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "delivery.timeout.ms", _RK_C_ALIAS, - .sdef = "message.timeout.ms" }, - { _RK_TOPIC|_RK_PRODUCER|_RK_DEPRECATED|_RK_EXPERIMENTAL, - "queuing.strategy", _RK_C_S2I, - _RKT(queuing_strategy), - "Producer queuing strategy. FIFO preserves produce ordering, " - "while LIFO prioritizes new messages.", - .vdef = 0, - .s2i = { - { RD_KAFKA_QUEUE_FIFO, "fifo" }, - { RD_KAFKA_QUEUE_LIFO, "lifo" } - } - }, - { _RK_TOPIC|_RK_PRODUCER|_RK_DEPRECATED, - "produce.offset.report", _RK_C_BOOL, - _RKT(produce_offset_report), - "No longer used.", - 0, 1, 0 }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "partitioner", _RK_C_STR, - _RKT(partitioner_str), - "Partitioner: " - "`random` - random distribution, " - "`consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), " - "`consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), " - "`murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), " - "`murmur2_random` - Java Producer compatible Murmur2 hash of key " - "(NULL keys are randomly partitioned. This is functionally equivalent " - "to the default partitioner in the Java Producer.), " - "`fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), " - "`fnv1a_random` - FNV-1a hash of key (NULL keys are randomly partitioned).", - .sdef = "consistent_random", - .validate = rd_kafka_conf_validate_partitioner }, - { _RK_TOPIC|_RK_PRODUCER, "partitioner_cb", _RK_C_PTR, - _RKT(partitioner), - "Custom partitioner callback " - "(set with rd_kafka_topic_conf_set_partitioner_cb())" }, - { _RK_TOPIC|_RK_PRODUCER|_RK_DEPRECATED|_RK_EXPERIMENTAL, - "msg_order_cmp", _RK_C_PTR, - _RKT(msg_order_cmp), - "Message queue ordering comparator " - "(set with rd_kafka_topic_conf_set_msg_order_cmp()). 
" - "Also see `queuing.strategy`." }, - { _RK_TOPIC, "opaque", _RK_C_PTR, - _RKT(opaque), - "Application opaque (set with rd_kafka_topic_conf_set_opaque())" }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "compression.codec", _RK_C_S2I, - _RKT(compression_codec), - "Compression codec to use for compressing message sets. " - "inherit = inherit global compression.codec configuration.", - .vdef = RD_KAFKA_COMPRESSION_INHERIT, - .s2i = { - { RD_KAFKA_COMPRESSION_NONE, "none" }, - { RD_KAFKA_COMPRESSION_GZIP, "gzip", - _UNSUPPORTED_ZLIB }, - { RD_KAFKA_COMPRESSION_SNAPPY, "snappy", - _UNSUPPORTED_SNAPPY }, - { RD_KAFKA_COMPRESSION_LZ4, "lz4" }, - { RD_KAFKA_COMPRESSION_ZSTD, "zstd", - _UNSUPPORTED_ZSTD }, - { RD_KAFKA_COMPRESSION_INHERIT, "inherit" }, - { 0 } - } - }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "compression.type", _RK_C_ALIAS, - .sdef = "compression.codec" }, - { _RK_TOPIC|_RK_PRODUCER|_RK_MED, "compression.level", _RK_C_INT, - _RKT(compression_level), - "Compression level parameter for algorithm selected by configuration " - "property `compression.codec`. Higher values will result in better " - "compression at the cost of more CPU usage. Usable range is " - "algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; " - "-1 = codec-dependent default compression level.", - RD_KAFKA_COMPLEVEL_MIN, - RD_KAFKA_COMPLEVEL_MAX, - RD_KAFKA_COMPLEVEL_DEFAULT }, - - - /* Topic consumer properties */ - { _RK_TOPIC|_RK_CONSUMER|_RK_DEPRECATED, "auto.commit.enable", - _RK_C_BOOL, - _RKT(auto_commit), - "[**LEGACY PROPERTY:** This property is used by the simple legacy " - "consumer only. When using the high-level KafkaConsumer, the global " - "`enable.auto.commit` property must be used instead]. " - "If true, periodically commit offset of the last message handed " - "to the application. This committed offset will be used when the " - "process restarts to pick up where it left off. 
" - "If false, the application will have to call " - "`rd_kafka_offset_store()` to store an offset (optional). " - "Offsets will be written to broker or local file according to " - "offset.store.method.", - 0, 1, 1 }, - { _RK_TOPIC|_RK_CONSUMER, "enable.auto.commit", _RK_C_ALIAS, - .sdef = "auto.commit.enable" }, - { _RK_TOPIC|_RK_CONSUMER|_RK_HIGH, "auto.commit.interval.ms", - _RK_C_INT, - _RKT(auto_commit_interval_ms), - "[**LEGACY PROPERTY:** This setting is used by the simple legacy " - "consumer only. When using the high-level KafkaConsumer, the " - "global `auto.commit.interval.ms` property must be used instead]. " - "The frequency in milliseconds that the consumer offsets " - "are committed (written) to offset storage.", - 10, 86400*1000, 60*1000 }, - { _RK_TOPIC|_RK_CONSUMER|_RK_HIGH, "auto.offset.reset", _RK_C_S2I, - _RKT(auto_offset_reset), - "Action to take when there is no initial offset in offset store " - "or the desired offset is out of range: " - "'smallest','earliest' - automatically reset the offset to the smallest offset, " - "'largest','latest' - automatically reset the offset to the largest offset, " - "'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is " - "retrieved by consuming messages and checking 'message->err'.", - .vdef = RD_KAFKA_OFFSET_END, - .s2i = { - { RD_KAFKA_OFFSET_BEGINNING, "smallest" }, - { RD_KAFKA_OFFSET_BEGINNING, "earliest" }, - { RD_KAFKA_OFFSET_BEGINNING, "beginning" }, - { RD_KAFKA_OFFSET_END, "largest" }, - { RD_KAFKA_OFFSET_END, "latest" }, - { RD_KAFKA_OFFSET_END, "end" }, - { RD_KAFKA_OFFSET_INVALID, "error" }, - } - }, - { _RK_TOPIC|_RK_CONSUMER|_RK_DEPRECATED, "offset.store.path", - _RK_C_STR, - _RKT(offset_store_path), - "Path to local file for storing offsets. If the path is a directory " - "a filename will be automatically generated in that directory based " - "on the topic and partition. " - "File-based offset storage will be removed in a future version.", - .sdef = "." 
}, - - { _RK_TOPIC|_RK_CONSUMER|_RK_DEPRECATED, - "offset.store.sync.interval.ms", _RK_C_INT, - _RKT(offset_store_sync_interval_ms), - "fsync() interval for the offset file, in milliseconds. " - "Use -1 to disable syncing, and 0 for immediate sync after " - "each write. " - "File-based offset storage will be removed in a future version.", - -1, 86400*1000, -1 }, - - { _RK_TOPIC|_RK_CONSUMER|_RK_DEPRECATED, "offset.store.method", - _RK_C_S2I, - _RKT(offset_store_method), - "Offset commit store method: " - "'file' - DEPRECATED: local file store (offset.store.path, et.al), " - "'broker' - broker commit store " - "(requires \"group.id\" to be configured and " - "Apache Kafka 0.8.2 or later on the broker.).", - .vdef = RD_KAFKA_OFFSET_METHOD_BROKER, - .s2i = { - { RD_KAFKA_OFFSET_METHOD_FILE, "file" }, - { RD_KAFKA_OFFSET_METHOD_BROKER, "broker" } - } - }, - - { _RK_TOPIC|_RK_CONSUMER, "consume.callback.max.messages", _RK_C_INT, - _RKT(consume_callback_max_msgs), - "Maximum number of messages to dispatch in " - "one `rd_kafka_consume_callback*()` call (0 = unlimited)", - 0, 1000000, 0 }, - - { 0, /* End */ } -}; + }, + + /* Interceptors are added through specific API and not exposed + * as configuration properties. + * The interceptor property must be defined after plugin.library.paths + * so that the plugin libraries are properly loaded before + * interceptors are configured when duplicating configuration objects.*/ + {_RK_GLOBAL, "interceptors", _RK_C_INTERNAL, _RK(interceptors), + "Interceptors added through rd_kafka_conf_interceptor_add_..() " + "and any configuration handled by interceptors.", + .ctor = rd_kafka_conf_interceptor_ctor, + .dtor = rd_kafka_conf_interceptor_dtor, + .copy = rd_kafka_conf_interceptor_copy}, + + /* Test mocks. */ + {_RK_GLOBAL | _RK_HIDDEN, "test.mock.num.brokers", _RK_C_INT, + _RK(mock.broker_cnt), + "Number of mock brokers to create. 
" + "This will automatically overwrite `bootstrap.servers` with the " + "mock broker list.", + 0, 10000, 0}, + + /* Unit test interfaces. + * These are not part of the public API and may change at any time. + * Only to be used by the librdkafka tests. */ + {_RK_GLOBAL | _RK_HIDDEN, "ut_handle_ProduceResponse", _RK_C_PTR, + _RK(ut.handle_ProduceResponse), + "ProduceResponse handler: " + "rd_kafka_resp_err_t (*cb) (rd_kafka_t *rk, " + "int32_t brokerid, uint64_t msgid, rd_kafka_resp_err_t err)"}, + + /* Global consumer group properties */ + {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "group.id", _RK_C_STR, _RK(group_id_str), + "Client group id string. All clients sharing the same group.id " + "belong to the same group."}, + {_RK_GLOBAL | _RK_CGRP | _RK_MED, "group.instance.id", _RK_C_STR, + _RK(group_instance_id), + "Enable static group membership. " + "Static group members are able to leave and rejoin a group " + "within the configured `session.timeout.ms` without prompting a " + "group rebalance. This should be used in combination with a larger " + "`session.timeout.ms` to avoid group rebalances caused by transient " + "unavailability (e.g. process restarts). " + "Requires broker version >= 2.3.0."}, + {_RK_GLOBAL | _RK_CGRP | _RK_MED, "partition.assignment.strategy", + _RK_C_STR, _RK(partition_assignment_strategy), + "The name of one or more partition assignment strategies. The " + "elected group leader will use a strategy supported by all " + "members of the group to assign partitions to group members. If " + "there is more than one eligible strategy, preference is " + "determined by the order of this list (strategies earlier in the " + "list have higher priority). " + "Cooperative and non-cooperative (eager) strategies must not be " + "mixed. 
" + "Available strategies: range, roundrobin, cooperative-sticky.", + .sdef = "range,roundrobin"}, + {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "session.timeout.ms", _RK_C_INT, + _RK(group_session_timeout_ms), + "Client group session and failure detection timeout. " + "The consumer sends periodic heartbeats (heartbeat.interval.ms) " + "to indicate its liveness to the broker. If no hearts are " + "received by the broker for a group member within the " + "session timeout, the broker will remove the consumer from " + "the group and trigger a rebalance. " + "The allowed range is configured with the **broker** configuration " + "properties `group.min.session.timeout.ms` and " + "`group.max.session.timeout.ms`. " + "Also see `max.poll.interval.ms`.", + 1, 3600 * 1000, 45 * 1000}, + {_RK_GLOBAL | _RK_CGRP, "heartbeat.interval.ms", _RK_C_INT, + _RK(group_heartbeat_intvl_ms), + "Group session keepalive heartbeat interval.", 1, 3600 * 1000, 3 * 1000}, + {_RK_GLOBAL | _RK_CGRP, "group.protocol.type", _RK_C_KSTR, + _RK(group_protocol_type), + "Group protocol type. NOTE: Currently, the only supported group " + "protocol type is `consumer`.", + .sdef = "consumer"}, + {_RK_GLOBAL | _RK_CGRP, "coordinator.query.interval.ms", _RK_C_INT, + _RK(coord_query_intvl_ms), + "How often to query for the current client group coordinator. " + "If the currently assigned coordinator is down the configured " + "query interval will be divided by ten to more quickly recover " + "in case of coordinator reassignment.", + 1, 3600 * 1000, 10 * 60 * 1000}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "max.poll.interval.ms", _RK_C_INT, + _RK(max_poll_interval_ms), + "Maximum allowed time between calls to consume messages " + "(e.g., rd_kafka_consumer_poll()) for high-level consumers. " + "If this interval is exceeded the consumer is considered failed " + "and the group will rebalance in order to reassign the " + "partitions to another consumer group member. 
" + "Warning: Offset commits may be not possible at this point. " + "Note: It is recommended to set `enable.auto.offset.store=false` " + "for long-time processing applications and then explicitly store " + "offsets (using offsets_store()) *after* message processing, to " + "make sure offsets are not auto-committed prior to processing " + "has finished. " + "The interval is checked two times per second. " + "See KIP-62 for more information.", + 1, 86400 * 1000, 300000}, + + /* Global consumer properties */ + {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "enable.auto.commit", _RK_C_BOOL, + _RK(enable_auto_commit), + "Automatically and periodically commit offsets in the background. " + "Note: setting this to false does not prevent the consumer from " + "fetching previously committed start offsets. To circumvent this " + "behaviour set specific start offsets per partition in the call " + "to assign().", + 0, 1, 1}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "auto.commit.interval.ms", _RK_C_INT, + _RK(auto_commit_interval_ms), + "The frequency in milliseconds that the consumer offsets " + "are committed (written) to offset storage. (0 = disable). " + "This setting is used by the high-level consumer.", + 0, 86400 * 1000, 5 * 1000}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "enable.auto.offset.store", + _RK_C_BOOL, _RK(enable_auto_offset_store), + "Automatically store offset of last message provided to " + "application. " + "The offset store is an in-memory store of the next offset to " + "(auto-)commit for each partition.", + 0, 1, 1}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "queued.min.messages", _RK_C_INT, + _RK(queued_min_msgs), + "Minimum number of messages per topic+partition " + "librdkafka tries to maintain in the local consumer queue.", + 1, 10000000, 100000}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "queued.max.messages.kbytes", + _RK_C_INT, _RK(queued_max_msg_kbytes), + "Maximum number of kilobytes of queued pre-fetched messages " + "in the local consumer queue. 
" + "If using the high-level consumer this setting applies to the " + "single consumer queue, regardless of the number of partitions. " + "When using the legacy simple consumer or when separate " + "partition queues are used this setting applies per partition. " + "This value may be overshot by fetch.message.max.bytes. " + "This property has higher priority than queued.min.messages.", + 1, INT_MAX / 1024, 0x10000 /*64MB*/}, + {_RK_GLOBAL | _RK_CONSUMER, "fetch.wait.max.ms", _RK_C_INT, + _RK(fetch_wait_max_ms), + "Maximum time the broker may wait to fill the Fetch response " + "with fetch.min.bytes of messages.", + 0, 300 * 1000, 500}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.message.max.bytes", _RK_C_INT, + _RK(fetch_msg_max_bytes), + "Initial maximum number of bytes per topic+partition to request when " + "fetching messages from the broker. " + "If the client encounters a message larger than this value " + "it will gradually try to increase it until the " + "entire message can be fetched.", + 1, 1000000000, 1024 * 1024}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "max.partition.fetch.bytes", + _RK_C_ALIAS, .sdef = "fetch.message.max.bytes"}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.max.bytes", _RK_C_INT, + _RK(fetch_max_bytes), + "Maximum amount of data the broker shall return for a Fetch request. " + "Messages are fetched in batches by the consumer and if the first " + "message batch in the first non-empty partition of the Fetch request " + "is larger than this value, then the message batch will still be " + "returned to ensure the consumer can make progress. " + "The maximum message batch size accepted by the broker is defined " + "via `message.max.bytes` (broker config) or " + "`max.message.bytes` (broker topic config). 
" + "`fetch.max.bytes` is automatically adjusted upwards to be " + "at least `message.max.bytes` (consumer config).", + 0, INT_MAX - 512, 50 * 1024 * 1024 /* 50MB */}, + {_RK_GLOBAL | _RK_CONSUMER, "fetch.min.bytes", _RK_C_INT, + _RK(fetch_min_bytes), + "Minimum number of bytes the broker responds with. " + "If fetch.wait.max.ms expires the accumulated data will " + "be sent to the client regardless of this setting.", + 1, 100000000, 1}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.error.backoff.ms", _RK_C_INT, + _RK(fetch_error_backoff_ms), + "How long to postpone the next fetch request for a " + "topic+partition in case of a fetch error.", + 0, 300 * 1000, 500}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.method", + _RK_C_S2I, _RK(offset_store_method), + "Offset commit store method: " + "'file' - DEPRECATED: local file store (offset.store.path, et.al), " + "'broker' - broker commit store " + "(requires Apache Kafka 0.8.2 or later on the broker).", + .vdef = RD_KAFKA_OFFSET_METHOD_BROKER, + .s2i = {{RD_KAFKA_OFFSET_METHOD_NONE, "none"}, + {RD_KAFKA_OFFSET_METHOD_FILE, "file"}, + {RD_KAFKA_OFFSET_METHOD_BROKER, "broker"}}}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "isolation.level", _RK_C_S2I, + _RK(isolation_level), + "Controls how to read messages written transactionally: " + "`read_committed` - only return transactional messages which have " + "been committed. 
`read_uncommitted` - return all messages, even " + "transactional messages which have been aborted.", + .vdef = RD_KAFKA_READ_COMMITTED, + .s2i = {{RD_KAFKA_READ_UNCOMMITTED, "read_uncommitted"}, + {RD_KAFKA_READ_COMMITTED, "read_committed"}}}, + {_RK_GLOBAL | _RK_CONSUMER, "consume_cb", _RK_C_PTR, _RK(consume_cb), + "Message consume callback (set with rd_kafka_conf_set_consume_cb())"}, + {_RK_GLOBAL | _RK_CONSUMER, "rebalance_cb", _RK_C_PTR, _RK(rebalance_cb), + "Called after consumer group has been rebalanced " + "(set with rd_kafka_conf_set_rebalance_cb())"}, + {_RK_GLOBAL | _RK_CONSUMER, "offset_commit_cb", _RK_C_PTR, + _RK(offset_commit_cb), + "Offset commit result propagation callback. " + "(set with rd_kafka_conf_set_offset_commit_cb())"}, + {_RK_GLOBAL | _RK_CONSUMER, "enable.partition.eof", _RK_C_BOOL, + _RK(enable_partition_eof), + "Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the " + "consumer reaches the end of a partition.", + 0, 1, 0}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "check.crcs", _RK_C_BOOL, + _RK(check_crcs), + "Verify CRC32 of consumed messages, ensuring no on-the-wire or " + "on-disk corruption to the messages occurred. This check comes " + "at slightly increased CPU usage.", + 0, 1, 0}, + {_RK_GLOBAL | _RK_CONSUMER, "allow.auto.create.topics", _RK_C_BOOL, + _RK(allow_auto_create_topics), + "Allow automatic topic creation on the broker when subscribing to " + "or assigning non-existent topics. " + "The broker must also be configured with " + "`auto.create.topics.enable=true` for this configuraiton to " + "take effect. " + "Note: The default value (false) is different from the " + "Java consumer (true). " + "Requires broker version >= 0.11.0.0, for older broker versions " + "only the broker configuration applies.", + 0, 1, 0}, + {_RK_GLOBAL, "client.rack", _RK_C_KSTR, _RK(client_rack), + "A rack identifier for this client. This can be any string value " + "which indicates where this client is physically located. 
It " + "corresponds with the broker config `broker.rack`.", + .sdef = ""}, + + /* Global producer properties */ + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "transactional.id", _RK_C_STR, + _RK(eos.transactional_id), + "Enables the transactional producer. " + "The transactional.id is used to identify the same transactional " + "producer instance across process restarts. " + "It allows the producer to guarantee that transactions corresponding " + "to earlier instances of the same producer have been finalized " + "prior to starting any new transactions, and that any " + "zombie instances are fenced off. " + "If no transactional.id is provided, then the producer is limited " + "to idempotent delivery (if enable.idempotence is set). " + "Requires broker version >= 0.11.0."}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "transaction.timeout.ms", _RK_C_INT, + _RK(eos.transaction_timeout_ms), + "The maximum amount of time in milliseconds that the transaction " + "coordinator will wait for a transaction status update from the " + "producer before proactively aborting the ongoing transaction. " + "If this value is larger than the `transaction.max.timeout.ms` " + "setting in the broker, the init_transactions() call will fail with " + "ERR_INVALID_TRANSACTION_TIMEOUT. " + "The transaction timeout automatically adjusts " + "`message.timeout.ms` and `socket.timeout.ms`, unless explicitly " + "configured in which case they must not exceed the " + "transaction timeout (`socket.timeout.ms` must be at least 100ms " + "lower than `transaction.timeout.ms`). " + "This is also the default timeout value if no timeout (-1) is " + "supplied to the transactional API methods.", + 1000, INT_MAX, 60000}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "enable.idempotence", _RK_C_BOOL, + _RK(eos.idempotence), + "When set to `true`, the producer will ensure that messages are " + "successfully produced exactly once and in the original produce " + "order. 
" + "The following configuration properties are adjusted automatically " + "(if not modified by the user) when idempotence is enabled: " + "`max.in.flight.requests.per.connection=" RD_KAFKA_IDEMP_MAX_INFLIGHT_STR + "` (must be less than or " + "equal to " RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "), `retries=INT32_MAX` " + "(must be greater than 0), `acks=all`, `queuing.strategy=fifo`. " + "Producer instantation will fail if user-supplied configuration " + "is incompatible.", + 0, 1, 0}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_EXPERIMENTAL, "enable.gapless.guarantee", + _RK_C_BOOL, _RK(eos.gapless), + "When set to `true`, any error that could result in a gap " + "in the produced message series when a batch of messages fails, " + "will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop " + "the producer. " + "Messages failing due to `message.timeout.ms` are not covered " + "by this guarantee. " + "Requires `enable.idempotence=true`.", + 0, 1, 0}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.messages", + _RK_C_INT, _RK(queue_buffering_max_msgs), + "Maximum number of messages allowed on the producer queue. " + "This queue is shared by all topics and partitions.", + 1, 10000000, 100000}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.kbytes", + _RK_C_INT, _RK(queue_buffering_max_kbytes), + "Maximum total message size sum allowed on the producer queue. " + "This queue is shared by all topics and partitions. " + "This property has higher priority than queue.buffering.max.messages.", + 1, INT_MAX, 0x100000 /*1GB*/}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.ms", _RK_C_DBL, + _RK(buffering_max_ms_dbl), + "Delay in milliseconds to wait for messages in the producer queue " + "to accumulate before constructing message batches (MessageSets) to " + "transmit to brokers. 
" + "A higher value allows larger and more effective " + "(less overhead, improved compression) batches of messages to " + "accumulate at the expense of increased message delivery latency.", + .dmin = 0, .dmax = 900.0 * 1000.0, .ddef = 5.0}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "linger.ms", _RK_C_ALIAS, + .sdef = "queue.buffering.max.ms"}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "message.send.max.retries", + _RK_C_INT, _RK(max_retries), + "How many times to retry sending a failing Message. " + "**Note:** retrying may cause reordering unless " + "`enable.idempotence` is set to true.", + 0, INT32_MAX, INT32_MAX}, + {_RK_GLOBAL | _RK_PRODUCER, "retries", _RK_C_ALIAS, + .sdef = "message.send.max.retries"}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "retry.backoff.ms", _RK_C_INT, + _RK(retry_backoff_ms), + "The backoff time in milliseconds before retrying a protocol request.", 1, + 300 * 1000, 100}, + + {_RK_GLOBAL | _RK_PRODUCER, "queue.buffering.backpressure.threshold", + _RK_C_INT, _RK(queue_backpressure_thres), + "The threshold of outstanding not yet transmitted broker requests " + "needed to backpressure the producer's message accumulator. " + "If the number of not yet transmitted requests equals or exceeds " + "this number, produce request creation that would have otherwise " + "been triggered (for example, in accordance with linger.ms) will be " + "delayed. A lower number yields larger and more effective batches. " + "A higher value can improve latency when using compression on slow " + "machines.", + 1, 1000000, 1}, + + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "compression.codec", _RK_C_S2I, + _RK(compression_codec), + "compression codec to use for compressing message sets. " + "This is the default value for all topics, may be overridden by " + "the topic configuration property `compression.codec`. 
", + .vdef = RD_KAFKA_COMPRESSION_NONE, + .s2i = {{RD_KAFKA_COMPRESSION_NONE, "none"}, + {RD_KAFKA_COMPRESSION_GZIP, "gzip", _UNSUPPORTED_ZLIB}, + {RD_KAFKA_COMPRESSION_SNAPPY, "snappy", _UNSUPPORTED_SNAPPY}, + {RD_KAFKA_COMPRESSION_LZ4, "lz4"}, + {RD_KAFKA_COMPRESSION_ZSTD, "zstd", _UNSUPPORTED_ZSTD}, + {0}}}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "compression.type", _RK_C_ALIAS, + .sdef = "compression.codec"}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "batch.num.messages", _RK_C_INT, + _RK(batch_num_messages), + "Maximum number of messages batched in one MessageSet. " + "The total MessageSet size is also limited by batch.size and " + "message.max.bytes.", + 1, 1000000, 10000}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "batch.size", _RK_C_INT, + _RK(batch_size), + "Maximum size (in bytes) of all messages batched in one MessageSet, " + "including protocol framing overhead. " + "This limit is applied after the first message has been added " + "to the batch, regardless of the first message's size, this is to " + "ensure that messages that exceed batch.size are produced. " + "The total MessageSet size is also limited by batch.num.messages and " + "message.max.bytes.", + 1, INT_MAX, 1000000}, + {_RK_GLOBAL | _RK_PRODUCER, "delivery.report.only.error", _RK_C_BOOL, + _RK(dr_err_only), "Only provide delivery reports for failed messages.", 0, + 1, 0}, + {_RK_GLOBAL | _RK_PRODUCER, "dr_cb", _RK_C_PTR, _RK(dr_cb), + "Delivery report callback (set with rd_kafka_conf_set_dr_cb())"}, + {_RK_GLOBAL | _RK_PRODUCER, "dr_msg_cb", _RK_C_PTR, _RK(dr_msg_cb), + "Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())"}, + {_RK_GLOBAL | _RK_PRODUCER, "sticky.partitioning.linger.ms", _RK_C_INT, + _RK(sticky_partition_linger_ms), + "Delay in milliseconds to wait to assign new sticky partitions for " + "each topic. " + "By default, set to double the time of linger.ms. To disable sticky " + "behavior, set to 0. 
" + "This behavior affects messages with the key NULL in all cases, and " + "messages with key lengths of zero when the consistent_random " + "partitioner is in use. " + "These messages would otherwise be assigned randomly. " + "A higher value allows for more effective batching of these " + "messages.", + 0, 900000, 10}, + + + /* + * Topic properties + */ + + /* Topic producer properties */ + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "request.required.acks", _RK_C_INT, + _RKT(required_acks), + "This field indicates the number of acknowledgements the leader " + "broker must receive from ISR brokers before responding to the " + "request: " + "*0*=Broker does not send any response/ack to client, " + "*-1* or *all*=Broker will block until message is committed by all " + "in sync replicas (ISRs). If there are less than " + "`min.insync.replicas` (broker configuration) in the ISR set the " + "produce request will fail.", + -1, 1000, -1, + .s2i = + { + {-1, "all"}, + }}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "acks", _RK_C_ALIAS, + .sdef = "request.required.acks"}, + + {_RK_TOPIC | _RK_PRODUCER | _RK_MED, "request.timeout.ms", _RK_C_INT, + _RKT(request_timeout_ms), + "The ack timeout of the producer request in milliseconds. " + "This value is only enforced by the broker and relies " + "on `request.required.acks` being != 0.", + 1, 900 * 1000, 30 * 1000}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "message.timeout.ms", _RK_C_INT, + _RKT(message_timeout_ms), + "Local message timeout. " + "This value is only enforced locally and limits the time a " + "produced message waits for successful delivery. " + "A time of 0 is infinite. " + "This is the maximum time librdkafka may use to deliver a message " + "(including retries). Delivery error occurs when either the retry " + "count or the message timeout are exceeded. 
" + "The message timeout is automatically adjusted to " + "`transaction.timeout.ms` if `transactional.id` is configured.", + 0, INT32_MAX, 300 * 1000}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "delivery.timeout.ms", _RK_C_ALIAS, + .sdef = "message.timeout.ms"}, + {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED | _RK_EXPERIMENTAL, + "queuing.strategy", _RK_C_S2I, _RKT(queuing_strategy), + "Producer queuing strategy. FIFO preserves produce ordering, " + "while LIFO prioritizes new messages.", + .vdef = 0, + .s2i = {{RD_KAFKA_QUEUE_FIFO, "fifo"}, {RD_KAFKA_QUEUE_LIFO, "lifo"}}}, + {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED, "produce.offset.report", + _RK_C_BOOL, _RKT(produce_offset_report), "No longer used.", 0, 1, 0}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "partitioner", _RK_C_STR, + _RKT(partitioner_str), + "Partitioner: " + "`random` - random distribution, " + "`consistent` - CRC32 hash of key (Empty and NULL keys are mapped to " + "single partition), " + "`consistent_random` - CRC32 hash of key (Empty and NULL keys are " + "randomly partitioned), " + "`murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are " + "mapped to single partition), " + "`murmur2_random` - Java Producer compatible Murmur2 hash of key " + "(NULL keys are randomly partitioned. This is functionally equivalent " + "to the default partitioner in the Java Producer.), " + "`fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), " + "`fnv1a_random` - FNV-1a hash of key (NULL keys are randomly " + "partitioned).", + .sdef = "consistent_random", + .validate = rd_kafka_conf_validate_partitioner}, + {_RK_TOPIC | _RK_PRODUCER, "partitioner_cb", _RK_C_PTR, _RKT(partitioner), + "Custom partitioner callback " + "(set with rd_kafka_topic_conf_set_partitioner_cb())"}, + {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED | _RK_EXPERIMENTAL, + "msg_order_cmp", _RK_C_PTR, _RKT(msg_order_cmp), + "Message queue ordering comparator " + "(set with rd_kafka_topic_conf_set_msg_order_cmp()). 
" + "Also see `queuing.strategy`."}, + {_RK_TOPIC, "opaque", _RK_C_PTR, _RKT(opaque), + "Application opaque (set with rd_kafka_topic_conf_set_opaque())"}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "compression.codec", _RK_C_S2I, + _RKT(compression_codec), + "Compression codec to use for compressing message sets. " + "inherit = inherit global compression.codec configuration.", + .vdef = RD_KAFKA_COMPRESSION_INHERIT, + .s2i = {{RD_KAFKA_COMPRESSION_NONE, "none"}, + {RD_KAFKA_COMPRESSION_GZIP, "gzip", _UNSUPPORTED_ZLIB}, + {RD_KAFKA_COMPRESSION_SNAPPY, "snappy", _UNSUPPORTED_SNAPPY}, + {RD_KAFKA_COMPRESSION_LZ4, "lz4"}, + {RD_KAFKA_COMPRESSION_ZSTD, "zstd", _UNSUPPORTED_ZSTD}, + {RD_KAFKA_COMPRESSION_INHERIT, "inherit"}, + {0}}}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "compression.type", _RK_C_ALIAS, + .sdef = "compression.codec"}, + {_RK_TOPIC | _RK_PRODUCER | _RK_MED, "compression.level", _RK_C_INT, + _RKT(compression_level), + "Compression level parameter for algorithm selected by configuration " + "property `compression.codec`. Higher values will result in better " + "compression at the cost of more CPU usage. Usable range is " + "algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; " + "-1 = codec-dependent default compression level.", + RD_KAFKA_COMPLEVEL_MIN, RD_KAFKA_COMPLEVEL_MAX, + RD_KAFKA_COMPLEVEL_DEFAULT}, + + + /* Topic consumer properties */ + {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "auto.commit.enable", + _RK_C_BOOL, _RKT(auto_commit), + "[**LEGACY PROPERTY:** This property is used by the simple legacy " + "consumer only. When using the high-level KafkaConsumer, the global " + "`enable.auto.commit` property must be used instead]. " + "If true, periodically commit offset of the last message handed " + "to the application. This committed offset will be used when the " + "process restarts to pick up where it left off. " + "If false, the application will have to call " + "`rd_kafka_offset_store()` to store an offset (optional). 
" + "Offsets will be written to broker or local file according to " + "offset.store.method.", + 0, 1, 1}, + {_RK_TOPIC | _RK_CONSUMER, "enable.auto.commit", _RK_C_ALIAS, + .sdef = "auto.commit.enable"}, + {_RK_TOPIC | _RK_CONSUMER | _RK_HIGH, "auto.commit.interval.ms", _RK_C_INT, + _RKT(auto_commit_interval_ms), + "[**LEGACY PROPERTY:** This setting is used by the simple legacy " + "consumer only. When using the high-level KafkaConsumer, the " + "global `auto.commit.interval.ms` property must be used instead]. " + "The frequency in milliseconds that the consumer offsets " + "are committed (written) to offset storage.", + 10, 86400 * 1000, 60 * 1000}, + {_RK_TOPIC | _RK_CONSUMER | _RK_HIGH, "auto.offset.reset", _RK_C_S2I, + _RKT(auto_offset_reset), + "Action to take when there is no initial offset in offset store " + "or the desired offset is out of range: " + "'smallest','earliest' - automatically reset the offset to the smallest " + "offset, " + "'largest','latest' - automatically reset the offset to the largest " + "offset, " + "'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is " + "retrieved by consuming messages and checking 'message->err'.", + .vdef = RD_KAFKA_OFFSET_END, + .s2i = + { + {RD_KAFKA_OFFSET_BEGINNING, "smallest"}, + {RD_KAFKA_OFFSET_BEGINNING, "earliest"}, + {RD_KAFKA_OFFSET_BEGINNING, "beginning"}, + {RD_KAFKA_OFFSET_END, "largest"}, + {RD_KAFKA_OFFSET_END, "latest"}, + {RD_KAFKA_OFFSET_END, "end"}, + {RD_KAFKA_OFFSET_INVALID, "error"}, + }}, + {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.path", _RK_C_STR, + _RKT(offset_store_path), + "Path to local file for storing offsets. If the path is a directory " + "a filename will be automatically generated in that directory based " + "on the topic and partition. 
" + "File-based offset storage will be removed in a future version.", + .sdef = "."}, + + {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.sync.interval.ms", + _RK_C_INT, _RKT(offset_store_sync_interval_ms), + "fsync() interval for the offset file, in milliseconds. " + "Use -1 to disable syncing, and 0 for immediate sync after " + "each write. " + "File-based offset storage will be removed in a future version.", + -1, 86400 * 1000, -1}, + + {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.method", + _RK_C_S2I, _RKT(offset_store_method), + "Offset commit store method: " + "'file' - DEPRECATED: local file store (offset.store.path, et.al), " + "'broker' - broker commit store " + "(requires \"group.id\" to be configured and " + "Apache Kafka 0.8.2 or later on the broker.).", + .vdef = RD_KAFKA_OFFSET_METHOD_BROKER, + .s2i = {{RD_KAFKA_OFFSET_METHOD_FILE, "file"}, + {RD_KAFKA_OFFSET_METHOD_BROKER, "broker"}}}, + + {_RK_TOPIC | _RK_CONSUMER, "consume.callback.max.messages", _RK_C_INT, + _RKT(consume_callback_max_msgs), + "Maximum number of messages to dispatch in " + "one `rd_kafka_consume_callback*()` call (0 = unlimited)", + 0, 1000000, 0}, + + {0, /* End */}}; /** * @returns the property object for \p name in \p scope, or NULL if not found. * @remark does not work with interceptor configs. */ -const struct rd_kafka_property * -rd_kafka_conf_prop_find (int scope, const char *name) { +const struct rd_kafka_property *rd_kafka_conf_prop_find(int scope, + const char *name) { const struct rd_kafka_property *prop; - restart: - for (prop = rd_kafka_properties ; prop->name ; prop++) { +restart: + for (prop = rd_kafka_properties; prop->name; prop++) { if (!(prop->scope & scope)) continue; @@ -1762,12 +1612,12 @@ rd_kafka_conf_prop_find (int scope, const char *name) { * @returns rd_true if property has been set/modified, else rd_false. * If \p name is unknown 0 is returned. 
*/ -rd_bool_t rd_kafka_conf_is_modified (const rd_kafka_conf_t *conf, - const char *name) { +rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf, + const char *name) { const struct rd_kafka_property *prop; if (!(prop = rd_kafka_conf_prop_find(_RK_GLOBAL, name))) - return rd_false; + return rd_false; return rd_kafka_anyconf_is_modified(conf, prop); } @@ -1777,13 +1627,13 @@ rd_bool_t rd_kafka_conf_is_modified (const rd_kafka_conf_t *conf, * @returns true if property has been set/modified, else 0. * If \p name is unknown 0 is returned. */ -static -rd_bool_t rd_kafka_topic_conf_is_modified (const rd_kafka_topic_conf_t *conf, - const char *name) { +static rd_bool_t +rd_kafka_topic_conf_is_modified(const rd_kafka_topic_conf_t *conf, + const char *name) { const struct rd_kafka_property *prop; if (!(prop = rd_kafka_conf_prop_find(_RK_TOPIC, name))) - return 0; + return 0; return rd_kafka_anyconf_is_modified(conf, prop); } @@ -1791,24 +1641,25 @@ rd_bool_t rd_kafka_topic_conf_is_modified (const rd_kafka_topic_conf_t *conf, static rd_kafka_conf_res_t -rd_kafka_anyconf_set_prop0 (int scope, void *conf, - const struct rd_kafka_property *prop, - const char *istr, int ival, rd_kafka_conf_set_mode_t set_mode, - char *errstr, size_t errstr_size) { +rd_kafka_anyconf_set_prop0(int scope, + void *conf, + const struct rd_kafka_property *prop, + const char *istr, + int ival, + rd_kafka_conf_set_mode_t set_mode, + char *errstr, + size_t errstr_size) { rd_kafka_conf_res_t res; -#define _RK_PTR(TYPE,BASE,OFFSET) (TYPE)(void *)(((char *)(BASE))+(OFFSET)) +#define _RK_PTR(TYPE, BASE, OFFSET) (TYPE)(void *)(((char *)(BASE)) + (OFFSET)) /* Try interceptors first (only for GLOBAL config) */ if (scope & _RK_GLOBAL) { if (prop->type == _RK_C_PTR || prop->type == _RK_C_INTERNAL) res = RD_KAFKA_CONF_UNKNOWN; else - res = rd_kafka_interceptors_on_conf_set(conf, - prop->name, - istr, - errstr, - errstr_size); + res = rd_kafka_interceptors_on_conf_set( + conf, prop->name, istr, errstr, 
errstr_size); if (res != RD_KAFKA_CONF_UNKNOWN) return res; } @@ -1818,8 +1669,8 @@ rd_kafka_anyconf_set_prop0 (int scope, void *conf, /* Custom setter */ res = prop->set(scope, conf, prop->name, istr, - _RK_PTR(void *, conf, prop->offset), - set_mode, errstr, errstr_size); + _RK_PTR(void *, conf, prop->offset), set_mode, + errstr, errstr_size); if (res != RD_KAFKA_CONF_OK) return res; @@ -1827,64 +1678,57 @@ rd_kafka_anyconf_set_prop0 (int scope, void *conf, /* FALLTHRU so that property value is set. */ } - switch (prop->type) - { - case _RK_C_STR: - { - char **str = _RK_PTR(char **, conf, prop->offset); - if (*str) - rd_free(*str); - if (istr) - *str = rd_strdup(istr); - else - *str = prop->sdef ? rd_strdup(prop->sdef) : NULL; + switch (prop->type) { + case _RK_C_STR: { + char **str = _RK_PTR(char **, conf, prop->offset); + if (*str) + rd_free(*str); + if (istr) + *str = rd_strdup(istr); + else + *str = prop->sdef ? rd_strdup(prop->sdef) : NULL; break; - } - case _RK_C_KSTR: - { - rd_kafkap_str_t **kstr = _RK_PTR(rd_kafkap_str_t **, conf, - prop->offset); + } + case _RK_C_KSTR: { + rd_kafkap_str_t **kstr = + _RK_PTR(rd_kafkap_str_t **, conf, prop->offset); if (*kstr) rd_kafkap_str_destroy(*kstr); if (istr) *kstr = rd_kafkap_str_new(istr, -1); else - *kstr = prop->sdef ? - rd_kafkap_str_new(prop->sdef, -1) : NULL; + *kstr = prop->sdef ? 
rd_kafkap_str_new(prop->sdef, -1) + : NULL; break; } - case _RK_C_PTR: - *_RK_PTR(const void **, conf, prop->offset) = istr; + case _RK_C_PTR: + *_RK_PTR(const void **, conf, prop->offset) = istr; break; - case _RK_C_BOOL: - case _RK_C_INT: - case _RK_C_S2I: - case _RK_C_S2F: - { - int *val = _RK_PTR(int *, conf, prop->offset); - - if (prop->type == _RK_C_S2F) { - switch (set_mode) - { - case _RK_CONF_PROP_SET_REPLACE: - *val = ival; - break; - case _RK_CONF_PROP_SET_ADD: - *val |= ival; - break; - case _RK_CONF_PROP_SET_DEL: - *val &= ~ival; - break; - } - } else { - /* Single assignment */ - *val = ival; - - } + case _RK_C_BOOL: + case _RK_C_INT: + case _RK_C_S2I: + case _RK_C_S2F: { + int *val = _RK_PTR(int *, conf, prop->offset); + + if (prop->type == _RK_C_S2F) { + switch (set_mode) { + case _RK_CONF_PROP_SET_REPLACE: + *val = ival; + break; + case _RK_CONF_PROP_SET_ADD: + *val |= ival; + break; + case _RK_CONF_PROP_SET_DEL: + *val &= ~ival; + break; + } + } else { + /* Single assignment */ + *val = ival; + } break; - } - case _RK_C_DBL: - { + } + case _RK_C_DBL: { double *val = _RK_PTR(double *, conf, prop->offset); if (istr) { char *endptr; @@ -1897,25 +1741,22 @@ rd_kafka_anyconf_set_prop0 (int scope, void *conf, break; } - case _RK_C_PATLIST: - { + case _RK_C_PATLIST: { /* Split comma-separated list into individual regex expressions * that are verified and then append to the provided list. 
*/ rd_kafka_pattern_list_t **plist; plist = _RK_PTR(rd_kafka_pattern_list_t **, conf, prop->offset); - if (*plist) - rd_kafka_pattern_list_destroy(*plist); + if (*plist) + rd_kafka_pattern_list_destroy(*plist); - if (istr) { - if (!(*plist = - rd_kafka_pattern_list_new(istr, - errstr, - (int)errstr_size))) - return RD_KAFKA_CONF_INVALID; - } else - *plist = NULL; + if (istr) { + if (!(*plist = rd_kafka_pattern_list_new( + istr, errstr, (int)errstr_size))) + return RD_KAFKA_CONF_INVALID; + } else + *plist = NULL; break; } @@ -1924,12 +1765,12 @@ rd_kafka_anyconf_set_prop0 (int scope, void *conf, /* Probably handled by setter */ break; - default: - rd_kafka_assert(NULL, !*"unknown conf type"); - } + default: + rd_kafka_assert(NULL, !*"unknown conf type"); + } - rd_kafka_anyconf_set_modified(conf, prop, 1/*modified*/); + rd_kafka_anyconf_set_modified(conf, prop, 1 /*modified*/); return RD_KAFKA_CONF_OK; } @@ -1938,17 +1779,16 @@ rd_kafka_anyconf_set_prop0 (int scope, void *conf, * @brief Find s2i (string-to-int mapping) entry and return its array index, * or -1 on miss. */ -static int rd_kafka_conf_s2i_find (const struct rd_kafka_property *prop, - const char *value) { - int j; +static int rd_kafka_conf_s2i_find(const struct rd_kafka_property *prop, + const char *value) { + int j; - for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { - if (prop->s2i[j].str && - !rd_strcasecmp(prop->s2i[j].str, value)) - return j; - } + for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { + if (prop->s2i[j].str && !rd_strcasecmp(prop->s2i[j].str, value)) + return j; + } - return -1; + return -1; } @@ -1960,12 +1800,14 @@ static int rd_kafka_conf_s2i_find (const struct rd_kafka_property *prop, * Should not be allowed from the conf_set() string interface. 
*/ static rd_kafka_conf_res_t -rd_kafka_anyconf_set_prop (int scope, void *conf, - const struct rd_kafka_property *prop, - const char *value, - int allow_specific, - char *errstr, size_t errstr_size) { - int ival; +rd_kafka_anyconf_set_prop(int scope, + void *conf, + const struct rd_kafka_property *prop, + const char *value, + int allow_specific, + char *errstr, + size_t errstr_size) { + int ival; if (prop->unsupported) { rd_snprintf(errstr, errstr_size, @@ -1975,9 +1817,8 @@ rd_kafka_anyconf_set_prop (int scope, void *conf, return RD_KAFKA_CONF_INVALID; } - switch (prop->type) - { - case _RK_C_STR: + switch (prop->type) { + case _RK_C_STR: /* Left-trim string(likes) */ if (value) while (isspace((int)*value)) @@ -1985,38 +1826,38 @@ rd_kafka_anyconf_set_prop (int scope, void *conf, /* FALLTHRU */ case _RK_C_KSTR: - if (prop->s2i[0].str) { - int match; - - if (!value || - (match = rd_kafka_conf_s2i_find(prop, value)) == -1){ - rd_snprintf(errstr, errstr_size, - "Invalid value for " - "configuration property \"%s\": " - "%s", - prop->name, value); - return RD_KAFKA_CONF_INVALID; - } - - /* Replace value string with canonical form */ - value = prop->s2i[match].str; - } - /* FALLTHRU */ + if (prop->s2i[0].str) { + int match; + + if (!value || (match = rd_kafka_conf_s2i_find( + prop, value)) == -1) { + rd_snprintf(errstr, errstr_size, + "Invalid value for " + "configuration property \"%s\": " + "%s", + prop->name, value); + return RD_KAFKA_CONF_INVALID; + } + + /* Replace value string with canonical form */ + value = prop->s2i[match].str; + } + /* FALLTHRU */ case _RK_C_PATLIST: - if (prop->validate && - (!value || !prop->validate(prop, value, -1))) { - rd_snprintf(errstr, errstr_size, - "Invalid value for " - "configuration property \"%s\": %s", - prop->name, value); - return RD_KAFKA_CONF_INVALID; - } - - return rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0, - _RK_CONF_PROP_SET_REPLACE, + if (prop->validate && + (!value || !prop->validate(prop, value, -1))) { 
+ rd_snprintf(errstr, errstr_size, + "Invalid value for " + "configuration property \"%s\": %s", + prop->name, value); + return RD_KAFKA_CONF_INVALID; + } + + return rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0, + _RK_CONF_PROP_SET_REPLACE, errstr, errstr_size); - case _RK_C_PTR: + case _RK_C_PTR: /* Allow hidden internal unit test properties to * be set from generic conf_set() interface. */ if (!allow_specific && !(prop->scope & _RK_HIDDEN)) { @@ -2030,59 +1871,59 @@ rd_kafka_anyconf_set_prop (int scope, void *conf, _RK_CONF_PROP_SET_REPLACE, errstr, errstr_size); - case _RK_C_BOOL: - if (!value) { - rd_snprintf(errstr, errstr_size, - "Bool configuration property \"%s\" cannot " - "be set to empty value", prop->name); - return RD_KAFKA_CONF_INVALID; - } - - - if (!rd_strcasecmp(value, "true") || - !rd_strcasecmp(value, "t") || - !strcmp(value, "1")) - ival = 1; - else if (!rd_strcasecmp(value, "false") || - !rd_strcasecmp(value, "f") || - !strcmp(value, "0")) - ival = 0; - else { - rd_snprintf(errstr, errstr_size, - "Expected bool value for \"%s\": " - "true or false", prop->name); - return RD_KAFKA_CONF_INVALID; - } - - rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival, - _RK_CONF_PROP_SET_REPLACE, - errstr, errstr_size); - return RD_KAFKA_CONF_OK; - - case _RK_C_INT: - { - const char *end; - - if (!value) { - rd_snprintf(errstr, errstr_size, - "Integer configuration " - "property \"%s\" cannot be set " - "to empty value", prop->name); - return RD_KAFKA_CONF_INVALID; - } - - ival = (int)strtol(value, (char **)&end, 0); - if (end == value) { - /* Non numeric, check s2i for string mapping */ - int match = rd_kafka_conf_s2i_find(prop, value); - - if (match == -1) { - rd_snprintf(errstr, errstr_size, - "Invalid value for " - "configuration property \"%s\"", - prop->name); - return RD_KAFKA_CONF_INVALID; - } + case _RK_C_BOOL: + if (!value) { + rd_snprintf(errstr, errstr_size, + "Bool configuration property \"%s\" cannot " + "be set to empty value", 
+ prop->name); + return RD_KAFKA_CONF_INVALID; + } + + + if (!rd_strcasecmp(value, "true") || + !rd_strcasecmp(value, "t") || !strcmp(value, "1")) + ival = 1; + else if (!rd_strcasecmp(value, "false") || + !rd_strcasecmp(value, "f") || !strcmp(value, "0")) + ival = 0; + else { + rd_snprintf(errstr, errstr_size, + "Expected bool value for \"%s\": " + "true or false", + prop->name); + return RD_KAFKA_CONF_INVALID; + } + + rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival, + _RK_CONF_PROP_SET_REPLACE, errstr, + errstr_size); + return RD_KAFKA_CONF_OK; + + case _RK_C_INT: { + const char *end; + + if (!value) { + rd_snprintf(errstr, errstr_size, + "Integer configuration " + "property \"%s\" cannot be set " + "to empty value", + prop->name); + return RD_KAFKA_CONF_INVALID; + } + + ival = (int)strtol(value, (char **)&end, 0); + if (end == value) { + /* Non numeric, check s2i for string mapping */ + int match = rd_kafka_conf_s2i_find(prop, value); + + if (match == -1) { + rd_snprintf(errstr, errstr_size, + "Invalid value for " + "configuration property \"%s\"", + prop->name); + return RD_KAFKA_CONF_INVALID; + } if (prop->s2i[match].unsupported) { rd_snprintf(errstr, errstr_size, @@ -2093,36 +1934,33 @@ rd_kafka_anyconf_set_prop (int scope, void *conf, return RD_KAFKA_CONF_INVALID; } - ival = prop->s2i[match].val; - } - - if (ival < prop->vmin || - ival > prop->vmax) { - rd_snprintf(errstr, errstr_size, - "Configuration property \"%s\" value " - "%i is outside allowed range %i..%i\n", - prop->name, ival, - prop->vmin, - prop->vmax); - return RD_KAFKA_CONF_INVALID; - } - - rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival, - _RK_CONF_PROP_SET_REPLACE, - errstr, errstr_size); - return RD_KAFKA_CONF_OK; - } + ival = prop->s2i[match].val; + } - case _RK_C_DBL: - { + if (ival < prop->vmin || ival > prop->vmax) { + rd_snprintf(errstr, errstr_size, + "Configuration property \"%s\" value " + "%i is outside allowed range %i..%i\n", + prop->name, ival, prop->vmin, 
prop->vmax); + return RD_KAFKA_CONF_INVALID; + } + + rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival, + _RK_CONF_PROP_SET_REPLACE, errstr, + errstr_size); + return RD_KAFKA_CONF_OK; + } + + case _RK_C_DBL: { const char *end; double dval; if (!value) { rd_snprintf(errstr, errstr_size, - "Float configuration " - "property \"%s\" cannot be set " - "to empty value", prop->name); + "Float configuration " + "property \"%s\" cannot be set " + "to empty value", + prop->name); return RD_KAFKA_CONF_INVALID; } @@ -2135,131 +1973,127 @@ rd_kafka_anyconf_set_prop (int scope, void *conf, return RD_KAFKA_CONF_INVALID; } - if (dval < prop->dmin || - dval > prop->dmax) { + if (dval < prop->dmin || dval > prop->dmax) { rd_snprintf(errstr, errstr_size, - "Configuration property \"%s\" value " - "%g is outside allowed range %g..%g\n", - prop->name, dval, - prop->dmin, - prop->dmax); + "Configuration property \"%s\" value " + "%g is outside allowed range %g..%g\n", + prop->name, dval, prop->dmin, prop->dmax); return RD_KAFKA_CONF_INVALID; } rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0, - _RK_CONF_PROP_SET_REPLACE, - errstr, errstr_size); + _RK_CONF_PROP_SET_REPLACE, errstr, + errstr_size); return RD_KAFKA_CONF_OK; } - case _RK_C_S2I: - case _RK_C_S2F: - { - int j; - const char *next; - - if (!value) { - rd_snprintf(errstr, errstr_size, - "Configuration " - "property \"%s\" cannot be set " - "to empty value", prop->name); - return RD_KAFKA_CONF_INVALID; - } - - next = value; - while (next && *next) { - const char *s, *t; - rd_kafka_conf_set_mode_t set_mode = _RK_CONF_PROP_SET_ADD; /* S2F */ - - s = next; - - if (prop->type == _RK_C_S2F && - (t = strchr(s, ','))) { - /* CSV flag field */ - next = t+1; - } else { - /* Single string */ - t = s+strlen(s); - next = NULL; - } - - - /* Left trim */ - while (s < t && isspace((int)*s)) - s++; - - /* Right trim */ - while (t > s && isspace((int)*t)) - t--; - - /* S2F: +/- prefix */ - if (prop->type == _RK_C_S2F) { - if (*s == 
'+') { - set_mode = _RK_CONF_PROP_SET_ADD; - s++; - } else if (*s == '-') { - set_mode = _RK_CONF_PROP_SET_DEL; - s++; - } - } - - /* Empty string? */ - if (s == t) - continue; - - /* Match string to s2i table entry */ - for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { - int new_val; - - if (!prop->s2i[j].str) - continue; - - if (strlen(prop->s2i[j].str) == (size_t)(t-s) && - !rd_strncasecmp(prop->s2i[j].str, s, - (int)(t-s))) - new_val = prop->s2i[j].val; - else - continue; + case _RK_C_S2I: + case _RK_C_S2F: { + int j; + const char *next; + + if (!value) { + rd_snprintf(errstr, errstr_size, + "Configuration " + "property \"%s\" cannot be set " + "to empty value", + prop->name); + return RD_KAFKA_CONF_INVALID; + } + + next = value; + while (next && *next) { + const char *s, *t; + rd_kafka_conf_set_mode_t set_mode = + _RK_CONF_PROP_SET_ADD; /* S2F */ + + s = next; + + if (prop->type == _RK_C_S2F && (t = strchr(s, ','))) { + /* CSV flag field */ + next = t + 1; + } else { + /* Single string */ + t = s + strlen(s); + next = NULL; + } + + + /* Left trim */ + while (s < t && isspace((int)*s)) + s++; + + /* Right trim */ + while (t > s && isspace((int)*t)) + t--; + + /* S2F: +/- prefix */ + if (prop->type == _RK_C_S2F) { + if (*s == '+') { + set_mode = _RK_CONF_PROP_SET_ADD; + s++; + } else if (*s == '-') { + set_mode = _RK_CONF_PROP_SET_DEL; + s++; + } + } + + /* Empty string? 
*/ + if (s == t) + continue; + + /* Match string to s2i table entry */ + for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { + int new_val; + + if (!prop->s2i[j].str) + continue; + + if (strlen(prop->s2i[j].str) == + (size_t)(t - s) && + !rd_strncasecmp(prop->s2i[j].str, s, + (int)(t - s))) + new_val = prop->s2i[j].val; + else + continue; if (prop->s2i[j].unsupported) { rd_snprintf( - errstr, errstr_size, - "Unsupported value \"%.*s\" " - "for configuration property " - "\"%s\": %s", - (int)(t-s), s, prop->name, - prop->s2i[j].unsupported); + errstr, errstr_size, + "Unsupported value \"%.*s\" " + "for configuration property " + "\"%s\": %s", + (int)(t - s), s, prop->name, + prop->s2i[j].unsupported); return RD_KAFKA_CONF_INVALID; } - rd_kafka_anyconf_set_prop0(scope, conf, prop, - value, new_val, - set_mode, - errstr, errstr_size); - - if (prop->type == _RK_C_S2F) { - /* Flags: OR it in: do next */ - break; - } else { - /* Single assignment */ - return RD_KAFKA_CONF_OK; - } - } - - /* S2F: Good match: continue with next */ - if (j < (int)RD_ARRAYSIZE(prop->s2i)) - continue; - - /* No match */ - rd_snprintf(errstr, errstr_size, - "Invalid value \"%.*s\" for " - "configuration property \"%s\"", - (int)(t-s), s, prop->name); - return RD_KAFKA_CONF_INVALID; - - } - return RD_KAFKA_CONF_OK; - } + rd_kafka_anyconf_set_prop0( + scope, conf, prop, value, new_val, set_mode, + errstr, errstr_size); + + if (prop->type == _RK_C_S2F) { + /* Flags: OR it in: do next */ + break; + } else { + /* Single assignment */ + return RD_KAFKA_CONF_OK; + } + } + + /* S2F: Good match: continue with next */ + if (j < (int)RD_ARRAYSIZE(prop->s2i)) + continue; + + /* No match */ + rd_snprintf(errstr, errstr_size, + "Invalid value \"%.*s\" for " + "configuration property \"%s\"", + (int)(t - s), s, prop->name); + return RD_KAFKA_CONF_INVALID; + } + return RD_KAFKA_CONF_OK; + } case _RK_C_INTERNAL: rd_snprintf(errstr, errstr_size, @@ -2271,79 +2105,79 @@ rd_kafka_anyconf_set_prop (int scope, 
void *conf, rd_snprintf(errstr, errstr_size, "%s", prop->desc); return RD_KAFKA_CONF_INVALID; - default: + default: rd_kafka_assert(NULL, !*"unknown conf type"); - } + } - /* not reachable */ - return RD_KAFKA_CONF_INVALID; + /* not reachable */ + return RD_KAFKA_CONF_INVALID; } -static void rd_kafka_defaultconf_set (int scope, void *conf) { - const struct rd_kafka_property *prop; +static void rd_kafka_defaultconf_set(int scope, void *conf) { + const struct rd_kafka_property *prop; - for (prop = rd_kafka_properties ; prop->name ; prop++) { - if (!(prop->scope & scope)) - continue; + for (prop = rd_kafka_properties; prop->name; prop++) { + if (!(prop->scope & scope)) + continue; - if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) - continue; + if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) + continue; if (prop->ctor) prop->ctor(scope, conf); if (prop->sdef || prop->vdef || prop->pdef || !rd_dbl_zero(prop->ddef)) - rd_kafka_anyconf_set_prop0(scope, conf, prop, - prop->sdef ? - prop->sdef : prop->pdef, - prop->vdef, - _RK_CONF_PROP_SET_REPLACE, - NULL, 0); - } + rd_kafka_anyconf_set_prop0( + scope, conf, prop, + prop->sdef ? 
prop->sdef : prop->pdef, prop->vdef, + _RK_CONF_PROP_SET_REPLACE, NULL, 0); + } } -rd_kafka_conf_t *rd_kafka_conf_new (void) { - rd_kafka_conf_t *conf = rd_calloc(1, sizeof(*conf)); +rd_kafka_conf_t *rd_kafka_conf_new(void) { + rd_kafka_conf_t *conf = rd_calloc(1, sizeof(*conf)); rd_assert(RD_KAFKA_CONF_PROPS_IDX_MAX > sizeof(*conf) && *"Increase RD_KAFKA_CONF_PROPS_IDX_MAX"); - rd_kafka_defaultconf_set(_RK_GLOBAL, conf); + rd_kafka_defaultconf_set(_RK_GLOBAL, conf); rd_kafka_anyconf_clear_all_is_modified(conf); - return conf; + return conf; } -rd_kafka_topic_conf_t *rd_kafka_topic_conf_new (void) { - rd_kafka_topic_conf_t *tconf = rd_calloc(1, sizeof(*tconf)); +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void) { + rd_kafka_topic_conf_t *tconf = rd_calloc(1, sizeof(*tconf)); rd_assert(RD_KAFKA_CONF_PROPS_IDX_MAX > sizeof(*tconf) && *"Increase RD_KAFKA_CONF_PROPS_IDX_MAX"); - rd_kafka_defaultconf_set(_RK_TOPIC, tconf); + rd_kafka_defaultconf_set(_RK_TOPIC, tconf); rd_kafka_anyconf_clear_all_is_modified(tconf); - return tconf; + return tconf; } -static int rd_kafka_anyconf_set (int scope, void *conf, - const char *name, const char *value, - char *errstr, size_t errstr_size) { - char estmp[1]; - const struct rd_kafka_property *prop; +static int rd_kafka_anyconf_set(int scope, + void *conf, + const char *name, + const char *value, + char *errstr, + size_t errstr_size) { + char estmp[1]; + const struct rd_kafka_property *prop; rd_kafka_conf_res_t res; - if (!errstr) { - errstr = estmp; - errstr_size = 0; - } + if (!errstr) { + errstr = estmp; + errstr_size = 0; + } - if (value && !*value) - value = NULL; + if (value && !*value) + value = NULL; /* Try interceptors first (only for GLOBAL config for now) */ if (scope & _RK_GLOBAL) { res = rd_kafka_interceptors_on_conf_set( - (rd_kafka_conf_t *)conf, name, value, - errstr, errstr_size); + (rd_kafka_conf_t *)conf, name, value, errstr, errstr_size); /* Handled (successfully or not) by interceptor. 
*/ if (res != RD_KAFKA_CONF_UNKNOWN) return res; @@ -2352,28 +2186,27 @@ static int rd_kafka_anyconf_set (int scope, void *conf, /* Then global config */ - for (prop = rd_kafka_properties ; prop->name ; prop++) { + for (prop = rd_kafka_properties; prop->name; prop++) { - if (!(prop->scope & scope)) - continue; + if (!(prop->scope & scope)) + continue; - if (strcmp(prop->name, name)) - continue; + if (strcmp(prop->name, name)) + continue; - if (prop->type == _RK_C_ALIAS) - return rd_kafka_anyconf_set(scope, conf, - prop->sdef, value, - errstr, errstr_size); + if (prop->type == _RK_C_ALIAS) + return rd_kafka_anyconf_set(scope, conf, prop->sdef, + value, errstr, errstr_size); return rd_kafka_anyconf_set_prop(scope, conf, prop, value, - 0/*don't allow specifics*/, + 0 /*don't allow specifics*/, errstr, errstr_size); - } + } - rd_snprintf(errstr, errstr_size, - "No such configuration property: \"%s\"", name); + rd_snprintf(errstr, errstr_size, + "No such configuration property: \"%s\"", name); - return RD_KAFKA_CONF_UNKNOWN; + return RD_KAFKA_CONF_UNKNOWN; } @@ -2387,27 +2220,28 @@ static int rd_kafka_anyconf_set (int scope, void *conf, * Implemented as a macro to have rd_assert() print the original function. 
*/ -#define rd_kafka_anyconf_set_internal(SCOPE,CONF,NAME,VALUE) do { \ - const struct rd_kafka_property *_prop; \ - rd_kafka_conf_res_t _res; \ - _prop = rd_kafka_conf_prop_find(SCOPE, NAME); \ - rd_assert(_prop && *"invalid property name"); \ - _res = rd_kafka_anyconf_set_prop(SCOPE, CONF, _prop, \ - (const void *)VALUE, \ - 1/*allow-specifics*/, \ - NULL, 0); \ - rd_assert(_res == RD_KAFKA_CONF_OK); \ +#define rd_kafka_anyconf_set_internal(SCOPE, CONF, NAME, VALUE) \ + do { \ + const struct rd_kafka_property *_prop; \ + rd_kafka_conf_res_t _res; \ + _prop = rd_kafka_conf_prop_find(SCOPE, NAME); \ + rd_assert(_prop && * "invalid property name"); \ + _res = rd_kafka_anyconf_set_prop( \ + SCOPE, CONF, _prop, (const void *)VALUE, \ + 1 /*allow-specifics*/, NULL, 0); \ + rd_assert(_res == RD_KAFKA_CONF_OK); \ } while (0) -rd_kafka_conf_res_t rd_kafka_conf_set (rd_kafka_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size) { +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, + const char *name, + const char *value, + char *errstr, + size_t errstr_size) { rd_kafka_conf_res_t res; - res = rd_kafka_anyconf_set(_RK_GLOBAL, conf, name, value, - errstr, errstr_size); + res = rd_kafka_anyconf_set(_RK_GLOBAL, conf, name, value, errstr, + errstr_size); if (res != RD_KAFKA_CONF_UNKNOWN) return res; @@ -2421,20 +2255,21 @@ rd_kafka_conf_res_t rd_kafka_conf_set (rd_kafka_conf_t *conf, rd_kafka_topic_conf_new()); } - return rd_kafka_topic_conf_set(conf->topic_conf, name, value, - errstr, errstr_size); + return rd_kafka_topic_conf_set(conf->topic_conf, name, value, errstr, + errstr_size); } -rd_kafka_conf_res_t rd_kafka_topic_conf_set (rd_kafka_topic_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size) { - if (!strncmp(name, "topic.", strlen("topic."))) - name += strlen("topic."); +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, + const char *name, + const char *value, + 
char *errstr, + size_t errstr_size) { + if (!strncmp(name, "topic.", strlen("topic."))) + name += strlen("topic."); - return rd_kafka_anyconf_set(_RK_TOPIC, conf, name, value, - errstr, errstr_size); + return rd_kafka_anyconf_set(_RK_TOPIC, conf, name, value, errstr, + errstr_size); } @@ -2442,7 +2277,7 @@ rd_kafka_conf_res_t rd_kafka_topic_conf_set (rd_kafka_topic_conf_t *conf, * @brief Overwrites the contents of \p str up until but not including * the nul-term. */ -void rd_kafka_desensitize_str (char *str) { +void rd_kafka_desensitize_str(char *str) { size_t len; static const char redacted[] = "(REDACTED)"; @@ -2452,7 +2287,7 @@ void rd_kafka_desensitize_str (char *str) { #else volatile char *volatile s; - for (s = str ; *s ; s++) + for (s = str; *s; s++) *s = '\0'; len = (size_t)(s - str); @@ -2464,20 +2299,18 @@ void rd_kafka_desensitize_str (char *str) { - /** * @brief Overwrite the value of \p prop, if sensitive. */ static RD_INLINE void -rd_kafka_anyconf_prop_desensitize (int scope, void *conf, - const struct rd_kafka_property *prop) { +rd_kafka_anyconf_prop_desensitize(int scope, + void *conf, + const struct rd_kafka_property *prop) { if (likely(!(prop->scope & _RK_SENSITIVE))) return; - switch (prop->type) - { - case _RK_C_STR: - { + switch (prop->type) { + case _RK_C_STR: { char **str = _RK_PTR(char **, conf, prop->offset); if (*str) rd_kafka_desensitize_str(*str); @@ -2501,10 +2334,10 @@ rd_kafka_anyconf_prop_desensitize (int scope, void *conf, /** * @brief Desensitize all sensitive properties in \p conf */ -static void rd_kafka_anyconf_desensitize (int scope, void *conf) { +static void rd_kafka_anyconf_desensitize(int scope, void *conf) { const struct rd_kafka_property *prop; - for (prop = rd_kafka_properties; prop->name ; prop++) { + for (prop = rd_kafka_properties; prop->name; prop++) { if (!(prop->scope & scope)) continue; @@ -2515,73 +2348,66 @@ static void rd_kafka_anyconf_desensitize (int scope, void *conf) { /** * @brief Overwrite the values of 
sensitive properties */ -void rd_kafka_conf_desensitize (rd_kafka_conf_t *conf) { +void rd_kafka_conf_desensitize(rd_kafka_conf_t *conf) { if (conf->topic_conf) - rd_kafka_anyconf_desensitize(_RK_TOPIC, - conf->topic_conf); + rd_kafka_anyconf_desensitize(_RK_TOPIC, conf->topic_conf); rd_kafka_anyconf_desensitize(_RK_GLOBAL, conf); } /** * @brief Overwrite the values of sensitive properties */ -void rd_kafka_topic_conf_desensitize (rd_kafka_topic_conf_t *tconf) { +void rd_kafka_topic_conf_desensitize(rd_kafka_topic_conf_t *tconf) { rd_kafka_anyconf_desensitize(_RK_TOPIC, tconf); } -static void rd_kafka_anyconf_clear (int scope, void *conf, - const struct rd_kafka_property *prop) { +static void rd_kafka_anyconf_clear(int scope, + void *conf, + const struct rd_kafka_property *prop) { rd_kafka_anyconf_prop_desensitize(scope, conf, prop); - switch (prop->type) - { - case _RK_C_STR: - { - char **str = _RK_PTR(char **, conf, prop->offset); + switch (prop->type) { + case _RK_C_STR: { + char **str = _RK_PTR(char **, conf, prop->offset); - if (*str) { + if (*str) { if (prop->set) { prop->set(scope, conf, prop->name, NULL, *str, _RK_CONF_PROP_SET_DEL, NULL, 0); /* FALLTHRU */ } rd_free(*str); - *str = NULL; - } - } - break; + *str = NULL; + } + } break; - case _RK_C_KSTR: - { - rd_kafkap_str_t **kstr = _RK_PTR(rd_kafkap_str_t **, conf, - prop->offset); + case _RK_C_KSTR: { + rd_kafkap_str_t **kstr = + _RK_PTR(rd_kafkap_str_t **, conf, prop->offset); if (*kstr) { rd_kafkap_str_destroy(*kstr); *kstr = NULL; } - } - break; + } break; - case _RK_C_PATLIST: - { + case _RK_C_PATLIST: { rd_kafka_pattern_list_t **plist; plist = _RK_PTR(rd_kafka_pattern_list_t **, conf, prop->offset); - if (*plist) { - rd_kafka_pattern_list_destroy(*plist); - *plist = NULL; - } - } - break; + if (*plist) { + rd_kafka_pattern_list_destroy(*plist); + *plist = NULL; + } + } break; case _RK_C_PTR: if (_RK_PTR(void *, conf, prop->offset) != NULL) { if (!strcmp(prop->name, "default_topic_conf")) { 
rd_kafka_topic_conf_t **tconf; - tconf = _RK_PTR(rd_kafka_topic_conf_t **, - conf, prop->offset); + tconf = _RK_PTR(rd_kafka_topic_conf_t **, conf, + prop->offset); if (*tconf) { rd_kafka_topic_conf_destroy(*tconf); *tconf = NULL; @@ -2590,61 +2416,63 @@ static void rd_kafka_anyconf_clear (int scope, void *conf, } break; - default: - break; - } + default: + break; + } if (prop->dtor) prop->dtor(scope, conf); - } -void rd_kafka_anyconf_destroy (int scope, void *conf) { - const struct rd_kafka_property *prop; +void rd_kafka_anyconf_destroy(int scope, void *conf) { + const struct rd_kafka_property *prop; /* Call on_conf_destroy() interceptors */ if (scope == _RK_GLOBAL) rd_kafka_interceptors_on_conf_destroy(conf); - for (prop = rd_kafka_properties; prop->name ; prop++) { - if (!(prop->scope & scope)) - continue; + for (prop = rd_kafka_properties; prop->name; prop++) { + if (!(prop->scope & scope)) + continue; - rd_kafka_anyconf_clear(scope, conf, prop); - } + rd_kafka_anyconf_clear(scope, conf, prop); + } } -void rd_kafka_conf_destroy (rd_kafka_conf_t *conf) { - rd_kafka_anyconf_destroy(_RK_GLOBAL, conf); - //FIXME: partition_assignors - rd_free(conf); +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf) { + rd_kafka_anyconf_destroy(_RK_GLOBAL, conf); + // FIXME: partition_assignors + rd_free(conf); } -void rd_kafka_topic_conf_destroy (rd_kafka_topic_conf_t *topic_conf) { - rd_kafka_anyconf_destroy(_RK_TOPIC, topic_conf); - rd_free(topic_conf); +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf) { + rd_kafka_anyconf_destroy(_RK_TOPIC, topic_conf); + rd_free(topic_conf); } -static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, - size_t filter_cnt, const char **filter) { - const struct rd_kafka_property *prop; +static void rd_kafka_anyconf_copy(int scope, + void *dst, + const void *src, + size_t filter_cnt, + const char **filter) { + const struct rd_kafka_property *prop; - for (prop = rd_kafka_properties ; prop->name ; prop++) { - 
const char *val = NULL; - int ival = 0; + for (prop = rd_kafka_properties; prop->name; prop++) { + const char *val = NULL; + int ival = 0; char *valstr; size_t valsz; size_t fi; size_t nlen; - if (!(prop->scope & scope)) - continue; + if (!(prop->scope & scope)) + continue; - if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) - continue; + if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) + continue; /* Skip properties that have not been set, * unless it is an internal one which requires @@ -2655,7 +2483,7 @@ static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, /* Apply filter, if any. */ nlen = strlen(prop->name); - for (fi = 0 ; fi < filter_cnt ; fi++) { + for (fi = 0; fi < filter_cnt; fi++) { size_t flen = strlen(filter[fi]); if (nlen >= flen && !strncmp(filter[fi], prop->name, flen)) @@ -2664,31 +2492,28 @@ static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, if (fi < filter_cnt) continue; /* Filter matched */ - switch (prop->type) - { - case _RK_C_STR: - case _RK_C_PTR: - val = *_RK_PTR(const char **, src, prop->offset); + switch (prop->type) { + case _RK_C_STR: + case _RK_C_PTR: + val = *_RK_PTR(const char **, src, prop->offset); if (!strcmp(prop->name, "default_topic_conf") && val) val = (void *)rd_kafka_topic_conf_dup( - (const rd_kafka_topic_conf_t *) - (void *)val); - break; - case _RK_C_KSTR: - { - rd_kafkap_str_t **kstr = _RK_PTR(rd_kafkap_str_t **, - src, prop->offset); + (const rd_kafka_topic_conf_t *)(void *)val); + break; + case _RK_C_KSTR: { + rd_kafkap_str_t **kstr = + _RK_PTR(rd_kafkap_str_t **, src, prop->offset); if (*kstr) val = (*kstr)->str; break; } - case _RK_C_BOOL: - case _RK_C_INT: - case _RK_C_S2I: - case _RK_C_S2F: - ival = *_RK_PTR(const int *, src, prop->offset); + case _RK_C_BOOL: + case _RK_C_INT: + case _RK_C_S2I: + case _RK_C_S2F: + ival = *_RK_PTR(const int *, src, prop->offset); /* Get string representation of configuration value. 
*/ valsz = 0; @@ -2696,7 +2521,7 @@ static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, valstr = rd_alloca(valsz); rd_kafka_anyconf_get0(src, prop, valstr, &valsz); val = valstr; - break; + break; case _RK_C_DBL: /* Get string representation of configuration value. */ valsz = 0; @@ -2705,21 +2530,20 @@ static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, rd_kafka_anyconf_get0(src, prop, valstr, &valsz); val = valstr; break; - case _RK_C_PATLIST: - { + case _RK_C_PATLIST: { const rd_kafka_pattern_list_t **plist; - plist = _RK_PTR(const rd_kafka_pattern_list_t **, - src, prop->offset); - if (*plist) - val = (*plist)->rkpl_orig; + plist = _RK_PTR(const rd_kafka_pattern_list_t **, src, + prop->offset); + if (*plist) + val = (*plist)->rkpl_orig; break; } case _RK_C_INTERNAL: /* Handled by ->copy() below. */ break; - default: - continue; - } + default: + continue; + } if (prop->copy) prop->copy(scope, dst, src, @@ -2729,136 +2553,137 @@ static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, rd_kafka_anyconf_set_prop0(scope, dst, prop, val, ival, _RK_CONF_PROP_SET_REPLACE, NULL, 0); - } + } } -rd_kafka_conf_t *rd_kafka_conf_dup (const rd_kafka_conf_t *conf) { - rd_kafka_conf_t *new = rd_kafka_conf_new(); +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf) { + rd_kafka_conf_t *new = rd_kafka_conf_new(); rd_kafka_interceptors_on_conf_dup(new, conf, 0, NULL); rd_kafka_anyconf_copy(_RK_GLOBAL, new, conf, 0, NULL); - return new; + return new; } -rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf, - size_t filter_cnt, - const char **filter) { - rd_kafka_conf_t *new = rd_kafka_conf_new(); +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, + size_t filter_cnt, + const char **filter) { + rd_kafka_conf_t *new = rd_kafka_conf_new(); rd_kafka_interceptors_on_conf_dup(new, conf, filter_cnt, filter); rd_kafka_anyconf_copy(_RK_GLOBAL, new, conf, filter_cnt, filter); - 
return new; + return new; } -rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup (const rd_kafka_topic_conf_t - *conf) { - rd_kafka_topic_conf_t *new = rd_kafka_topic_conf_new(); +rd_kafka_topic_conf_t * +rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf) { + rd_kafka_topic_conf_t *new = rd_kafka_topic_conf_new(); - rd_kafka_anyconf_copy(_RK_TOPIC, new, conf, 0, NULL); + rd_kafka_anyconf_copy(_RK_TOPIC, new, conf, 0, NULL); - return new; + return new; } -rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup (rd_kafka_t *rk) { +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk) { if (rk->rk_conf.topic_conf) return rd_kafka_topic_conf_dup(rk->rk_conf.topic_conf); else return rd_kafka_topic_conf_new(); } -void rd_kafka_conf_set_events (rd_kafka_conf_t *conf, int events) { +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events) { char tmp[32]; rd_snprintf(tmp, sizeof(tmp), "%d", events); rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "enabled_events", tmp); } -void -rd_kafka_conf_set_background_event_cb (rd_kafka_conf_t *conf, - void (*event_cb) (rd_kafka_t *rk, - rd_kafka_event_t *rkev, - void *opaque)) { +void rd_kafka_conf_set_background_event_cb( + rd_kafka_conf_t *conf, + void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "background_event_cb", event_cb); } -void rd_kafka_conf_set_dr_cb (rd_kafka_conf_t *conf, - void (*dr_cb) (rd_kafka_t *rk, - void *payload, size_t len, - rd_kafka_resp_err_t err, - void *opaque, void *msg_opaque)) { +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, + void (*dr_cb)(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "dr_cb", dr_cb); } -void rd_kafka_conf_set_dr_msg_cb (rd_kafka_conf_t *conf, - void (*dr_msg_cb) (rd_kafka_t *rk, - const rd_kafka_message_t * - rkmessage, - void *opaque)) { +void 
rd_kafka_conf_set_dr_msg_cb( + rd_kafka_conf_t *conf, + void (*dr_msg_cb)(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "dr_msg_cb", dr_msg_cb); } -void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, - void (*consume_cb) (rd_kafka_message_t * - rkmessage, - void *opaque)) { +void rd_kafka_conf_set_consume_cb( + rd_kafka_conf_t *conf, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "consume_cb", consume_cb); } -void rd_kafka_conf_set_rebalance_cb ( - rd_kafka_conf_t *conf, - void (*rebalance_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque)) { +void rd_kafka_conf_set_rebalance_cb( + rd_kafka_conf_t *conf, + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "rebalance_cb", rebalance_cb); } -void rd_kafka_conf_set_offset_commit_cb ( - rd_kafka_conf_t *conf, - void (*offset_commit_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque)) { +void rd_kafka_conf_set_offset_commit_cb( + rd_kafka_conf_t *conf, + void (*offset_commit_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "offset_commit_cb", offset_commit_cb); } -void rd_kafka_conf_set_error_cb (rd_kafka_conf_t *conf, - void (*error_cb) (rd_kafka_t *rk, int err, - const char *reason, - void *opaque)) { +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, + void (*error_cb)(rd_kafka_t *rk, + int err, + const char *reason, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "error_cb", error_cb); } -void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf, - void (*throttle_cb) ( - rd_kafka_t *rk, - const 
char *broker_name, - int32_t broker_id, - int throttle_time_ms, - void *opaque)) { +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, + void (*throttle_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "throttle_cb", throttle_cb); } -void rd_kafka_conf_set_log_cb (rd_kafka_conf_t *conf, - void (*log_cb) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)) { +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, + void (*log_cb)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)) { #if !WITH_SYSLOG if (log_cb == rd_kafka_log_syslog) rd_assert(!*"syslog support not enabled in this build"); @@ -2867,55 +2692,50 @@ void rd_kafka_conf_set_log_cb (rd_kafka_conf_t *conf, } -void rd_kafka_conf_set_stats_cb (rd_kafka_conf_t *conf, - int (*stats_cb) (rd_kafka_t *rk, - char *json, - size_t json_len, - void *opaque)) { +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, + int (*stats_cb)(rd_kafka_t *rk, + char *json, + size_t json_len, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "stats_cb", stats_cb); } -void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, - void (*oauthbearer_token_refresh_cb) ( - rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque)) { +void rd_kafka_conf_set_oauthbearer_token_refresh_cb( + rd_kafka_conf_t *conf, + void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque)) { #if WITH_SASL_OAUTHBEARER rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, - "oauthbearer_token_refresh_cb", oauthbearer_token_refresh_cb); + "oauthbearer_token_refresh_cb", + oauthbearer_token_refresh_cb); #endif } -void rd_kafka_conf_enable_sasl_queue (rd_kafka_conf_t *conf, int enable) { - rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, - "enable_sasl_queue", +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int 
enable) { + rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "enable_sasl_queue", (enable ? "true" : "false")); - } -void rd_kafka_conf_set_socket_cb (rd_kafka_conf_t *conf, - int (*socket_cb) (int domain, int type, - int protocol, - void *opaque)) { - rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "socket_cb", - socket_cb); +void rd_kafka_conf_set_socket_cb( + rd_kafka_conf_t *conf, + int (*socket_cb)(int domain, int type, int protocol, void *opaque)) { + rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "socket_cb", socket_cb); } -void -rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf, - int (*connect_cb) (int sockfd, - const struct sockaddr *addr, - int addrlen, - const char *id, - void *opaque)) { +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, + int (*connect_cb)(int sockfd, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "connect_cb", connect_cb); } -void -rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, - int (*closesocket_cb) (int sockfd, - void *opaque)) { +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, + int (*closesocket_cb)(int sockfd, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "closesocket_cb", closesocket_cb); } @@ -2923,52 +2743,52 @@ rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, #ifndef _WIN32 -void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, - int (*open_cb) (const char *pathname, - int flags, mode_t mode, - void *opaque)) { +void rd_kafka_conf_set_open_cb(rd_kafka_conf_t *conf, + int (*open_cb)(const char *pathname, + int flags, + mode_t mode, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "open_cb", open_cb); } #endif -rd_kafka_conf_res_t -rd_kafka_conf_set_ssl_cert_verify_cb ( - rd_kafka_conf_t *conf, - int (*ssl_cert_verify_cb) (rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_set_error, - int depth, - const char *buf, size_t size, - char 
*errstr, size_t errstr_size, - void *opaque)) { +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb( + rd_kafka_conf_t *conf, + int (*ssl_cert_verify_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_set_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque)) { #if !WITH_SSL return RD_KAFKA_CONF_INVALID; #else - rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, - "ssl.certificate.verify_cb", - ssl_cert_verify_cb); + rd_kafka_anyconf_set_internal( + _RK_GLOBAL, conf, "ssl.certificate.verify_cb", ssl_cert_verify_cb); return RD_KAFKA_CONF_OK; #endif } -void rd_kafka_conf_set_opaque (rd_kafka_conf_t *conf, void *opaque) { +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "opaque", opaque); } -void rd_kafka_conf_set_engine_callback_data (rd_kafka_conf_t *conf, - void *callback_data) { - rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, - "ssl_engine_callback_data", - callback_data); +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, + void *callback_data) { + rd_kafka_anyconf_set_internal( + _RK_GLOBAL, conf, "ssl_engine_callback_data", callback_data); } -void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf) { +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf) { if (conf->topic_conf) { if (rd_kafka_anyconf_is_any_modified(conf->topic_conf)) conf->warn.default_topic_conf_overwritten = rd_true; @@ -2980,41 +2800,38 @@ void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, } rd_kafka_topic_conf_t * -rd_kafka_conf_get_default_topic_conf (rd_kafka_conf_t *conf) { +rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf) { return conf->topic_conf; } -void -rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, - int32_t (*partitioner) ( - const rd_kafka_topic_t *rkt, - const void 
*keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque)) { +void rd_kafka_topic_conf_set_partitioner_cb( + rd_kafka_topic_conf_t *topic_conf, + int32_t (*partitioner)(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque)) { rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "partitioner_cb", partitioner); } -void -rd_kafka_topic_conf_set_msg_order_cmp (rd_kafka_topic_conf_t *topic_conf, - int (*msg_order_cmp) ( - const rd_kafka_message_t *a, - const rd_kafka_message_t *b)) { +void rd_kafka_topic_conf_set_msg_order_cmp( + rd_kafka_topic_conf_t *topic_conf, + int (*msg_order_cmp)(const rd_kafka_message_t *a, + const rd_kafka_message_t *b)) { rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "msg_order_cmp", msg_order_cmp); } -void rd_kafka_topic_conf_set_opaque (rd_kafka_topic_conf_t *topic_conf, - void *opaque) { +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *topic_conf, + void *opaque) { rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "opaque", opaque); } - /** * @brief Convert flags \p ival to csv-string using S2F property \p prop. * @@ -3031,46 +2848,45 @@ void rd_kafka_topic_conf_set_opaque (rd_kafka_topic_conf_t *topic_conf, * total number of bytes needed. * */ -static -size_t rd_kafka_conf_flags2str (char *dest, size_t dest_size, const char *delim, - const struct rd_kafka_property *prop, - int ival, - rd_bool_t include_unsupported) { - size_t of = 0; - int j; - - if (dest && dest_size > 0) - *dest = '\0'; - - /* Phase 1: scan for set flags, accumulate needed size. 
- * Phase 2: write to dest */ - for (j = 0 ; prop->s2i[j].str ; j++) { - if (prop->type == _RK_C_S2F && ival != -1 && - (ival & prop->s2i[j].val) != prop->s2i[j].val) - continue; - else if (prop->type == _RK_C_S2I && - ival != -1 && prop->s2i[j].val != ival) - continue; +static size_t rd_kafka_conf_flags2str(char *dest, + size_t dest_size, + const char *delim, + const struct rd_kafka_property *prop, + int ival, + rd_bool_t include_unsupported) { + size_t of = 0; + int j; + + if (dest && dest_size > 0) + *dest = '\0'; + + /* Phase 1: scan for set flags, accumulate needed size. + * Phase 2: write to dest */ + for (j = 0; prop->s2i[j].str; j++) { + if (prop->type == _RK_C_S2F && ival != -1 && + (ival & prop->s2i[j].val) != prop->s2i[j].val) + continue; + else if (prop->type == _RK_C_S2I && ival != -1 && + prop->s2i[j].val != ival) + continue; else if (prop->s2i[j].unsupported && !include_unsupported) continue; - if (!dest) - of += strlen(prop->s2i[j].str) + (of > 0 ? 1 : 0); - else { - size_t r; - r = rd_snprintf(dest+of, dest_size-of, - "%s%s", - of > 0 ? delim:"", - prop->s2i[j].str); - if (r > dest_size-of) { - r = dest_size-of; - break; - } - of += r; - } - } - - return of+1/*nul*/; + if (!dest) + of += strlen(prop->s2i[j].str) + (of > 0 ? 1 : 0); + else { + size_t r; + r = rd_snprintf(dest + of, dest_size - of, "%s%s", + of > 0 ? 
delim : "", prop->s2i[j].str); + if (r > dest_size - of) { + r = dest_size - of; + break; + } + of += r; + } + } + + return of + 1 /*nul*/; } @@ -3078,23 +2894,23 @@ size_t rd_kafka_conf_flags2str (char *dest, size_t dest_size, const char *delim, * Return "original"(re-created) configuration value string */ static rd_kafka_conf_res_t -rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, - char *dest, size_t *dest_size) { +rd_kafka_anyconf_get0(const void *conf, + const struct rd_kafka_property *prop, + char *dest, + size_t *dest_size) { char tmp[22]; const char *val = NULL; - size_t val_len = 0; + size_t val_len = 0; int j; - switch (prop->type) - { + switch (prop->type) { case _RK_C_STR: val = *_RK_PTR(const char **, conf, prop->offset); break; - case _RK_C_KSTR: - { - const rd_kafkap_str_t **kstr = _RK_PTR(const rd_kafkap_str_t **, - conf, prop->offset); + case _RK_C_KSTR: { + const rd_kafkap_str_t **kstr = + _RK_PTR(const rd_kafkap_str_t **, conf, prop->offset); if (*kstr) val = (*kstr)->str; break; @@ -3125,7 +2941,7 @@ rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, break; case _RK_C_S2I: - for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { + for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { if (prop->s2i[j].val == *_RK_PTR(int *, conf, prop->offset)) { val = prop->s2i[j].str; @@ -3134,29 +2950,26 @@ rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, } break; - case _RK_C_S2F: - { + case _RK_C_S2F: { const int ival = *_RK_PTR(const int *, conf, prop->offset); - val_len = rd_kafka_conf_flags2str(dest, - dest ? *dest_size : 0, ",", - prop, ival, - rd_false/*only supported*/); - if (dest) { - val_len = 0; - val = dest; - dest = NULL; - } - break; - } + val_len = rd_kafka_conf_flags2str(dest, dest ? 
*dest_size : 0, + ",", prop, ival, + rd_false /*only supported*/); + if (dest) { + val_len = 0; + val = dest; + dest = NULL; + } + break; + } - case _RK_C_PATLIST: - { + case _RK_C_PATLIST: { const rd_kafka_pattern_list_t **plist; - plist = _RK_PTR(const rd_kafka_pattern_list_t **, - conf, prop->offset); - if (*plist) - val = (*plist)->rkpl_orig; + plist = _RK_PTR(const rd_kafka_pattern_list_t **, conf, + prop->offset); + if (*plist) + val = (*plist)->rkpl_orig; break; } @@ -3165,7 +2978,7 @@ rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, } if (val_len) { - *dest_size = val_len+1; + *dest_size = val_len + 1; return RD_KAFKA_CONF_OK; } @@ -3175,32 +2988,33 @@ rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, val_len = strlen(val); if (dest) { - size_t use_len = RD_MIN(val_len, (*dest_size)-1); + size_t use_len = RD_MIN(val_len, (*dest_size) - 1); memcpy(dest, val, use_len); dest[use_len] = '\0'; } /* Return needed size */ - *dest_size = val_len+1; + *dest_size = val_len + 1; return RD_KAFKA_CONF_OK; } -static rd_kafka_conf_res_t rd_kafka_anyconf_get (int scope, const void *conf, - const char *name, - char *dest, size_t *dest_size){ - const struct rd_kafka_property *prop; +static rd_kafka_conf_res_t rd_kafka_anyconf_get(int scope, + const void *conf, + const char *name, + char *dest, + size_t *dest_size) { + const struct rd_kafka_property *prop; - for (prop = rd_kafka_properties; prop->name ; prop++) { + for (prop = rd_kafka_properties; prop->name; prop++) { - if (!(prop->scope & scope) || strcmp(prop->name, name)) - continue; + if (!(prop->scope & scope) || strcmp(prop->name, name)) + continue; - if (prop->type == _RK_C_ALIAS) - return rd_kafka_anyconf_get(scope, conf, - prop->sdef, - dest, dest_size); + if (prop->type == _RK_C_ALIAS) + return rd_kafka_anyconf_get(scope, conf, prop->sdef, + dest, dest_size); if (rd_kafka_anyconf_get0(conf, prop, dest, dest_size) == RD_KAFKA_CONF_OK) @@ -3210,15 +3024,17 @@ 
static rd_kafka_conf_res_t rd_kafka_anyconf_get (int scope, const void *conf, return RD_KAFKA_CONF_UNKNOWN; } -rd_kafka_conf_res_t rd_kafka_topic_conf_get (const rd_kafka_topic_conf_t *conf, - const char *name, - char *dest, size_t *dest_size) { +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size) { return rd_kafka_anyconf_get(_RK_TOPIC, conf, name, dest, dest_size); } -rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, - const char *name, - char *dest, size_t *dest_size) { +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size) { rd_kafka_conf_res_t res; res = rd_kafka_anyconf_get(_RK_GLOBAL, conf, name, dest, dest_size); if (res != RD_KAFKA_CONF_UNKNOWN || !conf->topic_conf) @@ -3231,30 +3047,31 @@ rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, } -static const char **rd_kafka_anyconf_dump (int scope, const void *conf, - size_t *cntp, - rd_bool_t only_modified, - rd_bool_t redact_sensitive) { - const struct rd_kafka_property *prop; - char **arr; - int cnt = 0; +static const char **rd_kafka_anyconf_dump(int scope, + const void *conf, + size_t *cntp, + rd_bool_t only_modified, + rd_bool_t redact_sensitive) { + const struct rd_kafka_property *prop; + char **arr; + int cnt = 0; - arr = rd_calloc(sizeof(char *), RD_ARRAYSIZE(rd_kafka_properties)*2); + arr = rd_calloc(sizeof(char *), RD_ARRAYSIZE(rd_kafka_properties) * 2); - for (prop = rd_kafka_properties; prop->name ; prop++) { + for (prop = rd_kafka_properties; prop->name; prop++) { char *val = NULL; size_t val_size; - if (!(prop->scope & scope)) - continue; + if (!(prop->scope & scope)) + continue; if (only_modified && !rd_kafka_anyconf_is_modified(conf, prop)) continue; - /* Skip aliases, show original property instead. + /* Skip aliases, show original property instead. * Skip invalids. 
*/ - if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) - continue; + if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) + continue; if (redact_sensitive && (prop->scope & _RK_SENSITIVE)) { val = rd_strdup("[redacted]"); @@ -3272,36 +3089,34 @@ static const char **rd_kafka_anyconf_dump (int scope, const void *conf, arr[cnt++] = rd_strdup(prop->name); arr[cnt++] = val; - } + } - *cntp = cnt; + *cntp = cnt; - return (const char **)arr; + return (const char **)arr; } -const char **rd_kafka_conf_dump (rd_kafka_conf_t *conf, size_t *cntp) { - return rd_kafka_anyconf_dump(_RK_GLOBAL, conf, cntp, - rd_false/*all*/, - rd_false/*don't redact*/); +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp) { + return rd_kafka_anyconf_dump(_RK_GLOBAL, conf, cntp, rd_false /*all*/, + rd_false /*don't redact*/); } -const char **rd_kafka_topic_conf_dump (rd_kafka_topic_conf_t *conf, - size_t *cntp) { - return rd_kafka_anyconf_dump(_RK_TOPIC, conf, cntp, - rd_false/*all*/, - rd_false/*don't redact*/); +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, + size_t *cntp) { + return rd_kafka_anyconf_dump(_RK_TOPIC, conf, cntp, rd_false /*all*/, + rd_false /*don't redact*/); } -void rd_kafka_conf_dump_free (const char **arr, size_t cnt) { - char **_arr = (char **)arr; - unsigned int i; +void rd_kafka_conf_dump_free(const char **arr, size_t cnt) { + char **_arr = (char **)arr; + unsigned int i; - for (i = 0 ; i < cnt ; i++) - if (_arr[i]) - rd_free(_arr[i]); + for (i = 0; i < cnt; i++) + if (_arr[i]) + rd_free(_arr[i]); - rd_free(_arr); + rd_free(_arr); } @@ -3309,33 +3124,36 @@ void rd_kafka_conf_dump_free (const char **arr, size_t cnt) { /** * @brief Dump configured properties to debug log. 
*/ -void rd_kafka_anyconf_dump_dbg (rd_kafka_t *rk, int scope, const void *conf, - const char *description) { +void rd_kafka_anyconf_dump_dbg(rd_kafka_t *rk, + int scope, + const void *conf, + const char *description) { const char **arr; size_t cnt; size_t i; - arr = rd_kafka_anyconf_dump(scope, conf, &cnt, - rd_true/*modified only*/, - rd_true/*redact sensitive*/); + arr = + rd_kafka_anyconf_dump(scope, conf, &cnt, rd_true /*modified only*/, + rd_true /*redact sensitive*/); if (cnt > 0) rd_kafka_dbg(rk, CONF, "CONF", "%s:", description); - for (i = 0 ; i < cnt ; i += 2) - rd_kafka_dbg(rk, CONF, "CONF", " %s = %s", arr[i], arr[i+1]); + for (i = 0; i < cnt; i += 2) + rd_kafka_dbg(rk, CONF, "CONF", " %s = %s", arr[i], arr[i + 1]); rd_kafka_conf_dump_free(arr, cnt); } -void rd_kafka_conf_properties_show (FILE *fp) { - const struct rd_kafka_property *prop0; - int last = 0; - int j; - char tmp[512]; - const char *dash80 = "----------------------------------------" - "----------------------------------------"; +void rd_kafka_conf_properties_show(FILE *fp) { + const struct rd_kafka_property *prop0; + int last = 0; + int j; + char tmp[512]; + const char *dash80 = + "----------------------------------------" + "----------------------------------------"; - for (prop0 = rd_kafka_properties; prop0->name ; prop0++) { - const char *typeinfo = ""; + for (prop0 = rd_kafka_properties; prop0->name; prop0++) { + const char *typeinfo = ""; const char *importance; const struct rd_kafka_property *prop = prop0; @@ -3347,121 +3165,116 @@ void rd_kafka_conf_properties_show (FILE *fp) { if (prop->type == _RK_C_INVALID) continue; - if (!(prop->scope & last)) { - fprintf(fp, - "%s## %s configuration properties\n\n", - last ? "\n\n":"", - prop->scope == _RK_GLOBAL ? "Global": "Topic"); + if (!(prop->scope & last)) { + fprintf(fp, "%s## %s configuration properties\n\n", + last ? "\n\n" : "", + prop->scope == _RK_GLOBAL ? 
"Global" : "Topic"); - fprintf(fp, - "%-40s | %3s | %-15s | %13s | %-10s | %-25s\n" - "%.*s-|-%.*s-|-%.*s-|-%.*s:|-%.*s-| -%.*s\n", - "Property", "C/P", "Range", - "Default", "Importance", "Description", - 40, dash80, 3, dash80, 15, dash80, - 13, dash80, 10, dash80, 25, dash80); + fprintf(fp, + "%-40s | %3s | %-15s | %13s | %-10s | %-25s\n" + "%.*s-|-%.*s-|-%.*s-|-%.*s:|-%.*s-| -%.*s\n", + "Property", "C/P", "Range", "Default", + "Importance", "Description", 40, dash80, 3, + dash80, 15, dash80, 13, dash80, 10, dash80, 25, + dash80); - last = prop->scope & (_RK_GLOBAL|_RK_TOPIC); - - } + last = prop->scope & (_RK_GLOBAL | _RK_TOPIC); + } - fprintf(fp, "%-40s | ", prop->name); + fprintf(fp, "%-40s | ", prop->name); /* For aliases, use the aliased property from here on * so that the alias property shows up with proper * ranges, defaults, etc. */ if (prop->type == _RK_C_ALIAS) { - prop = rd_kafka_conf_prop_find(prop->scope, - prop->sdef); + prop = rd_kafka_conf_prop_find(prop->scope, prop->sdef); rd_assert(prop && *"BUG: " "alias points to unknown config property"); } fprintf(fp, "%3s | ", (!(prop->scope & _RK_PRODUCER) == - !(prop->scope & _RK_CONSUMER) ? " * " : - ((prop->scope & _RK_PRODUCER) ? " P " : " C "))); + !(prop->scope & _RK_CONSUMER) + ? " * " + : ((prop->scope & _RK_PRODUCER) ? " P " : " C "))); - switch (prop->type) - { - case _RK_C_STR: + switch (prop->type) { + case _RK_C_STR: case _RK_C_KSTR: - typeinfo = "string"; + typeinfo = "string"; case _RK_C_PATLIST: - if (prop->type == _RK_C_PATLIST) - typeinfo = "pattern list"; - if (prop->s2i[0].str) { - rd_kafka_conf_flags2str( - tmp, sizeof(tmp), ", ", - prop, -1, - rd_true/*include unsupported*/); - fprintf(fp, "%-15s | %13s", - tmp, prop->sdef ? prop->sdef : ""); - } else { - fprintf(fp, "%-15s | %13s", - "", prop->sdef ? prop->sdef : ""); - } - break; - case _RK_C_BOOL: - typeinfo = "boolean"; - fprintf(fp, "%-15s | %13s", "true, false", - prop->vdef ? 
"true" : "false"); - break; - case _RK_C_INT: - typeinfo = "integer"; - rd_snprintf(tmp, sizeof(tmp), - "%d .. %d", prop->vmin, prop->vmax); - fprintf(fp, "%-15s | %13i", tmp, prop->vdef); - break; + if (prop->type == _RK_C_PATLIST) + typeinfo = "pattern list"; + if (prop->s2i[0].str) { + rd_kafka_conf_flags2str( + tmp, sizeof(tmp), ", ", prop, -1, + rd_true /*include unsupported*/); + fprintf(fp, "%-15s | %13s", tmp, + prop->sdef ? prop->sdef : ""); + } else { + fprintf(fp, "%-15s | %13s", "", + prop->sdef ? prop->sdef : ""); + } + break; + case _RK_C_BOOL: + typeinfo = "boolean"; + fprintf(fp, "%-15s | %13s", "true, false", + prop->vdef ? "true" : "false"); + break; + case _RK_C_INT: + typeinfo = "integer"; + rd_snprintf(tmp, sizeof(tmp), "%d .. %d", prop->vmin, + prop->vmax); + fprintf(fp, "%-15s | %13i", tmp, prop->vdef); + break; case _RK_C_DBL: typeinfo = "float"; /* more user-friendly than double */ - rd_snprintf(tmp, sizeof(tmp), - "%g .. %g", prop->dmin, prop->dmax); + rd_snprintf(tmp, sizeof(tmp), "%g .. 
%g", prop->dmin, + prop->dmax); fprintf(fp, "%-15s | %13g", tmp, prop->ddef); break; - case _RK_C_S2I: - typeinfo = "enum value"; - rd_kafka_conf_flags2str(tmp, sizeof(tmp), ", ", - prop, -1, - rd_true/*include unsupported*/); - fprintf(fp, "%-15s | ", tmp); - - for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { - if (prop->s2i[j].val == prop->vdef) { - fprintf(fp, "%13s", prop->s2i[j].str); - break; - } - } - if (j == RD_ARRAYSIZE(prop->s2i)) - fprintf(fp, "%13s", " "); - break; - - case _RK_C_S2F: - typeinfo = "CSV flags"; - /* Dont duplicate builtin.features value in - * both Range and Default */ - if (!strcmp(prop->name, "builtin.features")) - *tmp = '\0'; - else - rd_kafka_conf_flags2str( - tmp, sizeof(tmp), ", ", - prop, -1, - rd_true/*include unsupported*/); - fprintf(fp, "%-15s | ", tmp); - rd_kafka_conf_flags2str(tmp, sizeof(tmp), ", ", - prop, prop->vdef, - rd_true/*include unsupported*/); - fprintf(fp, "%13s", tmp); - - break; - case _RK_C_PTR: + case _RK_C_S2I: + typeinfo = "enum value"; + rd_kafka_conf_flags2str( + tmp, sizeof(tmp), ", ", prop, -1, + rd_true /*include unsupported*/); + fprintf(fp, "%-15s | ", tmp); + + for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { + if (prop->s2i[j].val == prop->vdef) { + fprintf(fp, "%13s", prop->s2i[j].str); + break; + } + } + if (j == RD_ARRAYSIZE(prop->s2i)) + fprintf(fp, "%13s", " "); + break; + + case _RK_C_S2F: + typeinfo = "CSV flags"; + /* Dont duplicate builtin.features value in + * both Range and Default */ + if (!strcmp(prop->name, "builtin.features")) + *tmp = '\0'; + else + rd_kafka_conf_flags2str( + tmp, sizeof(tmp), ", ", prop, -1, + rd_true /*include unsupported*/); + fprintf(fp, "%-15s | ", tmp); + rd_kafka_conf_flags2str( + tmp, sizeof(tmp), ", ", prop, prop->vdef, + rd_true /*include unsupported*/); + fprintf(fp, "%13s", tmp); + + break; + case _RK_C_PTR: case _RK_C_INTERNAL: typeinfo = "see dedicated API"; /* FALLTHRU */ - default: - fprintf(fp, "%-15s | %-13s", "", " "); - break; - } + 
default: + fprintf(fp, "%-15s | %-13s", "", " "); + break; + } if (prop->scope & _RK_HIGH) importance = "high"; @@ -3473,7 +3286,8 @@ void rd_kafka_conf_properties_show (FILE *fp) { fprintf(fp, " | %-10s | ", importance); if (prop->scope & _RK_EXPERIMENTAL) - fprintf(fp, "**EXPERIMENTAL**: " + fprintf(fp, + "**EXPERIMENTAL**: " "subject to change or removal. "); if (prop->scope & _RK_DEPRECATED) @@ -3484,8 +3298,7 @@ void rd_kafka_conf_properties_show (FILE *fp) { if (prop0->type == _RK_C_ALIAS) fprintf(fp, "Alias for `%s`: ", prop0->sdef); - fprintf(fp, "%s
*Type: %s*\n", prop->desc, - typeinfo); + fprintf(fp, "%s
*Type: %s*\n", prop->desc, typeinfo); } fprintf(fp, "\n"); fprintf(fp, "### C/P legend: C = Consumer, P = Producer, * = both\n"); @@ -3493,7 +3306,6 @@ void rd_kafka_conf_properties_show (FILE *fp) { - /** * @name Configuration value methods * @@ -3508,12 +3320,14 @@ void rd_kafka_conf_properties_show (FILE *fp) { * * @oaram name Property name, must be a const static string (will not be copied) */ -void rd_kafka_confval_init_int (rd_kafka_confval_t *confval, - const char *name, - int vmin, int vmax, int vdef) { - confval->name = name; +void rd_kafka_confval_init_int(rd_kafka_confval_t *confval, + const char *name, + int vmin, + int vmax, + int vdef) { + confval->name = name; confval->is_enabled = 1; - confval->valuetype = RD_KAFKA_CONFVAL_INT; + confval->valuetype = RD_KAFKA_CONFVAL_INT; confval->u.INT.vmin = vmin; confval->u.INT.vmax = vmax; confval->u.INT.vdef = vdef; @@ -3525,12 +3339,11 @@ void rd_kafka_confval_init_int (rd_kafka_confval_t *confval, * * @oaram name Property name, must be a const static string (will not be copied) */ -void rd_kafka_confval_init_ptr (rd_kafka_confval_t *confval, - const char *name) { - confval->name = name; +void rd_kafka_confval_init_ptr(rd_kafka_confval_t *confval, const char *name) { + confval->name = name; confval->is_enabled = 1; - confval->valuetype = RD_KAFKA_CONFVAL_PTR; - confval->u.PTR = NULL; + confval->valuetype = RD_KAFKA_CONFVAL_PTR; + confval->u.PTR = NULL; } /** @@ -3538,8 +3351,8 @@ void rd_kafka_confval_init_ptr (rd_kafka_confval_t *confval, * * @oaram name Property name, must be a const static string (will not be copied) */ -void rd_kafka_confval_disable (rd_kafka_confval_t *confval, const char *name) { - confval->name = name; +void rd_kafka_confval_disable(rd_kafka_confval_t *confval, const char *name) { + confval->name = name; confval->is_enabled = 0; } @@ -3555,11 +3368,11 @@ void rd_kafka_confval_disable (rd_kafka_confval_t *confval, const char *name) { * RD_KAFKA_RESP_ERR__INVALID_ARG if the value was of 
incorrect type, * out of range, or otherwise not a valid value. */ -rd_kafka_resp_err_t -rd_kafka_confval_set_type (rd_kafka_confval_t *confval, - rd_kafka_confval_type_t valuetype, - const void *valuep, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval, + rd_kafka_confval_type_t valuetype, + const void *valuep, + char *errstr, + size_t errstr_size) { if (!confval->is_enabled) { rd_snprintf(errstr, errstr_size, @@ -3568,22 +3381,19 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, return RD_KAFKA_RESP_ERR__INVALID_ARG; } - switch (confval->valuetype) - { - case RD_KAFKA_CONFVAL_INT: - { + switch (confval->valuetype) { + case RD_KAFKA_CONFVAL_INT: { int v; const char *end; if (!valuep) { /* Revert to default */ confval->u.INT.v = confval->u.INT.vdef; - confval->is_set = 0; + confval->is_set = 0; return RD_KAFKA_RESP_ERR_NO_ERROR; } - switch (valuetype) - { + switch (valuetype) { case RD_KAFKA_CONFVAL_INT: v = *(const int *)valuep; break; @@ -3600,7 +3410,8 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, default: rd_snprintf(errstr, errstr_size, "Invalid value type for \"%s\": " - "expecting integer", confval->name); + "expecting integer", + confval->name); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -3610,27 +3421,24 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, rd_snprintf(errstr, errstr_size, "Invalid value type for \"%s\": " "expecting integer in range %d..%d", - confval->name, - confval->u.INT.vmin, + confval->name, confval->u.INT.vmin, confval->u.INT.vmax); return RD_KAFKA_RESP_ERR__INVALID_ARG; } confval->u.INT.v = v; - confval->is_set = 1; - } - break; + confval->is_set = 1; + } break; - case RD_KAFKA_CONFVAL_STR: - { + case RD_KAFKA_CONFVAL_STR: { size_t vlen; const char *v = (const char *)valuep; if (!valuep) { confval->is_set = 0; if (confval->u.STR.vdef) - confval->u.STR.v = rd_strdup(confval->u.STR. 
- vdef); + confval->u.STR.v = + rd_strdup(confval->u.STR.vdef); else confval->u.STR.v = NULL; } @@ -3638,7 +3446,8 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, if (valuetype != RD_KAFKA_CONFVAL_STR) { rd_snprintf(errstr, errstr_size, "Invalid value type for \"%s\": " - "expecting string", confval->name); + "expecting string", + confval->name); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -3649,9 +3458,8 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, rd_snprintf(errstr, errstr_size, "Invalid value for \"%s\": " "expecting string with length " - "%"PRIusz"..%"PRIusz, - confval->name, - confval->u.STR.minlen, + "%" PRIusz "..%" PRIusz, + confval->name, confval->u.STR.minlen, confval->u.STR.maxlen); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -3660,8 +3468,7 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, rd_free(confval->u.STR.v); confval->u.STR.v = rd_strdup(v); - } - break; + } break; case RD_KAFKA_CONFVAL_PTR: confval->u.PTR = (void *)valuep; @@ -3676,38 +3483,37 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, } -int rd_kafka_confval_get_int (const rd_kafka_confval_t *confval) { +int rd_kafka_confval_get_int(const rd_kafka_confval_t *confval) { rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_INT); return confval->u.INT.v; } -const char *rd_kafka_confval_get_str (const rd_kafka_confval_t *confval) { +const char *rd_kafka_confval_get_str(const rd_kafka_confval_t *confval) { rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_STR); return confval->u.STR.v; } -void *rd_kafka_confval_get_ptr (const rd_kafka_confval_t *confval) { +void *rd_kafka_confval_get_ptr(const rd_kafka_confval_t *confval) { rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_PTR); return confval->u.PTR; } -#define _is_alphanum(C) ( \ - ((C) >= 'a' && (C) <= 'z') || \ - ((C) >= 'A' && (C) <= 'Z') || \ - ((C) >= '0' && (C) <= '9')) +#define _is_alphanum(C) \ + (((C) >= 'a' && (C) <= 'z') || ((C) >= 'A' && (C) <= 'Z') || \ + ((C) >= '0' && (C) <= '9')) /** * 
@returns true if the string is KIP-511 safe, else false. */ -static rd_bool_t rd_kafka_sw_str_is_safe (const char *str) { +static rd_bool_t rd_kafka_sw_str_is_safe(const char *str) { const char *s; if (!*str) return rd_true; - for (s = str ; *s ; s++) { + for (s = str; *s; s++) { int c = (int)*s; if (unlikely(!(_is_alphanum(c) || c == '-' || c == '.'))) @@ -3717,7 +3523,7 @@ static rd_bool_t rd_kafka_sw_str_is_safe (const char *str) { /* Verify that the string begins and ends with a-zA-Z0-9 */ if (!_is_alphanum(*str)) return rd_false; - if (!_is_alphanum(*(s-1))) + if (!_is_alphanum(*(s - 1))) return rd_false; return rd_true; @@ -3730,18 +3536,17 @@ static rd_bool_t rd_kafka_sw_str_is_safe (const char *str) { * * @warning The \p str is modified in-place. */ -static void rd_kafka_sw_str_sanitize_inplace (char *str) { +static void rd_kafka_sw_str_sanitize_inplace(char *str) { char *s = str, *d = str; /* Strip any leading non-alphanums */ while (!_is_alphanum(*s)) s++; - for (; *s ; s++) { + for (; *s; s++) { int c = (int)*s; - if (unlikely(!(_is_alphanum(c) || - c == '-' || c == '.'))) + if (unlikely(!(_is_alphanum(c) || c == '-' || c == '.'))) *d = '-'; else *d = *s; @@ -3751,7 +3556,7 @@ static void rd_kafka_sw_str_sanitize_inplace (char *str) { *d = '\0'; /* Strip any trailing non-alphanums */ - for (d = d-1 ; d >= str && !_is_alphanum(*d) ; d--) + for (d = d - 1; d >= str && !_is_alphanum(*d); d--) *d = '\0'; } @@ -3772,16 +3577,15 @@ static void rd_kafka_sw_str_sanitize_inplace (char *str) { * on success. The array count is returned in \p cntp. * The returned pointer must be freed with rd_free(). */ -static RD_UNUSED -char **rd_kafka_conf_kv_split (const char **input, size_t incnt, - size_t *cntp) { +static RD_UNUSED char ** +rd_kafka_conf_kv_split(const char **input, size_t incnt, size_t *cntp) { size_t i; char **out, *p; - size_t lens = 0; + size_t lens = 0; size_t outcnt = 0; /* First calculate total length needed for key-value strings. 
*/ - for (i = 0 ; i < incnt ; i++) { + for (i = 0; i < incnt; i++) { const char *t = strchr(input[i], '='); /* No "=", or "=" at beginning of string. */ @@ -3794,12 +3598,12 @@ char **rd_kafka_conf_kv_split (const char **input, size_t incnt, /* Allocate array along with elements in one go */ out = rd_malloc((sizeof(*out) * incnt * 2) + lens); - p = (char *)(&out[incnt * 2]); + p = (char *)(&out[incnt * 2]); - for (i = 0 ; i < incnt ; i++) { - const char *t = strchr(input[i], '='); - size_t namelen = (size_t)(t - input[i]); - size_t valuelen = strlen(t+1); + for (i = 0; i < incnt; i++) { + const char *t = strchr(input[i], '='); + size_t namelen = (size_t)(t - input[i]); + size_t valuelen = strlen(t + 1); /* Copy name */ out[outcnt++] = p; @@ -3809,7 +3613,7 @@ char **rd_kafka_conf_kv_split (const char **input, size_t incnt, /* Copy value */ out[outcnt++] = p; - memcpy(p, t+1, valuelen + 1); + memcpy(p, t + 1, valuelen + 1); p += valuelen; *(p++) = '\0'; } @@ -3827,8 +3631,8 @@ char **rd_kafka_conf_kv_split (const char **input, size_t incnt, * * @returns an error string if configuration is incorrect, else NULL. */ -const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, - rd_kafka_conf_t *conf) { +const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype, + rd_kafka_conf_t *conf) { const char *errstr; if (!conf->sw_name) @@ -3836,8 +3640,7 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, NULL, 0); if (!conf->sw_version) rd_kafka_conf_set(conf, "client.software.version", - rd_kafka_version_str(), - NULL, 0); + rd_kafka_version_str(), NULL, 0); /* The client.software.name and .version are sent to the broker * with the ApiVersionRequest starting with AK 2.4.0 (KIP-511). 
@@ -3857,7 +3660,7 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, #if WITH_SSL if (conf->ssl.keystore_location && !conf->ssl.keystore_password) return "`ssl.keystore.password` is mandatory when " - "`ssl.keystore.location` is set"; + "`ssl.keystore.location` is set"; if (conf->ssl.ca && (conf->ssl.ca_location || conf->ssl.ca_pem)) return "`ssl.ca.location` or `ssl.ca.pem`, and memory-based " "set_ssl_cert(CERT_CA) are mutually exclusive."; @@ -3873,20 +3676,20 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, if (conf->sasl.enable_oauthbearer_unsecure_jwt && conf->sasl.oauthbearer.token_refresh_cb) return "`enable.sasl.oauthbearer.unsecure.jwt` and " - "`oauthbearer_token_refresh_cb` are " - "mutually exclusive"; + "`oauthbearer_token_refresh_cb` are " + "mutually exclusive"; if (conf->sasl.enable_oauthbearer_unsecure_jwt && conf->sasl.oauthbearer.method == - RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC) + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC) return "`enable.sasl.oauthbearer.unsecure.jwt` and " - "`sasl.oauthbearer.method=oidc` are " - "mutually exclusive"; + "`sasl.oauthbearer.method=oidc` are " + "mutually exclusive"; /* Enable background thread for the builtin OIDC handler, * unless a refresh callback has been set. 
*/ if (conf->sasl.oauthbearer.method == - RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC && + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC && !conf->sasl.oauthbearer.token_refresh_cb) conf->enabled_events |= RD_KAFKA_EVENT_BACKGROUND; } @@ -3901,12 +3704,12 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, if (rd_kafka_conf_is_modified(conf, "fetch.max.bytes")) { if (conf->fetch_max_bytes < conf->max_msg_size) return "`fetch.max.bytes` must be >= " - "`message.max.bytes`"; + "`message.max.bytes`"; } else { - conf->fetch_max_bytes = RD_MAX( - RD_MIN(conf->fetch_max_bytes, - conf->queued_max_msg_kbytes * 1024), - conf->max_msg_size); + conf->fetch_max_bytes = + RD_MAX(RD_MIN(conf->fetch_max_bytes, + conf->queued_max_msg_kbytes * 1024), + conf->max_msg_size); } /* Automatically adjust 'receive.message.max.bytes' to @@ -3918,17 +3721,16 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, if (conf->fetch_max_bytes + 512 > conf->recv_max_msg_size) return "`receive.message.max.bytes` must be >= " - "`fetch.max.bytes` + 512"; + "`fetch.max.bytes` + 512"; } else { conf->recv_max_msg_size = - RD_MAX(conf->recv_max_msg_size, - conf->fetch_max_bytes + 512); + RD_MAX(conf->recv_max_msg_size, + conf->fetch_max_bytes + 512); } - if (conf->max_poll_interval_ms < - conf->group_session_timeout_ms) + if (conf->max_poll_interval_ms < conf->group_session_timeout_ms) return "`max.poll.interval.ms`must be >= " - "`session.timeout.ms`"; + "`session.timeout.ms`"; /* Simplifies rd_kafka_is_idempotent() which is producer-only */ conf->eos.idempotence = 0; @@ -3939,9 +3741,9 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, /* Auto enable idempotence unless * explicitly disabled */ if (rd_kafka_conf_is_modified( - conf, "enable.idempotence")) + conf, "enable.idempotence")) return "`transactional.id` requires " - "`enable.idempotence=true`"; + "`enable.idempotence=true`"; conf->eos.idempotence = rd_true; } @@ -3950,14 +3752,13 @@ const char *rd_kafka_conf_finalize 
(rd_kafka_type_t cltype, * before the transaction times out. */ if (!rd_kafka_conf_is_modified(conf, "socket.timeout.ms")) - conf->socket_timeout_ms = - RD_MAX(conf->eos. - transaction_timeout_ms - 100, - 900); + conf->socket_timeout_ms = RD_MAX( + conf->eos.transaction_timeout_ms - 100, + 900); else if (conf->eos.transaction_timeout_ms + 100 < conf->socket_timeout_ms) return "`socket.timeout.ms` must be set <= " - "`transaction.timeout.ms` + 100"; + "`transaction.timeout.ms` + 100"; } if (conf->eos.idempotence) { @@ -3967,34 +3768,36 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, if (conf->max_inflight > RD_KAFKA_IDEMP_MAX_INFLIGHT) return "`max.in.flight` must be " - "set <= " - RD_KAFKA_IDEMP_MAX_INFLIGHT_STR - " when `enable.idempotence` " - "is true"; + "set " + "<=" + " " RD_KAFKA_IDEMP_MAX_INFLIGHT_STR + " when `enable.idempotence` " + "is true"; } else { conf->max_inflight = - RD_MIN(conf->max_inflight, - RD_KAFKA_IDEMP_MAX_INFLIGHT); + RD_MIN(conf->max_inflight, + RD_KAFKA_IDEMP_MAX_INFLIGHT); } if (rd_kafka_conf_is_modified(conf, "retries")) { if (conf->max_retries < 1) return "`retries` must be set >= 1 " - "when `enable.idempotence` is " - "true"; + "when `enable.idempotence` is " + "true"; } else { conf->max_retries = INT32_MAX; } if (rd_kafka_conf_is_modified( - conf, - "queue.buffering.backpressure.threshold") - && conf->queue_backpressure_thres > 1) - return "`queue.buffering.backpressure.threshold` " - "must be set to 1 when " - "`enable.idempotence` is true"; + conf, + "queue.buffering.backpressure.threshold") && + conf->queue_backpressure_thres > 1) + return "`queue.buffering.backpressure." 
+ "threshold` " + "must be set to 1 when " + "`enable.idempotence` is true"; else conf->queue_backpressure_thres = 1; @@ -4004,40 +3807,39 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, } else { if (conf->eos.gapless && rd_kafka_conf_is_modified( - conf, "enable.gapless.guarantee")) + conf, "enable.gapless.guarantee")) return "`enable.gapless.guarantee` requires " - "`enable.idempotence` to be enabled"; + "`enable.idempotence` to be enabled"; } - if (!rd_kafka_conf_is_modified( - conf, "sticky.partitioning.linger.ms")) - conf->sticky_partition_linger_ms = (int) RD_MIN(900000, - (rd_ts_t) (2 * conf->buffering_max_ms_dbl)); + if (!rd_kafka_conf_is_modified(conf, + "sticky.partitioning.linger.ms")) + conf->sticky_partition_linger_ms = (int)RD_MIN( + 900000, (rd_ts_t)(2 * conf->buffering_max_ms_dbl)); } if (!rd_kafka_conf_is_modified(conf, "metadata.max.age.ms") && conf->metadata_refresh_interval_ms > 0) conf->metadata_max_age_ms = - conf->metadata_refresh_interval_ms * 3; + conf->metadata_refresh_interval_ms * 3; if (conf->reconnect_backoff_max_ms < conf->reconnect_backoff_ms) return "`reconnect.backoff.max.ms` must be >= " - "`reconnect.max.ms`"; + "`reconnect.max.ms`"; if (conf->sparse_connections) { /* Set sparse connection random selection interval to * 10 < reconnect.backoff.ms / 2 < 1000. */ conf->sparse_connect_intvl = - RD_MAX(11, RD_MIN(conf->reconnect_backoff_ms/2, 1000)); + RD_MAX(11, RD_MIN(conf->reconnect_backoff_ms / 2, 1000)); } if (!rd_kafka_conf_is_modified(conf, "connections.max.idle.ms") && - conf->brokerlist && - rd_strcasestr(conf->brokerlist, "azure")) { + conf->brokerlist && rd_strcasestr(conf->brokerlist, "azure")) { /* Issue #3109: * Default connections.max.idle.ms to <4 minutes on Azure. 
*/ - conf->connections_max_idle_ms = (4*60-10) * 1000; + conf->connections_max_idle_ms = (4 * 60 - 10) * 1000; } if (!rd_kafka_conf_is_modified(conf, "allow.auto.create.topics")) { @@ -4057,16 +3859,16 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, if (tconf->message_timeout_ms != 0 && (double)tconf->message_timeout_ms <= - conf->buffering_max_ms_dbl) { + conf->buffering_max_ms_dbl) { if (rd_kafka_topic_conf_is_modified( - tconf, "linger.ms")) + tconf, "linger.ms")) return "`message.timeout.ms` must be " - "greater than `linger.ms`"; + "greater than `linger.ms`"; else /* Auto adjust linger.ms to be lower * than message.timeout.ms */ conf->buffering_max_ms_dbl = - (double)tconf-> - message_timeout_ms - 0.1; + (double)tconf->message_timeout_ms - + 0.1; } } @@ -4093,9 +3895,9 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, * * @returns an error string if configuration is incorrect, else NULL. */ -const char *rd_kafka_topic_conf_finalize (rd_kafka_type_t cltype, - const rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf) { +const char *rd_kafka_topic_conf_finalize(rd_kafka_type_t cltype, + const rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf) { if (cltype != RD_KAFKA_PRODUCER) return NULL; @@ -4105,7 +3907,7 @@ const char *rd_kafka_topic_conf_finalize (rd_kafka_type_t cltype, if (rd_kafka_topic_conf_is_modified(tconf, "acks")) { if (tconf->required_acks != -1) return "`acks` must be set to `all` when " - "`enable.idempotence` is true"; + "`enable.idempotence` is true"; } else { tconf->required_acks = -1; /* all */ } @@ -4115,8 +3917,8 @@ const char *rd_kafka_topic_conf_finalize (rd_kafka_type_t cltype, "queuing.strategy")) { if (tconf->queuing_strategy != RD_KAFKA_QUEUE_FIFO) return "`queuing.strategy` must be set to " - "`fifo` when `enable.idempotence` is " - "true"; + "`fifo` when `enable.idempotence` is " + "true"; } else { tconf->queuing_strategy = RD_KAFKA_QUEUE_FIFO; } @@ -4124,14 +3926,14 @@ const char 
*rd_kafka_topic_conf_finalize (rd_kafka_type_t cltype, /* Ensure message.timeout.ms <= transaction.timeout.ms */ if (conf->eos.transactional_id) { if (!rd_kafka_topic_conf_is_modified( - tconf, "message.timeout.ms")) + tconf, "message.timeout.ms")) tconf->message_timeout_ms = - conf->eos.transaction_timeout_ms; + conf->eos.transaction_timeout_ms; else if (tconf->message_timeout_ms > conf->eos.transaction_timeout_ms) return "`message.timeout.ms` must be set <= " - "`transaction.timeout.ms`"; - } + "`transaction.timeout.ms`"; + } } if (tconf->message_timeout_ms != 0 && @@ -4148,17 +3950,17 @@ const char *rd_kafka_topic_conf_finalize (rd_kafka_type_t cltype, * configuration properties. * @returns the number of warnings logged. */ -static int rd_kafka_anyconf_warn_deprecated (rd_kafka_t *rk, - rd_kafka_conf_scope_t scope, - const void *conf) { +static int rd_kafka_anyconf_warn_deprecated(rd_kafka_t *rk, + rd_kafka_conf_scope_t scope, + const void *conf) { const struct rd_kafka_property *prop; - int warn_type = rk->rk_type == RD_KAFKA_PRODUCER ? - _RK_CONSUMER : _RK_PRODUCER; - int warn_on = _RK_DEPRECATED|_RK_EXPERIMENTAL|warn_type; + int warn_type = + rk->rk_type == RD_KAFKA_PRODUCER ? _RK_CONSUMER : _RK_PRODUCER; + int warn_on = _RK_DEPRECATED | _RK_EXPERIMENTAL | warn_type; int cnt = 0; - for (prop = rd_kafka_properties; prop->name ; prop++) { + for (prop = rd_kafka_properties; prop->name; prop++) { int match = prop->scope & warn_on; if (likely(!(prop->scope & scope) || !match)) @@ -4173,8 +3975,8 @@ static int rd_kafka_anyconf_warn_deprecated (rd_kafka_t *rk, prop->name, match & _RK_DEPRECATED ? "deprecated" : "", match == warn_on ? " and " : "", - match & _RK_EXPERIMENTAL ? - "experimental" : "", + match & _RK_EXPERIMENTAL ? 
"experimental" + : "", prop->desc); if (match & warn_type) @@ -4183,10 +3985,10 @@ static int rd_kafka_anyconf_warn_deprecated (rd_kafka_t *rk, "is a %s property and will be ignored by " "this %s instance", prop->name, - warn_type == _RK_PRODUCER ? - "producer" : "consumer", - warn_type == _RK_PRODUCER ? - "consumer" : "producer"); + warn_type == _RK_PRODUCER ? "producer" + : "consumer", + warn_type == _RK_PRODUCER ? "consumer" + : "producer"); cnt++; } @@ -4204,17 +4006,16 @@ static int rd_kafka_anyconf_warn_deprecated (rd_kafka_t *rk, * @locality any * @locks none */ -int rd_kafka_conf_warn (rd_kafka_t *rk) { +int rd_kafka_conf_warn(rd_kafka_t *rk) { int cnt = 0; cnt = rd_kafka_anyconf_warn_deprecated(rk, _RK_GLOBAL, &rk->rk_conf); if (rk->rk_conf.topic_conf) - cnt += rd_kafka_anyconf_warn_deprecated( - rk, _RK_TOPIC, rk->rk_conf.topic_conf); + cnt += rd_kafka_anyconf_warn_deprecated(rk, _RK_TOPIC, + rk->rk_conf.topic_conf); if (rk->rk_conf.warn.default_topic_conf_overwritten) - rd_kafka_log(rk, LOG_WARNING, - "CONFWARN", + rd_kafka_log(rk, LOG_WARNING, "CONFWARN", "Topic configuration properties set in the " "global configuration were overwritten by " "explicitly setting a default_topic_conf: " @@ -4224,8 +4025,7 @@ int rd_kafka_conf_warn (rd_kafka_t *rk) { if (rk->rk_type == RD_KAFKA_CONSUMER) { if (rk->rk_conf.fetch_wait_max_ms + 1000 > rk->rk_conf.socket_timeout_ms) - rd_kafka_log(rk, LOG_WARNING, - "CONFWARN", + rd_kafka_log(rk, LOG_WARNING, "CONFWARN", "Configuration property " "`fetch.wait.max.ms` (%d) should be " "set lower than `socket.timeout.ms` (%d) " @@ -4262,7 +4062,8 @@ int rd_kafka_conf_warn (rd_kafka_t *rk) { "may only contain 'a-zA-Z0-9.-', other characters " "will be replaced with '-'"); - if (rd_kafka_conf_is_modified(&rk->rk_conf, "client.software.version") && + if (rd_kafka_conf_is_modified(&rk->rk_conf, + "client.software.version") && !rd_kafka_sw_str_is_safe(rk->rk_conf.sw_version)) rd_kafka_log(rk, LOG_WARNING, "CONFWARN", "Configuration 
property `client.software.verison` " @@ -4279,7 +4080,7 @@ int rd_kafka_conf_warn (rd_kafka_t *rk) { } -const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk) { +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk) { return &rk->rk_conf; } @@ -4287,7 +4088,7 @@ const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk) { /** * @brief Unittests */ -int unittest_conf (void) { +int unittest_conf(void) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf; rd_kafka_conf_res_t res, res2; @@ -4298,15 +4099,15 @@ int unittest_conf (void) { size_t readlen; const char *errstr2; - conf = rd_kafka_conf_new(); + conf = rd_kafka_conf_new(); tconf = rd_kafka_topic_conf_new(); - res = rd_kafka_conf_set(conf, "unknown.thing", "foo", - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, "unknown.thing", "foo", errstr, + sizeof(errstr)); RD_UT_ASSERT(res == RD_KAFKA_CONF_UNKNOWN, "fail"); RD_UT_ASSERT(*errstr, "fail"); - for (iteration = 0 ; iteration < 5 ; iteration++) { + for (iteration = 0; iteration < 5; iteration++) { int cnt; @@ -4316,16 +4117,17 @@ int unittest_conf (void) { * 2 - Check is_modified. * 3 - Set all config properties, read back and verify. * 4 - Check is_modified. 
*/ - for (prop = rd_kafka_properties, cnt = 0 ; prop->name ; + for (prop = rd_kafka_properties, cnt = 0; prop->name; prop++, cnt++) { const char *val; char tmp[64]; - int odd = cnt & 1; + int odd = cnt & 1; int do_set = iteration == 3 || (iteration == 1 && odd); rd_bool_t is_modified; - int exp_is_modified = !prop->unsupported && - (iteration >= 3 || - (iteration > 0 && (do_set || odd))); + int exp_is_modified = + !prop->unsupported && + (iteration >= 3 || + (iteration > 0 && (do_set || odd))); readlen = sizeof(readval); @@ -4334,8 +4136,7 @@ int unittest_conf (void) { !strcmp(prop->name, "builtin.features")) continue; - switch (prop->type) - { + switch (prop->type) { case _RK_C_STR: case _RK_C_KSTR: case _RK_C_PATLIST: @@ -4375,33 +4176,28 @@ int unittest_conf (void) { if (prop->scope & _RK_GLOBAL) { if (do_set) - res = rd_kafka_conf_set(conf, - prop->name, val, - errstr, - sizeof(errstr)); + res = rd_kafka_conf_set( + conf, prop->name, val, errstr, + sizeof(errstr)); - res2 = rd_kafka_conf_get(conf, - prop->name, + res2 = rd_kafka_conf_get(conf, prop->name, readval, &readlen); - is_modified = rd_kafka_conf_is_modified( - conf, prop->name); + is_modified = + rd_kafka_conf_is_modified(conf, prop->name); } else if (prop->scope & _RK_TOPIC) { - if (do_set) + if (do_set) res = rd_kafka_topic_conf_set( - tconf, - prop->name, val, - errstr, sizeof(errstr)); + tconf, prop->name, val, errstr, + sizeof(errstr)); - res2 = rd_kafka_topic_conf_get(tconf, - prop->name, - readval, - &readlen); + res2 = rd_kafka_topic_conf_get( + tconf, prop->name, readval, &readlen); is_modified = rd_kafka_topic_conf_is_modified( - tconf, prop->name); + tconf, prop->name); } else { RD_NOTREACHED(); @@ -4433,7 +4229,6 @@ int unittest_conf (void) { "Property %s was set but " "is_modified=%d", prop->name, is_modified); - } assert(is_modified == exp_is_modified); @@ -4441,8 +4236,7 @@ int unittest_conf (void) { "Property %s is_modified=%d, " "exp_is_modified=%d " "(iter %d, odd %d, do_set %d)", - 
prop->name, is_modified, - exp_is_modified, + prop->name, is_modified, exp_is_modified, iteration, odd, do_set); } } @@ -4451,11 +4245,12 @@ int unittest_conf (void) { res = rd_kafka_conf_set(conf, "max.in.flight", "19", NULL, 0); RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res); - RD_UT_ASSERT(rd_kafka_conf_is_modified(conf, "max.in.flight") == rd_true, + RD_UT_ASSERT(rd_kafka_conf_is_modified(conf, "max.in.flight") == + rd_true, "fail"); RD_UT_ASSERT(rd_kafka_conf_is_modified( - conf, - "max.in.flight.requests.per.connection") == rd_true, + conf, "max.in.flight.requests.per.connection") == + rd_true, "fail"); rd_kafka_conf_destroy(conf); @@ -4464,7 +4259,7 @@ int unittest_conf (void) { /* Verify that software.client.* string-safing works */ conf = rd_kafka_conf_new(); - res = rd_kafka_conf_set(conf, "client.software.name", + res = rd_kafka_conf_set(conf, "client.software.name", " .~aba. va! !.~~", NULL, 0); RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res); res = rd_kafka_conf_set(conf, "client.software.version", @@ -4475,16 +4270,16 @@ int unittest_conf (void) { RD_UT_ASSERT(!errstr2, "conf_finalize() failed: %s", errstr2); readlen = sizeof(readval); - res2 = rd_kafka_conf_get(conf, "client.software.name", - readval, &readlen); + res2 = + rd_kafka_conf_get(conf, "client.software.name", readval, &readlen); RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK, "%d", res2); RD_UT_ASSERT(!strcmp(readval, "aba.-va"), "client.software.* safification failed: \"%s\"", readval); RD_UT_SAY("Safified client.software.name=\"%s\"", readval); readlen = sizeof(readval); - res2 = rd_kafka_conf_get(conf, "client.software.version", - readval, &readlen); + res2 = rd_kafka_conf_get(conf, "client.software.version", readval, + &readlen); RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK, "%d", res2); RD_UT_ASSERT(!strcmp(readval, "1.2.3.4.5----a"), "client.software.* safification failed: \"%s\"", readval); diff --git a/src/rdkafka_conf.h b/src/rdkafka_conf.h index fd39286a6d..3e51e401bd 100644 --- 
a/src/rdkafka_conf.h +++ b/src/rdkafka_conf.h @@ -46,30 +46,28 @@ struct rd_kafka_transport_s; * MessageSet compression codecs */ typedef enum { - RD_KAFKA_COMPRESSION_NONE, - RD_KAFKA_COMPRESSION_GZIP = RD_KAFKA_MSG_ATTR_GZIP, - RD_KAFKA_COMPRESSION_SNAPPY = RD_KAFKA_MSG_ATTR_SNAPPY, - RD_KAFKA_COMPRESSION_LZ4 = RD_KAFKA_MSG_ATTR_LZ4, - RD_KAFKA_COMPRESSION_ZSTD = RD_KAFKA_MSG_ATTR_ZSTD, - RD_KAFKA_COMPRESSION_INHERIT, /* Inherit setting from global conf */ + RD_KAFKA_COMPRESSION_NONE, + RD_KAFKA_COMPRESSION_GZIP = RD_KAFKA_MSG_ATTR_GZIP, + RD_KAFKA_COMPRESSION_SNAPPY = RD_KAFKA_MSG_ATTR_SNAPPY, + RD_KAFKA_COMPRESSION_LZ4 = RD_KAFKA_MSG_ATTR_LZ4, + RD_KAFKA_COMPRESSION_ZSTD = RD_KAFKA_MSG_ATTR_ZSTD, + RD_KAFKA_COMPRESSION_INHERIT, /* Inherit setting from global conf */ RD_KAFKA_COMPRESSION_NUM } rd_kafka_compression_t; static RD_INLINE RD_UNUSED const char * -rd_kafka_compression2str (rd_kafka_compression_t compr) { +rd_kafka_compression2str(rd_kafka_compression_t compr) { static const char *names[RD_KAFKA_COMPRESSION_NUM] = { - [RD_KAFKA_COMPRESSION_NONE] = "none", - [RD_KAFKA_COMPRESSION_GZIP] = "gzip", - [RD_KAFKA_COMPRESSION_SNAPPY] = "snappy", - [RD_KAFKA_COMPRESSION_LZ4] = "lz4", - [RD_KAFKA_COMPRESSION_ZSTD] = "zstd", - [RD_KAFKA_COMPRESSION_INHERIT] = "inherit" - }; + [RD_KAFKA_COMPRESSION_NONE] = "none", + [RD_KAFKA_COMPRESSION_GZIP] = "gzip", + [RD_KAFKA_COMPRESSION_SNAPPY] = "snappy", + [RD_KAFKA_COMPRESSION_LZ4] = "lz4", + [RD_KAFKA_COMPRESSION_ZSTD] = "zstd", + [RD_KAFKA_COMPRESSION_INHERIT] = "inherit"}; static RD_TLS char ret[32]; if ((int)compr < 0 || compr >= RD_KAFKA_COMPRESSION_NUM) { - rd_snprintf(ret, sizeof(ret), - "codec0x%x?", (int)compr); + rd_snprintf(ret, sizeof(ret), "codec0x%x?", (int)compr); return ret; } @@ -80,56 +78,52 @@ rd_kafka_compression2str (rd_kafka_compression_t compr) { * MessageSet compression levels */ typedef enum { - RD_KAFKA_COMPLEVEL_DEFAULT = -1, - RD_KAFKA_COMPLEVEL_MIN = -1, - RD_KAFKA_COMPLEVEL_GZIP_MAX = 9, - 
RD_KAFKA_COMPLEVEL_LZ4_MAX = 12, - RD_KAFKA_COMPLEVEL_SNAPPY_MAX = 0, - RD_KAFKA_COMPLEVEL_ZSTD_MAX = 22, - RD_KAFKA_COMPLEVEL_MAX = 12 + RD_KAFKA_COMPLEVEL_DEFAULT = -1, + RD_KAFKA_COMPLEVEL_MIN = -1, + RD_KAFKA_COMPLEVEL_GZIP_MAX = 9, + RD_KAFKA_COMPLEVEL_LZ4_MAX = 12, + RD_KAFKA_COMPLEVEL_SNAPPY_MAX = 0, + RD_KAFKA_COMPLEVEL_ZSTD_MAX = 22, + RD_KAFKA_COMPLEVEL_MAX = 12 } rd_kafka_complevel_t; typedef enum { - RD_KAFKA_PROTO_PLAINTEXT, - RD_KAFKA_PROTO_SSL, - RD_KAFKA_PROTO_SASL_PLAINTEXT, - RD_KAFKA_PROTO_SASL_SSL, - RD_KAFKA_PROTO_NUM, + RD_KAFKA_PROTO_PLAINTEXT, + RD_KAFKA_PROTO_SSL, + RD_KAFKA_PROTO_SASL_PLAINTEXT, + RD_KAFKA_PROTO_SASL_SSL, + RD_KAFKA_PROTO_NUM, } rd_kafka_secproto_t; typedef enum { - RD_KAFKA_CONFIGURED, - RD_KAFKA_LEARNED, - RD_KAFKA_INTERNAL, + RD_KAFKA_CONFIGURED, + RD_KAFKA_LEARNED, + RD_KAFKA_INTERNAL, RD_KAFKA_LOGICAL } rd_kafka_confsource_t; -static RD_INLINE RD_UNUSED -const char *rd_kafka_confsource2str (rd_kafka_confsource_t source) { - static const char *names[] = { - "configured", - "learned", - "internal", - "logical" - }; +static RD_INLINE RD_UNUSED const char * +rd_kafka_confsource2str(rd_kafka_confsource_t source) { + static const char *names[] = {"configured", "learned", "internal", + "logical"}; return names[source]; } -typedef enum { - _RK_GLOBAL = 0x1, - _RK_PRODUCER = 0x2, - _RK_CONSUMER = 0x4, - _RK_TOPIC = 0x8, - _RK_CGRP = 0x10, - _RK_DEPRECATED = 0x20, - _RK_HIDDEN = 0x40, - _RK_HIGH = 0x80, /* High Importance */ - _RK_MED = 0x100, /* Medium Importance */ +typedef enum { + _RK_GLOBAL = 0x1, + _RK_PRODUCER = 0x2, + _RK_CONSUMER = 0x4, + _RK_TOPIC = 0x8, + _RK_CGRP = 0x10, + _RK_DEPRECATED = 0x20, + _RK_HIDDEN = 0x40, + _RK_HIGH = 0x80, /* High Importance */ + _RK_MED = 0x100, /* Medium Importance */ _RK_EXPERIMENTAL = 0x200, /* Experimental (unsupported) property */ - _RK_SENSITIVE = 0x400 /* The configuration property's value + _RK_SENSITIVE = 0x400 /* The configuration property's value * might contain sensitive 
information. */ } rd_kafka_conf_scope_t; @@ -138,9 +132,9 @@ typedef enum { #define _RK_CGRP _RK_CONSUMER typedef enum { - _RK_CONF_PROP_SET_REPLACE, /* Replace current value (default) */ - _RK_CONF_PROP_SET_ADD, /* Add value (S2F) */ - _RK_CONF_PROP_SET_DEL /* Remove value (S2F) */ + _RK_CONF_PROP_SET_REPLACE, /* Replace current value (default) */ + _RK_CONF_PROP_SET_ADD, /* Add value (S2F) */ + _RK_CONF_PROP_SET_DEL /* Remove value (S2F) */ } rd_kafka_conf_set_mode_t; @@ -158,12 +152,12 @@ typedef enum { typedef enum { RD_KAFKA_SSL_ENDPOINT_ID_NONE, - RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, /**< RFC2818 */ + RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, /**< RFC2818 */ } rd_kafka_ssl_endpoint_id_t; /* Increase in steps of 64 as needed. * This must be larger than sizeof(rd_kafka_[topic_]conf_t) */ -#define RD_KAFKA_CONF_PROPS_IDX_MAX (64*30) +#define RD_KAFKA_CONF_PROPS_IDX_MAX (64 * 30) /** * @struct rd_kafka_anyconf_t @@ -172,7 +166,7 @@ typedef enum { * It provides a way to track which property has been modified. 
*/ struct rd_kafka_anyconf_hdr { - uint64_t modified[RD_KAFKA_CONF_PROPS_IDX_MAX/64]; + uint64_t modified[RD_KAFKA_CONF_PROPS_IDX_MAX / 64]; }; @@ -184,48 +178,48 @@ struct rd_kafka_anyconf_hdr { * */ struct rd_kafka_conf_s { - struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */ - - /* - * Generic configuration - */ - int enabled_events; - int max_msg_size; - int msg_copy_max_size; - int recv_max_msg_size; - int max_inflight; - int metadata_request_timeout_ms; - int metadata_refresh_interval_ms; - int metadata_refresh_fast_cnt; - int metadata_refresh_fast_interval_ms; - int metadata_refresh_sparse; - int metadata_max_age_ms; - int metadata_propagation_max_ms; - int debug; - int broker_addr_ttl; - int broker_addr_family; - int socket_timeout_ms; - int socket_blocking_max_ms; - int socket_sndbuf_size; - int socket_rcvbuf_size; - int socket_keepalive; - int socket_nagle_disable; - int socket_max_fails; - char *client_id_str; - char *brokerlist; - int stats_interval_ms; - int term_sig; - int reconnect_backoff_ms; - int reconnect_backoff_max_ms; - int reconnect_jitter_ms; - int connections_max_idle_ms; - int sparse_connections; - int sparse_connect_intvl; - int api_version_request; - int api_version_request_timeout_ms; - int api_version_fallback_ms; - char *broker_version_fallback; - rd_kafka_secproto_t security_protocol; + struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */ + + /* + * Generic configuration + */ + int enabled_events; + int max_msg_size; + int msg_copy_max_size; + int recv_max_msg_size; + int max_inflight; + int metadata_request_timeout_ms; + int metadata_refresh_interval_ms; + int metadata_refresh_fast_cnt; + int metadata_refresh_fast_interval_ms; + int metadata_refresh_sparse; + int metadata_max_age_ms; + int metadata_propagation_max_ms; + int debug; + int broker_addr_ttl; + int broker_addr_family; + int socket_timeout_ms; + int socket_blocking_max_ms; + int socket_sndbuf_size; + int socket_rcvbuf_size; + int socket_keepalive; + int 
socket_nagle_disable; + int socket_max_fails; + char *client_id_str; + char *brokerlist; + int stats_interval_ms; + int term_sig; + int reconnect_backoff_ms; + int reconnect_backoff_max_ms; + int reconnect_jitter_ms; + int connections_max_idle_ms; + int sparse_connections; + int sparse_connect_intvl; + int api_version_request; + int api_version_request_timeout_ms; + int api_version_fallback_ms; + char *broker_version_fallback; + rd_kafka_secproto_t security_protocol; struct { #if WITH_SSL @@ -255,16 +249,18 @@ struct rd_kafka_conf_s { void *engine_callback_data; char *keystore_location; char *keystore_password; - int endpoint_identification; - int enable_verify; - int (*cert_verify_cb) (rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque); + int endpoint_identification; + int enable_verify; + int (*cert_verify_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque); } ssl; struct { @@ -274,22 +270,23 @@ struct rd_kafka_conf_s { char *service_name; char *kinit_cmd; char *keytab; - int relogin_min_time; + int relogin_min_time; char *username; char *password; #if WITH_SASL_SCRAM /* SCRAM EVP-wrapped hash function * (return value from EVP_shaX()) */ - const void/*EVP_MD*/ *scram_evp; + const void /*EVP_MD*/ *scram_evp; /* SCRAM direct hash function (e.g., SHA256()) */ - unsigned char *(*scram_H) (const unsigned char *d, size_t n, - unsigned char *md); + unsigned char *(*scram_H)(const unsigned char *d, + size_t n, + unsigned char *md); /* Hash size */ - size_t scram_H_size; + size_t scram_H_size; #endif char *oauthbearer_config; - int enable_oauthbearer_unsecure_jwt; - int enable_callback_queue; + int enable_oauthbearer_unsecure_jwt; + int enable_callback_queue; struct { rd_kafka_oauthbearer_method_t method; char 
*token_endpoint_url; @@ -298,10 +295,9 @@ struct rd_kafka_conf_s { char *scope; char *extensions_str; /* SASL/OAUTHBEARER token refresh event callback */ - void (*token_refresh_cb) ( - rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque); + void (*token_refresh_cb)(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque); } oauthbearer; } sasl; @@ -313,195 +309,207 @@ struct rd_kafka_conf_s { /* Interceptors */ struct { /* rd_kafka_interceptor_method_t lists */ - rd_list_t on_conf_set; /* on_conf_set interceptors - * (not copied on conf_dup()) */ - rd_list_t on_conf_dup; /* .. (not copied) */ - rd_list_t on_conf_destroy; /* .. (not copied) */ - rd_list_t on_new; /* .. (copied) */ - rd_list_t on_destroy; /* .. (copied) */ - rd_list_t on_send; /* .. (copied) */ - rd_list_t on_acknowledgement; /* .. (copied) */ - rd_list_t on_consume; /* .. (copied) */ - rd_list_t on_commit; /* .. (copied) */ - rd_list_t on_request_sent; /* .. (copied) */ - rd_list_t on_response_received;/* .. (copied) */ - rd_list_t on_thread_start; /* .. (copied) */ - rd_list_t on_thread_exit; /* .. (copied) */ + rd_list_t on_conf_set; /* on_conf_set interceptors + * (not copied on conf_dup()) */ + rd_list_t on_conf_dup; /* .. (not copied) */ + rd_list_t on_conf_destroy; /* .. (not copied) */ + rd_list_t on_new; /* .. (copied) */ + rd_list_t on_destroy; /* .. (copied) */ + rd_list_t on_send; /* .. (copied) */ + rd_list_t on_acknowledgement; /* .. (copied) */ + rd_list_t on_consume; /* .. (copied) */ + rd_list_t on_commit; /* .. (copied) */ + rd_list_t on_request_sent; /* .. (copied) */ + rd_list_t on_response_received; /* .. (copied) */ + rd_list_t on_thread_start; /* .. (copied) */ + rd_list_t on_thread_exit; /* .. (copied) */ /* rd_strtup_t list */ - rd_list_t config; /* Configuration name=val's - * handled by interceptors. */ + rd_list_t config; /* Configuration name=val's + * handled by interceptors. 
*/ } interceptors; /* Client group configuration */ - int coord_query_intvl_ms; - int max_poll_interval_ms; - - int builtin_features; - /* - * Consumer configuration - */ - int check_crcs; - int queued_min_msgs; - int queued_max_msg_kbytes; + int coord_query_intvl_ms; + int max_poll_interval_ms; + + int builtin_features; + /* + * Consumer configuration + */ + int check_crcs; + int queued_min_msgs; + int queued_max_msg_kbytes; int64_t queued_max_msg_bytes; - int fetch_wait_max_ms; - int fetch_msg_max_bytes; - int fetch_max_bytes; - int fetch_min_bytes; - int fetch_error_backoff_ms; - char *group_id_str; - char *group_instance_id; - int allow_auto_create_topics; + int fetch_wait_max_ms; + int fetch_msg_max_bytes; + int fetch_max_bytes; + int fetch_min_bytes; + int fetch_error_backoff_ms; + char *group_id_str; + char *group_instance_id; + int allow_auto_create_topics; rd_kafka_pattern_list_t *topic_blacklist; struct rd_kafka_topic_conf_s *topic_conf; /* Default topic config * for automatically * subscribed topics. 
*/ int enable_auto_commit; - int enable_auto_offset_store; + int enable_auto_offset_store; int auto_commit_interval_ms; int group_session_timeout_ms; int group_heartbeat_intvl_ms; rd_kafkap_str_t *group_protocol_type; char *partition_assignment_strategy; rd_list_t partition_assignors; - int enabled_assignor_cnt; + int enabled_assignor_cnt; - void (*rebalance_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque); + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque); - void (*offset_commit_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque); + void (*offset_commit_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque); rd_kafka_offset_method_t offset_store_method; rd_kafka_isolation_level_t isolation_level; - int enable_partition_eof; + int enable_partition_eof; - rd_kafkap_str_t *client_rack; + rd_kafkap_str_t *client_rack; - /* - * Producer configuration - */ + /* + * Producer configuration + */ struct { /* * Idempotence */ - int idempotence; /**< Enable Idempotent Producer */ - rd_bool_t gapless; /**< Raise fatal error if - * gapless guarantee can't be - * satisfied. */ + int idempotence; /**< Enable Idempotent Producer */ + rd_bool_t gapless; /**< Raise fatal error if + * gapless guarantee can't be + * satisfied. 
*/ /* * Transactions */ - char *transactional_id; /**< Transactional Id */ - int transaction_timeout_ms; /**< Transaction timeout */ + char *transactional_id; /**< Transactional Id */ + int transaction_timeout_ms; /**< Transaction timeout */ } eos; - int queue_buffering_max_msgs; - int queue_buffering_max_kbytes; + int queue_buffering_max_msgs; + int queue_buffering_max_kbytes; double buffering_max_ms_dbl; /**< This is the configured value */ - rd_ts_t buffering_max_us; /**< This is the value used in the code */ - int queue_backpressure_thres; - int max_retries; - int retry_backoff_ms; - int batch_num_messages; - int batch_size; - rd_kafka_compression_t compression_codec; - int dr_err_only; - int sticky_partition_linger_ms; - - /* Message delivery report callback. - * Called once for each produced message, either on - * successful and acknowledged delivery to the broker in which - * case 'err' is 0, or if the message could not be delivered - * in which case 'err' is non-zero (use rd_kafka_err2str() - * to obtain a human-readable error reason). - * - * If the message was produced with neither RD_KAFKA_MSG_F_FREE - * or RD_KAFKA_MSG_F_COPY set then 'payload' is the original - * pointer provided to rd_kafka_produce(). - * rdkafka will not perform any further actions on 'payload' - * at this point and the application may rd_free the payload data - * at this point. - * - * 'opaque' is 'conf.opaque', while 'msg_opaque' is - * the opaque pointer provided in the rd_kafka_produce() call. 
- */ - void (*dr_cb) (rd_kafka_t *rk, - void *payload, size_t len, - rd_kafka_resp_err_t err, - void *opaque, void *msg_opaque); - - void (*dr_msg_cb) (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque); + rd_ts_t buffering_max_us; /**< This is the value used in the code */ + int queue_backpressure_thres; + int max_retries; + int retry_backoff_ms; + int batch_num_messages; + int batch_size; + rd_kafka_compression_t compression_codec; + int dr_err_only; + int sticky_partition_linger_ms; + + /* Message delivery report callback. + * Called once for each produced message, either on + * successful and acknowledged delivery to the broker in which + * case 'err' is 0, or if the message could not be delivered + * in which case 'err' is non-zero (use rd_kafka_err2str() + * to obtain a human-readable error reason). + * + * If the message was produced with neither RD_KAFKA_MSG_F_FREE + * or RD_KAFKA_MSG_F_COPY set then 'payload' is the original + * pointer provided to rd_kafka_produce(). + * rdkafka will not perform any further actions on 'payload' + * at this point and the application may rd_free the payload data + * at this point. + * + * 'opaque' is 'conf.opaque', while 'msg_opaque' is + * the opaque pointer provided in the rd_kafka_produce() call. 
+ */ + void (*dr_cb)(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque); + + void (*dr_msg_cb)(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque); /* Consume callback */ - void (*consume_cb) (rd_kafka_message_t *rkmessage, void *opaque); + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque); /* Log callback */ - void (*log_cb) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf); - int log_level; - int log_queue; - int log_thread_name; - int log_connection_close; + void (*log_cb)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); + int log_level; + int log_queue; + int log_thread_name; + int log_connection_close; /* PRNG seeding */ - int enable_random_seed; + int enable_random_seed; /* Error callback */ - void (*error_cb) (rd_kafka_t *rk, int err, - const char *reason, void *opaque); - - /* Throttle callback */ - void (*throttle_cb) (rd_kafka_t *rk, const char *broker_name, - int32_t broker_id, int throttle_time_ms, - void *opaque); + void (*error_cb)(rd_kafka_t *rk, + int err, + const char *reason, + void *opaque); + + /* Throttle callback */ + void (*throttle_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque); - /* Stats callback */ - int (*stats_cb) (rd_kafka_t *rk, - char *json, - size_t json_len, - void *opaque); + /* Stats callback */ + int (*stats_cb)(rd_kafka_t *rk, + char *json, + size_t json_len, + void *opaque); /* Socket creation callback */ - int (*socket_cb) (int domain, int type, int protocol, void *opaque); + int (*socket_cb)(int domain, int type, int protocol, void *opaque); /* Connect callback */ - int (*connect_cb) (int sockfd, - const struct sockaddr *addr, - int addrlen, - const char *id, - void *opaque); + int (*connect_cb)(int sockfd, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque); /* Close socket callback */ - int 
(*closesocket_cb) (int sockfd, void *opaque); + int (*closesocket_cb)(int sockfd, void *opaque); - /* File open callback */ - int (*open_cb) (const char *pathname, int flags, mode_t mode, - void *opaque); + /* File open callback */ + int (*open_cb)(const char *pathname, + int flags, + mode_t mode, + void *opaque); /* Background queue event callback */ - void (*background_event_cb) (rd_kafka_t *rk, rd_kafka_event_t *rkev, - void *opaque); + void (*background_event_cb)(rd_kafka_t *rk, + rd_kafka_event_t *rkev, + void *opaque); - /* Opaque passed to callbacks. */ - void *opaque; + /* Opaque passed to callbacks. */ + void *opaque; /* For use with value-less properties. */ - int dummy; + int dummy; /* Admin client defaults */ struct { - int request_timeout_ms; /* AdminOptions.request_timeout */ + int request_timeout_ms; /* AdminOptions.request_timeout */ } admin; @@ -509,7 +517,7 @@ struct rd_kafka_conf_s { * Test mocks */ struct { - int broker_cnt; /**< Number of mock brokers */ + int broker_cnt; /**< Number of mock brokers */ } mock; /* @@ -517,11 +525,11 @@ struct rd_kafka_conf_s { */ struct { /**< Inject errors in ProduceResponse handler */ - rd_kafka_resp_err_t (*handle_ProduceResponse) ( - rd_kafka_t *rk, - int32_t brokerid, - uint64_t msgid, - rd_kafka_resp_err_t err); + rd_kafka_resp_err_t (*handle_ProduceResponse)( + rd_kafka_t *rk, + int32_t brokerid, + uint64_t msgid, + rd_kafka_resp_err_t err); } ut; char *sw_name; /**< Software/client name */ @@ -534,81 +542,90 @@ struct rd_kafka_conf_s { } warn; }; -int rd_kafka_socket_cb_linux (int domain, int type, int protocol, void *opaque); -int rd_kafka_socket_cb_generic (int domain, int type, int protocol, - void *opaque); +int rd_kafka_socket_cb_linux(int domain, int type, int protocol, void *opaque); +int rd_kafka_socket_cb_generic(int domain, + int type, + int protocol, + void *opaque); #ifndef _WIN32 -int rd_kafka_open_cb_linux (const char *pathname, int flags, mode_t mode, - void *opaque); +int 
rd_kafka_open_cb_linux(const char *pathname, + int flags, + mode_t mode, + void *opaque); #endif -int rd_kafka_open_cb_generic (const char *pathname, int flags, mode_t mode, - void *opaque); +int rd_kafka_open_cb_generic(const char *pathname, + int flags, + mode_t mode, + void *opaque); struct rd_kafka_topic_conf_s { - struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */ + struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */ - int required_acks; - int32_t request_timeout_ms; - int message_timeout_ms; + int required_acks; + int32_t request_timeout_ms; + int message_timeout_ms; - int32_t (*partitioner) (const rd_kafka_topic_t *rkt, - const void *keydata, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); - char *partitioner_str; + int32_t (*partitioner)(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + char *partitioner_str; rd_bool_t random_partitioner; /**< rd_true - random - * rd_false - sticky */ + * rd_false - sticky */ int queuing_strategy; /* RD_KAFKA_QUEUE_FIFO|LIFO */ - int (*msg_order_cmp) (const void *a, const void *b); + int (*msg_order_cmp)(const void *a, const void *b); - rd_kafka_compression_t compression_codec; - rd_kafka_complevel_t compression_level; - int produce_offset_report; + rd_kafka_compression_t compression_codec; + rd_kafka_complevel_t compression_level; + int produce_offset_report; - int consume_callback_max_msgs; - int auto_commit; - int auto_commit_interval_ms; - int auto_offset_reset; - char *offset_store_path; - int offset_store_sync_interval_ms; + int consume_callback_max_msgs; + int auto_commit; + int auto_commit_interval_ms; + int auto_offset_reset; + char *offset_store_path; + int offset_store_sync_interval_ms; rd_kafka_offset_method_t offset_store_method; - /* Application provided opaque pointer (this is rkt_opaque) */ - void *opaque; + /* Application provided opaque pointer (this is rkt_opaque) */ + 
void *opaque; }; -void rd_kafka_anyconf_destroy (int scope, void *conf); +void rd_kafka_anyconf_destroy(int scope, void *conf); -rd_bool_t rd_kafka_conf_is_modified (const rd_kafka_conf_t *conf, - const char *name); +rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf, + const char *name); -void rd_kafka_desensitize_str (char *str); +void rd_kafka_desensitize_str(char *str); -void rd_kafka_conf_desensitize (rd_kafka_conf_t *conf); -void rd_kafka_topic_conf_desensitize (rd_kafka_topic_conf_t *tconf); +void rd_kafka_conf_desensitize(rd_kafka_conf_t *conf); +void rd_kafka_topic_conf_desensitize(rd_kafka_topic_conf_t *tconf); -const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, - rd_kafka_conf_t *conf); -const char *rd_kafka_topic_conf_finalize (rd_kafka_type_t cltype, - const rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf); +const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype, + rd_kafka_conf_t *conf); +const char *rd_kafka_topic_conf_finalize(rd_kafka_type_t cltype, + const rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf); -int rd_kafka_conf_warn (rd_kafka_t *rk); +int rd_kafka_conf_warn(rd_kafka_t *rk); -void rd_kafka_anyconf_dump_dbg (rd_kafka_t *rk, int scope, const void *conf, - const char *description); +void rd_kafka_anyconf_dump_dbg(rd_kafka_t *rk, + int scope, + const void *conf, + const char *description); #include "rdkafka_confval.h" -int unittest_conf (void); +int unittest_conf(void); #endif /* _RDKAFKA_CONF_H_ */ diff --git a/src/rdkafka_confval.h b/src/rdkafka_confval.h index 56ec875ea0..3f2bad549e 100644 --- a/src/rdkafka_confval.h +++ b/src/rdkafka_confval.h @@ -55,40 +55,41 @@ typedef struct rd_kafka_confval_s { int is_enabled; /**< Confval is enabled. 
*/ union { struct { - int v; /**< Current value */ - int vmin; /**< Minimum value (inclusive) */ - int vmax; /**< Maximum value (inclusive) */ - int vdef; /**< Default value */ + int v; /**< Current value */ + int vmin; /**< Minimum value (inclusive) */ + int vmax; /**< Maximum value (inclusive) */ + int vdef; /**< Default value */ } INT; struct { - char *v; /**< Current value */ - int allowempty; /**< Allow empty string as value */ - size_t minlen; /**< Minimum string length excl \0 */ - size_t maxlen; /**< Maximum string length excl \0 */ - const char *vdef; /**< Default value */ + char *v; /**< Current value */ + int allowempty; /**< Allow empty string as value */ + size_t minlen; /**< Minimum string length excl \0 */ + size_t maxlen; /**< Maximum string length excl \0 */ + const char *vdef; /**< Default value */ } STR; - void *PTR; /**< Pointer */ + void *PTR; /**< Pointer */ } u; } rd_kafka_confval_t; -void rd_kafka_confval_init_int (rd_kafka_confval_t *confval, - const char *name, - int vmin, int vmax, int vdef); -void rd_kafka_confval_init_ptr (rd_kafka_confval_t *confval, - const char *name); -void rd_kafka_confval_disable (rd_kafka_confval_t *confval, const char *name); +void rd_kafka_confval_init_int(rd_kafka_confval_t *confval, + const char *name, + int vmin, + int vmax, + int vdef); +void rd_kafka_confval_init_ptr(rd_kafka_confval_t *confval, const char *name); +void rd_kafka_confval_disable(rd_kafka_confval_t *confval, const char *name); -rd_kafka_resp_err_t -rd_kafka_confval_set_type (rd_kafka_confval_t *confval, - rd_kafka_confval_type_t valuetype, - const void *valuep, - char *errstr, size_t errstr_size); +rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval, + rd_kafka_confval_type_t valuetype, + const void *valuep, + char *errstr, + size_t errstr_size); -int rd_kafka_confval_get_int (const rd_kafka_confval_t *confval); -const char *rd_kafka_confval_get_str (const rd_kafka_confval_t *confval); -void *rd_kafka_confval_get_ptr 
(const rd_kafka_confval_t *confval); +int rd_kafka_confval_get_int(const rd_kafka_confval_t *confval); +const char *rd_kafka_confval_get_str(const rd_kafka_confval_t *confval); +void *rd_kafka_confval_get_ptr(const rd_kafka_confval_t *confval); /**@}*/ diff --git a/src/rdkafka_coord.c b/src/rdkafka_coord.c index 6801d751ac..03c3c0c430 100644 --- a/src/rdkafka_coord.c +++ b/src/rdkafka_coord.c @@ -37,8 +37,8 @@ * @{ * */ -void rd_kafka_coord_cache_entry_destroy (rd_kafka_coord_cache_t *cc, - rd_kafka_coord_cache_entry_t *cce) { +void rd_kafka_coord_cache_entry_destroy(rd_kafka_coord_cache_t *cc, + rd_kafka_coord_cache_entry_t *cce) { rd_assert(cc->cc_cnt > 0); rd_free(cce->cce_coordkey); rd_kafka_broker_destroy(cce->cce_rkb); @@ -53,7 +53,7 @@ void rd_kafka_coord_cache_entry_destroy (rd_kafka_coord_cache_t *cc, * * @locality rdkafka main thread */ -void rd_kafka_coord_cache_expire (rd_kafka_coord_cache_t *cc) { +void rd_kafka_coord_cache_expire(rd_kafka_coord_cache_t *cc) { rd_kafka_coord_cache_entry_t *cce, *next; rd_ts_t expire = rd_clock() - cc->cc_expire_thres; @@ -71,9 +71,9 @@ void rd_kafka_coord_cache_expire (rd_kafka_coord_cache_t *cc) { static rd_kafka_coord_cache_entry_t * -rd_kafka_coord_cache_find (rd_kafka_coord_cache_t *cc, - rd_kafka_coordtype_t coordtype, - const char *coordkey) { +rd_kafka_coord_cache_find(rd_kafka_coord_cache_t *cc, + rd_kafka_coordtype_t coordtype, + const char *coordkey) { rd_kafka_coord_cache_entry_t *cce; TAILQ_FOREACH(cce, &cc->cc_entries, cce_link) { @@ -83,10 +83,9 @@ rd_kafka_coord_cache_find (rd_kafka_coord_cache_t *cc, cce->cce_ts_used = rd_clock(); if (TAILQ_FIRST(&cc->cc_entries) != cce) { /* Move to head of list */ - TAILQ_REMOVE(&cc->cc_entries, - cce, cce_link); - TAILQ_INSERT_HEAD(&cc->cc_entries, - cce, cce_link); + TAILQ_REMOVE(&cc->cc_entries, cce, cce_link); + TAILQ_INSERT_HEAD(&cc->cc_entries, cce, + cce_link); } return cce; } @@ -96,9 +95,9 @@ rd_kafka_coord_cache_find (rd_kafka_coord_cache_t *cc, } 
-rd_kafka_broker_t *rd_kafka_coord_cache_get (rd_kafka_coord_cache_t *cc, - rd_kafka_coordtype_t coordtype, - const char *coordkey) { +rd_kafka_broker_t *rd_kafka_coord_cache_get(rd_kafka_coord_cache_t *cc, + rd_kafka_coordtype_t coordtype, + const char *coordkey) { rd_kafka_coord_cache_entry_t *cce; cce = rd_kafka_coord_cache_find(cc, coordtype, coordkey); @@ -111,25 +110,24 @@ rd_kafka_broker_t *rd_kafka_coord_cache_get (rd_kafka_coord_cache_t *cc, -static void rd_kafka_coord_cache_add (rd_kafka_coord_cache_t *cc, - rd_kafka_coordtype_t coordtype, - const char *coordkey, - rd_kafka_broker_t *rkb) { +static void rd_kafka_coord_cache_add(rd_kafka_coord_cache_t *cc, + rd_kafka_coordtype_t coordtype, + const char *coordkey, + rd_kafka_broker_t *rkb) { rd_kafka_coord_cache_entry_t *cce; if (!(cce = rd_kafka_coord_cache_find(cc, coordtype, coordkey))) { if (cc->cc_cnt > 10) { /* Not enough room in cache, remove least used entry */ - rd_kafka_coord_cache_entry_t *rem = - TAILQ_LAST(&cc->cc_entries, - rd_kafka_coord_cache_head_s); + rd_kafka_coord_cache_entry_t *rem = TAILQ_LAST( + &cc->cc_entries, rd_kafka_coord_cache_head_s); rd_kafka_coord_cache_entry_destroy(cc, rem); } - cce = rd_calloc(1, sizeof(*cce)); + cce = rd_calloc(1, sizeof(*cce)); cce->cce_coordtype = coordtype; - cce->cce_coordkey = rd_strdup(coordkey); - cce->cce_ts_used = rd_clock(); + cce->cce_coordkey = rd_strdup(coordkey); + cce->cce_ts_used = rd_clock(); TAILQ_INSERT_HEAD(&cc->cc_entries, cce, cce_link); cc->cc_cnt++; @@ -152,8 +150,8 @@ static void rd_kafka_coord_cache_add (rd_kafka_coord_cache_t *cc, * @locality rdkafka main thread * @locks none */ -void rd_kafka_coord_cache_evict (rd_kafka_coord_cache_t *cc, - rd_kafka_broker_t *rkb) { +void rd_kafka_coord_cache_evict(rd_kafka_coord_cache_t *cc, + rd_kafka_broker_t *rkb) { rd_kafka_coord_cache_entry_t *cce, *tmp; TAILQ_FOREACH_SAFE(cce, &cc->cc_entries, cce_link, tmp) { @@ -165,7 +163,7 @@ void rd_kafka_coord_cache_evict (rd_kafka_coord_cache_t 
*cc, /** * @brief Destroy all coord cache entries. */ -void rd_kafka_coord_cache_destroy (rd_kafka_coord_cache_t *cc) { +void rd_kafka_coord_cache_destroy(rd_kafka_coord_cache_t *cc) { rd_kafka_coord_cache_entry_t *cce; while ((cce = TAILQ_FIRST(&cc->cc_entries))) @@ -178,10 +176,10 @@ void rd_kafka_coord_cache_destroy (rd_kafka_coord_cache_t *cc) { * * Locking of the coord-cache is up to the owner. */ -void rd_kafka_coord_cache_init (rd_kafka_coord_cache_t *cc, - int expire_thres_ms) { +void rd_kafka_coord_cache_init(rd_kafka_coord_cache_t *cc, + int expire_thres_ms) { TAILQ_INIT(&cc->cc_entries); - cc->cc_cnt = 0; + cc->cc_cnt = 0; cc->cc_expire_thres = expire_thres_ms * 1000; } @@ -196,8 +194,7 @@ void rd_kafka_coord_cache_init (rd_kafka_coord_cache_t *cc, -static void rd_kafka_coord_req_fsm (rd_kafka_t *rk, rd_kafka_coord_req_t *creq); - +static void rd_kafka_coord_req_fsm(rd_kafka_t *rk, rd_kafka_coord_req_t *creq); @@ -215,28 +212,28 @@ static void rd_kafka_coord_req_fsm (rd_kafka_t *rk, rd_kafka_coord_req_t *creq); * @locality rdkafka main thread * @locks none */ -void rd_kafka_coord_req (rd_kafka_t *rk, - rd_kafka_coordtype_t coordtype, - const char *coordkey, - rd_kafka_send_req_cb_t *send_req_cb, - rd_kafka_op_t *rko, - int timeout_ms, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *reply_opaque) { +void rd_kafka_coord_req(rd_kafka_t *rk, + rd_kafka_coordtype_t coordtype, + const char *coordkey, + rd_kafka_send_req_cb_t *send_req_cb, + rd_kafka_op_t *rko, + int timeout_ms, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *reply_opaque) { rd_kafka_coord_req_t *creq; - creq = rd_calloc(1, sizeof(*creq)); - creq->creq_coordtype = coordtype; - creq->creq_coordkey = rd_strdup(coordkey); - creq->creq_ts_timeout = rd_timeout_init(timeout_ms); - creq->creq_send_req_cb = send_req_cb; - creq->creq_rko = rko; - creq->creq_replyq = replyq; - creq->creq_resp_cb = resp_cb; + creq = rd_calloc(1, sizeof(*creq)); + creq->creq_coordtype 
= coordtype; + creq->creq_coordkey = rd_strdup(coordkey); + creq->creq_ts_timeout = rd_timeout_init(timeout_ms); + creq->creq_send_req_cb = send_req_cb; + creq->creq_rko = rko; + creq->creq_replyq = replyq; + creq->creq_resp_cb = resp_cb; creq->creq_reply_opaque = reply_opaque; - creq->creq_refcnt = 1; - creq->creq_done = rd_false; + creq->creq_refcnt = 1; + creq->creq_done = rd_false; TAILQ_INSERT_TAIL(&rk->rk_coord_reqs, creq, creq_link); @@ -252,9 +249,9 @@ void rd_kafka_coord_req (rd_kafka_t *rk, * * @returns true if creq was destroyed, else false. */ -static rd_bool_t -rd_kafka_coord_req_destroy (rd_kafka_t *rk, rd_kafka_coord_req_t *creq, - rd_bool_t done) { +static rd_bool_t rd_kafka_coord_req_destroy(rd_kafka_t *rk, + rd_kafka_coord_req_t *creq, + rd_bool_t done) { rd_assert(creq->creq_refcnt > 0); @@ -278,41 +275,41 @@ rd_kafka_coord_req_destroy (rd_kafka_t *rk, rd_kafka_coord_req_t *creq, return rd_true; } -static void rd_kafka_coord_req_keep (rd_kafka_coord_req_t *creq) { +static void rd_kafka_coord_req_keep(rd_kafka_coord_req_t *creq) { creq->creq_refcnt++; } -static void rd_kafka_coord_req_fail (rd_kafka_t *rk, rd_kafka_coord_req_t *creq, - rd_kafka_resp_err_t err) { +static void rd_kafka_coord_req_fail(rd_kafka_t *rk, + rd_kafka_coord_req_t *creq, + rd_kafka_resp_err_t err) { rd_kafka_op_t *reply; rd_kafka_buf_t *rkbuf; - reply = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF); - reply->rko_rk = rk; /* Set rk since the rkbuf will not have a rkb - * to reach it. */ + reply = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF); + reply->rko_rk = rk; /* Set rk since the rkbuf will not have a rkb + * to reach it. 
*/ reply->rko_err = err; /* Need a dummy rkbuf to pass state to the buf resp_cb */ - rkbuf = rd_kafka_buf_new(0, 0); - rkbuf->rkbuf_cb = creq->creq_resp_cb; - rkbuf->rkbuf_opaque = creq->creq_reply_opaque; + rkbuf = rd_kafka_buf_new(0, 0); + rkbuf->rkbuf_cb = creq->creq_resp_cb; + rkbuf->rkbuf_opaque = creq->creq_reply_opaque; reply->rko_u.xbuf.rkbuf = rkbuf; rd_kafka_replyq_enq(&creq->creq_replyq, reply, 0); - rd_kafka_coord_req_destroy(rk, creq, rd_true/*done*/); + rd_kafka_coord_req_destroy(rk, creq, rd_true /*done*/); } -static void -rd_kafka_coord_req_handle_FindCoordinator (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_coord_req_handle_FindCoordinator(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; - rd_kafka_coord_req_t *creq = opaque; + rd_kafka_coord_req_t *creq = opaque; int16_t ErrorCode; rd_kafkap_str_t Host; int32_t NodeId, Port; @@ -339,8 +336,8 @@ rd_kafka_coord_req_handle_FindCoordinator (rd_kafka_t *rk, rd_kafkap_str_t ErrorMsg; rd_kafka_buf_read_str(rkbuf, &ErrorMsg); if (ErrorCode) - rd_snprintf(errstr, sizeof(errstr), - "%.*s", RD_KAFKAP_STR_PR(&ErrorMsg)); + rd_snprintf(errstr, sizeof(errstr), "%.*s", + RD_KAFKAP_STR_PR(&ErrorMsg)); } if ((err = ErrorCode)) @@ -366,44 +363,40 @@ rd_kafka_coord_req_handle_FindCoordinator (rd_kafka_t *rk, } - rd_kafka_coord_cache_add(&rk->rk_coord_cache, - creq->creq_coordtype, - creq->creq_coordkey, - coord); + rd_kafka_coord_cache_add(&rk->rk_coord_cache, creq->creq_coordtype, + creq->creq_coordkey, coord); rd_kafka_broker_destroy(coord); /* refcnt from broker_update() */ rd_kafka_coord_req_fsm(rk, creq); /* Drop refcount from req_fsm() */ - rd_kafka_coord_req_destroy(rk, creq, rd_false/*!done*/); + rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/); return; - 
err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: actions = rd_kafka_err_action( - rkb, err, request, + rkb, err, request, - RD_KAFKA_ERR_ACTION_SPECIAL, - RD_KAFKA_RESP_ERR__DESTROY, + RD_KAFKA_ERR_ACTION_SPECIAL, RD_KAFKA_RESP_ERR__DESTROY, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED, + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED, + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED, - RD_KAFKA_ERR_ACTION_REFRESH, - RD_KAFKA_RESP_ERR__TRANSPORT, + RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR__TRANSPORT, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, + RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) { rd_kafka_coord_req_fail(rk, creq, err); @@ -417,14 +410,11 @@ rd_kafka_coord_req_handle_FindCoordinator (rd_kafka_t *rk, /* Rely on state broadcast to trigger retry */ /* Drop refcount from req_fsm() */ - rd_kafka_coord_req_destroy(rk, creq, rd_false/*!done*/); + rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/); } - - - /** * @brief State machine for async coordinator requests. 
* @@ -433,8 +423,7 @@ rd_kafka_coord_req_handle_FindCoordinator (rd_kafka_t *rk, * @locality any * @locks none */ -static void -rd_kafka_coord_req_fsm (rd_kafka_t *rk, rd_kafka_coord_req_t *creq) { +static void rd_kafka_coord_req_fsm(rd_kafka_t *rk, rd_kafka_coord_req_t *creq) { rd_kafka_broker_t *rkb; rd_kafka_resp_err_t err; @@ -450,9 +439,8 @@ rd_kafka_coord_req_fsm (rd_kafka_t *rk, rd_kafka_coord_req_t *creq) { } /* Check cache first */ - rkb = rd_kafka_coord_cache_get(&rk->rk_coord_cache, - creq->creq_coordtype, - creq->creq_coordkey); + rkb = rd_kafka_coord_cache_get( + &rk->rk_coord_cache, creq->creq_coordtype, creq->creq_coordkey); if (rkb) { if (rd_kafka_broker_is_up(rkb)) { @@ -471,7 +459,7 @@ rd_kafka_coord_req_fsm (rd_kafka_t *rk, rd_kafka_coord_req_t *creq) { rd_kafka_coord_req_fail(rk, creq, err); } else { rd_kafka_coord_req_destroy(rk, creq, - rd_true/*done*/); + rd_true /*done*/); } } else { @@ -500,17 +488,16 @@ rd_kafka_coord_req_fsm (rd_kafka_t *rk, rd_kafka_coord_req_t *creq) { * the state machine. 
*/ rd_kafka_coord_req_keep(creq); err = rd_kafka_FindCoordinatorRequest( - rkb, creq->creq_coordtype, creq->creq_coordkey, - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_coord_req_handle_FindCoordinator, - creq); + rkb, creq->creq_coordtype, creq->creq_coordkey, + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_coord_req_handle_FindCoordinator, creq); rd_kafka_broker_destroy(rkb); if (err) { rd_kafka_coord_req_fail(rk, creq, err); /* from keep() above */ - rd_kafka_coord_req_destroy(rk, creq, rd_false/*!done*/); + rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/); } } @@ -523,14 +510,14 @@ rd_kafka_coord_req_fsm (rd_kafka_t *rk, rd_kafka_coord_req_t *creq) { * @locality rdkafka main thread * @locks none */ -void rd_kafka_coord_rkb_monitor_cb (rd_kafka_broker_t *rkb) { +void rd_kafka_coord_rkb_monitor_cb(rd_kafka_broker_t *rkb) { rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_coord_req_t *creq, *tmp; /* Run through all coord_req fsms */ TAILQ_FOREACH_SAFE(creq, &rk->rk_coord_reqs, creq_link, tmp) - rd_kafka_coord_req_fsm(rk, creq); + rd_kafka_coord_req_fsm(rk, creq); } @@ -538,7 +525,7 @@ void rd_kafka_coord_rkb_monitor_cb (rd_kafka_broker_t *rkb) { /** * @brief Instance is terminating: destroy all coord reqs */ -void rd_kafka_coord_reqs_term (rd_kafka_t *rk) { +void rd_kafka_coord_reqs_term(rd_kafka_t *rk) { rd_kafka_coord_req_t *creq; while ((creq = TAILQ_FIRST(&rk->rk_coord_reqs))) @@ -549,7 +536,7 @@ void rd_kafka_coord_reqs_term (rd_kafka_t *rk) { /** * @brief Initialize coord reqs list. 
*/ -void rd_kafka_coord_reqs_init (rd_kafka_t *rk) { +void rd_kafka_coord_reqs_init(rd_kafka_t *rk) { TAILQ_INIT(&rk->rk_coord_reqs); } diff --git a/src/rdkafka_coord.h b/src/rdkafka_coord.h index 2387cfc4e9..488c181a03 100644 --- a/src/rdkafka_coord.h +++ b/src/rdkafka_coord.h @@ -30,8 +30,8 @@ #define _RDKAFKA_COORD_H_ -typedef TAILQ_HEAD(rd_kafka_coord_cache_head_s, rd_kafka_coord_cache_entry_s) - rd_kafka_coord_cache_head_t; +typedef TAILQ_HEAD(rd_kafka_coord_cache_head_s, + rd_kafka_coord_cache_entry_s) rd_kafka_coord_cache_head_t; /** * @brief Coordinator cache entry @@ -39,10 +39,10 @@ typedef TAILQ_HEAD(rd_kafka_coord_cache_head_s, rd_kafka_coord_cache_entry_s) typedef struct rd_kafka_coord_cache_entry_s { TAILQ_ENTRY(rd_kafka_coord_cache_entry_s) cce_link; rd_kafka_coordtype_t cce_coordtype; /**< Coordinator type */ - char *cce_coordkey; /**< Coordinator type key, + char *cce_coordkey; /**< Coordinator type key, * e.g the group id */ - rd_ts_t cce_ts_used; /**< Last used timestamp */ - rd_kafka_broker_t *cce_rkb; /**< The cached coordinator */ + rd_ts_t cce_ts_used; /**< Last used timestamp */ + rd_kafka_broker_t *cce_rkb; /**< The cached coordinator */ } rd_kafka_coord_cache_entry_t; @@ -50,21 +50,19 @@ typedef struct rd_kafka_coord_cache_entry_s { * @brief Coordinator cache */ typedef struct rd_kafka_coord_cache_s { - rd_kafka_coord_cache_head_t cc_entries; /**< Cache entries */ - int cc_cnt; /**< Number of entries */ - rd_ts_t cc_expire_thres; /**< Entries not used in - * this long will be - * expired */ + rd_kafka_coord_cache_head_t cc_entries; /**< Cache entries */ + int cc_cnt; /**< Number of entries */ + rd_ts_t cc_expire_thres; /**< Entries not used in + * this long will be + * expired */ } rd_kafka_coord_cache_t; -void rd_kafka_coord_cache_expire (rd_kafka_coord_cache_t *cc); -void rd_kafka_coord_cache_evict (rd_kafka_coord_cache_t *cc, - rd_kafka_broker_t *rkb); -void rd_kafka_coord_cache_destroy (rd_kafka_coord_cache_t *cc); -void 
rd_kafka_coord_cache_init (rd_kafka_coord_cache_t *cc, - int expire_thres_ms); - +void rd_kafka_coord_cache_expire(rd_kafka_coord_cache_t *cc); +void rd_kafka_coord_cache_evict(rd_kafka_coord_cache_t *cc, + rd_kafka_broker_t *rkb); +void rd_kafka_coord_cache_destroy(rd_kafka_coord_cache_t *cc); +void rd_kafka_coord_cache_init(rd_kafka_coord_cache_t *cc, int expire_thres_ms); @@ -78,52 +76,52 @@ void rd_kafka_coord_cache_init (rd_kafka_coord_cache_t *cc, */ typedef struct rd_kafka_coord_req_s { TAILQ_ENTRY(rd_kafka_coord_req_s) creq_link; /**< rk_coord_reqs */ - rd_kafka_coordtype_t creq_coordtype; /**< Coordinator type */ - char *creq_coordkey; /**< Coordinator key */ + rd_kafka_coordtype_t creq_coordtype; /**< Coordinator type */ + char *creq_coordkey; /**< Coordinator key */ - rd_kafka_op_t *creq_rko; /**< Requester's rko that is - * provided to creq_send_req_cb - * (optional). */ - rd_ts_t creq_ts_timeout; /**< Absolute timeout. - * Will fail with an error - * code pertaining to the - * current state */ + rd_kafka_op_t *creq_rko; /**< Requester's rko that is + * provided to creq_send_req_cb + * (optional). */ + rd_ts_t creq_ts_timeout; /**< Absolute timeout. + * Will fail with an error + * code pertaining to the + * current state */ rd_kafka_send_req_cb_t *creq_send_req_cb; /**< Sender callback */ - rd_kafka_replyq_t creq_replyq; /**< Reply queue */ - rd_kafka_resp_cb_t *creq_resp_cb; /**< Reply queue response - * parsing callback for the - * request sent by - * send_req_cb */ - void *creq_reply_opaque; /**< Opaque passed to - * creq_send_req_cb and - * creq_resp_cb. */ - - int creq_refcnt; /**< Internal reply queue for - * FindCoordinator requests - * which is forwarded to the - * rk_ops queue, but allows - * destroying the creq even - * with outstanding - * FindCoordinator requests. 
*/ - rd_bool_t creq_done; /**< True if request was sent */ + rd_kafka_replyq_t creq_replyq; /**< Reply queue */ + rd_kafka_resp_cb_t *creq_resp_cb; /**< Reply queue response + * parsing callback for the + * request sent by + * send_req_cb */ + void *creq_reply_opaque; /**< Opaque passed to + * creq_send_req_cb and + * creq_resp_cb. */ + + int creq_refcnt; /**< Internal reply queue for + * FindCoordinator requests + * which is forwarded to the + * rk_ops queue, but allows + * destroying the creq even + * with outstanding + * FindCoordinator requests. */ + rd_bool_t creq_done; /**< True if request was sent */ } rd_kafka_coord_req_t; -void rd_kafka_coord_req (rd_kafka_t *rk, - rd_kafka_coordtype_t coordtype, - const char *coordkey, - rd_kafka_send_req_cb_t *send_req_cb, - rd_kafka_op_t *rko, - int timeout_ms, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *reply_opaque); +void rd_kafka_coord_req(rd_kafka_t *rk, + rd_kafka_coordtype_t coordtype, + const char *coordkey, + rd_kafka_send_req_cb_t *send_req_cb, + rd_kafka_op_t *rko, + int timeout_ms, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *reply_opaque); -void rd_kafka_coord_rkb_monitor_cb (rd_kafka_broker_t *rkb); +void rd_kafka_coord_rkb_monitor_cb(rd_kafka_broker_t *rkb); -void rd_kafka_coord_reqs_term (rd_kafka_t *rk); -void rd_kafka_coord_reqs_init (rd_kafka_t *rk); +void rd_kafka_coord_reqs_term(rd_kafka_t *rk); +void rd_kafka_coord_reqs_init(rd_kafka_t *rk); #endif /* _RDKAFKA_COORD_H_ */ diff --git a/src/rdkafka_error.c b/src/rdkafka_error.c index 23d0433053..d9d980fb50 100644 --- a/src/rdkafka_error.c +++ b/src/rdkafka_error.c @@ -38,7 +38,7 @@ #include -void rd_kafka_error_destroy (rd_kafka_error_t *error) { +void rd_kafka_error_destroy(rd_kafka_error_t *error) { if (error) rd_free(error); } @@ -47,8 +47,8 @@ void rd_kafka_error_destroy (rd_kafka_error_t *error) { /** * @brief Creates a new error object using the optional va-args format list. 
*/ -rd_kafka_error_t *rd_kafka_error_new_v (rd_kafka_resp_err_t code, - const char *fmt, va_list ap) { +rd_kafka_error_t * +rd_kafka_error_new_v(rd_kafka_resp_err_t code, const char *fmt, va_list ap) { rd_kafka_error_t *error; ssize_t strsz = 0; @@ -59,14 +59,14 @@ rd_kafka_error_t *rd_kafka_error_new_v (rd_kafka_resp_err_t code, va_end(ap2); } - error = rd_malloc(sizeof(*error) + strsz); - error->code = code; - error->fatal = rd_false; - error->retriable = rd_false; + error = rd_malloc(sizeof(*error) + strsz); + error->code = code; + error->fatal = rd_false; + error->retriable = rd_false; error->txn_requires_abort = rd_false; if (strsz > 0) { - error->errstr = (char *)(error+1); + error->errstr = (char *)(error + 1); rd_vsnprintf(error->errstr, strsz, fmt, ap); } else { error->errstr = NULL; @@ -75,7 +75,7 @@ rd_kafka_error_t *rd_kafka_error_new_v (rd_kafka_resp_err_t code, return error; } -rd_kafka_error_t *rd_kafka_error_copy (const rd_kafka_error_t *src) { +rd_kafka_error_t *rd_kafka_error_copy(const rd_kafka_error_t *src) { rd_kafka_error_t *error; ssize_t strsz = 0; @@ -83,14 +83,14 @@ rd_kafka_error_t *rd_kafka_error_copy (const rd_kafka_error_t *src) { strsz = strlen(src->errstr); } - error = rd_malloc(sizeof(*error) + strsz); - error->code = src->code; - error->fatal = src->fatal; - error->retriable = src->retriable; + error = rd_malloc(sizeof(*error) + strsz); + error->code = src->code; + error->fatal = src->fatal; + error->retriable = src->retriable; error->txn_requires_abort = src->txn_requires_abort; if (strsz > 0) { - error->errstr = (char *)(error+1); + error->errstr = (char *)(error + 1); rd_strlcpy(error->errstr, src->errstr, strsz); } else { error->errstr = NULL; @@ -100,8 +100,8 @@ rd_kafka_error_t *rd_kafka_error_copy (const rd_kafka_error_t *src) { } -rd_kafka_error_t *rd_kafka_error_new (rd_kafka_resp_err_t code, - const char *fmt, ...) { +rd_kafka_error_t * +rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...) 
{ rd_kafka_error_t *error; va_list ap; @@ -112,8 +112,8 @@ rd_kafka_error_t *rd_kafka_error_new (rd_kafka_resp_err_t code, return error; } -rd_kafka_error_t *rd_kafka_error_new_fatal (rd_kafka_resp_err_t code, - const char *fmt, ...) { +rd_kafka_error_t * +rd_kafka_error_new_fatal(rd_kafka_resp_err_t code, const char *fmt, ...) { rd_kafka_error_t *error; va_list ap; @@ -126,8 +126,8 @@ rd_kafka_error_t *rd_kafka_error_new_fatal (rd_kafka_resp_err_t code, return error; } -rd_kafka_error_t *rd_kafka_error_new_retriable (rd_kafka_resp_err_t code, - const char *fmt, ...) { +rd_kafka_error_t * +rd_kafka_error_new_retriable(rd_kafka_resp_err_t code, const char *fmt, ...) { rd_kafka_error_t *error; va_list ap; @@ -140,8 +140,10 @@ rd_kafka_error_t *rd_kafka_error_new_retriable (rd_kafka_resp_err_t code, return error; } -rd_kafka_error_t *rd_kafka_error_new_txn_requires_abort ( - rd_kafka_resp_err_t code, const char *fmt, ...) { +rd_kafka_error_t * +rd_kafka_error_new_txn_requires_abort(rd_kafka_resp_err_t code, + const char *fmt, + ...) { rd_kafka_error_t *error; va_list ap; @@ -155,43 +157,43 @@ rd_kafka_error_t *rd_kafka_error_new_txn_requires_abort ( } -rd_kafka_resp_err_t rd_kafka_error_code (const rd_kafka_error_t *error) { +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error) { return error ? error->code : RD_KAFKA_RESP_ERR_NO_ERROR; } -const char *rd_kafka_error_name (const rd_kafka_error_t *error) { +const char *rd_kafka_error_name(const rd_kafka_error_t *error) { return error ? rd_kafka_err2name(error->code) : ""; } -const char *rd_kafka_error_string (const rd_kafka_error_t *error) { +const char *rd_kafka_error_string(const rd_kafka_error_t *error) { if (!error) return ""; return error->errstr ? error->errstr : rd_kafka_err2str(error->code); } -int rd_kafka_error_is_fatal (const rd_kafka_error_t *error) { +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error) { return error && error->fatal ? 
1 : 0; } -int rd_kafka_error_is_retriable (const rd_kafka_error_t *error) { +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error) { return error && error->retriable ? 1 : 0; } -int rd_kafka_error_txn_requires_abort (const rd_kafka_error_t *error) { +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error) { return error && error->txn_requires_abort ? 1 : 0; } -void rd_kafka_error_set_fatal (rd_kafka_error_t *error) { +void rd_kafka_error_set_fatal(rd_kafka_error_t *error) { error->fatal = rd_true; } -void rd_kafka_error_set_retriable (rd_kafka_error_t *error) { +void rd_kafka_error_set_retriable(rd_kafka_error_t *error) { error->retriable = rd_true; } -void rd_kafka_error_set_txn_requires_abort (rd_kafka_error_t *error) { +void rd_kafka_error_set_txn_requires_abort(rd_kafka_error_t *error) { error->txn_requires_abort = rd_true; } @@ -203,9 +205,9 @@ void rd_kafka_error_set_txn_requires_abort (rd_kafka_error_t *error) { * * @remark The \p error object is destroyed. */ -rd_kafka_resp_err_t -rd_kafka_error_to_legacy (rd_kafka_error_t *error, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t rd_kafka_error_to_legacy(rd_kafka_error_t *error, + char *errstr, + size_t errstr_size) { rd_kafka_resp_err_t err = error->code; rd_snprintf(errstr, errstr_size, "%s", rd_kafka_error_string(error)); diff --git a/src/rdkafka_error.h b/src/rdkafka_error.h index e790e240c4..c2f02dffc6 100644 --- a/src/rdkafka_error.h +++ b/src/rdkafka_error.h @@ -45,32 +45,34 @@ struct rd_kafka_error_s { * Possibly NULL. */ rd_bool_t fatal; /**< This error is a fatal error. */ rd_bool_t retriable; /**< Operation is retriable. 
*/ - rd_bool_t txn_requires_abort; /**< This is an abortable transaction error.*/ + rd_bool_t + txn_requires_abort; /**< This is an abortable transaction error.*/ }; -rd_kafka_error_t *rd_kafka_error_new_v (rd_kafka_resp_err_t code, - const char *fmt, va_list ap); +rd_kafka_error_t * +rd_kafka_error_new_v(rd_kafka_resp_err_t code, const char *fmt, va_list ap); -rd_kafka_error_t *rd_kafka_error_copy (const rd_kafka_error_t *src); +rd_kafka_error_t *rd_kafka_error_copy(const rd_kafka_error_t *src); -void rd_kafka_error_set_fatal (rd_kafka_error_t *error); -void rd_kafka_error_set_retriable (rd_kafka_error_t *error); -void rd_kafka_error_set_txn_requires_abort (rd_kafka_error_t *error); +void rd_kafka_error_set_fatal(rd_kafka_error_t *error); +void rd_kafka_error_set_retriable(rd_kafka_error_t *error); +void rd_kafka_error_set_txn_requires_abort(rd_kafka_error_t *error); -rd_kafka_error_t *rd_kafka_error_new_fatal (rd_kafka_resp_err_t code, - const char *fmt, ...) - RD_FORMAT(printf, 2, 3); -rd_kafka_error_t *rd_kafka_error_new_retriable (rd_kafka_resp_err_t code, - const char *fmt, ...) - RD_FORMAT(printf, 2, 3); +rd_kafka_error_t *rd_kafka_error_new_fatal(rd_kafka_resp_err_t code, + const char *fmt, + ...) RD_FORMAT(printf, 2, 3); +rd_kafka_error_t *rd_kafka_error_new_retriable(rd_kafka_resp_err_t code, + const char *fmt, + ...) RD_FORMAT(printf, 2, 3); rd_kafka_error_t * -rd_kafka_error_new_txn_requires_abort (rd_kafka_resp_err_t code, - const char *fmt, ...) - RD_FORMAT(printf, 2, 3); +rd_kafka_error_new_txn_requires_abort(rd_kafka_resp_err_t code, + const char *fmt, + ...) 
RD_FORMAT(printf, 2, 3); -rd_kafka_resp_err_t rd_kafka_error_to_legacy (rd_kafka_error_t *error, - char *errstr, size_t errstr_size); +rd_kafka_resp_err_t rd_kafka_error_to_legacy(rd_kafka_error_t *error, + char *errstr, + size_t errstr_size); #endif /* _RDKAFKA_ERROR_H_ */ diff --git a/src/rdkafka_event.c b/src/rdkafka_event.c index 962bed060d..4d7a8d6595 100644 --- a/src/rdkafka_event.c +++ b/src/rdkafka_event.c @@ -3,24 +3,24 @@ * * Copyright (c) 2016 Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -30,29 +30,28 @@ #include "rdkafka_event.h" #include "rd.h" -rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev) { - return rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE; +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev) { + return rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE; } -const char *rd_kafka_event_name (const rd_kafka_event_t *rkev) { - switch (rkev ? 
rkev->rko_evtype : RD_KAFKA_EVENT_NONE) - { - case RD_KAFKA_EVENT_NONE: - return "(NONE)"; - case RD_KAFKA_EVENT_DR: - return "DeliveryReport"; - case RD_KAFKA_EVENT_FETCH: - return "Fetch"; - case RD_KAFKA_EVENT_LOG: - return "Log"; - case RD_KAFKA_EVENT_ERROR: - return "Error"; - case RD_KAFKA_EVENT_REBALANCE: - return "Rebalance"; - case RD_KAFKA_EVENT_OFFSET_COMMIT: - return "OffsetCommit"; - case RD_KAFKA_EVENT_STATS: - return "Stats"; +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev) { + switch (rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE) { + case RD_KAFKA_EVENT_NONE: + return "(NONE)"; + case RD_KAFKA_EVENT_DR: + return "DeliveryReport"; + case RD_KAFKA_EVENT_FETCH: + return "Fetch"; + case RD_KAFKA_EVENT_LOG: + return "Log"; + case RD_KAFKA_EVENT_ERROR: + return "Error"; + case RD_KAFKA_EVENT_REBALANCE: + return "Rebalance"; + case RD_KAFKA_EVENT_OFFSET_COMMIT: + return "OffsetCommit"; + case RD_KAFKA_EVENT_STATS: + return "Stats"; case RD_KAFKA_EVENT_CREATETOPICS_RESULT: return "CreateTopicsResult"; case RD_KAFKA_EVENT_DELETETOPICS_RESULT: @@ -71,18 +70,17 @@ const char *rd_kafka_event_name (const rd_kafka_event_t *rkev) { return "DeleteConsumerGroupOffsetsResult"; case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: return "SaslOAuthBearerTokenRefresh"; - default: - return "?unknown?"; - } + default: + return "?unknown?"; + } } - -void rd_kafka_event_destroy (rd_kafka_event_t *rkev) { - if (unlikely(!rkev)) - return; - rd_kafka_op_destroy(rkev); +void rd_kafka_event_destroy(rd_kafka_event_t *rkev) { + if (unlikely(!rkev)) + return; + rd_kafka_op_destroy(rkev); } @@ -91,80 +89,76 @@ void rd_kafka_event_destroy (rd_kafka_event_t *rkev) { * @remark messages will be freed automatically when event is destroyed, * application MUST NOT call rd_kafka_message_destroy() */ -const rd_kafka_message_t * -rd_kafka_event_message_next (rd_kafka_event_t *rkev) { - rd_kafka_op_t *rko = rkev; - rd_kafka_msg_t *rkm; - rd_kafka_msgq_t *rkmq, *rkmq2; - 
rd_kafka_message_t *rkmessage; - - switch (rkev->rko_type) - { - case RD_KAFKA_OP_DR: - rkmq = &rko->rko_u.dr.msgq; - rkmq2 = &rko->rko_u.dr.msgq2; - break; +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev) { + rd_kafka_op_t *rko = rkev; + rd_kafka_msg_t *rkm; + rd_kafka_msgq_t *rkmq, *rkmq2; + rd_kafka_message_t *rkmessage; + + switch (rkev->rko_type) { + case RD_KAFKA_OP_DR: + rkmq = &rko->rko_u.dr.msgq; + rkmq2 = &rko->rko_u.dr.msgq2; + break; - case RD_KAFKA_OP_FETCH: - /* Just one message */ - if (rko->rko_u.fetch.evidx++ > 0) - return NULL; + case RD_KAFKA_OP_FETCH: + /* Just one message */ + if (rko->rko_u.fetch.evidx++ > 0) + return NULL; - rkmessage = rd_kafka_message_get(rko); - if (unlikely(!rkmessage)) - return NULL; + rkmessage = rd_kafka_message_get(rko); + if (unlikely(!rkmessage)) + return NULL; - /* Store offset */ - rd_kafka_op_offset_store(NULL, rko); + /* Store offset */ + rd_kafka_op_offset_store(NULL, rko); - return rkmessage; + return rkmessage; - default: - return NULL; - } + default: + return NULL; + } - if (unlikely(!(rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)))) - return NULL; + if (unlikely(!(rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)))) + return NULL; - rd_kafka_msgq_deq(rkmq, rkm, 1); + rd_kafka_msgq_deq(rkmq, rkm, 1); - /* Put rkm on secondary message queue which will be purged later. */ - rd_kafka_msgq_enq(rkmq2, rkm); + /* Put rkm on secondary message queue which will be purged later. 
*/ + rd_kafka_msgq_enq(rkmq2, rkm); - return rd_kafka_message_get_from_rkm(rko, rkm); + return rd_kafka_message_get_from_rkm(rko, rkm); } -size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, - const rd_kafka_message_t **rkmessages, - size_t size) { - size_t cnt = 0; - const rd_kafka_message_t *rkmessage; +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, + const rd_kafka_message_t **rkmessages, + size_t size) { + size_t cnt = 0; + const rd_kafka_message_t *rkmessage; - while (cnt < size && (rkmessage = rd_kafka_event_message_next(rkev))) - rkmessages[cnt++] = rkmessage; + while (cnt < size && (rkmessage = rd_kafka_event_message_next(rkev))) + rkmessages[cnt++] = rkmessage; - return cnt; + return cnt; } -size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev) { - switch (rkev->rko_evtype) - { - case RD_KAFKA_EVENT_DR: +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev) { + switch (rkev->rko_evtype) { + case RD_KAFKA_EVENT_DR: return (size_t)rkev->rko_u.dr.msgq.rkmq_msg_cnt; - case RD_KAFKA_EVENT_FETCH: - return 1; - default: - return 0; - } + case RD_KAFKA_EVENT_FETCH: + return 1; + default: + return 0; + } } -const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev) { - switch (rkev->rko_evtype) - { +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev) { + switch (rkev->rko_evtype) { #if WITH_SASL_OAUTHBEARER case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: return rkev->rko_rk->rk_conf.sasl.oauthbearer_config; @@ -174,17 +168,16 @@ const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev) { } } -rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev) { - return rkev->rko_err; +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev) { + return rkev->rko_err; } -const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev) { - switch (rkev->rko_type) - { - case RD_KAFKA_OP_ERR: - case RD_KAFKA_OP_CONSUMER_ERR: - if (rkev->rko_u.err.errstr) - return rkev->rko_u.err.errstr; +const char 
*rd_kafka_event_error_string(rd_kafka_event_t *rkev) { + switch (rkev->rko_type) { + case RD_KAFKA_OP_ERR: + case RD_KAFKA_OP_CONSUMER_ERR: + if (rkev->rko_u.err.errstr) + return rkev->rko_u.err.errstr; break; case RD_KAFKA_OP_ADMIN_RESULT: if (rkev->rko_u.admin_result.errstr) @@ -197,114 +190,98 @@ const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev) { return rd_kafka_err2str(rkev->rko_err); } -int rd_kafka_event_error_is_fatal (rd_kafka_event_t *rkev) { +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev) { return rkev->rko_u.err.fatal; } -void *rd_kafka_event_opaque (rd_kafka_event_t *rkev) { - switch (rkev->rko_type & ~RD_KAFKA_OP_FLAGMASK) - { - case RD_KAFKA_OP_OFFSET_COMMIT: - return rkev->rko_u.offset_commit.opaque; +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev) { + switch (rkev->rko_type & ~RD_KAFKA_OP_FLAGMASK) { + case RD_KAFKA_OP_OFFSET_COMMIT: + return rkev->rko_u.offset_commit.opaque; case RD_KAFKA_OP_ADMIN_RESULT: return rkev->rko_u.admin_result.opaque; - default: - return NULL; - } + default: + return NULL; + } } -int rd_kafka_event_log (rd_kafka_event_t *rkev, const char **fac, - const char **str, int *level) { - if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG)) - return -1; +int rd_kafka_event_log(rd_kafka_event_t *rkev, + const char **fac, + const char **str, + int *level) { + if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG)) + return -1; - if (likely(fac != NULL)) + if (likely(fac != NULL)) *fac = rkev->rko_u.log.fac; - if (likely(str != NULL)) - *str = rkev->rko_u.log.str; - if (likely(level != NULL)) - *level = rkev->rko_u.log.level; + if (likely(str != NULL)) + *str = rkev->rko_u.log.str; + if (likely(level != NULL)) + *level = rkev->rko_u.log.level; - return 0; + return 0; } -int rd_kafka_event_debug_contexts (rd_kafka_event_t *rkev, - char *dst, size_t dstsize) { +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, + char *dst, + size_t dstsize) { static const char *names[] = { - "generic", - "broker", 
- "topic", - "metadata", - "feature", - "queue", - "msg", - "protocol", - "cgrp", - "security", - "fetch", - "interceptor", - "plugin", - "consumer", - "admin", - "eos", - "mock", - NULL - }; + "generic", "broker", "topic", "metadata", "feature", + "queue", "msg", "protocol", "cgrp", "security", + "fetch", "interceptor", "plugin", "consumer", "admin", + "eos", "mock", NULL}; if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG)) return -1; rd_flags2str(dst, dstsize, names, rkev->rko_u.log.ctx); return 0; } -const char *rd_kafka_event_stats (rd_kafka_event_t *rkev) { - return rkev->rko_u.stats.json; +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev) { + return rkev->rko_u.stats.json; } rd_kafka_topic_partition_list_t * -rd_kafka_event_topic_partition_list (rd_kafka_event_t *rkev) { - switch (rkev->rko_evtype) - { - case RD_KAFKA_EVENT_REBALANCE: - return rkev->rko_u.rebalance.partitions; - case RD_KAFKA_EVENT_OFFSET_COMMIT: - return rkev->rko_u.offset_commit.partitions; - default: - return NULL; - } +rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev) { + switch (rkev->rko_evtype) { + case RD_KAFKA_EVENT_REBALANCE: + return rkev->rko_u.rebalance.partitions; + case RD_KAFKA_EVENT_OFFSET_COMMIT: + return rkev->rko_u.offset_commit.partitions; + default: + return NULL; + } } rd_kafka_topic_partition_t * -rd_kafka_event_topic_partition (rd_kafka_event_t *rkev) { - rd_kafka_topic_partition_t *rktpar; - - if (unlikely(!rkev->rko_rktp)) - return NULL; +rd_kafka_event_topic_partition(rd_kafka_event_t *rkev) { + rd_kafka_topic_partition_t *rktpar; - rktpar = rd_kafka_topic_partition_new_from_rktp(rkev->rko_rktp); + if (unlikely(!rkev->rko_rktp)) + return NULL; - switch (rkev->rko_type) - { - case RD_KAFKA_OP_ERR: - case RD_KAFKA_OP_CONSUMER_ERR: - rktpar->offset = rkev->rko_u.err.offset; - break; - default: - break; - } + rktpar = rd_kafka_topic_partition_new_from_rktp(rkev->rko_rktp); - rktpar->err = rkev->rko_err; + switch (rkev->rko_type) { + case 
RD_KAFKA_OP_ERR: + case RD_KAFKA_OP_CONSUMER_ERR: + rktpar->offset = rkev->rko_u.err.offset; + break; + default: + break; + } - return rktpar; + rktpar->err = rkev->rko_err; + return rktpar; } const rd_kafka_CreateTopics_result_t * -rd_kafka_event_CreateTopics_result (rd_kafka_event_t *rkev) { +rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATETOPICS_RESULT) return NULL; else @@ -313,7 +290,7 @@ rd_kafka_event_CreateTopics_result (rd_kafka_event_t *rkev) { const rd_kafka_DeleteTopics_result_t * -rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev) { +rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETETOPICS_RESULT) return NULL; else @@ -322,7 +299,7 @@ rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev) { const rd_kafka_CreatePartitions_result_t * -rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev) { +rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT) return NULL; else @@ -331,7 +308,7 @@ rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev) { const rd_kafka_AlterConfigs_result_t * -rd_kafka_event_AlterConfigs_result (rd_kafka_event_t *rkev) { +rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_ALTERCONFIGS_RESULT) return NULL; else @@ -340,7 +317,7 @@ rd_kafka_event_AlterConfigs_result (rd_kafka_event_t *rkev) { const rd_kafka_DescribeConfigs_result_t * -rd_kafka_event_DescribeConfigs_result (rd_kafka_event_t *rkev) { +rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT) return NULL; else @@ -348,7 +325,7 @@ rd_kafka_event_DescribeConfigs_result (rd_kafka_event_t *rkev) { } const rd_kafka_DeleteRecords_result_t * -rd_kafka_event_DeleteRecords_result (rd_kafka_event_t *rkev) { 
+rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETERECORDS_RESULT) return NULL; else @@ -356,7 +333,7 @@ rd_kafka_event_DeleteRecords_result (rd_kafka_event_t *rkev) { } const rd_kafka_DeleteGroups_result_t * -rd_kafka_event_DeleteGroups_result (rd_kafka_event_t *rkev) { +rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETEGROUPS_RESULT) return NULL; else @@ -364,12 +341,11 @@ rd_kafka_event_DeleteGroups_result (rd_kafka_event_t *rkev) { } const rd_kafka_DeleteConsumerGroupOffsets_result_t * -rd_kafka_event_DeleteConsumerGroupOffsets_result (rd_kafka_event_t *rkev) { - if (!rkev || - rkev->rko_evtype != - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT) +rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != + RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT) return NULL; else - return (const rd_kafka_DeleteConsumerGroupOffsets_result_t *) - rkev; + return ( + const rd_kafka_DeleteConsumerGroupOffsets_result_t *)rkev; } diff --git a/src/rdkafka_event.h b/src/rdkafka_event.h index 53215ff094..7281fec177 100644 --- a/src/rdkafka_event.h +++ b/src/rdkafka_event.h @@ -3,24 +3,24 @@ * * Copyright (c) 2016 Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -31,21 +31,21 @@ * @brief Converts op type to event type. * @returns the event type, or 0 if the op cannot be mapped to an event. 
*/ -static RD_UNUSED RD_INLINE -rd_kafka_event_type_t rd_kafka_op2event (rd_kafka_op_type_t optype) { - static const rd_kafka_event_type_t map[RD_KAFKA_OP__END] = { - [RD_KAFKA_OP_DR] = RD_KAFKA_EVENT_DR, - [RD_KAFKA_OP_FETCH] = RD_KAFKA_EVENT_FETCH, - [RD_KAFKA_OP_ERR] = RD_KAFKA_EVENT_ERROR, - [RD_KAFKA_OP_CONSUMER_ERR] = RD_KAFKA_EVENT_ERROR, - [RD_KAFKA_OP_REBALANCE] = RD_KAFKA_EVENT_REBALANCE, - [RD_KAFKA_OP_OFFSET_COMMIT] = RD_KAFKA_EVENT_OFFSET_COMMIT, - [RD_KAFKA_OP_LOG] = RD_KAFKA_EVENT_LOG, - [RD_KAFKA_OP_STATS] = RD_KAFKA_EVENT_STATS, - [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH - }; +static RD_UNUSED RD_INLINE rd_kafka_event_type_t +rd_kafka_op2event(rd_kafka_op_type_t optype) { + static const rd_kafka_event_type_t map[RD_KAFKA_OP__END] = { + [RD_KAFKA_OP_DR] = RD_KAFKA_EVENT_DR, + [RD_KAFKA_OP_FETCH] = RD_KAFKA_EVENT_FETCH, + [RD_KAFKA_OP_ERR] = RD_KAFKA_EVENT_ERROR, + [RD_KAFKA_OP_CONSUMER_ERR] = RD_KAFKA_EVENT_ERROR, + [RD_KAFKA_OP_REBALANCE] = RD_KAFKA_EVENT_REBALANCE, + [RD_KAFKA_OP_OFFSET_COMMIT] = RD_KAFKA_EVENT_OFFSET_COMMIT, + [RD_KAFKA_OP_LOG] = RD_KAFKA_EVENT_LOG, + [RD_KAFKA_OP_STATS] = RD_KAFKA_EVENT_STATS, + [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = + RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH}; - return map[(int)optype & ~RD_KAFKA_OP_FLAGMASK]; + return map[(int)optype & ~RD_KAFKA_OP_FLAGMASK]; } @@ -53,8 +53,8 @@ rd_kafka_event_type_t rd_kafka_op2event (rd_kafka_op_type_t optype) { * @brief Attempt to set up an event based on rko. * @returns 1 if op is event:able and set up, else 0. 
*/ -static RD_UNUSED RD_INLINE -int rd_kafka_event_setup (rd_kafka_t *rk, rd_kafka_op_t *rko) { +static RD_UNUSED RD_INLINE int rd_kafka_event_setup(rd_kafka_t *rk, + rd_kafka_op_t *rko) { if (unlikely(rko->rko_flags & RD_KAFKA_OP_F_FORCE_CB)) return 0; @@ -62,17 +62,16 @@ int rd_kafka_event_setup (rd_kafka_t *rk, rd_kafka_op_t *rko) { if (!rko->rko_evtype) rko->rko_evtype = rd_kafka_op2event(rko->rko_type); - switch (rko->rko_evtype) - { - case RD_KAFKA_EVENT_NONE: - return 0; + switch (rko->rko_evtype) { + case RD_KAFKA_EVENT_NONE: + return 0; - case RD_KAFKA_EVENT_DR: - rko->rko_rk = rk; - rd_dassert(!rko->rko_u.dr.do_purge2); - rd_kafka_msgq_init(&rko->rko_u.dr.msgq2); - rko->rko_u.dr.do_purge2 = 1; - return 1; + case RD_KAFKA_EVENT_DR: + rko->rko_rk = rk; + rd_dassert(!rko->rko_u.dr.do_purge2); + rd_kafka_msgq_init(&rko->rko_u.dr.msgq2); + rko->rko_u.dr.do_purge2 = 1; + return 1; case RD_KAFKA_EVENT_ERROR: if (rko->rko_err == RD_KAFKA_RESP_ERR__FATAL) { @@ -86,12 +85,12 @@ int rd_kafka_event_setup (rd_kafka_t *rk, rd_kafka_op_t *rko) { if (rko->rko_u.err.errstr) rd_free(rko->rko_u.err.errstr); rko->rko_u.err.errstr = rd_strdup(errstr); - rko->rko_u.err.fatal = 1; + rko->rko_u.err.fatal = 1; } } return 1; - case RD_KAFKA_EVENT_REBALANCE: + case RD_KAFKA_EVENT_REBALANCE: case RD_KAFKA_EVENT_LOG: case RD_KAFKA_EVENT_OFFSET_COMMIT: case RD_KAFKA_EVENT_STATS: @@ -104,9 +103,9 @@ int rd_kafka_event_setup (rd_kafka_t *rk, rd_kafka_op_t *rko) { case RD_KAFKA_EVENT_DELETEGROUPS_RESULT: case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT: case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: - return 1; + return 1; - default: - return 0; - } + default: + return 0; + } } diff --git a/src/rdkafka_feature.c b/src/rdkafka_feature.c index 562b809af9..a2fc085c5b 100644 --- a/src/rdkafka_feature.c +++ b/src/rdkafka_feature.c @@ -3,24 +3,24 @@ * * Copyright (c) 2016, Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -32,236 +32,226 @@ #include -static const char *rd_kafka_feature_names[] = { - "MsgVer1", - "ApiVersion", - "BrokerBalancedConsumer", - "ThrottleTime", - "Sasl", - "SaslHandshake", - "BrokerGroupCoordinator", - "LZ4", - "OffsetTime", - "MsgVer2", - "IdempotentProducer", - "ZSTD", - "SaslAuthReq", - "UnitTest", - NULL -}; +static const char *rd_kafka_feature_names[] = {"MsgVer1", + "ApiVersion", + "BrokerBalancedConsumer", + "ThrottleTime", + "Sasl", + "SaslHandshake", + "BrokerGroupCoordinator", + "LZ4", + "OffsetTime", + "MsgVer2", + "IdempotentProducer", + "ZSTD", + "SaslAuthReq", + "UnitTest", + NULL}; static const struct rd_kafka_feature_map { - /* RD_KAFKA_FEATURE_... */ - int feature; + /* RD_KAFKA_FEATURE_... */ + int feature; - /* Depends on the following ApiVersions overlapping with - * what the broker supports: */ - struct rd_kafka_ApiVersion depends[RD_KAFKAP__NUM]; + /* Depends on the following ApiVersions overlapping with + * what the broker supports: */ + struct rd_kafka_ApiVersion depends[RD_KAFKAP__NUM]; } rd_kafka_feature_map[] = { - /** - * @brief List of features and the ApiVersions they depend on. - * - * The dependency list consists of the ApiKey followed by this - * client's supported minimum and maximum API versions. - * As long as this list and its versions overlaps with the - * broker supported API versions the feature will be enabled. 
- */ - { - - /* @brief >=0.10.0: Message.MagicByte version 1: - * Relative offsets (KIP-31) and message timestamps (KIP-32). */ - .feature = RD_KAFKA_FEATURE_MSGVER1, - .depends = { - { RD_KAFKAP_Produce, 2, 2 }, - { RD_KAFKAP_Fetch, 2, 2 }, - { -1 }, - }, - }, - { - /* @brief >=0.11.0: Message.MagicByte version 2 */ - .feature = RD_KAFKA_FEATURE_MSGVER2, - .depends = { - { RD_KAFKAP_Produce, 3, 3 }, - { RD_KAFKAP_Fetch, 4, 4 }, - { -1 }, - }, - }, - { - /* @brief >=0.10.0: ApiVersionQuery support. - * @remark This is a bit of chicken-and-egg problem but needs to be - * set by feature_check() to avoid the feature being cleared - * even when broker supports it. */ - .feature = RD_KAFKA_FEATURE_APIVERSION, - .depends = { - { RD_KAFKAP_ApiVersion, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.8.2.0: Broker-based Group coordinator */ - .feature = RD_KAFKA_FEATURE_BROKER_GROUP_COORD, - .depends = { - { RD_KAFKAP_FindCoordinator, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.9.0: Broker-based balanced consumer groups. */ - .feature = RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER, - .depends = { - { RD_KAFKAP_FindCoordinator, 0, 0 }, - { RD_KAFKAP_OffsetCommit, 1, 2 }, - { RD_KAFKAP_OffsetFetch, 1, 1 }, - { RD_KAFKAP_JoinGroup, 0, 0 }, - { RD_KAFKAP_SyncGroup, 0, 0 }, - { RD_KAFKAP_Heartbeat, 0, 0 }, - { RD_KAFKAP_LeaveGroup, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.9.0: ThrottleTime */ - .feature = RD_KAFKA_FEATURE_THROTTLETIME, - .depends = { - { RD_KAFKAP_Produce, 1, 2 }, - { RD_KAFKAP_Fetch, 1, 2 }, - { -1 }, - }, - - }, - { - /* @brief >=0.9.0: SASL (GSSAPI) authentication. - * Since SASL is not using the Kafka protocol - * we must use something else to map us to the - * proper broker version support: - * JoinGroup was released along with SASL in 0.9.0. 
*/ - .feature = RD_KAFKA_FEATURE_SASL_GSSAPI, - .depends = { - { RD_KAFKAP_JoinGroup, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.10.0: SASL mechanism handshake (KIP-43) - * to automatically support other mechanisms - * than GSSAPI, such as PLAIN. */ - .feature = RD_KAFKA_FEATURE_SASL_HANDSHAKE, - .depends = { - { RD_KAFKAP_SaslHandshake, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.8.2: LZ4 compression. - * Since LZ4 initially did not rely on a specific API - * type or version (it does in >=0.10.0) - * we must use something else to map us to the - * proper broker version support: - * GrooupCoordinator was released in 0.8.2 */ - .feature = RD_KAFKA_FEATURE_LZ4, - .depends = { - { RD_KAFKAP_FindCoordinator, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.10.1.0: Offset v1 (KIP-79) - * Time-based offset requests */ - .feature = RD_KAFKA_FEATURE_OFFSET_TIME, - .depends = { - { RD_KAFKAP_ListOffsets, 1, 1 }, - { -1 }, - } - }, - { - /* @brief >=0.11.0.0: Idempotent Producer*/ - .feature = RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER, - .depends = { - { RD_KAFKAP_InitProducerId, 0, 0 }, - { -1 }, - } - }, - { - /* @brief >=2.1.0-IV2: Support ZStandard Compression Codec (KIP-110) */ - .feature = RD_KAFKA_FEATURE_ZSTD, - .depends = { - { RD_KAFKAP_Produce, 7, 7 }, - { RD_KAFKAP_Fetch, 10, 10 }, - { -1 }, - }, - }, - { - /* @brief >=1.0.0: SaslAuthenticateRequest */ - .feature = RD_KAFKA_FEATURE_SASL_AUTH_REQ, - .depends = { - { RD_KAFKAP_SaslHandshake, 1, 1 }, - { RD_KAFKAP_SaslAuthenticate, 0, 0 }, - { -1 }, - }, - }, - { .feature = 0 }, /* sentinel */ + /** + * @brief List of features and the ApiVersions they depend on. + * + * The dependency list consists of the ApiKey followed by this + * client's supported minimum and maximum API versions. + * As long as this list and its versions overlaps with the + * broker supported API versions the feature will be enabled. 
+ */ + { + + /* @brief >=0.10.0: Message.MagicByte version 1: + * Relative offsets (KIP-31) and message timestamps (KIP-32). */ + .feature = RD_KAFKA_FEATURE_MSGVER1, + .depends = + { + {RD_KAFKAP_Produce, 2, 2}, + {RD_KAFKAP_Fetch, 2, 2}, + {-1}, + }, + }, + { + /* @brief >=0.11.0: Message.MagicByte version 2 */ + .feature = RD_KAFKA_FEATURE_MSGVER2, + .depends = + { + {RD_KAFKAP_Produce, 3, 3}, + {RD_KAFKAP_Fetch, 4, 4}, + {-1}, + }, + }, + { + /* @brief >=0.10.0: ApiVersionQuery support. + * @remark This is a bit of chicken-and-egg problem but needs to be + * set by feature_check() to avoid the feature being cleared + * even when broker supports it. */ + .feature = RD_KAFKA_FEATURE_APIVERSION, + .depends = + { + {RD_KAFKAP_ApiVersion, 0, 0}, + {-1}, + }, + }, + { + /* @brief >=0.8.2.0: Broker-based Group coordinator */ + .feature = RD_KAFKA_FEATURE_BROKER_GROUP_COORD, + .depends = + { + {RD_KAFKAP_FindCoordinator, 0, 0}, + {-1}, + }, + }, + { + /* @brief >=0.9.0: Broker-based balanced consumer groups. */ + .feature = RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER, + .depends = + { + {RD_KAFKAP_FindCoordinator, 0, 0}, + {RD_KAFKAP_OffsetCommit, 1, 2}, + {RD_KAFKAP_OffsetFetch, 1, 1}, + {RD_KAFKAP_JoinGroup, 0, 0}, + {RD_KAFKAP_SyncGroup, 0, 0}, + {RD_KAFKAP_Heartbeat, 0, 0}, + {RD_KAFKAP_LeaveGroup, 0, 0}, + {-1}, + }, + }, + { + /* @brief >=0.9.0: ThrottleTime */ + .feature = RD_KAFKA_FEATURE_THROTTLETIME, + .depends = + { + {RD_KAFKAP_Produce, 1, 2}, + {RD_KAFKAP_Fetch, 1, 2}, + {-1}, + }, + + }, + { + /* @brief >=0.9.0: SASL (GSSAPI) authentication. + * Since SASL is not using the Kafka protocol + * we must use something else to map us to the + * proper broker version support: + * JoinGroup was released along with SASL in 0.9.0. 
*/ + .feature = RD_KAFKA_FEATURE_SASL_GSSAPI, + .depends = + { + {RD_KAFKAP_JoinGroup, 0, 0}, + {-1}, + }, + }, + { + /* @brief >=0.10.0: SASL mechanism handshake (KIP-43) + * to automatically support other mechanisms + * than GSSAPI, such as PLAIN. */ + .feature = RD_KAFKA_FEATURE_SASL_HANDSHAKE, + .depends = + { + {RD_KAFKAP_SaslHandshake, 0, 0}, + {-1}, + }, + }, + { + /* @brief >=0.8.2: LZ4 compression. + * Since LZ4 initially did not rely on a specific API + * type or version (it does in >=0.10.0) + * we must use something else to map us to the + * proper broker version support: + * GrooupCoordinator was released in 0.8.2 */ + .feature = RD_KAFKA_FEATURE_LZ4, + .depends = + { + {RD_KAFKAP_FindCoordinator, 0, 0}, + {-1}, + }, + }, + {/* @brief >=0.10.1.0: Offset v1 (KIP-79) + * Time-based offset requests */ + .feature = RD_KAFKA_FEATURE_OFFSET_TIME, + .depends = + { + {RD_KAFKAP_ListOffsets, 1, 1}, + {-1}, + }}, + {/* @brief >=0.11.0.0: Idempotent Producer*/ + .feature = RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER, + .depends = + { + {RD_KAFKAP_InitProducerId, 0, 0}, + {-1}, + }}, + { + /* @brief >=2.1.0-IV2: Support ZStandard Compression Codec (KIP-110) */ + .feature = RD_KAFKA_FEATURE_ZSTD, + .depends = + { + {RD_KAFKAP_Produce, 7, 7}, + {RD_KAFKAP_Fetch, 10, 10}, + {-1}, + }, + }, + { + /* @brief >=1.0.0: SaslAuthenticateRequest */ + .feature = RD_KAFKA_FEATURE_SASL_AUTH_REQ, + .depends = + { + {RD_KAFKAP_SaslHandshake, 1, 1}, + {RD_KAFKAP_SaslAuthenticate, 0, 0}, + {-1}, + }, + }, + {.feature = 0}, /* sentinel */ }; /** - * @brief In absence of KIP-35 support in earlier broker versions we provide hardcoded - * lists that corresponds to older broker versions. + * @brief In absence of KIP-35 support in earlier broker versions we provide + * hardcoded lists that corresponds to older broker versions. 
*/ /* >= 0.10.0.0: dummy for all future versions that support ApiVersionRequest */ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_Queryable[] = { - { RD_KAFKAP_ApiVersion, 0, 0 } -}; + {RD_KAFKAP_ApiVersion, 0, 0}}; /* =~ 0.9.0 */ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_9_0[] = { - { RD_KAFKAP_Produce, 0, 1 }, - { RD_KAFKAP_Fetch, 0, 1 }, - { RD_KAFKAP_ListOffsets, 0, 0 }, - { RD_KAFKAP_Metadata, 0, 0 }, - { RD_KAFKAP_OffsetCommit, 0, 2 }, - { RD_KAFKAP_OffsetFetch, 0, 1 }, - { RD_KAFKAP_FindCoordinator, 0, 0 }, - { RD_KAFKAP_JoinGroup, 0, 0 }, - { RD_KAFKAP_Heartbeat, 0, 0 }, - { RD_KAFKAP_LeaveGroup, 0, 0 }, - { RD_KAFKAP_SyncGroup, 0, 0 }, - { RD_KAFKAP_DescribeGroups, 0, 0 }, - { RD_KAFKAP_ListGroups, 0, 0 } -}; + {RD_KAFKAP_Produce, 0, 1}, {RD_KAFKAP_Fetch, 0, 1}, + {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0}, + {RD_KAFKAP_OffsetCommit, 0, 2}, {RD_KAFKAP_OffsetFetch, 0, 1}, + {RD_KAFKAP_FindCoordinator, 0, 0}, {RD_KAFKAP_JoinGroup, 0, 0}, + {RD_KAFKAP_Heartbeat, 0, 0}, {RD_KAFKAP_LeaveGroup, 0, 0}, + {RD_KAFKAP_SyncGroup, 0, 0}, {RD_KAFKAP_DescribeGroups, 0, 0}, + {RD_KAFKAP_ListGroups, 0, 0}}; /* =~ 0.8.2 */ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_2[] = { - { RD_KAFKAP_Produce, 0, 0 }, - { RD_KAFKAP_Fetch, 0, 0 }, - { RD_KAFKAP_ListOffsets, 0, 0 }, - { RD_KAFKAP_Metadata, 0, 0 }, - { RD_KAFKAP_OffsetCommit, 0, 1 }, - { RD_KAFKAP_OffsetFetch, 0, 1 }, - { RD_KAFKAP_FindCoordinator, 0, 0 } -}; + {RD_KAFKAP_Produce, 0, 0}, {RD_KAFKAP_Fetch, 0, 0}, + {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0}, + {RD_KAFKAP_OffsetCommit, 0, 1}, {RD_KAFKAP_OffsetFetch, 0, 1}, + {RD_KAFKAP_FindCoordinator, 0, 0}}; /* =~ 0.8.1 */ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_1[] = { - { RD_KAFKAP_Produce, 0, 0 }, - { RD_KAFKAP_Fetch, 0, 0 }, - { RD_KAFKAP_ListOffsets, 0, 0 }, - { RD_KAFKAP_Metadata, 0, 0 }, - { RD_KAFKAP_OffsetCommit, 0, 1 }, - { RD_KAFKAP_OffsetFetch, 0, 0 } -}; + {RD_KAFKAP_Produce, 0, 
0}, {RD_KAFKAP_Fetch, 0, 0}, + {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0}, + {RD_KAFKAP_OffsetCommit, 0, 1}, {RD_KAFKAP_OffsetFetch, 0, 0}}; /* =~ 0.8.0 */ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_0[] = { - { RD_KAFKAP_Produce, 0, 0 }, - { RD_KAFKAP_Fetch, 0, 0 }, - { RD_KAFKAP_ListOffsets, 0, 0 }, - { RD_KAFKAP_Metadata, 0, 0 } -}; + {RD_KAFKAP_Produce, 0, 0}, + {RD_KAFKAP_Fetch, 0, 0}, + {RD_KAFKAP_ListOffsets, 0, 0}, + {RD_KAFKAP_Metadata, 0, 0}}; /** @@ -269,55 +259,58 @@ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_0[] = { * support the ApiVersionQuery request. E.g., brokers <0.10.0. * * @param broker_version Broker version to match (longest prefix matching). - * @param use_default If no match is found return the default APIs (but return 0). + * @param use_default If no match is found return the default APIs (but return + * 0). * * @returns 1 if \p broker_version was recognized: \p *apisp will point to * the ApiVersion list and *api_cntp will be set to its element count. - * 0 if \p broker_version was not recognized: \p *apisp remains unchanged. + * 0 if \p broker_version was not recognized: \p *apisp remains + * unchanged. 
* */ -int rd_kafka_get_legacy_ApiVersions (const char *broker_version, - struct rd_kafka_ApiVersion **apisp, - size_t *api_cntp, const char *fallback) { - static const struct { - const char *pfx; - struct rd_kafka_ApiVersion *apis; - size_t api_cnt; - } vermap[] = { -#define _VERMAP(PFX,APIS) { PFX, APIS, RD_ARRAYSIZE(APIS) } - _VERMAP("0.9.0", rd_kafka_ApiVersion_0_9_0), - _VERMAP("0.8.2", rd_kafka_ApiVersion_0_8_2), - _VERMAP("0.8.1", rd_kafka_ApiVersion_0_8_1), - _VERMAP("0.8.0", rd_kafka_ApiVersion_0_8_0), - { "0.7.", NULL }, /* Unsupported */ - { "0.6.", NULL }, /* Unsupported */ - _VERMAP("", rd_kafka_ApiVersion_Queryable), - { NULL } - }; - int i; - int fallback_i = -1; - int ret = 0; - - *apisp = NULL; +int rd_kafka_get_legacy_ApiVersions(const char *broker_version, + struct rd_kafka_ApiVersion **apisp, + size_t *api_cntp, + const char *fallback) { + static const struct { + const char *pfx; + struct rd_kafka_ApiVersion *apis; + size_t api_cnt; + } vermap[] = { +#define _VERMAP(PFX, APIS) {PFX, APIS, RD_ARRAYSIZE(APIS)} + _VERMAP("0.9.0", rd_kafka_ApiVersion_0_9_0), + _VERMAP("0.8.2", rd_kafka_ApiVersion_0_8_2), + _VERMAP("0.8.1", rd_kafka_ApiVersion_0_8_1), + _VERMAP("0.8.0", rd_kafka_ApiVersion_0_8_0), + {"0.7.", NULL}, /* Unsupported */ + {"0.6.", NULL}, /* Unsupported */ + _VERMAP("", rd_kafka_ApiVersion_Queryable), + {NULL}}; + int i; + int fallback_i = -1; + int ret = 0; + + *apisp = NULL; *api_cntp = 0; - for (i = 0 ; vermap[i].pfx ; i++) { - if (!strncmp(vermap[i].pfx, broker_version, strlen(vermap[i].pfx))) { - if (!vermap[i].apis) - return 0; - *apisp = vermap[i].apis; - *api_cntp = vermap[i].api_cnt; - ret = 1; + for (i = 0; vermap[i].pfx; i++) { + if (!strncmp(vermap[i].pfx, broker_version, + strlen(vermap[i].pfx))) { + if (!vermap[i].apis) + return 0; + *apisp = vermap[i].apis; + *api_cntp = vermap[i].api_cnt; + ret = 1; break; - } else if (fallback && !strcmp(vermap[i].pfx, fallback)) - fallback_i = i; - } + } else if (fallback && 
!strcmp(vermap[i].pfx, fallback)) + fallback_i = i; + } - if (!*apisp && fallback) { - rd_kafka_assert(NULL, fallback_i != -1); - *apisp = vermap[fallback_i].apis; - *api_cntp = vermap[fallback_i].api_cnt; - } + if (!*apisp && fallback) { + rd_kafka_assert(NULL, fallback_i != -1); + *apisp = vermap[fallback_i].apis; + *api_cntp = vermap[fallback_i].api_cnt; + } return ret; } @@ -327,22 +320,20 @@ int rd_kafka_get_legacy_ApiVersions (const char *broker_version, * @returns 1 if the provided broker version (probably) * supports api.version.request. */ -int rd_kafka_ApiVersion_is_queryable (const char *broker_version) { - struct rd_kafka_ApiVersion *apis; - size_t api_cnt; +int rd_kafka_ApiVersion_is_queryable(const char *broker_version) { + struct rd_kafka_ApiVersion *apis; + size_t api_cnt; - if (!rd_kafka_get_legacy_ApiVersions(broker_version, - &apis, &api_cnt, 0)) - return 0; + if (!rd_kafka_get_legacy_ApiVersions(broker_version, &apis, &api_cnt, + 0)) + return 0; - return apis == rd_kafka_ApiVersion_Queryable; + return apis == rd_kafka_ApiVersion_Queryable; } - - /** * @brief Check if match's versions overlaps with \p apis. 
* @@ -350,16 +341,17 @@ int rd_kafka_ApiVersion_is_queryable (const char *broker_version) { * @remark \p apis must be sorted using rd_kafka_ApiVersion_key_cmp() */ static RD_INLINE int -rd_kafka_ApiVersion_check (const struct rd_kafka_ApiVersion *apis, size_t api_cnt, - const struct rd_kafka_ApiVersion *match) { - const struct rd_kafka_ApiVersion *api; +rd_kafka_ApiVersion_check(const struct rd_kafka_ApiVersion *apis, + size_t api_cnt, + const struct rd_kafka_ApiVersion *match) { + const struct rd_kafka_ApiVersion *api; - api = bsearch(match, apis, api_cnt, sizeof(*apis), - rd_kafka_ApiVersion_key_cmp); - if (unlikely(!api)) - return 0; + api = bsearch(match, apis, api_cnt, sizeof(*apis), + rd_kafka_ApiVersion_key_cmp); + if (unlikely(!api)) + return 0; - return match->MinVer <= api->MaxVer && api->MinVer <= match->MaxVer; + return match->MinVer <= api->MaxVer && api->MinVer <= match->MaxVer; } @@ -374,50 +366,50 @@ rd_kafka_ApiVersion_check (const struct rd_kafka_ApiVersion *apis, size_t api_cn * * @returns the supported features (bitmask) to enable. */ -int rd_kafka_features_check (rd_kafka_broker_t *rkb, - struct rd_kafka_ApiVersion *broker_apis, - size_t broker_api_cnt) { - int features = 0; - int i; - - /* Scan through features. */ - for (i = 0 ; rd_kafka_feature_map[i].feature != 0 ; i++) { - const struct rd_kafka_ApiVersion *match; - int fails = 0; - - /* For each feature check that all its API dependencies - * can be fullfilled. */ - - for (match = &rd_kafka_feature_map[i].depends[0] ; - match->ApiKey != -1 ; match++) { - int r; - - r = rd_kafka_ApiVersion_check(broker_apis, broker_api_cnt, - match); - - rd_rkb_dbg(rkb, FEATURE, "APIVERSION", - " Feature %s: %s (%hd..%hd) " - "%ssupported by broker", - rd_kafka_features2str(rd_kafka_feature_map[i]. - feature), - rd_kafka_ApiKey2str(match->ApiKey), - match->MinVer, match->MaxVer, - r ? "" : "NOT "); - - fails += !r; - } - - rd_rkb_dbg(rkb, FEATURE, "APIVERSION", - "%s feature %s", - fails ? 
"Disabling" : "Enabling", - rd_kafka_features2str(rd_kafka_feature_map[i].feature)); - - - if (!fails) - features |= rd_kafka_feature_map[i].feature; - } - - return features; +int rd_kafka_features_check(rd_kafka_broker_t *rkb, + struct rd_kafka_ApiVersion *broker_apis, + size_t broker_api_cnt) { + int features = 0; + int i; + + /* Scan through features. */ + for (i = 0; rd_kafka_feature_map[i].feature != 0; i++) { + const struct rd_kafka_ApiVersion *match; + int fails = 0; + + /* For each feature check that all its API dependencies + * can be fullfilled. */ + + for (match = &rd_kafka_feature_map[i].depends[0]; + match->ApiKey != -1; match++) { + int r; + + r = rd_kafka_ApiVersion_check(broker_apis, + broker_api_cnt, match); + + rd_rkb_dbg(rkb, FEATURE, "APIVERSION", + " Feature %s: %s (%hd..%hd) " + "%ssupported by broker", + rd_kafka_features2str( + rd_kafka_feature_map[i].feature), + rd_kafka_ApiKey2str(match->ApiKey), + match->MinVer, match->MaxVer, + r ? "" : "NOT "); + + fails += !r; + } + + rd_rkb_dbg( + rkb, FEATURE, "APIVERSION", "%s feature %s", + fails ? "Disabling" : "Enabling", + rd_kafka_features2str(rd_kafka_feature_map[i].feature)); + + + if (!fails) + features |= rd_kafka_feature_map[i].feature; + } + + return features; } @@ -425,49 +417,44 @@ int rd_kafka_features_check (rd_kafka_broker_t *rkb, /** * @brief Make an allocated and sorted copy of \p src. */ -void -rd_kafka_ApiVersions_copy (const struct rd_kafka_ApiVersion *src, - size_t src_cnt, - struct rd_kafka_ApiVersion **dstp, - size_t *dst_cntp) { - *dstp = rd_memdup(src, sizeof(*src) * src_cnt); +void rd_kafka_ApiVersions_copy(const struct rd_kafka_ApiVersion *src, + size_t src_cnt, + struct rd_kafka_ApiVersion **dstp, + size_t *dst_cntp) { + *dstp = rd_memdup(src, sizeof(*src) * src_cnt); *dst_cntp = src_cnt; qsort(*dstp, *dst_cntp, sizeof(**dstp), rd_kafka_ApiVersion_key_cmp); } - - - /** * @returns a human-readable feature flag string. 
*/ -const char *rd_kafka_features2str (int features) { - static RD_TLS char ret[4][256]; - size_t of = 0; - static RD_TLS int reti = 0; - int i; - - reti = (reti + 1) % 4; - - *ret[reti] = '\0'; - for (i = 0 ; rd_kafka_feature_names[i] ; i++) { - int r; - if (!(features & (1 << i))) - continue; - - r = rd_snprintf(ret[reti]+of, sizeof(ret[reti])-of, "%s%s", - of == 0 ? "" : ",", - rd_kafka_feature_names[i]); - if ((size_t)r > sizeof(ret[reti])-of) { - /* Out of space */ - memcpy(&ret[reti][sizeof(ret[reti])-3], "..", 3); - break; - } - - of += r; - } - - return ret[reti]; +const char *rd_kafka_features2str(int features) { + static RD_TLS char ret[4][256]; + size_t of = 0; + static RD_TLS int reti = 0; + int i; + + reti = (reti + 1) % 4; + + *ret[reti] = '\0'; + for (i = 0; rd_kafka_feature_names[i]; i++) { + int r; + if (!(features & (1 << i))) + continue; + + r = rd_snprintf(ret[reti] + of, sizeof(ret[reti]) - of, "%s%s", + of == 0 ? "" : ",", rd_kafka_feature_names[i]); + if ((size_t)r > sizeof(ret[reti]) - of) { + /* Out of space */ + memcpy(&ret[reti][sizeof(ret[reti]) - 3], "..", 3); + break; + } + + of += r; + } + + return ret[reti]; } diff --git a/src/rdkafka_feature.h b/src/rdkafka_feature.h index c3817d96db..a651a07df0 100644 --- a/src/rdkafka_feature.h +++ b/src/rdkafka_feature.h @@ -3,24 +3,24 @@ * * Copyright (c) 2016, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -36,19 +36,19 @@ /* Message version 1 (MagicByte=1): * + relative offsets (KIP-31) * + timestamps (KIP-32) */ -#define RD_KAFKA_FEATURE_MSGVER1 0x1 +#define RD_KAFKA_FEATURE_MSGVER1 0x1 /* ApiVersionQuery support (KIP-35) */ #define RD_KAFKA_FEATURE_APIVERSION 0x2 - /* >= 0.9: Broker-based Balanced Consumer */ +/* >= 0.9: Broker-based Balanced Consumer */ #define RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER 0x4 /* >= 0.9: Produce/Fetch ThrottleTime reporting */ #define RD_KAFKA_FEATURE_THROTTLETIME 0x8 /* >= 0.9: SASL GSSAPI support */ -#define RD_KAFKA_FEATURE_SASL_GSSAPI 0x10 +#define RD_KAFKA_FEATURE_SASL_GSSAPI 0x10 /* >= 0.10: SaslMechanismRequest (KIP-43) */ #define RD_KAFKA_FEATURE_SASL_HANDSHAKE 0x20 @@ -64,7 +64,7 @@ /* >= 0.11.0.0: Message version 2 (MagicByte=2): * + EOS message format KIP-98 */ -#define RD_KAFKA_FEATURE_MSGVER2 0x200 +#define RD_KAFKA_FEATURE_MSGVER2 0x200 /* >= 0.11.0.0: Idempotent Producer support */ #define RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER 0x400 @@ -80,20 +80,23 @@ #define RD_KAFKA_FEATURE_UNITTEST 0x4000 /* All features (except UNITTEST) */ -#define RD_KAFKA_FEATURE_ALL (0xffff & ~RD_KAFKA_FEATURE_UNITTEST) +#define RD_KAFKA_FEATURE_ALL (0xffff & ~RD_KAFKA_FEATURE_UNITTEST) -int rd_kafka_get_legacy_ApiVersions (const char *broker_version, - struct rd_kafka_ApiVersion **apisp, - size_t *api_cntp, const char *fallback); -int rd_kafka_ApiVersion_is_queryable (const char *broker_version); -void rd_kafka_ApiVersions_copy (const struct rd_kafka_ApiVersion *src, size_t src_cnt, - struct rd_kafka_ApiVersion **dstp, size_t *dst_cntp); -int rd_kafka_features_check (rd_kafka_broker_t *rkb, - struct rd_kafka_ApiVersion *broker_apis, - size_t broker_api_cnt); +int rd_kafka_get_legacy_ApiVersions(const char *broker_version, + struct rd_kafka_ApiVersion **apisp, + size_t *api_cntp, + const char *fallback); +int rd_kafka_ApiVersion_is_queryable(const char *broker_version); +void rd_kafka_ApiVersions_copy(const struct rd_kafka_ApiVersion *src, + 
size_t src_cnt, + struct rd_kafka_ApiVersion **dstp, + size_t *dst_cntp); +int rd_kafka_features_check(rd_kafka_broker_t *rkb, + struct rd_kafka_ApiVersion *broker_apis, + size_t broker_api_cnt); -const char *rd_kafka_features2str (int features); +const char *rd_kafka_features2str(int features); #endif /* _RDKAFKA_FEATURE_H_ */ diff --git a/src/rdkafka_header.c b/src/rdkafka_header.c index 08ca0aa743..98359b424c 100644 --- a/src/rdkafka_header.c +++ b/src/rdkafka_header.c @@ -31,14 +31,14 @@ -#define rd_kafka_header_destroy rd_free +#define rd_kafka_header_destroy rd_free -void rd_kafka_headers_destroy (rd_kafka_headers_t *hdrs) { +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs) { rd_list_destroy(&hdrs->rkhdrs_list); rd_free(hdrs); } -rd_kafka_headers_t *rd_kafka_headers_new (size_t initial_count) { +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count) { rd_kafka_headers_t *hdrs; hdrs = rd_malloc(sizeof(*hdrs)); @@ -49,18 +49,16 @@ rd_kafka_headers_t *rd_kafka_headers_new (size_t initial_count) { return hdrs; } -static void *rd_kafka_header_copy (const void *_src, void *opaque) { - rd_kafka_headers_t *hdrs = opaque; +static void *rd_kafka_header_copy(const void *_src, void *opaque) { + rd_kafka_headers_t *hdrs = opaque; const rd_kafka_header_t *src = (const rd_kafka_header_t *)_src; return (void *)rd_kafka_header_add( - hdrs, - src->rkhdr_name, src->rkhdr_name_size, - src->rkhdr_value, src->rkhdr_value_size); + hdrs, src->rkhdr_name, src->rkhdr_name_size, src->rkhdr_value, + src->rkhdr_value_size); } -rd_kafka_headers_t * -rd_kafka_headers_copy (const rd_kafka_headers_t *src) { +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src) { rd_kafka_headers_t *dst; dst = rd_malloc(sizeof(*dst)); @@ -75,10 +73,11 @@ rd_kafka_headers_copy (const rd_kafka_headers_t *src) { -rd_kafka_resp_err_t -rd_kafka_header_add (rd_kafka_headers_t *hdrs, - const char *name, ssize_t name_size, - const void *value, ssize_t value_size) { 
+rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, + const char *name, + ssize_t name_size, + const void *value, + ssize_t value_size) { rd_kafka_header_t *hdr; char varint_NameLen[RD_UVARINT_ENC_SIZEOF(int32_t)]; char varint_ValueLen[RD_UVARINT_ENC_SIZEOF(int32_t)]; @@ -97,7 +96,7 @@ rd_kafka_header_add (rd_kafka_headers_t *hdrs, hdr->rkhdr_name[name_size] = '\0'; if (likely(value != NULL)) { - hdr->rkhdr_value = hdr->rkhdr_name+name_size+1; + hdr->rkhdr_value = hdr->rkhdr_name + name_size + 1; memcpy((void *)hdr->rkhdr_value, value, value_size); hdr->rkhdr_value[value_size] = '\0'; hdr->rkhdr_value_size = value_size; @@ -110,12 +109,10 @@ rd_kafka_header_add (rd_kafka_headers_t *hdrs, /* Calculate serialized size of header */ hdr->rkhdr_ser_size = name_size + value_size; - hdr->rkhdr_ser_size += rd_uvarint_enc_i64(varint_NameLen, - sizeof(varint_NameLen), - name_size); - hdr->rkhdr_ser_size += rd_uvarint_enc_i64(varint_ValueLen, - sizeof(varint_ValueLen), - value_size); + hdr->rkhdr_ser_size += rd_uvarint_enc_i64( + varint_NameLen, sizeof(varint_NameLen), name_size); + hdr->rkhdr_ser_size += rd_uvarint_enc_i64( + varint_ValueLen, sizeof(varint_ValueLen), value_size); hdrs->rkhdrs_ser_size += hdr->rkhdr_ser_size; return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -125,15 +122,15 @@ rd_kafka_header_add (rd_kafka_headers_t *hdrs, /** * @brief header_t(name) to char * comparator */ -static int rd_kafka_header_cmp_str (void *_a, void *_b) { +static int rd_kafka_header_cmp_str(void *_a, void *_b) { const rd_kafka_header_t *a = _a; - const char *b = _b; + const char *b = _b; return strcmp(a->rkhdr_name, b); } -rd_kafka_resp_err_t rd_kafka_header_remove (rd_kafka_headers_t *hdrs, - const char *name) { +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, + const char *name) { size_t ser_size = 0; rd_kafka_header_t *hdr; int i; @@ -156,10 +153,10 @@ rd_kafka_resp_err_t rd_kafka_header_remove (rd_kafka_headers_t *hdrs, return RD_KAFKA_RESP_ERR_NO_ERROR; 
} -rd_kafka_resp_err_t -rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, - const char *name, - const void **valuep, size_t *sizep) { +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, + const char *name, + const void **valuep, + size_t *sizep) { const rd_kafka_header_t *hdr; int i; size_t name_size = strlen(name); @@ -168,7 +165,7 @@ rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, if (hdr->rkhdr_name_size == name_size && !strcmp(hdr->rkhdr_name, name)) { *valuep = hdr->rkhdr_value; - *sizep = hdr->rkhdr_value_size; + *sizep = hdr->rkhdr_value_size; return RD_KAFKA_RESP_ERR_NO_ERROR; } } @@ -177,21 +174,21 @@ rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, } -rd_kafka_resp_err_t -rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, - const char *name, - const void **valuep, size_t *sizep) { +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, + size_t idx, + const char *name, + const void **valuep, + size_t *sizep) { const rd_kafka_header_t *hdr; int i; - size_t mi = 0; /* index for matching names */ + size_t mi = 0; /* index for matching names */ size_t name_size = strlen(name); RD_LIST_FOREACH(hdr, &hdrs->rkhdrs_list, i) { if (hdr->rkhdr_name_size == name_size && - !strcmp(hdr->rkhdr_name, name) && - mi++ == idx) { + !strcmp(hdr->rkhdr_name, name) && mi++ == idx) { *valuep = hdr->rkhdr_value; - *sizep = hdr->rkhdr_value_size; + *sizep = hdr->rkhdr_value_size; return RD_KAFKA_RESP_ERR_NO_ERROR; } } @@ -200,10 +197,11 @@ rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, } -rd_kafka_resp_err_t -rd_kafka_header_get_all (const rd_kafka_headers_t *hdrs, size_t idx, - const char **namep, - const void **valuep, size_t *sizep) { +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, + size_t idx, + const char **namep, + const void **valuep, + size_t *sizep) { const rd_kafka_header_t *hdr; hdr = rd_list_elem(&hdrs->rkhdrs_list, (int)idx); diff --git 
a/src/rdkafka_header.h b/src/rdkafka_header.h index b8f14a32b9..bd6b0e9593 100644 --- a/src/rdkafka_header.h +++ b/src/rdkafka_header.h @@ -36,8 +36,8 @@ * with additional fields to keep track of the total on-wire size. */ struct rd_kafka_headers_s { - rd_list_t rkhdrs_list; /**< List of (rd_kafka_header_t *) */ - size_t rkhdrs_ser_size; /**< Total serialized size of headers */ + rd_list_t rkhdrs_list; /**< List of (rd_kafka_header_t *) */ + size_t rkhdrs_ser_size; /**< Total serialized size of headers */ }; @@ -56,11 +56,11 @@ typedef struct rd_kafka_header_s { size_t rkhdr_ser_size; /**< Serialized size */ size_t rkhdr_value_size; /**< Value length (without nul-term) */ size_t rkhdr_name_size; /**< Header name size (w/o nul-term) */ - char *rkhdr_value; /**< Header value (nul-terminated string but + char *rkhdr_value; /**< Header value (nul-terminated string but * considered binary). * Will be NULL for null values, else * points to rkhdr_name+.. */ - char rkhdr_name[1]; /**< Header name (nul-terminated string). + char rkhdr_name[1]; /**< Header name (nul-terminated string). * Followed by allocation for value+nul */ } rd_kafka_header_t; @@ -69,7 +69,7 @@ typedef struct rd_kafka_header_s { * @returns the serialized size for the headers */ static RD_INLINE RD_UNUSED size_t -rd_kafka_headers_serialized_size (const rd_kafka_headers_t *hdrs) { +rd_kafka_headers_serialized_size(const rd_kafka_headers_t *hdrs) { return hdrs->rkhdrs_ser_size; } diff --git a/src/rdkafka_idempotence.c b/src/rdkafka_idempotence.c index a2e9dad151..f79be76b95 100644 --- a/src/rdkafka_idempotence.c +++ b/src/rdkafka_idempotence.c @@ -47,17 +47,17 @@ * */ -static void rd_kafka_idemp_pid_timer_restart (rd_kafka_t *rk, - rd_bool_t immediate, - const char *reason); +static void rd_kafka_idemp_pid_timer_restart(rd_kafka_t *rk, + rd_bool_t immediate, + const char *reason); /** * @brief Set the producer's idempotence state. 
* @locks rd_kafka_wrlock() MUST be held */ -void rd_kafka_idemp_set_state (rd_kafka_t *rk, - rd_kafka_idemp_state_t new_state) { +void rd_kafka_idemp_set_state(rd_kafka_t *rk, + rd_kafka_idemp_state_t new_state) { if (rk->rk_eos.idemp_state == new_state) return; @@ -70,8 +70,7 @@ void rd_kafka_idemp_set_state (rd_kafka_t *rk, rd_kafka_dbg(rk, EOS, "IDEMPSTATE", "Denying state change %s -> %s since a " "fatal error has been raised", - rd_kafka_idemp_state2str(rk->rk_eos. - idemp_state), + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), rd_kafka_idemp_state2str(new_state)); rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR); return; @@ -79,11 +78,10 @@ void rd_kafka_idemp_set_state (rd_kafka_t *rk, rd_kafka_dbg(rk, EOS, "IDEMPSTATE", "Idempotent producer state change %s -> %s", - rd_kafka_idemp_state2str(rk->rk_eos. - idemp_state), + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), rd_kafka_idemp_state2str(new_state)); - rk->rk_eos.idemp_state = new_state; + rk->rk_eos.idemp_state = new_state; rk->rk_eos.ts_idemp_state = rd_clock(); /* Inform transaction manager of state change */ @@ -93,8 +91,6 @@ void rd_kafka_idemp_set_state (rd_kafka_t *rk, - - /** * @brief Find a usable broker suitable for acquiring Pid * or Coordinator query. @@ -103,10 +99,10 @@ void rd_kafka_idemp_set_state (rd_kafka_t *rk, * * @returns a broker with increased refcount, or NULL on error. */ -rd_kafka_broker_t * -rd_kafka_idemp_broker_any (rd_kafka_t *rk, - rd_kafka_resp_err_t *errp, - char *errstr, size_t errstr_size) { +rd_kafka_broker_t *rd_kafka_idemp_broker_any(rd_kafka_t *rk, + rd_kafka_resp_err_t *errp, + char *errstr, + size_t errstr_size) { rd_kafka_broker_t *rkb; int up_cnt; @@ -122,15 +118,17 @@ rd_kafka_idemp_broker_any (rd_kafka_t *rk, "%s not supported by " "any of the %d connected broker(s): requires " "Apache Kafka broker version >= 0.11.0", - rd_kafka_is_transactional(rk) ? - "Transactions" : "Idempotent producer", + rd_kafka_is_transactional(rk) + ? 
"Transactions" + : "Idempotent producer", up_cnt); } else { *errp = RD_KAFKA_RESP_ERR__TRANSPORT; rd_snprintf(errstr, errstr_size, "No brokers available for %s (%d broker(s) known)", - rd_kafka_is_transactional(rk) ? - "Transactions" : "Idempotent producer", + rd_kafka_is_transactional(rk) + ? "Transactions" + : "Idempotent producer", rd_atomic32_get(&rk->rk_broker_cnt)); } @@ -152,14 +150,13 @@ rd_kafka_idemp_broker_any (rd_kafka_t *rk, * @locks rd_kafka_wrlock() MUST be held * @locality rdkafka main thread */ -rd_bool_t rd_kafka_idemp_check_error (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - const char *errstr, - rd_bool_t is_fatal) { +rd_bool_t rd_kafka_idemp_check_error(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *errstr, + rd_bool_t is_fatal) { const char *preface = ""; - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE: case RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT: case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED: @@ -170,7 +167,7 @@ rd_bool_t rd_kafka_idemp_check_error (rd_kafka_t *rk, case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH: is_fatal = rd_true; /* Normalize error */ - err = RD_KAFKA_RESP_ERR__FENCED; + err = RD_KAFKA_RESP_ERR__FENCED; preface = "Producer fenced by newer instance: "; break; @@ -182,11 +179,11 @@ rd_bool_t rd_kafka_idemp_check_error (rd_kafka_t *rk, return rd_false; if (rd_kafka_is_transactional(rk)) - rd_kafka_txn_set_fatal_error(rk, RD_DONT_LOCK, - err, "%s%s", preface, errstr); + rd_kafka_txn_set_fatal_error(rk, RD_DONT_LOCK, err, "%s%s", + preface, errstr); else - rd_kafka_set_fatal_error0(rk, RD_DONT_LOCK, - err, "%s%s", preface, errstr); + rd_kafka_set_fatal_error0(rk, RD_DONT_LOCK, err, "%s%s", + preface, errstr); rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR); @@ -202,7 +199,7 @@ rd_bool_t rd_kafka_idemp_check_error (rd_kafka_t *rk, * @locality rdkafka main thread * @locks rd_kafka_wrlock() MUST be held. 
*/ -void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk) { +void rd_kafka_idemp_pid_fsm(rd_kafka_t *rk) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_broker_t *rkb; @@ -213,9 +210,8 @@ void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk) { if (unlikely(rd_kafka_fatal_error_code(rk))) return; - redo: - switch (rk->rk_eos.idemp_state) - { +redo: + switch (rk->rk_eos.idemp_state) { case RD_KAFKA_IDEMP_STATE_INIT: case RD_KAFKA_IDEMP_STATE_TERM: case RD_KAFKA_IDEMP_STATE_FATAL_ERROR: @@ -230,7 +226,7 @@ void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk) { if (!rd_kafka_is_transactional(rk) || rk->rk_eos.txn_curr_coord) { rd_kafka_idemp_set_state( - rk, RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT); + rk, RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT); goto redo; } @@ -254,12 +250,11 @@ void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk) { rd_kafka_broker_keep(rkb); } else { - rkb = rd_kafka_idemp_broker_any(rk, &err, - errstr, sizeof(errstr)); + rkb = rd_kafka_idemp_broker_any(rk, &err, errstr, + sizeof(errstr)); - if (!rkb && - rd_kafka_idemp_check_error(rk, err, errstr, - rd_false)) + if (!rkb && rd_kafka_idemp_check_error(rk, err, errstr, + rd_false)) return; /* Fatal error */ } @@ -267,10 +262,9 @@ void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk) { /* The coordinator broker monitor will re-trigger * the fsm sooner if txn_coord has a state change, * else rely on the timer to retry. */ - rd_kafka_idemp_pid_timer_restart(rk, rd_false, - rkb ? - "No broker available" : - "Coordinator not up"); + rd_kafka_idemp_pid_timer_restart( + rk, rd_false, + rkb ? "No broker available" : "Coordinator not up"); if (rkb) rd_kafka_broker_destroy(rkb); @@ -299,14 +293,13 @@ void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk) { } err = rd_kafka_InitProducerIdRequest( - rkb, - rk->rk_conf.eos.transactional_id, - rk->rk_conf.eos.transaction_timeout_ms, - rd_kafka_pid_valid(rk->rk_eos.pid) ? 
- &rk->rk_eos.pid : NULL, - errstr+err_of, sizeof(errstr)-err_of, - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_handle_InitProducerId, NULL); + rkb, rk->rk_conf.eos.transactional_id, + rk->rk_conf.eos.transaction_timeout_ms, + rd_kafka_pid_valid(rk->rk_eos.pid) ? &rk->rk_eos.pid + : NULL, + errstr + err_of, sizeof(errstr) - err_of, + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_handle_InitProducerId, NULL); if (err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE && rd_kafka_pid_valid(rk->rk_eos.pid)) @@ -315,13 +308,9 @@ void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk) { rd_rkb_dbg(rkb, EOS, "GETPID", "Acquiring ProducerId"); err = rd_kafka_InitProducerIdRequest( - rkb, - NULL, - -1, - NULL, - errstr, sizeof(errstr), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_handle_InitProducerId, NULL); + rkb, NULL, -1, NULL, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_handle_InitProducerId, NULL); } rd_kafka_broker_destroy(rkb); @@ -329,7 +318,8 @@ void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk) { if (err) { rd_rkb_dbg(rkb, EOS, "GETPID", "Can't acquire ProducerId from " - "this broker: %s", errstr); + "this broker: %s", + errstr); if (rd_kafka_idemp_check_error(rk, err, errstr, is_fatal)) @@ -372,7 +362,7 @@ void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk) { * @locality rdkafka main thread * @locks none */ -static void rd_kafka_idemp_pid_timer_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_idemp_pid_timer_cb(rd_kafka_timers_t *rkts, void *arg) { rd_kafka_t *rk = arg; rd_kafka_wrlock(rk); @@ -389,14 +379,14 @@ static void rd_kafka_idemp_pid_timer_cb (rd_kafka_timers_t *rkts, void *arg) { * @locality any * @locks none */ -static void rd_kafka_idemp_pid_timer_restart (rd_kafka_t *rk, - rd_bool_t immediate, - const char *reason) { +static void rd_kafka_idemp_pid_timer_restart(rd_kafka_t *rk, + rd_bool_t immediate, + const char *reason) { rd_kafka_dbg(rk, EOS, "TXN", "Starting PID FSM timer%s: %s", immediate ? 
" (fire immediately)" : "", reason); - rd_kafka_timer_start_oneshot(&rk->rk_timers, - &rk->rk_eos.pid_tmr, rd_true, - 1000 * (immediate ? 1 : 500/*500ms*/), + rd_kafka_timer_start_oneshot(&rk->rk_timers, &rk->rk_eos.pid_tmr, + rd_true, + 1000 * (immediate ? 1 : 500 /*500ms*/), rd_kafka_idemp_pid_timer_cb, rk); } @@ -407,13 +397,13 @@ static void rd_kafka_idemp_pid_timer_restart (rd_kafka_t *rk, * @locality rdkafka main thread * @locks none */ -void rd_kafka_idemp_request_pid_failed (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err) { +void rd_kafka_idemp_request_pid_failed(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err) { rd_kafka_t *rk = rkb->rkb_rk; char errstr[512]; - rd_rkb_dbg(rkb, EOS, "GETPID", - "Failed to acquire PID: %s", rd_kafka_err2str(err)); + rd_rkb_dbg(rkb, EOS, "GETPID", "Failed to acquire PID: %s", + rd_kafka_err2str(err)); if (err == RD_KAFKA_RESP_ERR__DESTROY) return; /* Ignore */ @@ -422,8 +412,8 @@ void rd_kafka_idemp_request_pid_failed (rd_kafka_broker_t *rkb, rd_snprintf(errstr, sizeof(errstr), "Failed to acquire %s PID from broker %s: %s", - rd_kafka_is_transactional(rk) ? - "transactional" : "idempotence", + rd_kafka_is_transactional(rk) ? 
"transactional" + : "idempotence", rd_kafka_broker_name(rkb), rd_kafka_err2str(err)); rd_kafka_wrlock(rk); @@ -459,8 +449,8 @@ void rd_kafka_idemp_request_pid_failed (rd_kafka_broker_t *rkb, * @locality rdkafka main thread * @locks none */ -void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, - const rd_kafka_pid_t pid) { +void rd_kafka_idemp_pid_update(rd_kafka_broker_t *rkb, + const rd_kafka_pid_t pid) { rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_wrlock(rk); @@ -477,7 +467,7 @@ void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, if (!rd_kafka_pid_valid(pid)) { rd_kafka_wrunlock(rk); rd_rkb_log(rkb, LOG_WARNING, "GETPID", - "Acquired invalid PID{%"PRId64",%hd}: ignoring", + "Acquired invalid PID{%" PRId64 ",%hd}: ignoring", pid.id, pid.epoch); rd_kafka_idemp_request_pid_failed(rkb, RD_KAFKA_RESP_ERR__BAD_MSG); @@ -485,13 +475,12 @@ void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, } if (rd_kafka_pid_valid(rk->rk_eos.pid)) - rd_kafka_dbg(rk, EOS, "GETPID", - "Acquired %s (previous %s)", + rd_kafka_dbg(rk, EOS, "GETPID", "Acquired %s (previous %s)", rd_kafka_pid2str(pid), rd_kafka_pid2str(rk->rk_eos.pid)); else - rd_kafka_dbg(rk, EOS, "GETPID", - "Acquired %s", rd_kafka_pid2str(pid)); + rd_kafka_dbg(rk, EOS, "GETPID", "Acquired %s", + rd_kafka_pid2str(pid)); rk->rk_eos.pid = pid; rk->rk_eos.epoch_cnt++; @@ -514,8 +503,8 @@ void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, * @locality any * @locks none */ -static void rd_kafka_idemp_drain_done (rd_kafka_t *rk) { - rd_bool_t restart_tmr = rd_false; +static void rd_kafka_idemp_drain_done(rd_kafka_t *rk) { + rd_bool_t restart_tmr = rd_false; rd_bool_t wakeup_brokers = rd_false; rd_kafka_wrlock(rk); @@ -560,7 +549,6 @@ static void rd_kafka_idemp_drain_done (rd_kafka_t *rk) { * that were waiting for a Producer ID). 
*/ if (wakeup_brokers) rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT); - } /** @@ -570,7 +558,7 @@ static void rd_kafka_idemp_drain_done (rd_kafka_t *rk) { * @locality any * @locks none */ -static RD_INLINE void rd_kafka_idemp_check_drain_done (rd_kafka_t *rk) { +static RD_INLINE void rd_kafka_idemp_check_drain_done(rd_kafka_t *rk) { if (rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt) == 0) rd_kafka_idemp_drain_done(rk); } @@ -585,14 +573,13 @@ static RD_INLINE void rd_kafka_idemp_check_drain_done (rd_kafka_t *rk) { * @locality any * @locks none */ -void rd_kafka_idemp_drain_reset (rd_kafka_t *rk, const char *reason) { +void rd_kafka_idemp_drain_reset(rd_kafka_t *rk, const char *reason) { rd_kafka_wrlock(rk); rd_kafka_dbg(rk, EOS, "DRAIN", "Beginning partition drain for %s reset " "for %d partition(s) with in-flight requests: %s", rd_kafka_pid2str(rk->rk_eos.pid), - rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), - reason); + rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), reason); rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_RESET); rd_kafka_wrunlock(rk); @@ -613,8 +600,10 @@ void rd_kafka_idemp_drain_reset (rd_kafka_t *rk, const char *reason) { * @locality any * @locks none */ -void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *fmt, ...) { +void rd_kafka_idemp_drain_epoch_bump(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *fmt, + ...) 
{ va_list ap; char buf[256]; @@ -646,13 +635,12 @@ void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, rd_kafka_resp_err_t err, * @locks toppar_lock MUST be held * @locality broker thread (leader or not) */ -void rd_kafka_idemp_drain_toppar (rd_kafka_toppar_t *rktp, - const char *reason) { +void rd_kafka_idemp_drain_toppar(rd_kafka_toppar_t *rktp, const char *reason) { if (rktp->rktp_eos.wait_drain) return; - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, EOS|RD_KAFKA_DBG_TOPIC, "DRAIN", - "%.*s [%"PRId32"] beginning partition drain: %s", + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, EOS | RD_KAFKA_DBG_TOPIC, "DRAIN", + "%.*s [%" PRId32 "] beginning partition drain: %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, reason); rktp->rktp_eos.wait_drain = rd_true; @@ -665,8 +653,8 @@ void rd_kafka_idemp_drain_toppar (rd_kafka_toppar_t *rktp, * @locality any * @locks none */ -void rd_kafka_idemp_inflight_toppar_sub (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp) { +void rd_kafka_idemp_inflight_toppar_sub(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp) { int r = rd_atomic32_sub(&rk->rk_eos.inflight_toppar_cnt, 1); if (r == 0) { @@ -686,8 +674,8 @@ void rd_kafka_idemp_inflight_toppar_sub (rd_kafka_t *rk, * @locality toppar handler thread * @locks none */ -void rd_kafka_idemp_inflight_toppar_add (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp) { +void rd_kafka_idemp_inflight_toppar_add(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp) { rd_atomic32_add(&rk->rk_eos.inflight_toppar_cnt, 1); } @@ -699,7 +687,7 @@ void rd_kafka_idemp_inflight_toppar_add (rd_kafka_t *rk, * @locality rdkafka main thread * @locks none */ -void rd_kafka_idemp_start (rd_kafka_t *rk, rd_bool_t immediate) { +void rd_kafka_idemp_start(rd_kafka_t *rk, rd_bool_t immediate) { if (rd_kafka_terminating(rk)) return; @@ -721,7 +709,7 @@ void rd_kafka_idemp_start (rd_kafka_t *rk, rd_bool_t immediate) { * @locality rdkafka main thread * @locks none / not needed from rd_kafka_new() */ -void rd_kafka_idemp_init (rd_kafka_t 
*rk) { +void rd_kafka_idemp_init(rd_kafka_t *rk) { rd_assert(thrd_is_current(rk->rk_thread)); rd_atomic32_init(&rk->rk_eos.inflight_toppar_cnt, 0); @@ -737,7 +725,7 @@ void rd_kafka_idemp_init (rd_kafka_t *rk) { * so just set the state to indicate that we want to * acquire a PID as soon as possible and start * the timer. */ - rd_kafka_idemp_start(rk, rd_false/*non-immediate*/); + rd_kafka_idemp_start(rk, rd_false /*non-immediate*/); } @@ -747,7 +735,7 @@ void rd_kafka_idemp_init (rd_kafka_t *rk) { * @locality rdkafka main thread * @locks rd_kafka_wrlock() MUST be held */ -void rd_kafka_idemp_term (rd_kafka_t *rk) { +void rd_kafka_idemp_term(rd_kafka_t *rk) { rd_assert(thrd_is_current(rk->rk_thread)); rd_kafka_wrlock(rk); diff --git a/src/rdkafka_idempotence.h b/src/rdkafka_idempotence.h index 8be8ae75dd..814e567814 100644 --- a/src/rdkafka_idempotence.h +++ b/src/rdkafka_idempotence.h @@ -35,7 +35,7 @@ * @define The broker maintains a window of the 5 last Produce requests * for a partition to be able to de-deduplicate resends. 
*/ -#define RD_KAFKA_IDEMP_MAX_INFLIGHT 5 +#define RD_KAFKA_IDEMP_MAX_INFLIGHT 5 #define RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "5" /* For printouts */ /** @@ -49,7 +49,7 @@ * @locks none */ static RD_UNUSED RD_INLINE rd_kafka_pid_t -rd_kafka_idemp_get_pid0 (rd_kafka_t *rk, rd_bool_t do_lock) { +rd_kafka_idemp_get_pid0(rd_kafka_t *rk, rd_bool_t do_lock) { rd_kafka_pid_t pid; if (do_lock) @@ -64,34 +64,34 @@ rd_kafka_idemp_get_pid0 (rd_kafka_t *rk, rd_bool_t do_lock) { return pid; } -#define rd_kafka_idemp_get_pid(rk) rd_kafka_idemp_get_pid0(rk,rd_true/*lock*/) - -void rd_kafka_idemp_set_state (rd_kafka_t *rk, - rd_kafka_idemp_state_t new_state); -void rd_kafka_idemp_request_pid_failed (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err); -void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, - const rd_kafka_pid_t pid); -void rd_kafka_idemp_pid_fsm (rd_kafka_t *rk); -void rd_kafka_idemp_drain_reset (rd_kafka_t *rk, const char *reason); -void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *fmt, ...) 
- RD_FORMAT(printf, 3, 4); -void rd_kafka_idemp_drain_toppar (rd_kafka_toppar_t *rktp, const char *reason); -void rd_kafka_idemp_inflight_toppar_sub (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp); -void rd_kafka_idemp_inflight_toppar_add (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp); - -rd_kafka_broker_t * -rd_kafka_idemp_broker_any (rd_kafka_t *rk, - rd_kafka_resp_err_t *errp, - char *errstr, size_t errstr_size); - -rd_bool_t rd_kafka_idemp_check_error (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - const char *errstr, - rd_bool_t is_fatal); +#define rd_kafka_idemp_get_pid(rk) rd_kafka_idemp_get_pid0(rk, rd_true /*lock*/) + +void rd_kafka_idemp_set_state(rd_kafka_t *rk, rd_kafka_idemp_state_t new_state); +void rd_kafka_idemp_request_pid_failed(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err); +void rd_kafka_idemp_pid_update(rd_kafka_broker_t *rkb, + const rd_kafka_pid_t pid); +void rd_kafka_idemp_pid_fsm(rd_kafka_t *rk); +void rd_kafka_idemp_drain_reset(rd_kafka_t *rk, const char *reason); +void rd_kafka_idemp_drain_epoch_bump(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 3, 4); +void rd_kafka_idemp_drain_toppar(rd_kafka_toppar_t *rktp, const char *reason); +void rd_kafka_idemp_inflight_toppar_sub(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp); +void rd_kafka_idemp_inflight_toppar_add(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp); + +rd_kafka_broker_t *rd_kafka_idemp_broker_any(rd_kafka_t *rk, + rd_kafka_resp_err_t *errp, + char *errstr, + size_t errstr_size); + +rd_bool_t rd_kafka_idemp_check_error(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *errstr, + rd_bool_t is_fatal); /** @@ -114,17 +114,18 @@ rd_bool_t rd_kafka_idemp_check_error (rd_kafka_t *rk, * @locality any thread * @locks none */ -#define rd_kafka_idemp_set_fatal_error(RK,ERR,...) 
do { \ - if (rd_kafka_is_transactional(RK)) \ - rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, ERR, \ - __VA_ARGS__); \ - else \ - rd_kafka_set_fatal_error(RK, ERR, __VA_ARGS__); \ +#define rd_kafka_idemp_set_fatal_error(RK, ERR, ...) \ + do { \ + if (rd_kafka_is_transactional(RK)) \ + rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, ERR, \ + __VA_ARGS__); \ + else \ + rd_kafka_set_fatal_error(RK, ERR, __VA_ARGS__); \ } while (0) -void rd_kafka_idemp_start (rd_kafka_t *rk, rd_bool_t immediate); -void rd_kafka_idemp_init (rd_kafka_t *rk); -void rd_kafka_idemp_term (rd_kafka_t *rk); +void rd_kafka_idemp_start(rd_kafka_t *rk, rd_bool_t immediate); +void rd_kafka_idemp_init(rd_kafka_t *rk); +void rd_kafka_idemp_term(rd_kafka_t *rk); #endif /* _RD_KAFKA_IDEMPOTENCE_H_ */ diff --git a/src/rdkafka_int.h b/src/rdkafka_int.h index 64ba5ea63a..f46e066ad6 100644 --- a/src/rdkafka_int.h +++ b/src/rdkafka_int.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -30,7 +30,7 @@ #define _RDKAFKA_INT_H_ #ifndef _WIN32 -#define _GNU_SOURCE /* for strndup() */ +#define _GNU_SOURCE /* for strndup() */ #endif #ifdef _MSC_VER @@ -57,18 +57,19 @@ typedef int mode_t; - -#define rd_kafka_assert(rk, cond) do { \ - if (unlikely(!(cond))) \ - rd_kafka_crash(__FILE__,__LINE__, __FUNCTION__, \ - (rk), "assert: " # cond); \ +#define rd_kafka_assert(rk, cond) \ + do { \ + if (unlikely(!(cond))) \ + rd_kafka_crash(__FILE__, __LINE__, __FUNCTION__, (rk), \ + "assert: " #cond); \ } while (0) -void -RD_NORETURN -rd_kafka_crash (const char *file, int line, const char *function, - rd_kafka_t *rk, const char *reason); +void RD_NORETURN rd_kafka_crash(const char *file, + int line, + const char *function, + rd_kafka_t *rk, + const char *reason); /* Forward declarations */ @@ -101,13 +102,12 @@ typedef struct rd_kafka_lwtopic_s rd_kafka_lwtopic_t; /** * Protocol level sanity */ -#define RD_KAFKAP_BROKERS_MAX 10000 -#define RD_KAFKAP_TOPICS_MAX 1000000 -#define RD_KAFKAP_PARTITIONS_MAX 100000 - +#define RD_KAFKAP_BROKERS_MAX 10000 +#define RD_KAFKAP_TOPICS_MAX 1000000 +#define RD_KAFKAP_PARTITIONS_MAX 100000 -#define RD_KAFKA_OFFSET_IS_LOGICAL(OFF) ((OFF) < 0) +#define RD_KAFKA_OFFSET_IS_LOGICAL(OFF) ((OFF) < 0) @@ -115,46 +115,37 @@ typedef struct rd_kafka_lwtopic_s rd_kafka_lwtopic_t; * @enum Idempotent Producer state */ typedef enum { - RD_KAFKA_IDEMP_STATE_INIT, /**< Initial state */ - RD_KAFKA_IDEMP_STATE_TERM, /**< Instance is terminating */ + RD_KAFKA_IDEMP_STATE_INIT, /**< Initial state */ + RD_KAFKA_IDEMP_STATE_TERM, /**< Instance is terminating */ RD_KAFKA_IDEMP_STATE_FATAL_ERROR, /**< A fatal error has been raised */ - RD_KAFKA_IDEMP_STATE_REQ_PID, /**< Request new PID */ + RD_KAFKA_IDEMP_STATE_REQ_PID, /**< Request new PID */ RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT, /**< Waiting for coordinator to * become available. 
*/ - RD_KAFKA_IDEMP_STATE_WAIT_PID, /**< PID requested, waiting for reply */ - RD_KAFKA_IDEMP_STATE_ASSIGNED, /**< New PID assigned */ + RD_KAFKA_IDEMP_STATE_WAIT_PID, /**< PID requested, waiting for reply */ + RD_KAFKA_IDEMP_STATE_ASSIGNED, /**< New PID assigned */ RD_KAFKA_IDEMP_STATE_DRAIN_RESET, /**< Wait for outstanding * ProduceRequests to finish * before resetting and * re-requesting a new PID. */ - RD_KAFKA_IDEMP_STATE_DRAIN_BUMP, /**< Wait for outstanding - * ProduceRequests to finish - * before bumping the current - * epoch. */ + RD_KAFKA_IDEMP_STATE_DRAIN_BUMP, /**< Wait for outstanding + * ProduceRequests to finish + * before bumping the current + * epoch. */ } rd_kafka_idemp_state_t; /** * @returns the idemp_state_t string representation */ static RD_UNUSED const char * -rd_kafka_idemp_state2str (rd_kafka_idemp_state_t state) { +rd_kafka_idemp_state2str(rd_kafka_idemp_state_t state) { static const char *names[] = { - "Init", - "Terminate", - "FatalError", - "RequestPID", - "WaitTransport", - "WaitPID", - "Assigned", - "DrainReset", - "DrainBump" - }; + "Init", "Terminate", "FatalError", "RequestPID", "WaitTransport", + "WaitPID", "Assigned", "DrainReset", "DrainBump"}; return names[state]; } - /** * @enum Transactional Producer state */ @@ -194,127 +185,126 @@ typedef enum { * @returns the txn_state_t string representation */ static RD_UNUSED const char * -rd_kafka_txn_state2str (rd_kafka_txn_state_t state) { - static const char *names[] = { - "Init", - "WaitPID", - "ReadyNotAcked", - "Ready", - "InTransaction", - "BeginCommit", - "CommittingTransaction", - "CommitNotAcked", - "AbortingTransaction", - "AbortedNotAcked", - "AbortableError", - "FatalError" - }; +rd_kafka_txn_state2str(rd_kafka_txn_state_t state) { + static const char *names[] = {"Init", + "WaitPID", + "ReadyNotAcked", + "Ready", + "InTransaction", + "BeginCommit", + "CommittingTransaction", + "CommitNotAcked", + "AbortingTransaction", + "AbortedNotAcked", + "AbortableError", + 
"FatalError"}; return names[state]; } - - /** * Kafka handle, internal representation of the application's rd_kafka_t. */ struct rd_kafka_s { - rd_kafka_q_t *rk_rep; /* kafka -> application reply queue */ - rd_kafka_q_t *rk_ops; /* any -> rdkafka main thread ops */ + rd_kafka_q_t *rk_rep; /* kafka -> application reply queue */ + rd_kafka_q_t *rk_ops; /* any -> rdkafka main thread ops */ - TAILQ_HEAD(, rd_kafka_broker_s) rk_brokers; - rd_list_t rk_broker_by_id; /* Fast id lookups. */ - rd_atomic32_t rk_broker_cnt; + TAILQ_HEAD(, rd_kafka_broker_s) rk_brokers; + rd_list_t rk_broker_by_id; /* Fast id lookups. */ + rd_atomic32_t rk_broker_cnt; /**< Number of brokers in state >= UP */ - rd_atomic32_t rk_broker_up_cnt; + rd_atomic32_t rk_broker_up_cnt; /**< Number of logical brokers in state >= UP, this is a sub-set * of rk_broker_up_cnt. */ - rd_atomic32_t rk_logical_broker_up_cnt; + rd_atomic32_t rk_logical_broker_up_cnt; /**< Number of brokers that are down, only includes brokers * that have had at least one connection attempt. */ - rd_atomic32_t rk_broker_down_cnt; + rd_atomic32_t rk_broker_down_cnt; /**< Logical brokers currently without an address. * Used for calculating ERR__ALL_BROKERS_DOWN. */ - rd_atomic32_t rk_broker_addrless_cnt; + rd_atomic32_t rk_broker_addrless_cnt; - mtx_t rk_internal_rkb_lock; - rd_kafka_broker_t *rk_internal_rkb; + mtx_t rk_internal_rkb_lock; + rd_kafka_broker_t *rk_internal_rkb; - /* Broadcasting of broker state changes to wake up - * functions waiting for a state change. */ - cnd_t rk_broker_state_change_cnd; - mtx_t rk_broker_state_change_lock; - int rk_broker_state_change_version; + /* Broadcasting of broker state changes to wake up + * functions waiting for a state change. */ + cnd_t rk_broker_state_change_cnd; + mtx_t rk_broker_state_change_lock; + int rk_broker_state_change_version; /* List of (rd_kafka_enq_once_t*) objects waiting for broker * state changes. Protected by rk_broker_state_change_lock. 
*/ rd_list_t rk_broker_state_change_waiters; /**< (rd_kafka_enq_once_t*) */ - TAILQ_HEAD(, rd_kafka_topic_s) rk_topics; - int rk_topic_cnt; + TAILQ_HEAD(, rd_kafka_topic_s) rk_topics; + int rk_topic_cnt; struct rd_kafka_cgrp_s *rk_cgrp; - rd_kafka_conf_t rk_conf; - rd_kafka_q_t *rk_logq; /* Log queue if `log.queue` set */ - char rk_name[128]; - rd_kafkap_str_t *rk_client_id; - rd_kafkap_str_t *rk_group_id; /* Consumer group id */ - - rd_atomic32_t rk_terminate; /**< Set to RD_KAFKA_DESTROY_F_.. - * flags instance - * is being destroyed. - * The value set is the - * destroy flags from - * rd_kafka_destroy*() and - * the two internal flags shown - * below. - * - * Order: - * 1. user_flags | .._F_DESTROY_CALLED - * is set in rd_kafka_destroy*(). - * 2. consumer_close() is called - * for consumers. - * 3. .._F_TERMINATE is set to - * signal all background threads - * to terminate. - */ - -#define RD_KAFKA_DESTROY_F_TERMINATE 0x1 /**< Internal flag to make sure - * rk_terminate is set to non-zero - * value even if user passed - * no destroy flags. */ -#define RD_KAFKA_DESTROY_F_DESTROY_CALLED 0x2 /**< Application has called - * ..destroy*() and we've - * begun the termination - * process. - * This flag is needed to avoid - * rk_terminate from being - * 0 when destroy_flags() - * is called with flags=0 - * and prior to _F_TERMINATE - * has been set. */ -#define RD_KAFKA_DESTROY_F_IMMEDIATE 0x4 /**< Immediate non-blocking - * destruction without waiting - * for all resources - * to be cleaned up. - * WARNING: Memory and resource - * leaks possible. - * This flag automatically sets - * .._NO_CONSUMER_CLOSE. */ - - - rwlock_t rk_lock; - rd_kafka_type_t rk_type; - struct timeval rk_tv_state_change; - - rd_atomic64_t rk_ts_last_poll; /**< Timestamp of last application - * consumer_poll() call - * (or equivalent). - * Used to enforce - * max.poll.interval.ms. - * Only relevant for consumer. 
*/ + rd_kafka_conf_t rk_conf; + rd_kafka_q_t *rk_logq; /* Log queue if `log.queue` set */ + char rk_name[128]; + rd_kafkap_str_t *rk_client_id; + rd_kafkap_str_t *rk_group_id; /* Consumer group id */ + + rd_atomic32_t rk_terminate; /**< Set to RD_KAFKA_DESTROY_F_.. + * flags instance + * is being destroyed. + * The value set is the + * destroy flags from + * rd_kafka_destroy*() and + * the two internal flags shown + * below. + * + * Order: + * 1. user_flags | .._F_DESTROY_CALLED + * is set in rd_kafka_destroy*(). + * 2. consumer_close() is called + * for consumers. + * 3. .._F_TERMINATE is set to + * signal all background threads + * to terminate. + */ + +#define RD_KAFKA_DESTROY_F_TERMINATE \ + 0x1 /**< Internal flag to make sure \ + * rk_terminate is set to non-zero \ + * value even if user passed \ + * no destroy flags. */ +#define RD_KAFKA_DESTROY_F_DESTROY_CALLED \ + 0x2 /**< Application has called \ + * ..destroy*() and we've \ + * begun the termination \ + * process. \ + * This flag is needed to avoid \ + * rk_terminate from being \ + * 0 when destroy_flags() \ + * is called with flags=0 \ + * and prior to _F_TERMINATE \ + * has been set. */ +#define RD_KAFKA_DESTROY_F_IMMEDIATE \ + 0x4 /**< Immediate non-blocking \ + * destruction without waiting \ + * for all resources \ + * to be cleaned up. \ + * WARNING: Memory and resource \ + * leaks possible. \ + * This flag automatically sets \ + * .._NO_CONSUMER_CLOSE. */ + + + rwlock_t rk_lock; + rd_kafka_type_t rk_type; + struct timeval rk_tv_state_change; + + rd_atomic64_t rk_ts_last_poll; /**< Timestamp of last application + * consumer_poll() call + * (or equivalent). + * Used to enforce + * max.poll.interval.ms. + * Only relevant for consumer. */ /* First fatal error. */ struct { rd_atomic32_t err; /**< rd_kafka_resp_err_t */ @@ -323,32 +313,31 @@ struct rd_kafka_s { * the first one is stored. */ } rk_fatal; - rd_atomic32_t rk_last_throttle; /* Last throttle_time_ms value - * from broker. 
*/ + rd_atomic32_t rk_last_throttle; /* Last throttle_time_ms value + * from broker. */ /* Locks: rd_kafka_*lock() */ - rd_ts_t rk_ts_metadata; /* Timestamp of most recent - * metadata. */ + rd_ts_t rk_ts_metadata; /* Timestamp of most recent + * metadata. */ - struct rd_kafka_metadata *rk_full_metadata; /* Last full metadata. */ - rd_ts_t rk_ts_full_metadata; /* Timesstamp of .. */ + struct rd_kafka_metadata *rk_full_metadata; /* Last full metadata. */ + rd_ts_t rk_ts_full_metadata; /* Timesstamp of .. */ struct rd_kafka_metadata_cache rk_metadata_cache; /* Metadata cache */ - char *rk_clusterid; /* ClusterId from metadata */ - int32_t rk_controllerid; /* ControllerId from metadata */ + char *rk_clusterid; /* ClusterId from metadata */ + int32_t rk_controllerid; /* ControllerId from metadata */ /**< Producer: Delivery report mode */ - enum { - RD_KAFKA_DR_MODE_NONE, /**< No delivery reports */ - RD_KAFKA_DR_MODE_CB, /**< Delivery reports through callback */ - RD_KAFKA_DR_MODE_EVENT, /**< Delivery reports through event API*/ + enum { RD_KAFKA_DR_MODE_NONE, /**< No delivery reports */ + RD_KAFKA_DR_MODE_CB, /**< Delivery reports through callback */ + RD_KAFKA_DR_MODE_EVENT, /**< Delivery reports through event API*/ } rk_drmode; /* Simple consumer count: * >0: Running in legacy / Simple Consumer mode, * 0: No consumers running * <0: Running in High level consumer mode */ - rd_atomic32_t rk_simple_cnt; + rd_atomic32_t rk_simple_cnt; /** * Exactly Once Semantics and Idempotent Producer @@ -361,13 +350,13 @@ struct rd_kafka_s { */ rd_kafka_idemp_state_t idemp_state; /**< Idempotent Producer * state */ - rd_ts_t ts_idemp_state;/**< Last state change */ - rd_kafka_pid_t pid; /**< Current Producer ID and Epoch */ - int epoch_cnt; /**< Number of times pid/epoch changed */ + rd_ts_t ts_idemp_state; /**< Last state change */ + rd_kafka_pid_t pid; /**< Current Producer ID and Epoch */ + int epoch_cnt; /**< Number of times pid/epoch changed */ rd_atomic32_t inflight_toppar_cnt; 
/**< Current number of * toppars with inflight * requests. */ - rd_kafka_timer_t pid_tmr; /**< PID FSM timer */ + rd_kafka_timer_t pid_tmr; /**< PID FSM timer */ /* * Transactions @@ -376,17 +365,17 @@ struct rd_kafka_s { * unless a specific lock is mentioned in the doc string. * */ - rd_atomic32_t txn_may_enq; /**< Transaction state allows - * application to enqueue - * (produce) messages. */ + rd_atomic32_t txn_may_enq; /**< Transaction state allows + * application to enqueue + * (produce) messages. */ rd_kafkap_str_t *transactional_id; /**< transactional.id */ - rd_kafka_txn_state_t txn_state; /**< Transactional state. - * @locks rk_lock */ - rd_ts_t ts_txn_state; /**< Last state change. - * @locks rk_lock */ - rd_kafka_broker_t *txn_coord; /**< Transaction coordinator, - * this is a logical broker.*/ + rd_kafka_txn_state_t txn_state; /**< Transactional state. + * @locks rk_lock */ + rd_ts_t ts_txn_state; /**< Last state change. + * @locks rk_lock */ + rd_kafka_broker_t *txn_coord; /**< Transaction coordinator, + * this is a logical broker.*/ rd_kafka_broker_t *txn_curr_coord; /**< Current actual coord * broker. * This is only used to @@ -413,29 +402,33 @@ struct rd_kafka_s { rd_kafka_timer_t tmr; /**< Timeout timer, the timeout * is specified by the app. */ - int flags; /**< Flags */ -#define RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT 0x1 /**< Set state to abortable - * error on timeout, - * i.e., fail the txn, - * and set txn_requires_abort - * on the returned error. - */ -#define RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT 0x2 /**< Set retriable flag - * on the error - * on timeout. */ -#define RD_KAFKA_TXN_CURR_API_F_FOR_REUSE 0x4 /**< Do not reset the - * current API when it - * completes successfully - * Instead keep it alive - * and allow reuse with - * .._F_REUSE, blocking - * any non-F_REUSE - * curr API calls. */ -#define RD_KAFKA_TXN_CURR_API_F_REUSE 0x8 /**< Reuse/continue with - * current API state. 
- * This is used for - * multi-stage APIs, - * such as txn commit. */ + int flags; /**< Flags */ +#define RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT \ + 0x1 /**< Set state to abortable \ + * error on timeout, \ + * i.e., fail the txn, \ + * and set txn_requires_abort \ + * on the returned error. \ + */ +#define RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT \ + 0x2 /**< Set retriable flag \ + * on the error \ + * on timeout. */ +#define RD_KAFKA_TXN_CURR_API_F_FOR_REUSE \ + 0x4 /**< Do not reset the \ + * current API when it \ + * completes successfully \ + * Instead keep it alive \ + * and allow reuse with \ + * .._F_REUSE, blocking \ + * any non-F_REUSE \ + * curr API calls. */ +#define RD_KAFKA_TXN_CURR_API_F_REUSE \ + 0x8 /**< Reuse/continue with \ + * current API state. \ + * This is used for \ + * multi-stage APIs, \ + * such as txn commit. */ } txn_curr_api; /**< Copy (and reference) of the original init_transactions(), @@ -454,25 +447,25 @@ struct rd_kafka_s { */ rd_kafka_q_t *txn_init_rkq; - int txn_req_cnt; /**< Number of transaction - * requests sent. - * This is incremented when a - * AddPartitionsToTxn or - * AddOffsetsToTxn request - * has been sent for the - * current transaction, - * to keep track of - * whether the broker is - * aware of the current - * transaction and thus - * requires an EndTxn request - * on abort or not. */ + int txn_req_cnt; /**< Number of transaction + * requests sent. + * This is incremented when a + * AddPartitionsToTxn or + * AddOffsetsToTxn request + * has been sent for the + * current transaction, + * to keep track of + * whether the broker is + * aware of the current + * transaction and thus + * requires an EndTxn request + * on abort or not. 
*/ /**< Timer to trigger registration of pending partitions */ - rd_kafka_timer_t txn_register_parts_tmr; + rd_kafka_timer_t txn_register_parts_tmr; /**< Lock for txn_pending_rktps and txn_waitresp_rktps */ - mtx_t txn_pending_lock; + mtx_t txn_pending_lock; /**< Partitions pending being added to transaction. */ rd_kafka_toppar_tqhead_t txn_pending_rktps; @@ -493,19 +486,19 @@ struct rd_kafka_s { rd_kafka_resp_err_t txn_err; /**< Current transaction error string, if any. */ - char *txn_errstr; + char *txn_errstr; /**< Last InitProducerIdRequest error. */ rd_kafka_resp_err_t txn_init_err; /**< Waiting for transaction coordinator query response */ - rd_bool_t txn_wait_coord; + rd_bool_t txn_wait_coord; /**< Transaction coordinator query timer */ - rd_kafka_timer_t txn_coord_tmr; + rd_kafka_timer_t txn_coord_tmr; } rk_eos; - rd_atomic32_t rk_flushing; /**< Application is calling flush(). */ + rd_atomic32_t rk_flushing; /**< Application is calling flush(). */ /** * Consumer state @@ -529,49 +522,50 @@ struct rd_kafka_s { * @locks none * @locality rdkafka main thread */ - rd_kafka_coord_cache_t rk_coord_cache; /**< Coordinator cache */ + rd_kafka_coord_cache_t rk_coord_cache; /**< Coordinator cache */ - TAILQ_HEAD(, rd_kafka_coord_req_s) rk_coord_reqs; /**< Coordinator - * requests */ + TAILQ_HEAD(, rd_kafka_coord_req_s) + rk_coord_reqs; /**< Coordinator + * requests */ - struct { - mtx_t lock; /* Protects acces to this struct */ - cnd_t cnd; /* For waking up blocking injectors */ - unsigned int cnt; /* Current message count */ - size_t size; /* Current message size sum */ - unsigned int max_cnt; /* Max limit */ - size_t max_size; /* Max limit */ - } rk_curr_msgs; + struct { + mtx_t lock; /* Protects acces to this struct */ + cnd_t cnd; /* For waking up blocking injectors */ + unsigned int cnt; /* Current message count */ + size_t size; /* Current message size sum */ + unsigned int max_cnt; /* Max limit */ + size_t max_size; /* Max limit */ + } rk_curr_msgs; 
rd_kafka_timers_t rk_timers; - thrd_t rk_thread; + thrd_t rk_thread; - int rk_initialized; /**< Will be > 0 when the rd_kafka_t - * instance has been fully initialized. */ + int rk_initialized; /**< Will be > 0 when the rd_kafka_t + * instance has been fully initialized. */ - int rk_init_wait_cnt; /**< Number of background threads that - * need to finish initialization. */ - cnd_t rk_init_cnd; /**< Cond-var used to wait for main thread - * to finish its initialization before - * before rd_kafka_new() returns. */ - mtx_t rk_init_lock; /**< Lock for rk_init_wait and _cmd */ + int rk_init_wait_cnt; /**< Number of background threads that + * need to finish initialization. */ + cnd_t rk_init_cnd; /**< Cond-var used to wait for main thread + * to finish its initialization before + * before rd_kafka_new() returns. */ + mtx_t rk_init_lock; /**< Lock for rk_init_wait and _cmd */ - rd_ts_t rk_ts_created; /**< Timestamp (monotonic clock) of - * rd_kafka_t creation. */ + rd_ts_t rk_ts_created; /**< Timestamp (monotonic clock) of + * rd_kafka_t creation. */ /** * Background thread and queue, * enabled by setting `background_event_cb()`. */ struct { - rd_kafka_q_t *q; /**< Queue served by background thread. */ - thrd_t thread; /**< Background thread. */ - int calling; /**< Indicates whether the event callback - * is being called, reset back to 0 - * when the callback returns. - * This can be used for troubleshooting - * purposes. */ + rd_kafka_q_t *q; /**< Queue served by background thread. */ + thrd_t thread; /**< Background thread. */ + int calling; /**< Indicates whether the event callback + * is being called, reset back to 0 + * when the callback returns. + * This can be used for troubleshooting + * purposes. 
*/ } rk_background; @@ -590,7 +584,7 @@ struct rd_kafka_s { */ rd_interval_t sparse_connect_random; /**< Lock for sparse_connect_random */ - mtx_t sparse_connect_lock; + mtx_t sparse_connect_lock; /**< Broker metadata refresh interval: * this is rate-limiting the number of topic-less @@ -627,10 +621,10 @@ struct rd_kafka_s { } rk_mock; }; -#define rd_kafka_wrlock(rk) rwlock_wrlock(&(rk)->rk_lock) -#define rd_kafka_rdlock(rk) rwlock_rdlock(&(rk)->rk_lock) -#define rd_kafka_rdunlock(rk) rwlock_rdunlock(&(rk)->rk_lock) -#define rd_kafka_wrunlock(rk) rwlock_wrunlock(&(rk)->rk_lock) +#define rd_kafka_wrlock(rk) rwlock_wrlock(&(rk)->rk_lock) +#define rd_kafka_rdlock(rk) rwlock_rdlock(&(rk)->rk_lock) +#define rd_kafka_rdunlock(rk) rwlock_rdunlock(&(rk)->rk_lock) +#define rd_kafka_wrunlock(rk) rwlock_wrunlock(&(rk)->rk_lock) /** @@ -648,37 +642,38 @@ struct rd_kafka_s { * and then reacquire with a read-lock. */ static RD_INLINE RD_UNUSED rd_kafka_resp_err_t -rd_kafka_curr_msgs_add (rd_kafka_t *rk, unsigned int cnt, size_t size, - int block, rwlock_t *rdlock) { - - if (rk->rk_type != RD_KAFKA_PRODUCER) - return RD_KAFKA_RESP_ERR_NO_ERROR; - - mtx_lock(&rk->rk_curr_msgs.lock); - while (unlikely(rk->rk_curr_msgs.cnt + cnt > - rk->rk_curr_msgs.max_cnt || - (unsigned long long)(rk->rk_curr_msgs.size + size) > - (unsigned long long)rk->rk_curr_msgs.max_size)) { - if (!block) { - mtx_unlock(&rk->rk_curr_msgs.lock); - return RD_KAFKA_RESP_ERR__QUEUE_FULL; - } +rd_kafka_curr_msgs_add(rd_kafka_t *rk, + unsigned int cnt, + size_t size, + int block, + rwlock_t *rdlock) { + + if (rk->rk_type != RD_KAFKA_PRODUCER) + return RD_KAFKA_RESP_ERR_NO_ERROR; + + mtx_lock(&rk->rk_curr_msgs.lock); + while (unlikely(rk->rk_curr_msgs.cnt + cnt > rk->rk_curr_msgs.max_cnt || + (unsigned long long)(rk->rk_curr_msgs.size + size) > + (unsigned long long)rk->rk_curr_msgs.max_size)) { + if (!block) { + mtx_unlock(&rk->rk_curr_msgs.lock); + return RD_KAFKA_RESP_ERR__QUEUE_FULL; + } if (rdlock) 
rwlock_rdunlock(rdlock); - cnd_wait(&rk->rk_curr_msgs.cnd, &rk->rk_curr_msgs.lock); + cnd_wait(&rk->rk_curr_msgs.cnd, &rk->rk_curr_msgs.lock); if (rdlock) rwlock_rdlock(rdlock); + } - } - - rk->rk_curr_msgs.cnt += cnt; - rk->rk_curr_msgs.size += size; - mtx_unlock(&rk->rk_curr_msgs.lock); + rk->rk_curr_msgs.cnt += cnt; + rk->rk_curr_msgs.size += size; + mtx_unlock(&rk->rk_curr_msgs.lock); - return RD_KAFKA_RESP_ERR_NO_ERROR; + return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -688,16 +683,15 @@ rd_kafka_curr_msgs_add (rd_kafka_t *rk, unsigned int cnt, size_t size, * for any waiting & blocking threads. */ static RD_INLINE RD_UNUSED void -rd_kafka_curr_msgs_sub (rd_kafka_t *rk, unsigned int cnt, size_t size) { +rd_kafka_curr_msgs_sub(rd_kafka_t *rk, unsigned int cnt, size_t size) { int broadcast = 0; - if (rk->rk_type != RD_KAFKA_PRODUCER) - return; + if (rk->rk_type != RD_KAFKA_PRODUCER) + return; - mtx_lock(&rk->rk_curr_msgs.lock); - rd_kafka_assert(NULL, - rk->rk_curr_msgs.cnt >= cnt && - rk->rk_curr_msgs.size >= size); + mtx_lock(&rk->rk_curr_msgs.lock); + rd_kafka_assert(NULL, rk->rk_curr_msgs.cnt >= cnt && + rk->rk_curr_msgs.size >= size); /* If the subtraction would pass one of the thresholds * broadcast a wake-up to any waiting listeners. 
*/ @@ -708,40 +702,39 @@ rd_kafka_curr_msgs_sub (rd_kafka_t *rk, unsigned int cnt, size_t size) { rk->rk_curr_msgs.size - size < rk->rk_curr_msgs.max_size)) broadcast = 1; - rk->rk_curr_msgs.cnt -= cnt; - rk->rk_curr_msgs.size -= size; + rk->rk_curr_msgs.cnt -= cnt; + rk->rk_curr_msgs.size -= size; if (unlikely(broadcast)) cnd_broadcast(&rk->rk_curr_msgs.cnd); - mtx_unlock(&rk->rk_curr_msgs.lock); + mtx_unlock(&rk->rk_curr_msgs.lock); } static RD_INLINE RD_UNUSED void -rd_kafka_curr_msgs_get (rd_kafka_t *rk, unsigned int *cntp, size_t *sizep) { - if (rk->rk_type != RD_KAFKA_PRODUCER) { - *cntp = 0; - *sizep = 0; - return; - } - - mtx_lock(&rk->rk_curr_msgs.lock); - *cntp = rk->rk_curr_msgs.cnt; - *sizep = rk->rk_curr_msgs.size; - mtx_unlock(&rk->rk_curr_msgs.lock); +rd_kafka_curr_msgs_get(rd_kafka_t *rk, unsigned int *cntp, size_t *sizep) { + if (rk->rk_type != RD_KAFKA_PRODUCER) { + *cntp = 0; + *sizep = 0; + return; + } + + mtx_lock(&rk->rk_curr_msgs.lock); + *cntp = rk->rk_curr_msgs.cnt; + *sizep = rk->rk_curr_msgs.size; + mtx_unlock(&rk->rk_curr_msgs.lock); } -static RD_INLINE RD_UNUSED int -rd_kafka_curr_msgs_cnt (rd_kafka_t *rk) { - int cnt; - if (rk->rk_type != RD_KAFKA_PRODUCER) - return 0; +static RD_INLINE RD_UNUSED int rd_kafka_curr_msgs_cnt(rd_kafka_t *rk) { + int cnt; + if (rk->rk_type != RD_KAFKA_PRODUCER) + return 0; - mtx_lock(&rk->rk_curr_msgs.lock); - cnt = rk->rk_curr_msgs.cnt; - mtx_unlock(&rk->rk_curr_msgs.lock); + mtx_lock(&rk->rk_curr_msgs.lock); + cnt = rk->rk_curr_msgs.cnt; + mtx_unlock(&rk->rk_curr_msgs.lock); - return cnt; + return cnt; } /** @@ -751,8 +744,9 @@ rd_kafka_curr_msgs_cnt (rd_kafka_t *rk) { * The remaining messages are returned in \p *curr_msgsp */ static RD_INLINE RD_UNUSED rd_bool_t -rd_kafka_curr_msgs_wait_zero (rd_kafka_t *rk, int timeout_ms, - unsigned int *curr_msgsp) { +rd_kafka_curr_msgs_wait_zero(rd_kafka_t *rk, + int timeout_ms, + unsigned int *curr_msgsp) { unsigned int cnt; struct timespec tspec; @@ -771,9 +765,9 
@@ rd_kafka_curr_msgs_wait_zero (rd_kafka_t *rk, int timeout_ms, return cnt == 0; } -void rd_kafka_destroy_final (rd_kafka_t *rk); +void rd_kafka_destroy_final(rd_kafka_t *rk); -void rd_kafka_global_init (void); +void rd_kafka_global_init(void); /** * @returns true if \p rk handle is terminating. @@ -784,25 +778,25 @@ void rd_kafka_global_init (void); * That code should instead just check that rk_terminate is non-zero * (the _F_DESTROY_CALLED flag will be set). */ -#define rd_kafka_terminating(rk) (rd_atomic32_get(&(rk)->rk_terminate) & \ - RD_KAFKA_DESTROY_F_TERMINATE) +#define rd_kafka_terminating(rk) \ + (rd_atomic32_get(&(rk)->rk_terminate) & RD_KAFKA_DESTROY_F_TERMINATE) /** * @returns the destroy flags set matching \p flags, which might be * a subset of the flags. */ -#define rd_kafka_destroy_flags_check(rk,flags) \ +#define rd_kafka_destroy_flags_check(rk, flags) \ (rd_atomic32_get(&(rk)->rk_terminate) & (flags)) /** * @returns true if no consumer callbacks, or standard consumer_close * behaviour, should be triggered. */ -#define rd_kafka_destroy_flags_no_consumer_close(rk) \ +#define rd_kafka_destroy_flags_no_consumer_close(rk) \ rd_kafka_destroy_flags_check(rk, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE) -#define rd_kafka_is_simple_consumer(rk) \ +#define rd_kafka_is_simple_consumer(rk) \ (rd_atomic32_get(&(rk)->rk_simple_cnt) > 0) -int rd_kafka_simple_consumer_add (rd_kafka_t *rk); +int rd_kafka_simple_consumer_add(rd_kafka_t *rk); /** @@ -813,14 +807,15 @@ int rd_kafka_simple_consumer_add (rd_kafka_t *rk); /** * @returns true if the producer is transactional (producer only). 
*/ -#define rd_kafka_is_transactional(rk) \ +#define rd_kafka_is_transactional(rk) \ ((rk)->rk_conf.eos.transactional_id != NULL) -#define RD_KAFKA_PURGE_F_ABORT_TXN 0x100 /**< Internal flag used when - * aborting transaction */ +#define RD_KAFKA_PURGE_F_ABORT_TXN \ + 0x100 /**< Internal flag used when \ + * aborting transaction */ #define RD_KAFKA_PURGE_F_MASK 0x107 -const char *rd_kafka_purge_flags2str (int flags); +const char *rd_kafka_purge_flags2str(int flags); #include "rdkafka_topic.h" @@ -828,98 +823,90 @@ const char *rd_kafka_purge_flags2str (int flags); - - - - - - - - - - - /** * Debug contexts */ -#define RD_KAFKA_DBG_GENERIC 0x1 -#define RD_KAFKA_DBG_BROKER 0x2 -#define RD_KAFKA_DBG_TOPIC 0x4 -#define RD_KAFKA_DBG_METADATA 0x8 -#define RD_KAFKA_DBG_FEATURE 0x10 -#define RD_KAFKA_DBG_QUEUE 0x20 -#define RD_KAFKA_DBG_MSG 0x40 -#define RD_KAFKA_DBG_PROTOCOL 0x80 -#define RD_KAFKA_DBG_CGRP 0x100 -#define RD_KAFKA_DBG_SECURITY 0x200 -#define RD_KAFKA_DBG_FETCH 0x400 -#define RD_KAFKA_DBG_INTERCEPTOR 0x800 -#define RD_KAFKA_DBG_PLUGIN 0x1000 -#define RD_KAFKA_DBG_CONSUMER 0x2000 -#define RD_KAFKA_DBG_ADMIN 0x4000 -#define RD_KAFKA_DBG_EOS 0x8000 -#define RD_KAFKA_DBG_MOCK 0x10000 -#define RD_KAFKA_DBG_ASSIGNOR 0x20000 -#define RD_KAFKA_DBG_CONF 0x40000 -#define RD_KAFKA_DBG_ALL 0xfffff -#define RD_KAFKA_DBG_NONE 0x0 +#define RD_KAFKA_DBG_GENERIC 0x1 +#define RD_KAFKA_DBG_BROKER 0x2 +#define RD_KAFKA_DBG_TOPIC 0x4 +#define RD_KAFKA_DBG_METADATA 0x8 +#define RD_KAFKA_DBG_FEATURE 0x10 +#define RD_KAFKA_DBG_QUEUE 0x20 +#define RD_KAFKA_DBG_MSG 0x40 +#define RD_KAFKA_DBG_PROTOCOL 0x80 +#define RD_KAFKA_DBG_CGRP 0x100 +#define RD_KAFKA_DBG_SECURITY 0x200 +#define RD_KAFKA_DBG_FETCH 0x400 +#define RD_KAFKA_DBG_INTERCEPTOR 0x800 +#define RD_KAFKA_DBG_PLUGIN 0x1000 +#define RD_KAFKA_DBG_CONSUMER 0x2000 +#define RD_KAFKA_DBG_ADMIN 0x4000 +#define RD_KAFKA_DBG_EOS 0x8000 +#define RD_KAFKA_DBG_MOCK 0x10000 +#define RD_KAFKA_DBG_ASSIGNOR 0x20000 +#define 
RD_KAFKA_DBG_CONF 0x40000 +#define RD_KAFKA_DBG_ALL 0xfffff +#define RD_KAFKA_DBG_NONE 0x0 void rd_kafka_log0(const rd_kafka_conf_t *conf, - const rd_kafka_t *rk, const char *extra, int level, + const rd_kafka_t *rk, + const char *extra, + int level, int ctx, - const char *fac, const char *fmt, ...) RD_FORMAT(printf, - 7, 8); - -#define rd_kafka_log(rk,level,fac,...) \ - rd_kafka_log0(&rk->rk_conf, rk, NULL, level, \ - RD_KAFKA_DBG_NONE, fac, __VA_ARGS__) - -#define rd_kafka_dbg(rk,ctx,fac,...) do { \ - if (unlikely((rk)->rk_conf.debug & (RD_KAFKA_DBG_ ## ctx))) \ - rd_kafka_log0(&rk->rk_conf,rk,NULL, \ - LOG_DEBUG,(RD_KAFKA_DBG_ ## ctx), \ - fac,__VA_ARGS__); \ + const char *fac, + const char *fmt, + ...) RD_FORMAT(printf, 7, 8); + +#define rd_kafka_log(rk, level, fac, ...) \ + rd_kafka_log0(&rk->rk_conf, rk, NULL, level, RD_KAFKA_DBG_NONE, fac, \ + __VA_ARGS__) + +#define rd_kafka_dbg(rk, ctx, fac, ...) \ + do { \ + if (unlikely((rk)->rk_conf.debug & (RD_KAFKA_DBG_##ctx))) \ + rd_kafka_log0(&rk->rk_conf, rk, NULL, LOG_DEBUG, \ + (RD_KAFKA_DBG_##ctx), fac, __VA_ARGS__); \ } while (0) /* dbg() not requiring an rk, just the conf object, for early logging */ -#define rd_kafka_dbg0(conf,ctx,fac,...) do { \ - if (unlikely((conf)->debug & (RD_KAFKA_DBG_ ## ctx))) \ - rd_kafka_log0(conf,NULL,NULL, \ - LOG_DEBUG,(RD_KAFKA_DBG_ ## ctx), \ - fac,__VA_ARGS__); \ +#define rd_kafka_dbg0(conf, ctx, fac, ...) \ + do { \ + if (unlikely((conf)->debug & (RD_KAFKA_DBG_##ctx))) \ + rd_kafka_log0(conf, NULL, NULL, LOG_DEBUG, \ + (RD_KAFKA_DBG_##ctx), fac, __VA_ARGS__); \ } while (0) /* NOTE: The local copy of _logname is needed due rkb_logname_lock lock-ordering * when logging another broker's name in the message. */ -#define rd_rkb_log0(rkb,level,ctx,fac,...) 
do { \ - char _logname[RD_KAFKA_NODENAME_SIZE]; \ - mtx_lock(&(rkb)->rkb_logname_lock); \ - rd_strlcpy(_logname, rkb->rkb_logname, sizeof(_logname)); \ - mtx_unlock(&(rkb)->rkb_logname_lock); \ - rd_kafka_log0(&(rkb)->rkb_rk->rk_conf, \ - (rkb)->rkb_rk, _logname, \ - level, ctx, fac, __VA_ARGS__); \ +#define rd_rkb_log0(rkb, level, ctx, fac, ...) \ + do { \ + char _logname[RD_KAFKA_NODENAME_SIZE]; \ + mtx_lock(&(rkb)->rkb_logname_lock); \ + rd_strlcpy(_logname, rkb->rkb_logname, sizeof(_logname)); \ + mtx_unlock(&(rkb)->rkb_logname_lock); \ + rd_kafka_log0(&(rkb)->rkb_rk->rk_conf, (rkb)->rkb_rk, \ + _logname, level, ctx, fac, __VA_ARGS__); \ } while (0) -#define rd_rkb_log(rkb,level,fac,...) \ - rd_rkb_log0(rkb,level,RD_KAFKA_DBG_NONE,fac, __VA_ARGS__) +#define rd_rkb_log(rkb, level, fac, ...) \ + rd_rkb_log0(rkb, level, RD_KAFKA_DBG_NONE, fac, __VA_ARGS__) -#define rd_rkb_dbg(rkb,ctx,fac,...) do { \ - if (unlikely((rkb)->rkb_rk->rk_conf.debug & \ - (RD_KAFKA_DBG_ ## ctx))) { \ - rd_rkb_log0(rkb, LOG_DEBUG,(RD_KAFKA_DBG_ ## ctx), \ - fac, __VA_ARGS__); \ - } \ +#define rd_rkb_dbg(rkb, ctx, fac, ...) \ + do { \ + if (unlikely((rkb)->rkb_rk->rk_conf.debug & \ + (RD_KAFKA_DBG_##ctx))) { \ + rd_rkb_log0(rkb, LOG_DEBUG, (RD_KAFKA_DBG_##ctx), fac, \ + __VA_ARGS__); \ + } \ } while (0) extern rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code; -static RD_UNUSED RD_INLINE -rd_kafka_resp_err_t rd_kafka_set_last_error (rd_kafka_resp_err_t err, - int errnox) { +static RD_UNUSED RD_INLINE rd_kafka_resp_err_t +rd_kafka_set_last_error(rd_kafka_resp_err_t err, int errnox) { if (errnox) { /* MSVC: * This is the correct way to set errno on Windows, @@ -930,19 +917,21 @@ rd_kafka_resp_err_t rd_kafka_set_last_error (rd_kafka_resp_err_t err, * when using librdkafka as a dynamically loaded DLL. 
*/ rd_set_errno(errnox); } - rd_kafka_last_error_code = err; - return err; + rd_kafka_last_error_code = err; + return err; } -int rd_kafka_set_fatal_error0 (rd_kafka_t *rk, rd_dolock_t do_lock, - rd_kafka_resp_err_t err, - const char *fmt, ...) RD_FORMAT(printf, 4, 5); -#define rd_kafka_set_fatal_error(rk,err,fmt,...) \ +int rd_kafka_set_fatal_error0(rd_kafka_t *rk, + rd_dolock_t do_lock, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 4, 5); +#define rd_kafka_set_fatal_error(rk, err, fmt, ...) \ rd_kafka_set_fatal_error0(rk, RD_DO_LOCK, err, fmt, __VA_ARGS__) static RD_INLINE RD_UNUSED rd_kafka_resp_err_t -rd_kafka_fatal_error_code (rd_kafka_t *rk) { +rd_kafka_fatal_error_code(rd_kafka_t *rk) { /* This is an optimization to avoid an atomic read which are costly * on some platforms: * Fatal errors are currently only raised by the idempotent producer @@ -958,17 +947,19 @@ rd_kafka_fatal_error_code (rd_kafka_t *rk) { extern rd_atomic32_t rd_kafka_thread_cnt_curr; extern char RD_TLS rd_kafka_thread_name[64]; -void rd_kafka_set_thread_name (const char *fmt, ...) RD_FORMAT(printf, 1, 2); -void rd_kafka_set_thread_sysname (const char *fmt, ...) RD_FORMAT(printf, 1, 2); +void rd_kafka_set_thread_name(const char *fmt, ...) RD_FORMAT(printf, 1, 2); +void rd_kafka_set_thread_sysname(const char *fmt, ...) 
RD_FORMAT(printf, 1, 2); -int rd_kafka_path_is_dir (const char *path); -rd_bool_t rd_kafka_dir_is_empty (const char *path); +int rd_kafka_path_is_dir(const char *path); +rd_bool_t rd_kafka_dir_is_empty(const char *path); -rd_kafka_op_res_t -rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque); +rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque); -rd_kafka_resp_err_t rd_kafka_subscribe_rkt (rd_kafka_topic_t *rkt); +rd_kafka_resp_err_t rd_kafka_subscribe_rkt(rd_kafka_topic_t *rkt); /** @@ -980,8 +971,7 @@ rd_kafka_resp_err_t rd_kafka_subscribe_rkt (rd_kafka_topic_t *rkt); * @locality any * @locks none */ -static RD_INLINE RD_UNUSED int -rd_kafka_max_poll_exceeded (rd_kafka_t *rk) { +static RD_INLINE RD_UNUSED int rd_kafka_max_poll_exceeded(rd_kafka_t *rk) { rd_ts_t last_poll; int exceeded; @@ -996,7 +986,7 @@ rd_kafka_max_poll_exceeded (rd_kafka_t *rk) { return 0; exceeded = (int)((rd_clock() - last_poll) / 1000ll) - - rk->rk_conf.max_poll_interval_ms; + rk->rk_conf.max_poll_interval_ms; if (unlikely(exceeded > 0)) return exceeded; @@ -1017,8 +1007,7 @@ rd_kafka_max_poll_exceeded (rd_kafka_t *rk) { * @locality any * @locks none */ -static RD_INLINE RD_UNUSED void -rd_kafka_app_poll_blocking (rd_kafka_t *rk) { +static RD_INLINE RD_UNUSED void rd_kafka_app_poll_blocking(rd_kafka_t *rk) { if (rk->rk_type == RD_KAFKA_CONSUMER) rd_atomic64_set(&rk->rk_ts_last_poll, INT64_MAX); } @@ -1031,23 +1020,22 @@ rd_kafka_app_poll_blocking (rd_kafka_t *rk) { * @locality any * @locks none */ -static RD_INLINE RD_UNUSED void -rd_kafka_app_polled (rd_kafka_t *rk) { +static RD_INLINE RD_UNUSED void rd_kafka_app_polled(rd_kafka_t *rk) { if (rk->rk_type == RD_KAFKA_CONSUMER) rd_atomic64_set(&rk->rk_ts_last_poll, rd_clock()); } -void rd_kafka_term_sig_handler (int sig); +void rd_kafka_term_sig_handler(int sig); /** * 
rdkafka_background.c */ -int rd_kafka_background_thread_main (void *arg); -rd_kafka_resp_err_t rd_kafka_background_thread_create (rd_kafka_t *rk, - char *errstr, - size_t errstr_size); +int rd_kafka_background_thread_main(void *arg); +rd_kafka_resp_err_t rd_kafka_background_thread_create(rd_kafka_t *rk, + char *errstr, + size_t errstr_size); #endif /* _RDKAFKA_INT_H_ */ diff --git a/src/rdkafka_interceptor.c b/src/rdkafka_interceptor.c index 0ea976aab1..6f86553923 100644 --- a/src/rdkafka_interceptor.c +++ b/src/rdkafka_interceptor.c @@ -38,17 +38,17 @@ typedef struct rd_kafka_interceptor_method_s { rd_kafka_interceptor_f_on_conf_set_t *on_conf_set; rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup; rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy; - rd_kafka_interceptor_f_on_new_t *on_new; + rd_kafka_interceptor_f_on_new_t *on_new; rd_kafka_interceptor_f_on_destroy_t *on_destroy; - rd_kafka_interceptor_f_on_send_t *on_send; + rd_kafka_interceptor_f_on_send_t *on_send; rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement; rd_kafka_interceptor_f_on_consume_t *on_consume; - rd_kafka_interceptor_f_on_commit_t *on_commit; + rd_kafka_interceptor_f_on_commit_t *on_commit; rd_kafka_interceptor_f_on_request_sent_t *on_request_sent; rd_kafka_interceptor_f_on_response_received_t - *on_response_received; + *on_response_received; rd_kafka_interceptor_f_on_thread_start_t *on_thread_start; - rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit; + rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit; void *generic; /* For easy assignment */ } u; @@ -59,8 +59,7 @@ typedef struct rd_kafka_interceptor_method_s { /** * @brief Destroy interceptor methodtion reference */ -static void -rd_kafka_interceptor_method_destroy (void *ptr) { +static void rd_kafka_interceptor_method_destroy(void *ptr) { rd_kafka_interceptor_method_t *method = ptr; rd_free(method->ic_name); rd_free(method); @@ -68,39 +67,33 @@ rd_kafka_interceptor_method_destroy (void *ptr) { - - /** * 
@brief Handle an interceptor on_... methodtion call failures. */ static RD_INLINE void -rd_kafka_interceptor_failed (rd_kafka_t *rk, - const rd_kafka_interceptor_method_t *method, - const char *method_name, rd_kafka_resp_err_t err, - const rd_kafka_message_t *rkmessage, - const char *errstr) { +rd_kafka_interceptor_failed(rd_kafka_t *rk, + const rd_kafka_interceptor_method_t *method, + const char *method_name, + rd_kafka_resp_err_t err, + const rd_kafka_message_t *rkmessage, + const char *errstr) { /* FIXME: Suppress log messages, eventually */ if (rkmessage) - rd_kafka_log(rk, LOG_WARNING, "ICFAIL", - "Interceptor %s failed %s for " - "message on %s [%"PRId32"] @ %"PRId64 - ": %s%s%s", - method->ic_name, method_name, - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset, - rd_kafka_err2str(err), - errstr ? ": " : "", - errstr ? errstr : ""); + rd_kafka_log( + rk, LOG_WARNING, "ICFAIL", + "Interceptor %s failed %s for " + "message on %s [%" PRId32 "] @ %" PRId64 ": %s%s%s", + method->ic_name, method_name, + rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, + rkmessage->offset, rd_kafka_err2str(err), + errstr ? ": " : "", errstr ? errstr : ""); else rd_kafka_log(rk, LOG_WARNING, "ICFAIL", "Interceptor %s failed %s: %s%s%s", method->ic_name, method_name, - rd_kafka_err2str(err), - errstr ? ": " : "", + rd_kafka_err2str(err), errstr ? ": " : "", errstr ? 
errstr : ""); - } @@ -110,14 +103,15 @@ rd_kafka_interceptor_failed (rd_kafka_t *rk, * Duplicates are rejected */ static rd_kafka_interceptor_method_t * -rd_kafka_interceptor_method_new (const char *ic_name, - void *func, void *ic_opaque) { +rd_kafka_interceptor_method_new(const char *ic_name, + void *func, + void *ic_opaque) { rd_kafka_interceptor_method_t *method; - method = rd_calloc(1, sizeof(*method)); - method->ic_name = rd_strdup(ic_name); - method->ic_opaque = ic_opaque; - method->u.generic = func; + method = rd_calloc(1, sizeof(*method)); + method->ic_name = rd_strdup(ic_name); + method->ic_opaque = ic_opaque; + method->u.generic = func; return method; } @@ -126,7 +120,7 @@ rd_kafka_interceptor_method_new (const char *ic_name, /** * @brief Method comparator to be used for finding, not sorting. */ -static int rd_kafka_interceptor_method_cmp (const void *_a, const void *_b) { +static int rd_kafka_interceptor_method_cmp(const void *_a, const void *_b) { const rd_kafka_interceptor_method_t *a = _a, *b = _b; if (a->u.generic != b->u.generic) @@ -138,14 +132,13 @@ static int rd_kafka_interceptor_method_cmp (const void *_a, const void *_b) { /** * @brief Add interceptor method reference */ -static rd_kafka_resp_err_t -rd_kafka_interceptor_method_add (rd_list_t *list, const char *ic_name, - void *func, void *ic_opaque) { +static rd_kafka_resp_err_t rd_kafka_interceptor_method_add(rd_list_t *list, + const char *ic_name, + void *func, + void *ic_opaque) { rd_kafka_interceptor_method_t *method; - const rd_kafka_interceptor_method_t skel = { - .ic_name = (char *)ic_name, - .u = { .generic = func } - }; + const rd_kafka_interceptor_method_t skel = {.ic_name = (char *)ic_name, + .u = {.generic = func}}; /* Reject same method from same interceptor. 
* This is needed to avoid duplicate interceptors when configuration @@ -167,7 +160,7 @@ rd_kafka_interceptor_method_add (rd_list_t *list, const char *ic_name, * @locality application thread calling rd_kafka_conf_destroy() or * rd_kafka_destroy() */ -void rd_kafka_interceptors_destroy (rd_kafka_conf_t *conf) { +void rd_kafka_interceptors_destroy(rd_kafka_conf_t *conf) { rd_list_destroy(&conf->interceptors.on_conf_set); rd_list_destroy(&conf->interceptors.on_conf_dup); rd_list_destroy(&conf->interceptors.on_conf_destroy); @@ -191,47 +184,46 @@ void rd_kafka_interceptors_destroy (rd_kafka_conf_t *conf) { * @brief Initialize interceptor sub-system for config object. * @locality application thread */ -static void -rd_kafka_interceptors_init (rd_kafka_conf_t *conf) { +static void rd_kafka_interceptors_init(rd_kafka_conf_t *conf) { rd_list_init(&conf->interceptors.on_conf_set, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_conf_dup, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; /* conf_destroy() allows duplicates entries. 
*/ rd_list_init(&conf->interceptors.on_conf_destroy, 0, rd_kafka_interceptor_method_destroy); rd_list_init(&conf->interceptors.on_new, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_destroy, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_send, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_acknowledgement, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_consume, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_commit, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_request_sent, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_response_received, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_thread_start, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_thread_exit, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; /* Interceptor config */ rd_list_init(&conf->interceptors.config, 0, @@ -240,7 +232,6 @@ rd_kafka_interceptors_init (rd_kafka_conf_t *conf) { - /** * @name Configuration backend */ @@ -249,7 +240,7 @@ rd_kafka_interceptors_init (rd_kafka_conf_t *conf) { /** * @brief Constructor called when configuration object is created. 
*/ -void rd_kafka_conf_interceptor_ctor (int scope, void *pconf) { +void rd_kafka_conf_interceptor_ctor(int scope, void *pconf) { rd_kafka_conf_t *conf = pconf; assert(scope == _RK_GLOBAL); rd_kafka_interceptors_init(conf); @@ -258,7 +249,7 @@ void rd_kafka_conf_interceptor_ctor (int scope, void *pconf) { /** * @brief Destructor called when configuration object is destroyed. */ -void rd_kafka_conf_interceptor_dtor (int scope, void *pconf) { +void rd_kafka_conf_interceptor_dtor(int scope, void *pconf) { rd_kafka_conf_t *conf = pconf; assert(scope == _RK_GLOBAL); rd_kafka_interceptors_destroy(conf); @@ -270,10 +261,14 @@ void rd_kafka_conf_interceptor_dtor (int scope, void *pconf) { * @remark Interceptors are NOT copied, but interceptor config is. * */ -void rd_kafka_conf_interceptor_copy (int scope, void *pdst, const void *psrc, - void *dstptr, const void *srcptr, - size_t filter_cnt, const char **filter) { - rd_kafka_conf_t *dconf = pdst; +void rd_kafka_conf_interceptor_copy(int scope, + void *pdst, + const void *psrc, + void *dstptr, + const void *srcptr, + size_t filter_cnt, + const char **filter) { + rd_kafka_conf_t *dconf = pdst; const rd_kafka_conf_t *sconf = psrc; int i; const rd_strtup_t *confval; @@ -289,10 +284,10 @@ void rd_kafka_conf_interceptor_copy (int scope, void *pdst, const void *psrc, size_t nlen = strlen(confval->name); /* Apply filter */ - for (fi = 0 ; fi < filter_cnt ; fi++) { + for (fi = 0; fi < filter_cnt; fi++) { size_t flen = strlen(filter[fi]); - if (nlen >= flen && !strncmp(filter[fi], confval->name, - flen)) + if (nlen >= flen && + !strncmp(filter[fi], confval->name, flen)) break; } @@ -300,32 +295,31 @@ void rd_kafka_conf_interceptor_copy (int scope, void *pdst, const void *psrc, continue; /* Filter matched: ignore property. 
*/ /* Ignore errors for now */ - rd_kafka_conf_set(dconf, confval->name, confval->value, - NULL, 0); + rd_kafka_conf_set(dconf, confval->name, confval->value, NULL, + 0); } } - /** * @brief Call interceptor on_conf_set methods. * @locality application thread calling rd_kafka_conf_set() and * rd_kafka_conf_dup() */ -rd_kafka_conf_res_t -rd_kafka_interceptors_on_conf_set (rd_kafka_conf_t *conf, - const char *name, const char *val, - char *errstr, size_t errstr_size) { +rd_kafka_conf_res_t rd_kafka_interceptors_on_conf_set(rd_kafka_conf_t *conf, + const char *name, + const char *val, + char *errstr, + size_t errstr_size) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &conf->interceptors.on_conf_set, i) { rd_kafka_conf_res_t res; - res = method->u.on_conf_set(conf, - name, val, errstr, errstr_size, - method->ic_opaque); + res = method->u.on_conf_set(conf, name, val, errstr, + errstr_size, method->ic_opaque); if (res == RD_KAFKA_CONF_UNKNOWN) continue; @@ -345,17 +339,17 @@ rd_kafka_interceptors_on_conf_set (rd_kafka_conf_t *conf, * @brief Call interceptor on_conf_dup methods. 
* @locality application thread calling rd_kafka_conf_dup() */ -void -rd_kafka_interceptors_on_conf_dup (rd_kafka_conf_t *new_conf, - const rd_kafka_conf_t *old_conf, - size_t filter_cnt, const char **filter) { +void rd_kafka_interceptors_on_conf_dup(rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &old_conf->interceptors.on_conf_dup, i) { /* FIXME: Ignore error for now */ - method->u.on_conf_dup(new_conf, old_conf, - filter_cnt, filter, method->ic_opaque); + method->u.on_conf_dup(new_conf, old_conf, filter_cnt, filter, + method->ic_opaque); } } @@ -365,8 +359,7 @@ rd_kafka_interceptors_on_conf_dup (rd_kafka_conf_t *new_conf, * @locality application thread calling rd_kafka_conf_destroy(), rd_kafka_new(), * rd_kafka_destroy() */ -void -rd_kafka_interceptors_on_conf_destroy (rd_kafka_conf_t *conf) { +void rd_kafka_interceptors_on_conf_destroy(rd_kafka_conf_t *conf) { rd_kafka_interceptor_method_t *method; int i; @@ -381,8 +374,7 @@ rd_kafka_interceptors_on_conf_destroy (rd_kafka_conf_t *conf) { * @brief Call interceptor on_new methods. 
* @locality application thread calling rd_kafka_new() */ -void -rd_kafka_interceptors_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf) { +void rd_kafka_interceptors_on_new(rd_kafka_t *rk, const rd_kafka_conf_t *conf) { rd_kafka_interceptor_method_t *method; int i; char errstr[512]; @@ -390,8 +382,8 @@ rd_kafka_interceptors_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf) { RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_new, i) { rd_kafka_resp_err_t err; - err = method->u.on_new(rk, conf, method->ic_opaque, - errstr, sizeof(errstr)); + err = method->u.on_new(rk, conf, method->ic_opaque, errstr, + sizeof(errstr)); if (unlikely(err)) rd_kafka_interceptor_failed(rk, method, "on_new", err, NULL, errstr); @@ -404,8 +396,7 @@ rd_kafka_interceptors_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf) { * @brief Call interceptor on_destroy methods. * @locality application thread calling rd_kafka_new() or rd_kafka_destroy() */ -void -rd_kafka_interceptors_on_destroy (rd_kafka_t *rk) { +void rd_kafka_interceptors_on_destroy(rd_kafka_t *rk) { rd_kafka_interceptor_method_t *method; int i; @@ -425,8 +416,8 @@ rd_kafka_interceptors_on_destroy (rd_kafka_t *rk) { * @brief Call interceptor on_send methods. * @locality application thread calling produce() */ -void -rd_kafka_interceptors_on_send (rd_kafka_t *rk, rd_kafka_message_t *rkmessage) { +void rd_kafka_interceptors_on_send(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage) { rd_kafka_interceptor_method_t *method; int i; @@ -447,14 +438,13 @@ rd_kafka_interceptors_on_send (rd_kafka_t *rk, rd_kafka_message_t *rkmessage) { * @locality application thread calling poll(), or the broker thread if * if dr callback has been set. 
*/ -void -rd_kafka_interceptors_on_acknowledgement (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage) { +void rd_kafka_interceptors_on_acknowledgement(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage) { rd_kafka_interceptor_method_t *method; int i; - RD_LIST_FOREACH(method, - &rk->rk_conf.interceptors.on_acknowledgement, i) { + RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_acknowledgement, + i) { rd_kafka_resp_err_t err; err = method->u.on_acknowledgement(rk, rkmessage, @@ -474,10 +464,10 @@ rd_kafka_interceptors_on_acknowledgement (rd_kafka_t *rk, * * @locality broker thread */ -void -rd_kafka_interceptors_on_acknowledgement_queue (rd_kafka_t *rk, - rd_kafka_msgq_t *rkmq, - rd_kafka_resp_err_t force_err) { +void rd_kafka_interceptors_on_acknowledgement_queue( + rd_kafka_t *rk, + rd_kafka_msgq_t *rkmq, + rd_kafka_resp_err_t force_err) { rd_kafka_msg_t *rkm; RD_KAFKA_MSGQ_FOREACH(rkm, rkmq) { @@ -494,21 +484,18 @@ rd_kafka_interceptors_on_acknowledgement_queue (rd_kafka_t *rk, * @locality application thread calling poll(), consume() or similar prior to * passing the message to the application. */ -void -rd_kafka_interceptors_on_consume (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage) { +void rd_kafka_interceptors_on_consume(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_consume, i) { rd_kafka_resp_err_t err; - err = method->u.on_consume(rk, rkmessage, - method->ic_opaque); + err = method->u.on_consume(rk, rkmessage, method->ic_opaque); if (unlikely(err)) - rd_kafka_interceptor_failed(rk, method, - "on_consume", err, - rkmessage, NULL); + rd_kafka_interceptor_failed(rk, method, "on_consume", + err, rkmessage, NULL); } } @@ -518,22 +505,21 @@ rd_kafka_interceptors_on_consume (rd_kafka_t *rk, * @locality application thread calling poll(), consume() or similar, * or rdkafka main thread if no commit_cb or handler registered. 
*/ -void -rd_kafka_interceptors_on_commit (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err) { +void rd_kafka_interceptors_on_commit( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_commit, i) { rd_kafka_resp_err_t ic_err; - ic_err = method->u.on_commit(rk, offsets, err, - method->ic_opaque); + ic_err = + method->u.on_commit(rk, offsets, err, method->ic_opaque); if (unlikely(ic_err)) - rd_kafka_interceptor_failed(rk, method, - "on_commit", ic_err, NULL, - NULL); + rd_kafka_interceptor_failed(rk, method, "on_commit", + ic_err, NULL, NULL); } } @@ -542,33 +528,26 @@ rd_kafka_interceptors_on_commit (rd_kafka_t *rk, * @brief Call interceptor on_request_sent methods * @locality internal broker thread */ -void rd_kafka_interceptors_on_request_sent (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size) { +void rd_kafka_interceptors_on_request_sent(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_request_sent, i) { rd_kafka_resp_err_t ic_err; - ic_err = method->u.on_request_sent(rk, - sockfd, - brokername, - brokerid, - ApiKey, - ApiVersion, - CorrId, - size, - method->ic_opaque); + ic_err = method->u.on_request_sent( + rk, sockfd, brokername, brokerid, ApiKey, ApiVersion, + CorrId, size, method->ic_opaque); if (unlikely(ic_err)) - rd_kafka_interceptor_failed(rk, method, - "on_request_sent", - ic_err, NULL, NULL); + rd_kafka_interceptor_failed( + rk, method, "on_request_sent", ic_err, NULL, NULL); } } @@ -577,16 +556,16 @@ void rd_kafka_interceptors_on_request_sent (rd_kafka_t *rk, * @brief Call 
interceptor on_response_received methods * @locality internal broker thread */ -void rd_kafka_interceptors_on_response_received (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - int64_t rtt, - rd_kafka_resp_err_t err) { +void rd_kafka_interceptors_on_response_received(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err) { rd_kafka_interceptor_method_t *method; int i; @@ -594,17 +573,9 @@ void rd_kafka_interceptors_on_response_received (rd_kafka_t *rk, i) { rd_kafka_resp_err_t ic_err; - ic_err = method->u.on_response_received(rk, - sockfd, - brokername, - brokerid, - ApiKey, - ApiVersion, - CorrId, - size, - rtt, - err, - method->ic_opaque); + ic_err = method->u.on_response_received( + rk, sockfd, brokername, brokerid, ApiKey, ApiVersion, + CorrId, size, rtt, err, method->ic_opaque); if (unlikely(ic_err)) rd_kafka_interceptor_failed(rk, method, "on_response_received", @@ -613,43 +584,36 @@ void rd_kafka_interceptors_on_response_received (rd_kafka_t *rk, } -void -rd_kafka_interceptors_on_thread_start (rd_kafka_t *rk, - rd_kafka_thread_type_t thread_type) { +void rd_kafka_interceptors_on_thread_start(rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_thread_start, i) { rd_kafka_resp_err_t ic_err; - ic_err = method->u.on_thread_start(rk, - thread_type, - rd_kafka_thread_name, - method->ic_opaque); + ic_err = method->u.on_thread_start( + rk, thread_type, rd_kafka_thread_name, method->ic_opaque); if (unlikely(ic_err)) - rd_kafka_interceptor_failed(rk, method, - "on_thread_start", - ic_err, NULL, NULL); + rd_kafka_interceptor_failed( + rk, method, "on_thread_start", ic_err, NULL, NULL); } } -void rd_kafka_interceptors_on_thread_exit 
(rd_kafka_t *rk, - rd_kafka_thread_type_t thread_type) { +void rd_kafka_interceptors_on_thread_exit(rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_thread_exit, i) { rd_kafka_resp_err_t ic_err; - ic_err = method->u.on_thread_exit(rk, - thread_type, - rd_kafka_thread_name, - method->ic_opaque); + ic_err = method->u.on_thread_exit( + rk, thread_type, rd_kafka_thread_name, method->ic_opaque); if (unlikely(ic_err)) - rd_kafka_interceptor_failed(rk, method, - "on_thread_exit", - ic_err, NULL, NULL); + rd_kafka_interceptor_failed( + rk, method, "on_thread_exit", ic_err, NULL, NULL); } } @@ -661,161 +625,149 @@ void rd_kafka_interceptors_on_thread_exit (rd_kafka_t *rk, */ -rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_set ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, + void *ic_opaque) { return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_set, ic_name, (void *)on_conf_set, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_dup ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, + void *ic_opaque) { return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_dup, ic_name, (void *)on_conf_dup, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_destroy ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, - void *ic_opaque) { - return 
rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_destroy, - ic_name, (void *)on_conf_destroy, - ic_opaque); +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, + void *ic_opaque) { + return rd_kafka_interceptor_method_add( + &conf->interceptors.on_conf_destroy, ic_name, + (void *)on_conf_destroy, ic_opaque); } rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_new ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_new_t *on_new, - void *ic_opaque) { - return rd_kafka_interceptor_method_add(&conf->interceptors.on_new, - ic_name, (void *)on_new, - ic_opaque); +rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_new_t *on_new, + void *ic_opaque) { + return rd_kafka_interceptor_method_add( + &conf->interceptors.on_new, ic_name, (void *)on_new, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_destroy ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_destroy_t *on_destroy, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_destroy_t *on_destroy, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors.on_destroy, - ic_name, (void *)on_destroy, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_destroy, ic_name, (void *)on_destroy, + ic_opaque); } rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_send ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_send_t *on_send, - void *ic_opaque) { +rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_send_t *on_send, + void *ic_opaque) { assert(!rk->rk_initialized); - return 
rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors.on_send, - ic_name, (void *)on_send, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_send, ic_name, (void *)on_send, + ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_acknowledgement ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. - on_acknowledgement, - ic_name, - (void *)on_acknowledgement, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_acknowledgement, ic_name, + (void *)on_acknowledgement, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_consume ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_consume_t *on_consume, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_consume_t *on_consume, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. - on_consume, - ic_name, (void *)on_consume, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_consume, ic_name, (void *)on_consume, + ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_commit ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_commit_t *on_commit, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_commit_t *on_commit, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. 
- on_commit, - ic_name, (void *)on_commit, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_commit, ic_name, (void *)on_commit, + ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_request_sent ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. - on_request_sent, - ic_name, (void *)on_request_sent, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_request_sent, ic_name, + (void *)on_request_sent, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_response_received ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_response_received_t *on_response_received, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_response_received_t *on_response_received, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. 
- on_response_received, - ic_name, - (void *)on_response_received, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_response_received, ic_name, + (void *)on_response_received, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_thread_start ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. - on_thread_start, - ic_name, - (void *)on_thread_start, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_thread_start, ic_name, + (void *)on_thread_start, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_thread_exit ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. 
- on_thread_exit, - ic_name, - (void *)on_thread_exit, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_thread_exit, ic_name, + (void *)on_thread_exit, ic_opaque); } diff --git a/src/rdkafka_interceptor.h b/src/rdkafka_interceptor.h index 158522698c..2e15441a36 100644 --- a/src/rdkafka_interceptor.h +++ b/src/rdkafka_interceptor.h @@ -29,68 +29,69 @@ #ifndef _RDKAFKA_INTERCEPTOR_H #define _RDKAFKA_INTERCEPTOR_H -rd_kafka_conf_res_t -rd_kafka_interceptors_on_conf_set (rd_kafka_conf_t *conf, - const char *name, const char *val, - char *errstr, size_t errstr_size); -void -rd_kafka_interceptors_on_conf_dup (rd_kafka_conf_t *new_conf, - const rd_kafka_conf_t *old_conf, - size_t filter_cnt, const char **filter); -void -rd_kafka_interceptors_on_conf_destroy (rd_kafka_conf_t *conf) ; -void -rd_kafka_interceptors_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf); -void -rd_kafka_interceptors_on_destroy (rd_kafka_t *rk); -void -rd_kafka_interceptors_on_send (rd_kafka_t *rk, rd_kafka_message_t *rkmessage); -void -rd_kafka_interceptors_on_acknowledgement (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage); -void -rd_kafka_interceptors_on_acknowledgement_queue (rd_kafka_t *rk, - rd_kafka_msgq_t *rkmq, - rd_kafka_resp_err_t force_err); +rd_kafka_conf_res_t rd_kafka_interceptors_on_conf_set(rd_kafka_conf_t *conf, + const char *name, + const char *val, + char *errstr, + size_t errstr_size); +void rd_kafka_interceptors_on_conf_dup(rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter); +void rd_kafka_interceptors_on_conf_destroy(rd_kafka_conf_t *conf); +void rd_kafka_interceptors_on_new(rd_kafka_t *rk, const rd_kafka_conf_t *conf); +void rd_kafka_interceptors_on_destroy(rd_kafka_t *rk); +void rd_kafka_interceptors_on_send(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage); +void rd_kafka_interceptors_on_acknowledgement(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage); +void 
rd_kafka_interceptors_on_acknowledgement_queue( + rd_kafka_t *rk, + rd_kafka_msgq_t *rkmq, + rd_kafka_resp_err_t force_err); -void rd_kafka_interceptors_on_consume (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage); -void -rd_kafka_interceptors_on_commit (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err); +void rd_kafka_interceptors_on_consume(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage); +void rd_kafka_interceptors_on_commit( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err); -void rd_kafka_interceptors_on_request_sent (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size); +void rd_kafka_interceptors_on_request_sent(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size); -void rd_kafka_interceptors_on_response_received (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - int64_t rtt, - rd_kafka_resp_err_t err); +void rd_kafka_interceptors_on_response_received(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err); -void rd_kafka_interceptors_on_thread_start (rd_kafka_t *rk, - rd_kafka_thread_type_t thread_type); -void rd_kafka_interceptors_on_thread_exit (rd_kafka_t *rk, +void rd_kafka_interceptors_on_thread_start(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type); +void rd_kafka_interceptors_on_thread_exit(rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type); -void rd_kafka_conf_interceptor_ctor (int scope, void *pconf); -void rd_kafka_conf_interceptor_dtor (int scope, void *pconf); -void rd_kafka_conf_interceptor_copy (int scope, void *pdst, const void 
*psrc, - void *dstptr, const void *srcptr, - size_t filter_cnt, const char **filter); +void rd_kafka_conf_interceptor_ctor(int scope, void *pconf); +void rd_kafka_conf_interceptor_dtor(int scope, void *pconf); +void rd_kafka_conf_interceptor_copy(int scope, + void *pdst, + const void *psrc, + void *dstptr, + const void *srcptr, + size_t filter_cnt, + const char **filter); -void rd_kafka_interceptors_destroy (rd_kafka_conf_t *conf); +void rd_kafka_interceptors_destroy(rd_kafka_conf_t *conf); #endif /* _RDKAFKA_INTERCEPTOR_H */ diff --git a/src/rdkafka_lz4.c b/src/rdkafka_lz4.c index 41714279b3..b52108bb1f 100644 --- a/src/rdkafka_lz4.c +++ b/src/rdkafka_lz4.c @@ -47,9 +47,10 @@ * Returns an error on failure to fix (nothing modified), else NO_ERROR. */ static rd_kafka_resp_err_t -rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb, - char *inbuf, size_t inlen) { - static const char magic[4] = { 0x04, 0x22, 0x4d, 0x18 }; +rd_kafka_lz4_decompress_fixup_bad_framing(rd_kafka_broker_t *rkb, + char *inbuf, + size_t inlen) { + static const char magic[4] = {0x04, 0x22, 0x4d, 0x18}; uint8_t FLG, HC, correct_HC; size_t of = 4; @@ -60,15 +61,15 @@ rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb, * [ int64_t contentSize; ] * int8_t HC; */ - if (inlen < 4+3 || memcmp(inbuf, magic, 4)) { - rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", + if (inlen < 4 + 3 || memcmp(inbuf, magic, 4)) { + rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", "Unable to fix-up legacy LZ4 framing " - "(%"PRIusz" bytes): invalid length or magic value", + "(%" PRIusz " bytes): invalid length or magic value", inlen); return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } - of = 4; /* past magic */ + of = 4; /* past magic */ FLG = inbuf[of++]; of++; /* BD */ @@ -76,9 +77,9 @@ rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb, of += 8; if (of >= inlen) { - rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", + rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", "Unable to fix-up legacy LZ4 framing " - "(%"PRIusz" bytes): requires 
%"PRIusz" bytes", + "(%" PRIusz " bytes): requires %" PRIusz " bytes", inlen, of); return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } @@ -87,7 +88,7 @@ rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb, HC = inbuf[of]; /* Calculate correct header hash code */ - correct_HC = (XXH32(inbuf+4, of-4, 0) >> 8) & 0xff; + correct_HC = (XXH32(inbuf + 4, of - 4, 0) >> 8) & 0xff; if (HC != correct_HC) inbuf[of] = correct_HC; @@ -106,9 +107,10 @@ rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb, * else NO_ERROR. */ static rd_kafka_resp_err_t -rd_kafka_lz4_compress_break_framing (rd_kafka_broker_t *rkb, - char *outbuf, size_t outlen) { - static const char magic[4] = { 0x04, 0x22, 0x4d, 0x18 }; +rd_kafka_lz4_compress_break_framing(rd_kafka_broker_t *rkb, + char *outbuf, + size_t outlen) { + static const char magic[4] = {0x04, 0x22, 0x4d, 0x18}; uint8_t FLG, HC, bad_HC; size_t of = 4; @@ -119,15 +121,15 @@ rd_kafka_lz4_compress_break_framing (rd_kafka_broker_t *rkb, * [ int64_t contentSize; ] * int8_t HC; */ - if (outlen < 4+3 || memcmp(outbuf, magic, 4)) { - rd_rkb_dbg(rkb, BROKER, "LZ4FIXDOWN", + if (outlen < 4 + 3 || memcmp(outbuf, magic, 4)) { + rd_rkb_dbg(rkb, BROKER, "LZ4FIXDOWN", "Unable to break legacy LZ4 framing " - "(%"PRIusz" bytes): invalid length or magic value", + "(%" PRIusz " bytes): invalid length or magic value", outlen); return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } - of = 4; /* past magic */ + of = 4; /* past magic */ FLG = outbuf[of++]; of++; /* BD */ @@ -135,9 +137,9 @@ rd_kafka_lz4_compress_break_framing (rd_kafka_broker_t *rkb, of += 8; if (of >= outlen) { - rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", + rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", "Unable to break legacy LZ4 framing " - "(%"PRIusz" bytes): requires %"PRIusz" bytes", + "(%" PRIusz " bytes): requires %" PRIusz " bytes", outlen, of); return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } @@ -164,10 +166,13 @@ rd_kafka_lz4_compress_break_framing (rd_kafka_broker_t *rkb, * * @remark May modify 
\p inbuf (if not \p proper_hc) */ -rd_kafka_resp_err_t -rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, - char *inbuf, size_t inlen, - void **outbuf, size_t *outlenp) { +rd_kafka_resp_err_t rd_kafka_lz4_decompress(rd_kafka_broker_t *rkb, + int proper_hc, + int64_t Offset, + char *inbuf, + size_t inlen, + void **outbuf, + size_t *outlenp) { LZ4F_errorCode_t code; LZ4F_decompressionContext_t dctx; LZ4F_frameInfo_t fi; @@ -177,7 +182,7 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, size_t estimated_uncompressed_size; size_t outlen; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - char *out = NULL; + char *out = NULL; *outbuf = NULL; @@ -193,14 +198,13 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, /* The original/legacy LZ4 framing in Kafka was buggy and * calculated the LZ4 framing header hash code (HC) incorrectly. * We do a fix-up of it here. */ - if ((err = rd_kafka_lz4_decompress_fixup_bad_framing(rkb, - inbuf, + if ((err = rd_kafka_lz4_decompress_fixup_bad_framing(rkb, inbuf, inlen))) goto done; } in_sz = inlen; - r = LZ4F_getFrameInfo(dctx, &fi, (const void *)inbuf, &in_sz); + r = LZ4F_getFrameInfo(dctx, &fi, (const void *)inbuf, &in_sz); if (LZ4F_isError(r)) { rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR", "Failed to gather LZ4 frame info: %s", @@ -211,14 +215,13 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, /* If uncompressed size is unknown or out of bounds, use a sane * default (4x compression) and reallocate if needed - * More info on max size: http://stackoverflow.com/a/25751871/1821055 + * More info on max size: http://stackoverflow.com/a/25751871/1821055 * More info on lz4 compression ratios seen for different data sets: * http://dev.ti.com/tirex/content/simplelink_msp432p4_sdk_1_50_00_12/docs/lz4/users_guide/docguide.llQpgm/benchmarking.html */ if (fi.contentSize == 0 || fi.contentSize > inlen * 255) { 
estimated_uncompressed_size = RD_MIN( - inlen * 4, - (size_t)(rkb->rkb_rk->rk_conf.max_msg_size)); + inlen * 4, (size_t)(rkb->rkb_rk->rk_conf.max_msg_size)); } else { estimated_uncompressed_size = (size_t)fi.contentSize; } @@ -229,7 +232,7 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, if (!out) { rd_rkb_log(rkb, LOG_WARNING, "LZ4DEC", "Unable to allocate decompression " - "buffer of %"PRIusz" bytes: %s", + "buffer of %" PRIusz " bytes: %s", estimated_uncompressed_size, rd_strerror(errno)); err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; goto done; @@ -238,26 +241,27 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, /* Decompress input buffer to output buffer until input is exhausted. */ outlen = estimated_uncompressed_size; - in_of = in_sz; + in_of = in_sz; out_of = 0; while (in_of < inlen) { out_sz = outlen - out_of; - in_sz = inlen - in_of; - r = LZ4F_decompress(dctx, out+out_of, &out_sz, - inbuf+in_of, &in_sz, NULL); + in_sz = inlen - in_of; + r = LZ4F_decompress(dctx, out + out_of, &out_sz, inbuf + in_of, + &in_sz, NULL); if (unlikely(LZ4F_isError(r))) { rd_rkb_dbg(rkb, MSG, "LZ4DEC", "Failed to LZ4 (%s HC) decompress message " - "(offset %"PRId64") at " - "payload offset %"PRIusz"/%"PRIusz": %s", - proper_hc ? "proper":"legacy", - Offset, in_of, inlen, LZ4F_getErrorName(r)); + "(offset %" PRId64 + ") at " + "payload offset %" PRIusz "/%" PRIusz ": %s", + proper_hc ? 
"proper" : "legacy", Offset, + in_of, inlen, LZ4F_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; } rd_kafka_assert(NULL, out_of + out_sz <= outlen && - in_of + in_sz <= inlen); + in_of + in_sz <= inlen); out_of += out_sz; in_of += in_sz; if (r == 0) @@ -276,8 +280,9 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, if (!(tmp = rd_realloc(out, outlen + extra))) { rd_rkb_log(rkb, LOG_WARNING, "LZ4DEC", "Unable to grow decompression " - "buffer to %"PRIusz"+%"PRIusz" bytes: %s", - outlen, extra,rd_strerror(errno)); + "buffer to %" PRIusz "+%" PRIusz + " bytes: %s", + outlen, extra, rd_strerror(errno)); err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; goto done; } @@ -290,18 +295,19 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, if (in_of < inlen) { rd_rkb_dbg(rkb, MSG, "LZ4DEC", "Failed to LZ4 (%s HC) decompress message " - "(offset %"PRId64"): " - "%"PRIusz" (out of %"PRIusz") bytes remaining", - proper_hc ? "proper":"legacy", - Offset, inlen-in_of, inlen); + "(offset %" PRId64 + "): " + "%" PRIusz " (out of %" PRIusz ") bytes remaining", + proper_hc ? "proper" : "legacy", Offset, + inlen - in_of, inlen); err = RD_KAFKA_RESP_ERR__BAD_MSG; goto done; } - *outbuf = out; + *outbuf = out; *outlenp = out_of; - done: +done: code = LZ4F_freeDecompressionContext(dctx); if (LZ4F_isError(code)) { rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR", @@ -319,20 +325,24 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, /** * Allocate space for \p *outbuf and compress all \p iovlen buffers in \p iov. - * @param proper_hc generate a proper HC (checksum) (kafka >=0.10.0.0, MsgVersion >= 1) + * @param proper_hc generate a proper HC (checksum) (kafka >=0.10.0.0, + * MsgVersion >= 1) * @param MessageSetSize indicates (at least) full uncompressed data size, * possibly including MessageSet fields that will not * be compressed. 
* * @returns allocated buffer in \p *outbuf, length in \p *outlenp. */ -rd_kafka_resp_err_t -rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, - rd_slice_t *slice, void **outbuf, size_t *outlenp) { +rd_kafka_resp_err_t rd_kafka_lz4_compress(rd_kafka_broker_t *rkb, + int proper_hc, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp) { LZ4F_compressionContext_t cctx; LZ4F_errorCode_t r; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - size_t len = rd_slice_remains(slice); + size_t len = rd_slice_remains(slice); size_t out_sz; size_t out_of = 0; char *out; @@ -340,11 +350,9 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, size_t rlen; /* Required by Kafka */ - const LZ4F_preferences_t prefs = - { - .frameInfo = { .blockMode = LZ4F_blockIndependent }, - .compressionLevel = comp_level - }; + const LZ4F_preferences_t prefs = { + .frameInfo = {.blockMode = LZ4F_blockIndependent}, + .compressionLevel = comp_level}; *outbuf = NULL; @@ -352,7 +360,7 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, if (LZ4F_isError(out_sz)) { rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "Unable to query LZ4 compressed size " - "(for %"PRIusz" uncompressed bytes): %s", + "(for %" PRIusz " uncompressed bytes): %s", len, LZ4F_getErrorName(out_sz)); return RD_KAFKA_RESP_ERR__BAD_MSG; } @@ -361,7 +369,7 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, if (!out) { rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "Unable to allocate output buffer " - "(%"PRIusz" bytes): %s", + "(%" PRIusz " bytes): %s", out_sz, rd_strerror(errno)); return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; } @@ -379,7 +387,7 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, if (LZ4F_isError(r)) { rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "Unable to begin LZ4 compression " - "(out buffer is %"PRIusz" bytes): %s", + "(out buffer is %" PRIusz " bytes): %s", out_sz, LZ4F_getErrorName(r)); 
err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; @@ -389,16 +397,17 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, while ((rlen = rd_slice_reader(slice, &p))) { rd_assert(out_of < out_sz); - r = LZ4F_compressUpdate(cctx, out+out_of, out_sz-out_of, - p, rlen, NULL); + r = LZ4F_compressUpdate(cctx, out + out_of, out_sz - out_of, p, + rlen, NULL); if (unlikely(LZ4F_isError(r))) { rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "LZ4 compression failed " - "(at of %"PRIusz" bytes, with " - "%"PRIusz" bytes remaining in out buffer): " + "(at of %" PRIusz + " bytes, with " + "%" PRIusz + " bytes remaining in out buffer): " "%s", - rlen, out_sz - out_of, - LZ4F_getErrorName(r)); + rlen, out_sz - out_of, LZ4F_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; } @@ -408,11 +417,11 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, rd_assert(rd_slice_remains(slice) == 0); - r = LZ4F_compressEnd(cctx, out+out_of, out_sz-out_of, NULL); + r = LZ4F_compressEnd(cctx, out + out_of, out_sz - out_of, NULL); if (unlikely(LZ4F_isError(r))) { rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "Failed to finalize LZ4 compression " - "of %"PRIusz" bytes: %s", + "of %" PRIusz " bytes: %s", len, LZ4F_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; @@ -423,20 +432,19 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, /* For the broken legacy framing we need to mess up the header checksum * so that the Kafka client / broker code accepts it. 
*/ if (!proper_hc) - if ((err = rd_kafka_lz4_compress_break_framing(rkb, - out, out_of))) + if ((err = + rd_kafka_lz4_compress_break_framing(rkb, out, out_of))) goto done; *outbuf = out; *outlenp = out_of; - done: +done: LZ4F_freeCompressionContext(cctx); if (err) rd_free(out); return err; - } diff --git a/src/rdkafka_lz4.h b/src/rdkafka_lz4.h index 996db92178..eb0ef98836 100644 --- a/src/rdkafka_lz4.h +++ b/src/rdkafka_lz4.h @@ -31,13 +31,19 @@ #define _RDKAFKA_LZ4_H_ -rd_kafka_resp_err_t -rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, - char *inbuf, size_t inlen, - void **outbuf, size_t *outlenp); +rd_kafka_resp_err_t rd_kafka_lz4_decompress(rd_kafka_broker_t *rkb, + int proper_hc, + int64_t Offset, + char *inbuf, + size_t inlen, + void **outbuf, + size_t *outlenp); -rd_kafka_resp_err_t -rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, - rd_slice_t *slice, void **outbuf, size_t *outlenp); +rd_kafka_resp_err_t rd_kafka_lz4_compress(rd_kafka_broker_t *rkb, + int proper_hc, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp); #endif /* _RDKAFKA_LZ4_H_ */ diff --git a/src/rdkafka_metadata.c b/src/rdkafka_metadata.c index 32a99044bb..d5ceed95fa 100644 --- a/src/rdkafka_metadata.c +++ b/src/rdkafka_metadata.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -40,24 +40,25 @@ rd_kafka_resp_err_t -rd_kafka_metadata (rd_kafka_t *rk, int all_topics, - rd_kafka_topic_t *only_rkt, - const struct rd_kafka_metadata **metadatap, - int timeout_ms) { +rd_kafka_metadata(rd_kafka_t *rk, + int all_topics, + rd_kafka_topic_t *only_rkt, + const struct rd_kafka_metadata **metadatap, + int timeout_ms) { rd_kafka_q_t *rkq; rd_kafka_broker_t *rkb; rd_kafka_op_t *rko; - rd_ts_t ts_end = rd_timeout_init(timeout_ms); + rd_ts_t ts_end = rd_timeout_init(timeout_ms); rd_list_t topics; rd_bool_t allow_auto_create_topics = - rk->rk_conf.allow_auto_create_topics; + rk->rk_conf.allow_auto_create_topics; /* Query any broker that is up, and if none are up pick the first one, * if we're lucky it will be up before the timeout */ rkb = rd_kafka_broker_any_usable(rk, timeout_ms, RD_DO_LOCK, 0, "application metadata request"); - if (!rkb) - return RD_KAFKA_RESP_ERR__TRANSPORT; + if (!rkb) + return RD_KAFKA_RESP_ERR__TRANSPORT; rkq = rd_kafka_q_new(rk); @@ -74,7 +75,6 @@ rd_kafka_metadata (rd_kafka_t *rk, int all_topics, if (rd_list_cnt(&topics) == cache_cnt) allow_auto_create_topics = rd_true; } - } /* Async: request metadata */ @@ -90,8 +90,7 @@ rd_kafka_metadata (rd_kafka_t *rk, int all_topics, * topics in the cluster, since a * partial request may make it seem * like some subscribed topics are missing. */ - all_topics ? rd_true : rd_false, - rko); + all_topics ? 
rd_true : rd_false, rko); rd_list_destroy(&topics); rd_kafka_broker_destroy(rkb); @@ -114,7 +113,7 @@ rd_kafka_metadata (rd_kafka_t *rk, int all_topics, /* Reply: pass metadata pointer to application who now owns it*/ rd_kafka_assert(rk, rko->rko_u.metadata.md); - *metadatap = rko->rko_u.metadata.md; + *metadatap = rko->rko_u.metadata.md; rko->rko_u.metadata.md = NULL; rd_kafka_op_destroy(rko); @@ -123,7 +122,7 @@ rd_kafka_metadata (rd_kafka_t *rk, int all_topics, -void rd_kafka_metadata_destroy (const struct rd_kafka_metadata *metadata) { +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata) { rd_free((void *)metadata); } @@ -132,85 +131,73 @@ void rd_kafka_metadata_destroy (const struct rd_kafka_metadata *metadata) { * @returns a newly allocated copy of metadata \p src of size \p size */ struct rd_kafka_metadata * -rd_kafka_metadata_copy (const struct rd_kafka_metadata *src, size_t size) { - struct rd_kafka_metadata *md; - rd_tmpabuf_t tbuf; - int i; +rd_kafka_metadata_copy(const struct rd_kafka_metadata *src, size_t size) { + struct rd_kafka_metadata *md; + rd_tmpabuf_t tbuf; + int i; - /* metadata is stored in one contigious buffer where structs and - * and pointed-to fields are layed out in a memory aligned fashion. - * rd_tmpabuf_t provides the infrastructure to do this. - * Because of this we copy all the structs verbatim but - * any pointer fields needs to be copied explicitly to update - * the pointer address. */ - rd_tmpabuf_new(&tbuf, size, 1/*assert on fail*/); - md = rd_tmpabuf_write(&tbuf, src, sizeof(*md)); + /* metadata is stored in one contigious buffer where structs and + * and pointed-to fields are layed out in a memory aligned fashion. + * rd_tmpabuf_t provides the infrastructure to do this. + * Because of this we copy all the structs verbatim but + * any pointer fields needs to be copied explicitly to update + * the pointer address. 
*/ + rd_tmpabuf_new(&tbuf, size, 1 /*assert on fail*/); + md = rd_tmpabuf_write(&tbuf, src, sizeof(*md)); - rd_tmpabuf_write_str(&tbuf, src->orig_broker_name); + rd_tmpabuf_write_str(&tbuf, src->orig_broker_name); - /* Copy Brokers */ - md->brokers = rd_tmpabuf_write(&tbuf, src->brokers, - md->broker_cnt * sizeof(*md->brokers)); + /* Copy Brokers */ + md->brokers = rd_tmpabuf_write(&tbuf, src->brokers, + md->broker_cnt * sizeof(*md->brokers)); - for (i = 0 ; i < md->broker_cnt ; i++) - md->brokers[i].host = - rd_tmpabuf_write_str(&tbuf, src->brokers[i].host); + for (i = 0; i < md->broker_cnt; i++) + md->brokers[i].host = + rd_tmpabuf_write_str(&tbuf, src->brokers[i].host); - /* Copy TopicMetadata */ + /* Copy TopicMetadata */ md->topics = rd_tmpabuf_write(&tbuf, src->topics, - md->topic_cnt * sizeof(*md->topics)); - - for (i = 0 ; i < md->topic_cnt ; i++) { - int j; - - md->topics[i].topic = rd_tmpabuf_write_str(&tbuf, - src->topics[i].topic); - - - /* Copy partitions */ - md->topics[i].partitions = - rd_tmpabuf_write(&tbuf, src->topics[i].partitions, - md->topics[i].partition_cnt * - sizeof(*md->topics[i].partitions)); - - for (j = 0 ; j < md->topics[i].partition_cnt ; j++) { - /* Copy replicas and ISRs */ - md->topics[i].partitions[j].replicas = - rd_tmpabuf_write(&tbuf, - src->topics[i].partitions[j]. - replicas, - md->topics[i].partitions[j]. - replica_cnt * - sizeof(*md->topics[i]. - partitions[j]. - replicas)); - - md->topics[i].partitions[j].isrs = - rd_tmpabuf_write(&tbuf, - src->topics[i].partitions[j]. - isrs, - md->topics[i].partitions[j]. - isr_cnt * - sizeof(*md->topics[i]. - partitions[j]. - isrs)); - - } - } - - /* Check for tmpabuf errors */ - if (rd_tmpabuf_failed(&tbuf)) - rd_kafka_assert(NULL, !*"metadata copy failed"); - - /* Delibarely not destroying the tmpabuf since we return - * its allocated memory. 
*/ - - return md; -} + md->topic_cnt * sizeof(*md->topics)); + + for (i = 0; i < md->topic_cnt; i++) { + int j; + + md->topics[i].topic = + rd_tmpabuf_write_str(&tbuf, src->topics[i].topic); + /* Copy partitions */ + md->topics[i].partitions = + rd_tmpabuf_write(&tbuf, src->topics[i].partitions, + md->topics[i].partition_cnt * + sizeof(*md->topics[i].partitions)); + + for (j = 0; j < md->topics[i].partition_cnt; j++) { + /* Copy replicas and ISRs */ + md->topics[i].partitions[j].replicas = rd_tmpabuf_write( + &tbuf, src->topics[i].partitions[j].replicas, + md->topics[i].partitions[j].replica_cnt * + sizeof(*md->topics[i].partitions[j].replicas)); + + md->topics[i].partitions[j].isrs = rd_tmpabuf_write( + &tbuf, src->topics[i].partitions[j].isrs, + md->topics[i].partitions[j].isr_cnt * + sizeof(*md->topics[i].partitions[j].isrs)); + } + } + + /* Check for tmpabuf errors */ + if (rd_tmpabuf_failed(&tbuf)) + rd_kafka_assert(NULL, !*"metadata copy failed"); + + /* Delibarely not destroying the tmpabuf since we return + * its allocated memory. 
*/ + + return md; +} + /** @@ -226,45 +213,45 @@ rd_kafka_metadata_copy (const struct rd_kafka_metadata *src, size_t size) { * * @locality rdkafka main thread */ -rd_kafka_resp_err_t -rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *request, - rd_kafka_buf_t *rkbuf, - struct rd_kafka_metadata **mdp) { +rd_kafka_resp_err_t rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *request, + rd_kafka_buf_t *rkbuf, + struct rd_kafka_metadata **mdp) { rd_kafka_t *rk = rkb->rkb_rk; int i, j, k; rd_tmpabuf_t tbuf; struct rd_kafka_metadata *md; size_t rkb_namelen; - const int log_decode_errors = LOG_ERR; - rd_list_t *missing_topics = NULL; + const int log_decode_errors = LOG_ERR; + rd_list_t *missing_topics = NULL; const rd_list_t *requested_topics = request->rkbuf_u.Metadata.topics; rd_bool_t all_topics = request->rkbuf_u.Metadata.all_topics; - rd_bool_t cgrp_update = request->rkbuf_u.Metadata.cgrp_update && - rk->rk_cgrp; - const char *reason = request->rkbuf_u.Metadata.reason ? - request->rkbuf_u.Metadata.reason : "(no reason)"; - int ApiVersion = request->rkbuf_reqhdr.ApiVersion; + rd_bool_t cgrp_update = + request->rkbuf_u.Metadata.cgrp_update && rk->rk_cgrp; + const char *reason = request->rkbuf_u.Metadata.reason + ? request->rkbuf_u.Metadata.reason + : "(no reason)"; + int ApiVersion = request->rkbuf_reqhdr.ApiVersion; rd_kafkap_str_t cluster_id = RD_ZERO_INIT; - int32_t controller_id = -1; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - int broker_changes = 0; - int topic_changes = 0; + int32_t controller_id = -1; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + int broker_changes = 0; + int topic_changes = 0; rd_kafka_assert(NULL, thrd_is_current(rk->rk_thread)); /* Remove topics from missing_topics as they are seen in Metadata. 
*/ if (requested_topics) - missing_topics = rd_list_copy(requested_topics, - rd_list_string_copy, NULL); + missing_topics = + rd_list_copy(requested_topics, rd_list_string_copy, NULL); rd_kafka_broker_lock(rkb); - rkb_namelen = strlen(rkb->rkb_name)+1; + rkb_namelen = strlen(rkb->rkb_name) + 1; /* We assume that the marshalled representation is * no more than 4 times larger than the wire representation. */ rd_tmpabuf_new(&tbuf, sizeof(*md) + rkb_namelen + (rkbuf->rkbuf_totlen * 4), - 0/*dont assert on fail*/); + 0 /*dont assert on fail*/); if (!(md = rd_tmpabuf_alloc(&tbuf, sizeof(*md)))) { rd_kafka_broker_unlock(rkb); @@ -273,8 +260,8 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, } md->orig_broker_id = rkb->rkb_nodeid; - md->orig_broker_name = rd_tmpabuf_write(&tbuf, - rkb->rkb_name, rkb_namelen); + md->orig_broker_name = + rd_tmpabuf_write(&tbuf, rkb->rkb_name, rkb_namelen); rd_kafka_broker_unlock(rkb); if (ApiVersion >= 3) @@ -287,14 +274,15 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, md->broker_cnt, RD_KAFKAP_BROKERS_MAX); if (!(md->brokers = rd_tmpabuf_alloc(&tbuf, md->broker_cnt * - sizeof(*md->brokers)))) + sizeof(*md->brokers)))) rd_kafka_buf_parse_fail(rkbuf, "%d brokers: tmpabuf memory shortage", md->broker_cnt); - for (i = 0 ; i < md->broker_cnt ; i++) { + for (i = 0; i < md->broker_cnt; i++) { rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].id); - rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, md->brokers[i].host); + rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, + md->brokers[i].host); rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].port); if (ApiVersion >= 1) { @@ -308,8 +296,8 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, if (ApiVersion >= 1) { rd_kafka_buf_read_i32(rkbuf, &controller_id); - rd_rkb_dbg(rkb, METADATA, - "METADATA", "ClusterId: %.*s, ControllerId: %"PRId32, + rd_rkb_dbg(rkb, METADATA, "METADATA", + "ClusterId: %.*s, ControllerId: %" PRId32, RD_KAFKAP_STR_PR(&cluster_id), controller_id); } @@ -321,20 +309,19 @@ 
rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, md->broker_cnt, md->topic_cnt); if (md->topic_cnt > RD_KAFKAP_TOPICS_MAX) - rd_kafka_buf_parse_fail(rkbuf, "TopicMetadata_cnt %"PRId32 - " > TOPICS_MAX %i", - md->topic_cnt, RD_KAFKAP_TOPICS_MAX); + rd_kafka_buf_parse_fail( + rkbuf, "TopicMetadata_cnt %" PRId32 " > TOPICS_MAX %i", + md->topic_cnt, RD_KAFKAP_TOPICS_MAX); - if (!(md->topics = rd_tmpabuf_alloc(&tbuf, - md->topic_cnt * - sizeof(*md->topics)))) - rd_kafka_buf_parse_fail(rkbuf, - "%d topics: tmpabuf memory shortage", - md->topic_cnt); + if (!(md->topics = + rd_tmpabuf_alloc(&tbuf, md->topic_cnt * sizeof(*md->topics)))) + rd_kafka_buf_parse_fail( + rkbuf, "%d topics: tmpabuf memory shortage", md->topic_cnt); - for (i = 0 ; i < md->topic_cnt ; i++) { + for (i = 0; i < md->topic_cnt; i++) { rd_kafka_buf_read_i16a(rkbuf, md->topics[i].err); - rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, md->topics[i].topic); + rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, + md->topics[i].topic); if (ApiVersion >= 1) { int8_t is_internal; rd_kafka_buf_read_i8(rkbuf, &is_internal); @@ -350,95 +337,100 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, i, md->topics[i].partition_cnt, RD_KAFKAP_PARTITIONS_MAX); - if (!(md->topics[i].partitions = - rd_tmpabuf_alloc(&tbuf, - md->topics[i].partition_cnt * - sizeof(*md->topics[i].partitions)))) + if (!(md->topics[i].partitions = rd_tmpabuf_alloc( + &tbuf, md->topics[i].partition_cnt * + sizeof(*md->topics[i].partitions)))) rd_kafka_buf_parse_fail(rkbuf, "%s: %d partitions: " "tmpabuf memory shortage", md->topics[i].topic, md->topics[i].partition_cnt); - for (j = 0 ; j < md->topics[i].partition_cnt ; j++) { - rd_kafka_buf_read_i16a(rkbuf, md->topics[i].partitions[j].err); - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].id); - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].leader); + for (j = 0; j < md->topics[i].partition_cnt; j++) { + rd_kafka_buf_read_i16a(rkbuf, + md->topics[i].partitions[j].err); + 
rd_kafka_buf_read_i32a(rkbuf, + md->topics[i].partitions[j].id); + rd_kafka_buf_read_i32a( + rkbuf, md->topics[i].partitions[j].leader); /* Replicas */ - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].replica_cnt); + rd_kafka_buf_read_i32a( + rkbuf, md->topics[i].partitions[j].replica_cnt); if (md->topics[i].partitions[j].replica_cnt > RD_KAFKAP_BROKERS_MAX) - rd_kafka_buf_parse_fail(rkbuf, - "TopicMetadata[%i]." - "PartitionMetadata[%i]." - "Replica_cnt " - "%i > BROKERS_MAX %i", - i, j, - md->topics[i]. - partitions[j]. - replica_cnt, - RD_KAFKAP_BROKERS_MAX); + rd_kafka_buf_parse_fail( + rkbuf, + "TopicMetadata[%i]." + "PartitionMetadata[%i]." + "Replica_cnt " + "%i > BROKERS_MAX %i", + i, j, + md->topics[i].partitions[j].replica_cnt, + RD_KAFKAP_BROKERS_MAX); if (!(md->topics[i].partitions[j].replicas = - rd_tmpabuf_alloc(&tbuf, - md->topics[i]. - partitions[j].replica_cnt * - sizeof(*md->topics[i]. - partitions[j].replicas)))) + rd_tmpabuf_alloc( + &tbuf, + md->topics[i].partitions[j].replica_cnt * + sizeof(*md->topics[i] + .partitions[j] + .replicas)))) rd_kafka_buf_parse_fail( - rkbuf, - "%s [%"PRId32"]: %d replicas: " - "tmpabuf memory shortage", - md->topics[i].topic, - md->topics[i].partitions[j].id, - md->topics[i].partitions[j].replica_cnt); + rkbuf, + "%s [%" PRId32 + "]: %d replicas: " + "tmpabuf memory shortage", + md->topics[i].topic, + md->topics[i].partitions[j].id, + md->topics[i].partitions[j].replica_cnt); - for (k = 0 ; - k < md->topics[i].partitions[j].replica_cnt; k++) - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j]. 
- replicas[k]); + for (k = 0; k < md->topics[i].partitions[j].replica_cnt; + k++) + rd_kafka_buf_read_i32a( + rkbuf, + md->topics[i].partitions[j].replicas[k]); /* Isrs */ - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].isr_cnt); + rd_kafka_buf_read_i32a( + rkbuf, md->topics[i].partitions[j].isr_cnt); if (md->topics[i].partitions[j].isr_cnt > RD_KAFKAP_BROKERS_MAX) - rd_kafka_buf_parse_fail(rkbuf, - "TopicMetadata[%i]." - "PartitionMetadata[%i]." - "Isr_cnt " - "%i > BROKERS_MAX %i", - i, j, - md->topics[i]. - partitions[j].isr_cnt, - RD_KAFKAP_BROKERS_MAX); - - if (!(md->topics[i].partitions[j].isrs = - rd_tmpabuf_alloc(&tbuf, - md->topics[i]. - partitions[j].isr_cnt * - sizeof(*md->topics[i]. - partitions[j].isrs)))) rd_kafka_buf_parse_fail( - rkbuf, - "%s [%"PRId32"]: %d isrs: " - "tmpabuf memory shortage", - md->topics[i].topic, - md->topics[i].partitions[j].id, - md->topics[i].partitions[j].isr_cnt); - - - for (k = 0 ; - k < md->topics[i].partitions[j].isr_cnt; k++) - rd_kafka_buf_read_i32a(rkbuf, md->topics[i]. - partitions[j].isrs[k]); - + rkbuf, + "TopicMetadata[%i]." + "PartitionMetadata[%i]." 
+ "Isr_cnt " + "%i > BROKERS_MAX %i", + i, j, md->topics[i].partitions[j].isr_cnt, + RD_KAFKAP_BROKERS_MAX); + + if (!(md->topics[i] + .partitions[j] + .isrs = rd_tmpabuf_alloc( + &tbuf, + md->topics[i].partitions[j].isr_cnt * + sizeof( + *md->topics[i].partitions[j].isrs)))) + rd_kafka_buf_parse_fail( + rkbuf, + "%s [%" PRId32 + "]: %d isrs: " + "tmpabuf memory shortage", + md->topics[i].topic, + md->topics[i].partitions[j].id, + md->topics[i].partitions[j].isr_cnt); + + + for (k = 0; k < md->topics[i].partitions[j].isr_cnt; + k++) + rd_kafka_buf_read_i32a( + rkbuf, md->topics[i].partitions[j].isrs[k]); } /* Sort partitions by partition id */ - qsort(md->topics[i].partitions, - md->topics[i].partition_cnt, + qsort(md->topics[i].partitions, md->topics[i].partition_cnt, sizeof(*md->topics[i].partitions), rd_kafka_metadata_partition_id_cmp); } @@ -460,24 +452,21 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, } /* Update our list of brokers. */ - for (i = 0 ; i < md->broker_cnt ; i++) { + for (i = 0; i < md->broker_cnt; i++) { rd_rkb_dbg(rkb, METADATA, "METADATA", - " Broker #%i/%i: %s:%i NodeId %"PRId32, - i, md->broker_cnt, - md->brokers[i].host, - md->brokers[i].port, - md->brokers[i].id); + " Broker #%i/%i: %s:%i NodeId %" PRId32, i, + md->broker_cnt, md->brokers[i].host, + md->brokers[i].port, md->brokers[i].id); rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto, &md->brokers[i], NULL); } /* Update partition count and leader for each topic we know about */ - for (i = 0 ; i < md->topic_cnt ; i++) { + for (i = 0; i < md->topic_cnt; i++) { rd_kafka_metadata_topic_t *mdt = &md->topics[i]; rd_rkb_dbg(rkb, METADATA, "METADATA", - " Topic #%i/%i: %s with %i partitions%s%s", - i, md->topic_cnt, mdt->topic, - mdt->partition_cnt, + " Topic #%i/%i: %s with %i partitions%s%s", i, + md->topic_cnt, mdt->topic, mdt->partition_cnt, mdt->err ? ": " : "", mdt->err ? 
rd_kafka_err2str(mdt->err) : ""); @@ -487,7 +476,8 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, mdt->topic)) { rd_rkb_dbg(rkb, TOPIC, "BLACKLIST", "Ignoring blacklisted topic \"%s\" " - "in metadata", mdt->topic); + "in metadata", + mdt->topic); continue; } @@ -511,12 +501,11 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, rd_list_free_cb(missing_topics, rd_list_remove_cmp(missing_topics, mdt->topic, - (void*)strcmp)); + (void *)strcmp)); if (!all_topics) { rd_kafka_wrlock(rk); rd_kafka_metadata_cache_topic_update( - rk, mdt, - rd_false/*propagate later*/); + rk, mdt, rd_false /*propagate later*/); topic_changes++; rd_kafka_wrunlock(rk); } @@ -530,16 +519,16 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, rd_rkb_dbg(rkb, TOPIC, "METADATA", "%d/%d requested topic(s) seen in metadata", rd_list_cnt(requested_topics) - - rd_list_cnt(missing_topics), + rd_list_cnt(missing_topics), rd_list_cnt(requested_topics)); - for (i = 0 ; i < rd_list_cnt(missing_topics) ; i++) + for (i = 0; i < rd_list_cnt(missing_topics); i++) rd_rkb_dbg(rkb, TOPIC, "METADATA", "wanted %s", (char *)(missing_topics->rl_elems[i])); RD_LIST_FOREACH(topic, missing_topics, i) { rd_kafka_topic_t *rkt; - rkt = rd_kafka_topic_find(rkb->rkb_rk, - topic, 1/*lock*/); + rkt = + rd_kafka_topic_find(rkb->rkb_rk, topic, 1 /*lock*/); if (rkt) { /* Received metadata response contained no * information about topic 'rkt' and thus @@ -548,7 +537,7 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, * Mark the topic as non-existent */ rd_kafka_topic_wrlock(rkt); rd_kafka_topic_set_notexists( - rkt, RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); + rkt, RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); rd_kafka_topic_wrunlock(rkt); rd_kafka_topic_destroy0(rkt); @@ -565,10 +554,9 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, if (RD_KAFKAP_STR_LEN(&cluster_id) > 0 && (!rk->rk_clusterid || rd_kafkap_str_cmp_str(&cluster_id, rk->rk_clusterid))) { - rd_rkb_dbg(rkb, BROKER|RD_KAFKA_DBG_GENERIC, "CLUSTERID", + rd_rkb_dbg(rkb, BROKER 
| RD_KAFKA_DBG_GENERIC, "CLUSTERID", "ClusterId update \"%s\" -> \"%.*s\"", - rk->rk_clusterid ? - rk->rk_clusterid : "", + rk->rk_clusterid ? rk->rk_clusterid : "", RD_KAFKAP_STR_PR(&cluster_id)); if (rk->rk_clusterid) { rd_kafka_log(rk, LOG_WARNING, "CLUSTERID", @@ -588,20 +576,21 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, /* Update controller id. */ if (rkb->rkb_rk->rk_controllerid != controller_id) { rd_rkb_dbg(rkb, BROKER, "CONTROLLERID", - "ControllerId update %"PRId32" -> %"PRId32, + "ControllerId update %" PRId32 " -> %" PRId32, rkb->rkb_rk->rk_controllerid, controller_id); rkb->rkb_rk->rk_controllerid = controller_id; broker_changes++; } if (all_topics) { - rd_kafka_metadata_cache_update(rkb->rkb_rk, - md, 1/*abs update*/); + rd_kafka_metadata_cache_update(rkb->rkb_rk, md, + 1 /*abs update*/); if (rkb->rkb_rk->rk_full_metadata) - rd_kafka_metadata_destroy(rkb->rkb_rk->rk_full_metadata); + rd_kafka_metadata_destroy( + rkb->rkb_rk->rk_full_metadata); rkb->rkb_rk->rk_full_metadata = - rd_kafka_metadata_copy(md, tbuf.of); + rd_kafka_metadata_copy(md, tbuf.of); rkb->rkb_rk->rk_ts_full_metadata = rkb->rkb_rk->rk_ts_metadata; rd_rkb_dbg(rkb, METADATA, "METADATA", "Caching full metadata with " @@ -632,8 +621,8 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, * the effective subscription of available topics) as to not * propagate non-included topics as non-existent. */ if (cgrp_update && (requested_topics || all_topics)) - rd_kafka_cgrp_metadata_update_check( - rkb->rkb_rk->rk_cgrp, rd_true/*do join*/); + rd_kafka_cgrp_metadata_update_check(rkb->rkb_rk->rk_cgrp, + rd_true /*do join*/); /* Try to acquire a Producer ID from this broker if we * don't have one. */ @@ -657,9 +646,9 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: if (requested_topics) { /* Failed requests shall purge cache hints for * the requested topics. 
*/ @@ -692,9 +681,10 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, * @locality any */ size_t -rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos, - const rd_kafka_topic_partition_list_t *match, - rd_kafka_topic_partition_list_t *errored) { +rd_kafka_metadata_topic_match(rd_kafka_t *rk, + rd_list_t *tinfos, + const rd_kafka_topic_partition_list_t *match, + rd_kafka_topic_partition_list_t *errored) { int ti, i; size_t cnt = 0; const struct rd_kafka_metadata *metadata; @@ -717,7 +707,7 @@ rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos, /* For each topic in the cluster, scan through the match list * to find matching topic. */ - for (ti = 0 ; ti < metadata->topic_cnt ; ti++) { + for (ti = 0; ti < metadata->topic_cnt; ti++) { const char *topic = metadata->topics[ti].topic; /* Ignore topics in blacklist */ @@ -726,28 +716,27 @@ rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos, continue; /* Scan for matches */ - for (i = 0 ; i < match->cnt ; i++) { - if (!rd_kafka_topic_match(rk, - match->elems[i].topic, topic)) + for (i = 0; i < match->cnt; i++) { + if (!rd_kafka_topic_match(rk, match->elems[i].topic, + topic)) continue; /* Remove from unmatched */ rd_kafka_topic_partition_list_del( - unmatched, match->elems[i].topic, - RD_KAFKA_PARTITION_UA); + unmatched, match->elems[i].topic, + RD_KAFKA_PARTITION_UA); if (metadata->topics[ti].err) { rd_kafka_topic_partition_list_add( - errored, topic, - RD_KAFKA_PARTITION_UA)->err = - metadata->topics[ti].err; + errored, topic, RD_KAFKA_PARTITION_UA) + ->err = metadata->topics[ti].err; continue; /* Skip errored topics */ } - rd_list_add(tinfos, - rd_kafka_topic_info_new( - topic, - metadata->topics[ti].partition_cnt)); + rd_list_add( + tinfos, + rd_kafka_topic_info_new( + topic, metadata->topics[ti].partition_cnt)); cnt++; } @@ -756,13 +745,12 @@ rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos, /* Any topics/patterns still in unmatched did not match any * existing 
topics, add them to `errored`. */ - for (i = 0 ; i < unmatched->cnt ; i++) { + for (i = 0; i < unmatched->cnt; i++) { rd_kafka_topic_partition_t *elem = &unmatched->elems[i]; - rd_kafka_topic_partition_list_add(errored, - elem->topic, - RD_KAFKA_PARTITION_UA)->err = - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + rd_kafka_topic_partition_list_add(errored, elem->topic, + RD_KAFKA_PARTITION_UA) + ->err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; } rd_kafka_topic_partition_list_destroy(unmatched); @@ -783,15 +771,16 @@ rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos, * @locks none */ size_t -rd_kafka_metadata_topic_filter (rd_kafka_t *rk, rd_list_t *tinfos, - const rd_kafka_topic_partition_list_t *match, - rd_kafka_topic_partition_list_t *errored) { +rd_kafka_metadata_topic_filter(rd_kafka_t *rk, + rd_list_t *tinfos, + const rd_kafka_topic_partition_list_t *match, + rd_kafka_topic_partition_list_t *errored) { int i; size_t cnt = 0; rd_kafka_rdlock(rk); /* For each topic in match, look up the topic in the cache. 
*/ - for (i = 0 ; i < match->cnt ; i++) { + for (i = 0; i < match->cnt; i++) { const char *topic = match->elems[i].topic; const rd_kafka_metadata_topic_t *mtopic; @@ -800,21 +789,20 @@ rd_kafka_metadata_topic_filter (rd_kafka_t *rk, rd_list_t *tinfos, rd_kafka_pattern_match(rk->rk_conf.topic_blacklist, topic)) continue; - mtopic = rd_kafka_metadata_cache_topic_get(rk, topic, - 1/*valid*/); + mtopic = + rd_kafka_metadata_cache_topic_get(rk, topic, 1 /*valid*/); if (!mtopic) - rd_kafka_topic_partition_list_add( - errored, topic, RD_KAFKA_PARTITION_UA)->err = - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + rd_kafka_topic_partition_list_add(errored, topic, + RD_KAFKA_PARTITION_UA) + ->err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; else if (mtopic->err) - rd_kafka_topic_partition_list_add( - errored, topic, RD_KAFKA_PARTITION_UA)->err = - mtopic->err; + rd_kafka_topic_partition_list_add(errored, topic, + RD_KAFKA_PARTITION_UA) + ->err = mtopic->err; else { - rd_list_add(tinfos, - rd_kafka_topic_info_new( - topic, mtopic->partition_cnt)); + rd_list_add(tinfos, rd_kafka_topic_info_new( + topic, mtopic->partition_cnt)); cnt++; } @@ -825,37 +813,35 @@ rd_kafka_metadata_topic_filter (rd_kafka_t *rk, rd_list_t *tinfos, } -void rd_kafka_metadata_log (rd_kafka_t *rk, const char *fac, - const struct rd_kafka_metadata *md) { +void rd_kafka_metadata_log(rd_kafka_t *rk, + const char *fac, + const struct rd_kafka_metadata *md) { int i; rd_kafka_dbg(rk, METADATA, fac, "Metadata with %d broker(s) and %d topic(s):", md->broker_cnt, md->topic_cnt); - for (i = 0 ; i < md->broker_cnt ; i++) { + for (i = 0; i < md->broker_cnt; i++) { rd_kafka_dbg(rk, METADATA, fac, - " Broker #%i/%i: %s:%i NodeId %"PRId32, - i, md->broker_cnt, - md->brokers[i].host, - md->brokers[i].port, - md->brokers[i].id); + " Broker #%i/%i: %s:%i NodeId %" PRId32, i, + md->broker_cnt, md->brokers[i].host, + md->brokers[i].port, md->brokers[i].id); } - for (i = 0 ; i < md->topic_cnt ; i++) { - rd_kafka_dbg(rk, METADATA, fac, - " Topic 
#%i/%i: %s with %i partitions%s%s", - i, md->topic_cnt, md->topics[i].topic, - md->topics[i].partition_cnt, - md->topics[i].err ? ": " : "", - md->topics[i].err ? - rd_kafka_err2str(md->topics[i].err) : ""); + for (i = 0; i < md->topic_cnt; i++) { + rd_kafka_dbg( + rk, METADATA, fac, + " Topic #%i/%i: %s with %i partitions%s%s", i, + md->topic_cnt, md->topics[i].topic, + md->topics[i].partition_cnt, md->topics[i].err ? ": " : "", + md->topics[i].err ? rd_kafka_err2str(md->topics[i].err) + : ""); } } - /** * @brief Refresh metadata for \p topics * @@ -874,11 +860,13 @@ void rd_kafka_metadata_log (rd_kafka_t *rk, const char *fac, * @locks none */ rd_kafka_resp_err_t -rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const rd_list_t *topics, rd_bool_t force, - rd_bool_t allow_auto_create, - rd_bool_t cgrp_update, - const char *reason) { +rd_kafka_metadata_refresh_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const rd_list_t *topics, + rd_bool_t force, + rd_bool_t allow_auto_create, + rd_bool_t cgrp_update, + const char *reason) { rd_list_t q_topics; int destroy_rkb = 0; @@ -890,15 +878,14 @@ rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, rd_kafka_wrlock(rk); if (!rkb) { - if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, - RD_DONT_LOCK, 0, - reason))) { + if (!(rkb = rd_kafka_broker_any_usable( + rk, RD_POLL_NOWAIT, RD_DONT_LOCK, 0, reason))) { /* Hint cache that something is interested in * these topics so that they will be included in * a future all known_topics query. */ rd_kafka_metadata_cache_hint(rk, topics, NULL, RD_KAFKA_RESP_ERR__NOENT, - 0/*dont replace*/); + 0 /*dont replace*/); rd_kafka_wrunlock(rk); rd_kafka_dbg(rk, METADATA, "METADATA", @@ -920,7 +907,7 @@ rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * q_topics will contain remaining topics to query. 
*/ rd_kafka_metadata_cache_hint(rk, topics, &q_topics, RD_KAFKA_RESP_ERR__WAIT_CACHE, - rd_false/*dont replace*/); + rd_false /*dont replace*/); rd_kafka_wrunlock(rk); if (rd_list_cnt(&q_topics) == 0) { @@ -970,8 +957,10 @@ rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * @locks none */ rd_kafka_resp_err_t -rd_kafka_metadata_refresh_known_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - rd_bool_t force, const char *reason) { +rd_kafka_metadata_refresh_known_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_bool_t force, + const char *reason) { rd_list_t topics; rd_kafka_resp_err_t err; int cache_cnt = 0; @@ -986,17 +975,14 @@ rd_kafka_metadata_refresh_known_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, /* Allow topic auto creation if there are locally known topics (rkt) * and not just cached (to be queried) topics. */ allow_auto_create_topics = rk->rk_conf.allow_auto_create_topics && - rd_list_cnt(&topics) > cache_cnt; + rd_list_cnt(&topics) > cache_cnt; if (rd_list_cnt(&topics) == 0) err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; else err = rd_kafka_metadata_refresh_topics( - rk, rkb, - &topics, force, - allow_auto_create_topics, - rd_false/*!cgrp_update*/, - reason); + rk, rkb, &topics, force, allow_auto_create_topics, + rd_false /*!cgrp_update*/, reason); rd_list_destroy(&topics); @@ -1018,14 +1004,14 @@ rd_kafka_metadata_refresh_known_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * @locks_acquired rk(read) */ rd_kafka_resp_err_t -rd_kafka_metadata_refresh_consumer_topics (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - const char *reason) { +rd_kafka_metadata_refresh_consumer_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason) { rd_list_t topics; rd_kafka_resp_err_t err; rd_kafka_cgrp_t *rkcg; rd_bool_t allow_auto_create_topics = - rk->rk_conf.allow_auto_create_topics; + rk->rk_conf.allow_auto_create_topics; int cache_cnt = 0; if (!rk) { @@ -1054,18 +1040,15 @@ rd_kafka_metadata_refresh_consumer_topics (rd_kafka_t 
*rk, /* Add subscribed (non-wildcard) topics, if any. */ if (rkcg->rkcg_subscription) rd_kafka_topic_partition_list_get_topic_names( - rkcg->rkcg_subscription, &topics, - rd_false/*no wildcards*/); + rkcg->rkcg_subscription, &topics, + rd_false /*no wildcards*/); if (rd_list_cnt(&topics) == 0) err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; else err = rd_kafka_metadata_refresh_topics( - rk, rkb, &topics, - rd_true/*force*/, - allow_auto_create_topics, - rd_true/*cgrp_update*/, - reason); + rk, rkb, &topics, rd_true /*force*/, + allow_auto_create_topics, rd_true /*cgrp_update*/, reason); rd_list_destroy(&topics); @@ -1087,13 +1070,13 @@ rd_kafka_metadata_refresh_consumer_topics (rd_kafka_t *rk, * @locality any * @locks none */ -rd_kafka_resp_err_t -rd_kafka_metadata_refresh_brokers (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *reason) { +rd_kafka_resp_err_t rd_kafka_metadata_refresh_brokers(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason) { return rd_kafka_metadata_request(rk, rkb, NULL /*brokers only*/, - rd_false/*!allow auto create topics*/, - rd_false/*no cgrp update */, - reason, NULL); + rd_false /*!allow auto create topics*/, + rd_false /*no cgrp update */, reason, + NULL); } @@ -1106,9 +1089,9 @@ rd_kafka_metadata_refresh_brokers (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * @locality any * @locks none */ -rd_kafka_resp_err_t -rd_kafka_metadata_refresh_all (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *reason) { +rd_kafka_resp_err_t rd_kafka_metadata_refresh_all(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason) { int destroy_rkb = 0; rd_list_t topics; @@ -1119,17 +1102,15 @@ rd_kafka_metadata_refresh_all (rd_kafka_t *rk, rd_kafka_broker_t *rkb, if (!rkb) { if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, - RD_DO_LOCK, 0, - reason))) + RD_DO_LOCK, 0, reason))) return RD_KAFKA_RESP_ERR__TRANSPORT; destroy_rkb = 1; } rd_list_init(&topics, 0, NULL); /* empty list = all topics */ rd_kafka_MetadataRequest(rkb, &topics, 
reason, - rd_false/*no auto create*/, - rd_true/*cgrp update*/, - NULL); + rd_false /*no auto create*/, + rd_true /*cgrp update*/, NULL); rd_list_destroy(&topics); if (destroy_rkb) @@ -1150,23 +1131,23 @@ rd_kafka_metadata_refresh_all (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * @locality any */ rd_kafka_resp_err_t -rd_kafka_metadata_request (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const rd_list_t *topics, - rd_bool_t allow_auto_create_topics, - rd_bool_t cgrp_update, - const char *reason, rd_kafka_op_t *rko) { +rd_kafka_metadata_request(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const rd_list_t *topics, + rd_bool_t allow_auto_create_topics, + rd_bool_t cgrp_update, + const char *reason, + rd_kafka_op_t *rko) { int destroy_rkb = 0; if (!rkb) { if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, - RD_DO_LOCK, 0, - reason))) + RD_DO_LOCK, 0, reason))) return RD_KAFKA_RESP_ERR__TRANSPORT; destroy_rkb = 1; } - rd_kafka_MetadataRequest(rkb, topics, reason, - allow_auto_create_topics, + rd_kafka_MetadataRequest(rkb, topics, reason, allow_auto_create_topics, cgrp_update, rko); if (destroy_rkb) @@ -1183,9 +1164,9 @@ rd_kafka_metadata_request (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * @locks none * @locality rdkafka main thread */ -static void rd_kafka_metadata_leader_query_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_t *rk = rkts->rkts_rk; +static void rd_kafka_metadata_leader_query_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_t *rk = rkts->rkts_rk; rd_kafka_timer_t *rtmr = &rk->rk_metadata_cache.rkmc_query_tmr; rd_kafka_topic_t *rkt; rd_list_t topics; @@ -1203,14 +1184,16 @@ static void rd_kafka_metadata_leader_query_tmr_cb (rd_kafka_timers_t *rkts, continue; } - require_metadata = rkt->rkt_flags & RD_KAFKA_TOPIC_F_LEADER_UNAVAIL; + require_metadata = + rkt->rkt_flags & RD_KAFKA_TOPIC_F_LEADER_UNAVAIL; /* Check if any partitions are missing brokers. 
*/ - for (i = 0 ; !require_metadata && i < rkt->rkt_partition_cnt ; i++) { + for (i = 0; !require_metadata && i < rkt->rkt_partition_cnt; + i++) { rd_kafka_toppar_t *rktp = rkt->rkt_p[i]; rd_kafka_toppar_lock(rktp); - require_metadata = !rktp->rktp_broker && - !rktp->rktp_next_broker; + require_metadata = + !rktp->rktp_broker && !rktp->rktp_next_broker; rd_kafka_toppar_unlock(rktp); } @@ -1224,21 +1207,19 @@ static void rd_kafka_metadata_leader_query_tmr_cb (rd_kafka_timers_t *rkts, if (rd_list_cnt(&topics) == 0) { /* No leader-less topics+partitions, stop the timer. */ - rd_kafka_timer_stop(rkts, rtmr, 1/*lock*/); + rd_kafka_timer_stop(rkts, rtmr, 1 /*lock*/); } else { rd_kafka_metadata_refresh_topics( - rk, NULL, &topics, - rd_true/*force*/, - rk->rk_conf.allow_auto_create_topics, - rd_false/*!cgrp_update*/, - "partition leader query"); + rk, NULL, &topics, rd_true /*force*/, + rk->rk_conf.allow_auto_create_topics, + rd_false /*!cgrp_update*/, "partition leader query"); /* Back off next query exponentially until we reach * the standard query interval - then stop the timer * since the intervalled querier will do the job for us. */ if (rk->rk_conf.metadata_refresh_interval_ms > 0 && rtmr->rtmr_interval * 2 / 1000 >= - rk->rk_conf.metadata_refresh_interval_ms) - rd_kafka_timer_stop(rkts, rtmr, 1/*lock*/); + rk->rk_conf.metadata_refresh_interval_ms) + rd_kafka_timer_stop(rkts, rtmr, 1 /*lock*/); else rd_kafka_timer_exp_backoff(rkts, rtmr); } @@ -1257,30 +1238,26 @@ static void rd_kafka_metadata_leader_query_tmr_cb (rd_kafka_timers_t *rkts, * @locks none * @locality any */ -void rd_kafka_metadata_fast_leader_query (rd_kafka_t *rk) { +void rd_kafka_metadata_fast_leader_query(rd_kafka_t *rk) { rd_ts_t next; /* Restart the timer if it will speed things up. 
*/ - next = rd_kafka_timer_next(&rk->rk_timers, - &rk->rk_metadata_cache.rkmc_query_tmr, - 1/*lock*/); + next = rd_kafka_timer_next( + &rk->rk_timers, &rk->rk_metadata_cache.rkmc_query_tmr, 1 /*lock*/); if (next == -1 /* not started */ || next > - (rd_ts_t)rk->rk_conf.metadata_refresh_fast_interval_ms * 1000) { - rd_kafka_dbg(rk, METADATA|RD_KAFKA_DBG_TOPIC, "FASTQUERY", + (rd_ts_t)rk->rk_conf.metadata_refresh_fast_interval_ms * 1000) { + rd_kafka_dbg(rk, METADATA | RD_KAFKA_DBG_TOPIC, "FASTQUERY", "Starting fast leader query"); - rd_kafka_timer_start(&rk->rk_timers, - &rk->rk_metadata_cache.rkmc_query_tmr, - rk->rk_conf. - metadata_refresh_fast_interval_ms*1000, - rd_kafka_metadata_leader_query_tmr_cb, - NULL); + rd_kafka_timer_start( + &rk->rk_timers, &rk->rk_metadata_cache.rkmc_query_tmr, + rk->rk_conf.metadata_refresh_fast_interval_ms * 1000, + rd_kafka_metadata_leader_query_tmr_cb, NULL); } } - /** * @brief Create mock Metadata (for testing) based on the provided topics. * @@ -1293,8 +1270,8 @@ void rd_kafka_metadata_fast_leader_query (rd_kafka_t *rk) { * @sa rd_kafka_metadata_copy() */ rd_kafka_metadata_t * -rd_kafka_metadata_new_topic_mock (const rd_kafka_metadata_topic_t *topics, - size_t topic_cnt) { +rd_kafka_metadata_new_topic_mock(const rd_kafka_metadata_topic_t *topics, + size_t topic_cnt) { rd_kafka_metadata_t *md; rd_tmpabuf_t tbuf; size_t topic_names_size = 0; @@ -1303,7 +1280,7 @@ rd_kafka_metadata_new_topic_mock (const rd_kafka_metadata_topic_t *topics, /* Calculate total partition count and topic names size before * allocating memory. 
*/ - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { topic_names_size += 1 + strlen(topics[i].topic); total_partition_cnt += topics[i].partition_cnt; } @@ -1311,36 +1288,33 @@ rd_kafka_metadata_new_topic_mock (const rd_kafka_metadata_topic_t *topics, /* Allocate contiguous buffer which will back all the memory * needed by the final metadata_t object */ - rd_tmpabuf_new(&tbuf, - sizeof(*md) + - (sizeof(*md->topics) * topic_cnt) + - topic_names_size + - (64/*topic name size..*/ * topic_cnt) + - (sizeof(*md->topics[0].partitions) * - total_partition_cnt), - 1/*assert on fail*/); + rd_tmpabuf_new( + &tbuf, + sizeof(*md) + (sizeof(*md->topics) * topic_cnt) + topic_names_size + + (64 /*topic name size..*/ * topic_cnt) + + (sizeof(*md->topics[0].partitions) * total_partition_cnt), + 1 /*assert on fail*/); md = rd_tmpabuf_alloc(&tbuf, sizeof(*md)); memset(md, 0, sizeof(*md)); md->topic_cnt = (int)topic_cnt; - md->topics = rd_tmpabuf_alloc(&tbuf, - md->topic_cnt * sizeof(*md->topics)); + md->topics = + rd_tmpabuf_alloc(&tbuf, md->topic_cnt * sizeof(*md->topics)); - for (i = 0 ; i < (size_t)md->topic_cnt ; i++) { + for (i = 0; i < (size_t)md->topic_cnt; i++) { int j; - md->topics[i].topic = rd_tmpabuf_write_str( - &tbuf, topics[i].topic); + md->topics[i].topic = + rd_tmpabuf_write_str(&tbuf, topics[i].topic); md->topics[i].partition_cnt = topics[i].partition_cnt; - md->topics[i].err = RD_KAFKA_RESP_ERR_NO_ERROR; + md->topics[i].err = RD_KAFKA_RESP_ERR_NO_ERROR; - md->topics[i].partitions = - rd_tmpabuf_alloc(&tbuf, - md->topics[i].partition_cnt * - sizeof(*md->topics[i].partitions)); + md->topics[i].partitions = rd_tmpabuf_alloc( + &tbuf, md->topics[i].partition_cnt * + sizeof(*md->topics[i].partitions)); - for (j = 0 ; j < md->topics[i].partition_cnt ; j++) { + for (j = 0; j < md->topics[i].partition_cnt; j++) { memset(&md->topics[i].partitions[j], 0, sizeof(md->topics[i].partitions[j])); md->topics[i].partitions[j].id = j; @@ -1368,7 +1342,7 @@ 
rd_kafka_metadata_new_topic_mock (const rd_kafka_metadata_topic_t *topics, * * @sa rd_kafka_metadata_new_topic_mock() */ -rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv (size_t topic_cnt, ...) { +rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv(size_t topic_cnt, ...) { rd_kafka_metadata_topic_t *topics; va_list ap; size_t i; @@ -1376,8 +1350,8 @@ rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv (size_t topic_cnt, ...) { topics = rd_alloca(sizeof(*topics) * topic_cnt); va_start(ap, topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) { - topics[i].topic = va_arg(ap, char *); + for (i = 0; i < topic_cnt; i++) { + topics[i].topic = va_arg(ap, char *); topics[i].partition_cnt = va_arg(ap, int); } va_end(ap); diff --git a/src/rdkafka_metadata.h b/src/rdkafka_metadata.h index 8dad539986..b77bc19ed7 100644 --- a/src/rdkafka_metadata.h +++ b/src/rdkafka_metadata.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2015, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -31,64 +31,72 @@ #include "rdavl.h" -rd_kafka_resp_err_t -rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *request, rd_kafka_buf_t *rkbuf, - struct rd_kafka_metadata **mdp); +rd_kafka_resp_err_t rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *request, + rd_kafka_buf_t *rkbuf, + struct rd_kafka_metadata **mdp); struct rd_kafka_metadata * -rd_kafka_metadata_copy (const struct rd_kafka_metadata *md, size_t size); +rd_kafka_metadata_copy(const struct rd_kafka_metadata *md, size_t size); size_t -rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos, +rd_kafka_metadata_topic_match(rd_kafka_t *rk, + rd_list_t *tinfos, + const rd_kafka_topic_partition_list_t *match, + rd_kafka_topic_partition_list_t *errored); +size_t +rd_kafka_metadata_topic_filter(rd_kafka_t *rk, + rd_list_t *tinfos, const rd_kafka_topic_partition_list_t *match, rd_kafka_topic_partition_list_t *errored); -size_t -rd_kafka_metadata_topic_filter (rd_kafka_t *rk, rd_list_t *tinfos, - const rd_kafka_topic_partition_list_t *match, - rd_kafka_topic_partition_list_t *errored); -void rd_kafka_metadata_log (rd_kafka_t *rk, const char *fac, - const struct rd_kafka_metadata *md); +void rd_kafka_metadata_log(rd_kafka_t *rk, + const char *fac, + const struct rd_kafka_metadata *md); rd_kafka_resp_err_t -rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const rd_list_t *topics, rd_bool_t force, - rd_bool_t allow_auto_create, - rd_bool_t cgrp_update, - const char *reason); -rd_kafka_resp_err_t -rd_kafka_metadata_refresh_known_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - rd_bool_t force, const char *reason); -rd_kafka_resp_err_t -rd_kafka_metadata_refresh_consumer_topics (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - const char *reason); +rd_kafka_metadata_refresh_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const rd_list_t *topics, + rd_bool_t force, + rd_bool_t allow_auto_create, + rd_bool_t cgrp_update, + const char *reason); 
rd_kafka_resp_err_t -rd_kafka_metadata_refresh_brokers (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *reason); +rd_kafka_metadata_refresh_known_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_bool_t force, + const char *reason); rd_kafka_resp_err_t -rd_kafka_metadata_refresh_all (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *reason); +rd_kafka_metadata_refresh_consumer_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason); +rd_kafka_resp_err_t rd_kafka_metadata_refresh_brokers(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason); +rd_kafka_resp_err_t rd_kafka_metadata_refresh_all(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason); rd_kafka_resp_err_t -rd_kafka_metadata_request (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const rd_list_t *topics, - rd_bool_t allow_auto_create_topics, - rd_bool_t cgrp_update, - const char *reason, rd_kafka_op_t *rko); +rd_kafka_metadata_request(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const rd_list_t *topics, + rd_bool_t allow_auto_create_topics, + rd_bool_t cgrp_update, + const char *reason, + rd_kafka_op_t *rko); -int rd_kafka_metadata_partition_id_cmp (const void *_a, - const void *_b); +int rd_kafka_metadata_partition_id_cmp(const void *_a, const void *_b); rd_kafka_metadata_t * -rd_kafka_metadata_new_topic_mock (const rd_kafka_metadata_topic_t *topics, - size_t topic_cnt); -rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv (size_t topic_cnt, ...); +rd_kafka_metadata_new_topic_mock(const rd_kafka_metadata_topic_t *topics, + size_t topic_cnt); +rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv(size_t topic_cnt, ...); /** @@ -98,102 +106,102 @@ rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv (size_t topic_cnt, ...); */ struct rd_kafka_metadata_cache_entry { - rd_avl_node_t rkmce_avlnode; /* rkmc_avl */ + rd_avl_node_t rkmce_avlnode; /* rkmc_avl */ TAILQ_ENTRY(rd_kafka_metadata_cache_entry) rkmce_link; /* rkmc_expiry */ - rd_ts_t rkmce_ts_expires; /* 
Expire time */ - rd_ts_t rkmce_ts_insert; /* Insert time */ - rd_kafka_metadata_topic_t rkmce_mtopic; /* Cached topic metadata */ + rd_ts_t rkmce_ts_expires; /* Expire time */ + rd_ts_t rkmce_ts_insert; /* Insert time */ + rd_kafka_metadata_topic_t rkmce_mtopic; /* Cached topic metadata */ /* rkmce_partitions memory points here. */ }; -#define RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(ERR) \ - ((ERR) == RD_KAFKA_RESP_ERR__WAIT_CACHE || \ +#define RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(ERR) \ + ((ERR) == RD_KAFKA_RESP_ERR__WAIT_CACHE || \ (ERR) == RD_KAFKA_RESP_ERR__NOENT) -#define RD_KAFKA_METADATA_CACHE_VALID(rkmce) \ +#define RD_KAFKA_METADATA_CACHE_VALID(rkmce) \ !RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY((rkmce)->rkmce_mtopic.err) struct rd_kafka_metadata_cache { - rd_avl_t rkmc_avl; + rd_avl_t rkmc_avl; TAILQ_HEAD(, rd_kafka_metadata_cache_entry) rkmc_expiry; rd_kafka_timer_t rkmc_expiry_tmr; - int rkmc_cnt; + int rkmc_cnt; /* Protected by rk_lock */ - rd_list_t rkmc_observers; /**< (rd_kafka_enq_once_t*) */ + rd_list_t rkmc_observers; /**< (rd_kafka_enq_once_t*) */ /* Protected by full_lock: */ - mtx_t rkmc_full_lock; - int rkmc_full_topics_sent; /* Full MetadataRequest for - * all topics has been sent, - * awaiting response. */ - int rkmc_full_brokers_sent; /* Full MetadataRequest for - * all brokers (but not topics) - * has been sent, - * awaiting response. */ + mtx_t rkmc_full_lock; + int rkmc_full_topics_sent; /* Full MetadataRequest for + * all topics has been sent, + * awaiting response. */ + int rkmc_full_brokers_sent; /* Full MetadataRequest for + * all brokers (but not topics) + * has been sent, + * awaiting response. */ rd_kafka_timer_t rkmc_query_tmr; /* Query timer for topic's without * leaders. */ - cnd_t rkmc_cnd; /* cache_wait_change() cond. */ - mtx_t rkmc_cnd_lock; /* lock for rkmc_cnd */ + cnd_t rkmc_cnd; /* cache_wait_change() cond. 
*/ + mtx_t rkmc_cnd_lock; /* lock for rkmc_cnd */ }; -void rd_kafka_metadata_cache_expiry_start (rd_kafka_t *rk); -void -rd_kafka_metadata_cache_topic_update (rd_kafka_t *rk, - const rd_kafka_metadata_topic_t *mdt, - rd_bool_t propagate); -void rd_kafka_metadata_cache_update (rd_kafka_t *rk, - const rd_kafka_metadata_t *md, - int abs_update); -void rd_kafka_metadata_cache_propagate_changes (rd_kafka_t *rk); +void rd_kafka_metadata_cache_expiry_start(rd_kafka_t *rk); +void rd_kafka_metadata_cache_topic_update(rd_kafka_t *rk, + const rd_kafka_metadata_topic_t *mdt, + rd_bool_t propagate); +void rd_kafka_metadata_cache_update(rd_kafka_t *rk, + const rd_kafka_metadata_t *md, + int abs_update); +void rd_kafka_metadata_cache_propagate_changes(rd_kafka_t *rk); struct rd_kafka_metadata_cache_entry * -rd_kafka_metadata_cache_find (rd_kafka_t *rk, const char *topic, int valid); -void rd_kafka_metadata_cache_purge_hints (rd_kafka_t *rk, - const rd_list_t *topics); -int rd_kafka_metadata_cache_hint (rd_kafka_t *rk, - const rd_list_t *topics, rd_list_t *dst, - rd_kafka_resp_err_t err, - rd_bool_t replace); - -int rd_kafka_metadata_cache_hint_rktparlist ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *dst, - int replace); +rd_kafka_metadata_cache_find(rd_kafka_t *rk, const char *topic, int valid); +void rd_kafka_metadata_cache_purge_hints(rd_kafka_t *rk, + const rd_list_t *topics); +int rd_kafka_metadata_cache_hint(rd_kafka_t *rk, + const rd_list_t *topics, + rd_list_t *dst, + rd_kafka_resp_err_t err, + rd_bool_t replace); + +int rd_kafka_metadata_cache_hint_rktparlist( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *dst, + int replace); const rd_kafka_metadata_topic_t * -rd_kafka_metadata_cache_topic_get (rd_kafka_t *rk, const char *topic, - int valid); -int rd_kafka_metadata_cache_topic_partition_get ( - rd_kafka_t *rk, - const rd_kafka_metadata_topic_t **mtopicp, - const rd_kafka_metadata_partition_t 
**mpartp, - const char *topic, int32_t partition, int valid); - -int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk, - const rd_list_t *topics, - int *metadata_agep); - -void rd_kafka_metadata_fast_leader_query (rd_kafka_t *rk); - -void rd_kafka_metadata_cache_init (rd_kafka_t *rk); -void rd_kafka_metadata_cache_destroy (rd_kafka_t *rk); -void rd_kafka_metadata_cache_purge (rd_kafka_t *rk, rd_bool_t purge_observers); -int rd_kafka_metadata_cache_wait_change (rd_kafka_t *rk, int timeout_ms); -void rd_kafka_metadata_cache_dump (FILE *fp, rd_kafka_t *rk); - -int rd_kafka_metadata_cache_topics_to_list (rd_kafka_t *rk, - rd_list_t *topics); - -void -rd_kafka_metadata_cache_wait_state_change_async (rd_kafka_t *rk, - rd_kafka_enq_once_t *eonce); +rd_kafka_metadata_cache_topic_get(rd_kafka_t *rk, const char *topic, int valid); +int rd_kafka_metadata_cache_topic_partition_get( + rd_kafka_t *rk, + const rd_kafka_metadata_topic_t **mtopicp, + const rd_kafka_metadata_partition_t **mpartp, + const char *topic, + int32_t partition, + int valid); + +int rd_kafka_metadata_cache_topics_count_exists(rd_kafka_t *rk, + const rd_list_t *topics, + int *metadata_agep); + +void rd_kafka_metadata_fast_leader_query(rd_kafka_t *rk); + +void rd_kafka_metadata_cache_init(rd_kafka_t *rk); +void rd_kafka_metadata_cache_destroy(rd_kafka_t *rk); +void rd_kafka_metadata_cache_purge(rd_kafka_t *rk, rd_bool_t purge_observers); +int rd_kafka_metadata_cache_wait_change(rd_kafka_t *rk, int timeout_ms); +void rd_kafka_metadata_cache_dump(FILE *fp, rd_kafka_t *rk); + +int rd_kafka_metadata_cache_topics_to_list(rd_kafka_t *rk, rd_list_t *topics); + +void rd_kafka_metadata_cache_wait_state_change_async( + rd_kafka_t *rk, + rd_kafka_enq_once_t *eonce); /**@}*/ #endif /* _RDKAFKA_METADATA_H_ */ diff --git a/src/rdkafka_metadata_cache.c b/src/rdkafka_metadata_cache.c index a08a5abc8a..822d0cb2f1 100644 --- a/src/rdkafka_metadata_cache.c +++ b/src/rdkafka_metadata_cache.c @@ -76,9 +76,9 @@ * @locks 
rd_kafka_wrlock() */ static RD_INLINE void -rd_kafka_metadata_cache_delete (rd_kafka_t *rk, - struct rd_kafka_metadata_cache_entry *rkmce, - int unlink_avl) { +rd_kafka_metadata_cache_delete(rd_kafka_t *rk, + struct rd_kafka_metadata_cache_entry *rkmce, + int unlink_avl) { if (unlink_avl) RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl, rkmce); TAILQ_REMOVE(&rk->rk_metadata_cache.rkmc_expiry, rkmce, rkmce_link); @@ -93,8 +93,8 @@ rd_kafka_metadata_cache_delete (rd_kafka_t *rk, * @locks rd_kafka_wrlock() * @returns 1 if entry was found and removed, else 0. */ -static int rd_kafka_metadata_cache_delete_by_name (rd_kafka_t *rk, - const char *topic) { +static int rd_kafka_metadata_cache_delete_by_name(rd_kafka_t *rk, + const char *topic) { struct rd_kafka_metadata_cache_entry *rkmce; rkmce = rd_kafka_metadata_cache_find(rk, topic, 1); @@ -103,15 +103,15 @@ static int rd_kafka_metadata_cache_delete_by_name (rd_kafka_t *rk, return rkmce ? 1 : 0; } -static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk); +static int rd_kafka_metadata_cache_evict(rd_kafka_t *rk); /** * @brief Cache eviction timer callback. 
* @locality rdkafka main thread * @locks NOT rd_kafka_*lock() */ -static void rd_kafka_metadata_cache_evict_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_metadata_cache_evict_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_t *rk = arg; rd_kafka_wrlock(rk); @@ -128,8 +128,8 @@ static void rd_kafka_metadata_cache_evict_tmr_cb (rd_kafka_timers_t *rkts, * * @locks rd_kafka_wrlock() */ -static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk) { - int cnt = 0; +static int rd_kafka_metadata_cache_evict(rd_kafka_t *rk) { + int cnt = 0; rd_ts_t now = rd_clock(); struct rd_kafka_metadata_cache_entry *rkmce; @@ -143,8 +143,7 @@ static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk) { rd_kafka_timer_start(&rk->rk_timers, &rk->rk_metadata_cache.rkmc_expiry_tmr, rkmce->rkmce_ts_expires - now, - rd_kafka_metadata_cache_evict_tmr_cb, - rk); + rd_kafka_metadata_cache_evict_tmr_cb, rk); else rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_metadata_cache.rkmc_expiry_tmr, 1); @@ -169,7 +168,7 @@ static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk) { * @locks rd_kafka_*lock() */ struct rd_kafka_metadata_cache_entry * -rd_kafka_metadata_cache_find (rd_kafka_t *rk, const char *topic, int valid) { +rd_kafka_metadata_cache_find(rd_kafka_t *rk, const char *topic, int valid) { struct rd_kafka_metadata_cache_entry skel, *rkmce; skel.rkmce_mtopic.topic = (char *)topic; rkmce = RD_AVL_FIND(&rk->rk_metadata_cache.rkmc_avl, &skel); @@ -182,8 +181,7 @@ rd_kafka_metadata_cache_find (rd_kafka_t *rk, const char *topic, int valid) { /** * @brief Partition (id) comparator */ -int rd_kafka_metadata_partition_id_cmp (const void *_a, - const void *_b) { +int rd_kafka_metadata_partition_id_cmp(const void *_a, const void *_b) { const rd_kafka_metadata_partition_t *a = _a, *b = _b; return RD_CMP(a->id, b->id); } @@ -197,9 +195,10 @@ int rd_kafka_metadata_partition_id_cmp (const void *_a, * @locks_required rd_kafka_wrlock() */ static struct rd_kafka_metadata_cache_entry * 
-rd_kafka_metadata_cache_insert (rd_kafka_t *rk, - const rd_kafka_metadata_topic_t *mtopic, - rd_ts_t now, rd_ts_t ts_expires) { +rd_kafka_metadata_cache_insert(rd_kafka_t *rk, + const rd_kafka_metadata_topic_t *mtopic, + rd_ts_t now, + rd_ts_t ts_expires) { struct rd_kafka_metadata_cache_entry *rkmce, *old; size_t topic_len; rd_tmpabuf_t tbuf; @@ -214,10 +213,10 @@ rd_kafka_metadata_cache_insert (rd_kafka_t *rk, topic_len = strlen(mtopic->topic) + 1; rd_tmpabuf_new(&tbuf, RD_ROUNDUP(sizeof(*rkmce), 8) + - RD_ROUNDUP(topic_len, 8) + - (mtopic->partition_cnt * - RD_ROUNDUP(sizeof(*mtopic->partitions), 8)), - 1/*assert on fail*/); + RD_ROUNDUP(topic_len, 8) + + (mtopic->partition_cnt * + RD_ROUNDUP(sizeof(*mtopic->partitions), 8)), + 1 /*assert on fail*/); rkmce = rd_tmpabuf_alloc(&tbuf, sizeof(*rkmce)); @@ -227,30 +226,28 @@ rd_kafka_metadata_cache_insert (rd_kafka_t *rk, rkmce->rkmce_mtopic.topic = rd_tmpabuf_write_str(&tbuf, mtopic->topic); /* Copy partition array and update pointer */ - rkmce->rkmce_mtopic.partitions = - rd_tmpabuf_write(&tbuf, mtopic->partitions, - mtopic->partition_cnt * - sizeof(*mtopic->partitions)); + rkmce->rkmce_mtopic.partitions = rd_tmpabuf_write( + &tbuf, mtopic->partitions, + mtopic->partition_cnt * sizeof(*mtopic->partitions)); /* Clear uncached fields. */ - for (i = 0 ; i < mtopic->partition_cnt ; i++) { - rkmce->rkmce_mtopic.partitions[i].replicas = NULL; + for (i = 0; i < mtopic->partition_cnt; i++) { + rkmce->rkmce_mtopic.partitions[i].replicas = NULL; rkmce->rkmce_mtopic.partitions[i].replica_cnt = 0; - rkmce->rkmce_mtopic.partitions[i].isrs = NULL; - rkmce->rkmce_mtopic.partitions[i].isr_cnt = 0; + rkmce->rkmce_mtopic.partitions[i].isrs = NULL; + rkmce->rkmce_mtopic.partitions[i].isr_cnt = 0; } /* Sort partitions for future bsearch() lookups. 
*/ - qsort(rkmce->rkmce_mtopic.partitions, - rkmce->rkmce_mtopic.partition_cnt, + qsort(rkmce->rkmce_mtopic.partitions, rkmce->rkmce_mtopic.partition_cnt, sizeof(*rkmce->rkmce_mtopic.partitions), rd_kafka_metadata_partition_id_cmp); - TAILQ_INSERT_TAIL(&rk->rk_metadata_cache.rkmc_expiry, - rkmce, rkmce_link); + TAILQ_INSERT_TAIL(&rk->rk_metadata_cache.rkmc_expiry, rkmce, + rkmce_link); rk->rk_metadata_cache.rkmc_cnt++; rkmce->rkmce_ts_expires = ts_expires; - rkmce->rkmce_ts_insert = now; + rkmce->rkmce_ts_insert = now; /* Insert (and replace existing) entry. */ old = RD_AVL_INSERT(&rk->rk_metadata_cache.rkmc_avl, rkmce, @@ -269,7 +266,7 @@ rd_kafka_metadata_cache_insert (rd_kafka_t *rk, * * @locks_required rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_purge (rd_kafka_t *rk, rd_bool_t purge_observers) { +void rd_kafka_metadata_cache_purge(rd_kafka_t *rk, rd_bool_t purge_observers) { struct rd_kafka_metadata_cache_entry *rkmce; int was_empty = TAILQ_EMPTY(&rk->rk_metadata_cache.rkmc_expiry); @@ -293,15 +290,14 @@ void rd_kafka_metadata_cache_purge (rd_kafka_t *rk, rd_bool_t purge_observers) { * * @locks rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_expiry_start (rd_kafka_t *rk) { +void rd_kafka_metadata_cache_expiry_start(rd_kafka_t *rk) { struct rd_kafka_metadata_cache_entry *rkmce; if ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry))) rd_kafka_timer_start(&rk->rk_timers, &rk->rk_metadata_cache.rkmc_expiry_tmr, rkmce->rkmce_ts_expires - rd_clock(), - rd_kafka_metadata_cache_evict_tmr_cb, - rk); + rd_kafka_metadata_cache_evict_tmr_cb, rk); } /** @@ -325,13 +321,12 @@ void rd_kafka_metadata_cache_expiry_start (rd_kafka_t *rk) { * * @locks rd_kafka_wrlock() */ -void -rd_kafka_metadata_cache_topic_update (rd_kafka_t *rk, - const rd_kafka_metadata_topic_t *mdt, - rd_bool_t propagate) { - rd_ts_t now = rd_clock(); +void rd_kafka_metadata_cache_topic_update(rd_kafka_t *rk, + const rd_kafka_metadata_topic_t *mdt, + rd_bool_t propagate) { + rd_ts_t now = 
rd_clock(); rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000); - int changed = 1; + int changed = 1; /* Cache unknown topics for a short while (100ms) to allow the cgrp * logic to find negative cache hits. */ @@ -343,8 +338,8 @@ rd_kafka_metadata_cache_topic_update (rd_kafka_t *rk, mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) rd_kafka_metadata_cache_insert(rk, mdt, now, ts_expires); else - changed = rd_kafka_metadata_cache_delete_by_name(rk, - mdt->topic); + changed = + rd_kafka_metadata_cache_delete_by_name(rk, mdt->topic); if (changed && propagate) rd_kafka_metadata_cache_propagate_changes(rk); @@ -358,24 +353,23 @@ rd_kafka_metadata_cache_topic_update (rd_kafka_t *rk, * * @locks rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_update (rd_kafka_t *rk, - const rd_kafka_metadata_t *md, - int abs_update) { +void rd_kafka_metadata_cache_update(rd_kafka_t *rk, + const rd_kafka_metadata_t *md, + int abs_update) { struct rd_kafka_metadata_cache_entry *rkmce; - rd_ts_t now = rd_clock(); + rd_ts_t now = rd_clock(); rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000); int i; rd_kafka_dbg(rk, METADATA, "METADATA", "%s of metadata cache with %d topic(s)", - abs_update ? "Absolute update" : "Update", - md->topic_cnt); + abs_update ? 
"Absolute update" : "Update", md->topic_cnt); if (abs_update) - rd_kafka_metadata_cache_purge(rk, rd_false/*not observers*/); + rd_kafka_metadata_cache_purge(rk, rd_false /*not observers*/); - for (i = 0 ; i < md->topic_cnt ; i++) + for (i = 0; i < md->topic_cnt; i++) rd_kafka_metadata_cache_insert(rk, &md->topics[i], now, ts_expires); @@ -384,8 +378,7 @@ void rd_kafka_metadata_cache_update (rd_kafka_t *rk, rd_kafka_timer_start(&rk->rk_timers, &rk->rk_metadata_cache.rkmc_expiry_tmr, rkmce->rkmce_ts_expires - now, - rd_kafka_metadata_cache_evict_tmr_cb, - rk); + rd_kafka_metadata_cache_evict_tmr_cb, rk); if (md->topic_cnt > 0 || abs_update) rd_kafka_metadata_cache_propagate_changes(rk); @@ -400,8 +393,8 @@ void rd_kafka_metadata_cache_update (rd_kafka_t *rk, * * @locks rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_purge_hints (rd_kafka_t *rk, - const rd_list_t *topics) { +void rd_kafka_metadata_cache_purge_hints(rd_kafka_t *rk, + const rd_list_t *topics) { const char *topic; int i; int cnt = 0; @@ -409,19 +402,19 @@ void rd_kafka_metadata_cache_purge_hints (rd_kafka_t *rk, RD_LIST_FOREACH(topic, topics, i) { struct rd_kafka_metadata_cache_entry *rkmce; - if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic, - 0/*any*/)) || + if (!(rkmce = + rd_kafka_metadata_cache_find(rk, topic, 0 /*any*/)) || RD_KAFKA_METADATA_CACHE_VALID(rkmce)) continue; - rd_kafka_metadata_cache_delete(rk, rkmce, 1/*unlink avl*/); + rd_kafka_metadata_cache_delete(rk, rkmce, 1 /*unlink avl*/); cnt++; } if (cnt > 0) { rd_kafka_dbg(rk, METADATA, "METADATA", - "Purged %d/%d cached topic hint(s)", - cnt, rd_list_cnt(topics)); + "Purged %d/%d cached topic hint(s)", cnt, + rd_list_cnt(topics)); rd_kafka_metadata_cache_propagate_changes(rk); } } @@ -448,30 +441,28 @@ void rd_kafka_metadata_cache_purge_hints (rd_kafka_t *rk, * * @locks_required rd_kafka_wrlock() */ -int rd_kafka_metadata_cache_hint (rd_kafka_t *rk, - const rd_list_t *topics, rd_list_t *dst, - rd_kafka_resp_err_t err, - rd_bool_t 
replace) { +int rd_kafka_metadata_cache_hint(rd_kafka_t *rk, + const rd_list_t *topics, + rd_list_t *dst, + rd_kafka_resp_err_t err, + rd_bool_t replace) { const char *topic; - rd_ts_t now = rd_clock(); + rd_ts_t now = rd_clock(); rd_ts_t ts_expires = now + (rk->rk_conf.socket_timeout_ms * 1000); int i; int cnt = 0; RD_LIST_FOREACH(topic, topics, i) { - rd_kafka_metadata_topic_t mtopic = { - .topic = (char *)topic, - .err = err - }; + rd_kafka_metadata_topic_t mtopic = {.topic = (char *)topic, + .err = err}; /*const*/ struct rd_kafka_metadata_cache_entry *rkmce; /* !replace: Dont overwrite valid entries */ - if (!replace && - (rkmce = - rd_kafka_metadata_cache_find(rk, topic, 0/*any*/))) { + if (!replace && (rkmce = rd_kafka_metadata_cache_find( + rk, topic, 0 /*any*/))) { if (RD_KAFKA_METADATA_CACHE_VALID(rkmce) || (dst && rkmce->rkmce_mtopic.err != - RD_KAFKA_RESP_ERR__NOENT)) + RD_KAFKA_RESP_ERR__NOENT)) continue; rkmce->rkmce_mtopic.err = err; /* FALLTHRU */ @@ -482,7 +473,6 @@ int rd_kafka_metadata_cache_hint (rd_kafka_t *rk, if (dst) rd_list_add(dst, rd_strdup(topic)); - } if (cnt > 0) @@ -500,21 +490,20 @@ int rd_kafka_metadata_cache_hint (rd_kafka_t *rk, * * @locks_acquired rd_kafka_wrlock() */ -int rd_kafka_metadata_cache_hint_rktparlist ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *dst, - int replace) { +int rd_kafka_metadata_cache_hint_rktparlist( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *dst, + int replace) { rd_list_t topics; int r; rd_list_init(&topics, rktparlist->cnt, rd_free); rd_kafka_topic_partition_list_get_topic_names(rktparlist, &topics, - 0/*dont include regex*/); + 0 /*dont include regex*/); rd_kafka_wrlock(rk); - r = rd_kafka_metadata_cache_hint(rk, &topics, dst, - RD_KAFKA_RESP_ERR__WAIT_CACHE, - replace); + r = rd_kafka_metadata_cache_hint( + rk, &topics, dst, RD_KAFKA_RESP_ERR__WAIT_CACHE, replace); rd_kafka_wrunlock(rk); rd_list_destroy(&topics); @@ -525,7 
+514,7 @@ int rd_kafka_metadata_cache_hint_rktparlist ( /** * @brief Cache entry comparator (on topic name) */ -static int rd_kafka_metadata_cache_entry_cmp (const void *_a, const void *_b) { +static int rd_kafka_metadata_cache_entry_cmp(const void *_a, const void *_b) { const struct rd_kafka_metadata_cache_entry *a = _a, *b = _b; return strcmp(a->rkmce_mtopic.topic, b->rkmce_mtopic.topic); } @@ -536,7 +525,7 @@ static int rd_kafka_metadata_cache_entry_cmp (const void *_a, const void *_b) { * * @locks rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_init (rd_kafka_t *rk) { +void rd_kafka_metadata_cache_init(rd_kafka_t *rk) { rd_avl_init(&rk->rk_metadata_cache.rkmc_avl, rd_kafka_metadata_cache_entry_cmp, 0); TAILQ_INIT(&rk->rk_metadata_cache.rkmc_expiry); @@ -552,11 +541,11 @@ void rd_kafka_metadata_cache_init (rd_kafka_t *rk) { * * @locks_required rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_destroy (rd_kafka_t *rk) { +void rd_kafka_metadata_cache_destroy(rd_kafka_t *rk) { rd_list_destroy(&rk->rk_metadata_cache.rkmc_observers); rd_kafka_timer_stop(&rk->rk_timers, - &rk->rk_metadata_cache.rkmc_query_tmr, 1/*lock*/); - rd_kafka_metadata_cache_purge(rk, rd_true/*observers too*/); + &rk->rk_metadata_cache.rkmc_query_tmr, 1 /*lock*/); + rd_kafka_metadata_cache_purge(rk, rd_true /*observers too*/); mtx_destroy(&rk->rk_metadata_cache.rkmc_full_lock); mtx_destroy(&rk->rk_metadata_cache.rkmc_cnd_lock); cnd_destroy(&rk->rk_metadata_cache.rkmc_cnd); @@ -570,9 +559,9 @@ void rd_kafka_metadata_cache_destroy (rd_kafka_t *rk) { * * @locks_required rd_kafka_wrlock() */ -void -rd_kafka_metadata_cache_wait_state_change_async (rd_kafka_t *rk, - rd_kafka_enq_once_t *eonce) { +void rd_kafka_metadata_cache_wait_state_change_async( + rd_kafka_t *rk, + rd_kafka_enq_once_t *eonce) { rd_kafka_enq_once_add_source(eonce, "wait metadata cache change"); rd_list_add(&rk->rk_metadata_cache.rkmc_observers, eonce); } @@ -585,21 +574,19 @@ rd_kafka_metadata_cache_wait_state_change_async 
(rd_kafka_t *rk, * @locks none * @locality any */ -int rd_kafka_metadata_cache_wait_change (rd_kafka_t *rk, int timeout_ms) { +int rd_kafka_metadata_cache_wait_change(rd_kafka_t *rk, int timeout_ms) { int r; #if ENABLE_DEVEL rd_ts_t ts_start = rd_clock(); #endif mtx_lock(&rk->rk_metadata_cache.rkmc_cnd_lock); r = cnd_timedwait_ms(&rk->rk_metadata_cache.rkmc_cnd, - &rk->rk_metadata_cache.rkmc_cnd_lock, - timeout_ms); + &rk->rk_metadata_cache.rkmc_cnd_lock, timeout_ms); mtx_unlock(&rk->rk_metadata_cache.rkmc_cnd_lock); #if ENABLE_DEVEL - rd_kafka_dbg(rk, METADATA, "CACHEWAIT", - "%s wait took %dms: %s", - __FUNCTION__, (int)((rd_clock() - ts_start)/1000), + rd_kafka_dbg(rk, METADATA, "CACHEWAIT", "%s wait took %dms: %s", + __FUNCTION__, (int)((rd_clock() - ts_start) / 1000), r == thrd_success ? "succeeded" : "timed out"); #endif return r == thrd_success; @@ -611,8 +598,8 @@ int rd_kafka_metadata_cache_wait_change (rd_kafka_t *rk, int timeout_ms) { * rd_kafka_metadata_cache_propagate_changes() */ static int -rd_kafka_metadata_cache_propagate_changes_trigger_eonce (void *elem, - void *opaque) { +rd_kafka_metadata_cache_propagate_changes_trigger_eonce(void *elem, + void *opaque) { rd_kafka_enq_once_t *eonce = elem; rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR, "wait metadata cache change"); @@ -627,7 +614,7 @@ rd_kafka_metadata_cache_propagate_changes_trigger_eonce (void *elem, * @locks_acquired rkmc_cnd_lock * @locality any */ -void rd_kafka_metadata_cache_propagate_changes (rd_kafka_t *rk) { +void rd_kafka_metadata_cache_propagate_changes(rd_kafka_t *rk) { mtx_lock(&rk->rk_metadata_cache.rkmc_cnd_lock); cnd_broadcast(&rk->rk_metadata_cache.rkmc_cnd); mtx_unlock(&rk->rk_metadata_cache.rkmc_cnd_lock); @@ -636,7 +623,6 @@ void rd_kafka_metadata_cache_propagate_changes (rd_kafka_t *rk) { rd_list_apply(&rk->rk_metadata_cache.rkmc_observers, rd_kafka_metadata_cache_propagate_changes_trigger_eonce, NULL); - } /** @@ -646,8 +632,9 @@ void 
rd_kafka_metadata_cache_propagate_changes (rd_kafka_t *rk) { * @locks rd_kafka_*lock() */ const rd_kafka_metadata_topic_t * -rd_kafka_metadata_cache_topic_get (rd_kafka_t *rk, const char *topic, - int valid) { +rd_kafka_metadata_cache_topic_get(rd_kafka_t *rk, + const char *topic, + int valid) { struct rd_kafka_metadata_cache_entry *rkmce; if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic, valid))) @@ -658,7 +645,6 @@ rd_kafka_metadata_cache_topic_get (rd_kafka_t *rk, const char *topic, - /** * @brief Looks up the shared metadata for a partition along with its topic. * @@ -674,18 +660,20 @@ rd_kafka_metadata_cache_topic_get (rd_kafka_t *rk, const char *topic, * * @locks rd_kafka_*lock() */ -int rd_kafka_metadata_cache_topic_partition_get ( - rd_kafka_t *rk, - const rd_kafka_metadata_topic_t **mtopicp, - const rd_kafka_metadata_partition_t **mpartp, - const char *topic, int32_t partition, int valid) { +int rd_kafka_metadata_cache_topic_partition_get( + rd_kafka_t *rk, + const rd_kafka_metadata_topic_t **mtopicp, + const rd_kafka_metadata_partition_t **mpartp, + const char *topic, + int32_t partition, + int valid) { const rd_kafka_metadata_topic_t *mtopic; const rd_kafka_metadata_partition_t *mpart; - rd_kafka_metadata_partition_t skel = { .id = partition }; + rd_kafka_metadata_partition_t skel = {.id = partition}; *mtopicp = NULL; - *mpartp = NULL; + *mpartp = NULL; if (!(mtopic = rd_kafka_metadata_cache_topic_get(rk, topic, valid))) return -1; @@ -696,8 +684,7 @@ int rd_kafka_metadata_cache_topic_partition_get ( return -1; /* Partitions array may be sparse so use bsearch lookup. 
*/ - mpart = bsearch(&skel, mtopic->partitions, - mtopic->partition_cnt, + mpart = bsearch(&skel, mtopic->partitions, mtopic->partition_cnt, sizeof(*mtopic->partitions), rd_kafka_metadata_partition_id_cmp); @@ -718,12 +705,12 @@ int rd_kafka_metadata_cache_topic_partition_get ( * * @locks rd_kafka_*lock() */ -int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk, - const rd_list_t *topics, - int *metadata_agep) { +int rd_kafka_metadata_cache_topics_count_exists(rd_kafka_t *rk, + const rd_list_t *topics, + int *metadata_agep) { const char *topic; int i; - int cnt = 0; + int cnt = 0; int max_age = -1; RD_LIST_FOREACH(topic, topics, i) { @@ -731,10 +718,10 @@ int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk, int age; if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic, - 1/*valid only*/))) + 1 /*valid only*/))) continue; - age = (int)((rd_clock() - rkmce->rkmce_ts_insert)/1000); + age = (int)((rd_clock() - rkmce->rkmce_ts_insert) / 1000); if (age > max_age) max_age = age; cnt++; @@ -743,7 +730,6 @@ int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk, *metadata_agep = max_age; return cnt; - } @@ -756,8 +742,7 @@ int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk, * * @locks_required rd_kafka_*lock() */ -int rd_kafka_metadata_cache_topics_to_list (rd_kafka_t *rk, - rd_list_t *topics) { +int rd_kafka_metadata_cache_topics_to_list(rd_kafka_t *rk, rd_list_t *topics) { const struct rd_kafka_metadata_cache_entry *rkmce; int precnt = rd_list_cnt(topics); @@ -782,26 +767,25 @@ int rd_kafka_metadata_cache_topics_to_list (rd_kafka_t *rk, * * @locks rd_kafka_*lock() */ -void rd_kafka_metadata_cache_dump (FILE *fp, rd_kafka_t *rk) { +void rd_kafka_metadata_cache_dump(FILE *fp, rd_kafka_t *rk) { const struct rd_kafka_metadata_cache *rkmc = &rk->rk_metadata_cache; const struct rd_kafka_metadata_cache_entry *rkmce; rd_ts_t now = rd_clock(); - fprintf(fp, - "Metadata cache with %d entries:\n", - rkmc->rkmc_cnt); + fprintf(fp, 
"Metadata cache with %d entries:\n", rkmc->rkmc_cnt); TAILQ_FOREACH(rkmce, &rkmc->rkmc_expiry, rkmce_link) { fprintf(fp, " %s (inserted %dms ago, expires in %dms, " "%d partition(s), %s)%s%s\n", rkmce->rkmce_mtopic.topic, - (int)((now - rkmce->rkmce_ts_insert)/1000), - (int)((rkmce->rkmce_ts_expires - now)/1000), + (int)((now - rkmce->rkmce_ts_insert) / 1000), + (int)((rkmce->rkmce_ts_expires - now) / 1000), rkmce->rkmce_mtopic.partition_cnt, - RD_KAFKA_METADATA_CACHE_VALID(rkmce) ? "valid":"hint", + RD_KAFKA_METADATA_CACHE_VALID(rkmce) ? "valid" : "hint", rkmce->rkmce_mtopic.err ? " error: " : "", - rkmce->rkmce_mtopic.err ? - rd_kafka_err2str(rkmce->rkmce_mtopic.err) : ""); + rkmce->rkmce_mtopic.err + ? rd_kafka_err2str(rkmce->rkmce_mtopic.err) + : ""); } } diff --git a/src/rdkafka_mock.c b/src/rdkafka_mock.c index 468de2cece..12c4b06781 100644 --- a/src/rdkafka_mock.c +++ b/src/rdkafka_mock.c @@ -40,30 +40,28 @@ #include -static void -rd_kafka_mock_cluster_destroy0 (rd_kafka_mock_cluster_t *mcluster); +static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster); static rd_kafka_mock_broker_t * -rd_kafka_mock_broker_find (const rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id) { +rd_kafka_mock_broker_find(const rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id) { const rd_kafka_mock_broker_t *mrkb; TAILQ_FOREACH(mrkb, &mcluster->brokers, link) - if (mrkb->id == broker_id) - return (rd_kafka_mock_broker_t *)mrkb; + if (mrkb->id == broker_id) + return (rd_kafka_mock_broker_t *)mrkb; return NULL; } - /** * @brief Unlink and free message set. 
*/ -static void rd_kafka_mock_msgset_destroy (rd_kafka_mock_partition_t *mpart, - rd_kafka_mock_msgset_t *mset) { +static void rd_kafka_mock_msgset_destroy(rd_kafka_mock_partition_t *mpart, + rd_kafka_mock_msgset_t *mset) { const rd_kafka_mock_msgset_t *next = TAILQ_NEXT(mset, link); /* Removing last messageset */ @@ -89,8 +87,9 @@ static void rd_kafka_mock_msgset_destroy (rd_kafka_mock_partition_t *mpart, * and appends it to the partition log. */ static rd_kafka_mock_msgset_t * -rd_kafka_mock_msgset_new (rd_kafka_mock_partition_t *mpart, - const rd_kafkap_bytes_t *bytes, size_t msgcnt) { +rd_kafka_mock_msgset_new(rd_kafka_mock_partition_t *mpart, + const rd_kafkap_bytes_t *bytes, + size_t msgcnt) { rd_kafka_mock_msgset_t *mset; size_t totsize = sizeof(*mset) + RD_KAFKAP_BYTES_LEN(bytes); int64_t BaseOffset; @@ -102,8 +101,8 @@ rd_kafka_mock_msgset_new (rd_kafka_mock_partition_t *mpart, rd_assert(mset != NULL); mset->first_offset = mpart->end_offset; - mset->last_offset = mset->first_offset + msgcnt - 1; - mpart->end_offset = mset->last_offset + 1; + mset->last_offset = mset->first_offset + msgcnt - 1; + mpart->end_offset = mset->last_offset + 1; if (mpart->update_follower_end_offset) mpart->follower_end_offset = mpart->end_offset; mpart->cnt++; @@ -111,7 +110,7 @@ rd_kafka_mock_msgset_new (rd_kafka_mock_partition_t *mpart, mset->bytes.len = bytes->len; - mset->bytes.data = (void *)(mset+1); + mset->bytes.data = (void *)(mset + 1); memcpy((void *)mset->bytes.data, bytes->data, mset->bytes.len); mpart->size += mset->bytes.len; @@ -123,23 +122,23 @@ rd_kafka_mock_msgset_new (rd_kafka_mock_partition_t *mpart, /* Remove old msgsets until within limits */ while (mpart->cnt > 1 && - (mpart->cnt > mpart->max_cnt || - mpart->size > mpart->max_size)) + (mpart->cnt > mpart->max_cnt || mpart->size > mpart->max_size)) rd_kafka_mock_msgset_destroy(mpart, TAILQ_FIRST(&mpart->msgsets)); TAILQ_INSERT_TAIL(&mpart->msgsets, mset, link); rd_kafka_dbg(mpart->topic->cluster->rk, MOCK, 
"MOCK", - "Broker %"PRId32": Log append %s [%"PRId32"] " - "%"PRIusz" messages, %"PRId32" bytes at offset %"PRId64 - " (log now %"PRId64"..%"PRId64", " - "original start %"PRId64")", - mpart->leader->id, mpart->topic->name, mpart->id, - msgcnt, RD_KAFKAP_BYTES_LEN(&mset->bytes), - mset->first_offset, - mpart->start_offset, mpart->end_offset, - orig_start_offset); + "Broker %" PRId32 ": Log append %s [%" PRId32 + "] " + "%" PRIusz " messages, %" PRId32 + " bytes at offset %" PRId64 " (log now %" PRId64 + "..%" PRId64 + ", " + "original start %" PRId64 ")", + mpart->leader->id, mpart->topic->name, mpart->id, msgcnt, + RD_KAFKAP_BYTES_LEN(&mset->bytes), mset->first_offset, + mpart->start_offset, mpart->end_offset, orig_start_offset); return mset; } @@ -148,25 +147,23 @@ rd_kafka_mock_msgset_new (rd_kafka_mock_partition_t *mpart, * @brief Find message set containing \p offset */ const rd_kafka_mock_msgset_t * -rd_kafka_mock_msgset_find (const rd_kafka_mock_partition_t *mpart, - int64_t offset, rd_bool_t on_follower) { +rd_kafka_mock_msgset_find(const rd_kafka_mock_partition_t *mpart, + int64_t offset, + rd_bool_t on_follower) { const rd_kafka_mock_msgset_t *mset; if (!on_follower && - (offset < mpart->start_offset || - offset > mpart->end_offset)) + (offset < mpart->start_offset || offset > mpart->end_offset)) return NULL; - if (on_follower && - (offset < mpart->follower_start_offset || - offset > mpart->follower_end_offset)) + if (on_follower && (offset < mpart->follower_start_offset || + offset > mpart->follower_end_offset)) return NULL; /* FIXME: Maintain an index */ TAILQ_FOREACH(mset, &mpart->msgsets, link) { - if (mset->first_offset <= offset && - offset <= mset->last_offset) + if (mset->first_offset <= offset && offset <= mset->last_offset) return mset; } @@ -180,9 +177,9 @@ rd_kafka_mock_msgset_find (const rd_kafka_mock_partition_t *mpart, * @param BaseOffset will contain the first assigned offset of the message set. 
*/ rd_kafka_resp_err_t -rd_kafka_mock_partition_log_append (rd_kafka_mock_partition_t *mpart, - const rd_kafkap_bytes_t *bytes, - int64_t *BaseOffset) { +rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart, + const rd_kafkap_bytes_t *bytes, + int64_t *BaseOffset) { const int log_decode_errors = LOG_ERR; rd_kafka_buf_t *rkbuf; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; @@ -192,10 +189,10 @@ rd_kafka_mock_partition_log_append (rd_kafka_mock_partition_t *mpart, /* Partially parse the MessageSet in \p bytes to get * the message count. */ - rkbuf = rd_kafka_buf_new_shadow(bytes->data, - RD_KAFKAP_BYTES_LEN(bytes), NULL); + rkbuf = rd_kafka_buf_new_shadow(bytes->data, RD_KAFKAP_BYTES_LEN(bytes), + NULL); - rd_kafka_buf_peek_i8(rkbuf, 8+4+4, &MagicByte); + rd_kafka_buf_peek_i8(rkbuf, 8 + 4 + 4, &MagicByte); if (MagicByte != 2) { /* We only support MsgVersion 2 for now */ err = RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION; @@ -206,8 +203,8 @@ rd_kafka_mock_partition_log_append (rd_kafka_mock_partition_t *mpart, &RecordCount); if (RecordCount < 1 || - (size_t)RecordCount > - RD_KAFKAP_BYTES_LEN(bytes) / RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD) { + (size_t)RecordCount > RD_KAFKAP_BYTES_LEN(bytes) / + RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD) { err = RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE; goto err; } @@ -220,9 +217,9 @@ rd_kafka_mock_partition_log_append (rd_kafka_mock_partition_t *mpart, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: rd_kafka_buf_destroy(rkbuf); return err; } @@ -232,8 +229,8 @@ rd_kafka_mock_partition_log_append (rd_kafka_mock_partition_t *mpart, * @brief Set the partition leader, or NULL for leader-less. 
*/ static void -rd_kafka_mock_partition_set_leader0 (rd_kafka_mock_partition_t *mpart, - rd_kafka_mock_broker_t *mrkb) { +rd_kafka_mock_partition_set_leader0(rd_kafka_mock_partition_t *mpart, + rd_kafka_mock_broker_t *mrkb) { mpart->leader = mrkb; } @@ -242,17 +239,17 @@ rd_kafka_mock_partition_set_leader0 (rd_kafka_mock_partition_t *mpart, * @brief Automatically assign replicas for partition */ static void -rd_kafka_mock_partition_assign_replicas (rd_kafka_mock_partition_t *mpart) { +rd_kafka_mock_partition_assign_replicas(rd_kafka_mock_partition_t *mpart) { rd_kafka_mock_cluster_t *mcluster = mpart->topic->cluster; - int replica_cnt = RD_MIN(mcluster->defaults.replication_factor, - mcluster->broker_cnt); + int replica_cnt = + RD_MIN(mcluster->defaults.replication_factor, mcluster->broker_cnt); rd_kafka_mock_broker_t *mrkb; int i = 0; if (mpart->replicas) rd_free(mpart->replicas); - mpart->replicas = rd_calloc(replica_cnt, sizeof(*mpart->replicas)); + mpart->replicas = rd_calloc(replica_cnt, sizeof(*mpart->replicas)); mpart->replica_cnt = replica_cnt; /* FIXME: randomize this using perhaps reservoir sampling */ @@ -264,7 +261,7 @@ rd_kafka_mock_partition_assign_replicas (rd_kafka_mock_partition_t *mpart) { /* Select a random leader */ rd_kafka_mock_partition_set_leader0( - mpart, mpart->replicas[rd_jitter(0, replica_cnt-1)]); + mpart, mpart->replicas[rd_jitter(0, replica_cnt - 1)]); } @@ -273,8 +270,8 @@ rd_kafka_mock_partition_assign_replicas (rd_kafka_mock_partition_t *mpart) { * @brief Unlink and destroy committed offset */ static void -rd_kafka_mock_committed_offset_destroy (rd_kafka_mock_partition_t *mpart, - rd_kafka_mock_committed_offset_t *coff){ +rd_kafka_mock_committed_offset_destroy(rd_kafka_mock_partition_t *mpart, + rd_kafka_mock_committed_offset_t *coff) { rd_kafkap_str_destroy(coff->metadata); TAILQ_REMOVE(&mpart->committed_offsets, coff, link); rd_free(coff); @@ -285,8 +282,8 @@ rd_kafka_mock_committed_offset_destroy (rd_kafka_mock_partition_t 
*mpart, * @brief Find previously committed offset for group. */ rd_kafka_mock_committed_offset_t * -rd_kafka_mock_committed_offset_find (const rd_kafka_mock_partition_t *mpart, - const rd_kafkap_str_t *group) { +rd_kafka_mock_committed_offset_find(const rd_kafka_mock_partition_t *mpart, + const rd_kafkap_str_t *group) { const rd_kafka_mock_committed_offset_t *coff; TAILQ_FOREACH(coff, &mpart->committed_offsets, link) { @@ -302,9 +299,10 @@ rd_kafka_mock_committed_offset_find (const rd_kafka_mock_partition_t *mpart, * @brief Commit offset for group */ rd_kafka_mock_committed_offset_t * -rd_kafka_mock_commit_offset (rd_kafka_mock_partition_t *mpart, - const rd_kafkap_str_t *group, int64_t offset, - const rd_kafkap_str_t *metadata) { +rd_kafka_mock_commit_offset(rd_kafka_mock_partition_t *mpart, + const rd_kafkap_str_t *group, + int64_t offset, + const rd_kafkap_str_t *metadata) { rd_kafka_mock_committed_offset_t *coff; if (!(coff = rd_kafka_mock_committed_offset_find(mpart, group))) { @@ -329,7 +327,7 @@ rd_kafka_mock_commit_offset (rd_kafka_mock_partition_t *mpart, coff->offset = offset; rd_kafka_dbg(mpart->topic->cluster->rk, MOCK, "MOCK", - "Topic %s [%"PRId32"] committing offset %"PRId64 + "Topic %s [%" PRId32 "] committing offset %" PRId64 " for group %.*s", mpart->topic->name, mpart->id, offset, RD_KAFKAP_STR_PR(group)); @@ -340,35 +338,36 @@ rd_kafka_mock_commit_offset (rd_kafka_mock_partition_t *mpart, /** * @brief Destroy resources for partition, but the \p mpart itself is not freed. 
*/ -static void rd_kafka_mock_partition_destroy (rd_kafka_mock_partition_t *mpart) { +static void rd_kafka_mock_partition_destroy(rd_kafka_mock_partition_t *mpart) { rd_kafka_mock_msgset_t *mset, *tmp; rd_kafka_mock_committed_offset_t *coff, *tmpcoff; TAILQ_FOREACH_SAFE(mset, &mpart->msgsets, link, tmp) - rd_kafka_mock_msgset_destroy(mpart, mset); + rd_kafka_mock_msgset_destroy(mpart, mset); TAILQ_FOREACH_SAFE(coff, &mpart->committed_offsets, link, tmpcoff) - rd_kafka_mock_committed_offset_destroy(mpart, coff); + rd_kafka_mock_committed_offset_destroy(mpart, coff); rd_free(mpart->replicas); } -static void rd_kafka_mock_partition_init (rd_kafka_mock_topic_t *mtopic, - rd_kafka_mock_partition_t *mpart, - int id, int replication_factor) { +static void rd_kafka_mock_partition_init(rd_kafka_mock_topic_t *mtopic, + rd_kafka_mock_partition_t *mpart, + int id, + int replication_factor) { mpart->topic = mtopic; - mpart->id = id; + mpart->id = id; mpart->follower_id = -1; TAILQ_INIT(&mpart->msgsets); - mpart->max_size = 1024*1024*5; - mpart->max_cnt = 100000; + mpart->max_size = 1024 * 1024 * 5; + mpart->max_cnt = 100000; mpart->update_follower_start_offset = rd_true; - mpart->update_follower_end_offset = rd_true; + mpart->update_follower_end_offset = rd_true; TAILQ_INIT(&mpart->committed_offsets); @@ -376,8 +375,8 @@ static void rd_kafka_mock_partition_init (rd_kafka_mock_topic_t *mtopic, } rd_kafka_mock_partition_t * -rd_kafka_mock_partition_find (const rd_kafka_mock_topic_t *mtopic, - int32_t partition) { +rd_kafka_mock_partition_find(const rd_kafka_mock_topic_t *mtopic, + int32_t partition) { if (partition < 0 || partition >= mtopic->partition_cnt) return NULL; @@ -385,10 +384,10 @@ rd_kafka_mock_partition_find (const rd_kafka_mock_topic_t *mtopic, } -static void rd_kafka_mock_topic_destroy (rd_kafka_mock_topic_t *mtopic) { +static void rd_kafka_mock_topic_destroy(rd_kafka_mock_topic_t *mtopic) { int i; - for (i = 0 ; i < mtopic->partition_cnt ; i++) + for (i = 0; i < 
mtopic->partition_cnt; i++) rd_kafka_mock_partition_destroy(&mtopic->partitions[i]); TAILQ_REMOVE(&mtopic->cluster->topics, mtopic, link); @@ -401,22 +400,24 @@ static void rd_kafka_mock_topic_destroy (rd_kafka_mock_topic_t *mtopic) { static rd_kafka_mock_topic_t * -rd_kafka_mock_topic_new (rd_kafka_mock_cluster_t *mcluster, const char *topic, - int partition_cnt, int replication_factor) { +rd_kafka_mock_topic_new(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt, + int replication_factor) { rd_kafka_mock_topic_t *mtopic; int i; - mtopic = rd_calloc(1, sizeof(*mtopic)); - mtopic->name = rd_strdup(topic); + mtopic = rd_calloc(1, sizeof(*mtopic)); + mtopic->name = rd_strdup(topic); mtopic->cluster = mcluster; mtopic->partition_cnt = partition_cnt; - mtopic->partitions = rd_calloc(partition_cnt, - sizeof(*mtopic->partitions)); + mtopic->partitions = + rd_calloc(partition_cnt, sizeof(*mtopic->partitions)); - for (i = 0 ; i < partition_cnt ; i++) - rd_kafka_mock_partition_init(mtopic, &mtopic->partitions[i], - i, replication_factor); + for (i = 0; i < partition_cnt; i++) + rd_kafka_mock_partition_init(mtopic, &mtopic->partitions[i], i, + replication_factor); TAILQ_INSERT_TAIL(&mcluster->topics, mtopic, link); mcluster->topic_cnt++; @@ -431,8 +432,8 @@ rd_kafka_mock_topic_new (rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_mock_topic_t * -rd_kafka_mock_topic_find (const rd_kafka_mock_cluster_t *mcluster, - const char *name) { +rd_kafka_mock_topic_find(const rd_kafka_mock_cluster_t *mcluster, + const char *name) { const rd_kafka_mock_topic_t *mtopic; TAILQ_FOREACH(mtopic, &mcluster->topics, link) { @@ -445,8 +446,8 @@ rd_kafka_mock_topic_find (const rd_kafka_mock_cluster_t *mcluster, rd_kafka_mock_topic_t * -rd_kafka_mock_topic_find_by_kstr (const rd_kafka_mock_cluster_t *mcluster, - const rd_kafkap_str_t *kname) { +rd_kafka_mock_topic_find_by_kstr(const rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *kname) { const 
rd_kafka_mock_topic_t *mtopic; TAILQ_FOREACH(mtopic, &mcluster->topics, link) { @@ -468,15 +469,16 @@ rd_kafka_mock_topic_find_by_kstr (const rd_kafka_mock_cluster_t *mcluster, * new topics on real clusters. */ rd_kafka_mock_topic_t * -rd_kafka_mock_topic_auto_create (rd_kafka_mock_cluster_t *mcluster, - const char *topic, int partition_cnt, - rd_kafka_resp_err_t *errp) { +rd_kafka_mock_topic_auto_create(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt, + rd_kafka_resp_err_t *errp) { rd_assert(!rd_kafka_mock_topic_find(mcluster, topic)); - *errp = 0; // FIXME? RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE; + *errp = 0; // FIXME? RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE; return rd_kafka_mock_topic_new(mcluster, topic, - partition_cnt == -1 ? - mcluster->defaults.partition_cnt : - partition_cnt, + partition_cnt == -1 + ? mcluster->defaults.partition_cnt + : partition_cnt, mcluster->defaults.replication_factor); } @@ -489,16 +491,17 @@ rd_kafka_mock_topic_auto_create (rd_kafka_mock_cluster_t *mcluster, * Otherwise use the default. */ rd_kafka_mock_topic_t * -rd_kafka_mock_topic_get (rd_kafka_mock_cluster_t *mcluster, const char *topic, - int partition_cnt) { +rd_kafka_mock_topic_get(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt) { rd_kafka_mock_topic_t *mtopic; rd_kafka_resp_err_t err; if ((mtopic = rd_kafka_mock_topic_find(mcluster, topic))) return mtopic; - return rd_kafka_mock_topic_auto_create(mcluster, topic, - partition_cnt, &err); + return rd_kafka_mock_topic_auto_create(mcluster, topic, partition_cnt, + &err); } /** @@ -507,14 +510,15 @@ rd_kafka_mock_topic_get (rd_kafka_mock_cluster_t *mcluster, const char *topic, * @returns NULL if topic already exists and partition is out of range. 
*/ static rd_kafka_mock_partition_t * -rd_kafka_mock_partition_get (rd_kafka_mock_cluster_t *mcluster, - const char *topic, int32_t partition) { +rd_kafka_mock_partition_get(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t partition) { rd_kafka_mock_topic_t *mtopic; rd_kafka_resp_err_t err; if (!(mtopic = rd_kafka_mock_topic_find(mcluster, topic))) mtopic = rd_kafka_mock_topic_auto_create(mcluster, topic, - partition+1, &err); + partition + 1, &err); if (partition >= mtopic->partition_cnt) return NULL; @@ -527,11 +531,12 @@ rd_kafka_mock_partition_get (rd_kafka_mock_cluster_t *mcluster, * @brief Set IO events for fd */ static void -rd_kafka_mock_cluster_io_set_events (rd_kafka_mock_cluster_t *mcluster, - rd_socket_t fd, int events) { +rd_kafka_mock_cluster_io_set_events(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events) { int i; - for (i = 0 ; i < mcluster->fd_cnt ; i++) { + for (i = 0; i < mcluster->fd_cnt; i++) { if (mcluster->fds[i].fd == fd) { mcluster->fds[i].events |= events; return; @@ -545,11 +550,13 @@ rd_kafka_mock_cluster_io_set_events (rd_kafka_mock_cluster_t *mcluster, * @brief Set or clear single IO events for fd */ static void -rd_kafka_mock_cluster_io_set_event (rd_kafka_mock_cluster_t *mcluster, - rd_socket_t fd, rd_bool_t set, int event) { +rd_kafka_mock_cluster_io_set_event(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + rd_bool_t set, + int event) { int i; - for (i = 0 ; i < mcluster->fd_cnt ; i++) { + for (i = 0; i < mcluster->fd_cnt; i++) { if (mcluster->fds[i].fd == fd) { if (set) mcluster->fds[i].events |= event; @@ -567,11 +574,12 @@ rd_kafka_mock_cluster_io_set_event (rd_kafka_mock_cluster_t *mcluster, * @brief Clear IO events for fd */ static void -rd_kafka_mock_cluster_io_clear_events (rd_kafka_mock_cluster_t *mcluster, - rd_socket_t fd, int events) { +rd_kafka_mock_cluster_io_clear_events(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events) { int i; - for (i = 0 ; i < 
mcluster->fd_cnt ; i++) { + for (i = 0; i < mcluster->fd_cnt; i++) { if (mcluster->fds[i].fd == fd) { mcluster->fds[i].events &= ~events; return; @@ -582,21 +590,21 @@ rd_kafka_mock_cluster_io_clear_events (rd_kafka_mock_cluster_t *mcluster, } -static void rd_kafka_mock_cluster_io_del (rd_kafka_mock_cluster_t *mcluster, - rd_socket_t fd) { +static void rd_kafka_mock_cluster_io_del(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd) { int i; - for (i = 0 ; i < mcluster->fd_cnt ; i++) { + for (i = 0; i < mcluster->fd_cnt; i++) { if (mcluster->fds[i].fd == fd) { if (i + 1 < mcluster->fd_cnt) { memmove(&mcluster->fds[i], - &mcluster->fds[i+1], + &mcluster->fds[i + 1], sizeof(*mcluster->fds) * - (mcluster->fd_cnt - i)); + (mcluster->fd_cnt - i)); memmove(&mcluster->handlers[i], - &mcluster->handlers[i+1], + &mcluster->handlers[i + 1], sizeof(*mcluster->handlers) * - (mcluster->fd_cnt - i)); + (mcluster->fd_cnt - i)); } mcluster->fd_cnt--; @@ -611,47 +619,47 @@ static void rd_kafka_mock_cluster_io_del (rd_kafka_mock_cluster_t *mcluster, /** * @brief Add \p fd to IO poll with initial desired events (POLLIN, et.al). 
*/ -static void rd_kafka_mock_cluster_io_add (rd_kafka_mock_cluster_t *mcluster, - rd_socket_t fd, int events, - rd_kafka_mock_io_handler_t handler, - void *opaque) { +static void rd_kafka_mock_cluster_io_add(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events, + rd_kafka_mock_io_handler_t handler, + void *opaque) { if (mcluster->fd_cnt + 1 >= mcluster->fd_size) { mcluster->fd_size += 8; - mcluster->fds = rd_realloc(mcluster->fds, - sizeof(*mcluster->fds) * - mcluster->fd_size); - mcluster->handlers = rd_realloc(mcluster->handlers, - sizeof(*mcluster->handlers) * - mcluster->fd_size); + mcluster->fds = rd_realloc( + mcluster->fds, sizeof(*mcluster->fds) * mcluster->fd_size); + mcluster->handlers = + rd_realloc(mcluster->handlers, + sizeof(*mcluster->handlers) * mcluster->fd_size); } memset(&mcluster->fds[mcluster->fd_cnt], 0, sizeof(mcluster->fds[mcluster->fd_cnt])); - mcluster->fds[mcluster->fd_cnt].fd = fd; - mcluster->fds[mcluster->fd_cnt].events = events; - mcluster->fds[mcluster->fd_cnt].revents = 0; - mcluster->handlers[mcluster->fd_cnt].cb = handler; + mcluster->fds[mcluster->fd_cnt].fd = fd; + mcluster->fds[mcluster->fd_cnt].events = events; + mcluster->fds[mcluster->fd_cnt].revents = 0; + mcluster->handlers[mcluster->fd_cnt].cb = handler; mcluster->handlers[mcluster->fd_cnt].opaque = opaque; mcluster->fd_cnt++; } -static void rd_kafka_mock_connection_close (rd_kafka_mock_connection_t *mconn, - const char *reason) { +static void rd_kafka_mock_connection_close(rd_kafka_mock_connection_t *mconn, + const char *reason) { rd_kafka_buf_t *rkbuf; rd_kafka_dbg(mconn->broker->cluster->rk, MOCK, "MOCK", - "Broker %"PRId32": Connection from %s closed: %s", + "Broker %" PRId32 ": Connection from %s closed: %s", mconn->broker->id, rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT), reason); rd_kafka_mock_cgrps_connection_closed(mconn->broker->cluster, mconn); - rd_kafka_timer_stop(&mconn->broker->cluster->timers, - &mconn->write_tmr, rd_true); + 
rd_kafka_timer_stop(&mconn->broker->cluster->timers, &mconn->write_tmr, + rd_true); while ((rkbuf = TAILQ_FIRST(&mconn->outbufs.rkbq_bufs))) { rd_kafka_bufq_deq(&mconn->outbufs, rkbuf); @@ -669,8 +677,8 @@ static void rd_kafka_mock_connection_close (rd_kafka_mock_connection_t *mconn, } -void rd_kafka_mock_connection_send_response (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *resp) { +void rd_kafka_mock_connection_send_response(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp) { if (resp->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { /* Empty struct tags */ @@ -681,12 +689,12 @@ void rd_kafka_mock_connection_send_response (rd_kafka_mock_connection_t *mconn, resp->rkbuf_ts_sent += rd_clock(); resp->rkbuf_reshdr.Size = - (int32_t)(rd_buf_write_pos(&resp->rkbuf_buf) - 4); + (int32_t)(rd_buf_write_pos(&resp->rkbuf_buf) - 4); rd_kafka_buf_update_i32(resp, 0, resp->rkbuf_reshdr.Size); rd_kafka_dbg(mconn->broker->cluster->rk, MOCK, "MOCK", - "Broker %"PRId32": Sending %sResponseV%hd to %s", + "Broker %" PRId32 ": Sending %sResponseV%hd to %s", mconn->broker->id, rd_kafka_ApiKey2str(resp->rkbuf_reqhdr.ApiKey), resp->rkbuf_reqhdr.ApiVersion, @@ -697,9 +705,8 @@ void rd_kafka_mock_connection_send_response (rd_kafka_mock_connection_t *mconn, rd_kafka_bufq_enq(&mconn->outbufs, resp); - rd_kafka_mock_cluster_io_set_events(mconn->broker->cluster, - mconn->transport->rktrans_s, - POLLOUT); + rd_kafka_mock_cluster_io_set_events( + mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT); } @@ -710,10 +717,10 @@ void rd_kafka_mock_connection_send_response (rd_kafka_mock_connection_t *mconn, * -1 on error. 
*/ static int -rd_kafka_mock_connection_read_request (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t **rkbufp) { +rd_kafka_mock_connection_read_request(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t **rkbufp) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; - rd_kafka_t *rk = mcluster->rk; + rd_kafka_t *rk = mcluster->rk; const rd_bool_t log_decode_errors = rd_true; rd_kafka_buf_t *rkbuf; char errstr[128]; @@ -723,89 +730,79 @@ rd_kafka_mock_connection_read_request (rd_kafka_mock_connection_t *mconn, /* Initial read for a protocol request. * Allocate enough room for the protocol header * (where the total size is located). */ - rkbuf = mconn->rxbuf = rd_kafka_buf_new(2, - RD_KAFKAP_REQHDR_SIZE); + rkbuf = mconn->rxbuf = + rd_kafka_buf_new(2, RD_KAFKAP_REQHDR_SIZE); /* Protocol parsing code needs the rkb for logging */ rkbuf->rkbuf_rkb = mconn->broker->cluster->dummy_rkb; rd_kafka_broker_keep(rkbuf->rkbuf_rkb); /* Make room for request header */ - rd_buf_write_ensure(&rkbuf->rkbuf_buf, - RD_KAFKAP_REQHDR_SIZE, + rd_buf_write_ensure(&rkbuf->rkbuf_buf, RD_KAFKAP_REQHDR_SIZE, RD_KAFKAP_REQHDR_SIZE); } /* Read as much data as possible from the socket into the * connection receive buffer. 
*/ - r = rd_kafka_transport_recv(mconn->transport, &rkbuf->rkbuf_buf, - errstr, sizeof(errstr)); + r = rd_kafka_transport_recv(mconn->transport, &rkbuf->rkbuf_buf, errstr, + sizeof(errstr)); if (r == -1) { - rd_kafka_dbg(rk, MOCK, "MOCK", - "Broker %"PRId32": Connection %s: " - "receive failed: %s", - mconn->broker->id, - rd_sockaddr2str(&mconn->peer, - RD_SOCKADDR2STR_F_PORT), - errstr); + rd_kafka_dbg( + rk, MOCK, "MOCK", + "Broker %" PRId32 + ": Connection %s: " + "receive failed: %s", + mconn->broker->id, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT), + errstr); return -1; } else if (r == 0) { return 0; /* Need more data */ } - if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == - RD_KAFKAP_REQHDR_SIZE) { + if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == RD_KAFKAP_REQHDR_SIZE) { /* Received the full header, now check full request * size and allocate the buffer accordingly. */ /* Initialize reader */ - rd_slice_init(&rkbuf->rkbuf_reader, - &rkbuf->rkbuf_buf, 0, + rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0, RD_KAFKAP_REQHDR_SIZE); - rd_kafka_buf_read_i32(rkbuf, - &rkbuf->rkbuf_reqhdr.Size); - rd_kafka_buf_read_i16(rkbuf, - &rkbuf->rkbuf_reqhdr.ApiKey); - rd_kafka_buf_read_i16(rkbuf, - &rkbuf->rkbuf_reqhdr.ApiVersion); + rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reqhdr.Size); + rd_kafka_buf_read_i16(rkbuf, &rkbuf->rkbuf_reqhdr.ApiKey); + rd_kafka_buf_read_i16(rkbuf, &rkbuf->rkbuf_reqhdr.ApiVersion); if (rkbuf->rkbuf_reqhdr.ApiKey < 0 || rkbuf->rkbuf_reqhdr.ApiKey >= RD_KAFKAP__NUM) { - rd_kafka_buf_parse_fail(rkbuf, - "Invalid ApiKey %hd from %s", - rkbuf->rkbuf_reqhdr.ApiKey, - rd_sockaddr2str( - &mconn->peer, - RD_SOCKADDR2STR_F_PORT)); + rd_kafka_buf_parse_fail( + rkbuf, "Invalid ApiKey %hd from %s", + rkbuf->rkbuf_reqhdr.ApiKey, + rd_sockaddr2str(&mconn->peer, + RD_SOCKADDR2STR_F_PORT)); RD_NOTREACHED(); } /* Check if request version has flexible fields (KIP-482) */ - if (mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey]. 
- FlexVersion != -1 && + if (mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey] + .FlexVersion != -1 && rkbuf->rkbuf_reqhdr.ApiVersion >= - mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey]. - FlexVersion) + mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey] + .FlexVersion) rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER; - rd_kafka_buf_read_i32(rkbuf, - &rkbuf->rkbuf_reqhdr.CorrId); + rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reqhdr.CorrId); rkbuf->rkbuf_totlen = rkbuf->rkbuf_reqhdr.Size + 4; if (rkbuf->rkbuf_totlen < RD_KAFKAP_REQHDR_SIZE + 2 || rkbuf->rkbuf_totlen > - (size_t)rk->rk_conf.recv_max_msg_size) { + (size_t)rk->rk_conf.recv_max_msg_size) { rd_kafka_buf_parse_fail( - rkbuf, - "Invalid request size %"PRId32 - " from %s", - rkbuf->rkbuf_reqhdr.Size, - rd_sockaddr2str( - &mconn->peer, - RD_SOCKADDR2STR_F_PORT)); + rkbuf, "Invalid request size %" PRId32 " from %s", + rkbuf->rkbuf_reqhdr.Size, + rd_sockaddr2str(&mconn->peer, + RD_SOCKADDR2STR_F_PORT)); RD_NOTREACHED(); } @@ -814,18 +811,18 @@ rd_kafka_mock_connection_read_request (rd_kafka_mock_connection_t *mconn, if (!rkbuf->rkbuf_totlen) { /* Empty request (valid) */ - *rkbufp = rkbuf; + *rkbufp = rkbuf; mconn->rxbuf = NULL; return 1; } /* Allocate space for the request payload */ - rd_buf_write_ensure(&rkbuf->rkbuf_buf, - rkbuf->rkbuf_totlen, + rd_buf_write_ensure(&rkbuf->rkbuf_buf, rkbuf->rkbuf_totlen, rkbuf->rkbuf_totlen); } else if (rd_buf_write_pos(&rkbuf->rkbuf_buf) - - RD_KAFKAP_REQHDR_SIZE == rkbuf->rkbuf_totlen) { + RD_KAFKAP_REQHDR_SIZE == + rkbuf->rkbuf_totlen) { /* The full request is now read into the buffer. 
*/ /* Set up response reader slice starting past the @@ -833,7 +830,7 @@ rd_kafka_mock_connection_read_request (rd_kafka_mock_connection_t *mconn, rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, RD_KAFKAP_REQHDR_SIZE, rd_buf_len(&rkbuf->rkbuf_buf) - - RD_KAFKAP_REQHDR_SIZE); + RD_KAFKAP_REQHDR_SIZE); /* For convenience, shave off the ClientId */ rd_kafka_buf_skip_str(rkbuf); @@ -842,7 +839,7 @@ rd_kafka_mock_connection_read_request (rd_kafka_mock_connection_t *mconn, rd_kafka_buf_skip_tags(rkbuf); /* Return the buffer to the caller */ - *rkbufp = rkbuf; + *rkbufp = rkbuf; mconn->rxbuf = NULL; return 1; } @@ -850,11 +847,11 @@ rd_kafka_mock_connection_read_request (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: return -1; } -rd_kafka_buf_t *rd_kafka_mock_buf_new_response (const rd_kafka_buf_t *request) { +rd_kafka_buf_t *rd_kafka_mock_buf_new_response(const rd_kafka_buf_t *request) { rd_kafka_buf_t *rkbuf = rd_kafka_buf_new(1, 100); /* Copy request header so the ApiVersion remains known */ @@ -879,30 +876,29 @@ rd_kafka_buf_t *rd_kafka_mock_buf_new_response (const rd_kafka_buf_t *request) { - - /** * @brief Parse protocol request. * * @returns 0 on success, -1 on parse error. 
*/ static int -rd_kafka_mock_connection_parse_request (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +rd_kafka_mock_connection_parse_request(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; - rd_kafka_t *rk = mcluster->rk; + rd_kafka_t *rk = mcluster->rk; if (rkbuf->rkbuf_reqhdr.ApiKey < 0 || rkbuf->rkbuf_reqhdr.ApiKey >= RD_KAFKAP__NUM || !mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey].cb) { - rd_kafka_log(rk, LOG_ERR, "MOCK", - "Broker %"PRId32": unsupported %sRequestV%hd " - "from %s", - mconn->broker->id, - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), - rkbuf->rkbuf_reqhdr.ApiVersion, - rd_sockaddr2str(&mconn->peer, - RD_SOCKADDR2STR_F_PORT)); + rd_kafka_log( + rk, LOG_ERR, "MOCK", + "Broker %" PRId32 + ": unsupported %sRequestV%hd " + "from %s", + mconn->broker->id, + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_reqhdr.ApiVersion, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT)); return -1; } @@ -910,22 +906,22 @@ rd_kafka_mock_connection_parse_request (rd_kafka_mock_connection_t *mconn, * make sure the ApiVersion is supported. 
*/ if (rkbuf->rkbuf_reqhdr.ApiKey != RD_KAFKAP_ApiVersion && !rd_kafka_mock_cluster_ApiVersion_check( - mcluster, - rkbuf->rkbuf_reqhdr.ApiKey, - rkbuf->rkbuf_reqhdr.ApiVersion)) { - rd_kafka_log(rk, LOG_ERR, "MOCK", - "Broker %"PRId32": unsupported %sRequest " - "version %hd from %s", - mconn->broker->id, - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), - rkbuf->rkbuf_reqhdr.ApiVersion, - rd_sockaddr2str(&mconn->peer, - RD_SOCKADDR2STR_F_PORT)); + mcluster, rkbuf->rkbuf_reqhdr.ApiKey, + rkbuf->rkbuf_reqhdr.ApiVersion)) { + rd_kafka_log( + rk, LOG_ERR, "MOCK", + "Broker %" PRId32 + ": unsupported %sRequest " + "version %hd from %s", + mconn->broker->id, + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_reqhdr.ApiVersion, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT)); return -1; } rd_kafka_dbg(rk, MOCK, "MOCK", - "Broker %"PRId32": Received %sRequestV%hd from %s", + "Broker %" PRId32 ": Received %sRequestV%hd from %s", mconn->broker->id, rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), rkbuf->rkbuf_reqhdr.ApiVersion, @@ -940,13 +936,12 @@ rd_kafka_mock_connection_parse_request (rd_kafka_mock_connection_t *mconn, * @brief Timer callback to set the POLLOUT flag for a connection after * the delay has expired. */ -static void rd_kafka_mock_connection_write_out_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_mock_connection_write_out_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_mock_connection_t *mconn = arg; - rd_kafka_mock_cluster_io_set_events(mconn->broker->cluster, - mconn->transport->rktrans_s, - POLLOUT); + rd_kafka_mock_cluster_io_set_events( + mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT); } @@ -957,7 +952,7 @@ static void rd_kafka_mock_connection_write_out_tmr_cb (rd_kafka_timers_t *rkts, * -1 on error. 
*/ static ssize_t -rd_kafka_mock_connection_write_out (rd_kafka_mock_connection_t *mconn) { +rd_kafka_mock_connection_write_out(rd_kafka_mock_connection_t *mconn) { rd_kafka_buf_t *rkbuf; rd_ts_t now = rd_clock(); rd_ts_t rtt = mconn->broker->rtt; @@ -978,18 +973,14 @@ rd_kafka_mock_connection_write_out (rd_kafka_mock_connection_t *mconn) { if (ts_delay) { /* Delay response */ rd_kafka_timer_start_oneshot( - &mconn->broker->cluster->timers, - &mconn->write_tmr, - rd_false, - ts_delay-now, - rd_kafka_mock_connection_write_out_tmr_cb, - mconn); + &mconn->broker->cluster->timers, &mconn->write_tmr, + rd_false, ts_delay - now, + rd_kafka_mock_connection_write_out_tmr_cb, mconn); break; } if ((r = rd_kafka_transport_send(mconn->transport, - &rkbuf->rkbuf_reader, - errstr, + &rkbuf->rkbuf_reader, errstr, sizeof(errstr))) == -1) return -1; @@ -1002,9 +993,8 @@ rd_kafka_mock_connection_write_out (rd_kafka_mock_connection_t *mconn) { rd_kafka_buf_destroy(rkbuf); } - rd_kafka_mock_cluster_io_clear_events(mconn->broker->cluster, - mconn->transport->rktrans_s, - POLLOUT); + rd_kafka_mock_cluster_io_clear_events( + mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT); return 1; } @@ -1016,7 +1006,7 @@ rd_kafka_mock_connection_write_out (rd_kafka_mock_connection_t *mconn) { * Use to check if any responses should be sent when RTT has changed. 
*/ static void -rd_kafka_mock_broker_connections_write_out (rd_kafka_mock_broker_t *mrkb) { +rd_kafka_mock_broker_connections_write_out(rd_kafka_mock_broker_t *mrkb) { rd_kafka_mock_connection_t *mconn, *tmp; /* Need a safe loop since connections may be removed on send error */ @@ -1029,9 +1019,10 @@ rd_kafka_mock_broker_connections_write_out (rd_kafka_mock_broker_t *mrkb) { /** * @brief Per-Connection IO handler */ -static void rd_kafka_mock_connection_io (rd_kafka_mock_cluster_t *mcluster, - rd_socket_t fd, - int events, void *opaque) { +static void rd_kafka_mock_connection_io(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events, + void *opaque) { rd_kafka_mock_connection_t *mconn = opaque; if (events & POLLIN) { @@ -1062,7 +1053,7 @@ static void rd_kafka_mock_connection_io (rd_kafka_mock_cluster_t *mcluster, } } - if (events & (POLLERR|POLLHUP)) { + if (events & (POLLERR | POLLHUP)) { rd_kafka_mock_connection_close(mconn, "Disconnected"); return; } @@ -1079,8 +1070,8 @@ static void rd_kafka_mock_connection_io (rd_kafka_mock_cluster_t *mcluster, /** * @brief Set connection as blocking, POLLIN will not be served. 
*/ -void rd_kafka_mock_connection_set_blocking (rd_kafka_mock_connection_t *mconn, - rd_bool_t blocking) { +void rd_kafka_mock_connection_set_blocking(rd_kafka_mock_connection_t *mconn, + rd_bool_t blocking) { rd_kafka_mock_cluster_io_set_event(mconn->broker->cluster, mconn->transport->rktrans_s, !blocking, POLLIN); @@ -1088,8 +1079,9 @@ void rd_kafka_mock_connection_set_blocking (rd_kafka_mock_connection_t *mconn, static rd_kafka_mock_connection_t * -rd_kafka_mock_connection_new (rd_kafka_mock_broker_t *mrkb, rd_socket_t fd, - const struct sockaddr_in *peer) { +rd_kafka_mock_connection_new(rd_kafka_mock_broker_t *mrkb, + rd_socket_t fd, + const struct sockaddr_in *peer) { rd_kafka_mock_connection_t *mconn; rd_kafka_transport_t *rktrans; char errstr[128]; @@ -1099,35 +1091,33 @@ rd_kafka_mock_connection_new (rd_kafka_mock_broker_t *mrkb, rd_socket_t fd, return NULL; } - rktrans = rd_kafka_transport_new(mrkb->cluster->dummy_rkb, fd, - errstr, sizeof(errstr)); + rktrans = rd_kafka_transport_new(mrkb->cluster->dummy_rkb, fd, errstr, + sizeof(errstr)); if (!rktrans) { rd_kafka_log(mrkb->cluster->rk, LOG_ERR, "MOCK", "Failed to create transport for new " - "mock connection: %s", errstr); + "mock connection: %s", + errstr); rd_close(fd); return NULL; } rd_kafka_transport_post_connect_setup(rktrans); - mconn = rd_calloc(1, sizeof(*mconn)); - mconn->broker = mrkb; + mconn = rd_calloc(1, sizeof(*mconn)); + mconn->broker = mrkb; mconn->transport = rktrans; - mconn->peer = *peer; + mconn->peer = *peer; rd_kafka_bufq_init(&mconn->outbufs); TAILQ_INSERT_TAIL(&mrkb->connections, mconn, link); - rd_kafka_mock_cluster_io_add(mrkb->cluster, - mconn->transport->rktrans_s, - POLLIN, - rd_kafka_mock_connection_io, + rd_kafka_mock_cluster_io_add(mrkb->cluster, mconn->transport->rktrans_s, + POLLIN, rd_kafka_mock_connection_io, mconn); rd_kafka_dbg(mrkb->cluster->rk, MOCK, "MOCK", - "Broker %"PRId32": New connection from %s", - mrkb->id, + "Broker %" PRId32 ": New connection from %s", 
mrkb->id, rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT)); return mconn; @@ -1135,9 +1125,10 @@ rd_kafka_mock_connection_new (rd_kafka_mock_broker_t *mrkb, rd_socket_t fd, -static void rd_kafka_mock_cluster_op_io (rd_kafka_mock_cluster_t *mcluster, - rd_socket_t fd, - int events, void *opaque) { +static void rd_kafka_mock_cluster_op_io(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events, + void *opaque) { /* Read wake-up fd data and throw away, just used for wake-ups*/ char buf[1024]; while (rd_read(fd, buf, sizeof(buf)) > 0) @@ -1145,8 +1136,8 @@ static void rd_kafka_mock_cluster_op_io (rd_kafka_mock_cluster_t *mcluster, } -static int rd_kafka_mock_cluster_io_poll (rd_kafka_mock_cluster_t *mcluster, - int timeout_ms) { +static int rd_kafka_mock_cluster_io_poll(rd_kafka_mock_cluster_t *mcluster, + int timeout_ms) { int r; int i; @@ -1164,7 +1155,7 @@ static int rd_kafka_mock_cluster_io_poll (rd_kafka_mock_cluster_t *mcluster, RD_KAFKA_Q_CB_CALLBACK, NULL, NULL); /* Handle IO events, if any, and if not terminating */ - for (i = 0 ; mcluster->run && r > 0 && i < mcluster->fd_cnt ; i++) { + for (i = 0; mcluster->run && r > 0 && i < mcluster->fd_cnt; i++) { if (!mcluster->fds[i].revents) continue; @@ -1179,7 +1170,7 @@ static int rd_kafka_mock_cluster_io_poll (rd_kafka_mock_cluster_t *mcluster, } -static int rd_kafka_mock_cluster_thread_main (void *arg) { +static int rd_kafka_mock_cluster_thread_main(void *arg) { rd_kafka_mock_cluster_t *mcluster = arg; rd_kafka_set_thread_name("mock"); @@ -1189,18 +1180,17 @@ static int rd_kafka_mock_cluster_thread_main (void *arg) { rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); /* Op wakeup fd */ - rd_kafka_mock_cluster_io_add(mcluster, mcluster->wakeup_fds[0], - POLLIN, + rd_kafka_mock_cluster_io_add(mcluster, mcluster->wakeup_fds[0], POLLIN, rd_kafka_mock_cluster_op_io, NULL); mcluster->run = rd_true; while (mcluster->run) { - int sleeptime = - (int)((rd_kafka_timers_next( - &mcluster->timers, - 
1000*1000/*1s*/, - 1/*lock*/) + 999) / 1000); + int sleeptime = (int)((rd_kafka_timers_next(&mcluster->timers, + 1000 * 1000 /*1s*/, + 1 /*lock*/) + + 999) / + 1000); if (rd_kafka_mock_cluster_io_poll(mcluster, sleeptime) == -1) break; @@ -1222,12 +1212,13 @@ static int rd_kafka_mock_cluster_thread_main (void *arg) { -static void rd_kafka_mock_broker_listen_io (rd_kafka_mock_cluster_t *mcluster, - rd_socket_t fd, - int events, void *opaque) { +static void rd_kafka_mock_broker_listen_io(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events, + void *opaque) { rd_kafka_mock_broker_t *mrkb = opaque; - if (events & (POLLERR|POLLHUP)) + if (events & (POLLERR | POLLHUP)) rd_assert(!*"Mock broker listen socket error"); if (events & POLLIN) { @@ -1252,8 +1243,8 @@ static void rd_kafka_mock_broker_listen_io (rd_kafka_mock_cluster_t *mcluster, /** * @brief Close all connections to broker. */ -static void rd_kafka_mock_broker_close_all (rd_kafka_mock_broker_t *mrkb, - const char *reason) { +static void rd_kafka_mock_broker_close_all(rd_kafka_mock_broker_t *mrkb, + const char *reason) { rd_kafka_mock_connection_t *mconn; while ((mconn = TAILQ_FIRST(&mrkb->connections))) @@ -1264,14 +1255,14 @@ static void rd_kafka_mock_broker_close_all (rd_kafka_mock_broker_t *mrkb, * @brief Destroy error stack, must be unlinked. 
*/ static void -rd_kafka_mock_error_stack_destroy (rd_kafka_mock_error_stack_t *errstack) { +rd_kafka_mock_error_stack_destroy(rd_kafka_mock_error_stack_t *errstack) { if (errstack->errs) rd_free(errstack->errs); rd_free(errstack); } -static void rd_kafka_mock_broker_destroy (rd_kafka_mock_broker_t *mrkb) { +static void rd_kafka_mock_broker_destroy(rd_kafka_mock_broker_t *mrkb) { rd_kafka_mock_error_stack_t *errstack; rd_kafka_mock_broker_close_all(mrkb, "Destroying broker"); @@ -1292,23 +1283,19 @@ static void rd_kafka_mock_broker_destroy (rd_kafka_mock_broker_t *mrkb) { static rd_kafka_mock_broker_t * -rd_kafka_mock_broker_new (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id) { +rd_kafka_mock_broker_new(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id) { rd_kafka_mock_broker_t *mrkb; rd_socket_t listen_s; struct sockaddr_in sin = { - .sin_family = AF_INET, - .sin_addr = { - .s_addr = htonl(INADDR_LOOPBACK) - } - }; + .sin_family = AF_INET, + .sin_addr = {.s_addr = htonl(INADDR_LOOPBACK)}}; socklen_t sin_len = sizeof(sin); /* * Create and bind socket to any loopback port */ - listen_s = rd_kafka_socket_cb_linux(AF_INET, SOCK_STREAM, IPPROTO_TCP, - NULL); + listen_s = + rd_kafka_socket_cb_linux(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL); if (listen_s == RD_SOCKET_ERROR) { rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK", "Unable to create mock broker listen socket: %s", @@ -1350,14 +1337,14 @@ rd_kafka_mock_broker_new (rd_kafka_mock_cluster_t *mcluster, */ mrkb = rd_calloc(1, sizeof(*mrkb)); - mrkb->id = broker_id; - mrkb->cluster = mcluster; - mrkb->up = rd_true; + mrkb->id = broker_id; + mrkb->cluster = mcluster; + mrkb->up = rd_true; mrkb->listen_s = listen_s; - mrkb->port = ntohs(sin.sin_port); + mrkb->port = ntohs(sin.sin_port); rd_snprintf(mrkb->advertised_listener, - sizeof(mrkb->advertised_listener), - "%s", rd_sockaddr2str(&sin, 0)); + sizeof(mrkb->advertised_listener), "%s", + rd_sockaddr2str(&sin, 0)); TAILQ_INIT(&mrkb->connections); 
TAILQ_INIT(&mrkb->errstacks); @@ -1375,7 +1362,7 @@ rd_kafka_mock_broker_new (rd_kafka_mock_cluster_t *mcluster, /** * @returns the coordtype_t for a coord type string, or -1 on error. */ -static rd_kafka_coordtype_t rd_kafka_mock_coord_str2type (const char *str) { +static rd_kafka_coordtype_t rd_kafka_mock_coord_str2type(const char *str) { if (!strcmp(str, "transaction")) return RD_KAFKA_COORD_TXN; else if (!strcmp(str, "group")) @@ -1388,8 +1375,8 @@ static rd_kafka_coordtype_t rd_kafka_mock_coord_str2type (const char *str) { /** * @brief Unlink and destroy coordinator. */ -static void rd_kafka_mock_coord_destroy (rd_kafka_mock_cluster_t *mcluster, - rd_kafka_mock_coord_t *mcoord) { +static void rd_kafka_mock_coord_destroy(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_mock_coord_t *mcoord) { TAILQ_REMOVE(&mcluster->coords, mcoord, link); rd_free(mcoord->key); rd_free(mcoord); @@ -1399,8 +1386,9 @@ static void rd_kafka_mock_coord_destroy (rd_kafka_mock_cluster_t *mcluster, * @brief Find coordinator by type and key. */ static rd_kafka_mock_coord_t * -rd_kafka_mock_coord_find (rd_kafka_mock_cluster_t *mcluster, - rd_kafka_coordtype_t type, const char *key) { +rd_kafka_mock_coord_find(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_coordtype_t type, + const char *key) { rd_kafka_mock_coord_t *mcoord; TAILQ_FOREACH(mcoord, &mcluster->coords, link) { @@ -1416,9 +1404,9 @@ rd_kafka_mock_coord_find (rd_kafka_mock_cluster_t *mcluster, * @returns the coordinator for KeyType,Key (e.g., GROUP,mygroup). 
*/ rd_kafka_mock_broker_t * -rd_kafka_mock_cluster_get_coord (rd_kafka_mock_cluster_t *mcluster, - rd_kafka_coordtype_t KeyType, - const rd_kafkap_str_t *Key) { +rd_kafka_mock_cluster_get_coord(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_coordtype_t KeyType, + const rd_kafkap_str_t *Key) { rd_kafka_mock_broker_t *mrkb; rd_kafka_mock_coord_t *mcoord; char *key; @@ -1432,12 +1420,12 @@ rd_kafka_mock_cluster_get_coord (rd_kafka_mock_cluster_t *mcluster, /* Else hash the key to select an available broker. */ hash = rd_crc32(Key->str, RD_KAFKAP_STR_LEN(Key)); - idx = (int)(hash % mcluster->broker_cnt); + idx = (int)(hash % mcluster->broker_cnt); /* Use the broker index in the list */ TAILQ_FOREACH(mrkb, &mcluster->brokers, link) - if (idx-- == 0) - return mrkb; + if (idx-- == 0) + return mrkb; RD_NOTREACHED(); return NULL; @@ -1449,9 +1437,10 @@ rd_kafka_mock_cluster_get_coord (rd_kafka_mock_cluster_t *mcluster, * and \p key. */ static rd_kafka_mock_coord_t * -rd_kafka_mock_coord_set (rd_kafka_mock_cluster_t *mcluster, - const char *key_type, const char *key, - int32_t broker_id) { +rd_kafka_mock_coord_set(rd_kafka_mock_cluster_t *mcluster, + const char *key_type, + const char *key, + int32_t broker_id) { rd_kafka_mock_coord_t *mcoord; rd_kafka_coordtype_t type; @@ -1461,9 +1450,9 @@ rd_kafka_mock_coord_set (rd_kafka_mock_cluster_t *mcluster, if ((mcoord = rd_kafka_mock_coord_find(mcluster, type, key))) rd_kafka_mock_coord_destroy(mcluster, mcoord); - mcoord = rd_calloc(1, sizeof(*mcoord)); - mcoord->type = type; - mcoord->key = rd_strdup(key); + mcoord = rd_calloc(1, sizeof(*mcoord)); + mcoord->type = type; + mcoord->key = rd_strdup(key); mcoord->broker_id = broker_id; TAILQ_INSERT_TAIL(&mcluster->coords, mcoord, link); @@ -1477,8 +1466,8 @@ rd_kafka_mock_coord_set (rd_kafka_mock_cluster_t *mcluster, * if no error. 
*/ static rd_kafka_mock_error_rtt_t -rd_kafka_mock_error_stack_next (rd_kafka_mock_error_stack_t *errstack) { - rd_kafka_mock_error_rtt_t err_rtt = { RD_KAFKA_RESP_ERR_NO_ERROR, 0 }; +rd_kafka_mock_error_stack_next(rd_kafka_mock_error_stack_t *errstack) { + rd_kafka_mock_error_rtt_t err_rtt = {RD_KAFKA_RESP_ERR_NO_ERROR, 0}; if (likely(errstack->cnt == 0)) return err_rtt; @@ -1497,13 +1486,13 @@ rd_kafka_mock_error_stack_next (rd_kafka_mock_error_stack_t *errstack) { * @brief Find an error stack based on \p ApiKey */ static rd_kafka_mock_error_stack_t * -rd_kafka_mock_error_stack_find (const rd_kafka_mock_error_stack_head_t *shead, - int16_t ApiKey) { +rd_kafka_mock_error_stack_find(const rd_kafka_mock_error_stack_head_t *shead, + int16_t ApiKey) { const rd_kafka_mock_error_stack_t *errstack; TAILQ_FOREACH(errstack, shead, link) - if (errstack->ApiKey == ApiKey) - return (rd_kafka_mock_error_stack_t *)errstack; + if (errstack->ApiKey == ApiKey) + return (rd_kafka_mock_error_stack_t *)errstack; return NULL; } @@ -1514,8 +1503,8 @@ rd_kafka_mock_error_stack_find (const rd_kafka_mock_error_stack_head_t *shead, * @brief Find or create an error stack based on \p ApiKey */ static rd_kafka_mock_error_stack_t * -rd_kafka_mock_error_stack_get (rd_kafka_mock_error_stack_head_t *shead, - int16_t ApiKey) { +rd_kafka_mock_error_stack_get(rd_kafka_mock_error_stack_head_t *shead, + int16_t ApiKey) { rd_kafka_mock_error_stack_t *errstack; if ((errstack = rd_kafka_mock_error_stack_find(shead, ApiKey))) @@ -1538,8 +1527,8 @@ rd_kafka_mock_error_stack_get (rd_kafka_mock_error_stack_head_t *shead, * provided response \p resp buffer. 
*/ rd_kafka_resp_err_t -rd_kafka_mock_next_request_error (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *resp) { +rd_kafka_mock_next_request_error(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_mock_error_stack_t *errstack; rd_kafka_mock_error_rtt_t err_rtt; @@ -1550,15 +1539,14 @@ rd_kafka_mock_next_request_error (rd_kafka_mock_connection_t *mconn, resp->rkbuf_reqhdr.ApiKey); if (likely(!errstack)) { errstack = rd_kafka_mock_error_stack_find( - &mcluster->errstacks, - resp->rkbuf_reqhdr.ApiKey); + &mcluster->errstacks, resp->rkbuf_reqhdr.ApiKey); if (likely(!errstack)) { mtx_unlock(&mcluster->lock); return RD_KAFKA_RESP_ERR_NO_ERROR; } } - err_rtt = rd_kafka_mock_error_stack_next(errstack); + err_rtt = rd_kafka_mock_error_stack_next(errstack); resp->rkbuf_ts_sent = err_rtt.rtt; mtx_unlock(&mcluster->lock); @@ -1573,12 +1561,13 @@ rd_kafka_mock_next_request_error (rd_kafka_mock_connection_t *mconn, * be sent. * Note: Delayed disconnects (rtt-based) are not supported. 
*/ if (err_rtt.err == RD_KAFKA_RESP_ERR__TRANSPORT) { - rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", - "Broker %"PRId32": Forcing close of connection " - "from %s", - mconn->broker->id, - rd_sockaddr2str(&mconn->peer, - RD_SOCKADDR2STR_F_PORT)); + rd_kafka_dbg( + mcluster->rk, MOCK, "MOCK", + "Broker %" PRId32 + ": Forcing close of connection " + "from %s", + mconn->broker->id, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT)); rd_kafka_transport_shutdown(mconn->transport); } @@ -1587,8 +1576,8 @@ rd_kafka_mock_next_request_error (rd_kafka_mock_connection_t *mconn, } -void rd_kafka_mock_clear_request_errors (rd_kafka_mock_cluster_t *mcluster, - int16_t ApiKey) { +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey) { rd_kafka_mock_error_stack_t *errstack; mtx_lock(&mcluster->lock); @@ -1601,11 +1590,11 @@ void rd_kafka_mock_clear_request_errors (rd_kafka_mock_cluster_t *mcluster, } -void -rd_kafka_mock_push_request_errors_array (rd_kafka_mock_cluster_t *mcluster, - int16_t ApiKey, - size_t cnt, - const rd_kafka_resp_err_t *errors) { +void rd_kafka_mock_push_request_errors_array( + rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + size_t cnt, + const rd_kafka_resp_err_t *errors) { rd_kafka_mock_error_stack_t *errstack; size_t totcnt; size_t i; @@ -1618,27 +1607,28 @@ rd_kafka_mock_push_request_errors_array (rd_kafka_mock_cluster_t *mcluster, if (totcnt > errstack->size) { errstack->size = totcnt + 4; - errstack->errs = rd_realloc(errstack->errs, - errstack->size * - sizeof(*errstack->errs)); + errstack->errs = rd_realloc( + errstack->errs, errstack->size * sizeof(*errstack->errs)); } - for (i = 0 ; i < cnt ; i++) { - errstack->errs[errstack->cnt].err = errors[i]; + for (i = 0; i < cnt; i++) { + errstack->errs[errstack->cnt].err = errors[i]; errstack->errs[errstack->cnt++].rtt = 0; } mtx_unlock(&mcluster->lock); } -void rd_kafka_mock_push_request_errors (rd_kafka_mock_cluster_t *mcluster, - int16_t ApiKey, size_t cnt, 
...) { +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + size_t cnt, + ...) { va_list ap; rd_kafka_resp_err_t *errors = rd_alloca(sizeof(*errors) * cnt); size_t i; va_start(ap, cnt); - for (i = 0 ; i < cnt ; i++) + for (i = 0; i < cnt; i++) errors[i] = va_arg(ap, rd_kafka_resp_err_t); rd_kafka_mock_push_request_errors_array(mcluster, ApiKey, cnt, errors); @@ -1646,9 +1636,11 @@ void rd_kafka_mock_push_request_errors (rd_kafka_mock_cluster_t *mcluster, rd_kafka_resp_err_t -rd_kafka_mock_broker_push_request_error_rtts (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id, - int16_t ApiKey, size_t cnt, ...) { +rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + int16_t ApiKey, + size_t cnt, + ...) { rd_kafka_mock_broker_t *mrkb; va_list ap; rd_kafka_mock_error_stack_t *errstack; @@ -1667,17 +1659,16 @@ rd_kafka_mock_broker_push_request_error_rtts (rd_kafka_mock_cluster_t *mcluster, if (totcnt > errstack->size) { errstack->size = totcnt + 4; - errstack->errs = rd_realloc(errstack->errs, - errstack->size * - sizeof(*errstack->errs)); + errstack->errs = rd_realloc( + errstack->errs, errstack->size * sizeof(*errstack->errs)); } va_start(ap, cnt); while (cnt-- > 0) { errstack->errs[errstack->cnt].err = - va_arg(ap, rd_kafka_resp_err_t); + va_arg(ap, rd_kafka_resp_err_t); errstack->errs[errstack->cnt++].rtt = - ((rd_ts_t)va_arg(ap, int)) * 1000; + ((rd_ts_t)va_arg(ap, int)) * 1000; } va_end(ap); @@ -1687,14 +1678,14 @@ rd_kafka_mock_broker_push_request_error_rtts (rd_kafka_mock_cluster_t *mcluster, } -void rd_kafka_mock_topic_set_error (rd_kafka_mock_cluster_t *mcluster, - const char *topic, - rd_kafka_resp_err_t err) { +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + rd_kafka_resp_err_t err) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); rko->rko_u.mock.name = rd_strdup(topic); - rko->rko_u.mock.cmd = 
RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR; - rko->rko_u.mock.err = err; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR; + rko->rko_u.mock.err = err; rko = rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE); if (rko) @@ -1703,168 +1694,172 @@ void rd_kafka_mock_topic_set_error (rd_kafka_mock_cluster_t *mcluster, rd_kafka_resp_err_t -rd_kafka_mock_topic_create (rd_kafka_mock_cluster_t *mcluster, - const char *topic, int partition_cnt, - int replication_factor) { +rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt, + int replication_factor) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); rko->rko_u.mock.name = rd_strdup(topic); - rko->rko_u.mock.lo = partition_cnt; - rko->rko_u.mock.hi = replication_factor; - rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TOPIC_CREATE; + rko->rko_u.mock.lo = partition_cnt; + rko->rko_u.mock.hi = replication_factor; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TOPIC_CREATE; return rd_kafka_op_err_destroy( - rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } rd_kafka_resp_err_t -rd_kafka_mock_partition_set_leader (rd_kafka_mock_cluster_t *mcluster, - const char *topic, int32_t partition, - int32_t broker_id) { +rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t partition, + int32_t broker_id) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); - rko->rko_u.mock.name = rd_strdup(topic); - rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_LEADER; + rko->rko_u.mock.name = rd_strdup(topic); + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_LEADER; rko->rko_u.mock.partition = partition; rko->rko_u.mock.broker_id = broker_id; return rd_kafka_op_err_destroy( - rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } rd_kafka_resp_err_t -rd_kafka_mock_partition_set_follower (rd_kafka_mock_cluster_t *mcluster, - 
const char *topic, int32_t partition, - int32_t broker_id) { +rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t partition, + int32_t broker_id) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); - rko->rko_u.mock.name = rd_strdup(topic); - rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER; + rko->rko_u.mock.name = rd_strdup(topic); + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER; rko->rko_u.mock.partition = partition; rko->rko_u.mock.broker_id = broker_id; return rd_kafka_op_err_destroy( - rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } rd_kafka_resp_err_t -rd_kafka_mock_partition_set_follower_wmarks (rd_kafka_mock_cluster_t *mcluster, - const char *topic, - int32_t partition, - int64_t lo, int64_t hi) { +rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t partition, + int64_t lo, + int64_t hi) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); - rko->rko_u.mock.name = rd_strdup(topic); - rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS; + rko->rko_u.mock.name = rd_strdup(topic); + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS; rko->rko_u.mock.partition = partition; - rko->rko_u.mock.lo = lo; - rko->rko_u.mock.hi = hi; + rko->rko_u.mock.lo = lo; + rko->rko_u.mock.hi = hi; return rd_kafka_op_err_destroy( - rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } rd_kafka_resp_err_t -rd_kafka_mock_broker_set_down (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id) { +rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); rko->rko_u.mock.broker_id = broker_id; - rko->rko_u.mock.lo = rd_false; - rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN; + rko->rko_u.mock.lo = 
rd_false; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN; return rd_kafka_op_err_destroy( - rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } rd_kafka_resp_err_t -rd_kafka_mock_broker_set_up (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id) { +rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); rko->rko_u.mock.broker_id = broker_id; - rko->rko_u.mock.lo = rd_true; - rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN; + rko->rko_u.mock.lo = rd_true; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN; return rd_kafka_op_err_destroy( - rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } rd_kafka_resp_err_t -rd_kafka_mock_broker_set_rtt (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id, int rtt_ms) { +rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + int rtt_ms) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); rko->rko_u.mock.broker_id = broker_id; - rko->rko_u.mock.lo = rtt_ms; - rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_RTT; + rko->rko_u.mock.lo = rtt_ms; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_RTT; return rd_kafka_op_err_destroy( - rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } rd_kafka_resp_err_t -rd_kafka_mock_broker_set_rack (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id, const char *rack) { +rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + const char *rack) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); rko->rko_u.mock.broker_id = broker_id; - rko->rko_u.mock.name = rd_strdup(rack); - rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_RACK; + rko->rko_u.mock.name = rd_strdup(rack); + rko->rko_u.mock.cmd = 
RD_KAFKA_MOCK_CMD_BROKER_SET_RACK; return rd_kafka_op_err_destroy( - rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } rd_kafka_resp_err_t -rd_kafka_mock_coordinator_set (rd_kafka_mock_cluster_t *mcluster, - const char *key_type, const char *key, - int32_t broker_id) { +rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, + const char *key_type, + const char *key, + int32_t broker_id) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); - rko->rko_u.mock.name = rd_strdup(key_type); - rko->rko_u.mock.str = rd_strdup(key); + rko->rko_u.mock.name = rd_strdup(key_type); + rko->rko_u.mock.str = rd_strdup(key); rko->rko_u.mock.broker_id = broker_id; - rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_COORD_SET; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_COORD_SET; return rd_kafka_op_err_destroy( - rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } rd_kafka_resp_err_t -rd_kafka_mock_set_apiversion (rd_kafka_mock_cluster_t *mcluster, - int16_t ApiKey, - int16_t MinVersion, int16_t MaxVersion) { +rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + int16_t MinVersion, + int16_t MaxVersion) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); rko->rko_u.mock.partition = ApiKey; - rko->rko_u.mock.lo = MinVersion; - rko->rko_u.mock.hi = MaxVersion; - rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_APIVERSION_SET; + rko->rko_u.mock.lo = MinVersion; + rko->rko_u.mock.hi = MaxVersion; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_APIVERSION_SET; return rd_kafka_op_err_destroy( - rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } - - - /** * @brief Handle command op * * @locality mcluster thread */ static rd_kafka_resp_err_t -rd_kafka_mock_cluster_cmd (rd_kafka_mock_cluster_t *mcluster, - rd_kafka_op_t *rko) { 
+rd_kafka_mock_cluster_cmd(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_op_t *rko) { rd_kafka_mock_topic_t *mtopic; rd_kafka_mock_partition_t *mpart; rd_kafka_mock_broker_t *mrkb; - switch (rko->rko_u.mock.cmd) - { + switch (rko->rko_u.mock.cmd) { case RD_KAFKA_MOCK_CMD_TOPIC_CREATE: if (rd_kafka_mock_topic_find(mcluster, rko->rko_u.mock.name)) return RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS; @@ -1878,21 +1873,20 @@ rd_kafka_mock_cluster_cmd (rd_kafka_mock_cluster_t *mcluster, break; case RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR: - mtopic = rd_kafka_mock_topic_get(mcluster, - rko->rko_u.mock.name, -1); + mtopic = + rd_kafka_mock_topic_get(mcluster, rko->rko_u.mock.name, -1); mtopic->err = rko->rko_u.mock.err; break; case RD_KAFKA_MOCK_CMD_PART_SET_LEADER: - mpart = rd_kafka_mock_partition_get(mcluster, - rko->rko_u.mock.name, - rko->rko_u.mock.partition); + mpart = rd_kafka_mock_partition_get( + mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition); if (!mpart) return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; if (rko->rko_u.mock.broker_id != -1) { mrkb = rd_kafka_mock_broker_find( - mcluster, rko->rko_u.mock.broker_id); + mcluster, rko->rko_u.mock.broker_id); if (!mrkb) return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE; } else { @@ -1900,7 +1894,7 @@ rd_kafka_mock_cluster_cmd (rd_kafka_mock_cluster_t *mcluster, } rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", - "Set %s [%"PRId32"] leader to %"PRId32, + "Set %s [%" PRId32 "] leader to %" PRId32, rko->rko_u.mock.name, rko->rko_u.mock.partition, rko->rko_u.mock.broker_id); @@ -1908,15 +1902,15 @@ rd_kafka_mock_cluster_cmd (rd_kafka_mock_cluster_t *mcluster, break; case RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER: - mpart = rd_kafka_mock_partition_get(mcluster, - rko->rko_u.mock.name, - rko->rko_u.mock.partition); + mpart = rd_kafka_mock_partition_get( + mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition); if (!mpart) return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", - "Set %s [%"PRId32"] 
preferred follower " - "to %"PRId32, + "Set %s [%" PRId32 + "] preferred follower " + "to %" PRId32, rko->rko_u.mock.name, rko->rko_u.mock.partition, rko->rko_u.mock.broker_id); @@ -1924,15 +1918,15 @@ rd_kafka_mock_cluster_cmd (rd_kafka_mock_cluster_t *mcluster, break; case RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS: - mpart = rd_kafka_mock_partition_get(mcluster, - rko->rko_u.mock.name, - rko->rko_u.mock.partition); + mpart = rd_kafka_mock_partition_get( + mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition); if (!mpart) return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", - "Set %s [%"PRId32"] follower " - "watermark offsets to %"PRId64"..%"PRId64, + "Set %s [%" PRId32 + "] follower " + "watermark offsets to %" PRId64 "..%" PRId64, rko->rko_u.mock.name, rko->rko_u.mock.partition, rko->rko_u.mock.lo, rko->rko_u.mock.hi); @@ -1945,10 +1939,10 @@ rd_kafka_mock_cluster_cmd (rd_kafka_mock_cluster_t *mcluster, } if (rko->rko_u.mock.hi == -1) { - mpart->follower_end_offset = mpart->end_offset; + mpart->follower_end_offset = mpart->end_offset; mpart->update_follower_end_offset = rd_true; } else { - mpart->follower_end_offset = rko->rko_u.mock.hi; + mpart->follower_end_offset = rko->rko_u.mock.hi; mpart->update_follower_end_offset = rd_false; } break; @@ -1994,8 +1988,7 @@ rd_kafka_mock_cluster_cmd (rd_kafka_mock_cluster_t *mcluster, break; case RD_KAFKA_MOCK_CMD_COORD_SET: - if (!rd_kafka_mock_coord_set(mcluster, - rko->rko_u.mock.name, + if (!rd_kafka_mock_coord_set(mcluster, rko->rko_u.mock.name, rko->rko_u.mock.str, rko->rko_u.mock.broker_id)) return RD_KAFKA_RESP_ERR__INVALID_ARG; @@ -2006,10 +1999,10 @@ rd_kafka_mock_cluster_cmd (rd_kafka_mock_cluster_t *mcluster, rko->rko_u.mock.partition >= RD_KAFKAP__NUM) return RD_KAFKA_RESP_ERR__INVALID_ARG; - mcluster->api_handlers[(int)rko->rko_u.mock.partition]. - MinVersion = (int16_t)rko->rko_u.mock.lo; - mcluster->api_handlers[(int)rko->rko_u.mock.partition]. 
- MaxVersion = (int16_t)rko->rko_u.mock.hi; + mcluster->api_handlers[(int)rko->rko_u.mock.partition] + .MinVersion = (int16_t)rko->rko_u.mock.lo; + mcluster->api_handlers[(int)rko->rko_u.mock.partition] + .MaxVersion = (int16_t)rko->rko_u.mock.hi; break; default: @@ -2022,14 +2015,15 @@ rd_kafka_mock_cluster_cmd (rd_kafka_mock_cluster_t *mcluster, static rd_kafka_op_res_t -rd_kafka_mock_cluster_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque) { +rd_kafka_mock_cluster_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { rd_kafka_mock_cluster_t *mcluster = opaque; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - switch ((int)rko->rko_type) - { + switch ((int)rko->rko_type) { case RD_KAFKA_OP_TERMINATE: mcluster->run = rd_false; break; @@ -2052,8 +2046,7 @@ rd_kafka_mock_cluster_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, /** * @brief Destroy cluster (internal) */ -static void -rd_kafka_mock_cluster_destroy0 (rd_kafka_mock_cluster_t *mcluster) { +static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster) { rd_kafka_mock_topic_t *mtopic; rd_kafka_mock_broker_t *mrkb; rd_kafka_mock_cgrp_t *mcgrp; @@ -2114,7 +2107,7 @@ rd_kafka_mock_cluster_destroy0 (rd_kafka_mock_cluster_t *mcluster) { -void rd_kafka_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster) { +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster) { int res; rd_kafka_op_t *rko; @@ -2136,27 +2129,26 @@ void rd_kafka_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster) { -rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk, - int broker_cnt) { +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, + int broker_cnt) { rd_kafka_mock_cluster_t *mcluster; rd_kafka_mock_broker_t *mrkb; int i, r; size_t bootstraps_len = 0; size_t of; - mcluster = 
rd_calloc(1, sizeof(*mcluster)); + mcluster = rd_calloc(1, sizeof(*mcluster)); mcluster->rk = rk; - mcluster->dummy_rkb = rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, - RD_KAFKA_PROTO_PLAINTEXT, - "mock", 0, - RD_KAFKA_NODEID_UA); - rd_snprintf(mcluster->id, sizeof(mcluster->id), - "mockCluster%lx", (intptr_t)mcluster >> 2); + mcluster->dummy_rkb = + rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, RD_KAFKA_PROTO_PLAINTEXT, + "mock", 0, RD_KAFKA_NODEID_UA); + rd_snprintf(mcluster->id, sizeof(mcluster->id), "mockCluster%lx", + (intptr_t)mcluster >> 2); TAILQ_INIT(&mcluster->brokers); - for (i = 1 ; i <= broker_cnt ; i++) { + for (i = 1; i <= broker_cnt; i++) { if (!(mrkb = rd_kafka_mock_broker_new(mcluster, i))) { rd_kafka_mock_cluster_destroy(mcluster); return NULL; @@ -2169,7 +2161,7 @@ rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk, mtx_init(&mcluster->lock, mtx_plain); TAILQ_INIT(&mcluster->topics); - mcluster->defaults.partition_cnt = 4; + mcluster->defaults.partition_cnt = 4; mcluster->defaults.replication_factor = RD_MIN(3, broker_cnt); TAILQ_INIT(&mcluster->cgrps); @@ -2185,8 +2177,8 @@ rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk, /* Use an op queue for controlling the cluster in * a thread-safe manner without locking. 
*/ - mcluster->ops = rd_kafka_q_new(rk); - mcluster->ops->rkq_serve = rd_kafka_mock_cluster_op_serve; + mcluster->ops = rd_kafka_q_new(rk); + mcluster->ops->rkq_serve = rd_kafka_mock_cluster_op_serve; mcluster->ops->rkq_opaque = mcluster; rd_kafka_timers_init(&mcluster->timers, rk, mcluster->ops); @@ -2198,14 +2190,13 @@ rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk, } else { const char onebyte = 1; rd_kafka_q_io_event_enable(mcluster->ops, - mcluster->wakeup_fds[1], - &onebyte, sizeof(onebyte)); + mcluster->wakeup_fds[1], &onebyte, + sizeof(onebyte)); } - if (thrd_create(&mcluster->thread, - rd_kafka_mock_cluster_thread_main, mcluster) != - thrd_success) { + if (thrd_create(&mcluster->thread, rd_kafka_mock_cluster_thread_main, + mcluster) != thrd_success) { rd_kafka_log(rk, LOG_CRIT, "MOCK", "Failed to create mock cluster thread: %s", rd_strerror(errno)); @@ -2216,12 +2207,10 @@ rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk, /* Construct bootstrap.servers list */ mcluster->bootstraps = rd_malloc(bootstraps_len + 1); - of = 0; + of = 0; TAILQ_FOREACH(mrkb, &mcluster->brokers, link) { - r = rd_snprintf(&mcluster->bootstraps[of], - bootstraps_len - of, - "%s%s:%d", - of > 0 ? "," : "", + r = rd_snprintf(&mcluster->bootstraps[of], bootstraps_len - of, + "%s%s:%d", of > 0 ? 
"," : "", mrkb->advertised_listener, mrkb->port); of += r; rd_assert(of < bootstraps_len); @@ -2238,17 +2227,16 @@ rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk, rd_kafka_t * -rd_kafka_mock_cluster_handle (const rd_kafka_mock_cluster_t *mcluster) { +rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster) { return (rd_kafka_t *)mcluster->rk; } -rd_kafka_mock_cluster_t * -rd_kafka_handle_mock_cluster (const rd_kafka_t *rk) { +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk) { return (rd_kafka_mock_cluster_t *)rk->rk_mock.cluster; } const char * -rd_kafka_mock_cluster_bootstraps (const rd_kafka_mock_cluster_t *mcluster) { +rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster) { return mcluster->bootstraps; } diff --git a/src/rdkafka_mock.h b/src/rdkafka_mock.h index 915ba67a38..006ffad23e 100644 --- a/src/rdkafka_mock.h +++ b/src/rdkafka_mock.h @@ -92,15 +92,15 @@ typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t; * to operate as usual. */ RD_EXPORT -rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk, - int broker_cnt); +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, + int broker_cnt); /** * @brief Destroy mock cluster. */ RD_EXPORT -void rd_kafka_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster); +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster); @@ -109,7 +109,7 @@ void rd_kafka_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster); * rd_kafka_mock_cluster_new(). */ RD_EXPORT rd_kafka_t * -rd_kafka_mock_cluster_handle (const rd_kafka_mock_cluster_t *mcluster); +rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster); /** @@ -118,7 +118,7 @@ rd_kafka_mock_cluster_handle (const rd_kafka_mock_cluster_t *mcluster); * or NULL if no such instance. 
*/ RD_EXPORT rd_kafka_mock_cluster_t * -rd_kafka_handle_mock_cluster (const rd_kafka_t *rk); +rd_kafka_handle_mock_cluster(const rd_kafka_t *rk); @@ -126,15 +126,15 @@ rd_kafka_handle_mock_cluster (const rd_kafka_t *rk); * @returns the mock cluster's bootstrap.servers list */ RD_EXPORT const char * -rd_kafka_mock_cluster_bootstraps (const rd_kafka_mock_cluster_t *mcluster); +rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster); /** * @brief Clear the cluster's error state for the given \p ApiKey. */ RD_EXPORT -void rd_kafka_mock_clear_request_errors (rd_kafka_mock_cluster_t *mcluster, - int16_t ApiKey); +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey); /** @@ -152,8 +152,10 @@ void rd_kafka_mock_clear_request_errors (rd_kafka_mock_cluster_t *mcluster, * requests. */ RD_EXPORT -void rd_kafka_mock_push_request_errors (rd_kafka_mock_cluster_t *mcluster, - int16_t ApiKey, size_t cnt, ...); +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + size_t cnt, + ...); /** @@ -161,10 +163,10 @@ void rd_kafka_mock_push_request_errors (rd_kafka_mock_cluster_t *mcluster, * an array of errors. */ RD_EXPORT void -rd_kafka_mock_push_request_errors_array (rd_kafka_mock_cluster_t *mcluster, - int16_t ApiKey, - size_t cnt, - const rd_kafka_resp_err_t *errors); +rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + size_t cnt, + const rd_kafka_resp_err_t *errors); /** @@ -184,9 +186,11 @@ rd_kafka_mock_push_request_errors_array (rd_kafka_mock_cluster_t *mcluster, * @remark The broker errors take precedence over the cluster errors. 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_broker_push_request_error_rtts (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id, - int16_t ApiKey, size_t cnt, ...); +rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + int16_t ApiKey, + size_t cnt, + ...); /** @@ -195,9 +199,9 @@ rd_kafka_mock_broker_push_request_error_rtts (rd_kafka_mock_cluster_t *mcluster, * Currently only used for TopicMetadataRequest and AddPartitionsToTxnRequest. */ RD_EXPORT -void rd_kafka_mock_topic_set_error (rd_kafka_mock_cluster_t *mcluster, - const char *topic, - rd_kafka_resp_err_t err); +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + rd_kafka_resp_err_t err); /** @@ -210,9 +214,10 @@ void rd_kafka_mock_topic_set_error (rd_kafka_mock_cluster_t *mcluster, * mock broker. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_topic_create (rd_kafka_mock_cluster_t *mcluster, - const char *topic, int partition_cnt, - int replication_factor); +rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt, + int replication_factor); /** @@ -224,9 +229,10 @@ rd_kafka_mock_topic_create (rd_kafka_mock_cluster_t *mcluster, * partition leader-less. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_partition_set_leader (rd_kafka_mock_cluster_t *mcluster, - const char *topic, int32_t partition, - int32_t broker_id); +rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t partition, + int32_t broker_id); /** * @brief Sets the partition's preferred replica / follower. @@ -236,9 +242,10 @@ rd_kafka_mock_partition_set_leader (rd_kafka_mock_cluster_t *mcluster, * \p broker_id does not need to point to an existing broker. 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_partition_set_follower (rd_kafka_mock_cluster_t *mcluster, - const char *topic, int32_t partition, - int32_t broker_id); +rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t partition, + int32_t broker_id); /** * @brief Sets the partition's preferred replica / follower low and high @@ -250,10 +257,11 @@ rd_kafka_mock_partition_set_follower (rd_kafka_mock_cluster_t *mcluster, * watermark. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_partition_set_follower_wmarks (rd_kafka_mock_cluster_t *mcluster, - const char *topic, - int32_t partition, - int64_t lo, int64_t hi); +rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t partition, + int64_t lo, + int64_t hi); /** @@ -261,31 +269,33 @@ rd_kafka_mock_partition_set_follower_wmarks (rd_kafka_mock_cluster_t *mcluster, * This does NOT trigger leader change. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_broker_set_down (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id); +rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id); /** * @brief Makes the broker accept connections again. * This does NOT trigger leader change. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_broker_set_up (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id); +rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id); /** * @brief Set broker round-trip-time delay in milliseconds. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_broker_set_rtt (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id, int rtt_ms); +rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + int rtt_ms); /** * @brief Sets the broker's rack as reported in Metadata to the client. 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_broker_set_rack (rd_kafka_mock_cluster_t *mcluster, - int32_t broker_id, const char *rack); +rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + const char *rack); @@ -298,9 +308,10 @@ rd_kafka_mock_broker_set_rack (rd_kafka_mock_cluster_t *mcluster, * @param broker_id The new coordinator, does not have to be a valid broker. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_coordinator_set (rd_kafka_mock_cluster_t *mcluster, - const char *key_type, const char *key, - int32_t broker_id); +rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, + const char *key_type, + const char *key, + int32_t broker_id); @@ -318,9 +329,10 @@ rd_kafka_mock_coordinator_set (rd_kafka_mock_cluster_t *mcluster, * @param MinVersion Maximum version supported (or -1 to disable). */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_mock_set_apiversion (rd_kafka_mock_cluster_t *mcluster, - int16_t ApiKey, - int16_t MinVersion, int16_t MaxVersion); +rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + int16_t MinVersion, + int16_t MaxVersion); /**@}*/ diff --git a/src/rdkafka_mock_cgrp.c b/src/rdkafka_mock_cgrp.c index c734b1a686..8f71fb48c9 100644 --- a/src/rdkafka_mock_cgrp.c +++ b/src/rdkafka_mock_cgrp.c @@ -37,23 +37,18 @@ static const char *rd_kafka_mock_cgrp_state_names[] = { - "Empty", - "Joining", - "Syncing", - "Rebalancing", - "Up" -}; + "Empty", "Joining", "Syncing", "Rebalancing", "Up"}; -static void rd_kafka_mock_cgrp_rebalance (rd_kafka_mock_cgrp_t *mcgrp, - const char *reason); +static void rd_kafka_mock_cgrp_rebalance(rd_kafka_mock_cgrp_t *mcgrp, + const char *reason); static void -rd_kafka_mock_cgrp_member_destroy (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member); +rd_kafka_mock_cgrp_member_destroy(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member); -static void rd_kafka_mock_cgrp_set_state (rd_kafka_mock_cgrp_t *mcgrp, - 
unsigned int new_state, - const char *reason) { +static void rd_kafka_mock_cgrp_set_state(rd_kafka_mock_cgrp_t *mcgrp, + unsigned int new_state, + const char *reason) { if (mcgrp->state == new_state) return; @@ -71,8 +66,8 @@ static void rd_kafka_mock_cgrp_set_state (rd_kafka_mock_cgrp_t *mcgrp, /** * @brief Mark member as active (restart session timer) */ -void rd_kafka_mock_cgrp_member_active (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member) { +void rd_kafka_mock_cgrp_member_active(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member) { rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK", "Marking mock consumer group member %s as active", member->id); @@ -86,15 +81,14 @@ void rd_kafka_mock_cgrp_member_active (rd_kafka_mock_cgrp_t *mcgrp, * @param member may be NULL. */ rd_kafka_resp_err_t -rd_kafka_mock_cgrp_check_state (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member, - const rd_kafka_buf_t *request, - int32_t generation_id) { - int16_t ApiKey = request->rkbuf_reqhdr.ApiKey; - rd_bool_t has_generation_id = - ApiKey == RD_KAFKAP_SyncGroup || - ApiKey == RD_KAFKAP_Heartbeat || - ApiKey == RD_KAFKAP_OffsetCommit; +rd_kafka_mock_cgrp_check_state(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + const rd_kafka_buf_t *request, + int32_t generation_id) { + int16_t ApiKey = request->rkbuf_reqhdr.ApiKey; + rd_bool_t has_generation_id = ApiKey == RD_KAFKAP_SyncGroup || + ApiKey == RD_KAFKAP_Heartbeat || + ApiKey == RD_KAFKAP_OffsetCommit; if (has_generation_id && generation_id != mcgrp->generation_id) return RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION; @@ -102,8 +96,7 @@ rd_kafka_mock_cgrp_check_state (rd_kafka_mock_cgrp_t *mcgrp, if (ApiKey == RD_KAFKAP_OffsetCommit && !member) return RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID; - switch (mcgrp->state) - { + switch (mcgrp->state) { case RD_KAFKA_MOCK_CGRP_STATE_EMPTY: if (ApiKey == RD_KAFKAP_JoinGroup) return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -148,10 +141,10 @@ 
rd_kafka_mock_cgrp_check_state (rd_kafka_mock_cgrp_t *mcgrp, /** * @brief Set a member's assignment (from leader's SyncGroupRequest) */ -void -rd_kafka_mock_cgrp_member_assignment_set (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member, - const rd_kafkap_bytes_t *Metadata) { +void rd_kafka_mock_cgrp_member_assignment_set( + rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + const rd_kafkap_bytes_t *Metadata) { if (member->assignment) { rd_assert(mcgrp->assignment_cnt > 0); mcgrp->assignment_cnt--; @@ -169,8 +162,8 @@ rd_kafka_mock_cgrp_member_assignment_set (rd_kafka_mock_cgrp_t *mcgrp, /** * @brief Sync done (successfully) or failed, send responses back to members. */ -static void rd_kafka_mock_cgrp_sync_done (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_resp_err_t err) { +static void rd_kafka_mock_cgrp_sync_done(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_resp_err_t err) { rd_kafka_mock_cgrp_member_t *member; TAILQ_FOREACH(member, &mcgrp->members, link) { @@ -183,9 +176,8 @@ static void rd_kafka_mock_cgrp_sync_done (rd_kafka_mock_cgrp_t *mcgrp, rd_kafka_buf_write_i16(resp, err); /* ErrorCode */ /* MemberState */ - rd_kafka_buf_write_kbytes(resp, - !err ? - member->assignment : NULL); + rd_kafka_buf_write_kbytes( + resp, !err ? member->assignment : NULL); } rd_kafka_mock_cgrp_member_assignment_set(mcgrp, member, NULL); @@ -195,7 +187,7 @@ static void rd_kafka_mock_cgrp_sync_done (rd_kafka_mock_cgrp_t *mcgrp, rd_false); if (resp) rd_kafka_mock_connection_send_response( - member->conn, resp); + member->conn, resp); } else if (resp) { /* Member has disconnected. */ rd_kafka_buf_destroy(resp); @@ -208,7 +200,7 @@ static void rd_kafka_mock_cgrp_sync_done (rd_kafka_mock_cgrp_t *mcgrp, * @brief Check if all members have sent SyncGroupRequests, if so, propagate * assignment to members. 
*/ -static void rd_kafka_mock_cgrp_sync_check (rd_kafka_mock_cgrp_t *mcgrp) { +static void rd_kafka_mock_cgrp_sync_check(rd_kafka_mock_cgrp_t *mcgrp) { rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK", "Mock consumer group %s: awaiting %d/%d syncing members " @@ -231,10 +223,10 @@ static void rd_kafka_mock_cgrp_sync_check (rd_kafka_mock_cgrp_t *mcgrp) { * received. */ rd_kafka_resp_err_t -rd_kafka_mock_cgrp_member_sync_set (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member, - rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *resp) { +rd_kafka_mock_cgrp_member_sync_set(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp) { if (mcgrp->state != RD_KAFKA_MOCK_CGRP_STATE_SYNCING) return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS; /* FIXME */ @@ -258,8 +250,8 @@ rd_kafka_mock_cgrp_member_sync_set (rd_kafka_mock_cgrp_t *mcgrp, * @brief Member is explicitly leaving the group (through LeaveGroupRequest) */ rd_kafka_resp_err_t -rd_kafka_mock_cgrp_member_leave (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member) { +rd_kafka_mock_cgrp_member_leave(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member) { rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK", "Member %s is leaving group %s", member->id, mcgrp->id); @@ -274,11 +266,11 @@ rd_kafka_mock_cgrp_member_leave (rd_kafka_mock_cgrp_t *mcgrp, /** * @brief Destroys/frees an array of protocols, including the array itself. 
*/ -void rd_kafka_mock_cgrp_protos_destroy (rd_kafka_mock_cgrp_proto_t *protos, - int proto_cnt) { +void rd_kafka_mock_cgrp_protos_destroy(rd_kafka_mock_cgrp_proto_t *protos, + int proto_cnt) { int i; - for (i = 0 ; i < proto_cnt ; i++) { + for (i = 0; i < proto_cnt; i++) { rd_free(protos[i].name); if (protos[i].metadata) rd_free(protos[i].metadata); @@ -288,13 +280,13 @@ void rd_kafka_mock_cgrp_protos_destroy (rd_kafka_mock_cgrp_proto_t *protos, } static void -rd_kafka_mock_cgrp_rebalance_timer_restart (rd_kafka_mock_cgrp_t *mcgrp, - int timeout_ms); +rd_kafka_mock_cgrp_rebalance_timer_restart(rd_kafka_mock_cgrp_t *mcgrp, + int timeout_ms); /** * @brief Elect consumer group leader and send JoinGroup responses */ -static void rd_kafka_mock_cgrp_elect_leader (rd_kafka_mock_cgrp_t *mcgrp) { +static void rd_kafka_mock_cgrp_elect_leader(rd_kafka_mock_cgrp_t *mcgrp) { rd_kafka_mock_cgrp_member_t *member; rd_assert(mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_JOINING); @@ -321,7 +313,7 @@ static void rd_kafka_mock_cgrp_elect_leader (rd_kafka_mock_cgrp_t *mcgrp) { /* Send JoinGroupResponses to all members */ TAILQ_FOREACH(member, &mcgrp->members, link) { rd_bool_t is_leader = member == mcgrp->leader; - int member_cnt = is_leader ? mcgrp->member_cnt : 0; + int member_cnt = is_leader ? mcgrp->member_cnt : 0; rd_kafka_buf_t *resp; rd_kafka_mock_cgrp_member_t *member2; rd_kafka_mock_connection_t *mconn; @@ -330,9 +322,9 @@ static void rd_kafka_mock_cgrp_elect_leader (rd_kafka_mock_cgrp_t *mcgrp) { * reconnect or time out from the group. 
*/ if (!member->conn || !member->resp) continue; - mconn = member->conn; + mconn = member->conn; member->conn = NULL; - resp = member->resp; + resp = member->resp; member->resp = NULL; rd_assert(resp->rkbuf_reqhdr.ApiKey == RD_KAFKAP_JoinGroup); @@ -350,15 +342,15 @@ static void rd_kafka_mock_cgrp_elect_leader (rd_kafka_mock_cgrp_t *mcgrp) { rd_kafka_buf_write_str(resp, member2->id, -1); if (resp->rkbuf_reqhdr.ApiVersion >= 5) rd_kafka_buf_write_str( - resp, - member2->group_instance_id, -1); + resp, member2->group_instance_id, + -1); /* FIXME: look up correct protocol name */ rd_assert(!rd_kafkap_str_cmp_str( - member2->protos[0].name, - mcgrp->protocol_name)); + member2->protos[0].name, + mcgrp->protocol_name)); rd_kafka_buf_write_kbytes( - resp, member2->protos[0].metadata); + resp, member2->protos[0].metadata); } } @@ -385,8 +377,8 @@ static void rd_kafka_mock_cgrp_elect_leader (rd_kafka_mock_cgrp_t *mcgrp) { /** * @brief Trigger group rebalance. */ -static void rd_kafka_mock_cgrp_rebalance (rd_kafka_mock_cgrp_t *mcgrp, - const char *reason) { +static void rd_kafka_mock_cgrp_rebalance(rd_kafka_mock_cgrp_t *mcgrp, + const char *reason) { int timeout_ms; if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_JOINING) @@ -402,14 +394,14 @@ static void rd_kafka_mock_cgrp_rebalance (rd_kafka_mock_cgrp_t *mcgrp, else /* Let the rebalance delay be a bit shorter than the * session timeout so that we don't time out waiting members * who are also subject to the session timeout. */ - timeout_ms = mcgrp->session_timeout_ms > 1000 ? - mcgrp->session_timeout_ms - 1000 : - mcgrp->session_timeout_ms; + timeout_ms = mcgrp->session_timeout_ms > 1000 + ? 
mcgrp->session_timeout_ms - 1000 + : mcgrp->session_timeout_ms; if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_SYNCING) /* Abort current Syncing state */ rd_kafka_mock_cgrp_sync_done( - mcgrp, RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS); + mcgrp, RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS); rd_kafka_mock_cgrp_set_state(mcgrp, RD_KAFKA_MOCK_CGRP_STATE_JOINING, reason); @@ -419,13 +411,12 @@ static void rd_kafka_mock_cgrp_rebalance (rd_kafka_mock_cgrp_t *mcgrp, /** * @brief Consumer group state machine triggered by timer events. */ -static void rd_kafka_mock_cgrp_fsm_timeout (rd_kafka_mock_cgrp_t *mcgrp) { +static void rd_kafka_mock_cgrp_fsm_timeout(rd_kafka_mock_cgrp_t *mcgrp) { rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK", "Mock consumer group %s FSM timeout in state %s", mcgrp->id, rd_kafka_mock_cgrp_state_names[mcgrp->state]); - switch (mcgrp->state) - { + switch (mcgrp->state) { case RD_KAFKA_MOCK_CGRP_STATE_EMPTY: /* No members, do nothing */ break; @@ -435,8 +426,8 @@ static void rd_kafka_mock_cgrp_fsm_timeout (rd_kafka_mock_cgrp_t *mcgrp) { rd_kafka_mock_cgrp_elect_leader(mcgrp); else rd_kafka_mock_cgrp_set_state( - mcgrp, RD_KAFKA_MOCK_CGRP_STATE_EMPTY, - "no members joined"); + mcgrp, RD_KAFKA_MOCK_CGRP_STATE_EMPTY, + "no members joined"); break; case RD_KAFKA_MOCK_CGRP_STATE_SYNCING: @@ -444,12 +435,11 @@ static void rd_kafka_mock_cgrp_fsm_timeout (rd_kafka_mock_cgrp_t *mcgrp) { /* Send error response to all waiting members */ rd_kafka_mock_cgrp_sync_done( - mcgrp, - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS /* FIXME */); + mcgrp, RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS /* FIXME */); rd_kafka_mock_cgrp_set_state( - mcgrp, RD_KAFKA_MOCK_CGRP_STATE_REBALANCING, - "timed out waiting for all members to synchronize"); + mcgrp, RD_KAFKA_MOCK_CGRP_STATE_REBALANCING, + "timed out waiting for all members to synchronize"); break; case RD_KAFKA_MOCK_CGRP_STATE_REBALANCING: @@ -467,8 +457,8 @@ static void rd_kafka_mock_cgrp_fsm_timeout (rd_kafka_mock_cgrp_t *mcgrp) { } } 
-static void rd_kafka_mcgrp_rebalance_timer_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_mcgrp_rebalance_timer_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_mock_cgrp_t *mcgrp = arg; rd_kafka_mock_cgrp_fsm_timeout(mcgrp); @@ -479,21 +469,17 @@ static void rd_kafka_mcgrp_rebalance_timer_cb (rd_kafka_timers_t *rkts, * @brief Restart the rebalance timer, postponing leader election. */ static void -rd_kafka_mock_cgrp_rebalance_timer_restart (rd_kafka_mock_cgrp_t *mcgrp, - int timeout_ms) { - rd_kafka_timer_start_oneshot(&mcgrp->cluster->timers, - &mcgrp->rebalance_tmr, - rd_true, - timeout_ms * 1000, - rd_kafka_mcgrp_rebalance_timer_cb, - mcgrp); - +rd_kafka_mock_cgrp_rebalance_timer_restart(rd_kafka_mock_cgrp_t *mcgrp, + int timeout_ms) { + rd_kafka_timer_start_oneshot( + &mcgrp->cluster->timers, &mcgrp->rebalance_tmr, rd_true, + timeout_ms * 1000, rd_kafka_mcgrp_rebalance_timer_cb, mcgrp); } static void -rd_kafka_mock_cgrp_member_destroy (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member) { +rd_kafka_mock_cgrp_member_destroy(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member) { rd_assert(mcgrp->member_cnt > 0); TAILQ_REMOVE(&mcgrp->members, member, link); mcgrp->member_cnt--; @@ -518,8 +504,8 @@ rd_kafka_mock_cgrp_member_destroy (rd_kafka_mock_cgrp_t *mcgrp, * @brief Find member in group. 
*/ rd_kafka_mock_cgrp_member_t * -rd_kafka_mock_cgrp_member_find (const rd_kafka_mock_cgrp_t *mcgrp, - const rd_kafkap_str_t *MemberId) { +rd_kafka_mock_cgrp_member_find(const rd_kafka_mock_cgrp_t *mcgrp, + const rd_kafkap_str_t *MemberId) { const rd_kafka_mock_cgrp_member_t *member; TAILQ_FOREACH(member, &mcgrp->members, link) { if (!rd_kafkap_str_cmp_str(MemberId, member->id)) @@ -534,14 +520,14 @@ rd_kafka_mock_cgrp_member_find (const rd_kafka_mock_cgrp_t *mcgrp, * @brief Update or add member to consumer group */ rd_kafka_resp_err_t -rd_kafka_mock_cgrp_member_add (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *resp, - const rd_kafkap_str_t *MemberId, - const rd_kafkap_str_t *ProtocolType, - rd_kafka_mock_cgrp_proto_t *protos, - int proto_cnt, - int session_timeout_ms) { +rd_kafka_mock_cgrp_member_add(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp, + const rd_kafkap_str_t *MemberId, + const rd_kafkap_str_t *ProtocolType, + rd_kafka_mock_cgrp_proto_t *protos, + int proto_cnt, + int session_timeout_ms) { rd_kafka_mock_cgrp_member_t *member; rd_kafka_resp_err_t err; @@ -575,7 +561,7 @@ rd_kafka_mock_cgrp_member_add (rd_kafka_mock_cgrp_t *mcgrp, if (member->protos) rd_kafka_mock_cgrp_protos_destroy(member->protos, member->proto_cnt); - member->protos = protos; + member->protos = protos; member->proto_cnt = proto_cnt; rd_assert(!member->resp); @@ -589,16 +575,17 @@ rd_kafka_mock_cgrp_member_add (rd_kafka_mock_cgrp_t *mcgrp, /** * @brief Check if any members have exceeded the session timeout. 
*/ -static void -rd_kafka_mock_cgrp_session_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_mock_cgrp_session_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_mock_cgrp_t *mcgrp = arg; rd_kafka_mock_cgrp_member_t *member, *tmp; - rd_ts_t now = rd_clock(); + rd_ts_t now = rd_clock(); int timeout_cnt = 0; TAILQ_FOREACH_SAFE(member, &mcgrp->members, link, tmp) { if (member->ts_last_activity + - (mcgrp->session_timeout_ms * 1000) > now) + (mcgrp->session_timeout_ms * 1000) > + now) continue; rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK", @@ -614,15 +601,15 @@ rd_kafka_mock_cgrp_session_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { } -void rd_kafka_mock_cgrp_destroy (rd_kafka_mock_cgrp_t *mcgrp) { +void rd_kafka_mock_cgrp_destroy(rd_kafka_mock_cgrp_t *mcgrp) { rd_kafka_mock_cgrp_member_t *member; TAILQ_REMOVE(&mcgrp->cluster->cgrps, mcgrp, link); - rd_kafka_timer_stop(&mcgrp->cluster->timers, - &mcgrp->rebalance_tmr, rd_true); - rd_kafka_timer_stop(&mcgrp->cluster->timers, - &mcgrp->session_tmr, rd_true); + rd_kafka_timer_stop(&mcgrp->cluster->timers, &mcgrp->rebalance_tmr, + rd_true); + rd_kafka_timer_stop(&mcgrp->cluster->timers, &mcgrp->session_tmr, + rd_true); rd_free(mcgrp->id); rd_free(mcgrp->protocol_type); if (mcgrp->protocol_name) @@ -633,9 +620,8 @@ void rd_kafka_mock_cgrp_destroy (rd_kafka_mock_cgrp_t *mcgrp) { } -rd_kafka_mock_cgrp_t * -rd_kafka_mock_cgrp_find (rd_kafka_mock_cluster_t *mcluster, - const rd_kafkap_str_t *GroupId) { +rd_kafka_mock_cgrp_t *rd_kafka_mock_cgrp_find(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *GroupId) { rd_kafka_mock_cgrp_t *mcgrp; TAILQ_FOREACH(mcgrp, &mcluster->cgrps, link) { if (!rd_kafkap_str_cmp_str(GroupId, mcgrp->id)) @@ -650,9 +636,9 @@ rd_kafka_mock_cgrp_find (rd_kafka_mock_cluster_t *mcluster, * @brief Find or create a consumer group */ rd_kafka_mock_cgrp_t * -rd_kafka_mock_cgrp_get (rd_kafka_mock_cluster_t *mcluster, - const rd_kafkap_str_t *GroupId, - const rd_kafkap_str_t 
*ProtocolType) { +rd_kafka_mock_cgrp_get(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *GroupId, + const rd_kafkap_str_t *ProtocolType) { rd_kafka_mock_cgrp_t *mcgrp; mcgrp = rd_kafka_mock_cgrp_find(mcluster, GroupId); @@ -663,14 +649,13 @@ rd_kafka_mock_cgrp_get (rd_kafka_mock_cluster_t *mcluster, mcgrp = rd_calloc(1, sizeof(*mcgrp)); - mcgrp->cluster = mcluster; - mcgrp->id = RD_KAFKAP_STR_DUP(GroupId); + mcgrp->cluster = mcluster; + mcgrp->id = RD_KAFKAP_STR_DUP(GroupId); mcgrp->protocol_type = RD_KAFKAP_STR_DUP(ProtocolType); mcgrp->generation_id = 1; TAILQ_INIT(&mcgrp->members); - rd_kafka_timer_start(&mcluster->timers, - &mcgrp->session_tmr, - 1000*1000 /*1s*/, + rd_kafka_timer_start(&mcluster->timers, &mcgrp->session_tmr, + 1000 * 1000 /*1s*/, rd_kafka_mock_cgrp_session_tmr_cb, mcgrp); TAILQ_INSERT_TAIL(&mcluster->cgrps, mcgrp, link); @@ -683,8 +668,8 @@ rd_kafka_mock_cgrp_get (rd_kafka_mock_cluster_t *mcluster, * @brief A client connection closed, check if any cgrp has any state * for this connection that needs to be cleared. 
*/ -void rd_kafka_mock_cgrps_connection_closed (rd_kafka_mock_cluster_t *mcluster, - rd_kafka_mock_connection_t *mconn) { +void rd_kafka_mock_cgrps_connection_closed(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_mock_connection_t *mconn) { rd_kafka_mock_cgrp_t *mcgrp; TAILQ_FOREACH(mcgrp, &mcluster->cgrps, link) { diff --git a/src/rdkafka_mock_handlers.c b/src/rdkafka_mock_handlers.c index 54e963ba7b..eb6e46f1c6 100644 --- a/src/rdkafka_mock_handlers.c +++ b/src/rdkafka_mock_handlers.c @@ -41,12 +41,11 @@ - /** * @brief Handle ProduceRequest */ -static int rd_kafka_mock_handle_Produce (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_Produce(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { const rd_bool_t log_decode_errors = rd_true; rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); @@ -89,7 +88,7 @@ static int rd_kafka_mock_handle_Produce (rd_kafka_mock_connection_t *mconn, rd_kafka_mock_partition_t *mpart = NULL; rd_kafkap_bytes_t records; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - int64_t BaseOffset = -1; + int64_t BaseOffset = -1; rd_kafka_buf_read_i32(rkbuf, &Partition); @@ -107,12 +106,13 @@ static int rd_kafka_mock_handle_Produce (rd_kafka_mock_connection_t *mconn, else if (!mpart) err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; else if (mpart->leader != mconn->broker) - err = RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; + err = + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; /* Append to partition log */ if (!err) err = rd_kafka_mock_partition_log_append( - mpart, &records, &BaseOffset); + mpart, &records, &BaseOffset); /* Response: ErrorCode */ rd_kafka_buf_write_i16(resp, err); @@ -141,7 +141,7 @@ static int rd_kafka_mock_handle_Produce (rd_kafka_mock_connection_t *mconn, if (rkbuf->rkbuf_reqhdr.ApiVersion >= 6) { /* Response: LogStartOffset */ rd_kafka_buf_write_i64( - resp, mpart->start_offset); + resp, 
mpart->start_offset); } } } @@ -156,7 +156,7 @@ static int rd_kafka_mock_handle_Produce (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -166,14 +166,14 @@ static int rd_kafka_mock_handle_Produce (rd_kafka_mock_connection_t *mconn, /** * @brief Handle FetchRequest */ -static int rd_kafka_mock_handle_Fetch (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_Fetch(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { const rd_bool_t log_decode_errors = rd_true; rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); rd_kafka_resp_err_t all_err; int32_t ReplicaId, MaxWait, MinBytes, MaxBytes = -1, SessionId = -1, - Epoch, TopicsCnt; + Epoch, TopicsCnt; int8_t IsolationLevel; size_t totsize = 0; @@ -230,9 +230,9 @@ static int rd_kafka_mock_handle_Fetch (rd_kafka_mock_connection_t *mconn, int32_t Partition, CurrentLeaderEpoch, PartMaxBytes; int64_t FetchOffset, LogStartOffset; rd_kafka_mock_partition_t *mpart = NULL; - rd_kafka_resp_err_t err = all_err; + rd_kafka_resp_err_t err = all_err; rd_bool_t on_follower; - size_t partsize = 0; + size_t partsize = 0; const rd_kafka_mock_msgset_t *mset = NULL; rd_kafka_buf_read_i32(rkbuf, &Partition); @@ -257,27 +257,28 @@ static int rd_kafka_mock_handle_Fetch (rd_kafka_mock_connection_t *mconn, /* Fetch is directed at follower and this is * the follower broker. 
*/ - on_follower = mpart && - mpart->follower_id == mconn->broker->id; + on_follower = + mpart && mpart->follower_id == mconn->broker->id; if (!all_err && !mpart) err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; - else if (!all_err && - mpart->leader != mconn->broker && + else if (!all_err && mpart->leader != mconn->broker && !on_follower) - err = RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; + err = + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; /* Find MessageSet for FetchOffset */ if (!err && FetchOffset != mpart->end_offset) { if (on_follower && FetchOffset <= mpart->end_offset && FetchOffset > mpart->follower_end_offset) - err = RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE; + err = + RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE; else if (!(mset = rd_kafka_mock_msgset_find( - mpart, - FetchOffset, - on_follower))) - err = RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE; + mpart, FetchOffset, + on_follower))) + err = + RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE; } @@ -285,27 +286,26 @@ static int rd_kafka_mock_handle_Fetch (rd_kafka_mock_connection_t *mconn, rd_kafka_buf_write_i16(resp, err); /* Response: Highwatermark */ - rd_kafka_buf_write_i64(resp, - mpart ? - (on_follower ? - mpart->follower_end_offset : - mpart->end_offset) : -1); + rd_kafka_buf_write_i64( + resp, + mpart ? (on_follower ? mpart->follower_end_offset + : mpart->end_offset) + : -1); if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) { /* Response: LastStableOffset */ - rd_kafka_buf_write_i64(resp, - mpart ? - mpart->end_offset : -1); + rd_kafka_buf_write_i64( + resp, mpart ? mpart->end_offset : -1); } if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5) { /* Response: LogStartOffset */ rd_kafka_buf_write_i64( - resp, - !mpart ? -1 : - (on_follower ? - mpart->follower_start_offset : - mpart->start_offset)); + resp, + !mpart ? -1 + : (on_follower + ? 
mpart->follower_start_offset + : mpart->start_offset)); } if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) { @@ -316,26 +316,25 @@ static int rd_kafka_mock_handle_Fetch (rd_kafka_mock_connection_t *mconn, if (rkbuf->rkbuf_reqhdr.ApiVersion >= 11) { int32_t PreferredReadReplica = - mpart && - mpart->leader == mconn->broker && - mpart->follower_id != -1 ? - mpart->follower_id : -1; + mpart && mpart->leader == mconn->broker && + mpart->follower_id != -1 + ? mpart->follower_id + : -1; /* Response: PreferredReplica */ - rd_kafka_buf_write_i32( - resp, PreferredReadReplica); + rd_kafka_buf_write_i32(resp, + PreferredReadReplica); if (PreferredReadReplica != -1) { /* Don't return any data when * PreferredReadReplica is set */ - mset = NULL; + mset = NULL; MaxWait = 0; } } - if (mset && - partsize < (size_t)PartMaxBytes && + if (mset && partsize < (size_t)PartMaxBytes && totsize < (size_t)MaxBytes) { /* Response: Records */ rd_kafka_buf_write_kbytes(resp, &mset->bytes); @@ -385,19 +384,18 @@ static int rd_kafka_mock_handle_Fetch (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } - /** * @brief Handle ListOffsets */ -static int rd_kafka_mock_handle_ListOffsets (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_ListOffsets(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { const rd_bool_t log_decode_errors = rd_true; rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); @@ -442,7 +440,7 @@ static int rd_kafka_mock_handle_ListOffsets (rd_kafka_mock_connection_t *mconn, int32_t Partition, CurrentLeaderEpoch; int64_t Timestamp, MaxNumOffsets, Offset = -1; rd_kafka_mock_partition_t *mpart = NULL; - rd_kafka_resp_err_t err = all_err; + rd_kafka_resp_err_t err = all_err; rd_kafka_buf_read_i32(rkbuf, &Partition); @@ -464,9 +462,9 @@ static int rd_kafka_mock_handle_ListOffsets (rd_kafka_mock_connection_t 
*mconn, if (!all_err && !mpart) err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; - else if (!all_err && - mpart->leader != mconn->broker) - err = RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; + else if (!all_err && mpart->leader != mconn->broker) + err = + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; /* Response: ErrorCode */ @@ -504,14 +502,12 @@ static int rd_kafka_mock_handle_ListOffsets (rd_kafka_mock_connection_t *mconn, } rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", - "Topic %.*s [%"PRId32"] returning " - "offset %"PRId64" for %s: %s", - RD_KAFKAP_STR_PR(&Topic), - Partition, - Offset, - rd_kafka_offset2str(Timestamp), + "Topic %.*s [%" PRId32 + "] returning " + "offset %" PRId64 " for %s: %s", + RD_KAFKAP_STR_PR(&Topic), Partition, + Offset, rd_kafka_offset2str(Timestamp), rd_kafka_err2str(err)); - } } @@ -520,7 +516,7 @@ static int rd_kafka_mock_handle_ListOffsets (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -529,8 +525,8 @@ static int rd_kafka_mock_handle_ListOffsets (rd_kafka_mock_connection_t *mconn, /** * @brief Handle OffsetFetch (fetch committed offsets) */ -static int rd_kafka_mock_handle_OffsetFetch (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_OffsetFetch(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { const rd_bool_t log_decode_errors = rd_true; rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); @@ -577,9 +573,9 @@ static int rd_kafka_mock_handle_OffsetFetch (rd_kafka_mock_connection_t *mconn, while (PartitionCnt-- > 0) { int32_t Partition; - rd_kafka_mock_partition_t *mpart = NULL; + rd_kafka_mock_partition_t *mpart = NULL; const rd_kafka_mock_committed_offset_t *coff = NULL; - rd_kafka_resp_err_t err = all_err; + rd_kafka_resp_err_t err = all_err; rd_kafka_buf_read_i32(rkbuf, &Partition); @@ -595,7 +591,7 @@ static int rd_kafka_mock_handle_OffsetFetch 
(rd_kafka_mock_connection_t *mconn, if (!err) coff = rd_kafka_mock_committed_offset_find( - mpart, &GroupId); + mpart, &GroupId); /* Response: CommittedOffset */ rd_kafka_buf_write_i64(resp, coff ? coff->offset : -1); @@ -614,14 +610,16 @@ static int rd_kafka_mock_handle_OffsetFetch (rd_kafka_mock_connection_t *mconn, if (coff) rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", - "Topic %s [%"PRId32"] returning " - "committed offset %"PRId64 + "Topic %s [%" PRId32 + "] returning " + "committed offset %" PRId64 " for group %s", mtopic->name, mpart->id, coff->offset, coff->group); else rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", - "Topic %.*s [%"PRId32"] has no " + "Topic %.*s [%" PRId32 + "] has no " "committed offset for group %.*s: " "%s", RD_KAFKAP_STR_PR(&Topic), @@ -641,7 +639,7 @@ static int rd_kafka_mock_handle_OffsetFetch (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -651,8 +649,8 @@ static int rd_kafka_mock_handle_OffsetFetch (rd_kafka_mock_connection_t *mconn, /** * @brief Handle OffsetCommit */ -static int rd_kafka_mock_handle_OffsetCommit (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_OffsetCommit(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { const rd_bool_t log_decode_errors = rd_true; rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); @@ -701,13 +699,13 @@ static int rd_kafka_mock_handle_OffsetCommit (rd_kafka_mock_connection_t *mconn, if (!RD_KAFKAP_STR_IS_NULL(&MemberId)) member = rd_kafka_mock_cgrp_member_find( - mcgrp, &MemberId); + mcgrp, &MemberId); if (!member) all_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID; else all_err = rd_kafka_mock_cgrp_check_state( - mcgrp, member, rkbuf, GenerationId); + mcgrp, member, rkbuf, GenerationId); } /* FIXME: also check that partitions are assigned to member */ @@ -736,7 +734,7 @@ static int rd_kafka_mock_handle_OffsetCommit 
(rd_kafka_mock_connection_t *mconn, while (PartitionCnt-- > 0) { int32_t Partition; rd_kafka_mock_partition_t *mpart = NULL; - rd_kafka_resp_err_t err = all_err; + rd_kafka_resp_err_t err = all_err; int64_t CommittedOffset; rd_kafkap_str_t Metadata; @@ -768,9 +766,9 @@ static int rd_kafka_mock_handle_OffsetCommit (rd_kafka_mock_connection_t *mconn, rd_kafka_buf_read_str(rkbuf, &Metadata); if (!err) - rd_kafka_mock_commit_offset( - mpart, &GroupId, CommittedOffset, - &Metadata); + rd_kafka_mock_commit_offset(mpart, &GroupId, + CommittedOffset, + &Metadata); /* Response: ErrorCode */ rd_kafka_buf_write_i16(resp, err); @@ -781,19 +779,18 @@ static int rd_kafka_mock_handle_OffsetCommit (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } - /** * @brief Handle ApiVersionRequest */ -static int rd_kafka_mock_handle_ApiVersion (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf); +static int rd_kafka_mock_handle_ApiVersion(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf); /** @@ -802,11 +799,11 @@ static int rd_kafka_mock_handle_ApiVersion (rd_kafka_mock_connection_t *mconn, * @param mtopic may be NULL */ static void -rd_kafka_mock_buf_write_Metadata_Topic (rd_kafka_buf_t *resp, - int16_t ApiVersion, - const char *topic, - const rd_kafka_mock_topic_t *mtopic, - rd_kafka_resp_err_t err) { +rd_kafka_mock_buf_write_Metadata_Topic(rd_kafka_buf_t *resp, + int16_t ApiVersion, + const char *topic, + const rd_kafka_mock_topic_t *mtopic, + rd_kafka_resp_err_t err) { int i; /* Response: Topics.ErrorCode */ @@ -820,9 +817,8 @@ rd_kafka_mock_buf_write_Metadata_Topic (rd_kafka_buf_t *resp, /* Response: Topics.#Partitions */ rd_kafka_buf_write_i32(resp, mtopic ? 
mtopic->partition_cnt : 0); - for (i = 0 ; mtopic && i < mtopic->partition_cnt ; i++) { - const rd_kafka_mock_partition_t *mpart = - &mtopic->partitions[i]; + for (i = 0; mtopic && i < mtopic->partition_cnt; i++) { + const rd_kafka_mock_partition_t *mpart = &mtopic->partitions[i]; int r; /* Response: ..Partitions.ErrorCode */ @@ -831,8 +827,7 @@ rd_kafka_mock_buf_write_Metadata_Topic (rd_kafka_buf_t *resp, rd_kafka_buf_write_i32(resp, mpart->id); /* Response: ..Partitions.Leader */ rd_kafka_buf_write_i32(resp, - mpart->leader ? - mpart->leader->id : -1); + mpart->leader ? mpart->leader->id : -1); if (ApiVersion >= 7) { /* Response: ..Partitions.LeaderEpoch */ @@ -840,21 +835,15 @@ rd_kafka_mock_buf_write_Metadata_Topic (rd_kafka_buf_t *resp, } /* Response: ..Partitions.#ReplicaNodes */ - rd_kafka_buf_write_i32(resp, - mpart->replica_cnt); - for (r = 0 ; r < mpart->replica_cnt ; r++) - rd_kafka_buf_write_i32( - resp, - mpart->replicas[r]->id); + rd_kafka_buf_write_i32(resp, mpart->replica_cnt); + for (r = 0; r < mpart->replica_cnt; r++) + rd_kafka_buf_write_i32(resp, mpart->replicas[r]->id); /* Response: ..Partitions.#IsrNodes */ /* Let Replicas == ISRs for now */ - rd_kafka_buf_write_i32(resp, - mpart->replica_cnt); - for (r = 0 ; r < mpart->replica_cnt ; r++) - rd_kafka_buf_write_i32( - resp, - mpart->replicas[r]->id); + rd_kafka_buf_write_i32(resp, mpart->replica_cnt); + for (r = 0; r < mpart->replica_cnt; r++) + rd_kafka_buf_write_i32(resp, mpart->replicas[r]->id); if (ApiVersion >= 5) { /* Response: ...OfflineReplicas */ @@ -867,15 +856,15 @@ rd_kafka_mock_buf_write_Metadata_Topic (rd_kafka_buf_t *resp, /** * @brief Handle MetadataRequest */ -static int rd_kafka_mock_handle_Metadata (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_Metadata(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { const rd_bool_t log_decode_errors = rd_true; rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; - rd_bool_t 
AllowAutoTopicCreation = rd_true; + rd_bool_t AllowAutoTopicCreation = rd_true; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); const rd_kafka_mock_broker_t *mrkb; rd_kafka_topic_partition_list_t *requested_topics = NULL; - rd_bool_t list_all_topics = rd_false; + rd_bool_t list_all_topics = rd_false; int32_t TopicsCnt; int i; @@ -918,7 +907,7 @@ static int rd_kafka_mock_handle_Metadata (rd_kafka_mock_connection_t *mconn, else if (rkbuf->rkbuf_reqhdr.ApiVersion == 0 || TopicsCnt == -1) list_all_topics = rd_true; - for (i = 0 ; i < TopicsCnt ; i++) { + for (i = 0; i < TopicsCnt; i++) { rd_kafkap_str_t Topic; char *topic; @@ -948,32 +937,31 @@ static int rd_kafka_mock_handle_Metadata (rd_kafka_mock_connection_t *mconn, TAILQ_FOREACH(mtopic, &mcluster->topics, link) { rd_kafka_mock_buf_write_Metadata_Topic( - resp, rkbuf->rkbuf_reqhdr.ApiVersion, - mtopic->name, mtopic, - RD_KAFKA_RESP_ERR_NO_ERROR); + resp, rkbuf->rkbuf_reqhdr.ApiVersion, mtopic->name, + mtopic, RD_KAFKA_RESP_ERR_NO_ERROR); } } else if (requested_topics) { /* Response: #Topics */ rd_kafka_buf_write_i32(resp, requested_topics->cnt); - for (i = 0 ; i < requested_topics->cnt ; i++) { + for (i = 0; i < requested_topics->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &requested_topics->elems[i]; + &requested_topics->elems[i]; rd_kafka_mock_topic_t *mtopic; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - mtopic = rd_kafka_mock_topic_find(mcluster, - rktpar->topic); + mtopic = + rd_kafka_mock_topic_find(mcluster, rktpar->topic); if (!mtopic && AllowAutoTopicCreation) mtopic = rd_kafka_mock_topic_auto_create( - mcluster, rktpar->topic, -1, &err); + mcluster, rktpar->topic, -1, &err); else if (!mtopic) err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; rd_kafka_mock_buf_write_Metadata_Topic( - resp, rkbuf->rkbuf_reqhdr.ApiVersion, - rktpar->topic, mtopic, err); + resp, rkbuf->rkbuf_reqhdr.ApiVersion, rktpar->topic, + mtopic, err); } if (rkbuf->rkbuf_reqhdr.ApiVersion >= 8) { @@ 
-997,13 +985,12 @@ static int rd_kafka_mock_handle_Metadata (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: if (requested_topics) rd_kafka_topic_partition_list_destroy(requested_topics); rd_kafka_buf_destroy(resp); return -1; - } @@ -1011,13 +998,13 @@ static int rd_kafka_mock_handle_Metadata (rd_kafka_mock_connection_t *mconn, * @brief Handle FindCoordinatorRequest */ static int -rd_kafka_mock_handle_FindCoordinator (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +rd_kafka_mock_handle_FindCoordinator(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; const rd_bool_t log_decode_errors = rd_true; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); rd_kafkap_str_t Key; - int8_t KeyType = RD_KAFKA_COORD_GROUP; + int8_t KeyType = RD_KAFKA_COORD_GROUP; const rd_kafka_mock_broker_t *mrkb = NULL; rd_kafka_resp_err_t err; @@ -1042,8 +1029,7 @@ rd_kafka_mock_handle_FindCoordinator (rd_kafka_mock_connection_t *mconn, err = rd_kafka_mock_next_request_error(mconn, resp); if (!err && RD_KAFKAP_STR_LEN(&Key) > 0) { - mrkb = rd_kafka_mock_cluster_get_coord(mcluster, - KeyType, &Key); + mrkb = rd_kafka_mock_cluster_get_coord(mcluster, KeyType, &Key); rd_assert(mrkb); } @@ -1075,7 +1061,7 @@ rd_kafka_mock_handle_FindCoordinator (rd_kafka_mock_connection_t *mconn, rd_kafka_mock_connection_send_response(mconn, resp); return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -1085,9 +1071,8 @@ rd_kafka_mock_handle_FindCoordinator (rd_kafka_mock_connection_t *mconn, /** * @brief Handle JoinGroupRequest */ -static int -rd_kafka_mock_handle_JoinGroup (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_JoinGroup(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_mock_broker_t *mrkb; const rd_bool_t log_decode_errors = rd_true; @@ 
-1096,7 +1081,7 @@ rd_kafka_mock_handle_JoinGroup (rd_kafka_mock_connection_t *mconn, rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER; int32_t SessionTimeoutMs; int32_t MaxPollIntervalMs = -1; - int32_t ProtocolCnt = 0; + int32_t ProtocolCnt = 0; int32_t i; rd_kafka_resp_err_t err; rd_kafka_mock_cgrp_t *mcgrp; @@ -1114,7 +1099,7 @@ rd_kafka_mock_handle_JoinGroup (rd_kafka_mock_connection_t *mconn, if (ProtocolCnt > 1000) { rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", - "JoinGroupRequest: ProtocolCnt %"PRId32 + "JoinGroupRequest: ProtocolCnt %" PRId32 " > max allowed 1000", ProtocolCnt); rd_kafka_buf_destroy(resp); @@ -1122,12 +1107,12 @@ rd_kafka_mock_handle_JoinGroup (rd_kafka_mock_connection_t *mconn, } protos = rd_malloc(sizeof(*protos) * ProtocolCnt); - for (i = 0 ; i < ProtocolCnt ; i++) { + for (i = 0; i < ProtocolCnt; i++) { rd_kafkap_str_t ProtocolName; rd_kafkap_bytes_t Metadata; rd_kafka_buf_read_str(rkbuf, &ProtocolName); rd_kafka_buf_read_bytes(rkbuf, &Metadata); - protos[i].name = rd_kafkap_str_copy(&ProtocolName); + protos[i].name = rd_kafkap_str_copy(&ProtocolName); protos[i].metadata = rd_kafkap_bytes_copy(&Metadata); } @@ -1143,9 +1128,8 @@ rd_kafka_mock_handle_JoinGroup (rd_kafka_mock_connection_t *mconn, err = rd_kafka_mock_next_request_error(mconn, resp); if (!err) { - mrkb = rd_kafka_mock_cluster_get_coord(mcluster, - RD_KAFKA_COORD_GROUP, - &GroupId); + mrkb = rd_kafka_mock_cluster_get_coord( + mcluster, RD_KAFKA_COORD_GROUP, &GroupId); if (!mrkb) err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; @@ -1154,17 +1138,15 @@ rd_kafka_mock_handle_JoinGroup (rd_kafka_mock_connection_t *mconn, } if (!err) { - mcgrp = rd_kafka_mock_cgrp_get(mcluster, - &GroupId, &ProtocolType); + mcgrp = + rd_kafka_mock_cgrp_get(mcluster, &GroupId, &ProtocolType); rd_assert(mcgrp); /* This triggers an async rebalance, the response will be * sent later. 
*/ err = rd_kafka_mock_cgrp_member_add( - mcgrp, mconn, resp, - &MemberId, &ProtocolType, - protos, ProtocolCnt, - SessionTimeoutMs); + mcgrp, mconn, resp, &MemberId, &ProtocolType, protos, + ProtocolCnt, SessionTimeoutMs); if (!err) { /* .._add() assumes ownership of resp and protos */ protos = NULL; @@ -1176,18 +1158,18 @@ rd_kafka_mock_handle_JoinGroup (rd_kafka_mock_connection_t *mconn, rd_kafka_mock_cgrp_protos_destroy(protos, ProtocolCnt); /* Error case */ - rd_kafka_buf_write_i16(resp, err); /* ErrorCode */ - rd_kafka_buf_write_i32(resp, -1); /* GenerationId */ + rd_kafka_buf_write_i16(resp, err); /* ErrorCode */ + rd_kafka_buf_write_i32(resp, -1); /* GenerationId */ rd_kafka_buf_write_str(resp, NULL, -1); /* ProtocolName */ rd_kafka_buf_write_str(resp, NULL, -1); /* LeaderId */ - rd_kafka_buf_write_kstr(resp, NULL); /* MemberId */ - rd_kafka_buf_write_i32(resp, 0); /* MemberCnt */ + rd_kafka_buf_write_kstr(resp, NULL); /* MemberId */ + rd_kafka_buf_write_i32(resp, 0); /* MemberCnt */ rd_kafka_mock_connection_send_response(mconn, resp); return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); if (protos) rd_kafka_mock_cgrp_protos_destroy(protos, ProtocolCnt); @@ -1198,9 +1180,8 @@ rd_kafka_mock_handle_JoinGroup (rd_kafka_mock_connection_t *mconn, /** * @brief Handle HeartbeatRequest */ -static int -rd_kafka_mock_handle_Heartbeat (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_Heartbeat(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_mock_broker_t *mrkb; const rd_bool_t log_decode_errors = rd_true; @@ -1229,9 +1210,8 @@ rd_kafka_mock_handle_Heartbeat (rd_kafka_mock_connection_t *mconn, /* Inject error, if any */ err = rd_kafka_mock_next_request_error(mconn, resp); if (!err) { - mrkb = rd_kafka_mock_cluster_get_coord(mcluster, - RD_KAFKA_COORD_GROUP, - &GroupId); + mrkb = rd_kafka_mock_cluster_get_coord( + mcluster, 
RD_KAFKA_COORD_GROUP, &GroupId); if (!mrkb) err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; @@ -1264,7 +1244,7 @@ rd_kafka_mock_handle_Heartbeat (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -1273,9 +1253,8 @@ rd_kafka_mock_handle_Heartbeat (rd_kafka_mock_connection_t *mconn, /** * @brief Handle LeaveGroupRequest */ -static int -rd_kafka_mock_handle_LeaveGroup (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_LeaveGroup(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_mock_broker_t *mrkb; const rd_bool_t log_decode_errors = rd_true; @@ -1300,9 +1279,8 @@ rd_kafka_mock_handle_LeaveGroup (rd_kafka_mock_connection_t *mconn, /* Inject error, if any */ err = rd_kafka_mock_next_request_error(mconn, resp); if (!err) { - mrkb = rd_kafka_mock_cluster_get_coord(mcluster, - RD_KAFKA_COORD_GROUP, - &GroupId); + mrkb = rd_kafka_mock_cluster_get_coord( + mcluster, RD_KAFKA_COORD_GROUP, &GroupId); if (!mrkb) err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; @@ -1334,7 +1312,7 @@ rd_kafka_mock_handle_LeaveGroup (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -1344,9 +1322,8 @@ rd_kafka_mock_handle_LeaveGroup (rd_kafka_mock_connection_t *mconn, /** * @brief Handle SyncGroupRequest */ -static int -rd_kafka_mock_handle_SyncGroup (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_SyncGroup(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_mock_broker_t *mrkb; const rd_bool_t log_decode_errors = rd_true; @@ -1356,7 +1333,7 @@ rd_kafka_mock_handle_SyncGroup (rd_kafka_mock_connection_t *mconn, int32_t GenerationId, AssignmentCnt; int32_t i; rd_kafka_resp_err_t err; - rd_kafka_mock_cgrp_t *mcgrp = 
NULL; + rd_kafka_mock_cgrp_t *mcgrp = NULL; rd_kafka_mock_cgrp_member_t *member = NULL; rd_kafka_buf_read_str(rkbuf, &GroupId); @@ -1377,9 +1354,8 @@ rd_kafka_mock_handle_SyncGroup (rd_kafka_mock_connection_t *mconn, /* Inject error, if any */ err = rd_kafka_mock_next_request_error(mconn, resp); if (!err) { - mrkb = rd_kafka_mock_cluster_get_coord(mcluster, - RD_KAFKA_COORD_GROUP, - &GroupId); + mrkb = rd_kafka_mock_cluster_get_coord( + mcluster, RD_KAFKA_COORD_GROUP, &GroupId); if (!mrkb) err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; @@ -1410,12 +1386,14 @@ rd_kafka_mock_handle_SyncGroup (rd_kafka_mock_connection_t *mconn, rd_bool_t is_leader = mcgrp->leader && mcgrp->leader == member; if (AssignmentCnt > 0 && !is_leader) - err = RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; /* FIXME */ + err = + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; /* FIXME + */ else if (AssignmentCnt == 0 && is_leader) err = RD_KAFKA_RESP_ERR_INVALID_PARTITIONS; /* FIXME */ } - for (i = 0 ; i < AssignmentCnt ; i++) { + for (i = 0; i < AssignmentCnt; i++) { rd_kafkap_str_t MemberId2; rd_kafkap_bytes_t Metadata; rd_kafka_mock_cgrp_member_t *member2; @@ -1445,14 +1423,14 @@ rd_kafka_mock_handle_SyncGroup (rd_kafka_mock_connection_t *mconn, } /* Error case */ - rd_kafka_buf_write_i16(resp, err); /* ErrorCode */ + rd_kafka_buf_write_i16(resp, err); /* ErrorCode */ rd_kafka_buf_write_bytes(resp, NULL, -1); /* MemberState */ rd_kafka_mock_connection_send_response(mconn, resp); return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -1463,11 +1441,11 @@ rd_kafka_mock_handle_SyncGroup (rd_kafka_mock_connection_t *mconn, * @brief Generate a unique ProducerID */ static const rd_kafka_pid_t -rd_kafka_mock_pid_new (rd_kafka_mock_cluster_t *mcluster) { +rd_kafka_mock_pid_new(rd_kafka_mock_cluster_t *mcluster) { rd_kafka_pid_t *pid = rd_malloc(sizeof(*pid)); rd_kafka_pid_t ret; - pid->id = rd_jitter(1, 900000) * 1000; + pid->id = rd_jitter(1, 900000) * 1000; pid->epoch = 0; 
mtx_lock(&mcluster->lock); @@ -1483,8 +1461,8 @@ rd_kafka_mock_pid_new (rd_kafka_mock_cluster_t *mcluster) { * @brief Checks if the given pid is known, else returns an error. */ static rd_kafka_resp_err_t -rd_kafka_mock_pid_check (rd_kafka_mock_cluster_t *mcluster, - const rd_kafka_pid_t check_pid) { +rd_kafka_mock_pid_check(rd_kafka_mock_cluster_t *mcluster, + const rd_kafka_pid_t check_pid) { const rd_kafka_pid_t *pid; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; @@ -1506,8 +1484,8 @@ rd_kafka_mock_pid_check (rd_kafka_mock_cluster_t *mcluster, * if the current_pid does not match an existing pid. */ static rd_kafka_resp_err_t -rd_kafka_mock_pid_bump (rd_kafka_mock_cluster_t *mcluster, - rd_kafka_pid_t *current_pid) { +rd_kafka_mock_pid_bump(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_pid_t *current_pid) { rd_kafka_pid_t *pid; mtx_lock(&mcluster->lock); @@ -1526,8 +1504,8 @@ rd_kafka_mock_pid_bump (rd_kafka_mock_cluster_t *mcluster, *current_pid = *pid; mtx_unlock(&mcluster->lock); - rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", - "Bumped PID %s", rd_kafka_pid2str(*current_pid)); + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", "Bumped PID %s", + rd_kafka_pid2str(*current_pid)); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -1537,13 +1515,13 @@ rd_kafka_mock_pid_bump (rd_kafka_mock_cluster_t *mcluster, * @brief Handle InitProducerId */ static int -rd_kafka_mock_handle_InitProducerId (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +rd_kafka_mock_handle_InitProducerId(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; const rd_bool_t log_decode_errors = rd_true; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); rd_kafkap_str_t TransactionalId; - rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER; + rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER; rd_kafka_pid_t current_pid = RD_KAFKA_PID_INITIALIZER; int32_t TxnTimeoutMs; rd_kafka_resp_err_t err; @@ -1574,8 +1552,8 @@ 
rd_kafka_mock_handle_InitProducerId (rd_kafka_mock_connection_t *mconn, if (RD_KAFKAP_STR_LEN(&TransactionalId) == 0) err = RD_KAFKA_RESP_ERR_INVALID_REQUEST; else if (rd_kafka_mock_cluster_get_coord( - mcluster, RD_KAFKA_COORD_TXN, - &TransactionalId) != mconn->broker) + mcluster, RD_KAFKA_COORD_TXN, &TransactionalId) != + mconn->broker) err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; } @@ -1585,8 +1563,7 @@ rd_kafka_mock_handle_InitProducerId (rd_kafka_mock_connection_t *mconn, * to bump the epoch (KIP-360). * Verify that current_pid matches and then * bump the epoch. */ - err = rd_kafka_mock_pid_bump(mcluster, - ¤t_pid); + err = rd_kafka_mock_pid_bump(mcluster, ¤t_pid); if (!err) pid = current_pid; @@ -1608,7 +1585,7 @@ rd_kafka_mock_handle_InitProducerId (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -1619,8 +1596,8 @@ rd_kafka_mock_handle_InitProducerId (rd_kafka_mock_connection_t *mconn, * @brief Handle AddPartitionsToTxn */ static int -rd_kafka_mock_handle_AddPartitionsToTxn (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +rd_kafka_mock_handle_AddPartitionsToTxn(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; const rd_bool_t log_decode_errors = rd_true; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); @@ -1648,8 +1625,7 @@ rd_kafka_mock_handle_AddPartitionsToTxn (rd_kafka_mock_connection_t *mconn, all_err = rd_kafka_mock_next_request_error(mconn, resp); if (!all_err && - rd_kafka_mock_cluster_get_coord(mcluster, - RD_KAFKA_COORD_TXN, + rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN, &TransactionalId) != mconn->broker) all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; @@ -1682,8 +1658,8 @@ rd_kafka_mock_handle_AddPartitionsToTxn (rd_kafka_mock_connection_t *mconn, /* Response: Partition */ rd_kafka_buf_write_i32(resp, Partition); - if (!mtopic || - Partition < 0 || Partition >= 
mtopic->partition_cnt) + if (!mtopic || Partition < 0 || + Partition >= mtopic->partition_cnt) err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; else if (mtopic && mtopic->err) err = mtopic->err; @@ -1697,7 +1673,7 @@ rd_kafka_mock_handle_AddPartitionsToTxn (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -1707,8 +1683,8 @@ rd_kafka_mock_handle_AddPartitionsToTxn (rd_kafka_mock_connection_t *mconn, * @brief Handle AddOffsetsToTxn */ static int -rd_kafka_mock_handle_AddOffsetsToTxn (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +rd_kafka_mock_handle_AddOffsetsToTxn(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; const rd_bool_t log_decode_errors = rd_true; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); @@ -1732,8 +1708,7 @@ rd_kafka_mock_handle_AddOffsetsToTxn (rd_kafka_mock_connection_t *mconn, err = rd_kafka_mock_next_request_error(mconn, resp); if (!err && - rd_kafka_mock_cluster_get_coord(mcluster, - RD_KAFKA_COORD_TXN, + rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN, &TransactionalId) != mconn->broker) err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; @@ -1747,7 +1722,7 @@ rd_kafka_mock_handle_AddOffsetsToTxn (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -1757,8 +1732,8 @@ rd_kafka_mock_handle_AddOffsetsToTxn (rd_kafka_mock_connection_t *mconn, * @brief Handle TxnOffsetCommit */ static int -rd_kafka_mock_handle_TxnOffsetCommit (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +rd_kafka_mock_handle_TxnOffsetCommit(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; const rd_bool_t log_decode_errors = rd_true; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); @@ -1788,8 +1763,7 @@ rd_kafka_mock_handle_TxnOffsetCommit 
(rd_kafka_mock_connection_t *mconn, err = rd_kafka_mock_next_request_error(mconn, resp); if (!err && - rd_kafka_mock_cluster_get_coord(mcluster, - RD_KAFKA_COORD_GROUP, + rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_GROUP, &GroupId) != mconn->broker) err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; @@ -1843,7 +1817,7 @@ rd_kafka_mock_handle_TxnOffsetCommit (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -1852,9 +1826,8 @@ rd_kafka_mock_handle_TxnOffsetCommit (rd_kafka_mock_connection_t *mconn, /** * @brief Handle EndTxn */ -static int -rd_kafka_mock_handle_EndTxn (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_EndTxn(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; const rd_bool_t log_decode_errors = rd_true; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); @@ -1883,8 +1856,7 @@ rd_kafka_mock_handle_EndTxn (rd_kafka_mock_connection_t *mconn, err = rd_kafka_mock_next_request_error(mconn, resp); if (!err && - rd_kafka_mock_cluster_get_coord(mcluster, - RD_KAFKA_COORD_TXN, + rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN, &TransactionalId) != mconn->broker) err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; @@ -1898,7 +1870,7 @@ rd_kafka_mock_handle_EndTxn (rd_kafka_mock_connection_t *mconn, return 0; - err_parse: +err_parse: rd_kafka_buf_destroy(resp); return -1; } @@ -1908,31 +1880,30 @@ rd_kafka_mock_handle_EndTxn (rd_kafka_mock_connection_t *mconn, * @brief Default request handlers */ const struct rd_kafka_mock_api_handler -rd_kafka_mock_api_handlers[RD_KAFKAP__NUM] = { + rd_kafka_mock_api_handlers[RD_KAFKAP__NUM] = { /* [request-type] = { MinVersion, MaxVersion, FlexVersion, callback } */ - [RD_KAFKAP_Produce] = { 0, 7, -1, rd_kafka_mock_handle_Produce }, - [RD_KAFKAP_Fetch] = { 0, 11, -1, rd_kafka_mock_handle_Fetch }, - [RD_KAFKAP_ListOffsets] = { 0, 5, -1, 
rd_kafka_mock_handle_ListOffsets }, - [RD_KAFKAP_OffsetFetch] = { 0, 5, 6, rd_kafka_mock_handle_OffsetFetch }, - [RD_KAFKAP_OffsetCommit] = { 0, 7, 8, - rd_kafka_mock_handle_OffsetCommit }, - [RD_KAFKAP_ApiVersion] = { 0, 2, 3, rd_kafka_mock_handle_ApiVersion }, - [RD_KAFKAP_Metadata] = { 0, 2, 9, rd_kafka_mock_handle_Metadata }, - [RD_KAFKAP_FindCoordinator] = { 0, 2, 3, - rd_kafka_mock_handle_FindCoordinator }, - [RD_KAFKAP_InitProducerId] = { 0, 4, 2, - rd_kafka_mock_handle_InitProducerId }, - [RD_KAFKAP_JoinGroup] = { 0, 5, 6, rd_kafka_mock_handle_JoinGroup }, - [RD_KAFKAP_Heartbeat] = { 0, 3, 4, rd_kafka_mock_handle_Heartbeat }, - [RD_KAFKAP_LeaveGroup] = { 0, 1, 4, rd_kafka_mock_handle_LeaveGroup }, - [RD_KAFKAP_SyncGroup] = { 0, 3, 4, rd_kafka_mock_handle_SyncGroup }, - [RD_KAFKAP_AddPartitionsToTxn] = { 0, 1, -1, - rd_kafka_mock_handle_AddPartitionsToTxn }, - [RD_KAFKAP_AddOffsetsToTxn] = { 0, 1, -1, - rd_kafka_mock_handle_AddOffsetsToTxn }, - [RD_KAFKAP_TxnOffsetCommit] = { 0, 2, 3, - rd_kafka_mock_handle_TxnOffsetCommit }, - [RD_KAFKAP_EndTxn] = { 0, 1, -1, rd_kafka_mock_handle_EndTxn }, + [RD_KAFKAP_Produce] = {0, 7, -1, rd_kafka_mock_handle_Produce}, + [RD_KAFKAP_Fetch] = {0, 11, -1, rd_kafka_mock_handle_Fetch}, + [RD_KAFKAP_ListOffsets] = {0, 5, -1, rd_kafka_mock_handle_ListOffsets}, + [RD_KAFKAP_OffsetFetch] = {0, 5, 6, rd_kafka_mock_handle_OffsetFetch}, + [RD_KAFKAP_OffsetCommit] = {0, 7, 8, rd_kafka_mock_handle_OffsetCommit}, + [RD_KAFKAP_ApiVersion] = {0, 2, 3, rd_kafka_mock_handle_ApiVersion}, + [RD_KAFKAP_Metadata] = {0, 2, 9, rd_kafka_mock_handle_Metadata}, + [RD_KAFKAP_FindCoordinator] = {0, 2, 3, + rd_kafka_mock_handle_FindCoordinator}, + [RD_KAFKAP_InitProducerId] = {0, 4, 2, + rd_kafka_mock_handle_InitProducerId}, + [RD_KAFKAP_JoinGroup] = {0, 5, 6, rd_kafka_mock_handle_JoinGroup}, + [RD_KAFKAP_Heartbeat] = {0, 3, 4, rd_kafka_mock_handle_Heartbeat}, + [RD_KAFKAP_LeaveGroup] = {0, 1, 4, rd_kafka_mock_handle_LeaveGroup}, + 
[RD_KAFKAP_SyncGroup] = {0, 3, 4, rd_kafka_mock_handle_SyncGroup}, + [RD_KAFKAP_AddPartitionsToTxn] = + {0, 1, -1, rd_kafka_mock_handle_AddPartitionsToTxn}, + [RD_KAFKAP_AddOffsetsToTxn] = {0, 1, -1, + rd_kafka_mock_handle_AddOffsetsToTxn}, + [RD_KAFKAP_TxnOffsetCommit] = {0, 2, 3, + rd_kafka_mock_handle_TxnOffsetCommit}, + [RD_KAFKAP_EndTxn] = {0, 1, -1, rd_kafka_mock_handle_EndTxn}, }; @@ -1940,20 +1911,21 @@ rd_kafka_mock_api_handlers[RD_KAFKAP__NUM] = { /** * @brief Handle ApiVersionRequest. * - * @remark This is the only handler that needs to handle unsupported ApiVersions. + * @remark This is the only handler that needs to handle unsupported + * ApiVersions. */ -static int rd_kafka_mock_handle_ApiVersion (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_mock_handle_ApiVersion(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); size_t of_ApiKeysCnt; - int cnt = 0; + int cnt = 0; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; int i; if (!rd_kafka_mock_cluster_ApiVersion_check( - mcluster, - rkbuf->rkbuf_reqhdr.ApiKey, rkbuf->rkbuf_reqhdr.ApiVersion)) + mcluster, rkbuf->rkbuf_reqhdr.ApiKey, + rkbuf->rkbuf_reqhdr.ApiVersion)) err = RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION; /* ApiVersionRequest/Response with flexver (>=v3) has a mix @@ -1972,7 +1944,7 @@ static int rd_kafka_mock_handle_ApiVersion (rd_kafka_mock_connection_t *mconn, else of_ApiKeysCnt = rd_kafka_buf_write_i32(resp, 0); - for (i = 0 ; i < RD_KAFKAP__NUM ; i++) { + for (i = 0; i < RD_KAFKAP__NUM; i++) { if (!mcluster->api_handlers[i].cb || mcluster->api_handlers[i].MaxVersion == -1) continue; @@ -1986,11 +1958,11 @@ static int rd_kafka_mock_handle_ApiVersion (rd_kafka_mock_connection_t *mconn, /* ApiKey */ rd_kafka_buf_write_i16(resp, (int16_t)i); /* MinVersion */ - rd_kafka_buf_write_i16( - resp, 
mcluster->api_handlers[i].MinVersion); + rd_kafka_buf_write_i16(resp, + mcluster->api_handlers[i].MinVersion); /* MaxVersion */ - rd_kafka_buf_write_i16( - resp, mcluster->api_handlers[i].MaxVersion); + rd_kafka_buf_write_i16(resp, + mcluster->api_handlers[i].MaxVersion); cnt++; } diff --git a/src/rdkafka_mock_int.h b/src/rdkafka_mock_int.h index 95f174030f..1f1179ce80 100644 --- a/src/rdkafka_mock_int.h +++ b/src/rdkafka_mock_int.h @@ -49,16 +49,16 @@ typedef struct rd_kafka_mock_error_rtt_s { */ typedef struct rd_kafka_mock_error_stack_s { TAILQ_ENTRY(rd_kafka_mock_error_stack_s) link; - int16_t ApiKey; /**< Optional ApiKey for which this stack - * applies to, else -1. */ - size_t cnt; /**< Current number of errors in .errs */ - size_t size; /**< Current allocated size for .errs (in elements) */ + int16_t ApiKey; /**< Optional ApiKey for which this stack + * applies to, else -1. */ + size_t cnt; /**< Current number of errors in .errs */ + size_t size; /**< Current allocated size for .errs (in elements) */ rd_kafka_mock_error_rtt_t *errs; /**< Array of errors/rtts */ } rd_kafka_mock_error_stack_t; typedef TAILQ_HEAD(rd_kafka_mock_error_stack_head_s, rd_kafka_mock_error_stack_s) - rd_kafka_mock_error_stack_head_t; + rd_kafka_mock_error_stack_head_t; /** @@ -77,10 +77,10 @@ typedef struct rd_kafka_mock_cgrp_member_s { char *id; /**< MemberId */ char *group_instance_id; /**< Group instance id */ rd_ts_t ts_last_activity; /**< Last activity, e.g., Heartbeat */ - rd_kafka_mock_cgrp_proto_t *protos; /**< Protocol names */ - int proto_cnt; /**< Number of protocols */ - rd_kafkap_bytes_t *assignment; /**< Current assignment */ - rd_kafka_buf_t *resp; /**< Current response buffer */ + rd_kafka_mock_cgrp_proto_t *protos; /**< Protocol names */ + int proto_cnt; /**< Number of protocols */ + rd_kafkap_bytes_t *assignment; /**< Current assignment */ + rd_kafka_buf_t *resp; /**< Current response buffer */ struct rd_kafka_mock_connection_s *conn; /**< Connection, may be NULL * 
if there is no ongoing * request. */ @@ -93,24 +93,23 @@ typedef struct rd_kafka_mock_cgrp_s { TAILQ_ENTRY(rd_kafka_mock_cgrp_s) link; struct rd_kafka_mock_cluster_s *cluster; /**< Cluster */ struct rd_kafka_mock_connection_s *conn; /**< Connection */ - char *id; /**< Group Id */ - char *protocol_type; /**< Protocol type */ - char *protocol_name; /**< Elected protocol name */ - int32_t generation_id; /**< Generation Id */ - int session_timeout_ms; /**< Session timeout */ - enum { - RD_KAFKA_MOCK_CGRP_STATE_EMPTY, /* No members */ - RD_KAFKA_MOCK_CGRP_STATE_JOINING, /* Members are joining */ - RD_KAFKA_MOCK_CGRP_STATE_SYNCING, /* Syncing assignments */ - RD_KAFKA_MOCK_CGRP_STATE_REBALANCING, /* Rebalance triggered */ - RD_KAFKA_MOCK_CGRP_STATE_UP, /* Group is operational */ - } state; /**< Consumer group state */ - rd_kafka_timer_t session_tmr; /**< Session timeout timer */ - rd_kafka_timer_t rebalance_tmr; /**< Rebalance state timer */ + char *id; /**< Group Id */ + char *protocol_type; /**< Protocol type */ + char *protocol_name; /**< Elected protocol name */ + int32_t generation_id; /**< Generation Id */ + int session_timeout_ms; /**< Session timeout */ + enum { RD_KAFKA_MOCK_CGRP_STATE_EMPTY, /* No members */ + RD_KAFKA_MOCK_CGRP_STATE_JOINING, /* Members are joining */ + RD_KAFKA_MOCK_CGRP_STATE_SYNCING, /* Syncing assignments */ + RD_KAFKA_MOCK_CGRP_STATE_REBALANCING, /* Rebalance triggered */ + RD_KAFKA_MOCK_CGRP_STATE_UP, /* Group is operational */ + } state; /**< Consumer group state */ + rd_kafka_timer_t session_tmr; /**< Session timeout timer */ + rd_kafka_timer_t rebalance_tmr; /**< Rebalance state timer */ TAILQ_HEAD(, rd_kafka_mock_cgrp_member_s) members; /**< Group members */ - int member_cnt; /**< Number of group members */ + int member_cnt; /**< Number of group members */ int last_member_cnt; /**< Mumber of group members at last election */ - int assignment_cnt; /**< Number of member assignments in last Sync */ + int assignment_cnt; /**< Number of 
member assignments in last Sync */ rd_kafka_mock_cgrp_member_t *leader; /**< Elected leader */ } rd_kafka_mock_cgrp_t; @@ -120,11 +119,11 @@ typedef struct rd_kafka_mock_cgrp_s { typedef struct rd_kafka_mock_connection_s { TAILQ_ENTRY(rd_kafka_mock_connection_s) link; rd_kafka_transport_t *transport; /**< Socket transport */ - rd_kafka_buf_t *rxbuf; /**< Receive buffer */ - rd_kafka_bufq_t outbufs; /**< Send buffers */ - short *poll_events; /**< Events to poll, points to - * the broker's pfd array */ - struct sockaddr_in peer; /**< Peer address */ + rd_kafka_buf_t *rxbuf; /**< Receive buffer */ + rd_kafka_bufq_t outbufs; /**< Send buffers */ + short *poll_events; /**< Events to poll, points to + * the broker's pfd array */ + struct sockaddr_in peer; /**< Peer address */ struct rd_kafka_mock_broker_s *broker; rd_kafka_timer_t write_tmr; /**< Socket write delay timer */ } rd_kafka_mock_connection_t; @@ -136,13 +135,13 @@ typedef struct rd_kafka_mock_connection_s { typedef struct rd_kafka_mock_broker_s { TAILQ_ENTRY(rd_kafka_mock_broker_s) link; int32_t id; - char advertised_listener[128]; - int port; - char *rack; + char advertised_listener[128]; + int port; + char *rack; rd_bool_t up; - rd_ts_t rtt; /**< RTT in microseconds */ + rd_ts_t rtt; /**< RTT in microseconds */ - rd_socket_t listen_s; /**< listen() socket */ + rd_socket_t listen_s; /**< listen() socket */ TAILQ_HEAD(, rd_kafka_mock_connection_s) connections; @@ -159,8 +158,8 @@ typedef struct rd_kafka_mock_broker_s { */ typedef struct rd_kafka_mock_msgset_s { TAILQ_ENTRY(rd_kafka_mock_msgset_s) link; - int64_t first_offset; /**< First offset in batch */ - int64_t last_offset; /**< Last offset in batch */ + int64_t first_offset; /**< First offset in batch */ + int64_t last_offset; /**< Last offset in batch */ rd_kafkap_bytes_t bytes; /* Space for bytes.data is allocated after the msgset_t */ } rd_kafka_mock_msgset_t; @@ -172,8 +171,8 @@ typedef struct rd_kafka_mock_msgset_s { typedef struct 
rd_kafka_mock_committed_offset_s { /**< mpart.committed_offsets */ TAILQ_ENTRY(rd_kafka_mock_committed_offset_s) link; - char *group; /**< Allocated along with the struct */ - int64_t offset; /**< Committed offset */ + char *group; /**< Allocated along with the struct */ + int64_t offset; /**< Committed offset */ rd_kafkap_str_t *metadata; /**< Metadata, allocated separately */ } rd_kafka_mock_committed_offset_t; @@ -185,10 +184,10 @@ typedef struct rd_kafka_mock_partition_s { TAILQ_ENTRY(rd_kafka_mock_partition_s) leader_link; int32_t id; - int64_t start_offset; /**< Actual/leader start offset */ - int64_t end_offset; /**< Actual/leader end offset */ - int64_t follower_start_offset; /**< Follower's start offset */ - int64_t follower_end_offset; /**< Follower's end offset */ + int64_t start_offset; /**< Actual/leader start offset */ + int64_t end_offset; /**< Actual/leader end offset */ + int64_t follower_start_offset; /**< Follower's start offset */ + int64_t follower_end_offset; /**< Follower's end offset */ rd_bool_t update_follower_start_offset; /**< Keep follower_start_offset * in synch with start_offset */ @@ -197,19 +196,19 @@ typedef struct rd_kafka_mock_partition_s { */ TAILQ_HEAD(, rd_kafka_mock_msgset_s) msgsets; - size_t size; /**< Total size of all .msgsets */ - size_t cnt; /**< Total count of .msgsets */ - size_t max_size; /**< Maximum size of all .msgsets, may be overshot. */ - size_t max_cnt; /**< Maximum number of .msgsets */ + size_t size; /**< Total size of all .msgsets */ + size_t cnt; /**< Total count of .msgsets */ + size_t max_size; /**< Maximum size of all .msgsets, may be overshot. 
*/ + size_t max_cnt; /**< Maximum number of .msgsets */ /**< Committed offsets */ TAILQ_HEAD(, rd_kafka_mock_committed_offset_s) committed_offsets; - rd_kafka_mock_broker_t *leader; + rd_kafka_mock_broker_t *leader; rd_kafka_mock_broker_t **replicas; - int replica_cnt; + int replica_cnt; - int32_t follower_id; /**< Preferred replica/follower */ + int32_t follower_id; /**< Preferred replica/follower */ struct rd_kafka_mock_topic_s *topic; } rd_kafka_mock_partition_t; @@ -220,13 +219,13 @@ typedef struct rd_kafka_mock_partition_s { */ typedef struct rd_kafka_mock_topic_s { TAILQ_ENTRY(rd_kafka_mock_topic_s) link; - char *name; + char *name; rd_kafka_mock_partition_t *partitions; - int partition_cnt; + int partition_cnt; - rd_kafka_resp_err_t err; /**< Error to return in protocol requests - * for this topic. */ + rd_kafka_resp_err_t err; /**< Error to return in protocol requests + * for this topic. */ struct rd_kafka_mock_cluster_s *cluster; } rd_kafka_mock_topic_t; @@ -237,25 +236,26 @@ typedef struct rd_kafka_mock_topic_s { typedef struct rd_kafka_mock_coord_s { TAILQ_ENTRY(rd_kafka_mock_coord_s) link; rd_kafka_coordtype_t type; - char *key; + char *key; int32_t broker_id; } rd_kafka_mock_coord_t; -typedef void (rd_kafka_mock_io_handler_t) (struct rd_kafka_mock_cluster_s - *mcluster, - rd_socket_t fd, - int events, void *opaque); +typedef void(rd_kafka_mock_io_handler_t)( + struct rd_kafka_mock_cluster_s *mcluster, + rd_socket_t fd, + int events, + void *opaque); struct rd_kafka_mock_api_handler { int16_t MinVersion; int16_t MaxVersion; - int16_t FlexVersion; /**< First Flexible version */ - int (*cb) (rd_kafka_mock_connection_t *mconn, rd_kafka_buf_t *rkbuf); + int16_t FlexVersion; /**< First Flexible version */ + int (*cb)(rd_kafka_mock_connection_t *mconn, rd_kafka_buf_t *rkbuf); }; extern const struct rd_kafka_mock_api_handler -rd_kafka_mock_api_handlers[RD_KAFKAP__NUM]; + rd_kafka_mock_api_handlers[RD_KAFKAP__NUM]; @@ -268,11 +268,11 @@ 
rd_kafka_mock_api_handlers[RD_KAFKAP__NUM]; * No locking is needed. */ struct rd_kafka_mock_cluster_s { - char id[32]; /**< Generated cluster id */ + char id[32]; /**< Generated cluster id */ rd_kafka_t *rk; - int32_t controller_id; /**< Current controller */ + int32_t controller_id; /**< Current controller */ TAILQ_HEAD(, rd_kafka_mock_broker_s) brokers; int broker_cnt; @@ -291,25 +291,25 @@ struct rd_kafka_mock_cluster_s { char *bootstraps; /**< bootstrap.servers */ - thrd_t thread; /**< Mock thread */ + thrd_t thread; /**< Mock thread */ rd_kafka_q_t *ops; /**< Control ops queue for interacting with the * cluster. */ - rd_socket_t wakeup_fds[2]; /**< Wake-up fds for use with .ops */ + rd_socket_t wakeup_fds[2]; /**< Wake-up fds for use with .ops */ - rd_bool_t run; /**< Cluster will run while this value is true */ + rd_bool_t run; /**< Cluster will run while this value is true */ - int fd_cnt; /**< Number of file descriptors */ - int fd_size; /**< Allocated size of .fds - * and .handlers */ - struct pollfd *fds; /**< Dynamic array */ + int fd_cnt; /**< Number of file descriptors */ + int fd_size; /**< Allocated size of .fds + * and .handlers */ + struct pollfd *fds; /**< Dynamic array */ - rd_kafka_broker_t *dummy_rkb; /**< Some internal librdkafka APIs - * that we are reusing requires a - * broker object, we use the - * internal broker and store it - * here for convenient access. */ + rd_kafka_broker_t *dummy_rkb; /**< Some internal librdkafka APIs + * that we are reusing requires a + * broker object, we use the + * internal broker and store it + * here for convenient access. */ struct { int partition_cnt; /**< Auto topic create part cnt */ @@ -320,7 +320,7 @@ struct rd_kafka_mock_cluster_s { struct { rd_kafka_mock_io_handler_t *cb; /**< Callback */ void *opaque; /**< Callbacks' opaque */ - } *handlers; + } * handlers; /**< Per-protocol request error stack. 
*/ rd_kafka_mock_error_stack_head_t errstacks; @@ -334,66 +334,67 @@ struct rd_kafka_mock_cluster_s { */ mtx_t lock; - rd_kafka_timers_t timers; /**< Timers */ + rd_kafka_timers_t timers; /**< Timers */ }; - - -rd_kafka_buf_t *rd_kafka_mock_buf_new_response (const rd_kafka_buf_t *request); -void rd_kafka_mock_connection_send_response (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *resp); -void rd_kafka_mock_connection_set_blocking (rd_kafka_mock_connection_t *mconn, - rd_bool_t blocking); +rd_kafka_buf_t *rd_kafka_mock_buf_new_response(const rd_kafka_buf_t *request); +void rd_kafka_mock_connection_send_response(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp); +void rd_kafka_mock_connection_set_blocking(rd_kafka_mock_connection_t *mconn, + rd_bool_t blocking); rd_kafka_mock_partition_t * -rd_kafka_mock_partition_find (const rd_kafka_mock_topic_t *mtopic, - int32_t partition); +rd_kafka_mock_partition_find(const rd_kafka_mock_topic_t *mtopic, + int32_t partition); rd_kafka_mock_topic_t * -rd_kafka_mock_topic_auto_create (rd_kafka_mock_cluster_t *mcluster, - const char *topic, int partition_cnt, - rd_kafka_resp_err_t *errp); +rd_kafka_mock_topic_auto_create(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt, + rd_kafka_resp_err_t *errp); rd_kafka_mock_topic_t * -rd_kafka_mock_topic_find (const rd_kafka_mock_cluster_t *mcluster, - const char *name); +rd_kafka_mock_topic_find(const rd_kafka_mock_cluster_t *mcluster, + const char *name); rd_kafka_mock_topic_t * -rd_kafka_mock_topic_find_by_kstr (const rd_kafka_mock_cluster_t *mcluster, - const rd_kafkap_str_t *kname); +rd_kafka_mock_topic_find_by_kstr(const rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *kname); rd_kafka_mock_broker_t * -rd_kafka_mock_cluster_get_coord (rd_kafka_mock_cluster_t *mcluster, - rd_kafka_coordtype_t KeyType, - const rd_kafkap_str_t *Key); +rd_kafka_mock_cluster_get_coord(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_coordtype_t KeyType, + 
const rd_kafkap_str_t *Key); rd_kafka_mock_committed_offset_t * -rd_kafka_mock_committed_offset_find (const rd_kafka_mock_partition_t *mpart, - const rd_kafkap_str_t *group); +rd_kafka_mock_committed_offset_find(const rd_kafka_mock_partition_t *mpart, + const rd_kafkap_str_t *group); rd_kafka_mock_committed_offset_t * -rd_kafka_mock_commit_offset (rd_kafka_mock_partition_t *mpart, - const rd_kafkap_str_t *group, int64_t offset, - const rd_kafkap_str_t *metadata); +rd_kafka_mock_commit_offset(rd_kafka_mock_partition_t *mpart, + const rd_kafkap_str_t *group, + int64_t offset, + const rd_kafkap_str_t *metadata); const rd_kafka_mock_msgset_t * -rd_kafka_mock_msgset_find (const rd_kafka_mock_partition_t *mpart, - int64_t offset, rd_bool_t on_follower); +rd_kafka_mock_msgset_find(const rd_kafka_mock_partition_t *mpart, + int64_t offset, + rd_bool_t on_follower); rd_kafka_resp_err_t -rd_kafka_mock_next_request_error (rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *resp); +rd_kafka_mock_next_request_error(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp); rd_kafka_resp_err_t -rd_kafka_mock_partition_log_append (rd_kafka_mock_partition_t *mpart, - const rd_kafkap_bytes_t *bytes, - int64_t *BaseOffset); +rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart, + const rd_kafkap_bytes_t *bytes, + int64_t *BaseOffset); /** * @returns true if the ApiVersion is supported, else false. 
*/ static RD_UNUSED rd_bool_t -rd_kafka_mock_cluster_ApiVersion_check (const rd_kafka_mock_cluster_t *mcluster, - int16_t ApiKey, - int16_t ApiVersion) { +rd_kafka_mock_cluster_ApiVersion_check(const rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + int16_t ApiVersion) { return (ApiVersion >= mcluster->api_handlers[ApiKey].MinVersion && ApiVersion <= mcluster->api_handlers[ApiKey].MaxVersion); } @@ -403,49 +404,48 @@ rd_kafka_mock_cluster_ApiVersion_check (const rd_kafka_mock_cluster_t *mcluster, * @name Mock consumer group (rdkafka_mock_cgrp.c) * @{ */ -void rd_kafka_mock_cgrp_member_active (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member); -void -rd_kafka_mock_cgrp_member_assignment_set (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member, - const rd_kafkap_bytes_t *Metadata); +void rd_kafka_mock_cgrp_member_active(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member); +void rd_kafka_mock_cgrp_member_assignment_set( + rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + const rd_kafkap_bytes_t *Metadata); rd_kafka_resp_err_t -rd_kafka_mock_cgrp_member_sync_set (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member, - rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t *resp); +rd_kafka_mock_cgrp_member_sync_set(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp); rd_kafka_resp_err_t -rd_kafka_mock_cgrp_member_leave (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member); -void rd_kafka_mock_cgrp_protos_destroy (rd_kafka_mock_cgrp_proto_t *protos, - int proto_cnt); +rd_kafka_mock_cgrp_member_leave(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member); +void rd_kafka_mock_cgrp_protos_destroy(rd_kafka_mock_cgrp_proto_t *protos, + int proto_cnt); rd_kafka_resp_err_t -rd_kafka_mock_cgrp_member_add (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_connection_t *mconn, - rd_kafka_buf_t 
*resp, - const rd_kafkap_str_t *MemberId, - const rd_kafkap_str_t *ProtocolType, - rd_kafka_mock_cgrp_proto_t *protos, - int proto_cnt, - int session_timeout_ms); +rd_kafka_mock_cgrp_member_add(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp, + const rd_kafkap_str_t *MemberId, + const rd_kafkap_str_t *ProtocolType, + rd_kafka_mock_cgrp_proto_t *protos, + int proto_cnt, + int session_timeout_ms); rd_kafka_resp_err_t -rd_kafka_mock_cgrp_check_state (rd_kafka_mock_cgrp_t *mcgrp, - rd_kafka_mock_cgrp_member_t *member, - const rd_kafka_buf_t *request, - int32_t generation_id); +rd_kafka_mock_cgrp_check_state(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + const rd_kafka_buf_t *request, + int32_t generation_id); rd_kafka_mock_cgrp_member_t * -rd_kafka_mock_cgrp_member_find (const rd_kafka_mock_cgrp_t *mcgrp, - const rd_kafkap_str_t *MemberId); -void rd_kafka_mock_cgrp_destroy (rd_kafka_mock_cgrp_t *mcgrp); -rd_kafka_mock_cgrp_t * -rd_kafka_mock_cgrp_find (rd_kafka_mock_cluster_t *mcluster, - const rd_kafkap_str_t *GroupId); +rd_kafka_mock_cgrp_member_find(const rd_kafka_mock_cgrp_t *mcgrp, + const rd_kafkap_str_t *MemberId); +void rd_kafka_mock_cgrp_destroy(rd_kafka_mock_cgrp_t *mcgrp); +rd_kafka_mock_cgrp_t *rd_kafka_mock_cgrp_find(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *GroupId); rd_kafka_mock_cgrp_t * -rd_kafka_mock_cgrp_get (rd_kafka_mock_cluster_t *mcluster, - const rd_kafkap_str_t *GroupId, - const rd_kafkap_str_t *ProtocolType); -void rd_kafka_mock_cgrps_connection_closed (rd_kafka_mock_cluster_t *mcluster, - rd_kafka_mock_connection_t *mconn); +rd_kafka_mock_cgrp_get(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *GroupId, + const rd_kafkap_str_t *ProtocolType); +void rd_kafka_mock_cgrps_connection_closed(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_mock_connection_t *mconn); /** diff --git a/src/rdkafka_msg.c b/src/rdkafka_msg.c index b8818dd396..9bd2b8d31b 100644 --- 
a/src/rdkafka_msg.c +++ b/src/rdkafka_msg.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012,2013 Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -47,7 +47,7 @@ #include -const char *rd_kafka_message_errstr (const rd_kafka_message_t *rkmessage) { +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) { if (!rkmessage->err) return NULL; @@ -69,7 +69,7 @@ const char *rd_kafka_message_errstr (const rd_kafka_message_t *rkmessage) { * @remarks Also sets the corresponding errno. */ static RD_INLINE rd_kafka_resp_err_t -rd_kafka_check_produce (rd_kafka_t *rk, rd_kafka_error_t **errorp) { +rd_kafka_check_produce(rd_kafka_t *rk, rd_kafka_error_t **errorp) { rd_kafka_resp_err_t err; if (unlikely((err = rd_kafka_fatal_error_code(rk)))) { @@ -77,10 +77,10 @@ rd_kafka_check_produce (rd_kafka_t *rk, rd_kafka_error_t **errorp) { if (errorp) { rd_kafka_rdlock(rk); *errorp = rd_kafka_error_new_fatal( - err, - "Producing not allowed since a previous fatal " - "error was raised: %s", - rk->rk_fatal.errstr); + err, + "Producing not allowed since a previous fatal " + "error was raised: %s", + rk->rk_fatal.errstr); rd_kafka_rdunlock(rk); } return RD_KAFKA_RESP_ERR__FATAL; @@ -95,9 +95,9 @@ rd_kafka_check_produce (rd_kafka_t *rk, rd_kafka_error_t **errorp) { if (errorp) { rd_kafka_rdlock(rk); *errorp = rd_kafka_error_new( - RD_KAFKA_RESP_ERR__STATE, - "Producing not allowed in transactional state %s", - rd_kafka_txn_state2str(rk->rk_eos.txn_state)); + RD_KAFKA_RESP_ERR__STATE, + "Producing not allowed in transactional state %s", + rd_kafka_txn_state2str(rk->rk_eos.txn_state)); 
rd_kafka_rdunlock(rk); } @@ -105,27 +105,25 @@ rd_kafka_check_produce (rd_kafka_t *rk, rd_kafka_error_t **errorp) { } -void rd_kafka_msg_destroy (rd_kafka_t *rk, rd_kafka_msg_t *rkm) { -//FIXME - if (rkm->rkm_flags & RD_KAFKA_MSG_F_ACCOUNT) { - rd_dassert(rk || rkm->rkm_rkmessage.rkt); - rd_kafka_curr_msgs_sub( - rk ? rk : - rkm->rkm_rkmessage.rkt->rkt_rk, - 1, rkm->rkm_len); - } +void rd_kafka_msg_destroy(rd_kafka_t *rk, rd_kafka_msg_t *rkm) { + // FIXME + if (rkm->rkm_flags & RD_KAFKA_MSG_F_ACCOUNT) { + rd_dassert(rk || rkm->rkm_rkmessage.rkt); + rd_kafka_curr_msgs_sub(rk ? rk : rkm->rkm_rkmessage.rkt->rkt_rk, + 1, rkm->rkm_len); + } if (rkm->rkm_headers) rd_kafka_headers_destroy(rkm->rkm_headers); - if (likely(rkm->rkm_rkmessage.rkt != NULL)) - rd_kafka_topic_destroy0(rkm->rkm_rkmessage.rkt); + if (likely(rkm->rkm_rkmessage.rkt != NULL)) + rd_kafka_topic_destroy0(rkm->rkm_rkmessage.rkt); - if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE && rkm->rkm_payload) - rd_free(rkm->rkm_payload); + if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE && rkm->rkm_payload) + rd_free(rkm->rkm_payload); - if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE_RKM) - rd_free(rkm); + if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE_RKM) + rd_free(rkm); } @@ -136,71 +134,71 @@ void rd_kafka_msg_destroy (rd_kafka_t *rk, rd_kafka_msg_t *rkm) { * * @returns the new message */ -static -rd_kafka_msg_t *rd_kafka_msg_new00 (rd_kafka_topic_t *rkt, - int32_t partition, - int msgflags, - char *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque) { - rd_kafka_msg_t *rkm; - size_t mlen = sizeof(*rkm); - char *p; - - /* If we are to make a copy of the payload, allocate space for it too */ - if (msgflags & RD_KAFKA_MSG_F_COPY) { - msgflags &= ~RD_KAFKA_MSG_F_FREE; - mlen += len; - } - - mlen += keylen; - - /* Note: using rd_malloc here, not rd_calloc, so make sure all fields - * are properly set up. 
*/ - rkm = rd_malloc(mlen); - rkm->rkm_err = 0; - rkm->rkm_flags = (RD_KAFKA_MSG_F_PRODUCER | - RD_KAFKA_MSG_F_FREE_RKM | msgflags); - rkm->rkm_len = len; - rkm->rkm_opaque = msg_opaque; - rkm->rkm_rkmessage.rkt = rd_kafka_topic_keep(rkt); +static rd_kafka_msg_t *rd_kafka_msg_new00(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + char *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque) { + rd_kafka_msg_t *rkm; + size_t mlen = sizeof(*rkm); + char *p; - rkm->rkm_broker_id = -1; - rkm->rkm_partition = partition; - rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID; - rkm->rkm_timestamp = 0; - rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE; - rkm->rkm_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; - rkm->rkm_headers = NULL; - - p = (char *)(rkm+1); - - if (payload && msgflags & RD_KAFKA_MSG_F_COPY) { - /* Copy payload to space following the ..msg_t */ - rkm->rkm_payload = p; - memcpy(rkm->rkm_payload, payload, len); - p += len; - - } else { - /* Just point to the provided payload. */ - rkm->rkm_payload = payload; - } - - if (key) { - rkm->rkm_key = p; - rkm->rkm_key_len = keylen; - memcpy(rkm->rkm_key, key, keylen); - } else { - rkm->rkm_key = NULL; - rkm->rkm_key_len = 0; - } + /* If we are to make a copy of the payload, allocate space for it too */ + if (msgflags & RD_KAFKA_MSG_F_COPY) { + msgflags &= ~RD_KAFKA_MSG_F_FREE; + mlen += len; + } + + mlen += keylen; + + /* Note: using rd_malloc here, not rd_calloc, so make sure all fields + * are properly set up. 
*/ + rkm = rd_malloc(mlen); + rkm->rkm_err = 0; + rkm->rkm_flags = + (RD_KAFKA_MSG_F_PRODUCER | RD_KAFKA_MSG_F_FREE_RKM | msgflags); + rkm->rkm_len = len; + rkm->rkm_opaque = msg_opaque; + rkm->rkm_rkmessage.rkt = rd_kafka_topic_keep(rkt); + + rkm->rkm_broker_id = -1; + rkm->rkm_partition = partition; + rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID; + rkm->rkm_timestamp = 0; + rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE; + rkm->rkm_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; + rkm->rkm_headers = NULL; + + p = (char *)(rkm + 1); + + if (payload && msgflags & RD_KAFKA_MSG_F_COPY) { + /* Copy payload to space following the ..msg_t */ + rkm->rkm_payload = p; + memcpy(rkm->rkm_payload, payload, len); + p += len; + + } else { + /* Just point to the provided payload. */ + rkm->rkm_payload = payload; + } + + if (key) { + rkm->rkm_key = p; + rkm->rkm_key_len = keylen; + memcpy(rkm->rkm_key, key, keylen); + } else { + rkm->rkm_key = NULL; + rkm->rkm_key_len = 0; + } return rkm; } - /** * @brief Create a new Producer message. * @@ -209,30 +207,32 @@ rd_kafka_msg_t *rd_kafka_msg_new00 (rd_kafka_topic_t *rkt, * Returns 0 on success or -1 on error. * Both errno and 'errp' are set appropriately. 
*/ -static rd_kafka_msg_t *rd_kafka_msg_new0 (rd_kafka_topic_t *rkt, - int32_t force_partition, - int msgflags, - char *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque, - rd_kafka_resp_err_t *errp, - int *errnop, - rd_kafka_headers_t *hdrs, - int64_t timestamp, - rd_ts_t now) { - rd_kafka_msg_t *rkm; +static rd_kafka_msg_t *rd_kafka_msg_new0(rd_kafka_topic_t *rkt, + int32_t force_partition, + int msgflags, + char *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque, + rd_kafka_resp_err_t *errp, + int *errnop, + rd_kafka_headers_t *hdrs, + int64_t timestamp, + rd_ts_t now) { + rd_kafka_msg_t *rkm; size_t hdrs_size = 0; - if (unlikely(!payload)) - len = 0; - if (!key) - keylen = 0; + if (unlikely(!payload)) + len = 0; + if (!key) + keylen = 0; if (hdrs) hdrs_size = rd_kafka_headers_serialized_size(hdrs); if (unlikely(len > INT32_MAX || keylen > INT32_MAX || rd_kafka_msg_max_wire_size(keylen, len, hdrs_size) > - (size_t)rkt->rkt_rk->rk_conf.max_msg_size)) { + (size_t)rkt->rkt_rk->rk_conf.max_msg_size)) { *errp = RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE; if (errnop) *errnop = EMSGSIZE; @@ -241,30 +241,31 @@ static rd_kafka_msg_t *rd_kafka_msg_new0 (rd_kafka_topic_t *rkt, if (msgflags & RD_KAFKA_MSG_F_BLOCK) *errp = rd_kafka_curr_msgs_add( - rkt->rkt_rk, 1, len, 1/*block*/, - (msgflags & RD_KAFKA_MSG_F_RKT_RDLOCKED) ? - &rkt->rkt_lock : NULL); + rkt->rkt_rk, 1, len, 1 /*block*/, + (msgflags & RD_KAFKA_MSG_F_RKT_RDLOCKED) ? 
&rkt->rkt_lock + : NULL); else *errp = rd_kafka_curr_msgs_add(rkt->rkt_rk, 1, len, 0, NULL); if (unlikely(*errp)) { - if (errnop) - *errnop = ENOBUFS; - return NULL; - } + if (errnop) + *errnop = ENOBUFS; + return NULL; + } - rkm = rd_kafka_msg_new00(rkt, force_partition, - msgflags|RD_KAFKA_MSG_F_ACCOUNT /* curr_msgs_add() */, - payload, len, key, keylen, msg_opaque); + rkm = rd_kafka_msg_new00( + rkt, force_partition, + msgflags | RD_KAFKA_MSG_F_ACCOUNT /* curr_msgs_add() */, payload, + len, key, keylen, msg_opaque); memset(&rkm->rkm_u.producer, 0, sizeof(rkm->rkm_u.producer)); if (timestamp) - rkm->rkm_timestamp = timestamp; + rkm->rkm_timestamp = timestamp; else - rkm->rkm_timestamp = rd_uclock()/1000; - rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME; + rkm->rkm_timestamp = rd_uclock() / 1000; + rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME; if (hdrs) { rd_dassert(!rkm->rkm_headers); @@ -273,12 +274,12 @@ static rd_kafka_msg_t *rd_kafka_msg_new0 (rd_kafka_topic_t *rkt, rkm->rkm_ts_enq = now; - if (rkt->rkt_conf.message_timeout_ms == 0) { - rkm->rkm_ts_timeout = INT64_MAX; - } else { - rkm->rkm_ts_timeout = now + - (int64_t) rkt->rkt_conf.message_timeout_ms * 1000; - } + if (rkt->rkt_conf.message_timeout_ms == 0) { + rkm->rkm_ts_timeout = INT64_MAX; + } else { + rkm->rkm_ts_timeout = + now + (int64_t)rkt->rkt_conf.message_timeout_ms * 1000; + } /* Call interceptor chain for on_send */ rd_kafka_interceptors_on_send(rkt->rkt_rk, &rkm->rkm_rkmessage); @@ -299,90 +300,90 @@ static rd_kafka_msg_t *rd_kafka_msg_new0 (rd_kafka_topic_t *rkt, * * @locks none */ -int rd_kafka_msg_new (rd_kafka_topic_t *rkt, int32_t force_partition, - int msgflags, - char *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque) { - rd_kafka_msg_t *rkm; - rd_kafka_resp_err_t err; - int errnox; +int rd_kafka_msg_new(rd_kafka_topic_t *rkt, + int32_t force_partition, + int msgflags, + char *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque) { 
+ rd_kafka_msg_t *rkm; + rd_kafka_resp_err_t err; + int errnox; if (unlikely((err = rd_kafka_check_produce(rkt->rkt_rk, NULL)))) return -1; /* Create message */ - rkm = rd_kafka_msg_new0(rkt, force_partition, msgflags, - payload, len, key, keylen, msg_opaque, - &err, &errnox, NULL, 0, rd_clock()); + rkm = rd_kafka_msg_new0(rkt, force_partition, msgflags, payload, len, + key, keylen, msg_opaque, &err, &errnox, NULL, 0, + rd_clock()); if (unlikely(!rkm)) { /* errno is already set by msg_new() */ - rd_kafka_set_last_error(err, errnox); + rd_kafka_set_last_error(err, errnox); return -1; } /* Partition the message */ - err = rd_kafka_msg_partitioner(rkt, rkm, 1); - if (likely(!err)) { - rd_kafka_set_last_error(0, 0); - return 0; - } + err = rd_kafka_msg_partitioner(rkt, rkm, 1); + if (likely(!err)) { + rd_kafka_set_last_error(0, 0); + return 0; + } /* Interceptor: unroll failing messages by triggering on_ack.. */ rkm->rkm_err = err; rd_kafka_interceptors_on_acknowledgement(rkt->rkt_rk, &rkm->rkm_rkmessage); - /* Handle partitioner failures: it only fails when the application - * attempts to force a destination partition that does not exist - * in the cluster. Note we must clear the RD_KAFKA_MSG_F_FREE - * flag since our contract says we don't free the payload on - * failure. */ + /* Handle partitioner failures: it only fails when the application + * attempts to force a destination partition that does not exist + * in the cluster. Note we must clear the RD_KAFKA_MSG_F_FREE + * flag since our contract says we don't free the payload on + * failure. */ - rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE; - rd_kafka_msg_destroy(rkt->rkt_rk, rkm); + rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE; + rd_kafka_msg_destroy(rkt->rkt_rk, rkm); - /* Translate error codes to errnos. 
*/ - if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - rd_kafka_set_last_error(err, ESRCH); - else if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) - rd_kafka_set_last_error(err, ENOENT); - else - rd_kafka_set_last_error(err, EINVAL); /* NOTREACHED */ + /* Translate error codes to errnos. */ + if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + rd_kafka_set_last_error(err, ESRCH); + else if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + rd_kafka_set_last_error(err, ENOENT); + else + rd_kafka_set_last_error(err, EINVAL); /* NOTREACHED */ - return -1; + return -1; } /** @remark Keep rd_kafka_produceva() and rd_kafka_producev() in synch */ -rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, - const rd_kafka_vu_t *vus, - size_t cnt) { +rd_kafka_error_t * +rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt) { rd_kafka_msg_t s_rkm = { - /* Message defaults */ - .rkm_partition = RD_KAFKA_PARTITION_UA, - .rkm_timestamp = 0, /* current time */ + /* Message defaults */ + .rkm_partition = RD_KAFKA_PARTITION_UA, + .rkm_timestamp = 0, /* current time */ }; - rd_kafka_msg_t *rkm = &s_rkm; - rd_kafka_topic_t *rkt = NULL; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_error_t *error = NULL; - rd_kafka_headers_t *hdrs = NULL; + rd_kafka_msg_t *rkm = &s_rkm; + rd_kafka_topic_t *rkt = NULL; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_error_t *error = NULL; + rd_kafka_headers_t *hdrs = NULL; rd_kafka_headers_t *app_hdrs = NULL; /* App-provided headers list */ size_t i; if (unlikely(rd_kafka_check_produce(rk, &error))) return error; - for (i = 0 ; i < cnt ; i++) { + for (i = 0; i < cnt; i++) { const rd_kafka_vu_t *vu = &vus[i]; - switch (vu->vtype) - { + switch (vu->vtype) { case RD_KAFKA_VTYPE_TOPIC: - rkt = rd_kafka_topic_new0(rk, - vu->u.cstr, - NULL, NULL, 1); + rkt = + rd_kafka_topic_new0(rk, vu->u.cstr, NULL, NULL, 1); break; case RD_KAFKA_VTYPE_RKT: @@ -396,11 +397,11 @@ rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t 
*rk, case RD_KAFKA_VTYPE_VALUE: rkm->rkm_payload = vu->u.mem.ptr; - rkm->rkm_len = vu->u.mem.size; + rkm->rkm_len = vu->u.mem.size; break; case RD_KAFKA_VTYPE_KEY: - rkm->rkm_key = vu->u.mem.ptr; + rkm->rkm_key = vu->u.mem.ptr; rkm->rkm_key_len = vu->u.mem.size; break; @@ -419,24 +420,22 @@ rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, case RD_KAFKA_VTYPE_HEADER: if (unlikely(app_hdrs != NULL)) { error = rd_kafka_error_new( - RD_KAFKA_RESP_ERR__CONFLICT, - "VTYPE_HEADER and VTYPE_HEADERS " - "are mutually exclusive"); + RD_KAFKA_RESP_ERR__CONFLICT, + "VTYPE_HEADER and VTYPE_HEADERS " + "are mutually exclusive"); goto err; } if (unlikely(!hdrs)) hdrs = rd_kafka_headers_new(8); - err = rd_kafka_header_add(hdrs, - vu->u.header.name, -1, + err = rd_kafka_header_add(hdrs, vu->u.header.name, -1, vu->u.header.val, vu->u.header.size); if (unlikely(err)) { error = rd_kafka_error_new( - err, - "Failed to add header: %s", - rd_kafka_err2str(err)); + err, "Failed to add header: %s", + rd_kafka_err2str(err)); goto err; } break; @@ -444,9 +443,9 @@ rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, case RD_KAFKA_VTYPE_HEADERS: if (unlikely(hdrs != NULL)) { error = rd_kafka_error_new( - RD_KAFKA_RESP_ERR__CONFLICT, - "VTYPE_HEADERS and VTYPE_HEADER " - "are mutually exclusive"); + RD_KAFKA_RESP_ERR__CONFLICT, + "VTYPE_HEADERS and VTYPE_HEADER " + "are mutually exclusive"); goto err; } app_hdrs = vu->u.headers; @@ -454,8 +453,8 @@ rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, default: error = rd_kafka_error_new( - RD_KAFKA_RESP_ERR__INVALID_ARG, - "Unsupported VTYPE %d", (int)vu->vtype); + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Unsupported VTYPE %d", (int)vu->vtype); goto err; } } @@ -463,28 +462,19 @@ rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, rd_assert(!error); if (unlikely(!rkt)) { - error = rd_kafka_error_new( - RD_KAFKA_RESP_ERR__INVALID_ARG, - "Topic name or object required"); + error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Topic 
name or object required"); goto err; } - rkm = rd_kafka_msg_new0(rkt, - rkm->rkm_partition, - rkm->rkm_flags, - rkm->rkm_payload, rkm->rkm_len, - rkm->rkm_key, rkm->rkm_key_len, - rkm->rkm_opaque, - &err, NULL, - app_hdrs ? app_hdrs : hdrs, - rkm->rkm_timestamp, - rd_clock()); + rkm = rd_kafka_msg_new0( + rkt, rkm->rkm_partition, rkm->rkm_flags, rkm->rkm_payload, + rkm->rkm_len, rkm->rkm_key, rkm->rkm_key_len, rkm->rkm_opaque, &err, + NULL, app_hdrs ? app_hdrs : hdrs, rkm->rkm_timestamp, rd_clock()); if (unlikely(err)) { - error = rd_kafka_error_new( - err, - "Failed to produce message: %s", - rd_kafka_err2str(err)); + error = rd_kafka_error_new(err, "Failed to produce message: %s", + rd_kafka_err2str(err)); goto err; } @@ -513,8 +503,7 @@ rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, rd_kafka_msg_destroy(rk, rkm); - error = rd_kafka_error_new(err, - "Failed to enqueue message: %s", + error = rd_kafka_error_new(err, "Failed to enqueue message: %s", rd_kafka_err2str(err)); goto err; } @@ -523,7 +512,7 @@ rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, return NULL; - err: +err: if (rkt) rd_kafka_topic_destroy0(rkt); @@ -537,18 +526,18 @@ rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, /** @remark Keep rd_kafka_produceva() and rd_kafka_producev() in synch */ -rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...) 
{ va_list ap; rd_kafka_msg_t s_rkm = { - /* Message defaults */ - .rkm_partition = RD_KAFKA_PARTITION_UA, - .rkm_timestamp = 0, /* current time */ + /* Message defaults */ + .rkm_partition = RD_KAFKA_PARTITION_UA, + .rkm_timestamp = 0, /* current time */ }; rd_kafka_msg_t *rkm = &s_rkm; rd_kafka_vtype_t vtype; rd_kafka_topic_t *rkt = NULL; rd_kafka_resp_err_t err; - rd_kafka_headers_t *hdrs = NULL; + rd_kafka_headers_t *hdrs = NULL; rd_kafka_headers_t *app_hdrs = NULL; /* App-provided headers list */ if (unlikely((err = rd_kafka_check_produce(rk, NULL)))) @@ -557,17 +546,15 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { va_start(ap, rk); while (!err && (vtype = va_arg(ap, rd_kafka_vtype_t)) != RD_KAFKA_VTYPE_END) { - switch (vtype) - { + switch (vtype) { case RD_KAFKA_VTYPE_TOPIC: - rkt = rd_kafka_topic_new0(rk, - va_arg(ap, const char *), + rkt = rd_kafka_topic_new0(rk, va_arg(ap, const char *), NULL, NULL, 1); break; case RD_KAFKA_VTYPE_RKT: rkt = rd_kafka_topic_proper( - va_arg(ap, rd_kafka_topic_t *)); + va_arg(ap, rd_kafka_topic_t *)); rd_kafka_topic_keep(rkt); break; @@ -577,11 +564,11 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { case RD_KAFKA_VTYPE_VALUE: rkm->rkm_payload = va_arg(ap, void *); - rkm->rkm_len = va_arg(ap, size_t); + rkm->rkm_len = va_arg(ap, size_t); break; case RD_KAFKA_VTYPE_KEY: - rkm->rkm_key = va_arg(ap, void *); + rkm->rkm_key = va_arg(ap, void *); rkm->rkm_key_len = va_arg(ap, size_t); break; @@ -597,8 +584,7 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { rkm->rkm_timestamp = va_arg(ap, int64_t); break; - case RD_KAFKA_VTYPE_HEADER: - { + case RD_KAFKA_VTYPE_HEADER: { const char *name; const void *value; ssize_t size; @@ -611,13 +597,12 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) 
{ if (unlikely(!hdrs)) hdrs = rd_kafka_headers_new(8); - name = va_arg(ap, const char *); + name = va_arg(ap, const char *); value = va_arg(ap, const void *); - size = va_arg(ap, ssize_t); + size = va_arg(ap, ssize_t); err = rd_kafka_header_add(hdrs, name, -1, value, size); - } - break; + } break; case RD_KAFKA_VTYPE_HEADERS: if (unlikely(hdrs != NULL)) { @@ -639,16 +624,11 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { return RD_KAFKA_RESP_ERR__INVALID_ARG; if (likely(!err)) - rkm = rd_kafka_msg_new0(rkt, - rkm->rkm_partition, - rkm->rkm_flags, - rkm->rkm_payload, rkm->rkm_len, - rkm->rkm_key, rkm->rkm_key_len, - rkm->rkm_opaque, - &err, NULL, - app_hdrs ? app_hdrs : hdrs, - rkm->rkm_timestamp, - rd_clock()); + rkm = rd_kafka_msg_new0( + rkt, rkm->rkm_partition, rkm->rkm_flags, rkm->rkm_payload, + rkm->rkm_len, rkm->rkm_key, rkm->rkm_key_len, + rkm->rkm_opaque, &err, NULL, app_hdrs ? app_hdrs : hdrs, + rkm->rkm_timestamp, rd_clock()); if (unlikely(err)) { rd_kafka_topic_destroy0(rkt); @@ -695,14 +675,16 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { * @locality any application thread * @locks none */ -int rd_kafka_produce (rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque) { - return rd_kafka_msg_new(rkt, partition, - msgflags, payload, len, - key, keylen, msg_opaque); +int rd_kafka_produce(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque) { + return rd_kafka_msg_new(rkt, partition, msgflags, payload, len, key, + keylen, msg_opaque); } @@ -712,18 +694,20 @@ int rd_kafka_produce (rd_kafka_topic_t *rkt, int32_t partition, * Returns the number of messages succesfully queued for producing. * Each message's .err will be set accordingly. 
*/ -int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, - int msgflags, - rd_kafka_message_t *rkmessages, int message_cnt) { +int rd_kafka_produce_batch(rd_kafka_topic_t *app_rkt, + int32_t partition, + int msgflags, + rd_kafka_message_t *rkmessages, + int message_cnt) { rd_kafka_msgq_t tmpq = RD_KAFKA_MSGQ_INITIALIZER(tmpq); int i; - int64_t utc_now = rd_uclock() / 1000; - rd_ts_t now = rd_clock(); - int good = 0; + int64_t utc_now = rd_uclock() / 1000; + rd_ts_t now = rd_clock(); + int good = 0; int multiple_partitions = (partition == RD_KAFKA_PARTITION_UA || (msgflags & RD_KAFKA_MSG_F_PARTITION)); rd_kafka_resp_err_t all_err; - rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); + rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); rd_kafka_toppar_t *rktp = NULL; /* Propagated per-message below */ @@ -733,7 +717,7 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, if (!multiple_partitions) { /* Single partition: look up the rktp once. */ rktp = rd_kafka_toppar_get_avail(rkt, partition, - 1/*ua on miss*/, &all_err); + 1 /*ua on miss*/, &all_err); } else { /* Indicate to lower-level msg_new..() that rkt is locked @@ -741,7 +725,7 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, msgflags |= RD_KAFKA_MSG_F_RKT_RDLOCKED; } - for (i = 0 ; i < message_cnt ; i++) { + for (i = 0; i < message_cnt; i++) { rd_kafka_msg_t *rkm; /* Propagate error for all messages. */ @@ -751,22 +735,20 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, } /* Create message */ - rkm = rd_kafka_msg_new0(rkt, - (msgflags & RD_KAFKA_MSG_F_PARTITION) ? - rkmessages[i].partition : partition, - msgflags, - rkmessages[i].payload, - rkmessages[i].len, - rkmessages[i].key, - rkmessages[i].key_len, - rkmessages[i]._private, - &rkmessages[i].err, NULL, - NULL, utc_now, now); + rkm = rd_kafka_msg_new0( + rkt, + (msgflags & RD_KAFKA_MSG_F_PARTITION) + ? 
rkmessages[i].partition + : partition, + msgflags, rkmessages[i].payload, rkmessages[i].len, + rkmessages[i].key, rkmessages[i].key_len, + rkmessages[i]._private, &rkmessages[i].err, NULL, NULL, + utc_now, now); if (unlikely(!rkm)) { - if (rkmessages[i].err == RD_KAFKA_RESP_ERR__QUEUE_FULL) - all_err = rkmessages[i].err; + if (rkmessages[i].err == RD_KAFKA_RESP_ERR__QUEUE_FULL) + all_err = rkmessages[i].err; continue; - } + } /* Three cases here: * partition==UA: run the partitioner (slow) @@ -777,19 +759,17 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, if (multiple_partitions) { if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) { /* Partition the message */ - rkmessages[i].err = - rd_kafka_msg_partitioner( - rkt, rkm, 0/*already locked*/); + rkmessages[i].err = rd_kafka_msg_partitioner( + rkt, rkm, 0 /*already locked*/); } else { - if (rktp == NULL || - rkm->rkm_partition != - rktp->rktp_partition) { + if (rktp == NULL || rkm->rkm_partition != + rktp->rktp_partition) { rd_kafka_resp_err_t err; if (rktp != NULL) rd_kafka_toppar_destroy(rktp); rktp = rd_kafka_toppar_get_avail( - rkt, rkm->rkm_partition, - 1/*ua on miss*/, &err); + rkt, rkm->rkm_partition, + 1 /*ua on miss*/, &err); if (unlikely(!rktp)) { rkmessages[i].err = err; @@ -807,7 +787,7 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, if (unlikely(rkmessages[i].err)) { /* Interceptors: Unroll on_send by on_ack.. 
*/ rd_kafka_interceptors_on_acknowledgement( - rkt->rkt_rk, &rkmessages[i]); + rkt->rkt_rk, &rkmessages[i]); rd_kafka_msg_destroy(rkt->rkt_rk, rkm); continue; @@ -825,7 +805,7 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, rd_kafka_topic_rdunlock(rkt); - if (!multiple_partitions && good > 0 && + if (!multiple_partitions && good > 0 && rd_kafka_is_transactional(rkt->rkt_rk) && rktp->rktp_partition != RD_KAFKA_PARTITION_UA) { /* Add single destination partition to transaction */ @@ -850,11 +830,11 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, * @locality any * @locks toppar_lock MUST be held */ -int rd_kafka_msgq_age_scan (rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq, - rd_kafka_msgq_t *timedout, - rd_ts_t now, - rd_ts_t *abs_next_timeout) { +int rd_kafka_msgq_age_scan(rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + rd_kafka_msgq_t *timedout, + rd_ts_t now, + rd_ts_t *abs_next_timeout) { rd_kafka_msg_t *rkm, *tmp, *first = NULL; int cnt = timedout->rkmq_msg_cnt; @@ -882,19 +862,18 @@ int rd_kafka_msgq_age_scan (rd_kafka_toppar_t *rktp, } -int -rd_kafka_msgq_enq_sorted0 (rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm, - int (*order_cmp) (const void *, const void *)) { - TAILQ_INSERT_SORTED(&rkmq->rkmq_msgs, rkm, rd_kafka_msg_t *, - rkm_link, order_cmp); - rkmq->rkmq_msg_bytes += rkm->rkm_len+rkm->rkm_key_len; +int rd_kafka_msgq_enq_sorted0(rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm, + int (*order_cmp)(const void *, const void *)) { + TAILQ_INSERT_SORTED(&rkmq->rkmq_msgs, rkm, rd_kafka_msg_t *, rkm_link, + order_cmp); + rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len; return ++rkmq->rkmq_msg_cnt; } -int rd_kafka_msgq_enq_sorted (const rd_kafka_topic_t *rkt, - rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm) { +int rd_kafka_msgq_enq_sorted(const rd_kafka_topic_t *rkt, + rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm) { rd_dassert(rkm->rkm_u.producer.msgid != 0); return 
rd_kafka_msgq_enq_sorted0(rkmq, rkm, rkt->rkt_conf.msg_order_cmp); @@ -921,28 +900,28 @@ int rd_kafka_msgq_enq_sorted (const rd_kafka_topic_t *rkt, * @returns the insert position element, or NULL if \p rkm should be * added at tail of queue. */ -rd_kafka_msg_t *rd_kafka_msgq_find_pos (const rd_kafka_msgq_t *rkmq, - const rd_kafka_msg_t *start_pos, - const rd_kafka_msg_t *rkm, - int (*cmp) (const void *, - const void *), - int *cntp, int64_t *bytesp) { +rd_kafka_msg_t *rd_kafka_msgq_find_pos(const rd_kafka_msgq_t *rkmq, + const rd_kafka_msg_t *start_pos, + const rd_kafka_msg_t *rkm, + int (*cmp)(const void *, const void *), + int *cntp, + int64_t *bytesp) { const rd_kafka_msg_t *curr; - int cnt = 0; + int cnt = 0; int64_t bytes = 0; - for (curr = start_pos ? start_pos : rd_kafka_msgq_first(rkmq) ; - curr ; curr = TAILQ_NEXT(curr, rkm_link)) { + for (curr = start_pos ? start_pos : rd_kafka_msgq_first(rkmq); curr; + curr = TAILQ_NEXT(curr, rkm_link)) { if (cmp(rkm, curr) < 0) { if (cntp) { - *cntp = cnt; + *cntp = cnt; *bytesp = bytes; } return (rd_kafka_msg_t *)curr; } if (cntp) { cnt++; - bytes += rkm->rkm_len+rkm->rkm_key_len; + bytes += rkm->rkm_len + rkm->rkm_key_len; } } @@ -960,9 +939,11 @@ rd_kafka_msg_t *rd_kafka_msgq_find_pos (const rd_kafka_msgq_t *rkmq, * \p leftq after the split. * @param bytes is the bytes counterpart to \p cnt. 
*/ -void rd_kafka_msgq_split (rd_kafka_msgq_t *leftq, rd_kafka_msgq_t *rightq, - rd_kafka_msg_t *first_right, - int cnt, int64_t bytes) { +void rd_kafka_msgq_split(rd_kafka_msgq_t *leftq, + rd_kafka_msgq_t *rightq, + rd_kafka_msg_t *first_right, + int cnt, + int64_t bytes) { rd_kafka_msg_t *llast; rd_assert(first_right != TAILQ_FIRST(&leftq->rkmq_msgs)); @@ -972,12 +953,12 @@ void rd_kafka_msgq_split (rd_kafka_msgq_t *leftq, rd_kafka_msgq_t *rightq, rd_kafka_msgq_init(rightq); rightq->rkmq_msgs.tqh_first = first_right; - rightq->rkmq_msgs.tqh_last = leftq->rkmq_msgs.tqh_last; + rightq->rkmq_msgs.tqh_last = leftq->rkmq_msgs.tqh_last; first_right->rkm_link.tqe_prev = &rightq->rkmq_msgs.tqh_first; leftq->rkmq_msgs.tqh_last = &llast->rkm_link.tqe_next; - llast->rkm_link.tqe_next = NULL; + llast->rkm_link.tqe_next = NULL; rightq->rkmq_msg_cnt = leftq->rkmq_msg_cnt - cnt; rightq->rkmq_msg_bytes = leftq->rkmq_msg_bytes - bytes; @@ -992,17 +973,19 @@ void rd_kafka_msgq_split (rd_kafka_msgq_t *leftq, rd_kafka_msgq_t *rightq, /** * @brief Set per-message metadata for all messages in \p rkmq */ -void rd_kafka_msgq_set_metadata (rd_kafka_msgq_t *rkmq, int32_t broker_id, - int64_t base_offset, int64_t timestamp, - rd_kafka_msg_status_t status) { +void rd_kafka_msgq_set_metadata(rd_kafka_msgq_t *rkmq, + int32_t broker_id, + int64_t base_offset, + int64_t timestamp, + rd_kafka_msg_status_t status) { rd_kafka_msg_t *rkm; TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) { rkm->rkm_broker_id = broker_id; - rkm->rkm_offset = base_offset++; + rkm->rkm_offset = base_offset++; if (timestamp != -1) { rkm->rkm_timestamp = timestamp; - rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; + rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; } /* Don't downgrade a message from any form of PERSISTED @@ -1010,7 +993,8 @@ void rd_kafka_msgq_set_metadata (rd_kafka_msgq_t *rkmq, int32_t broker_id, * PERSISTED can't be changed. * E.g., a previous ack or in-flight timeout. 
*/ if (unlikely(status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED && - rkm->rkm_status != RD_KAFKA_MSG_STATUS_NOT_PERSISTED)) + rkm->rkm_status != + RD_KAFKA_MSG_STATUS_NOT_PERSISTED)) continue; rkm->rkm_status = status; @@ -1023,15 +1007,16 @@ void rd_kafka_msgq_set_metadata (rd_kafka_msgq_t *rkmq, int32_t broker_id, * * @remark src must be ordered */ -void rd_kafka_msgq_move_acked (rd_kafka_msgq_t *dest, rd_kafka_msgq_t *src, - uint64_t last_msgid, - rd_kafka_msg_status_t status) { +void rd_kafka_msgq_move_acked(rd_kafka_msgq_t *dest, + rd_kafka_msgq_t *src, + uint64_t last_msgid, + rd_kafka_msg_status_t status) { rd_kafka_msg_t *rkm; while ((rkm = rd_kafka_msgq_first(src)) && rkm->rkm_u.producer.msgid <= last_msgid) { rd_kafka_msgq_deq(src, rkm, 1); - rd_kafka_msgq_enq(dest, rkm); + rd_kafka_msgq_enq(dest, rkm); rkm->rkm_status = status; } @@ -1042,100 +1027,92 @@ void rd_kafka_msgq_move_acked (rd_kafka_msgq_t *dest, rd_kafka_msgq_t *src, -int32_t rd_kafka_msg_partitioner_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { - int32_t p = rd_jitter(0, partition_cnt-1); - if (unlikely(!rd_kafka_topic_partition_available(rkt, p))) - return rd_jitter(0, partition_cnt-1); - else - return p; +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + int32_t p = rd_jitter(0, partition_cnt - 1); + if (unlikely(!rd_kafka_topic_partition_available(rkt, p))) + return rd_jitter(0, partition_cnt - 1); + else + return p; } -int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { - return rd_crc32(key, keylen) % partition_cnt; +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void 
*rkt_opaque, + void *msg_opaque) { + return rd_crc32(key, keylen) % partition_cnt; } -int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { - if (keylen == 0) - return rd_kafka_msg_partitioner_random(rkt, - key, - keylen, - partition_cnt, - rkt_opaque, - msg_opaque); - else - return rd_kafka_msg_partitioner_consistent(rkt, - key, - keylen, - partition_cnt, - rkt_opaque, - msg_opaque); +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + if (keylen == 0) + return rd_kafka_msg_partitioner_random( + rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque); + else + return rd_kafka_msg_partitioner_consistent( + rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque); } -int32_t rd_kafka_msg_partitioner_murmur2 (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { return (rd_murmur2(key, keylen) & 0x7fffffff) % partition_cnt; } -int32_t rd_kafka_msg_partitioner_murmur2_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { if (!key) - return rd_kafka_msg_partitioner_random(rkt, - key, - keylen, - partition_cnt, - rkt_opaque, - msg_opaque); + return rd_kafka_msg_partitioner_random( + rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque); else return (rd_murmur2(key, keylen) & 0x7fffffff) % partition_cnt; } 
-int32_t rd_kafka_msg_partitioner_fnv1a (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { return rd_fnv1a(key, keylen) % partition_cnt; } -int32_t rd_kafka_msg_partitioner_fnv1a_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { if (!key) - return rd_kafka_msg_partitioner_random(rkt, - key, - keylen, - partition_cnt, - rkt_opaque, - msg_opaque); + return rd_kafka_msg_partitioner_random( + rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque); else return rd_fnv1a(key, keylen) % partition_cnt; } -int32_t rd_kafka_msg_sticky_partition (rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +int32_t rd_kafka_msg_sticky_partition(rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { if (!rd_kafka_topic_partition_available(rkt, rkt->rkt_sticky_partition)) rd_interval_expedite(&rkt->rkt_sticky_intvl, 0); @@ -1143,15 +1120,10 @@ int32_t rd_kafka_msg_sticky_partition (rd_kafka_topic_t *rkt, if (rd_interval(&rkt->rkt_sticky_intvl, rkt->rkt_rk->rk_conf.sticky_partition_linger_ms * 1000, 0) > 0) { - rkt->rkt_sticky_partition = - rd_kafka_msg_partitioner_random(rkt, - key, - keylen, - partition_cnt, - rkt_opaque, - msg_opaque); + rkt->rkt_sticky_partition = rd_kafka_msg_partitioner_random( + rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque); rd_kafka_dbg(rkt->rkt_rk, TOPIC, "PARTITIONER", - "%s [%"PRId32"] is the new 
sticky partition", + "%s [%" PRId32 "] is the new sticky partition", rkt->rkt_topic->str, rkt->rkt_sticky_partition); } @@ -1169,30 +1141,30 @@ int32_t rd_kafka_msg_sticky_partition (rd_kafka_topic_t *rkt, * @locality any * @locks rd_kafka_ */ -int rd_kafka_msg_partitioner (rd_kafka_topic_t *rkt, rd_kafka_msg_t *rkm, - rd_dolock_t do_lock) { - int32_t partition; - rd_kafka_toppar_t *rktp_new; - rd_kafka_resp_err_t err; +int rd_kafka_msg_partitioner(rd_kafka_topic_t *rkt, + rd_kafka_msg_t *rkm, + rd_dolock_t do_lock) { + int32_t partition; + rd_kafka_toppar_t *rktp_new; + rd_kafka_resp_err_t err; - if (do_lock) - rd_kafka_topic_rdlock(rkt); + if (do_lock) + rd_kafka_topic_rdlock(rkt); - switch (rkt->rkt_state) - { + switch (rkt->rkt_state) { case RD_KAFKA_TOPIC_S_UNKNOWN: /* No metadata received from cluster yet. * Put message in UA partition and re-run partitioner when * cluster comes up. */ - partition = RD_KAFKA_PARTITION_UA; + partition = RD_KAFKA_PARTITION_UA; break; case RD_KAFKA_TOPIC_S_NOTEXISTS: /* Topic not found in cluster. * Fail message immediately. */ err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; - if (do_lock) - rd_kafka_topic_rdunlock(rkt); + if (do_lock) + rd_kafka_topic_rdunlock(rkt); return err; case RD_KAFKA_TOPIC_S_ERROR: @@ -1221,23 +1193,16 @@ int rd_kafka_msg_partitioner (rd_kafka_topic_t *rkt, rd_kafka_msg_t *rkm, (!rkm->rkm_key || (rkm->rkm_key_len == 0 && rkt->rkt_conf.partitioner == - rd_kafka_msg_partitioner_consistent_random))) { - partition = - rd_kafka_msg_sticky_partition( - rkt, - rkm->rkm_key, - rkm->rkm_key_len, - rkt->rkt_partition_cnt, - rkt->rkt_conf.opaque, - rkm->rkm_opaque); + rd_kafka_msg_partitioner_consistent_random))) { + partition = rd_kafka_msg_sticky_partition( + rkt, rkm->rkm_key, rkm->rkm_key_len, + rkt->rkt_partition_cnt, + rkt->rkt_conf.opaque, rkm->rkm_opaque); } else { - partition = rkt->rkt_conf. 
- partitioner(rkt, - rkm->rkm_key, - rkm->rkm_key_len, - rkt->rkt_partition_cnt, - rkt->rkt_conf.opaque, - rkm->rkm_opaque); + partition = rkt->rkt_conf.partitioner( + rkt, rkm->rkm_key, rkm->rkm_key_len, + rkt->rkt_partition_cnt, + rkt->rkt_conf.opaque, rkm->rkm_opaque); } } else partition = rkm->rkm_partition; @@ -1256,21 +1221,21 @@ int rd_kafka_msg_partitioner (rd_kafka_topic_t *rkt, rd_kafka_msg_t *rkm, break; } - /* Get new partition */ - rktp_new = rd_kafka_toppar_get(rkt, partition, 0); + /* Get new partition */ + rktp_new = rd_kafka_toppar_get(rkt, partition, 0); - if (unlikely(!rktp_new)) { - /* Unknown topic or partition */ - if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) - err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; - else - err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + if (unlikely(!rktp_new)) { + /* Unknown topic or partition */ + if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) + err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + else + err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - if (do_lock) - rd_kafka_topic_rdunlock(rkt); + if (do_lock) + rd_kafka_topic_rdunlock(rkt); - return err; - } + return err; + } rd_atomic64_add(&rktp_new->rktp_c.producer_enq_msgs, 1); @@ -1278,10 +1243,10 @@ int rd_kafka_msg_partitioner (rd_kafka_topic_t *rkt, rd_kafka_msg_t *rkm, if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) rkm->rkm_partition = partition; - /* Partition is available: enqueue msg on partition's queue */ - rd_kafka_toppar_enq_msg(rktp_new, rkm); - if (do_lock) - rd_kafka_topic_rdunlock(rkt); + /* Partition is available: enqueue msg on partition's queue */ + rd_kafka_toppar_enq_msg(rktp_new, rkm); + if (do_lock) + rd_kafka_topic_rdunlock(rkt); if (rktp_new->rktp_partition != RD_KAFKA_PARTITION_UA && rd_kafka_is_transactional(rkt->rkt_rk)) { @@ -1289,17 +1254,16 @@ int rd_kafka_msg_partitioner (rd_kafka_topic_t *rkt, rd_kafka_msg_t *rkm, rd_kafka_txn_add_partition(rktp_new); } - rd_kafka_toppar_destroy(rktp_new); /* from _get() */ - return 0; + 
rd_kafka_toppar_destroy(rktp_new); /* from _get() */ + return 0; } - /** * @name Public message type (rd_kafka_message_t) */ -void rd_kafka_message_destroy (rd_kafka_message_t *rkmessage) { +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage) { rd_kafka_op_t *rko; if (likely((rko = (rd_kafka_op_t *)rkmessage->_private) != NULL)) @@ -1311,7 +1275,7 @@ void rd_kafka_message_destroy (rd_kafka_message_t *rkmessage) { } -rd_kafka_message_t *rd_kafka_message_new (void) { +rd_kafka_message_t *rd_kafka_message_new(void) { rd_kafka_msg_t *rkm = rd_calloc(1, sizeof(*rkm)); rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM; rkm->rkm_broker_id = -1; @@ -1324,7 +1288,7 @@ rd_kafka_message_t *rd_kafka_message_new (void) { * @remark Will trigger on_consume() interceptors if any. */ static rd_kafka_message_t * -rd_kafka_message_setup (rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) { +rd_kafka_message_setup(rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) { rd_kafka_topic_t *rkt; rd_kafka_toppar_t *rktp = NULL; @@ -1333,7 +1297,7 @@ rd_kafka_message_setup (rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) { } else { if (rko->rko_rktp) { rktp = rko->rko_rktp; - rkt = rktp->rktp_rkt; + rkt = rktp->rktp_rkt; } else rkt = NULL; @@ -1351,8 +1315,7 @@ rd_kafka_message_setup (rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) { rkmessage->err = rko->rko_err; /* Call on_consume interceptors */ - switch (rko->rko_type) - { + switch (rko->rko_type) { case RD_KAFKA_OP_FETCH: if (!rkmessage->err && rkt) rd_kafka_interceptors_on_consume(rkt->rkt_rk, @@ -1372,8 +1335,8 @@ rd_kafka_message_setup (rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) { * @brief Get rkmessage from rkm (for EVENT_DR) * @remark Must only be called just prior to passing a dr to the application. 
*/ -rd_kafka_message_t *rd_kafka_message_get_from_rkm (rd_kafka_op_t *rko, - rd_kafka_msg_t *rkm) { +rd_kafka_message_t *rd_kafka_message_get_from_rkm(rd_kafka_op_t *rko, + rd_kafka_msg_t *rkm) { return rd_kafka_message_setup(rko, &rkm->rkm_rkmessage); } @@ -1384,14 +1347,13 @@ rd_kafka_message_t *rd_kafka_message_get_from_rkm (rd_kafka_op_t *rko, * @remark Will trigger on_consume() interceptors, if any. * @returns a rkmessage (bound to the rko). */ -rd_kafka_message_t *rd_kafka_message_get (rd_kafka_op_t *rko) { +rd_kafka_message_t *rd_kafka_message_get(rd_kafka_op_t *rko) { rd_kafka_message_t *rkmessage; if (!rko) return rd_kafka_message_new(); /* empty */ - switch (rko->rko_type) - { + switch (rko->rko_type) { case RD_KAFKA_OP_FETCH: /* Use embedded rkmessage */ rkmessage = &rko->rko_u.fetch.rkm.rkm_rkmessage; @@ -1399,11 +1361,11 @@ rd_kafka_message_t *rd_kafka_message_get (rd_kafka_op_t *rko) { case RD_KAFKA_OP_ERR: case RD_KAFKA_OP_CONSUMER_ERR: - rkmessage = &rko->rko_u.err.rkm.rkm_rkmessage; + rkmessage = &rko->rko_u.err.rkm.rkm_rkmessage; rkmessage->payload = rko->rko_u.err.errstr; - rkmessage->len = rkmessage->payload ? - strlen(rkmessage->payload) : 0; - rkmessage->offset = rko->rko_u.err.offset; + rkmessage->len = + rkmessage->payload ? 
strlen(rkmessage->payload) : 0; + rkmessage->offset = rko->rko_u.err.offset; break; default: @@ -1416,8 +1378,8 @@ rd_kafka_message_t *rd_kafka_message_get (rd_kafka_op_t *rko) { } -int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, - rd_kafka_timestamp_type_t *tstype) { +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, + rd_kafka_timestamp_type_t *tstype) { rd_kafka_msg_t *rkm; if (rkmessage->err) { @@ -1435,7 +1397,7 @@ int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, } -int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage) { +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage) { rd_kafka_msg_t *rkm; rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage); @@ -1447,7 +1409,7 @@ int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage) { } -int32_t rd_kafka_message_broker_id (const rd_kafka_message_t *rkmessage) { +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage) { rd_kafka_msg_t *rkm; rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage); @@ -1461,11 +1423,11 @@ int32_t rd_kafka_message_broker_id (const rd_kafka_message_t *rkmessage) { * @brief Parse serialized message headers and populate * rkm->rkm_headers (which must be NULL). */ -static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { +static rd_kafka_resp_err_t rd_kafka_msg_headers_parse(rd_kafka_msg_t *rkm) { rd_kafka_buf_t *rkbuf; int64_t HeaderCount; const int log_decode_errors = 0; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG; int i; rd_kafka_headers_t *hdrs = NULL; @@ -1474,10 +1436,9 @@ static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { if (RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs) == 0) return RD_KAFKA_RESP_ERR__NOENT; - rkbuf = rd_kafka_buf_new_shadow(rkm->rkm_u.consumer.binhdrs.data, - RD_KAFKAP_BYTES_LEN(&rkm->rkm_u. 
- consumer.binhdrs), - NULL); + rkbuf = rd_kafka_buf_new_shadow( + rkm->rkm_u.consumer.binhdrs.data, + RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs), NULL); rd_kafka_buf_read_varint(rkbuf, &HeaderCount); @@ -1491,7 +1452,7 @@ static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { hdrs = rd_kafka_headers_new((size_t)HeaderCount); - for (i = 0 ; (int64_t)i < HeaderCount ; i++) { + for (i = 0; (int64_t)i < HeaderCount; i++) { int64_t KeyLen, ValueLen; const char *Key, *Value; @@ -1504,8 +1465,8 @@ static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { else rd_kafka_buf_read_ptr(rkbuf, &Value, (size_t)ValueLen); - rd_kafka_header_add(hdrs, Key, (ssize_t)KeyLen, - Value, (ssize_t)ValueLen); + rd_kafka_header_add(hdrs, Key, (ssize_t)KeyLen, Value, + (ssize_t)ValueLen); } rkm->rkm_headers = hdrs; @@ -1513,7 +1474,7 @@ static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { rd_kafka_buf_destroy(rkbuf); return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: err = rkbuf->rkbuf_err; rd_kafka_buf_destroy(rkbuf); if (hdrs) @@ -1523,10 +1484,9 @@ static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { - rd_kafka_resp_err_t -rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp) { +rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp) { rd_kafka_msg_t *rkm; rd_kafka_resp_err_t err; @@ -1558,8 +1518,8 @@ rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, rd_kafka_resp_err_t -rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp) { +rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp) { rd_kafka_msg_t *rkm; rd_kafka_resp_err_t err; @@ -1574,8 +1534,8 @@ rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, } -void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t *hdrs) { +void 
rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t *hdrs) { rd_kafka_msg_t *rkm; rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage); @@ -1591,7 +1551,7 @@ void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, rd_kafka_msg_status_t -rd_kafka_message_status (const rd_kafka_message_t *rkmessage) { +rd_kafka_message_status(const rd_kafka_message_t *rkmessage) { rd_kafka_msg_t *rkm; rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage); @@ -1600,29 +1560,29 @@ rd_kafka_message_status (const rd_kafka_message_t *rkmessage) { } -void rd_kafka_msgq_dump (FILE *fp, const char *what, rd_kafka_msgq_t *rkmq) { +void rd_kafka_msgq_dump(FILE *fp, const char *what, rd_kafka_msgq_t *rkmq) { rd_kafka_msg_t *rkm; int cnt = 0; - fprintf(fp, "%s msgq_dump (%d messages, %"PRIusz" bytes):\n", what, + fprintf(fp, "%s msgq_dump (%d messages, %" PRIusz " bytes):\n", what, rd_kafka_msgq_len(rkmq), rd_kafka_msgq_size(rkmq)); TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) { - fprintf(fp, " [%"PRId32"]@%"PRId64 - ": rkm msgid %"PRIu64": \"%.*s\"\n", + fprintf(fp, + " [%" PRId32 "]@%" PRId64 ": rkm msgid %" PRIu64 + ": \"%.*s\"\n", rkm->rkm_partition, rkm->rkm_offset, - rkm->rkm_u.producer.msgid, - (int)rkm->rkm_len, (const char *)rkm->rkm_payload); + rkm->rkm_u.producer.msgid, (int)rkm->rkm_len, + (const char *)rkm->rkm_payload); rd_assert(cnt++ < rkmq->rkmq_msg_cnt); } } - /** * @brief Destroy resources associated with msgbatch */ -void rd_kafka_msgbatch_destroy (rd_kafka_msgbatch_t *rkmb) { +void rd_kafka_msgbatch_destroy(rd_kafka_msgbatch_t *rkmb) { if (rkmb->rktp) { rd_kafka_toppar_destroy(rkmb->rktp); rkmb->rktp = NULL; @@ -1635,18 +1595,18 @@ void rd_kafka_msgbatch_destroy (rd_kafka_msgbatch_t *rkmb) { /** * @brief Initialize a message batch for the Idempotent Producer. 
*/ -void rd_kafka_msgbatch_init (rd_kafka_msgbatch_t *rkmb, - rd_kafka_toppar_t *rktp, - rd_kafka_pid_t pid, - uint64_t epoch_base_msgid) { +void rd_kafka_msgbatch_init(rd_kafka_msgbatch_t *rkmb, + rd_kafka_toppar_t *rktp, + rd_kafka_pid_t pid, + uint64_t epoch_base_msgid) { memset(rkmb, 0, sizeof(*rkmb)); rkmb->rktp = rd_kafka_toppar_keep(rktp); rd_kafka_msgq_init(&rkmb->msgq); - rkmb->pid = pid; - rkmb->first_seq = -1; + rkmb->pid = pid; + rkmb->first_seq = -1; rkmb->epoch_base_msgid = epoch_base_msgid; } @@ -1657,8 +1617,8 @@ void rd_kafka_msgbatch_init (rd_kafka_msgbatch_t *rkmb, * * @param rkm is the first message in the batch. */ -void rd_kafka_msgbatch_set_first_msg (rd_kafka_msgbatch_t *rkmb, - rd_kafka_msg_t *rkm) { +void rd_kafka_msgbatch_set_first_msg(rd_kafka_msgbatch_t *rkmb, + rd_kafka_msg_t *rkm) { rd_assert(rkmb->first_msgid == 0); if (!rd_kafka_pid_valid(rkmb->pid)) @@ -1691,9 +1651,9 @@ void rd_kafka_msgbatch_set_first_msg (rd_kafka_msgbatch_t *rkmb, * @remark This function assumes the batch will be transmitted and increases * the toppar's in-flight count. */ -void rd_kafka_msgbatch_ready_produce (rd_kafka_msgbatch_t *rkmb) { +void rd_kafka_msgbatch_ready_produce(rd_kafka_msgbatch_t *rkmb) { rd_kafka_toppar_t *rktp = rkmb->rktp; - rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; /* Keep track of number of requests in-flight per partition, * and the number of partitions with in-flight requests when @@ -1701,7 +1661,7 @@ void rd_kafka_msgbatch_ready_produce (rd_kafka_msgbatch_t *rkmb) { * before resetting the PID. */ if (rd_atomic32_add(&rktp->rktp_msgs_inflight, rd_kafka_msgq_len(&rkmb->msgq)) == - rd_kafka_msgq_len(&rkmb->msgq) && + rd_kafka_msgq_len(&rkmb->msgq) && rd_kafka_is_idempotent(rk)) rd_kafka_idemp_inflight_toppar_add(rk, rktp); } @@ -1711,15 +1671,16 @@ void rd_kafka_msgbatch_ready_produce (rd_kafka_msgbatch_t *rkmb) { * @brief Verify order (by msgid) in message queue. * For development use only. 
*/ -void rd_kafka_msgq_verify_order0 (const char *function, int line, - const rd_kafka_toppar_t *rktp, - const rd_kafka_msgq_t *rkmq, - uint64_t exp_first_msgid, - rd_bool_t gapless) { +void rd_kafka_msgq_verify_order0(const char *function, + int line, + const rd_kafka_toppar_t *rktp, + const rd_kafka_msgq_t *rkmq, + uint64_t exp_first_msgid, + rd_bool_t gapless) { const rd_kafka_msg_t *rkm; uint64_t exp; - int errcnt = 0; - int cnt = 0; + int errcnt = 0; + int cnt = 0; const char *topic = rktp ? rktp->rktp_rkt->rkt_topic->str : "n/a"; int32_t partition = rktp ? rktp->rktp_partition : -1; @@ -1742,40 +1703,38 @@ void rd_kafka_msgq_verify_order0 (const char *function, int line, topic, partition, cnt, rkm, rkm->rkm_u.producer.msgid); #endif - if (gapless && - rkm->rkm_u.producer.msgid != exp) { - printf("%s:%d: %s [%"PRId32"]: rkm #%d (%p) " - "msgid %"PRIu64": " - "expected msgid %"PRIu64"\n", - function, line, - topic, partition, - cnt, rkm, rkm->rkm_u.producer.msgid, - exp); + if (gapless && rkm->rkm_u.producer.msgid != exp) { + printf("%s:%d: %s [%" PRId32 + "]: rkm #%d (%p) " + "msgid %" PRIu64 + ": " + "expected msgid %" PRIu64 "\n", + function, line, topic, partition, cnt, rkm, + rkm->rkm_u.producer.msgid, exp); errcnt++; } else if (!gapless && rkm->rkm_u.producer.msgid < exp) { - printf("%s:%d: %s [%"PRId32"]: rkm #%d (%p) " - "msgid %"PRIu64": " - "expected increased msgid >= %"PRIu64"\n", - function, line, - topic, partition, - cnt, rkm, rkm->rkm_u.producer.msgid, - exp); + printf("%s:%d: %s [%" PRId32 + "]: rkm #%d (%p) " + "msgid %" PRIu64 + ": " + "expected increased msgid >= %" PRIu64 "\n", + function, line, topic, partition, cnt, rkm, + rkm->rkm_u.producer.msgid, exp); errcnt++; } else exp++; if (cnt >= rkmq->rkmq_msg_cnt) { - printf("%s:%d: %s [%"PRId32"]: rkm #%d (%p) " - "msgid %"PRIu64": loop in queue?\n", - function, line, - topic, partition, - cnt, rkm, rkm->rkm_u.producer.msgid); + printf("%s:%d: %s [%" PRId32 + "]: rkm #%d (%p) " + "msgid %" 
PRIu64 ": loop in queue?\n", + function, line, topic, partition, cnt, rkm, + rkm->rkm_u.producer.msgid); errcnt++; break; } cnt++; - } rd_assert(!errcnt); @@ -1790,18 +1749,18 @@ void rd_kafka_msgq_verify_order0 (const char *function, int line, /** * @brief Unittest: message allocator */ -rd_kafka_msg_t *ut_rd_kafka_msg_new (size_t msgsize) { +rd_kafka_msg_t *ut_rd_kafka_msg_new(size_t msgsize) { rd_kafka_msg_t *rkm; - rkm = rd_calloc(1, sizeof(*rkm)); - rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM; - rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID; - rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE; + rkm = rd_calloc(1, sizeof(*rkm)); + rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM; + rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID; + rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE; if (msgsize) { rd_assert(msgsize <= sizeof(*rkm)); rkm->rkm_payload = rkm; - rkm->rkm_len = msgsize; + rkm->rkm_len = msgsize; } return rkm; @@ -1812,11 +1771,11 @@ rd_kafka_msg_t *ut_rd_kafka_msg_new (size_t msgsize) { /** * @brief Unittest: destroy all messages in queue */ -void ut_rd_kafka_msgq_purge (rd_kafka_msgq_t *rkmq) { +void ut_rd_kafka_msgq_purge(rd_kafka_msgq_t *rkmq) { rd_kafka_msg_t *rkm, *tmp; TAILQ_FOREACH_SAFE(rkm, &rkmq->rkmq_msgs, rkm_link, tmp) - rd_kafka_msg_destroy(NULL, rkm); + rd_kafka_msg_destroy(NULL, rkm); rd_kafka_msgq_init(rkmq); @@ -1824,15 +1783,16 @@ void ut_rd_kafka_msgq_purge (rd_kafka_msgq_t *rkmq) { -static int ut_verify_msgq_order (const char *what, - const rd_kafka_msgq_t *rkmq, - uint64_t first, uint64_t last, - rd_bool_t req_consecutive) { +static int ut_verify_msgq_order(const char *what, + const rd_kafka_msgq_t *rkmq, + uint64_t first, + uint64_t last, + rd_bool_t req_consecutive) { const rd_kafka_msg_t *rkm; uint64_t expected = first; - int incr = first < last ? +1 : -1; - int fails = 0; - int cnt = 0; + int incr = first < last ? 
+1 : -1; + int fails = 0; + int cnt = 0; TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) { if ((req_consecutive && @@ -1840,12 +1800,10 @@ static int ut_verify_msgq_order (const char *what, (!req_consecutive && rkm->rkm_u.producer.msgid < expected)) { if (fails++ < 100) - RD_UT_SAY("%s: expected msgid %s %"PRIu64 - " not %"PRIu64" at index #%d", - what, - req_consecutive ? "==" : ">=", - expected, - rkm->rkm_u.producer.msgid, + RD_UT_SAY("%s: expected msgid %s %" PRIu64 + " not %" PRIu64 " at index #%d", + what, req_consecutive ? "==" : ">=", + expected, rkm->rkm_u.producer.msgid, cnt); } @@ -1866,18 +1824,19 @@ static int ut_verify_msgq_order (const char *what, /** * @brief Verify ordering comparator for message queues. */ -static int unittest_msgq_order (const char *what, int fifo, - int (*cmp) (const void *, const void *)) { +static int unittest_msgq_order(const char *what, + int fifo, + int (*cmp)(const void *, const void *)) { rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq); rd_kafka_msg_t *rkm; rd_kafka_msgq_t sendq, sendq2; const size_t msgsize = 100; int i; - RD_UT_SAY("%s: testing in %s mode", what, fifo? "FIFO" : "LIFO"); + RD_UT_SAY("%s: testing in %s mode", what, fifo ? 
"FIFO" : "LIFO"); - for (i = 1 ; i <= 6 ; i++) { - rkm = ut_rd_kafka_msg_new(msgsize); + for (i = 1; i <= 6; i++) { + rkm = ut_rd_kafka_msg_new(msgsize); rkm->rkm_u.producer.msgid = i; rd_kafka_msgq_enq_sorted0(&rkmq, rkm, cmp); } @@ -1991,7 +1950,7 @@ static int unittest_msgq_order (const char *what, int fifo, while (rd_kafka_msgq_len(&sendq2) < 3) rd_kafka_msgq_enq(&sendq2, rd_kafka_msgq_pop(&rkmq)); - rkm = ut_rd_kafka_msg_new(msgsize); + rkm = ut_rd_kafka_msg_new(msgsize); rkm->rkm_u.producer.msgid = i; rd_kafka_msgq_enq_sorted0(&rkmq, rkm, cmp); @@ -2016,8 +1975,8 @@ static int unittest_msgq_order (const char *what, int fifo, } RD_UT_ASSERT(rd_kafka_msgq_size(&rkmq) == - rd_kafka_msgq_len(&rkmq) * msgsize, - "expected msgq size %"PRIusz", not %"PRIusz, + rd_kafka_msgq_len(&rkmq) * msgsize, + "expected msgq size %" PRIusz ", not %" PRIusz, (size_t)rd_kafka_msgq_len(&rkmq) * msgsize, rd_kafka_msgq_size(&rkmq)); @@ -2027,38 +1986,37 @@ static int unittest_msgq_order (const char *what, int fifo, ut_rd_kafka_msgq_purge(&rkmq); return 0; - } /** * @brief Verify that rd_kafka_seq_wrap() works. 
*/ -static int unittest_msg_seq_wrap (void) { +static int unittest_msg_seq_wrap(void) { static const struct exp { int64_t in; int32_t out; } exp[] = { - { 0, 0 }, - { 1, 1 }, - { (int64_t)INT32_MAX+2, 1 }, - { (int64_t)INT32_MAX+1, 0 }, - { INT32_MAX, INT32_MAX }, - { INT32_MAX-1, INT32_MAX-1 }, - { INT32_MAX-2, INT32_MAX-2 }, - { ((int64_t)1<<33)-2, INT32_MAX-1 }, - { ((int64_t)1<<33)-1, INT32_MAX }, - { ((int64_t)1<<34), 0 }, - { ((int64_t)1<<35)+3, 3 }, - { 1710+1229, 2939 }, - { -1, -1 }, + {0, 0}, + {1, 1}, + {(int64_t)INT32_MAX + 2, 1}, + {(int64_t)INT32_MAX + 1, 0}, + {INT32_MAX, INT32_MAX}, + {INT32_MAX - 1, INT32_MAX - 1}, + {INT32_MAX - 2, INT32_MAX - 2}, + {((int64_t)1 << 33) - 2, INT32_MAX - 1}, + {((int64_t)1 << 33) - 1, INT32_MAX}, + {((int64_t)1 << 34), 0}, + {((int64_t)1 << 35) + 3, 3}, + {1710 + 1229, 2939}, + {-1, -1}, }; int i; - for (i = 0 ; exp[i].in != -1 ; i++) { + for (i = 0; exp[i].in != -1; i++) { int32_t wseq = rd_kafka_seq_wrap(exp[i].in); RD_UT_ASSERT(wseq == exp[i].out, - "Expected seq_wrap(%"PRId64") -> %"PRId32 - ", not %"PRId32, + "Expected seq_wrap(%" PRId64 ") -> %" PRId32 + ", not %" PRId32, exp[i].in, exp[i].out, wseq); } @@ -2069,12 +2027,14 @@ static int unittest_msg_seq_wrap (void) { /** * @brief Populate message queue with message ids from lo..hi (inclusive) */ -static void ut_msgq_populate (rd_kafka_msgq_t *rkmq, uint64_t lo, uint64_t hi, - size_t msgsize) { +static void ut_msgq_populate(rd_kafka_msgq_t *rkmq, + uint64_t lo, + uint64_t hi, + size_t msgsize) { uint64_t i; - for (i = lo ; i <= hi ; i++) { - rd_kafka_msg_t *rkm = ut_rd_kafka_msg_new(msgsize); + for (i = lo; i <= hi; i++) { + rd_kafka_msg_t *rkm = ut_rd_kafka_msg_new(msgsize); rkm->rkm_u.producer.msgid = i; rd_kafka_msgq_enq(rkmq, rkm); } @@ -2091,17 +2051,17 @@ struct ut_msg_range { * All source ranges are combined into a single queue before insert. 
*/ static int -unittest_msgq_insert_all_sort (const char *what, - double max_us_per_msg, - double *ret_us_per_msg, - const struct ut_msg_range *src_ranges, - const struct ut_msg_range *dest_ranges) { +unittest_msgq_insert_all_sort(const char *what, + double max_us_per_msg, + double *ret_us_per_msg, + const struct ut_msg_range *src_ranges, + const struct ut_msg_range *dest_ranges) { rd_kafka_msgq_t destq, srcq; int i; uint64_t lo = UINT64_MAX, hi = 0; - uint64_t cnt = 0; + uint64_t cnt = 0; const size_t msgsize = 100; - size_t totsize = 0; + size_t totsize = 0; rd_ts_t ts; double us_per_msg; @@ -2110,7 +2070,7 @@ unittest_msgq_insert_all_sort (const char *what, rd_kafka_msgq_init(&destq); rd_kafka_msgq_init(&srcq); - for (i = 0 ; src_ranges[i].hi > 0 ; i++) { + for (i = 0; src_ranges[i].hi > 0; i++) { uint64_t this_cnt; ut_msgq_populate(&srcq, src_ranges[i].lo, src_ranges[i].hi, @@ -2124,7 +2084,7 @@ unittest_msgq_insert_all_sort (const char *what, totsize += msgsize * (size_t)this_cnt; } - for (i = 0 ; dest_ranges[i].hi > 0 ; i++) { + for (i = 0; dest_ranges[i].hi > 0; i++) { uint64_t this_cnt; ut_msgq_populate(&destq, dest_ranges[i].lo, dest_ranges[i].hi, @@ -2143,24 +2103,24 @@ unittest_msgq_insert_all_sort (const char *what, ts = rd_clock(); rd_kafka_msgq_insert_msgq(&destq, &srcq, rd_kafka_msg_cmp_msgid); - ts = rd_clock() - ts; + ts = rd_clock() - ts; us_per_msg = (double)ts / (double)cnt; - RD_UT_SAY("Done: took %"PRId64"us, %.4fus/msg", - ts, us_per_msg); + RD_UT_SAY("Done: took %" PRId64 "us, %.4fus/msg", ts, us_per_msg); RD_UT_ASSERT(rd_kafka_msgq_len(&srcq) == 0, "srcq should be empty, but contains %d messages", rd_kafka_msgq_len(&srcq)); RD_UT_ASSERT(rd_kafka_msgq_len(&destq) == (int)cnt, - "destq should contain %d messages, not %d", - (int)cnt, rd_kafka_msgq_len(&destq)); + "destq should contain %d messages, not %d", (int)cnt, + rd_kafka_msgq_len(&destq)); if (ut_verify_msgq_order("after", &destq, lo, hi, rd_false)) return 1; 
RD_UT_ASSERT(rd_kafka_msgq_size(&destq) == totsize, - "expected destq size to be %"PRIusz" bytes, not %"PRIusz, + "expected destq size to be %" PRIusz + " bytes, not %" PRIusz, totsize, rd_kafka_msgq_size(&destq)); ut_rd_kafka_msgq_purge(&srcq); @@ -2186,18 +2146,18 @@ unittest_msgq_insert_all_sort (const char *what, * Inserts each source range individually. */ static int -unittest_msgq_insert_each_sort (const char *what, - double max_us_per_msg, - double *ret_us_per_msg, - const struct ut_msg_range *src_ranges, - const struct ut_msg_range *dest_ranges) { +unittest_msgq_insert_each_sort(const char *what, + double max_us_per_msg, + double *ret_us_per_msg, + const struct ut_msg_range *src_ranges, + const struct ut_msg_range *dest_ranges) { rd_kafka_msgq_t destq; int i; uint64_t lo = UINT64_MAX, hi = 0; - uint64_t cnt = 0; - uint64_t scnt = 0; + uint64_t cnt = 0; + uint64_t scnt = 0; const size_t msgsize = 100; - size_t totsize = 0; + size_t totsize = 0; double us_per_msg; rd_ts_t accum_ts = 0; @@ -2205,7 +2165,7 @@ unittest_msgq_insert_each_sort (const char *what, rd_kafka_msgq_init(&destq); - for (i = 0 ; dest_ranges[i].hi > 0 ; i++) { + for (i = 0; dest_ranges[i].hi > 0; i++) { uint64_t this_cnt; ut_msgq_populate(&destq, dest_ranges[i].lo, dest_ranges[i].hi, @@ -2220,7 +2180,7 @@ unittest_msgq_insert_each_sort (const char *what, } - for (i = 0 ; src_ranges[i].hi > 0 ; i++) { + for (i = 0; src_ranges[i].hi > 0; i++) { rd_kafka_msgq_t srcq; uint64_t this_cnt; rd_ts_t ts; @@ -2238,9 +2198,10 @@ unittest_msgq_insert_each_sort (const char *what, scnt += this_cnt; totsize += msgsize * (size_t)this_cnt; - RD_UT_SAY("Begin insert of %d messages into destq with " - "%d messages", - rd_kafka_msgq_len(&srcq), rd_kafka_msgq_len(&destq)); + RD_UT_SAY( + "Begin insert of %d messages into destq with " + "%d messages", + rd_kafka_msgq_len(&srcq), rd_kafka_msgq_len(&destq)); ts = rd_clock(); rd_kafka_msgq_insert_msgq(&destq, &srcq, @@ -2248,8 +2209,8 @@ 
unittest_msgq_insert_each_sort (const char *what, ts = rd_clock() - ts; accum_ts += ts; - RD_UT_SAY("Done: took %"PRId64"us, %.4fus/msg", - ts, (double)ts / (double)this_cnt); + RD_UT_SAY("Done: took %" PRId64 "us, %.4fus/msg", ts, + (double)ts / (double)this_cnt); RD_UT_ASSERT(rd_kafka_msgq_len(&srcq) == 0, "srcq should be empty, but contains %d messages", @@ -2262,8 +2223,8 @@ unittest_msgq_insert_each_sort (const char *what, return 1; RD_UT_ASSERT(rd_kafka_msgq_size(&destq) == totsize, - "expected destq size to be %"PRIusz - " bytes, not %"PRIusz, + "expected destq size to be %" PRIusz + " bytes, not %" PRIusz, totsize, rd_kafka_msgq_size(&destq)); ut_rd_kafka_msgq_purge(&srcq); @@ -2273,7 +2234,8 @@ unittest_msgq_insert_each_sort (const char *what, us_per_msg = (double)accum_ts / (double)scnt; - RD_UT_SAY("Total: %.4fus/msg over %"PRId64" messages in %"PRId64"us", + RD_UT_SAY("Total: %.4fus/msg over %" PRId64 " messages in %" PRId64 + "us", us_per_msg, scnt, accum_ts); if (!rd_unittest_slow) @@ -2296,12 +2258,11 @@ unittest_msgq_insert_each_sort (const char *what, /** * @brief Calls both insert_all and insert_each */ -static int -unittest_msgq_insert_sort (const char *what, - double max_us_per_msg, - double *ret_us_per_msg, - const struct ut_msg_range *src_ranges, - const struct ut_msg_range *dest_ranges) { +static int unittest_msgq_insert_sort(const char *what, + double max_us_per_msg, + double *ret_us_per_msg, + const struct ut_msg_range *src_ranges, + const struct ut_msg_range *dest_ranges) { double ret_all = 0.0, ret_each = 0.0; int r; @@ -2322,23 +2283,17 @@ unittest_msgq_insert_sort (const char *what, } -int unittest_msg (void) { - int fails = 0; +int unittest_msg(void) { + int fails = 0; double insert_baseline = 0.0; fails += unittest_msgq_order("FIFO", 1, rd_kafka_msg_cmp_msgid); fails += unittest_msg_seq_wrap(); fails += unittest_msgq_insert_sort( - "get baseline insert time", 100000.0, &insert_baseline, - (const struct ut_msg_range[]){ - { 1, 1 }, - { 
3, 3 }, - { 0, 0 }}, - (const struct ut_msg_range[]) { - { 2, 2 }, - { 4, 4 }, - { 0, 0 }}); + "get baseline insert time", 100000.0, &insert_baseline, + (const struct ut_msg_range[]) {{1, 1}, {3, 3}, {0, 0}}, + (const struct ut_msg_range[]) {{2, 2}, {4, 4}, {0, 0}}); /* Allow some wiggle room in baseline time. */ if (insert_baseline < 0.1) @@ -2346,69 +2301,55 @@ int unittest_msg (void) { insert_baseline *= 3; fails += unittest_msgq_insert_sort( - "single-message ranges", insert_baseline, NULL, - (const struct ut_msg_range[]){ - { 2, 2 }, - { 4, 4 }, - { 9, 9 }, - { 33692864, 33692864 }, - { 0, 0 }}, - (const struct ut_msg_range[]) { - { 1, 1 }, - { 3, 3 }, - { 5, 5 }, - { 10, 10 }, - { 33692865, 33692865 }, - { 0, 0 }}); + "single-message ranges", insert_baseline, NULL, + (const struct ut_msg_range[]) { + {2, 2}, {4, 4}, {9, 9}, {33692864, 33692864}, {0, 0}}, + (const struct ut_msg_range[]) {{1, 1}, + {3, 3}, + {5, 5}, + {10, 10}, + {33692865, 33692865}, + {0, 0}}); fails += unittest_msgq_insert_sort( - "many messages", insert_baseline, NULL, - (const struct ut_msg_range[]){ - { 100000, 200000 }, - { 400000, 450000 }, - { 900000, 920000 }, - { 33692864, 33751992 }, - { 33906868, 33993690 }, - { 40000000, 44000000 }, - { 0, 0 }}, - (const struct ut_msg_range[]) { - { 1, 199 }, - { 350000, 360000 }, - { 500000, 500010 }, - { 1000000, 1000200 }, - { 33751993, 33906867 }, - { 50000001, 50000001 }, - { 0, 0 }}); + "many messages", insert_baseline, NULL, + (const struct ut_msg_range[]) {{100000, 200000}, + {400000, 450000}, + {900000, 920000}, + {33692864, 33751992}, + {33906868, 33993690}, + {40000000, 44000000}, + {0, 0}}, + (const struct ut_msg_range[]) {{1, 199}, + {350000, 360000}, + {500000, 500010}, + {1000000, 1000200}, + {33751993, 33906867}, + {50000001, 50000001}, + {0, 0}}); fails += unittest_msgq_insert_sort( - "issue #2508", insert_baseline, NULL, - (const struct ut_msg_range[]){ - { 33692864, 33751992 }, - { 33906868, 33993690 }, - { 0, 0 }}, - (const 
struct ut_msg_range[]) { - { 33751993, 33906867 }, - { 0, 0 }}); + "issue #2508", insert_baseline, NULL, + (const struct ut_msg_range[]) { + {33692864, 33751992}, {33906868, 33993690}, {0, 0}}, + (const struct ut_msg_range[]) {{33751993, 33906867}, {0, 0}}); /* The standard case where all of the srcq * goes after the destq. * Create a big destq and a number of small srcqs. * Should not result in O(n) scans to find the insert position. */ fails += unittest_msgq_insert_sort( - "issue #2450 (v1.2.1 regression)", insert_baseline, NULL, - (const struct ut_msg_range[]){ - { 200000, 200001 }, - { 200002, 200006 }, - { 200009, 200012 }, - { 200015, 200016 }, - { 200020, 200022 }, - { 200030, 200090 }, - { 200091, 200092 }, - { 200093, 200094 }, - { 200095, 200096 }, - { 200097, 200099 }, - { 0, 0 }}, - (const struct ut_msg_range[]) { - { 1, 199999 }, - { 0, 0 }}); + "issue #2450 (v1.2.1 regression)", insert_baseline, NULL, + (const struct ut_msg_range[]) {{200000, 200001}, + {200002, 200006}, + {200009, 200012}, + {200015, 200016}, + {200020, 200022}, + {200030, 200090}, + {200091, 200092}, + {200093, 200094}, + {200095, 200096}, + {200097, 200099}, + {0, 0}}, + (const struct ut_msg_range[]) {{1, 199999}, {0, 0}}); return fails; } diff --git a/src/rdkafka_msg.h b/src/rdkafka_msg.h index 939db6c69f..3743dfba25 100644 --- a/src/rdkafka_msg.h +++ b/src/rdkafka_msg.h @@ -3,22 +3,22 @@ * * Copyright (c) 2012,2013 Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * PRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * and/or other materials provided with the distribution. + * PRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -36,7 +36,7 @@ /** * @brief Internal RD_KAFKA_MSG_F_.. 
flags */ -#define RD_KAFKA_MSG_F_RKT_RDLOCKED 0x100000 /* rkt is rdlock():ed */ +#define RD_KAFKA_MSG_F_RKT_RDLOCKED 0x100000 /* rkt is rdlock():ed */ /** @@ -56,7 +56,8 @@ * * Attributes: * ------------------------------------------------------------------------------------------------- - * | Unused (6-15) | Control (5) | Transactional (4) | Timestamp Type (3) | Compression Type (0-2) | + * | Unused (6-15) | Control (5) | Transactional (4) | Timestamp Type (3) | + * Compression Type (0-2) | * ------------------------------------------------------------------------------------------------- */ /* Compression types same as MsgVersion 0 above */ @@ -66,32 +67,32 @@ typedef struct rd_kafka_msg_s { - rd_kafka_message_t rkm_rkmessage; /* MUST be first field */ -#define rkm_len rkm_rkmessage.len -#define rkm_payload rkm_rkmessage.payload -#define rkm_opaque rkm_rkmessage._private -#define rkm_partition rkm_rkmessage.partition -#define rkm_offset rkm_rkmessage.offset -#define rkm_key rkm_rkmessage.key -#define rkm_key_len rkm_rkmessage.key_len -#define rkm_err rkm_rkmessage.err - - TAILQ_ENTRY(rd_kafka_msg_s) rkm_link; - - int rkm_flags; - /* @remark These additional flags must not collide with - * the RD_KAFKA_MSG_F_* flags in rdkafka.h */ -#define RD_KAFKA_MSG_F_FREE_RKM 0x10000 /* msg_t is allocated */ -#define RD_KAFKA_MSG_F_ACCOUNT 0x20000 /* accounted for in curr_msgs */ -#define RD_KAFKA_MSG_F_PRODUCER 0x40000 /* Producer message */ -#define RD_KAFKA_MSG_F_CONTROL 0x80000 /* Control message */ - - rd_kafka_timestamp_type_t rkm_tstype; /* rkm_timestamp type */ - int64_t rkm_timestamp; /* Message format V1. - * Meaning of timestamp depends on - * message Attribute LogAppendtime (broker) - * or CreateTime (producer). 
- * Unit is milliseconds since epoch (UTC).*/ + rd_kafka_message_t rkm_rkmessage; /* MUST be first field */ +#define rkm_len rkm_rkmessage.len +#define rkm_payload rkm_rkmessage.payload +#define rkm_opaque rkm_rkmessage._private +#define rkm_partition rkm_rkmessage.partition +#define rkm_offset rkm_rkmessage.offset +#define rkm_key rkm_rkmessage.key +#define rkm_key_len rkm_rkmessage.key_len +#define rkm_err rkm_rkmessage.err + + TAILQ_ENTRY(rd_kafka_msg_s) rkm_link; + + int rkm_flags; + /* @remark These additional flags must not collide with + * the RD_KAFKA_MSG_F_* flags in rdkafka.h */ +#define RD_KAFKA_MSG_F_FREE_RKM 0x10000 /* msg_t is allocated */ +#define RD_KAFKA_MSG_F_ACCOUNT 0x20000 /* accounted for in curr_msgs */ +#define RD_KAFKA_MSG_F_PRODUCER 0x40000 /* Producer message */ +#define RD_KAFKA_MSG_F_CONTROL 0x80000 /* Control message */ + + rd_kafka_timestamp_type_t rkm_tstype; /* rkm_timestamp type */ + int64_t rkm_timestamp; /* Message format V1. + * Meaning of timestamp depends on + * message Attribute LogAppendtime (broker) + * or CreateTime (producer). + * Unit is milliseconds since epoch (UTC).*/ rd_kafka_headers_t *rkm_headers; /**< Parsed headers list, if any. */ @@ -105,13 +106,13 @@ typedef struct rd_kafka_msg_s { union { struct { - rd_ts_t ts_timeout; /* Message timeout */ - rd_ts_t ts_enq; /* Enqueue/Produce time */ - rd_ts_t ts_backoff; /* Backoff next Produce until - * this time. */ - uint64_t msgid; /**< Message sequencial id, - * used to maintain ordering. - * Starts at 1. */ + rd_ts_t ts_timeout; /* Message timeout */ + rd_ts_t ts_enq; /* Enqueue/Produce time */ + rd_ts_t ts_backoff; /* Backoff next Produce until + * this time. */ + uint64_t msgid; /**< Message sequencial id, + * used to maintain ordering. + * Starts at 1. */ uint64_t last_msgid; /**< On retry this is set * on the first message * in a batch to point @@ -120,7 +121,7 @@ typedef struct rd_kafka_msg_s { * the batch can be * identically reconstructed. 
*/ - int retries; /* Number of retries so far */ + int retries; /* Number of retries so far */ } producer; #define rkm_ts_timeout rkm_u.producer.ts_timeout #define rkm_ts_enq rkm_u.producer.ts_enq @@ -145,13 +146,12 @@ TAILQ_HEAD(rd_kafka_msg_head_s, rd_kafka_msg_s); * @remark Depending on message version (MagicByte) the actual size * may be smaller. */ -static RD_INLINE RD_UNUSED -size_t rd_kafka_msg_wire_size (const rd_kafka_msg_t *rkm, int MsgVersion) { +static RD_INLINE RD_UNUSED size_t +rd_kafka_msg_wire_size(const rd_kafka_msg_t *rkm, int MsgVersion) { static const size_t overheads[] = { - [0] = RD_KAFKAP_MESSAGE_V0_OVERHEAD, - [1] = RD_KAFKAP_MESSAGE_V1_OVERHEAD, - [2] = RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD - }; + [0] = RD_KAFKAP_MESSAGE_V0_OVERHEAD, + [1] = RD_KAFKAP_MESSAGE_V1_OVERHEAD, + [2] = RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD}; size_t size; rd_dassert(MsgVersion >= 0 && MsgVersion <= 2); @@ -169,41 +169,38 @@ size_t rd_kafka_msg_wire_size (const rd_kafka_msg_t *rkm, int MsgVersion) { * @remark This does not account for the ProduceRequest, et.al, just the * per-message overhead. */ -static RD_INLINE RD_UNUSED -size_t rd_kafka_msg_max_wire_size (size_t keylen, size_t valuelen, - size_t hdrslen) { - return RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD + - keylen + valuelen + hdrslen; +static RD_INLINE RD_UNUSED size_t rd_kafka_msg_max_wire_size(size_t keylen, + size_t valuelen, + size_t hdrslen) { + return RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD + keylen + valuelen + hdrslen; } /** * @returns the enveloping rd_kafka_msg_t pointer for a rd_kafka_msg_t * wrapped rd_kafka_message_t. */ -static RD_INLINE RD_UNUSED -rd_kafka_msg_t *rd_kafka_message2msg (rd_kafka_message_t *rkmessage) { - return (rd_kafka_msg_t *)rkmessage; +static RD_INLINE RD_UNUSED rd_kafka_msg_t * +rd_kafka_message2msg(rd_kafka_message_t *rkmessage) { + return (rd_kafka_msg_t *)rkmessage; } - - /** * @brief Message queue with message and byte counters. 
*/ TAILQ_HEAD(rd_kafka_msgs_head_s, rd_kafka_msg_s); typedef struct rd_kafka_msgq_s { - struct rd_kafka_msgs_head_s rkmq_msgs; /* TAILQ_HEAD */ + struct rd_kafka_msgs_head_s rkmq_msgs; /* TAILQ_HEAD */ int32_t rkmq_msg_cnt; int64_t rkmq_msg_bytes; } rd_kafka_msgq_t; -#define RD_KAFKA_MSGQ_INITIALIZER(rkmq) \ - { .rkmq_msgs = TAILQ_HEAD_INITIALIZER((rkmq).rkmq_msgs) } +#define RD_KAFKA_MSGQ_INITIALIZER(rkmq) \ + { .rkmq_msgs = TAILQ_HEAD_INITIALIZER((rkmq).rkmq_msgs) } -#define RD_KAFKA_MSGQ_FOREACH(elm,head) \ - TAILQ_FOREACH(elm, &(head)->rkmq_msgs, rkm_link) +#define RD_KAFKA_MSGQ_FOREACH(elm, head) \ + TAILQ_FOREACH(elm, &(head)->rkmq_msgs, rkm_link) /* @brief Check if queue is empty. Proper locks must be held. */ #define RD_KAFKA_MSGQ_EMPTY(rkmq) TAILQ_EMPTY(&(rkmq)->rkmq_msgs) @@ -211,48 +208,52 @@ typedef struct rd_kafka_msgq_s { /** * Returns the number of messages in the specified queue. */ -static RD_INLINE RD_UNUSED -int rd_kafka_msgq_len (const rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED int rd_kafka_msgq_len(const rd_kafka_msgq_t *rkmq) { return (int)rkmq->rkmq_msg_cnt; } /** * Returns the total number of bytes in the specified queue. 
*/ -static RD_INLINE RD_UNUSED -size_t rd_kafka_msgq_size (const rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED size_t +rd_kafka_msgq_size(const rd_kafka_msgq_t *rkmq) { return (size_t)rkmq->rkmq_msg_bytes; } -void rd_kafka_msg_destroy (rd_kafka_t *rk, rd_kafka_msg_t *rkm); +void rd_kafka_msg_destroy(rd_kafka_t *rk, rd_kafka_msg_t *rkm); -int rd_kafka_msg_new (rd_kafka_topic_t *rkt, int32_t force_partition, - int msgflags, - char *payload, size_t len, - const void *keydata, size_t keylen, - void *msg_opaque); +int rd_kafka_msg_new(rd_kafka_topic_t *rkt, + int32_t force_partition, + int msgflags, + char *payload, + size_t len, + const void *keydata, + size_t keylen, + void *msg_opaque); -static RD_INLINE RD_UNUSED void rd_kafka_msgq_init (rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED void rd_kafka_msgq_init(rd_kafka_msgq_t *rkmq) { TAILQ_INIT(&rkmq->rkmq_msgs); rkmq->rkmq_msg_cnt = 0; rkmq->rkmq_msg_bytes = 0; } #if ENABLE_DEVEL -#define rd_kafka_msgq_verify_order(rktp,rkmq,exp_first_msgid,gapless) \ - rd_kafka_msgq_verify_order0(__FUNCTION__, __LINE__, \ - rktp, rkmq, exp_first_msgid, gapless) +#define rd_kafka_msgq_verify_order(rktp, rkmq, exp_first_msgid, gapless) \ + rd_kafka_msgq_verify_order0(__FUNCTION__, __LINE__, rktp, rkmq, \ + exp_first_msgid, gapless) #else -#define rd_kafka_msgq_verify_order(rktp,rkmq,exp_first_msgid,gapless) \ - do { } while (0) +#define rd_kafka_msgq_verify_order(rktp, rkmq, exp_first_msgid, gapless) \ + do { \ + } while (0) #endif -void rd_kafka_msgq_verify_order0 (const char *function, int line, - const struct rd_kafka_toppar_s *rktp, - const rd_kafka_msgq_t *rkmq, - uint64_t exp_first_msgid, - rd_bool_t gapless); +void rd_kafka_msgq_verify_order0(const char *function, + int line, + const struct rd_kafka_toppar_s *rktp, + const rd_kafka_msgq_t *rkmq, + uint64_t exp_first_msgid, + rd_bool_t gapless); /** @@ -260,12 +261,12 @@ void rd_kafka_msgq_verify_order0 (const char *function, int line, * 'src' will be cleared. 
* Proper locks for 'src' and 'dst' must be held. */ -static RD_INLINE RD_UNUSED void rd_kafka_msgq_concat (rd_kafka_msgq_t *dst, - rd_kafka_msgq_t *src) { - TAILQ_CONCAT(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link); - dst->rkmq_msg_cnt += src->rkmq_msg_cnt; +static RD_INLINE RD_UNUSED void rd_kafka_msgq_concat(rd_kafka_msgq_t *dst, + rd_kafka_msgq_t *src) { + TAILQ_CONCAT(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link); + dst->rkmq_msg_cnt += src->rkmq_msg_cnt; dst->rkmq_msg_bytes += src->rkmq_msg_bytes; - rd_kafka_msgq_init(src); + rd_kafka_msgq_init(src); rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false); } @@ -273,12 +274,12 @@ static RD_INLINE RD_UNUSED void rd_kafka_msgq_concat (rd_kafka_msgq_t *dst, * Move queue 'src' to 'dst' (overwrites dst) * Source will be cleared. */ -static RD_INLINE RD_UNUSED void rd_kafka_msgq_move (rd_kafka_msgq_t *dst, - rd_kafka_msgq_t *src) { - TAILQ_MOVE(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link); +static RD_INLINE RD_UNUSED void rd_kafka_msgq_move(rd_kafka_msgq_t *dst, + rd_kafka_msgq_t *src) { + TAILQ_MOVE(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link); dst->rkmq_msg_cnt = src->rkmq_msg_cnt; dst->rkmq_msg_bytes = src->rkmq_msg_bytes; - rd_kafka_msgq_init(src); + rd_kafka_msgq_init(src); rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false); } @@ -289,8 +290,8 @@ static RD_INLINE RD_UNUSED void rd_kafka_msgq_move (rd_kafka_msgq_t *dst, * * @locks proper locks for \p src and \p dst MUST be held. */ -static RD_INLINE RD_UNUSED void rd_kafka_msgq_prepend (rd_kafka_msgq_t *dst, - rd_kafka_msgq_t *src) { +static RD_INLINE RD_UNUSED void rd_kafka_msgq_prepend(rd_kafka_msgq_t *dst, + rd_kafka_msgq_t *src) { rd_kafka_msgq_concat(src, dst); rd_kafka_msgq_move(dst, src); rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false); @@ -300,50 +301,49 @@ static RD_INLINE RD_UNUSED void rd_kafka_msgq_prepend (rd_kafka_msgq_t *dst, /** * rd_free all msgs in msgq and reinitialize the msgq. 
*/ -static RD_INLINE RD_UNUSED void rd_kafka_msgq_purge (rd_kafka_t *rk, +static RD_INLINE RD_UNUSED void rd_kafka_msgq_purge(rd_kafka_t *rk, rd_kafka_msgq_t *rkmq) { - rd_kafka_msg_t *rkm, *next; + rd_kafka_msg_t *rkm, *next; - next = TAILQ_FIRST(&rkmq->rkmq_msgs); - while (next) { - rkm = next; - next = TAILQ_NEXT(next, rkm_link); + next = TAILQ_FIRST(&rkmq->rkmq_msgs); + while (next) { + rkm = next; + next = TAILQ_NEXT(next, rkm_link); - rd_kafka_msg_destroy(rk, rkm); - } + rd_kafka_msg_destroy(rk, rkm); + } - rd_kafka_msgq_init(rkmq); + rd_kafka_msgq_init(rkmq); } /** * Remove message from message queue */ -static RD_INLINE RD_UNUSED -rd_kafka_msg_t *rd_kafka_msgq_deq (rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm, - int do_count) { - if (likely(do_count)) { - rd_kafka_assert(NULL, rkmq->rkmq_msg_cnt > 0); - rd_kafka_assert(NULL, rkmq->rkmq_msg_bytes >= - (int64_t)(rkm->rkm_len+rkm->rkm_key_len)); +static RD_INLINE RD_UNUSED rd_kafka_msg_t * +rd_kafka_msgq_deq(rd_kafka_msgq_t *rkmq, rd_kafka_msg_t *rkm, int do_count) { + if (likely(do_count)) { + rd_kafka_assert(NULL, rkmq->rkmq_msg_cnt > 0); + rd_kafka_assert(NULL, + rkmq->rkmq_msg_bytes >= + (int64_t)(rkm->rkm_len + rkm->rkm_key_len)); rkmq->rkmq_msg_cnt--; - rkmq->rkmq_msg_bytes -= rkm->rkm_len+rkm->rkm_key_len; - } + rkmq->rkmq_msg_bytes -= rkm->rkm_len + rkm->rkm_key_len; + } - TAILQ_REMOVE(&rkmq->rkmq_msgs, rkm, rkm_link); + TAILQ_REMOVE(&rkmq->rkmq_msgs, rkm, rkm_link); - return rkm; + return rkm; } -static RD_INLINE RD_UNUSED -rd_kafka_msg_t *rd_kafka_msgq_pop (rd_kafka_msgq_t *rkmq) { - rd_kafka_msg_t *rkm; +static RD_INLINE RD_UNUSED rd_kafka_msg_t * +rd_kafka_msgq_pop(rd_kafka_msgq_t *rkmq) { + rd_kafka_msg_t *rkm; - if (((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)))) - rd_kafka_msgq_deq(rkmq, rkm, 1); + if (((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)))) + rd_kafka_msgq_deq(rkmq, rkm, 1); - return rkm; + return rkm; } @@ -352,8 +352,8 @@ rd_kafka_msg_t *rd_kafka_msgq_pop (rd_kafka_msgq_t *rkmq) { * * @locks 
caller's responsibility */ -static RD_INLINE RD_UNUSED -rd_kafka_msg_t *rd_kafka_msgq_first (const rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED rd_kafka_msg_t * +rd_kafka_msgq_first(const rd_kafka_msgq_t *rkmq) { return TAILQ_FIRST(&rkmq->rkmq_msgs); } @@ -362,8 +362,8 @@ rd_kafka_msg_t *rd_kafka_msgq_first (const rd_kafka_msgq_t *rkmq) { * * @locks caller's responsibility */ -static RD_INLINE RD_UNUSED -rd_kafka_msg_t *rd_kafka_msgq_last (const rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED rd_kafka_msg_t * +rd_kafka_msgq_last(const rd_kafka_msgq_t *rkmq) { return TAILQ_LAST(&rkmq->rkmq_msgs, rd_kafka_msgs_head_s); } @@ -373,8 +373,8 @@ rd_kafka_msg_t *rd_kafka_msgq_last (const rd_kafka_msgq_t *rkmq) { * * @locks caller's responsibility */ -static RD_INLINE RD_UNUSED -uint64_t rd_kafka_msgq_first_msgid (const rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED uint64_t +rd_kafka_msgq_first_msgid(const rd_kafka_msgq_t *rkmq) { const rd_kafka_msg_t *rkm = TAILQ_FIRST(&rkmq->rkmq_msgs); if (rkm) return rkm->rkm_u.producer.msgid; @@ -387,8 +387,7 @@ uint64_t rd_kafka_msgq_first_msgid (const rd_kafka_msgq_t *rkmq) { * @brief Message ordering comparator using the message id * number to order messages in ascending order (FIFO). */ -static RD_INLINE -int rd_kafka_msg_cmp_msgid (const void *_a, const void *_b) { +static RD_INLINE int rd_kafka_msg_cmp_msgid(const void *_a, const void *_b) { const rd_kafka_msg_t *a = _a, *b = _b; rd_dassert(a->rkm_u.producer.msgid); @@ -400,8 +399,8 @@ int rd_kafka_msg_cmp_msgid (const void *_a, const void *_b) { * @brief Message ordering comparator using the message id * number to order messages in descending order (LIFO). 
*/ -static RD_INLINE -int rd_kafka_msg_cmp_msgid_lifo (const void *_a, const void *_b) { +static RD_INLINE int rd_kafka_msg_cmp_msgid_lifo(const void *_a, + const void *_b) { const rd_kafka_msg_t *a = _a, *b = _b; rd_dassert(a->rkm_u.producer.msgid); @@ -416,10 +415,9 @@ int rd_kafka_msg_cmp_msgid_lifo (const void *_a, const void *_b) { * @warning The message must have a msgid set. * @returns the message count of the queue after enqueuing the message. */ -int -rd_kafka_msgq_enq_sorted0 (rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm, - int (*order_cmp) (const void *, const void *)); +int rd_kafka_msgq_enq_sorted0(rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm, + int (*order_cmp)(const void *, const void *)); /** * @brief Insert message at its sorted position using the msgid. @@ -427,27 +425,27 @@ rd_kafka_msgq_enq_sorted0 (rd_kafka_msgq_t *rkmq, * @warning The message must have a msgid set. * @returns the message count of the queue after enqueuing the message. */ -int rd_kafka_msgq_enq_sorted (const rd_kafka_topic_t *rkt, - rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm); +int rd_kafka_msgq_enq_sorted(const rd_kafka_topic_t *rkt, + rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm); /** * Insert message at head of message queue. */ -static RD_INLINE RD_UNUSED void rd_kafka_msgq_insert (rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm) { - TAILQ_INSERT_HEAD(&rkmq->rkmq_msgs, rkm, rkm_link); +static RD_INLINE RD_UNUSED void rd_kafka_msgq_insert(rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm) { + TAILQ_INSERT_HEAD(&rkmq->rkmq_msgs, rkm, rkm_link); rkmq->rkmq_msg_cnt++; - rkmq->rkmq_msg_bytes += rkm->rkm_len+rkm->rkm_key_len; + rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len; } /** * Append message to tail of message queue. 
*/ -static RD_INLINE RD_UNUSED int rd_kafka_msgq_enq (rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm) { +static RD_INLINE RD_UNUSED int rd_kafka_msgq_enq(rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm) { TAILQ_INSERT_TAIL(&rkmq->rkmq_msgs, rkm, rkm_link); - rkmq->rkmq_msg_bytes += rkm->rkm_len+rkm->rkm_key_len; + rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len; return (int)++rkmq->rkmq_msg_cnt; } @@ -456,11 +454,10 @@ static RD_INLINE RD_UNUSED int rd_kafka_msgq_enq (rd_kafka_msgq_t *rkmq, * @returns true if the MsgId extents (first, last) in the two queues overlap. */ static RD_INLINE RD_UNUSED rd_bool_t -rd_kafka_msgq_overlap (const rd_kafka_msgq_t *a, const rd_kafka_msgq_t *b) { +rd_kafka_msgq_overlap(const rd_kafka_msgq_t *a, const rd_kafka_msgq_t *b) { const rd_kafka_msg_t *fa, *la, *fb, *lb; - if (RD_KAFKA_MSGQ_EMPTY(a) || - RD_KAFKA_MSGQ_EMPTY(b)) + if (RD_KAFKA_MSGQ_EMPTY(a) || RD_KAFKA_MSGQ_EMPTY(b)) return rd_false; fa = rd_kafka_msgq_first(a); @@ -468,9 +465,9 @@ rd_kafka_msgq_overlap (const rd_kafka_msgq_t *a, const rd_kafka_msgq_t *b) { la = rd_kafka_msgq_last(a); lb = rd_kafka_msgq_last(b); - return (rd_bool_t) - (fa->rkm_u.producer.msgid <= lb->rkm_u.producer.msgid && - fb->rkm_u.producer.msgid <= la->rkm_u.producer.msgid); + return (rd_bool_t)( + fa->rkm_u.producer.msgid <= lb->rkm_u.producer.msgid && + fb->rkm_u.producer.msgid <= la->rkm_u.producer.msgid); } /** @@ -479,53 +476,59 @@ rd_kafka_msgq_overlap (const rd_kafka_msgq_t *a, const rd_kafka_msgq_t *b) { * messages. * 'timedout' must be initialized. 
*/ -int rd_kafka_msgq_age_scan (struct rd_kafka_toppar_s *rktp, - rd_kafka_msgq_t *rkmq, - rd_kafka_msgq_t *timedout, - rd_ts_t now, - rd_ts_t *abs_next_timeout); - -void rd_kafka_msgq_split (rd_kafka_msgq_t *leftq, rd_kafka_msgq_t *rightq, - rd_kafka_msg_t *first_right, - int cnt, int64_t bytes); - -rd_kafka_msg_t *rd_kafka_msgq_find_pos (const rd_kafka_msgq_t *rkmq, - const rd_kafka_msg_t *start_pos, - const rd_kafka_msg_t *rkm, - int (*cmp) (const void *, - const void *), - int *cntp, int64_t *bytesp); - -void rd_kafka_msgq_set_metadata (rd_kafka_msgq_t *rkmq, int32_t broker_id, - int64_t base_offset, int64_t timestamp, - rd_kafka_msg_status_t status); - -void rd_kafka_msgq_move_acked (rd_kafka_msgq_t *dest, rd_kafka_msgq_t *src, - uint64_t last_msgid, - rd_kafka_msg_status_t status); - -int rd_kafka_msg_partitioner (rd_kafka_topic_t *rkt, rd_kafka_msg_t *rkm, - rd_dolock_t do_lock); - - -rd_kafka_message_t *rd_kafka_message_get (struct rd_kafka_op_s *rko); -rd_kafka_message_t *rd_kafka_message_get_from_rkm (struct rd_kafka_op_s *rko, - rd_kafka_msg_t *rkm); -rd_kafka_message_t *rd_kafka_message_new (void); +int rd_kafka_msgq_age_scan(struct rd_kafka_toppar_s *rktp, + rd_kafka_msgq_t *rkmq, + rd_kafka_msgq_t *timedout, + rd_ts_t now, + rd_ts_t *abs_next_timeout); + +void rd_kafka_msgq_split(rd_kafka_msgq_t *leftq, + rd_kafka_msgq_t *rightq, + rd_kafka_msg_t *first_right, + int cnt, + int64_t bytes); + +rd_kafka_msg_t *rd_kafka_msgq_find_pos(const rd_kafka_msgq_t *rkmq, + const rd_kafka_msg_t *start_pos, + const rd_kafka_msg_t *rkm, + int (*cmp)(const void *, const void *), + int *cntp, + int64_t *bytesp); + +void rd_kafka_msgq_set_metadata(rd_kafka_msgq_t *rkmq, + int32_t broker_id, + int64_t base_offset, + int64_t timestamp, + rd_kafka_msg_status_t status); + +void rd_kafka_msgq_move_acked(rd_kafka_msgq_t *dest, + rd_kafka_msgq_t *src, + uint64_t last_msgid, + rd_kafka_msg_status_t status); + +int rd_kafka_msg_partitioner(rd_kafka_topic_t *rkt, + rd_kafka_msg_t 
*rkm, + rd_dolock_t do_lock); + + +rd_kafka_message_t *rd_kafka_message_get(struct rd_kafka_op_s *rko); +rd_kafka_message_t *rd_kafka_message_get_from_rkm(struct rd_kafka_op_s *rko, + rd_kafka_msg_t *rkm); +rd_kafka_message_t *rd_kafka_message_new(void); /** * @returns a (possibly) wrapped Kafka protocol message sequence counter * for the non-overflowing \p seq. */ -static RD_INLINE RD_UNUSED int32_t rd_kafka_seq_wrap (int64_t seq) { +static RD_INLINE RD_UNUSED int32_t rd_kafka_seq_wrap(int64_t seq) { return (int32_t)(seq & (int64_t)INT32_MAX); } -void rd_kafka_msgq_dump (FILE *fp, const char *what, rd_kafka_msgq_t *rkmq); +void rd_kafka_msgq_dump(FILE *fp, const char *what, rd_kafka_msgq_t *rkmq); -rd_kafka_msg_t *ut_rd_kafka_msg_new (size_t msgsize); -void ut_rd_kafka_msgq_purge (rd_kafka_msgq_t *rkmq); -int unittest_msg (void); +rd_kafka_msg_t *ut_rd_kafka_msg_new(size_t msgsize); +void ut_rd_kafka_msgq_purge(rd_kafka_msgq_t *rkmq); +int unittest_msg(void); #endif /* _RDKAFKA_MSG_H_ */ diff --git a/src/rdkafka_msgbatch.h b/src/rdkafka_msgbatch.h index 31b6e72dad..09c7977067 100644 --- a/src/rdkafka_msgbatch.h +++ b/src/rdkafka_msgbatch.h @@ -28,35 +28,35 @@ #define _RDKAFKA_MSGBATCH_H_ typedef struct rd_kafka_msgbatch_s { - rd_kafka_toppar_t *rktp; /**< Reference to partition */ + rd_kafka_toppar_t *rktp; /**< Reference to partition */ - rd_kafka_msgq_t msgq; /**< Messages in batch */ + rd_kafka_msgq_t msgq; /**< Messages in batch */ /* Following fields are for Idempotent Producer use */ - rd_kafka_pid_t pid; /**< Producer Id and Epoch */ - int32_t first_seq; /**< Base sequence */ - int64_t first_msgid; /**< Base msgid */ - uint64_t epoch_base_msgid; /**< The partition epoch's - * base msgid. */ - uint64_t last_msgid; /**< Last message to add to batch. - * This is used when reconstructing - * batches for resends with - * the idempotent producer which - * require retries to have the - * exact same messages in them. 
*/ + rd_kafka_pid_t pid; /**< Producer Id and Epoch */ + int32_t first_seq; /**< Base sequence */ + int64_t first_msgid; /**< Base msgid */ + uint64_t epoch_base_msgid; /**< The partition epoch's + * base msgid. */ + uint64_t last_msgid; /**< Last message to add to batch. + * This is used when reconstructing + * batches for resends with + * the idempotent producer which + * require retries to have the + * exact same messages in them. */ } rd_kafka_msgbatch_t; /* defined in rdkafka_msg.c */ -void rd_kafka_msgbatch_destroy (rd_kafka_msgbatch_t *rkmb); -void rd_kafka_msgbatch_init (rd_kafka_msgbatch_t *rkmb, - rd_kafka_toppar_t *rktp, - rd_kafka_pid_t pid, - uint64_t epoch_base_msgid); -void rd_kafka_msgbatch_set_first_msg (rd_kafka_msgbatch_t *rkmb, - rd_kafka_msg_t *rkm); -void rd_kafka_msgbatch_ready_produce (rd_kafka_msgbatch_t *rkmb); +void rd_kafka_msgbatch_destroy(rd_kafka_msgbatch_t *rkmb); +void rd_kafka_msgbatch_init(rd_kafka_msgbatch_t *rkmb, + rd_kafka_toppar_t *rktp, + rd_kafka_pid_t pid, + uint64_t epoch_base_msgid); +void rd_kafka_msgbatch_set_first_msg(rd_kafka_msgbatch_t *rkmb, + rd_kafka_msg_t *rkm); +void rd_kafka_msgbatch_ready_produce(rd_kafka_msgbatch_t *rkmb); #endif /* _RDKAFKA_MSGBATCH_H_ */ diff --git a/src/rdkafka_msgset.h b/src/rdkafka_msgset.h index 420455e3f2..b79f1c946c 100644 --- a/src/rdkafka_msgset.h +++ b/src/rdkafka_msgset.h @@ -46,41 +46,37 @@ typedef struct rd_kafka_aborted_txns_s { } rd_kafka_aborted_txns_t; -rd_kafka_aborted_txns_t *rd_kafka_aborted_txns_new (int32_t txn_cnt); +rd_kafka_aborted_txns_t *rd_kafka_aborted_txns_new(int32_t txn_cnt); -void -rd_kafka_aborted_txns_destroy (rd_kafka_aborted_txns_t *aborted_txns); +void rd_kafka_aborted_txns_destroy(rd_kafka_aborted_txns_t *aborted_txns); -void -rd_kafka_aborted_txns_sort (rd_kafka_aborted_txns_t *aborted_txns); +void rd_kafka_aborted_txns_sort(rd_kafka_aborted_txns_t *aborted_txns); -void -rd_kafka_aborted_txns_add (rd_kafka_aborted_txns_t *aborted_txns, - int64_t pid, 
- int64_t first_offset); +void rd_kafka_aborted_txns_add(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid, + int64_t first_offset); /** * @name MessageSet writers */ -rd_kafka_buf_t * -rd_kafka_msgset_create_ProduceRequest (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq, - const rd_kafka_pid_t pid, - uint64_t epoch_base_msgid, - size_t *MessageSetSizep); +rd_kafka_buf_t *rd_kafka_msgset_create_ProduceRequest(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + const rd_kafka_pid_t pid, + uint64_t epoch_base_msgid, + size_t *MessageSetSizep); /** * @name MessageSet readers */ rd_kafka_resp_err_t -rd_kafka_msgset_parse (rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_toppar_t *rktp, - rd_kafka_aborted_txns_t *aborted_txns, - const struct rd_kafka_toppar_ver *tver); +rd_kafka_msgset_parse(rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_toppar_t *rktp, + rd_kafka_aborted_txns_t *aborted_txns, + const struct rd_kafka_toppar_ver *tver); -int unittest_aborted_txns (void); +int unittest_aborted_txns(void); #endif /* _RDKAFKA_MSGSET_H_ */ diff --git a/src/rdkafka_msgset_reader.c b/src/rdkafka_msgset_reader.c index a9d28e4312..fdbd114104 100644 --- a/src/rdkafka_msgset_reader.c +++ b/src/rdkafka_msgset_reader.c @@ -80,18 +80,19 @@ static RD_INLINE int64_t -rd_kafka_aborted_txns_pop_offset (rd_kafka_aborted_txns_t *aborted_txns, - int64_t pid, int64_t max_offset); +rd_kafka_aborted_txns_pop_offset(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid, + int64_t max_offset); static RD_INLINE int64_t -rd_kafka_aborted_txns_get_offset (const rd_kafka_aborted_txns_t *aborted_txns, - int64_t pid); +rd_kafka_aborted_txns_get_offset(const rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid); struct msgset_v2_hdr { int64_t BaseOffset; int32_t Length; int32_t PartitionLeaderEpoch; - int8_t MagicByte; + int8_t MagicByte; int32_t Crc; int16_t Attributes; int32_t LastOffsetDelta; @@ -119,85 +120,87 @@ 
typedef struct rd_kafka_aborted_txn_start_offsets_s { typedef struct rd_kafka_msgset_reader_s { - rd_kafka_buf_t *msetr_rkbuf; /**< Response read buffer */ + rd_kafka_buf_t *msetr_rkbuf; /**< Response read buffer */ - int msetr_relative_offsets; /**< Bool: using relative offsets */ + int msetr_relative_offsets; /**< Bool: using relative offsets */ /**< Outer/wrapper Message fields. */ struct { - int64_t offset; /**< Relative_offsets: outer message's - * Offset (last offset) */ + int64_t offset; /**< Relative_offsets: outer message's + * Offset (last offset) */ rd_kafka_timestamp_type_t tstype; /**< Compressed * MessageSet's * timestamp type. */ int64_t timestamp; /**< ... timestamp*/ } msetr_outer; - struct msgset_v2_hdr *msetr_v2_hdr; /**< MessageSet v2 header */ + struct msgset_v2_hdr *msetr_v2_hdr; /**< MessageSet v2 header */ /* * Aborted Transaction Start Offsets. These are arranged in a map * (ABORTED_TXN_OFFSETS), with PID as the key and value as follows: - * - OFFSETS: sorted list of aborted transaction start offsets (ascending) + * - OFFSETS: sorted list of aborted transaction start offsets + * (ascending) * - IDX: an index into OFFSETS list, initialized to 0. * * The logic for processing fetched data is as follows (note: this is * different from the Java client): * - * 1. If the message is a transaction control message and the status is ABORT - * then increment ABORTED_TXN_OFFSETS(PID).IDX. note: sanity check that - * OFFSETS[ABORTED_TXN_OFFSETS(PID).IDX] is less than the current offset - * before incrementing. If the status is COMMIT, do nothing. + * 1. If the message is a transaction control message and the status is + * ABORT then increment ABORTED_TXN_OFFSETS(PID).IDX. note: sanity check + * that OFFSETS[ABORTED_TXN_OFFSETS(PID).IDX] is less than the current + * offset before incrementing. If the status is COMMIT, do nothing. * - * 2. If the message is a normal message, find the corresponding OFFSETS list - * in ABORTED_TXN_OFFSETS. 
If it doesn't exist, then keep the message. If - * the PID does exist, compare ABORTED_TXN_OFFSETS(PID).IDX with - * len(OFFSETS). If it's >= then the message should be kept. If not, - * compare the message offset with OFFSETS[ABORTED_TXN_OFFSETS(PID).IDX]. - * If it's greater than or equal to this value, then the message should be - * ignored. If it's less than, then the message should be kept. + * 2. If the message is a normal message, find the corresponding OFFSETS + * list in ABORTED_TXN_OFFSETS. If it doesn't exist, then keep the + * message. If the PID does exist, compare ABORTED_TXN_OFFSETS(PID).IDX + * with len(OFFSETS). If it's >= then the message should be kept. If + * not, compare the message offset with + * OFFSETS[ABORTED_TXN_OFFSETS(PID).IDX]. If it's greater than or equal + * to this value, then the message should be ignored. If it's less than, + * then the message should be kept. * - * Note: A MessageSet comprises messages from at most one transaction, so the - * logic in step 2 is done at the message set level. + * Note: A MessageSet comprises messages from at most one transaction, + * so the logic in step 2 is done at the message set level. */ rd_kafka_aborted_txns_t *msetr_aborted_txns; const struct rd_kafka_toppar_ver *msetr_tver; /**< Toppar op version of * request. */ - int32_t msetr_broker_id; /**< Broker id (of msetr_rkb) */ - rd_kafka_broker_t *msetr_rkb; /* @warning Not a refcounted - * reference! */ - rd_kafka_toppar_t *msetr_rktp; /* @warning Not a refcounted - * reference! */ - - int msetr_msgcnt; /**< Number of messages in rkq */ - int64_t msetr_msg_bytes; /**< Number of bytes in rkq */ - rd_kafka_q_t msetr_rkq; /**< Temp Message and error queue */ - rd_kafka_q_t *msetr_par_rkq; /**< Parent message and error queue, - * the temp msetr_rkq will be moved - * to this queue when parsing - * is done. - * Refcount is not increased. */ - - int64_t msetr_next_offset; /**< Next offset to fetch after - * this reader run is done. 
- * Optional: only used for special - * cases where the per-message offset - * can't be relied on for next - * fetch offset, such as with - * compacted topics. */ - - int msetr_ctrl_cnt; /**< Number of control messages - * or MessageSets received. */ - - const char *msetr_srcname; /**< Optional message source string, - * used in debug logging to - * indicate messages were - * from an inner compressed - * message set. - * Not freed (use const memory). - * Add trailing space. */ + int32_t msetr_broker_id; /**< Broker id (of msetr_rkb) */ + rd_kafka_broker_t *msetr_rkb; /* @warning Not a refcounted + * reference! */ + rd_kafka_toppar_t *msetr_rktp; /* @warning Not a refcounted + * reference! */ + + int msetr_msgcnt; /**< Number of messages in rkq */ + int64_t msetr_msg_bytes; /**< Number of bytes in rkq */ + rd_kafka_q_t msetr_rkq; /**< Temp Message and error queue */ + rd_kafka_q_t *msetr_par_rkq; /**< Parent message and error queue, + * the temp msetr_rkq will be moved + * to this queue when parsing + * is done. + * Refcount is not increased. */ + + int64_t msetr_next_offset; /**< Next offset to fetch after + * this reader run is done. + * Optional: only used for special + * cases where the per-message offset + * can't be relied on for next + * fetch offset, such as with + * compacted topics. */ + + int msetr_ctrl_cnt; /**< Number of control messages + * or MessageSets received. */ + + const char *msetr_srcname; /**< Optional message source string, + * used in debug logging to + * indicate messages were + * from an inner compressed + * message set. + * Not freed (use const memory). + * Add trailing space. 
*/ rd_kafka_compression_t msetr_compression; /**< Compression codec */ } rd_kafka_msgset_reader_t; @@ -206,31 +209,30 @@ typedef struct rd_kafka_msgset_reader_s { /* Forward declarations */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr); +rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr); static rd_kafka_resp_err_t -rd_kafka_msgset_reader_msgs_v2 (rd_kafka_msgset_reader_t *msetr); +rd_kafka_msgset_reader_msgs_v2(rd_kafka_msgset_reader_t *msetr); /** * @brief Set up a MessageSet reader but don't start reading messages. */ -static void -rd_kafka_msgset_reader_init (rd_kafka_msgset_reader_t *msetr, - rd_kafka_buf_t *rkbuf, - rd_kafka_toppar_t *rktp, - const struct rd_kafka_toppar_ver *tver, - rd_kafka_aborted_txns_t *aborted_txns, - rd_kafka_q_t *par_rkq) { +static void rd_kafka_msgset_reader_init(rd_kafka_msgset_reader_t *msetr, + rd_kafka_buf_t *rkbuf, + rd_kafka_toppar_t *rktp, + const struct rd_kafka_toppar_ver *tver, + rd_kafka_aborted_txns_t *aborted_txns, + rd_kafka_q_t *par_rkq) { memset(msetr, 0, sizeof(*msetr)); - msetr->msetr_rkb = rkbuf->rkbuf_rkb; - msetr->msetr_broker_id = rd_kafka_broker_id(msetr->msetr_rkb); - msetr->msetr_rktp = rktp; + msetr->msetr_rkb = rkbuf->rkbuf_rkb; + msetr->msetr_broker_id = rd_kafka_broker_id(msetr->msetr_rkb); + msetr->msetr_rktp = rktp; msetr->msetr_aborted_txns = aborted_txns; - msetr->msetr_tver = tver; - msetr->msetr_rkbuf = rkbuf; - msetr->msetr_srcname = ""; + msetr->msetr_tver = tver; + msetr->msetr_rkbuf = rkbuf; + msetr->msetr_srcname = ""; rkbuf->rkbuf_uflow_mitigation = "truncated response from broker (ok)"; @@ -251,19 +253,19 @@ rd_kafka_msgset_reader_init (rd_kafka_msgset_reader_t *msetr, - - /** * @brief Decompress MessageSet, pass the uncompressed MessageSet to * the MessageSet reader. 
*/ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, - int MsgVersion, int Attributes, - int64_t Timestamp, int64_t Offset, - const void *compressed, - size_t compressed_size) { - struct iovec iov = { .iov_base = NULL, .iov_len = 0 }; +rd_kafka_msgset_reader_decompress(rd_kafka_msgset_reader_t *msetr, + int MsgVersion, + int Attributes, + int64_t Timestamp, + int64_t Offset, + const void *compressed, + size_t compressed_size) { + struct iovec iov = {.iov_base = NULL, .iov_len = 0}; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; int codec = Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; @@ -271,21 +273,19 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, msetr->msetr_compression = codec; - switch (codec) - { + switch (codec) { #if WITH_ZLIB - case RD_KAFKA_COMPRESSION_GZIP: - { + case RD_KAFKA_COMPRESSION_GZIP: { uint64_t outlenx = 0; /* Decompress Message payload */ - iov.iov_base = rd_gz_decompress(compressed, (int)compressed_size, - &outlenx); + iov.iov_base = rd_gz_decompress(compressed, + (int)compressed_size, &outlenx); if (unlikely(!iov.iov_base)) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "GZIP", "Failed to decompress Gzip " - "message at offset %"PRId64 - " of %"PRIusz" bytes: " + "message at offset %" PRId64 " of %" PRIusz + " bytes: " "ignoring message", Offset, compressed_size); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; @@ -293,19 +293,17 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, } iov.iov_len = (size_t)outlenx; - } - break; + } break; #endif #if WITH_SNAPPY - case RD_KAFKA_COMPRESSION_SNAPPY: - { + case RD_KAFKA_COMPRESSION_SNAPPY: { const char *inbuf = compressed; - size_t inlen = compressed_size; + size_t inlen = compressed_size; int r; - static const unsigned char snappy_java_magic[] = - { 0x82, 'S','N','A','P','P','Y', 0 }; - static const size_t snappy_java_hdrlen = 8+4+4; + static const unsigned char 
snappy_java_magic[] = { + 0x82, 'S', 'N', 'A', 'P', 'P', 'Y', 0}; + static const size_t snappy_java_hdrlen = 8 + 4 + 4; /* snappy-java adds its own header (SnappyCodec) * which is not compatible with the official Snappy @@ -319,22 +317,22 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, /* snappy-java framing */ char errstr[128]; - inbuf = inbuf + snappy_java_hdrlen; + inbuf = inbuf + snappy_java_hdrlen; inlen -= snappy_java_hdrlen; iov.iov_base = rd_kafka_snappy_java_uncompress( - inbuf, inlen, - &iov.iov_len, - errstr, sizeof(errstr)); + inbuf, inlen, &iov.iov_len, errstr, sizeof(errstr)); if (unlikely(!iov.iov_base)) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY", - "%s [%"PRId32"]: " + "%s [%" PRId32 + "]: " "Snappy decompression for message " - "at offset %"PRId64" failed: %s: " + "at offset %" PRId64 + " failed: %s: " "ignoring message", rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - Offset, errstr); + rktp->rktp_partition, Offset, + errstr); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto err; } @@ -345,12 +343,13 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, /* Acquire uncompressed length */ if (unlikely(!rd_kafka_snappy_uncompressed_length( - inbuf, inlen, &iov.iov_len))) { + inbuf, inlen, &iov.iov_len))) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY", "Failed to get length of Snappy " "compressed payload " - "for message at offset %"PRId64 - " (%"PRIusz" bytes): " + "for message at offset %" PRId64 + " (%" PRIusz + " bytes): " "ignoring message", Offset, inlen); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; @@ -362,9 +361,10 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, if (unlikely(!iov.iov_base)) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY", "Failed to allocate Snappy " - "decompress buffer of size %"PRIusz - "for message at offset %"PRId64 - " (%"PRIusz" bytes): %s: " + "decompress buffer of size %" PRIusz + "for message at offset %" PRId64 + " (%" PRIusz + " bytes): %s: " 
"ignoring message", iov.iov_len, Offset, inlen, rd_strerror(errno)); @@ -374,60 +374,53 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, /* Uncompress to outbuf */ if (unlikely((r = rd_kafka_snappy_uncompress( - inbuf, inlen, iov.iov_base)))) { + inbuf, inlen, iov.iov_base)))) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY", "Failed to decompress Snappy " "payload for message at offset " - "%"PRId64" (%"PRIusz" bytes): %s: " + "%" PRId64 " (%" PRIusz + " bytes): %s: " "ignoring message", Offset, inlen, - rd_strerror(-r/*negative errno*/)); + rd_strerror(-r /*negative errno*/)); rd_free(iov.iov_base); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto err; } } - } - break; + } break; #endif - case RD_KAFKA_COMPRESSION_LZ4: - { - err = rd_kafka_lz4_decompress(msetr->msetr_rkb, - /* Proper HC? */ - MsgVersion >= 1 ? 1 : 0, - Offset, - /* @warning Will modify compressed - * if no proper HC */ - (char *)compressed, - compressed_size, - &iov.iov_base, &iov.iov_len); + case RD_KAFKA_COMPRESSION_LZ4: { + err = + rd_kafka_lz4_decompress(msetr->msetr_rkb, + /* Proper HC? */ + MsgVersion >= 1 ? 
1 : 0, Offset, + /* @warning Will modify compressed + * if no proper HC */ + (char *)compressed, compressed_size, + &iov.iov_base, &iov.iov_len); if (err) goto err; - } - break; + } break; #if WITH_ZSTD - case RD_KAFKA_COMPRESSION_ZSTD: - { - err = rd_kafka_zstd_decompress(msetr->msetr_rkb, - (char *)compressed, - compressed_size, - &iov.iov_base, &iov.iov_len); + case RD_KAFKA_COMPRESSION_ZSTD: { + err = rd_kafka_zstd_decompress( + msetr->msetr_rkb, (char *)compressed, compressed_size, + &iov.iov_base, &iov.iov_len); if (err) goto err; - } - break; + } break; #endif default: rd_rkb_dbg(msetr->msetr_rkb, MSG, "CODEC", - "%s [%"PRId32"]: Message at offset %"PRId64 + "%s [%" PRId32 "]: Message at offset %" PRId64 " with unsupported " "compression codec 0x%x: message ignored", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, Offset, (int)codec); err = RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; @@ -459,14 +452,11 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, /* Pass decompressed data (inner Messageset) * to new instance of the MessageSet parser. */ rd_kafka_msgset_reader_t inner_msetr; - rd_kafka_msgset_reader_init(&inner_msetr, - rkbufz, - msetr->msetr_rktp, - msetr->msetr_tver, - /* there is no aborted transaction - * support for MsgVersion < 2 */ - NULL, - &msetr->msetr_rkq); + rd_kafka_msgset_reader_init( + &inner_msetr, rkbufz, msetr->msetr_rktp, msetr->msetr_tver, + /* there is no aborted transaction + * support for MsgVersion < 2 */ + NULL, &msetr->msetr_rkq); inner_msetr.msetr_srcname = "compressed "; @@ -474,13 +464,13 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, /* postproc() will convert relative to * absolute offsets */ inner_msetr.msetr_relative_offsets = 1; - inner_msetr.msetr_outer.offset = Offset; + inner_msetr.msetr_outer.offset = Offset; /* Apply single LogAppendTime timestamp for * all messages. 
*/ if (Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) { inner_msetr.msetr_outer.tstype = - RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; + RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; inner_msetr.msetr_outer.timestamp = Timestamp; } } @@ -498,7 +488,7 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, rd_kafka_buf_t *orig_rkbuf = msetr->msetr_rkbuf; rkbufz->rkbuf_uflow_mitigation = - "truncated response from broker (ok)"; + "truncated response from broker (ok)"; /* Temporarily replace read buffer with uncompressed buffer */ msetr->msetr_rkbuf = rkbufz; @@ -516,19 +506,17 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, return err; - err: +err: /* Enqueue error messsage: * Create op and push on temporary queue. */ - rd_kafka_consumer_err(&msetr->msetr_rkq, msetr->msetr_broker_id, - err, msetr->msetr_tver->version, - NULL, rktp, Offset, - "Decompression (codec 0x%x) of message at %"PRIu64 - " of %"PRIusz" bytes failed: %s", - codec, Offset, compressed_size, - rd_kafka_err2str(err)); + rd_kafka_consumer_err( + &msetr->msetr_rkq, msetr->msetr_broker_id, err, + msetr->msetr_tver->version, NULL, rktp, Offset, + "Decompression (codec 0x%x) of message at %" PRIu64 " of %" PRIusz + " bytes failed: %s", + codec, Offset, compressed_size, rd_kafka_err2str(err)); return err; - } @@ -541,18 +529,18 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, * parsing (such as for partial Messages). 
*/ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { - rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; +rd_kafka_msgset_reader_msg_v0_1(rd_kafka_msgset_reader_t *msetr) { + rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; - rd_kafka_broker_t *rkb = msetr->msetr_rkb; + rd_kafka_broker_t *rkb = msetr->msetr_rkb; struct { - int64_t Offset; /* MessageSet header */ - int32_t MessageSize; /* MessageSet header */ + int64_t Offset; /* MessageSet header */ + int32_t MessageSize; /* MessageSet header */ uint32_t Crc; - int8_t MagicByte; /* MsgVersion */ - int8_t Attributes; - int64_t Timestamp; /* v1 */ - } hdr; /* Message header */ + int8_t MagicByte; /* MsgVersion */ + int8_t Attributes; + int64_t Timestamp; /* v1 */ + } hdr; /* Message header */ rd_kafkap_bytes_t Key; rd_kafkap_bytes_t Value; int32_t Value_len; @@ -560,11 +548,13 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { size_t hdrsize = 6; /* Header size following MessageSize */ rd_slice_t crc_slice; rd_kafka_msg_t *rkm; - int relative_offsets = 0; + int relative_offsets = 0; const char *reloff_str = ""; /* Only log decoding errors if protocol debugging enabled. */ - int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & - RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0; + int log_decode_errors = + (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL) + ? 
LOG_DEBUG + : 0; size_t message_end; rd_kafka_buf_read_i64(rkbuf, &hdr.Offset); @@ -582,23 +572,23 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { if (hdr.MagicByte == 1) { /* MsgVersion */ rd_kafka_buf_read_i64(rkbuf, &hdr.Timestamp); hdrsize += 8; - /* MsgVersion 1 has relative offsets for compressed MessageSets*/ + /* MsgVersion 1 has relative offsets for compressed + * MessageSets*/ if (!(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) && msetr->msetr_relative_offsets) { relative_offsets = 1; - reloff_str = "relative "; + reloff_str = "relative "; } } else hdr.Timestamp = 0; /* Verify MessageSize */ if (unlikely(hdr.MessageSize < (ssize_t)hdrsize)) - rd_kafka_buf_parse_fail(rkbuf, - "Message at %soffset %"PRId64 - " MessageSize %"PRId32 - " < hdrsize %"PRIusz, - reloff_str, - hdr.Offset, hdr.MessageSize, hdrsize); + rd_kafka_buf_parse_fail( + rkbuf, + "Message at %soffset %" PRId64 " MessageSize %" PRId32 + " < hdrsize %" PRIusz, + reloff_str, hdr.Offset, hdr.MessageSize, hdrsize); /* Early check for partial messages */ rd_kafka_buf_check_len(rkbuf, hdr.MessageSize - hdrsize); @@ -613,20 +603,18 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { if (unlikely(hdr.Crc != calc_crc)) { /* Propagate CRC error to application and * continue with next message. 
*/ - rd_kafka_consumer_err(&msetr->msetr_rkq, - msetr->msetr_broker_id, - RD_KAFKA_RESP_ERR__BAD_MSG, - msetr->msetr_tver->version, - NULL, rktp, - hdr.Offset, - "Message at %soffset %"PRId64 - " (%"PRId32" bytes) " - "failed CRC32 check " - "(original 0x%"PRIx32" != " - "calculated 0x%"PRIx32")", - reloff_str, hdr.Offset, - hdr.MessageSize, - hdr.Crc, calc_crc); + rd_kafka_consumer_err( + &msetr->msetr_rkq, msetr->msetr_broker_id, + RD_KAFKA_RESP_ERR__BAD_MSG, + msetr->msetr_tver->version, NULL, rktp, hdr.Offset, + "Message at %soffset %" PRId64 " (%" PRId32 + " bytes) " + "failed CRC32 check " + "(original 0x%" PRIx32 + " != " + "calculated 0x%" PRIx32 ")", + reloff_str, hdr.Offset, hdr.MessageSize, hdr.Crc, + calc_crc); rd_kafka_buf_skip_to(rkbuf, message_end); rd_atomic64_add(&rkb->rkb_c.rx_err, 1); /* Continue with next message */ @@ -656,15 +644,14 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { * the messageset, and it also means * we cant perform this offset check here * in that case. */ - if (!relative_offsets && - hdr.Offset < rktp->rktp_offsets.fetch_offset) + if (!relative_offsets && hdr.Offset < rktp->rktp_offsets.fetch_offset) return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */ /* Handle compressed MessageSet */ if (unlikely(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK)) return rd_kafka_msgset_reader_decompress( - msetr, hdr.MagicByte, hdr.Attributes, hdr.Timestamp, - hdr.Offset, Value.data, Value_len); + msetr, hdr.MagicByte, hdr.Attributes, hdr.Timestamp, + hdr.Offset, Value.data, Value_len); /* Pure uncompressed message, this is the innermost @@ -672,15 +659,12 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { * MessageSets have been peeled off. */ /* Create op/message container for message. */ - rko = rd_kafka_op_new_fetch_msg(&rkm, rktp, msetr->msetr_tver->version, - rkbuf, - hdr.Offset, - (size_t)RD_KAFKAP_BYTES_LEN(&Key), - RD_KAFKAP_BYTES_IS_NULL(&Key) ? 
- NULL : Key.data, - (size_t)RD_KAFKAP_BYTES_LEN(&Value), - RD_KAFKAP_BYTES_IS_NULL(&Value) ? - NULL : Value.data); + rko = rd_kafka_op_new_fetch_msg( + &rkm, rktp, msetr->msetr_tver->version, rkbuf, hdr.Offset, + (size_t)RD_KAFKAP_BYTES_LEN(&Key), + RD_KAFKAP_BYTES_IS_NULL(&Key) ? NULL : Key.data, + (size_t)RD_KAFKAP_BYTES_LEN(&Value), + RD_KAFKAP_BYTES_IS_NULL(&Value) ? NULL : Value.data); rkm->rkm_broker_id = msetr->msetr_broker_id; @@ -707,7 +691,7 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue */ - err_parse: +err_parse: /* Count all parse errors as partial message errors. */ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1); return rkbuf->rkbuf_err; @@ -715,20 +699,19 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { - /** * @brief Message parser for MsgVersion v2 */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { - rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; +rd_kafka_msgset_reader_msg_v2(rd_kafka_msgset_reader_t *msetr) { + rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; struct { int64_t Length; - int8_t MsgAttributes; + int8_t MsgAttributes; int64_t TimestampDelta; int64_t OffsetDelta; - int64_t Offset; /* Absolute offset */ + int64_t Offset; /* Absolute offset */ rd_kafkap_bytes_t Key; rd_kafkap_bytes_t Value; rd_kafkap_bytes_t Headers; @@ -736,12 +719,15 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { rd_kafka_op_t *rko; rd_kafka_msg_t *rkm; /* Only log decoding errors if protocol debugging enabled. */ - int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & - RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0; + int log_decode_errors = + (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL) + ? 
LOG_DEBUG + : 0; size_t message_end; rd_kafka_buf_read_varint(rkbuf, &hdr.Length); - message_end = rd_slice_offset(&rkbuf->rkbuf_reader)+(size_t)hdr.Length; + message_end = + rd_slice_offset(&rkbuf->rkbuf_reader) + (size_t)hdr.Length; rd_kafka_buf_read_i8(rkbuf, &hdr.MsgAttributes); rd_kafka_buf_read_varint(rkbuf, &hdr.TimestampDelta); @@ -751,10 +737,10 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { /* Skip message if outdated */ if (hdr.Offset < rktp->rktp_offsets.fetch_offset) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG", - "%s [%"PRId32"]: " - "Skip offset %"PRId64" < fetch_offset %"PRId64, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, + "%s [%" PRId32 + "]: " + "Skip offset %" PRId64 " < fetch_offset %" PRId64, + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, hdr.Offset, rktp->rktp_offsets.fetch_offset); rd_kafka_buf_skip_to(rkbuf, message_end); return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */ @@ -772,43 +758,50 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { rd_kafka_buf_read_varint(rkbuf, &ctrl_data.KeySize); if (unlikely(ctrl_data.KeySize < 2)) - rd_kafka_buf_parse_fail(rkbuf, - "%s [%"PRId32"]: " - "Ctrl message at offset %"PRId64 - " has invalid key size %"PRId64, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - hdr.Offset, ctrl_data.KeySize); + rd_kafka_buf_parse_fail( + rkbuf, + "%s [%" PRId32 + "]: " + "Ctrl message at offset %" PRId64 + " has invalid key size %" PRId64, + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, hdr.Offset, + ctrl_data.KeySize); rd_kafka_buf_read_i16(rkbuf, &ctrl_data.Version); if (ctrl_data.Version != 0) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG", - "%s [%"PRId32"]: " - "Skipping ctrl msg with " - "unsupported version %"PRId16 - " at offset %"PRId64, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - ctrl_data.Version, hdr.Offset); + "%s [%" PRId32 + "]: " + "Skipping ctrl msg with " + "unsupported version %" PRId16 + " at 
offset %" PRId64, + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, ctrl_data.Version, + hdr.Offset); rd_kafka_buf_skip_to(rkbuf, message_end); - return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */ + return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next + msg */ } if (unlikely(ctrl_data.KeySize != 4)) - rd_kafka_buf_parse_fail(rkbuf, - "%s [%"PRId32"]: " - "Ctrl message at offset %"PRId64 - " has invalid key size %"PRId64, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - hdr.Offset, ctrl_data.KeySize); + rd_kafka_buf_parse_fail( + rkbuf, + "%s [%" PRId32 + "]: " + "Ctrl message at offset %" PRId64 + " has invalid key size %" PRId64, + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, hdr.Offset, + ctrl_data.KeySize); rd_kafka_buf_read_i16(rkbuf, &ctrl_data.Type); /* Client is uninterested in value of commit marker */ - rd_kafka_buf_skip(rkbuf, (int32_t)(message_end - - rd_slice_offset(&rkbuf->rkbuf_reader))); + rd_kafka_buf_skip( + rkbuf, (int32_t)(message_end - + rd_slice_offset(&rkbuf->rkbuf_reader))); switch (ctrl_data.Type) { case RD_KAFKA_CTRL_MSG_COMMIT: @@ -817,20 +810,22 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { case RD_KAFKA_CTRL_MSG_ABORT: if (msetr->msetr_rkb->rkb_rk->rk_conf.isolation_level != - RD_KAFKA_READ_COMMITTED) + RD_KAFKA_READ_COMMITTED) break; if (unlikely(!msetr->msetr_aborted_txns)) { rd_rkb_dbg(msetr->msetr_rkb, - MSG|RD_KAFKA_DBG_EOS, "TXN", - "%s [%"PRId32"] received abort txn " - "ctrl msg at offset %"PRId64" for " - "PID %"PRId64", but there are no " + MSG | RD_KAFKA_DBG_EOS, "TXN", + "%s [%" PRId32 + "] received abort txn " + "ctrl msg at offset %" PRId64 + " for " + "PID %" PRId64 + ", but there are no " "known aborted transactions: " "ignoring", rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - hdr.Offset, + rktp->rktp_partition, hdr.Offset, msetr->msetr_v2_hdr->PID); break; } @@ -838,23 +833,24 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t 
*msetr) { /* This marks the end of this (aborted) transaction, * advance to next aborted transaction in list */ aborted_txn_start_offset = - rd_kafka_aborted_txns_pop_offset( - msetr->msetr_aborted_txns, - msetr->msetr_v2_hdr->PID, - hdr.Offset); + rd_kafka_aborted_txns_pop_offset( + msetr->msetr_aborted_txns, + msetr->msetr_v2_hdr->PID, hdr.Offset); if (unlikely(aborted_txn_start_offset == -1)) { rd_rkb_dbg(msetr->msetr_rkb, - MSG|RD_KAFKA_DBG_EOS, "TXN", - "%s [%"PRId32"] received abort txn " - "ctrl msg at offset %"PRId64" for " - "PID %"PRId64", but this offset is " + MSG | RD_KAFKA_DBG_EOS, "TXN", + "%s [%" PRId32 + "] received abort txn " + "ctrl msg at offset %" PRId64 + " for " + "PID %" PRId64 + ", but this offset is " "not listed as an aborted " "transaction: aborted transaction " "was possibly empty: ignoring", rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - hdr.Offset, + rktp->rktp_partition, hdr.Offset, msetr->msetr_v2_hdr->PID); break; } @@ -862,20 +858,22 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { default: - rd_rkb_dbg(msetr->msetr_rkb, MSG, "TXN" - "%s [%"PRId32"]: " - "Unsupported ctrl message " - "type %"PRId16" at offset" - " %"PRId64": ignoring", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - ctrl_data.Type, hdr.Offset); + rd_rkb_dbg(msetr->msetr_rkb, MSG, + "TXN" + "%s [%" PRId32 + "]: " + "Unsupported ctrl message " + "type %" PRId16 + " at offset" + " %" PRId64 ": ignoring", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, ctrl_data.Type, + hdr.Offset); break; } - rko = rd_kafka_op_new_ctrl_msg( - rktp, msetr->msetr_tver->version, - rkbuf, hdr.Offset); + rko = rd_kafka_op_new_ctrl_msg(rktp, msetr->msetr_tver->version, + rkbuf, hdr.Offset); rd_kafka_q_enq(&msetr->msetr_rkq, rko); msetr->msetr_msgcnt++; @@ -884,27 +882,25 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { /* Regular message */ - /* Note: messages in aborted transactions are skipped at the MessageSet 
level */ + /* Note: messages in aborted transactions are skipped at the MessageSet + * level */ rd_kafka_buf_read_bytes_varint(rkbuf, &hdr.Key); rd_kafka_buf_read_bytes_varint(rkbuf, &hdr.Value); /* We parse the Headers later, just store the size (possibly truncated) * and pointer to the headers. */ - hdr.Headers.len = (int32_t)(message_end - - rd_slice_offset(&rkbuf->rkbuf_reader)); + hdr.Headers.len = + (int32_t)(message_end - rd_slice_offset(&rkbuf->rkbuf_reader)); rd_kafka_buf_read_ptr(rkbuf, &hdr.Headers.data, hdr.Headers.len); /* Create op/message container for message. */ - rko = rd_kafka_op_new_fetch_msg(&rkm, - rktp, msetr->msetr_tver->version, rkbuf, - hdr.Offset, - (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Key), - RD_KAFKAP_BYTES_IS_NULL(&hdr.Key) ? - NULL : hdr.Key.data, - (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Value), - RD_KAFKAP_BYTES_IS_NULL(&hdr.Value) ? - NULL : hdr.Value.data); + rko = rd_kafka_op_new_fetch_msg( + &rkm, rktp, msetr->msetr_tver->version, rkbuf, hdr.Offset, + (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Key), + RD_KAFKAP_BYTES_IS_NULL(&hdr.Key) ? NULL : hdr.Key.data, + (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Value), + RD_KAFKAP_BYTES_IS_NULL(&hdr.Value) ? 
NULL : hdr.Value.data); rkm->rkm_broker_id = msetr->msetr_broker_id; @@ -924,12 +920,12 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { if ((msetr->msetr_v2_hdr->Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) || (hdr.MsgAttributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME)) { - rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; + rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; rkm->rkm_timestamp = msetr->msetr_v2_hdr->MaxTimestamp; } else { rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME; rkm->rkm_timestamp = - msetr->msetr_v2_hdr->BaseTimestamp + hdr.TimestampDelta; + msetr->msetr_v2_hdr->BaseTimestamp + hdr.TimestampDelta; } @@ -940,7 +936,7 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: /* Count all parse errors as partial message errors. */ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1); return rkbuf->rkbuf_err; @@ -951,40 +947,42 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { * @brief Read v2 messages from current buffer position. */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_msgs_v2 (rd_kafka_msgset_reader_t *msetr) { - rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; +rd_kafka_msgset_reader_msgs_v2(rd_kafka_msgset_reader_t *msetr) { + rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; /* Only log decoding errors if protocol debugging enabled. */ - int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & - RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0; + int log_decode_errors = + (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL) + ? 
LOG_DEBUG + : 0; if (msetr->msetr_aborted_txns != NULL && (msetr->msetr_v2_hdr->Attributes & - (RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL| + (RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL | RD_KAFKA_MSGSET_V2_ATTR_CONTROL)) == - RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL) { + RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL) { /* Transactional non-control MessageSet: * check if it is part of an aborted transaction. */ - int64_t txn_start_offset = - rd_kafka_aborted_txns_get_offset( - msetr->msetr_aborted_txns, - msetr->msetr_v2_hdr->PID); + int64_t txn_start_offset = rd_kafka_aborted_txns_get_offset( + msetr->msetr_aborted_txns, msetr->msetr_v2_hdr->PID); if (txn_start_offset != -1 && - msetr->msetr_v2_hdr->BaseOffset >= - txn_start_offset) { + msetr->msetr_v2_hdr->BaseOffset >= txn_start_offset) { /* MessageSet is part of aborted transaction */ rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG", - "%s [%"PRId32"]: " - "Skipping %"PRId32" message(s) " + "%s [%" PRId32 + "]: " + "Skipping %" PRId32 + " message(s) " "in aborted transaction " - "at offset %"PRId64 " for PID %"PRId64, + "at offset %" PRId64 " for PID %" PRId64, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, msetr->msetr_v2_hdr->RecordCount, - txn_start_offset, - msetr->msetr_v2_hdr->PID); - rd_kafka_buf_skip(msetr->msetr_rkbuf, rd_slice_remains( + txn_start_offset, msetr->msetr_v2_hdr->PID); + rd_kafka_buf_skip( + msetr->msetr_rkbuf, + rd_slice_remains( &msetr->msetr_rkbuf->rkbuf_reader)); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -1012,8 +1010,8 @@ rd_kafka_msgset_reader_msgs_v2 (rd_kafka_msgset_reader_t *msetr) { * @brief MessageSet reader for MsgVersion v2 (FetchRequest v4) */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { - rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; +rd_kafka_msgset_reader_v2(rd_kafka_msgset_reader_t *msetr) { + rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; struct msgset_v2_hdr hdr; rd_slice_t save_slice; @@ -1022,36 +1020,38 @@ 
rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { size_t payload_size; int64_t LastOffset; /* Last absolute Offset in MessageSet header */ /* Only log decoding errors if protocol debugging enabled. */ - int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & - RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0; + int log_decode_errors = + (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL) + ? LOG_DEBUG + : 0; rd_kafka_buf_read_i64(rkbuf, &hdr.BaseOffset); rd_kafka_buf_read_i32(rkbuf, &hdr.Length); - len_start = rd_slice_offset(&rkbuf->rkbuf_reader); + len_start = rd_slice_offset(&rkbuf->rkbuf_reader); if (unlikely(hdr.Length < RD_KAFKAP_MSGSET_V2_SIZE - 8 - 4)) rd_kafka_buf_parse_fail(rkbuf, - "%s [%"PRId32"] " - "MessageSet at offset %"PRId64 - " length %"PRId32" < header size %d", + "%s [%" PRId32 + "] " + "MessageSet at offset %" PRId64 + " length %" PRId32 " < header size %d", rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - hdr.BaseOffset, hdr.Length, + rktp->rktp_partition, hdr.BaseOffset, + hdr.Length, RD_KAFKAP_MSGSET_V2_SIZE - 8 - 4); rd_kafka_buf_read_i32(rkbuf, &hdr.PartitionLeaderEpoch); - rd_kafka_buf_read_i8(rkbuf, &hdr.MagicByte); + rd_kafka_buf_read_i8(rkbuf, &hdr.MagicByte); rd_kafka_buf_read_i32(rkbuf, &hdr.Crc); if (msetr->msetr_rkb->rkb_rk->rk_conf.check_crcs) { /* Verify CRC32C if desired. */ uint32_t calc_crc; rd_slice_t crc_slice; - size_t crc_len = hdr.Length-4-1-4; + size_t crc_len = hdr.Length - 4 - 1 - 4; - if (!rd_slice_narrow_copy_relative( - &rkbuf->rkbuf_reader, - &crc_slice, crc_len)) + if (!rd_slice_narrow_copy_relative(&rkbuf->rkbuf_reader, + &crc_slice, crc_len)) rd_kafka_buf_check_len(rkbuf, crc_len); calc_crc = rd_slice_crc32c(&crc_slice); @@ -1059,19 +1059,18 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { if (unlikely((uint32_t)hdr.Crc != calc_crc)) { /* Propagate CRC error to application and * continue with next message. 
*/ - rd_kafka_consumer_err(&msetr->msetr_rkq, - msetr->msetr_broker_id, - RD_KAFKA_RESP_ERR__BAD_MSG, - msetr->msetr_tver->version, - NULL, rktp, - hdr.BaseOffset, - "MessageSet at offset %"PRId64 - " (%"PRId32" bytes) " - "failed CRC32C check " - "(original 0x%"PRIx32" != " - "calculated 0x%"PRIx32")", - hdr.BaseOffset, - hdr.Length, hdr.Crc, calc_crc); + rd_kafka_consumer_err( + &msetr->msetr_rkq, msetr->msetr_broker_id, + RD_KAFKA_RESP_ERR__BAD_MSG, + msetr->msetr_tver->version, NULL, rktp, + hdr.BaseOffset, + "MessageSet at offset %" PRId64 " (%" PRId32 + " bytes) " + "failed CRC32C check " + "(original 0x%" PRIx32 + " != " + "calculated 0x%" PRIx32 ")", + hdr.BaseOffset, hdr.Length, hdr.Crc, calc_crc); rd_kafka_buf_skip_to(rkbuf, crc_len); rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_err, 1); return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -1089,17 +1088,17 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { rd_kafka_buf_read_i32(rkbuf, &hdr.RecordCount); /* Payload size is hdr.Length - MessageSet headers */ - payload_size = hdr.Length - (rd_slice_offset(&rkbuf->rkbuf_reader) - - len_start); + payload_size = + hdr.Length - (rd_slice_offset(&rkbuf->rkbuf_reader) - len_start); if (unlikely(payload_size > rd_kafka_buf_read_remain(rkbuf))) - rd_kafka_buf_underflow_fail(rkbuf, payload_size, - "%s [%"PRId32"] " - "MessageSet at offset %"PRId64 - " payload size %"PRIusz, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - hdr.BaseOffset, payload_size); + rd_kafka_buf_underflow_fail( + rkbuf, payload_size, + "%s [%" PRId32 + "] " + "MessageSet at offset %" PRId64 " payload size %" PRIusz, + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + hdr.BaseOffset, payload_size); /* If entire MessageSet contains old outdated offsets, skip it. 
*/ if (LastOffset < rktp->rktp_offsets.fetch_offset) { @@ -1116,14 +1115,14 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { if (hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) { const void *compressed; - compressed = rd_slice_ensure_contig(&rkbuf->rkbuf_reader, - payload_size); + compressed = + rd_slice_ensure_contig(&rkbuf->rkbuf_reader, payload_size); rd_assert(compressed); err = rd_kafka_msgset_reader_decompress( - msetr, 2/*MsgVersion v2*/, hdr.Attributes, - hdr.BaseTimestamp, hdr.BaseOffset, - compressed, payload_size); + msetr, 2 /*MsgVersion v2*/, hdr.Attributes, + hdr.BaseTimestamp, hdr.BaseOffset, compressed, + payload_size); if (err) goto err; @@ -1133,8 +1132,8 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { /* Save original slice, reduce size of the current one to * be limited by the MessageSet.Length, and then start reading * messages until the lesser slice is exhausted. */ - if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader, - &save_slice, payload_size)) + if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader, &save_slice, + payload_size)) rd_kafka_buf_check_len(rkbuf, payload_size); /* Read messages */ @@ -1148,7 +1147,7 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { } - done: +done: /* Set the next fetch offset to the MessageSet header's last offset + 1 * to avoid getting stuck on compacted MessageSets where the last * Message in the MessageSet has an Offset < MessageSet header's @@ -1159,12 +1158,12 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: /* Count all parse errors as partial message errors. */ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1); err = rkbuf->rkbuf_err; /* FALLTHRU */ - err: +err: msetr->msetr_v2_hdr = NULL; return err; } @@ -1179,16 +1178,18 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { * unsupported. 
*/ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_peek_msg_version (rd_kafka_msgset_reader_t *msetr, - int8_t *MagicBytep) { - rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; +rd_kafka_msgset_reader_peek_msg_version(rd_kafka_msgset_reader_t *msetr, + int8_t *MagicBytep) { + rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; /* Only log decoding errors if protocol debugging enabled. */ - int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & - RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0; + int log_decode_errors = + (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL) + ? LOG_DEBUG + : 0; size_t read_offset = rd_slice_offset(&rkbuf->rkbuf_reader); - rd_kafka_buf_peek_i8(rkbuf, read_offset+8+4+4, MagicBytep); + rd_kafka_buf_peek_i8(rkbuf, read_offset + 8 + 4 + 4, MagicBytep); if (unlikely(*MagicBytep < 0 || *MagicBytep > 2)) { int64_t Offset; /* For error logging */ @@ -1199,26 +1200,28 @@ rd_kafka_msgset_reader_peek_msg_version (rd_kafka_msgset_reader_t *msetr, rd_rkb_dbg(msetr->msetr_rkb, MSG | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FETCH, "MAGICBYTE", - "%s [%"PRId32"]: " + "%s [%" PRId32 + "]: " "Unsupported Message(Set) MagicByte %d at " - "offset %"PRId64" " - "(buffer position %"PRIusz"/%"PRIusz"): skipping", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - (int)*MagicBytep, Offset, - read_offset, rd_slice_size(&rkbuf->rkbuf_reader)); + "offset %" PRId64 + " " + "(buffer position %" PRIusz "/%" PRIusz + "): skipping", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + (int)*MagicBytep, Offset, read_offset, + rd_slice_size(&rkbuf->rkbuf_reader)); if (Offset >= msetr->msetr_rktp->rktp_offsets.fetch_offset) { rd_kafka_consumer_err( - &msetr->msetr_rkq, - msetr->msetr_broker_id, - RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED, - msetr->msetr_tver->version, NULL, rktp, Offset, - "Unsupported Message(Set) MagicByte %d " - "at offset %"PRId64, - (int)*MagicBytep, Offset); + &msetr->msetr_rkq, 
msetr->msetr_broker_id, + RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED, + msetr->msetr_tver->version, NULL, rktp, Offset, + "Unsupported Message(Set) MagicByte %d " + "at offset %" PRId64, + (int)*MagicBytep, Offset); /* Skip message(set) */ - msetr->msetr_rktp->rktp_offsets.fetch_offset = Offset+1; + msetr->msetr_rktp->rktp_offsets.fetch_offset = + Offset + 1; } /* Skip this Message(Set). @@ -1232,7 +1235,7 @@ rd_kafka_msgset_reader_peek_msg_version (rd_kafka_msgset_reader_t *msetr, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: return RD_KAFKA_RESP_ERR__BAD_MSG; } @@ -1241,16 +1244,14 @@ rd_kafka_msgset_reader_peek_msg_version (rd_kafka_msgset_reader_t *msetr, * @brief Parse and read messages from msgset reader buffer. */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader (rd_kafka_msgset_reader_t *msetr) { +rd_kafka_msgset_reader(rd_kafka_msgset_reader_t *msetr) { rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; - rd_kafka_resp_err_t (*reader[]) - (rd_kafka_msgset_reader_t *) = { - /* Indexed by MsgVersion/MagicByte, pointing to - * a Msg(Set)Version reader */ - [0] = rd_kafka_msgset_reader_msg_v0_1, - [1] = rd_kafka_msgset_reader_msg_v0_1, - [2] = rd_kafka_msgset_reader_v2 - }; + rd_kafka_resp_err_t (*reader[])(rd_kafka_msgset_reader_t *) = { + /* Indexed by MsgVersion/MagicByte, pointing to + * a Msg(Set)Version reader */ + [0] = rd_kafka_msgset_reader_msg_v0_1, + [1] = rd_kafka_msgset_reader_msg_v0_1, + [2] = rd_kafka_msgset_reader_v2}; rd_kafka_resp_err_t err; /* Parse MessageSets until the slice is exhausted or an @@ -1261,8 +1262,8 @@ rd_kafka_msgset_reader (rd_kafka_msgset_reader_t *msetr) { /* We dont know the MsgVersion at this point, peek where the * MagicByte resides both in MsgVersion v0..1 and v2 to * know which MessageSet reader to use. 
*/ - err = rd_kafka_msgset_reader_peek_msg_version(msetr, - &MagicByte); + err = + rd_kafka_msgset_reader_peek_msg_version(msetr, &MagicByte); if (unlikely(err)) { if (err == RD_KAFKA_RESP_ERR__BAD_MSG) /* Read underflow, not an error. @@ -1291,33 +1292,29 @@ rd_kafka_msgset_reader (rd_kafka_msgset_reader_t *msetr) { * @param last_offsetp will be set to the offset of the last message in the set, * or -1 if not applicable. */ -static void rd_kafka_msgset_reader_postproc (rd_kafka_msgset_reader_t *msetr, - int64_t *last_offsetp) { +static void rd_kafka_msgset_reader_postproc(rd_kafka_msgset_reader_t *msetr, + int64_t *last_offsetp) { rd_kafka_op_t *rko; - rko = rd_kafka_q_last(&msetr->msetr_rkq, - RD_KAFKA_OP_FETCH, + rko = rd_kafka_q_last(&msetr->msetr_rkq, RD_KAFKA_OP_FETCH, 0 /* no error ops */); if (rko) { - *last_offsetp = rko->rko_u.fetch.rkm.rkm_offset; - - if (*last_offsetp != -1 && msetr->msetr_relative_offsets) { - /* Update messages to absolute offsets - * and purge any messages older than the current - * fetch offset. */ - rd_kafka_q_fix_offsets(&msetr->msetr_rkq, - msetr->msetr_rktp->rktp_offsets. - fetch_offset, - msetr->msetr_outer.offset - - *last_offsetp); - } + *last_offsetp = rko->rko_u.fetch.rkm.rkm_offset; + + if (*last_offsetp != -1 && msetr->msetr_relative_offsets) { + /* Update messages to absolute offsets + * and purge any messages older than the current + * fetch offset. */ + rd_kafka_q_fix_offsets( + &msetr->msetr_rkq, + msetr->msetr_rktp->rktp_offsets.fetch_offset, + msetr->msetr_outer.offset - *last_offsetp); + } } } - - /** * @brief Run the MessageSet reader, read messages until buffer is * exhausted (or error encountered), enqueue parsed messages on @@ -1330,7 +1327,7 @@ static void rd_kafka_msgset_reader_postproc (rd_kafka_msgset_reader_t *msetr, * busy-looping. 
*/ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr) { +rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr) { rd_kafka_toppar_t *rktp = msetr->msetr_rktp; rd_kafka_resp_err_t err; int64_t last_offset = -1; @@ -1348,26 +1345,26 @@ rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr) { if (msetr->msetr_ctrl_cnt > 0) { /* Noop */ - } else if (rktp->rktp_fetch_msg_max_bytes < (1 << 30)) { + } else if (rktp->rktp_fetch_msg_max_bytes < (1 << 30)) { rktp->rktp_fetch_msg_max_bytes *= 2; rd_rkb_dbg(msetr->msetr_rkb, FETCH, "CONSUME", - "Topic %s [%"PRId32"]: Increasing " - "max fetch bytes to %"PRId32, + "Topic %s [%" PRId32 + "]: Increasing " + "max fetch bytes to %" PRId32, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, rktp->rktp_fetch_msg_max_bytes); } else if (!err) { rd_kafka_consumer_err( - &msetr->msetr_rkq, - msetr->msetr_broker_id, - RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, - msetr->msetr_tver->version, - NULL, rktp, - rktp->rktp_offsets.fetch_offset, - "Message at offset %"PRId64" " - "might be too large to fetch, try increasing " - "receive.message.max.bytes", - rktp->rktp_offsets.fetch_offset); + &msetr->msetr_rkq, msetr->msetr_broker_id, + RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, + msetr->msetr_tver->version, NULL, rktp, + rktp->rktp_offsets.fetch_offset, + "Message at offset %" PRId64 + " " + "might be too large to fetch, try increasing " + "receive.message.max.bytes", + rktp->rktp_offsets.fetch_offset); } } else { @@ -1382,21 +1379,21 @@ rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr) { err = RD_KAFKA_RESP_ERR_NO_ERROR; } - rd_rkb_dbg(msetr->msetr_rkb, MSG | RD_KAFKA_DBG_FETCH, "CONSUME", - "Enqueue %i %smessage(s) (%"PRId64" bytes, %d ops) on " - "%s [%"PRId32"] " - "fetch queue (qlen %d, v%d, last_offset %"PRId64 - ", %d ctrl msgs, %s)", - msetr->msetr_msgcnt, msetr->msetr_srcname, - msetr->msetr_msg_bytes, - rd_kafka_q_len(&msetr->msetr_rkq), - rktp->rktp_rkt->rkt_topic->str, - 
rktp->rktp_partition, rd_kafka_q_len(msetr->msetr_par_rkq), - msetr->msetr_tver->version, last_offset, - msetr->msetr_ctrl_cnt, - msetr->msetr_compression ? - rd_kafka_compression2str(msetr->msetr_compression) : - "uncompressed"); + rd_rkb_dbg( + msetr->msetr_rkb, MSG | RD_KAFKA_DBG_FETCH, "CONSUME", + "Enqueue %i %smessage(s) (%" PRId64 + " bytes, %d ops) on " + "%s [%" PRId32 + "] " + "fetch queue (qlen %d, v%d, last_offset %" PRId64 + ", %d ctrl msgs, %s)", + msetr->msetr_msgcnt, msetr->msetr_srcname, msetr->msetr_msg_bytes, + rd_kafka_q_len(&msetr->msetr_rkq), rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rd_kafka_q_len(msetr->msetr_par_rkq), + msetr->msetr_tver->version, last_offset, msetr->msetr_ctrl_cnt, + msetr->msetr_compression + ? rd_kafka_compression2str(msetr->msetr_compression) + : "uncompressed"); /* Concat all messages&errors onto the parent's queue * (the partition's fetch queue) */ @@ -1431,16 +1428,15 @@ rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr) { * @returns see rd_kafka_msgset_reader_run() */ rd_kafka_resp_err_t -rd_kafka_msgset_parse (rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_toppar_t *rktp, - rd_kafka_aborted_txns_t *aborted_txns, - const struct rd_kafka_toppar_ver *tver) { +rd_kafka_msgset_parse(rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_toppar_t *rktp, + rd_kafka_aborted_txns_t *aborted_txns, + const struct rd_kafka_toppar_ver *tver) { rd_kafka_msgset_reader_t msetr; rd_kafka_resp_err_t err; - rd_kafka_msgset_reader_init(&msetr, rkbuf, rktp, tver, - aborted_txns, + rd_kafka_msgset_reader_init(&msetr, rkbuf, rktp, tver, aborted_txns, rktp->rktp_fetchq); /* Parse and handle the message set */ @@ -1455,14 +1451,13 @@ rd_kafka_msgset_parse (rd_kafka_buf_t *rkbuf, (int64_t)msetr.msetr_msg_bytes); return err; - } /** * @brief Offset comparator */ -static int rd_kafka_offset_cmp (const void *_a, const void *_b) { +static int rd_kafka_offset_cmp(const void *_a, const void *_b) { const 
int64_t *a = _a, *b = _b; return (*a > *b) - (*a < *b); } @@ -1471,7 +1466,7 @@ static int rd_kafka_offset_cmp (const void *_a, const void *_b) { /** * @brief Pid comparator for rd_kafka_aborted_txn_start_offsets_t */ -static int rd_kafka_aborted_txn_cmp_by_pid (const void *_a, const void *_b) { +static int rd_kafka_aborted_txn_cmp_by_pid(const void *_a, const void *_b) { const rd_kafka_aborted_txn_start_offsets_t *a = _a, *b = _b; return (a->pid > b->pid) - (a->pid < b->pid); } @@ -1480,7 +1475,7 @@ static int rd_kafka_aborted_txn_cmp_by_pid (const void *_a, const void *_b) { /** * @brief Free resources associated with an AVL tree node. */ -static void rd_kafka_aborted_txn_node_destroy (void *_node_ptr) { +static void rd_kafka_aborted_txn_node_destroy(void *_node_ptr) { rd_kafka_aborted_txn_start_offsets_t *node_ptr = _node_ptr; rd_list_destroy(&node_ptr->offsets); rd_free(node_ptr); @@ -1491,8 +1486,7 @@ static void rd_kafka_aborted_txn_node_destroy (void *_node_ptr) { * @brief Allocate memory for, and initialize a new * rd_kafka_aborted_txns_t struct. */ -rd_kafka_aborted_txns_t * -rd_kafka_aborted_txns_new (int32_t txn_cnt) { +rd_kafka_aborted_txns_t *rd_kafka_aborted_txns_new(int32_t txn_cnt) { rd_kafka_aborted_txns_t *aborted_txns; aborted_txns = rd_malloc(sizeof(*aborted_txns)); rd_avl_init(&aborted_txns->avl, rd_kafka_aborted_txn_cmp_by_pid, 0); @@ -1507,8 +1501,7 @@ rd_kafka_aborted_txns_new (int32_t txn_cnt) { * @brief Free all resources associated with a * rd_kafka_aborted_txns_t struct. */ -void -rd_kafka_aborted_txns_destroy (rd_kafka_aborted_txns_t *aborted_txns) { +void rd_kafka_aborted_txns_destroy(rd_kafka_aborted_txns_t *aborted_txns) { rd_list_destroy(&aborted_txns->list); rd_avl_destroy(&aborted_txns->avl); rd_free(aborted_txns); @@ -1520,7 +1513,7 @@ rd_kafka_aborted_txns_destroy (rd_kafka_aborted_txns_t *aborted_txns) { * the specified pid. 
*/ static RD_INLINE rd_kafka_aborted_txn_start_offsets_t * -rd_kafka_aborted_txns_offsets_for_pid (rd_kafka_aborted_txns_t *aborted_txns, +rd_kafka_aborted_txns_offsets_for_pid(rd_kafka_aborted_txns_t *aborted_txns, int64_t pid) { rd_kafka_aborted_txn_start_offsets_t node; node.pid = pid; @@ -1544,12 +1537,13 @@ rd_kafka_aborted_txns_offsets_for_pid (rd_kafka_aborted_txns_t *aborted_txns, * @returns the start offset or -1 if there is none. */ static int64_t -rd_kafka_aborted_txns_next_offset (rd_kafka_aborted_txns_t *aborted_txns, - int64_t pid, rd_bool_t increment_idx, - int64_t max_offset) { +rd_kafka_aborted_txns_next_offset(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid, + rd_bool_t increment_idx, + int64_t max_offset) { int64_t abort_start_offset; - rd_kafka_aborted_txn_start_offsets_t *node_ptr - = rd_kafka_aborted_txns_offsets_for_pid(aborted_txns, pid); + rd_kafka_aborted_txn_start_offsets_t *node_ptr = + rd_kafka_aborted_txns_offsets_for_pid(aborted_txns, pid); if (node_ptr == NULL) return -1; @@ -1557,9 +1551,8 @@ rd_kafka_aborted_txns_next_offset (rd_kafka_aborted_txns_t *aborted_txns, if (unlikely(node_ptr->offsets_idx >= rd_list_cnt(&node_ptr->offsets))) return -1; - abort_start_offset = - *((int64_t *)rd_list_elem(&node_ptr->offsets, - node_ptr->offsets_idx)); + abort_start_offset = *( + (int64_t *)rd_list_elem(&node_ptr->offsets, node_ptr->offsets_idx)); if (unlikely(abort_start_offset > max_offset)) return -1; @@ -1585,8 +1578,9 @@ rd_kafka_aborted_txns_next_offset (rd_kafka_aborted_txns_t *aborted_txns, * @returns the start offset or -1 if there is none. 
*/ static RD_INLINE int64_t -rd_kafka_aborted_txns_pop_offset (rd_kafka_aborted_txns_t *aborted_txns, - int64_t pid, int64_t max_offset) { +rd_kafka_aborted_txns_pop_offset(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid, + int64_t max_offset) { return rd_kafka_aborted_txns_next_offset(aborted_txns, pid, rd_true, max_offset); } @@ -1599,11 +1593,10 @@ rd_kafka_aborted_txns_pop_offset (rd_kafka_aborted_txns_t *aborted_txns, * @returns the start offset or -1 if there is none. */ static RD_INLINE int64_t -rd_kafka_aborted_txns_get_offset (const rd_kafka_aborted_txns_t *aborted_txns, - int64_t pid) { +rd_kafka_aborted_txns_get_offset(const rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid) { return rd_kafka_aborted_txns_next_offset( - (rd_kafka_aborted_txns_t *)aborted_txns, pid, rd_false, - INT64_MAX); + (rd_kafka_aborted_txns_t *)aborted_txns, pid, rd_false, INT64_MAX); } @@ -1611,28 +1604,26 @@ rd_kafka_aborted_txns_get_offset (const rd_kafka_aborted_txns_t *aborted_txns, * @brief Add a transaction start offset corresponding * to the specified pid to the aborted_txns collection. 
*/ -void -rd_kafka_aborted_txns_add (rd_kafka_aborted_txns_t *aborted_txns, - int64_t pid, - int64_t first_offset) { +void rd_kafka_aborted_txns_add(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid, + int64_t first_offset) { int64_t *v; - rd_kafka_aborted_txn_start_offsets_t *node_ptr - = rd_kafka_aborted_txns_offsets_for_pid(aborted_txns, pid); + rd_kafka_aborted_txn_start_offsets_t *node_ptr = + rd_kafka_aborted_txns_offsets_for_pid(aborted_txns, pid); if (!node_ptr) { - node_ptr = rd_malloc(sizeof(*node_ptr)); - node_ptr->pid = pid; + node_ptr = rd_malloc(sizeof(*node_ptr)); + node_ptr->pid = pid; node_ptr->offsets_idx = 0; rd_list_init(&node_ptr->offsets, 0, NULL); /* Each PID list has no more than AbortedTxnCnt elements */ - rd_list_prealloc_elems(&node_ptr->offsets, - sizeof(int64_t), - aborted_txns->cnt, 0); + rd_list_prealloc_elems(&node_ptr->offsets, sizeof(int64_t), + aborted_txns->cnt, 0); RD_AVL_INSERT(&aborted_txns->avl, node_ptr, avl_node); rd_list_add(&aborted_txns->list, node_ptr); } - v = rd_list_add(&node_ptr->offsets, NULL); + v = rd_list_add(&node_ptr->offsets, NULL); *v = first_offset; } @@ -1641,12 +1632,11 @@ rd_kafka_aborted_txns_add (rd_kafka_aborted_txns_t *aborted_txns, * @brief Sort each of the abort transaction start * offset lists for each pid. 
*/ -void -rd_kafka_aborted_txns_sort (rd_kafka_aborted_txns_t *aborted_txns) { +void rd_kafka_aborted_txns_sort(rd_kafka_aborted_txns_t *aborted_txns) { int k; for (k = 0; k < rd_list_cnt(&aborted_txns->list); k++) { rd_kafka_aborted_txn_start_offsets_t *el = - rd_list_elem(&aborted_txns->list, k); + rd_list_elem(&aborted_txns->list, k); rd_list_sort(&el->offsets, rd_kafka_offset_cmp); } } @@ -1656,7 +1646,7 @@ rd_kafka_aborted_txns_sort (rd_kafka_aborted_txns_t *aborted_txns) { * @brief Unit tests for all functions that operate on * rd_kafka_aborted_txns_t */ -int unittest_aborted_txns (void) { +int unittest_aborted_txns(void) { rd_kafka_aborted_txns_t *aborted_txns = NULL; int64_t start_offset; @@ -1670,88 +1660,101 @@ int unittest_aborted_txns (void) { rd_kafka_aborted_txns_add(aborted_txns, 1, 3); rd_kafka_aborted_txns_sort(aborted_txns); - start_offset = rd_kafka_aborted_txns_get_offset( - aborted_txns, 1); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); RD_UT_ASSERT(3 == start_offset, - "queried start offset was %"PRId64", " - "expected 3", start_offset); + "queried start offset was %" PRId64 + ", " + "expected 3", + start_offset); - start_offset = rd_kafka_aborted_txns_get_offset( - aborted_txns, 1); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); RD_UT_ASSERT(3 == start_offset, - "queried start offset was %"PRId64", " - "expected 3", start_offset); + "queried start offset was %" PRId64 + ", " + "expected 3", + start_offset); - start_offset = rd_kafka_aborted_txns_pop_offset( - aborted_txns, 1, INT64_MAX); + start_offset = + rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX); RD_UT_ASSERT(3 == start_offset, - "queried start offset was %"PRId64", " - "expected 3", start_offset); + "queried start offset was %" PRId64 + ", " + "expected 3", + start_offset); - start_offset = rd_kafka_aborted_txns_get_offset( - aborted_txns, 1); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); RD_UT_ASSERT(10 == 
start_offset, - "queried start offset was %"PRId64", " - "expected 10", start_offset); + "queried start offset was %" PRId64 + ", " + "expected 10", + start_offset); - start_offset = rd_kafka_aborted_txns_get_offset( - aborted_txns, 2); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2); RD_UT_ASSERT(7 == start_offset, - "queried start offset was %"PRId64", " - "expected 7", start_offset); + "queried start offset was %" PRId64 + ", " + "expected 7", + start_offset); rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX); - start_offset = rd_kafka_aborted_txns_get_offset( - aborted_txns, 1); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); RD_UT_ASSERT(42 == start_offset, - "queried start offset was %"PRId64", " - "expected 42", start_offset); + "queried start offset was %" PRId64 + ", " + "expected 42", + start_offset); rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX); - start_offset = rd_kafka_aborted_txns_get_offset( - aborted_txns, 1); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); RD_UT_ASSERT(44 == start_offset, - "queried start offset was %"PRId64", " - "expected 44", start_offset); + "queried start offset was %" PRId64 + ", " + "expected 44", + start_offset); - start_offset = rd_kafka_aborted_txns_get_offset( - aborted_txns, 2); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2); RD_UT_ASSERT(7 == start_offset, - "queried start offset was %"PRId64", " - "expected 7", start_offset); + "queried start offset was %" PRId64 + ", " + "expected 7", + start_offset); rd_kafka_aborted_txns_pop_offset(aborted_txns, 2, INT64_MAX); - start_offset = rd_kafka_aborted_txns_get_offset( - aborted_txns, 2); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2); RD_UT_ASSERT(11 == start_offset, - "queried start offset was %"PRId64", " - "expected 11", start_offset); + "queried start offset was %" PRId64 + ", " + "expected 11", + start_offset); /* error cases */ - start_offset 
= rd_kafka_aborted_txns_get_offset( - aborted_txns, 3); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 3); RD_UT_ASSERT(-1 == start_offset, - "queried start offset was %"PRId64", " - "expected -1", start_offset); + "queried start offset was %" PRId64 + ", " + "expected -1", + start_offset); rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX); rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX); rd_kafka_aborted_txns_pop_offset(aborted_txns, 2, INT64_MAX); - start_offset = rd_kafka_aborted_txns_get_offset( - aborted_txns, 1); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); RD_UT_ASSERT(-1 == start_offset, - "queried start offset was %"PRId64", " - "expected -1", start_offset); + "queried start offset was %" PRId64 + ", " + "expected -1", + start_offset); - start_offset = rd_kafka_aborted_txns_get_offset( - aborted_txns, 2); + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2); RD_UT_ASSERT(-1 == start_offset, - "queried start offset was %"PRId64", " - "expected -1", start_offset); + "queried start offset was %" PRId64 + ", " + "expected -1", + start_offset); rd_kafka_aborted_txns_destroy(aborted_txns); diff --git a/src/rdkafka_msgset_writer.c b/src/rdkafka_msgset_writer.c index 0b0a8a34a9..d09b22da07 100644 --- a/src/rdkafka_msgset_writer.c +++ b/src/rdkafka_msgset_writer.c @@ -49,47 +49,47 @@ static const int16_t rd_kafka_ProduceRequest_max_version = 7; typedef struct rd_kafka_msgset_writer_s { - rd_kafka_buf_t *msetw_rkbuf; /* Backing store buffer (refcounted)*/ + rd_kafka_buf_t *msetw_rkbuf; /* Backing store buffer (refcounted)*/ - int16_t msetw_ApiVersion; /* ProduceRequest ApiVersion */ - int msetw_MsgVersion; /* MsgVersion to construct */ - int msetw_features; /* Protocol features to use */ + int16_t msetw_ApiVersion; /* ProduceRequest ApiVersion */ + int msetw_MsgVersion; /* MsgVersion to construct */ + int msetw_features; /* Protocol features to use */ rd_kafka_compression_t 
msetw_compression; /**< Compression type */ - int msetw_msgcntmax; /* Max number of messages to send - * in a batch. */ - size_t msetw_messages_len; /* Total size of Messages, with Message - * framing but without - * MessageSet header */ - size_t msetw_messages_kvlen; /* Total size of Message keys - * and values */ + int msetw_msgcntmax; /* Max number of messages to send + * in a batch. */ + size_t msetw_messages_len; /* Total size of Messages, with Message + * framing but without + * MessageSet header */ + size_t msetw_messages_kvlen; /* Total size of Message keys + * and values */ - size_t msetw_MessageSetSize; /* Current MessageSetSize value */ - size_t msetw_of_MessageSetSize; /* offset of MessageSetSize */ - size_t msetw_of_start; /* offset of MessageSet */ + size_t msetw_MessageSetSize; /* Current MessageSetSize value */ + size_t msetw_of_MessageSetSize; /* offset of MessageSetSize */ + size_t msetw_of_start; /* offset of MessageSet */ - int msetw_relative_offsets; /* Bool: use relative offsets */ + int msetw_relative_offsets; /* Bool: use relative offsets */ /* For MessageSet v2 */ - int msetw_Attributes; /* MessageSet Attributes */ - int64_t msetw_MaxTimestamp; /* Maximum timestamp in batch */ - size_t msetw_of_CRC; /* offset of MessageSet.CRC */ + int msetw_Attributes; /* MessageSet Attributes */ + int64_t msetw_MaxTimestamp; /* Maximum timestamp in batch */ + size_t msetw_of_CRC; /* offset of MessageSet.CRC */ rd_kafka_msgbatch_t *msetw_batch; /**< Convenience pointer to * rkbuf_u.Produce.batch */ /* First message information */ struct { - size_t of; /* rkbuf's first message position */ - int64_t timestamp; + size_t of; /* rkbuf's first message position */ + int64_t timestamp; } msetw_firstmsg; - rd_kafka_pid_t msetw_pid; /**< Idempotent producer's - * current Producer Id */ - rd_kafka_broker_t *msetw_rkb; /* @warning Not a refcounted - * reference! */ - rd_kafka_toppar_t *msetw_rktp; /* @warning Not a refcounted - * reference! 
*/ - rd_kafka_msgq_t *msetw_msgq; /**< Input message queue */ + rd_kafka_pid_t msetw_pid; /**< Idempotent producer's + * current Producer Id */ + rd_kafka_broker_t *msetw_rkb; /* @warning Not a refcounted + * reference! */ + rd_kafka_toppar_t *msetw_rktp; /* @warning Not a refcounted + * reference! */ + rd_kafka_msgq_t *msetw_msgq; /**< Input message queue */ } rd_kafka_msgset_writer_t; @@ -102,34 +102,34 @@ typedef struct rd_kafka_msgset_writer_s { * @locality broker thread */ static RD_INLINE int -rd_kafka_msgset_writer_select_MsgVersion (rd_kafka_msgset_writer_t *msetw) { - rd_kafka_broker_t *rkb = msetw->msetw_rkb; - rd_kafka_toppar_t *rktp = msetw->msetw_rktp; +rd_kafka_msgset_writer_select_MsgVersion(rd_kafka_msgset_writer_t *msetw) { + rd_kafka_broker_t *rkb = msetw->msetw_rkb; + rd_kafka_toppar_t *rktp = msetw->msetw_rktp; const int16_t max_ApiVersion = rd_kafka_ProduceRequest_max_version; - int16_t min_ApiVersion = 0; + int16_t min_ApiVersion = 0; int feature; /* Map compression types to required feature and ApiVersion */ static const struct { int feature; int16_t ApiVersion; } compr_req[RD_KAFKA_COMPRESSION_NUM] = { - [RD_KAFKA_COMPRESSION_LZ4] = { RD_KAFKA_FEATURE_LZ4, 0 }, + [RD_KAFKA_COMPRESSION_LZ4] = {RD_KAFKA_FEATURE_LZ4, 0}, #if WITH_ZSTD - [RD_KAFKA_COMPRESSION_ZSTD] = { RD_KAFKA_FEATURE_ZSTD, 7 }, + [RD_KAFKA_COMPRESSION_ZSTD] = {RD_KAFKA_FEATURE_ZSTD, 7}, #endif }; if ((feature = rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2)) { - min_ApiVersion = 3; + min_ApiVersion = 3; msetw->msetw_MsgVersion = 2; msetw->msetw_features |= feature; } else if ((feature = rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1)) { - min_ApiVersion = 2; + min_ApiVersion = 2; msetw->msetw_MsgVersion = 1; msetw->msetw_features |= feature; } else { if ((feature = - rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME)) { + rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME)) { min_ApiVersion = 1; msetw->msetw_features |= feature; } else @@ -145,49 +145,49 @@ 
rd_kafka_msgset_writer_select_MsgVersion (rd_kafka_msgset_writer_t *msetw) { */ if (msetw->msetw_compression && (rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_Produce, - 0, compr_req[msetw->msetw_compression].ApiVersion, - NULL) == -1 || + rkb, RD_KAFKAP_Produce, 0, + compr_req[msetw->msetw_compression].ApiVersion, NULL) == -1 || (compr_req[msetw->msetw_compression].feature && !(msetw->msetw_rkb->rkb_features & compr_req[msetw->msetw_compression].feature)))) { - if (unlikely(rd_interval( - &rkb->rkb_suppress.unsupported_compression, - /* at most once per day */ - (rd_ts_t)86400 * 1000 * 1000, 0) > 0)) - rd_rkb_log(rkb, LOG_NOTICE, "COMPRESSION", - "%.*s [%"PRId32"]: " - "Broker does not support compression " - "type %s: not compressing batch", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_compression2str( - msetw->msetw_compression)); + if (unlikely( + rd_interval(&rkb->rkb_suppress.unsupported_compression, + /* at most once per day */ + (rd_ts_t)86400 * 1000 * 1000, 0) > 0)) + rd_rkb_log( + rkb, LOG_NOTICE, "COMPRESSION", + "%.*s [%" PRId32 + "]: " + "Broker does not support compression " + "type %s: not compressing batch", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_compression2str(msetw->msetw_compression)); else - rd_rkb_dbg(rkb, MSG, "PRODUCE", - "%.*s [%"PRId32"]: " - "Broker does not support compression " - "type %s: not compressing batch", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_compression2str( - msetw->msetw_compression)); + rd_rkb_dbg( + rkb, MSG, "PRODUCE", + "%.*s [%" PRId32 + "]: " + "Broker does not support compression " + "type %s: not compressing batch", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_compression2str(msetw->msetw_compression)); msetw->msetw_compression = RD_KAFKA_COMPRESSION_NONE; } else { /* Broker supports this compression type. 
*/ msetw->msetw_features |= - compr_req[msetw->msetw_compression].feature; + compr_req[msetw->msetw_compression].feature; if (min_ApiVersion < compr_req[msetw->msetw_compression].ApiVersion) min_ApiVersion = - compr_req[msetw->msetw_compression].ApiVersion; + compr_req[msetw->msetw_compression].ApiVersion; } /* MsgVersion specific setup. */ - switch (msetw->msetw_MsgVersion) - { + switch (msetw->msetw_MsgVersion) { case 2: msetw->msetw_relative_offsets = 1; /* OffsetDelta */ break; @@ -199,8 +199,7 @@ rd_kafka_msgset_writer_select_MsgVersion (rd_kafka_msgset_writer_t *msetw) { /* Set the highest ApiVersion supported by us and broker */ msetw->msetw_ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, - RD_KAFKAP_Produce, min_ApiVersion, max_ApiVersion, NULL); + rkb, RD_KAFKAP_Produce, min_ApiVersion, max_ApiVersion, NULL); if (msetw->msetw_ApiVersion == -1) { rd_kafka_msg_t *rkm; @@ -208,17 +207,18 @@ rd_kafka_msgset_writer_select_MsgVersion (rd_kafka_msgset_writer_t *msetw) { * no matching ProduceRequest versions, which should never * happen. 
*/ rd_rkb_log(rkb, LOG_ERR, "PRODUCE", - "%.*s [%"PRId32"]: " + "%.*s [%" PRId32 + "]: " "No viable ProduceRequest ApiVersions (v%d..%d) " "supported by broker: unable to produce", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - min_ApiVersion, max_ApiVersion); + rktp->rktp_partition, min_ApiVersion, + max_ApiVersion); /* Back off and retry in 5s */ rkm = rd_kafka_msgq_first(msetw->msetw_msgq); rd_assert(rkm); - rkm->rkm_u.producer.ts_backoff = rd_clock() + (5 * 1000*1000); + rkm->rkm_u.producer.ts_backoff = rd_clock() + (5 * 1000 * 1000); return -1; } @@ -239,12 +239,11 @@ rd_kafka_msgset_writer_select_MsgVersion (rd_kafka_msgset_writer_t *msetw) { * The allocated size is the minimum of message.max.bytes * or queued_bytes + msgcntmax * msg_overhead */ -static void -rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { - rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; +static void rd_kafka_msgset_writer_alloc_buf(rd_kafka_msgset_writer_t *msetw) { + rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; size_t msg_overhead = 0; - size_t hdrsize = 0; - size_t msgsetsize = 0; + size_t hdrsize = 0; + size_t msgsetsize = 0; size_t bufsize; rd_kafka_assert(NULL, !msetw->msetw_rkbuf); @@ -267,8 +266,7 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { /* * ProduceRequest header sizes */ - switch (msetw->msetw_ApiVersion) - { + switch (msetw->msetw_ApiVersion) { case 7: case 6: case 5: @@ -281,13 +279,12 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { case 1: case 2: hdrsize += - /* RequiredAcks + Timeout + TopicCnt */ - 2 + 4 + 4 + - /* Topic */ - RD_KAFKAP_STR_SIZE(msetw->msetw_rktp-> - rktp_rkt->rkt_topic) + - /* PartitionCnt + Partition + MessageSetSize */ - 4 + 4 + 4; + /* RequiredAcks + Timeout + TopicCnt */ + 2 + 4 + 4 + + /* Topic */ + RD_KAFKAP_STR_SIZE(msetw->msetw_rktp->rktp_rkt->rkt_topic) + + /* PartitionCnt + Partition + MessageSetSize */ + 4 + 4 + 4; msgsetsize += 4; /* MessageSetSize */ 
break; @@ -300,8 +297,7 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { * - (Worst-case) Message overhead: message fields * - MessageSet header size */ - switch (msetw->msetw_MsgVersion) - { + switch (msetw->msetw_MsgVersion) { case 0: /* MsgVer0 */ msg_overhead = RD_KAFKAP_MESSAGE_V0_OVERHEAD; @@ -316,20 +312,14 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { msg_overhead += RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD; /* MessageSet header fields */ - msgsetsize += - 8 /* BaseOffset */ + - 4 /* Length */ + - 4 /* PartitionLeaderEpoch */ + - 1 /* Magic (MsgVersion) */ + - 4 /* CRC (CRC32C) */ + - 2 /* Attributes */ + - 4 /* LastOffsetDelta */ + - 8 /* BaseTimestamp */ + - 8 /* MaxTimestamp */ + - 8 /* ProducerId */ + - 2 /* ProducerEpoch */ + - 4 /* BaseSequence */ + - 4 /* RecordCount */; + msgsetsize += 8 /* BaseOffset */ + 4 /* Length */ + + 4 /* PartitionLeaderEpoch */ + + 1 /* Magic (MsgVersion) */ + + 4 /* CRC (CRC32C) */ + 2 /* Attributes */ + + 4 /* LastOffsetDelta */ + 8 /* BaseTimestamp */ + + 8 /* MaxTimestamp */ + 8 /* ProducerId */ + + 2 /* ProducerEpoch */ + 4 /* BaseSequence */ + + 4 /* RecordCount */; break; default: @@ -346,9 +336,9 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { */ if (rk->rk_conf.msg_copy_max_size > 0) { size_t queued_bytes = rd_kafka_msgq_size(msetw->msetw_msgq); - bufsize += RD_MIN(queued_bytes, - (size_t)rk->rk_conf.msg_copy_max_size * - msetw->msetw_msgcntmax); + bufsize += + RD_MIN(queued_bytes, (size_t)rk->rk_conf.msg_copy_max_size * + msetw->msetw_msgcntmax); } /* Add estimed per-message overhead */ @@ -363,12 +353,10 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { * and allocate auxilliery space for message headers, etc. 
*/ msetw->msetw_rkbuf = - rd_kafka_buf_new_request(msetw->msetw_rkb, RD_KAFKAP_Produce, - msetw->msetw_msgcntmax/2 + 10, - bufsize); + rd_kafka_buf_new_request(msetw->msetw_rkb, RD_KAFKAP_Produce, + msetw->msetw_msgcntmax / 2 + 10, bufsize); - rd_kafka_buf_ApiVersion_set(msetw->msetw_rkbuf, - msetw->msetw_ApiVersion, + rd_kafka_buf_ApiVersion_set(msetw->msetw_rkbuf, msetw->msetw_ApiVersion, msetw->msetw_features); } @@ -377,9 +365,8 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { * @brief Write the MessageSet header. * @remark Must only be called for MsgVersion 2 */ -static void -rd_kafka_msgset_writer_write_MessageSet_v2_header ( - rd_kafka_msgset_writer_t *msetw) { +static void rd_kafka_msgset_writer_write_MessageSet_v2_header( + rd_kafka_msgset_writer_t *msetw) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; rd_kafka_assert(NULL, msetw->msetw_ApiVersion >= 3); @@ -427,7 +414,6 @@ rd_kafka_msgset_writer_write_MessageSet_v2_header ( /* RecordCount: udpated later */ rd_kafka_buf_write_i32(rkbuf, 0); - } @@ -438,10 +424,10 @@ rd_kafka_msgset_writer_write_MessageSet_v2_header ( * msetw_MessageSetSize will have been set to the messageset header. 
*/ static void -rd_kafka_msgset_writer_write_Produce_header (rd_kafka_msgset_writer_t *msetw) { +rd_kafka_msgset_writer_write_Produce_header(rd_kafka_msgset_writer_t *msetw) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; - rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; + rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; rd_kafka_topic_t *rkt = msetw->msetw_rktp->rktp_rkt; /* V3: TransactionalId */ @@ -493,12 +479,12 @@ rd_kafka_msgset_writer_write_Produce_header (rd_kafka_msgset_writer_t *msetw) { * * @locality broker thread */ -static int rd_kafka_msgset_writer_init (rd_kafka_msgset_writer_t *msetw, - rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq, - rd_kafka_pid_t pid, - uint64_t epoch_base_msgid) { +static int rd_kafka_msgset_writer_init(rd_kafka_msgset_writer_t *msetw, + rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + rd_kafka_pid_t pid, + uint64_t epoch_base_msgid) { int msgcnt = rd_kafka_msgq_len(rkmq); if (msgcnt == 0) @@ -507,16 +493,15 @@ static int rd_kafka_msgset_writer_init (rd_kafka_msgset_writer_t *msetw, memset(msetw, 0, sizeof(*msetw)); msetw->msetw_rktp = rktp; - msetw->msetw_rkb = rkb; + msetw->msetw_rkb = rkb; msetw->msetw_msgq = rkmq; - msetw->msetw_pid = pid; + msetw->msetw_pid = pid; /* Max number of messages to send in a batch, * limited by current queue size or configured batch size, * whichever is lower. */ - msetw->msetw_msgcntmax = RD_MIN(msgcnt, - rkb->rkb_rk->rk_conf. - batch_num_messages); + msetw->msetw_msgcntmax = + RD_MIN(msgcnt, rkb->rkb_rk->rk_conf.batch_num_messages); rd_dassert(msetw->msetw_msgcntmax > 0); /* Select MsgVersion to use */ @@ -533,11 +518,11 @@ static int rd_kafka_msgset_writer_init (rd_kafka_msgset_writer_t *msetw, * is located. * Record the current buffer position so it can be rewound later * in case of compression. 
*/ - msetw->msetw_firstmsg.of = rd_buf_write_pos(&msetw->msetw_rkbuf-> - rkbuf_buf); + msetw->msetw_firstmsg.of = + rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf); - rd_kafka_msgbatch_init(&msetw->msetw_rkbuf->rkbuf_u.Produce.batch, - rktp, pid, epoch_base_msgid); + rd_kafka_msgbatch_init(&msetw->msetw_rkbuf->rkbuf_u.Produce.batch, rktp, + pid, epoch_base_msgid); msetw->msetw_batch = &msetw->msetw_rkbuf->rkbuf_u.Produce.batch; return msetw->msetw_msgcntmax; @@ -549,10 +534,10 @@ static int rd_kafka_msgset_writer_init (rd_kafka_msgset_writer_t *msetw, * @brief Copy or link message payload to buffer. */ static RD_INLINE void -rd_kafka_msgset_writer_write_msg_payload (rd_kafka_msgset_writer_t *msetw, - const rd_kafka_msg_t *rkm, - void (*free_cb)(void *)) { - const rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; +rd_kafka_msgset_writer_write_msg_payload(rd_kafka_msgset_writer_t *msetw, + const rd_kafka_msg_t *rkm, + void (*free_cb)(void *)) { + const rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; /* If payload is below the copy limit and there is still @@ -560,8 +545,7 @@ rd_kafka_msgset_writer_write_msg_payload (rd_kafka_msgset_writer_t *msetw, * otherwise we push a reference to the memory. 
*/ if (rkm->rkm_len <= (size_t)rk->rk_conf.msg_copy_max_size && rd_buf_write_remains(&rkbuf->rkbuf_buf) > rkm->rkm_len) { - rd_kafka_buf_write(rkbuf, - rkm->rkm_payload, rkm->rkm_len); + rd_kafka_buf_write(rkbuf, rkm->rkm_payload, rkm->rkm_len); if (free_cb) free_cb(rkm->rkm_payload); } else @@ -577,8 +561,8 @@ rd_kafka_msgset_writer_write_msg_payload (rd_kafka_msgset_writer_t *msetw, * @returns the number of bytes written to msetw->msetw_rkbuf */ static size_t -rd_kafka_msgset_writer_write_msg_headers (rd_kafka_msgset_writer_t *msetw, - const rd_kafka_headers_t *hdrs) { +rd_kafka_msgset_writer_write_msg_headers(rd_kafka_msgset_writer_t *msetw, + const rd_kafka_headers_t *hdrs) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; const rd_kafka_header_t *hdr; int i; @@ -587,13 +571,12 @@ rd_kafka_msgset_writer_write_msg_headers (rd_kafka_msgset_writer_t *msetw, RD_LIST_FOREACH(hdr, &hdrs->rkhdrs_list, i) { rd_kafka_buf_write_varint(rkbuf, hdr->rkhdr_name_size); - rd_kafka_buf_write(rkbuf, - hdr->rkhdr_name, hdr->rkhdr_name_size); - rd_kafka_buf_write_varint(rkbuf, - hdr->rkhdr_value ? - (int64_t)hdr->rkhdr_value_size : -1); - rd_kafka_buf_write(rkbuf, - hdr->rkhdr_value, + rd_kafka_buf_write(rkbuf, hdr->rkhdr_name, + hdr->rkhdr_name_size); + rd_kafka_buf_write_varint( + rkbuf, + hdr->rkhdr_value ? (int64_t)hdr->rkhdr_value_size : -1); + rd_kafka_buf_write(rkbuf, hdr->rkhdr_value, hdr->rkhdr_value_size); } @@ -610,11 +593,11 @@ rd_kafka_msgset_writer_write_msg_headers (rd_kafka_msgset_writer_t *msetw, * @returns the number of bytes written. 
*/ static size_t -rd_kafka_msgset_writer_write_msg_v0_1 (rd_kafka_msgset_writer_t *msetw, - rd_kafka_msg_t *rkm, - int64_t Offset, - int8_t MsgAttributes, - void (*free_cb)(void *)) { +rd_kafka_msgset_writer_write_msg_v0_1(rd_kafka_msgset_writer_t *msetw, + rd_kafka_msg_t *rkm, + int64_t Offset, + int8_t MsgAttributes, + void (*free_cb)(void *)) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; size_t MessageSize; size_t of_Crc; @@ -627,10 +610,9 @@ rd_kafka_msgset_writer_write_msg_v0_1 (rd_kafka_msgset_writer_t *msetw, rd_kafka_buf_write_i64(rkbuf, Offset); /* MessageSize */ - MessageSize = - 4 + 1 + 1 + /* Crc+MagicByte+Attributes */ - 4 /* KeyLength */ + rkm->rkm_key_len + - 4 /* ValueLength */ + rkm->rkm_len; + MessageSize = 4 + 1 + 1 + /* Crc+MagicByte+Attributes */ + 4 /* KeyLength */ + rkm->rkm_key_len + + 4 /* ValueLength */ + rkm->rkm_len; if (msetw->msetw_MsgVersion == 1) MessageSize += 8; /* Timestamp i64 */ @@ -672,7 +654,7 @@ rd_kafka_msgset_writer_write_msg_v0_1 (rd_kafka_msgset_writer_t *msetw, /* Return written message size */ - return 8/*Offset*/ + 4/*MessageSize*/ + MessageSize; + return 8 /*Offset*/ + 4 /*MessageSize*/ + MessageSize; } /** @@ -680,13 +662,13 @@ rd_kafka_msgset_writer_write_msg_v0_1 (rd_kafka_msgset_writer_t *msetw, * @returns the number of bytes written. 
*/ static size_t -rd_kafka_msgset_writer_write_msg_v2 (rd_kafka_msgset_writer_t *msetw, - rd_kafka_msg_t *rkm, - int64_t Offset, - int8_t MsgAttributes, - void (*free_cb)(void *)) { +rd_kafka_msgset_writer_write_msg_v2(rd_kafka_msgset_writer_t *msetw, + rd_kafka_msg_t *rkm, + int64_t Offset, + int8_t MsgAttributes, + void (*free_cb)(void *)) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; - size_t MessageSize = 0; + size_t MessageSize = 0; char varint_Length[RD_UVARINT_ENC_SIZEOF(int32_t)]; char varint_TimestampDelta[RD_UVARINT_ENC_SIZEOF(int64_t)]; char varint_OffsetDelta[RD_UVARINT_ENC_SIZEOF(int64_t)]; @@ -699,7 +681,7 @@ rd_kafka_msgset_writer_write_msg_v2 (rd_kafka_msgset_writer_t *msetw, size_t sz_KeyLen; size_t sz_ValueLen; size_t sz_HeaderCount; - int HeaderCount = 0; + int HeaderCount = 0; size_t HeaderSize = 0; if (rkm->rkm_headers) { @@ -712,34 +694,27 @@ rd_kafka_msgset_writer_write_msg_v2 (rd_kafka_msgset_writer_t *msetw, * correct varint encoded width. */ sz_TimestampDelta = rd_uvarint_enc_i64( - varint_TimestampDelta, sizeof(varint_TimestampDelta), - rkm->rkm_timestamp - msetw->msetw_firstmsg.timestamp); - sz_OffsetDelta = rd_uvarint_enc_i64( - varint_OffsetDelta, sizeof(varint_OffsetDelta), Offset); - sz_KeyLen = rd_uvarint_enc_i32( - varint_KeyLen, sizeof(varint_KeyLen), - rkm->rkm_key ? (int32_t)rkm->rkm_key_len : - (int32_t)RD_KAFKAP_BYTES_LEN_NULL); + varint_TimestampDelta, sizeof(varint_TimestampDelta), + rkm->rkm_timestamp - msetw->msetw_firstmsg.timestamp); + sz_OffsetDelta = rd_uvarint_enc_i64(varint_OffsetDelta, + sizeof(varint_OffsetDelta), Offset); + sz_KeyLen = rd_uvarint_enc_i32(varint_KeyLen, sizeof(varint_KeyLen), + rkm->rkm_key + ? (int32_t)rkm->rkm_key_len + : (int32_t)RD_KAFKAP_BYTES_LEN_NULL); sz_ValueLen = rd_uvarint_enc_i32( - varint_ValueLen, sizeof(varint_ValueLen), - rkm->rkm_payload ? 
(int32_t)rkm->rkm_len : - (int32_t)RD_KAFKAP_BYTES_LEN_NULL); - sz_HeaderCount = rd_uvarint_enc_i32( - varint_HeaderCount, sizeof(varint_HeaderCount), - (int32_t)HeaderCount); + varint_ValueLen, sizeof(varint_ValueLen), + rkm->rkm_payload ? (int32_t)rkm->rkm_len + : (int32_t)RD_KAFKAP_BYTES_LEN_NULL); + sz_HeaderCount = + rd_uvarint_enc_i32(varint_HeaderCount, sizeof(varint_HeaderCount), + (int32_t)HeaderCount); /* Calculate MessageSize without length of Length (added later) * to store it in Length. */ - MessageSize = - 1 /* MsgAttributes */ + - sz_TimestampDelta + - sz_OffsetDelta + - sz_KeyLen + - rkm->rkm_key_len + - sz_ValueLen + - rkm->rkm_len + - sz_HeaderCount + - HeaderSize; + MessageSize = 1 /* MsgAttributes */ + sz_TimestampDelta + + sz_OffsetDelta + sz_KeyLen + rkm->rkm_key_len + + sz_ValueLen + rkm->rkm_len + sz_HeaderCount + HeaderSize; /* Length */ sz_Length = rd_uvarint_enc_i64(varint_Length, sizeof(varint_Length), @@ -788,19 +763,17 @@ rd_kafka_msgset_writer_write_msg_v2 (rd_kafka_msgset_writer_t *msetw, * @brief Write message to messageset buffer. * @returns the number of bytes written. 
*/ -static size_t -rd_kafka_msgset_writer_write_msg (rd_kafka_msgset_writer_t *msetw, - rd_kafka_msg_t *rkm, - int64_t Offset, int8_t MsgAttributes, - void (*free_cb)(void *)) { +static size_t rd_kafka_msgset_writer_write_msg(rd_kafka_msgset_writer_t *msetw, + rd_kafka_msg_t *rkm, + int64_t Offset, + int8_t MsgAttributes, + void (*free_cb)(void *)) { size_t outlen; - size_t (*writer[]) (rd_kafka_msgset_writer_t *, - rd_kafka_msg_t *, int64_t, int8_t, - void (*)(void *)) = { - [0] = rd_kafka_msgset_writer_write_msg_v0_1, - [1] = rd_kafka_msgset_writer_write_msg_v0_1, - [2] = rd_kafka_msgset_writer_write_msg_v2 - }; + size_t (*writer[])(rd_kafka_msgset_writer_t *, rd_kafka_msg_t *, + int64_t, int8_t, void (*)(void *)) = { + [0] = rd_kafka_msgset_writer_write_msg_v0_1, + [1] = rd_kafka_msgset_writer_write_msg_v0_1, + [2] = rd_kafka_msgset_writer_write_msg_v2}; size_t actual_written; size_t pre_pos; @@ -809,18 +782,16 @@ rd_kafka_msgset_writer_write_msg (rd_kafka_msgset_writer_t *msetw, pre_pos = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf); - outlen = writer[msetw->msetw_MsgVersion](msetw, rkm, - Offset, MsgAttributes, - free_cb); + outlen = writer[msetw->msetw_MsgVersion](msetw, rkm, Offset, + MsgAttributes, free_cb); - actual_written = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) - - pre_pos; + actual_written = + rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) - pre_pos; rd_assert(outlen <= - rd_kafka_msg_wire_size(rkm, msetw->msetw_MsgVersion)); + rd_kafka_msg_wire_size(rkm, msetw->msetw_MsgVersion)); rd_assert(outlen == actual_written); return outlen; - } /** @@ -831,26 +802,24 @@ rd_kafka_msgset_writer_write_msg (rd_kafka_msgset_writer_t *msetw, * * @returns 1 on success or 0 on error. 
*/ -static int -rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, - rd_kafka_msgq_t *rkmq) { +static int rd_kafka_msgset_writer_write_msgq(rd_kafka_msgset_writer_t *msetw, + rd_kafka_msgq_t *rkmq) { rd_kafka_toppar_t *rktp = msetw->msetw_rktp; - rd_kafka_broker_t *rkb = msetw->msetw_rkb; - size_t len = rd_buf_len(&msetw->msetw_rkbuf->rkbuf_buf); - size_t max_msg_size = RD_MIN((size_t)msetw->msetw_rkb->rkb_rk-> - rk_conf.max_msg_size, - (size_t)msetw->msetw_rkb->rkb_rk-> - rk_conf.batch_size); + rd_kafka_broker_t *rkb = msetw->msetw_rkb; + size_t len = rd_buf_len(&msetw->msetw_rkbuf->rkbuf_buf); + size_t max_msg_size = + RD_MIN((size_t)msetw->msetw_rkb->rkb_rk->rk_conf.max_msg_size, + (size_t)msetw->msetw_rkb->rkb_rk->rk_conf.batch_size); rd_ts_t int_latency_base; rd_ts_t MaxTimestamp = 0; rd_kafka_msg_t *rkm; - int msgcnt = 0; + int msgcnt = 0; const rd_ts_t now = rd_clock(); /* Internal latency calculation base. * Uses rkm_ts_timeout which is enqueue time + timeout */ - int_latency_base = now + - ((rd_ts_t) rktp->rktp_rkt->rkt_conf.message_timeout_ms * 1000); + int_latency_base = + now + ((rd_ts_t)rktp->rktp_rkt->rkt_conf.message_timeout_ms * 1000); /* Acquire BaseTimestamp from first message. 
*/ rkm = TAILQ_FIRST(&rkmq->rkmq_msgs); @@ -866,15 +835,16 @@ rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, do { if (unlikely(msetw->msetw_batch->last_msgid && msetw->msetw_batch->last_msgid < - rkm->rkm_u.producer.msgid)) { + rkm->rkm_u.producer.msgid)) { rd_rkb_dbg(rkb, MSG, "PRODUCE", - "%.*s [%"PRId32"]: " + "%.*s [%" PRId32 + "]: " "Reconstructed MessageSet " - "(%d message(s), %"PRIusz" bytes, " - "MsgIds %"PRIu64"..%"PRIu64")", + "(%d message(s), %" PRIusz + " bytes, " + "MsgIds %" PRIu64 "..%" PRIu64 ")", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - msgcnt, len, + rktp->rktp_partition, msgcnt, len, msetw->msetw_batch->first_msgid, msetw->msetw_batch->last_msgid); break; @@ -888,18 +858,18 @@ rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, * overshoot the message.max.bytes limit by one message to * avoid getting stuck here. * The actual messageset size is enforced by the broker. */ - if (unlikely(msgcnt == msetw->msetw_msgcntmax || - (msgcnt > 0 && - len + rd_kafka_msg_wire_size(rkm, msetw-> - msetw_MsgVersion) > - max_msg_size))) { + if (unlikely( + msgcnt == msetw->msetw_msgcntmax || + (msgcnt > 0 && len + rd_kafka_msg_wire_size( + rkm, msetw->msetw_MsgVersion) > + max_msg_size))) { rd_rkb_dbg(rkb, MSG, "PRODUCE", - "%.*s [%"PRId32"]: " + "%.*s [%" PRId32 + "]: " "No more space in current MessageSet " - "(%i message(s), %"PRIusz" bytes)", + "(%i message(s), %" PRIusz " bytes)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - msgcnt, len); + rktp->rktp_partition, msgcnt, len); break; } @@ -951,17 +921,17 @@ rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, if (unlikely(lastmsg->rkm_u.producer.msgid != msetw->msetw_batch->last_msgid)) { rd_kafka_set_fatal_error( - rkb->rkb_rk, - RD_KAFKA_RESP_ERR__INCONSISTENT, - "Unable to reconstruct MessageSet " - "(currently with %d message(s)) " - "with msgid range %"PRIu64"..%"PRIu64": " - "last message added 
has msgid %"PRIu64": " - "unable to guarantee consistency", - msgcnt, - msetw->msetw_batch->first_msgid, - msetw->msetw_batch->last_msgid, - lastmsg->rkm_u.producer.msgid); + rkb->rkb_rk, RD_KAFKA_RESP_ERR__INCONSISTENT, + "Unable to reconstruct MessageSet " + "(currently with %d message(s)) " + "with msgid range %" PRIu64 "..%" PRIu64 + ": " + "last message added has msgid %" PRIu64 + ": " + "unable to guarantee consistency", + msgcnt, msetw->msetw_batch->first_msgid, + msetw->msetw_batch->last_msgid, + lastmsg->rkm_u.producer.msgid); return 0; } } @@ -973,12 +943,11 @@ rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, /** * @brief Compress messageset using gzip/zlib */ -static int -rd_kafka_msgset_writer_compress_gzip (rd_kafka_msgset_writer_t *msetw, - rd_slice_t *slice, - struct iovec *ciov) { +static int rd_kafka_msgset_writer_compress_gzip(rd_kafka_msgset_writer_t *msetw, + rd_slice_t *slice, + struct iovec *ciov) { - rd_kafka_broker_t *rkb = msetw->msetw_rkb; + rd_kafka_broker_t *rkb = msetw->msetw_rkb; rd_kafka_toppar_t *rktp = msetw->msetw_rktp; z_stream strm; size_t len = rd_slice_remains(slice); @@ -986,46 +955,47 @@ rd_kafka_msgset_writer_compress_gzip (rd_kafka_msgset_writer_t *msetw, size_t rlen; int r; int comp_level = - msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; + msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; memset(&strm, 0, sizeof(strm)); - r = deflateInit2(&strm, comp_level, - Z_DEFLATED, 15+16, - 8, Z_DEFAULT_STRATEGY); + r = deflateInit2(&strm, comp_level, Z_DEFLATED, 15 + 16, 8, + Z_DEFAULT_STRATEGY); if (r != Z_OK) { rd_rkb_log(rkb, LOG_ERR, "GZIP", "Failed to initialize gzip for " - "compressing %"PRIusz" bytes in " - "topic %.*s [%"PRId32"]: %s (%i): " + "compressing %" PRIusz + " bytes in " + "topic %.*s [%" PRId32 + "]: %s (%i): " "sending uncompressed", - len, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - strm.msg ? 
strm.msg : "", r); + len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, strm.msg ? strm.msg : "", r); return -1; } /* Calculate maximum compressed size and * allocate an output buffer accordingly, being * prefixed with the Message header. */ - ciov->iov_len = deflateBound(&strm, (uLong)rd_slice_remains(slice)); + ciov->iov_len = deflateBound(&strm, (uLong)rd_slice_remains(slice)); ciov->iov_base = rd_malloc(ciov->iov_len); strm.next_out = (void *)ciov->iov_base; - strm.avail_out = (uInt)ciov->iov_len; + strm.avail_out = (uInt)ciov->iov_len; /* Iterate through each segment and compress it. */ while ((rlen = rd_slice_reader(slice, &p))) { strm.next_in = (void *)p; - strm.avail_in = (uInt)rlen; + strm.avail_in = (uInt)rlen; /* Compress message */ if ((r = deflate(&strm, Z_NO_FLUSH) != Z_OK)) { rd_rkb_log(rkb, LOG_ERR, "GZIP", "Failed to gzip-compress " - "%"PRIusz" bytes (%"PRIusz" total) for " - "topic %.*s [%"PRId32"]: " + "%" PRIusz " bytes (%" PRIusz + " total) for " + "topic %.*s [%" PRId32 + "]: " "%s (%i): " "sending uncompressed", rlen, len, @@ -1044,14 +1014,14 @@ rd_kafka_msgset_writer_compress_gzip (rd_kafka_msgset_writer_t *msetw, if ((r = deflate(&strm, Z_FINISH)) != Z_STREAM_END) { rd_rkb_log(rkb, LOG_ERR, "GZIP", "Failed to finish gzip compression " - " of %"PRIusz" bytes for " - "topic %.*s [%"PRId32"]: " + " of %" PRIusz + " bytes for " + "topic %.*s [%" PRId32 + "]: " "%s (%i): " "sending uncompressed", - len, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - strm.msg ? strm.msg : "", r); + len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, strm.msg ? 
strm.msg : "", r); deflateEnd(&strm); rd_free(ciov->iov_base); return -1; @@ -1072,9 +1042,10 @@ rd_kafka_msgset_writer_compress_gzip (rd_kafka_msgset_writer_t *msetw, * @brief Compress messageset using Snappy */ static int -rd_kafka_msgset_writer_compress_snappy (rd_kafka_msgset_writer_t *msetw, - rd_slice_t *slice, struct iovec *ciov) { - rd_kafka_broker_t *rkb = msetw->msetw_rkb; +rd_kafka_msgset_writer_compress_snappy(rd_kafka_msgset_writer_t *msetw, + rd_slice_t *slice, + struct iovec *ciov) { + rd_kafka_broker_t *rkb = msetw->msetw_rkb; rd_kafka_toppar_t *rktp = msetw->msetw_rktp; struct iovec *iov; size_t iov_max, iov_cnt; @@ -1083,15 +1054,15 @@ rd_kafka_msgset_writer_compress_snappy (rd_kafka_msgset_writer_t *msetw, int r; /* Initialize snappy compression environment */ - rd_kafka_snappy_init_env_sg(&senv, 1/*iov enable*/); + rd_kafka_snappy_init_env_sg(&senv, 1 /*iov enable*/); /* Calculate maximum compressed size and * allocate an output buffer accordingly. */ - ciov->iov_len = rd_kafka_snappy_max_compressed_length(len); + ciov->iov_len = rd_kafka_snappy_max_compressed_length(len); ciov->iov_base = rd_malloc(ciov->iov_len); iov_max = slice->buf->rbuf_segment_cnt; - iov = rd_alloca(sizeof(*iov) * iov_max); + iov = rd_alloca(sizeof(*iov) * iov_max); rd_slice_get_iov(slice, iov, &iov_cnt, iov_max, len); @@ -1100,12 +1071,13 @@ rd_kafka_msgset_writer_compress_snappy (rd_kafka_msgset_writer_t *msetw, ciov)) != 0) { rd_rkb_log(rkb, LOG_ERR, "SNAPPY", "Failed to snappy-compress " - "%"PRIusz" bytes for " - "topic %.*s [%"PRId32"]: %s: " + "%" PRIusz + " bytes for " + "topic %.*s [%" PRId32 + "]: %s: " "sending uncompressed", len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_strerror(-r)); + rktp->rktp_partition, rd_strerror(-r)); rd_free(ciov->iov_base); return -1; } @@ -1120,17 +1092,17 @@ rd_kafka_msgset_writer_compress_snappy (rd_kafka_msgset_writer_t *msetw, /** * @brief Compress messageset using LZ4F */ -static int 
-rd_kafka_msgset_writer_compress_lz4 (rd_kafka_msgset_writer_t *msetw, - rd_slice_t *slice, struct iovec *ciov) { +static int rd_kafka_msgset_writer_compress_lz4(rd_kafka_msgset_writer_t *msetw, + rd_slice_t *slice, + struct iovec *ciov) { rd_kafka_resp_err_t err; int comp_level = - msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; + msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; err = rd_kafka_lz4_compress(msetw->msetw_rkb, /* Correct or incorrect HC */ msetw->msetw_MsgVersion >= 1 ? 1 : 0, - comp_level, - slice, &ciov->iov_base, &ciov->iov_len); + comp_level, slice, &ciov->iov_base, + &ciov->iov_len); return (err ? -1 : 0); } @@ -1138,15 +1110,14 @@ rd_kafka_msgset_writer_compress_lz4 (rd_kafka_msgset_writer_t *msetw, /** * @brief Compress messageset using ZSTD */ -static int -rd_kafka_msgset_writer_compress_zstd (rd_kafka_msgset_writer_t *msetw, - rd_slice_t *slice, struct iovec *ciov) { +static int rd_kafka_msgset_writer_compress_zstd(rd_kafka_msgset_writer_t *msetw, + rd_slice_t *slice, + struct iovec *ciov) { rd_kafka_resp_err_t err; int comp_level = - msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; - err = rd_kafka_zstd_compress(msetw->msetw_rkb, - comp_level, - slice, &ciov->iov_base, &ciov->iov_len); + msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; + err = rd_kafka_zstd_compress(msetw->msetw_rkb, comp_level, slice, + &ciov->iov_base, &ciov->iov_len); return (err ? -1 : 0); } #endif @@ -1159,14 +1130,13 @@ rd_kafka_msgset_writer_compress_zstd (rd_kafka_msgset_writer_t *msetw, * @remark Compression failures are not critical, we'll just send the * the messageset uncompressed. 
*/ -static int -rd_kafka_msgset_writer_compress (rd_kafka_msgset_writer_t *msetw, - size_t *outlenp) { +static int rd_kafka_msgset_writer_compress(rd_kafka_msgset_writer_t *msetw, + size_t *outlenp) { rd_buf_t *rbuf = &msetw->msetw_rkbuf->rkbuf_buf; rd_slice_t slice; - size_t len = *outlenp; + size_t len = *outlenp; struct iovec ciov = RD_ZERO_INIT; /* Compressed output buffer */ - int r = -1; + int r = -1; size_t outlen; rd_assert(rd_buf_len(rbuf) >= msetw->msetw_firstmsg.of + len); @@ -1175,8 +1145,7 @@ rd_kafka_msgset_writer_compress (rd_kafka_msgset_writer_t *msetw, r = rd_slice_init(&slice, rbuf, msetw->msetw_firstmsg.of, len); rd_assert(r == 0 || !*"invalid firstmsg position"); - switch (msetw->msetw_compression) - { + switch (msetw->msetw_compression) { #if WITH_ZLIB case RD_KAFKA_COMPRESSION_GZIP: r = rd_kafka_msgset_writer_compress_gzip(msetw, &slice, &ciov); @@ -1238,15 +1207,13 @@ rd_kafka_msgset_writer_compress (rd_kafka_msgset_writer_t *msetw, } else { /* Older MessageSets envelope/wrap the compressed MessageSet * in an outer Message. */ - rd_kafka_msg_t rkm = { - .rkm_len = ciov.iov_len, - .rkm_payload = ciov.iov_base, - .rkm_timestamp = msetw->msetw_firstmsg.timestamp - }; - outlen = rd_kafka_msgset_writer_write_msg( - msetw, &rkm, 0, - msetw->msetw_compression, - rd_free/*free for ciov.iov_base*/); + rd_kafka_msg_t rkm = {.rkm_len = ciov.iov_len, + .rkm_payload = ciov.iov_base, + .rkm_timestamp = + msetw->msetw_firstmsg.timestamp}; + outlen = rd_kafka_msgset_writer_write_msg( + msetw, &rkm, 0, msetw->msetw_compression, + rd_free /*free for ciov.iov_base*/); } *outlenp = outlen; @@ -1256,23 +1223,22 @@ rd_kafka_msgset_writer_compress (rd_kafka_msgset_writer_t *msetw, - /** * @brief Calculate MessageSet v2 CRC (CRC32C) when messageset is complete. 
*/ static void -rd_kafka_msgset_writer_calc_crc_v2 (rd_kafka_msgset_writer_t *msetw) { +rd_kafka_msgset_writer_calc_crc_v2(rd_kafka_msgset_writer_t *msetw) { int32_t crc; rd_slice_t slice; int r; r = rd_slice_init(&slice, &msetw->msetw_rkbuf->rkbuf_buf, - msetw->msetw_of_CRC+4, + msetw->msetw_of_CRC + 4, rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) - - msetw->msetw_of_CRC-4); - rd_assert(!r && *"slice_init failed"); + msetw->msetw_of_CRC - 4); + rd_assert(!r && *"slice_init failed"); - /* CRC32C calculation */ + /* CRC32C calculation */ crc = rd_slice_crc32c(&slice); /* Update CRC at MessageSet v2 CRC offset */ @@ -1282,77 +1248,76 @@ rd_kafka_msgset_writer_calc_crc_v2 (rd_kafka_msgset_writer_t *msetw) { /** * @brief Finalize MessageSet v2 header fields. */ -static void -rd_kafka_msgset_writer_finalize_MessageSet_v2_header ( - rd_kafka_msgset_writer_t *msetw) { +static void rd_kafka_msgset_writer_finalize_MessageSet_v2_header( + rd_kafka_msgset_writer_t *msetw) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; - int msgcnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq); + int msgcnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq); rd_kafka_assert(NULL, msgcnt > 0); rd_kafka_assert(NULL, msetw->msetw_ApiVersion >= 3); - msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V2_SIZE + - msetw->msetw_messages_len; + msetw->msetw_MessageSetSize = + RD_KAFKAP_MSGSET_V2_SIZE + msetw->msetw_messages_len; /* MessageSet.Length is the same as * MessageSetSize minus field widths for FirstOffset+Length */ - rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_Length, - (int32_t)msetw->msetw_MessageSetSize - (8+4)); + rd_kafka_buf_update_i32( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_Length, + (int32_t)msetw->msetw_MessageSetSize - (8 + 4)); msetw->msetw_Attributes |= RD_KAFKA_MSG_ATTR_CREATE_TIME; if (rd_kafka_is_transactional(msetw->msetw_rkb->rkb_rk)) msetw->msetw_Attributes |= - RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL; + 
RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL; - rd_kafka_buf_update_i16(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_Attributes, - msetw->msetw_Attributes); + rd_kafka_buf_update_i16( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_Attributes, + msetw->msetw_Attributes); - rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta, - msgcnt-1); + rd_kafka_buf_update_i32(rkbuf, + msetw->msetw_of_start + + RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta, + msgcnt - 1); - rd_kafka_buf_update_i64(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp, - msetw->msetw_firstmsg.timestamp); + rd_kafka_buf_update_i64( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp, + msetw->msetw_firstmsg.timestamp); - rd_kafka_buf_update_i64(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp, - msetw->msetw_MaxTimestamp); + rd_kafka_buf_update_i64( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp, + msetw->msetw_MaxTimestamp); - rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_BaseSequence, - msetw->msetw_batch->first_seq); + rd_kafka_buf_update_i32( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_BaseSequence, + msetw->msetw_batch->first_seq); - rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_RecordCount, msgcnt); + rd_kafka_buf_update_i32( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_RecordCount, + msgcnt); rd_kafka_msgset_writer_calc_crc_v2(msetw); } - /** * @brief Finalize the MessageSet header, if applicable. 
*/ static void -rd_kafka_msgset_writer_finalize_MessageSet (rd_kafka_msgset_writer_t *msetw) { +rd_kafka_msgset_writer_finalize_MessageSet(rd_kafka_msgset_writer_t *msetw) { rd_dassert(msetw->msetw_messages_len > 0); if (msetw->msetw_MsgVersion == 2) rd_kafka_msgset_writer_finalize_MessageSet_v2_header(msetw); else - msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V0_SIZE + - msetw->msetw_messages_len; + msetw->msetw_MessageSetSize = + RD_KAFKAP_MSGSET_V0_SIZE + msetw->msetw_messages_len; /* Update MessageSetSize */ rd_kafka_buf_update_i32(msetw->msetw_rkbuf, msetw->msetw_of_MessageSetSize, (int32_t)msetw->msetw_MessageSetSize); - } @@ -1371,28 +1336,29 @@ rd_kafka_msgset_writer_finalize_MessageSet (rd_kafka_msgset_writer_t *msetw) { * in messageset. */ static rd_kafka_buf_t * -rd_kafka_msgset_writer_finalize (rd_kafka_msgset_writer_t *msetw, - size_t *MessageSetSizep) { - rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; +rd_kafka_msgset_writer_finalize(rd_kafka_msgset_writer_t *msetw, + size_t *MessageSetSizep) { + rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; rd_kafka_toppar_t *rktp = msetw->msetw_rktp; size_t len; int cnt; /* No messages added, bail out early. 
*/ - if (unlikely((cnt = - rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq)) == 0)) { + if (unlikely((cnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq)) == + 0)) { rd_kafka_buf_destroy(rkbuf); return NULL; } /* Total size of messages */ len = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) - - msetw->msetw_firstmsg.of; + msetw->msetw_firstmsg.of; rd_assert(len > 0); rd_assert(len <= (size_t)rktp->rktp_rkt->rkt_rk->rk_conf.max_msg_size); rd_atomic64_add(&rktp->rktp_c.tx_msgs, cnt); - rd_atomic64_add(&rktp->rktp_c.tx_msg_bytes, msetw->msetw_messages_kvlen); + rd_atomic64_add(&rktp->rktp_c.tx_msg_bytes, + msetw->msetw_messages_kvlen); /* Idempotent Producer: * Store request's PID for matching on response @@ -1415,19 +1381,21 @@ rd_kafka_msgset_writer_finalize (rd_kafka_msgset_writer_t *msetw, *MessageSetSizep = msetw->msetw_MessageSetSize; rd_rkb_dbg(msetw->msetw_rkb, MSG, "PRODUCE", - "%s [%"PRId32"]: " - "Produce MessageSet with %i message(s) (%"PRIusz" bytes, " - "ApiVersion %d, MsgVersion %d, MsgId %"PRIu64", " - "BaseSeq %"PRId32", %s, %s)", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - cnt, msetw->msetw_MessageSetSize, - msetw->msetw_ApiVersion, msetw->msetw_MsgVersion, - msetw->msetw_batch->first_msgid, + "%s [%" PRId32 + "]: " + "Produce MessageSet with %i message(s) (%" PRIusz + " bytes, " + "ApiVersion %d, MsgVersion %d, MsgId %" PRIu64 + ", " + "BaseSeq %" PRId32 ", %s, %s)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, cnt, + msetw->msetw_MessageSetSize, msetw->msetw_ApiVersion, + msetw->msetw_MsgVersion, msetw->msetw_batch->first_msgid, msetw->msetw_batch->first_seq, rd_kafka_pid2str(msetw->msetw_pid), - msetw->msetw_compression ? - rd_kafka_compression2str(msetw->msetw_compression) : - "uncompressed"); + msetw->msetw_compression + ? 
rd_kafka_compression2str(msetw->msetw_compression) + : "uncompressed"); rd_kafka_msgq_verify_order(rktp, &msetw->msetw_batch->msgq, msetw->msetw_batch->first_msgid, rd_false); @@ -1452,26 +1420,25 @@ rd_kafka_msgset_writer_finalize (rd_kafka_msgset_writer_t *msetw, * * @locality broker thread */ -rd_kafka_buf_t * -rd_kafka_msgset_create_ProduceRequest (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq, - const rd_kafka_pid_t pid, - uint64_t epoch_base_msgid, - size_t *MessageSetSizep) { +rd_kafka_buf_t *rd_kafka_msgset_create_ProduceRequest(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + const rd_kafka_pid_t pid, + uint64_t epoch_base_msgid, + size_t *MessageSetSizep) { rd_kafka_msgset_writer_t msetw; - if (rd_kafka_msgset_writer_init(&msetw, rkb, rktp, rkmq, - pid, epoch_base_msgid) <= 0) + if (rd_kafka_msgset_writer_init(&msetw, rkb, rktp, rkmq, pid, + epoch_base_msgid) <= 0) return NULL; if (!rd_kafka_msgset_writer_write_msgq(&msetw, msetw.msetw_msgq)) { /* Error while writing messages to MessageSet, * move all messages back on the xmit queue. */ rd_kafka_msgq_insert_msgq( - rkmq, &msetw.msetw_batch->msgq, - rktp->rktp_rkt->rkt_conf.msg_order_cmp); + rkmq, &msetw.msetw_batch->msgq, + rktp->rktp_rkt->rkt_conf.msg_order_cmp); } return rd_kafka_msgset_writer_finalize(&msetw, MessageSetSizep); diff --git a/src/rdkafka_offset.c b/src/rdkafka_offset.c index 0e895d16ff..14f2d4441f 100644 --- a/src/rdkafka_offset.c +++ b/src/rdkafka_offset.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012,2013 Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. 
+ * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -68,14 +68,14 @@ /** * Convert an absolute or logical offset to string. 
*/ -const char *rd_kafka_offset2str (int64_t offset) { +const char *rd_kafka_offset2str(int64_t offset) { static RD_TLS char ret[16][32]; static RD_TLS int i = 0; i = (i + 1) % 16; if (offset >= 0) - rd_snprintf(ret[i], sizeof(ret[i]), "%"PRId64, offset); + rd_snprintf(ret[i], sizeof(ret[i]), "%" PRId64, offset); else if (offset == RD_KAFKA_OFFSET_BEGINNING) return "BEGINNING"; else if (offset == RD_KAFKA_OFFSET_END) @@ -86,19 +86,19 @@ const char *rd_kafka_offset2str (int64_t offset) { return "INVALID"; else if (offset <= RD_KAFKA_OFFSET_TAIL_BASE) rd_snprintf(ret[i], sizeof(ret[i]), "TAIL(%lld)", - llabs(offset - RD_KAFKA_OFFSET_TAIL_BASE)); + llabs(offset - RD_KAFKA_OFFSET_TAIL_BASE)); else - rd_snprintf(ret[i], sizeof(ret[i]), "%"PRId64"?", offset); + rd_snprintf(ret[i], sizeof(ret[i]), "%" PRId64 "?", offset); return ret[i]; } -static void rd_kafka_offset_file_close (rd_kafka_toppar_t *rktp) { - if (!rktp->rktp_offset_fp) - return; +static void rd_kafka_offset_file_close(rd_kafka_toppar_t *rktp) { + if (!rktp->rktp_offset_fp) + return; - fclose(rktp->rktp_offset_fp); - rktp->rktp_offset_fp = NULL; + fclose(rktp->rktp_offset_fp); + rktp->rktp_offset_fp = NULL; } @@ -106,10 +106,12 @@ static void rd_kafka_offset_file_close (rd_kafka_toppar_t *rktp) { /** * Linux version of open callback providing racefree CLOEXEC. */ -int rd_kafka_open_cb_linux (const char *pathname, int flags, mode_t mode, - void *opaque) { +int rd_kafka_open_cb_linux(const char *pathname, + int flags, + mode_t mode, + void *opaque) { #ifdef O_CLOEXEC - return open(pathname, flags|O_CLOEXEC, mode); + return open(pathname, flags | O_CLOEXEC, mode); #else return rd_kafka_open_cb_generic(pathname, flags, mode, opaque); #endif @@ -120,12 +122,14 @@ int rd_kafka_open_cb_linux (const char *pathname, int flags, mode_t mode, * Fallback version of open_cb NOT providing racefree CLOEXEC, * but setting CLOEXEC after file open (if FD_CLOEXEC is defined). 
*/ -int rd_kafka_open_cb_generic (const char *pathname, int flags, mode_t mode, - void *opaque) { +int rd_kafka_open_cb_generic(const char *pathname, + int flags, + mode_t mode, + void *opaque) { #ifndef _WIN32 - int fd; + int fd; int on = 1; - fd = open(pathname, flags, mode); + fd = open(pathname, flags, mode); if (fd == -1) return -1; #ifdef FD_CLOEXEC @@ -133,121 +137,117 @@ int rd_kafka_open_cb_generic (const char *pathname, int flags, mode_t mode, #endif return fd; #else - int fd; - if (_sopen_s(&fd, pathname, flags, _SH_DENYNO, mode) != 0) - return -1; - return fd; + int fd; + if (_sopen_s(&fd, pathname, flags, _SH_DENYNO, mode) != 0) + return -1; + return fd; #endif } -static int rd_kafka_offset_file_open (rd_kafka_toppar_t *rktp) { +static int rd_kafka_offset_file_open(rd_kafka_toppar_t *rktp) { rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; int fd; #ifndef _WIN32 - mode_t mode = 0644; + mode_t mode = 0644; #else - mode_t mode = _S_IREAD|_S_IWRITE; + mode_t mode = _S_IREAD | _S_IWRITE; #endif - if ((fd = rk->rk_conf.open_cb(rktp->rktp_offset_path, - O_CREAT|O_RDWR, mode, - rk->rk_conf.opaque)) == -1) { - rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, - RD_KAFKA_RESP_ERR__FS, - "%s [%"PRId32"]: " - "Failed to open offset file %s: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offset_path, rd_strerror(errno)); - return -1; - } - - rktp->rktp_offset_fp = + if ((fd = rk->rk_conf.open_cb(rktp->rktp_offset_path, O_CREAT | O_RDWR, + mode, rk->rk_conf.opaque)) == -1) { + rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS, + "%s [%" PRId32 + "]: " + "Failed to open offset file %s: %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_offset_path, + rd_strerror(errno)); + return -1; + } + + rktp->rktp_offset_fp = #ifndef _WIN32 - fdopen(fd, "r+"); + fdopen(fd, "r+"); #else - _fdopen(fd, "r+"); + _fdopen(fd, "r+"); #endif - return 0; + return 0; } -static int64_t rd_kafka_offset_file_read (rd_kafka_toppar_t *rktp) { - 
char buf[22]; - char *end; - int64_t offset; - size_t r; - - if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) { - rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, - RD_KAFKA_RESP_ERR__FS, - "%s [%"PRId32"]: " - "Seek (for read) failed on offset file %s: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offset_path, - rd_strerror(errno)); - rd_kafka_offset_file_close(rktp); - return RD_KAFKA_OFFSET_INVALID; - } - - r = fread(buf, 1, sizeof(buf) - 1, rktp->rktp_offset_fp); - if (r == 0) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: offset file (%s) is empty", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offset_path); - return RD_KAFKA_OFFSET_INVALID; - } - - buf[r] = '\0'; - - offset = strtoull(buf, &end, 10); - if (buf == end) { - rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, - RD_KAFKA_RESP_ERR__FS, - "%s [%"PRId32"]: " - "Unable to parse offset in %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offset_path); - return RD_KAFKA_OFFSET_INVALID; - } - - - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: Read offset %"PRId64" from offset " - "file (%s)", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - offset, rktp->rktp_offset_path); - - return offset; +static int64_t rd_kafka_offset_file_read(rd_kafka_toppar_t *rktp) { + char buf[22]; + char *end; + int64_t offset; + size_t r; + + if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) { + rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS, + "%s [%" PRId32 + "]: " + "Seek (for read) failed on offset file %s: %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_offset_path, + rd_strerror(errno)); + rd_kafka_offset_file_close(rktp); + return RD_KAFKA_OFFSET_INVALID; + } + + r = fread(buf, 1, sizeof(buf) - 1, rktp->rktp_offset_fp); + if (r == 0) { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 "]: offset file (%s) is empty", + 
rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_offset_path); + return RD_KAFKA_OFFSET_INVALID; + } + + buf[r] = '\0'; + + offset = strtoull(buf, &end, 10); + if (buf == end) { + rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS, + "%s [%" PRId32 + "]: " + "Unable to parse offset in %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_offset_path); + return RD_KAFKA_OFFSET_INVALID; + } + + + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 "]: Read offset %" PRId64 + " from offset " + "file (%s)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + offset, rktp->rktp_offset_path); + + return offset; } /** * Sync/flush offset file. */ -static int rd_kafka_offset_file_sync (rd_kafka_toppar_t *rktp) { +static int rd_kafka_offset_file_sync(rd_kafka_toppar_t *rktp) { if (!rktp->rktp_offset_fp) return 0; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "SYNC", - "%s [%"PRId32"]: offset file sync", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition); + "%s [%" PRId32 "]: offset file sync", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); #ifndef _WIN32 - (void)fflush(rktp->rktp_offset_fp); - (void)fsync(fileno(rktp->rktp_offset_fp)); // FIXME + (void)fflush(rktp->rktp_offset_fp); + (void)fsync(fileno(rktp->rktp_offset_fp)); // FIXME #else - // FIXME - // FlushFileBuffers(_get_osfhandle(fileno(rktp->rktp_offset_fp))); + // FIXME + // FlushFileBuffers(_get_osfhandle(fileno(rktp->rktp_offset_fp))); #endif - return 0; + return 0; } @@ -257,82 +257,83 @@ static int rd_kafka_offset_file_sync (rd_kafka_toppar_t *rktp) { * Locality: toppar's broker thread */ static rd_kafka_resp_err_t -rd_kafka_offset_file_commit (rd_kafka_toppar_t *rktp) { - rd_kafka_topic_t *rkt = rktp->rktp_rkt; - int attempt; +rd_kafka_offset_file_commit(rd_kafka_toppar_t *rktp) { + rd_kafka_topic_t *rkt = rktp->rktp_rkt; + int attempt; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - int64_t offset = 
rktp->rktp_stored_offset; - - for (attempt = 0 ; attempt < 2 ; attempt++) { - char buf[22]; - int len; - - if (!rktp->rktp_offset_fp) - if (rd_kafka_offset_file_open(rktp) == -1) - continue; - - if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) { - rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, - RD_KAFKA_RESP_ERR__FS, - "%s [%"PRId32"]: " - "Seek failed on offset file %s: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offset_path, - rd_strerror(errno)); + int64_t offset = rktp->rktp_stored_offset; + + for (attempt = 0; attempt < 2; attempt++) { + char buf[22]; + int len; + + if (!rktp->rktp_offset_fp) + if (rd_kafka_offset_file_open(rktp) == -1) + continue; + + if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) { + rd_kafka_op_err( + rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS, + "%s [%" PRId32 + "]: " + "Seek failed on offset file %s: %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_offset_path, + rd_strerror(errno)); err = RD_KAFKA_RESP_ERR__FS; - rd_kafka_offset_file_close(rktp); - continue; - } - - len = rd_snprintf(buf, sizeof(buf), "%"PRId64"\n", offset); - - if (fwrite(buf, 1, len, rktp->rktp_offset_fp) < 1) { - rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, - RD_KAFKA_RESP_ERR__FS, - "%s [%"PRId32"]: " - "Failed to write offset %"PRId64" to " - "offset file %s: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - offset, - rktp->rktp_offset_path, - rd_strerror(errno)); + rd_kafka_offset_file_close(rktp); + continue; + } + + len = rd_snprintf(buf, sizeof(buf), "%" PRId64 "\n", offset); + + if (fwrite(buf, 1, len, rktp->rktp_offset_fp) < 1) { + rd_kafka_op_err( + rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS, + "%s [%" PRId32 + "]: " + "Failed to write offset %" PRId64 + " to " + "offset file %s: %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, offset, + rktp->rktp_offset_path, rd_strerror(errno)); err = RD_KAFKA_RESP_ERR__FS; - rd_kafka_offset_file_close(rktp); - continue; - } + 
rd_kafka_offset_file_close(rktp); + continue; + } /* Need to flush before truncate to preserve write ordering */ (void)fflush(rktp->rktp_offset_fp); - /* Truncate file */ + /* Truncate file */ #ifdef _WIN32 - if (_chsize_s(_fileno(rktp->rktp_offset_fp), len) == -1) - ; /* Ignore truncate failures */ + if (_chsize_s(_fileno(rktp->rktp_offset_fp), len) == -1) + ; /* Ignore truncate failures */ #else - if (ftruncate(fileno(rktp->rktp_offset_fp), len) == -1) - ; /* Ignore truncate failures */ + if (ftruncate(fileno(rktp->rktp_offset_fp), len) == -1) + ; /* Ignore truncate failures */ #endif - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: wrote offset %"PRId64" to " - "file %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, offset, - rktp->rktp_offset_path); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 "]: wrote offset %" PRId64 + " to " + "file %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, offset, + rktp->rktp_offset_path); - rktp->rktp_committed_offset = offset; + rktp->rktp_committed_offset = offset; - /* If sync interval is set to immediate we sync right away. */ - if (rkt->rkt_conf.offset_store_sync_interval_ms == 0) - rd_kafka_offset_file_sync(rktp); + /* If sync interval is set to immediate we sync right away. 
*/ + if (rkt->rkt_conf.offset_store_sync_interval_ms == 0) + rd_kafka_offset_file_sync(rktp); - return RD_KAFKA_RESP_ERR_NO_ERROR; - } + return RD_KAFKA_RESP_ERR_NO_ERROR; + } - return err; + return err; } @@ -344,16 +345,16 @@ rd_kafka_offset_file_commit (rd_kafka_toppar_t *rktp) { * Makes a copy of \p offsets (may be NULL for current assignment) */ static rd_kafka_resp_err_t -rd_kafka_commit0 (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq, - void (*cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque), - void *opaque, - const char *reason) { +rd_kafka_commit0(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq, + void (*cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque), + void *opaque, + const char *reason) { rd_kafka_cgrp_t *rkcg; rd_kafka_op_t *rko; @@ -362,15 +363,15 @@ rd_kafka_commit0 (rd_kafka_t *rk, rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT); rko->rko_u.offset_commit.reason = rd_strdup(reason); - rko->rko_replyq = replyq; - rko->rko_u.offset_commit.cb = cb; - rko->rko_u.offset_commit.opaque = opaque; - if (rktp) - rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rko->rko_replyq = replyq; + rko->rko_u.offset_commit.cb = cb; + rko->rko_u.offset_commit.opaque = opaque; + if (rktp) + rko->rko_rktp = rd_kafka_toppar_keep(rktp); if (offsets) - rko->rko_u.offset_commit.partitions = - rd_kafka_topic_partition_list_copy(offsets); + rko->rko_u.offset_commit.partitions = + rd_kafka_topic_partition_list_copy(offsets); rd_kafka_q_enq(rkcg->rkcg_ops, rko); @@ -379,24 +380,24 @@ rd_kafka_commit0 (rd_kafka_t *rk, - /** * NOTE: 'offsets' may be NULL, see official documentation. 
*/ rd_kafka_resp_err_t -rd_kafka_commit (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, int async) { +rd_kafka_commit(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + int async) { rd_kafka_cgrp_t *rkcg; - rd_kafka_resp_err_t err; - rd_kafka_q_t *repq = NULL; - rd_kafka_replyq_t rq = RD_KAFKA_NO_REPLYQ; + rd_kafka_resp_err_t err; + rd_kafka_q_t *repq = NULL; + rd_kafka_replyq_t rq = RD_KAFKA_NO_REPLYQ; if (!(rkcg = rd_kafka_cgrp_get(rk))) return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; if (!async) { repq = rd_kafka_q_new(rk); - rq = RD_KAFKA_REPLYQ(repq, 0); + rq = RD_KAFKA_REPLYQ(repq, 0); } err = rd_kafka_commit0(rk, offsets, NULL, rq, NULL, NULL, "manual"); @@ -407,13 +408,13 @@ rd_kafka_commit (rd_kafka_t *rk, if (!async) rd_kafka_q_destroy_owner(repq); - return err; + return err; } -rd_kafka_resp_err_t -rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - int async) { +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + int async) { rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; rd_kafka_resp_err_t err; @@ -422,10 +423,9 @@ rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, return RD_KAFKA_RESP_ERR__INVALID_ARG; offsets = rd_kafka_topic_partition_list_new(1); - rktpar = rd_kafka_topic_partition_list_add( - offsets, rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition); - rktpar->offset = rkmessage->offset+1; + rktpar = rd_kafka_topic_partition_list_add( + offsets, rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition); + rktpar->offset = rkmessage->offset + 1; err = rd_kafka_commit(rk, offsets, async); @@ -437,41 +437,38 @@ rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, rd_kafka_resp_err_t -rd_kafka_commit_queue (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_queue_t *rkqu, - void (*cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t 
err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque), - void *opaque) { - rd_kafka_q_t *rkq; - rd_kafka_resp_err_t err; +rd_kafka_commit_queue(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_queue_t *rkqu, + void (*cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque), + void *opaque) { + rd_kafka_q_t *rkq; + rd_kafka_resp_err_t err; if (!rd_kafka_cgrp_get(rk)) return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; - if (rkqu) - rkq = rkqu->rkqu_q; - else - rkq = rd_kafka_q_new(rk); - - err = rd_kafka_commit0(rk, offsets, NULL, - RD_KAFKA_REPLYQ(rkq, 0), - cb, opaque, "manual"); - - if (!rkqu) { - rd_kafka_op_t *rko = - rd_kafka_q_pop_serve(rkq, RD_POLL_INFINITE, - 0, RD_KAFKA_Q_CB_FORCE_RETURN, - NULL, NULL); - if (!rko) - err = RD_KAFKA_RESP_ERR__TIMED_OUT; - else { + if (rkqu) + rkq = rkqu->rkqu_q; + else + rkq = rd_kafka_q_new(rk); + + err = rd_kafka_commit0(rk, offsets, NULL, RD_KAFKA_REPLYQ(rkq, 0), cb, + opaque, "manual"); + + if (!rkqu) { + rd_kafka_op_t *rko = rd_kafka_q_pop_serve( + rkq, RD_POLL_INFINITE, 0, RD_KAFKA_Q_CB_FORCE_RETURN, NULL, + NULL); + if (!rko) + err = RD_KAFKA_RESP_ERR__TIMED_OUT; + else { if (cb) cb(rk, rko->rko_err, - rko->rko_u.offset_commit.partitions, - opaque); + rko->rko_u.offset_commit.partitions, opaque); err = rko->rko_err; rd_kafka_op_destroy(rko); } @@ -480,14 +477,13 @@ rd_kafka_commit_queue (rd_kafka_t *rk, rd_kafka_q_destroy(rkq); else rd_kafka_q_destroy_owner(rkq); - } + } - return err; + return err; } - /** * Called when a broker commit is done. 
* @@ -495,10 +491,10 @@ rd_kafka_commit_queue (rd_kafka_t *rk, * Locks: none */ static void -rd_kafka_offset_broker_commit_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { +rd_kafka_offset_broker_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { rd_kafka_toppar_t *rktp; rd_kafka_topic_partition_t *rktpar; @@ -510,30 +506,27 @@ rd_kafka_offset_broker_commit_cb (rd_kafka_t *rk, rktpar = &offsets->elems[0]; - if (!(rktp = rd_kafka_topic_partition_get_toppar(rk, rktpar, - rd_false))) { - rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT", - "No local partition found for %s [%"PRId32"] " - "while parsing OffsetCommit response " - "(offset %"PRId64", error \"%s\")", - rktpar->topic, - rktpar->partition, - rktpar->offset, - rd_kafka_err2str(rktpar->err)); + if (!(rktp = + rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false))) { + rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT", + "No local partition found for %s [%" PRId32 + "] " + "while parsing OffsetCommit response " + "(offset %" PRId64 ", error \"%s\")", + rktpar->topic, rktpar->partition, rktpar->offset, + rd_kafka_err2str(rktpar->err)); return; } if (!err) err = rktpar->err; - rd_kafka_toppar_offset_commit_result(rktp, err, offsets); + rd_kafka_toppar_offset_commit_result(rktp, err, offsets); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: offset %"PRId64" %scommitted: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, rktpar->offset, - err ? "not " : "", - rd_kafka_err2str(err)); + "%s [%" PRId32 "]: offset %" PRId64 " %scommitted: %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktpar->offset, err ? 
"not " : "", rd_kafka_err2str(err)); rktp->rktp_committing_offset = 0; @@ -547,7 +540,7 @@ rd_kafka_offset_broker_commit_cb (rd_kafka_t *rk, static rd_kafka_resp_err_t -rd_kafka_offset_broker_commit (rd_kafka_toppar_t *rktp, const char *reason) { +rd_kafka_offset_broker_commit(rd_kafka_toppar_t *rktp, const char *reason) { rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; @@ -558,20 +551,19 @@ rd_kafka_offset_broker_commit (rd_kafka_toppar_t *rktp, const char *reason) { rktp->rktp_committing_offset = rktp->rktp_stored_offset; offsets = rd_kafka_topic_partition_list_new(1); - rktpar = rd_kafka_topic_partition_list_add( - offsets, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); + rktpar = rd_kafka_topic_partition_list_add( + offsets, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); rktpar->offset = rktp->rktp_committing_offset; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSETCMT", - "%.*s [%"PRId32"]: committing offset %"PRId64": %s", + "%.*s [%" PRId32 "]: committing offset %" PRId64 ": %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rktp->rktp_committing_offset, reason); rd_kafka_commit0(rktp->rktp_rkt->rkt_rk, offsets, rktp, - RD_KAFKA_REPLYQ(rktp->rktp_ops, 0), - rd_kafka_offset_broker_commit_cb, NULL, - reason); + RD_KAFKA_REPLYQ(rktp->rktp_ops, 0), + rd_kafka_offset_broker_commit_cb, NULL, reason); rd_kafka_topic_partition_list_destroy(offsets); @@ -580,23 +572,22 @@ rd_kafka_offset_broker_commit (rd_kafka_toppar_t *rktp, const char *reason) { - /** * Commit offset to backing store. * This might be an async operation. 
* * Locality: toppar handler thread */ -static -rd_kafka_resp_err_t rd_kafka_offset_commit (rd_kafka_toppar_t *rktp, - const char *reason) { +static rd_kafka_resp_err_t rd_kafka_offset_commit(rd_kafka_toppar_t *rktp, + const char *reason) { if (1) // FIXME - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: commit: " - "stored offset %"PRId64" > committed offset %"PRId64"?", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_stored_offset, rktp->rktp_committed_offset); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 + "]: commit: " + "stored offset %" PRId64 " > committed offset %" PRId64 "?", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktp->rktp_stored_offset, rktp->rktp_committed_offset); /* Already committed */ if (rktp->rktp_stored_offset <= rktp->rktp_committed_offset) @@ -606,8 +597,7 @@ rd_kafka_resp_err_t rd_kafka_offset_commit (rd_kafka_toppar_t *rktp, if (rktp->rktp_stored_offset <= rktp->rktp_committing_offset) return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS; - switch (rktp->rktp_rkt->rkt_conf.offset_store_method) - { + switch (rktp->rktp_rkt->rkt_conf.offset_store_method) { case RD_KAFKA_OFFSET_METHOD_FILE: return rd_kafka_offset_file_commit(rktp); case RD_KAFKA_OFFSET_METHOD_BROKER: @@ -620,16 +610,13 @@ rd_kafka_resp_err_t rd_kafka_offset_commit (rd_kafka_toppar_t *rktp, - - /** * Sync offset backing store. This is only used for METHOD_FILE. * * Locality: rktp's broker thread. */ -rd_kafka_resp_err_t rd_kafka_offset_sync (rd_kafka_toppar_t *rktp) { - switch (rktp->rktp_rkt->rkt_conf.offset_store_method) - { +rd_kafka_resp_err_t rd_kafka_offset_sync(rd_kafka_toppar_t *rktp) { + switch (rktp->rktp_rkt->rkt_conf.offset_store_method) { case RD_KAFKA_OFFSET_METHOD_FILE: return rd_kafka_offset_file_sync(rktp); default: @@ -644,96 +631,93 @@ rd_kafka_resp_err_t rd_kafka_offset_sync (rd_kafka_toppar_t *rktp) { * * NOTE: No locks must be held. 
*/ -rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *app_rkt, - int32_t partition, int64_t offset) { +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *app_rkt, + int32_t partition, + int64_t offset) { rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); - rd_kafka_toppar_t *rktp; + rd_kafka_toppar_t *rktp; - /* Find toppar */ - rd_kafka_topic_rdlock(rkt); - if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0/*!ua_on_miss*/))) { - rd_kafka_topic_rdunlock(rkt); - return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - } - rd_kafka_topic_rdunlock(rkt); + /* Find toppar */ + rd_kafka_topic_rdlock(rkt); + if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0 /*!ua_on_miss*/))) { + rd_kafka_topic_rdunlock(rkt); + return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + } + rd_kafka_topic_rdunlock(rkt); - rd_kafka_offset_store0(rktp, offset+1, 1/*lock*/); + rd_kafka_offset_store0(rktp, offset + 1, 1 /*lock*/); - rd_kafka_toppar_destroy(rktp); + rd_kafka_toppar_destroy(rktp); - return RD_KAFKA_RESP_ERR_NO_ERROR; + return RD_KAFKA_RESP_ERR_NO_ERROR; } rd_kafka_resp_err_t -rd_kafka_offsets_store (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets) { +rd_kafka_offsets_store(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets) { int i; int ok_cnt = 0; if (rk->rk_conf.enable_auto_offset_store) return RD_KAFKA_RESP_ERR__INVALID_ARG; - for (i = 0 ; i < offsets->cnt ; i++) { + for (i = 0; i < offsets->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &offsets->elems[i]; rd_kafka_toppar_t *rktp; - rktp = rd_kafka_topic_partition_get_toppar(rk, rktpar, - rd_false); + rktp = + rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false); if (!rktp) { rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; continue; } - rd_kafka_offset_store0(rktp, rktpar->offset, 1/*lock*/); + rd_kafka_offset_store0(rktp, rktpar->offset, 1 /*lock*/); rd_kafka_toppar_destroy(rktp); rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; ok_cnt++; } - return offsets->cnt > 0 && ok_cnt == 0 ? 
- RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION : - RD_KAFKA_RESP_ERR_NO_ERROR; + return offsets->cnt > 0 && ok_cnt == 0 + ? RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION + : RD_KAFKA_RESP_ERR_NO_ERROR; } - - - /** * Decommissions the use of an offset file for a toppar. * The file content will not be touched and the file will not be removed. */ -static rd_kafka_resp_err_t rd_kafka_offset_file_term (rd_kafka_toppar_t *rktp) { +static rd_kafka_resp_err_t rd_kafka_offset_file_term(rd_kafka_toppar_t *rktp) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Sync offset file if the sync is intervalled (> 0) */ if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0) { rd_kafka_offset_file_sync(rktp); - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_sync_tmr, 1/*lock*/); - } + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_sync_tmr, 1 /*lock*/); + } - rd_kafka_offset_file_close(rktp); + rd_kafka_offset_file_close(rktp); - rd_free(rktp->rktp_offset_path); - rktp->rktp_offset_path = NULL; + rd_free(rktp->rktp_offset_path); + rktp->rktp_offset_path = NULL; return err; } -static rd_kafka_op_res_t -rd_kafka_offset_reset_op_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { - rd_kafka_toppar_t *rktp = rko->rko_rktp; - rd_kafka_toppar_lock(rktp); - rd_kafka_offset_reset(rktp, - rko->rko_u.offset_reset.offset, +static rd_kafka_op_res_t rd_kafka_offset_reset_op_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_toppar_t *rktp = rko->rko_rktp; + rd_kafka_toppar_lock(rktp); + rd_kafka_offset_reset(rktp, rko->rko_u.offset_reset.offset, rko->rko_err, rko->rko_u.offset_reset.reason); - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); return RD_KAFKA_OP_RES_HANDLED; } @@ -749,37 +733,39 @@ rd_kafka_offset_reset_op_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, * @locality: any. if not main thread, work will be enqued on main thread. 
* @ocks: toppar_lock() MUST be held */ -void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset, - rd_kafka_resp_err_t err, const char *reason) { - int64_t offset = RD_KAFKA_OFFSET_INVALID; +void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp, + int64_t err_offset, + rd_kafka_resp_err_t err, + const char *reason) { + int64_t offset = RD_KAFKA_OFFSET_INVALID; const char *extra = ""; /* Enqueue op for toppar handler thread if we're on the wrong thread. */ if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) { - rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET | - RD_KAFKA_OP_CB); - rko->rko_op_cb = rd_kafka_offset_reset_op_cb; - rko->rko_err = err; - rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rd_kafka_op_t *rko = + rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET | RD_KAFKA_OP_CB); + rko->rko_op_cb = rd_kafka_offset_reset_op_cb; + rko->rko_err = err; + rko->rko_rktp = rd_kafka_toppar_keep(rktp); rko->rko_u.offset_reset.offset = err_offset; - rko->rko_u.offset_reset.reason = rd_strdup(reason); + rko->rko_u.offset_reset.reason = rd_strdup(reason); rd_kafka_q_enq(rktp->rktp_ops, rko); return; } - if (err_offset == RD_KAFKA_OFFSET_INVALID || err) - offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset; - else - offset = err_offset; + if (err_offset == RD_KAFKA_OFFSET_INVALID || err) + offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset; + else + offset = err_offset; - if (offset == RD_KAFKA_OFFSET_INVALID) { - /* Error, auto.offset.reset tells us to error out. */ + if (offset == RD_KAFKA_OFFSET_INVALID) { + /* Error, auto.offset.reset tells us to error out. 
*/ rd_kafka_consumer_err(rktp->rktp_fetchq, RD_KAFKA_NODEID_UA, - RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, - 0, NULL, rktp, err_offset, - "%s: %s", reason, rd_kafka_err2str(err)); - rd_kafka_toppar_set_fetch_state( - rktp, RD_KAFKA_TOPPAR_FETCH_NONE); + RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, 0, + NULL, rktp, err_offset, "%s: %s", reason, + rd_kafka_err2str(err)); + rd_kafka_toppar_set_fetch_state(rktp, + RD_KAFKA_TOPPAR_FETCH_NONE); } else if (offset == RD_KAFKA_OFFSET_BEGINNING && rktp->rktp_lo_offset >= 0) { @@ -787,7 +773,7 @@ void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset, * Note: The cached end offset (rktp_ls_offset) can't be * used here since the End offset is a constantly moving * target as new messages are produced. */ - extra = "cached BEGINNING offset "; + extra = "cached BEGINNING offset "; offset = rktp->rktp_lo_offset; rd_kafka_toppar_next_offset_handle(rktp, offset); @@ -795,39 +781,38 @@ void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset, /* Else query cluster for offset */ rktp->rktp_query_offset = offset; rd_kafka_toppar_set_fetch_state( - rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY); - } + rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY); + } /* Offset resets due to error are logged since they might have quite * critical impact. For non-errors, or for auto.offset.reset=error, * the reason is simply debug-logged. 
*/ if (!err || err == RD_KAFKA_RESP_ERR__NO_OFFSET || offset == RD_KAFKA_OFFSET_INVALID) - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: offset reset (at offset %s) " - "to %s%s: %s: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_offset2str(err_offset), - extra, rd_kafka_offset2str(offset), - reason, rd_kafka_err2str(err)); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 + "]: offset reset (at offset %s) " + "to %s%s: %s: %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_offset2str(err_offset), extra, + rd_kafka_offset2str(offset), reason, rd_kafka_err2str(err)); else - rd_kafka_log(rktp->rktp_rkt->rkt_rk, LOG_WARNING, "OFFSET", - "%s [%"PRId32"]: offset reset (at offset %s) " - "to %s%s: %s: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_offset2str(err_offset), - extra, rd_kafka_offset2str(offset), - reason, rd_kafka_err2str(err)); + rd_kafka_log( + rktp->rktp_rkt->rkt_rk, LOG_WARNING, "OFFSET", + "%s [%" PRId32 + "]: offset reset (at offset %s) " + "to %s%s: %s: %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_offset2str(err_offset), extra, + rd_kafka_offset2str(offset), reason, rd_kafka_err2str(err)); /* Note: If rktp is not delegated to the leader, then low and high offsets will necessarily be cached from the last FETCH request, and so this offset query will never occur in that case for BEGINNING / END logical offsets. */ - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) - rd_kafka_toppar_offset_request(rktp, - rktp->rktp_query_offset, + if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) + rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, err ? 100 : 0); } @@ -836,30 +821,29 @@ void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset, * Escape any special characters in filename 'in' and write escaped * string to 'out' (of max size out_size). 
*/ -static char *mk_esc_filename (const char *in, char *out, size_t out_size) { +static char *mk_esc_filename(const char *in, char *out, size_t out_size) { const char *s = in; - char *o = out; + char *o = out; while (*s) { const char *esc; size_t esclen; - switch (*s) - { + switch (*s) { case '/': /* linux */ - esc = "%2F"; + esc = "%2F"; esclen = strlen(esc); break; case ':': /* osx, windows */ - esc = "%3A"; + esc = "%3A"; esclen = strlen(esc); break; case '\\': /* windows */ - esc = "%5C"; + esc = "%5C"; esclen = strlen(esc); break; default: - esc = s; + esc = s; esclen = 1; break; } @@ -880,9 +864,9 @@ static char *mk_esc_filename (const char *in, char *out, size_t out_size) { } -static void rd_kafka_offset_sync_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { - rd_kafka_toppar_t *rktp = arg; - rd_kafka_offset_sync(rktp); +static void rd_kafka_offset_sync_tmr_cb(rd_kafka_timers_t *rkts, void *arg) { + rd_kafka_toppar_t *rktp = arg; + rd_kafka_offset_sync(rktp); } @@ -892,72 +876,72 @@ static void rd_kafka_offset_sync_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { * Locality: rdkafka main thread * Locks: toppar_lock(rktp) must be held */ -static void rd_kafka_offset_file_init (rd_kafka_toppar_t *rktp) { - char spath[4096+1]; /* larger than escfile to avoid warning */ - const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path; - int64_t offset = RD_KAFKA_OFFSET_INVALID; +static void rd_kafka_offset_file_init(rd_kafka_toppar_t *rktp) { + char spath[4096 + 1]; /* larger than escfile to avoid warning */ + const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path; + int64_t offset = RD_KAFKA_OFFSET_INVALID; - if (rd_kafka_path_is_dir(path)) { + if (rd_kafka_path_is_dir(path)) { char tmpfile[1024]; char escfile[4096]; /* Include group.id in filename if configured. 
*/ if (!RD_KAFKAP_STR_IS_NULL(rktp->rktp_rkt->rkt_rk->rk_group_id)) rd_snprintf(tmpfile, sizeof(tmpfile), - "%s-%"PRId32"-%.*s.offset", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_rk-> - rk_group_id)); + "%s-%" PRId32 "-%.*s.offset", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + RD_KAFKAP_STR_PR( + rktp->rktp_rkt->rkt_rk->rk_group_id)); else rd_snprintf(tmpfile, sizeof(tmpfile), - "%s-%"PRId32".offset", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition); + "%s-%" PRId32 ".offset", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition); /* Escape filename to make it safe. */ mk_esc_filename(tmpfile, escfile, sizeof(escfile)); - rd_snprintf(spath, sizeof(spath), "%s%s%s", - path, path[strlen(path)-1] == '/' ? "" : "/", escfile); + rd_snprintf(spath, sizeof(spath), "%s%s%s", path, + path[strlen(path) - 1] == '/' ? "" : "/", escfile); - path = spath; - } + path = spath; + } - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: using offset file %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - path); - rktp->rktp_offset_path = rd_strdup(path); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 "]: using offset file %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + path); + rktp->rktp_offset_path = rd_strdup(path); /* Set up the offset file sync interval. */ - if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0) - rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_sync_tmr, - rktp->rktp_rkt->rkt_conf. - offset_store_sync_interval_ms * 1000ll, - rd_kafka_offset_sync_tmr_cb, rktp); - - if (rd_kafka_offset_file_open(rktp) != -1) { - /* Read offset from offset file. 
*/ - offset = rd_kafka_offset_file_read(rktp); - } - - if (offset != RD_KAFKA_OFFSET_INVALID) { - /* Start fetching from offset */ - rktp->rktp_stored_offset = offset; - rktp->rktp_committed_offset = offset; + if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0) + rd_kafka_timer_start( + &rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_sync_tmr, + rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms * + 1000ll, + rd_kafka_offset_sync_tmr_cb, rktp); + + if (rd_kafka_offset_file_open(rktp) != -1) { + /* Read offset from offset file. */ + offset = rd_kafka_offset_file_read(rktp); + } + + if (offset != RD_KAFKA_OFFSET_INVALID) { + /* Start fetching from offset */ + rktp->rktp_stored_offset = offset; + rktp->rktp_committed_offset = offset; rd_kafka_toppar_next_offset_handle(rktp, offset); - } else { - /* Offset was not usable: perform offset reset logic */ - rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID; - rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_INVALID, - RD_KAFKA_RESP_ERR__FS, - "non-readable offset file"); - } + } else { + /* Offset was not usable: perform offset reset logic */ + rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID; + rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_INVALID, + RD_KAFKA_RESP_ERR__FS, + "non-readable offset file"); + } } @@ -965,7 +949,8 @@ static void rd_kafka_offset_file_init (rd_kafka_toppar_t *rktp) { /** * Terminate broker offset store */ -static rd_kafka_resp_err_t rd_kafka_offset_broker_term (rd_kafka_toppar_t *rktp){ +static rd_kafka_resp_err_t +rd_kafka_offset_broker_term(rd_kafka_toppar_t *rktp) { return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -975,7 +960,7 @@ static rd_kafka_resp_err_t rd_kafka_offset_broker_term (rd_kafka_toppar_t *rktp) * When using KafkaConsumer (high-level consumer) this functionality is * disabled in favour of the cgrp commits for the entire set of subscriptions. 
*/ -static void rd_kafka_offset_broker_init (rd_kafka_toppar_t *rktp) { +static void rd_kafka_offset_broker_init(rd_kafka_toppar_t *rktp) { if (!rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk)) return; rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_STORED, @@ -990,22 +975,20 @@ static void rd_kafka_offset_broker_init (rd_kafka_toppar_t *rktp) { * * Locks: rd_kafka_toppar_lock() MUST be held. */ -void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err) { +void rd_kafka_offset_store_term(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err) { rd_kafka_resp_err_t err2; - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "STORETERM", - "%s [%"PRId32"]: offset store terminating", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "STORETERM", + "%s [%" PRId32 "]: offset store terminating", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING; - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_commit_tmr, 1/*lock*/); + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_commit_tmr, 1 /*lock*/); - switch (rktp->rktp_rkt->rkt_conf.offset_store_method) - { + switch (rktp->rktp_rkt->rkt_conf.offset_store_method) { case RD_KAFKA_OFFSET_METHOD_FILE: err2 = rd_kafka_offset_file_term(rktp); break; @@ -1023,7 +1006,6 @@ void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp, err = err2; rd_kafka_toppar_fetch_stopped(rktp, err); - } @@ -1040,7 +1022,7 @@ void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp, * * Locks: rd_kafka_toppar_lock() MUST be held. 
*/ -rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp) { +rd_kafka_resp_err_t rd_kafka_offset_store_stop(rd_kafka_toppar_t *rktp) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE)) @@ -1049,12 +1031,12 @@ rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp) { rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: stopping offset store " - "(stored offset %"PRId64 - ", committed offset %"PRId64", EOF offset %"PRId64")", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_stored_offset, rktp->rktp_committed_offset, + "%s [%" PRId32 + "]: stopping offset store " + "(stored offset %" PRId64 ", committed offset %" PRId64 + ", EOF offset %" PRId64 ")", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktp->rktp_stored_offset, rktp->rktp_committed_offset, rktp->rktp_offsets_fin.eof_offset); /* Store end offset for empty partitions */ @@ -1062,7 +1044,7 @@ rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp) { rktp->rktp_stored_offset == RD_KAFKA_OFFSET_INVALID && rktp->rktp_offsets_fin.eof_offset > 0) rd_kafka_offset_store0(rktp, rktp->rktp_offsets_fin.eof_offset, - 0/*no lock*/); + 0 /*no lock*/); /* Commit offset to backing store. * This might be an async operation. 
*/ @@ -1082,23 +1064,24 @@ rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp) { } -static void rd_kafka_offset_auto_commit_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_toppar_t *rktp = arg; - rd_kafka_offset_commit(rktp, "auto commit timer"); +static void rd_kafka_offset_auto_commit_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_toppar_t *rktp = arg; + rd_kafka_offset_commit(rktp, "auto commit timer"); } -void rd_kafka_offset_query_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { - rd_kafka_toppar_t *rktp = arg; - rd_kafka_toppar_lock(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "Topic %s [%"PRId32"]: timed offset query for %s in " - "state %s", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rd_kafka_offset2str(rktp->rktp_query_offset), - rd_kafka_fetch_states[rktp->rktp_fetch_state]); - rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, 0); - rd_kafka_toppar_unlock(rktp); +void rd_kafka_offset_query_tmr_cb(rd_kafka_timers_t *rkts, void *arg) { + rd_kafka_toppar_t *rktp = arg; + rd_kafka_toppar_lock(rktp); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "Topic %s [%" PRId32 + "]: timed offset query for %s in " + "state %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_offset2str(rktp->rktp_query_offset), + rd_kafka_fetch_states[rktp->rktp_fetch_state]); + rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, 0); + rd_kafka_toppar_unlock(rktp); } @@ -1107,13 +1090,12 @@ void rd_kafka_offset_query_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { * * Locality: toppar handler thread */ -void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp) { - static const char *store_names[] = { "none", "file", "broker" }; +void rd_kafka_offset_store_init(rd_kafka_toppar_t *rktp) { + static const char *store_names[] = {"none", "file", "broker"}; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: using offset store method: %s", - 
rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, + "%s [%" PRId32 "]: using offset store method: %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, store_names[rktp->rktp_rkt->rkt_conf.offset_store_method]); /* The committed offset is unknown at this point. */ @@ -1122,15 +1104,13 @@ void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp) { /* Set up the commit interval (for simple consumer). */ if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) && rktp->rktp_rkt->rkt_conf.auto_commit_interval_ms > 0) - rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_commit_tmr, - rktp->rktp_rkt->rkt_conf. - auto_commit_interval_ms * 1000ll, - rd_kafka_offset_auto_commit_tmr_cb, - rktp); - - switch (rktp->rktp_rkt->rkt_conf.offset_store_method) - { + rd_kafka_timer_start( + &rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_commit_tmr, + rktp->rktp_rkt->rkt_conf.auto_commit_interval_ms * 1000ll, + rd_kafka_offset_auto_commit_tmr_cb, rktp); + + switch (rktp->rktp_rkt->rkt_conf.offset_store_method) { case RD_KAFKA_OFFSET_METHOD_FILE: rd_kafka_offset_file_init(rktp); break; @@ -1146,4 +1126,3 @@ void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp) { rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE; } - diff --git a/src/rdkafka_offset.h b/src/rdkafka_offset.h index 1f99d2b8e7..2db254c28c 100644 --- a/src/rdkafka_offset.h +++ b/src/rdkafka_offset.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012,2013 Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -32,7 +32,7 @@ #include "rdkafka_partition.h" -const char *rd_kafka_offset2str (int64_t offset); +const char *rd_kafka_offset2str(int64_t offset); /** @@ -42,29 +42,30 @@ const char *rd_kafka_offset2str (int64_t offset); * * See head of rdkafka_offset.c for more information. 
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_offset_store0 (rd_kafka_toppar_t *rktp, int64_t offset, - int lock) { - if (lock) - rd_kafka_toppar_lock(rktp); - rktp->rktp_stored_offset = offset; - if (lock) - rd_kafka_toppar_unlock(rktp); +static RD_INLINE RD_UNUSED void +rd_kafka_offset_store0(rd_kafka_toppar_t *rktp, int64_t offset, int lock) { + if (lock) + rd_kafka_toppar_lock(rktp); + rktp->rktp_stored_offset = offset; + if (lock) + rd_kafka_toppar_unlock(rktp); } -rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *rkt, - int32_t partition, int64_t offset); +rd_kafka_resp_err_t +rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset); -rd_kafka_resp_err_t rd_kafka_offset_sync (rd_kafka_toppar_t *rktp); +rd_kafka_resp_err_t rd_kafka_offset_sync(rd_kafka_toppar_t *rktp); -void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err); -rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp); -void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp); +void rd_kafka_offset_store_term(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err); +rd_kafka_resp_err_t rd_kafka_offset_store_stop(rd_kafka_toppar_t *rktp); +void rd_kafka_offset_store_init(rd_kafka_toppar_t *rktp); -void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset, - rd_kafka_resp_err_t err, const char *reason); +void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp, + int64_t err_offset, + rd_kafka_resp_err_t err, + const char *reason); -void rd_kafka_offset_query_tmr_cb (rd_kafka_timers_t *rkts, void *arg); +void rd_kafka_offset_query_tmr_cb(rd_kafka_timers_t *rkts, void *arg); #endif /* _RDKAFKA_OFFSET_H_ */ diff --git a/src/rdkafka_op.c b/src/rdkafka_op.c index 86d5f7e872..cfb7743ea7 100644 --- a/src/rdkafka_op.c +++ b/src/rdkafka_op.c @@ -40,62 +40,62 @@ rd_atomic32_t rd_kafka_op_cnt; -const char *rd_kafka_op2str (rd_kafka_op_type_t type) { - int skiplen = 6; +const char *rd_kafka_op2str(rd_kafka_op_type_t type) { + 
int skiplen = 6; static const char *names[RD_KAFKA_OP__END] = { - [RD_KAFKA_OP_NONE] = "REPLY:NONE", - [RD_KAFKA_OP_FETCH] = "REPLY:FETCH", - [RD_KAFKA_OP_ERR] = "REPLY:ERR", - [RD_KAFKA_OP_CONSUMER_ERR] = "REPLY:CONSUMER_ERR", - [RD_KAFKA_OP_DR] = "REPLY:DR", - [RD_KAFKA_OP_STATS] = "REPLY:STATS", - [RD_KAFKA_OP_OFFSET_COMMIT] = "REPLY:OFFSET_COMMIT", - [RD_KAFKA_OP_NODE_UPDATE] = "REPLY:NODE_UPDATE", - [RD_KAFKA_OP_XMIT_BUF] = "REPLY:XMIT_BUF", - [RD_KAFKA_OP_RECV_BUF] = "REPLY:RECV_BUF", - [RD_KAFKA_OP_XMIT_RETRY] = "REPLY:XMIT_RETRY", - [RD_KAFKA_OP_FETCH_START] = "REPLY:FETCH_START", - [RD_KAFKA_OP_FETCH_STOP] = "REPLY:FETCH_STOP", - [RD_KAFKA_OP_SEEK] = "REPLY:SEEK", - [RD_KAFKA_OP_PAUSE] = "REPLY:PAUSE", - [RD_KAFKA_OP_OFFSET_FETCH] = "REPLY:OFFSET_FETCH", - [RD_KAFKA_OP_PARTITION_JOIN] = "REPLY:PARTITION_JOIN", - [RD_KAFKA_OP_PARTITION_LEAVE] = "REPLY:PARTITION_LEAVE", - [RD_KAFKA_OP_REBALANCE] = "REPLY:REBALANCE", - [RD_KAFKA_OP_TERMINATE] = "REPLY:TERMINATE", - [RD_KAFKA_OP_COORD_QUERY] = "REPLY:COORD_QUERY", - [RD_KAFKA_OP_SUBSCRIBE] = "REPLY:SUBSCRIBE", - [RD_KAFKA_OP_ASSIGN] = "REPLY:ASSIGN", - [RD_KAFKA_OP_GET_SUBSCRIPTION] = "REPLY:GET_SUBSCRIPTION", - [RD_KAFKA_OP_GET_ASSIGNMENT] = "REPLY:GET_ASSIGNMENT", - [RD_KAFKA_OP_THROTTLE] = "REPLY:THROTTLE", - [RD_KAFKA_OP_NAME] = "REPLY:NAME", - [RD_KAFKA_OP_CG_METADATA] = "REPLY:CG_METADATA", - [RD_KAFKA_OP_OFFSET_RESET] = "REPLY:OFFSET_RESET", - [RD_KAFKA_OP_METADATA] = "REPLY:METADATA", - [RD_KAFKA_OP_LOG] = "REPLY:LOG", - [RD_KAFKA_OP_WAKEUP] = "REPLY:WAKEUP", - [RD_KAFKA_OP_CREATETOPICS] = "REPLY:CREATETOPICS", - [RD_KAFKA_OP_DELETETOPICS] = "REPLY:DELETETOPICS", - [RD_KAFKA_OP_CREATEPARTITIONS] = "REPLY:CREATEPARTITIONS", - [RD_KAFKA_OP_ALTERCONFIGS] = "REPLY:ALTERCONFIGS", - [RD_KAFKA_OP_DESCRIBECONFIGS] = "REPLY:DESCRIBECONFIGS", - [RD_KAFKA_OP_DELETERECORDS] = "REPLY:DELETERECORDS", - [RD_KAFKA_OP_DELETEGROUPS] = "REPLY:DELETEGROUPS", - [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] = + 
[RD_KAFKA_OP_NONE] = "REPLY:NONE", + [RD_KAFKA_OP_FETCH] = "REPLY:FETCH", + [RD_KAFKA_OP_ERR] = "REPLY:ERR", + [RD_KAFKA_OP_CONSUMER_ERR] = "REPLY:CONSUMER_ERR", + [RD_KAFKA_OP_DR] = "REPLY:DR", + [RD_KAFKA_OP_STATS] = "REPLY:STATS", + [RD_KAFKA_OP_OFFSET_COMMIT] = "REPLY:OFFSET_COMMIT", + [RD_KAFKA_OP_NODE_UPDATE] = "REPLY:NODE_UPDATE", + [RD_KAFKA_OP_XMIT_BUF] = "REPLY:XMIT_BUF", + [RD_KAFKA_OP_RECV_BUF] = "REPLY:RECV_BUF", + [RD_KAFKA_OP_XMIT_RETRY] = "REPLY:XMIT_RETRY", + [RD_KAFKA_OP_FETCH_START] = "REPLY:FETCH_START", + [RD_KAFKA_OP_FETCH_STOP] = "REPLY:FETCH_STOP", + [RD_KAFKA_OP_SEEK] = "REPLY:SEEK", + [RD_KAFKA_OP_PAUSE] = "REPLY:PAUSE", + [RD_KAFKA_OP_OFFSET_FETCH] = "REPLY:OFFSET_FETCH", + [RD_KAFKA_OP_PARTITION_JOIN] = "REPLY:PARTITION_JOIN", + [RD_KAFKA_OP_PARTITION_LEAVE] = "REPLY:PARTITION_LEAVE", + [RD_KAFKA_OP_REBALANCE] = "REPLY:REBALANCE", + [RD_KAFKA_OP_TERMINATE] = "REPLY:TERMINATE", + [RD_KAFKA_OP_COORD_QUERY] = "REPLY:COORD_QUERY", + [RD_KAFKA_OP_SUBSCRIBE] = "REPLY:SUBSCRIBE", + [RD_KAFKA_OP_ASSIGN] = "REPLY:ASSIGN", + [RD_KAFKA_OP_GET_SUBSCRIPTION] = "REPLY:GET_SUBSCRIPTION", + [RD_KAFKA_OP_GET_ASSIGNMENT] = "REPLY:GET_ASSIGNMENT", + [RD_KAFKA_OP_THROTTLE] = "REPLY:THROTTLE", + [RD_KAFKA_OP_NAME] = "REPLY:NAME", + [RD_KAFKA_OP_CG_METADATA] = "REPLY:CG_METADATA", + [RD_KAFKA_OP_OFFSET_RESET] = "REPLY:OFFSET_RESET", + [RD_KAFKA_OP_METADATA] = "REPLY:METADATA", + [RD_KAFKA_OP_LOG] = "REPLY:LOG", + [RD_KAFKA_OP_WAKEUP] = "REPLY:WAKEUP", + [RD_KAFKA_OP_CREATETOPICS] = "REPLY:CREATETOPICS", + [RD_KAFKA_OP_DELETETOPICS] = "REPLY:DELETETOPICS", + [RD_KAFKA_OP_CREATEPARTITIONS] = "REPLY:CREATEPARTITIONS", + [RD_KAFKA_OP_ALTERCONFIGS] = "REPLY:ALTERCONFIGS", + [RD_KAFKA_OP_DESCRIBECONFIGS] = "REPLY:DESCRIBECONFIGS", + [RD_KAFKA_OP_DELETERECORDS] = "REPLY:DELETERECORDS", + [RD_KAFKA_OP_DELETEGROUPS] = "REPLY:DELETEGROUPS", + [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] = "REPLY:DELETECONSUMERGROUPOFFSETS", - [RD_KAFKA_OP_ADMIN_FANOUT] = 
"REPLY:ADMIN_FANOUT", - [RD_KAFKA_OP_ADMIN_RESULT] = "REPLY:ADMIN_RESULT", - [RD_KAFKA_OP_PURGE] = "REPLY:PURGE", - [RD_KAFKA_OP_CONNECT] = "REPLY:CONNECT", - [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = "REPLY:OAUTHBEARER_REFRESH", - [RD_KAFKA_OP_MOCK] = "REPLY:MOCK", - [RD_KAFKA_OP_BROKER_MONITOR] = "REPLY:BROKER_MONITOR", - [RD_KAFKA_OP_TXN] = "REPLY:TXN", - [RD_KAFKA_OP_GET_REBALANCE_PROTOCOL] = + [RD_KAFKA_OP_ADMIN_FANOUT] = "REPLY:ADMIN_FANOUT", + [RD_KAFKA_OP_ADMIN_RESULT] = "REPLY:ADMIN_RESULT", + [RD_KAFKA_OP_PURGE] = "REPLY:PURGE", + [RD_KAFKA_OP_CONNECT] = "REPLY:CONNECT", + [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = "REPLY:OAUTHBEARER_REFRESH", + [RD_KAFKA_OP_MOCK] = "REPLY:MOCK", + [RD_KAFKA_OP_BROKER_MONITOR] = "REPLY:BROKER_MONITOR", + [RD_KAFKA_OP_TXN] = "REPLY:TXN", + [RD_KAFKA_OP_GET_REBALANCE_PROTOCOL] = "REPLY:GET_REBALANCE_PROTOCOL", - [RD_KAFKA_OP_LEADERS] = "REPLY:LEADERS", - [RD_KAFKA_OP_BARRIER] = "REPLY:BARRIER", + [RD_KAFKA_OP_LEADERS] = "REPLY:LEADERS", + [RD_KAFKA_OP_BARRIER] = "REPLY:BARRIER", }; if (type & RD_KAFKA_OP_REPLY) @@ -103,142 +103,138 @@ const char *rd_kafka_op2str (rd_kafka_op_type_t type) { rd_assert((names[type & ~RD_KAFKA_OP_FLAGMASK] != NULL) || !*"add OP type to rd_kafka_op2str()"); - return names[type & ~RD_KAFKA_OP_FLAGMASK]+skiplen; + return names[type & ~RD_KAFKA_OP_FLAGMASK] + skiplen; } -void rd_kafka_op_print (FILE *fp, const char *prefix, rd_kafka_op_t *rko) { - fprintf(fp, - "%s((rd_kafka_op_t*)%p)\n" - "%s Type: %s (0x%x), Version: %"PRId32"\n", - prefix, rko, - prefix, rd_kafka_op2str(rko->rko_type), rko->rko_type, - rko->rko_version); - if (rko->rko_err) - fprintf(fp, "%s Error: %s\n", - prefix, rd_kafka_err2str(rko->rko_err)); - if (rko->rko_replyq.q) - fprintf(fp, "%s Replyq %p v%d (%s)\n", - prefix, rko->rko_replyq.q, rko->rko_replyq.version, +void rd_kafka_op_print(FILE *fp, const char *prefix, rd_kafka_op_t *rko) { + fprintf(fp, + "%s((rd_kafka_op_t*)%p)\n" + "%s Type: %s (0x%x), Version: %" PRId32 "\n", + prefix, 
rko, prefix, rd_kafka_op2str(rko->rko_type), + rko->rko_type, rko->rko_version); + if (rko->rko_err) + fprintf(fp, "%s Error: %s\n", prefix, + rd_kafka_err2str(rko->rko_err)); + if (rko->rko_replyq.q) + fprintf(fp, "%s Replyq %p v%d (%s)\n", prefix, + rko->rko_replyq.q, rko->rko_replyq.version, #if ENABLE_DEVEL - rko->rko_replyq._id + rko->rko_replyq._id #else - "" + "" #endif - ); - if (rko->rko_rktp) { - fprintf(fp, "%s ((rd_kafka_toppar_t*)%p) " - "%s [%"PRId32"] v%d\n", - prefix, rko->rko_rktp, + ); + if (rko->rko_rktp) { + fprintf(fp, + "%s ((rd_kafka_toppar_t*)%p) " + "%s [%" PRId32 "] v%d\n", + prefix, rko->rko_rktp, rko->rko_rktp->rktp_rkt->rkt_topic->str, - rko->rko_rktp->rktp_partition, - rd_atomic32_get(&rko->rko_rktp->rktp_version)); - } - - switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) - { - case RD_KAFKA_OP_FETCH: - fprintf(fp, "%s Offset: %"PRId64"\n", - prefix, rko->rko_u.fetch.rkm.rkm_offset); - break; - case RD_KAFKA_OP_CONSUMER_ERR: - fprintf(fp, "%s Offset: %"PRId64"\n", - prefix, rko->rko_u.err.offset); - /* FALLTHRU */ - case RD_KAFKA_OP_ERR: - fprintf(fp, "%s Reason: %s\n", prefix, rko->rko_u.err.errstr); - break; - case RD_KAFKA_OP_DR: - fprintf(fp, "%s %"PRId32" messages on %s\n", prefix, - rko->rko_u.dr.msgq.rkmq_msg_cnt, - rko->rko_u.dr.rkt ? - rko->rko_u.dr.rkt->rkt_topic->str : "(n/a)"); - break; - case RD_KAFKA_OP_OFFSET_COMMIT: - fprintf(fp, "%s Callback: %p (opaque %p)\n", - prefix, rko->rko_u.offset_commit.cb, - rko->rko_u.offset_commit.opaque); - fprintf(fp, "%s %d partitions\n", - prefix, - rko->rko_u.offset_commit.partitions ? 
- rko->rko_u.offset_commit.partitions->cnt : 0); - break; + rko->rko_rktp->rktp_partition, + rd_atomic32_get(&rko->rko_rktp->rktp_version)); + } + + switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) { + case RD_KAFKA_OP_FETCH: + fprintf(fp, "%s Offset: %" PRId64 "\n", prefix, + rko->rko_u.fetch.rkm.rkm_offset); + break; + case RD_KAFKA_OP_CONSUMER_ERR: + fprintf(fp, "%s Offset: %" PRId64 "\n", prefix, + rko->rko_u.err.offset); + /* FALLTHRU */ + case RD_KAFKA_OP_ERR: + fprintf(fp, "%s Reason: %s\n", prefix, rko->rko_u.err.errstr); + break; + case RD_KAFKA_OP_DR: + fprintf(fp, "%s %" PRId32 " messages on %s\n", prefix, + rko->rko_u.dr.msgq.rkmq_msg_cnt, + rko->rko_u.dr.rkt ? rko->rko_u.dr.rkt->rkt_topic->str + : "(n/a)"); + break; + case RD_KAFKA_OP_OFFSET_COMMIT: + fprintf(fp, "%s Callback: %p (opaque %p)\n", prefix, + rko->rko_u.offset_commit.cb, + rko->rko_u.offset_commit.opaque); + fprintf(fp, "%s %d partitions\n", prefix, + rko->rko_u.offset_commit.partitions + ? rko->rko_u.offset_commit.partitions->cnt + : 0); + break; case RD_KAFKA_OP_LOG: - fprintf(fp, "%s Log: %%%d %s: %s\n", - prefix, rko->rko_u.log.level, - rko->rko_u.log.fac, + fprintf(fp, "%s Log: %%%d %s: %s\n", prefix, + rko->rko_u.log.level, rko->rko_u.log.fac, rko->rko_u.log.str); break; - default: - break; - } + default: + break; + } } -rd_kafka_op_t *rd_kafka_op_new0 (const char *source, rd_kafka_op_type_t type) { +rd_kafka_op_t *rd_kafka_op_new0(const char *source, rd_kafka_op_type_t type) { rd_kafka_op_t *rko; -#define _RD_KAFKA_OP_EMPTY 1234567 /* Special value to be able to assert - * on default-initialized (0) sizes - * if we forgot to add an op type to - * this list. */ +#define _RD_KAFKA_OP_EMPTY \ + 1234567 /* Special value to be able to assert \ + * on default-initialized (0) sizes \ + * if we forgot to add an op type to \ + * this list. 
*/ static const size_t op2size[RD_KAFKA_OP__END] = { - [RD_KAFKA_OP_FETCH] = sizeof(rko->rko_u.fetch), - [RD_KAFKA_OP_ERR] = sizeof(rko->rko_u.err), - [RD_KAFKA_OP_CONSUMER_ERR] = sizeof(rko->rko_u.err), - [RD_KAFKA_OP_DR] = sizeof(rko->rko_u.dr), - [RD_KAFKA_OP_STATS] = sizeof(rko->rko_u.stats), - [RD_KAFKA_OP_OFFSET_COMMIT] = sizeof(rko->rko_u.offset_commit), - [RD_KAFKA_OP_NODE_UPDATE] = sizeof(rko->rko_u.node), - [RD_KAFKA_OP_XMIT_BUF] = sizeof(rko->rko_u.xbuf), - [RD_KAFKA_OP_RECV_BUF] = sizeof(rko->rko_u.xbuf), - [RD_KAFKA_OP_XMIT_RETRY] = sizeof(rko->rko_u.xbuf), - [RD_KAFKA_OP_FETCH_START] = sizeof(rko->rko_u.fetch_start), - [RD_KAFKA_OP_FETCH_STOP] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_SEEK] = sizeof(rko->rko_u.fetch_start), - [RD_KAFKA_OP_PAUSE] = sizeof(rko->rko_u.pause), - [RD_KAFKA_OP_OFFSET_FETCH] = sizeof(rko->rko_u.offset_fetch), - [RD_KAFKA_OP_PARTITION_JOIN] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_PARTITION_LEAVE] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_REBALANCE] = sizeof(rko->rko_u.rebalance), - [RD_KAFKA_OP_TERMINATE] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_COORD_QUERY] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_SUBSCRIBE] = sizeof(rko->rko_u.subscribe), - [RD_KAFKA_OP_ASSIGN] = sizeof(rko->rko_u.assign), - [RD_KAFKA_OP_GET_SUBSCRIPTION] = sizeof(rko->rko_u.subscribe), - [RD_KAFKA_OP_GET_ASSIGNMENT] = sizeof(rko->rko_u.assign), - [RD_KAFKA_OP_THROTTLE] = sizeof(rko->rko_u.throttle), - [RD_KAFKA_OP_NAME] = sizeof(rko->rko_u.name), - [RD_KAFKA_OP_CG_METADATA] = sizeof(rko->rko_u.cg_metadata), - [RD_KAFKA_OP_OFFSET_RESET] = sizeof(rko->rko_u.offset_reset), - [RD_KAFKA_OP_METADATA] = sizeof(rko->rko_u.metadata), - [RD_KAFKA_OP_LOG] = sizeof(rko->rko_u.log), - [RD_KAFKA_OP_WAKEUP] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_CREATETOPICS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_DELETETOPICS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_CREATEPARTITIONS] = - sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_ALTERCONFIGS] = sizeof(rko->rko_u.admin_request), - 
[RD_KAFKA_OP_DESCRIBECONFIGS] = - sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_DELETERECORDS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_DELETEGROUPS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] = + [RD_KAFKA_OP_FETCH] = sizeof(rko->rko_u.fetch), + [RD_KAFKA_OP_ERR] = sizeof(rko->rko_u.err), + [RD_KAFKA_OP_CONSUMER_ERR] = sizeof(rko->rko_u.err), + [RD_KAFKA_OP_DR] = sizeof(rko->rko_u.dr), + [RD_KAFKA_OP_STATS] = sizeof(rko->rko_u.stats), + [RD_KAFKA_OP_OFFSET_COMMIT] = sizeof(rko->rko_u.offset_commit), + [RD_KAFKA_OP_NODE_UPDATE] = sizeof(rko->rko_u.node), + [RD_KAFKA_OP_XMIT_BUF] = sizeof(rko->rko_u.xbuf), + [RD_KAFKA_OP_RECV_BUF] = sizeof(rko->rko_u.xbuf), + [RD_KAFKA_OP_XMIT_RETRY] = sizeof(rko->rko_u.xbuf), + [RD_KAFKA_OP_FETCH_START] = sizeof(rko->rko_u.fetch_start), + [RD_KAFKA_OP_FETCH_STOP] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_SEEK] = sizeof(rko->rko_u.fetch_start), + [RD_KAFKA_OP_PAUSE] = sizeof(rko->rko_u.pause), + [RD_KAFKA_OP_OFFSET_FETCH] = sizeof(rko->rko_u.offset_fetch), + [RD_KAFKA_OP_PARTITION_JOIN] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_PARTITION_LEAVE] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_REBALANCE] = sizeof(rko->rko_u.rebalance), + [RD_KAFKA_OP_TERMINATE] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_COORD_QUERY] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_SUBSCRIBE] = sizeof(rko->rko_u.subscribe), + [RD_KAFKA_OP_ASSIGN] = sizeof(rko->rko_u.assign), + [RD_KAFKA_OP_GET_SUBSCRIPTION] = sizeof(rko->rko_u.subscribe), + [RD_KAFKA_OP_GET_ASSIGNMENT] = sizeof(rko->rko_u.assign), + [RD_KAFKA_OP_THROTTLE] = sizeof(rko->rko_u.throttle), + [RD_KAFKA_OP_NAME] = sizeof(rko->rko_u.name), + [RD_KAFKA_OP_CG_METADATA] = sizeof(rko->rko_u.cg_metadata), + [RD_KAFKA_OP_OFFSET_RESET] = sizeof(rko->rko_u.offset_reset), + [RD_KAFKA_OP_METADATA] = sizeof(rko->rko_u.metadata), + [RD_KAFKA_OP_LOG] = sizeof(rko->rko_u.log), + [RD_KAFKA_OP_WAKEUP] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_CREATETOPICS] = sizeof(rko->rko_u.admin_request), + 
[RD_KAFKA_OP_DELETETOPICS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_CREATEPARTITIONS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_ALTERCONFIGS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DESCRIBECONFIGS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETERECORDS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETEGROUPS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_ADMIN_FANOUT] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_ADMIN_RESULT] = sizeof(rko->rko_u.admin_result), - [RD_KAFKA_OP_PURGE] = sizeof(rko->rko_u.purge), - [RD_KAFKA_OP_CONNECT] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_MOCK] = sizeof(rko->rko_u.mock), - [RD_KAFKA_OP_BROKER_MONITOR] = - sizeof(rko->rko_u.broker_monitor), - [RD_KAFKA_OP_TXN] = sizeof(rko->rko_u.txn), - [RD_KAFKA_OP_GET_REBALANCE_PROTOCOL] = + [RD_KAFKA_OP_ADMIN_FANOUT] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_ADMIN_RESULT] = sizeof(rko->rko_u.admin_result), + [RD_KAFKA_OP_PURGE] = sizeof(rko->rko_u.purge), + [RD_KAFKA_OP_CONNECT] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_MOCK] = sizeof(rko->rko_u.mock), + [RD_KAFKA_OP_BROKER_MONITOR] = sizeof(rko->rko_u.broker_monitor), + [RD_KAFKA_OP_TXN] = sizeof(rko->rko_u.txn), + [RD_KAFKA_OP_GET_REBALANCE_PROTOCOL] = sizeof(rko->rko_u.rebalance_protocol), - [RD_KAFKA_OP_LEADERS] = sizeof(rko->rko_u.leaders), - [RD_KAFKA_OP_BARRIER] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_LEADERS] = sizeof(rko->rko_u.leaders), + [RD_KAFKA_OP_BARRIER] = _RD_KAFKA_OP_EMPTY, }; size_t tsize = op2size[type & ~RD_KAFKA_OP_FLAGMASK]; @@ -246,116 +242,115 @@ rd_kafka_op_t *rd_kafka_op_new0 (const char *source, rd_kafka_op_type_t type) { if (tsize == _RD_KAFKA_OP_EMPTY) tsize = 0; - rko = rd_calloc(1, sizeof(*rko)-sizeof(rko->rko_u)+tsize); - rko->rko_type = type; + rko = rd_calloc(1, 
sizeof(*rko) - sizeof(rko->rko_u) + tsize); + rko->rko_type = type; #if ENABLE_DEVEL rko->rko_source = source; rd_atomic32_add(&rd_kafka_op_cnt, 1); #endif - return rko; + return rko; } -void rd_kafka_op_destroy (rd_kafka_op_t *rko) { +void rd_kafka_op_destroy(rd_kafka_op_t *rko) { /* Call ops callback with ERR__DESTROY to let it * clean up its resources. */ if ((rko->rko_type & RD_KAFKA_OP_CB) && rko->rko_op_cb) { rd_kafka_op_res_t res; rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY; - res = rko->rko_op_cb(rko->rko_rk, NULL, rko); + res = rko->rko_op_cb(rko->rko_rk, NULL, rko); rd_assert(res != RD_KAFKA_OP_RES_YIELD); rd_assert(res != RD_KAFKA_OP_RES_KEEP); } - switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) - { - case RD_KAFKA_OP_FETCH: - rd_kafka_msg_destroy(NULL, &rko->rko_u.fetch.rkm); - /* Decrease refcount on rkbuf to eventually rd_free shared buf*/ - if (rko->rko_u.fetch.rkbuf) - rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY); + switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) { + case RD_KAFKA_OP_FETCH: + rd_kafka_msg_destroy(NULL, &rko->rko_u.fetch.rkm); + /* Decrease refcount on rkbuf to eventually rd_free shared buf*/ + if (rko->rko_u.fetch.rkbuf) + rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY); - break; + break; - case RD_KAFKA_OP_OFFSET_FETCH: - if (rko->rko_u.offset_fetch.partitions && - rko->rko_u.offset_fetch.do_free) - rd_kafka_topic_partition_list_destroy( - rko->rko_u.offset_fetch.partitions); - break; + case RD_KAFKA_OP_OFFSET_FETCH: + if (rko->rko_u.offset_fetch.partitions && + rko->rko_u.offset_fetch.do_free) + rd_kafka_topic_partition_list_destroy( + rko->rko_u.offset_fetch.partitions); + break; - case RD_KAFKA_OP_OFFSET_COMMIT: - RD_IF_FREE(rko->rko_u.offset_commit.partitions, - rd_kafka_topic_partition_list_destroy); + case RD_KAFKA_OP_OFFSET_COMMIT: + RD_IF_FREE(rko->rko_u.offset_commit.partitions, + rd_kafka_topic_partition_list_destroy); RD_IF_FREE(rko->rko_u.offset_commit.reason, rd_free); - break; - - case 
RD_KAFKA_OP_SUBSCRIBE: - case RD_KAFKA_OP_GET_SUBSCRIPTION: - RD_IF_FREE(rko->rko_u.subscribe.topics, - rd_kafka_topic_partition_list_destroy); - break; - - case RD_KAFKA_OP_ASSIGN: - case RD_KAFKA_OP_GET_ASSIGNMENT: - RD_IF_FREE(rko->rko_u.assign.partitions, - rd_kafka_topic_partition_list_destroy); - break; - - case RD_KAFKA_OP_REBALANCE: - RD_IF_FREE(rko->rko_u.rebalance.partitions, - rd_kafka_topic_partition_list_destroy); - break; - - case RD_KAFKA_OP_NAME: - RD_IF_FREE(rko->rko_u.name.str, rd_free); - break; - - case RD_KAFKA_OP_CG_METADATA: + break; + + case RD_KAFKA_OP_SUBSCRIBE: + case RD_KAFKA_OP_GET_SUBSCRIPTION: + RD_IF_FREE(rko->rko_u.subscribe.topics, + rd_kafka_topic_partition_list_destroy); + break; + + case RD_KAFKA_OP_ASSIGN: + case RD_KAFKA_OP_GET_ASSIGNMENT: + RD_IF_FREE(rko->rko_u.assign.partitions, + rd_kafka_topic_partition_list_destroy); + break; + + case RD_KAFKA_OP_REBALANCE: + RD_IF_FREE(rko->rko_u.rebalance.partitions, + rd_kafka_topic_partition_list_destroy); + break; + + case RD_KAFKA_OP_NAME: + RD_IF_FREE(rko->rko_u.name.str, rd_free); + break; + + case RD_KAFKA_OP_CG_METADATA: RD_IF_FREE(rko->rko_u.cg_metadata, rd_kafka_consumer_group_metadata_destroy); - break; + break; - case RD_KAFKA_OP_ERR: - case RD_KAFKA_OP_CONSUMER_ERR: - RD_IF_FREE(rko->rko_u.err.errstr, rd_free); - rd_kafka_msg_destroy(NULL, &rko->rko_u.err.rkm); - break; + case RD_KAFKA_OP_ERR: + case RD_KAFKA_OP_CONSUMER_ERR: + RD_IF_FREE(rko->rko_u.err.errstr, rd_free); + rd_kafka_msg_destroy(NULL, &rko->rko_u.err.rkm); + break; - break; + break; - case RD_KAFKA_OP_THROTTLE: - RD_IF_FREE(rko->rko_u.throttle.nodename, rd_free); - break; + case RD_KAFKA_OP_THROTTLE: + RD_IF_FREE(rko->rko_u.throttle.nodename, rd_free); + break; - case RD_KAFKA_OP_STATS: - RD_IF_FREE(rko->rko_u.stats.json, rd_free); - break; + case RD_KAFKA_OP_STATS: + RD_IF_FREE(rko->rko_u.stats.json, rd_free); + break; - case RD_KAFKA_OP_XMIT_RETRY: - case RD_KAFKA_OP_XMIT_BUF: - case RD_KAFKA_OP_RECV_BUF: 
- if (rko->rko_u.xbuf.rkbuf) - rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY); + case RD_KAFKA_OP_XMIT_RETRY: + case RD_KAFKA_OP_XMIT_BUF: + case RD_KAFKA_OP_RECV_BUF: + if (rko->rko_u.xbuf.rkbuf) + rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY); - RD_IF_FREE(rko->rko_u.xbuf.rkbuf, rd_kafka_buf_destroy); - break; + RD_IF_FREE(rko->rko_u.xbuf.rkbuf, rd_kafka_buf_destroy); + break; - case RD_KAFKA_OP_DR: - rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq); - if (rko->rko_u.dr.do_purge2) - rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq2); + case RD_KAFKA_OP_DR: + rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq); + if (rko->rko_u.dr.do_purge2) + rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq2); - if (rko->rko_u.dr.rkt) - rd_kafka_topic_destroy0(rko->rko_u.dr.rkt); - break; + if (rko->rko_u.dr.rkt) + rd_kafka_topic_destroy0(rko->rko_u.dr.rkt); + break; - case RD_KAFKA_OP_OFFSET_RESET: - RD_IF_FREE(rko->rko_u.offset_reset.reason, rd_free); - break; + case RD_KAFKA_OP_OFFSET_RESET: + RD_IF_FREE(rko->rko_u.offset_reset.reason, rd_free); + break; case RD_KAFKA_OP_METADATA: RD_IF_FREE(rko->rko_u.metadata.md, rd_kafka_metadata_destroy); @@ -385,7 +380,8 @@ void rd_kafka_op_destroy (rd_kafka_op_t *rko) { case RD_KAFKA_OP_ADMIN_RESULT: rd_list_destroy(&rko->rko_u.admin_result.results); RD_IF_FREE(rko->rko_u.admin_result.errstr, rd_free); - rd_assert(!rko->rko_u.admin_result.fanout_parent);; + rd_assert(!rko->rko_u.admin_result.fanout_parent); + ; break; case RD_KAFKA_OP_MOCK: @@ -413,39 +409,33 @@ void rd_kafka_op_destroy (rd_kafka_op_t *rko) { rd_kafka_topic_partition_list_destroy); break; - default: - break; - } + default: + break; + } - RD_IF_FREE(rko->rko_rktp, rd_kafka_toppar_destroy); + RD_IF_FREE(rko->rko_rktp, rd_kafka_toppar_destroy); RD_IF_FREE(rko->rko_error, rd_kafka_error_destroy); - rd_kafka_replyq_destroy(&rko->rko_replyq); + rd_kafka_replyq_destroy(&rko->rko_replyq); #if ENABLE_DEVEL if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) 
< 0) rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0"); #endif - rd_free(rko); + rd_free(rko); } - - - - - - - - /** * Propagate an error event to the application on a specific queue. */ -void rd_kafka_q_op_err (rd_kafka_q_t *rkq, rd_kafka_resp_err_t err, - const char *fmt, ...) { +void rd_kafka_q_op_err(rd_kafka_q_t *rkq, + rd_kafka_resp_err_t err, + const char *fmt, + ...) { va_list ap; char buf[2048]; rd_kafka_op_t *rko; @@ -454,8 +444,8 @@ void rd_kafka_q_op_err (rd_kafka_q_t *rkq, rd_kafka_resp_err_t err, rd_vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); - rko = rd_kafka_op_new(RD_KAFKA_OP_ERR); - rko->rko_err = err; + rko = rd_kafka_op_new(RD_KAFKA_OP_ERR); + rko->rko_err = err; rko->rko_u.err.errstr = rd_strdup(buf); rd_kafka_q_enq(rkq, rko); @@ -476,10 +466,15 @@ void rd_kafka_q_op_err (rd_kafka_q_t *rkq, rd_kafka_resp_err_t err, * * @sa rd_kafka_q_op_err() */ -void rd_kafka_consumer_err (rd_kafka_q_t *rkq, int32_t broker_id, - rd_kafka_resp_err_t err, int32_t version, - const char *topic, rd_kafka_toppar_t *rktp, - int64_t offset, const char *fmt, ...) { +void rd_kafka_consumer_err(rd_kafka_q_t *rkq, + int32_t broker_id, + rd_kafka_resp_err_t err, + int32_t version, + const char *topic, + rd_kafka_toppar_t *rktp, + int64_t offset, + const char *fmt, + ...) 
{ va_list ap; char buf[2048]; rd_kafka_op_t *rko; @@ -488,9 +483,9 @@ void rd_kafka_consumer_err (rd_kafka_q_t *rkq, int32_t broker_id, rd_vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); - rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR); - rko->rko_version = version; - rko->rko_err = err; + rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR); + rko->rko_version = version; + rko->rko_err = err; rko->rko_u.err.offset = offset; rko->rko_u.err.errstr = rd_strdup(buf); rko->rko_u.err.rkm.rkm_broker_id = broker_id; @@ -499,8 +494,8 @@ void rd_kafka_consumer_err (rd_kafka_q_t *rkq, int32_t broker_id, rko->rko_rktp = rd_kafka_toppar_keep(rktp); else if (topic) rko->rko_u.err.rkm.rkm_rkmessage.rkt = - (rd_kafka_topic_t *)rd_kafka_lwtopic_new(rkq->rkq_rk, - topic); + (rd_kafka_topic_t *)rd_kafka_lwtopic_new(rkq->rkq_rk, + topic); rd_kafka_q_enq(rkq, rko); @@ -513,15 +508,15 @@ void rd_kafka_consumer_err (rd_kafka_q_t *rkq, int32_t broker_id, * RD_KAFKA_OP_CB, else the reply type will be the original rko_type OR:ed * with RD_KAFKA_OP_REPLY. 
*/ -rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig, - rd_kafka_resp_err_t err) { +rd_kafka_op_t *rd_kafka_op_new_reply(rd_kafka_op_t *rko_orig, + rd_kafka_resp_err_t err) { rd_kafka_op_t *rko; rko = rd_kafka_op_new(rko_orig->rko_type | RD_KAFKA_OP_REPLY); - rd_kafka_op_get_reply_version(rko, rko_orig); - rko->rko_err = err; - if (rko_orig->rko_rktp) - rko->rko_rktp = rd_kafka_toppar_keep(rko_orig->rko_rktp); + rd_kafka_op_get_reply_version(rko, rko_orig); + rko->rko_err = err; + if (rko_orig->rko_rktp) + rko->rko_rktp = rd_kafka_toppar_keep(rko_orig->rko_rktp); return rko; } @@ -530,13 +525,13 @@ rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig, /** * @brief Create new callback op for type \p type */ -rd_kafka_op_t *rd_kafka_op_new_cb (rd_kafka_t *rk, - rd_kafka_op_type_t type, - rd_kafka_op_cb_t *cb) { +rd_kafka_op_t *rd_kafka_op_new_cb(rd_kafka_t *rk, + rd_kafka_op_type_t type, + rd_kafka_op_cb_t *cb) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(type | RD_KAFKA_OP_CB); + rko = rd_kafka_op_new(type | RD_KAFKA_OP_CB); rko->rko_op_cb = cb; - rko->rko_rk = rk; + rko->rko_rk = rk; return rko; } @@ -549,8 +544,7 @@ rd_kafka_op_t *rd_kafka_op_new_cb (rd_kafka_t *rk, * * @returns 1 if op was enqueued, else 0 and rko is destroyed. */ -int rd_kafka_op_reply (rd_kafka_op_t *rko, - rd_kafka_resp_err_t err) { +int rd_kafka_op_reply(rd_kafka_op_t *rko, rd_kafka_resp_err_t err) { if (!rko->rko_replyq.q) { rd_kafka_op_destroy(rko); @@ -561,7 +555,7 @@ int rd_kafka_op_reply (rd_kafka_op_t *rko, rko->rko_err = err; rko->rko_error = NULL; - return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0); + return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0); } @@ -574,8 +568,7 @@ int rd_kafka_op_reply (rd_kafka_op_t *rko, * * @returns 1 if op was enqueued, else 0 and rko is destroyed. 
*/ -int rd_kafka_op_error_reply (rd_kafka_op_t *rko, - rd_kafka_error_t *error) { +int rd_kafka_op_error_reply(rd_kafka_op_t *rko, rd_kafka_error_t *error) { if (!rko->rko_replyq.q) { RD_IF_FREE(error, rd_kafka_error_destroy); @@ -584,8 +577,8 @@ int rd_kafka_op_error_reply (rd_kafka_op_t *rko, } rko->rko_type |= (rko->rko_op_cb ? RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY); - rko->rko_err = error ? rd_kafka_error_code(error) - : RD_KAFKA_RESP_ERR_NO_ERROR; + rko->rko_err = + error ? rd_kafka_error_code(error) : RD_KAFKA_RESP_ERR_NO_ERROR; rko->rko_error = error; return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0); @@ -597,10 +590,10 @@ int rd_kafka_op_error_reply (rd_kafka_op_t *rko, * * @returns response on success or NULL if destq is disabled. */ -rd_kafka_op_t *rd_kafka_op_req0 (rd_kafka_q_t *destq, - rd_kafka_q_t *recvq, - rd_kafka_op_t *rko, - int timeout_ms) { +rd_kafka_op_t *rd_kafka_op_req0(rd_kafka_q_t *destq, + rd_kafka_q_t *recvq, + rd_kafka_op_t *rko, + int timeout_ms) { rd_kafka_op_t *reply; /* Indicate to destination where to send reply. */ @@ -621,9 +614,8 @@ rd_kafka_op_t *rd_kafka_op_req0 (rd_kafka_q_t *destq, * Send request to queue, wait for response. * Creates a temporary reply queue. */ -rd_kafka_op_t *rd_kafka_op_req (rd_kafka_q_t *destq, - rd_kafka_op_t *rko, - int timeout_ms) { +rd_kafka_op_t * +rd_kafka_op_req(rd_kafka_q_t *destq, rd_kafka_op_t *rko, int timeout_ms) { rd_kafka_q_t *recvq; rd_kafka_op_t *reply; @@ -640,7 +632,7 @@ rd_kafka_op_t *rd_kafka_op_req (rd_kafka_q_t *destq, /** * Send simple type-only request to queue, wait for response. */ -rd_kafka_op_t *rd_kafka_op_req2 (rd_kafka_q_t *destq, rd_kafka_op_type_t type) { +rd_kafka_op_t *rd_kafka_op_req2(rd_kafka_q_t *destq, rd_kafka_op_type_t type) { rd_kafka_op_t *rko; rko = rd_kafka_op_new(type); @@ -651,13 +643,13 @@ rd_kafka_op_t *rd_kafka_op_req2 (rd_kafka_q_t *destq, rd_kafka_op_type_t type) { /** * Destroys the rko and returns its err. 
*/ -rd_kafka_resp_err_t rd_kafka_op_err_destroy (rd_kafka_op_t *rko) { +rd_kafka_resp_err_t rd_kafka_op_err_destroy(rd_kafka_op_t *rko) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT; - if (rko) { - err = rko->rko_err; - rd_kafka_op_destroy(rko); - } + if (rko) { + err = rko->rko_err; + rd_kafka_op_destroy(rko); + } return err; } @@ -665,25 +657,24 @@ rd_kafka_resp_err_t rd_kafka_op_err_destroy (rd_kafka_op_t *rko) { /** * Destroys the rko and returns its error object or NULL if no error. */ -rd_kafka_error_t *rd_kafka_op_error_destroy (rd_kafka_op_t *rko) { +rd_kafka_error_t *rd_kafka_op_error_destroy(rd_kafka_op_t *rko) { if (rko) { rd_kafka_error_t *error = rko->rko_error; - rko->rko_error = NULL; + rko->rko_error = NULL; rd_kafka_op_destroy(rko); return error; } - return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__TIMED_OUT, - "Operation timed out"); + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__TIMED_OUT, + "Operation timed out"); } /** * Call op callback */ -rd_kafka_op_res_t rd_kafka_op_call (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +rd_kafka_op_res_t +rd_kafka_op_call(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { rd_kafka_op_res_t res; rd_assert(rko->rko_op_cb); res = rko->rko_op_cb(rk, rkq, rko); @@ -700,20 +691,15 @@ rd_kafka_op_res_t rd_kafka_op_call (rd_kafka_t *rk, rd_kafka_q_t *rkq, * control message. The rkm_flags property is set to * RD_KAFKA_MSG_F_CONTROL. 
*/ -rd_kafka_op_t * -rd_kafka_op_new_ctrl_msg (rd_kafka_toppar_t *rktp, - int32_t version, - rd_kafka_buf_t *rkbuf, - int64_t offset) { +rd_kafka_op_t *rd_kafka_op_new_ctrl_msg(rd_kafka_toppar_t *rktp, + int32_t version, + rd_kafka_buf_t *rkbuf, + int64_t offset) { rd_kafka_msg_t *rkm; rd_kafka_op_t *rko; - rko = rd_kafka_op_new_fetch_msg( - &rkm, - rktp, version, rkbuf, - offset, - 0, NULL, - 0, NULL); + rko = rd_kafka_op_new_fetch_msg(&rkm, rktp, version, rkbuf, offset, 0, + NULL, 0, NULL); rkm->rkm_flags |= RD_KAFKA_MSG_F_CONTROL; @@ -727,22 +713,23 @@ rd_kafka_op_new_ctrl_msg (rd_kafka_toppar_t *rktp, * @param rkmp will be set to the embedded rkm in the rko (for convenience) * @param offset may be updated later if relative offset. */ -rd_kafka_op_t * -rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp, - rd_kafka_toppar_t *rktp, - int32_t version, - rd_kafka_buf_t *rkbuf, - int64_t offset, - size_t key_len, const void *key, - size_t val_len, const void *val) { +rd_kafka_op_t *rd_kafka_op_new_fetch_msg(rd_kafka_msg_t **rkmp, + rd_kafka_toppar_t *rktp, + int32_t version, + rd_kafka_buf_t *rkbuf, + int64_t offset, + size_t key_len, + const void *key, + size_t val_len, + const void *val) { rd_kafka_msg_t *rkm; rd_kafka_op_t *rko; - rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH); + rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH); rko->rko_rktp = rd_kafka_toppar_keep(rktp); rko->rko_version = version; - rkm = &rko->rko_u.fetch.rkm; - *rkmp = rkm; + rkm = &rko->rko_u.fetch.rkm; + *rkmp = rkm; /* Since all the ops share the same payload buffer * a refcnt is used on the rkbuf that makes sure all @@ -752,14 +739,14 @@ rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp, rko->rko_u.fetch.rkbuf = rkbuf; rd_kafka_buf_keep(rkbuf); - rkm->rkm_offset = offset; + rkm->rkm_offset = offset; - rkm->rkm_key = (void *)key; - rkm->rkm_key_len = key_len; + rkm->rkm_key = (void *)key; + rkm->rkm_key_len = key_len; - rkm->rkm_payload = (void *)val; - rkm->rkm_len = val_len; - rko->rko_len = 
(int32_t)rkm->rkm_len; + rkm->rkm_payload = (void *)val; + rkm->rkm_len = val_len; + rko->rko_len = (int32_t)rkm->rkm_len; rkm->rkm_partition = rktp->rktp_partition; @@ -774,39 +761,41 @@ rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp, /** * Enqueue ERR__THROTTLE op, if desired. */ -void rd_kafka_op_throttle_time (rd_kafka_broker_t *rkb, - rd_kafka_q_t *rkq, - int throttle_time) { - rd_kafka_op_t *rko; +void rd_kafka_op_throttle_time(rd_kafka_broker_t *rkb, + rd_kafka_q_t *rkq, + int throttle_time) { + rd_kafka_op_t *rko; if (unlikely(throttle_time > 0)) rd_avg_add(&rkb->rkb_avg_throttle, throttle_time); - /* We send throttle events when: - * - throttle_time > 0 - * - throttle_time == 0 and last throttle_time > 0 - */ - if (!rkb->rkb_rk->rk_conf.throttle_cb || - (!throttle_time && !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle))) - return; + /* We send throttle events when: + * - throttle_time > 0 + * - throttle_time == 0 and last throttle_time > 0 + */ + if (!rkb->rkb_rk->rk_conf.throttle_cb || + (!throttle_time && + !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle))) + return; - rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time); + rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time); - rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE); + rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH); - rko->rko_u.throttle.nodename = rd_strdup(rkb->rkb_nodename); - rko->rko_u.throttle.nodeid = rkb->rkb_nodeid; - rko->rko_u.throttle.throttle_time = throttle_time; - rd_kafka_q_enq(rkq, rko); + rko->rko_u.throttle.nodename = rd_strdup(rkb->rkb_nodename); + rko->rko_u.throttle.nodeid = rkb->rkb_nodeid; + rko->rko_u.throttle.throttle_time = throttle_time; + rd_kafka_q_enq(rkq, rko); } /** * @brief Handle standard op types. 
*/ -rd_kafka_op_res_t -rd_kafka_op_handle_std (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, int cb_type) { +rd_kafka_op_res_t rd_kafka_op_handle_std(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + int cb_type) { if (cb_type == RD_KAFKA_Q_CB_FORCE_RETURN) return RD_KAFKA_OP_RES_PASS; else if (unlikely(rd_kafka_op_is_ctrl_msg(rko))) { @@ -815,7 +804,7 @@ rd_kafka_op_handle_std (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_offset_store(rk, rko); return RD_KAFKA_OP_RES_HANDLED; } else if (cb_type != RD_KAFKA_Q_CB_EVENT && - rko->rko_type & RD_KAFKA_OP_CB) + rko->rko_type & RD_KAFKA_OP_CB) return rd_kafka_op_call(rk, rkq, rko); else if (rko->rko_type == RD_KAFKA_OP_RECV_BUF) /* Handle Response */ rd_kafka_buf_handle_op(rko, rko->rko_err); @@ -843,15 +832,17 @@ rd_kafka_op_handle_std (rd_kafka_t *rk, rd_kafka_q_t *rkq, * or YIELD if op was handled (maybe destroyed or re-enqueued) * and caller must propagate yield upwards (cancel and return). */ -rd_kafka_op_res_t -rd_kafka_op_handle (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque, - rd_kafka_q_serve_cb_t *callback) { +rd_kafka_op_res_t rd_kafka_op_handle(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque, + rd_kafka_q_serve_cb_t *callback) { rd_kafka_op_res_t res; if (rko->rko_serve) { - callback = rko->rko_serve; - opaque = rko->rko_serve_opaque; + callback = rko->rko_serve; + opaque = rko->rko_serve_opaque; rko->rko_serve = NULL; rko->rko_serve_opaque = NULL; } @@ -860,7 +851,8 @@ rd_kafka_op_handle (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, if (res == RD_KAFKA_OP_RES_KEEP) { /* Op was handled but must not be destroyed. 
*/ return res; - } if (res == RD_KAFKA_OP_RES_HANDLED) { + } + if (res == RD_KAFKA_OP_RES_HANDLED) { rd_kafka_op_destroy(rko); return res; } else if (unlikely(res == RD_KAFKA_OP_RES_YIELD)) @@ -878,23 +870,23 @@ rd_kafka_op_handle (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, * * @locks rktp_lock and rk_lock MUST NOT be held */ -void rd_kafka_op_offset_store (rd_kafka_t *rk, rd_kafka_op_t *rko) { - rd_kafka_toppar_t *rktp; +void rd_kafka_op_offset_store(rd_kafka_t *rk, rd_kafka_op_t *rko) { + rd_kafka_toppar_t *rktp; int64_t offset; - if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH || rko->rko_err)) - return; + if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH || rko->rko_err)) + return; - rktp = rko->rko_rktp; + rktp = rko->rko_rktp; - if (unlikely(!rk)) - rk = rktp->rktp_rkt->rkt_rk; + if (unlikely(!rk)) + rk = rktp->rktp_rkt->rkt_rk; offset = rko->rko_u.fetch.rkm.rkm_rkmessage.offset + 1; - rd_kafka_toppar_lock(rktp); - rktp->rktp_app_offset = offset; - if (rk->rk_conf.enable_auto_offset_store) - rd_kafka_offset_store0(rktp, offset, 0/*no lock*/); - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_lock(rktp); + rktp->rktp_app_offset = offset; + if (rk->rk_conf.enable_auto_offset_store) + rd_kafka_offset_store0(rktp, offset, 0 /*no lock*/); + rd_kafka_toppar_unlock(rktp); } diff --git a/src/rdkafka_op.h b/src/rdkafka_op.h index 00fdb09400..5ce8aed817 100644 --- a/src/rdkafka_op.h +++ b/src/rdkafka_op.h @@ -44,18 +44,17 @@ typedef struct rd_kafka_op_s rd_kafka_op_t; * struct as-is and grabs hold of the existing .q refcount. * Think of replyq as a (Q,VERSION) tuple. */ typedef struct rd_kafka_replyq_s { - rd_kafka_q_t *q; - int32_t version; + rd_kafka_q_t *q; + int32_t version; #if ENABLE_DEVEL - char *_id; /* Devel id used for debugging reference leaks. - * Is a strdup() of the caller's function name, - * which makes for easy debugging with valgrind. */ + char *_id; /* Devel id used for debugging reference leaks. 
+ * Is a strdup() of the caller's function name, + * which makes for easy debugging with valgrind. */ #endif } rd_kafka_replyq_t; - /** * Flags used by: * - rd_kafka_op_t.rko_flags @@ -67,63 +66,66 @@ typedef struct rd_kafka_replyq_s { #define RD_KAFKA_OP_F_BLOCKING 0x8 /* rkbuf: blocking protocol request */ #define RD_KAFKA_OP_F_REPROCESS 0x10 /* cgrp: Reprocess at a later time. */ #define RD_KAFKA_OP_F_SENT 0x20 /* rkbuf: request sent on wire */ -#define RD_KAFKA_OP_F_FLEXVER 0x40 /* rkbuf: flexible protocol version - * (KIP-482) */ -#define RD_KAFKA_OP_F_NEED_MAKE 0x80 /* rkbuf: request content has not - * been made yet, the make - * callback will be triggered - * to construct the request - * right before it is sent. */ -#define RD_KAFKA_OP_F_FORCE_CB 0x100 /* rko: force callback even if - * op type is eventable. */ +#define RD_KAFKA_OP_F_FLEXVER \ + 0x40 /* rkbuf: flexible protocol version \ + * (KIP-482) */ +#define RD_KAFKA_OP_F_NEED_MAKE \ + 0x80 /* rkbuf: request content has not \ + * been made yet, the make \ + * callback will be triggered \ + * to construct the request \ + * right before it is sent. */ +#define RD_KAFKA_OP_F_FORCE_CB \ + 0x100 /* rko: force callback even if \ + * op type is eventable. 
*/ typedef enum { - RD_KAFKA_OP_NONE, /* No specific type, use OP_CB */ - RD_KAFKA_OP_FETCH, /* Kafka thread -> Application */ - RD_KAFKA_OP_ERR, /* Kafka thread -> Application */ + RD_KAFKA_OP_NONE, /* No specific type, use OP_CB */ + RD_KAFKA_OP_FETCH, /* Kafka thread -> Application */ + RD_KAFKA_OP_ERR, /* Kafka thread -> Application */ RD_KAFKA_OP_CONSUMER_ERR, /* Kafka thread -> Application */ - RD_KAFKA_OP_DR, /* Kafka thread -> Application - * Produce message delivery report */ - RD_KAFKA_OP_STATS, /* Kafka thread -> Application */ + RD_KAFKA_OP_DR, /* Kafka thread -> Application + * Produce message delivery report */ + RD_KAFKA_OP_STATS, /* Kafka thread -> Application */ RD_KAFKA_OP_OFFSET_COMMIT, /* any -> toppar's Broker thread */ RD_KAFKA_OP_NODE_UPDATE, /* any -> Broker thread: node update */ RD_KAFKA_OP_XMIT_BUF, /* transmit buffer: any -> broker thread */ RD_KAFKA_OP_RECV_BUF, /* received response buffer: broker thr -> any */ - RD_KAFKA_OP_XMIT_RETRY, /* retry buffer xmit: any -> broker thread */ - RD_KAFKA_OP_FETCH_START, /* Application -> toppar's handler thread */ - RD_KAFKA_OP_FETCH_STOP, /* Application -> toppar's handler thread */ - RD_KAFKA_OP_SEEK, /* Application -> toppar's handler thread */ - RD_KAFKA_OP_PAUSE, /* Application -> toppar's handler thread */ + RD_KAFKA_OP_XMIT_RETRY, /* retry buffer xmit: any -> broker thread */ + RD_KAFKA_OP_FETCH_START, /* Application -> toppar's handler thread */ + RD_KAFKA_OP_FETCH_STOP, /* Application -> toppar's handler thread */ + RD_KAFKA_OP_SEEK, /* Application -> toppar's handler thread */ + RD_KAFKA_OP_PAUSE, /* Application -> toppar's handler thread */ RD_KAFKA_OP_OFFSET_FETCH, /* Broker -> broker thread: fetch offsets * for topic. 
*/ - RD_KAFKA_OP_PARTITION_JOIN, /* * -> cgrp op: add toppar to cgrp - * * -> broker op: add toppar to broker */ - RD_KAFKA_OP_PARTITION_LEAVE, /* * -> cgrp op: remove toppar from cgrp - * * -> broker op: remove toppar from rkb*/ - RD_KAFKA_OP_REBALANCE, /* broker thread -> app: - * group rebalance */ - RD_KAFKA_OP_TERMINATE, /* For generic use */ - RD_KAFKA_OP_COORD_QUERY, /* Query for coordinator */ - RD_KAFKA_OP_SUBSCRIBE, /* New subscription */ - RD_KAFKA_OP_ASSIGN, /* New assignment */ - RD_KAFKA_OP_GET_SUBSCRIPTION,/* Get current subscription. - * Reuses u.subscribe */ - RD_KAFKA_OP_GET_ASSIGNMENT, /* Get current assignment. - * Reuses u.assign */ - RD_KAFKA_OP_THROTTLE, /* Throttle info */ - RD_KAFKA_OP_NAME, /* Request name */ - RD_KAFKA_OP_CG_METADATA, /**< Request consumer metadata */ - RD_KAFKA_OP_OFFSET_RESET, /* Offset reset */ - RD_KAFKA_OP_METADATA, /* Metadata response */ - RD_KAFKA_OP_LOG, /* Log */ - RD_KAFKA_OP_WAKEUP, /* Wake-up signaling */ - RD_KAFKA_OP_CREATETOPICS, /**< Admin: CreateTopics: u.admin_request*/ - RD_KAFKA_OP_DELETETOPICS, /**< Admin: DeleteTopics: u.admin_request*/ - RD_KAFKA_OP_CREATEPARTITIONS,/**< Admin: CreatePartitions: - * u.admin_request*/ + RD_KAFKA_OP_PARTITION_JOIN, /* * -> cgrp op: add toppar to cgrp + * * -> broker op: add toppar to broker */ + RD_KAFKA_OP_PARTITION_LEAVE, /* * -> cgrp op: remove toppar from cgrp + * * -> broker op: remove toppar from rkb*/ + RD_KAFKA_OP_REBALANCE, /* broker thread -> app: + * group rebalance */ + RD_KAFKA_OP_TERMINATE, /* For generic use */ + RD_KAFKA_OP_COORD_QUERY, /* Query for coordinator */ + RD_KAFKA_OP_SUBSCRIBE, /* New subscription */ + RD_KAFKA_OP_ASSIGN, /* New assignment */ + RD_KAFKA_OP_GET_SUBSCRIPTION, /* Get current subscription. + * Reuses u.subscribe */ + RD_KAFKA_OP_GET_ASSIGNMENT, /* Get current assignment. 
+ * Reuses u.assign */ + RD_KAFKA_OP_THROTTLE, /* Throttle info */ + RD_KAFKA_OP_NAME, /* Request name */ + RD_KAFKA_OP_CG_METADATA, /**< Request consumer metadata */ + RD_KAFKA_OP_OFFSET_RESET, /* Offset reset */ + RD_KAFKA_OP_METADATA, /* Metadata response */ + RD_KAFKA_OP_LOG, /* Log */ + RD_KAFKA_OP_WAKEUP, /* Wake-up signaling */ + RD_KAFKA_OP_CREATETOPICS, /**< Admin: CreateTopics: u.admin_request*/ + RD_KAFKA_OP_DELETETOPICS, /**< Admin: DeleteTopics: u.admin_request*/ + RD_KAFKA_OP_CREATEPARTITIONS, /**< Admin: CreatePartitions: + * u.admin_request*/ RD_KAFKA_OP_ALTERCONFIGS, /**< Admin: AlterConfigs: u.admin_request*/ RD_KAFKA_OP_DESCRIBECONFIGS, /**< Admin: DescribeConfigs: * u.admin_request*/ @@ -133,24 +135,24 @@ typedef enum { RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS, /**< Admin: * DeleteConsumerGroupOffsets * u.admin_request */ - RD_KAFKA_OP_ADMIN_FANOUT, /**< Admin: fanout request */ - RD_KAFKA_OP_ADMIN_RESULT, /**< Admin API .._result_t */ - RD_KAFKA_OP_PURGE, /**< Purge queues */ - RD_KAFKA_OP_CONNECT, /**< Connect (to broker) */ - RD_KAFKA_OP_OAUTHBEARER_REFRESH, /**< Refresh OAUTHBEARER token */ - RD_KAFKA_OP_MOCK, /**< Mock cluster command */ - RD_KAFKA_OP_BROKER_MONITOR, /**< Broker state change */ - RD_KAFKA_OP_TXN, /**< Transaction command */ + RD_KAFKA_OP_ADMIN_FANOUT, /**< Admin: fanout request */ + RD_KAFKA_OP_ADMIN_RESULT, /**< Admin API .._result_t */ + RD_KAFKA_OP_PURGE, /**< Purge queues */ + RD_KAFKA_OP_CONNECT, /**< Connect (to broker) */ + RD_KAFKA_OP_OAUTHBEARER_REFRESH, /**< Refresh OAUTHBEARER token */ + RD_KAFKA_OP_MOCK, /**< Mock cluster command */ + RD_KAFKA_OP_BROKER_MONITOR, /**< Broker state change */ + RD_KAFKA_OP_TXN, /**< Transaction command */ RD_KAFKA_OP_GET_REBALANCE_PROTOCOL, /**< Get rebalance protocol */ - RD_KAFKA_OP_LEADERS, /**< Partition leader query */ - RD_KAFKA_OP_BARRIER, /**< Version barrier bump */ + RD_KAFKA_OP_LEADERS, /**< Partition leader query */ + RD_KAFKA_OP_BARRIER, /**< Version barrier bump */ 
RD_KAFKA_OP__END } rd_kafka_op_type_t; /* Flags used with op_type_t */ -#define RD_KAFKA_OP_CB (int)(1 << 29) /* Callback op. */ -#define RD_KAFKA_OP_REPLY (int)(1 << 30) /* Reply op. */ -#define RD_KAFKA_OP_FLAGMASK (RD_KAFKA_OP_CB | RD_KAFKA_OP_REPLY) +#define RD_KAFKA_OP_CB (int)(1 << 29) /* Callback op. */ +#define RD_KAFKA_OP_REPLY (int)(1 << 30) /* Reply op. */ +#define RD_KAFKA_OP_FLAGMASK (RD_KAFKA_OP_CB | RD_KAFKA_OP_REPLY) /** @@ -161,11 +163,11 @@ typedef enum { * facing queues (rk_rep, rkcg_q, etc). */ typedef enum { - RD_KAFKA_PRIO_NORMAL = 0, /* Normal bulk, messages, DRs, etc. */ - RD_KAFKA_PRIO_MEDIUM, /* Prioritize in front of bulk, - * still at some scale. e.g. logs, .. */ - RD_KAFKA_PRIO_HIGH, /* Small scale high priority */ - RD_KAFKA_PRIO_FLASH /* Micro scale, immediate delivery. */ + RD_KAFKA_PRIO_NORMAL = 0, /* Normal bulk, messages, DRs, etc. */ + RD_KAFKA_PRIO_MEDIUM, /* Prioritize in front of bulk, + * still at some scale. e.g. logs, .. */ + RD_KAFKA_PRIO_HIGH, /* Small scale high priority */ + RD_KAFKA_PRIO_FLASH /* Micro scale, immediate delivery. */ } rd_kafka_prio_t; @@ -192,73 +194,73 @@ typedef enum { * @brief Queue serve callback call type */ typedef enum { - RD_KAFKA_Q_CB_INVALID, /* dont use */ - RD_KAFKA_Q_CB_CALLBACK,/* trigger callback based on op */ - RD_KAFKA_Q_CB_RETURN, /* return op rather than trigger callback - * (if possible)*/ + RD_KAFKA_Q_CB_INVALID, /* dont use */ + RD_KAFKA_Q_CB_CALLBACK, /* trigger callback based on op */ + RD_KAFKA_Q_CB_RETURN, /* return op rather than trigger callback + * (if possible)*/ RD_KAFKA_Q_CB_FORCE_RETURN, /* return op, regardless of callback. */ - RD_KAFKA_Q_CB_EVENT /* like _Q_CB_RETURN but return event_t:ed op */ + RD_KAFKA_Q_CB_EVENT /* like _Q_CB_RETURN but return event_t:ed op */ } rd_kafka_q_cb_type_t; /** * @brief Queue serve callback * @remark See rd_kafka_op_res_t docs for return semantics. 
*/ -typedef rd_kafka_op_res_t -(rd_kafka_q_serve_cb_t) (rd_kafka_t *rk, - struct rd_kafka_q_s *rkq, - struct rd_kafka_op_s *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque) - RD_WARN_UNUSED_RESULT; +typedef rd_kafka_op_res_t(rd_kafka_q_serve_cb_t)(rd_kafka_t *rk, + struct rd_kafka_q_s *rkq, + struct rd_kafka_op_s *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) + RD_WARN_UNUSED_RESULT; /** * @brief Enumerates the assign op sub-types. */ typedef enum { - RD_KAFKA_ASSIGN_METHOD_ASSIGN, /**< Absolute assign/unassign */ - RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN, /**< Incremental assign */ - RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN /**< Incremental unassign */ + RD_KAFKA_ASSIGN_METHOD_ASSIGN, /**< Absolute assign/unassign */ + RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN, /**< Incremental assign */ + RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN /**< Incremental unassign */ } rd_kafka_assign_method_t; /** * @brief Op callback type */ -typedef rd_kafka_op_res_t (rd_kafka_op_cb_t) (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - struct rd_kafka_op_s *rko) - RD_WARN_UNUSED_RESULT; +typedef rd_kafka_op_res_t(rd_kafka_op_cb_t)(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + struct rd_kafka_op_s *rko) + RD_WARN_UNUSED_RESULT; /* Forward declaration */ struct rd_kafka_admin_worker_cbs; struct rd_kafka_admin_fanout_worker_cbs; -#define RD_KAFKA_OP_TYPE_ASSERT(rko,type) \ +#define RD_KAFKA_OP_TYPE_ASSERT(rko, type) \ rd_assert(((rko)->rko_type & ~RD_KAFKA_OP_FLAGMASK) == (type)) struct rd_kafka_op_s { - TAILQ_ENTRY(rd_kafka_op_s) rko_link; + TAILQ_ENTRY(rd_kafka_op_s) rko_link; - rd_kafka_op_type_t rko_type; /* Internal op type */ - rd_kafka_event_type_t rko_evtype; - int rko_flags; /* See RD_KAFKA_OP_F_... above */ - int32_t rko_version; - rd_kafka_resp_err_t rko_err; - rd_kafka_error_t *rko_error; - int32_t rko_len; /* Depends on type, typically the - * message length. */ - rd_kafka_prio_t rko_prio; /**< In-queue priority. 
- * Higher value means higher prio*/ + rd_kafka_op_type_t rko_type; /* Internal op type */ + rd_kafka_event_type_t rko_evtype; + int rko_flags; /* See RD_KAFKA_OP_F_... above */ + int32_t rko_version; + rd_kafka_resp_err_t rko_err; + rd_kafka_error_t *rko_error; + int32_t rko_len; /* Depends on type, typically the + * message length. */ + rd_kafka_prio_t rko_prio; /**< In-queue priority. + * Higher value means higher prio*/ - rd_kafka_toppar_t *rko_rktp; + rd_kafka_toppar_t *rko_rktp; /* - * Generic fields - */ + * Generic fields + */ - /* Indicates request: enqueue reply on rko_replyq.q with .version. - * .q is refcounted. */ - rd_kafka_replyq_t rko_replyq; + /* Indicates request: enqueue reply on rko_replyq.q with .version. + * .q is refcounted. */ + rd_kafka_replyq_t rko_replyq; /* Original queue's op serve callback and opaque, if any. * Mainly used for forwarded queues to use the original queue's @@ -266,50 +268,50 @@ struct rd_kafka_op_s { rd_kafka_q_serve_cb_t *rko_serve; void *rko_serve_opaque; - rd_kafka_t *rko_rk; + rd_kafka_t *rko_rk; #if ENABLE_DEVEL - const char *rko_source; /**< Where op was created */ + const char *rko_source; /**< Where op was created */ #endif /* RD_KAFKA_OP_CB */ rd_kafka_op_cb_t *rko_op_cb; - union { - struct { - rd_kafka_buf_t *rkbuf; - rd_kafka_msg_t rkm; - int evidx; - } fetch; + union { + struct { + rd_kafka_buf_t *rkbuf; + rd_kafka_msg_t rkm; + int evidx; + } fetch; - struct { - rd_kafka_topic_partition_list_t *partitions; + struct { + rd_kafka_topic_partition_list_t *partitions; /** Require stable (txn-commited) offsets */ rd_bool_t require_stable; - int do_free; /* free .partitions on destroy() */ - } offset_fetch; - - struct { - rd_kafka_topic_partition_list_t *partitions; - void (*cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque); - void *opaque; - int silent_empty; /**< Fail silently if there are no - * offsets to commit. 
*/ + int do_free; /* free .partitions on destroy() */ + } offset_fetch; + + struct { + rd_kafka_topic_partition_list_t *partitions; + void (*cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque); + void *opaque; + int silent_empty; /**< Fail silently if there are no + * offsets to commit. */ rd_ts_t ts_timeout; char *reason; - } offset_commit; + } offset_commit; - struct { - rd_kafka_topic_partition_list_t *topics; - } subscribe; /* also used for GET_SUBSCRIPTION */ + struct { + rd_kafka_topic_partition_list_t *topics; + } subscribe; /* also used for GET_SUBSCRIPTION */ - struct { - rd_kafka_topic_partition_list_t *partitions; + struct { + rd_kafka_topic_partition_list_t *partitions; rd_kafka_assign_method_t method; - } assign; /* also used for GET_ASSIGNMENT */ + } assign; /* also used for GET_ASSIGNMENT */ struct { rd_kafka_topic_partition_list_t *partitions; @@ -319,36 +321,36 @@ struct rd_kafka_op_s { const char *str; } rebalance_protocol; - struct { - char *str; - } name; + struct { + char *str; + } name; rd_kafka_consumer_group_metadata_t *cg_metadata; - struct { - int64_t offset; - char *errstr; - rd_kafka_msg_t rkm; + struct { + int64_t offset; + char *errstr; + rd_kafka_msg_t rkm; rd_kafka_topic_t *rkt; - int fatal; /**< This was a ERR__FATAL error that has - * been translated to the fatal error - * code. */ - } err; /* used for ERR and CONSUMER_ERR */ - - struct { - int throttle_time; - int32_t nodeid; - char *nodename; - } throttle; - - struct { - char *json; - size_t json_len; - } stats; - - struct { - rd_kafka_buf_t *rkbuf; - } xbuf; /* XMIT_BUF and RECV_BUF */ + int fatal; /**< This was a ERR__FATAL error that has + * been translated to the fatal error + * code. 
*/ + } err; /* used for ERR and CONSUMER_ERR */ + + struct { + int throttle_time; + int32_t nodeid; + char *nodename; + } throttle; + + struct { + char *json; + size_t json_len; + } stats; + + struct { + rd_kafka_buf_t *rkbuf; + } xbuf; /* XMIT_BUF and RECV_BUF */ /* RD_KAFKA_OP_METADATA */ struct { @@ -357,46 +359,46 @@ struct rd_kafka_op_s { * metadata requests. */ } metadata; - struct { - rd_kafka_topic_t *rkt; - rd_kafka_msgq_t msgq; - rd_kafka_msgq_t msgq2; - int do_purge2; - } dr; - - struct { - int32_t nodeid; - char nodename[RD_KAFKA_NODENAME_SIZE]; - } node; - - struct { - int64_t offset; - char *reason; - } offset_reset; - - struct { - int64_t offset; - struct rd_kafka_cgrp_s *rkcg; - } fetch_start; /* reused for SEEK */ - - struct { - int pause; - int flag; - } pause; + struct { + rd_kafka_topic_t *rkt; + rd_kafka_msgq_t msgq; + rd_kafka_msgq_t msgq2; + int do_purge2; + } dr; + + struct { + int32_t nodeid; + char nodename[RD_KAFKA_NODENAME_SIZE]; + } node; + + struct { + int64_t offset; + char *reason; + } offset_reset; + + struct { + int64_t offset; + struct rd_kafka_cgrp_s *rkcg; + } fetch_start; /* reused for SEEK */ + + struct { + int pause; + int flag; + } pause; struct { char fac[64]; - int level; + int level; char *str; - int ctx; + int ctx; } log; struct { - rd_kafka_AdminOptions_t options; /**< Copy of user's - * options */ - rd_ts_t abs_timeout; /**< Absolute timeout - * for this request. */ - rd_kafka_timer_t tmr; /**< Timeout timer */ + rd_kafka_AdminOptions_t options; /**< Copy of user's + * options */ + rd_ts_t abs_timeout; /**< Absolute timeout + * for this request. */ + rd_kafka_timer_t tmr; /**< Timeout timer */ struct rd_kafka_enq_once_s *eonce; /**< Enqueue op * only once, * used to @@ -408,9 +410,10 @@ struct rd_kafka_op_s { * controller, or * due to .tmr * timeout. */ - rd_list_t args;/**< Type depends on request, e.g. - * rd_kafka_NewTopic_t for CreateTopics - */ + rd_list_t + args; /**< Type depends on request, e.g. 
+ * rd_kafka_NewTopic_t for CreateTopics + */ rd_kafka_buf_t *reply_buf; /**< Protocol reply, * temporary reference not @@ -420,13 +423,12 @@ struct rd_kafka_op_s { struct rd_kafka_admin_worker_cbs *cbs; /** Worker state */ - enum { - RD_KAFKA_ADMIN_STATE_INIT, - RD_KAFKA_ADMIN_STATE_WAIT_BROKER, - RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER, - RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS, - RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST, - RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE, + enum { RD_KAFKA_ADMIN_STATE_INIT, + RD_KAFKA_ADMIN_STATE_WAIT_BROKER, + RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER, + RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS, + RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST, + RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE, } state; int32_t broker_id; /**< Requested broker id to @@ -488,8 +490,8 @@ struct rd_kafka_op_s { * Type depends on request. */ - char *errstr; /**< Error string, if rko_err - * is set, else NULL. */ + char *errstr; /**< Error string, if rko_err + * is set, else NULL. */ rd_list_t results; /**< Type depends on request type: * @@ -501,9 +503,9 @@ struct rd_kafka_op_s { * AlterConfigs, DescribeConfigs */ - void *opaque; /**< Application's opaque as set by - * rd_kafka_AdminOptions_set_opaque - */ + void *opaque; /**< Application's opaque as set by + * rd_kafka_AdminOptions_set_opaque + */ /** A reference to the parent ADMIN_FANOUT op that * spawned this op, if applicable. NULL otherwise. 
*/ @@ -516,17 +518,16 @@ struct rd_kafka_op_s { /**< Mock cluster command */ struct { - enum { - RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR, - RD_KAFKA_MOCK_CMD_TOPIC_CREATE, - RD_KAFKA_MOCK_CMD_PART_SET_LEADER, - RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER, - RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS, - RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN, - RD_KAFKA_MOCK_CMD_BROKER_SET_RTT, - RD_KAFKA_MOCK_CMD_BROKER_SET_RACK, - RD_KAFKA_MOCK_CMD_COORD_SET, - RD_KAFKA_MOCK_CMD_APIVERSION_SET, + enum { RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR, + RD_KAFKA_MOCK_CMD_TOPIC_CREATE, + RD_KAFKA_MOCK_CMD_PART_SET_LEADER, + RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER, + RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS, + RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN, + RD_KAFKA_MOCK_CMD_BROKER_SET_RTT, + RD_KAFKA_MOCK_CMD_BROKER_SET_RACK, + RD_KAFKA_MOCK_CMD_COORD_SET, + RD_KAFKA_MOCK_CMD_APIVERSION_SET, } cmd; rd_kafka_resp_err_t err; /**< Error for: @@ -570,7 +571,7 @@ struct rd_kafka_op_s { struct rd_kafka_broker_s *rkb; /**< Broker who's state * changed. */ /**< Callback to trigger on the op handler's thread. */ - void (*cb) (struct rd_kafka_broker_s *rkb); + void (*cb)(struct rd_kafka_broker_s *rkb); } broker_monitor; struct { @@ -578,7 +579,7 @@ struct rd_kafka_op_s { rd_kafka_consumer_group_metadata_t *cgmetadata; /** Consumer group id for AddOffsetsTo.. 
*/ char *group_id; - int timeout_ms; /**< Operation timeout */ + int timeout_ms; /**< Operation timeout */ rd_ts_t abs_timeout; /**< Absolute time */ /**< Offsets to commit */ rd_kafka_topic_partition_list_t *offsets; @@ -633,97 +634,101 @@ TAILQ_HEAD(rd_kafka_op_head_s, rd_kafka_op_s); - -const char *rd_kafka_op2str (rd_kafka_op_type_t type); -void rd_kafka_op_destroy (rd_kafka_op_t *rko); -rd_kafka_op_t *rd_kafka_op_new0 (const char *source, rd_kafka_op_type_t type); +const char *rd_kafka_op2str(rd_kafka_op_type_t type); +void rd_kafka_op_destroy(rd_kafka_op_t *rko); +rd_kafka_op_t *rd_kafka_op_new0(const char *source, rd_kafka_op_type_t type); #if ENABLE_DEVEL #define _STRINGIFYX(A) #A -#define _STRINGIFY(A) _STRINGIFYX(A) -#define rd_kafka_op_new(type) \ +#define _STRINGIFY(A) _STRINGIFYX(A) +#define rd_kafka_op_new(type) \ rd_kafka_op_new0(__FILE__ ":" _STRINGIFY(__LINE__), type) #else #define rd_kafka_op_new(type) rd_kafka_op_new0(NULL, type) #endif -rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig, - rd_kafka_resp_err_t err); -rd_kafka_op_t *rd_kafka_op_new_cb (rd_kafka_t *rk, - rd_kafka_op_type_t type, - rd_kafka_op_cb_t *cb); -int rd_kafka_op_reply (rd_kafka_op_t *rko, - rd_kafka_resp_err_t err); -int rd_kafka_op_error_reply (rd_kafka_op_t *rko, - rd_kafka_error_t *error); - -#define rd_kafka_op_set_prio(rko,prio) ((rko)->rko_prio = prio) - -#define rd_kafka_op_err(rk,err,...) do { \ - if (!((rk)->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR)) { \ - rd_kafka_log(rk, LOG_ERR, "ERROR", __VA_ARGS__); \ - break; \ - } \ - rd_kafka_q_op_err((rk)->rk_rep, err, __VA_ARGS__); \ - } while (0) - -void rd_kafka_q_op_err (rd_kafka_q_t *rkq, rd_kafka_resp_err_t err, - const char *fmt, ...) - RD_FORMAT(printf, 3, 4); -void rd_kafka_consumer_err (rd_kafka_q_t *rkq, int32_t broker_id, - rd_kafka_resp_err_t err, int32_t version, - const char *topic, rd_kafka_toppar_t *rktp, - int64_t offset, const char *fmt, ...) 
- RD_FORMAT(printf, 8, 9); -rd_kafka_op_t *rd_kafka_op_req0 (rd_kafka_q_t *destq, - rd_kafka_q_t *recvq, - rd_kafka_op_t *rko, - int timeout_ms); -rd_kafka_op_t *rd_kafka_op_req (rd_kafka_q_t *destq, - rd_kafka_op_t *rko, - int timeout_ms); -rd_kafka_op_t *rd_kafka_op_req2 (rd_kafka_q_t *destq, rd_kafka_op_type_t type); -rd_kafka_resp_err_t rd_kafka_op_err_destroy (rd_kafka_op_t *rko); -rd_kafka_error_t *rd_kafka_op_error_destroy (rd_kafka_op_t *rko); - -rd_kafka_op_res_t rd_kafka_op_call (rd_kafka_t *rk, - rd_kafka_q_t *rkq, rd_kafka_op_t *rko) - RD_WARN_UNUSED_RESULT; - -rd_kafka_op_t * -rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp, - rd_kafka_toppar_t *rktp, +rd_kafka_op_t *rd_kafka_op_new_reply(rd_kafka_op_t *rko_orig, + rd_kafka_resp_err_t err); +rd_kafka_op_t *rd_kafka_op_new_cb(rd_kafka_t *rk, + rd_kafka_op_type_t type, + rd_kafka_op_cb_t *cb); +int rd_kafka_op_reply(rd_kafka_op_t *rko, rd_kafka_resp_err_t err); +int rd_kafka_op_error_reply(rd_kafka_op_t *rko, rd_kafka_error_t *error); + +#define rd_kafka_op_set_prio(rko, prio) ((rko)->rko_prio = prio) + +#define rd_kafka_op_err(rk, err, ...) \ + do { \ + if (!((rk)->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR)) { \ + rd_kafka_log(rk, LOG_ERR, "ERROR", __VA_ARGS__); \ + break; \ + } \ + rd_kafka_q_op_err((rk)->rk_rep, err, __VA_ARGS__); \ + } while (0) + +void rd_kafka_q_op_err(rd_kafka_q_t *rkq, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 3, 4); +void rd_kafka_consumer_err(rd_kafka_q_t *rkq, + int32_t broker_id, + rd_kafka_resp_err_t err, int32_t version, - rd_kafka_buf_t *rkbuf, + const char *topic, + rd_kafka_toppar_t *rktp, int64_t offset, - size_t key_len, const void *key, - size_t val_len, const void *val); - + const char *fmt, + ...) 
RD_FORMAT(printf, 8, 9); +rd_kafka_op_t *rd_kafka_op_req0(rd_kafka_q_t *destq, + rd_kafka_q_t *recvq, + rd_kafka_op_t *rko, + int timeout_ms); rd_kafka_op_t * -rd_kafka_op_new_ctrl_msg (rd_kafka_toppar_t *rktp, - int32_t version, - rd_kafka_buf_t *rkbuf, - int64_t offset); - -void rd_kafka_op_throttle_time (struct rd_kafka_broker_s *rkb, - rd_kafka_q_t *rkq, - int throttle_time); +rd_kafka_op_req(rd_kafka_q_t *destq, rd_kafka_op_t *rko, int timeout_ms); +rd_kafka_op_t *rd_kafka_op_req2(rd_kafka_q_t *destq, rd_kafka_op_type_t type); +rd_kafka_resp_err_t rd_kafka_op_err_destroy(rd_kafka_op_t *rko); +rd_kafka_error_t *rd_kafka_op_error_destroy(rd_kafka_op_t *rko); + +rd_kafka_op_res_t rd_kafka_op_call(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) RD_WARN_UNUSED_RESULT; + +rd_kafka_op_t *rd_kafka_op_new_fetch_msg(rd_kafka_msg_t **rkmp, + rd_kafka_toppar_t *rktp, + int32_t version, + rd_kafka_buf_t *rkbuf, + int64_t offset, + size_t key_len, + const void *key, + size_t val_len, + const void *val); + +rd_kafka_op_t *rd_kafka_op_new_ctrl_msg(rd_kafka_toppar_t *rktp, + int32_t version, + rd_kafka_buf_t *rkbuf, + int64_t offset); + +void rd_kafka_op_throttle_time(struct rd_kafka_broker_s *rkb, + rd_kafka_q_t *rkq, + int throttle_time); rd_kafka_op_res_t -rd_kafka_op_handle (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque, - rd_kafka_q_serve_cb_t *callback) RD_WARN_UNUSED_RESULT; +rd_kafka_op_handle(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque, + rd_kafka_q_serve_cb_t *callback) RD_WARN_UNUSED_RESULT; extern rd_atomic32_t rd_kafka_op_cnt; -void rd_kafka_op_print (FILE *fp, const char *prefix, rd_kafka_op_t *rko); +void rd_kafka_op_print(FILE *fp, const char *prefix, rd_kafka_op_t *rko); -void rd_kafka_op_offset_store (rd_kafka_t *rk, rd_kafka_op_t *rko); +void rd_kafka_op_offset_store(rd_kafka_t *rk, rd_kafka_op_t *rko); -#define 
rd_kafka_op_is_ctrl_msg(rko) \ - ((rko)->rko_type == RD_KAFKA_OP_FETCH && \ - !(rko)->rko_err && \ +#define rd_kafka_op_is_ctrl_msg(rko) \ + ((rko)->rko_type == RD_KAFKA_OP_FETCH && !(rko)->rko_err && \ ((rko)->rko_u.fetch.rkm.rkm_flags & RD_KAFKA_MSG_F_CONTROL)) @@ -732,8 +737,8 @@ void rd_kafka_op_offset_store (rd_kafka_t *rk, rd_kafka_op_t *rko); * @returns true if the rko's replyq is valid and the * rko's rktp version (if any) is not outdated. */ -#define rd_kafka_op_replyq_is_valid(RKO) \ - (rd_kafka_replyq_is_valid(&(RKO)->rko_replyq) && \ +#define rd_kafka_op_replyq_is_valid(RKO) \ + (rd_kafka_replyq_is_valid(&(RKO)->rko_replyq) && \ !rd_kafka_op_version_outdated((RKO), 0)) #endif /* _RDKAFKA_OP_H_ */ diff --git a/src/rdkafka_partition.c b/src/rdkafka_partition.c index d0cfdd2004..d86f6dd5f6 100644 --- a/src/rdkafka_partition.c +++ b/src/rdkafka_partition.c @@ -32,55 +32,51 @@ #include "rdkafka_offset.h" #include "rdkafka_partition.h" #include "rdregex.h" -#include "rdports.h" /* rd_qsort_r() */ +#include "rdports.h" /* rd_qsort_r() */ #include "rdunittest.h" -const char *rd_kafka_fetch_states[] = { - "none", - "stopping", - "stopped", - "offset-query", - "offset-wait", - "active" -}; +const char *rd_kafka_fetch_states[] = {"none", "stopping", "stopped", + "offset-query", "offset-wait", "active"}; -static rd_kafka_op_res_t -rd_kafka_toppar_op_serve (rd_kafka_t *rk, - rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque); +static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque); -static void rd_kafka_toppar_offset_retry (rd_kafka_toppar_t *rktp, - int backoff_ms, - const char *reason); +static void rd_kafka_toppar_offset_retry(rd_kafka_toppar_t *rktp, + int backoff_ms, + const char *reason); static RD_INLINE int32_t -rd_kafka_toppar_version_new_barrier0 (rd_kafka_toppar_t *rktp, - const char *func, int line) { - int32_t version = 
rd_atomic32_add(&rktp->rktp_version, 1); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BARRIER", - "%s [%"PRId32"]: %s:%d: new version barrier v%"PRId32, - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - func, line, version); - return version; +rd_kafka_toppar_version_new_barrier0(rd_kafka_toppar_t *rktp, + const char *func, + int line) { + int32_t version = rd_atomic32_add(&rktp->rktp_version, 1); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BARRIER", + "%s [%" PRId32 "]: %s:%d: new version barrier v%" PRId32, + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, func, + line, version); + return version; } -#define rd_kafka_toppar_version_new_barrier(rktp) \ - rd_kafka_toppar_version_new_barrier0(rktp, __FUNCTION__, __LINE__) +#define rd_kafka_toppar_version_new_barrier(rktp) \ + rd_kafka_toppar_version_new_barrier0(rktp, __FUNCTION__, __LINE__) /** * Toppar based OffsetResponse handling. * This is used for updating the low water mark for consumer lag. */ -static void rd_kafka_toppar_lag_handle_Offset (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_toppar_lag_handle_Offset(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { rd_kafka_toppar_t *rktp = opaque; rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; @@ -88,8 +84,8 @@ static void rd_kafka_toppar_lag_handle_Offset (rd_kafka_t *rk, offsets = rd_kafka_topic_partition_list_new(1); /* Parse and return Offset */ - err = rd_kafka_handle_ListOffsets(rk, rkb, err, - rkbuf, request, offsets, NULL); + err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, offsets, + NULL); if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) { rd_kafka_topic_partition_list_destroy(offsets); @@ -97,9 +93,8 @@ static void rd_kafka_toppar_lag_handle_Offset (rd_kafka_t *rk, } if (!err && !(rktpar = 
rd_kafka_topic_partition_list_find( - offsets, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition))) + offsets, rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition))) err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; if (!err && !rktpar->err) { @@ -123,7 +118,7 @@ static void rd_kafka_toppar_lag_handle_Offset (rd_kafka_t *rk, * @locality toppar handle thread * @locks none */ -static void rd_kafka_toppar_consumer_lag_req (rd_kafka_toppar_t *rktp) { +static void rd_kafka_toppar_consumer_lag_req(rd_kafka_toppar_t *rktp) { rd_kafka_topic_partition_list_t *partitions; if (rktp->rktp_wait_consumer_lag_resp) @@ -139,16 +134,15 @@ static void rd_kafka_toppar_consumer_lag_req (rd_kafka_toppar_t *rktp) { */ if (!rktp->rktp_leader || (rktp->rktp_leader != rktp->rktp_broker)) { rd_kafka_toppar_unlock(rktp); - return; + return; } /* Also don't send a timed log start offset request if leader * broker supports FETCH >= v5, since this will be set when * doing fetch requests. */ - if (rd_kafka_broker_ApiVersion_supported(rktp->rktp_broker, - RD_KAFKAP_Fetch, 0, - 5, NULL) == 5) { + if (rd_kafka_broker_ApiVersion_supported( + rktp->rktp_broker, RD_KAFKAP_Fetch, 0, 5, NULL) == 5) { rd_kafka_toppar_unlock(rktp); return; } @@ -156,17 +150,15 @@ static void rd_kafka_toppar_consumer_lag_req (rd_kafka_toppar_t *rktp) { rktp->rktp_wait_consumer_lag_resp = 1; partitions = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(partitions, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition)->offset = - RD_KAFKA_OFFSET_BEGINNING; + rd_kafka_topic_partition_list_add( + partitions, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition) + ->offset = RD_KAFKA_OFFSET_BEGINNING; /* Ask for oldest offset. The newest offset is automatically * propagated in FetchResponse.HighwaterMark. 
*/ - rd_kafka_ListOffsetsRequest(rktp->rktp_broker, partitions, - RD_KAFKA_REPLYQ(rktp->rktp_ops, 0), - rd_kafka_toppar_lag_handle_Offset, - rd_kafka_toppar_keep(rktp)); + rd_kafka_ListOffsetsRequest( + rktp->rktp_broker, partitions, RD_KAFKA_REPLYQ(rktp->rktp_ops, 0), + rd_kafka_toppar_lag_handle_Offset, rd_kafka_toppar_keep(rktp)); rd_kafka_toppar_unlock(rktp); @@ -180,10 +172,10 @@ static void rd_kafka_toppar_consumer_lag_req (rd_kafka_toppar_t *rktp) { * * Locality: toppar handler thread */ -static void rd_kafka_toppar_consumer_lag_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_toppar_t *rktp = arg; - rd_kafka_toppar_consumer_lag_req(rktp); +static void rd_kafka_toppar_consumer_lag_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_toppar_t *rktp = arg; + rd_kafka_toppar_consumer_lag_req(rktp); } /** @@ -194,13 +186,12 @@ static void rd_kafka_toppar_consumer_lag_tmr_cb (rd_kafka_timers_t *rkts, * @locks_required rd_kafka_toppar_lock() must be held. * @locality Toppar handler thread */ -void rd_kafka_toppar_op_version_bump (rd_kafka_toppar_t *rktp, - int32_t version) { +void rd_kafka_toppar_op_version_bump(rd_kafka_toppar_t *rktp, int32_t version) { rd_kafka_op_t *rko; rktp->rktp_op_version = version; - rko = rd_kafka_op_new(RD_KAFKA_OP_BARRIER); - rko->rko_version = version; + rko = rd_kafka_op_new(RD_KAFKA_OP_BARRIER); + rko->rko_version = version; rd_kafka_q_enq(rktp->rktp_fetchq, rko); } @@ -211,15 +202,16 @@ void rd_kafka_toppar_op_version_bump (rd_kafka_toppar_t *rktp, * Locks: rd_kafka_topic_wrlock() must be held. * Locks: rd_kafka_wrlock() must be held. 
*/ -rd_kafka_toppar_t *rd_kafka_toppar_new0 (rd_kafka_topic_t *rkt, - int32_t partition, - const char *func, int line) { - rd_kafka_toppar_t *rktp; +rd_kafka_toppar_t *rd_kafka_toppar_new0(rd_kafka_topic_t *rkt, + int32_t partition, + const char *func, + int line) { + rd_kafka_toppar_t *rktp; - rktp = rd_calloc(1, sizeof(*rktp)); + rktp = rd_calloc(1, sizeof(*rktp)); - rktp->rktp_partition = partition; - rktp->rktp_rkt = rkt; + rktp->rktp_partition = partition; + rktp->rktp_rkt = rkt; rktp->rktp_leader_id = -1; rktp->rktp_broker_id = -1; rd_interval_init(&rktp->rktp_lease_intvl); @@ -230,33 +222,33 @@ rd_kafka_toppar_t *rd_kafka_toppar_new0 (rd_kafka_topic_t *rkt, * partition in topic metadata. */ if (partition != RD_KAFKA_PARTITION_UA) rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN; - rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_NONE; - rktp->rktp_fetch_msg_max_bytes - = rkt->rkt_rk->rk_conf.fetch_msg_max_bytes; - rktp->rktp_offset_fp = NULL; + rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_NONE; + rktp->rktp_fetch_msg_max_bytes = + rkt->rkt_rk->rk_conf.fetch_msg_max_bytes; + rktp->rktp_offset_fp = NULL; rd_kafka_offset_stats_reset(&rktp->rktp_offsets); rd_kafka_offset_stats_reset(&rktp->rktp_offsets_fin); - rktp->rktp_ls_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_hi_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_lo_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_query_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_next_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_last_next_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_app_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_stored_offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_ls_offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_hi_offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_lo_offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_query_offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_next_offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_last_next_offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_app_offset = 
RD_KAFKA_OFFSET_INVALID; + rktp->rktp_stored_offset = RD_KAFKA_OFFSET_INVALID; rktp->rktp_committing_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID; - rd_kafka_msgq_init(&rktp->rktp_msgq); - rd_kafka_msgq_init(&rktp->rktp_xmit_msgq); - mtx_init(&rktp->rktp_lock, mtx_plain); + rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID; + rd_kafka_msgq_init(&rktp->rktp_msgq); + rd_kafka_msgq_init(&rktp->rktp_xmit_msgq); + mtx_init(&rktp->rktp_lock, mtx_plain); rd_refcnt_init(&rktp->rktp_refcnt, 0); - rktp->rktp_fetchq = rd_kafka_q_new(rkt->rkt_rk); - rktp->rktp_ops = rd_kafka_q_new(rkt->rkt_rk); - rktp->rktp_ops->rkq_serve = rd_kafka_toppar_op_serve; + rktp->rktp_fetchq = rd_kafka_q_new(rkt->rkt_rk); + rktp->rktp_ops = rd_kafka_q_new(rkt->rkt_rk); + rktp->rktp_ops->rkq_serve = rd_kafka_toppar_op_serve; rktp->rktp_ops->rkq_opaque = rktp; rd_atomic32_init(&rktp->rktp_version, 1); - rktp->rktp_op_version = rd_atomic32_get(&rktp->rktp_version); + rktp->rktp_op_version = rd_atomic32_get(&rktp->rktp_version); rd_atomic32_init(&rktp->rktp_msgs_inflight, 0); rd_kafka_pid_reset(&rktp->rktp_eos.pid); @@ -276,23 +268,20 @@ rd_kafka_toppar_t *rd_kafka_toppar_new0 (rd_kafka_topic_t *rkt, int intvl = rkt->rkt_rk->rk_conf.stats_interval_ms; if (intvl < 10 * 1000 /* 10s */) intvl = 10 * 1000; - rd_kafka_timer_start(&rkt->rkt_rk->rk_timers, - &rktp->rktp_consumer_lag_tmr, - intvl * 1000ll, - rd_kafka_toppar_consumer_lag_tmr_cb, - rktp); + rd_kafka_timer_start( + &rkt->rkt_rk->rk_timers, &rktp->rktp_consumer_lag_tmr, + intvl * 1000ll, rd_kafka_toppar_consumer_lag_tmr_cb, rktp); } rktp->rktp_rkt = rd_kafka_topic_keep(rkt); - rd_kafka_q_fwd_set(rktp->rktp_ops, rkt->rkt_rk->rk_ops); + rd_kafka_q_fwd_set(rktp->rktp_ops, rkt->rkt_rk->rk_ops); rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPPARNEW", - "NEW %s [%"PRId32"] %p refcnt %p (at %s:%d)", + "NEW %s [%" PRId32 "] %p refcnt %p (at %s:%d)", rkt->rkt_topic->str, rktp->rktp_partition, rktp, - 
&rktp->rktp_refcnt, - func, line); + &rktp->rktp_refcnt, func, line); - return rd_kafka_toppar_keep(rktp); + return rd_kafka_toppar_keep(rktp); } @@ -302,53 +291,53 @@ rd_kafka_toppar_t *rd_kafka_toppar_new0 (rd_kafka_topic_t *rkt, * * Locks: rd_kafka_toppar_lock() MUST be held */ -static void rd_kafka_toppar_remove (rd_kafka_toppar_t *rktp) { +static void rd_kafka_toppar_remove(rd_kafka_toppar_t *rktp) { rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARREMOVE", - "Removing toppar %s [%"PRId32"] %p", + "Removing toppar %s [%" PRId32 "] %p", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rktp); + rktp); - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_query_tmr, 1/*lock*/); - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_consumer_lag_tmr, 1/*lock*/); + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_query_tmr, 1 /*lock*/); + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_consumer_lag_tmr, 1 /*lock*/); - rd_kafka_q_fwd_set(rktp->rktp_ops, NULL); + rd_kafka_q_fwd_set(rktp->rktp_ops, NULL); } /** * Final destructor for partition. 
*/ -void rd_kafka_toppar_destroy_final (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_destroy_final(rd_kafka_toppar_t *rktp) { rd_kafka_toppar_remove(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESTROY", - "%s [%"PRId32"]: %p DESTROY_FINAL", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, rktp); - - /* Clear queues */ - rd_kafka_assert(rktp->rktp_rkt->rkt_rk, - rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0); - rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq, - RD_KAFKA_RESP_ERR__DESTROY); - rd_kafka_q_destroy_owner(rktp->rktp_fetchq); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESTROY", + "%s [%" PRId32 "]: %p DESTROY_FINAL", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktp); + + /* Clear queues */ + rd_kafka_assert(rktp->rktp_rkt->rkt_rk, + rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0); + rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq, + RD_KAFKA_RESP_ERR__DESTROY); + rd_kafka_q_destroy_owner(rktp->rktp_fetchq); rd_kafka_q_destroy_owner(rktp->rktp_ops); - rd_kafka_replyq_destroy(&rktp->rktp_replyq); + rd_kafka_replyq_destroy(&rktp->rktp_replyq); - rd_kafka_topic_destroy0(rktp->rktp_rkt); + rd_kafka_topic_destroy0(rktp->rktp_rkt); - mtx_destroy(&rktp->rktp_lock); + mtx_destroy(&rktp->rktp_lock); if (rktp->rktp_leader) rd_kafka_broker_destroy(rktp->rktp_leader); rd_refcnt_destroy(&rktp->rktp_refcnt); - rd_free(rktp); + rd_free(rktp); } @@ -358,28 +347,27 @@ void rd_kafka_toppar_destroy_final (rd_kafka_toppar_t *rktp) { * Locality: broker thread * Locks: rd_kafka_toppar_lock() MUST be held. 
*/ -void rd_kafka_toppar_set_fetch_state (rd_kafka_toppar_t *rktp, - int fetch_state) { - rd_kafka_assert(NULL, - thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)); +void rd_kafka_toppar_set_fetch_state(rd_kafka_toppar_t *rktp, int fetch_state) { + rd_kafka_assert(NULL, + thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)); if ((int)rktp->rktp_fetch_state == fetch_state) return; - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "PARTSTATE", - "Partition %.*s [%"PRId32"] changed fetch state %s -> %s", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_fetch_states[rktp->rktp_fetch_state], - rd_kafka_fetch_states[fetch_state]); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "PARTSTATE", + "Partition %.*s [%" PRId32 "] changed fetch state %s -> %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + rd_kafka_fetch_states[fetch_state]); rktp->rktp_fetch_state = fetch_state; if (fetch_state == RD_KAFKA_TOPPAR_FETCH_ACTIVE) rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - CONSUMER|RD_KAFKA_DBG_TOPIC, - "FETCH", - "Partition %.*s [%"PRId32"] start fetching " + CONSUMER | RD_KAFKA_DBG_TOPIC, "FETCH", + "Partition %.*s [%" PRId32 + "] start fetching " "at offset %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, @@ -398,23 +386,24 @@ void rd_kafka_toppar_set_fetch_state (rd_kafka_toppar_t *rktp, * * Locks: Caller must hold rd_kafka_topic_*lock() */ -rd_kafka_toppar_t *rd_kafka_toppar_get0 (const char *func, int line, - const rd_kafka_topic_t *rkt, - int32_t partition, - int ua_on_miss) { +rd_kafka_toppar_t *rd_kafka_toppar_get0(const char *func, + int line, + const rd_kafka_topic_t *rkt, + int32_t partition, + int ua_on_miss) { rd_kafka_toppar_t *rktp; - if (partition >= 0 && partition < rkt->rkt_partition_cnt) - rktp = rkt->rkt_p[partition]; - else if (partition == RD_KAFKA_PARTITION_UA || ua_on_miss) - rktp = rkt->rkt_ua; - else - return NULL; + if (partition >= 
0 && partition < rkt->rkt_partition_cnt) + rktp = rkt->rkt_p[partition]; + else if (partition == RD_KAFKA_PARTITION_UA || ua_on_miss) + rktp = rkt->rkt_ua; + else + return NULL; - if (rktp) + if (rktp) return rd_kafka_toppar_keep_fl(func, line, rktp); - return NULL; + return NULL; } @@ -425,24 +414,23 @@ rd_kafka_toppar_t *rd_kafka_toppar_get0 (const char *func, int line, * Locality: any * Locks: none */ -rd_kafka_toppar_t *rd_kafka_toppar_get2 (rd_kafka_t *rk, - const char *topic, - int32_t partition, - int ua_on_miss, - int create_on_miss) { +rd_kafka_toppar_t *rd_kafka_toppar_get2(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int ua_on_miss, + int create_on_miss) { rd_kafka_topic_t *rkt; rd_kafka_toppar_t *rktp; rd_kafka_wrlock(rk); /* Find or create topic */ - if (unlikely(!(rkt = rd_kafka_topic_find(rk, topic, 0/*no-lock*/)))) { + if (unlikely(!(rkt = rd_kafka_topic_find(rk, topic, 0 /*no-lock*/)))) { if (!create_on_miss) { rd_kafka_wrunlock(rk); return NULL; } - rkt = rd_kafka_topic_new0(rk, topic, NULL, - NULL, 0/*no-lock*/); + rkt = rd_kafka_topic_new0(rk, topic, NULL, NULL, 0 /*no-lock*/); if (!rkt) { rd_kafka_wrunlock(rk); rd_kafka_log(rk, LOG_ERR, "TOPIC", @@ -454,13 +442,13 @@ rd_kafka_toppar_t *rd_kafka_toppar_get2 (rd_kafka_t *rk, rd_kafka_wrunlock(rk); - rd_kafka_topic_wrlock(rkt); - rktp = rd_kafka_toppar_desired_add(rkt, partition); - rd_kafka_topic_wrunlock(rkt); + rd_kafka_topic_wrlock(rkt); + rktp = rd_kafka_toppar_desired_add(rkt, partition); + rd_kafka_topic_wrunlock(rkt); rd_kafka_topic_destroy0(rkt); - return rktp; + return rktp; } @@ -470,19 +458,18 @@ rd_kafka_toppar_t *rd_kafka_toppar_get2 (rd_kafka_t *rk, * * Locks: topic_*lock() MUST be held */ -rd_kafka_toppar_t * -rd_kafka_toppar_get_avail (const rd_kafka_topic_t *rkt, - int32_t partition, int ua_on_miss, - rd_kafka_resp_err_t *errp) { - rd_kafka_toppar_t *rktp; +rd_kafka_toppar_t *rd_kafka_toppar_get_avail(const rd_kafka_topic_t *rkt, + int32_t partition, + int 
ua_on_miss, + rd_kafka_resp_err_t *errp) { + rd_kafka_toppar_t *rktp; - switch (rkt->rkt_state) - { + switch (rkt->rkt_state) { case RD_KAFKA_TOPIC_S_UNKNOWN: /* No metadata received from cluster yet. * Put message in UA partition and re-run partitioner when * cluster comes up. */ - partition = RD_KAFKA_PARTITION_UA; + partition = RD_KAFKA_PARTITION_UA; break; case RD_KAFKA_TOPIC_S_NOTEXISTS: @@ -519,20 +506,20 @@ rd_kafka_toppar_get_avail (const rd_kafka_topic_t *rkt, break; } - /* Get new partition */ - rktp = rd_kafka_toppar_get(rkt, partition, 0); + /* Get new partition */ + rktp = rd_kafka_toppar_get(rkt, partition, 0); - if (unlikely(!rktp)) { - /* Unknown topic or partition */ - if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) - *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; - else - *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + if (unlikely(!rktp)) { + /* Unknown topic or partition */ + if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) + *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + else + *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - return NULL; - } + return NULL; + } - return rktp; + return rktp; } @@ -550,17 +537,17 @@ rd_kafka_toppar_get_avail (const rd_kafka_topic_t *rkt, * Note: 'rktp' refcount is increased. */ -rd_kafka_toppar_t *rd_kafka_toppar_desired_get (rd_kafka_topic_t *rkt, - int32_t partition) { - rd_kafka_toppar_t *rktp; +rd_kafka_toppar_t *rd_kafka_toppar_desired_get(rd_kafka_topic_t *rkt, + int32_t partition) { + rd_kafka_toppar_t *rktp; int i; - RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) { - if (rktp->rktp_partition == partition) - return rd_kafka_toppar_keep(rktp); + RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) { + if (rktp->rktp_partition == partition) + return rd_kafka_toppar_keep(rktp); } - return NULL; + return NULL; } @@ -569,7 +556,7 @@ rd_kafka_toppar_t *rd_kafka_toppar_desired_get (rd_kafka_topic_t *rkt, * * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held. 
*/ -void rd_kafka_toppar_desired_link (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_desired_link(rd_kafka_toppar_t *rktp) { if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_DESP) return; /* Already linked */ @@ -585,7 +572,7 @@ void rd_kafka_toppar_desired_link (rd_kafka_toppar_t *rktp) { * * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held. */ -void rd_kafka_toppar_desired_unlink (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_desired_unlink(rd_kafka_toppar_t *rktp) { if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_DESP)) return; /* Not linked */ @@ -603,12 +590,12 @@ void rd_kafka_toppar_desired_unlink (rd_kafka_toppar_t *rktp) { * * @remark toppar_lock() MUST be held */ -void rd_kafka_toppar_desired_add0 (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_desired_add0(rd_kafka_toppar_t *rktp) { if ((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED)) return; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED", - "%s [%"PRId32"]: marking as DESIRED", + "%s [%" PRId32 "]: marking as DESIRED", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); /* If toppar was marked for removal this is no longer @@ -619,8 +606,9 @@ void rd_kafka_toppar_desired_add0 (rd_kafka_toppar_t *rktp) { if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) { rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED", - "%s [%"PRId32"]: adding to DESIRED list", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); + "%s [%" PRId32 "]: adding to DESIRED list", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition); rd_kafka_toppar_desired_link(rktp); } } @@ -632,11 +620,11 @@ void rd_kafka_toppar_desired_add0 (rd_kafka_toppar_t *rktp) { * * Locks: rd_kafka_topic_wrlock() must be held. 
*/ -rd_kafka_toppar_t *rd_kafka_toppar_desired_add (rd_kafka_topic_t *rkt, - int32_t partition) { +rd_kafka_toppar_t *rd_kafka_toppar_desired_add(rd_kafka_topic_t *rkt, + int32_t partition) { rd_kafka_toppar_t *rktp; - rktp = rd_kafka_toppar_get(rkt, partition, 0/*no_ua_on_miss*/); + rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no_ua_on_miss*/); if (!rktp) rktp = rd_kafka_toppar_desired_get(rkt, partition); @@ -653,23 +641,22 @@ rd_kafka_toppar_t *rd_kafka_toppar_desired_add (rd_kafka_topic_t *rkt, - /** * Unmarks an 'rktp' as desired. * * Locks: rd_kafka_topic_wrlock() and rd_kafka_toppar_lock() MUST be held. */ -void rd_kafka_toppar_desired_del (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_desired_del(rd_kafka_toppar_t *rktp) { - if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED)) - return; + if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED)) + return; - rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_DESIRED; + rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_DESIRED; rd_kafka_toppar_desired_unlink(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESP", - "Removing (un)desired topic %s [%"PRId32"]", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESP", + "Removing (un)desired topic %s [%" PRId32 "]", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) { /* If this partition does not exist in the cluster @@ -683,7 +670,7 @@ void rd_kafka_toppar_desired_del (rd_kafka_toppar_t *rktp) { /** * Append message at tail of 'rktp' message queue. 
*/ -void rd_kafka_toppar_enq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) { +void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) { int queue_len; rd_kafka_q_t *wakeup_q = NULL; @@ -702,8 +689,7 @@ void rd_kafka_toppar_enq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) { &rktp->rktp_msgq, rkm); } - if (unlikely(queue_len == 1 && - (wakeup_q = rktp->rktp_msgq_wakeup_q))) + if (unlikely(queue_len == 1 && (wakeup_q = rktp->rktp_msgq_wakeup_q))) rd_kafka_q_keep(wakeup_q); rd_kafka_toppar_unlock(rktp); @@ -723,11 +709,11 @@ void rd_kafka_toppar_enq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) { * Upon return \p srcq will contain any remaining messages that require * another insert position in \p destq. */ -static void -rd_kafka_msgq_insert_msgq_before (rd_kafka_msgq_t *destq, - rd_kafka_msg_t *insert_before, - rd_kafka_msgq_t *srcq, - int (*cmp) (const void *a, const void *b)) { +static void rd_kafka_msgq_insert_msgq_before(rd_kafka_msgq_t *destq, + rd_kafka_msg_t *insert_before, + rd_kafka_msgq_t *srcq, + int (*cmp)(const void *a, + const void *b)) { rd_kafka_msg_t *slast; rd_kafka_msgq_t tmpq; @@ -753,8 +739,7 @@ rd_kafka_msgq_insert_msgq_before (rd_kafka_msgq_t *destq, * insert_before, and a right part that will need another * insert position. */ - new_sfirst = rd_kafka_msgq_find_pos(srcq, NULL, - insert_before, + new_sfirst = rd_kafka_msgq_find_pos(srcq, NULL, insert_before, cmp, &cnt, &bytes); rd_assert(new_sfirst); @@ -771,16 +756,13 @@ rd_kafka_msgq_insert_msgq_before (rd_kafka_msgq_t *destq, * insert srcq at insert_before in destq. 
*/ rd_dassert(!TAILQ_EMPTY(&destq->rkmq_msgs)); rd_dassert(!TAILQ_EMPTY(&srcq->rkmq_msgs)); - TAILQ_INSERT_LIST_BEFORE(&destq->rkmq_msgs, - insert_before, - &srcq->rkmq_msgs, - rd_kafka_msgs_head_s, - rd_kafka_msg_t *, - rkm_link); - destq->rkmq_msg_cnt += srcq->rkmq_msg_cnt; + TAILQ_INSERT_LIST_BEFORE(&destq->rkmq_msgs, insert_before, + &srcq->rkmq_msgs, rd_kafka_msgs_head_s, + rd_kafka_msg_t *, rkm_link); + destq->rkmq_msg_cnt += srcq->rkmq_msg_cnt; destq->rkmq_msg_bytes += srcq->rkmq_msg_bytes; - srcq->rkmq_msg_cnt = 0; - srcq->rkmq_msg_bytes = 0; + srcq->rkmq_msg_cnt = 0; + srcq->rkmq_msg_bytes = 0; rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false); rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false); @@ -796,9 +778,9 @@ rd_kafka_msgq_insert_msgq_before (rd_kafka_msgq_t *destq, * @brief Insert all messages from \p srcq into \p destq in their sorted * position (using \p cmp) */ -void rd_kafka_msgq_insert_msgq (rd_kafka_msgq_t *destq, - rd_kafka_msgq_t *srcq, - int (*cmp) (const void *a, const void *b)) { +void rd_kafka_msgq_insert_msgq(rd_kafka_msgq_t *destq, + rd_kafka_msgq_t *srcq, + int (*cmp)(const void *a, const void *b)) { rd_kafka_msg_t *sfirst, *dlast, *start_pos = NULL; if (unlikely(RD_KAFKA_MSGQ_EMPTY(srcq))) { @@ -825,7 +807,7 @@ void rd_kafka_msgq_insert_msgq (rd_kafka_msgq_t *destq, rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false); rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false); - dlast = rd_kafka_msgq_last(destq); + dlast = rd_kafka_msgq_last(destq); sfirst = rd_kafka_msgq_first(srcq); /* Most common case, all of srcq goes after destq */ @@ -844,13 +826,12 @@ void rd_kafka_msgq_insert_msgq (rd_kafka_msgq_t *destq, rd_kafka_msg_t *insert_before; /* Get insert position in destq of first element in srcq */ - insert_before = rd_kafka_msgq_find_pos(destq, start_pos, - sfirst, cmp, - NULL, NULL); + insert_before = rd_kafka_msgq_find_pos(destq, start_pos, sfirst, + cmp, NULL, NULL); /* Insert as much of srcq as possible at insert_before */ - 
rd_kafka_msgq_insert_msgq_before(destq, insert_before, - srcq, cmp); + rd_kafka_msgq_insert_msgq_before(destq, insert_before, srcq, + cmp); /* Remember the current destq position so the next find_pos() * does not have to re-scan destq and what was @@ -881,11 +862,13 @@ void rd_kafka_msgq_insert_msgq (rd_kafka_msgq_t *destq, * @returns 0 if all messages were retried, or 1 if some messages * could not be retried. */ -int rd_kafka_retry_msgq (rd_kafka_msgq_t *destq, - rd_kafka_msgq_t *srcq, - int incr_retry, int max_retries, rd_ts_t backoff, - rd_kafka_msg_status_t status, - int (*cmp) (const void *a, const void *b)) { +int rd_kafka_retry_msgq(rd_kafka_msgq_t *destq, + rd_kafka_msgq_t *srcq, + int incr_retry, + int max_retries, + rd_ts_t backoff, + rd_kafka_msg_status_t status, + int (*cmp)(const void *a, const void *b)) { rd_kafka_msgq_t retryable = RD_KAFKA_MSGQ_INITIALIZER(retryable); rd_kafka_msg_t *rkm, *tmp; @@ -903,7 +886,7 @@ int rd_kafka_retry_msgq (rd_kafka_msgq_t *destq, rd_kafka_msgq_enq(&retryable, rkm); rkm->rkm_u.producer.ts_backoff = backoff; - rkm->rkm_u.producer.retries += incr_retry; + rkm->rkm_u.producer.retries += incr_retry; /* Don't downgrade a message from any form of PERSISTED * to NOT_PERSISTED, since the original cause of indicating @@ -911,7 +894,7 @@ int rd_kafka_retry_msgq (rd_kafka_msgq_t *destq, * E.g., a previous ack or in-flight timeout. 
*/ if (likely(!(status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED && rkm->rkm_status != - RD_KAFKA_MSG_STATUS_NOT_PERSISTED))) + RD_KAFKA_MSG_STATUS_NOT_PERSISTED))) rkm->rkm_status = status; } @@ -938,9 +921,11 @@ int rd_kafka_retry_msgq (rd_kafka_msgq_t *destq, * @locality Broker thread (but not necessarily the leader broker thread) */ -int rd_kafka_toppar_retry_msgq (rd_kafka_toppar_t *rktp, rd_kafka_msgq_t *rkmq, - int incr_retry, rd_kafka_msg_status_t status) { - rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; +int rd_kafka_toppar_retry_msgq(rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + int incr_retry, + rd_kafka_msg_status_t status) { + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; rd_ts_t backoff = rd_clock() + (rk->rk_conf.retry_backoff_ms * 1000); int r; @@ -948,9 +933,8 @@ int rd_kafka_toppar_retry_msgq (rd_kafka_toppar_t *rktp, rd_kafka_msgq_t *rkmq, return 1; rd_kafka_toppar_lock(rktp); - r = rd_kafka_retry_msgq(&rktp->rktp_msgq, rkmq, - incr_retry, rk->rk_conf.max_retries, - backoff, status, + r = rd_kafka_retry_msgq(&rktp->rktp_msgq, rkmq, incr_retry, + rk->rk_conf.max_retries, backoff, status, rktp->rktp_rkt->rkt_conf.msg_order_cmp); rd_kafka_toppar_unlock(rktp); @@ -962,8 +946,8 @@ int rd_kafka_toppar_retry_msgq (rd_kafka_toppar_t *rktp, rd_kafka_msgq_t *rkmq, * message queue. The queues must not overlap. * @remark \p rkmq will be cleared. */ -void rd_kafka_toppar_insert_msgq (rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq) { +void rd_kafka_toppar_insert_msgq(rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq) { rd_kafka_toppar_lock(rktp); rd_kafka_msgq_insert_msgq(&rktp->rktp_msgq, rkmq, rktp->rktp_rkt->rkt_conf.msg_order_cmp); @@ -976,7 +960,7 @@ void rd_kafka_toppar_insert_msgq (rd_kafka_toppar_t *rktp, * Helper method for purging queues when removing a toppar. 
* Locks: rd_kafka_toppar_lock() MUST be held */ -void rd_kafka_toppar_purge_and_disable_queues (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_purge_and_disable_queues(rd_kafka_toppar_t *rktp) { rd_kafka_q_disable(rktp->rktp_fetchq); rd_kafka_q_purge(rktp->rktp_fetchq); rd_kafka_q_disable(rktp->rktp_ops); @@ -992,9 +976,9 @@ void rd_kafka_toppar_purge_and_disable_queues (rd_kafka_toppar_t *rktp) { * * @locks rd_kafka_toppar_lock() MUST be held */ -static void rd_kafka_toppar_broker_migrate (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *old_rkb, - rd_kafka_broker_t *new_rkb) { +static void rd_kafka_toppar_broker_migrate(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *old_rkb, + rd_kafka_broker_t *new_rkb) { rd_kafka_op_t *rko; rd_kafka_broker_t *dest_rkb; int had_next_broker = rktp->rktp_next_broker ? 1 : 0; @@ -1027,25 +1011,25 @@ static void rd_kafka_toppar_broker_migrate (rd_kafka_toppar_t *rktp, /* If there is an existing broker for this toppar we let it * first handle its own leave and then trigger the join for * the next broker, if any. */ - rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE); + rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE); dest_rkb = old_rkb; } else { /* No existing broker, send join op directly to new broker. */ - rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_JOIN); + rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_JOIN); dest_rkb = new_rkb; } rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR", - "Migrating topic %.*s [%"PRId32"] %p from %s to %s " - "(sending %s to %s)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rktp, - old_rkb ? rd_kafka_broker_name(old_rkb) : "(none)", - new_rkb ? 
rd_kafka_broker_name(new_rkb) : "(none)", - rd_kafka_op2str(rko->rko_type), - rd_kafka_broker_name(dest_rkb)); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR", + "Migrating topic %.*s [%" PRId32 + "] %p from %s to %s " + "(sending %s to %s)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, + rktp, old_rkb ? rd_kafka_broker_name(old_rkb) : "(none)", + new_rkb ? rd_kafka_broker_name(new_rkb) : "(none)", + rd_kafka_op2str(rko->rko_type), rd_kafka_broker_name(dest_rkb)); rd_kafka_q_enq(dest_rkb->rkb_ops, rko); } @@ -1057,43 +1041,44 @@ static void rd_kafka_toppar_broker_migrate (rd_kafka_toppar_t *rktp, * * Locks: rd_kafka_toppar_lock() MUST be held */ -void rd_kafka_toppar_broker_leave_for_remove (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_broker_leave_for_remove(rd_kafka_toppar_t *rktp) { rd_kafka_op_t *rko; rd_kafka_broker_t *dest_rkb; rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_REMOVE; - if (rktp->rktp_next_broker) - dest_rkb = rktp->rktp_next_broker; - else if (rktp->rktp_broker) - dest_rkb = rktp->rktp_broker; - else { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARDEL", - "%.*s [%"PRId32"] %p not handled by any broker: " - "not sending LEAVE for remove", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rktp); - return; - } - - - /* Revert from offset-wait state back to offset-query - * prior to leaving the broker to avoid stalling - * on the new broker waiting for a offset reply from - * this old broker (that might not come and thus need - * to time out..slowly) */ - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) - rd_kafka_toppar_set_fetch_state( - rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY); - - rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE); + if (rktp->rktp_next_broker) + dest_rkb = rktp->rktp_next_broker; + else if (rktp->rktp_broker) + dest_rkb = rktp->rktp_broker; + else { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARDEL", + "%.*s [%" PRId32 + "] %p not handled by any 
broker: " + "not sending LEAVE for remove", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rktp); + return; + } + + + /* Revert from offset-wait state back to offset-query + * prior to leaving the broker to avoid stalling + * on the new broker waiting for a offset reply from + * this old broker (that might not come and thus need + * to time out..slowly) */ + if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) + rd_kafka_toppar_set_fetch_state( + rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY); + + rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE); rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR", - "%.*s [%"PRId32"] %p sending final LEAVE for removal by %s", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rktp, - rd_kafka_broker_name(dest_rkb)); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR", + "%.*s [%" PRId32 "] %p sending final LEAVE for removal by %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, + rktp, rd_kafka_broker_name(dest_rkb)); rd_kafka_q_enq(dest_rkb->rkb_ops, rko); } @@ -1105,66 +1090,69 @@ void rd_kafka_toppar_broker_leave_for_remove (rd_kafka_toppar_t *rktp) { * * @locks Caller must have rd_kafka_toppar_lock(rktp) held. */ -void rd_kafka_toppar_broker_delegate (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb) { - rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; +void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb) { + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; int internal_fallback = 0; - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", - "%s [%"PRId32"]: delegate to broker %s " - "(rktp %p, term %d, ref %d)", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rkb ? 
rkb->rkb_name : "(none)", - rktp, rd_kafka_terminating(rk), - rd_refcnt_get(&rktp->rktp_refcnt)); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", + "%s [%" PRId32 + "]: delegate to broker %s " + "(rktp %p, term %d, ref %d)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rkb ? rkb->rkb_name : "(none)", rktp, + rd_kafka_terminating(rk), + rd_refcnt_get(&rktp->rktp_refcnt)); /* Undelegated toppars are delgated to the internal * broker for bookkeeping. */ if (!rkb && !rd_kafka_terminating(rk)) { - rkb = rd_kafka_broker_internal(rk); + rkb = rd_kafka_broker_internal(rk); internal_fallback = 1; } - if (rktp->rktp_broker == rkb && !rktp->rktp_next_broker) { + if (rktp->rktp_broker == rkb && !rktp->rktp_next_broker) { rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", - "%.*s [%"PRId32"]: not updating broker: " + "%.*s [%" PRId32 + "]: not updating broker: " "already on correct broker %s", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rkb ? 
rd_kafka_broker_name(rkb) : "(none)"); if (internal_fallback) rd_kafka_broker_destroy(rkb); - return; + return; } - if (rktp->rktp_broker) - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", - "%.*s [%"PRId32"]: no longer delegated to " - "broker %s", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_broker_name(rktp->rktp_broker)); - - - if (rkb) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", - "%.*s [%"PRId32"]: delegating to broker %s " - "for partition with %i messages " - "(%"PRIu64" bytes) queued", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_broker_name(rkb), + if (rktp->rktp_broker) + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", + "%.*s [%" PRId32 + "]: no longer delegated to " + "broker %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_broker_name(rktp->rktp_broker)); + + + if (rkb) { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", + "%.*s [%" PRId32 + "]: delegating to broker %s " + "for partition with %i messages " + "(%" PRIu64 " bytes) queued", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_broker_name(rkb), rktp->rktp_msgq.rkmq_msg_cnt, rktp->rktp_msgq.rkmq_msg_bytes); - } else { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", - "%.*s [%"PRId32"]: no broker delegated", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition); - } + } else { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", + "%.*s [%" PRId32 "]: no broker delegated", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + } if (rktp->rktp_broker || rkb) rd_kafka_toppar_broker_migrate(rktp, rktp->rktp_broker, rkb); @@ -1175,43 +1163,32 @@ void rd_kafka_toppar_broker_delegate (rd_kafka_toppar_t *rktp, - - -void -rd_kafka_toppar_offset_commit_result (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets){ +void 
rd_kafka_toppar_offset_commit_result( + rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets) { if (err) - rd_kafka_consumer_err(rktp->rktp_fetchq, - /* FIXME: propagate broker_id */ - RD_KAFKA_NODEID_UA, - err, 0 /* FIXME:VERSION*/, - NULL, rktp, RD_KAFKA_OFFSET_INVALID, - "Offset commit failed: %s", - rd_kafka_err2str(err)); - - rd_kafka_toppar_lock(rktp); + rd_kafka_consumer_err( + rktp->rktp_fetchq, + /* FIXME: propagate broker_id */ + RD_KAFKA_NODEID_UA, err, 0 /* FIXME:VERSION*/, NULL, rktp, + RD_KAFKA_OFFSET_INVALID, "Offset commit failed: %s", + rd_kafka_err2str(err)); + + rd_kafka_toppar_lock(rktp); if (!err) rktp->rktp_committed_offset = offsets->elems[0].offset; - /* When stopping toppars: - * Final commit is now done (or failed), propagate. */ - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) - rd_kafka_toppar_fetch_stopped(rktp, err); + /* When stopping toppars: + * Final commit is now done (or failed), propagate. */ + if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) + rd_kafka_toppar_fetch_stopped(rktp, err); - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); } - - - - - - - - /** * Handle the next offset to consume for a toppar. * This is used during initial setup when trying to figure out what @@ -1220,8 +1197,8 @@ rd_kafka_toppar_offset_commit_result (rd_kafka_toppar_t *rktp, * Locality: toppar handler thread. * Locks: toppar_lock(rktp) must be held */ -void rd_kafka_toppar_next_offset_handle (rd_kafka_toppar_t *rktp, - int64_t Offset) { +void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp, + int64_t Offset) { if (RD_KAFKA_OFFSET_IS_LOGICAL(Offset)) { /* Offset storage returned logical offset (e.g. 
"end"), @@ -1238,12 +1215,10 @@ void rd_kafka_toppar_next_offset_handle (rd_kafka_toppar_t *rktp, } /* Adjust by TAIL count if, if wanted */ - if (rktp->rktp_query_offset <= - RD_KAFKA_OFFSET_TAIL_BASE) { + if (rktp->rktp_query_offset <= RD_KAFKA_OFFSET_TAIL_BASE) { int64_t orig_Offset = Offset; int64_t tail_cnt = - llabs(rktp->rktp_query_offset - - RD_KAFKA_OFFSET_TAIL_BASE); + llabs(rktp->rktp_query_offset - RD_KAFKA_OFFSET_TAIL_BASE); if (tail_cnt > Offset) Offset = 0; @@ -1251,13 +1226,15 @@ void rd_kafka_toppar_next_offset_handle (rd_kafka_toppar_t *rktp, Offset -= tail_cnt; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "OffsetReply for topic %s [%"PRId32"]: " - "offset %"PRId64": adjusting for " - "OFFSET_TAIL(%"PRId64"): " - "effective offset %"PRId64, + "OffsetReply for topic %s [%" PRId32 + "]: " + "offset %" PRId64 + ": adjusting for " + "OFFSET_TAIL(%" PRId64 + "): " + "effective offset %" PRId64, rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - orig_Offset, tail_cnt, + rktp->rktp_partition, orig_Offset, tail_cnt, Offset); } @@ -1268,7 +1245,6 @@ void rd_kafka_toppar_next_offset_handle (rd_kafka_toppar_t *rktp, /* Wake-up broker thread which might be idling on IO */ if (rktp->rktp_broker) rd_kafka_broker_wakeup(rktp->rktp_broker); - } @@ -1278,90 +1254,86 @@ void rd_kafka_toppar_next_offset_handle (rd_kafka_toppar_t *rktp, * * Locality: toppar thread */ -void rd_kafka_toppar_offset_fetch (rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq) { +void rd_kafka_toppar_offset_fetch(rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq) { rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; rd_kafka_topic_partition_list_t *part; rd_kafka_op_t *rko; rd_kafka_dbg(rk, TOPIC, "OFFSETREQ", - "Partition %.*s [%"PRId32"]: querying cgrp for " + "Partition %.*s [%" PRId32 + "]: querying cgrp for " "committed offset (opv %d)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, replyq.version); part = rd_kafka_topic_partition_list_new(1); - 
rd_kafka_topic_partition_list_add0(__FUNCTION__,__LINE__,part, + rd_kafka_topic_partition_list_add0(__FUNCTION__, __LINE__, part, rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp); + rktp->rktp_partition, rktp); - rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH); - rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rko->rko_replyq = replyq; + rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH); + rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rko->rko_replyq = replyq; - rko->rko_u.offset_fetch.partitions = part; + rko->rko_u.offset_fetch.partitions = part; rko->rko_u.offset_fetch.require_stable = - rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED; - rko->rko_u.offset_fetch.do_free = 1; + rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED; + rko->rko_u.offset_fetch.do_free = 1; rd_kafka_q_enq(rktp->rktp_cgrp->rkcg_ops, rko); } - /** * Toppar based OffsetResponse handling. * This is used for finding the next offset to Fetch. * * Locality: toppar handler thread */ -static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_toppar_handle_Offset(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { rd_kafka_toppar_t *rktp = opaque; rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; int64_t Offset; int actions = 0; - rd_kafka_toppar_lock(rktp); - /* Drop reply from previous partition leader */ - if (err != RD_KAFKA_RESP_ERR__DESTROY && rktp->rktp_broker != rkb) - err = RD_KAFKA_RESP_ERR__OUTDATED; - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_lock(rktp); + /* Drop reply from previous partition leader */ + if (err != RD_KAFKA_RESP_ERR__DESTROY && rktp->rktp_broker != rkb) + err = RD_KAFKA_RESP_ERR__OUTDATED; + rd_kafka_toppar_unlock(rktp); offsets = rd_kafka_topic_partition_list_new(1); - 
rd_rkb_dbg(rkb, TOPIC, "OFFSET", - "Offset reply for " - "topic %.*s [%"PRId32"] (v%d vs v%d)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, request->rkbuf_replyq.version, - rktp->rktp_op_version); + rd_rkb_dbg(rkb, TOPIC, "OFFSET", + "Offset reply for " + "topic %.*s [%" PRId32 "] (v%d vs v%d)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, request->rkbuf_replyq.version, + rktp->rktp_op_version); - rd_dassert(request->rkbuf_replyq.version > 0); - if (err != RD_KAFKA_RESP_ERR__DESTROY && + rd_dassert(request->rkbuf_replyq.version > 0); + if (err != RD_KAFKA_RESP_ERR__DESTROY && rd_kafka_buf_version_outdated(request, rktp->rktp_op_version)) { - /* Outdated request response, ignore. */ - err = RD_KAFKA_RESP_ERR__OUTDATED; - } + /* Outdated request response, ignore. */ + err = RD_KAFKA_RESP_ERR__OUTDATED; + } /* Parse and return Offset */ if (err != RD_KAFKA_RESP_ERR__OUTDATED) - err = rd_kafka_handle_ListOffsets(rk, rkb, err, - rkbuf, request, offsets, - &actions); - - if (!err && - !(rktpar = rd_kafka_topic_partition_list_find( - offsets, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition))) { + err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, + offsets, &actions); + + if (!err && !(rktpar = rd_kafka_topic_partition_list_find( + offsets, rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition))) { /* Request partition not found in response */ err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; actions |= RD_KAFKA_ERR_ACTION_PERMANENT; @@ -1370,10 +1342,10 @@ static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk, if (err) { rd_rkb_dbg(rkb, TOPIC, "OFFSET", "Offset reply error for " - "topic %.*s [%"PRId32"] (v%d, %s): %s", + "topic %.*s [%" PRId32 "] (v%d, %s): %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, request->rkbuf_replyq.version, - rd_kafka_err2str(err), + rd_kafka_err2str(err), rd_kafka_actions2str(actions)); rd_kafka_topic_partition_list_destroy(offsets); @@ -1385,7 
+1357,7 @@ static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk, if (err == RD_KAFKA_RESP_ERR__OUTDATED) { rd_kafka_toppar_lock(rktp); rd_kafka_toppar_offset_retry( - rktp, 500, "outdated offset response"); + rktp, 500, "outdated offset response"); rd_kafka_toppar_unlock(rktp); } @@ -1393,13 +1365,13 @@ static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk, rd_kafka_toppar_destroy(rktp); return; - } else if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) - return; /* Retry in progress */ + } else if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) + return; /* Retry in progress */ rd_kafka_toppar_lock(rktp); - if (!(actions & (RD_KAFKA_ERR_ACTION_RETRY| + if (!(actions & (RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH))) { /* Permanent error. Trigger auto.offset.reset policy * and signal error back to application. */ @@ -1409,26 +1381,26 @@ static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk, "failed to query logical offset"); rd_kafka_consumer_err( - rktp->rktp_fetchq, rkb->rkb_nodeid, - err, 0, NULL, rktp, - (rktp->rktp_query_offset <= - RD_KAFKA_OFFSET_TAIL_BASE ? - rktp->rktp_query_offset - - RD_KAFKA_OFFSET_TAIL_BASE : - rktp->rktp_query_offset), - "Failed to query logical offset %s: %s", - rd_kafka_offset2str(rktp->rktp_query_offset), - rd_kafka_err2str(err)); + rktp->rktp_fetchq, rkb->rkb_nodeid, err, 0, NULL, + rktp, + (rktp->rktp_query_offset <= + RD_KAFKA_OFFSET_TAIL_BASE + ? rktp->rktp_query_offset - + RD_KAFKA_OFFSET_TAIL_BASE + : rktp->rktp_query_offset), + "Failed to query logical offset %s: %s", + rd_kafka_offset2str(rktp->rktp_query_offset), + rd_kafka_err2str(err)); } else { /* Temporary error. Schedule retry. 
*/ char tmp[256]; - rd_snprintf(tmp, sizeof(tmp), - "failed to query logical offset %s: %s", - rd_kafka_offset2str( - rktp->rktp_query_offset), - rd_kafka_err2str(err)); + rd_snprintf( + tmp, sizeof(tmp), + "failed to query logical offset %s: %s", + rd_kafka_offset2str(rktp->rktp_query_offset), + rd_kafka_err2str(err)); rd_kafka_toppar_offset_retry(rktp, 500, tmp); } @@ -1442,16 +1414,17 @@ static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk, Offset = rktpar->offset; rd_kafka_topic_partition_list_destroy(offsets); - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "Offset %s request for %.*s [%"PRId32"] " - "returned offset %s (%"PRId64")", + "Offset %s request for %.*s [%" PRId32 + "] " + "returned offset %s (%" PRId64 ")", rd_kafka_offset2str(rktp->rktp_query_offset), RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_offset2str(Offset), Offset); rd_kafka_toppar_next_offset_handle(rktp, Offset); - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); rd_kafka_toppar_destroy(rktp); /* from request.opaque */ } @@ -1466,9 +1439,9 @@ static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk, * @locality toppar handler thread * @locks toppar_lock() MUST be held */ -static void rd_kafka_toppar_offset_retry (rd_kafka_toppar_t *rktp, - int backoff_ms, - const char *reason) { +static void rd_kafka_toppar_offset_retry(rd_kafka_toppar_t *rktp, + int backoff_ms, + const char *reason) { rd_ts_t tmr_next; int restart_tmr; @@ -1477,17 +1450,15 @@ static void rd_kafka_toppar_offset_retry (rd_kafka_toppar_t *rktp, tmr_next = rd_kafka_timer_next(&rktp->rktp_rkt->rkt_rk->rk_timers, &rktp->rktp_offset_query_tmr, 1); - restart_tmr = (tmr_next == -1 || - tmr_next > rd_clock() + (backoff_ms * 1000ll)); + restart_tmr = + (tmr_next == -1 || tmr_next > rd_clock() + (backoff_ms * 1000ll)); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: %s: %s for offset %s", - 
rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, + "%s [%" PRId32 "]: %s: %s for offset %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, reason, - restart_tmr ? - "(re)starting offset query timer" : - "offset query timer already scheduled", + restart_tmr ? "(re)starting offset query timer" + : "offset query timer already scheduled", rd_kafka_offset2str(rktp->rktp_query_offset)); rd_kafka_toppar_set_fetch_state(rktp, @@ -1496,7 +1467,7 @@ static void rd_kafka_toppar_offset_retry (rd_kafka_toppar_t *rktp, if (restart_tmr) rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers, &rktp->rktp_offset_query_tmr, - backoff_ms*1000ll, + backoff_ms * 1000ll, rd_kafka_offset_query_tmr_cb, rktp); } @@ -1511,12 +1482,13 @@ static void rd_kafka_toppar_offset_retry (rd_kafka_toppar_t *rktp, * Locality: toppar handler thread * Locks: toppar_lock() must be held */ -void rd_kafka_toppar_offset_request (rd_kafka_toppar_t *rktp, - int64_t query_offset, int backoff_ms) { - rd_kafka_broker_t *rkb; +void rd_kafka_toppar_offset_request(rd_kafka_toppar_t *rktp, + int64_t query_offset, + int backoff_ms) { + rd_kafka_broker_t *rkb; - rd_kafka_assert(NULL, - thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)); + rd_kafka_assert(NULL, + thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)); rkb = rktp->rktp_broker; @@ -1524,31 +1496,29 @@ void rd_kafka_toppar_offset_request (rd_kafka_toppar_t *rktp, backoff_ms = 500; if (backoff_ms) { - rd_kafka_toppar_offset_retry(rktp, backoff_ms, - !rkb ? - "no current leader for partition": - "backoff"); + rd_kafka_toppar_offset_retry( + rktp, backoff_ms, + !rkb ? 
"no current leader for partition" : "backoff"); return; } rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_query_tmr, 1/*lock*/); + &rktp->rktp_offset_query_tmr, 1 /*lock*/); - if (query_offset == RD_KAFKA_OFFSET_STORED && + if (query_offset == RD_KAFKA_OFFSET_STORED && rktp->rktp_rkt->rkt_conf.offset_store_method == - RD_KAFKA_OFFSET_METHOD_BROKER) { + RD_KAFKA_OFFSET_METHOD_BROKER) { /* * Get stored offset from broker based storage: * ask cgrp manager for offsets */ rd_kafka_toppar_offset_fetch( - rktp, - RD_KAFKA_REPLYQ(rktp->rktp_ops, - rktp->rktp_op_version)); + rktp, + RD_KAFKA_REPLYQ(rktp->rktp_ops, rktp->rktp_op_version)); - } else { + } else { rd_kafka_topic_partition_list_t *offsets; /* @@ -1556,36 +1526,35 @@ void rd_kafka_toppar_offset_request (rd_kafka_toppar_t *rktp, */ rd_rkb_dbg(rkb, TOPIC, "OFFREQ", - "Partition %.*s [%"PRId32"]: querying for logical " + "Partition %.*s [%" PRId32 + "]: querying for logical " "offset %s (opv %d)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_offset2str(query_offset), - rktp->rktp_op_version); + rktp->rktp_op_version); rd_kafka_toppar_keep(rktp); /* refcnt for OffsetRequest opaque*/ - if (query_offset <= RD_KAFKA_OFFSET_TAIL_BASE) - query_offset = RD_KAFKA_OFFSET_END; + if (query_offset <= RD_KAFKA_OFFSET_TAIL_BASE) + query_offset = RD_KAFKA_OFFSET_END; offsets = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add( - offsets, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition)->offset = query_offset; + offsets, rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition) + ->offset = query_offset; rd_kafka_ListOffsetsRequest( - rkb, offsets, - RD_KAFKA_REPLYQ(rktp->rktp_ops, - rktp->rktp_op_version), - rd_kafka_toppar_handle_Offset, - rktp); + rkb, offsets, + RD_KAFKA_REPLYQ(rktp->rktp_ops, rktp->rktp_op_version), + rd_kafka_toppar_handle_Offset, rktp); rd_kafka_topic_partition_list_destroy(offsets); } 
rd_kafka_toppar_set_fetch_state(rktp, - RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT); + RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT); } @@ -1595,18 +1564,19 @@ void rd_kafka_toppar_offset_request (rd_kafka_toppar_t *rktp, * Locality: toppar handler thread * Locks: none */ -static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, - int64_t offset, - rd_kafka_op_t *rko_orig) { - rd_kafka_cgrp_t *rkcg = rko_orig->rko_u.fetch_start.rkcg; +static void rd_kafka_toppar_fetch_start(rd_kafka_toppar_t *rktp, + int64_t offset, + rd_kafka_op_t *rko_orig) { + rd_kafka_cgrp_t *rkcg = rko_orig->rko_u.fetch_start.rkcg; rd_kafka_resp_err_t err = 0; - int32_t version = rko_orig->rko_version; + int32_t version = rko_orig->rko_version; - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH", - "Start fetch for %.*s [%"PRId32"] in " - "state %s at offset %s (v%"PRId32")", + "Start fetch for %.*s [%" PRId32 + "] in " + "state %s at offset %s (v%" PRId32 ")", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_fetch_states[rktp->rktp_fetch_state], @@ -1614,7 +1584,7 @@ static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) { err = RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS; - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); goto err_reply; } @@ -1630,33 +1600,32 @@ static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, if (offset == RD_KAFKA_OFFSET_BEGINNING || - offset == RD_KAFKA_OFFSET_END || + offset == RD_KAFKA_OFFSET_END || offset <= RD_KAFKA_OFFSET_TAIL_BASE) { - rd_kafka_toppar_next_offset_handle(rktp, offset); + rd_kafka_toppar_next_offset_handle(rktp, offset); - } else if (offset == RD_KAFKA_OFFSET_STORED) { + } else if (offset == RD_KAFKA_OFFSET_STORED) { rd_kafka_offset_store_init(rktp); - } else if (offset == RD_KAFKA_OFFSET_INVALID) { - rd_kafka_offset_reset(rktp, offset, - RD_KAFKA_RESP_ERR__NO_OFFSET, - "no 
previously committed offset " - "available"); + } else if (offset == RD_KAFKA_OFFSET_INVALID) { + rd_kafka_offset_reset(rktp, offset, + RD_KAFKA_RESP_ERR__NO_OFFSET, + "no previously committed offset " + "available"); - } else { - rktp->rktp_next_offset = offset; + } else { + rktp->rktp_next_offset = offset; rd_kafka_toppar_set_fetch_state(rktp, - RD_KAFKA_TOPPAR_FETCH_ACTIVE); + RD_KAFKA_TOPPAR_FETCH_ACTIVE); /* Wake-up broker thread which might be idling on IO */ if (rktp->rktp_broker) rd_kafka_broker_wakeup(rktp->rktp_broker); - - } + } rktp->rktp_offsets_fin.eof_offset = RD_KAFKA_OFFSET_INVALID; - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); /* Signal back to caller thread that start has commenced, or err */ err_reply: @@ -1665,7 +1634,7 @@ static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH_START); - rko->rko_err = err; + rko->rko_err = err; rko->rko_rktp = rd_kafka_toppar_keep(rktp); rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0); @@ -1674,7 +1643,6 @@ static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, - /** * Mark toppar's fetch state as stopped (all decommissioning is done, * offsets are stored, etc). @@ -1682,8 +1650,8 @@ static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, * Locality: toppar handler thread * Locks: toppar_lock(rktp) MUST be held */ -void rd_kafka_toppar_fetch_stopped (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err) { +void rd_kafka_toppar_fetch_stopped(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err) { rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPED); @@ -1698,14 +1666,15 @@ void rd_kafka_toppar_fetch_stopped (rd_kafka_toppar_t *rktp, } /* Signal back to application thread that stop is done. 
*/ - if (rktp->rktp_replyq.q) { - rd_kafka_op_t *rko; - rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH_STOP|RD_KAFKA_OP_REPLY); - rko->rko_err = err; - rko->rko_rktp = rd_kafka_toppar_keep(rktp); + if (rktp->rktp_replyq.q) { + rd_kafka_op_t *rko; + rko = + rd_kafka_op_new(RD_KAFKA_OP_FETCH_STOP | RD_KAFKA_OP_REPLY); + rko->rko_err = err; + rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rd_kafka_replyq_enq(&rktp->rktp_replyq, rko, 0); - } + rd_kafka_replyq_enq(&rktp->rktp_replyq, rko, 0); + } } @@ -1715,25 +1684,24 @@ void rd_kafka_toppar_fetch_stopped (rd_kafka_toppar_t *rktp, * * Locality: toppar handler thread */ -void rd_kafka_toppar_fetch_stop (rd_kafka_toppar_t *rktp, - rd_kafka_op_t *rko_orig) { +void rd_kafka_toppar_fetch_stop(rd_kafka_toppar_t *rktp, + rd_kafka_op_t *rko_orig) { int32_t version = rko_orig->rko_version; - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH", - "Stopping fetch for %.*s [%"PRId32"] in state %s (v%d)", + "Stopping fetch for %.*s [%" PRId32 "] in state %s (v%d)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_fetch_states[rktp->rktp_fetch_state], version); rd_kafka_toppar_op_version_bump(rktp, version); - /* Abort pending offset lookups. */ - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_query_tmr, - 1/*lock*/); + /* Abort pending offset lookups. */ + if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_query_tmr, 1 /*lock*/); /* Clear out the forwarding queue. */ rd_kafka_q_fwd_set(rktp->rktp_fetchq, NULL); @@ -1750,7 +1718,7 @@ void rd_kafka_toppar_fetch_stop (rd_kafka_toppar_t *rktp, * so no more operations after this call! 
*/ rd_kafka_offset_store_stop(rktp); - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); } @@ -1760,19 +1728,20 @@ void rd_kafka_toppar_fetch_stop (rd_kafka_toppar_t *rktp, * * Locality: toppar handler thread */ -void rd_kafka_toppar_seek (rd_kafka_toppar_t *rktp, - int64_t offset, rd_kafka_op_t *rko_orig) { +void rd_kafka_toppar_seek(rd_kafka_toppar_t *rktp, + int64_t offset, + rd_kafka_op_t *rko_orig) { rd_kafka_resp_err_t err = 0; - int32_t version = rko_orig->rko_version; + int32_t version = rko_orig->rko_version; - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH", - "Seek %.*s [%"PRId32"] to offset %s " - "in state %s (v%"PRId32")", + "Seek %.*s [%" PRId32 + "] to offset %s " + "in state %s (v%" PRId32 ")", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_offset2str(offset), + rktp->rktp_partition, rd_kafka_offset2str(offset), rd_kafka_fetch_states[rktp->rktp_fetch_state], version); @@ -1783,42 +1752,41 @@ void rd_kafka_toppar_seek (rd_kafka_toppar_t *rktp, err = RD_KAFKA_RESP_ERR__STATE; goto err_reply; } else if (offset == RD_KAFKA_OFFSET_STORED) { - err = RD_KAFKA_RESP_ERR__INVALID_ARG; - goto err_reply; - } + err = RD_KAFKA_RESP_ERR__INVALID_ARG; + goto err_reply; + } rd_kafka_toppar_op_version_bump(rktp, version); - /* Abort pending offset lookups. */ - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_query_tmr, - 1/*lock*/); + /* Abort pending offset lookups. 
*/ + if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_query_tmr, 1 /*lock*/); - if (RD_KAFKA_OFFSET_IS_LOGICAL(offset)) - rd_kafka_toppar_next_offset_handle(rktp, offset); - else { - rktp->rktp_next_offset = offset; + if (RD_KAFKA_OFFSET_IS_LOGICAL(offset)) + rd_kafka_toppar_next_offset_handle(rktp, offset); + else { + rktp->rktp_next_offset = offset; rd_kafka_toppar_set_fetch_state(rktp, - RD_KAFKA_TOPPAR_FETCH_ACTIVE); + RD_KAFKA_TOPPAR_FETCH_ACTIVE); /* Wake-up broker thread which might be idling on IO */ if (rktp->rktp_broker) rd_kafka_broker_wakeup(rktp->rktp_broker); - } + } /* Signal back to caller thread that seek has commenced, or err */ err_reply: - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); if (rko_orig->rko_replyq.q) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(RD_KAFKA_OP_SEEK|RD_KAFKA_OP_REPLY); + rko = rd_kafka_op_new(RD_KAFKA_OP_SEEK | RD_KAFKA_OP_REPLY); rko->rko_err = err; - rko->rko_u.fetch_start.offset = - rko_orig->rko_u.fetch_start.offset; + rko->rko_u.fetch_start.offset = + rko_orig->rko_u.fetch_start.offset; rko->rko_rktp = rd_kafka_toppar_keep(rktp); rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0); @@ -1833,119 +1801,120 @@ void rd_kafka_toppar_seek (rd_kafka_toppar_t *rktp, * * @locality toppar's handler thread */ -static void rd_kafka_toppar_pause_resume (rd_kafka_toppar_t *rktp, - rd_kafka_op_t *rko_orig) { - rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; - int pause = rko_orig->rko_u.pause.pause; - int flag = rko_orig->rko_u.pause.flag; +static void rd_kafka_toppar_pause_resume(rd_kafka_toppar_t *rktp, + rd_kafka_op_t *rko_orig) { + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; + int pause = rko_orig->rko_u.pause.pause; + int flag = rko_orig->rko_u.pause.flag; int32_t version = rko_orig->rko_version; - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rd_kafka_toppar_op_version_bump(rktp, version); if (!pause && 
(rktp->rktp_flags & flag) != flag) { rd_kafka_dbg(rk, TOPIC, "RESUME", - "Not resuming %s [%"PRId32"]: " + "Not resuming %s [%" PRId32 + "]: " "partition is not paused by %s", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - (flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? - "application" : "library")); + (flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "application" + : "library")); rd_kafka_toppar_unlock(rktp); return; } - if (pause) { + if (pause) { /* Pause partition by setting either * RD_KAFKA_TOPPAR_F_APP_PAUSE or * RD_KAFKA_TOPPAR_F_LIB_PAUSE */ - rktp->rktp_flags |= flag; - - if (rk->rk_type == RD_KAFKA_CONSUMER) { - /* Save offset of last consumed message+1 as the - * next message to fetch on resume. */ - if (rktp->rktp_app_offset != RD_KAFKA_OFFSET_INVALID) { - rktp->rktp_next_offset = rktp->rktp_app_offset; - } - - rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME", - "%s %s [%"PRId32"]: at offset %s " - "(state %s, v%d)", - pause ? "Pause":"Resume", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_offset2str( - rktp->rktp_next_offset), - rd_kafka_fetch_states[rktp-> - rktp_fetch_state], - version); - } else { - rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME", - "%s %s [%"PRId32"] (state %s, v%d)", - pause ? "Pause":"Resume", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_fetch_states[rktp-> - rktp_fetch_state], - version); - } - - } else { + rktp->rktp_flags |= flag; + + if (rk->rk_type == RD_KAFKA_CONSUMER) { + /* Save offset of last consumed message+1 as the + * next message to fetch on resume. */ + if (rktp->rktp_app_offset != RD_KAFKA_OFFSET_INVALID) { + rktp->rktp_next_offset = rktp->rktp_app_offset; + } + + rd_kafka_dbg( + rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %s [%" PRId32 + "]: at offset %s " + "(state %s, v%d)", + pause ? 
"Pause" : "Resume", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_offset2str(rktp->rktp_next_offset), + rd_kafka_fetch_states[rktp->rktp_fetch_state], + version); + } else { + rd_kafka_dbg( + rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %s [%" PRId32 "] (state %s, v%d)", + pause ? "Pause" : "Resume", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + version); + } + + } else { /* Unset the RD_KAFKA_TOPPAR_F_APP_PAUSE or * RD_KAFKA_TOPPAR_F_LIB_PAUSE flag */ - rktp->rktp_flags &= ~flag; - - if (rk->rk_type == RD_KAFKA_CONSUMER) { - rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME", - "%s %s [%"PRId32"]: at offset %s " - "(state %s, v%d)", - rktp->rktp_fetch_state == - RD_KAFKA_TOPPAR_FETCH_ACTIVE ? - "Resuming" : "Not resuming stopped", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_offset2str( - rktp->rktp_next_offset), - rd_kafka_fetch_states[rktp-> - rktp_fetch_state], - version); - - /* If the resuming offset is logical we - * need to trigger a seek (that performs the - * logical->absolute lookup logic) to get - * things going. - * Typical case is when a partition is paused - * before anything has been consumed by app - * yet thus having rktp_app_offset=INVALID. */ + rktp->rktp_flags &= ~flag; + + if (rk->rk_type == RD_KAFKA_CONSUMER) { + rd_kafka_dbg( + rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %s [%" PRId32 + "]: at offset %s " + "(state %s, v%d)", + rktp->rktp_fetch_state == + RD_KAFKA_TOPPAR_FETCH_ACTIVE + ? "Resuming" + : "Not resuming stopped", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_offset2str(rktp->rktp_next_offset), + rd_kafka_fetch_states[rktp->rktp_fetch_state], + version); + + /* If the resuming offset is logical we + * need to trigger a seek (that performs the + * logical->absolute lookup logic) to get + * things going. 
+ * Typical case is when a partition is paused + * before anything has been consumed by app + * yet thus having rktp_app_offset=INVALID. */ if (!RD_KAFKA_TOPPAR_IS_PAUSED(rktp) && (rktp->rktp_fetch_state == - RD_KAFKA_TOPPAR_FETCH_ACTIVE || + RD_KAFKA_TOPPAR_FETCH_ACTIVE || rktp->rktp_fetch_state == - RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) && + RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) && rktp->rktp_next_offset == RD_KAFKA_OFFSET_INVALID) - rd_kafka_toppar_next_offset_handle( - rktp, rktp->rktp_next_offset); - - } else - rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME", - "%s %s [%"PRId32"] (state %s, v%d)", - pause ? "Pause":"Resume", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_fetch_states[rktp-> - rktp_fetch_state], - version); - } - rd_kafka_toppar_unlock(rktp); - - if (pause && rk->rk_type == RD_KAFKA_CONSUMER) { - /* Flush partition's fetch queue */ - rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp, - rko_orig->rko_version); - } -} + rd_kafka_toppar_next_offset_handle( + rktp, rktp->rktp_next_offset); + + } else + rd_kafka_dbg( + rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %s [%" PRId32 "] (state %s, v%d)", + pause ? 
"Pause" : "Resume", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + version); + } + rd_kafka_toppar_unlock(rktp); + if (pause && rk->rk_type == RD_KAFKA_CONSUMER) { + /* Flush partition's fetch queue */ + rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp, + rko_orig->rko_version); + } +} @@ -1961,52 +1930,51 @@ static void rd_kafka_toppar_pause_resume (rd_kafka_toppar_t *rktp, * @locality broker thread * @locks none */ -rd_ts_t rd_kafka_toppar_fetch_decide (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int force_remove) { - int should_fetch = 1; +rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb, + int force_remove) { + int should_fetch = 1; const char *reason = ""; int32_t version; - rd_ts_t ts_backoff = 0; + rd_ts_t ts_backoff = 0; rd_bool_t lease_expired = rd_false; rd_kafka_toppar_lock(rktp); /* Check for preferred replica lease expiry */ - lease_expired = - rktp->rktp_leader_id != rktp->rktp_broker_id && - rd_interval(&rktp->rktp_lease_intvl, - 5*60*1000*1000/*5 minutes*/, 0) > 0; + lease_expired = rktp->rktp_leader_id != rktp->rktp_broker_id && + rd_interval(&rktp->rktp_lease_intvl, + 5 * 60 * 1000 * 1000 /*5 minutes*/, 0) > 0; if (lease_expired) { /* delete_to_leader() requires no locks to be held */ rd_kafka_toppar_unlock(rktp); rd_kafka_toppar_delegate_to_leader(rktp); rd_kafka_toppar_lock(rktp); - reason = "preferred replica lease expired"; + reason = "preferred replica lease expired"; + should_fetch = 0; + goto done; + } + + /* Forced removal from fetch list */ + if (unlikely(force_remove)) { + reason = "forced removal"; + should_fetch = 0; + goto done; + } + + if (unlikely((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) != 0)) { + reason = "partition removed"; should_fetch = 0; goto done; } - /* Forced removal from fetch list */ - if (unlikely(force_remove)) { - reason = "forced removal"; - should_fetch = 0; - goto done; - } - - if 
(unlikely((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) != 0)) { - reason = "partition removed"; - should_fetch = 0; - goto done; - } - - /* Skip toppars not in active fetch state */ - if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE) { - reason = "not in active fetch state"; - should_fetch = 0; - goto done; - } + /* Skip toppars not in active fetch state */ + if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE) { + reason = "not in active fetch state"; + should_fetch = 0; + goto done; + } /* Update broker thread's fetch op version */ version = rktp->rktp_op_version; @@ -2015,26 +1983,26 @@ rd_ts_t rd_kafka_toppar_fetch_decide (rd_kafka_toppar_t *rktp, rktp->rktp_offsets.fetch_offset == RD_KAFKA_OFFSET_INVALID) { /* New version barrier, something was modified from the * control plane. Reset and start over. - * Alternatively only the next_offset changed but not the - * barrier, which is the case when automatically triggering - * offset.reset (such as on PARTITION_EOF or + * Alternatively only the next_offset changed but not the + * barrier, which is the case when automatically triggering + * offset.reset (such as on PARTITION_EOF or * OFFSET_OUT_OF_RANGE). 
*/ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCHDEC", - "Topic %s [%"PRId32"]: fetch decide: " + "Topic %s [%" PRId32 + "]: fetch decide: " "updating to version %d (was %d) at " - "offset %"PRId64" (was %"PRId64")", + "offset %" PRId64 " (was %" PRId64 ")", rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - version, rktp->rktp_fetch_version, - rktp->rktp_next_offset, + rktp->rktp_partition, version, + rktp->rktp_fetch_version, rktp->rktp_next_offset, rktp->rktp_offsets.fetch_offset); rd_kafka_offset_stats_reset(&rktp->rktp_offsets); /* New start offset */ rktp->rktp_offsets.fetch_offset = rktp->rktp_next_offset; - rktp->rktp_last_next_offset = rktp->rktp_next_offset; + rktp->rktp_last_next_offset = rktp->rktp_next_offset; rktp->rktp_fetch_version = version; @@ -2047,58 +2015,58 @@ rd_ts_t rd_kafka_toppar_fetch_decide (rd_kafka_toppar_t *rktp, } - if (RD_KAFKA_TOPPAR_IS_PAUSED(rktp)) { - should_fetch = 0; - reason = "paused"; + if (RD_KAFKA_TOPPAR_IS_PAUSED(rktp)) { + should_fetch = 0; + reason = "paused"; - } else if (RD_KAFKA_OFFSET_IS_LOGICAL(rktp->rktp_next_offset)) { + } else if (RD_KAFKA_OFFSET_IS_LOGICAL(rktp->rktp_next_offset)) { should_fetch = 0; - reason = "no concrete offset"; + reason = "no concrete offset"; } else if (rd_kafka_q_len(rktp->rktp_fetchq) >= - rkb->rkb_rk->rk_conf.queued_min_msgs) { - /* Skip toppars who's local message queue is already above - * the lower threshold. */ - reason = "queued.min.messages exceeded"; + rkb->rkb_rk->rk_conf.queued_min_msgs) { + /* Skip toppars who's local message queue is already above + * the lower threshold. 
*/ + reason = "queued.min.messages exceeded"; should_fetch = 0; } else if ((int64_t)rd_kafka_q_size(rktp->rktp_fetchq) >= - rkb->rkb_rk->rk_conf.queued_max_msg_bytes) { - reason = "queued.max.messages.kbytes exceeded"; + rkb->rkb_rk->rk_conf.queued_max_msg_bytes) { + reason = "queued.max.messages.kbytes exceeded"; should_fetch = 0; } else if (rktp->rktp_ts_fetch_backoff > rd_clock()) { - reason = "fetch backed off"; - ts_backoff = rktp->rktp_ts_fetch_backoff; + reason = "fetch backed off"; + ts_backoff = rktp->rktp_ts_fetch_backoff; should_fetch = 0; } - done: +done: /* Copy offset stats to finalized place holder. */ rktp->rktp_offsets_fin = rktp->rktp_offsets; if (rktp->rktp_fetch != should_fetch) { - rd_rkb_dbg(rkb, FETCH, "FETCH", - "Topic %s [%"PRId32"] in state %s at offset %s " - "(%d/%d msgs, %"PRId64"/%d kb queued, " - "opv %"PRId32") is %s%s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_fetch_states[rktp->rktp_fetch_state], - rd_kafka_offset2str(rktp->rktp_next_offset), - rd_kafka_q_len(rktp->rktp_fetchq), - rkb->rkb_rk->rk_conf.queued_min_msgs, - rd_kafka_q_size(rktp->rktp_fetchq) / 1024, - rkb->rkb_rk->rk_conf.queued_max_msg_kbytes, - rktp->rktp_fetch_version, - should_fetch ? "fetchable" : "not fetchable: ", - reason); + rd_rkb_dbg( + rkb, FETCH, "FETCH", + "Topic %s [%" PRId32 + "] in state %s at offset %s " + "(%d/%d msgs, %" PRId64 + "/%d kb queued, " + "opv %" PRId32 ") is %s%s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + rd_kafka_offset2str(rktp->rktp_next_offset), + rd_kafka_q_len(rktp->rktp_fetchq), + rkb->rkb_rk->rk_conf.queued_min_msgs, + rd_kafka_q_size(rktp->rktp_fetchq) / 1024, + rkb->rkb_rk->rk_conf.queued_max_msg_kbytes, + rktp->rktp_fetch_version, + should_fetch ? "fetchable" : "not fetchable: ", reason); if (should_fetch) { - rd_dassert(rktp->rktp_fetch_version > 0); - rd_kafka_broker_active_toppar_add(rkb, rktp, - *reason ? 
reason : - "fetchable"); + rd_dassert(rktp->rktp_fetch_version > 0); + rd_kafka_broker_active_toppar_add( + rkb, rktp, *reason ? reason : "fetchable"); } else { rd_kafka_broker_active_toppar_del(rkb, rktp, reason); } @@ -2125,8 +2093,8 @@ rd_ts_t rd_kafka_toppar_fetch_decide (rd_kafka_toppar_t *rktp, * @locality broker thread * @locks none */ -rd_ts_t rd_kafka_broker_consumer_toppar_serve (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp) { +rd_ts_t rd_kafka_broker_consumer_toppar_serve(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp) { return rd_kafka_toppar_fetch_decide(rktp, rkb, 0); } @@ -2142,98 +2110,95 @@ rd_ts_t rd_kafka_broker_consumer_toppar_serve (rd_kafka_broker_t *rkb, * * @locality toppar handler thread */ -static rd_kafka_op_res_t -rd_kafka_toppar_op_serve (rd_kafka_t *rk, - rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque) { - rd_kafka_toppar_t *rktp = NULL; - int outdated = 0; - - if (rko->rko_rktp) - rktp = rko->rko_rktp; - - if (rktp) { - outdated = rd_kafka_op_version_outdated(rko, - rktp->rktp_op_version); - - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OP", - "%.*s [%"PRId32"] received %sop %s " - "(v%"PRId32") in fetch-state %s (opv%d)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - outdated ? 
"outdated ": "", - rd_kafka_op2str(rko->rko_type), - rko->rko_version, - rd_kafka_fetch_states[rktp->rktp_fetch_state], - rktp->rktp_op_version); - - if (outdated) { +static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { + rd_kafka_toppar_t *rktp = NULL; + int outdated = 0; + + if (rko->rko_rktp) + rktp = rko->rko_rktp; + + if (rktp) { + outdated = + rd_kafka_op_version_outdated(rko, rktp->rktp_op_version); + + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OP", + "%.*s [%" PRId32 + "] received %sop %s " + "(v%" PRId32 ") in fetch-state %s (opv%d)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, outdated ? "outdated " : "", + rd_kafka_op2str(rko->rko_type), rko->rko_version, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + rktp->rktp_op_version); + + if (outdated) { #if ENABLE_DEVEL - rd_kafka_op_print(stdout, "PART_OUTDATED", rko); + rd_kafka_op_print(stdout, "PART_OUTDATED", rko); #endif rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR__OUTDATED); - return RD_KAFKA_OP_RES_HANDLED; - } - } + return RD_KAFKA_OP_RES_HANDLED; + } + } - switch ((int)rko->rko_type) - { - case RD_KAFKA_OP_FETCH_START: - rd_kafka_toppar_fetch_start(rktp, - rko->rko_u.fetch_start.offset, rko); - break; + switch ((int)rko->rko_type) { + case RD_KAFKA_OP_FETCH_START: + rd_kafka_toppar_fetch_start(rktp, rko->rko_u.fetch_start.offset, + rko); + break; - case RD_KAFKA_OP_FETCH_STOP: - rd_kafka_toppar_fetch_stop(rktp, rko); - break; + case RD_KAFKA_OP_FETCH_STOP: + rd_kafka_toppar_fetch_stop(rktp, rko); + break; - case RD_KAFKA_OP_SEEK: - rd_kafka_toppar_seek(rktp, rko->rko_u.fetch_start.offset, rko); - break; + case RD_KAFKA_OP_SEEK: + rd_kafka_toppar_seek(rktp, rko->rko_u.fetch_start.offset, rko); + break; - case RD_KAFKA_OP_PAUSE: - rd_kafka_toppar_pause_resume(rktp, rko); - break; + case RD_KAFKA_OP_PAUSE: + rd_kafka_toppar_pause_resume(rktp, rko); + break; case 
RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY: rd_kafka_assert(NULL, rko->rko_u.offset_commit.cb); - rko->rko_u.offset_commit.cb( - rk, rko->rko_err, - rko->rko_u.offset_commit.partitions, - rko->rko_u.offset_commit.opaque); + rko->rko_u.offset_commit.cb(rk, rko->rko_err, + rko->rko_u.offset_commit.partitions, + rko->rko_u.offset_commit.opaque); break; - case RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY: - { + case RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY: { /* OffsetFetch reply */ rd_kafka_topic_partition_list_t *offsets = - rko->rko_u.offset_fetch.partitions; - int64_t offset = RD_KAFKA_OFFSET_INVALID; + rko->rko_u.offset_fetch.partitions; + int64_t offset = RD_KAFKA_OFFSET_INVALID; rktp = offsets->elems[0]._private; if (!rko->rko_err) { - /* Request succeeded but per-partition might have failed */ + /* Request succeeded but per-partition might have failed + */ rko->rko_err = offsets->elems[0].err; - offset = offsets->elems[0].offset; + offset = offsets->elems[0].offset; } offsets->elems[0]._private = NULL; rd_kafka_topic_partition_list_destroy(offsets); - rko->rko_u.offset_fetch.partitions = NULL; + rko->rko_u.offset_fetch.partitions = NULL; - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_query_tmr, - 1/*lock*/); + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_query_tmr, 1 /*lock*/); - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); - if (rko->rko_err) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC, "OFFSET", - "Failed to fetch offset for " - "%.*s [%"PRId32"]: %s", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_err2str(rko->rko_err)); + if (rko->rko_err) { + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "Failed to fetch offset for " + "%.*s [%" PRId32 "]: %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_err2str(rko->rko_err)); /* Keep on querying until we succeed. 
*/ rd_kafka_toppar_offset_retry(rktp, 500, @@ -2244,45 +2209,42 @@ rd_kafka_toppar_op_serve (rd_kafka_t *rk, /* Propagate error to application */ if (rko->rko_err != RD_KAFKA_RESP_ERR__WAIT_COORD && rko->rko_err != - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) rd_kafka_consumer_err( - rktp->rktp_fetchq, - RD_KAFKA_NODEID_UA, - rko->rko_err, 0, - NULL, rktp, - RD_KAFKA_OFFSET_INVALID, - "Failed to fetch " - "offsets from brokers: %s", - rd_kafka_err2str(rko->rko_err)); - - rd_kafka_toppar_destroy(rktp); - - break; - } - - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC, "OFFSET", - "%.*s [%"PRId32"]: OffsetFetch returned " - "offset %s (%"PRId64")", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_offset2str(offset), offset); - - if (offset > 0) - rktp->rktp_committed_offset = offset; - - if (offset >= 0) - rd_kafka_toppar_next_offset_handle(rktp, offset); - else - rd_kafka_offset_reset(rktp, offset, - RD_KAFKA_RESP_ERR__NO_OFFSET, - "no previously committed offset " - "available"); - rd_kafka_toppar_unlock(rktp); + rktp->rktp_fetchq, RD_KAFKA_NODEID_UA, + rko->rko_err, 0, NULL, rktp, + RD_KAFKA_OFFSET_INVALID, + "Failed to fetch " + "offsets from brokers: %s", + rd_kafka_err2str(rko->rko_err)); + + rd_kafka_toppar_destroy(rktp); + + break; + } + + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%.*s [%" PRId32 + "]: OffsetFetch returned " + "offset %s (%" PRId64 ")", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_offset2str(offset), + offset); + + if (offset > 0) + rktp->rktp_committed_offset = offset; + + if (offset >= 0) + rd_kafka_toppar_next_offset_handle(rktp, offset); + else + rd_kafka_offset_reset(rktp, offset, + RD_KAFKA_RESP_ERR__NO_OFFSET, + "no previously committed offset " + "available"); + rd_kafka_toppar_unlock(rktp); rd_kafka_toppar_destroy(rktp); - } - break; + } break; default: rd_kafka_assert(NULL, !*"unknown type"); @@ -2296,17 
+2258,16 @@ rd_kafka_toppar_op_serve (rd_kafka_t *rk, - - /** * Send command op to toppar (handled by toppar's thread). * * Locality: any thread */ -static void rd_kafka_toppar_op0 (rd_kafka_toppar_t *rktp, rd_kafka_op_t *rko, - rd_kafka_replyq_t replyq) { - rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rko->rko_replyq = replyq; +static void rd_kafka_toppar_op0(rd_kafka_toppar_t *rktp, + rd_kafka_op_t *rko, + rd_kafka_replyq_t replyq) { + rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rko->rko_replyq = replyq; rd_kafka_q_enq(rktp->rktp_ops, rko); } @@ -2317,22 +2278,23 @@ static void rd_kafka_toppar_op0 (rd_kafka_toppar_t *rktp, rd_kafka_op_t *rko, * * Locality: any thread */ -static void rd_kafka_toppar_op (rd_kafka_toppar_t *rktp, - rd_kafka_op_type_t type, int32_t version, - int64_t offset, rd_kafka_cgrp_t *rkcg, - rd_kafka_replyq_t replyq) { +static void rd_kafka_toppar_op(rd_kafka_toppar_t *rktp, + rd_kafka_op_type_t type, + int32_t version, + int64_t offset, + rd_kafka_cgrp_t *rkcg, + rd_kafka_replyq_t replyq) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(type); - rko->rko_version = version; - if (type == RD_KAFKA_OP_FETCH_START || - type == RD_KAFKA_OP_SEEK) { - if (rkcg) - rko->rko_u.fetch_start.rkcg = rkcg; - rko->rko_u.fetch_start.offset = offset; - } + rko = rd_kafka_op_new(type); + rko->rko_version = version; + if (type == RD_KAFKA_OP_FETCH_START || type == RD_KAFKA_OP_SEEK) { + if (rkcg) + rko->rko_u.fetch_start.rkcg = rkcg; + rko->rko_u.fetch_start.offset = offset; + } - rd_kafka_toppar_op0(rktp, rko, replyq); + rd_kafka_toppar_op0(rktp, rko, replyq); } @@ -2346,31 +2308,31 @@ static void rd_kafka_toppar_op (rd_kafka_toppar_t *rktp, * * This is the thread-safe interface that can be called from any thread. 
*/ -rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start (rd_kafka_toppar_t *rktp, - int64_t offset, - rd_kafka_q_t *fwdq, - rd_kafka_replyq_t replyq) { - int32_t version; +rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_t *rktp, + int64_t offset, + rd_kafka_q_t *fwdq, + rd_kafka_replyq_t replyq) { + int32_t version; rd_kafka_q_lock(rktp->rktp_fetchq); if (fwdq && !(rktp->rktp_fetchq->rkq_flags & RD_KAFKA_Q_F_FWD_APP)) - rd_kafka_q_fwd_set0(rktp->rktp_fetchq, fwdq, - 0, /* no do_lock */ + rd_kafka_q_fwd_set0(rktp->rktp_fetchq, fwdq, 0, /* no do_lock */ 0 /* no fwd_app */); rd_kafka_q_unlock(rktp->rktp_fetchq); - /* Bump version barrier. */ - version = rd_kafka_toppar_version_new_barrier(rktp); + /* Bump version barrier. */ + version = rd_kafka_toppar_version_new_barrier(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER", - "Start consuming %.*s [%"PRId32"] at " - "offset %s (v%"PRId32")", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rd_kafka_offset2str(offset), - version); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER", + "Start consuming %.*s [%" PRId32 + "] at " + "offset %s (v%" PRId32 ")", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_offset2str(offset), + version); - rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_START, version, - offset, rktp->rktp_rkt->rkt_rk->rk_cgrp, replyq); + rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_START, version, offset, + rktp->rktp_rkt->rkt_rk->rk_cgrp, replyq); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -2382,20 +2344,20 @@ rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start (rd_kafka_toppar_t *rktp, * * Locality: any thread */ -rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop (rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq) { - int32_t version; +rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop(rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq) { + int32_t version; - /* Bump version barrier. */ + /* Bump version barrier. 
*/ version = rd_kafka_toppar_version_new_barrier(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER", - "Stop consuming %.*s [%"PRId32"] (v%"PRId32")", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, version); + "Stop consuming %.*s [%" PRId32 "] (v%" PRId32 ")", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, version); - rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_STOP, version, - 0, NULL, replyq); + rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_STOP, version, 0, NULL, + replyq); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -2408,23 +2370,24 @@ rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop (rd_kafka_toppar_t *rktp, * * This is the thread-safe interface that can be called from any thread. */ -rd_kafka_resp_err_t rd_kafka_toppar_op_seek (rd_kafka_toppar_t *rktp, - int64_t offset, - rd_kafka_replyq_t replyq) { - int32_t version; +rd_kafka_resp_err_t rd_kafka_toppar_op_seek(rd_kafka_toppar_t *rktp, + int64_t offset, + rd_kafka_replyq_t replyq) { + int32_t version; - /* Bump version barrier. */ - version = rd_kafka_toppar_version_new_barrier(rktp); + /* Bump version barrier. 
*/ + version = rd_kafka_toppar_version_new_barrier(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER", - "Seek %.*s [%"PRId32"] to " - "offset %s (v%"PRId32")", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rd_kafka_offset2str(offset), - version); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER", + "Seek %.*s [%" PRId32 + "] to " + "offset %s (v%" PRId32 ")", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_offset2str(offset), + version); - rd_kafka_toppar_op(rktp, RD_KAFKA_OP_SEEK, version, - offset, NULL, replyq); + rd_kafka_toppar_op(rktp, RD_KAFKA_OP_SEEK, version, offset, NULL, + replyq); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -2439,25 +2402,26 @@ rd_kafka_resp_err_t rd_kafka_toppar_op_seek (rd_kafka_toppar_t *rktp, * * @locality any */ -rd_kafka_resp_err_t -rd_kafka_toppar_op_pause_resume (rd_kafka_toppar_t *rktp, int pause, int flag, - rd_kafka_replyq_t replyq) { - int32_t version; - rd_kafka_op_t *rko; +rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp, + int pause, + int flag, + rd_kafka_replyq_t replyq) { + int32_t version; + rd_kafka_op_t *rko; - /* Bump version barrier. */ - version = rd_kafka_toppar_version_new_barrier(rktp); + /* Bump version barrier. */ + version = rd_kafka_toppar_version_new_barrier(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, pause ? "PAUSE":"RESUME", - "%s %.*s [%"PRId32"] (v%"PRId32")", - pause ? "Pause" : "Resume", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, version); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %.*s [%" PRId32 "] (v%" PRId32 ")", + pause ? 
"Pause" : "Resume", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, version); - rko = rd_kafka_op_new(RD_KAFKA_OP_PAUSE); - rko->rko_version = version; - rko->rko_u.pause.pause = pause; - rko->rko_u.pause.flag = flag; + rko = rd_kafka_op_new(RD_KAFKA_OP_PAUSE); + rko->rko_version = version; + rko->rko_u.pause.pause = pause; + rko->rko_u.pause.flag = flag; rd_kafka_toppar_op0(rktp, rko, replyq); @@ -2474,8 +2438,8 @@ rd_kafka_toppar_op_pause_resume (rd_kafka_toppar_t *rktp, int pause, int flag, * @locality any * @locks none needed */ -void rd_kafka_toppar_pause (rd_kafka_toppar_t *rktp, int flag) { - rd_kafka_toppar_op_pause_resume(rktp, 1/*pause*/, flag, +void rd_kafka_toppar_pause(rd_kafka_toppar_t *rktp, int flag) { + rd_kafka_toppar_op_pause_resume(rktp, 1 /*pause*/, flag, RD_KAFKA_NO_REPLYQ); } @@ -2488,8 +2452,8 @@ void rd_kafka_toppar_pause (rd_kafka_toppar_t *rktp, int flag) { * @locality any * @locks none needed */ -void rd_kafka_toppar_resume (rd_kafka_toppar_t *rktp, int flag) { - rd_kafka_toppar_op_pause_resume(rktp, 1/*pause*/, flag, +void rd_kafka_toppar_resume(rd_kafka_toppar_t *rktp, int flag) { + rd_kafka_toppar_op_pause_resume(rktp, 1 /*pause*/, flag, RD_KAFKA_NO_REPLYQ); } @@ -2510,37 +2474,40 @@ void rd_kafka_toppar_resume (rd_kafka_toppar_t *rktp, int flag) { * by toppar_pause() in the toppar's handler thread. */ rd_kafka_resp_err_t -rd_kafka_toppars_pause_resume (rd_kafka_t *rk, - rd_bool_t pause, rd_async_t async, int flag, - rd_kafka_topic_partition_list_t *partitions) { +rd_kafka_toppars_pause_resume(rd_kafka_t *rk, + rd_bool_t pause, + rd_async_t async, + int flag, + rd_kafka_topic_partition_list_t *partitions) { int i; - int waitcnt = 0; + int waitcnt = 0; rd_kafka_q_t *tmpq = NULL; if (!async) tmpq = rd_kafka_q_new(rk); - rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE":"RESUME", - "%s %s %d partition(s)", - flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "Application" : "Library", - pause ? 
"pausing" : "resuming", partitions->cnt); + rd_kafka_dbg( + rk, TOPIC, pause ? "PAUSE" : "RESUME", "%s %s %d partition(s)", + flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "Application" : "Library", + pause ? "pausing" : "resuming", partitions->cnt); - for (i = 0 ; i < partitions->cnt ; i++) { - rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; - rd_kafka_toppar_t *rktp; + for (i = 0; i < partitions->cnt; i++) { + rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; + rd_kafka_toppar_t *rktp; - rktp = rd_kafka_topic_partition_get_toppar(rk, rktpar, - rd_false); - if (!rktp) { - rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE":"RESUME", - "%s %s [%"PRId32"]: skipped: " - "unknown partition", - pause ? "Pause":"Resume", - rktpar->topic, rktpar->partition); + rktp = + rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false); + if (!rktp) { + rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %s [%" PRId32 + "]: skipped: " + "unknown partition", + pause ? "Pause" : "Resume", rktpar->topic, + rktpar->partition); - rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - continue; - } + rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + continue; + } rd_kafka_toppar_op_pause_resume(rktp, pause, flag, RD_KAFKA_REPLYQ(tmpq, 0)); @@ -2548,10 +2515,10 @@ rd_kafka_toppars_pause_resume (rd_kafka_t *rk, if (!async) waitcnt++; - rd_kafka_toppar_destroy(rktp); + rd_kafka_toppar_destroy(rktp); - rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; - } + rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; + } if (!async) { while (waitcnt-- > 0) @@ -2560,30 +2527,27 @@ rd_kafka_toppars_pause_resume (rd_kafka_t *rk, rd_kafka_q_destroy_owner(tmpq); } - return RD_KAFKA_RESP_ERR_NO_ERROR; + return RD_KAFKA_RESP_ERR_NO_ERROR; } - - /** * Propagate error for toppar */ -void rd_kafka_toppar_enq_error (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err, - const char *reason) { +void rd_kafka_toppar_enq_error(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err, + const char *reason) { rd_kafka_op_t *rko; 
char buf[512]; - rko = rd_kafka_op_new(RD_KAFKA_OP_ERR); + rko = rd_kafka_op_new(RD_KAFKA_OP_ERR); rko->rko_err = err; rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rd_snprintf(buf, sizeof(buf), "%.*s [%"PRId32"]: %s (%s)", + rd_snprintf(buf, sizeof(buf), "%.*s [%" PRId32 "]: %s (%s)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, reason, - rd_kafka_err2str(err)); + rktp->rktp_partition, reason, rd_kafka_err2str(err)); rko->rko_u.err.errstr = rd_strdup(buf); @@ -2592,8 +2556,6 @@ void rd_kafka_toppar_enq_error (rd_kafka_toppar_t *rktp, - - /** * Returns the currently delegated broker for this toppar. * If \p proper_broker is set NULL will be returned if current handler @@ -2603,8 +2565,8 @@ void rd_kafka_toppar_enq_error (rd_kafka_toppar_t *rktp, * * Locks: none */ -rd_kafka_broker_t *rd_kafka_toppar_broker (rd_kafka_toppar_t *rktp, - int proper_broker) { +rd_kafka_broker_t *rd_kafka_toppar_broker(rd_kafka_toppar_t *rktp, + int proper_broker) { rd_kafka_broker_t *rkb; rd_kafka_toppar_lock(rktp); rkb = rktp->rktp_broker; @@ -2628,13 +2590,13 @@ rd_kafka_broker_t *rd_kafka_toppar_broker (rd_kafka_toppar_t *rktp, * @locks none * @locality any */ -void rd_kafka_toppar_leader_unavailable (rd_kafka_toppar_t *rktp, - const char *reason, - rd_kafka_resp_err_t err) { +void rd_kafka_toppar_leader_unavailable(rd_kafka_toppar_t *rktp, + const char *reason, + rd_kafka_resp_err_t err) { rd_kafka_topic_t *rkt = rktp->rktp_rkt; rd_kafka_dbg(rkt->rkt_rk, TOPIC, "BROKERUA", - "%s [%"PRId32"]: broker unavailable: %s: %s", + "%s [%" PRId32 "]: broker unavailable: %s: %s", rkt->rkt_topic->str, rktp->rktp_partition, reason, rd_kafka_err2str(err)); @@ -2647,27 +2609,27 @@ void rd_kafka_toppar_leader_unavailable (rd_kafka_toppar_t *rktp, const char * -rd_kafka_topic_partition_topic (const rd_kafka_topic_partition_t *rktpar) { +rd_kafka_topic_partition_topic(const rd_kafka_topic_partition_t *rktpar) { const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar; 
return rktp->rktp_rkt->rkt_topic->str; } int32_t -rd_kafka_topic_partition_partition (const rd_kafka_topic_partition_t *rktpar) { +rd_kafka_topic_partition_partition(const rd_kafka_topic_partition_t *rktpar) { const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar; return rktp->rktp_partition; } -void rd_kafka_topic_partition_get (const rd_kafka_topic_partition_t *rktpar, - const char **name, int32_t *partition) { +void rd_kafka_topic_partition_get(const rd_kafka_topic_partition_t *rktpar, + const char **name, + int32_t *partition) { const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar; - *name = rktp->rktp_rkt->rkt_topic->str; - *partition = rktp->rktp_partition; + *name = rktp->rktp_rkt->rkt_topic->str; + *partition = rktp->rktp_partition; } - /** * * rd_kafka_topic_partition_t lists @@ -2677,24 +2639,23 @@ void rd_kafka_topic_partition_get (const rd_kafka_topic_partition_t *rktpar, static void -rd_kafka_topic_partition_list_grow (rd_kafka_topic_partition_list_t *rktparlist, - int add_size) { +rd_kafka_topic_partition_list_grow(rd_kafka_topic_partition_list_t *rktparlist, + int add_size) { if (add_size < rktparlist->size) add_size = RD_MAX(rktparlist->size, 32); rktparlist->size += add_size; - rktparlist->elems = rd_realloc(rktparlist->elems, - sizeof(*rktparlist->elems) * - rktparlist->size); - + rktparlist->elems = rd_realloc( + rktparlist->elems, sizeof(*rktparlist->elems) * rktparlist->size); } /** * @brief Initialize a list for fitting \p size partitions. */ -void rd_kafka_topic_partition_list_init ( - rd_kafka_topic_partition_list_t *rktparlist, int size) { +void rd_kafka_topic_partition_list_init( + rd_kafka_topic_partition_list_t *rktparlist, + int size) { memset(rktparlist, 0, sizeof(*rktparlist)); if (size > 0) @@ -2705,7 +2666,7 @@ void rd_kafka_topic_partition_list_init ( /** * Create a list for fitting 'size' topic_partitions (rktp). 
*/ -rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size) { +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size) { rd_kafka_topic_partition_list_t *rktparlist; rktparlist = rd_calloc(1, sizeof(*rktparlist)); @@ -2718,52 +2679,53 @@ rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size) { -rd_kafka_topic_partition_t *rd_kafka_topic_partition_new (const char *topic, - int32_t partition) { - rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar)); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_new(const char *topic, + int32_t partition) { + rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar)); - rktpar->topic = rd_strdup(topic); - rktpar->partition = partition; + rktpar->topic = rd_strdup(topic); + rktpar->partition = partition; - return rktpar; + return rktpar; } rd_kafka_topic_partition_t * -rd_kafka_topic_partition_copy (const rd_kafka_topic_partition_t *src) { +rd_kafka_topic_partition_copy(const rd_kafka_topic_partition_t *src) { return rd_kafka_topic_partition_new(src->topic, src->partition); } /** Same as above but with generic void* signature */ -void *rd_kafka_topic_partition_copy_void (const void *src) { +void *rd_kafka_topic_partition_copy_void(const void *src) { return rd_kafka_topic_partition_copy(src); } rd_kafka_topic_partition_t * -rd_kafka_topic_partition_new_from_rktp (rd_kafka_toppar_t *rktp) { - rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar)); +rd_kafka_topic_partition_new_from_rktp(rd_kafka_toppar_t *rktp) { + rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar)); - rktpar->topic = RD_KAFKAP_STR_DUP(rktp->rktp_rkt->rkt_topic); - rktpar->partition = rktp->rktp_partition; + rktpar->topic = RD_KAFKAP_STR_DUP(rktp->rktp_rkt->rkt_topic); + rktpar->partition = rktp->rktp_partition; - return rktpar; + return rktpar; } static void -rd_kafka_topic_partition_destroy0 (rd_kafka_topic_partition_t *rktpar, int do_free) { - if 
(rktpar->topic) - rd_free(rktpar->topic); - if (rktpar->metadata) - rd_free(rktpar->metadata); - if (rktpar->_private) - rd_kafka_toppar_destroy((rd_kafka_toppar_t *)rktpar->_private); - - if (do_free) - rd_free(rktpar); +rd_kafka_topic_partition_destroy0(rd_kafka_topic_partition_t *rktpar, + int do_free) { + if (rktpar->topic) + rd_free(rktpar->topic); + if (rktpar->metadata) + rd_free(rktpar->metadata); + if (rktpar->_private) + rd_kafka_toppar_destroy((rd_kafka_toppar_t *)rktpar->_private); + + if (do_free) + rd_free(rktpar); } @@ -2772,23 +2734,23 @@ rd_kafka_topic_partition_destroy0 (rd_kafka_topic_partition_t *rktpar, int do_fr * * @remark The allocated size of the list will not shrink. */ -void rd_kafka_topic_partition_list_clear ( - rd_kafka_topic_partition_list_t *rktparlist) { +void rd_kafka_topic_partition_list_clear( + rd_kafka_topic_partition_list_t *rktparlist) { int i; - for (i = 0 ; i < rktparlist->cnt ; i++) + for (i = 0; i < rktparlist->cnt; i++) rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0); rktparlist->cnt = 0; } -void rd_kafka_topic_partition_destroy_free (void *ptr) { - rd_kafka_topic_partition_destroy0(ptr, rd_true/*do_free*/); +void rd_kafka_topic_partition_destroy_free(void *ptr) { + rd_kafka_topic_partition_destroy0(ptr, rd_true /*do_free*/); } -void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar) { - rd_kafka_topic_partition_destroy0(rktpar, 1); +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar) { + rd_kafka_topic_partition_destroy0(rktpar, 1); } @@ -2796,12 +2758,12 @@ void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar) { * Destroys a list previously created with .._list_new() and drops * any references to contained toppars. 
*/ -void -rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rktparlist) { +void rd_kafka_topic_partition_list_destroy( + rd_kafka_topic_partition_list_t *rktparlist) { int i; - for (i = 0 ; i < rktparlist->cnt ; i++) - rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0); + for (i = 0; i < rktparlist->cnt; i++) + rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0); if (rktparlist->elems) rd_free(rktparlist->elems); @@ -2814,9 +2776,9 @@ rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rktparli * @brief Wrapper for rd_kafka_topic_partition_list_destroy() that * matches the standard free(void *) signature, for callback use. */ -void rd_kafka_topic_partition_list_destroy_free (void *ptr) { +void rd_kafka_topic_partition_list_destroy_free(void *ptr) { rd_kafka_topic_partition_list_destroy( - (rd_kafka_topic_partition_list_t *)ptr); + (rd_kafka_topic_partition_list_t *)ptr); } @@ -2829,10 +2791,12 @@ void rd_kafka_topic_partition_list_destroy_free (void *ptr) { * Returns a pointer to the added element. 
*/ rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_add0 (const char *func, int line, - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, - rd_kafka_toppar_t *_private) { +rd_kafka_topic_partition_list_add0(const char *func, + int line, + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + rd_kafka_toppar_t *_private) { rd_kafka_topic_partition_t *rktpar; if (rktparlist->cnt == rktparlist->size) rd_kafka_topic_partition_list_grow(rktparlist, 1); @@ -2840,10 +2804,10 @@ rd_kafka_topic_partition_list_add0 (const char *func, int line, rktpar = &rktparlist->elems[rktparlist->cnt++]; memset(rktpar, 0, sizeof(*rktpar)); - rktpar->topic = rd_strdup(topic); + rktpar->topic = rd_strdup(topic); rktpar->partition = partition; - rktpar->offset = RD_KAFKA_OFFSET_INVALID; - rktpar->_private = _private; + rktpar->offset = RD_KAFKA_OFFSET_INVALID; + rktpar->_private = _private; if (_private) rd_kafka_toppar_keep_fl(func, line, _private); @@ -2852,36 +2816,36 @@ rd_kafka_topic_partition_list_add0 (const char *func, int line, rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition) { - return rd_kafka_topic_partition_list_add0(__FUNCTION__,__LINE__, - rktparlist, - topic, partition, NULL); +rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition) { + return rd_kafka_topic_partition_list_add0( + __FUNCTION__, __LINE__, rktparlist, topic, partition, NULL); } /** * Adds a consecutive list of partitions to a list */ -void -rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t - *rktparlist, - const char *topic, - int32_t start, int32_t stop) { +void rd_kafka_topic_partition_list_add_range( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t start, + int32_t stop) { - for (; start <= stop ; start++) + for (; 
start <= stop; start++) rd_kafka_topic_partition_list_add(rktparlist, topic, start); } -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_upsert ( - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition) { +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition) { rd_kafka_topic_partition_t *rktpar; - if ((rktpar = rd_kafka_topic_partition_list_find(rktparlist, - topic, partition))) + if ((rktpar = rd_kafka_topic_partition_list_find(rktparlist, topic, + partition))) return rktpar; return rd_kafka_topic_partition_list_add(rktparlist, topic, partition); @@ -2891,19 +2855,20 @@ rd_kafka_topic_partition_list_upsert ( /** * @brief Update \p dst with info from \p src. */ -void rd_kafka_topic_partition_update (rd_kafka_topic_partition_t *dst, - const rd_kafka_topic_partition_t *src) { +void rd_kafka_topic_partition_update(rd_kafka_topic_partition_t *dst, + const rd_kafka_topic_partition_t *src) { rd_dassert(!strcmp(dst->topic, src->topic)); rd_dassert(dst->partition == src->partition); rd_dassert(dst != src); dst->offset = src->offset; dst->opaque = src->opaque; - dst->err = src->err; + dst->err = src->err; if (src->metadata_size > 0) { - dst->metadata = rd_malloc(src->metadata_size); - dst->metadata_size = src->metadata_size;; + dst->metadata = rd_malloc(src->metadata_size); + dst->metadata_size = src->metadata_size; + ; memcpy(dst->metadata, src->metadata, dst->metadata_size); } } @@ -2911,17 +2876,14 @@ void rd_kafka_topic_partition_update (rd_kafka_topic_partition_t *dst, /** * @brief Creates a copy of \p rktpar and adds it to \p rktparlist */ -void rd_kafka_topic_partition_list_add_copy ( - rd_kafka_topic_partition_list_t *rktparlist, - const rd_kafka_topic_partition_t *rktpar) { +void rd_kafka_topic_partition_list_add_copy( + rd_kafka_topic_partition_list_t *rktparlist, + const rd_kafka_topic_partition_t *rktpar) { 
rd_kafka_topic_partition_t *dst; dst = rd_kafka_topic_partition_list_add0( - __FUNCTION__,__LINE__, - rktparlist, - rktpar->topic, - rktpar->partition, - rktpar->_private); + __FUNCTION__, __LINE__, rktparlist, rktpar->topic, + rktpar->partition, rktpar->_private); rd_kafka_topic_partition_update(dst, rktpar); } @@ -2932,13 +2894,13 @@ void rd_kafka_topic_partition_list_add_copy ( * Create and return a copy of list 'src' */ rd_kafka_topic_partition_list_t * -rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src){ +rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src) { rd_kafka_topic_partition_list_t *dst; int i; dst = rd_kafka_topic_partition_list_new(src->size); - for (i = 0 ; i < src->cnt ; i++) + for (i = 0; i < src->cnt; i++) rd_kafka_topic_partition_list_add_copy(dst, &src->elems[i]); return dst; } @@ -2947,8 +2909,7 @@ rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src){ * @brief Same as rd_kafka_topic_partition_list_copy() but suitable for * rd_list_copy(). The \p opaque is ignored. */ -void * -rd_kafka_topic_partition_list_copy_opaque (const void *src, void *opaque) { +void *rd_kafka_topic_partition_list_copy_opaque(const void *src, void *opaque) { return rd_kafka_topic_partition_list_copy(src); } @@ -2956,9 +2917,9 @@ rd_kafka_topic_partition_list_copy_opaque (const void *src, void *opaque) { * @brief Append copies of all elements in \p src to \p dst. * No duplicate-checks are performed. 
*/ -void rd_kafka_topic_partition_list_add_list ( - rd_kafka_topic_partition_list_t *dst, - const rd_kafka_topic_partition_list_t *src) { +void rd_kafka_topic_partition_list_add_list( + rd_kafka_topic_partition_list_t *dst, + const rd_kafka_topic_partition_list_t *src) { int i; if (src->cnt == 0) @@ -2967,7 +2928,7 @@ void rd_kafka_topic_partition_list_add_list ( if (dst->size < dst->cnt + src->cnt) rd_kafka_topic_partition_list_grow(dst, src->cnt); - for (i = 0 ; i < src->cnt ; i++) + for (i = 0; i < src->cnt; i++) rd_kafka_topic_partition_list_add_copy(dst, &src->elems[i]); } @@ -2977,9 +2938,9 @@ void rd_kafka_topic_partition_list_add_list ( * * @warning This is an O(Na*Nb) operation. */ -int -rd_kafka_topic_partition_list_cmp (const void *_a, const void *_b, - int (*cmp) (const void *, const void *)) { +int rd_kafka_topic_partition_list_cmp(const void *_a, + const void *_b, + int (*cmp)(const void *, const void *)) { const rd_kafka_topic_partition_list_t *a = _a, *b = _b; int r; int i; @@ -2992,10 +2953,10 @@ rd_kafka_topic_partition_list_cmp (const void *_a, const void *_b, * for each element in A. * FIXME: If the list sizes are larger than X we could create a * temporary hash map instead. */ - for (i = 0 ; i < a->cnt ; i++) { + for (i = 0; i < a->cnt; i++) { int j; - for (j = 0 ; j < b->cnt ; j++) { + for (j = 0; j < b->cnt; j++) { r = cmp(&a->elems[i], &b->elems[j]); if (!r) break; @@ -3016,15 +2977,12 @@ rd_kafka_topic_partition_list_cmp (const void *_a, const void *_b, * WITHOUT refcnt increased. 
*/ rd_kafka_toppar_t * -rd_kafka_topic_partition_ensure_toppar (rd_kafka_t *rk, - rd_kafka_topic_partition_t *rktpar, - rd_bool_t create_on_miss) { +rd_kafka_topic_partition_ensure_toppar(rd_kafka_t *rk, + rd_kafka_topic_partition_t *rktpar, + rd_bool_t create_on_miss) { if (!rktpar->_private) - rktpar->_private = - rd_kafka_toppar_get2(rk, - rktpar->topic, - rktpar->partition, 0, - create_on_miss); + rktpar->_private = rd_kafka_toppar_get2( + rk, rktpar->topic, rktpar->partition, 0, create_on_miss); return rktpar->_private; } @@ -3034,13 +2992,13 @@ rd_kafka_topic_partition_ensure_toppar (rd_kafka_t *rk, * @remark a new reference is returned. */ rd_kafka_toppar_t * -rd_kafka_topic_partition_get_toppar (rd_kafka_t *rk, - rd_kafka_topic_partition_t *rktpar, - rd_bool_t create_on_miss) { +rd_kafka_topic_partition_get_toppar(rd_kafka_t *rk, + rd_kafka_topic_partition_t *rktpar, + rd_bool_t create_on_miss) { rd_kafka_toppar_t *rktp; - rktp = rd_kafka_topic_partition_ensure_toppar(rk, rktpar, - create_on_miss); + rktp = + rd_kafka_topic_partition_ensure_toppar(rk, rktpar, create_on_miss); if (rktp) rd_kafka_toppar_keep(rktp); @@ -3049,10 +3007,10 @@ rd_kafka_topic_partition_get_toppar (rd_kafka_t *rk, } -int rd_kafka_topic_partition_cmp (const void *_a, const void *_b) { +int rd_kafka_topic_partition_cmp(const void *_a, const void *_b) { const rd_kafka_topic_partition_t *a = _a; const rd_kafka_topic_partition_t *b = _b; - int r = strcmp(a->topic, b->topic); + int r = strcmp(a->topic, b->topic); if (r) return r; else @@ -3060,21 +3018,22 @@ int rd_kafka_topic_partition_cmp (const void *_a, const void *_b) { } /** @brief Compare only the topic */ -int rd_kafka_topic_partition_cmp_topic (const void *_a, const void *_b) { +int rd_kafka_topic_partition_cmp_topic(const void *_a, const void *_b) { const rd_kafka_topic_partition_t *a = _a; const rd_kafka_topic_partition_t *b = _b; return strcmp(a->topic, b->topic); } -static int rd_kafka_topic_partition_cmp_opaque (const void 
*_a, const void *_b, - void *opaque) { +static int rd_kafka_topic_partition_cmp_opaque(const void *_a, + const void *_b, + void *opaque) { return rd_kafka_topic_partition_cmp(_a, _b); } /** @returns a hash of the topic and partition */ -unsigned int rd_kafka_topic_partition_hash (const void *_a) { +unsigned int rd_kafka_topic_partition_hash(const void *_a) { const rd_kafka_topic_partition_t *a = _a; - int r = 31 * 17 + a->partition; + int r = 31 * 17 + a->partition; return 31 * r + rd_string_hash(a->topic, -1); } @@ -3084,18 +3043,18 @@ unsigned int rd_kafka_topic_partition_hash (const void *_a) { * @brief Search 'rktparlist' for 'topic' and 'partition'. * @returns the elems[] index or -1 on miss. */ -static int -rd_kafka_topic_partition_list_find0 ( - const rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, - int (*cmp) (const void *, const void *)) { +static int rd_kafka_topic_partition_list_find0( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + int (*cmp)(const void *, const void *)) { rd_kafka_topic_partition_t skel; int i; - skel.topic = (char *)topic; + skel.topic = (char *)topic; skel.partition = partition; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { if (!cmp(&skel, &rktparlist->elems[i])) return i; } @@ -3103,12 +3062,12 @@ rd_kafka_topic_partition_list_find0 ( return -1; } -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_find ( - const rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition) { +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition) { int i = rd_kafka_topic_partition_list_find0( - rktparlist, topic, partition, rd_kafka_topic_partition_cmp); + rktparlist, topic, partition, rd_kafka_topic_partition_cmp); if (i == -1) return NULL; else @@ -3116,24 +3075,24 @@ 
rd_kafka_topic_partition_list_find ( } -int -rd_kafka_topic_partition_list_find_idx ( - const rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition) { +int rd_kafka_topic_partition_list_find_idx( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition) { return rd_kafka_topic_partition_list_find0( - rktparlist, topic, partition, rd_kafka_topic_partition_cmp); + rktparlist, topic, partition, rd_kafka_topic_partition_cmp); } /** * @returns the first element that matches \p topic, regardless of partition. */ -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_find_topic ( - const rd_kafka_topic_partition_list_t *rktparlist, const char *topic) { +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic) { int i = rd_kafka_topic_partition_list_find0( - rktparlist, topic, RD_KAFKA_PARTITION_UA, - rd_kafka_topic_partition_cmp_topic); + rktparlist, topic, RD_KAFKA_PARTITION_UA, + rd_kafka_topic_partition_cmp_topic); if (i == -1) return NULL; else @@ -3141,30 +3100,31 @@ rd_kafka_topic_partition_list_find_topic ( } -int -rd_kafka_topic_partition_list_del_by_idx (rd_kafka_topic_partition_list_t *rktparlist, - int idx) { - if (unlikely(idx < 0 || idx >= rktparlist->cnt)) - return 0; +int rd_kafka_topic_partition_list_del_by_idx( + rd_kafka_topic_partition_list_t *rktparlist, + int idx) { + if (unlikely(idx < 0 || idx >= rktparlist->cnt)) + return 0; - rd_kafka_topic_partition_destroy0(&rktparlist->elems[idx], 0); - memmove(&rktparlist->elems[idx], &rktparlist->elems[idx+1], - (rktparlist->cnt - idx - 1) * sizeof(rktparlist->elems[idx])); - rktparlist->cnt--; + rd_kafka_topic_partition_destroy0(&rktparlist->elems[idx], 0); + memmove(&rktparlist->elems[idx], &rktparlist->elems[idx + 1], + (rktparlist->cnt - idx - 1) * sizeof(rktparlist->elems[idx])); + rktparlist->cnt--; - return 1; + return 1; } -int 
-rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition) { +int rd_kafka_topic_partition_list_del( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition) { int i = rd_kafka_topic_partition_list_find0( - rktparlist, topic, partition, rd_kafka_topic_partition_cmp); - if (i == -1) - return 0; + rktparlist, topic, partition, rd_kafka_topic_partition_cmp); + if (i == -1) + return 0; - return rd_kafka_topic_partition_list_del_by_idx(rktparlist, i); + return rd_kafka_topic_partition_list_del_by_idx(rktparlist, i); } @@ -3173,89 +3133,89 @@ rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, * Returns true if 'topic' matches the 'rktpar', else false. * On match, if rktpar is a regex pattern then 'matched_by_regex' is set to 1. */ -int rd_kafka_topic_partition_match (rd_kafka_t *rk, - const rd_kafka_group_member_t *rkgm, - const rd_kafka_topic_partition_t *rktpar, - const char *topic, int *matched_by_regex) { - int ret = 0; - - if (*rktpar->topic == '^') { - char errstr[128]; - - ret = rd_regex_match(rktpar->topic, topic, - errstr, sizeof(errstr)); - if (ret == -1) { - rd_kafka_dbg(rk, CGRP, - "SUBMATCH", - "Invalid regex for member " - "\"%.*s\" subscription \"%s\": %s", - RD_KAFKAP_STR_PR(rkgm->rkgm_member_id), - rktpar->topic, errstr); - return 0; - } - - if (ret && matched_by_regex) - *matched_by_regex = 1; - - } else if (!strcmp(rktpar->topic, topic)) { - - if (matched_by_regex) - *matched_by_regex = 0; - - ret = 1; - } - - return ret; +int rd_kafka_topic_partition_match(rd_kafka_t *rk, + const rd_kafka_group_member_t *rkgm, + const rd_kafka_topic_partition_t *rktpar, + const char *topic, + int *matched_by_regex) { + int ret = 0; + + if (*rktpar->topic == '^') { + char errstr[128]; + + ret = rd_regex_match(rktpar->topic, topic, errstr, + sizeof(errstr)); + if (ret == -1) { + rd_kafka_dbg(rk, CGRP, "SUBMATCH", + "Invalid regex for member " + 
"\"%.*s\" subscription \"%s\": %s", + RD_KAFKAP_STR_PR(rkgm->rkgm_member_id), + rktpar->topic, errstr); + return 0; + } + + if (ret && matched_by_regex) + *matched_by_regex = 1; + + } else if (!strcmp(rktpar->topic, topic)) { + + if (matched_by_regex) + *matched_by_regex = 0; + + ret = 1; + } + + return ret; } -void rd_kafka_topic_partition_list_sort ( - rd_kafka_topic_partition_list_t *rktparlist, - int (*cmp) (const void *, const void *, void *), - void *opaque) { +void rd_kafka_topic_partition_list_sort( + rd_kafka_topic_partition_list_t *rktparlist, + int (*cmp)(const void *, const void *, void *), + void *opaque) { if (!cmp) cmp = rd_kafka_topic_partition_cmp_opaque; rd_qsort_r(rktparlist->elems, rktparlist->cnt, - sizeof(*rktparlist->elems), - cmp, opaque); + sizeof(*rktparlist->elems), cmp, opaque); } -void rd_kafka_topic_partition_list_sort_by_topic ( - rd_kafka_topic_partition_list_t *rktparlist) { - rd_kafka_topic_partition_list_sort(rktparlist, - rd_kafka_topic_partition_cmp_opaque, - NULL); +void rd_kafka_topic_partition_list_sort_by_topic( + rd_kafka_topic_partition_list_t *rktparlist) { + rd_kafka_topic_partition_list_sort( + rktparlist, rd_kafka_topic_partition_cmp_opaque, NULL); } -rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, int64_t offset) { - rd_kafka_topic_partition_t *rktpar; +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + int64_t offset) { + rd_kafka_topic_partition_t *rktpar; - if (!(rktpar = rd_kafka_topic_partition_list_find(rktparlist, - topic, partition))) - return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + if (!(rktpar = rd_kafka_topic_partition_list_find(rktparlist, topic, + partition))) + return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - rktpar->offset = offset; + rktpar->offset = offset; - return RD_KAFKA_RESP_ERR_NO_ERROR; + return 
RD_KAFKA_RESP_ERR_NO_ERROR; } /** * @brief Reset all offsets to the provided value. */ -void -rd_kafka_topic_partition_list_reset_offsets (rd_kafka_topic_partition_list_t *rktparlist, - int64_t offset) { +void rd_kafka_topic_partition_list_reset_offsets( + rd_kafka_topic_partition_list_t *rktparlist, + int64_t offset) { int i; - for (i = 0 ; i < rktparlist->cnt ; i++) - rktparlist->elems[i].offset = offset; + for (i = 0; i < rktparlist->cnt; i++) + rktparlist->elems[i].offset = offset; } @@ -3268,16 +3228,18 @@ rd_kafka_topic_partition_list_reset_offsets (rd_kafka_topic_partition_list_t *rk * * Returns the number of valid non-logical offsets (>=0). */ -int rd_kafka_topic_partition_list_set_offsets ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - int from_rktp, int64_t def_value, int is_commit) { +int rd_kafka_topic_partition_list_set_offsets( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + int from_rktp, + int64_t def_value, + int is_commit) { int i; - int valid_cnt = 0; + int valid_cnt = 0; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; - const char *verb = "setting"; + const char *verb = "setting"; char preamble[80]; *preamble = '\0'; /* Avoid warning */ @@ -3286,67 +3248,67 @@ int rd_kafka_topic_partition_list_set_offsets ( rd_kafka_toppar_t *rktp = rktpar->_private; rd_kafka_toppar_lock(rktp); - if (rk->rk_conf.debug & (RD_KAFKA_DBG_CGRP | - RD_KAFKA_DBG_TOPIC)) + if (rk->rk_conf.debug & + (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_TOPIC)) rd_snprintf(preamble, sizeof(preamble), - "stored offset %"PRId64 - ", committed offset %"PRId64": ", + "stored offset %" PRId64 + ", committed offset %" PRId64 ": ", rktp->rktp_stored_offset, rktp->rktp_committed_offset); - if (rktp->rktp_stored_offset > - rktp->rktp_committed_offset) { - verb = "setting stored"; - rktpar->offset = rktp->rktp_stored_offset; - } else { - rktpar->offset = 
RD_KAFKA_OFFSET_INVALID; - } + if (rktp->rktp_stored_offset > + rktp->rktp_committed_offset) { + verb = "setting stored"; + rktpar->offset = rktp->rktp_stored_offset; + } else { + rktpar->offset = RD_KAFKA_OFFSET_INVALID; + } rd_kafka_toppar_unlock(rktp); } else { - if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) { - verb = "setting default"; - rktpar->offset = def_value; - } else - verb = "keeping"; + if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) { + verb = "setting default"; + rktpar->offset = def_value; + } else + verb = "keeping"; } if (is_commit && rktpar->offset == RD_KAFKA_OFFSET_INVALID) rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET", - "Topic %s [%"PRId32"]: " + "Topic %s [%" PRId32 + "]: " "%snot including in commit", rktpar->topic, rktpar->partition, preamble); else rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET", - "Topic %s [%"PRId32"]: " + "Topic %s [%" PRId32 + "]: " "%s%s offset %s%s", - rktpar->topic, rktpar->partition, - preamble, - verb, - rd_kafka_offset2str(rktpar->offset), + rktpar->topic, rktpar->partition, preamble, + verb, rd_kafka_offset2str(rktpar->offset), is_commit ? " for commit" : ""); - if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) - valid_cnt++; + if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) + valid_cnt++; } - return valid_cnt; + return valid_cnt; } /** * @returns the number of partitions with absolute (non-logical) offsets set. 
*/ -int rd_kafka_topic_partition_list_count_abs_offsets ( - const rd_kafka_topic_partition_list_t *rktparlist) { - int i; - int valid_cnt = 0; +int rd_kafka_topic_partition_list_count_abs_offsets( + const rd_kafka_topic_partition_list_t *rktparlist) { + int i; + int valid_cnt = 0; - for (i = 0 ; i < rktparlist->cnt ; i++) - if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktparlist->elems[i].offset)) - valid_cnt++; + for (i = 0; i < rktparlist->cnt; i++) + if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktparlist->elems[i].offset)) + valid_cnt++; - return valid_cnt; + return valid_cnt; } @@ -3356,23 +3318,18 @@ int rd_kafka_topic_partition_list_count_abs_offsets ( * * @param create_on_miss Create partition (and topic_t object) if necessary. */ -void -rd_kafka_topic_partition_list_update_toppars (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t - *rktparlist, - rd_bool_t create_on_miss) { +void rd_kafka_topic_partition_list_update_toppars( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_bool_t create_on_miss) { int i; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; if (!rktpar->_private) - rktpar->_private = - rd_kafka_toppar_get2(rk, - rktpar->topic, - rktpar->partition, - 0/*not ua-on-miss*/, - create_on_miss); - + rktpar->_private = rd_kafka_toppar_get2( + rk, rktpar->topic, rktpar->partition, + 0 /*not ua-on-miss*/, create_on_miss); } } @@ -3408,14 +3365,13 @@ rd_kafka_topic_partition_list_update_toppars (rd_kafka_t *rk, * * @locks rd_kafka_*lock() MUST NOT be held */ -static rd_bool_t -rd_kafka_topic_partition_list_get_leaders ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *leaders, - rd_list_t *query_topics, - rd_bool_t query_unknown, - rd_kafka_enq_once_t *eonce) { +static rd_bool_t rd_kafka_topic_partition_list_get_leaders( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *leaders, + rd_list_t *query_topics, + 
rd_bool_t query_unknown, + rd_kafka_enq_once_t *eonce) { rd_bool_t complete; int cnt = 0; int i; @@ -3425,7 +3381,7 @@ rd_kafka_topic_partition_list_get_leaders ( else rd_kafka_rdlock(rk); - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; rd_kafka_topic_partition_t *rktpar2; rd_kafka_broker_t *rkb = NULL; @@ -3436,16 +3392,14 @@ rd_kafka_topic_partition_list_get_leaders ( rd_bool_t topic_wait_cache; rd_kafka_metadata_cache_topic_partition_get( - rk, &mtopic, &mpart, - rktpar->topic, rktpar->partition, - 0/*negative entries too*/); + rk, &mtopic, &mpart, rktpar->topic, rktpar->partition, + 0 /*negative entries too*/); topic_wait_cache = - !mtopic || - RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(mtopic->err); + !mtopic || + RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(mtopic->err); - if (!topic_wait_cache && - mtopic && + if (!topic_wait_cache && mtopic && mtopic->err != RD_KAFKA_RESP_ERR_NO_ERROR && mtopic->err != RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) { /* Topic permanently errored */ @@ -3463,13 +3417,13 @@ rd_kafka_topic_partition_list_get_leaders ( if (mpart && (mpart->leader == -1 || !(rkb = rd_kafka_broker_find_by_nodeid0( - rk, mpart->leader, -1/*any state*/, - rd_false)))) { + rk, mpart->leader, -1 /*any state*/, rd_false)))) { /* Partition has no (valid) leader. * This is a permanent error. */ rktpar->err = - mtopic->err ? mtopic->err : - RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE; + mtopic->err + ? mtopic->err + : RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE; continue; } @@ -3500,21 +3454,20 @@ rd_kafka_topic_partition_list_get_leaders ( rd_list_add(leaders, leader); } - rktpar2 = rd_kafka_topic_partition_list_find(leader->partitions, - rktpar->topic, - rktpar->partition); + rktpar2 = rd_kafka_topic_partition_list_find( + leader->partitions, rktpar->topic, rktpar->partition); if (rktpar2) { /* Already exists in partitions list, just update. 
*/ rd_kafka_topic_partition_update(rktpar2, rktpar); } else { /* Make a copy of rktpar and add to partitions list */ rd_kafka_topic_partition_list_add_copy( - leader->partitions, rktpar); + leader->partitions, rktpar); } rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_broker_destroy(rkb); /* loose refcount */ + rd_kafka_broker_destroy(rkb); /* loose refcount */ cnt++; } @@ -3537,8 +3490,8 @@ rd_kafka_topic_partition_list_get_leaders ( * @brief Timer timeout callback for query_leaders_async rko's eonce object. */ static void -rd_kafka_partition_leader_query_eonce_timeout_cb (rd_kafka_timers_t *rkts, - void *arg) { +rd_kafka_partition_leader_query_eonce_timeout_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_enq_once_t *eonce = arg; rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__TIMED_OUT, "timeout timer"); @@ -3549,8 +3502,8 @@ rd_kafka_partition_leader_query_eonce_timeout_cb (rd_kafka_timers_t *rkts, * @brief Query timer callback for query_leaders_async rko's eonce object. */ static void -rd_kafka_partition_leader_query_eonce_timer_cb (rd_kafka_timers_t *rkts, - void *arg) { +rd_kafka_partition_leader_query_eonce_timer_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_enq_once_t *eonce = arg; rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR, "query timer"); @@ -3565,7 +3518,7 @@ rd_kafka_partition_leader_query_eonce_timer_cb (rd_kafka_timers_t *rkts, * @locality any */ static rd_kafka_op_res_t -rd_kafka_topic_partition_list_query_leaders_async_worker (rd_kafka_op_t *rko) { +rd_kafka_topic_partition_list_query_leaders_async_worker(rd_kafka_op_t *rko) { rd_kafka_t *rk = rko->rko_rk; rd_list_t query_topics, *leaders = NULL; rd_kafka_op_t *reply; @@ -3579,8 +3532,8 @@ rd_kafka_topic_partition_list_query_leaders_async_worker (rd_kafka_op_t *rko) { * are known we need to re-enable the eonce to be triggered again (which * is not necessary the first time we get here, but there * is no harm doing it then either). 
*/ - rd_kafka_enq_once_reenable(rko->rko_u.leaders.eonce, - rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + rd_kafka_enq_once_reenable(rko->rko_u.leaders.eonce, rko, + RD_KAFKA_REPLYQ(rk->rk_ops, 0)); /* Look up the leaders in the metadata cache, if not all leaders * are known the eonce is registered for metadata cache changes @@ -3591,21 +3544,18 @@ rd_kafka_topic_partition_list_query_leaders_async_worker (rd_kafka_op_t *rko) { * hopefully get all leaders, otherwise defer a new async wait. * Repeat until success or timeout. */ - rd_list_init(&query_topics, 4 + rko->rko_u.leaders.partitions->cnt/2, + rd_list_init(&query_topics, 4 + rko->rko_u.leaders.partitions->cnt / 2, rd_free); leaders = rd_list_new(1 + rko->rko_u.leaders.partitions->cnt / 2, rd_kafka_partition_leader_destroy_free); if (rd_kafka_topic_partition_list_get_leaders( - rk, rko->rko_u.leaders.partitions, - leaders, - &query_topics, - /* Add unknown topics to query_topics only on the - * first query, after that we consider them permanently - * non-existent */ - rko->rko_u.leaders.query_cnt == 0, - rko->rko_u.leaders.eonce)) { + rk, rko->rko_u.leaders.partitions, leaders, &query_topics, + /* Add unknown topics to query_topics only on the + * first query, after that we consider them permanently + * non-existent */ + rko->rko_u.leaders.query_cnt == 0, rko->rko_u.leaders.eonce)) { /* All leaders now known (or failed), reply to caller */ rd_list_destroy(&query_topics); goto reply; @@ -3628,21 +3578,16 @@ rd_kafka_topic_partition_list_query_leaders_async_worker (rd_kafka_op_t *rko) { rd_kafka_enq_once_add_source(rko->rko_u.leaders.eonce, "query timer"); rd_kafka_timer_start_oneshot( - &rk->rk_timers, - &rko->rko_u.leaders.query_tmr, - rd_true, - 3*1000*1000 /* 3s */, - rd_kafka_partition_leader_query_eonce_timer_cb, - rko->rko_u.leaders.eonce); + &rk->rk_timers, &rko->rko_u.leaders.query_tmr, rd_true, + 3 * 1000 * 1000 /* 3s */, + rd_kafka_partition_leader_query_eonce_timer_cb, + rko->rko_u.leaders.eonce); /* 
Request metadata refresh */ rd_kafka_metadata_refresh_topics( - rk, NULL, &query_topics, - rd_true/*force*/, - rd_false/*!allow_auto_create*/, - rd_false/*!cgrp_update*/, - "query partition leaders"); - + rk, NULL, &query_topics, rd_true /*force*/, + rd_false /*!allow_auto_create*/, rd_false /*!cgrp_update*/, + "query partition leaders"); } rd_list_destroy(leaders); @@ -3651,16 +3596,14 @@ rd_kafka_topic_partition_list_query_leaders_async_worker (rd_kafka_op_t *rko) { /* Wait for next eonce trigger */ return RD_KAFKA_OP_RES_KEEP; /* rko is still used */ - reply: +reply: /* Decommission worker state and reply to caller */ - if (rd_kafka_timer_stop(&rk->rk_timers, - &rko->rko_u.leaders.query_tmr, + if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.query_tmr, RD_DO_LOCK)) rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, "query timer"); - if (rd_kafka_timer_stop(&rk->rk_timers, - &rko->rko_u.leaders.timeout_tmr, + if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.timeout_tmr, RD_DO_LOCK)) rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, "timeout timer"); @@ -3685,13 +3628,13 @@ rd_kafka_topic_partition_list_query_leaders_async_worker (rd_kafka_op_t *rko) { rd_kafka_op_get_reply_version(reply, rko); reply->rko_err = rko->rko_err; reply->rko_u.leaders.partitions = - rko->rko_u.leaders.partitions; /* Transfer ownership for - * partition list that - * now contains - * per-partition errors*/ + rko->rko_u.leaders.partitions; /* Transfer ownership for + * partition list that + * now contains + * per-partition errors*/ rko->rko_u.leaders.partitions = NULL; - reply->rko_u.leaders.leaders = leaders; /* Possibly NULL */ - reply->rko_u.leaders.opaque = rko->rko_u.leaders.opaque; + reply->rko_u.leaders.leaders = leaders; /* Possibly NULL */ + reply->rko_u.leaders.opaque = rko->rko_u.leaders.opaque; rd_kafka_replyq_enq(&rko->rko_u.leaders.replyq, reply, 0); } @@ -3701,10 +3644,10 @@ rd_kafka_topic_partition_list_query_leaders_async_worker 
(rd_kafka_op_t *rko) { static rd_kafka_op_res_t -rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb ( - rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb( + rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { return rd_kafka_topic_partition_list_query_leaders_async_worker(rko); } @@ -3723,43 +3666,39 @@ rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb ( * * @remark rd_kafka_*lock() MUST NOT be held */ -void -rd_kafka_topic_partition_list_query_leaders_async ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *rktparlist, - int timeout_ms, - rd_kafka_replyq_t replyq, - rd_kafka_op_cb_t *cb, - void *opaque) { +void rd_kafka_topic_partition_list_query_leaders_async( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *rktparlist, + int timeout_ms, + rd_kafka_replyq_t replyq, + rd_kafka_op_cb_t *cb, + void *opaque) { rd_kafka_op_t *rko; rd_assert(rktparlist && rktparlist->cnt > 0); rd_assert(replyq.q); rko = rd_kafka_op_new_cb( - rk, - RD_KAFKA_OP_LEADERS, - rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb); + rk, RD_KAFKA_OP_LEADERS, + rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb); rko->rko_u.leaders.replyq = replyq; rko->rko_u.leaders.partitions = - rd_kafka_topic_partition_list_copy(rktparlist); + rd_kafka_topic_partition_list_copy(rktparlist); rko->rko_u.leaders.ts_timeout = rd_timeout_init(timeout_ms); - rko->rko_u.leaders.cb = cb; - rko->rko_u.leaders.opaque = opaque; + rko->rko_u.leaders.cb = cb; + rko->rko_u.leaders.opaque = opaque; /* Create an eonce to be triggered either by metadata cache update * (from refresh_topics()), query interval, or timeout. 
*/ - rko->rko_u.leaders.eonce = rd_kafka_enq_once_new( - rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + rko->rko_u.leaders.eonce = + rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); rd_kafka_enq_once_add_source(rko->rko_u.leaders.eonce, "timeout timer"); rd_kafka_timer_start_oneshot( - &rk->rk_timers, - &rko->rko_u.leaders.timeout_tmr, - rd_true, - rd_timeout_remains_us(rko->rko_u.leaders.ts_timeout), - rd_kafka_partition_leader_query_eonce_timeout_cb, - rko->rko_u.leaders.eonce); + &rk->rk_timers, &rko->rko_u.leaders.timeout_tmr, rd_true, + rd_timeout_remains_us(rko->rko_u.leaders.ts_timeout), + rd_kafka_partition_leader_query_eonce_timeout_cb, + rko->rko_u.leaders.eonce); if (rd_kafka_topic_partition_list_query_leaders_async_worker(rko) == RD_KAFKA_OP_RES_HANDLED) @@ -3781,16 +3720,16 @@ rd_kafka_topic_partition_list_query_leaders_async ( * * @locks rd_kafka_*lock() MUST NOT be held */ -rd_kafka_resp_err_t -rd_kafka_topic_partition_list_query_leaders ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *leaders, int timeout_ms) { - rd_ts_t ts_end = rd_timeout_init(timeout_ms); +rd_kafka_resp_err_t rd_kafka_topic_partition_list_query_leaders( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *leaders, + int timeout_ms) { + rd_ts_t ts_end = rd_timeout_init(timeout_ms); rd_ts_t ts_query = 0; rd_ts_t now; int query_cnt = 0; - int i = 0; + int i = 0; /* Get all the partition leaders, try multiple times: * if there are no leaders after the first run fire off a leader @@ -3804,12 +3743,11 @@ rd_kafka_topic_partition_list_query_leaders ( rd_list_init(&query_topics, rktparlist->cnt, rd_free); rd_kafka_topic_partition_list_get_leaders( - rk, rktparlist, leaders, &query_topics, - /* Add unknown topics to query_topics only on the - * first query, after that we consider them - * permanently non-existent */ - query_cnt == 0, - NULL); + rk, rktparlist, leaders, &query_topics, + /* Add unknown topics to query_topics only 
on the + * first query, after that we consider them + * permanently non-existent */ + query_cnt == 0, NULL); if (rd_list_empty(&query_topics)) { /* No remaining topics to query: leader-list complete.*/ @@ -3828,27 +3766,26 @@ rd_kafka_topic_partition_list_query_leaders ( /* * Missing leader for some partitions */ - query_intvl = (i+1) * 100; /* add 100ms per iteration */ - if (query_intvl > 2*1000) - query_intvl = 2*1000; /* Cap to 2s */ + query_intvl = (i + 1) * 100; /* add 100ms per iteration */ + if (query_intvl > 2 * 1000) + query_intvl = 2 * 1000; /* Cap to 2s */ - if (now >= ts_query + (query_intvl*1000)) { + if (now >= ts_query + (query_intvl * 1000)) { /* Query metadata for missing leaders, * possibly creating the topic. */ rd_kafka_metadata_refresh_topics( - rk, NULL, &query_topics, - rd_true/*force*/, - rd_false/*!allow_auto_create*/, - rd_false/*!cgrp_update*/, - "query partition leaders"); + rk, NULL, &query_topics, rd_true /*force*/, + rd_false /*!allow_auto_create*/, + rd_false /*!cgrp_update*/, + "query partition leaders"); ts_query = now; query_cnt++; } else { /* Wait for broker ids to be updated from * metadata refresh above. */ - int wait_ms = rd_timeout_remains_limit(ts_end, - query_intvl); + int wait_ms = + rd_timeout_remains_limit(ts_end, query_intvl); rd_kafka_metadata_cache_wait_change(rk, wait_ms); } @@ -3873,20 +3810,19 @@ rd_kafka_topic_partition_list_query_leaders ( * * @returns the number of topics added. 
*/ -int -rd_kafka_topic_partition_list_get_topics ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *rkts) { +int rd_kafka_topic_partition_list_get_topics( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *rkts) { int cnt = 0; int i; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; rd_kafka_toppar_t *rktp; - rktp = rd_kafka_topic_partition_get_toppar(rk, rktpar, - rd_false); + rktp = + rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false); if (!rktp) { rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; continue; @@ -3913,15 +3849,16 @@ rd_kafka_topic_partition_list_get_topics ( * * @returns the number of topics added. */ -int -rd_kafka_topic_partition_list_get_topic_names ( - const rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *topics, int include_regex) { +int rd_kafka_topic_partition_list_get_topic_names( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *topics, + int include_regex) { int cnt = 0; int i; - for (i = 0 ; i < rktparlist->cnt ; i++) { - const rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; + for (i = 0; i < rktparlist->cnt; i++) { + const rd_kafka_topic_partition_t *rktpar = + &rktparlist->elems[i]; if (!include_regex && *rktpar->topic == '^') continue; @@ -3944,18 +3881,18 @@ rd_kafka_topic_partition_list_get_topic_names ( * * @returns a new list */ -rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match ( - const rd_kafka_topic_partition_list_t *rktparlist, - int (*match) (const void *elem, const void *opaque), - void *opaque) { +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match( + const rd_kafka_topic_partition_list_t *rktparlist, + int (*match)(const void *elem, const void *opaque), + void *opaque) { rd_kafka_topic_partition_list_t *newlist; int i; newlist = rd_kafka_topic_partition_list_new(0); - for (i = 0 ; i < 
rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rktparlist->elems[i]; + &rktparlist->elems[i]; if (!match(rktpar, opaque)) continue; @@ -3966,37 +3903,41 @@ rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match ( return newlist; } -void -rd_kafka_topic_partition_list_log (rd_kafka_t *rk, const char *fac, int dbg, - const rd_kafka_topic_partition_list_t *rktparlist) { +void rd_kafka_topic_partition_list_log( + rd_kafka_t *rk, + const char *fac, + int dbg, + const rd_kafka_topic_partition_list_t *rktparlist) { int i; - rd_kafka_dbg(rk, NONE|dbg, fac, "List with %d partition(s):", - rktparlist->cnt); - for (i = 0 ; i < rktparlist->cnt ; i++) { - const rd_kafka_topic_partition_t *rktpar = - &rktparlist->elems[i]; - rd_kafka_dbg(rk, NONE|dbg, fac, " %s [%"PRId32"] offset %s%s%s", - rktpar->topic, rktpar->partition, - rd_kafka_offset2str(rktpar->offset), - rktpar->err ? ": error: " : "", - rktpar->err ? rd_kafka_err2str(rktpar->err) : ""); - } + rd_kafka_dbg(rk, NONE | dbg, fac, + "List with %d partition(s):", rktparlist->cnt); + for (i = 0; i < rktparlist->cnt; i++) { + const rd_kafka_topic_partition_t *rktpar = + &rktparlist->elems[i]; + rd_kafka_dbg(rk, NONE | dbg, fac, + " %s [%" PRId32 "] offset %s%s%s", rktpar->topic, + rktpar->partition, + rd_kafka_offset2str(rktpar->offset), + rktpar->err ? ": error: " : "", + rktpar->err ? rd_kafka_err2str(rktpar->err) : ""); + } } /** * @returns a comma-separated list of partitions. 
*/ -const char * -rd_kafka_topic_partition_list_str (const rd_kafka_topic_partition_list_t *rktparlist, - char *dest, size_t dest_size, - int fmt_flags) { +const char *rd_kafka_topic_partition_list_str( + const rd_kafka_topic_partition_list_t *rktparlist, + char *dest, + size_t dest_size, + int fmt_flags) { int i; size_t of = 0; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rktparlist->elems[i]; + &rktparlist->elems[i]; char errstr[128]; char offsetstr[32]; int r; @@ -4005,29 +3946,28 @@ rd_kafka_topic_partition_list_str (const rd_kafka_topic_partition_list_t *rktpar continue; if (rktpar->err && !(fmt_flags & RD_KAFKA_FMT_F_NO_ERR)) - rd_snprintf(errstr, sizeof(errstr), - "(%s)", rd_kafka_err2str(rktpar->err)); + rd_snprintf(errstr, sizeof(errstr), "(%s)", + rd_kafka_err2str(rktpar->err)); else errstr[0] = '\0'; if (rktpar->offset != RD_KAFKA_OFFSET_INVALID) - rd_snprintf(offsetstr, sizeof(offsetstr), - "@%"PRId64, rktpar->offset); + rd_snprintf(offsetstr, sizeof(offsetstr), "@%" PRId64, + rktpar->offset); else offsetstr[0] = '\0'; - r = rd_snprintf(&dest[of], dest_size-of, + r = rd_snprintf(&dest[of], dest_size - of, "%s" - "%s[%"PRId32"]" + "%s[%" PRId32 + "]" "%s" "%s", - of == 0 ? "" : ", ", - rktpar->topic, rktpar->partition, - offsetstr, - errstr); + of == 0 ? "" : ", ", rktpar->topic, + rktpar->partition, offsetstr, errstr); - if ((size_t)r >= dest_size-of) { - rd_snprintf(&dest[dest_size-4], 4, "..."); + if ((size_t)r >= dest_size - of) { + rd_snprintf(&dest[dest_size - 4], 4, "..."); break; } @@ -4048,36 +3988,35 @@ rd_kafka_topic_partition_list_str (const rd_kafka_topic_partition_list_t *rktpar * - offset * - err * - * Will only update partitions that are in both dst and src, other partitions will - * remain unchanged. + * Will only update partitions that are in both dst and src, other partitions + * will remain unchanged. 
*/ -void -rd_kafka_topic_partition_list_update (rd_kafka_topic_partition_list_t *dst, - const rd_kafka_topic_partition_list_t *src){ +void rd_kafka_topic_partition_list_update( + rd_kafka_topic_partition_list_t *dst, + const rd_kafka_topic_partition_list_t *src) { int i; - for (i = 0 ; i < dst->cnt ; i++) { + for (i = 0; i < dst->cnt; i++) { rd_kafka_topic_partition_t *d = &dst->elems[i]; rd_kafka_topic_partition_t *s; if (!(s = rd_kafka_topic_partition_list_find( - (rd_kafka_topic_partition_list_t *)src, - d->topic, d->partition))) + (rd_kafka_topic_partition_list_t *)src, d->topic, + d->partition))) continue; d->offset = s->offset; d->err = s->err; if (d->metadata) { rd_free(d->metadata); - d->metadata = NULL; + d->metadata = NULL; d->metadata_size = 0; } if (s->metadata_size > 0) { - d->metadata = - rd_malloc(s->metadata_size); + d->metadata = rd_malloc(s->metadata_size); d->metadata_size = s->metadata_size; memcpy((void *)d->metadata, s->metadata, - s->metadata_size); + s->metadata_size); } } } @@ -4086,17 +4025,16 @@ rd_kafka_topic_partition_list_update (rd_kafka_topic_partition_list_t *dst, /** * @returns the sum of \p cb called for each element. */ -size_t -rd_kafka_topic_partition_list_sum ( - const rd_kafka_topic_partition_list_t *rktparlist, - size_t (*cb) (const rd_kafka_topic_partition_t *rktpar, void *opaque), - void *opaque) { +size_t rd_kafka_topic_partition_list_sum( + const rd_kafka_topic_partition_list_t *rktparlist, + size_t (*cb)(const rd_kafka_topic_partition_t *rktpar, void *opaque), + void *opaque) { int i; size_t sum = 0; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rktparlist->elems[i]; + &rktparlist->elems[i]; sum += cb(rktpar, opaque); } @@ -4110,10 +4048,9 @@ rd_kafka_topic_partition_list_sum ( * * @remarks sorts the elements of the list. 
*/ -rd_bool_t -rd_kafka_topic_partition_list_has_duplicates ( - rd_kafka_topic_partition_list_t *rktparlist, - rd_bool_t ignore_partition) { +rd_bool_t rd_kafka_topic_partition_list_has_duplicates( + rd_kafka_topic_partition_list_t *rktparlist, + rd_bool_t ignore_partition) { int i; @@ -4122,8 +4059,9 @@ rd_kafka_topic_partition_list_has_duplicates ( rd_kafka_topic_partition_list_sort_by_topic(rktparlist); - for (i=1; icnt; i++) { - const rd_kafka_topic_partition_t *p1 = &rktparlist->elems[i-1]; + for (i = 1; i < rktparlist->cnt; i++) { + const rd_kafka_topic_partition_t *p1 = + &rktparlist->elems[i - 1]; const rd_kafka_topic_partition_t *p2 = &rktparlist->elems[i]; if (((p1->partition == p2->partition) || ignore_partition) && @@ -4139,23 +4077,23 @@ rd_kafka_topic_partition_list_has_duplicates ( /** * @brief Set \c .err field \p err on all partitions in list. */ -void rd_kafka_topic_partition_list_set_err ( - rd_kafka_topic_partition_list_t *rktparlist, - rd_kafka_resp_err_t err) { +void rd_kafka_topic_partition_list_set_err( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_resp_err_t err) { int i; - for (i = 0 ; i < rktparlist->cnt ; i++) + for (i = 0; i < rktparlist->cnt; i++) rktparlist->elems[i].err = err; } /** * @brief Get the first set error in the partition list. 
*/ -rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err ( - const rd_kafka_topic_partition_list_t *rktparlist) { +rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err( + const rd_kafka_topic_partition_list_t *rktparlist) { int i; - for (i = 0 ; i < rktparlist->cnt ; i++) + for (i = 0; i < rktparlist->cnt; i++) if (rktparlist->elems[i].err) return rktparlist->elems[i].err; @@ -4166,14 +4104,14 @@ rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err ( /** * @returns the number of wildcard/regex topics */ -int rd_kafka_topic_partition_list_regex_cnt ( - const rd_kafka_topic_partition_list_t *rktparlist) { +int rd_kafka_topic_partition_list_regex_cnt( + const rd_kafka_topic_partition_list_t *rktparlist) { int i; int cnt = 0; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rktparlist->elems[i]; + &rktparlist->elems[i]; cnt += *rktpar->topic == '^'; } return cnt; @@ -4190,18 +4128,18 @@ int rd_kafka_topic_partition_list_regex_cnt ( * @locality toppar handler thread * @locks toppar_lock MUST be held. 
*/ -static void rd_kafka_toppar_reset_base_msgid (rd_kafka_toppar_t *rktp, - uint64_t new_base_msgid) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC|RD_KAFKA_DBG_EOS, "RESETSEQ", - "%.*s [%"PRId32"] " - "resetting epoch base seq from %"PRIu64" to %"PRIu64, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rktp->rktp_eos.epoch_base_msgid, new_base_msgid); - - rktp->rktp_eos.next_ack_seq = 0; - rktp->rktp_eos.next_err_seq = 0; +static void rd_kafka_toppar_reset_base_msgid(rd_kafka_toppar_t *rktp, + uint64_t new_base_msgid) { + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "RESETSEQ", + "%.*s [%" PRId32 + "] " + "resetting epoch base seq from %" PRIu64 " to %" PRIu64, + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, + rktp->rktp_eos.epoch_base_msgid, new_base_msgid); + + rktp->rktp_eos.next_ack_seq = 0; + rktp->rktp_eos.next_err_seq = 0; rktp->rktp_eos.epoch_base_msgid = new_base_msgid; } @@ -4226,21 +4164,21 @@ static void rd_kafka_toppar_reset_base_msgid (rd_kafka_toppar_t *rktp, * @locality toppar handler thread * @locks none */ -int rd_kafka_toppar_pid_change (rd_kafka_toppar_t *rktp, rd_kafka_pid_t pid, - uint64_t base_msgid) { +int rd_kafka_toppar_pid_change(rd_kafka_toppar_t *rktp, + rd_kafka_pid_t pid, + uint64_t base_msgid) { int inflight = rd_atomic32_get(&rktp->rktp_msgs_inflight); if (unlikely(inflight > 0)) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC|RD_KAFKA_DBG_EOS, "NEWPID", - "%.*s [%"PRId32"] will not change %s -> %s yet: " - "%d message(s) still in-flight from current " - "epoch", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_pid2str(rktp->rktp_eos.pid), - rd_kafka_pid2str(pid), - inflight); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "NEWPID", + "%.*s [%" PRId32 + "] will not change %s -> %s yet: " + "%d message(s) still in-flight from current " + "epoch", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + 
rktp->rktp_partition, rd_kafka_pid2str(rktp->rktp_eos.pid), + rd_kafka_pid2str(pid), inflight); return 0; } @@ -4249,15 +4187,13 @@ int rd_kafka_toppar_pid_change (rd_kafka_toppar_t *rktp, rd_kafka_pid_t pid, "non-empty xmitq"); rd_kafka_toppar_lock(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC|RD_KAFKA_DBG_EOS, "NEWPID", - "%.*s [%"PRId32"] changed %s -> %s " - "with base MsgId %"PRIu64, + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "NEWPID", + "%.*s [%" PRId32 + "] changed %s -> %s " + "with base MsgId %" PRIu64, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_pid2str(rktp->rktp_eos.pid), - rd_kafka_pid2str(pid), - base_msgid); + rktp->rktp_partition, rd_kafka_pid2str(rktp->rktp_eos.pid), + rd_kafka_pid2str(pid), base_msgid); rktp->rktp_eos.pid = pid; rd_kafka_toppar_reset_base_msgid(rktp, base_msgid); @@ -4284,22 +4220,21 @@ int rd_kafka_toppar_pid_change (rd_kafka_toppar_t *rktp, rd_kafka_pid_t pid, * @locks_acquired rd_kafka_toppar_lock() * @locks_required none */ -int rd_kafka_toppar_purge_queues (rd_kafka_toppar_t *rktp, - int purge_flags, - rd_bool_t include_xmit_msgq) { - rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; +int rd_kafka_toppar_purge_queues(rd_kafka_toppar_t *rktp, + int purge_flags, + rd_bool_t include_xmit_msgq) { + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq); int cnt; rd_assert(rk->rk_type == RD_KAFKA_PRODUCER); rd_kafka_dbg(rk, TOPIC, "PURGE", - "%s [%"PRId32"]: purging queues " + "%s [%" PRId32 + "]: purging queues " "(purge_flags 0x%x, %s xmit_msgq)", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - purge_flags, - include_xmit_msgq ? "include" : "exclude"); + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + purge_flags, include_xmit_msgq ? 
"include" : "exclude"); if (!(purge_flags & RD_KAFKA_PURGE_F_QUEUE)) return 0; @@ -4322,10 +4257,10 @@ int rd_kafka_toppar_purge_queues (rd_kafka_toppar_t *rktp, * will not be produced (retried) we need to adjust the * idempotence epoch's base msgid to skip the messages. */ rktp->rktp_eos.epoch_base_msgid += cnt; - rd_kafka_dbg(rk, - TOPIC|RD_KAFKA_DBG_EOS, "ADVBASE", - "%.*s [%"PRId32"] " - "advancing epoch base msgid to %"PRIu64 + rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_EOS, "ADVBASE", + "%.*s [%" PRId32 + "] " + "advancing epoch base msgid to %" PRIu64 " due to %d message(s) in aborted transaction", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, @@ -4345,7 +4280,7 @@ int rd_kafka_toppar_purge_queues (rd_kafka_toppar_t *rktp, * @locality application thread * @locks none */ -void rd_kafka_purge_ua_toppar_queues (rd_kafka_t *rk) { +void rd_kafka_purge_ua_toppar_queues(rd_kafka_t *rk) { rd_kafka_topic_t *rkt; int msg_cnt = 0, part_cnt = 0; @@ -4379,13 +4314,13 @@ void rd_kafka_purge_ua_toppar_queues (rd_kafka_t *rk) { } rd_kafka_rdunlock(rk); - rd_kafka_dbg(rk, QUEUE|RD_KAFKA_DBG_TOPIC, "PURGEQ", - "Purged %i message(s) from %d UA-partition(s)", - msg_cnt, part_cnt); + rd_kafka_dbg(rk, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ", + "Purged %i message(s) from %d UA-partition(s)", msg_cnt, + part_cnt); } -void rd_kafka_partition_leader_destroy_free (void *ptr) { +void rd_kafka_partition_leader_destroy_free(void *ptr) { struct rd_kafka_partition_leader *leader = ptr; rd_kafka_partition_leader_destroy(leader); } diff --git a/src/rdkafka_partition.h b/src/rdkafka_partition.h index b65608071d..6e751ecd31 100644 --- a/src/rdkafka_partition.h +++ b/src/rdkafka_partition.h @@ -46,9 +46,9 @@ struct offset_stats { /** * @brief Reset offset_stats struct to default values */ -static RD_UNUSED void rd_kafka_offset_stats_reset (struct offset_stats *offs) { +static RD_UNUSED void rd_kafka_offset_stats_reset(struct offset_stats *offs) { offs->fetch_offset = 0; - 
offs->eof_offset = RD_KAFKA_OFFSET_INVALID; + offs->eof_offset = RD_KAFKA_OFFSET_INVALID; } @@ -56,40 +56,42 @@ static RD_UNUSED void rd_kafka_offset_stats_reset (struct offset_stats *offs) { * @brief Store information about a partition error for future use. */ struct rd_kafka_toppar_err { - rd_kafka_resp_err_t err; /**< Error code */ - int actions; /**< Request actions */ - rd_ts_t ts; /**< Timestamp */ - uint64_t base_msgid; /**< First msg msgid */ - int32_t base_seq; /**< Idempodent Producer: - * first msg sequence */ - int32_t last_seq; /**< Idempotent Producer: - * last msg sequence */ + rd_kafka_resp_err_t err; /**< Error code */ + int actions; /**< Request actions */ + rd_ts_t ts; /**< Timestamp */ + uint64_t base_msgid; /**< First msg msgid */ + int32_t base_seq; /**< Idempodent Producer: + * first msg sequence */ + int32_t last_seq; /**< Idempotent Producer: + * last msg sequence */ }; -typedef TAILQ_HEAD(rd_kafka_toppar_tqhead_s, rd_kafka_toppar_s) - rd_kafka_toppar_tqhead_t; +typedef TAILQ_HEAD(rd_kafka_toppar_tqhead_s, + rd_kafka_toppar_s) rd_kafka_toppar_tqhead_t; /** * Topic + Partition combination */ -struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */ - TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rklink; /* rd_kafka_t link */ - TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rkblink; /* rd_kafka_broker_t link*/ - CIRCLEQ_ENTRY(rd_kafka_toppar_s) rktp_activelink; /* rkb_active_toppars */ - TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rktlink; /* rd_kafka_topic_t link*/ - TAILQ_ENTRY(rd_kafka_toppar_s) rktp_cgrplink;/* rd_kafka_cgrp_t link */ - TAILQ_ENTRY(rd_kafka_toppar_s) rktp_txnlink; /**< rd_kafka_t.rk_eos. - * txn_pend_rktps - * or txn_rktps */ - rd_kafka_topic_t *rktp_rkt; /**< This toppar's topic object */ - int32_t rktp_partition; - //LOCK: toppar_lock() + topic_wrlock() - //LOCK: .. in partition_available() - int32_t rktp_leader_id; /**< Current leader id. 
+struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */ + TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rklink; /* rd_kafka_t link */ + TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rkblink; /* rd_kafka_broker_t link*/ + CIRCLEQ_ENTRY(rd_kafka_toppar_s) + rktp_activelink; /* rkb_active_toppars */ + TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rktlink; /* rd_kafka_topic_t link*/ + TAILQ_ENTRY(rd_kafka_toppar_s) rktp_cgrplink; /* rd_kafka_cgrp_t link */ + TAILQ_ENTRY(rd_kafka_toppar_s) + rktp_txnlink; /**< rd_kafka_t.rk_eos. + * txn_pend_rktps + * or txn_rktps */ + rd_kafka_topic_t *rktp_rkt; /**< This toppar's topic object */ + int32_t rktp_partition; + // LOCK: toppar_lock() + topic_wrlock() + // LOCK: .. in partition_available() + int32_t rktp_leader_id; /**< Current leader id. * This is updated directly * from metadata. */ - int32_t rktp_broker_id; /**< Current broker id. */ + int32_t rktp_broker_id; /**< Current broker id. */ rd_kafka_broker_t *rktp_leader; /**< Current leader broker. * This updated simultaneously * with rktp_leader_id. */ @@ -102,280 +104,285 @@ struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */ * may lag. */ rd_kafka_broker_t *rktp_next_broker; /**< Next preferred broker after * async migration op. */ - rd_refcnt_t rktp_refcnt; - mtx_t rktp_lock; - - //LOCK: toppar_lock. toppar_insert_msg(), concat_msgq() - //LOCK: toppar_lock. toppar_enq_msg(), deq_msg(), toppar_retry_msgq() - rd_kafka_q_t *rktp_msgq_wakeup_q; /**< Wake-up queue */ - rd_kafka_msgq_t rktp_msgq; /* application->rdkafka queue. - * protected by rktp_lock */ - rd_kafka_msgq_t rktp_xmit_msgq; /* internal broker xmit queue. - * local to broker thread. */ - - int rktp_fetch; /* On rkb_active_toppars list */ - - /* Consumer */ - rd_kafka_q_t *rktp_fetchq; /* Queue of fetched messages - * from broker. - * Broker thread -> App */ - rd_kafka_q_t *rktp_ops; /* * -> Main thread */ - - rd_atomic32_t rktp_msgs_inflight; /**< Current number of - * messages in-flight to/from - * the broker. 
*/ - - uint64_t rktp_msgid; /**< Current/last message id. - * Each message enqueued on a - * non-UA partition will get a - * partition-unique sequencial - * number assigned. - * This number is used to - * re-enqueue the message - * on resends but making sure - * the input ordering is still - * maintained, and used by - * the idempotent producer. - * Starts at 1. - * Protected by toppar_lock */ + rd_refcnt_t rktp_refcnt; + mtx_t rktp_lock; + + // LOCK: toppar_lock. toppar_insert_msg(), concat_msgq() + // LOCK: toppar_lock. toppar_enq_msg(), deq_msg(), toppar_retry_msgq() + rd_kafka_q_t *rktp_msgq_wakeup_q; /**< Wake-up queue */ + rd_kafka_msgq_t rktp_msgq; /* application->rdkafka queue. + * protected by rktp_lock */ + rd_kafka_msgq_t rktp_xmit_msgq; /* internal broker xmit queue. + * local to broker thread. */ + + int rktp_fetch; /* On rkb_active_toppars list */ + + /* Consumer */ + rd_kafka_q_t *rktp_fetchq; /* Queue of fetched messages + * from broker. + * Broker thread -> App */ + rd_kafka_q_t *rktp_ops; /* * -> Main thread */ + + rd_atomic32_t rktp_msgs_inflight; /**< Current number of + * messages in-flight to/from + * the broker. */ + + uint64_t rktp_msgid; /**< Current/last message id. + * Each message enqueued on a + * non-UA partition will get a + * partition-unique sequencial + * number assigned. + * This number is used to + * re-enqueue the message + * on resends but making sure + * the input ordering is still + * maintained, and used by + * the idempotent producer. + * Starts at 1. + * Protected by toppar_lock */ struct { - rd_kafka_pid_t pid; /**< Partition's last known - * Producer Id and epoch. - * Protected by toppar lock. - * Only updated in toppar - * handler thread. */ - uint64_t acked_msgid; /**< Highest acknowledged message. - * Protected by toppar lock. */ + rd_kafka_pid_t pid; /**< Partition's last known + * Producer Id and epoch. + * Protected by toppar lock. + * Only updated in toppar + * handler thread. 
*/ + uint64_t acked_msgid; /**< Highest acknowledged message. + * Protected by toppar lock. */ uint64_t epoch_base_msgid; /**< This Producer epoch's - * base msgid. - * When a new epoch is - * acquired, or on transaction abort, - * the base_seq is set to the - * current rktp_msgid so that - * sub-sequent produce - * requests will have - * a sequence number series - * starting at 0. - * Protected by toppar_lock */ - int32_t next_ack_seq; /**< Next expected ack sequence. - * Protected by toppar lock. */ - int32_t next_err_seq; /**< Next expected error sequence. - * Used when draining outstanding - * issues. - * This value will be the same - * as next_ack_seq until a drainable - * error occurs, in which case it - * will advance past next_ack_seq. - * next_ack_seq can never be larger - * than next_err_seq. - * Protected by toppar lock. */ - rd_bool_t wait_drain; /**< All inflight requests must - * be drained/finish before - * resuming producing. - * This is set to true - * when a leader change - * happens so that the - * in-flight messages for the - * old brokers finish before - * the new broker starts sending. - * This as a step to ensure - * consistency. - * Only accessed from toppar - * handler thread. */ + * base msgid. + * When a new epoch is + * acquired, or on transaction + * abort, the base_seq is set to the + * current rktp_msgid so that + * sub-sequent produce + * requests will have + * a sequence number series + * starting at 0. + * Protected by toppar_lock */ + int32_t next_ack_seq; /**< Next expected ack sequence. + * Protected by toppar lock. */ + int32_t next_err_seq; /**< Next expected error sequence. + * Used when draining outstanding + * issues. + * This value will be the same + * as next_ack_seq until a drainable + * error occurs, in which case it + * will advance past next_ack_seq. + * next_ack_seq can never be larger + * than next_err_seq. + * Protected by toppar lock. 
*/ + rd_bool_t wait_drain; /**< All inflight requests must + * be drained/finish before + * resuming producing. + * This is set to true + * when a leader change + * happens so that the + * in-flight messages for the + * old brokers finish before + * the new broker starts sending. + * This as a step to ensure + * consistency. + * Only accessed from toppar + * handler thread. */ } rktp_eos; - /** - * rktp version barriers - * - * rktp_version is the application/controller side's - * authoritative version, it depicts the most up to date state. - * This is what q_filter() matches an rko_version to. - * - * rktp_op_version is the last/current received state handled - * by the toppar in the broker thread. It is updated to rktp_version - * when receiving a new op. - * - * rktp_fetch_version is the current fetcher decision version. - * It is used in fetch_decide() to see if the fetch decision - * needs to be updated by comparing to rktp_op_version. - * - * Example: - * App thread : Send OP_START (v1 bump): rktp_version=1 - * Broker thread: Recv OP_START (v1): rktp_op_version=1 - * Broker thread: fetch_decide() detects that - * rktp_op_version != rktp_fetch_version and - * sets rktp_fetch_version=1. - * Broker thread: next Fetch request has it's tver state set to - * rktp_fetch_verison (v1). - * - * App thread : Send OP_SEEK (v2 bump): rktp_version=2 - * Broker thread: Recv OP_SEEK (v2): rktp_op_version=2 - * Broker thread: Recv IO FetchResponse with tver=1, - * when enqueued on rktp_fetchq they're discarded - * due to old version (tver= RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) - int32_t rktp_fetch_msg_max_bytes; /* Max number of bytes to - * fetch. - * Locality: broker thread - */ - - rd_ts_t rktp_ts_fetch_backoff; /* Back off fetcher for - * this partition until this - * absolute timestamp - * expires. */ - - int64_t rktp_query_offset; /* Offset to query broker for*/ - int64_t rktp_next_offset; /* Next offset to start - * fetching from. 
- * Locality: toppar thread */ - int64_t rktp_last_next_offset; /* Last next_offset handled - * by fetch_decide(). - * Locality: broker thread */ - int64_t rktp_app_offset; /* Last offset delivered to - * application + 1. - * Is reset to INVALID_OFFSET - * when partition is - * unassigned/stopped. */ - int64_t rktp_stored_offset; /* Last stored offset, but - * maybe not committed yet. */ - int64_t rktp_committing_offset; /* Offset currently being - * committed */ - int64_t rktp_committed_offset; /* Last committed offset */ - rd_ts_t rktp_ts_committed_offset; /* Timestamp of last - * commit */ - - struct offset_stats rktp_offsets; /* Current offsets. - * Locality: broker thread*/ + int32_t rktp_fetch_msg_max_bytes; /* Max number of bytes to + * fetch. + * Locality: broker thread + */ + + rd_ts_t rktp_ts_fetch_backoff; /* Back off fetcher for + * this partition until this + * absolute timestamp + * expires. */ + + int64_t rktp_query_offset; /* Offset to query broker for*/ + int64_t rktp_next_offset; /* Next offset to start + * fetching from. + * Locality: toppar thread */ + int64_t rktp_last_next_offset; /* Last next_offset handled + * by fetch_decide(). + * Locality: broker thread */ + int64_t rktp_app_offset; /* Last offset delivered to + * application + 1. + * Is reset to INVALID_OFFSET + * when partition is + * unassigned/stopped. */ + int64_t rktp_stored_offset; /* Last stored offset, but + * maybe not committed yet. */ + int64_t rktp_committing_offset; /* Offset currently being + * committed */ + int64_t rktp_committed_offset; /* Last committed offset */ + rd_ts_t rktp_ts_committed_offset; /* Timestamp of last + * commit */ + + struct offset_stats rktp_offsets; /* Current offsets. + * Locality: broker thread*/ struct offset_stats rktp_offsets_fin; /* Finalized offset for stats. * Updated periodically * by broker thread. 
* Locks: toppar_lock */ - int64_t rktp_ls_offset; /**< Current last stable offset - * Locks: toppar_lock */ - int64_t rktp_hi_offset; /* Current high watermark offset. - * Locks: toppar_lock */ - int64_t rktp_lo_offset; /* Current broker low offset. - * This is outside of the stats - * struct due to this field - * being populated by the - * toppar thread rather than - * the broker thread. - * Locality: toppar thread - * Locks: toppar_lock */ - - rd_ts_t rktp_ts_offset_lag; - - char *rktp_offset_path; /* Path to offset file */ - FILE *rktp_offset_fp; /* Offset file pointer */ - - rd_kafka_resp_err_t rktp_last_error; /**< Last Fetch error. - * Used for suppressing - * reoccuring errors. - * @locality broker thread */ - - rd_kafka_cgrp_t *rktp_cgrp; /* Belongs to this cgrp */ - - rd_bool_t rktp_started; /**< Fetcher is instructured to - * start. - * This is used by cgrp to keep - * track of whether the toppar has - * been started or not. */ - - rd_kafka_replyq_t rktp_replyq; /* Current replyq+version - * for propagating - * major operations, e.g., - * FETCH_STOP. */ - //LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_DESIRED - //LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_UNKNOWN - int rktp_flags; -#define RD_KAFKA_TOPPAR_F_DESIRED 0x1 /* This partition is desired - * by a consumer. */ -#define RD_KAFKA_TOPPAR_F_UNKNOWN 0x2 /* Topic is not yet or no longer - * seen on a broker. */ -#define RD_KAFKA_TOPPAR_F_OFFSET_STORE 0x4 /* Offset store is active */ -#define RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING 0x8 /* Offset store stopping */ -#define RD_KAFKA_TOPPAR_F_APP_PAUSE 0x10 /* App pause()d consumption */ -#define RD_KAFKA_TOPPAR_F_LIB_PAUSE 0x20 /* librdkafka paused consumption */ -#define RD_KAFKA_TOPPAR_F_REMOVE 0x40 /* partition removed from cluster */ -#define RD_KAFKA_TOPPAR_F_LEADER_ERR 0x80 /* Operation failed: - * leader might be missing. - * Typically set from - * ProduceResponse failure. 
*/ -#define RD_KAFKA_TOPPAR_F_PEND_TXN 0x100 /* Partition is pending being added - * to a producer transaction. */ -#define RD_KAFKA_TOPPAR_F_IN_TXN 0x200 /* Partition is part of - * a producer transaction. */ -#define RD_KAFKA_TOPPAR_F_ON_DESP 0x400 /**< On rkt_desp list */ -#define RD_KAFKA_TOPPAR_F_ON_CGRP 0x800 /**< On rkcg_toppars list */ -#define RD_KAFKA_TOPPAR_F_ON_RKB 0x1000 /**< On rkb_toppars list */ - - /* - * Timers - */ - rd_kafka_timer_t rktp_offset_query_tmr; /* Offset query timer */ - rd_kafka_timer_t rktp_offset_commit_tmr; /* Offset commit timer */ - rd_kafka_timer_t rktp_offset_sync_tmr; /* Offset file sync timer */ + int64_t rktp_ls_offset; /**< Current last stable offset + * Locks: toppar_lock */ + int64_t rktp_hi_offset; /* Current high watermark offset. + * Locks: toppar_lock */ + int64_t rktp_lo_offset; /* Current broker low offset. + * This is outside of the stats + * struct due to this field + * being populated by the + * toppar thread rather than + * the broker thread. + * Locality: toppar thread + * Locks: toppar_lock */ + + rd_ts_t rktp_ts_offset_lag; + + char *rktp_offset_path; /* Path to offset file */ + FILE *rktp_offset_fp; /* Offset file pointer */ + + rd_kafka_resp_err_t rktp_last_error; /**< Last Fetch error. + * Used for suppressing + * reoccuring errors. + * @locality broker thread */ + + rd_kafka_cgrp_t *rktp_cgrp; /* Belongs to this cgrp */ + + rd_bool_t rktp_started; /**< Fetcher is instructured to + * start. + * This is used by cgrp to keep + * track of whether the toppar has + * been started or not. */ + + rd_kafka_replyq_t rktp_replyq; /* Current replyq+version + * for propagating + * major operations, e.g., + * FETCH_STOP. */ + // LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_DESIRED + // LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_UNKNOWN + int rktp_flags; +#define RD_KAFKA_TOPPAR_F_DESIRED \ + 0x1 /* This partition is desired \ + * by a consumer. 
*/ +#define RD_KAFKA_TOPPAR_F_UNKNOWN \ + 0x2 /* Topic is not yet or no longer \ + * seen on a broker. */ +#define RD_KAFKA_TOPPAR_F_OFFSET_STORE 0x4 /* Offset store is active */ +#define RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING \ + 0x8 /* Offset store stopping \ + */ +#define RD_KAFKA_TOPPAR_F_APP_PAUSE 0x10 /* App pause()d consumption */ +#define RD_KAFKA_TOPPAR_F_LIB_PAUSE 0x20 /* librdkafka paused consumption */ +#define RD_KAFKA_TOPPAR_F_REMOVE 0x40 /* partition removed from cluster */ +#define RD_KAFKA_TOPPAR_F_LEADER_ERR \ + 0x80 /* Operation failed: \ + * leader might be missing. \ + * Typically set from \ + * ProduceResponse failure. */ +#define RD_KAFKA_TOPPAR_F_PEND_TXN \ + 0x100 /* Partition is pending being added \ + * to a producer transaction. */ +#define RD_KAFKA_TOPPAR_F_IN_TXN \ + 0x200 /* Partition is part of \ + * a producer transaction. */ +#define RD_KAFKA_TOPPAR_F_ON_DESP 0x400 /**< On rkt_desp list */ +#define RD_KAFKA_TOPPAR_F_ON_CGRP 0x800 /**< On rkcg_toppars list */ +#define RD_KAFKA_TOPPAR_F_ON_RKB 0x1000 /**< On rkb_toppars list */ + + /* + * Timers + */ + rd_kafka_timer_t rktp_offset_query_tmr; /* Offset query timer */ + rd_kafka_timer_t rktp_offset_commit_tmr; /* Offset commit timer */ + rd_kafka_timer_t rktp_offset_sync_tmr; /* Offset file sync timer */ rd_kafka_timer_t rktp_consumer_lag_tmr; /* Consumer lag monitoring - * timer */ - - rd_interval_t rktp_lease_intvl; /**< Preferred replica lease - * period */ - rd_interval_t rktp_new_lease_intvl; /**< Controls max frequency - * at which a new preferred - * replica lease can be - * created for a toppar. - */ - rd_interval_t rktp_new_lease_log_intvl; /**< .. and how often - * we log about it. */ - rd_interval_t rktp_metadata_intvl; /**< Controls max frequency - * of metadata requests - * in preferred replica - * handler. - */ - - int rktp_wait_consumer_lag_resp; /* Waiting for consumer lag - * response. 
*/ + * timer */ + + rd_interval_t rktp_lease_intvl; /**< Preferred replica lease + * period */ + rd_interval_t rktp_new_lease_intvl; /**< Controls max frequency + * at which a new preferred + * replica lease can be + * created for a toppar. + */ + rd_interval_t rktp_new_lease_log_intvl; /**< .. and how often + * we log about it. */ + rd_interval_t rktp_metadata_intvl; /**< Controls max frequency + * of metadata requests + * in preferred replica + * handler. + */ + + int rktp_wait_consumer_lag_resp; /* Waiting for consumer lag + * response. */ struct rd_kafka_toppar_err rktp_last_err; /**< Last produce error */ struct { - rd_atomic64_t tx_msgs; /**< Producer: sent messages */ - rd_atomic64_t tx_msg_bytes; /**< .. bytes */ - rd_atomic64_t rx_msgs; /**< Consumer: received messages */ - rd_atomic64_t rx_msg_bytes; /**< .. bytes */ + rd_atomic64_t tx_msgs; /**< Producer: sent messages */ + rd_atomic64_t tx_msg_bytes; /**< .. bytes */ + rd_atomic64_t rx_msgs; /**< Consumer: received messages */ + rd_atomic64_t rx_msg_bytes; /**< .. bytes */ rd_atomic64_t producer_enq_msgs; /**< Producer: enqueued msgs */ - rd_atomic64_t rx_ver_drops; /**< Consumer: outdated message - * drops. */ + rd_atomic64_t rx_ver_drops; /**< Consumer: outdated message + * drops. */ } rktp_c; - }; @@ -383,220 +390,224 @@ struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */ * Check if toppar is paused (consumer). * Locks: toppar_lock() MUST be held. */ -#define RD_KAFKA_TOPPAR_IS_PAUSED(rktp) \ - ((rktp)->rktp_flags & (RD_KAFKA_TOPPAR_F_APP_PAUSE | \ - RD_KAFKA_TOPPAR_F_LIB_PAUSE)) - +#define RD_KAFKA_TOPPAR_IS_PAUSED(rktp) \ + ((rktp)->rktp_flags & \ + (RD_KAFKA_TOPPAR_F_APP_PAUSE | RD_KAFKA_TOPPAR_F_LIB_PAUSE)) /** * @brief Increase refcount and return rktp object. 
*/ -#define rd_kafka_toppar_keep(RKTP) \ - rd_kafka_toppar_keep0(__FUNCTION__,__LINE__,RKTP) +#define rd_kafka_toppar_keep(RKTP) \ + rd_kafka_toppar_keep0(__FUNCTION__, __LINE__, RKTP) -#define rd_kafka_toppar_keep_fl(FUNC,LINE,RKTP) \ - rd_kafka_toppar_keep0(FUNC,LINE,RKTP) +#define rd_kafka_toppar_keep_fl(FUNC, LINE, RKTP) \ + rd_kafka_toppar_keep0(FUNC, LINE, RKTP) -static RD_UNUSED RD_INLINE -rd_kafka_toppar_t *rd_kafka_toppar_keep0 (const char *func, int line, - rd_kafka_toppar_t *rktp) { +static RD_UNUSED RD_INLINE rd_kafka_toppar_t * +rd_kafka_toppar_keep0(const char *func, int line, rd_kafka_toppar_t *rktp) { rd_refcnt_add_fl(func, line, &rktp->rktp_refcnt); return rktp; } -void rd_kafka_toppar_destroy_final (rd_kafka_toppar_t *rktp); +void rd_kafka_toppar_destroy_final(rd_kafka_toppar_t *rktp); -#define rd_kafka_toppar_destroy(RKTP) do { \ - rd_kafka_toppar_t *_RKTP = (RKTP); \ - if (unlikely(rd_refcnt_sub(&_RKTP->rktp_refcnt) == 0)) \ - rd_kafka_toppar_destroy_final(_RKTP); \ +#define rd_kafka_toppar_destroy(RKTP) \ + do { \ + rd_kafka_toppar_t *_RKTP = (RKTP); \ + if (unlikely(rd_refcnt_sub(&_RKTP->rktp_refcnt) == 0)) \ + rd_kafka_toppar_destroy_final(_RKTP); \ } while (0) +#define rd_kafka_toppar_lock(rktp) mtx_lock(&(rktp)->rktp_lock) +#define rd_kafka_toppar_unlock(rktp) mtx_unlock(&(rktp)->rktp_lock) +static const char * +rd_kafka_toppar_name(const rd_kafka_toppar_t *rktp) RD_UNUSED; +static const char *rd_kafka_toppar_name(const rd_kafka_toppar_t *rktp) { + static RD_TLS char ret[256]; + rd_snprintf(ret, sizeof(ret), "%.*s [%" PRId32 "]", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); - -#define rd_kafka_toppar_lock(rktp) mtx_lock(&(rktp)->rktp_lock) -#define rd_kafka_toppar_unlock(rktp) mtx_unlock(&(rktp)->rktp_lock) - -static const char *rd_kafka_toppar_name (const rd_kafka_toppar_t *rktp) - RD_UNUSED; -static const char *rd_kafka_toppar_name (const rd_kafka_toppar_t *rktp) { - static RD_TLS char ret[256]; - - 
rd_snprintf(ret, sizeof(ret), "%.*s [%"PRId32"]", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition); - - return ret; + return ret; } -rd_kafka_toppar_t *rd_kafka_toppar_new0 (rd_kafka_topic_t *rkt, - int32_t partition, - const char *func, int line); -#define rd_kafka_toppar_new(rkt,partition) \ - rd_kafka_toppar_new0(rkt, partition, __FUNCTION__, __LINE__) -void rd_kafka_toppar_purge_and_disable_queues (rd_kafka_toppar_t *rktp); -void rd_kafka_toppar_set_fetch_state (rd_kafka_toppar_t *rktp, - int fetch_state); -void rd_kafka_toppar_insert_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm); -void rd_kafka_toppar_enq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm); -int rd_kafka_retry_msgq (rd_kafka_msgq_t *destq, - rd_kafka_msgq_t *srcq, - int incr_retry, int max_retries, rd_ts_t backoff, - rd_kafka_msg_status_t status, - int (*cmp) (const void *a, const void *b)); -void rd_kafka_msgq_insert_msgq (rd_kafka_msgq_t *destq, - rd_kafka_msgq_t *srcq, - int (*cmp) (const void *a, const void *b)); -int rd_kafka_toppar_retry_msgq (rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq, - int incr_retry, rd_kafka_msg_status_t status); -void rd_kafka_toppar_insert_msgq (rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq); -void rd_kafka_toppar_enq_error (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err, - const char *reason); -rd_kafka_toppar_t *rd_kafka_toppar_get0 (const char *func, int line, - const rd_kafka_topic_t *rkt, - int32_t partition, - int ua_on_miss); -#define rd_kafka_toppar_get(rkt,partition,ua_on_miss) \ - rd_kafka_toppar_get0(__FUNCTION__,__LINE__,rkt,partition,ua_on_miss) -rd_kafka_toppar_t *rd_kafka_toppar_get2 (rd_kafka_t *rk, - const char *topic, - int32_t partition, - int ua_on_miss, - int create_on_miss); -rd_kafka_toppar_t * -rd_kafka_toppar_get_avail (const rd_kafka_topic_t *rkt, - int32_t partition, - int ua_on_miss, - rd_kafka_resp_err_t *errp); - -rd_kafka_toppar_t *rd_kafka_toppar_desired_get (rd_kafka_topic_t *rkt, - int32_t 
partition); -void rd_kafka_toppar_desired_add0 (rd_kafka_toppar_t *rktp); -rd_kafka_toppar_t *rd_kafka_toppar_desired_add (rd_kafka_topic_t *rkt, - int32_t partition); -void rd_kafka_toppar_desired_link (rd_kafka_toppar_t *rktp); -void rd_kafka_toppar_desired_unlink (rd_kafka_toppar_t *rktp); -void rd_kafka_toppar_desired_del (rd_kafka_toppar_t *rktp); - -void rd_kafka_toppar_next_offset_handle (rd_kafka_toppar_t *rktp, - int64_t Offset); - -void rd_kafka_toppar_broker_delegate (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb); - - -rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start (rd_kafka_toppar_t *rktp, - int64_t offset, - rd_kafka_q_t *fwdq, - rd_kafka_replyq_t replyq); - -rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop (rd_kafka_toppar_t *rktp, +rd_kafka_toppar_t *rd_kafka_toppar_new0(rd_kafka_topic_t *rkt, + int32_t partition, + const char *func, + int line); +#define rd_kafka_toppar_new(rkt, partition) \ + rd_kafka_toppar_new0(rkt, partition, __FUNCTION__, __LINE__) +void rd_kafka_toppar_purge_and_disable_queues(rd_kafka_toppar_t *rktp); +void rd_kafka_toppar_set_fetch_state(rd_kafka_toppar_t *rktp, int fetch_state); +void rd_kafka_toppar_insert_msg(rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm); +void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm); +int rd_kafka_retry_msgq(rd_kafka_msgq_t *destq, + rd_kafka_msgq_t *srcq, + int incr_retry, + int max_retries, + rd_ts_t backoff, + rd_kafka_msg_status_t status, + int (*cmp)(const void *a, const void *b)); +void rd_kafka_msgq_insert_msgq(rd_kafka_msgq_t *destq, + rd_kafka_msgq_t *srcq, + int (*cmp)(const void *a, const void *b)); +int rd_kafka_toppar_retry_msgq(rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + int incr_retry, + rd_kafka_msg_status_t status); +void rd_kafka_toppar_insert_msgq(rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq); +void rd_kafka_toppar_enq_error(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err, + const char *reason); +rd_kafka_toppar_t 
*rd_kafka_toppar_get0(const char *func, + int line, + const rd_kafka_topic_t *rkt, + int32_t partition, + int ua_on_miss); +#define rd_kafka_toppar_get(rkt, partition, ua_on_miss) \ + rd_kafka_toppar_get0(__FUNCTION__, __LINE__, rkt, partition, ua_on_miss) +rd_kafka_toppar_t *rd_kafka_toppar_get2(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int ua_on_miss, + int create_on_miss); +rd_kafka_toppar_t *rd_kafka_toppar_get_avail(const rd_kafka_topic_t *rkt, + int32_t partition, + int ua_on_miss, + rd_kafka_resp_err_t *errp); + +rd_kafka_toppar_t *rd_kafka_toppar_desired_get(rd_kafka_topic_t *rkt, + int32_t partition); +void rd_kafka_toppar_desired_add0(rd_kafka_toppar_t *rktp); +rd_kafka_toppar_t *rd_kafka_toppar_desired_add(rd_kafka_topic_t *rkt, + int32_t partition); +void rd_kafka_toppar_desired_link(rd_kafka_toppar_t *rktp); +void rd_kafka_toppar_desired_unlink(rd_kafka_toppar_t *rktp); +void rd_kafka_toppar_desired_del(rd_kafka_toppar_t *rktp); + +void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp, + int64_t Offset); + +void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb); + + +rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_t *rktp, + int64_t offset, + rd_kafka_q_t *fwdq, rd_kafka_replyq_t replyq); -rd_kafka_resp_err_t rd_kafka_toppar_op_seek (rd_kafka_toppar_t *rktp, - int64_t offset, - rd_kafka_replyq_t replyq); +rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop(rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq); -rd_kafka_resp_err_t rd_kafka_toppar_op_pause (rd_kafka_toppar_t *rktp, - int pause, int flag); +rd_kafka_resp_err_t rd_kafka_toppar_op_seek(rd_kafka_toppar_t *rktp, + int64_t offset, + rd_kafka_replyq_t replyq); -void rd_kafka_toppar_fetch_stopped (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err); +rd_kafka_resp_err_t +rd_kafka_toppar_op_pause(rd_kafka_toppar_t *rktp, int pause, int flag); +void rd_kafka_toppar_fetch_stopped(rd_kafka_toppar_t *rktp, + 
rd_kafka_resp_err_t err); -rd_ts_t rd_kafka_toppar_fetch_decide (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int force_remove); +rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb, + int force_remove); -rd_ts_t rd_kafka_broker_consumer_toppar_serve (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp); +rd_ts_t rd_kafka_broker_consumer_toppar_serve(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp); -void rd_kafka_toppar_offset_fetch (rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq); -void rd_kafka_toppar_offset_request (rd_kafka_toppar_t *rktp, - int64_t query_offset, int backoff_ms); +void rd_kafka_toppar_offset_fetch(rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq); -int rd_kafka_toppar_purge_queues (rd_kafka_toppar_t *rktp, - int purge_flags, - rd_bool_t include_xmit_msgq); +void rd_kafka_toppar_offset_request(rd_kafka_toppar_t *rktp, + int64_t query_offset, + int backoff_ms); -rd_kafka_broker_t *rd_kafka_toppar_broker (rd_kafka_toppar_t *rktp, - int proper_broker); -void rd_kafka_toppar_leader_unavailable (rd_kafka_toppar_t *rktp, - const char *reason, - rd_kafka_resp_err_t err); +int rd_kafka_toppar_purge_queues(rd_kafka_toppar_t *rktp, + int purge_flags, + rd_bool_t include_xmit_msgq); -void rd_kafka_toppar_pause (rd_kafka_toppar_t *rktp, int flag); -void rd_kafka_toppar_resume (rd_kafka_toppar_t *rktp, int flag); +rd_kafka_broker_t *rd_kafka_toppar_broker(rd_kafka_toppar_t *rktp, + int proper_broker); +void rd_kafka_toppar_leader_unavailable(rd_kafka_toppar_t *rktp, + const char *reason, + rd_kafka_resp_err_t err); +void rd_kafka_toppar_pause(rd_kafka_toppar_t *rktp, int flag); +void rd_kafka_toppar_resume(rd_kafka_toppar_t *rktp, int flag); + +rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp, + int pause, + int flag, + rd_kafka_replyq_t replyq); rd_kafka_resp_err_t -rd_kafka_toppar_op_pause_resume (rd_kafka_toppar_t *rktp, int pause, int flag, - rd_kafka_replyq_t replyq); 
-rd_kafka_resp_err_t -rd_kafka_toppars_pause_resume (rd_kafka_t *rk, - rd_bool_t pause, rd_async_t async, int flag, - rd_kafka_topic_partition_list_t *partitions); +rd_kafka_toppars_pause_resume(rd_kafka_t *rk, + rd_bool_t pause, + rd_async_t async, + int flag, + rd_kafka_topic_partition_list_t *partitions); -rd_kafka_topic_partition_t *rd_kafka_topic_partition_new (const char *topic, - int32_t partition); -void rd_kafka_topic_partition_destroy_free (void *ptr); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_new(const char *topic, + int32_t partition); +void rd_kafka_topic_partition_destroy_free(void *ptr); rd_kafka_topic_partition_t * -rd_kafka_topic_partition_copy (const rd_kafka_topic_partition_t *src); -void *rd_kafka_topic_partition_copy_void (const void *src); -void rd_kafka_topic_partition_destroy_free (void *ptr); +rd_kafka_topic_partition_copy(const rd_kafka_topic_partition_t *src); +void *rd_kafka_topic_partition_copy_void(const void *src); +void rd_kafka_topic_partition_destroy_free(void *ptr); rd_kafka_topic_partition_t * -rd_kafka_topic_partition_new_from_rktp (rd_kafka_toppar_t *rktp); +rd_kafka_topic_partition_new_from_rktp(rd_kafka_toppar_t *rktp); -void rd_kafka_topic_partition_list_init ( - rd_kafka_topic_partition_list_t *rktparlist, int size); -void rd_kafka_topic_partition_list_destroy_free (void *ptr); +void rd_kafka_topic_partition_list_init( + rd_kafka_topic_partition_list_t *rktparlist, + int size); +void rd_kafka_topic_partition_list_destroy_free(void *ptr); -void rd_kafka_topic_partition_list_clear ( - rd_kafka_topic_partition_list_t *rktparlist); +void rd_kafka_topic_partition_list_clear( + rd_kafka_topic_partition_list_t *rktparlist); rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_add0 (const char *func, int line, - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, - rd_kafka_toppar_t *_private); +rd_kafka_topic_partition_list_add0(const char *func, + int line, + 
rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + rd_kafka_toppar_t *_private); -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_upsert ( - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); -void rd_kafka_topic_partition_list_add_copy ( - rd_kafka_topic_partition_list_t *rktparlist, - const rd_kafka_topic_partition_t *rktpar); +void rd_kafka_topic_partition_list_add_copy( + rd_kafka_topic_partition_list_t *rktparlist, + const rd_kafka_topic_partition_t *rktpar); -void rd_kafka_topic_partition_list_add_list ( - rd_kafka_topic_partition_list_t *dst, - const rd_kafka_topic_partition_list_t *src); +void rd_kafka_topic_partition_list_add_list( + rd_kafka_topic_partition_list_t *dst, + const rd_kafka_topic_partition_list_t *src); /** * Traverse rd_kafka_topic_partition_list_t. * * @warning \p TPLIST modifications are not allowed. */ -#define RD_KAFKA_TPLIST_FOREACH(RKTPAR,TPLIST) \ - for (RKTPAR = &(TPLIST)->elems[0] ; \ - (RKTPAR) < &(TPLIST)->elems[(TPLIST)->cnt] ; \ - RKTPAR++) +#define RD_KAFKA_TPLIST_FOREACH(RKTPAR, TPLIST) \ + for (RKTPAR = &(TPLIST)->elems[0]; \ + (RKTPAR) < &(TPLIST)->elems[(TPLIST)->cnt]; RKTPAR++) /** * Traverse rd_kafka_topic_partition_list_t. @@ -604,172 +615,170 @@ void rd_kafka_topic_partition_list_add_list ( * @warning \p TPLIST modifications are not allowed, but removal of the * current \p RKTPAR element is allowed. 
*/ -#define RD_KAFKA_TPLIST_FOREACH_REVERSE(RKTPAR,TPLIST) \ - for (RKTPAR = &(TPLIST)->elems[(TPLIST)->cnt-1] ; \ - (RKTPAR) >= &(TPLIST)->elems[0] ; \ - RKTPAR--) +#define RD_KAFKA_TPLIST_FOREACH_REVERSE(RKTPAR, TPLIST) \ + for (RKTPAR = &(TPLIST)->elems[(TPLIST)->cnt - 1]; \ + (RKTPAR) >= &(TPLIST)->elems[0]; RKTPAR--) -int rd_kafka_topic_partition_match (rd_kafka_t *rk, - const rd_kafka_group_member_t *rkgm, - const rd_kafka_topic_partition_t *rktpar, - const char *topic, int *matched_by_regex); +int rd_kafka_topic_partition_match(rd_kafka_t *rk, + const rd_kafka_group_member_t *rkgm, + const rd_kafka_topic_partition_t *rktpar, + const char *topic, + int *matched_by_regex); -int rd_kafka_topic_partition_cmp (const void *_a, const void *_b); -unsigned int rd_kafka_topic_partition_hash (const void *a); +int rd_kafka_topic_partition_cmp(const void *_a, const void *_b); +unsigned int rd_kafka_topic_partition_hash(const void *a); -int -rd_kafka_topic_partition_list_find_idx ( - const rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_find_topic ( - const rd_kafka_topic_partition_list_t *rktparlist, const char *topic); +int rd_kafka_topic_partition_list_find_idx( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic); -void rd_kafka_topic_partition_list_sort_by_topic ( - rd_kafka_topic_partition_list_t *rktparlist); +void rd_kafka_topic_partition_list_sort_by_topic( + rd_kafka_topic_partition_list_t *rktparlist); -void -rd_kafka_topic_partition_list_reset_offsets (rd_kafka_topic_partition_list_t - *rktparlist, - int64_t offset); +void rd_kafka_topic_partition_list_reset_offsets( + rd_kafka_topic_partition_list_t *rktparlist, + int64_t offset); -int rd_kafka_topic_partition_list_set_offsets 
( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - int from_rktp, int64_t def_value, int is_commit); +int rd_kafka_topic_partition_list_set_offsets( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + int from_rktp, + int64_t def_value, + int is_commit); -int rd_kafka_topic_partition_list_count_abs_offsets ( - const rd_kafka_topic_partition_list_t *rktparlist); +int rd_kafka_topic_partition_list_count_abs_offsets( + const rd_kafka_topic_partition_list_t *rktparlist); -int -rd_kafka_topic_partition_list_cmp (const void *_a, const void *_b, - int (*cmp) (const void *, const void *)); +int rd_kafka_topic_partition_list_cmp(const void *_a, + const void *_b, + int (*cmp)(const void *, const void *)); rd_kafka_toppar_t * -rd_kafka_topic_partition_ensure_toppar (rd_kafka_t *rk, - rd_kafka_topic_partition_t *rktpar, - rd_bool_t create_on_miss); - -rd_kafka_toppar_t * -rd_kafka_topic_partition_get_toppar (rd_kafka_t *rk, - rd_kafka_topic_partition_t *rktpar, - rd_bool_t create_on_miss) - RD_WARN_UNUSED_RESULT; - -void -rd_kafka_topic_partition_list_update_toppars (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t - *rktparlist, - rd_bool_t create_on_miss); - - -void -rd_kafka_topic_partition_list_query_leaders_async ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *rktparlist, - int timeout_ms, - rd_kafka_replyq_t replyq, - rd_kafka_op_cb_t *cb, - void *opaque); - -rd_kafka_resp_err_t -rd_kafka_topic_partition_list_query_leaders ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *leaders, int timeout_ms); - -int -rd_kafka_topic_partition_list_get_topics ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *rkts); - -int -rd_kafka_topic_partition_list_get_topic_names ( - const rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *topics, int include_regex); - -void -rd_kafka_topic_partition_list_log (rd_kafka_t *rk, const char *fac, int dbg, - const 
rd_kafka_topic_partition_list_t *rktparlist); - -#define RD_KAFKA_FMT_F_OFFSET 0x1 /* Print offset */ -#define RD_KAFKA_FMT_F_ONLY_ERR 0x2 /* Only include errored entries */ -#define RD_KAFKA_FMT_F_NO_ERR 0x4 /* Dont print error string */ -const char * -rd_kafka_topic_partition_list_str (const rd_kafka_topic_partition_list_t *rktparlist, - char *dest, size_t dest_size, - int fmt_flags); - -void -rd_kafka_topic_partition_list_update (rd_kafka_topic_partition_list_t *dst, - const rd_kafka_topic_partition_list_t *src); - -int rd_kafka_topic_partition_leader_cmp (const void *_a, const void *_b); +rd_kafka_topic_partition_ensure_toppar(rd_kafka_t *rk, + rd_kafka_topic_partition_t *rktpar, + rd_bool_t create_on_miss); + +rd_kafka_toppar_t *rd_kafka_topic_partition_get_toppar( + rd_kafka_t *rk, + rd_kafka_topic_partition_t *rktpar, + rd_bool_t create_on_miss) RD_WARN_UNUSED_RESULT; + +void rd_kafka_topic_partition_list_update_toppars( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_bool_t create_on_miss); + + +void rd_kafka_topic_partition_list_query_leaders_async( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *rktparlist, + int timeout_ms, + rd_kafka_replyq_t replyq, + rd_kafka_op_cb_t *cb, + void *opaque); + +rd_kafka_resp_err_t rd_kafka_topic_partition_list_query_leaders( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *leaders, + int timeout_ms); + +int rd_kafka_topic_partition_list_get_topics( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *rkts); + +int rd_kafka_topic_partition_list_get_topic_names( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *topics, + int include_regex); + +void rd_kafka_topic_partition_list_log( + rd_kafka_t *rk, + const char *fac, + int dbg, + const rd_kafka_topic_partition_list_t *rktparlist); + +#define RD_KAFKA_FMT_F_OFFSET 0x1 /* Print offset */ +#define RD_KAFKA_FMT_F_ONLY_ERR 0x2 /* Only include errored entries */ +#define 
RD_KAFKA_FMT_F_NO_ERR 0x4 /* Dont print error string */ +const char *rd_kafka_topic_partition_list_str( + const rd_kafka_topic_partition_list_t *rktparlist, + char *dest, + size_t dest_size, + int fmt_flags); + +void rd_kafka_topic_partition_list_update( + rd_kafka_topic_partition_list_t *dst, + const rd_kafka_topic_partition_list_t *src); + +int rd_kafka_topic_partition_leader_cmp(const void *_a, const void *_b); /** * @brief Match function that returns true if partition has a valid offset. */ -static RD_UNUSED int rd_kafka_topic_partition_match_valid_offset ( - const void *elem, const void *opaque) { +static RD_UNUSED int +rd_kafka_topic_partition_match_valid_offset(const void *elem, + const void *opaque) { const rd_kafka_topic_partition_t *rktpar = elem; return rktpar->offset >= 0; } -rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match ( - const rd_kafka_topic_partition_list_t *rktparlist, - int (*match) (const void *elem, const void *opaque), - void *opaque); +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match( + const rd_kafka_topic_partition_list_t *rktparlist, + int (*match)(const void *elem, const void *opaque), + void *opaque); -size_t -rd_kafka_topic_partition_list_sum ( - const rd_kafka_topic_partition_list_t *rktparlist, - size_t (*cb) (const rd_kafka_topic_partition_t *rktpar, void *opaque), - void *opaque); +size_t rd_kafka_topic_partition_list_sum( + const rd_kafka_topic_partition_list_t *rktparlist, + size_t (*cb)(const rd_kafka_topic_partition_t *rktpar, void *opaque), + void *opaque); -rd_bool_t -rd_kafka_topic_partition_list_has_duplicates ( - rd_kafka_topic_partition_list_t *rktparlist, - rd_bool_t ignore_partition); +rd_bool_t rd_kafka_topic_partition_list_has_duplicates( + rd_kafka_topic_partition_list_t *rktparlist, + rd_bool_t ignore_partition); -void rd_kafka_topic_partition_list_set_err ( - rd_kafka_topic_partition_list_t *rktparlist, - rd_kafka_resp_err_t err); +void 
rd_kafka_topic_partition_list_set_err( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_resp_err_t err); -rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err ( - const rd_kafka_topic_partition_list_t *rktparlist); +rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err( + const rd_kafka_topic_partition_list_t *rktparlist); -int rd_kafka_topic_partition_list_regex_cnt ( - const rd_kafka_topic_partition_list_t *rktparlist); +int rd_kafka_topic_partition_list_regex_cnt( + const rd_kafka_topic_partition_list_t *rktparlist); -void * -rd_kafka_topic_partition_list_copy_opaque (const void *src, void *opaque); +void *rd_kafka_topic_partition_list_copy_opaque(const void *src, void *opaque); /** * @brief Toppar + Op version tuple used for mapping Fetched partitions * back to their fetch versions. */ struct rd_kafka_toppar_ver { - rd_kafka_toppar_t *rktp; - int32_t version; + rd_kafka_toppar_t *rktp; + int32_t version; }; /** * @brief Toppar + Op version comparator. */ -static RD_INLINE RD_UNUSED -int rd_kafka_toppar_ver_cmp (const void *_a, const void *_b) { - const struct rd_kafka_toppar_ver *a = _a, *b = _b; - const rd_kafka_toppar_t *rktp_a = a->rktp; +static RD_INLINE RD_UNUSED int rd_kafka_toppar_ver_cmp(const void *_a, + const void *_b) { + const struct rd_kafka_toppar_ver *a = _a, *b = _b; + const rd_kafka_toppar_t *rktp_a = a->rktp; const rd_kafka_toppar_t *rktp_b = b->rktp; - int r; + int r; - if (rktp_a->rktp_rkt != rktp_b->rktp_rkt && - (r = rd_kafkap_str_cmp(rktp_a->rktp_rkt->rkt_topic, - rktp_b->rktp_rkt->rkt_topic))) - return r; + if (rktp_a->rktp_rkt != rktp_b->rktp_rkt && + (r = rd_kafkap_str_cmp(rktp_a->rktp_rkt->rkt_topic, + rktp_b->rktp_rkt->rkt_topic))) + return r; return RD_CMP(rktp_a->rktp_partition, rktp_b->rktp_partition); } @@ -777,35 +786,35 @@ int rd_kafka_toppar_ver_cmp (const void *_a, const void *_b) { /** * @brief Frees up resources for \p tver but not the \p tver itself. 
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_toppar_ver_destroy (struct rd_kafka_toppar_ver *tver) { - rd_kafka_toppar_destroy(tver->rktp); +static RD_INLINE RD_UNUSED void +rd_kafka_toppar_ver_destroy(struct rd_kafka_toppar_ver *tver) { + rd_kafka_toppar_destroy(tver->rktp); } /** * @returns 1 if rko version is outdated, else 0. */ -static RD_INLINE RD_UNUSED -int rd_kafka_op_version_outdated (rd_kafka_op_t *rko, int version) { - if (!rko->rko_version) - return 0; - - if (version) - return rko->rko_version < version; - - if (rko->rko_rktp) - return rko->rko_version < - rd_atomic32_get(&rko->rko_rktp->rktp_version); - return 0; +static RD_INLINE RD_UNUSED int rd_kafka_op_version_outdated(rd_kafka_op_t *rko, + int version) { + if (!rko->rko_version) + return 0; + + if (version) + return rko->rko_version < version; + + if (rko->rko_rktp) + return rko->rko_version < + rd_atomic32_get(&rko->rko_rktp->rktp_version); + return 0; } -void -rd_kafka_toppar_offset_commit_result (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets); +void rd_kafka_toppar_offset_commit_result( + rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets); -void rd_kafka_toppar_broker_leave_for_remove (rd_kafka_toppar_t *rktp); +void rd_kafka_toppar_broker_leave_for_remove(rd_kafka_toppar_t *rktp); /** @@ -817,43 +826,42 @@ struct rd_kafka_partition_leader { }; static RD_UNUSED void -rd_kafka_partition_leader_destroy (struct rd_kafka_partition_leader *leader) { +rd_kafka_partition_leader_destroy(struct rd_kafka_partition_leader *leader) { rd_kafka_broker_destroy(leader->rkb); rd_kafka_topic_partition_list_destroy(leader->partitions); rd_free(leader); } -void rd_kafka_partition_leader_destroy_free (void *ptr); +void rd_kafka_partition_leader_destroy_free(void *ptr); static RD_UNUSED struct rd_kafka_partition_leader * -rd_kafka_partition_leader_new (rd_kafka_broker_t *rkb) { 
+rd_kafka_partition_leader_new(rd_kafka_broker_t *rkb) { struct rd_kafka_partition_leader *leader = rd_malloc(sizeof(*leader)); - leader->rkb = rkb; + leader->rkb = rkb; rd_kafka_broker_keep(rkb); leader->partitions = rd_kafka_topic_partition_list_new(0); return leader; } -static RD_UNUSED -int rd_kafka_partition_leader_cmp (const void *_a, const void *_b) { +static RD_UNUSED int rd_kafka_partition_leader_cmp(const void *_a, + const void *_b) { const struct rd_kafka_partition_leader *a = _a, *b = _b; return rd_kafka_broker_cmp(a->rkb, b->rkb); } -int rd_kafka_toppar_pid_change (rd_kafka_toppar_t *rktp, rd_kafka_pid_t pid, - uint64_t base_msgid); +int rd_kafka_toppar_pid_change(rd_kafka_toppar_t *rktp, + rd_kafka_pid_t pid, + uint64_t base_msgid); -int rd_kafka_toppar_handle_purge_queues (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int purge_flags); -void rd_kafka_purge_ua_toppar_queues (rd_kafka_t *rk); +int rd_kafka_toppar_handle_purge_queues(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb, + int purge_flags); +void rd_kafka_purge_ua_toppar_queues(rd_kafka_t *rk); -static RD_UNUSED -int rd_kafka_toppar_topic_cmp (const void *_a, const void *_b) { +static RD_UNUSED int rd_kafka_toppar_topic_cmp(const void *_a, const void *_b) { const rd_kafka_toppar_t *a = _a, *b = _b; - return strcmp(a->rktp_rkt->rkt_topic->str, - b->rktp_rkt->rkt_topic->str); + return strcmp(a->rktp_rkt->rkt_topic->str, b->rktp_rkt->rkt_topic->str); } diff --git a/src/rdkafka_pattern.c b/src/rdkafka_pattern.c index fc2d71126d..dfe3ef03e6 100644 --- a/src/rdkafka_pattern.c +++ b/src/rdkafka_pattern.c @@ -29,30 +29,30 @@ #include "rdkafka_int.h" #include "rdkafka_pattern.h" -void rd_kafka_pattern_destroy (rd_kafka_pattern_list_t *plist, - rd_kafka_pattern_t *rkpat) { +void rd_kafka_pattern_destroy(rd_kafka_pattern_list_t *plist, + rd_kafka_pattern_t *rkpat) { TAILQ_REMOVE(&plist->rkpl_head, rkpat, rkpat_link); - rd_regex_destroy(rkpat->rkpat_re); + rd_regex_destroy(rkpat->rkpat_re); 
rd_free(rkpat->rkpat_orig); rd_free(rkpat); } -void rd_kafka_pattern_add (rd_kafka_pattern_list_t *plist, - rd_kafka_pattern_t *rkpat) { +void rd_kafka_pattern_add(rd_kafka_pattern_list_t *plist, + rd_kafka_pattern_t *rkpat) { TAILQ_INSERT_TAIL(&plist->rkpl_head, rkpat, rkpat_link); } -rd_kafka_pattern_t *rd_kafka_pattern_new (const char *pattern, - char *errstr, int errstr_size) { +rd_kafka_pattern_t * +rd_kafka_pattern_new(const char *pattern, char *errstr, int errstr_size) { rd_kafka_pattern_t *rkpat; - rkpat = rd_calloc(1, sizeof(*rkpat)); + rkpat = rd_calloc(1, sizeof(*rkpat)); - /* Verify and precompile pattern */ - if (!(rkpat->rkpat_re = rd_regex_comp(pattern, errstr, errstr_size))) { - rd_free(rkpat); - return NULL; - } + /* Verify and precompile pattern */ + if (!(rkpat->rkpat_re = rd_regex_comp(pattern, errstr, errstr_size))) { + rd_free(rkpat); + return NULL; + } rkpat->rkpat_orig = rd_strdup(pattern); @@ -61,11 +61,11 @@ rd_kafka_pattern_t *rd_kafka_pattern_new (const char *pattern, -int rd_kafka_pattern_match (rd_kafka_pattern_list_t *plist, const char *str) { +int rd_kafka_pattern_match(rd_kafka_pattern_list_t *plist, const char *str) { rd_kafka_pattern_t *rkpat; TAILQ_FOREACH(rkpat, &plist->rkpl_head, rkpat_link) { - if (rd_regex_exec(rkpat->rkpat_re, str)) + if (rd_regex_exec(rkpat->rkpat_re, str)) return 1; } @@ -76,9 +76,10 @@ int rd_kafka_pattern_match (rd_kafka_pattern_list_t *plist, const char *str) { /** * Append pattern to list. */ -int rd_kafka_pattern_list_append (rd_kafka_pattern_list_t *plist, - const char *pattern, - char *errstr, int errstr_size) { +int rd_kafka_pattern_list_append(rd_kafka_pattern_list_t *plist, + const char *pattern, + char *errstr, + int errstr_size) { rd_kafka_pattern_t *rkpat; rkpat = rd_kafka_pattern_new(pattern, errstr, errstr_size); if (!rkpat) @@ -92,8 +93,8 @@ int rd_kafka_pattern_list_append (rd_kafka_pattern_list_t *plist, * Remove matching patterns. * Returns the number of removed patterns. 
*/ -int rd_kafka_pattern_list_remove (rd_kafka_pattern_list_t *plist, - const char *pattern) { +int rd_kafka_pattern_list_remove(rd_kafka_pattern_list_t *plist, + const char *pattern) { rd_kafka_pattern_t *rkpat, *rkpat_tmp; int cnt = 0; @@ -109,11 +110,12 @@ int rd_kafka_pattern_list_remove (rd_kafka_pattern_list_t *plist, /** * Parse a patternlist and populate a list with it. */ -static int rd_kafka_pattern_list_parse (rd_kafka_pattern_list_t *plist, - const char *patternlist, - char *errstr, size_t errstr_size) { - char *s; - rd_strdupa(&s, patternlist); +static int rd_kafka_pattern_list_parse(rd_kafka_pattern_list_t *plist, + const char *patternlist, + char *errstr, + size_t errstr_size) { + char *s; + rd_strdupa(&s, patternlist); while (s && *s) { char *t = s; @@ -121,10 +123,10 @@ static int rd_kafka_pattern_list_parse (rd_kafka_pattern_list_t *plist, /* Find separator */ while ((t = strchr(t, ','))) { - if (t > s && *(t-1) == ',') { + if (t > s && *(t - 1) == ',') { /* separator was escaped, remove escape and scan again. */ - memmove(t-1, t, strlen(t)+1); + memmove(t - 1, t, strlen(t) + 1); t++; } else { *t = '\0'; @@ -137,7 +139,8 @@ static int rd_kafka_pattern_list_parse (rd_kafka_pattern_list_t *plist, sizeof(re_errstr)) == -1) { rd_snprintf(errstr, errstr_size, "Failed to parse pattern \"%s\": " - "%s", s, re_errstr); + "%s", + s, re_errstr); rd_kafka_pattern_list_clear(plist); return -1; } @@ -152,7 +155,7 @@ static int rd_kafka_pattern_list_parse (rd_kafka_pattern_list_t *plist, /** * Clear a pattern list. 
*/ -void rd_kafka_pattern_list_clear (rd_kafka_pattern_list_t *plist) { +void rd_kafka_pattern_list_clear(rd_kafka_pattern_list_t *plist) { rd_kafka_pattern_t *rkpat; while ((rkpat = TAILQ_FIRST(&plist->rkpl_head))) @@ -168,7 +171,7 @@ void rd_kafka_pattern_list_clear (rd_kafka_pattern_list_t *plist) { /** * Free a pattern list previously created with list_new() */ -void rd_kafka_pattern_list_destroy (rd_kafka_pattern_list_t *plist) { +void rd_kafka_pattern_list_destroy(rd_kafka_pattern_list_t *plist) { rd_kafka_pattern_list_clear(plist); rd_free(plist); } @@ -177,13 +180,14 @@ void rd_kafka_pattern_list_destroy (rd_kafka_pattern_list_t *plist) { * Initialize a pattern list, optionally populating it with the * comma-separated patterns in 'patternlist'. */ -int rd_kafka_pattern_list_init (rd_kafka_pattern_list_t *plist, - const char *patternlist, - char *errstr, size_t errstr_size) { +int rd_kafka_pattern_list_init(rd_kafka_pattern_list_t *plist, + const char *patternlist, + char *errstr, + size_t errstr_size) { TAILQ_INIT(&plist->rkpl_head); if (patternlist) { - if (rd_kafka_pattern_list_parse(plist, patternlist, - errstr, errstr_size) == -1) + if (rd_kafka_pattern_list_parse(plist, patternlist, errstr, + errstr_size) == -1) return -1; plist->rkpl_orig = rd_strdup(patternlist); } else @@ -196,15 +200,15 @@ int rd_kafka_pattern_list_init (rd_kafka_pattern_list_t *plist, /** * Allocate and initialize a new list. 
*/ -rd_kafka_pattern_list_t *rd_kafka_pattern_list_new (const char *patternlist, - char *errstr, - int errstr_size) { +rd_kafka_pattern_list_t *rd_kafka_pattern_list_new(const char *patternlist, + char *errstr, + int errstr_size) { rd_kafka_pattern_list_t *plist; plist = rd_calloc(1, sizeof(*plist)); - if (rd_kafka_pattern_list_init(plist, patternlist, - errstr, errstr_size) == -1) { + if (rd_kafka_pattern_list_init(plist, patternlist, errstr, + errstr_size) == -1) { rd_free(plist); return NULL; } @@ -217,8 +221,8 @@ rd_kafka_pattern_list_t *rd_kafka_pattern_list_new (const char *patternlist, * Make a copy of a pattern list. */ rd_kafka_pattern_list_t * -rd_kafka_pattern_list_copy (rd_kafka_pattern_list_t *src) { - char errstr[16]; - return rd_kafka_pattern_list_new(src->rkpl_orig, - errstr, sizeof(errstr)); +rd_kafka_pattern_list_copy(rd_kafka_pattern_list_t *src) { + char errstr[16]; + return rd_kafka_pattern_list_new(src->rkpl_orig, errstr, + sizeof(errstr)); } diff --git a/src/rdkafka_pattern.h b/src/rdkafka_pattern.h index fd53fec376..88d183cd32 100644 --- a/src/rdkafka_pattern.h +++ b/src/rdkafka_pattern.h @@ -31,38 +31,40 @@ #include "rdregex.h" typedef struct rd_kafka_pattern_s { - TAILQ_ENTRY(rd_kafka_pattern_s) rkpat_link; + TAILQ_ENTRY(rd_kafka_pattern_s) rkpat_link; - rd_regex_t *rkpat_re; /* Compiled regex */ - char *rkpat_orig; /* Original pattern */ + rd_regex_t *rkpat_re; /* Compiled regex */ + char *rkpat_orig; /* Original pattern */ } rd_kafka_pattern_t; typedef struct rd_kafka_pattern_list_s { - TAILQ_HEAD(,rd_kafka_pattern_s) rkpl_head; - char *rkpl_orig; + TAILQ_HEAD(, rd_kafka_pattern_s) rkpl_head; + char *rkpl_orig; } rd_kafka_pattern_list_t; -void rd_kafka_pattern_destroy (rd_kafka_pattern_list_t *plist, - rd_kafka_pattern_t *rkpat); -void rd_kafka_pattern_add (rd_kafka_pattern_list_t *plist, - rd_kafka_pattern_t *rkpat); -rd_kafka_pattern_t *rd_kafka_pattern_new (const char *pattern, - char *errstr, int errstr_size); -int 
rd_kafka_pattern_match (rd_kafka_pattern_list_t *plist, const char *str); -int rd_kafka_pattern_list_append (rd_kafka_pattern_list_t *plist, - const char *pattern, - char *errstr, int errstr_size); -int rd_kafka_pattern_list_remove (rd_kafka_pattern_list_t *plist, - const char *pattern); -void rd_kafka_pattern_list_clear (rd_kafka_pattern_list_t *plist); -void rd_kafka_pattern_list_destroy (rd_kafka_pattern_list_t *plist); -int rd_kafka_pattern_list_init (rd_kafka_pattern_list_t *plist, - const char *patternlist, - char *errstr, size_t errstr_size); -rd_kafka_pattern_list_t *rd_kafka_pattern_list_new (const char *patternlist, - char *errstr, - int errstr_size); +void rd_kafka_pattern_destroy(rd_kafka_pattern_list_t *plist, + rd_kafka_pattern_t *rkpat); +void rd_kafka_pattern_add(rd_kafka_pattern_list_t *plist, + rd_kafka_pattern_t *rkpat); +rd_kafka_pattern_t * +rd_kafka_pattern_new(const char *pattern, char *errstr, int errstr_size); +int rd_kafka_pattern_match(rd_kafka_pattern_list_t *plist, const char *str); +int rd_kafka_pattern_list_append(rd_kafka_pattern_list_t *plist, + const char *pattern, + char *errstr, + int errstr_size); +int rd_kafka_pattern_list_remove(rd_kafka_pattern_list_t *plist, + const char *pattern); +void rd_kafka_pattern_list_clear(rd_kafka_pattern_list_t *plist); +void rd_kafka_pattern_list_destroy(rd_kafka_pattern_list_t *plist); +int rd_kafka_pattern_list_init(rd_kafka_pattern_list_t *plist, + const char *patternlist, + char *errstr, + size_t errstr_size); +rd_kafka_pattern_list_t *rd_kafka_pattern_list_new(const char *patternlist, + char *errstr, + int errstr_size); rd_kafka_pattern_list_t * -rd_kafka_pattern_list_copy (rd_kafka_pattern_list_t *src); +rd_kafka_pattern_list_copy(rd_kafka_pattern_list_t *src); #endif /* _RDKAFKA_PATTERN_H_ */ diff --git a/src/rdkafka_plugin.c b/src/rdkafka_plugin.c index b899899a50..f58bc5060c 100644 --- a/src/rdkafka_plugin.c +++ b/src/rdkafka_plugin.c @@ -32,10 +32,10 @@ typedef struct rd_kafka_plugin_s 
{ - char *rkplug_path; /* Library path */ - rd_kafka_t *rkplug_rk; /* Backpointer to the rk handle */ - void *rkplug_handle; /* dlopen (or similar) handle */ - void *rkplug_opaque; /* Plugin's opaque */ + char *rkplug_path; /* Library path */ + rd_kafka_t *rkplug_rk; /* Backpointer to the rk handle */ + void *rkplug_handle; /* dlopen (or similar) handle */ + void *rkplug_opaque; /* Plugin's opaque */ } rd_kafka_plugin_t; @@ -43,7 +43,7 @@ typedef struct rd_kafka_plugin_s { /** * @brief Plugin path comparator */ -static int rd_kafka_plugin_cmp (const void *_a, const void *_b) { +static int rd_kafka_plugin_cmp(const void *_a, const void *_b) { const rd_kafka_plugin_t *a = _a, *b = _b; return strcmp(a->rkplug_path, b->rkplug_path); @@ -60,11 +60,12 @@ static int rd_kafka_plugin_cmp (const void *_a, const void *_b) { * plugins referencing the library have been destroyed. * (dlopen() and LoadLibrary() does this for us) */ -static rd_kafka_resp_err_t -rd_kafka_plugin_new (rd_kafka_conf_t *conf, const char *path, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t rd_kafka_plugin_new(rd_kafka_conf_t *conf, + const char *path, + char *errstr, + size_t errstr_size) { rd_kafka_plugin_t *rkplug; - const rd_kafka_plugin_t skel = { .rkplug_path = (char *)path }; + const rd_kafka_plugin_t skel = {.rkplug_path = (char *)path}; rd_kafka_plugin_f_conf_init_t *conf_init; rd_kafka_resp_err_t err; void *handle; @@ -72,25 +73,23 @@ rd_kafka_plugin_new (rd_kafka_conf_t *conf, const char *path, /* Avoid duplicates */ if (rd_list_find(&conf->plugins, &skel, rd_kafka_plugin_cmp)) { - rd_snprintf(errstr, errstr_size, - "Ignoring duplicate plugin %s", path); + rd_snprintf(errstr, errstr_size, "Ignoring duplicate plugin %s", + path); return RD_KAFKA_RESP_ERR_NO_ERROR; } - rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", - "Loading plugin \"%s\"", path); + rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", "Loading plugin \"%s\"", path); /* Attempt to load library */ if (!(handle = rd_dl_open(path, 
errstr, errstr_size))) { rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", - "Failed to load plugin \"%s\": %s", - path, errstr); + "Failed to load plugin \"%s\": %s", path, errstr); return RD_KAFKA_RESP_ERR__FS; } /* Find conf_init() function */ - if (!(conf_init = rd_dl_sym(handle, "conf_init", - errstr, errstr_size))) { + if (!(conf_init = + rd_dl_sym(handle, "conf_init", errstr, errstr_size))) { rd_dl_close(handle); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -104,15 +103,14 @@ rd_kafka_plugin_new (rd_kafka_conf_t *conf, const char *path, return err; } - rkplug = rd_calloc(1, sizeof(*rkplug)); - rkplug->rkplug_path = rd_strdup(path); - rkplug->rkplug_handle = handle; + rkplug = rd_calloc(1, sizeof(*rkplug)); + rkplug->rkplug_path = rd_strdup(path); + rkplug->rkplug_handle = handle; rkplug->rkplug_opaque = plug_opaque; rd_list_add(&conf->plugins, rkplug); - rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", - "Plugin \"%s\" loaded", path); + rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", "Plugin \"%s\" loaded", path); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -127,7 +125,7 @@ rd_kafka_plugin_new (rd_kafka_conf_t *conf, const char *path, * This is true for POSIX dlopen() and Win32 LoadLibrary(). * @locality application thread */ -static void rd_kafka_plugin_destroy (rd_kafka_plugin_t *rkplug) { +static void rd_kafka_plugin_destroy(rd_kafka_plugin_t *rkplug) { rd_dl_close(rkplug->rkplug_handle); rd_free(rkplug->rkplug_path); rd_free(rkplug); @@ -143,9 +141,10 @@ static void rd_kafka_plugin_destroy (rd_kafka_plugin_t *rkplug) { * @returns the error code of the first failing plugin. * @locality application thread calling rd_kafka_new(). 
*/ -static rd_kafka_conf_res_t -rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths, - char *errstr, size_t errstr_size) { +static rd_kafka_conf_res_t rd_kafka_plugins_conf_set0(rd_kafka_conf_t *conf, + const char *paths, + char *errstr, + size_t errstr_size) { char *s; rd_list_destroy(&conf->plugins); @@ -158,8 +157,8 @@ rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths, rd_strdupa(&s, paths); rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", - "Loading plugins from conf object %p: \"%s\"", - conf, paths); + "Loading plugins from conf object %p: \"%s\"", conf, + paths); while (s && *s) { char *path = s; @@ -168,13 +167,13 @@ rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths, if ((t = strchr(s, ';'))) { *t = '\0'; - s = t+1; + s = t + 1; } else { s = NULL; } - if ((err = rd_kafka_plugin_new(conf, path, - errstr, errstr_size))) { + if ((err = rd_kafka_plugin_new(conf, path, errstr, + errstr_size))) { /* Failed to load plugin */ size_t elen = errstr_size > 0 ? strlen(errstr) : 0; @@ -182,7 +181,7 @@ rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths, * plugin path to the error message. 
*/ if (elen + strlen("(plugin )") + strlen(path) < errstr_size) - rd_snprintf(errstr+elen, errstr_size-elen, + rd_snprintf(errstr + elen, errstr_size - elen, " (plugin %s)", path); rd_list_destroy(&conf->plugins); @@ -197,13 +196,18 @@ rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths, /** * @brief Conf setter for "plugin.library.paths" */ -rd_kafka_conf_res_t rd_kafka_plugins_conf_set ( - int scope, void *pconf, const char *name, const char *value, - void *dstptr, rd_kafka_conf_set_mode_t set_mode, - char *errstr, size_t errstr_size) { +rd_kafka_conf_res_t rd_kafka_plugins_conf_set(int scope, + void *pconf, + const char *name, + const char *value, + void *dstptr, + rd_kafka_conf_set_mode_t set_mode, + char *errstr, + size_t errstr_size) { assert(scope == _RK_GLOBAL); - return rd_kafka_plugins_conf_set0((rd_kafka_conf_t *)pconf, - set_mode == _RK_CONF_PROP_SET_DEL ? - NULL : value, errstr, errstr_size); + return rd_kafka_plugins_conf_set0( + (rd_kafka_conf_t *)pconf, + set_mode == _RK_CONF_PROP_SET_DEL ? NULL : value, errstr, + errstr_size); } diff --git a/src/rdkafka_plugin.h b/src/rdkafka_plugin.h index b588a7d016..1783d5f53c 100644 --- a/src/rdkafka_plugin.h +++ b/src/rdkafka_plugin.h @@ -29,9 +29,13 @@ #ifndef _RDKAFKA_PLUGIN_H #define _RDKAFKA_PLUGIN_H -rd_kafka_conf_res_t rd_kafka_plugins_conf_set ( - int scope, void *conf, const char *name, const char *value, - void *dstptr, rd_kafka_conf_set_mode_t set_mode, - char *errstr, size_t errstr_size); +rd_kafka_conf_res_t rd_kafka_plugins_conf_set(int scope, + void *conf, + const char *name, + const char *value, + void *dstptr, + rd_kafka_conf_set_mode_t set_mode, + char *errstr, + size_t errstr_size); #endif /* _RDKAFKA_PLUGIN_H */ diff --git a/src/rdkafka_proto.h b/src/rdkafka_proto.h index 598f7e9423..419a4640f2 100644 --- a/src/rdkafka_proto.h +++ b/src/rdkafka_proto.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012,2013 Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -43,7 +43,7 @@ #define RD_KAFKA_REQUEST_DEFAULT_RETRIES 2 /** Max (practically infinite) retry count */ -#define RD_KAFKA_REQUEST_MAX_RETRIES INT_MAX +#define RD_KAFKA_REQUEST_MAX_RETRIES INT_MAX /** Do not retry request */ #define RD_KAFKA_REQUEST_NO_RETRIES 0 @@ -53,22 +53,22 @@ * Request types */ struct rd_kafkap_reqhdr { - int32_t Size; - int16_t ApiKey; - int16_t ApiVersion; - int32_t CorrId; + int32_t Size; + int16_t ApiKey; + int16_t ApiVersion; + int32_t CorrId; /* ClientId follows */ }; -#define RD_KAFKAP_REQHDR_SIZE (4+2+2+4) -#define RD_KAFKAP_RESHDR_SIZE (4+4) +#define RD_KAFKAP_REQHDR_SIZE (4 + 2 + 2 + 4) +#define RD_KAFKAP_RESHDR_SIZE (4 + 4) /** * Response header */ struct rd_kafkap_reshdr { - int32_t Size; - int32_t CorrId; + int32_t Size; + int32_t CorrId; }; @@ -93,75 +93,72 @@ struct rd_kafkap_reshdr { * * Generate updates to this list with generate_proto.sh. 
*/ -static RD_UNUSED -const char *rd_kafka_ApiKey2str (int16_t ApiKey) { +static RD_UNUSED const char *rd_kafka_ApiKey2str(int16_t ApiKey) { static const char *names[] = { - [RD_KAFKAP_Produce] = "Produce", - [RD_KAFKAP_Fetch] = "Fetch", - [RD_KAFKAP_ListOffsets] = "ListOffsets", - [RD_KAFKAP_Metadata] = "Metadata", - [RD_KAFKAP_LeaderAndIsr] = "LeaderAndIsr", - [RD_KAFKAP_StopReplica] = "StopReplica", - [RD_KAFKAP_UpdateMetadata] = "UpdateMetadata", - [RD_KAFKAP_ControlledShutdown] = "ControlledShutdown", - [RD_KAFKAP_OffsetCommit] = "OffsetCommit", - [RD_KAFKAP_OffsetFetch] = "OffsetFetch", - [RD_KAFKAP_FindCoordinator] = "FindCoordinator", - [RD_KAFKAP_JoinGroup] = "JoinGroup", - [RD_KAFKAP_Heartbeat] = "Heartbeat", - [RD_KAFKAP_LeaveGroup] = "LeaveGroup", - [RD_KAFKAP_SyncGroup] = "SyncGroup", - [RD_KAFKAP_DescribeGroups] = "DescribeGroups", - [RD_KAFKAP_ListGroups] = "ListGroups", - [RD_KAFKAP_SaslHandshake] = "SaslHandshake", - [RD_KAFKAP_ApiVersion] = "ApiVersion", - [RD_KAFKAP_CreateTopics] = "CreateTopics", - [RD_KAFKAP_DeleteTopics] = "DeleteTopics", - [RD_KAFKAP_DeleteRecords] = "DeleteRecords", - [RD_KAFKAP_InitProducerId] = "InitProducerId", - [RD_KAFKAP_OffsetForLeaderEpoch] = "OffsetForLeaderEpoch", - [RD_KAFKAP_AddPartitionsToTxn] = "AddPartitionsToTxn", - [RD_KAFKAP_AddOffsetsToTxn] = "AddOffsetsToTxn", - [RD_KAFKAP_EndTxn] = "EndTxn", - [RD_KAFKAP_WriteTxnMarkers] = "WriteTxnMarkers", - [RD_KAFKAP_TxnOffsetCommit] = "TxnOffsetCommit", - [RD_KAFKAP_DescribeAcls] = "DescribeAcls", - [RD_KAFKAP_CreateAcls] = "CreateAcls", - [RD_KAFKAP_DeleteAcls] = "DeleteAcls", - [RD_KAFKAP_DescribeConfigs] = "DescribeConfigs", - [RD_KAFKAP_AlterConfigs] = "AlterConfigs", - [RD_KAFKAP_AlterReplicaLogDirs] = "AlterReplicaLogDirs", - [RD_KAFKAP_DescribeLogDirs] = "DescribeLogDirs", - [RD_KAFKAP_SaslAuthenticate] = "SaslAuthenticate", - [RD_KAFKAP_CreatePartitions] = "CreatePartitions", - [RD_KAFKAP_CreateDelegationToken] = "CreateDelegationToken", - 
[RD_KAFKAP_RenewDelegationToken] = "RenewDelegationToken", - [RD_KAFKAP_ExpireDelegationToken] = "ExpireDelegationToken", - [RD_KAFKAP_DescribeDelegationToken] = "DescribeDelegationToken", - [RD_KAFKAP_DeleteGroups] = "DeleteGroups", - [RD_KAFKAP_ElectLeaders] = "ElectLeadersRequest", - [RD_KAFKAP_IncrementalAlterConfigs] = + [RD_KAFKAP_Produce] = "Produce", + [RD_KAFKAP_Fetch] = "Fetch", + [RD_KAFKAP_ListOffsets] = "ListOffsets", + [RD_KAFKAP_Metadata] = "Metadata", + [RD_KAFKAP_LeaderAndIsr] = "LeaderAndIsr", + [RD_KAFKAP_StopReplica] = "StopReplica", + [RD_KAFKAP_UpdateMetadata] = "UpdateMetadata", + [RD_KAFKAP_ControlledShutdown] = "ControlledShutdown", + [RD_KAFKAP_OffsetCommit] = "OffsetCommit", + [RD_KAFKAP_OffsetFetch] = "OffsetFetch", + [RD_KAFKAP_FindCoordinator] = "FindCoordinator", + [RD_KAFKAP_JoinGroup] = "JoinGroup", + [RD_KAFKAP_Heartbeat] = "Heartbeat", + [RD_KAFKAP_LeaveGroup] = "LeaveGroup", + [RD_KAFKAP_SyncGroup] = "SyncGroup", + [RD_KAFKAP_DescribeGroups] = "DescribeGroups", + [RD_KAFKAP_ListGroups] = "ListGroups", + [RD_KAFKAP_SaslHandshake] = "SaslHandshake", + [RD_KAFKAP_ApiVersion] = "ApiVersion", + [RD_KAFKAP_CreateTopics] = "CreateTopics", + [RD_KAFKAP_DeleteTopics] = "DeleteTopics", + [RD_KAFKAP_DeleteRecords] = "DeleteRecords", + [RD_KAFKAP_InitProducerId] = "InitProducerId", + [RD_KAFKAP_OffsetForLeaderEpoch] = "OffsetForLeaderEpoch", + [RD_KAFKAP_AddPartitionsToTxn] = "AddPartitionsToTxn", + [RD_KAFKAP_AddOffsetsToTxn] = "AddOffsetsToTxn", + [RD_KAFKAP_EndTxn] = "EndTxn", + [RD_KAFKAP_WriteTxnMarkers] = "WriteTxnMarkers", + [RD_KAFKAP_TxnOffsetCommit] = "TxnOffsetCommit", + [RD_KAFKAP_DescribeAcls] = "DescribeAcls", + [RD_KAFKAP_CreateAcls] = "CreateAcls", + [RD_KAFKAP_DeleteAcls] = "DeleteAcls", + [RD_KAFKAP_DescribeConfigs] = "DescribeConfigs", + [RD_KAFKAP_AlterConfigs] = "AlterConfigs", + [RD_KAFKAP_AlterReplicaLogDirs] = "AlterReplicaLogDirs", + [RD_KAFKAP_DescribeLogDirs] = "DescribeLogDirs", + [RD_KAFKAP_SaslAuthenticate] = 
"SaslAuthenticate", + [RD_KAFKAP_CreatePartitions] = "CreatePartitions", + [RD_KAFKAP_CreateDelegationToken] = "CreateDelegationToken", + [RD_KAFKAP_RenewDelegationToken] = "RenewDelegationToken", + [RD_KAFKAP_ExpireDelegationToken] = "ExpireDelegationToken", + [RD_KAFKAP_DescribeDelegationToken] = "DescribeDelegationToken", + [RD_KAFKAP_DeleteGroups] = "DeleteGroups", + [RD_KAFKAP_ElectLeaders] = "ElectLeadersRequest", + [RD_KAFKAP_IncrementalAlterConfigs] = "IncrementalAlterConfigsRequest", - [RD_KAFKAP_AlterPartitionReassignments] = + [RD_KAFKAP_AlterPartitionReassignments] = "AlterPartitionReassignmentsRequest", - [RD_KAFKAP_ListPartitionReassignments] = + [RD_KAFKAP_ListPartitionReassignments] = "ListPartitionReassignmentsRequest", - [RD_KAFKAP_OffsetDelete] = "OffsetDeleteRequest", - [RD_KAFKAP_DescribeClientQuotas] = - "DescribeClientQuotasRequest", - [RD_KAFKAP_AlterClientQuotas] = - "AlterClientQuotasRequest", - [RD_KAFKAP_DescribeUserScramCredentials] = + [RD_KAFKAP_OffsetDelete] = "OffsetDeleteRequest", + [RD_KAFKAP_DescribeClientQuotas] = "DescribeClientQuotasRequest", + [RD_KAFKAP_AlterClientQuotas] = "AlterClientQuotasRequest", + [RD_KAFKAP_DescribeUserScramCredentials] = "DescribeUserScramCredentialsRequest", - [RD_KAFKAP_AlterUserScramCredentials] = + [RD_KAFKAP_AlterUserScramCredentials] = "AlterUserScramCredentialsRequest", - [RD_KAFKAP_Vote] = "VoteRequest", - [RD_KAFKAP_BeginQuorumEpoch] = "BeginQuorumEpochRequest", - [RD_KAFKAP_EndQuorumEpoch] = "EndQuorumEpochRequest", - [RD_KAFKAP_DescribeQuorum] = "DescribeQuorumRequest", - [RD_KAFKAP_AlterIsr] = "AlterIsrRequest", - [RD_KAFKAP_UpdateFeatures] = "UpdateFeaturesRequest", - [RD_KAFKAP_Envelope] = "EnvelopeRequest", + [RD_KAFKAP_Vote] = "VoteRequest", + [RD_KAFKAP_BeginQuorumEpoch] = "BeginQuorumEpochRequest", + [RD_KAFKAP_EndQuorumEpoch] = "EndQuorumEpochRequest", + [RD_KAFKAP_DescribeQuorum] = "DescribeQuorumRequest", + [RD_KAFKAP_AlterIsr] = "AlterIsrRequest", + [RD_KAFKAP_UpdateFeatures] = 
"UpdateFeaturesRequest", + [RD_KAFKAP_Envelope] = "EnvelopeRequest", }; static RD_TLS char ret[64]; @@ -176,29 +173,24 @@ const char *rd_kafka_ApiKey2str (int16_t ApiKey) { - - - - - /** * @brief ApiKey version support tuple. */ struct rd_kafka_ApiVersion { - int16_t ApiKey; - int16_t MinVer; - int16_t MaxVer; + int16_t ApiKey; + int16_t MinVer; + int16_t MaxVer; }; /** * @brief ApiVersion.ApiKey comparator. */ -static RD_UNUSED -int rd_kafka_ApiVersion_key_cmp (const void *_a, const void *_b) { +static RD_UNUSED int rd_kafka_ApiVersion_key_cmp(const void *_a, + const void *_b) { const struct rd_kafka_ApiVersion *a = - (const struct rd_kafka_ApiVersion *)_a; + (const struct rd_kafka_ApiVersion *)_a; const struct rd_kafka_ApiVersion *b = - (const struct rd_kafka_ApiVersion *)_b; + (const struct rd_kafka_ApiVersion *)_b; return RD_CMP(a->ApiKey, b->ApiKey); } @@ -206,12 +198,12 @@ int rd_kafka_ApiVersion_key_cmp (const void *_a, const void *_b) { typedef enum { RD_KAFKA_READ_UNCOMMITTED = 0, - RD_KAFKA_READ_COMMITTED = 1 + RD_KAFKA_READ_COMMITTED = 1 } rd_kafka_isolation_level_t; -#define RD_KAFKA_CTRL_MSG_ABORT 0 +#define RD_KAFKA_CTRL_MSG_ABORT 0 #define RD_KAFKA_CTRL_MSG_COMMIT 1 @@ -220,7 +212,7 @@ typedef enum { */ typedef enum rd_kafka_coordtype_t { RD_KAFKA_COORD_GROUP = 0, - RD_KAFKA_COORD_TXN = 1 + RD_KAFKA_COORD_TXN = 1 } rd_kafka_coordtype_t; @@ -233,14 +225,14 @@ typedef enum rd_kafka_coordtype_t { * */ typedef struct rd_kafkap_str_s { - /* convenience header (aligned access, host endian) */ - int len; /* Kafka string length (-1=NULL, 0=empty, >0=string) */ - const char *str; /* points into data[] or other memory, - * not NULL-terminated */ + /* convenience header (aligned access, host endian) */ + int len; /* Kafka string length (-1=NULL, 0=empty, >0=string) */ + const char *str; /* points into data[] or other memory, + * not NULL-terminated */ } rd_kafkap_str_t; -#define RD_KAFKAP_STR_LEN_NULL -1 +#define RD_KAFKAP_STR_LEN_NULL -1 #define 
RD_KAFKAP_STR_IS_NULL(kstr) ((kstr)->len == RD_KAFKAP_STR_LEN_NULL) /* Returns the length of the string of a kafka protocol string representation */ @@ -253,32 +245,33 @@ typedef struct rd_kafkap_str_s { /** @returns true if kstr is pre-serialized through .._new() */ -#define RD_KAFKAP_STR_IS_SERIALIZED(kstr) \ - (((const char *)((kstr)+1))+2 == (const char *)((kstr)->str)) +#define RD_KAFKAP_STR_IS_SERIALIZED(kstr) \ + (((const char *)((kstr) + 1)) + 2 == (const char *)((kstr)->str)) /* Serialized Kafka string: only works for _new() kstrs. * Check with RD_KAFKAP_STR_IS_SERIALIZED */ -#define RD_KAFKAP_STR_SER(kstr) ((kstr)+1) +#define RD_KAFKAP_STR_SER(kstr) ((kstr) + 1) /* Macro suitable for "%.*s" printing. */ -#define RD_KAFKAP_STR_PR(kstr) \ - (int)((kstr)->len == RD_KAFKAP_STR_LEN_NULL ? 0 : (kstr)->len), \ - (kstr)->str +#define RD_KAFKAP_STR_PR(kstr) \ + (int)((kstr)->len == RD_KAFKAP_STR_LEN_NULL ? 0 : (kstr)->len), \ + (kstr)->str /* strndupa() a Kafka string */ -#define RD_KAFKAP_STR_DUPA(destptr,kstr) \ - rd_strndupa((destptr), (kstr)->str, RD_KAFKAP_STR_LEN(kstr)) +#define RD_KAFKAP_STR_DUPA(destptr, kstr) \ + rd_strndupa((destptr), (kstr)->str, RD_KAFKAP_STR_LEN(kstr)) /* strndup() a Kafka string */ #define RD_KAFKAP_STR_DUP(kstr) rd_strndup((kstr)->str, RD_KAFKAP_STR_LEN(kstr)) -#define RD_KAFKAP_STR_INITIALIZER { .len = RD_KAFKAP_STR_LEN_NULL, .str = NULL } +#define RD_KAFKAP_STR_INITIALIZER \ + { .len = RD_KAFKAP_STR_LEN_NULL, .str = NULL } /** * Frees a Kafka string previously allocated with `rd_kafkap_str_new()` */ -static RD_UNUSED void rd_kafkap_str_destroy (rd_kafkap_str_t *kstr) { - rd_free(kstr); +static RD_UNUSED void rd_kafkap_str_destroy(rd_kafkap_str_t *kstr) { + rd_free(kstr); } @@ -290,34 +283,34 @@ static RD_UNUSED void rd_kafkap_str_destroy (rd_kafkap_str_t *kstr) { * Nul-terminates the string, but the trailing \0 is not part of * the serialized string. 
*/ -static RD_INLINE RD_UNUSED -rd_kafkap_str_t *rd_kafkap_str_new (const char *str, int len) { - rd_kafkap_str_t *kstr; - int16_t klen; - - if (!str) - len = RD_KAFKAP_STR_LEN_NULL; - else if (len == -1) - len = (int)strlen(str); - - kstr = (rd_kafkap_str_t *)rd_malloc(sizeof(*kstr) + 2 + - (len == RD_KAFKAP_STR_LEN_NULL ? 0 : len + 1)); - kstr->len = len; - - /* Serialised format: 16-bit string length */ - klen = htobe16(len); - memcpy(kstr+1, &klen, 2); - - /* Pre-Serialised format: non null-terminated string */ - if (len == RD_KAFKAP_STR_LEN_NULL) - kstr->str = NULL; - else { - kstr->str = ((const char *)(kstr+1))+2; - memcpy((void *)kstr->str, str, len); - ((char *)kstr->str)[len] = '\0'; - } - - return kstr; +static RD_INLINE RD_UNUSED rd_kafkap_str_t *rd_kafkap_str_new(const char *str, + int len) { + rd_kafkap_str_t *kstr; + int16_t klen; + + if (!str) + len = RD_KAFKAP_STR_LEN_NULL; + else if (len == -1) + len = (int)strlen(str); + + kstr = (rd_kafkap_str_t *)rd_malloc( + sizeof(*kstr) + 2 + (len == RD_KAFKAP_STR_LEN_NULL ? 0 : len + 1)); + kstr->len = len; + + /* Serialised format: 16-bit string length */ + klen = htobe16(len); + memcpy(kstr + 1, &klen, 2); + + /* Pre-Serialised format: non null-terminated string */ + if (len == RD_KAFKAP_STR_LEN_NULL) + kstr->str = NULL; + else { + kstr->str = ((const char *)(kstr + 1)) + 2; + memcpy((void *)kstr->str, str, len); + ((char *)kstr->str)[len] = '\0'; + } + + return kstr; } @@ -325,40 +318,40 @@ rd_kafkap_str_t *rd_kafkap_str_new (const char *str, int len) { * Makes a copy of `src`. 
The copy will be fully allocated and should * be freed with rd_kafka_pstr_destroy() */ -static RD_INLINE RD_UNUSED -rd_kafkap_str_t *rd_kafkap_str_copy (const rd_kafkap_str_t *src) { +static RD_INLINE RD_UNUSED rd_kafkap_str_t * +rd_kafkap_str_copy(const rd_kafkap_str_t *src) { return rd_kafkap_str_new(src->str, src->len); } -static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp (const rd_kafkap_str_t *a, - const rd_kafkap_str_t *b) { - int minlen = RD_MIN(a->len, b->len); - int r = memcmp(a->str, b->str, minlen); - if (r) - return r; - else +static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp(const rd_kafkap_str_t *a, + const rd_kafkap_str_t *b) { + int minlen = RD_MIN(a->len, b->len); + int r = memcmp(a->str, b->str, minlen); + if (r) + return r; + else return RD_CMP(a->len, b->len); } -static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str (const rd_kafkap_str_t *a, - const char *str) { - int len = (int)strlen(str); - int minlen = RD_MIN(a->len, len); - int r = memcmp(a->str, str, minlen); - if (r) - return r; - else +static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str(const rd_kafkap_str_t *a, + const char *str) { + int len = (int)strlen(str); + int minlen = RD_MIN(a->len, len); + int r = memcmp(a->str, str, minlen); + if (r) + return r; + else return RD_CMP(a->len, len); } -static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str2 (const char *str, - const rd_kafkap_str_t *b){ - int len = (int)strlen(str); - int minlen = RD_MIN(b->len, len); - int r = memcmp(str, b->str, minlen); - if (r) - return r; - else +static RD_INLINE RD_UNUSED int +rd_kafkap_str_cmp_str2(const char *str, const rd_kafkap_str_t *b) { + int len = (int)strlen(str); + int minlen = RD_MIN(b->len, len); + int r = memcmp(str, b->str, minlen); + if (r) + return r; + else return RD_CMP(len, b->len); } @@ -373,39 +366,40 @@ static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str2 (const char *str, * */ typedef struct rd_kafkap_bytes_s { - /* convenience header (aligned access, host endian) */ - int32_t len; /* 
Kafka bytes length (-1=NULL, 0=empty, >0=data) */ - const void *data; /* points just past the struct, or other memory, - * not NULL-terminated */ - const char _data[1]; /* Bytes following struct when new()ed */ + /* convenience header (aligned access, host endian) */ + int32_t len; /* Kafka bytes length (-1=NULL, 0=empty, >0=data) */ + const void *data; /* points just past the struct, or other memory, + * not NULL-terminated */ + const char _data[1]; /* Bytes following struct when new()ed */ } rd_kafkap_bytes_t; #define RD_KAFKAP_BYTES_LEN_NULL -1 -#define RD_KAFKAP_BYTES_IS_NULL(kbytes) \ - ((kbytes)->len == RD_KAFKAP_BYTES_LEN_NULL) +#define RD_KAFKAP_BYTES_IS_NULL(kbytes) \ + ((kbytes)->len == RD_KAFKAP_BYTES_LEN_NULL) /* Returns the length of the bytes of a kafka protocol bytes representation */ -#define RD_KAFKAP_BYTES_LEN0(len) ((len) == RD_KAFKAP_BYTES_LEN_NULL ? 0:(len)) +#define RD_KAFKAP_BYTES_LEN0(len) \ + ((len) == RD_KAFKAP_BYTES_LEN_NULL ? 0 : (len)) #define RD_KAFKAP_BYTES_LEN(kbytes) RD_KAFKAP_BYTES_LEN0((kbytes)->len) /* Returns the actual size of a kafka protocol bytes representation. 
*/ -#define RD_KAFKAP_BYTES_SIZE0(len) (4 + RD_KAFKAP_BYTES_LEN0(len)) +#define RD_KAFKAP_BYTES_SIZE0(len) (4 + RD_KAFKAP_BYTES_LEN0(len)) #define RD_KAFKAP_BYTES_SIZE(kbytes) RD_KAFKAP_BYTES_SIZE0((kbytes)->len) /** @returns true if kbyes is pre-serialized through .._new() */ -#define RD_KAFKAP_BYTES_IS_SERIALIZED(kstr) \ - (((const char *)((kbytes)+1))+2 == (const char *)((kbytes)->data)) +#define RD_KAFKAP_BYTES_IS_SERIALIZED(kstr) \ + (((const char *)((kbytes) + 1)) + 2 == (const char *)((kbytes)->data)) /* Serialized Kafka bytes: only works for _new() kbytes */ -#define RD_KAFKAP_BYTES_SER(kbytes) ((kbytes)+1) +#define RD_KAFKAP_BYTES_SER(kbytes) ((kbytes) + 1) /** * Frees a Kafka bytes previously allocated with `rd_kafkap_bytes_new()` */ -static RD_UNUSED void rd_kafkap_bytes_destroy (rd_kafkap_bytes_t *kbytes) { - rd_free(kbytes); +static RD_UNUSED void rd_kafkap_bytes_destroy(rd_kafkap_bytes_t *kbytes) { + rd_free(kbytes); } @@ -420,30 +414,30 @@ static RD_UNUSED void rd_kafkap_bytes_destroy (rd_kafkap_bytes_t *kbytes) { * - Copy data (bytes!=NULL,len>0) * - No-copy, just alloc (bytes==NULL,len>0) */ -static RD_INLINE RD_UNUSED -rd_kafkap_bytes_t *rd_kafkap_bytes_new (const char *bytes, int32_t len) { - rd_kafkap_bytes_t *kbytes; - int32_t klen; +static RD_INLINE RD_UNUSED rd_kafkap_bytes_t * +rd_kafkap_bytes_new(const char *bytes, int32_t len) { + rd_kafkap_bytes_t *kbytes; + int32_t klen; - if (!bytes && !len) - len = RD_KAFKAP_BYTES_LEN_NULL; + if (!bytes && !len) + len = RD_KAFKAP_BYTES_LEN_NULL; - kbytes = (rd_kafkap_bytes_t *)rd_malloc(sizeof(*kbytes) + 4 + - (len == RD_KAFKAP_BYTES_LEN_NULL ? 0 : len)); - kbytes->len = len; + kbytes = (rd_kafkap_bytes_t *)rd_malloc( + sizeof(*kbytes) + 4 + (len == RD_KAFKAP_BYTES_LEN_NULL ? 
0 : len)); + kbytes->len = len; - klen = htobe32(len); - memcpy((void *)(kbytes+1), &klen, 4); + klen = htobe32(len); + memcpy((void *)(kbytes + 1), &klen, 4); - if (len == RD_KAFKAP_BYTES_LEN_NULL) - kbytes->data = NULL; - else { - kbytes->data = ((const char *)(kbytes+1))+4; + if (len == RD_KAFKAP_BYTES_LEN_NULL) + kbytes->data = NULL; + else { + kbytes->data = ((const char *)(kbytes + 1)) + 4; if (bytes) memcpy((void *)kbytes->data, bytes, len); - } + } - return kbytes; + return kbytes; } @@ -451,42 +445,40 @@ rd_kafkap_bytes_t *rd_kafkap_bytes_new (const char *bytes, int32_t len) { * Makes a copy of `src`. The copy will be fully allocated and should * be freed with rd_kafkap_bytes_destroy() */ -static RD_INLINE RD_UNUSED -rd_kafkap_bytes_t *rd_kafkap_bytes_copy (const rd_kafkap_bytes_t *src) { - return rd_kafkap_bytes_new( - (const char *)src->data, src->len); +static RD_INLINE RD_UNUSED rd_kafkap_bytes_t * +rd_kafkap_bytes_copy(const rd_kafkap_bytes_t *src) { + return rd_kafkap_bytes_new((const char *)src->data, src->len); } -static RD_INLINE RD_UNUSED int rd_kafkap_bytes_cmp (const rd_kafkap_bytes_t *a, - const rd_kafkap_bytes_t *b) { - int minlen = RD_MIN(a->len, b->len); - int r = memcmp(a->data, b->data, minlen); - if (r) - return r; - else +static RD_INLINE RD_UNUSED int rd_kafkap_bytes_cmp(const rd_kafkap_bytes_t *a, + const rd_kafkap_bytes_t *b) { + int minlen = RD_MIN(a->len, b->len); + int r = memcmp(a->data, b->data, minlen); + if (r) + return r; + else return RD_CMP(a->len, b->len); } -static RD_INLINE RD_UNUSED -int rd_kafkap_bytes_cmp_data (const rd_kafkap_bytes_t *a, - const char *data, int len) { - int minlen = RD_MIN(a->len, len); - int r = memcmp(a->data, data, minlen); - if (r) - return r; - else +static RD_INLINE RD_UNUSED int +rd_kafkap_bytes_cmp_data(const rd_kafkap_bytes_t *a, + const char *data, + int len) { + int minlen = RD_MIN(a->len, len); + int r = memcmp(a->data, data, minlen); + if (r) + return r; + else return RD_CMP(a->len, 
len); } - typedef struct rd_kafka_buf_s rd_kafka_buf_t; -#define RD_KAFKA_NODENAME_SIZE 256 - +#define RD_KAFKA_NODENAME_SIZE 256 @@ -498,55 +490,39 @@ typedef struct rd_kafka_buf_s rd_kafka_buf_t; * MsgVersion v0..v1 */ /* Offset + MessageSize */ -#define RD_KAFKAP_MESSAGESET_V0_HDR_SIZE (8+4) +#define RD_KAFKAP_MESSAGESET_V0_HDR_SIZE (8 + 4) /* CRC + Magic + Attr + KeyLen + ValueLen */ -#define RD_KAFKAP_MESSAGE_V0_HDR_SIZE (4+1+1+4+4) +#define RD_KAFKAP_MESSAGE_V0_HDR_SIZE (4 + 1 + 1 + 4 + 4) /* CRC + Magic + Attr + Timestamp + KeyLen + ValueLen */ -#define RD_KAFKAP_MESSAGE_V1_HDR_SIZE (4+1+1+8+4+4) +#define RD_KAFKAP_MESSAGE_V1_HDR_SIZE (4 + 1 + 1 + 8 + 4 + 4) /* Maximum per-message overhead */ -#define RD_KAFKAP_MESSAGE_V0_OVERHEAD \ +#define RD_KAFKAP_MESSAGE_V0_OVERHEAD \ (RD_KAFKAP_MESSAGESET_V0_HDR_SIZE + RD_KAFKAP_MESSAGE_V0_HDR_SIZE) -#define RD_KAFKAP_MESSAGE_V1_OVERHEAD \ +#define RD_KAFKAP_MESSAGE_V1_OVERHEAD \ (RD_KAFKAP_MESSAGESET_V0_HDR_SIZE + RD_KAFKAP_MESSAGE_V1_HDR_SIZE) /** * MsgVersion v2 */ -#define RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD \ - ( \ - /* Length (varint) */ \ - RD_UVARINT_ENC_SIZEOF(int32_t) + \ - /* Attributes */ \ - 1 + \ - /* TimestampDelta (varint) */ \ - RD_UVARINT_ENC_SIZEOF(int64_t) + \ - /* OffsetDelta (varint) */ \ - RD_UVARINT_ENC_SIZEOF(int32_t) + \ - /* KeyLen (varint) */ \ - RD_UVARINT_ENC_SIZEOF(int32_t) + \ - /* ValueLen (varint) */ \ - RD_UVARINT_ENC_SIZEOF(int32_t) + \ - /* HeaderCnt (varint): */ \ - RD_UVARINT_ENC_SIZEOF(int32_t) \ - ) - -#define RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD \ - ( \ - /* Length (varint) */ \ - RD_UVARINT_ENC_SIZE_0() + \ - /* Attributes */ \ - 1 + \ - /* TimestampDelta (varint) */ \ - RD_UVARINT_ENC_SIZE_0() + \ - /* OffsetDelta (varint) */ \ - RD_UVARINT_ENC_SIZE_0() + \ - /* KeyLen (varint) */ \ - RD_UVARINT_ENC_SIZE_0() + \ - /* ValueLen (varint) */ \ - RD_UVARINT_ENC_SIZE_0() + \ - /* HeaderCnt (varint): */ \ - RD_UVARINT_ENC_SIZE_0() \ - ) +#define RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD \ + ( /* 
Length (varint) */ \ + RD_UVARINT_ENC_SIZEOF(int32_t) + /* Attributes */ \ + 1 + /* TimestampDelta (varint) */ \ + RD_UVARINT_ENC_SIZEOF(int64_t) + /* OffsetDelta (varint) */ \ + RD_UVARINT_ENC_SIZEOF(int32_t) + /* KeyLen (varint) */ \ + RD_UVARINT_ENC_SIZEOF(int32_t) + /* ValueLen (varint) */ \ + RD_UVARINT_ENC_SIZEOF(int32_t) + /* HeaderCnt (varint): */ \ + RD_UVARINT_ENC_SIZEOF(int32_t)) + +#define RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD \ + ( /* Length (varint) */ \ + RD_UVARINT_ENC_SIZE_0() + /* Attributes */ \ + 1 + /* TimestampDelta (varint) */ \ + RD_UVARINT_ENC_SIZE_0() + /* OffsetDelta (varint) */ \ + RD_UVARINT_ENC_SIZE_0() + /* KeyLen (varint) */ \ + RD_UVARINT_ENC_SIZE_0() + /* ValueLen (varint) */ \ + RD_UVARINT_ENC_SIZE_0() + /* HeaderCnt (varint): */ \ + RD_UVARINT_ENC_SIZE_0()) /** @@ -558,21 +534,23 @@ typedef struct rd_kafka_buf_s rd_kafka_buf_t; */ /* Old MessageSet header: none */ -#define RD_KAFKAP_MSGSET_V0_SIZE 0 +#define RD_KAFKAP_MSGSET_V0_SIZE 0 /* MessageSet v2 header */ -#define RD_KAFKAP_MSGSET_V2_SIZE (8+4+4+1+4+2+4+8+8+8+2+4+4) +#define RD_KAFKAP_MSGSET_V2_SIZE \ + (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2 + 4 + 4) /* Byte offsets for MessageSet fields */ -#define RD_KAFKAP_MSGSET_V2_OF_Length (8) -#define RD_KAFKAP_MSGSET_V2_OF_CRC (8+4+4+1) -#define RD_KAFKAP_MSGSET_V2_OF_Attributes (8+4+4+1+4) -#define RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta (8+4+4+1+4+2) -#define RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp (8+4+4+1+4+2+4) -#define RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp (8+4+4+1+4+2+4+8) -#define RD_KAFKAP_MSGSET_V2_OF_BaseSequence (8+4+4+1+4+2+4+8+8+8+2) -#define RD_KAFKAP_MSGSET_V2_OF_RecordCount (8+4+4+1+4+2+4+8+8+8+2+4) - +#define RD_KAFKAP_MSGSET_V2_OF_Length (8) +#define RD_KAFKAP_MSGSET_V2_OF_CRC (8 + 4 + 4 + 1) +#define RD_KAFKAP_MSGSET_V2_OF_Attributes (8 + 4 + 4 + 1 + 4) +#define RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta (8 + 4 + 4 + 1 + 4 + 2) +#define RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp (8 + 4 + 4 + 1 + 4 + 2 + 4) +#define 
RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8) +#define RD_KAFKAP_MSGSET_V2_OF_BaseSequence \ + (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2) +#define RD_KAFKAP_MSGSET_V2_OF_RecordCount \ + (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2 + 4) @@ -586,11 +564,12 @@ typedef struct rd_kafka_buf_s rd_kafka_buf_t; * @brief Producer ID and Epoch */ typedef struct rd_kafka_pid_s { - int64_t id; /**< Producer Id */ - int16_t epoch; /**< Producer Epoch */ + int64_t id; /**< Producer Id */ + int16_t epoch; /**< Producer Epoch */ } rd_kafka_pid_t; -#define RD_KAFKA_PID_INITIALIZER {-1,-1} +#define RD_KAFKA_PID_INITIALIZER \ + { -1, -1 } /** * @returns true if \p PID is valid @@ -600,15 +579,15 @@ typedef struct rd_kafka_pid_s { /** * @brief Check two pids for equality */ -static RD_UNUSED RD_INLINE int rd_kafka_pid_eq (const rd_kafka_pid_t a, - const rd_kafka_pid_t b) { +static RD_UNUSED RD_INLINE int rd_kafka_pid_eq(const rd_kafka_pid_t a, + const rd_kafka_pid_t b) { return a.id == b.id && a.epoch == b.epoch; } /** * @brief Pid+epoch comparator */ -static RD_UNUSED int rd_kafka_pid_cmp (const void *_a, const void *_b) { +static RD_UNUSED int rd_kafka_pid_cmp(const void *_a, const void *_b) { const rd_kafka_pid_t *a = _a, *b = _b; if (a->id < b->id) @@ -623,7 +602,7 @@ static RD_UNUSED int rd_kafka_pid_cmp (const void *_a, const void *_b) { /** * @brief Pid (not epoch) comparator */ -static RD_UNUSED int rd_kafka_pid_cmp_pid (const void *_a, const void *_b) { +static RD_UNUSED int rd_kafka_pid_cmp_pid(const void *_a, const void *_b) { const rd_kafka_pid_t *a = _a, *b = _b; if (a->id < b->id) @@ -639,8 +618,7 @@ static RD_UNUSED int rd_kafka_pid_cmp_pid (const void *_a, const void *_b) { * @returns the string representation of a PID in a thread-safe * static buffer. 
*/ -static RD_UNUSED const char * -rd_kafka_pid2str (const rd_kafka_pid_t pid) { +static RD_UNUSED const char *rd_kafka_pid2str(const rd_kafka_pid_t pid) { static RD_TLS char buf[2][64]; static RD_TLS int i; @@ -649,8 +627,8 @@ rd_kafka_pid2str (const rd_kafka_pid_t pid) { i = (i + 1) % 2; - rd_snprintf(buf[i], sizeof(buf[i]), - "PID{Id:%" PRId64",Epoch:%hd}", pid.id, pid.epoch); + rd_snprintf(buf[i], sizeof(buf[i]), "PID{Id:%" PRId64 ",Epoch:%hd}", + pid.id, pid.epoch); return buf[i]; } @@ -658,8 +636,8 @@ rd_kafka_pid2str (const rd_kafka_pid_t pid) { /** * @brief Reset the PID to invalid/init state */ -static RD_UNUSED RD_INLINE void rd_kafka_pid_reset (rd_kafka_pid_t *pid) { - pid->id = -1; +static RD_UNUSED RD_INLINE void rd_kafka_pid_reset(rd_kafka_pid_t *pid) { + pid->id = -1; pid->epoch = -1; } @@ -668,10 +646,9 @@ static RD_UNUSED RD_INLINE void rd_kafka_pid_reset (rd_kafka_pid_t *pid) { * @brief Bump the epoch of a valid PID */ static RD_UNUSED RD_INLINE rd_kafka_pid_t -rd_kafka_pid_bump (const rd_kafka_pid_t old) { +rd_kafka_pid_bump(const rd_kafka_pid_t old) { rd_kafka_pid_t new_pid = { - old.id, - (int16_t)(((int)old.epoch + 1) & (int)INT16_MAX) }; + old.id, (int16_t)(((int)old.epoch + 1) & (int)INT16_MAX)}; return new_pid; } diff --git a/src/rdkafka_protocol.h b/src/rdkafka_protocol.h index 53c8b28cf1..aa9db5392b 100644 --- a/src/rdkafka_protocol.h +++ b/src/rdkafka_protocol.h @@ -36,7 +36,7 @@ * to C and C++ test code in tests/. */ -#define RD_KAFKA_PORT 9092 +#define RD_KAFKA_PORT 9092 #define RD_KAFKA_PORT_STR "9092" @@ -45,67 +45,67 @@ * * Generate updates to this list with generate_proto.sh. 
*/ -#define RD_KAFKAP_None -1 -#define RD_KAFKAP_Produce 0 -#define RD_KAFKAP_Fetch 1 -#define RD_KAFKAP_ListOffsets 2 -#define RD_KAFKAP_Metadata 3 -#define RD_KAFKAP_LeaderAndIsr 4 -#define RD_KAFKAP_StopReplica 5 -#define RD_KAFKAP_UpdateMetadata 6 -#define RD_KAFKAP_ControlledShutdown 7 -#define RD_KAFKAP_OffsetCommit 8 -#define RD_KAFKAP_OffsetFetch 9 -#define RD_KAFKAP_FindCoordinator 10 -#define RD_KAFKAP_JoinGroup 11 -#define RD_KAFKAP_Heartbeat 12 -#define RD_KAFKAP_LeaveGroup 13 -#define RD_KAFKAP_SyncGroup 14 -#define RD_KAFKAP_DescribeGroups 15 -#define RD_KAFKAP_ListGroups 16 -#define RD_KAFKAP_SaslHandshake 17 -#define RD_KAFKAP_ApiVersion 18 -#define RD_KAFKAP_CreateTopics 19 -#define RD_KAFKAP_DeleteTopics 20 -#define RD_KAFKAP_DeleteRecords 21 -#define RD_KAFKAP_InitProducerId 22 -#define RD_KAFKAP_OffsetForLeaderEpoch 23 -#define RD_KAFKAP_AddPartitionsToTxn 24 -#define RD_KAFKAP_AddOffsetsToTxn 25 -#define RD_KAFKAP_EndTxn 26 -#define RD_KAFKAP_WriteTxnMarkers 27 -#define RD_KAFKAP_TxnOffsetCommit 28 -#define RD_KAFKAP_DescribeAcls 29 -#define RD_KAFKAP_CreateAcls 30 -#define RD_KAFKAP_DeleteAcls 31 -#define RD_KAFKAP_DescribeConfigs 32 -#define RD_KAFKAP_AlterConfigs 33 -#define RD_KAFKAP_AlterReplicaLogDirs 34 -#define RD_KAFKAP_DescribeLogDirs 35 -#define RD_KAFKAP_SaslAuthenticate 36 -#define RD_KAFKAP_CreatePartitions 37 -#define RD_KAFKAP_CreateDelegationToken 38 -#define RD_KAFKAP_RenewDelegationToken 39 -#define RD_KAFKAP_ExpireDelegationToken 40 -#define RD_KAFKAP_DescribeDelegationToken 41 -#define RD_KAFKAP_DeleteGroups 42 -#define RD_KAFKAP_ElectLeaders 43 -#define RD_KAFKAP_IncrementalAlterConfigs 44 -#define RD_KAFKAP_AlterPartitionReassignments 45 -#define RD_KAFKAP_ListPartitionReassignments 46 -#define RD_KAFKAP_OffsetDelete 47 -#define RD_KAFKAP_DescribeClientQuotas 48 -#define RD_KAFKAP_AlterClientQuotas 49 +#define RD_KAFKAP_None -1 +#define RD_KAFKAP_Produce 0 +#define RD_KAFKAP_Fetch 1 +#define RD_KAFKAP_ListOffsets 2 
+#define RD_KAFKAP_Metadata 3 +#define RD_KAFKAP_LeaderAndIsr 4 +#define RD_KAFKAP_StopReplica 5 +#define RD_KAFKAP_UpdateMetadata 6 +#define RD_KAFKAP_ControlledShutdown 7 +#define RD_KAFKAP_OffsetCommit 8 +#define RD_KAFKAP_OffsetFetch 9 +#define RD_KAFKAP_FindCoordinator 10 +#define RD_KAFKAP_JoinGroup 11 +#define RD_KAFKAP_Heartbeat 12 +#define RD_KAFKAP_LeaveGroup 13 +#define RD_KAFKAP_SyncGroup 14 +#define RD_KAFKAP_DescribeGroups 15 +#define RD_KAFKAP_ListGroups 16 +#define RD_KAFKAP_SaslHandshake 17 +#define RD_KAFKAP_ApiVersion 18 +#define RD_KAFKAP_CreateTopics 19 +#define RD_KAFKAP_DeleteTopics 20 +#define RD_KAFKAP_DeleteRecords 21 +#define RD_KAFKAP_InitProducerId 22 +#define RD_KAFKAP_OffsetForLeaderEpoch 23 +#define RD_KAFKAP_AddPartitionsToTxn 24 +#define RD_KAFKAP_AddOffsetsToTxn 25 +#define RD_KAFKAP_EndTxn 26 +#define RD_KAFKAP_WriteTxnMarkers 27 +#define RD_KAFKAP_TxnOffsetCommit 28 +#define RD_KAFKAP_DescribeAcls 29 +#define RD_KAFKAP_CreateAcls 30 +#define RD_KAFKAP_DeleteAcls 31 +#define RD_KAFKAP_DescribeConfigs 32 +#define RD_KAFKAP_AlterConfigs 33 +#define RD_KAFKAP_AlterReplicaLogDirs 34 +#define RD_KAFKAP_DescribeLogDirs 35 +#define RD_KAFKAP_SaslAuthenticate 36 +#define RD_KAFKAP_CreatePartitions 37 +#define RD_KAFKAP_CreateDelegationToken 38 +#define RD_KAFKAP_RenewDelegationToken 39 +#define RD_KAFKAP_ExpireDelegationToken 40 +#define RD_KAFKAP_DescribeDelegationToken 41 +#define RD_KAFKAP_DeleteGroups 42 +#define RD_KAFKAP_ElectLeaders 43 +#define RD_KAFKAP_IncrementalAlterConfigs 44 +#define RD_KAFKAP_AlterPartitionReassignments 45 +#define RD_KAFKAP_ListPartitionReassignments 46 +#define RD_KAFKAP_OffsetDelete 47 +#define RD_KAFKAP_DescribeClientQuotas 48 +#define RD_KAFKAP_AlterClientQuotas 49 #define RD_KAFKAP_DescribeUserScramCredentials 50 -#define RD_KAFKAP_AlterUserScramCredentials 51 -#define RD_KAFKAP_Vote 52 -#define RD_KAFKAP_BeginQuorumEpoch 53 -#define RD_KAFKAP_EndQuorumEpoch 54 -#define RD_KAFKAP_DescribeQuorum 55 
-#define RD_KAFKAP_AlterIsr 56 -#define RD_KAFKAP_UpdateFeatures 57 -#define RD_KAFKAP_Envelope 58 -#define RD_KAFKAP__NUM 59 +#define RD_KAFKAP_AlterUserScramCredentials 51 +#define RD_KAFKAP_Vote 52 +#define RD_KAFKAP_BeginQuorumEpoch 53 +#define RD_KAFKAP_EndQuorumEpoch 54 +#define RD_KAFKAP_DescribeQuorum 55 +#define RD_KAFKAP_AlterIsr 56 +#define RD_KAFKAP_UpdateFeatures 57 +#define RD_KAFKAP_Envelope 58 +#define RD_KAFKAP__NUM 59 #endif /* _RDKAFKA_PROTOCOL_H_ */ diff --git a/src/rdkafka_queue.c b/src/rdkafka_queue.c index 9bb5bea94c..56ef13e45d 100644 --- a/src/rdkafka_queue.c +++ b/src/rdkafka_queue.c @@ -33,7 +33,7 @@ int RD_TLS rd_kafka_yield_thread = 0; -void rd_kafka_yield (rd_kafka_t *rk) { +void rd_kafka_yield(rd_kafka_t *rk) { rd_kafka_yield_thread = 1; } @@ -43,7 +43,7 @@ void rd_kafka_yield (rd_kafka_t *rk) { * @returns rd_true if caller should yield, otherwise rd_false. * @remarks rkq_lock MUST be held */ -static RD_INLINE rd_bool_t rd_kafka_q_check_yield (rd_kafka_q_t *rkq) { +static RD_INLINE rd_bool_t rd_kafka_q_check_yield(rd_kafka_q_t *rkq) { if (!(rkq->rkq_flags & RD_KAFKA_Q_F_YIELD)) return rd_false; @@ -53,24 +53,24 @@ static RD_INLINE rd_bool_t rd_kafka_q_check_yield (rd_kafka_q_t *rkq) { /** * Destroy a queue. refcnt must be at zero. */ -void rd_kafka_q_destroy_final (rd_kafka_q_t *rkq) { +void rd_kafka_q_destroy_final(rd_kafka_q_t *rkq) { mtx_lock(&rkq->rkq_lock); - if (unlikely(rkq->rkq_qio != NULL)) { - rd_free(rkq->rkq_qio); - rkq->rkq_qio = NULL; - } + if (unlikely(rkq->rkq_qio != NULL)) { + rd_free(rkq->rkq_qio); + rkq->rkq_qio = NULL; + } /* Queue must have been disabled prior to final destruction, * this is to catch the case where the queue owner/poll does not * use rd_kafka_q_destroy_owner(). 
*/ rd_dassert(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY)); - rd_kafka_q_disable0(rkq, 0/*no-lock*/); /* for the non-devel case */ - rd_kafka_q_fwd_set0(rkq, NULL, 0/*no-lock*/, 0 /*no-fwd-app*/); - rd_kafka_q_purge0(rkq, 0/*no-lock*/); - assert(!rkq->rkq_fwdq); + rd_kafka_q_disable0(rkq, 0 /*no-lock*/); /* for the non-devel case */ + rd_kafka_q_fwd_set0(rkq, NULL, 0 /*no-lock*/, 0 /*no-fwd-app*/); + rd_kafka_q_purge0(rkq, 0 /*no-lock*/); + assert(!rkq->rkq_fwdq); mtx_unlock(&rkq->rkq_lock); - mtx_destroy(&rkq->rkq_lock); - cnd_destroy(&rkq->rkq_cond); + mtx_destroy(&rkq->rkq_lock); + cnd_destroy(&rkq->rkq_cond); if (rkq->rkq_flags & RD_KAFKA_Q_F_ALLOCATED) rd_free(rkq); @@ -81,18 +81,20 @@ void rd_kafka_q_destroy_final (rd_kafka_q_t *rkq) { /** * Initialize a queue. */ -void rd_kafka_q_init0 (rd_kafka_q_t *rkq, rd_kafka_t *rk, - const char *func, int line) { +void rd_kafka_q_init0(rd_kafka_q_t *rkq, + rd_kafka_t *rk, + const char *func, + int line) { rd_kafka_q_reset(rkq); - rkq->rkq_fwdq = NULL; + rkq->rkq_fwdq = NULL; rkq->rkq_refcnt = 1; rkq->rkq_flags = RD_KAFKA_Q_F_READY; rkq->rkq_rk = rk; - rkq->rkq_qio = NULL; + rkq->rkq_qio = NULL; rkq->rkq_serve = NULL; rkq->rkq_opaque = NULL; - mtx_init(&rkq->rkq_lock, mtx_plain); - cnd_init(&rkq->rkq_cond); + mtx_init(&rkq->rkq_lock, mtx_plain); + cnd_init(&rkq->rkq_cond); #if ENABLE_DEVEL rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line); #else @@ -104,14 +106,14 @@ void rd_kafka_q_init0 (rd_kafka_q_t *rkq, rd_kafka_t *rk, /** * Allocate a new queue and initialize it. 
*/ -rd_kafka_q_t *rd_kafka_q_new0 (rd_kafka_t *rk, const char *func, int line) { +rd_kafka_q_t *rd_kafka_q_new0(rd_kafka_t *rk, const char *func, int line) { rd_kafka_q_t *rkq = rd_malloc(sizeof(*rkq)); rd_kafka_q_init(rkq, rk); rkq->rkq_flags |= RD_KAFKA_Q_F_ALLOCATED; #if ENABLE_DEVEL - rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line); + rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line); #else - rkq->rkq_name = func; + rkq->rkq_name = func; #endif return rkq; } @@ -124,29 +126,31 @@ rd_kafka_q_t *rd_kafka_q_new0 (rd_kafka_t *rk, const char *func, int line) { * * All access to rkq_fwdq are protected by rkq_lock. */ -void rd_kafka_q_fwd_set0 (rd_kafka_q_t *srcq, rd_kafka_q_t *destq, - int do_lock, int fwd_app) { +void rd_kafka_q_fwd_set0(rd_kafka_q_t *srcq, + rd_kafka_q_t *destq, + int do_lock, + int fwd_app) { if (do_lock) mtx_lock(&srcq->rkq_lock); if (fwd_app) srcq->rkq_flags |= RD_KAFKA_Q_F_FWD_APP; - if (srcq->rkq_fwdq) { - rd_kafka_q_destroy(srcq->rkq_fwdq); - srcq->rkq_fwdq = NULL; - } - if (destq) { - rd_kafka_q_keep(destq); - - /* If rkq has ops in queue, append them to fwdq's queue. - * This is an irreversible operation. */ + if (srcq->rkq_fwdq) { + rd_kafka_q_destroy(srcq->rkq_fwdq); + srcq->rkq_fwdq = NULL; + } + if (destq) { + rd_kafka_q_keep(destq); + + /* If rkq has ops in queue, append them to fwdq's queue. + * This is an irreversible operation. */ if (srcq->rkq_qlen > 0) { - rd_dassert(destq->rkq_flags & RD_KAFKA_Q_F_READY); - rd_kafka_q_concat(destq, srcq); - } + rd_dassert(destq->rkq_flags & RD_KAFKA_Q_F_READY); + rd_kafka_q_concat(destq, srcq); + } - srcq->rkq_fwdq = destq; - } + srcq->rkq_fwdq = destq; + } if (do_lock) mtx_unlock(&srcq->rkq_lock); } @@ -154,9 +158,9 @@ void rd_kafka_q_fwd_set0 (rd_kafka_q_t *srcq, rd_kafka_q_t *destq, /** * Purge all entries from a queue. 
*/ -int rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock) { - rd_kafka_op_t *rko, *next; - TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); +int rd_kafka_q_purge0(rd_kafka_q_t *rkq, int do_lock) { + rd_kafka_op_t *rko, *next; + TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); rd_kafka_q_t *fwdq; int cnt = 0; @@ -171,25 +175,25 @@ int rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock) { return cnt; } - /* Move ops queue to tmpq to avoid lock-order issue - * by locks taken from rd_kafka_op_destroy(). */ - TAILQ_MOVE(&tmpq, &rkq->rkq_q, rko_link); + /* Move ops queue to tmpq to avoid lock-order issue + * by locks taken from rd_kafka_op_destroy(). */ + TAILQ_MOVE(&tmpq, &rkq->rkq_q, rko_link); rd_kafka_q_mark_served(rkq); - /* Zero out queue */ + /* Zero out queue */ rd_kafka_q_reset(rkq); if (do_lock) mtx_unlock(&rkq->rkq_lock); - /* Destroy the ops */ - next = TAILQ_FIRST(&tmpq); - while ((rko = next)) { - next = TAILQ_NEXT(next, rko_link); - rd_kafka_op_destroy(rko); + /* Destroy the ops */ + next = TAILQ_FIRST(&tmpq); + while ((rko = next)) { + next = TAILQ_NEXT(next, rko_link); + rd_kafka_op_destroy(rko); cnt++; - } + } return cnt; } @@ -200,15 +204,16 @@ int rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock) { * This shaves off the head of the queue, up until the first rko with * a non-matching rktp or version. 
*/ -void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, - rd_kafka_toppar_t *rktp, int version) { - rd_kafka_op_t *rko, *next; - TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); - int32_t cnt = 0; +void rd_kafka_q_purge_toppar_version(rd_kafka_q_t *rkq, + rd_kafka_toppar_t *rktp, + int version) { + rd_kafka_op_t *rko, *next; + TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); + int32_t cnt = 0; int64_t size = 0; rd_kafka_q_t *fwdq; - mtx_lock(&rkq->rkq_lock); + mtx_lock(&rkq->rkq_lock); if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) { mtx_unlock(&rkq->rkq_lock); @@ -220,8 +225,7 @@ void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, /* Move ops to temporary queue and then destroy them from there * without locks to avoid lock-ordering problems in op_destroy() */ while ((rko = TAILQ_FIRST(&rkq->rkq_q)) && rko->rko_rktp && - rko->rko_rktp == rktp && - rko->rko_version < version) { + rko->rko_rktp == rktp && rko->rko_version < version) { TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link); TAILQ_INSERT_TAIL(&tmpq, rko, rko_link); cnt++; @@ -232,13 +236,13 @@ void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, rkq->rkq_qlen -= cnt; rkq->rkq_qsize -= size; - mtx_unlock(&rkq->rkq_lock); + mtx_unlock(&rkq->rkq_lock); - next = TAILQ_FIRST(&tmpq); - while ((rko = next)) { - next = TAILQ_NEXT(next, rko_link); - rd_kafka_op_destroy(rko); - } + next = TAILQ_FIRST(&tmpq); + while ((rko = next)) { + next = TAILQ_NEXT(next, rko_link); + rd_kafka_op_destroy(rko); + } } @@ -247,74 +251,73 @@ void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, * If 'cnt' == -1 all entries will be moved. * Returns the number of entries moved. 
*/ -int rd_kafka_q_move_cnt (rd_kafka_q_t *dstq, rd_kafka_q_t *srcq, - int cnt, int do_locks) { - rd_kafka_op_t *rko; +int rd_kafka_q_move_cnt(rd_kafka_q_t *dstq, + rd_kafka_q_t *srcq, + int cnt, + int do_locks) { + rd_kafka_op_t *rko; int mcnt = 0; if (do_locks) { - mtx_lock(&srcq->rkq_lock); - mtx_lock(&dstq->rkq_lock); - } - - if (!dstq->rkq_fwdq && !srcq->rkq_fwdq) { - if (cnt > 0 && dstq->rkq_qlen == 0) - rd_kafka_q_io_event(dstq); - - /* Optimization, if 'cnt' is equal/larger than all - * items of 'srcq' we can move the entire queue. */ - if (cnt == -1 || - cnt >= (int)srcq->rkq_qlen) { + mtx_lock(&srcq->rkq_lock); + mtx_lock(&dstq->rkq_lock); + } + + if (!dstq->rkq_fwdq && !srcq->rkq_fwdq) { + if (cnt > 0 && dstq->rkq_qlen == 0) + rd_kafka_q_io_event(dstq); + + /* Optimization, if 'cnt' is equal/larger than all + * items of 'srcq' we can move the entire queue. */ + if (cnt == -1 || cnt >= (int)srcq->rkq_qlen) { mcnt = srcq->rkq_qlen; - rd_kafka_q_concat0(dstq, srcq, 0/*no-lock*/); - } else { - while (mcnt < cnt && - (rko = TAILQ_FIRST(&srcq->rkq_q))) { - TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link); + rd_kafka_q_concat0(dstq, srcq, 0 /*no-lock*/); + } else { + while (mcnt < cnt && + (rko = TAILQ_FIRST(&srcq->rkq_q))) { + TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link); if (likely(!rko->rko_prio)) TAILQ_INSERT_TAIL(&dstq->rkq_q, rko, rko_link); else TAILQ_INSERT_SORTED( - &dstq->rkq_q, rko, - rd_kafka_op_t *, rko_link, - rd_kafka_op_cmp_prio); + &dstq->rkq_q, rko, rd_kafka_op_t *, + rko_link, rd_kafka_op_cmp_prio); srcq->rkq_qlen--; dstq->rkq_qlen++; srcq->rkq_qsize -= rko->rko_len; dstq->rkq_qsize += rko->rko_len; - mcnt++; - } - } + mcnt++; + } + } rd_kafka_q_mark_served(srcq); - } else - mcnt = rd_kafka_q_move_cnt(dstq->rkq_fwdq ? dstq->rkq_fwdq:dstq, - srcq->rkq_fwdq ? srcq->rkq_fwdq:srcq, - cnt, do_locks); + } else + mcnt = rd_kafka_q_move_cnt( + dstq->rkq_fwdq ? dstq->rkq_fwdq : dstq, + srcq->rkq_fwdq ? 
srcq->rkq_fwdq : srcq, cnt, do_locks); - if (do_locks) { - mtx_unlock(&dstq->rkq_lock); - mtx_unlock(&srcq->rkq_lock); - } + if (do_locks) { + mtx_unlock(&dstq->rkq_lock); + mtx_unlock(&srcq->rkq_lock); + } - return mcnt; + return mcnt; } /** * Filters out outdated ops. */ -static RD_INLINE rd_kafka_op_t *rd_kafka_op_filter (rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, - int version) { +static RD_INLINE rd_kafka_op_t * +rd_kafka_op_filter(rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int version) { if (unlikely(!rko)) return NULL; if (unlikely(rd_kafka_op_version_outdated(rko, version))) { - rd_kafka_q_deq0(rkq, rko); + rd_kafka_q_deq0(rkq, rko); rd_kafka_op_destroy(rko); return NULL; } @@ -339,17 +342,18 @@ static RD_INLINE rd_kafka_op_t *rd_kafka_op_filter (rd_kafka_q_t *rkq, * * Locality: any thread */ -rd_kafka_op_t *rd_kafka_q_pop_serve (rd_kafka_q_t *rkq, rd_ts_t timeout_us, - int32_t version, - rd_kafka_q_cb_type_t cb_type, - rd_kafka_q_serve_cb_t *callback, - void *opaque) { - rd_kafka_op_t *rko; +rd_kafka_op_t *rd_kafka_q_pop_serve(rd_kafka_q_t *rkq, + rd_ts_t timeout_us, + int32_t version, + rd_kafka_q_cb_type_t cb_type, + rd_kafka_q_serve_cb_t *callback, + void *opaque) { + rd_kafka_op_t *rko; rd_kafka_q_t *fwdq; rd_dassert(cb_type); - mtx_lock(&rkq->rkq_lock); + mtx_lock(&rkq->rkq_lock); rd_kafka_yield_thread = 0; if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) { @@ -393,7 +397,7 @@ rd_kafka_op_t *rd_kafka_q_pop_serve (rd_kafka_q_t *rkq, rd_ts_t timeout_us, is_locked = rd_true; goto retry; /* Next op */ } else if (unlikely(res == - RD_KAFKA_OP_RES_YIELD)) { + RD_KAFKA_OP_RES_YIELD)) { /* Callback yielded, unroll */ return NULL; } else @@ -409,38 +413,35 @@ rd_kafka_op_t *rd_kafka_q_pop_serve (rd_kafka_q_t *rkq, rd_ts_t timeout_us, if (!is_locked) mtx_lock(&rkq->rkq_lock); - if (cnd_timedwait_abs(&rkq->rkq_cond, - &rkq->rkq_lock, - &timeout_tspec) != - thrd_success) { - mtx_unlock(&rkq->rkq_lock); - return NULL; - } + if (cnd_timedwait_abs(&rkq->rkq_cond, &rkq->rkq_lock, 
+ &timeout_tspec) != thrd_success) { + mtx_unlock(&rkq->rkq_lock); + return NULL; + } } } else { /* Since the q_pop may block we need to release the parent * queue's lock. */ mtx_unlock(&rkq->rkq_lock); - rko = rd_kafka_q_pop_serve(fwdq, timeout_us, version, - cb_type, callback, opaque); + rko = rd_kafka_q_pop_serve(fwdq, timeout_us, version, cb_type, + callback, opaque); rd_kafka_q_destroy(fwdq); } - return rko; + return rko; } -rd_kafka_op_t *rd_kafka_q_pop (rd_kafka_q_t *rkq, rd_ts_t timeout_us, - int32_t version) { +rd_kafka_op_t * +rd_kafka_q_pop(rd_kafka_q_t *rkq, rd_ts_t timeout_us, int32_t version) { return rd_kafka_q_pop_serve(rkq, timeout_us, version, - RD_KAFKA_Q_CB_RETURN, - NULL, NULL); + RD_KAFKA_Q_CB_RETURN, NULL, NULL); } /** - * Pop all available ops from a queue and call the provided + * Pop all available ops from a queue and call the provided * callback for each op. * `max_cnt` limits the number of ops served, 0 = no limit. * @@ -448,19 +449,22 @@ rd_kafka_op_t *rd_kafka_q_pop (rd_kafka_q_t *rkq, rd_ts_t timeout_us, * * Locality: any thread. */ -int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, - int max_cnt, rd_kafka_q_cb_type_t cb_type, - rd_kafka_q_serve_cb_t *callback, void *opaque) { +int rd_kafka_q_serve(rd_kafka_q_t *rkq, + int timeout_ms, + int max_cnt, + rd_kafka_q_cb_type_t cb_type, + rd_kafka_q_serve_cb_t *callback, + void *opaque) { rd_kafka_t *rk = rkq->rkq_rk; - rd_kafka_op_t *rko; - rd_kafka_q_t localq; + rd_kafka_op_t *rko; + rd_kafka_q_t localq; rd_kafka_q_t *fwdq; int cnt = 0; struct timespec timeout_tspec; rd_dassert(cb_type); - mtx_lock(&rkq->rkq_lock); + mtx_lock(&rkq->rkq_lock); rd_dassert(TAILQ_EMPTY(&rkq->rkq_q) || rkq->rkq_qlen > 0); if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) { @@ -468,11 +472,11 @@ int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, /* Since the q_pop may block we need to release the parent * queue's lock. 
*/ mtx_unlock(&rkq->rkq_lock); - ret = rd_kafka_q_serve(fwdq, timeout_ms, max_cnt, - cb_type, callback, opaque); + ret = rd_kafka_q_serve(fwdq, timeout_ms, max_cnt, cb_type, + callback, opaque); rd_kafka_q_destroy(fwdq); - return ret; - } + return ret; + } rd_timeout_init_timespec(&timeout_tspec, timeout_ms); @@ -485,27 +489,27 @@ int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, rd_kafka_q_mark_served(rkq); - if (!rko) { - mtx_unlock(&rkq->rkq_lock); - return 0; - } + if (!rko) { + mtx_unlock(&rkq->rkq_lock); + return 0; + } - /* Move the first `max_cnt` ops. */ - rd_kafka_q_init(&localq, rkq->rkq_rk); - rd_kafka_q_move_cnt(&localq, rkq, max_cnt == 0 ? -1/*all*/ : max_cnt, - 0/*no-locks*/); + /* Move the first `max_cnt` ops. */ + rd_kafka_q_init(&localq, rkq->rkq_rk); + rd_kafka_q_move_cnt(&localq, rkq, max_cnt == 0 ? -1 /*all*/ : max_cnt, + 0 /*no-locks*/); mtx_unlock(&rkq->rkq_lock); rd_kafka_yield_thread = 0; - /* Call callback for each op */ + /* Call callback for each op */ while ((rko = TAILQ_FIRST(&localq.rkq_q))) { rd_kafka_op_res_t res; rd_kafka_q_deq0(&localq, rko); - res = rd_kafka_op_handle(rk, &localq, rko, cb_type, - opaque, callback); + res = rd_kafka_op_handle(rk, &localq, rko, cb_type, opaque, + callback); /* op must have been handled */ rd_kafka_assert(NULL, res != RD_KAFKA_OP_RES_PASS); cnt++; @@ -519,11 +523,11 @@ int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, rd_kafka_q_prepend(rkq, &localq); break; } - } + } - rd_kafka_q_destroy_owner(&localq); + rd_kafka_q_destroy_owner(&localq); - return cnt; + return cnt; } /** @@ -533,8 +537,9 @@ int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, * * @locality Any thread. 
*/ -static size_t rd_kafka_purge_outdated_messages (int32_t version, - rd_kafka_message_t **rkmessages, size_t cnt) { +static size_t rd_kafka_purge_outdated_messages(int32_t version, + rd_kafka_message_t **rkmessages, + size_t cnt) { size_t valid_count = 0; size_t i; @@ -562,26 +567,27 @@ static size_t rd_kafka_purge_outdated_messages (int32_t version, * Returns the number of messages added. */ -int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size) { - unsigned int cnt = 0; +int rd_kafka_q_serve_rkmessages(rd_kafka_q_t *rkq, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size) { + unsigned int cnt = 0; TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); rd_kafka_op_t *rko, *next; rd_kafka_t *rk = rkq->rkq_rk; rd_kafka_q_t *fwdq; struct timespec timeout_tspec; - mtx_lock(&rkq->rkq_lock); + mtx_lock(&rkq->rkq_lock); if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) { /* Since the q_pop may block we need to release the parent * queue's lock. 
*/ mtx_unlock(&rkq->rkq_lock); - cnt = rd_kafka_q_serve_rkmessages(fwdq, timeout_ms, - rkmessages, rkmessages_size); + cnt = rd_kafka_q_serve_rkmessages(fwdq, timeout_ms, rkmessages, + rkmessages_size); rd_kafka_q_destroy(fwdq); - return cnt; - } + return cnt; + } mtx_unlock(&rkq->rkq_lock); if (timeout_ms) @@ -590,7 +596,7 @@ int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, rd_timeout_init_timespec(&timeout_tspec, timeout_ms); rd_kafka_yield_thread = 0; - while (cnt < rkmessages_size) { + while (cnt < rkmessages_size) { rd_kafka_op_res_t res; mtx_lock(&rkq->rkq_lock); @@ -603,16 +609,16 @@ int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, rd_kafka_q_mark_served(rkq); - if (!rko) { + if (!rko) { mtx_unlock(&rkq->rkq_lock); - break; /* Timed out */ + break; /* Timed out */ } - rd_kafka_q_deq0(rkq, rko); + rd_kafka_q_deq0(rkq, rko); mtx_unlock(&rkq->rkq_lock); - if (rd_kafka_op_version_outdated(rko, 0)) { + if (rd_kafka_op_version_outdated(rko, 0)) { /* Outdated op, put on discard queue */ TAILQ_INSERT_TAIL(&tmpq, rko, rko_link); continue; @@ -620,16 +626,14 @@ int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, if (unlikely(rko->rko_type == RD_KAFKA_OP_BARRIER)) { cnt = (unsigned int)rd_kafka_purge_outdated_messages( - rko->rko_version, - rkmessages, - cnt); + rko->rko_version, rkmessages, cnt); rd_kafka_op_destroy(rko); continue; } /* Serve non-FETCH callbacks */ - res = rd_kafka_poll_cb(rk, rkq, rko, - RD_KAFKA_Q_CB_RETURN, NULL); + res = + rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL); if (res == RD_KAFKA_OP_RES_KEEP || res == RD_KAFKA_OP_RES_HANDLED) { /* Callback served, rko is destroyed (if HANDLED). */ @@ -641,8 +645,8 @@ int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, } rd_dassert(res == RD_KAFKA_OP_RES_PASS); - /* Auto-store offset, if enabled. */ - if (!rko->rko_err && rko->rko_type == RD_KAFKA_OP_FETCH) { + /* Auto-store offset, if enabled. 
*/ + if (!rko->rko_err && rko->rko_type == RD_KAFKA_OP_FETCH) { rd_kafka_op_offset_store(rk, rko); /* If this is a control messages, don't return @@ -653,26 +657,26 @@ int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, } } - /* Get rkmessage from rko and append to array. */ - rkmessages[cnt++] = rd_kafka_message_get(rko); - } + /* Get rkmessage from rko and append to array. */ + rkmessages[cnt++] = rd_kafka_message_get(rko); + } /* Discard non-desired and already handled ops */ next = TAILQ_FIRST(&tmpq); while (next) { - rko = next; + rko = next; next = TAILQ_NEXT(next, rko_link); rd_kafka_op_destroy(rko); } rd_kafka_app_polled(rk); - return cnt; + return cnt; } -void rd_kafka_queue_destroy (rd_kafka_queue_t *rkqu) { +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu) { if (rkqu->rkqu_is_owner) rd_kafka_q_destroy_owner(rkqu->rkqu_q); else @@ -680,56 +684,54 @@ void rd_kafka_queue_destroy (rd_kafka_queue_t *rkqu) { rd_free(rkqu); } -rd_kafka_queue_t *rd_kafka_queue_new0 (rd_kafka_t *rk, rd_kafka_q_t *rkq) { - rd_kafka_queue_t *rkqu; +rd_kafka_queue_t *rd_kafka_queue_new0(rd_kafka_t *rk, rd_kafka_q_t *rkq) { + rd_kafka_queue_t *rkqu; - rkqu = rd_calloc(1, sizeof(*rkqu)); + rkqu = rd_calloc(1, sizeof(*rkqu)); - rkqu->rkqu_q = rkq; - rd_kafka_q_keep(rkq); + rkqu->rkqu_q = rkq; + rd_kafka_q_keep(rkq); rkqu->rkqu_rk = rk; - return rkqu; + return rkqu; } -rd_kafka_queue_t *rd_kafka_queue_new (rd_kafka_t *rk) { - rd_kafka_q_t *rkq; - rd_kafka_queue_t *rkqu; +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk) { + rd_kafka_q_t *rkq; + rd_kafka_queue_t *rkqu; - rkq = rd_kafka_q_new(rk); - rkqu = rd_kafka_queue_new0(rk, rkq); - rd_kafka_q_destroy(rkq); /* Loose refcount from q_new, one is held - * by queue_new0 */ + rkq = rd_kafka_q_new(rk); + rkqu = rd_kafka_queue_new0(rk, rkq); + rd_kafka_q_destroy(rkq); /* Loose refcount from q_new, one is held + * by queue_new0 */ rkqu->rkqu_is_owner = 1; - return rkqu; + return rkqu; } -rd_kafka_queue_t 
*rd_kafka_queue_get_main (rd_kafka_t *rk) { - return rd_kafka_queue_new0(rk, rk->rk_rep); +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk) { + return rd_kafka_queue_new0(rk, rk->rk_rep); } -rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk) { - if (!rk->rk_cgrp) - return NULL; - return rd_kafka_queue_new0(rk, rk->rk_cgrp->rkcg_q); +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk) { + if (!rk->rk_cgrp) + return NULL; + return rd_kafka_queue_new0(rk, rk->rk_cgrp->rkcg_q); } -rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, - const char *topic, - int32_t partition) { +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, + const char *topic, + int32_t partition) { rd_kafka_toppar_t *rktp; rd_kafka_queue_t *result; if (rk->rk_type == RD_KAFKA_PRODUCER) return NULL; - rktp = rd_kafka_toppar_get2(rk, topic, - partition, - 0, /* no ua_on_miss */ + rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, /* no ua_on_miss */ 1 /* create_on_miss */); if (!rktp) @@ -741,15 +743,15 @@ rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, return result; } -rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk) { +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk) { rd_kafka_queue_t *rkqu; rd_kafka_wrlock(rk); if (!rk->rk_background.q) { char errstr[256]; - if (rd_kafka_background_thread_create(rk, - errstr, sizeof(errstr))) { + if (rd_kafka_background_thread_create(rk, errstr, + sizeof(errstr))) { rd_kafka_log(rk, LOG_ERR, "BACKGROUND", "Failed to create background thread: %s", errstr); @@ -764,8 +766,8 @@ rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk) { } -rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk, - rd_kafka_queue_t *rkqu) { +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu) { rd_kafka_q_t *rkq; if (!rkqu) rkq = rk->rk_rep; @@ -775,31 +777,33 @@ rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk, return 
RD_KAFKA_RESP_ERR_NO_ERROR; } -void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst) { +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst) { rd_kafka_q_fwd_set0(src->rkqu_q, dst ? dst->rkqu_q : NULL, 1, /* do_lock */ 1 /* fwd_app */); } -size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu) { - return (size_t)rd_kafka_q_len(rkqu->rkqu_q); +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu) { + return (size_t)rd_kafka_q_len(rkqu->rkqu_q); } /** * @brief Enable or disable(fd==-1) fd-based wake-ups for queue */ -void rd_kafka_q_io_event_enable (rd_kafka_q_t *rkq, rd_socket_t fd, - const void *payload, size_t size) { +void rd_kafka_q_io_event_enable(rd_kafka_q_t *rkq, + rd_socket_t fd, + const void *payload, + size_t size) { struct rd_kafka_q_io *qio = NULL; if (fd != -1) { - qio = rd_malloc(sizeof(*qio) + size); - qio->fd = fd; - qio->size = size; - qio->payload = (void *)(qio+1); - qio->sent = rd_false; - qio->event_cb = NULL; + qio = rd_malloc(sizeof(*qio) + size); + qio->fd = fd; + qio->size = size; + qio->payload = (void *)(qio + 1); + qio->sent = rd_false; + qio->event_cb = NULL; qio->event_cb_opaque = NULL; memcpy(qio->payload, payload, size); } @@ -815,16 +819,17 @@ void rd_kafka_q_io_event_enable (rd_kafka_q_t *rkq, rd_socket_t fd, } mtx_unlock(&rkq->rkq_lock); - } -void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, - const void *payload, size_t size) { +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, + int fd, + const void *payload, + size_t size) { rd_kafka_q_io_event_enable(rkqu->rkqu_q, fd, payload, size); } -void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu) { +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu) { rd_kafka_q_yield(rkqu->rkqu_q); } @@ -832,18 +837,17 @@ void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu) { /** * @brief Enable or disable(event_cb==NULL) callback-based wake-ups for queue */ -void rd_kafka_q_cb_event_enable (rd_kafka_q_t *rkq, - void (*event_cb) 
(rd_kafka_t *rk, - void *opaque), - void *opaque) { +void rd_kafka_q_cb_event_enable(rd_kafka_q_t *rkq, + void (*event_cb)(rd_kafka_t *rk, void *opaque), + void *opaque) { struct rd_kafka_q_io *qio = NULL; if (event_cb) { - qio = rd_malloc(sizeof(*qio)); - qio->fd = -1; - qio->size = 0; - qio->payload = NULL; - qio->event_cb = event_cb; + qio = rd_malloc(sizeof(*qio)); + qio->fd = -1; + qio->size = 0; + qio->payload = NULL; + qio->event_cb = event_cb; qio->event_cb_opaque = opaque; } @@ -858,14 +862,13 @@ void rd_kafka_q_cb_event_enable (rd_kafka_q_t *rkq, } mtx_unlock(&rkq->rkq_lock); - } -void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, - void (*event_cb) (rd_kafka_t *rk, - void *opaque), - void *opaque) { - rd_kafka_q_cb_event_enable (rkqu->rkqu_q, event_cb, opaque); +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, + void (*event_cb)(rd_kafka_t *rk, + void *opaque), + void *opaque) { + rd_kafka_q_cb_event_enable(rkqu->rkqu_q, event_cb, opaque); } @@ -873,7 +876,7 @@ void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, * Helper: wait for single op on 'rkq', and return its error, * or .._TIMED_OUT on timeout. */ -rd_kafka_resp_err_t rd_kafka_q_wait_result (rd_kafka_q_t *rkq, int timeout_ms) { +rd_kafka_resp_err_t rd_kafka_q_wait_result(rd_kafka_q_t *rkq, int timeout_ms) { rd_kafka_op_t *rko; rd_kafka_resp_err_t err; @@ -899,27 +902,28 @@ rd_kafka_resp_err_t rd_kafka_q_wait_result (rd_kafka_q_t *rkq, int timeout_ms) { * interact with \p rkq through other means from the callback to avoid * deadlocks. 
*/ -int rd_kafka_q_apply (rd_kafka_q_t *rkq, - int (*callback) (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - void *opaque), - void *opaque) { - rd_kafka_op_t *rko, *next; +int rd_kafka_q_apply(rd_kafka_q_t *rkq, + int (*callback)(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + void *opaque), + void *opaque) { + rd_kafka_op_t *rko, *next; rd_kafka_q_t *fwdq; int cnt = 0; mtx_lock(&rkq->rkq_lock); if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) { mtx_unlock(&rkq->rkq_lock); - cnt = rd_kafka_q_apply(fwdq, callback, opaque); + cnt = rd_kafka_q_apply(fwdq, callback, opaque); rd_kafka_q_destroy(fwdq); - return cnt; - } + return cnt; + } - next = TAILQ_FIRST(&rkq->rkq_q); - while ((rko = next)) { - next = TAILQ_NEXT(next, rko_link); + next = TAILQ_FIRST(&rkq->rkq_q); + while ((rko = next)) { + next = TAILQ_NEXT(next, rko_link); cnt += callback(rkq, rko, opaque); - } + } rd_kafka_q_mark_served(rkq); @@ -937,54 +941,56 @@ int rd_kafka_q_apply (rd_kafka_q_t *rkq, * @remark \p rkq locking is not performed (caller's responsibility) * @remark Must NOT be used on fwdq. 
*/ -void rd_kafka_q_fix_offsets (rd_kafka_q_t *rkq, int64_t min_offset, - int64_t base_offset) { - rd_kafka_op_t *rko, *next; - int adj_len = 0; - int64_t adj_size = 0; +void rd_kafka_q_fix_offsets(rd_kafka_q_t *rkq, + int64_t min_offset, + int64_t base_offset) { + rd_kafka_op_t *rko, *next; + int adj_len = 0; + int64_t adj_size = 0; - rd_kafka_assert(NULL, !rkq->rkq_fwdq); + rd_kafka_assert(NULL, !rkq->rkq_fwdq); - next = TAILQ_FIRST(&rkq->rkq_q); - while ((rko = next)) { - next = TAILQ_NEXT(next, rko_link); + next = TAILQ_FIRST(&rkq->rkq_q); + while ((rko = next)) { + next = TAILQ_NEXT(next, rko_link); - if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH)) - continue; + if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH)) + continue; - rko->rko_u.fetch.rkm.rkm_offset += base_offset; + rko->rko_u.fetch.rkm.rkm_offset += base_offset; - if (rko->rko_u.fetch.rkm.rkm_offset < min_offset && - rko->rko_err != RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) { - adj_len++; - adj_size += rko->rko_len; - TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link); - rd_kafka_op_destroy(rko); - continue; - } - } + if (rko->rko_u.fetch.rkm.rkm_offset < min_offset && + rko->rko_err != RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) { + adj_len++; + adj_size += rko->rko_len; + TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link); + rd_kafka_op_destroy(rko); + continue; + } + } - rkq->rkq_qlen -= adj_len; - rkq->rkq_qsize -= adj_size; + rkq->rkq_qlen -= adj_len; + rkq->rkq_qsize -= adj_size; } /** * @brief Print information and contents of queue */ -void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq) { +void rd_kafka_q_dump(FILE *fp, rd_kafka_q_t *rkq) { mtx_lock(&rkq->rkq_lock); - fprintf(fp, "Queue %p \"%s\" (refcnt %d, flags 0x%x, %d ops, " - "%"PRId64" bytes)\n", + fprintf(fp, + "Queue %p \"%s\" (refcnt %d, flags 0x%x, %d ops, " + "%" PRId64 " bytes)\n", rkq, rkq->rkq_name, rkq->rkq_refcnt, rkq->rkq_flags, rkq->rkq_qlen, rkq->rkq_qsize); if (rkq->rkq_qio) fprintf(fp, " QIO fd %d\n", (int)rkq->rkq_qio->fd); if (rkq->rkq_serve) - 
fprintf(fp, " Serve callback %p, opaque %p\n", - rkq->rkq_serve, rkq->rkq_opaque); + fprintf(fp, " Serve callback %p, opaque %p\n", rkq->rkq_serve, + rkq->rkq_opaque); if (rkq->rkq_fwdq) { fprintf(fp, " Forwarded ->\n"); @@ -995,20 +1001,22 @@ void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq) { if (!TAILQ_EMPTY(&rkq->rkq_q)) fprintf(fp, " Queued ops:\n"); TAILQ_FOREACH(rko, &rkq->rkq_q, rko_link) { - fprintf(fp, " %p %s (v%"PRId32", flags 0x%x, " - "prio %d, len %"PRId32", source %s, " + fprintf(fp, + " %p %s (v%" PRId32 + ", flags 0x%x, " + "prio %d, len %" PRId32 + ", source %s, " "replyq %p)\n", rko, rd_kafka_op2str(rko->rko_type), - rko->rko_version, rko->rko_flags, - rko->rko_prio, rko->rko_len, - #if ENABLE_DEVEL + rko->rko_version, rko->rko_flags, rko->rko_prio, + rko->rko_len, +#if ENABLE_DEVEL rko->rko_source - #else +#else "-" - #endif +#endif , - rko->rko_replyq.q - ); + rko->rko_replyq.q); } } @@ -1016,7 +1024,7 @@ void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq) { } -void rd_kafka_enq_once_trigger_destroy (void *ptr) { +void rd_kafka_enq_once_trigger_destroy(void *ptr) { rd_kafka_enq_once_t *eonce = ptr; rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__DESTROY, "destroy"); diff --git a/src/rdkafka_queue.h b/src/rdkafka_queue.h index 33000fdf8c..2356ade603 100644 --- a/src/rdkafka_queue.h +++ b/src/rdkafka_queue.h @@ -37,8 +37,8 @@ #endif /** @brief Queueing strategy */ -#define RD_KAFKA_QUEUE_FIFO 0 -#define RD_KAFKA_QUEUE_LIFO 1 +#define RD_KAFKA_QUEUE_FIFO 0 +#define RD_KAFKA_QUEUE_LIFO 1 TAILQ_HEAD(rd_kafka_op_tailq, rd_kafka_op_s); @@ -50,31 +50,34 @@ TAILQ_HEAD(rd_kafka_op_tailq, rd_kafka_op_s); * clear the wakeup-sent flag. */ struct rd_kafka_q_s { - mtx_t rkq_lock; - cnd_t rkq_cond; - struct rd_kafka_q_s *rkq_fwdq; /* Forwarded/Routed queue. - * Used in place of this queue - * for all operations. 
*/ - - struct rd_kafka_op_tailq rkq_q; /* TAILQ_HEAD(, rd_kafka_op_s) */ - int rkq_qlen; /* Number of entries in queue */ - int64_t rkq_qsize; /* Size of all entries in queue */ - int rkq_refcnt; - int rkq_flags; -#define RD_KAFKA_Q_F_ALLOCATED 0x1 /* Allocated: rd_free on destroy */ -#define RD_KAFKA_Q_F_READY 0x2 /* Queue is ready to be used. - * Flag is cleared on destroy */ -#define RD_KAFKA_Q_F_FWD_APP 0x4 /* Queue is being forwarded by a call - * to rd_kafka_queue_forward. */ -#define RD_KAFKA_Q_F_YIELD 0x8 /* Have waiters return even if - * no rko was enqueued. - * This is used to wake up a waiter - * by triggering the cond-var - * but without having to enqueue - * an op. */ - - rd_kafka_t *rkq_rk; - struct rd_kafka_q_io *rkq_qio; /* FD-based application signalling */ + mtx_t rkq_lock; + cnd_t rkq_cond; + struct rd_kafka_q_s *rkq_fwdq; /* Forwarded/Routed queue. + * Used in place of this queue + * for all operations. */ + + struct rd_kafka_op_tailq rkq_q; /* TAILQ_HEAD(, rd_kafka_op_s) */ + int rkq_qlen; /* Number of entries in queue */ + int64_t rkq_qsize; /* Size of all entries in queue */ + int rkq_refcnt; + int rkq_flags; +#define RD_KAFKA_Q_F_ALLOCATED 0x1 /* Allocated: rd_free on destroy */ +#define RD_KAFKA_Q_F_READY \ + 0x2 /* Queue is ready to be used. \ + * Flag is cleared on destroy */ +#define RD_KAFKA_Q_F_FWD_APP \ + 0x4 /* Queue is being forwarded by a call \ + * to rd_kafka_queue_forward. */ +#define RD_KAFKA_Q_F_YIELD \ + 0x8 /* Have waiters return even if \ + * no rko was enqueued. \ + * This is used to wake up a waiter \ + * by triggering the cond-var \ + * but without having to enqueue \ + * an op. */ + + rd_kafka_t *rkq_rk; + struct rd_kafka_q_io *rkq_qio; /* FD-based application signalling */ /* Op serve callback (optional). 
* Mainly used for forwarded queues to use the original queue's @@ -84,9 +87,9 @@ struct rd_kafka_q_s { void *rkq_opaque; #if ENABLE_DEVEL - char rkq_name[64]; /* Debugging: queue name (FUNC:LINE) */ + char rkq_name[64]; /* Debugging: queue name (FUNC:LINE) */ #else - const char *rkq_name; /* Debugging: queue name (FUNC) */ + const char *rkq_name; /* Debugging: queue name (FUNC) */ #endif }; @@ -94,15 +97,15 @@ struct rd_kafka_q_s { /* Application signalling state holder. */ struct rd_kafka_q_io { /* For FD-based signalling */ - rd_socket_t fd; - void *payload; - size_t size; + rd_socket_t fd; + void *payload; + size_t size; rd_bool_t sent; /**< Wake-up has been sent. * This field is reset to false by the queue * reader, allowing a new wake-up to be sent by a * subsequent writer. */ /* For callback-based signalling */ - void (*event_cb) (rd_kafka_t *rk, void *opaque); + void (*event_cb)(rd_kafka_t *rk, void *opaque); void *event_cb_opaque; }; @@ -112,81 +115,80 @@ struct rd_kafka_q_io { * @return true if queue is ready/enabled, else false. 
* @remark queue luck must be held by caller (if applicable) */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_ready (rd_kafka_q_t *rkq) { - return rkq->rkq_flags & RD_KAFKA_Q_F_READY; +static RD_INLINE RD_UNUSED int rd_kafka_q_ready(rd_kafka_q_t *rkq) { + return rkq->rkq_flags & RD_KAFKA_Q_F_READY; } +void rd_kafka_q_init0(rd_kafka_q_t *rkq, + rd_kafka_t *rk, + const char *func, + int line); +#define rd_kafka_q_init(rkq, rk) \ + rd_kafka_q_init0(rkq, rk, __FUNCTION__, __LINE__) +rd_kafka_q_t *rd_kafka_q_new0(rd_kafka_t *rk, const char *func, int line); +#define rd_kafka_q_new(rk) rd_kafka_q_new0(rk, __FUNCTION__, __LINE__) +void rd_kafka_q_destroy_final(rd_kafka_q_t *rkq); -void rd_kafka_q_init0 (rd_kafka_q_t *rkq, rd_kafka_t *rk, - const char *func, int line); -#define rd_kafka_q_init(rkq,rk) rd_kafka_q_init0(rkq,rk,__FUNCTION__,__LINE__) -rd_kafka_q_t *rd_kafka_q_new0 (rd_kafka_t *rk, const char *func, int line); -#define rd_kafka_q_new(rk) rd_kafka_q_new0(rk,__FUNCTION__,__LINE__) -void rd_kafka_q_destroy_final (rd_kafka_q_t *rkq); - -#define rd_kafka_q_lock(rkqu) mtx_lock(&(rkqu)->rkq_lock) +#define rd_kafka_q_lock(rkqu) mtx_lock(&(rkqu)->rkq_lock) #define rd_kafka_q_unlock(rkqu) mtx_unlock(&(rkqu)->rkq_lock) -static RD_INLINE RD_UNUSED -rd_kafka_q_t *rd_kafka_q_keep (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED rd_kafka_q_t *rd_kafka_q_keep(rd_kafka_q_t *rkq) { mtx_lock(&rkq->rkq_lock); rkq->rkq_refcnt++; mtx_unlock(&rkq->rkq_lock); - return rkq; + return rkq; } -static RD_INLINE RD_UNUSED -rd_kafka_q_t *rd_kafka_q_keep_nolock (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED rd_kafka_q_t * +rd_kafka_q_keep_nolock(rd_kafka_q_t *rkq) { rkq->rkq_refcnt++; - return rkq; + return rkq; } /** * @returns the queue's name (used for debugging) */ -static RD_INLINE RD_UNUSED -const char *rd_kafka_q_name (rd_kafka_q_t *rkq) { - return rkq->rkq_name; +static RD_INLINE RD_UNUSED const char *rd_kafka_q_name(rd_kafka_q_t *rkq) { + return rkq->rkq_name; } /** * @returns the 
final destination queue name (after forwarding) * @remark rkq MUST NOT be locked */ -static RD_INLINE RD_UNUSED -const char *rd_kafka_q_dest_name (rd_kafka_q_t *rkq) { - const char *ret; - mtx_lock(&rkq->rkq_lock); - if (rkq->rkq_fwdq) - ret = rd_kafka_q_dest_name(rkq->rkq_fwdq); - else - ret = rd_kafka_q_name(rkq); - mtx_unlock(&rkq->rkq_lock); - return ret; +static RD_INLINE RD_UNUSED const char *rd_kafka_q_dest_name(rd_kafka_q_t *rkq) { + const char *ret; + mtx_lock(&rkq->rkq_lock); + if (rkq->rkq_fwdq) + ret = rd_kafka_q_dest_name(rkq->rkq_fwdq); + else + ret = rd_kafka_q_name(rkq); + mtx_unlock(&rkq->rkq_lock); + return ret; } /** * @brief Disable a queue. * Attempting to enqueue ops to the queue will destroy the ops. */ -static RD_INLINE RD_UNUSED -void rd_kafka_q_disable0 (rd_kafka_q_t *rkq, int do_lock) { +static RD_INLINE RD_UNUSED void rd_kafka_q_disable0(rd_kafka_q_t *rkq, + int do_lock) { if (do_lock) mtx_lock(&rkq->rkq_lock); rkq->rkq_flags &= ~RD_KAFKA_Q_F_READY; if (do_lock) mtx_unlock(&rkq->rkq_lock); } -#define rd_kafka_q_disable(rkq) rd_kafka_q_disable0(rkq, 1/*lock*/) +#define rd_kafka_q_disable(rkq) rd_kafka_q_disable0(rkq, 1 /*lock*/) -int rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock); -#define rd_kafka_q_purge(rkq) rd_kafka_q_purge0(rkq, 1/*lock*/) -void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, - rd_kafka_toppar_t *rktp, int version); +int rd_kafka_q_purge0(rd_kafka_q_t *rkq, int do_lock); +#define rd_kafka_q_purge(rkq) rd_kafka_q_purge0(rkq, 1 /*lock*/) +void rd_kafka_q_purge_toppar_version(rd_kafka_q_t *rkq, + rd_kafka_toppar_t *rktp, + int version); /** * @brief Loose reference to queue, when refcount reaches 0 the queue @@ -194,8 +196,8 @@ void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, * * @param disable Also disable the queue, to be used by owner of the queue. 
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_q_destroy0 (rd_kafka_q_t *rkq, int disable) { +static RD_INLINE RD_UNUSED void rd_kafka_q_destroy0(rd_kafka_q_t *rkq, + int disable) { int do_delete = 0; if (disable) { @@ -203,8 +205,8 @@ void rd_kafka_q_destroy0 (rd_kafka_q_t *rkq, int disable) { * that reference this queue somehow), * we disable the queue and purge it with individual * locking. */ - rd_kafka_q_disable0(rkq, 1/*lock*/); - rd_kafka_q_purge0(rkq, 1/*lock*/); + rd_kafka_q_disable0(rkq, 1 /*lock*/); + rd_kafka_q_purge0(rkq, 1 /*lock*/); } mtx_lock(&rkq->rkq_lock); @@ -216,7 +218,7 @@ void rd_kafka_q_destroy0 (rd_kafka_q_t *rkq, int disable) { rd_kafka_q_destroy_final(rkq); } -#define rd_kafka_q_destroy(rkq) rd_kafka_q_destroy0(rkq, 0/*dont-disable*/) +#define rd_kafka_q_destroy(rkq) rd_kafka_q_destroy0(rkq, 0 /*dont-disable*/) /** * @brief Queue destroy method to be used by the owner (poller) of @@ -228,9 +230,8 @@ void rd_kafka_q_destroy0 (rd_kafka_q_t *rkq, int disable) { * but there is noone left to poll it, possibly resulting in a * hang on termination due to refcounts held by the op. */ -static RD_INLINE RD_UNUSED -void rd_kafka_q_destroy_owner (rd_kafka_q_t *rkq) { - rd_kafka_q_destroy0(rkq, 1/*disable*/); +static RD_INLINE RD_UNUSED void rd_kafka_q_destroy_owner(rd_kafka_q_t *rkq) { + rd_kafka_q_destroy0(rkq, 1 /*disable*/); } @@ -239,11 +240,10 @@ void rd_kafka_q_destroy_owner (rd_kafka_q_t *rkq) { * WARNING: All messages will be lost and leaked. * NOTE: No locking is performed. 
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_q_reset (rd_kafka_q_t *rkq) { - TAILQ_INIT(&rkq->rkq_q); +static RD_INLINE RD_UNUSED void rd_kafka_q_reset(rd_kafka_q_t *rkq) { + TAILQ_INIT(&rkq->rkq_q); rd_dassert(TAILQ_EMPTY(&rkq->rkq_q)); - rkq->rkq_qlen = 0; + rkq->rkq_qlen = 0; rkq->rkq_qsize = 0; } @@ -252,17 +252,19 @@ void rd_kafka_q_reset (rd_kafka_q_t *rkq) { /** * Forward 'srcq' to 'destq' */ -void rd_kafka_q_fwd_set0 (rd_kafka_q_t *srcq, rd_kafka_q_t *destq, - int do_lock, int fwd_app); -#define rd_kafka_q_fwd_set(S,D) rd_kafka_q_fwd_set0(S,D,1/*lock*/,\ - 0/*no fwd_app*/) +void rd_kafka_q_fwd_set0(rd_kafka_q_t *srcq, + rd_kafka_q_t *destq, + int do_lock, + int fwd_app); +#define rd_kafka_q_fwd_set(S, D) \ + rd_kafka_q_fwd_set0(S, D, 1 /*lock*/, 0 /*no fwd_app*/) /** * @returns the forward queue (if any) with its refcount increased. * @locks rd_kafka_q_lock(rkq) == !do_lock */ -static RD_INLINE RD_UNUSED -rd_kafka_q_t *rd_kafka_q_fwd_get (rd_kafka_q_t *rkq, int do_lock) { +static RD_INLINE RD_UNUSED rd_kafka_q_t *rd_kafka_q_fwd_get(rd_kafka_q_t *rkq, + int do_lock) { rd_kafka_q_t *fwdq; if (do_lock) mtx_lock(&rkq->rkq_lock); @@ -282,12 +284,12 @@ rd_kafka_q_t *rd_kafka_q_fwd_get (rd_kafka_q_t *rkq, int do_lock) { * * @remark Thread-safe. */ -static RD_INLINE RD_UNUSED int rd_kafka_q_is_fwded (rd_kafka_q_t *rkq) { - int r; - mtx_lock(&rkq->rkq_lock); - r = rkq->rkq_fwdq ? 1 : 0; - mtx_unlock(&rkq->rkq_lock); - return r; +static RD_INLINE RD_UNUSED int rd_kafka_q_is_fwded(rd_kafka_q_t *rkq) { + int r; + mtx_lock(&rkq->rkq_lock); + r = rkq->rkq_fwdq ? 
1 : 0; + mtx_unlock(&rkq->rkq_lock); + return r; } @@ -297,11 +299,10 @@ static RD_INLINE RD_UNUSED int rd_kafka_q_is_fwded (rd_kafka_q_t *rkq) { * * @remark Queue MUST be locked */ -static RD_INLINE RD_UNUSED -void rd_kafka_q_io_event (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED void rd_kafka_q_io_event(rd_kafka_q_t *rkq) { - if (likely(!rkq->rkq_qio)) - return; + if (likely(!rkq->rkq_qio)) + return; if (rkq->rkq_qio->event_cb) { rkq->rkq_qio->event_cb(rkq->rkq_rk, @@ -331,8 +332,8 @@ void rd_kafka_q_io_event (rd_kafka_q_t *rkq) { * @brief rko->rko_prio comparator * @remark: descending order: higher priority takes preceedence. */ -static RD_INLINE RD_UNUSED -int rd_kafka_op_cmp_prio (const void *_a, const void *_b) { +static RD_INLINE RD_UNUSED int rd_kafka_op_cmp_prio(const void *_a, + const void *_b) { const rd_kafka_op_t *a = _a, *b = _b; return RD_CMP(b->rko_prio, a->rko_prio); @@ -342,8 +343,7 @@ int rd_kafka_op_cmp_prio (const void *_a, const void *_b) { /** * @brief Wake up waiters without enqueuing an op. */ -static RD_INLINE RD_UNUSED void -rd_kafka_q_yield (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED void rd_kafka_q_yield(rd_kafka_q_t *rkq) { rd_kafka_q_t *fwdq; mtx_lock(&rkq->rkq_lock); @@ -368,8 +368,6 @@ rd_kafka_q_yield (rd_kafka_q_t *rkq) { rd_kafka_q_yield(fwdq); rd_kafka_q_destroy(fwdq); } - - } /** @@ -378,16 +376,16 @@ rd_kafka_q_yield (rd_kafka_q_t *rkq) { * @remark Will not perform locking, signaling, fwdq, READY checking, etc. 
*/ static RD_INLINE RD_UNUSED void -rd_kafka_q_enq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int at_head) { - if (likely(!rko->rko_prio)) - TAILQ_INSERT_TAIL(&rkq->rkq_q, rko, rko_link); - else if (at_head) - TAILQ_INSERT_HEAD(&rkq->rkq_q, rko, rko_link); - else - TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *, - rko_link, rd_kafka_op_cmp_prio); - rkq->rkq_qlen++; - rkq->rkq_qsize += rko->rko_len; +rd_kafka_q_enq0(rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int at_head) { + if (likely(!rko->rko_prio)) + TAILQ_INSERT_TAIL(&rkq->rkq_q, rko, rko_link); + else if (at_head) + TAILQ_INSERT_HEAD(&rkq->rkq_q, rko, rko_link); + else + TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *, rko_link, + rd_kafka_op_cmp_prio); + rkq->rkq_qlen++; + rkq->rkq_qsize += rko->rko_len; } @@ -407,9 +405,11 @@ rd_kafka_q_enq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int at_head) { * * @locality any thread. */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_enq1 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_t *orig_destq, int at_head, int do_lock) { +static RD_INLINE RD_UNUSED int rd_kafka_q_enq1(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_t *orig_destq, + int at_head, + int do_lock) { rd_kafka_q_t *fwdq; if (do_lock) @@ -429,7 +429,7 @@ int rd_kafka_q_enq1 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, if (!rko->rko_serve && orig_destq->rkq_serve) { /* Store original queue's serve callback and opaque * prior to forwarding. */ - rko->rko_serve = orig_destq->rkq_serve; + rko->rko_serve = orig_destq->rkq_serve; rko->rko_serve_opaque = orig_destq->rkq_opaque; } @@ -443,7 +443,7 @@ int rd_kafka_q_enq1 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, } else { if (do_lock) mtx_unlock(&rkq->rkq_lock); - rd_kafka_q_enq1(fwdq, rko, orig_destq, at_head, 1/*do lock*/); + rd_kafka_q_enq1(fwdq, rko, orig_destq, at_head, 1 /*do lock*/); rd_kafka_q_destroy(fwdq); } @@ -461,9 +461,9 @@ int rd_kafka_q_enq1 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, * @locality any thread. 
* @locks rkq MUST NOT be locked */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_enq (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { - return rd_kafka_q_enq1(rkq, rko, rkq, 0/*at tail*/, 1/*do lock*/); +static RD_INLINE RD_UNUSED int rd_kafka_q_enq(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + return rd_kafka_q_enq1(rkq, rko, rkq, 0 /*at tail*/, 1 /*do lock*/); } @@ -478,9 +478,9 @@ int rd_kafka_q_enq (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { * @locality any thread * @locks rkq MUST BE locked */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_reenq (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { - return rd_kafka_q_enq1(rkq, rko, rkq, 1/*at head*/, 0/*don't lock*/); +static RD_INLINE RD_UNUSED int rd_kafka_q_reenq(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + return rd_kafka_q_enq1(rkq, rko, rkq, 1 /*at head*/, 0 /*don't lock*/); } @@ -490,9 +490,9 @@ int rd_kafka_q_reenq (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { * NOTE: rkq_lock MUST be held * Locality: any thread */ -static RD_INLINE RD_UNUSED -void rd_kafka_q_deq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { - rd_dassert(rkq->rkq_qlen > 0 && +static RD_INLINE RD_UNUSED void rd_kafka_q_deq0(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_dassert(rkq->rkq_qlen > 0 && rkq->rkq_qsize >= (int64_t)rko->rko_len); TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link); @@ -510,8 +510,8 @@ void rd_kafka_q_deq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { * Should be called by all queue readers. * * @locks_required rkq must be locked. -*/ -static RD_INLINE RD_UNUSED void rd_kafka_q_mark_served (rd_kafka_q_t *rkq) { + */ +static RD_INLINE RD_UNUSED void rd_kafka_q_mark_served(rd_kafka_q_t *rkq) { if (rkq->rkq_qio) rkq->rkq_qio->sent = rd_false; } @@ -526,56 +526,53 @@ static RD_INLINE RD_UNUSED void rd_kafka_q_mark_served (rd_kafka_q_t *rkq) { * * @returns 0 if operation was performed or -1 if rkq is disabled. 
*/ -static RD_INLINE RD_UNUSED -int rd_kafka_q_concat0 (rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) { - int r = 0; - - while (srcq->rkq_fwdq) /* Resolve source queue */ - srcq = srcq->rkq_fwdq; - if (unlikely(srcq->rkq_qlen == 0)) - return 0; /* Don't do anything if source queue is empty */ - - if (do_lock) - mtx_lock(&rkq->rkq_lock); - if (!rkq->rkq_fwdq) { +static RD_INLINE RD_UNUSED int +rd_kafka_q_concat0(rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) { + int r = 0; + + while (srcq->rkq_fwdq) /* Resolve source queue */ + srcq = srcq->rkq_fwdq; + if (unlikely(srcq->rkq_qlen == 0)) + return 0; /* Don't do anything if source queue is empty */ + + if (do_lock) + mtx_lock(&rkq->rkq_lock); + if (!rkq->rkq_fwdq) { rd_kafka_op_t *rko; - rd_dassert(TAILQ_EMPTY(&srcq->rkq_q) || - srcq->rkq_qlen > 0); - if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) { + rd_dassert(TAILQ_EMPTY(&srcq->rkq_q) || srcq->rkq_qlen > 0); + if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) { if (do_lock) mtx_unlock(&rkq->rkq_lock); - return -1; - } + return -1; + } /* First insert any prioritized ops from srcq * in the right position in rkq. */ while ((rko = TAILQ_FIRST(&srcq->rkq_q)) && rko->rko_prio > 0) { TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link); - TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, - rd_kafka_op_t *, rko_link, - rd_kafka_op_cmp_prio); + TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *, + rko_link, rd_kafka_op_cmp_prio); } - TAILQ_CONCAT(&rkq->rkq_q, &srcq->rkq_q, rko_link); - if (rkq->rkq_qlen == 0) - rd_kafka_q_io_event(rkq); + TAILQ_CONCAT(&rkq->rkq_q, &srcq->rkq_q, rko_link); + if (rkq->rkq_qlen == 0) + rd_kafka_q_io_event(rkq); rkq->rkq_qlen += srcq->rkq_qlen; rkq->rkq_qsize += srcq->rkq_qsize; - cnd_signal(&rkq->rkq_cond); + cnd_signal(&rkq->rkq_cond); rd_kafka_q_mark_served(srcq); rd_kafka_q_reset(srcq); - } else - r = rd_kafka_q_concat0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq, - srcq, - rkq->rkq_fwdq ? 
do_lock : 0); - if (do_lock) - mtx_unlock(&rkq->rkq_lock); - - return r; + } else + r = rd_kafka_q_concat0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq, + srcq, rkq->rkq_fwdq ? do_lock : 0); + if (do_lock) + mtx_unlock(&rkq->rkq_lock); + + return r; } -#define rd_kafka_q_concat(dstq,srcq) rd_kafka_q_concat0(dstq,srcq,1/*lock*/) +#define rd_kafka_q_concat(dstq, srcq) rd_kafka_q_concat0(dstq, srcq, 1 /*lock*/) /** @@ -588,38 +585,37 @@ int rd_kafka_q_concat0 (rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) { * * @locality any thread. */ -static RD_INLINE RD_UNUSED -void rd_kafka_q_prepend0 (rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, - int do_lock) { - if (do_lock) - mtx_lock(&rkq->rkq_lock); - if (!rkq->rkq_fwdq && !srcq->rkq_fwdq) { +static RD_INLINE RD_UNUSED void +rd_kafka_q_prepend0(rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) { + if (do_lock) + mtx_lock(&rkq->rkq_lock); + if (!rkq->rkq_fwdq && !srcq->rkq_fwdq) { /* FIXME: prio-aware */ /* Concat rkq on srcq */ TAILQ_CONCAT(&srcq->rkq_q, &rkq->rkq_q, rko_link); /* Move srcq to rkq */ TAILQ_MOVE(&rkq->rkq_q, &srcq->rkq_q, rko_link); - if (rkq->rkq_qlen == 0 && srcq->rkq_qlen > 0) + if (rkq->rkq_qlen == 0 && srcq->rkq_qlen > 0) rd_kafka_q_io_event(rkq); rkq->rkq_qlen += srcq->rkq_qlen; rkq->rkq_qsize += srcq->rkq_qsize; rd_kafka_q_mark_served(srcq); rd_kafka_q_reset(srcq); - } else - rd_kafka_q_prepend0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq, + } else + rd_kafka_q_prepend0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq, srcq->rkq_fwdq ? srcq->rkq_fwdq : srcq, rkq->rkq_fwdq ? 
do_lock : 0); - if (do_lock) - mtx_unlock(&rkq->rkq_lock); + if (do_lock) + mtx_unlock(&rkq->rkq_lock); } -#define rd_kafka_q_prepend(dstq,srcq) rd_kafka_q_prepend0(dstq,srcq,1/*lock*/) +#define rd_kafka_q_prepend(dstq, srcq) \ + rd_kafka_q_prepend0(dstq, srcq, 1 /*lock*/) /* Returns the number of elements in the queue */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_len (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED int rd_kafka_q_len(rd_kafka_q_t *rkq) { int qlen; rd_kafka_q_t *fwdq; mtx_lock(&rkq->rkq_lock); @@ -635,8 +631,7 @@ int rd_kafka_q_len (rd_kafka_q_t *rkq) { } /* Returns the total size of elements in the queue */ -static RD_INLINE RD_UNUSED -uint64_t rd_kafka_q_size (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED uint64_t rd_kafka_q_size(rd_kafka_q_t *rkq) { uint64_t sz; rd_kafka_q_t *fwdq; mtx_lock(&rkq->rkq_lock); @@ -656,11 +651,11 @@ uint64_t rd_kafka_q_size (rd_kafka_q_t *rkq) { * \p rkq refcount (unless NULL), version, and debug id. */ static RD_INLINE RD_UNUSED rd_kafka_replyq_t -rd_kafka_replyq_make (rd_kafka_q_t *rkq, int version, const char *id) { +rd_kafka_replyq_make(rd_kafka_q_t *rkq, int version, const char *id) { rd_kafka_replyq_t replyq = RD_ZERO_INIT; if (rkq) { - replyq.q = rd_kafka_q_keep(rkq); + replyq.q = rd_kafka_q_keep(rkq); replyq.version = version; #if ENABLE_DEVEL replyq._id = rd_strdup(id); @@ -672,13 +667,20 @@ rd_kafka_replyq_make (rd_kafka_q_t *rkq, int version, const char *id) { /* Construct temporary on-stack replyq with increased Q refcount and * optional VERSION. */ -#define RD_KAFKA_REPLYQ(Q,VERSION) rd_kafka_replyq_make(Q,VERSION,__FUNCTION__) +#define RD_KAFKA_REPLYQ(Q, VERSION) \ + rd_kafka_replyq_make(Q, VERSION, __FUNCTION__) /* Construct temporary on-stack replyq for indicating no replyq. 
*/ #if ENABLE_DEVEL -#define RD_KAFKA_NO_REPLYQ (rd_kafka_replyq_t){NULL, 0, NULL} +#define RD_KAFKA_NO_REPLYQ \ + (rd_kafka_replyq_t) { \ + NULL, 0, NULL \ + } #else -#define RD_KAFKA_NO_REPLYQ (rd_kafka_replyq_t){NULL, 0} +#define RD_KAFKA_NO_REPLYQ \ + (rd_kafka_replyq_t) { \ + NULL, 0 \ + } #endif @@ -686,7 +688,7 @@ rd_kafka_replyq_make (rd_kafka_q_t *rkq, int version, const char *id) { * @returns true if the replyq is valid, else false. */ static RD_INLINE RD_UNUSED rd_bool_t -rd_kafka_replyq_is_valid (rd_kafka_replyq_t *replyq) { +rd_kafka_replyq_is_valid(rd_kafka_replyq_t *replyq) { rd_bool_t valid = rd_true; if (!replyq->q) @@ -705,13 +707,13 @@ rd_kafka_replyq_is_valid (rd_kafka_replyq_t *replyq) { * Set up replyq. * Q refcnt is increased. */ -static RD_INLINE RD_UNUSED void -rd_kafka_set_replyq (rd_kafka_replyq_t *replyq, - rd_kafka_q_t *rkq, int32_t version) { - replyq->q = rkq ? rd_kafka_q_keep(rkq) : NULL; - replyq->version = version; +static RD_INLINE RD_UNUSED void rd_kafka_set_replyq(rd_kafka_replyq_t *replyq, + rd_kafka_q_t *rkq, + int32_t version) { + replyq->q = rkq ? rd_kafka_q_keep(rkq) : NULL; + replyq->version = version; #if ENABLE_DEVEL - replyq->_id = rd_strdup(__FUNCTION__); + replyq->_id = rd_strdup(__FUNCTION__); #endif } @@ -720,31 +722,33 @@ rd_kafka_set_replyq (rd_kafka_replyq_t *replyq, * Q refcnt is increased. */ static RD_INLINE RD_UNUSED void -rd_kafka_op_set_replyq (rd_kafka_op_t *rko, rd_kafka_q_t *rkq, - rd_atomic32_t *versionptr) { - rd_kafka_set_replyq(&rko->rko_replyq, rkq, - versionptr ? rd_atomic32_get(versionptr) : 0); +rd_kafka_op_set_replyq(rd_kafka_op_t *rko, + rd_kafka_q_t *rkq, + rd_atomic32_t *versionptr) { + rd_kafka_set_replyq(&rko->rko_replyq, rkq, + versionptr ? 
rd_atomic32_get(versionptr) : 0); } /* Set reply rko's version from replyq's version */ -#define rd_kafka_op_get_reply_version(REPLY_RKO, ORIG_RKO) do { \ - (REPLY_RKO)->rko_version = (ORIG_RKO)->rko_replyq.version; \ - } while (0) +#define rd_kafka_op_get_reply_version(REPLY_RKO, ORIG_RKO) \ + do { \ + (REPLY_RKO)->rko_version = (ORIG_RKO)->rko_replyq.version; \ + } while (0) /* Clear replyq holder without decreasing any .q references. */ static RD_INLINE RD_UNUSED void -rd_kafka_replyq_clear (rd_kafka_replyq_t *replyq) { - memset(replyq, 0, sizeof(*replyq)); +rd_kafka_replyq_clear(rd_kafka_replyq_t *replyq) { + memset(replyq, 0, sizeof(*replyq)); } /** * @brief Make a copy of \p src in \p dst, with its own queue reference */ -static RD_INLINE RD_UNUSED void -rd_kafka_replyq_copy (rd_kafka_replyq_t *dst, rd_kafka_replyq_t *src) { +static RD_INLINE RD_UNUSED void rd_kafka_replyq_copy(rd_kafka_replyq_t *dst, + rd_kafka_replyq_t *src) { dst->version = src->version; - dst->q = src->q; + dst->q = src->q; if (dst->q) rd_kafka_q_keep(dst->q); #if ENABLE_DEVEL @@ -760,16 +764,16 @@ rd_kafka_replyq_copy (rd_kafka_replyq_t *dst, rd_kafka_replyq_t *src) { * Clear replyq holder and destroy any .q references. 
*/ static RD_INLINE RD_UNUSED void -rd_kafka_replyq_destroy (rd_kafka_replyq_t *replyq) { - if (replyq->q) - rd_kafka_q_destroy(replyq->q); +rd_kafka_replyq_destroy(rd_kafka_replyq_t *replyq) { + if (replyq->q) + rd_kafka_q_destroy(replyq->q); #if ENABLE_DEVEL - if (replyq->_id) { - rd_free(replyq->_id); - replyq->_id = NULL; - } + if (replyq->_id) { + rd_free(replyq->_id); + replyq->_id = NULL; + } #endif - rd_kafka_replyq_clear(replyq); + rd_kafka_replyq_clear(replyq); } @@ -782,68 +786,76 @@ rd_kafka_replyq_destroy (rd_kafka_replyq_t *replyq) { * * @returns Same as rd_kafka_q_enq() */ -static RD_INLINE RD_UNUSED int -rd_kafka_replyq_enq (rd_kafka_replyq_t *replyq, rd_kafka_op_t *rko, - int version) { - rd_kafka_q_t *rkq = replyq->q; - int r; - - if (version) - rko->rko_version = version; - else - rko->rko_version = replyq->version; - - /* The replyq queue reference is done after we've enqueued the rko - * so clear it here. */ +static RD_INLINE RD_UNUSED int rd_kafka_replyq_enq(rd_kafka_replyq_t *replyq, + rd_kafka_op_t *rko, + int version) { + rd_kafka_q_t *rkq = replyq->q; + int r; + + if (version) + rko->rko_version = version; + else + rko->rko_version = replyq->version; + + /* The replyq queue reference is done after we've enqueued the rko + * so clear it here. */ replyq->q = NULL; /* destroyed separately below */ #if ENABLE_DEVEL - if (replyq->_id) { - rd_free(replyq->_id); - replyq->_id = NULL; - } + if (replyq->_id) { + rd_free(replyq->_id); + replyq->_id = NULL; + } #endif - /* Retain replyq->version since it is used by buf_callback - * when dispatching the callback. */ + /* Retain replyq->version since it is used by buf_callback + * when dispatching the callback. 
*/ - r = rd_kafka_q_enq(rkq, rko); + r = rd_kafka_q_enq(rkq, rko); - rd_kafka_q_destroy(rkq); + rd_kafka_q_destroy(rkq); - return r; + return r; } -rd_kafka_op_t *rd_kafka_q_pop_serve (rd_kafka_q_t *rkq, rd_ts_t timeout_us, - int32_t version, - rd_kafka_q_cb_type_t cb_type, - rd_kafka_q_serve_cb_t *callback, - void *opaque); -rd_kafka_op_t *rd_kafka_q_pop (rd_kafka_q_t *rkq, rd_ts_t timeout_us, - int32_t version); -int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, int max_cnt, - rd_kafka_q_cb_type_t cb_type, - rd_kafka_q_serve_cb_t *callback, - void *opaque); - - -int rd_kafka_q_move_cnt (rd_kafka_q_t *dstq, rd_kafka_q_t *srcq, - int cnt, int do_locks); - -int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size); -rd_kafka_resp_err_t rd_kafka_q_wait_result (rd_kafka_q_t *rkq, int timeout_ms); - -int rd_kafka_q_apply (rd_kafka_q_t *rkq, - int (*callback) (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - void *opaque), - void *opaque); - -void rd_kafka_q_fix_offsets (rd_kafka_q_t *rkq, int64_t min_offset, - int64_t base_offset); +rd_kafka_op_t *rd_kafka_q_pop_serve(rd_kafka_q_t *rkq, + rd_ts_t timeout_us, + int32_t version, + rd_kafka_q_cb_type_t cb_type, + rd_kafka_q_serve_cb_t *callback, + void *opaque); +rd_kafka_op_t * +rd_kafka_q_pop(rd_kafka_q_t *rkq, rd_ts_t timeout_us, int32_t version); +int rd_kafka_q_serve(rd_kafka_q_t *rkq, + int timeout_ms, + int max_cnt, + rd_kafka_q_cb_type_t cb_type, + rd_kafka_q_serve_cb_t *callback, + void *opaque); + + +int rd_kafka_q_move_cnt(rd_kafka_q_t *dstq, + rd_kafka_q_t *srcq, + int cnt, + int do_locks); + +int rd_kafka_q_serve_rkmessages(rd_kafka_q_t *rkq, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size); +rd_kafka_resp_err_t rd_kafka_q_wait_result(rd_kafka_q_t *rkq, int timeout_ms); + +int rd_kafka_q_apply(rd_kafka_q_t *rkq, + int (*callback)(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + void *opaque), + void *opaque); + +void 
rd_kafka_q_fix_offsets(rd_kafka_q_t *rkq, + int64_t min_offset, + int64_t base_offset); /** * @returns the last op in the queue matching \p op_type and \p allow_err (bool) @@ -851,33 +863,33 @@ void rd_kafka_q_fix_offsets (rd_kafka_q_t *rkq, int64_t min_offset, * is not removed from the queue and may thus not be held for longer * than the lock is held. */ -static RD_INLINE RD_UNUSED -rd_kafka_op_t *rd_kafka_q_last (rd_kafka_q_t *rkq, rd_kafka_op_type_t op_type, - int allow_err) { - rd_kafka_op_t *rko; - TAILQ_FOREACH_REVERSE(rko, &rkq->rkq_q, rd_kafka_op_tailq, rko_link) { - if (rko->rko_type == op_type && - (allow_err || !rko->rko_err)) - return rko; - } - - return NULL; +static RD_INLINE RD_UNUSED rd_kafka_op_t * +rd_kafka_q_last(rd_kafka_q_t *rkq, rd_kafka_op_type_t op_type, int allow_err) { + rd_kafka_op_t *rko; + TAILQ_FOREACH_REVERSE(rko, &rkq->rkq_q, rd_kafka_op_tailq, rko_link) { + if (rko->rko_type == op_type && (allow_err || !rko->rko_err)) + return rko; + } + + return NULL; } -void rd_kafka_q_io_event_enable (rd_kafka_q_t *rkq, rd_socket_t fd, - const void *payload, size_t size); +void rd_kafka_q_io_event_enable(rd_kafka_q_t *rkq, + rd_socket_t fd, + const void *payload, + size_t size); /* Public interface */ struct rd_kafka_queue_s { - rd_kafka_q_t *rkqu_q; - rd_kafka_t *rkqu_rk; - int rkqu_is_owner; /**< Is owner/creator of rkqu_q */ + rd_kafka_q_t *rkqu_q; + rd_kafka_t *rkqu_rk; + int rkqu_is_owner; /**< Is owner/creator of rkqu_q */ }; -rd_kafka_queue_t *rd_kafka_queue_new0 (rd_kafka_t *rk, rd_kafka_q_t *rkq); +rd_kafka_queue_t *rd_kafka_queue_new0(rd_kafka_t *rk, rd_kafka_q_t *rkq); -void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq); +void rd_kafka_q_dump(FILE *fp, rd_kafka_q_t *rkq); extern int RD_TLS rd_kafka_yield_thread; @@ -907,12 +919,11 @@ typedef struct rd_kafka_enq_once_s { * @brief Allocate and set up a new eonce and set the initial refcount to 1. * @remark This is to be called by the owner of the rko. 
*/ -static RD_INLINE RD_UNUSED -rd_kafka_enq_once_t * -rd_kafka_enq_once_new (rd_kafka_op_t *rko, rd_kafka_replyq_t replyq) { +static RD_INLINE RD_UNUSED rd_kafka_enq_once_t * +rd_kafka_enq_once_new(rd_kafka_op_t *rko, rd_kafka_replyq_t replyq) { rd_kafka_enq_once_t *eonce = rd_calloc(1, sizeof(*eonce)); mtx_init(&eonce->lock, mtx_plain); - eonce->rko = rko; + eonce->rko = rko; eonce->replyq = replyq; /* struct copy */ eonce->refcnt = 1; return eonce; @@ -924,10 +935,10 @@ rd_kafka_enq_once_new (rd_kafka_op_t *rko, rd_kafka_replyq_t replyq) { * * @remark This is to be called by the owner. */ -static RD_INLINE RD_UNUSED -void -rd_kafka_enq_once_reenable (rd_kafka_enq_once_t *eonce, - rd_kafka_op_t *rko, rd_kafka_replyq_t replyq) { +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_reenable(rd_kafka_enq_once_t *eonce, + rd_kafka_op_t *rko, + rd_kafka_replyq_t replyq) { mtx_lock(&eonce->lock); eonce->rko = rko; rd_kafka_replyq_destroy(&eonce->replyq); @@ -940,8 +951,8 @@ rd_kafka_enq_once_reenable (rd_kafka_enq_once_t *eonce, * @brief Free eonce and its resources. Must only be called with refcnt==0 * and eonce->lock NOT held. */ -static RD_INLINE RD_UNUSED -void rd_kafka_enq_once_destroy0 (rd_kafka_enq_once_t *eonce) { +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_destroy0(rd_kafka_enq_once_t *eonce) { /* This must not be called with the rko or replyq still set, which would * indicate that no enqueueing was performed and that the owner * did not clean up, which is a bug. */ @@ -963,9 +974,8 @@ void rd_kafka_enq_once_destroy0 (rd_kafka_enq_once_t *eonce) { * @param srcdesc a human-readable descriptive string of the source. * May be used for future debugging. 
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_enq_once_add_source (rd_kafka_enq_once_t *eonce, - const char *srcdesc) { +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_add_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { mtx_lock(&eonce->lock); eonce->refcnt++; mtx_unlock(&eonce->lock); @@ -983,9 +993,8 @@ void rd_kafka_enq_once_add_source (rd_kafka_enq_once_t *eonce, * This API is used to undo an add_source() from the * same code. */ -static RD_INLINE RD_UNUSED -void rd_kafka_enq_once_del_source (rd_kafka_enq_once_t *eonce, - const char *srcdesc) { +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_del_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { int do_destroy; mtx_lock(&eonce->lock); @@ -1006,7 +1015,7 @@ void rd_kafka_enq_once_del_source (rd_kafka_enq_once_t *eonce, * rd_list_destroy() and the trigger error code is * always RD_KAFKA_RESP_ERR__DESTROY. */ -void rd_kafka_enq_once_trigger_destroy (void *ptr); +void rd_kafka_enq_once_trigger_destroy(void *ptr); /** @@ -1018,9 +1027,9 @@ void rd_kafka_enq_once_trigger_destroy (void *ptr); * * @remark The rko remains set on the eonce. */ -static RD_INLINE RD_UNUSED -rd_kafka_op_t *rd_kafka_enq_once_del_source_return (rd_kafka_enq_once_t *eonce, - const char *srcdesc) { +static RD_INLINE RD_UNUSED rd_kafka_op_t * +rd_kafka_enq_once_del_source_return(rd_kafka_enq_once_t *eonce, + const char *srcdesc) { rd_bool_t do_destroy; rd_kafka_op_t *rko; @@ -1050,12 +1059,12 @@ rd_kafka_op_t *rd_kafka_enq_once_del_source_return (rd_kafka_enq_once_t *eonce, * * @remark Must only be called by sources (non-owner). 
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_enq_once_trigger (rd_kafka_enq_once_t *eonce, - rd_kafka_resp_err_t err, - const char *srcdesc) { +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_trigger(rd_kafka_enq_once_t *eonce, + rd_kafka_resp_err_t err, + const char *srcdesc) { int do_destroy; - rd_kafka_op_t *rko = NULL; + rd_kafka_op_t *rko = NULL; rd_kafka_replyq_t replyq = RD_ZERO_INIT; mtx_lock(&eonce->lock); @@ -1071,7 +1080,7 @@ void rd_kafka_enq_once_trigger (rd_kafka_enq_once_t *eonce, * if the replyq has been disabled and the ops * destructor is called (which might then access the eonce * to clean up). */ - rko = eonce->rko; + rko = eonce->rko; replyq = eonce->replyq; eonce->rko = NULL; @@ -1097,9 +1106,9 @@ void rd_kafka_enq_once_trigger (rd_kafka_enq_once_t *eonce, * @brief Destroy eonce, must only be called by the owner. * There may be outstanding refcounts by non-owners after this call */ -static RD_INLINE RD_UNUSED -void rd_kafka_enq_once_destroy (rd_kafka_enq_once_t *eonce) { - int do_destroy; +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_destroy(rd_kafka_enq_once_t *eonce) { + int do_destroy; mtx_lock(&eonce->lock); rd_assert(eonce->refcnt > 0); @@ -1130,10 +1139,10 @@ void rd_kafka_enq_once_destroy (rd_kafka_enq_once_t *eonce) { * * @returns the eonce's rko object, if still available, else NULL. 
*/ -static RD_INLINE RD_UNUSED -rd_kafka_op_t *rd_kafka_enq_once_disable (rd_kafka_enq_once_t *eonce) { - int do_destroy; - rd_kafka_op_t *rko; +static RD_INLINE RD_UNUSED rd_kafka_op_t * +rd_kafka_enq_once_disable(rd_kafka_enq_once_t *eonce) { + int do_destroy; + rd_kafka_op_t *rko; mtx_lock(&eonce->lock); rd_assert(eonce->refcnt > 0); @@ -1141,7 +1150,7 @@ rd_kafka_op_t *rd_kafka_enq_once_disable (rd_kafka_enq_once_t *eonce) { do_destroy = eonce->refcnt == 0; /* May be NULL */ - rko = eonce->rko; + rko = eonce->rko; eonce->rko = NULL; rd_kafka_replyq_destroy(&eonce->replyq); diff --git a/src/rdkafka_range_assignor.c b/src/rdkafka_range_assignor.c index 1af3eef8a9..c83f1f1a44 100644 --- a/src/rdkafka_range_assignor.c +++ b/src/rdkafka_range_assignor.c @@ -30,19 +30,20 @@ - - - /** - * Source: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java + * Source: + * https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java * - * The range assignor works on a per-topic basis. For each topic, we lay out the available partitions in numeric order - * and the consumers in lexicographic order. We then divide the number of partitions by the total number of - * consumers to determine the number of partitions to assign to each consumer. If it does not evenly - * divide, then the first few consumers will have one extra partition. + * The range assignor works on a per-topic basis. For each topic, we lay out the + * available partitions in numeric order and the consumers in lexicographic + * order. We then divide the number of partitions by the total number of + * consumers to determine the number of partitions to assign to each consumer. + * If it does not evenly divide, then the first few consumers will have one + * extra partition. 
* - * For example, suppose there are two consumers C0 and C1, two topics t0 and t1, and each topic has 3 partitions, - * resulting in partitions t0p0, t0p1, t0p2, t1p0, t1p1, and t1p2. + * For example, suppose there are two consumers C0 and C1, two topics t0 and t1, + * and each topic has 3 partitions, resulting in partitions t0p0, t0p1, t0p2, + * t1p0, t1p1, and t1p2. * * The assignment will be: * C0: [t0p0, t0p1, t1p0, t1p1] @@ -50,21 +51,22 @@ */ rd_kafka_resp_err_t -rd_kafka_range_assignor_assign_cb (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas, - const char *member_id, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, size_t errstr_size, - void *opaque) { +rd_kafka_range_assignor_assign_cb(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque) { unsigned int ti; int i; /* The range assignor works on a per-topic basis. */ - for (ti = 0 ; ti < eligible_topic_cnt ; ti++) { + for (ti = 0; ti < eligible_topic_cnt; ti++) { rd_kafka_assignor_topic_t *eligible_topic = eligible_topics[ti]; int numPartitionsPerConsumer; int consumersWithExtraPartition; @@ -72,50 +74,51 @@ rd_kafka_range_assignor_assign_cb (rd_kafka_t *rk, /* For each topic, we lay out the available partitions in * numeric order and the consumers in lexicographic order. */ rd_list_sort(&eligible_topic->members, - rd_kafka_group_member_cmp); + rd_kafka_group_member_cmp); - /* We then divide the number of partitions by the total number of - * consumers to determine the number of partitions to assign to - * each consumer. 
*/ + /* We then divide the number of partitions by the total number + * of consumers to determine the number of partitions to assign + * to each consumer. */ numPartitionsPerConsumer = - eligible_topic->metadata->partition_cnt / - rd_list_cnt(&eligible_topic->members); + eligible_topic->metadata->partition_cnt / + rd_list_cnt(&eligible_topic->members); /* If it does not evenly divide, then the first few consumers * will have one extra partition. */ - consumersWithExtraPartition = - eligible_topic->metadata->partition_cnt % - rd_list_cnt(&eligible_topic->members); - - rd_kafka_dbg(rk, CGRP, "ASSIGN", - "range: Topic %s with %d partition(s) and " - "%d subscribing member(s)", - eligible_topic->metadata->topic, - eligible_topic->metadata->partition_cnt, - rd_list_cnt(&eligible_topic->members)); - - for (i = 0 ; i < rd_list_cnt(&eligible_topic->members) ; i++) { - rd_kafka_group_member_t *rkgm = - rd_list_elem(&eligible_topic->members, i); - int start = numPartitionsPerConsumer * i + - RD_MIN(i, consumersWithExtraPartition); - int length = numPartitionsPerConsumer + - (i + 1 > consumersWithExtraPartition ? 
0 : 1); - - if (length == 0) - continue; - - rd_kafka_dbg(rk, CGRP, "ASSIGN", - "range: Member \"%s\": " - "assigned topic %s partitions %d..%d", - rkgm->rkgm_member_id->str, - eligible_topic->metadata->topic, - start, start+length-1); - rd_kafka_topic_partition_list_add_range( - rkgm->rkgm_assignment, - eligible_topic->metadata->topic, - start, start+length-1); - } + consumersWithExtraPartition = + eligible_topic->metadata->partition_cnt % + rd_list_cnt(&eligible_topic->members); + + rd_kafka_dbg(rk, CGRP, "ASSIGN", + "range: Topic %s with %d partition(s) and " + "%d subscribing member(s)", + eligible_topic->metadata->topic, + eligible_topic->metadata->partition_cnt, + rd_list_cnt(&eligible_topic->members)); + + for (i = 0; i < rd_list_cnt(&eligible_topic->members); i++) { + rd_kafka_group_member_t *rkgm = + rd_list_elem(&eligible_topic->members, i); + int start = numPartitionsPerConsumer * i + + RD_MIN(i, consumersWithExtraPartition); + int length = + numPartitionsPerConsumer + + (i + 1 > consumersWithExtraPartition ? 0 : 1); + + if (length == 0) + continue; + + rd_kafka_dbg(rk, CGRP, "ASSIGN", + "range: Member \"%s\": " + "assigned topic %s partitions %d..%d", + rkgm->rkgm_member_id->str, + eligible_topic->metadata->topic, start, + start + length - 1); + rd_kafka_topic_partition_list_add_range( + rkgm->rkgm_assignment, + eligible_topic->metadata->topic, start, + start + length - 1); + } } return 0; @@ -126,11 +129,10 @@ rd_kafka_range_assignor_assign_cb (rd_kafka_t *rk, /** * @brief Initialzie and add range assignor. 
*/ -rd_kafka_resp_err_t rd_kafka_range_assignor_init (rd_kafka_t *rk) { +rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk) { return rd_kafka_assignor_add( - rk, "consumer", "range", - RD_KAFKA_REBALANCE_PROTOCOL_EAGER, - rd_kafka_range_assignor_assign_cb, - rd_kafka_assignor_get_metadata_with_empty_userdata, - NULL, NULL, NULL, NULL); + rk, "consumer", "range", RD_KAFKA_REBALANCE_PROTOCOL_EAGER, + rd_kafka_range_assignor_assign_cb, + rd_kafka_assignor_get_metadata_with_empty_userdata, NULL, NULL, + NULL, NULL); } diff --git a/src/rdkafka_request.c b/src/rdkafka_request.c index e32952a5f3..b4bc684302 100644 --- a/src/rdkafka_request.c +++ b/src/rdkafka_request.c @@ -55,22 +55,14 @@ /* RD_KAFKA_ERR_ACTION_.. to string map */ static const char *rd_kafka_actions_descs[] = { - "Permanent", - "Ignore", - "Refresh", - "Retry", - "Inform", - "Special", - "MsgNotPersisted", - "MsgPossiblyPersisted", - "MsgPersisted", - NULL, + "Permanent", "Ignore", "Refresh", "Retry", + "Inform", "Special", "MsgNotPersisted", "MsgPossiblyPersisted", + "MsgPersisted", NULL, }; -const char *rd_kafka_actions2str (int actions) { +const char *rd_kafka_actions2str(int actions) { static RD_TLS char actstr[128]; - return rd_flags2str(actstr, sizeof(actstr), - rd_kafka_actions_descs, + return rd_flags2str(actstr, sizeof(actstr), rd_kafka_actions_descs, actions); } @@ -84,42 +76,42 @@ const char *rd_kafka_actions2str (int actions) { * * @warning \p request, \p rkbuf and \p rkb may be NULL. */ -int rd_kafka_err_action (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const rd_kafka_buf_t *request, ...) { - va_list ap; +int rd_kafka_err_action(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + const rd_kafka_buf_t *request, + ...) { + va_list ap; int actions = 0; - int exp_act; + int exp_act; if (!err) return 0; - /* Match explicitly defined error mappings first. 
*/ - va_start(ap, request); - while ((exp_act = va_arg(ap, int))) { - int exp_err = va_arg(ap, int); + /* Match explicitly defined error mappings first. */ + va_start(ap, request); + while ((exp_act = va_arg(ap, int))) { + int exp_err = va_arg(ap, int); - if (err == exp_err) - actions |= exp_act; - } - va_end(ap); + if (err == exp_err) + actions |= exp_act; + } + va_end(ap); /* Explicit error match. */ if (actions) { if (err && rkb && request) - rd_rkb_dbg(rkb, BROKER, "REQERR", - "%sRequest failed: %s: explicit actions %s", - rd_kafka_ApiKey2str(request->rkbuf_reqhdr. - ApiKey), - rd_kafka_err2str(err), - rd_kafka_actions2str(actions)); + rd_rkb_dbg( + rkb, BROKER, "REQERR", + "%sRequest failed: %s: explicit actions %s", + rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), + rd_kafka_err2str(err), + rd_kafka_actions2str(actions)); return actions; } /* Default error matching */ - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR_NO_ERROR: break; case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: @@ -130,42 +122,42 @@ int rd_kafka_err_action (rd_kafka_broker_t *rkb, case RD_KAFKA_RESP_ERR_NOT_COORDINATOR: case RD_KAFKA_RESP_ERR__WAIT_COORD: /* Request metadata information update */ - actions |= RD_KAFKA_ERR_ACTION_REFRESH| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_REFRESH | + RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; break; case RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR: /* Request metadata update and retry */ - actions |= RD_KAFKA_ERR_ACTION_REFRESH| - RD_KAFKA_ERR_ACTION_RETRY| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_REFRESH | + RD_KAFKA_ERR_ACTION_RETRY | + RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; break; case RD_KAFKA_RESP_ERR__TRANSPORT: case RD_KAFKA_RESP_ERR__TIMED_OUT: case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT: case RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND: - actions |= RD_KAFKA_ERR_ACTION_RETRY| - RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_RETRY | + 
RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; break; case RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS: /* Client-side wait-response/in-queue timeout */ case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE: - actions |= RD_KAFKA_ERR_ACTION_RETRY| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_RETRY | + RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; break; case RD_KAFKA_RESP_ERR__PURGE_INFLIGHT: - actions |= RD_KAFKA_ERR_ACTION_PERMANENT| - RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_PERMANENT | + RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; break; case RD_KAFKA_RESP_ERR__BAD_MSG: /* Buffer parse failures are typically a client-side bug, * treat them as permanent failures. */ - actions |= RD_KAFKA_ERR_ACTION_PERMANENT| - RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_PERMANENT | + RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; break; case RD_KAFKA_RESP_ERR__DESTROY: @@ -173,13 +165,14 @@ int rd_kafka_err_action (rd_kafka_broker_t *rkb, case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE: case RD_KAFKA_RESP_ERR__PURGE_QUEUE: default: - actions |= RD_KAFKA_ERR_ACTION_PERMANENT| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_PERMANENT | + RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; break; } /* Fatal or permanent errors are not retriable */ - if (actions & (RD_KAFKA_ERR_ACTION_FATAL|RD_KAFKA_ERR_ACTION_PERMANENT)) + if (actions & + (RD_KAFKA_ERR_ACTION_FATAL | RD_KAFKA_ERR_ACTION_PERMANENT)) actions &= ~RD_KAFKA_ERR_ACTION_RETRY; /* If no request buffer was specified, which might be the case @@ -191,11 +184,10 @@ int rd_kafka_err_action (rd_kafka_broker_t *rkb, actions &= ~RD_KAFKA_ERR_ACTION_MSG_FLAGS; if (err && actions && rkb && request) - rd_rkb_dbg(rkb, BROKER, "REQERR", - "%sRequest failed: %s: actions %s", - rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), - rd_kafka_err2str(err), - rd_kafka_actions2str(actions)); + rd_rkb_dbg( + rkb, BROKER, "REQERR", "%sRequest failed: %s: actions %s", + 
rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), + rd_kafka_err2str(err), rd_kafka_actions2str(actions)); return actions; } @@ -211,19 +203,19 @@ int rd_kafka_err_action (rd_kafka_broker_t *rkb, * @returns a newly allocated list on success, or NULL on parse error. */ rd_kafka_topic_partition_list_t * -rd_kafka_buf_read_topic_partitions (rd_kafka_buf_t *rkbuf, - size_t estimated_part_cnt, - rd_bool_t read_offset, - rd_bool_t read_part_errs) { +rd_kafka_buf_read_topic_partitions(rd_kafka_buf_t *rkbuf, + size_t estimated_part_cnt, + rd_bool_t read_offset, + rd_bool_t read_part_errs) { const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; + int16_t ErrorCode = 0; int32_t TopicArrayCnt; rd_kafka_topic_partition_list_t *parts = NULL; rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX); parts = rd_kafka_topic_partition_list_new( - RD_MAX(TopicArrayCnt, (int)estimated_part_cnt)); + RD_MAX(TopicArrayCnt, (int)estimated_part_cnt)); while (TopicArrayCnt-- > 0) { rd_kafkap_str_t kTopic; @@ -243,8 +235,8 @@ rd_kafka_buf_read_topic_partitions (rd_kafka_buf_t *rkbuf, rd_kafka_buf_read_i32(rkbuf, &Partition); - rktpar = rd_kafka_topic_partition_list_add( - parts, topic, Partition); + rktpar = rd_kafka_topic_partition_list_add(parts, topic, + Partition); if (read_offset) { rd_kafka_buf_read_i64(rkbuf, &Offset); @@ -264,7 +256,7 @@ rd_kafka_buf_read_topic_partitions (rd_kafka_buf_t *rkbuf, return parts; - err_parse: +err_parse: if (parts) rd_kafka_topic_partition_list_destroy(parts); @@ -279,22 +271,22 @@ rd_kafka_buf_read_topic_partitions (rd_kafka_buf_t *rkbuf, * * @remark The \p parts list MUST be sorted. 
*/ -int rd_kafka_buf_write_topic_partitions ( - rd_kafka_buf_t *rkbuf, - const rd_kafka_topic_partition_list_t *parts, - rd_bool_t skip_invalid_offsets, - rd_bool_t only_invalid_offsets, - rd_bool_t write_Offset, - rd_bool_t write_Epoch, - rd_bool_t write_Metadata) { +int rd_kafka_buf_write_topic_partitions( + rd_kafka_buf_t *rkbuf, + const rd_kafka_topic_partition_list_t *parts, + rd_bool_t skip_invalid_offsets, + rd_bool_t only_invalid_offsets, + rd_bool_t write_Offset, + rd_bool_t write_Epoch, + rd_bool_t write_Metadata) { size_t of_TopicArrayCnt; size_t of_PartArrayCnt = 0; int TopicArrayCnt = 0, PartArrayCnt = 0; int i; const char *prev_topic = NULL; - int cnt = 0; + int cnt = 0; rd_bool_t partition_id_only = - !write_Offset && !write_Epoch && !write_Metadata; + !write_Offset && !write_Epoch && !write_Metadata; rd_assert(!only_invalid_offsets || (only_invalid_offsets != skip_invalid_offsets)); @@ -302,7 +294,7 @@ int rd_kafka_buf_write_topic_partitions ( /* TopicArrayCnt */ of_TopicArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf); - for (i = 0 ; i < parts->cnt ; i++) { + for (i = 0; i < parts->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = &parts->elems[i]; if (rktpar->offset < 0) { @@ -314,9 +306,8 @@ int rd_kafka_buf_write_topic_partitions ( if (!prev_topic || strcmp(rktpar->topic, prev_topic)) { /* Finish previous topic, if any. 
*/ if (of_PartArrayCnt > 0) { - rd_kafka_buf_finalize_arraycnt(rkbuf, - of_PartArrayCnt, - PartArrayCnt); + rd_kafka_buf_finalize_arraycnt( + rkbuf, of_PartArrayCnt, PartArrayCnt); /* Tags for previous topic struct */ rd_kafka_buf_write_tags(rkbuf); } @@ -331,7 +322,7 @@ int rd_kafka_buf_write_topic_partitions ( /* PartitionArrayCnt: updated later */ of_PartArrayCnt = - rd_kafka_buf_write_arraycnt_pos(rkbuf); + rd_kafka_buf_write_arraycnt_pos(rkbuf); } /* Partition */ @@ -356,8 +347,7 @@ int rd_kafka_buf_write_topic_partitions ( if (!rktpar->metadata) rd_kafka_buf_write_str(rkbuf, "", 0); else - rd_kafka_buf_write_str(rkbuf, - rktpar->metadata, + rd_kafka_buf_write_str(rkbuf, rktpar->metadata, rktpar->metadata_size); } @@ -369,14 +359,13 @@ int rd_kafka_buf_write_topic_partitions ( } if (of_PartArrayCnt > 0) { - rd_kafka_buf_finalize_arraycnt(rkbuf, - of_PartArrayCnt, PartArrayCnt); + rd_kafka_buf_finalize_arraycnt(rkbuf, of_PartArrayCnt, + PartArrayCnt); /* Tags for topic struct */ rd_kafka_buf_write_tags(rkbuf); - } + } - rd_kafka_buf_finalize_arraycnt(rkbuf, - of_TopicArrayCnt, TopicArrayCnt); + rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt, TopicArrayCnt); return cnt; } @@ -389,17 +378,17 @@ int rd_kafka_buf_write_topic_partitions ( * and the transactional.id for RD_KAFKA_COORD_TXN */ rd_kafka_resp_err_t -rd_kafka_FindCoordinatorRequest (rd_kafka_broker_t *rkb, - rd_kafka_coordtype_t coordtype, - const char *coordkey, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_FindCoordinatorRequest(rd_kafka_broker_t *rkb, + rd_kafka_coordtype_t coordtype, + const char *coordkey, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion; ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_FindCoordinator, 0, 2, NULL); + rkb, RD_KAFKAP_FindCoordinator, 0, 2, NULL); if (coordtype != RD_KAFKA_COORD_GROUP && ApiVersion < 1) return 
RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; @@ -421,7 +410,6 @@ rd_kafka_FindCoordinatorRequest (rd_kafka_broker_t *rkb, - /** * @brief Parses a ListOffsets reply. * @@ -433,8 +421,8 @@ rd_kafka_FindCoordinatorRequest (rd_kafka_broker_t *rkb, * partition error codes should be checked by the caller). */ static rd_kafka_resp_err_t -rd_kafka_parse_ListOffsets (rd_kafka_buf_t *rkbuf, - rd_kafka_topic_partition_list_t *offsets) { +rd_kafka_parse_ListOffsets(rd_kafka_buf_t *rkbuf, + rd_kafka_topic_partition_list_t *offsets) { const int log_decode_errors = LOG_ERR; int32_t TopicArrayCnt; int16_t api_version; @@ -485,18 +473,18 @@ rd_kafka_parse_ListOffsets (rd_kafka_buf_t *rkbuf, } rktpar = rd_kafka_topic_partition_list_add( - offsets, topic_name, kpartition); - rktpar->err = ErrorCode; + offsets, topic_name, kpartition); + rktpar->err = ErrorCode; rktpar->offset = Offset; - if (ErrorCode && !all_err) + if (ErrorCode && !all_err) all_err = ErrorCode; } } return all_err; - err_parse: +err_parse: return rkbuf->rkbuf_err; } @@ -513,14 +501,14 @@ rd_kafka_parse_ListOffsets (rd_kafka_buf_t *rkbuf, * On error \p actionsp (unless NULL) is updated with the recommended * error actions. 
*/ -rd_kafka_resp_err_t rd_kafka_handle_ListOffsets (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t - *offsets, - int *actionsp) { +rd_kafka_resp_err_t +rd_kafka_handle_ListOffsets(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t *offsets, + int *actionsp) { int actions; @@ -530,53 +518,46 @@ rd_kafka_resp_err_t rd_kafka_handle_ListOffsets (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR_NO_ERROR; actions = rd_kafka_err_action( - rkb, err, request, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + rkb, err, request, RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, - RD_KAFKA_ERR_ACTION_REFRESH, - RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, + RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, - RD_KAFKA_ERR_ACTION_REFRESH, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, + RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, - RD_KAFKA_ERR_ACTION_REFRESH, - RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, + RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, - RD_KAFKA_ERR_ACTION_REFRESH, - RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE, + RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE, - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE, + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE, - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH, + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR__TRANSPORT, + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TRANSPORT, - RD_KAFKA_ERR_ACTION_RETRY, - 
RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); if (actionsp) *actionsp = actions; if (rkb) - rd_rkb_dbg(rkb, TOPIC, "OFFSET", - "OffsetRequest failed: %s (%s)", - rd_kafka_err2str(err), - rd_kafka_actions2str(actions)); + rd_rkb_dbg( + rkb, TOPIC, "OFFSET", "OffsetRequest failed: %s (%s)", + rd_kafka_err2str(err), rd_kafka_actions2str(actions)); if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { char tmp[256]; /* Re-query for leader */ - rd_snprintf(tmp, sizeof(tmp), - "ListOffsetsRequest failed: %s", + rd_snprintf(tmp, sizeof(tmp), "ListOffsetsRequest failed: %s", rd_kafka_err2str(err)); rd_kafka_metadata_refresh_known_topics(rk, NULL, - rd_true/*force*/, tmp); + rd_true /*force*/, tmp); } if ((actions & RD_KAFKA_ERR_ACTION_RETRY) && @@ -592,20 +573,19 @@ rd_kafka_resp_err_t rd_kafka_handle_ListOffsets (rd_kafka_t *rk, * @brief Async maker for ListOffsetsRequest. */ static rd_kafka_resp_err_t -rd_kafka_make_ListOffsetsRequest (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - void *make_opaque) { +rd_kafka_make_ListOffsetsRequest(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + void *make_opaque) { const rd_kafka_topic_partition_list_t *partitions = - (const rd_kafka_topic_partition_list_t *)make_opaque; + (const rd_kafka_topic_partition_list_t *)make_opaque; int i; size_t of_TopicArrayCnt = 0, of_PartArrayCnt = 0; const char *last_topic = ""; int32_t topic_cnt = 0, part_cnt = 0; int16_t ApiVersion; - ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, - RD_KAFKAP_ListOffsets, - 0, 2, NULL); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_ListOffsets, 0, 2, NULL); if (ApiVersion == -1) return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; @@ -620,15 +600,14 @@ rd_kafka_make_ListOffsetsRequest (rd_kafka_broker_t *rkb, /* TopicArrayCnt */ of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); /* updated later */ - for (i = 0 ; i < 
partitions->cnt ; i++) { + for (i = 0; i < partitions->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &partitions->elems[i]; + &partitions->elems[i]; if (strcmp(rktpar->topic, last_topic)) { /* Finish last topic, if any. */ if (of_PartArrayCnt > 0) - rd_kafka_buf_update_i32(rkbuf, - of_PartArrayCnt, + rd_kafka_buf_update_i32(rkbuf, of_PartArrayCnt, part_cnt); /* Topic */ @@ -664,9 +643,9 @@ rd_kafka_make_ListOffsetsRequest (rd_kafka_broker_t *rkb, rd_rkb_dbg(rkb, TOPIC, "OFFSET", "ListOffsetsRequest (v%hd, opv %d) " - "for %"PRId32" topic(s) and %"PRId32" partition(s)", - ApiVersion, rkbuf->rkbuf_replyq.version, - topic_cnt, partitions->cnt); + "for %" PRId32 " topic(s) and %" PRId32 " partition(s)", + ApiVersion, rkbuf->rkbuf_replyq.version, topic_cnt, + partitions->cnt); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -675,11 +654,11 @@ rd_kafka_make_ListOffsetsRequest (rd_kafka_broker_t *rkb, /** * @brief Send ListOffsetsRequest for partitions in \p partitions. */ -void rd_kafka_ListOffsetsRequest (rd_kafka_broker_t *rkb, - rd_kafka_topic_partition_list_t *partitions, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_ListOffsetsRequest(rd_kafka_broker_t *rkb, + rd_kafka_topic_partition_list_t *partitions, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; rd_kafka_topic_partition_list_t *make_parts; @@ -687,13 +666,13 @@ void rd_kafka_ListOffsetsRequest (rd_kafka_broker_t *rkb, rd_kafka_topic_partition_list_sort_by_topic(make_parts); rkbuf = rd_kafka_buf_new_request( - rkb, RD_KAFKAP_ListOffsets, 1, - /* ReplicaId+IsolationLevel+TopicArrayCnt+Topic */ - 4+1+4+100+ + rkb, RD_KAFKAP_ListOffsets, 1, + /* ReplicaId+IsolationLevel+TopicArrayCnt+Topic */ + 4 + 1 + 4 + 100 + /* PartArrayCnt */ 4 + /* partition_cnt * Partition+Time+MaxNumOffs */ - (make_parts->cnt * (4+8+4))); + (make_parts->cnt * (4 + 8 + 4))); /* Postpone creating the request contents until time to send, * at 
which time the ApiVersion is known. */ @@ -717,15 +696,15 @@ void rd_kafka_ListOffsetsRequest (rd_kafka_broker_t *rkb, * in \p *offsets. */ rd_kafka_resp_err_t -rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t **offsets, - rd_bool_t update_toppar, - rd_bool_t add_part, - rd_bool_t allow_retry) { +rd_kafka_handle_OffsetFetch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t **offsets, + rd_bool_t update_toppar, + rd_bool_t add_part, + rd_bool_t allow_retry) { const int log_decode_errors = LOG_ERR; int32_t TopicArrayCnt; int64_t offset = RD_KAFKA_OFFSET_INVALID; @@ -753,7 +732,7 @@ rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, 0 /* !is commit */); rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX); - for (i = 0 ; i < TopicArrayCnt ; i++) { + for (i = 0; i < TopicArrayCnt; i++) { rd_kafkap_str_t topic; int32_t PartArrayCnt; char *topic_name; @@ -766,7 +745,7 @@ rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, RD_KAFKAP_STR_DUPA(&topic_name, &topic); - for (j = 0 ; j < PartArrayCnt ; j++) { + for (j = 0; j < PartArrayCnt; j++) { int32_t partition; rd_kafka_toppar_t *rktp; rd_kafka_topic_partition_t *rktpar; @@ -781,51 +760,51 @@ rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, rd_kafka_buf_read_i16(rkbuf, &err2); rd_kafka_buf_skip_tags(rkbuf); - rktpar = rd_kafka_topic_partition_list_find(*offsets, - topic_name, - partition); + rktpar = rd_kafka_topic_partition_list_find( + *offsets, topic_name, partition); if (!rktpar && add_part) rktpar = rd_kafka_topic_partition_list_add( - *offsets, topic_name, partition); + *offsets, topic_name, partition); else if (!rktpar) { - rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH", - "OffsetFetchResponse: %s [%"PRId32"] " - "not found in local list: ignoring", - topic_name, partition); + 
rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH", + "OffsetFetchResponse: %s [%" PRId32 + "] " + "not found in local list: ignoring", + topic_name, partition); continue; - } + } seen_cnt++; - if (!(rktp = rktpar->_private)) { - rktp = rd_kafka_toppar_get2(rkb->rkb_rk, - topic_name, - partition, 0, 0); - /* May be NULL if topic is not locally known */ - rktpar->_private = rktp; - } - - /* broker reports invalid offset as -1 */ - if (offset == -1) - rktpar->offset = RD_KAFKA_OFFSET_INVALID; - else - rktpar->offset = offset; + if (!(rktp = rktpar->_private)) { + rktp = rd_kafka_toppar_get2( + rkb->rkb_rk, topic_name, partition, 0, 0); + /* May be NULL if topic is not locally known */ + rktpar->_private = rktp; + } + + /* broker reports invalid offset as -1 */ + if (offset == -1) + rktpar->offset = RD_KAFKA_OFFSET_INVALID; + else + rktpar->offset = offset; rktpar->err = err2; - rd_rkb_dbg( - rkb, TOPIC, "OFFSETFETCH", - "OffsetFetchResponse: %s [%"PRId32"] " - "offset %"PRId64", metadata %d byte(s): %s", - topic_name, partition, offset, - RD_KAFKAP_STR_LEN(&metadata), - rd_kafka_err2name(rktpar->err)); - - if (update_toppar && !err2 && rktp) { - /* Update toppar's committed offset */ - rd_kafka_toppar_lock(rktp); - rktp->rktp_committed_offset = rktpar->offset; - rd_kafka_toppar_unlock(rktp); - } + rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH", + "OffsetFetchResponse: %s [%" PRId32 + "] " + "offset %" PRId64 + ", metadata %d byte(s): %s", + topic_name, partition, offset, + RD_KAFKAP_STR_LEN(&metadata), + rd_kafka_err2name(rktpar->err)); + + if (update_toppar && !err2 && rktp) { + /* Update toppar's committed offset */ + rd_kafka_toppar_lock(rktp); + rktp->rktp_committed_offset = rktpar->offset; + rd_kafka_toppar_unlock(rktp); + } if (rktpar->err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) @@ -836,12 +815,12 @@ rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, rd_free(rktpar->metadata); if (RD_KAFKAP_STR_IS_NULL(&metadata)) { - rktpar->metadata = NULL; + rktpar->metadata = NULL; 
rktpar->metadata_size = 0; } else { rktpar->metadata = RD_KAFKAP_STR_DUP(&metadata); rktpar->metadata_size = - RD_KAFKAP_STR_LEN(&metadata); + RD_KAFKAP_STR_LEN(&metadata); } } @@ -858,26 +837,24 @@ rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, } - err: +err: if (!*offsets) - rd_rkb_dbg(rkb, TOPIC, "OFFFETCH", - "OffsetFetch returned %s", rd_kafka_err2str(err)); + rd_rkb_dbg(rkb, TOPIC, "OFFFETCH", "OffsetFetch returned %s", + rd_kafka_err2str(err)); else rd_rkb_dbg(rkb, TOPIC, "OFFFETCH", "OffsetFetch for %d/%d partition(s) " "(%d unstable partition(s)) returned %s", - seen_cnt, - (*offsets)->cnt, - retry_unstable, rd_kafka_err2str(err)); + seen_cnt, (*offsets)->cnt, retry_unstable, + rd_kafka_err2str(err)); - actions = rd_kafka_err_action(rkb, err, request, - RD_KAFKA_ERR_ACTION_END); + actions = + rd_kafka_err_action(rkb, err, request, RD_KAFKA_ERR_ACTION_END); if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { /* Re-query for coordinator */ - rd_kafka_cgrp_op(rkb->rkb_rk->rk_cgrp, NULL, - RD_KAFKA_NO_REPLYQ, - RD_KAFKA_OP_COORD_QUERY, err); + rd_kafka_cgrp_op(rkb->rkb_rk->rk_cgrp, NULL, RD_KAFKA_NO_REPLYQ, + RD_KAFKA_OP_COORD_QUERY, err); } if (actions & RD_KAFKA_ERR_ACTION_RETRY || retry_unstable) { @@ -886,9 +863,9 @@ rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, /* FALLTHRU */ } - return err; + return err; - err_parse: +err_parse: err = rkbuf->rkbuf_err; goto err; } @@ -912,17 +889,17 @@ rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, * * @locality cgrp's broker thread */ -void rd_kafka_op_handle_OffsetFetch (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { rd_kafka_op_t *rko = opaque; rd_kafka_op_t *rko_reply; rd_kafka_topic_partition_list_t *offsets; - RD_KAFKA_OP_TYPE_ASSERT(rko, 
RD_KAFKA_OP_OFFSET_FETCH); + RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_OFFSET_FETCH); if (err == RD_KAFKA_RESP_ERR__DESTROY) { /* Termination, quick cleanup. */ @@ -931,21 +908,19 @@ void rd_kafka_op_handle_OffsetFetch (rd_kafka_t *rk, } offsets = rd_kafka_topic_partition_list_copy( - rko->rko_u.offset_fetch.partitions); + rko->rko_u.offset_fetch.partitions); /* If all partitions already had usable offsets then there * was no request sent and thus no reply, the offsets list is * good to go.. */ if (rkbuf) { /* ..else parse the response (or perror) */ - err = rd_kafka_handle_OffsetFetch(rkb->rkb_rk, rkb, err, rkbuf, - request, &offsets, - rd_false/*dont update rktp*/, - rd_false/*dont add part*/, - /* Allow retries if replyq - * is valid */ - rd_kafka_op_replyq_is_valid( - rko)); + err = rd_kafka_handle_OffsetFetch( + rkb->rkb_rk, rkb, err, rkbuf, request, &offsets, + rd_false /*dont update rktp*/, rd_false /*dont add part*/, + /* Allow retries if replyq + * is valid */ + rd_kafka_op_replyq_is_valid(rko)); if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) { if (offsets) rd_kafka_topic_partition_list_destroy(offsets); @@ -953,23 +928,21 @@ void rd_kafka_op_handle_OffsetFetch (rd_kafka_t *rk, } } - rko_reply = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH|RD_KAFKA_OP_REPLY); - rko_reply->rko_err = err; + rko_reply = + rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY); + rko_reply->rko_err = err; rko_reply->rko_u.offset_fetch.partitions = offsets; - rko_reply->rko_u.offset_fetch.do_free = 1; - if (rko->rko_rktp) - rko_reply->rko_rktp = rd_kafka_toppar_keep(rko->rko_rktp); + rko_reply->rko_u.offset_fetch.do_free = 1; + if (rko->rko_rktp) + rko_reply->rko_rktp = rd_kafka_toppar_keep(rko->rko_rktp); - rd_kafka_replyq_enq(&rko->rko_replyq, rko_reply, 0); + rd_kafka_replyq_enq(&rko->rko_replyq, rko_reply, 0); rd_kafka_op_destroy(rko); } - - - /** * Send OffsetFetchRequest for toppar. 
* @@ -980,28 +953,24 @@ void rd_kafka_op_handle_OffsetFetch (rd_kafka_t *rk, * @param require_stable Whether broker should return unstable offsets * (not yet transaction-committed). */ -void rd_kafka_OffsetFetchRequest (rd_kafka_broker_t *rkb, - rd_kafka_topic_partition_list_t *parts, - rd_bool_t require_stable, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, + rd_kafka_topic_partition_list_t *parts, + rd_bool_t require_stable, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion; int PartCnt = 0; ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, - RD_KAFKAP_OffsetFetch, - 0, 7, NULL); + rkb, RD_KAFKAP_OffsetFetch, 0, 7, NULL); rkbuf = rd_kafka_buf_new_flexver_request( - rkb, RD_KAFKAP_OffsetFetch, 1, - RD_KAFKAP_STR_SIZE(rkb->rkb_rk->rk_group_id) + - 4 + - (parts->cnt * 32) + - 1, - ApiVersion >= 6 /*flexver*/); + rkb, RD_KAFKAP_OffsetFetch, 1, + RD_KAFKAP_STR_SIZE(rkb->rkb_rk->rk_group_id) + 4 + + (parts->cnt * 32) + 1, + ApiVersion >= 6 /*flexver*/); /* ConsumerGroup */ rd_kafka_buf_write_kstr(rkbuf, rkb->rkb_rk->rk_group_id); @@ -1011,12 +980,9 @@ void rd_kafka_OffsetFetchRequest (rd_kafka_broker_t *rkb, /* Write partition list, filtering out partitions with valid offsets */ PartCnt = rd_kafka_buf_write_topic_partitions( - rkbuf, parts, - rd_false/*include invalid offsets*/, - rd_false/*skip valid offsets */, - rd_false/*don't write offsets*/, - rd_false/*don't write epoch */, - rd_false/*don't write metadata*/); + rkbuf, parts, rd_false /*include invalid offsets*/, + rd_false /*skip valid offsets */, rd_false /*don't write offsets*/, + rd_false /*don't write epoch */, rd_false /*don't write metadata*/); if (ApiVersion >= 7) { /* RequireStable */ @@ -1026,8 +992,8 @@ void rd_kafka_OffsetFetchRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); rd_rkb_dbg(rkb, TOPIC, 
"OFFSET", - "OffsetFetchRequest(v%d) for %d/%d partition(s)", - ApiVersion, PartCnt, parts->cnt); + "OffsetFetchRequest(v%d) for %d/%d partition(s)", ApiVersion, + PartCnt, parts->cnt); if (PartCnt == 0) { /* No partitions needs OffsetFetch, enqueue empty @@ -1042,9 +1008,9 @@ void rd_kafka_OffsetFetchRequest (rd_kafka_broker_t *rkb, /* Let handler decide if retries should be performed */ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES; - rd_rkb_dbg(rkb, CGRP|RD_KAFKA_DBG_CONSUMER, "OFFSET", - "Fetch committed offsets for %d/%d partition(s)", - PartCnt, parts->cnt); + rd_rkb_dbg(rkb, CGRP | RD_KAFKA_DBG_CONSUMER, "OFFSET", + "Fetch committed offsets for %d/%d partition(s)", PartCnt, + parts->cnt); rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); } @@ -1054,69 +1020,67 @@ void rd_kafka_OffsetFetchRequest (rd_kafka_broker_t *rkb, /** * @brief Handle per-partition OffsetCommit errors and returns actions flags. */ -static int rd_kafka_handle_OffsetCommit_error ( - rd_kafka_broker_t *rkb, rd_kafka_buf_t *request, - const rd_kafka_topic_partition_t *rktpar) { +static int +rd_kafka_handle_OffsetCommit_error(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *request, + const rd_kafka_topic_partition_t *rktpar) { /* These actions are mimicking AK's ConsumerCoordinator.java */ return rd_kafka_err_action( - rkb, rktpar->err, request, + rkb, rktpar->err, request, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE, + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE, + RD_KAFKA_ERR_ACTION_PERMANENT, + 
RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, + RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, - /* .._SPECIAL: mark coordinator dead, refresh and retry */ - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY| + /* .._SPECIAL: mark coordinator dead, refresh and retry */ + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_SPECIAL, - RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY| + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_SPECIAL, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - /* Replicas possibly unavailable: - * Refresh coordinator (but don't mark as dead (!.._SPECIAL)), - * and retry */ - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, + /* Replicas possibly unavailable: + * Refresh coordinator (but don't mark as dead (!.._SPECIAL)), + * and retry */ + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, - /* FIXME: There are some cases in the Java code where - * this is not treated as a fatal error. */ - RD_KAFKA_ERR_ACTION_PERMANENT|RD_KAFKA_ERR_ACTION_FATAL, - RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, + /* FIXME: There are some cases in the Java code where + * this is not treated as a fatal error. 
*/ + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_FATAL, + RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, + RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); } @@ -1131,15 +1095,15 @@ static int rd_kafka_handle_OffsetCommit_error ( * or any other error code if the request was not retried. */ rd_kafka_resp_err_t -rd_kafka_handle_OffsetCommit (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t *offsets) { +rd_kafka_handle_OffsetCommit(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t *offsets) { const int log_decode_errors = LOG_ERR; int32_t TopicArrayCnt; - int errcnt = 0; + int errcnt = 0; int partcnt = 0; int i; int actions = 0; @@ -1151,7 +1115,7 @@ rd_kafka_handle_OffsetCommit (rd_kafka_t *rk, rd_kafka_buf_read_throttle_time(rkbuf); rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); - for (i = 0 ; i < TopicArrayCnt ; i++) { + for (i = 0; i < TopicArrayCnt; i++) { rd_kafkap_str_t topic; char *topic_str; int32_t PartArrayCnt; @@ -1162,7 +1126,7 @@ rd_kafka_handle_OffsetCommit (rd_kafka_t *rk, RD_KAFKAP_STR_DUPA(&topic_str, &topic); - for (j = 0 ; j < PartArrayCnt ; j++) { + for (j = 0; j < PartArrayCnt; j++) { int32_t partition; int16_t ErrorCode; rd_kafka_topic_partition_t *rktpar; @@ -1171,7 +1135,7 @@ rd_kafka_handle_OffsetCommit (rd_kafka_t *rk, rd_kafka_buf_read_i16(rkbuf, &ErrorCode); rktpar = 
rd_kafka_topic_partition_list_find( - offsets, topic_str, partition); + offsets, topic_str, partition); if (!rktpar) { /* Received offset for topic/partition we didn't @@ -1187,7 +1151,7 @@ rd_kafka_handle_OffsetCommit (rd_kafka_t *rk, /* Accumulate actions for per-partition * errors. */ actions |= rd_kafka_handle_OffsetCommit_error( - rkb, request, rktpar); + rkb, request, rktpar); } partcnt++; @@ -1201,23 +1165,22 @@ rd_kafka_handle_OffsetCommit (rd_kafka_t *rk, goto done; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: if (!actions) /* Transport/Request-level error */ - actions = rd_kafka_err_action( - rkb, err, request, + actions = rd_kafka_err_action(rkb, err, request, - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_SPECIAL| - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR__TRANSPORT, + RD_KAFKA_ERR_ACTION_REFRESH | + RD_KAFKA_ERR_ACTION_SPECIAL | + RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR__TRANSPORT, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); if (actions & RD_KAFKA_ERR_ACTION_FATAL) { - rd_kafka_set_fatal_error(rk, err, - "OffsetCommit failed: %s", + rd_kafka_set_fatal_error(rk, err, "OffsetCommit failed: %s", rd_kafka_err2str(err)); return err; } @@ -1238,45 +1201,43 @@ rd_kafka_handle_OffsetCommit (rd_kafka_t *rk, rd_kafka_buf_retry(rkb, request)) return RD_KAFKA_RESP_ERR__IN_PROGRESS; - done: +done: return err; } - /** * @brief Send OffsetCommitRequest for a list of partitions. * * @returns 0 if none of the partitions in \p offsets had valid offsets, * else 1. 
*/ -int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, - rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *offsets, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque, const char *reason) { - rd_kafka_buf_t *rkbuf; - ssize_t of_TopicCnt = -1; - int TopicCnt = 0; +int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb, + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *offsets, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque, + const char *reason) { + rd_kafka_buf_t *rkbuf; + ssize_t of_TopicCnt = -1; + int TopicCnt = 0; const char *last_topic = NULL; - ssize_t of_PartCnt = -1; - int PartCnt = 0; - int tot_PartCnt = 0; + ssize_t of_PartCnt = -1; + int PartCnt = 0; + int tot_PartCnt = 0; int i; int16_t ApiVersion; int features; - ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, - RD_KAFKAP_OffsetCommit, - 0, 7, - &features); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_OffsetCommit, 0, 7, &features); rd_kafka_assert(NULL, offsets != NULL); - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_OffsetCommit, - 1, 100 + (offsets->cnt * 128)); + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_OffsetCommit, 1, + 100 + (offsets->cnt * 128)); /* ConsumerGroup */ rd_kafka_buf_write_kstr(rkbuf, rkcg->rkcg_group_id); @@ -1291,11 +1252,11 @@ int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, /* v7: GroupInstanceId */ if (ApiVersion >= 7) - rd_kafka_buf_write_kstr(rkbuf, rkcg->rkcg_group_instance_id); + rd_kafka_buf_write_kstr(rkbuf, rkcg->rkcg_group_instance_id); /* v2-4: RetentionTime */ if (ApiVersion >= 2 && ApiVersion <= 4) - rd_kafka_buf_write_i64(rkbuf, -1); + rd_kafka_buf_write_i64(rkbuf, -1); /* Sort offsets by topic */ rd_kafka_topic_partition_list_sort_by_topic(offsets); @@ -1303,12 +1264,12 @@ int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, /* TopicArrayCnt: Will be updated when we know the number of topics. 
*/ of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0); - for (i = 0 ; i < offsets->cnt ; i++) { + for (i = 0; i < offsets->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &offsets->elems[i]; - /* Skip partitions with invalid offset. */ - if (rktpar->offset < 0) - continue; + /* Skip partitions with invalid offset. */ + if (rktpar->offset < 0) + continue; if (last_topic == NULL || strcmp(last_topic, rktpar->topic)) { /* New topic */ @@ -1322,15 +1283,15 @@ int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1); /* PartitionCnt, finalized later */ of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0); - PartCnt = 0; - last_topic = rktpar->topic; + PartCnt = 0; + last_topic = rktpar->topic; TopicCnt++; } /* Partition */ - rd_kafka_buf_write_i32(rkbuf, rktpar->partition); + rd_kafka_buf_write_i32(rkbuf, rktpar->partition); PartCnt++; - tot_PartCnt++; + tot_PartCnt++; /* Offset */ rd_kafka_buf_write_i64(rkbuf, rktpar->offset); @@ -1344,27 +1305,26 @@ int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_i64(rkbuf, -1); /* Metadata */ - /* Java client 0.9.0 and broker <0.10.0 can't parse - * Null metadata fields, so as a workaround we send an - * empty string if it's Null. */ - if (!rktpar->metadata) - rd_kafka_buf_write_str(rkbuf, "", 0); - else - rd_kafka_buf_write_str(rkbuf, - rktpar->metadata, - rktpar->metadata_size); + /* Java client 0.9.0 and broker <0.10.0 can't parse + * Null metadata fields, so as a workaround we send an + * empty string if it's Null. */ + if (!rktpar->metadata) + rd_kafka_buf_write_str(rkbuf, "", 0); + else + rd_kafka_buf_write_str(rkbuf, rktpar->metadata, + rktpar->metadata_size); } - if (tot_PartCnt == 0) { - /* No topic+partitions had valid offsets to commit. */ - rd_kafka_replyq_destroy(&replyq); - rd_kafka_buf_destroy(rkbuf); - return 0; - } + if (tot_PartCnt == 0) { + /* No topic+partitions had valid offsets to commit. 
*/ + rd_kafka_replyq_destroy(&replyq); + rd_kafka_buf_destroy(rkbuf); + return 0; + } /* Finalize previous PartitionCnt */ if (PartCnt > 0) - rd_kafka_buf_update_u32(rkbuf, of_PartCnt, PartCnt); + rd_kafka_buf_update_u32(rkbuf, of_PartCnt, PartCnt); /* Finalize TopicCnt */ rd_kafka_buf_update_u32(rkbuf, of_TopicCnt, TopicCnt); @@ -1375,10 +1335,9 @@ int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, "Enqueue OffsetCommitRequest(v%d, %d/%d partition(s))): %s", ApiVersion, tot_PartCnt, offsets->cnt, reason); - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); - - return 1; + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + return 1; } @@ -1397,24 +1356,25 @@ int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, * updated with a human readable error string. */ rd_kafka_resp_err_t -rd_kafka_OffsetDeleteRequest (rd_kafka_broker_t *rkb, - /** (rd_kafka_DeleteConsumerGroupOffsets_t*) */ - const rd_list_t *del_grpoffsets, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_OffsetDeleteRequest(rd_kafka_broker_t *rkb, + /** (rd_kafka_DeleteConsumerGroupOffsets_t*) */ + const rd_list_t *del_grpoffsets, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int features; const rd_kafka_DeleteConsumerGroupOffsets_t *grpoffsets = - rd_list_elem(del_grpoffsets, 0); + rd_list_elem(del_grpoffsets, 0); rd_assert(rd_list_cnt(del_grpoffsets) == 1); ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_OffsetDelete, 0, 0, &features); + rkb, RD_KAFKAP_OffsetDelete, 0, 0, &features); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "OffsetDelete API (KIP-496) not supported " @@ -1423,21 +1383,18 @@ rd_kafka_OffsetDeleteRequest (rd_kafka_broker_t *rkb, return 
RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; } - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_OffsetDelete, 1, - 2 + strlen(grpoffsets->group) + - (64 * grpoffsets->partitions->cnt)); + rkbuf = rd_kafka_buf_new_request( + rkb, RD_KAFKAP_OffsetDelete, 1, + 2 + strlen(grpoffsets->group) + (64 * grpoffsets->partitions->cnt)); /* GroupId */ rd_kafka_buf_write_str(rkbuf, grpoffsets->group, -1); rd_kafka_buf_write_topic_partitions( - rkbuf, - grpoffsets->partitions, - rd_false/*dont skip invalid offsets*/, - rd_false/*any offset*/, - rd_false/*dont write offsets*/, - rd_false/*dont write epoch*/, - rd_false/*dont write metadata*/); + rkbuf, grpoffsets->partitions, + rd_false /*dont skip invalid offsets*/, rd_false /*any offset*/, + rd_false /*dont write offsets*/, rd_false /*dont write epoch*/, + rd_false /*dont write metadata*/); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -1452,9 +1409,9 @@ rd_kafka_OffsetDeleteRequest (rd_kafka_broker_t *rkb, * @brief Write "consumer" protocol type MemberState for SyncGroupRequest to * enveloping buffer \p rkbuf. 
*/ -static void rd_kafka_group_MemberState_consumer_write ( - rd_kafka_buf_t *env_rkbuf, - const rd_kafka_group_member_t *rkgm) { +static void +rd_kafka_group_MemberState_consumer_write(rd_kafka_buf_t *env_rkbuf, + const rd_kafka_group_member_t *rkgm) { rd_kafka_buf_t *rkbuf; rd_slice_t slice; @@ -1462,13 +1419,10 @@ static void rd_kafka_group_MemberState_consumer_write ( rd_kafka_buf_write_i16(rkbuf, 0); /* Version */ rd_assert(rkgm->rkgm_assignment); rd_kafka_buf_write_topic_partitions( - rkbuf, - rkgm->rkgm_assignment, - rd_false /*don't skip invalid offsets*/, - rd_false /* any offset */, - rd_false /*don't write offsets*/, - rd_false /*don't write epoch*/, - rd_false /*don't write metadata*/); + rkbuf, rkgm->rkgm_assignment, + rd_false /*don't skip invalid offsets*/, rd_false /* any offset */, + rd_false /*don't write offsets*/, rd_false /*don't write epoch*/, + rd_false /*don't write metadata*/); rd_kafka_buf_write_kbytes(rkbuf, rkgm->rkgm_userdata); /* Get pointer to binary buffer */ @@ -1484,36 +1438,31 @@ static void rd_kafka_group_MemberState_consumer_write ( /** * Send SyncGroupRequest */ -void rd_kafka_SyncGroupRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - int32_t generation_id, - const rd_kafkap_str_t *member_id, - const rd_kafkap_str_t *group_instance_id, - const rd_kafka_group_member_t - *assignments, - int assignment_cnt, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_SyncGroupRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + int32_t generation_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + const rd_kafka_group_member_t *assignments, + int assignment_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int i; int16_t ApiVersion; int features; - ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, - RD_KAFKAP_SyncGroup, - 0, 3, - &features); + ApiVersion = 
rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_SyncGroup, 0, 3, &features); - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SyncGroup, - 1, - RD_KAFKAP_STR_SIZE(group_id) + - 4 /* GenerationId */ + - RD_KAFKAP_STR_SIZE(member_id) + - RD_KAFKAP_STR_SIZE( - group_instance_id) + - 4 /* array size group_assignment */ + - (assignment_cnt * 100/*guess*/)); + rkbuf = rd_kafka_buf_new_request( + rkb, RD_KAFKAP_SyncGroup, 1, + RD_KAFKAP_STR_SIZE(group_id) + 4 /* GenerationId */ + + RD_KAFKAP_STR_SIZE(member_id) + + RD_KAFKAP_STR_SIZE(group_instance_id) + + 4 /* array size group_assignment */ + + (assignment_cnt * 100 /*guess*/)); rd_kafka_buf_write_kstr(rkbuf, group_id); rd_kafka_buf_write_i32(rkbuf, generation_id); rd_kafka_buf_write_kstr(rkbuf, member_id); @@ -1521,7 +1470,7 @@ void rd_kafka_SyncGroupRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_kstr(rkbuf, group_instance_id); rd_kafka_buf_write_i32(rkbuf, assignment_cnt); - for (i = 0 ; i < assignment_cnt ; i++) { + for (i = 0; i < assignment_cnt; i++) { const rd_kafka_group_member_t *rkgm = &assignments[i]; rd_kafka_buf_write_kstr(rkbuf, rkgm->rkgm_member_id); @@ -1531,10 +1480,10 @@ void rd_kafka_SyncGroupRequest (rd_kafka_broker_t *rkb, /* This is a blocking request */ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING; rd_kafka_buf_set_abs_timeout( - rkbuf, - rkb->rkb_rk->rk_conf.group_session_timeout_ms + - 3000/* 3s grace period*/, - 0); + rkbuf, + rkb->rkb_rk->rk_conf.group_session_timeout_ms + + 3000 /* 3s grace period*/, + 0); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -1546,15 +1495,15 @@ void rd_kafka_SyncGroupRequest (rd_kafka_broker_t *rkb, /** * Send JoinGroupRequest */ -void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - const rd_kafkap_str_t *member_id, - const rd_kafkap_str_t *group_instance_id, - const rd_kafkap_str_t *protocol_type, - const rd_list_t *topics, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void 
rd_kafka_JoinGroupRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + const rd_kafkap_str_t *protocol_type, + const rd_list_t *topics, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_assignor_t *rkas; @@ -1562,42 +1511,36 @@ void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, int16_t ApiVersion = 0; int features; - ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, - RD_KAFKAP_JoinGroup, - 0, 5, - &features); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_JoinGroup, 0, 5, &features); - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_JoinGroup, - 1, - RD_KAFKAP_STR_SIZE(group_id) + - 4 /* sessionTimeoutMs */ + - 4 /* rebalanceTimeoutMs */ + - RD_KAFKAP_STR_SIZE(member_id) + - RD_KAFKAP_STR_SIZE( - group_instance_id) + - RD_KAFKAP_STR_SIZE(protocol_type) + - 4 /* array count GroupProtocols */ + - (rd_list_cnt(topics) * 100)); + rkbuf = rd_kafka_buf_new_request( + rkb, RD_KAFKAP_JoinGroup, 1, + RD_KAFKAP_STR_SIZE(group_id) + 4 /* sessionTimeoutMs */ + + 4 /* rebalanceTimeoutMs */ + RD_KAFKAP_STR_SIZE(member_id) + + RD_KAFKAP_STR_SIZE(group_instance_id) + + RD_KAFKAP_STR_SIZE(protocol_type) + + 4 /* array count GroupProtocols */ + + (rd_list_cnt(topics) * 100)); rd_kafka_buf_write_kstr(rkbuf, group_id); rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.group_session_timeout_ms); if (ApiVersion >= 1) rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.max_poll_interval_ms); rd_kafka_buf_write_kstr(rkbuf, member_id); if (ApiVersion >= 5) - rd_kafka_buf_write_kstr(rkbuf, - group_instance_id); + rd_kafka_buf_write_kstr(rkbuf, group_instance_id); rd_kafka_buf_write_kstr(rkbuf, protocol_type); rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.enabled_assignor_cnt); RD_LIST_FOREACH(rkas, &rk->rk_conf.partition_assignors, i) { rd_kafkap_bytes_t *member_metadata; - if 
(!rkas->rkas_enabled) - continue; + if (!rkas->rkas_enabled) + continue; rd_kafka_buf_write_kstr(rkbuf, rkas->rkas_protocol_name); member_metadata = rkas->rkas_get_metadata_cb( - rkas, rk->rk_cgrp->rkcg_assignor_state, topics, - rk->rk_cgrp->rkcg_group_assignment); + rkas, rk->rk_cgrp->rkcg_assignor_state, topics, + rk->rk_cgrp->rkcg_group_assignment); rd_kafka_buf_write_kbytes(rkbuf, member_metadata); rd_kafkap_bytes_destroy(member_metadata); } @@ -1606,7 +1549,7 @@ void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, if (ApiVersion < 1 && rk->rk_conf.max_poll_interval_ms > - rk->rk_conf.group_session_timeout_ms && + rk->rk_conf.group_session_timeout_ms && rd_interval(&rkb->rkb_suppress.unsupported_kip62, /* at most once per day */ (rd_ts_t)86400 * 1000 * 1000, 0) > 0) @@ -1622,8 +1565,7 @@ void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, rk->rk_conf.group_session_timeout_ms); - if (ApiVersion < 5 && - rk->rk_conf.group_instance_id && + if (ApiVersion < 5 && rk->rk_conf.group_instance_id && rd_interval(&rkb->rkb_suppress.unsupported_kip345, /* at most once per day */ (rd_ts_t)86400 * 1000 * 1000, 0) > 0) @@ -1637,15 +1579,14 @@ void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, /* Absolute timeout */ rd_kafka_buf_set_abs_timeout_force( - rkbuf, - /* Request timeout is max.poll.interval.ms + grace - * if the broker supports it, else - * session.timeout.ms + grace. */ - (ApiVersion >= 1 ? - rk->rk_conf.max_poll_interval_ms : - rk->rk_conf.group_session_timeout_ms) + - 3000/* 3s grace period*/, - 0); + rkbuf, + /* Request timeout is max.poll.interval.ms + grace + * if the broker supports it, else + * session.timeout.ms + grace. */ + (ApiVersion >= 1 ? 
rk->rk_conf.max_poll_interval_ms + : rk->rk_conf.group_session_timeout_ms) + + 3000 /* 3s grace period*/, + 0); /* This is a blocking request */ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING; @@ -1655,29 +1596,23 @@ void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, - - - /** * Send LeaveGroupRequest */ -void rd_kafka_LeaveGroupRequest (rd_kafka_broker_t *rkb, - const char *group_id, - const char *member_id, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_LeaveGroupRequest(rd_kafka_broker_t *rkb, + const char *group_id, + const char *member_id, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int features; - ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, - RD_KAFKAP_LeaveGroup, - 0, 1, - &features); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_LeaveGroup, 0, 1, &features); - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_LeaveGroup, - 1, 300); + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_LeaveGroup, 1, 300); rd_kafka_buf_write_str(rkbuf, group_id, -1); rd_kafka_buf_write_str(rkbuf, member_id, -1); @@ -1699,15 +1634,15 @@ void rd_kafka_LeaveGroupRequest (rd_kafka_broker_t *rkb, * Handler for LeaveGroup responses * opaque must be the cgrp handle. 
*/ -void rd_kafka_handle_LeaveGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_cgrp_t *rkcg = opaque; +void rd_kafka_handle_LeaveGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; + int16_t ErrorCode = 0; int actions; if (err) { @@ -1719,12 +1654,12 @@ void rd_kafka_handle_LeaveGroup (rd_kafka_t *rk, err: actions = rd_kafka_err_action(rkb, ErrorCode, request, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { /* Re-query for coordinator */ rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ, - RD_KAFKA_OP_COORD_QUERY, ErrorCode); + RD_KAFKA_OP_COORD_QUERY, ErrorCode); } if (actions & RD_KAFKA_ERR_ACTION_RETRY) { @@ -1740,45 +1675,39 @@ void rd_kafka_handle_LeaveGroup (rd_kafka_t *rk, return; - err_parse: +err_parse: ErrorCode = rkbuf->rkbuf_err; goto err; } - - - /** * Send HeartbeatRequest */ -void rd_kafka_HeartbeatRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - int32_t generation_id, - const rd_kafkap_str_t *member_id, - const rd_kafkap_str_t *group_instance_id, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + int32_t generation_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int features; - ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, - RD_KAFKAP_Heartbeat, - 0, 3, - &features); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_Heartbeat, 0, 3, &features); rd_rkb_dbg(rkb, CGRP, "HEARTBEAT", - 
"Heartbeat for group \"%s\" generation id %"PRId32, + "Heartbeat for group \"%s\" generation id %" PRId32, group_id->str, generation_id); - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_Heartbeat, - 1, + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_Heartbeat, 1, RD_KAFKAP_STR_SIZE(group_id) + - 4 /* GenerationId */ + - RD_KAFKAP_STR_SIZE(member_id)); + 4 /* GenerationId */ + + RD_KAFKAP_STR_SIZE(member_id)); rd_kafka_buf_write_kstr(rkbuf, group_id); rd_kafka_buf_write_i32(rkbuf, generation_id); @@ -1789,23 +1718,20 @@ void rd_kafka_HeartbeatRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); rd_kafka_buf_set_abs_timeout( - rkbuf, - rkb->rkb_rk->rk_conf.group_session_timeout_ms, - 0); + rkbuf, rkb->rkb_rk->rk_conf.group_session_timeout_ms, 0); rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); } - /** * Send ListGroupsRequest */ -void rd_kafka_ListGroupsRequest (rd_kafka_broker_t *rkb, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_ListGroups, 0, 0); @@ -1817,15 +1743,16 @@ void rd_kafka_ListGroupsRequest (rd_kafka_broker_t *rkb, /** * Send DescribeGroupsRequest */ -void rd_kafka_DescribeGroupsRequest (rd_kafka_broker_t *rkb, - const char **groups, int group_cnt, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb, + const char **groups, + int group_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DescribeGroups, - 1, 32*group_cnt); + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DescribeGroups, 1, + 32 * group_cnt); rd_kafka_buf_write_i32(rkbuf, group_cnt); while (group_cnt-- > 0) @@ 
-1836,25 +1763,24 @@ void rd_kafka_DescribeGroupsRequest (rd_kafka_broker_t *rkb, - /** * @brief Generic handler for Metadata responses * * @locality rdkafka main thread */ -static void rd_kafka_handle_Metadata (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_op_t *rko = opaque; /* Possibly NULL */ +static void rd_kafka_handle_Metadata(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_op_t *rko = opaque; /* Possibly NULL */ struct rd_kafka_metadata *md = NULL; - const rd_list_t *topics = request->rkbuf_u.Metadata.topics; + const rd_list_t *topics = request->rkbuf_u.Metadata.topics; int actions; rd_kafka_assert(NULL, err == RD_KAFKA_RESP_ERR__DESTROY || - thrd_is_current(rk->rk_thread)); + thrd_is_current(rk->rk_thread)); /* Avoid metadata updates when we're terminating. */ if (rd_kafka_terminating(rkb->rkb_rk) || @@ -1884,7 +1810,7 @@ static void rd_kafka_handle_Metadata (rd_kafka_t *rk, if (rko && rko->rko_replyq.q) { /* Reply to metadata requester, passing on the metadata. * Reuse requesting rko for the reply. 
*/ - rko->rko_err = err; + rko->rko_err = err; rko->rko_u.metadata.md = md; rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0); @@ -1896,14 +1822,13 @@ static void rd_kafka_handle_Metadata (rd_kafka_t *rk, goto done; - err: - actions = rd_kafka_err_action( - rkb, err, request, +err: + actions = rd_kafka_err_action(rkb, err, request, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR__PARTIAL, + RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR__PARTIAL, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); if (actions & RD_KAFKA_ERR_ACTION_RETRY) { if (rd_kafka_buf_retry(rkb, request)) @@ -1914,7 +1839,7 @@ static void rd_kafka_handle_Metadata (rd_kafka_t *rk, "Metadata request failed: %s: %s (%dms): %s", request->rkbuf_u.Metadata.reason, rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent/1000), + (int)(request->rkbuf_ts_sent / 1000), rd_kafka_actions2str(actions)); } @@ -1922,7 +1847,7 @@ static void rd_kafka_handle_Metadata (rd_kafka_t *rk, /* FALLTHRU */ - done: +done: if (rko) rd_kafka_op_destroy(rko); } @@ -1953,22 +1878,20 @@ static void rd_kafka_handle_Metadata (rd_kafka_t *rk, * otherwise RD_KAFKA_RESP_ERR_NO_ERROR. If \p rko is non-NULL the request * is sent regardless. */ -rd_kafka_resp_err_t -rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, - const rd_list_t *topics, const char *reason, - rd_bool_t allow_auto_create_topics, - rd_bool_t cgrp_update, - rd_kafka_op_t *rko) { +rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb, + const rd_list_t *topics, + const char *reason, + rd_bool_t allow_auto_create_topics, + rd_bool_t cgrp_update, + rd_kafka_op_t *rko) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int features; - int topic_cnt = topics ? rd_list_cnt(topics) : 0; + int topic_cnt = topics ? 
rd_list_cnt(topics) : 0; int *full_incr = NULL; - ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, - RD_KAFKAP_Metadata, - 0, 4, - &features); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_Metadata, 0, 4, &features); rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_Metadata, 1, 4 + (50 * topic_cnt) + 1); @@ -1976,7 +1899,7 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, if (!reason) reason = ""; - rkbuf->rkbuf_u.Metadata.reason = rd_strdup(reason); + rkbuf->rkbuf_u.Metadata.reason = rd_strdup(reason); rkbuf->rkbuf_u.Metadata.cgrp_update = cgrp_update; if (!topics && ApiVersion >= 1) { @@ -1984,13 +1907,13 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_i32(rkbuf, 0); rd_rkb_dbg(rkb, METADATA, "METADATA", "Request metadata for brokers only: %s", reason); - full_incr = &rkb->rkb_rk->rk_metadata_cache. - rkmc_full_brokers_sent; + full_incr = + &rkb->rkb_rk->rk_metadata_cache.rkmc_full_brokers_sent; } else { if (topic_cnt == 0 && !rko) - full_incr = &rkb->rkb_rk->rk_metadata_cache. - rkmc_full_topics_sent; + full_incr = &rkb->rkb_rk->rk_metadata_cache + .rkmc_full_topics_sent; if (topic_cnt == 0 && ApiVersion >= 1) rd_kafka_buf_write_i32(rkbuf, -1); /* Null: all topics*/ @@ -2001,11 +1924,13 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, rkbuf->rkbuf_u.Metadata.all_topics = 1; rd_rkb_dbg(rkb, METADATA, "METADATA", "Request metadata for all topics: " - "%s", reason); + "%s", + reason); } else rd_rkb_dbg(rkb, METADATA, "METADATA", "Request metadata for %d topic(s): " - "%s", topic_cnt, reason); + "%s", + topic_cnt, reason); } if (full_incr) { @@ -2014,11 +1939,10 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, * Forced requests (app using metadata() API) are passed * through regardless. */ - mtx_lock(&rkb->rkb_rk->rk_metadata_cache. 
- rkmc_full_lock); + mtx_lock(&rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock); if (*full_incr > 0 && (!rko || !rko->rko_u.metadata.force)) { - mtx_unlock(&rkb->rkb_rk->rk_metadata_cache. - rkmc_full_lock); + mtx_unlock( + &rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock); rd_rkb_dbg(rkb, METADATA, "METADATA", "Skipping metadata request: %s: " "full request already in-transit", @@ -2028,11 +1952,10 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, } (*full_incr)++; - mtx_unlock(&rkb->rkb_rk->rk_metadata_cache. - rkmc_full_lock); + mtx_unlock(&rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock); rkbuf->rkbuf_u.Metadata.decr = full_incr; - rkbuf->rkbuf_u.Metadata.decr_lock = &rkb->rkb_rk-> - rk_metadata_cache.rkmc_full_lock; + rkbuf->rkbuf_u.Metadata.decr_lock = + &rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock; } @@ -2043,11 +1966,10 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, /* Maintain a copy of the topics list so we can purge * hints from the metadata cache on error. */ rkbuf->rkbuf_u.Metadata.topics = - rd_list_copy(topics, rd_list_string_copy, NULL); + rd_list_copy(topics, rd_list_string_copy, NULL); RD_LIST_FOREACH(topic, topics, i) - rd_kafka_buf_write_str(rkbuf, topic, -1); - + rd_kafka_buf_write_str(rkbuf, topic, -1); } if (ApiVersion >= 4) { @@ -2058,9 +1980,9 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, !rkb->rkb_rk->rk_conf.allow_auto_create_topics && rd_kafka_conf_is_modified(&rkb->rkb_rk->rk_conf, "allow.auto.create.topics") && - rd_interval(&rkb->rkb_rk->rk_suppress. 
- allow_auto_create_topics, - 30 * 60 * 1000 /* every 30 minutes */, 0) >= 0) { + rd_interval( + &rkb->rkb_rk->rk_suppress.allow_auto_create_topics, + 30 * 60 * 1000 /* every 30 minutes */, 0) >= 0) { /* Let user know we can't obey allow.auto.create.topics */ rd_rkb_log(rkb, LOG_WARNING, "AUTOCREATE", "allow.auto.create.topics=false not supported " @@ -2080,8 +2002,7 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, /* Handle response thru rk_ops, * but forward parsed result to * rko's replyq when done. */ - RD_KAFKA_REPLYQ(rkb->rkb_rk-> - rk_ops, 0), + RD_KAFKA_REPLYQ(rkb->rkb_rk->rk_ops, 0), rd_kafka_handle_Metadata, rko); return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -2089,12 +2010,6 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, - - - - - - /** * @brief Parses and handles ApiVersion reply. * @@ -2107,25 +2022,25 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, * @remark A valid \p apis might be returned even if an error is returned. */ rd_kafka_resp_err_t -rd_kafka_handle_ApiVersion (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - struct rd_kafka_ApiVersion **apis, - size_t *api_cnt) { +rd_kafka_handle_ApiVersion(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + struct rd_kafka_ApiVersion **apis, + size_t *api_cnt) { const int log_decode_errors = LOG_DEBUG; - int32_t ApiArrayCnt; - int16_t ErrorCode; - int i = 0; + int32_t ApiArrayCnt; + int16_t ErrorCode; + int i = 0; - *apis = NULL; + *apis = NULL; *api_cnt = 0; if (err) goto err; - rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); err = ErrorCode; rd_kafka_buf_read_arraycnt(rkbuf, &ApiArrayCnt, 1000); @@ -2135,22 +2050,21 @@ rd_kafka_handle_ApiVersion (rd_kafka_t *rk, goto err; } - rd_rkb_dbg(rkb, FEATURE, "APIVERSION", - "Broker API support:"); + rd_rkb_dbg(rkb, FEATURE, "APIVERSION", "Broker API support:"); - *apis = 
rd_malloc(sizeof(**apis) * ApiArrayCnt); + *apis = rd_malloc(sizeof(**apis) * ApiArrayCnt); - for (i = 0 ; i < ApiArrayCnt ; i++) { - struct rd_kafka_ApiVersion *api = &(*apis)[i]; + for (i = 0; i < ApiArrayCnt; i++) { + struct rd_kafka_ApiVersion *api = &(*apis)[i]; - rd_kafka_buf_read_i16(rkbuf, &api->ApiKey); - rd_kafka_buf_read_i16(rkbuf, &api->MinVer); - rd_kafka_buf_read_i16(rkbuf, &api->MaxVer); + rd_kafka_buf_read_i16(rkbuf, &api->ApiKey); + rd_kafka_buf_read_i16(rkbuf, &api->MinVer); + rd_kafka_buf_read_i16(rkbuf, &api->MaxVer); - rd_rkb_dbg(rkb, FEATURE, "APIVERSION", - " ApiKey %s (%hd) Versions %hd..%hd", - rd_kafka_ApiKey2str(api->ApiKey), - api->ApiKey, api->MinVer, api->MaxVer); + rd_rkb_dbg(rkb, FEATURE, "APIVERSION", + " ApiKey %s (%hd) Versions %hd..%hd", + rd_kafka_ApiKey2str(api->ApiKey), api->ApiKey, + api->MinVer, api->MaxVer); /* Discard struct tags */ rd_kafka_buf_skip_tags(rkbuf); @@ -2162,12 +2076,12 @@ rd_kafka_handle_ApiVersion (rd_kafka_t *rk, /* Discard end tags */ rd_kafka_buf_skip_tags(rkbuf); - *api_cnt = ApiArrayCnt; + *api_cnt = ApiArrayCnt; qsort(*apis, *api_cnt, sizeof(**apis), rd_kafka_ApiVersion_key_cmp); - goto done; + goto done; - err_parse: +err_parse: /* If the broker does not support our ApiVersionRequest version it * will respond with a version 0 response, which will most likely * fail parsing. Instead of propagating the parse error we @@ -2175,16 +2089,16 @@ rd_kafka_handle_ApiVersion (rd_kafka_t *rk, * we use the parse error. */ if (!err) err = rkbuf->rkbuf_err; - err: +err: /* There are no retryable errors. */ - if (*apis) - rd_free(*apis); + if (*apis) + rd_free(*apis); - *apis = NULL; + *apis = NULL; *api_cnt = 0; - done: +done: return err; } @@ -2196,19 +2110,18 @@ rd_kafka_handle_ApiVersion (rd_kafka_t *rk, * @param ApiVersion If -1 use the highest supported version, else use the * specified value. 
*/ -void rd_kafka_ApiVersionRequest (rd_kafka_broker_t *rkb, - int16_t ApiVersion, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_ApiVersionRequest(rd_kafka_broker_t *rkb, + int16_t ApiVersion, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; if (ApiVersion == -1) ApiVersion = 3; - rkbuf = rd_kafka_buf_new_flexver_request(rkb, RD_KAFKAP_ApiVersion, - 1, 4, - ApiVersion >= 3/*flexver*/); + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_ApiVersion, 1, 4, ApiVersion >= 3 /*flexver*/); if (ApiVersion >= 3) { /* KIP-511 adds software name and version through the optional @@ -2218,7 +2131,7 @@ void rd_kafka_ApiVersionRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_str(rkbuf, rkb->rkb_rk->rk_conf.sw_name, -1); /* ClientSoftwareVersion */ - rd_kafka_buf_write_str(rkbuf,rkb->rkb_rk->rk_conf.sw_version, + rd_kafka_buf_write_str(rkbuf, rkb->rkb_rk->rk_conf.sw_version, -1); } @@ -2230,70 +2143,68 @@ void rd_kafka_ApiVersionRequest (rd_kafka_broker_t *rkb, * receive an unknown API request, so dont retry request on failure. */ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES; - /* 0.9.0.x brokers will not close the connection on unsupported - * API requests, so we minimize the timeout for the request. - * This is a regression on the broker part. */ + /* 0.9.0.x brokers will not close the connection on unsupported + * API requests, so we minimize the timeout for the request. + * This is a regression on the broker part. 
*/ rd_kafka_buf_set_abs_timeout( - rkbuf, - rkb->rkb_rk->rk_conf.api_version_request_timeout_ms, - 0); + rkbuf, rkb->rkb_rk->rk_conf.api_version_request_timeout_ms, 0); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); if (replyq.q) - rd_kafka_broker_buf_enq_replyq(rkb, - rkbuf, replyq, resp_cb, opaque); - else /* in broker thread */ - rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque); + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, + opaque); + else /* in broker thread */ + rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque); } /** * Send SaslHandshakeRequest (KIP-43) */ -void rd_kafka_SaslHandshakeRequest (rd_kafka_broker_t *rkb, - const char *mechanism, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_SaslHandshakeRequest(rd_kafka_broker_t *rkb, + const char *mechanism, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; - int mechlen = (int)strlen(mechanism); + int mechlen = (int)strlen(mechanism); int16_t ApiVersion; int features; - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslHandshake, - 1, RD_KAFKAP_STR_SIZE0(mechlen)); + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslHandshake, 1, + RD_KAFKAP_STR_SIZE0(mechlen)); /* Should be sent before any other requests since it is part of * the initial connection handshake. */ rkbuf->rkbuf_prio = RD_KAFKA_PRIO_FLASH; - rd_kafka_buf_write_str(rkbuf, mechanism, mechlen); + rd_kafka_buf_write_str(rkbuf, mechanism, mechlen); /* Non-supporting brokers will tear down the conneciton when they * receive an unknown API request or where the SASL GSSAPI * token type is not recognized, so dont retry request on failure. */ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES; - /* 0.9.0.x brokers will not close the connection on unsupported - * API requests, so we minimize the timeout of the request. - * This is a regression on the broker part. 
*/ + /* 0.9.0.x brokers will not close the connection on unsupported + * API requests, so we minimize the timeout of the request. + * This is a regression on the broker part. */ if (!rkb->rkb_rk->rk_conf.api_version_request && - rkb->rkb_rk->rk_conf.socket_timeout_ms > 10*1000) - rd_kafka_buf_set_abs_timeout(rkbuf, 10*1000 /*10s*/, 0); + rkb->rkb_rk->rk_conf.socket_timeout_ms > 10 * 1000) + rd_kafka_buf_set_abs_timeout(rkbuf, 10 * 1000 /*10s*/, 0); /* ApiVersion 1 / RD_KAFKA_FEATURE_SASL_REQ enables * the SaslAuthenticateRequest */ ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_SaslHandshake, 0, 1, &features); + rkb, RD_KAFKAP_SaslHandshake, 0, 1, &features); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); - if (replyq.q) - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, - resp_cb, opaque); - else /* in broker thread */ - rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque); + if (replyq.q) + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, + opaque); + else /* in broker thread */ + rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque); } @@ -2305,13 +2216,12 @@ void rd_kafka_SaslHandshakeRequest (rd_kafka_broker_t *rkb, * @locality broker thread * @locks none */ -void -rd_kafka_handle_SaslAuthenticate (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +void rd_kafka_handle_SaslAuthenticate(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; int16_t error_code; rd_kafkap_str_t error_str; @@ -2346,10 +2256,9 @@ rd_kafka_handle_SaslAuthenticate (rd_kafka_t *rk, rd_kafka_buf_read_bytes(rkbuf, &auth_data); /* Pass SASL auth frame to SASL handler */ - if (rd_kafka_sasl_recv(rkb->rkb_transport, - auth_data.data, - (size_t)RD_KAFKAP_BYTES_LEN(&auth_data), - errstr, sizeof(errstr)) == -1) { + if 
(rd_kafka_sasl_recv(rkb->rkb_transport, auth_data.data, + (size_t)RD_KAFKAP_BYTES_LEN(&auth_data), errstr, + sizeof(errstr)) == -1) { err = RD_KAFKA_RESP_ERR__AUTHENTICATION; goto err; } @@ -2357,26 +2266,27 @@ rd_kafka_handle_SaslAuthenticate (rd_kafka_t *rk, return; - err_parse: +err_parse: err = rkbuf->rkbuf_err; rd_snprintf(errstr, sizeof(errstr), "SaslAuthenticateResponse parsing failed: %s", rd_kafka_err2str(err)); - err: - rd_kafka_broker_fail(rkb, LOG_ERR, err, - "SASL authentication error: %s", errstr); +err: + rd_kafka_broker_fail(rkb, LOG_ERR, err, "SASL authentication error: %s", + errstr); } /** * @brief Send SaslAuthenticateRequest (KIP-152) */ -void rd_kafka_SaslAuthenticateRequest (rd_kafka_broker_t *rkb, - const void *buf, size_t size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_SaslAuthenticateRequest(rd_kafka_broker_t *rkb, + const void *buf, + size_t size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslAuthenticate, 0, 0); @@ -2393,8 +2303,8 @@ void rd_kafka_SaslAuthenticateRequest (rd_kafka_broker_t *rkb, rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES; if (replyq.q) - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, - resp_cb, opaque); + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, + opaque); else /* in broker thread */ rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque); } @@ -2415,11 +2325,11 @@ struct rd_kafka_Produce_result { * @locality broker thread */ static rd_kafka_resp_err_t -rd_kafka_handle_Produce_parse (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - struct rd_kafka_Produce_result *result) { +rd_kafka_handle_Produce_parse(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + struct rd_kafka_Produce_result *result) { int32_t TopicArrayCnt; int32_t 
PartitionArrayCnt; struct { @@ -2428,7 +2338,7 @@ rd_kafka_handle_Produce_parse (rd_kafka_broker_t *rkb, int64_t Offset; } hdr; const int log_decode_errors = LOG_ERR; - int64_t log_start_offset = -1; + int64_t log_start_offset = -1; rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); if (TopicArrayCnt != 1) @@ -2468,9 +2378,9 @@ rd_kafka_handle_Produce_parse (rd_kafka_broker_t *rkb, return hdr.ErrorCode; - err_parse: +err_parse: return rkbuf->rkbuf_err; - err: +err: return RD_KAFKA_RESP_ERR__BAD_MSG; } @@ -2479,9 +2389,9 @@ rd_kafka_handle_Produce_parse (rd_kafka_broker_t *rkb, * @struct Hold temporary Produce error state */ struct rd_kafka_Produce_err { - rd_kafka_resp_err_t err; /**< Error code */ - int actions; /**< Actions to take */ - int incr_retry; /**< Increase per-message retry cnt */ + rd_kafka_resp_err_t err; /**< Error code */ + int actions; /**< Actions to take */ + int incr_retry; /**< Increase per-message retry cnt */ rd_kafka_msg_status_t status; /**< Messages persistence status */ /* Idempotent Producer */ @@ -2507,10 +2417,10 @@ struct rd_kafka_Produce_err { * @locks none */ static void -rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, - rd_kafka_msgbatch_t *batch, - struct rd_kafka_Produce_err *perr) { - rd_kafka_t *rk = rkb->rkb_rk; +rd_kafka_handle_idempotent_Produce_error(rd_kafka_broker_t *rkb, + rd_kafka_msgbatch_t *batch, + struct rd_kafka_Produce_err *perr) { + rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_toppar_t *rktp = batch->rktp; rd_kafka_msg_t *firstmsg, *lastmsg; int r; @@ -2522,7 +2432,7 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, rd_kafka_rdunlock(rkb->rkb_rk); firstmsg = rd_kafka_msgq_first(&batch->msgq); - lastmsg = rd_kafka_msgq_last(&batch->msgq); + lastmsg = rd_kafka_msgq_last(&batch->msgq); rd_assert(firstmsg && lastmsg); /* Store the last msgid of the batch @@ -2535,7 +2445,7 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, lastmsg->rkm_u.producer.msgid); } else { 
firstmsg->rkm_u.producer.last_msgid = - lastmsg->rkm_u.producer.msgid; + lastmsg->rkm_u.producer.msgid; } if (!rd_kafka_pid_eq(batch->pid, perr->rktp_pid)) { @@ -2544,13 +2454,13 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; - rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "ERRPID", - "%.*s [%"PRId32"] PID mismatch: " + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "ERRPID", + "%.*s [%" PRId32 + "] PID mismatch: " "request %s != partition %s: " "failing messages with error %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_pid2str(batch->pid), + rktp->rktp_partition, rd_kafka_pid2str(batch->pid), rd_kafka_pid2str(perr->rktp_pid), rd_kafka_err2str(perr->err)); return; @@ -2559,8 +2469,7 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, /* * Special error handling */ - switch (perr->err) - { + switch (perr->err) { case RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER: /* Compare request's sequence to expected next * acked sequence. @@ -2589,28 +2498,27 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * R2 to R4 which would be retried automatically. */ rd_kafka_idemp_set_fatal_error( - rk, perr->err, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed " - "due to sequence desynchronization with " - "broker %"PRId32" (%s, base seq %"PRId32", " - "idemp state change %"PRId64"ms ago, " - "last partition error %s (actions %s, " - "base seq %"PRId32"..%"PRId32 - ", base msgid %"PRIu64", %"PRId64"ms ago)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq, - state_age / 1000, - rd_kafka_err2name(last_err.err), - rd_kafka_actions2str(last_err.actions), - last_err.base_seq, last_err.last_seq, - last_err.base_msgid, - last_err.ts ? 
- (now - last_err.ts)/1000 : -1); + rk, perr->err, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "due to sequence desynchronization with " + "broker %" PRId32 " (%s, base seq %" PRId32 + ", " + "idemp state change %" PRId64 + "ms ago, " + "last partition error %s (actions %s, " + "base seq %" PRId32 "..%" PRId32 + ", base msgid %" PRIu64 ", %" PRId64 "ms ago)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq, + state_age / 1000, rd_kafka_err2name(last_err.err), + rd_kafka_actions2str(last_err.actions), + last_err.base_seq, last_err.last_seq, + last_err.base_msgid, + last_err.ts ? (now - last_err.ts) / 1000 : -1); perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; @@ -2629,38 +2537,40 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * re-enqueue the messages for later retry * (without incrementing retries). */ - rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "ERRSEQ", - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed " - "due to skipped sequence numbers " - "(%s, base seq %"PRId32" > " - "next seq %"PRId32") " - "caused by previous failed request " - "(%s, actions %s, " - "base seq %"PRId32"..%"PRId32 - ", base msgid %"PRIu64", %"PRId64"ms ago): " - "recovering and retrying", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rd_kafka_pid2str(batch->pid), - batch->first_seq, - perr->next_ack_seq, - rd_kafka_err2name(last_err.err), - rd_kafka_actions2str(last_err.actions), - last_err.base_seq, last_err.last_seq, - last_err.base_msgid, - last_err.ts ? 
- (now - last_err.ts)/1000 : -1); + rd_rkb_dbg( + rkb, MSG | RD_KAFKA_DBG_EOS, "ERRSEQ", + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "due to skipped sequence numbers " + "(%s, base seq %" PRId32 + " > " + "next seq %" PRId32 + ") " + "caused by previous failed request " + "(%s, actions %s, " + "base seq %" PRId32 "..%" PRId32 + ", base msgid %" PRIu64 ", %" PRId64 + "ms ago): " + "recovering and retrying", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), + rd_kafka_pid2str(batch->pid), batch->first_seq, + perr->next_ack_seq, rd_kafka_err2name(last_err.err), + rd_kafka_actions2str(last_err.actions), + last_err.base_seq, last_err.last_seq, + last_err.base_msgid, + last_err.ts ? (now - last_err.ts) / 1000 : -1); perr->incr_retry = 0; - perr->actions = RD_KAFKA_ERR_ACTION_RETRY; - perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; + perr->actions = RD_KAFKA_ERR_ACTION_RETRY; + perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; perr->update_next_ack = rd_false; perr->update_next_err = rd_true; rd_kafka_idemp_drain_epoch_bump( - rk, perr->err, "skipped sequence numbers"); + rk, perr->err, "skipped sequence numbers"); } else { /* Request's sequence is less than next ack, @@ -2668,28 +2578,27 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * local bug or the broker did not respond * to the requests in order. 
*/ rd_kafka_idemp_set_fatal_error( - rk, perr->err, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed " - "with rewound sequence number on " - "broker %"PRId32" (%s, " - "base seq %"PRId32" < next seq %"PRId32"): " - "last error %s (actions %s, " - "base seq %"PRId32"..%"PRId32 - ", base msgid %"PRIu64", %"PRId64"ms ago)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq, - perr->next_ack_seq, - rd_kafka_err2name(last_err.err), - rd_kafka_actions2str(last_err.actions), - last_err.base_seq, last_err.last_seq, - last_err.base_msgid, - last_err.ts ? - (now - last_err.ts)/1000 : -1); + rk, perr->err, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "with rewound sequence number on " + "broker %" PRId32 + " (%s, " + "base seq %" PRId32 " < next seq %" PRId32 + "): " + "last error %s (actions %s, " + "base seq %" PRId32 "..%" PRId32 + ", base msgid %" PRIu64 ", %" PRId64 "ms ago)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq, + perr->next_ack_seq, rd_kafka_err2name(last_err.err), + rd_kafka_actions2str(last_err.actions), + last_err.base_seq, last_err.last_seq, + last_err.base_msgid, + last_err.ts ? (now - last_err.ts) / 1000 : -1); perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; @@ -2709,23 +2618,24 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * But first make sure the first message has actually * been retried, getting this error for a non-retried message * indicates a synchronization issue or bug. 
*/ - rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "DUPSEQ", - "ProduceRequest for %.*s [%"PRId32"] " + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "DUPSEQ", + "ProduceRequest for %.*s [%" PRId32 + "] " "with %d message(s) failed " "due to duplicate sequence number: " "previous send succeeded but was not acknowledged " - "(%s, base seq %"PRId32"): " + "(%s, base seq %" PRId32 + "): " "marking the messages successfully delivered", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_msgq_len(&batch->msgq), - rd_kafka_pid2str(batch->pid), - batch->first_seq); + rd_kafka_pid2str(batch->pid), batch->first_seq); /* Void error, delivery succeeded */ - perr->err = RD_KAFKA_RESP_ERR_NO_ERROR; - perr->actions = 0; - perr->status = RD_KAFKA_MSG_STATUS_PERSISTED; + perr->err = RD_KAFKA_RESP_ERR_NO_ERROR; + perr->actions = 0; + perr->status = RD_KAFKA_MSG_STATUS_PERSISTED; perr->update_next_ack = rd_true; perr->update_next_err = rd_true; break; @@ -2749,11 +2659,13 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * the txnmgr, not here) we'll raise an abortable error and * flag that the epoch needs to be bumped on the coordinator. 
*/ if (rd_kafka_is_transactional(rk)) { - rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "UNKPID", - "ProduceRequest for %.*s [%"PRId32"] " + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "UNKPID", + "ProduceRequest for %.*s [%" PRId32 + "] " "with %d message(s) failed " "due to unknown producer id " - "(%s, base seq %"PRId32", %d retries): " + "(%s, base seq %" PRId32 + ", %d retries): " "failing the current transaction", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, @@ -2767,29 +2679,31 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, "unknown producer id"); rd_kafka_txn_set_abortable_error_with_bump( - rk, - RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed " - "due to unknown producer id", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq)); + rk, RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "due to unknown producer id", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq)); perr->incr_retry = 0; - perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; - perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; + perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; + perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; perr->update_next_ack = rd_false; perr->update_next_err = rd_true; break; } else if (!firstmsg->rkm_u.producer.retries && perr->next_err_seq == batch->first_seq) { - rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "UNKPID", - "ProduceRequest for %.*s [%"PRId32"] " + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "UNKPID", + "ProduceRequest for %.*s [%" PRId32 + "] " "with %d message(s) failed " "due to unknown producer id " - "(%s, base seq %"PRId32", %d retries): " + "(%s, base seq %" PRId32 + ", %d retries): " "no risk of duplication/reordering: " "resetting PID and retrying", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), @@ 
-2804,31 +2718,30 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, "unknown producer id"); perr->incr_retry = 0; - perr->actions = RD_KAFKA_ERR_ACTION_RETRY; - perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; + perr->actions = RD_KAFKA_ERR_ACTION_RETRY; + perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; perr->update_next_ack = rd_false; perr->update_next_err = rd_true; break; } rd_kafka_idemp_set_fatal_error( - rk, perr->err, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed " - "due to unknown producer id (" - "broker %"PRId32" %s, base seq %"PRId32", %d retries): " - "unable to retry without risking " - "duplication/reordering", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq, - firstmsg->rkm_u.producer.retries); - - perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; - perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; + rk, perr->err, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "due to unknown producer id (" + "broker %" PRId32 " %s, base seq %" PRId32 + ", %d retries): " + "unable to retry without risking " + "duplication/reordering", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_msgq_len(&batch->msgq), + rkb->rkb_nodeid, rd_kafka_pid2str(batch->pid), + batch->first_seq, firstmsg->rkm_u.producer.retries); + + perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; + perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; perr->update_next_ack = rd_false; perr->update_next_err = rd_true; break; @@ -2859,11 +2772,11 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * @locality broker thread (but not necessarily the leader broker) * @locks none */ -static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, - const rd_kafka_buf_t *request, - rd_kafka_msgbatch_t *batch, - struct rd_kafka_Produce_err *perr) { - 
rd_kafka_t *rk = rkb->rkb_rk; +static int rd_kafka_handle_Produce_error(rd_kafka_broker_t *rkb, + const rd_kafka_buf_t *request, + rd_kafka_msgbatch_t *batch, + struct rd_kafka_Produce_err *perr) { + rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_toppar_t *rktp = batch->rktp; int is_leader; @@ -2877,8 +2790,8 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * check once if we're the leader (which allows relaxed * locking), and cache the current rktp's eos state vars. */ rd_kafka_toppar_lock(rktp); - is_leader = rktp->rktp_broker == rkb; - perr->rktp_pid = rktp->rktp_eos.pid; + is_leader = rktp->rktp_broker == rkb; + perr->rktp_pid = rktp->rktp_eos.pid; perr->next_ack_seq = rktp->rktp_eos.next_ack_seq; perr->next_err_seq = rktp->rktp_eos.next_err_seq; rd_kafka_toppar_unlock(rktp); @@ -2892,79 +2805,75 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * all other errors are considered permanent failures. * (also see rd_kafka_err_action() for the default actions). 
*/ perr->actions = rd_kafka_err_action( - rkb, perr->err, request, + rkb, perr->err, request, - RD_KAFKA_ERR_ACTION_REFRESH| + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR__TRANSPORT, + RD_KAFKA_RESP_ERR__TRANSPORT, - RD_KAFKA_ERR_ACTION_REFRESH| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, - RD_KAFKA_ERR_ACTION_PERMANENT| + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, - RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, - RD_KAFKA_ERR_ACTION_REFRESH| - RD_KAFKA_ERR_ACTION_RETRY| + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, - RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, - RD_KAFKA_ERR_ACTION_RETRY| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, - RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, - RD_KAFKA_ERR_ACTION_RETRY| + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND, - RD_KAFKA_ERR_ACTION_RETRY| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, + RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, - RD_KAFKA_ERR_ACTION_RETRY| + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR__TIMED_OUT, + RD_KAFKA_RESP_ERR__TIMED_OUT, - RD_KAFKA_ERR_ACTION_PERMANENT| + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, + RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, - /* All Idempotent Producer-specific errors are - * initially 
set as permanent errors, - * special handling may change the actions. */ - RD_KAFKA_ERR_ACTION_PERMANENT| + /* All Idempotent Producer-specific errors are + * initially set as permanent errors, + * special handling may change the actions. */ + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, - RD_KAFKA_ERR_ACTION_PERMANENT| + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER, + RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER, - RD_KAFKA_ERR_ACTION_PERMANENT| + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, - RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, + RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, - RD_KAFKA_ERR_ACTION_PERMANENT| + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, - RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH, + RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH, - /* Message was purged from out-queue due to - * Idempotent Producer Id change */ - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR__RETRY, + /* Message was purged from out-queue due to + * Idempotent Producer Id change */ + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__RETRY, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); rd_rkb_dbg(rkb, MSG, "MSGSET", - "%s [%"PRId32"]: MessageSet with %i message(s) " - "(MsgId %"PRIu64", BaseSeq %"PRId32") " + "%s [%" PRId32 + "]: MessageSet with %i message(s) " + "(MsgId %" PRIu64 ", BaseSeq %" PRId32 + ") " "encountered error: %s (actions %s)%s", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - batch->first_msgid, batch->first_seq, - rd_kafka_err2str(perr->err), + rd_kafka_msgq_len(&batch->msgq), batch->first_msgid, + batch->first_seq, rd_kafka_err2str(perr->err), rd_kafka_actions2str(perr->actions), is_leader ? 
"" : " [NOT LEADER]"); @@ -2992,19 +2901,19 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, /* Save the last error for debugging sub-sequent errors, * useful for Idempotent Producer throubleshooting. */ rd_kafka_toppar_lock(rktp); - rktp->rktp_last_err.err = perr->err; - rktp->rktp_last_err.actions = perr->actions; - rktp->rktp_last_err.ts = rd_clock(); - rktp->rktp_last_err.base_seq = batch->first_seq; - rktp->rktp_last_err.last_seq = perr->last_seq; + rktp->rktp_last_err.err = perr->err; + rktp->rktp_last_err.actions = perr->actions; + rktp->rktp_last_err.ts = rd_clock(); + rktp->rktp_last_err.base_seq = batch->first_seq; + rktp->rktp_last_err.last_seq = perr->last_seq; rktp->rktp_last_err.base_msgid = batch->first_msgid; rd_kafka_toppar_unlock(rktp); /* * Handle actions */ - if (perr->actions & (RD_KAFKA_ERR_ACTION_REFRESH | - RD_KAFKA_ERR_ACTION_RETRY)) { + if (perr->actions & + (RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY)) { /* Retry (refresh also implies retry) */ if (perr->actions & RD_KAFKA_ERR_ACTION_REFRESH) { @@ -3017,8 +2926,8 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * - it is a temporary error (hopefully) * - there is no chance of duplicate delivery */ - rd_kafka_toppar_leader_unavailable( - rktp, "produce", perr->err); + rd_kafka_toppar_leader_unavailable(rktp, "produce", + perr->err); /* We can't be certain the request wasn't * sent in case of transport failure, @@ -3082,8 +2991,7 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * for each message is honoured, any messages that * would exceeded the retry count will not be * moved but instead fail below. 
*/ - rd_kafka_toppar_retry_msgq(rktp, &batch->msgq, - perr->incr_retry, + rd_kafka_toppar_retry_msgq(rktp, &batch->msgq, perr->incr_retry, perr->status); if (rd_kafka_msgq_len(&batch->msgq) == 0) { @@ -3101,43 +3009,41 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, /* Producer was fenced by new transactional producer * with the same transactional.id */ rd_kafka_txn_set_fatal_error( - rk, RD_DO_LOCK, - RD_KAFKA_RESP_ERR__FENCED, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed: %s " - "(broker %"PRId32" %s, base seq %"PRId32"): " - "transactional producer fenced by newer " - "producer instance", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rd_kafka_err2str(perr->err), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq); + rk, RD_DO_LOCK, RD_KAFKA_RESP_ERR__FENCED, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed: %s " + "(broker %" PRId32 " %s, base seq %" PRId32 + "): " + "transactional producer fenced by newer " + "producer instance", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), + rd_kafka_err2str(perr->err), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq); /* Drain outstanding requests and reset PID. */ rd_kafka_idemp_drain_reset( - rk, "fenced by new transactional producer"); + rk, "fenced by new transactional producer"); } else if (rd_kafka_is_transactional(rk)) { /* When transactional any permanent produce failure * would lead to an incomplete transaction, so raise * an abortable transaction error. 
*/ rd_kafka_txn_set_abortable_error( - rk, - perr->err, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed: %s " - "(broker %"PRId32" %s, base seq %"PRId32"): " - "current transaction must be aborted", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rd_kafka_err2str(perr->err), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq); + rk, perr->err, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed: %s " + "(broker %" PRId32 " %s, base seq %" PRId32 + "): " + "current transaction must be aborted", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), + rd_kafka_err2str(perr->err), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq); } else if (rk->rk_conf.eos.gapless) { /* A permanent non-idempotent error will lead to @@ -3146,30 +3052,30 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * To satisfy the gapless guarantee we need to raise * a fatal error here. 
*/ rd_kafka_idemp_set_fatal_error( - rk, RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed: " - "%s (broker %"PRId32" %s, base seq %"PRId32"): " - "unable to satisfy gap-less guarantee", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rd_kafka_err2str(perr->err), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq); + rk, RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed: " + "%s (broker %" PRId32 " %s, base seq %" PRId32 + "): " + "unable to satisfy gap-less guarantee", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), + rd_kafka_err2str(perr->err), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq); /* Drain outstanding requests and reset PID. */ rd_kafka_idemp_drain_reset( - rk, "unable to satisfy gap-less guarantee"); + rk, "unable to satisfy gap-less guarantee"); } else { /* If gapless is not set we bump the Epoch and * renumber the messages to send. 
*/ /* Drain outstanding requests and bump the epoch .*/ - rd_kafka_idemp_drain_epoch_bump( - rk, perr->err, "message sequence gap"); + rd_kafka_idemp_drain_epoch_bump(rk, perr->err, + "message sequence gap"); } perr->update_next_ack = rd_false; @@ -3203,10 +3109,10 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * @locality broker thread (but not necessarily the leader broker thread) */ static void -rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, - rd_kafka_msgbatch_t *batch, - int32_t next_seq) { - rd_kafka_t *rk = rkb->rkb_rk; +rd_kafka_handle_idempotent_Produce_success(rd_kafka_broker_t *rkb, + rd_kafka_msgbatch_t *batch, + int32_t next_seq) { + rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_toppar_t *rktp = batch->rktp; char fatal_err[512]; uint64_t first_msgid, last_msgid; @@ -3214,7 +3120,7 @@ rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, *fatal_err = '\0'; first_msgid = rd_kafka_msgq_first(&batch->msgq)->rkm_u.producer.msgid; - last_msgid = rd_kafka_msgq_last(&batch->msgq)->rkm_u.producer.msgid; + last_msgid = rd_kafka_msgq_last(&batch->msgq)->rkm_u.producer.msgid; rd_kafka_toppar_lock(rktp); @@ -3242,25 +3148,27 @@ rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, * the error string here and call * set_fatal_error() below after * toppar lock has been released. 
*/ - rd_snprintf( - fatal_err, sizeof(fatal_err), - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) " - "succeeded when expecting failure " - "(broker %"PRId32" %s, " - "base seq %"PRId32", " - "next ack seq %"PRId32", " - "next err seq %"PRId32": " - "unable to retry without risking " - "duplication/reordering", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq, - rktp->rktp_eos.next_ack_seq, - rktp->rktp_eos.next_err_seq); + rd_snprintf(fatal_err, sizeof(fatal_err), + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) " + "succeeded when expecting failure " + "(broker %" PRId32 + " %s, " + "base seq %" PRId32 + ", " + "next ack seq %" PRId32 + ", " + "next err seq %" PRId32 + ": " + "unable to retry without risking " + "duplication/reordering", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq, + rktp->rktp_eos.next_ack_seq, + rktp->rktp_eos.next_err_seq); rktp->rktp_eos.next_err_seq = next_seq; } @@ -3269,8 +3177,7 @@ rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, /* Advance next expected err and/or ack sequence */ /* Only step err seq if it hasn't diverged. */ - if (rktp->rktp_eos.next_err_seq == - rktp->rktp_eos.next_ack_seq) + if (rktp->rktp_eos.next_err_seq == rktp->rktp_eos.next_ack_seq) rktp->rktp_eos.next_err_seq = next_seq; rktp->rktp_eos.next_ack_seq = next_seq; @@ -3289,7 +3196,7 @@ rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, * the toppar lock. 
*/ if (unlikely(*fatal_err)) rd_kafka_idemp_set_fatal_error( - rk, RD_KAFKA_RESP_ERR__INCONSISTENT, "%s", fatal_err); + rk, RD_KAFKA_RESP_ERR__INCONSISTENT, "%s", fatal_err); } @@ -3301,16 +3208,15 @@ rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, * @localiy broker thread (but not necessarily the toppar's handler thread) * @locks none */ -static void -rd_kafka_msgbatch_handle_Produce_result ( - rd_kafka_broker_t *rkb, - rd_kafka_msgbatch_t *batch, - rd_kafka_resp_err_t err, - const struct rd_kafka_Produce_result *presult, - const rd_kafka_buf_t *request) { - - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_toppar_t *rktp = batch->rktp; +static void rd_kafka_msgbatch_handle_Produce_result( + rd_kafka_broker_t *rkb, + rd_kafka_msgbatch_t *batch, + rd_kafka_resp_err_t err, + const struct rd_kafka_Produce_result *presult, + const rd_kafka_buf_t *request) { + + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_toppar_t *rktp = batch->rktp; rd_kafka_msg_status_t status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; rd_bool_t last_inflight; int32_t next_seq; @@ -3327,12 +3233,12 @@ rd_kafka_msgbatch_handle_Produce_result ( if (likely(!err)) { rd_rkb_dbg(rkb, MSG, "MSGSET", - "%s [%"PRId32"]: MessageSet with %i message(s) " - "(MsgId %"PRIu64", BaseSeq %"PRId32") delivered", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - batch->first_msgid, batch->first_seq); + "%s [%" PRId32 + "]: MessageSet with %i message(s) " + "(MsgId %" PRIu64 ", BaseSeq %" PRId32 ") delivered", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), batch->first_msgid, + batch->first_seq); if (rktp->rktp_rkt->rkt_conf.required_acks != 0) status = RD_KAFKA_MSG_STATUS_PERSISTED; @@ -3343,14 +3249,13 @@ rd_kafka_msgbatch_handle_Produce_result ( } else { /* Error handling */ struct rd_kafka_Produce_err perr = { - .err = err, - .incr_retry = 1, - .status = status, - .update_next_ack = rd_true, - .update_next_err = 
rd_true, - .last_seq = (batch->first_seq + - rd_kafka_msgq_len(&batch->msgq) - 1) - }; + .err = err, + .incr_retry = 1, + .status = status, + .update_next_ack = rd_true, + .update_next_err = rd_true, + .last_seq = (batch->first_seq + + rd_kafka_msgq_len(&batch->msgq) - 1)}; rd_kafka_handle_Produce_error(rkb, request, batch, &perr); @@ -3372,10 +3277,8 @@ rd_kafka_msgbatch_handle_Produce_result ( /* Messages to retry will have been removed from the request's queue */ if (likely(rd_kafka_msgq_len(&batch->msgq) > 0)) { /* Set offset, timestamp and status for each message. */ - rd_kafka_msgq_set_metadata(&batch->msgq, - rkb->rkb_nodeid, - presult->offset, - presult->timestamp, + rd_kafka_msgq_set_metadata(&batch->msgq, rkb->rkb_nodeid, + presult->offset, presult->timestamp, status); /* Enqueue messages for delivery report. */ @@ -3401,36 +3304,30 @@ rd_kafka_msgbatch_handle_Produce_result ( * * @locality broker thread (but not necessarily the leader broker thread) */ -static void rd_kafka_handle_Produce (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_msgbatch_t *batch = &request->rkbuf_batch; - rd_kafka_toppar_t *rktp = batch->rktp; +static void rd_kafka_handle_Produce(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_msgbatch_t *batch = &request->rkbuf_batch; + rd_kafka_toppar_t *rktp = batch->rktp; struct rd_kafka_Produce_result result = { - .offset = RD_KAFKA_OFFSET_INVALID, - .timestamp = -1 - }; + .offset = RD_KAFKA_OFFSET_INVALID, .timestamp = -1}; /* Unit test interface: inject errors */ if (unlikely(rk->rk_conf.ut.handle_ProduceResponse != NULL)) { err = rk->rk_conf.ut.handle_ProduceResponse( - rkb->rkb_rk, - rkb->rkb_nodeid, - batch->first_msgid, - err); + rkb->rkb_rk, rkb->rkb_nodeid, batch->first_msgid, err); } /* Parse Produce reply (unless the request 
errored) */ if (!err && reply) - err = rd_kafka_handle_Produce_parse(rkb, rktp, - reply, request, + err = rd_kafka_handle_Produce_parse(rkb, rktp, reply, request, &result); - rd_kafka_msgbatch_handle_Produce_result(rkb, batch, err, - &result, request); + rd_kafka_msgbatch_handle_Produce_result(rkb, batch, err, &result, + request); } @@ -3441,9 +3338,10 @@ static void rd_kafka_handle_Produce (rd_kafka_t *rk, * * @locality broker thread */ -int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, - const rd_kafka_pid_t pid, - uint64_t epoch_base_msgid) { +int rd_kafka_ProduceRequest(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const rd_kafka_pid_t pid, + uint64_t epoch_base_msgid) { rd_kafka_buf_t *rkbuf; rd_kafka_topic_t *rkt = rktp->rktp_rkt; size_t MessageSetSize = 0; @@ -3456,10 +3354,9 @@ int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, * Create ProduceRequest with as many messages from the toppar * transmit queue as possible. 
*/ - rkbuf = rd_kafka_msgset_create_ProduceRequest(rkb, rktp, - &rktp->rktp_xmit_msgq, - pid, epoch_base_msgid, - &MessageSetSize); + rkbuf = rd_kafka_msgset_create_ProduceRequest( + rkb, rktp, &rktp->rktp_xmit_msgq, pid, epoch_base_msgid, + &MessageSetSize); if (unlikely(!rkbuf)) return 0; @@ -3474,8 +3371,10 @@ int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, /* Use timeout from first message in batch */ now = rd_clock(); - first_msg_timeout = (rd_kafka_msgq_first(&rkbuf->rkbuf_batch.msgq)-> - rkm_ts_timeout - now) / 1000; + first_msg_timeout = + (rd_kafka_msgq_first(&rkbuf->rkbuf_batch.msgq)->rkm_ts_timeout - + now) / + 1000; if (unlikely(first_msg_timeout <= 0)) { /* Message has already timed out, allow 100 ms @@ -3490,8 +3389,7 @@ int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, * capped by socket.timeout.ms */ rd_kafka_buf_set_abs_timeout(rkbuf, tmout, now); - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, - RD_KAFKA_NO_REPLYQ, + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, RD_KAFKA_NO_REPLYQ, rd_kafka_handle_Produce, NULL); return cnt; @@ -3511,13 +3409,14 @@ int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, * updated with a human readable error string. 
*/ rd_kafka_resp_err_t -rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *new_topics /*(NewTopic_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_CreateTopicsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *new_topics /*(NewTopic_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int features; @@ -3532,7 +3431,7 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, } ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_CreateTopics, 0, 4, &features); + rkb, RD_KAFKAP_CreateTopics, 0, 4, &features); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "Topic Admin API (KIP-4) not supported " @@ -3551,9 +3450,8 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, } rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreateTopics, 1, - 4 + - (rd_list_cnt(new_topics) * 200) + - 4 + 1); + 4 + (rd_list_cnt(new_topics) * 200) + + 4 + 1); /* #topics */ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_topics)); @@ -3601,9 +3499,8 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, /* num_partitions */ rd_kafka_buf_write_i32(rkbuf, newt->num_partitions); /* replication_factor */ - rd_kafka_buf_write_i16(rkbuf, - (int16_t)newt-> - replication_factor); + rd_kafka_buf_write_i16( + rkbuf, (int16_t)newt->replication_factor); } /* #replica_assignment */ @@ -3611,7 +3508,7 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, /* Replicas per partition, see rdkafka_admin.[ch] * for how these are constructed. 
*/ - for (partition = 0 ; partition < rd_list_cnt(&newt->replicas); + for (partition = 0; partition < rd_list_cnt(&newt->replicas); partition++) { const rd_list_t *replicas; int ri = 0; @@ -3625,10 +3522,10 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, /* #replicas */ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(replicas)); - for (ri = 0 ; ri < rd_list_cnt(replicas) ; ri++) { + for (ri = 0; ri < rd_list_cnt(replicas); ri++) { /* replica */ rd_kafka_buf_write_i32( - rkbuf, rd_list_get_int32(replicas, ri)); + rkbuf, rd_list_get_int32(replicas, ri)); } } @@ -3648,13 +3545,12 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_i32(rkbuf, op_timeout); if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); if (ApiVersion >= 1) { /* validate_only */ - rd_kafka_buf_write_i8(rkbuf, - rd_kafka_confval_get_int(&options-> - validate_only)); + rd_kafka_buf_write_i8( + rkbuf, rd_kafka_confval_get_int(&options->validate_only)); } rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -3678,13 +3574,14 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, * updated with a human readable error string. 
*/ rd_kafka_resp_err_t -rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *del_topics /*(DeleteTopic_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_DeleteTopicsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *del_topics /*(DeleteTopic_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int features; @@ -3699,7 +3596,7 @@ rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, } ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_DeleteTopics, 0, 1, &features); + rkb, RD_KAFKAP_DeleteTopics, 0, 1, &features); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "Topic Admin API (KIP-4) not supported " @@ -3708,11 +3605,10 @@ rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; } - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteTopics, 1, - /* FIXME */ - 4 + - (rd_list_cnt(del_topics) * 100) + - 4); + rkbuf = + rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteTopics, 1, + /* FIXME */ + 4 + (rd_list_cnt(del_topics) * 100) + 4); /* #topics */ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_topics)); @@ -3725,7 +3621,7 @@ rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_i32(rkbuf, op_timeout); if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -3751,14 +3647,15 @@ rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, * updated with a human readable error string. 
*/ rd_kafka_resp_err_t -rd_kafka_DeleteRecordsRequest (rd_kafka_broker_t *rkb, - /*(rd_kafka_topic_partition_list_t*)*/ - const rd_list_t *offsets_list, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_DeleteRecordsRequest(rd_kafka_broker_t *rkb, + /*(rd_kafka_topic_partition_list_t*)*/ + const rd_list_t *offsets_list, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int features; @@ -3768,7 +3665,7 @@ rd_kafka_DeleteRecordsRequest (rd_kafka_broker_t *rkb, partitions = rd_list_elem(offsets_list, 0); ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_DeleteRecords, 0, 1, &features); + rkb, RD_KAFKAP_DeleteRecords, 0, 1, &features); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "DeleteRecords Admin API (KIP-107) not supported " @@ -3777,24 +3674,19 @@ rd_kafka_DeleteRecordsRequest (rd_kafka_broker_t *rkb, } rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteRecords, 1, - 4 + - (partitions->cnt * 100) + - 4); + 4 + (partitions->cnt * 100) + 4); rd_kafka_buf_write_topic_partitions( - rkbuf, partitions, - rd_false /*don't skip invalid offsets*/, - rd_false /*any offset*/, - rd_true /*do write offsets*/, - rd_false /*don't write epoch*/, - rd_false /*don't write metadata*/); + rkbuf, partitions, rd_false /*don't skip invalid offsets*/, + rd_false /*any offset*/, rd_true /*do write offsets*/, + rd_false /*don't write epoch*/, rd_false /*don't write metadata*/); /* timeout */ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); rd_kafka_buf_write_i32(rkbuf, op_timeout); if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); 
rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -3817,17 +3709,18 @@ rd_kafka_DeleteRecordsRequest (rd_kafka_broker_t *rkb, * updated with a human readable error string. */ rd_kafka_resp_err_t -rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, - /*(NewPartitions_t*)*/ - const rd_list_t *new_parts, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_CreatePartitionsRequest(rd_kafka_broker_t *rkb, + /*(NewPartitions_t*)*/ + const rd_list_t *new_parts, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; - int i = 0; + int i = 0; rd_kafka_NewPartitions_t *newp; int op_timeout; @@ -3838,7 +3731,7 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, } ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_CreatePartitions, 0, 0, NULL); + rkb, RD_KAFKAP_CreatePartitions, 0, 0, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "CreatePartitions (KIP-195) not supported " @@ -3848,9 +3741,8 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, } rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreatePartitions, 1, - 4 + - (rd_list_cnt(new_parts) * 200) + - 4 + 1); + 4 + (rd_list_cnt(new_parts) * 200) + + 4 + 1); /* #topics */ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_parts)); @@ -3872,8 +3764,8 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(&newp->replicas)); - while ((replicas = rd_list_elem(&newp->replicas, - ++pi))) { + while ( + (replicas = rd_list_elem(&newp->replicas, ++pi))) { int ri = 0; /* replica count */ @@ -3881,12 +3773,10 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, rd_list_cnt(replicas)); /* replica */ - for (ri = 0 ; ri < rd_list_cnt(replicas) ; - ri++) { + for (ri = 0; ri < 
rd_list_cnt(replicas); ri++) { rd_kafka_buf_write_i32( - rkbuf, - rd_list_get_int32(replicas, - ri)); + rkbuf, + rd_list_get_int32(replicas, ri)); } } } @@ -3897,11 +3787,11 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_i32(rkbuf, op_timeout); if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); /* validate_only */ rd_kafka_buf_write_i8( - rkbuf, rd_kafka_confval_get_int(&options->validate_only)); + rkbuf, rd_kafka_confval_get_int(&options->validate_only)); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -3924,13 +3814,14 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, * updated with a human readable error string. */ rd_kafka_resp_err_t -rd_kafka_AlterConfigsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *configs /*(ConfigResource_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_AlterConfigsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int i; @@ -3945,7 +3836,7 @@ rd_kafka_AlterConfigsRequest (rd_kafka_broker_t *rkb, } ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_AlterConfigs, 0, 0, NULL); + rkb, RD_KAFKAP_AlterConfigs, 0, 0, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "AlterConfigs (KIP-133) not supported " @@ -4009,11 +3900,11 @@ rd_kafka_AlterConfigsRequest (rd_kafka_broker_t *rkb, /* timeout */ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + 
rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); /* validate_only */ rd_kafka_buf_write_i8( - rkbuf, rd_kafka_confval_get_int(&options->validate_only)); + rkbuf, rd_kafka_confval_get_int(&options->validate_only)); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -4035,14 +3926,15 @@ rd_kafka_AlterConfigsRequest (rd_kafka_broker_t *rkb, * transmission, otherwise an error code and errstr will be * updated with a human readable error string. */ -rd_kafka_resp_err_t -rd_kafka_DescribeConfigsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *configs /*(ConfigResource_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_resp_err_t rd_kafka_DescribeConfigsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int i; @@ -4057,7 +3949,7 @@ rd_kafka_DescribeConfigsRequest (rd_kafka_broker_t *rkb, } ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_DescribeConfigs, 0, 1, NULL); + rkb, RD_KAFKAP_DescribeConfigs, 0, 1, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "DescribeConfigs (KIP-133) not supported " @@ -4107,7 +3999,7 @@ rd_kafka_DescribeConfigsRequest (rd_kafka_broker_t *rkb, /* timeout */ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -4130,13 +4022,14 @@ rd_kafka_DescribeConfigsRequest (rd_kafka_broker_t *rkb, * updated with a human readable error string. 
*/ rd_kafka_resp_err_t -rd_kafka_DeleteGroupsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *del_groups /*(DeleteGroup_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_DeleteGroupsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *del_groups /*(DeleteGroup_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int features; @@ -4144,7 +4037,7 @@ rd_kafka_DeleteGroupsRequest (rd_kafka_broker_t *rkb, rd_kafka_DeleteGroup_t *delt; ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_DeleteGroups, 0, 1, &features); + rkb, RD_KAFKAP_DeleteGroups, 0, 1, &features); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "DeleteGroups Admin API (KIP-229) not supported " @@ -4153,10 +4046,9 @@ rd_kafka_DeleteGroupsRequest (rd_kafka_broker_t *rkb, return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; } - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteGroups, 1, - 4 + - (rd_list_cnt(del_groups) * 100) + - 4); + rkbuf = + rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteGroups, 1, + 4 + (rd_list_cnt(del_groups) * 100) + 4); /* #groups */ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_groups)); @@ -4178,13 +4070,12 @@ rd_kafka_DeleteGroupsRequest (rd_kafka_broker_t *rkb, * @locality rdkafka main thread * @locks none */ -void -rd_kafka_handle_InitProducerId (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +void rd_kafka_handle_InitProducerId(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; int16_t error_code; rd_kafka_pid_t pid; @@ -4205,9 +4096,9 @@ rd_kafka_handle_InitProducerId 
(rd_kafka_t *rk, return; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: if (err == RD_KAFKA_RESP_ERR__DESTROY) return; @@ -4232,20 +4123,21 @@ rd_kafka_handle_InitProducerId (rd_kafka_t *rk, * updated with a human readable error string. */ rd_kafka_resp_err_t -rd_kafka_InitProducerIdRequest (rd_kafka_broker_t *rkb, - const char *transactional_id, - int transaction_timeout_ms, - const rd_kafka_pid_t *current_pid, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_InitProducerIdRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + int transaction_timeout_ms, + const rd_kafka_pid_t *current_pid, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion; if (current_pid) { ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_InitProducerId, 3, 4, NULL); + rkb, RD_KAFKAP_InitProducerId, 3, 4, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "InitProducerId (KIP-360) not supported by " @@ -4257,7 +4149,7 @@ rd_kafka_InitProducerIdRequest (rd_kafka_broker_t *rkb, } } else { ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_InitProducerId, 0, 4, NULL); + rkb, RD_KAFKAP_InitProducerId, 0, 4, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, @@ -4270,10 +4162,9 @@ rd_kafka_InitProducerIdRequest (rd_kafka_broker_t *rkb, } rkbuf = rd_kafka_buf_new_flexver_request( - rkb, RD_KAFKAP_InitProducerId, 1, - 2 + (transactional_id ? strlen(transactional_id) : 0) + - 4 + 8 + 4, - ApiVersion >= 2 /*flexver*/); + rkb, RD_KAFKAP_InitProducerId, 1, + 2 + (transactional_id ? 
strlen(transactional_id) : 0) + 4 + 8 + 4, + ApiVersion >= 2 /*flexver*/); /* transactional_id */ rd_kafka_buf_write_str(rkbuf, transactional_id, -1); @@ -4314,14 +4205,15 @@ rd_kafka_InitProducerIdRequest (rd_kafka_broker_t *rkb, * transmission, otherwise an error code. */ rd_kafka_resp_err_t -rd_kafka_AddPartitionsToTxnRequest (rd_kafka_broker_t *rkb, - const char *transactional_id, - rd_kafka_pid_t pid, - const rd_kafka_toppar_tqhead_t *rktps, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_AddPartitionsToTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + const rd_kafka_toppar_tqhead_t *rktps, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; rd_kafka_toppar_t *rktp; @@ -4331,7 +4223,7 @@ rd_kafka_AddPartitionsToTxnRequest (rd_kafka_broker_t *rkb, int TopicCnt = 0, PartCnt = 0; ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_AddPartitionsToTxn, 0, 0, NULL); + rkb, RD_KAFKAP_AddPartitionsToTxn, 0, 0, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "AddPartitionsToTxnRequest (KIP-98) not supported " @@ -4340,8 +4232,8 @@ rd_kafka_AddPartitionsToTxnRequest (rd_kafka_broker_t *rkb, return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; } - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_AddPartitionsToTxn, 1, - 500); + rkbuf = + rd_kafka_buf_new_request(rkb, RD_KAFKAP_AddPartitionsToTxn, 1, 500); /* transactional_id */ rd_kafka_buf_write_str(rkbuf, transactional_id, -1); @@ -4406,19 +4298,20 @@ rd_kafka_AddPartitionsToTxnRequest (rd_kafka_broker_t *rkb, * transmission, otherwise an error code. 
*/ rd_kafka_resp_err_t -rd_kafka_AddOffsetsToTxnRequest (rd_kafka_broker_t *rkb, - const char *transactional_id, - rd_kafka_pid_t pid, - const char *group_id, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_AddOffsetsToTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + const char *group_id, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_AddOffsetsToTxn, 0, 0, NULL); + rkb, RD_KAFKAP_AddOffsetsToTxn, 0, 0, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "AddOffsetsToTxnRequest (KIP-98) not supported " @@ -4427,8 +4320,8 @@ rd_kafka_AddOffsetsToTxnRequest (rd_kafka_broker_t *rkb, return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; } - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_AddOffsetsToTxn, 1, - 100); + rkbuf = + rd_kafka_buf_new_request(rkb, RD_KAFKAP_AddOffsetsToTxn, 1, 100); /* transactional_id */ rd_kafka_buf_write_str(rkbuf, transactional_id, -1); @@ -4460,20 +4353,20 @@ rd_kafka_AddOffsetsToTxnRequest (rd_kafka_broker_t *rkb, * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for * transmission, otherwise an error code. 
*/ -rd_kafka_resp_err_t -rd_kafka_EndTxnRequest (rd_kafka_broker_t *rkb, - const char *transactional_id, - rd_kafka_pid_t pid, - rd_bool_t committed, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_resp_err_t rd_kafka_EndTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + rd_bool_t committed, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; - ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_EndTxn, 0, 1, NULL); + ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, RD_KAFKAP_EndTxn, + 0, 1, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "EndTxnRequest (KIP-98) not supported " @@ -4482,8 +4375,7 @@ rd_kafka_EndTxnRequest (rd_kafka_broker_t *rkb, return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; } - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_EndTxn, 1, - 500); + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_EndTxn, 1, 500); /* transactional_id */ rd_kafka_buf_write_str(rkbuf, transactional_id, -1); @@ -4522,16 +4414,15 @@ rd_kafka_EndTxnRequest (rd_kafka_broker_t *rkb, * * @returns the number of messages added. */ -static int -ut_create_msgs (rd_kafka_msgq_t *rkmq, uint64_t msgid, int cnt) { +static int ut_create_msgs(rd_kafka_msgq_t *rkmq, uint64_t msgid, int cnt) { int i; - for (i = 0 ; i < cnt ; i++) { + for (i = 0; i < cnt; i++) { rd_kafka_msg_t *rkm; - rkm = ut_rd_kafka_msg_new(0); + rkm = ut_rd_kafka_msg_new(0); rkm->rkm_u.producer.msgid = msgid++; - rkm->rkm_ts_enq = rd_clock(); + rkm->rkm_ts_enq = rd_clock(); rkm->rkm_ts_timeout = rkm->rkm_ts_enq + (900 * 1000 * 1000); rd_kafka_msgq_enq(rkmq, rkm); @@ -4549,27 +4440,25 @@ ut_create_msgs (rd_kafka_msgq_t *rkmq, uint64_t msgid, int cnt) { * Batch 2,3 fails with out of order sequence * Retry Batch 1-3 should succeed. 
*/ -static int unittest_idempotent_producer (void) { +static int unittest_idempotent_producer(void) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_broker_t *rkb; -#define _BATCH_CNT 4 +#define _BATCH_CNT 4 #define _MSGS_PER_BATCH 3 const int msgcnt = _BATCH_CNT * _MSGS_PER_BATCH; int remaining_batches; uint64_t msgid = 1; rd_kafka_toppar_t *rktp; - rd_kafka_pid_t pid = { .id = 1000, .epoch = 0 }; - struct rd_kafka_Produce_result result = { - .offset = 1, - .timestamp = 1000 - }; + rd_kafka_pid_t pid = {.id = 1000, .epoch = 0}; + struct rd_kafka_Produce_result result = {.offset = 1, + .timestamp = 1000}; rd_kafka_queue_t *rkqu; rd_kafka_event_t *rkev; rd_kafka_buf_t *request[_BATCH_CNT]; - int rcnt = 0; - int retry_msg_cnt = 0; - int drcnt = 0; + int rcnt = 0; + int retry_msg_cnt = 0; + int drcnt = 0; rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq); const char *tmp; int i, r; @@ -4622,11 +4511,10 @@ static int unittest_idempotent_producer (void) { remaining_batches = _BATCH_CNT; /* Create a ProduceRequest for each batch */ - for (rcnt = 0 ; rcnt < remaining_batches ; rcnt++) { + for (rcnt = 0; rcnt < remaining_batches; rcnt++) { size_t msize; request[rcnt] = rd_kafka_msgset_create_ProduceRequest( - rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), 0, - &msize); + rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), 0, &msize); RD_UT_ASSERT(request[rcnt], "request #%d failed", rcnt); } @@ -4643,14 +4531,13 @@ static int unittest_idempotent_producer (void) { i = 0; r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); RD_UT_ASSERT(r == _MSGS_PER_BATCH, "."); - rd_kafka_msgbatch_handle_Produce_result( - rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_NO_ERROR, - &result, request[i]); + rd_kafka_msgbatch_handle_Produce_result(rkb, &request[i]->rkbuf_batch, + RD_KAFKA_RESP_ERR_NO_ERROR, + &result, request[i]); result.offset += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == 0, - "batch %d: expected no messages in rktp_msgq, not %d", - i, 
rd_kafka_msgq_len(&rktp->rktp_msgq)); + "batch %d: expected no messages in rktp_msgq, not %d", i, + rd_kafka_msgq_len(&rktp->rktp_msgq)); rd_kafka_buf_destroy(request[i]); remaining_batches--; @@ -4659,14 +4546,12 @@ static int unittest_idempotent_producer (void) { r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); RD_UT_ASSERT(r == _MSGS_PER_BATCH, "."); rd_kafka_msgbatch_handle_Produce_result( - rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, - &result, request[i]); + rkb, &request[i]->rkbuf_batch, + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, &result, request[i]); retry_msg_cnt += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, - "batch %d: expected %d messages in rktp_msgq, not %d", - i, retry_msg_cnt, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + "batch %d: expected %d messages in rktp_msgq, not %d", i, + retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq)); rd_kafka_buf_destroy(request[i]); /* Batch 2: OUT_OF_ORDER, triggering retry .. */ @@ -4674,28 +4559,26 @@ static int unittest_idempotent_producer (void) { r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); RD_UT_ASSERT(r == _MSGS_PER_BATCH, "."); rd_kafka_msgbatch_handle_Produce_result( - rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, - &result, request[i]); + rkb, &request[i]->rkbuf_batch, + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, &result, + request[i]); retry_msg_cnt += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, "batch %d: expected %d messages in rktp_xmit_msgq, not %d", - i, retry_msg_cnt, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq)); rd_kafka_buf_destroy(request[i]); /* Batch 3: OUT_OF_ORDER, triggering retry .. 
*/ i = 3; r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); rd_kafka_msgbatch_handle_Produce_result( - rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, - &result, request[i]); + rkb, &request[i]->rkbuf_batch, + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, &result, + request[i]); retry_msg_cnt += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, "batch %d: expected %d messages in rktp_xmit_msgq, not %d", - i, retry_msg_cnt, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq)); rd_kafka_buf_destroy(request[i]); @@ -4710,16 +4593,15 @@ static int unittest_idempotent_producer (void) { retry_msg_cnt, rd_kafka_msgq_len(&rkmq)); /* Sleep a short while to make sure the retry backoff expires. */ - rd_usleep(5*1000, NULL); /* 5ms */ + rd_usleep(5 * 1000, NULL); /* 5ms */ /* * Create requests for remaining batches. */ - for (rcnt = 0 ; rcnt < remaining_batches ; rcnt++) { + for (rcnt = 0; rcnt < remaining_batches; rcnt++) { size_t msize; request[rcnt] = rd_kafka_msgset_create_ProduceRequest( - rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), 0, - &msize); + rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), 0, &msize); RD_UT_ASSERT(request[rcnt], "Failed to create retry #%d (%d msgs in queue)", rcnt, rd_kafka_msgq_len(&rkmq)); @@ -4728,12 +4610,11 @@ static int unittest_idempotent_producer (void) { /* * Mock handling of each request, they will now succeed. 
*/ - for (i = 0 ; i < rcnt ; i++) { + for (i = 0; i < rcnt; i++) { r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); rd_kafka_msgbatch_handle_Produce_result( - rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_NO_ERROR, - &result, request[i]); + rkb, &request[i]->rkbuf_batch, RD_KAFKA_RESP_ERR_NO_ERROR, + &result, request[i]); result.offset += r; rd_kafka_buf_destroy(request[i]); } @@ -4741,8 +4622,7 @@ static int unittest_idempotent_producer (void) { retry_msg_cnt = 0; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, "batch %d: expected %d messages in rktp_xmit_msgq, not %d", - i, retry_msg_cnt, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq)); /* * Wait for delivery reports, they should all be successful. @@ -4771,8 +4651,7 @@ static int unittest_idempotent_producer (void) { RD_UT_ASSERT(r == 0, "expected outq to return 0, not %d", r); /* Verify the expected number of good delivery reports were seen */ - RD_UT_ASSERT(drcnt == msgcnt, - "expected %d DRs, not %d", msgcnt, drcnt); + RD_UT_ASSERT(drcnt == msgcnt, "expected %d DRs, not %d", msgcnt, drcnt); rd_kafka_queue_destroy(rkqu); rd_kafka_toppar_destroy(rktp); @@ -4786,7 +4665,7 @@ static int unittest_idempotent_producer (void) { /** * @brief Request/response unit tests */ -int unittest_request (void) { +int unittest_request(void) { int fails = 0; fails += unittest_idempotent_producer(); diff --git a/src/rdkafka_request.h b/src/rdkafka_request.h index f7be29d2f1..64f6211681 100644 --- a/src/rdkafka_request.h +++ b/src/rdkafka_request.h @@ -32,341 +32,356 @@ #include "rdkafka_feature.h" -#define RD_KAFKA_ERR_ACTION_PERMANENT 0x1 /* Permanent error */ -#define RD_KAFKA_ERR_ACTION_IGNORE 0x2 /* Error can be ignored */ -#define RD_KAFKA_ERR_ACTION_REFRESH 0x4 /* Refresh state (e.g., metadata) */ -#define RD_KAFKA_ERR_ACTION_RETRY 0x8 /* Retry request after backoff */ +#define RD_KAFKA_ERR_ACTION_PERMANENT 0x1 /* Permanent error */ +#define 
RD_KAFKA_ERR_ACTION_IGNORE 0x2 /* Error can be ignored */ +#define RD_KAFKA_ERR_ACTION_REFRESH 0x4 /* Refresh state (e.g., metadata) */ +#define RD_KAFKA_ERR_ACTION_RETRY 0x8 /* Retry request after backoff */ #define RD_KAFKA_ERR_ACTION_INFORM 0x10 /* Inform application about err */ -#define RD_KAFKA_ERR_ACTION_SPECIAL 0x20 /* Special-purpose, depends on context */ +#define RD_KAFKA_ERR_ACTION_SPECIAL \ + 0x20 /* Special-purpose, depends on context */ #define RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED 0x40 /* ProduceReq msg status */ -#define RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED 0x80 /* ProduceReq msg status */ -#define RD_KAFKA_ERR_ACTION_MSG_PERSISTED 0x100 /* ProduceReq msg status */ -#define RD_KAFKA_ERR_ACTION_FATAL 0x200 /**< Fatal error */ -#define RD_KAFKA_ERR_ACTION_END 0 /* var-arg sentinel */ +#define RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED \ + 0x80 /* ProduceReq msg status */ +#define RD_KAFKA_ERR_ACTION_MSG_PERSISTED 0x100 /* ProduceReq msg status */ +#define RD_KAFKA_ERR_ACTION_FATAL 0x200 /**< Fatal error */ +#define RD_KAFKA_ERR_ACTION_END 0 /* var-arg sentinel */ /** @macro bitmask of the message persistence flags */ -#define RD_KAFKA_ERR_ACTION_MSG_FLAGS \ - (RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED | \ - RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED | \ +#define RD_KAFKA_ERR_ACTION_MSG_FLAGS \ + (RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED | \ + RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED | \ RD_KAFKA_ERR_ACTION_MSG_PERSISTED) -int rd_kafka_err_action (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const rd_kafka_buf_t *request, ...); +int rd_kafka_err_action(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + const rd_kafka_buf_t *request, + ...); -const char *rd_kafka_actions2str (int actions); +const char *rd_kafka_actions2str(int actions); rd_kafka_topic_partition_list_t * -rd_kafka_buf_read_topic_partitions (rd_kafka_buf_t *rkbuf, - size_t estimated_part_cnt, - rd_bool_t read_offset, - rd_bool_t read_part_errs); -int 
rd_kafka_buf_write_topic_partitions ( - rd_kafka_buf_t *rkbuf, - const rd_kafka_topic_partition_list_t *parts, - rd_bool_t skip_invalid_offsets, - rd_bool_t only_invalid_offsets, - rd_bool_t write_Offset, - rd_bool_t write_Epoch, - rd_bool_t write_Metadata); +rd_kafka_buf_read_topic_partitions(rd_kafka_buf_t *rkbuf, + size_t estimated_part_cnt, + rd_bool_t read_offset, + rd_bool_t read_part_errs); +int rd_kafka_buf_write_topic_partitions( + rd_kafka_buf_t *rkbuf, + const rd_kafka_topic_partition_list_t *parts, + rd_bool_t skip_invalid_offsets, + rd_bool_t only_invalid_offsets, + rd_bool_t write_Offset, + rd_bool_t write_Epoch, + rd_bool_t write_Metadata); rd_kafka_resp_err_t -rd_kafka_FindCoordinatorRequest (rd_kafka_broker_t *rkb, - rd_kafka_coordtype_t coordtype, - const char *coordkey, +rd_kafka_FindCoordinatorRequest(rd_kafka_broker_t *rkb, + rd_kafka_coordtype_t coordtype, + const char *coordkey, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t +rd_kafka_handle_ListOffsets(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t *offsets, + int *actionsp); + +void rd_kafka_ListOffsetsRequest(rd_kafka_broker_t *rkb, + rd_kafka_topic_partition_list_t *offsets, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque); -rd_kafka_resp_err_t rd_kafka_handle_ListOffsets (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t - *offsets, - int *actionsp); - -void rd_kafka_ListOffsetsRequest (rd_kafka_broker_t *rkb, - rd_kafka_topic_partition_list_t *offsets, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - rd_kafka_resp_err_t -rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - 
rd_kafka_topic_partition_list_t **offsets, - rd_bool_t update_toppar, - rd_bool_t add_part, - rd_bool_t allow_retry); - -void rd_kafka_op_handle_OffsetFetch (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque); +rd_kafka_handle_OffsetFetch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t **offsets, + rd_bool_t update_toppar, + rd_bool_t add_part, + rd_bool_t allow_retry); + +void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); -void rd_kafka_OffsetFetchRequest (rd_kafka_broker_t *rkb, - rd_kafka_topic_partition_list_t *parts, - rd_bool_t require_stable, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, + rd_kafka_topic_partition_list_t *parts, + rd_bool_t require_stable, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); rd_kafka_resp_err_t -rd_kafka_handle_OffsetCommit (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t *offsets); -int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, - rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *offsets, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque, const char *reason); +rd_kafka_handle_OffsetCommit(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t *offsets); +int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb, + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *offsets, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque, + const 
char *reason); rd_kafka_resp_err_t -rd_kafka_OffsetDeleteRequest (rd_kafka_broker_t *rkb, - /** (rd_kafka_DeleteConsumerGroupOffsets_t*) */ - const rd_list_t *del_grpoffsets, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_OffsetDeleteRequest(rd_kafka_broker_t *rkb, + /** (rd_kafka_DeleteConsumerGroupOffsets_t*) */ + const rd_list_t *del_grpoffsets, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + + +void rd_kafka_JoinGroupRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + const rd_kafkap_str_t *protocol_type, + const rd_list_t *topics, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); -void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - const rd_kafkap_str_t *member_id, - const rd_kafkap_str_t *group_instance_id, - const rd_kafkap_str_t *protocol_type, - const rd_list_t *topics, +void rd_kafka_LeaveGroupRequest(rd_kafka_broker_t *rkb, + const char *group_id, + const char *member_id, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque); +void rd_kafka_handle_LeaveGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); +void rd_kafka_SyncGroupRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + int32_t generation_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + const rd_kafka_group_member_t *assignments, + int assignment_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); +void rd_kafka_handle_SyncGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t 
*request, + void *opaque); -void rd_kafka_LeaveGroupRequest (rd_kafka_broker_t *rkb, - const char *group_id, - const char *member_id, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); -void rd_kafka_handle_LeaveGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque); - -void rd_kafka_SyncGroupRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - int32_t generation_id, - const rd_kafkap_str_t *member_id, - const rd_kafkap_str_t *group_instance_id, - const rd_kafka_group_member_t - *assignments, - int assignment_cnt, +void rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque); -void rd_kafka_handle_SyncGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque); -void rd_kafka_ListGroupsRequest (rd_kafka_broker_t *rkb, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +void rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb, + const char **groups, + int group_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); -void rd_kafka_DescribeGroupsRequest (rd_kafka_broker_t *rkb, - const char **groups, int group_cnt, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + int32_t generation_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb, + const rd_list_t *topics, + const char *reason, + rd_bool_t allow_auto_create_topics, + rd_bool_t cgrp_update, + rd_kafka_op_t *rko); -void rd_kafka_HeartbeatRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t 
*group_id, - int32_t generation_id, - const rd_kafkap_str_t *member_id, - const rd_kafkap_str_t *group_instance_id, +rd_kafka_resp_err_t +rd_kafka_handle_ApiVersion(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + struct rd_kafka_ApiVersion **apis, + size_t *api_cnt); +void rd_kafka_ApiVersionRequest(rd_kafka_broker_t *rkb, + int16_t ApiVersion, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque); -rd_kafka_resp_err_t -rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, - const rd_list_t *topics, const char *reason, - rd_bool_t allow_auto_create_topics, - rd_bool_t cgrp_update, - rd_kafka_op_t *rko); +void rd_kafka_SaslHandshakeRequest(rd_kafka_broker_t *rkb, + const char *mechanism, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +void rd_kafka_handle_SaslAuthenticate(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); +void rd_kafka_SaslAuthenticateRequest(rd_kafka_broker_t *rkb, + const void *buf, + size_t size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +int rd_kafka_ProduceRequest(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const rd_kafka_pid_t pid, + uint64_t epoch_base_msgid); rd_kafka_resp_err_t -rd_kafka_handle_ApiVersion (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - struct rd_kafka_ApiVersion **apis, - size_t *api_cnt); -void rd_kafka_ApiVersionRequest (rd_kafka_broker_t *rkb, - int16_t ApiVersion, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - -void rd_kafka_SaslHandshakeRequest (rd_kafka_broker_t *rkb, - const char *mechanism, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - -void -rd_kafka_handle_SaslAuthenticate (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - 
rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque); -void rd_kafka_SaslAuthenticateRequest (rd_kafka_broker_t *rkb, - const void *buf, size_t size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - -int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, - const rd_kafka_pid_t pid, - uint64_t epoch_base_msgid); +rd_kafka_CreateTopicsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *new_topics /*(NewTopic_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); rd_kafka_resp_err_t -rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *new_topics /*(NewTopic_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_DeleteTopicsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *del_topics /*(DeleteTopic_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t rd_kafka_CreatePartitionsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *new_parts /*(NewPartitions_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); rd_kafka_resp_err_t -rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *del_topics /*(DeleteTopic_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_AlterConfigsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void 
*opaque); + +rd_kafka_resp_err_t rd_kafka_DescribeConfigsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); rd_kafka_resp_err_t -rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *new_parts /*(NewPartitions_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_DeleteGroupsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *del_groups /*(DeleteGroup_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +void rd_kafka_handle_InitProducerId(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); rd_kafka_resp_err_t -rd_kafka_AlterConfigsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *configs /*(ConfigResource_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_InitProducerIdRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + int transaction_timeout_ms, + const rd_kafka_pid_t *current_pid, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); rd_kafka_resp_err_t -rd_kafka_DescribeConfigsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *configs /*(ConfigResource_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_AddPartitionsToTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + const rd_kafka_toppar_tqhead_t *rktps, + char *errstr, 
+ size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); rd_kafka_resp_err_t -rd_kafka_DeleteGroupsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *del_groups /*(DeleteGroup_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - -void -rd_kafka_handle_InitProducerId (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque); - -rd_kafka_resp_err_t -rd_kafka_InitProducerIdRequest (rd_kafka_broker_t *rkb, +rd_kafka_AddOffsetsToTxnRequest(rd_kafka_broker_t *rkb, const char *transactional_id, - int transaction_timeout_ms, - const rd_kafka_pid_t *current_pid, - char *errstr, size_t errstr_size, + rd_kafka_pid_t pid, + const char *group_id, + char *errstr, + size_t errstr_size, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque); -rd_kafka_resp_err_t -rd_kafka_AddPartitionsToTxnRequest (rd_kafka_broker_t *rkb, - const char *transactional_id, - rd_kafka_pid_t pid, - const rd_kafka_toppar_tqhead_t *rktps, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - -rd_kafka_resp_err_t -rd_kafka_AddOffsetsToTxnRequest (rd_kafka_broker_t *rkb, - const char *transactional_id, - rd_kafka_pid_t pid, - const char *group_id, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_resp_err_t rd_kafka_EndTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + rd_bool_t committed, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); -rd_kafka_resp_err_t -rd_kafka_EndTxnRequest (rd_kafka_broker_t *rkb, - const char *transactional_id, - rd_kafka_pid_t pid, - rd_bool_t committed, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t 
replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - -int unittest_request (void); +int unittest_request(void); rd_kafka_resp_err_t -rd_kafka_DeleteRecordsRequest (rd_kafka_broker_t *rkb, - /*(rd_topic_partition_list_t*)*/ - const rd_list_t *offsets_list, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_DeleteRecordsRequest(rd_kafka_broker_t *rkb, + /*(rd_topic_partition_list_t*)*/ + const rd_list_t *offsets_list, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); #endif /* _RDKAFKA_REQUEST_H_ */ diff --git a/src/rdkafka_roundrobin_assignor.c b/src/rdkafka_roundrobin_assignor.c index a3d826b709..6cb9193645 100644 --- a/src/rdkafka_roundrobin_assignor.c +++ b/src/rdkafka_roundrobin_assignor.c @@ -30,7 +30,8 @@ /** - * Source: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java + * Source: + * https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java * * The roundrobin assignor lays out all the available partitions and all the * available consumers. 
It then proceeds to do a roundrobin assignment from @@ -48,63 +49,61 @@ * C1: [t0p1, t1p0, t1p2] */ -rd_kafka_resp_err_t -rd_kafka_roundrobin_assignor_assign_cb (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas, - const char *member_id, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t - **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, size_t errstr_size, - void *opaque) { +rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_assign_cb( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque) { unsigned int ti; - int next = -1; /* Next member id */ + int next = -1; /* Next member id */ - /* Sort topics by name */ - qsort(eligible_topics, eligible_topic_cnt, sizeof(*eligible_topics), - rd_kafka_assignor_topic_cmp); + /* Sort topics by name */ + qsort(eligible_topics, eligible_topic_cnt, sizeof(*eligible_topics), + rd_kafka_assignor_topic_cmp); - /* Sort members by name */ - qsort(members, member_cnt, sizeof(*members), - rd_kafka_group_member_cmp); + /* Sort members by name */ + qsort(members, member_cnt, sizeof(*members), rd_kafka_group_member_cmp); - for (ti = 0 ; ti < eligible_topic_cnt ; ti++) { + for (ti = 0; ti < eligible_topic_cnt; ti++) { rd_kafka_assignor_topic_t *eligible_topic = eligible_topics[ti]; - int partition; + int partition; - /* For each topic+partition, assign one member (in a cyclic - * iteration) per partition until the partitions are exhausted*/ - for (partition = 0 ; - partition < eligible_topic->metadata->partition_cnt ; - partition++) { - rd_kafka_group_member_t *rkgm; + /* For each topic+partition, assign one member (in a cyclic + * iteration) per partition until the partitions are exhausted*/ + for (partition = 
0; + partition < eligible_topic->metadata->partition_cnt; + partition++) { + rd_kafka_group_member_t *rkgm; /* Scan through members until we find one with a * subscription to this topic. */ do { - next = (next+1) % member_cnt; + next = (next + 1) % member_cnt; } while (!rd_kafka_group_member_find_subscription( - rk, &members[next], - eligible_topic->metadata->topic)); + rk, &members[next], + eligible_topic->metadata->topic)); - rkgm = &members[next]; + rkgm = &members[next]; - rd_kafka_dbg(rk, CGRP, "ASSIGN", - "roundrobin: Member \"%s\": " - "assigned topic %s partition %d", - rkgm->rkgm_member_id->str, - eligible_topic->metadata->topic, - partition); + rd_kafka_dbg(rk, CGRP, "ASSIGN", + "roundrobin: Member \"%s\": " + "assigned topic %s partition %d", + rkgm->rkgm_member_id->str, + eligible_topic->metadata->topic, + partition); - rd_kafka_topic_partition_list_add( - rkgm->rkgm_assignment, - eligible_topic->metadata->topic, partition); - - } - } + rd_kafka_topic_partition_list_add( + rkgm->rkgm_assignment, + eligible_topic->metadata->topic, partition); + } + } return 0; @@ -115,11 +114,10 @@ rd_kafka_roundrobin_assignor_assign_cb (rd_kafka_t *rk, /** * @brief Initialzie and add roundrobin assignor. */ -rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init (rd_kafka_t *rk) { +rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk) { return rd_kafka_assignor_add( - rk, "consumer", "roundrobin", - RD_KAFKA_REBALANCE_PROTOCOL_EAGER, - rd_kafka_roundrobin_assignor_assign_cb, - rd_kafka_assignor_get_metadata_with_empty_userdata, - NULL, NULL, NULL, NULL); + rk, "consumer", "roundrobin", RD_KAFKA_REBALANCE_PROTOCOL_EAGER, + rd_kafka_roundrobin_assignor_assign_cb, + rd_kafka_assignor_get_metadata_with_empty_userdata, NULL, NULL, + NULL, NULL); } diff --git a/src/rdkafka_sasl.c b/src/rdkafka_sasl.c index 44f46fe262..11770e510e 100644 --- a/src/rdkafka_sasl.c +++ b/src/rdkafka_sasl.c @@ -40,14 +40,16 @@ * * @warning This is a blocking call. 
*/ -static int rd_kafka_sasl_send_legacy (rd_kafka_transport_t *rktrans, - const void *payload, int len, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_send_legacy(rd_kafka_transport_t *rktrans, + const void *payload, + int len, + char *errstr, + size_t errstr_size) { rd_buf_t buf; rd_slice_t slice; int32_t hdr; - rd_buf_init(&buf, 1+1, sizeof(hdr)); + rd_buf_init(&buf, 1 + 1, sizeof(hdr)); hdr = htobe32(len); rd_buf_write(&buf, &hdr, sizeof(hdr)); @@ -62,8 +64,8 @@ static int rd_kafka_sasl_send_legacy (rd_kafka_transport_t *rktrans, do { int r; - r = (int)rd_kafka_transport_send(rktrans, &slice, - errstr, errstr_size); + r = (int)rd_kafka_transport_send(rktrans, &slice, errstr, + errstr_size); if (r == -1) { rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", "SASL send failed: %s", errstr); @@ -75,7 +77,7 @@ static int rd_kafka_sasl_send_legacy (rd_kafka_transport_t *rktrans, break; /* Avoid busy-looping */ - rd_usleep(10*1000, NULL); + rd_usleep(10 * 1000, NULL); } while (1); @@ -89,28 +91,28 @@ static int rd_kafka_sasl_send_legacy (rd_kafka_transport_t *rktrans, * * @warning This is a blocking call when used with the legacy framing. */ -int rd_kafka_sasl_send (rd_kafka_transport_t *rktrans, - const void *payload, int len, - char *errstr, size_t errstr_size) { +int rd_kafka_sasl_send(rd_kafka_transport_t *rktrans, + const void *payload, + int len, + char *errstr, + size_t errstr_size) { rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - rd_rkb_dbg(rkb, SECURITY, "SASL", - "Send SASL %s frame to broker (%d bytes)", - (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ) ? - "Kafka" : "legacy", - len); + rd_rkb_dbg( + rkb, SECURITY, "SASL", "Send SASL %s frame to broker (%d bytes)", + (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ) ? 
"Kafka" + : "legacy", + len); /* Blocking legacy framed send directly on the socket */ if (!(rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ)) - return rd_kafka_sasl_send_legacy(rktrans, payload, len, - errstr, errstr_size); + return rd_kafka_sasl_send_legacy(rktrans, payload, len, errstr, + errstr_size); /* Kafka-framed asynchronous send */ - rd_kafka_SaslAuthenticateRequest(rkb, - payload, (size_t)len, - RD_KAFKA_NO_REPLYQ, - rd_kafka_handle_SaslAuthenticate, - NULL); + rd_kafka_SaslAuthenticateRequest( + rkb, payload, (size_t)len, RD_KAFKA_NO_REPLYQ, + rd_kafka_handle_SaslAuthenticate, NULL); return 0; } @@ -121,7 +123,7 @@ int rd_kafka_sasl_send (rd_kafka_transport_t *rktrans, * * Transition to next connect state. */ -void rd_kafka_sasl_auth_done (rd_kafka_transport_t *rktrans) { +void rd_kafka_sasl_auth_done(rd_kafka_transport_t *rktrans) { /* Authenticated */ rd_kafka_broker_connect_up(rktrans->rktrans_rkb); } @@ -134,16 +136,17 @@ void rd_kafka_sasl_auth_done (rd_kafka_transport_t *rktrans) { * * @returns -1 on error, else 0. */ -int rd_kafka_sasl_recv (rd_kafka_transport_t *rktrans, - const void *buf, size_t len, - char *errstr, size_t errstr_size) { +int rd_kafka_sasl_recv(rd_kafka_transport_t *rktrans, + const void *buf, + size_t len, + char *errstr, + size_t errstr_size) { rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", - "Received SASL frame from broker (%"PRIusz" bytes)", len); + "Received SASL frame from broker (%" PRIusz " bytes)", len); - return rktrans->rktrans_rkb->rkb_rk-> - rk_conf.sasl.provider->recv(rktrans, buf, len, - errstr, errstr_size); + return rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.provider->recv( + rktrans, buf, len, errstr, errstr_size); } /** @@ -153,8 +156,10 @@ int rd_kafka_sasl_recv (rd_kafka_transport_t *rktrans, * * @returns -1 on error, else 0. 
*/ -int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events, - char *errstr, size_t errstr_size) { +int rd_kafka_sasl_io_event(rd_kafka_transport_t *rktrans, + int events, + char *errstr, + size_t errstr_size) { rd_kafka_buf_t *rkbuf; int r; const void *buf; @@ -163,15 +168,15 @@ int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events, if (!(events & POLLIN)) return 0; - r = rd_kafka_transport_framed_recv(rktrans, &rkbuf, - errstr, errstr_size); + r = rd_kafka_transport_framed_recv(rktrans, &rkbuf, errstr, + errstr_size); if (r == -1) { if (!strcmp(errstr, "Disconnected")) rd_snprintf(errstr, errstr_size, "Disconnected: check client %s credentials " "and broker logs", - rktrans->rktrans_rkb->rkb_rk->rk_conf. - sasl.mechanisms); + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl + .mechanisms); return -1; } else if (r == 0) /* not fully received yet */ return 0; @@ -200,10 +205,9 @@ int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events, * @brief Close SASL session (from transport code) * @remark May be called on non-SASL transports (no-op) */ -void rd_kafka_sasl_close (rd_kafka_transport_t *rktrans) { +void rd_kafka_sasl_close(rd_kafka_transport_t *rktrans) { const struct rd_kafka_sasl_provider *provider = - rktrans->rktrans_rkb->rkb_rk->rk_conf. 
- sasl.provider; + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.provider; if (provider && provider->close) provider->close(rktrans); @@ -218,14 +222,15 @@ void rd_kafka_sasl_close (rd_kafka_transport_t *rktrans) { * * Locality: broker thread */ -int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size) { - int r; - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - rd_kafka_t *rk = rkb->rkb_rk; +int rd_kafka_sasl_client_new(rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size) { + int r; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + rd_kafka_t *rk = rkb->rkb_rk; char *hostname, *t; const struct rd_kafka_sasl_provider *provider = - rk->rk_conf.sasl.provider; + rk->rk_conf.sasl.provider; /* Verify broker support: * - RD_KAFKA_FEATURE_SASL_GSSAPI - GSSAPI supported @@ -243,8 +248,9 @@ int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, "SASL Handshake not supported by broker " "(required by mechanism %s)%s", rk->rk_conf.sasl.mechanisms, - rk->rk_conf.api_version_request ? "" : - ": try api.version.request=true"); + rk->rk_conf.api_version_request + ? 
"" + : ": try api.version.request=true"); return -1; } @@ -253,14 +259,13 @@ int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, rd_kafka_broker_unlock(rktrans->rktrans_rkb); if ((t = strchr(hostname, ':'))) - *t = '\0'; /* remove ":port" */ + *t = '\0'; /* remove ":port" */ rd_rkb_dbg(rkb, SECURITY, "SASL", "Initializing SASL client: service name %s, " "hostname %s, mechanisms %s, provider %s", rk->rk_conf.sasl.service_name, hostname, - rk->rk_conf.sasl.mechanisms, - provider->name); + rk->rk_conf.sasl.mechanisms, provider->name); r = provider->client_new(rktrans, hostname, errstr, errstr_size); if (r != -1) @@ -271,9 +276,7 @@ int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, - - -rd_kafka_queue_t *rd_kafka_queue_get_sasl (rd_kafka_t *rk) { +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk) { if (!rk->rk_sasl.callback_q) return NULL; @@ -286,9 +289,9 @@ rd_kafka_queue_t *rd_kafka_queue_get_sasl (rd_kafka_t *rk) { * * Locality: broker thread */ -void rd_kafka_sasl_broker_term (rd_kafka_broker_t *rkb) { +void rd_kafka_sasl_broker_term(rd_kafka_broker_t *rkb) { const struct rd_kafka_sasl_provider *provider = - rkb->rkb_rk->rk_conf.sasl.provider; + rkb->rkb_rk->rk_conf.sasl.provider; if (provider->broker_term) provider->broker_term(rkb); } @@ -298,9 +301,9 @@ void rd_kafka_sasl_broker_term (rd_kafka_broker_t *rkb) { * * Locality: broker thread */ -void rd_kafka_sasl_broker_init (rd_kafka_broker_t *rkb) { +void rd_kafka_sasl_broker_init(rd_kafka_broker_t *rkb) { const struct rd_kafka_sasl_provider *provider = - rkb->rkb_rk->rk_conf.sasl.provider; + rkb->rkb_rk->rk_conf.sasl.provider; if (provider->broker_init) provider->broker_init(rkb); } @@ -313,9 +316,9 @@ void rd_kafka_sasl_broker_init (rd_kafka_broker_t *rkb) { * * @locality app thread (from rd_kafka_new()) */ -int rd_kafka_sasl_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { +int rd_kafka_sasl_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) { const struct 
rd_kafka_sasl_provider *provider = - rk->rk_conf.sasl.provider; + rk->rk_conf.sasl.provider; if (provider && provider->init) return provider->init(rk, errstr, errstr_size); @@ -329,9 +332,9 @@ int rd_kafka_sasl_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { * * @locality app thread (from rd_kafka_new()) or rdkafka main thread */ -void rd_kafka_sasl_term (rd_kafka_t *rk) { +void rd_kafka_sasl_term(rd_kafka_t *rk) { const struct rd_kafka_sasl_provider *provider = - rk->rk_conf.sasl.provider; + rk->rk_conf.sasl.provider; if (provider && provider->term) provider->term(rk); @@ -347,9 +350,9 @@ void rd_kafka_sasl_term (rd_kafka_t *rk) { * @locks none * @locality any thread */ -rd_bool_t rd_kafka_sasl_ready (rd_kafka_t *rk) { +rd_bool_t rd_kafka_sasl_ready(rd_kafka_t *rk) { const struct rd_kafka_sasl_provider *provider = - rk->rk_conf.sasl.provider; + rk->rk_conf.sasl.provider; if (provider && provider->ready) return provider->ready(rk); @@ -362,8 +365,9 @@ rd_bool_t rd_kafka_sasl_ready (rd_kafka_t *rk) { * @brief Select SASL provider for configured mechanism (singularis) * @returns 0 on success or -1 on failure. 
*/ -int rd_kafka_sasl_select_provider (rd_kafka_t *rk, - char *errstr, size_t errstr_size) { +int rd_kafka_sasl_select_provider(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { const struct rd_kafka_sasl_provider *provider = NULL; if (!strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) { @@ -440,19 +444,19 @@ int rd_kafka_sasl_select_provider (rd_kafka_t *rk, } -rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable (rd_kafka_t *rk) { +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk) { rd_kafka_queue_t *saslq, *bgq; if (!(saslq = rd_kafka_queue_get_sasl(rk))) return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__NOT_CONFIGURED, - "No SASL mechanism using callbacks is configured"); + RD_KAFKA_RESP_ERR__NOT_CONFIGURED, + "No SASL mechanism using callbacks is configured"); if (!(bgq = rd_kafka_queue_get_background(rk))) { rd_kafka_queue_destroy(saslq); return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, - "The background thread is not available"); + RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, + "The background thread is not available"); } rd_kafka_queue_forward(saslq, bgq); @@ -467,7 +471,7 @@ rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable (rd_kafka_t *rk) { /** * Global SASL termination. */ -void rd_kafka_sasl_global_term (void) { +void rd_kafka_sasl_global_term(void) { #if WITH_SASL_CYRUS rd_kafka_sasl_cyrus_global_term(); #endif @@ -477,11 +481,10 @@ void rd_kafka_sasl_global_term (void) { /** * Global SASL init, called once per runtime. 
*/ -int rd_kafka_sasl_global_init (void) { +int rd_kafka_sasl_global_init(void) { #if WITH_SASL_CYRUS return rd_kafka_sasl_cyrus_global_init(); #else return 0; #endif } - diff --git a/src/rdkafka_sasl.h b/src/rdkafka_sasl.h index e7bca8c3bf..d0dd01b8b2 100644 --- a/src/rdkafka_sasl.h +++ b/src/rdkafka_sasl.h @@ -31,27 +31,33 @@ -int rd_kafka_sasl_recv (rd_kafka_transport_t *rktrans, - const void *buf, size_t len, - char *errstr, size_t errstr_size); -int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events, - char *errstr, size_t errstr_size); -void rd_kafka_sasl_close (rd_kafka_transport_t *rktrans); -int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size); - -void rd_kafka_sasl_broker_term (rd_kafka_broker_t *rkb); -void rd_kafka_sasl_broker_init (rd_kafka_broker_t *rkb); - -int rd_kafka_sasl_init (rd_kafka_t *rk, char *errstr, size_t errstr_size); -void rd_kafka_sasl_term (rd_kafka_t *rk); - -rd_bool_t rd_kafka_sasl_ready (rd_kafka_t *rk); - -void rd_kafka_sasl_global_term (void); -int rd_kafka_sasl_global_init (void); - -int rd_kafka_sasl_select_provider (rd_kafka_t *rk, - char *errstr, size_t errstr_size); +int rd_kafka_sasl_recv(rd_kafka_transport_t *rktrans, + const void *buf, + size_t len, + char *errstr, + size_t errstr_size); +int rd_kafka_sasl_io_event(rd_kafka_transport_t *rktrans, + int events, + char *errstr, + size_t errstr_size); +void rd_kafka_sasl_close(rd_kafka_transport_t *rktrans); +int rd_kafka_sasl_client_new(rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size); + +void rd_kafka_sasl_broker_term(rd_kafka_broker_t *rkb); +void rd_kafka_sasl_broker_init(rd_kafka_broker_t *rkb); + +int rd_kafka_sasl_init(rd_kafka_t *rk, char *errstr, size_t errstr_size); +void rd_kafka_sasl_term(rd_kafka_t *rk); + +rd_bool_t rd_kafka_sasl_ready(rd_kafka_t *rk); + +void rd_kafka_sasl_global_term(void); +int rd_kafka_sasl_global_init(void); + +int rd_kafka_sasl_select_provider(rd_kafka_t *rk, + 
char *errstr, + size_t errstr_size); #endif /* _RDKAFKA_SASL_H_ */ diff --git a/src/rdkafka_sasl_cyrus.c b/src/rdkafka_sasl_cyrus.c index 43c463a6a2..04f1ac9415 100644 --- a/src/rdkafka_sasl_cyrus.c +++ b/src/rdkafka_sasl_cyrus.c @@ -34,7 +34,7 @@ #include "rdstring.h" #if defined(__FreeBSD__) || defined(__OpenBSD__) -#include /* For WIF.. */ +#include /* For WIF.. */ #endif #ifdef __APPLE__ @@ -74,9 +74,11 @@ typedef struct rd_kafka_sasl_cyrus_state_s { /** * Handle received frame from broker. */ -static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, - const void *buf, size_t size, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_cyrus_recv(struct rd_kafka_transport_s *rktrans, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_cyrus_state_t *state = rktrans->rktrans_sasl.state; int r; int sendcnt = 0; @@ -89,15 +91,13 @@ static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, const char *out; unsigned int outlen; - r = sasl_client_step(state->conn, - size > 0 ? buf : NULL, size, - &interact, - &out, &outlen); + r = sasl_client_step(state->conn, size > 0 ? 
buf : NULL, size, + &interact, &out, &outlen); if (r >= 0) { /* Note: outlen may be 0 here for an empty response */ - if (rd_kafka_sasl_send(rktrans, out, outlen, - errstr, errstr_size) == -1) + if (rd_kafka_sasl_send(rktrans, out, outlen, errstr, + errstr_size) == -1) return -1; sendcnt++; } @@ -105,16 +105,14 @@ static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, if (r == SASL_INTERACT) rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", "SASL_INTERACT: %lu %s, %s, %s, %p", - interact->id, - interact->challenge, - interact->prompt, - interact->defresult, + interact->id, interact->challenge, + interact->prompt, interact->defresult, interact->result); } while (r == SASL_INTERACT); if (r == SASL_CONTINUE) - return 0; /* Wait for more data from broker */ + return 0; /* Wait for more data from broker */ else if (r != SASL_OK) { rd_snprintf(errstr, errstr_size, "SASL handshake failed (step): %s", @@ -138,8 +136,8 @@ static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", "%s authentication complete but awaiting " "final response from broker", - rktrans->rktrans_rkb->rkb_rk->rk_conf. 
- sasl.mechanisms); + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl + .mechanisms); return 0; } } @@ -163,8 +161,8 @@ static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, authsrc = "(unknown)"; rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", - "Authenticated as %s using %s (%s)", - user, mech, authsrc); + "Authenticated as %s using %s (%s)", user, mech, + authsrc); } rd_kafka_sasl_auth_done(rktrans); @@ -174,9 +172,8 @@ static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, - -static ssize_t render_callback (const char *key, char *buf, - size_t size, void *opaque) { +static ssize_t +render_callback(const char *key, char *buf, size_t size, void *opaque) { rd_kafka_t *rk = opaque; rd_kafka_conf_res_t res; size_t destsize = size; @@ -187,7 +184,7 @@ static ssize_t render_callback (const char *key, char *buf, return -1; /* Dont include \0 in returned size */ - return (destsize > 0 ? destsize-1 : destsize); + return (destsize > 0 ? destsize - 1 : destsize); } @@ -198,7 +195,7 @@ static ssize_t render_callback (const char *key, char *buf, * * @locality rdkafka main thread */ -static int rd_kafka_sasl_cyrus_kinit_refresh (rd_kafka_t *rk) { +static int rd_kafka_sasl_cyrus_kinit_refresh(rd_kafka_t *rk) { rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle; int r; char *cmd; @@ -207,9 +204,8 @@ static int rd_kafka_sasl_cyrus_kinit_refresh (rd_kafka_t *rk) { int duration; /* Build kinit refresh command line using string rendering and config */ - cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, - errstr, sizeof(errstr), - render_callback, rk); + cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, errstr, + sizeof(errstr), render_callback, rk); if (!cmd) { rd_kafka_log(rk, LOG_ERR, "SASLREFRESH", "Failed to construct kinit command " @@ -233,8 +229,8 @@ static int rd_kafka_sasl_cyrus_kinit_refresh (rd_kafka_t *rk) { duration = (int)((rd_clock() - ts_start) / 1000); if (duration > 5000) rd_kafka_log(rk, LOG_WARNING, "SASLREFRESH", 
- "Slow Kerberos ticket refresh: %dms: %s", - duration, cmd); + "Slow Kerberos ticket refresh: %dms: %s", duration, + cmd); /* Regardless of outcome from the kinit command (it can fail * even if the ticket is available), we now allow broker connections. */ @@ -288,8 +284,8 @@ static int rd_kafka_sasl_cyrus_kinit_refresh (rd_kafka_t *rk) { * * @locality rdkafka main thread */ -static void rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_t *rk = arg; rd_kafka_sasl_cyrus_kinit_refresh(rk); @@ -302,10 +298,11 @@ static void rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb (rd_kafka_timers_t *rkts, * libsasl callbacks * */ -static RD_UNUSED int -rd_kafka_sasl_cyrus_cb_getopt (void *context, const char *plugin_name, - const char *option, - const char **result, unsigned *len) { +static RD_UNUSED int rd_kafka_sasl_cyrus_cb_getopt(void *context, + const char *plugin_name, + const char *option, + const char **result, + unsigned *len) { rd_kafka_transport_t *rktrans = context; if (!strcmp(option, "client_mech_list")) @@ -317,14 +314,14 @@ rd_kafka_sasl_cyrus_cb_getopt (void *context, const char *plugin_name, *len = strlen(*result); rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", - "CB_GETOPT: plugin %s, option %s: returning %s", - plugin_name, option, *result); + "CB_GETOPT: plugin %s, option %s: returning %s", plugin_name, + option, *result); return SASL_OK; } -static int rd_kafka_sasl_cyrus_cb_log (void *context, int level, - const char *message) { +static int +rd_kafka_sasl_cyrus_cb_log(void *context, int level, const char *message) { rd_kafka_transport_t *rktrans = context; /* Provide a more helpful error message in case Kerberos @@ -333,29 +330,29 @@ static int rd_kafka_sasl_cyrus_cb_log (void *context, int level, strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms, "GSSAPI")) message = - "Cyrus/libsasl2 is missing a GSSAPI 
module: " - "make sure the libsasl2-modules-gssapi-mit or " - "cyrus-sasl-gssapi packages are installed"; + "Cyrus/libsasl2 is missing a GSSAPI module: " + "make sure the libsasl2-modules-gssapi-mit or " + "cyrus-sasl-gssapi packages are installed"; /* Treat the "client step" log messages as debug. */ - if (level >= LOG_DEBUG || - !strncmp(message, "GSSAPI client step ", 19)) - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", - "%s", message); + if (level >= LOG_DEBUG || !strncmp(message, "GSSAPI client step ", 19)) + rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", "%s", + message); else - rd_rkb_log(rktrans->rktrans_rkb, level, "LIBSASL", - "%s", message); + rd_rkb_log(rktrans->rktrans_rkb, level, "LIBSASL", "%s", + message); return SASL_OK; } -static int rd_kafka_sasl_cyrus_cb_getsimple (void *context, int id, - const char **result, unsigned *len) { +static int rd_kafka_sasl_cyrus_cb_getsimple(void *context, + int id, + const char **result, + unsigned *len) { rd_kafka_transport_t *rktrans = context; - switch (id) - { + switch (id) { case SASL_CB_USER: case SASL_CB_AUTHNAME: *result = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.username; @@ -376,8 +373,10 @@ static int rd_kafka_sasl_cyrus_cb_getsimple (void *context, int id, } -static int rd_kafka_sasl_cyrus_cb_getsecret (sasl_conn_t *conn, void *context, - int id, sasl_secret_t **psecret) { +static int rd_kafka_sasl_cyrus_cb_getsecret(sasl_conn_t *conn, + void *context, + int id, + sasl_secret_t **psecret) { rd_kafka_transport_t *rktrans = context; const char *password; @@ -393,21 +392,23 @@ static int rd_kafka_sasl_cyrus_cb_getsecret (sasl_conn_t *conn, void *context, } rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", - "CB_GETSECRET: id 0x%x: returning %s", - id, *psecret ? "(hidden)":"NULL"); + "CB_GETSECRET: id 0x%x: returning %s", id, + *psecret ? 
"(hidden)" : "NULL"); return SASL_OK; } -static int rd_kafka_sasl_cyrus_cb_chalprompt (void *context, int id, - const char *challenge, - const char *prompt, - const char *defres, - const char **result, unsigned *len) { +static int rd_kafka_sasl_cyrus_cb_chalprompt(void *context, + int id, + const char *challenge, + const char *prompt, + const char *defres, + const char **result, + unsigned *len) { rd_kafka_transport_t *rktrans = context; *result = "min_chalprompt"; - *len = strlen(*result); + *len = strlen(*result); rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", "CB_CHALPROMPT: id 0x%x, challenge %s, prompt %s, " @@ -417,9 +418,10 @@ static int rd_kafka_sasl_cyrus_cb_chalprompt (void *context, int id, return SASL_OK; } -static int rd_kafka_sasl_cyrus_cb_getrealm (void *context, int id, - const char **availrealms, - const char **result) { +static int rd_kafka_sasl_cyrus_cb_getrealm(void *context, + int id, + const char **availrealms, + const char **result) { rd_kafka_transport_t *rktrans = context; *result = *availrealms; @@ -431,36 +433,39 @@ static int rd_kafka_sasl_cyrus_cb_getrealm (void *context, int id, } -static RD_UNUSED int -rd_kafka_sasl_cyrus_cb_canon (sasl_conn_t *conn, - void *context, - const char *in, unsigned inlen, - unsigned flags, - const char *user_realm, - char *out, unsigned out_max, - unsigned *out_len) { +static RD_UNUSED int rd_kafka_sasl_cyrus_cb_canon(sasl_conn_t *conn, + void *context, + const char *in, + unsigned inlen, + unsigned flags, + const char *user_realm, + char *out, + unsigned out_max, + unsigned *out_len) { rd_kafka_transport_t *rktrans = context; - if (strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf. - sasl.mechanisms, "GSSAPI")) { - *out_len = rd_snprintf(out, out_max, "%s", - rktrans->rktrans_rkb->rkb_rk-> - rk_conf.sasl.principal); - } else if (!strcmp(rktrans->rktrans_rkb->rkb_rk->rk_conf. 
- sasl.mechanisms, "PLAIN")) { + if (strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms, + "GSSAPI")) { + *out_len = rd_snprintf( + out, out_max, "%s", + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.principal); + } else if (!strcmp( + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms, + "PLAIN")) { *out_len = rd_snprintf(out, out_max, "%.*s", inlen, in); } else out = NULL; - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", - "CB_CANON: flags 0x%x, \"%.*s\" @ \"%s\": returning \"%.*s\"", - flags, (int)inlen, in, user_realm, (int)(*out_len), out); + rd_rkb_dbg( + rktrans->rktrans_rkb, SECURITY, "LIBSASL", + "CB_CANON: flags 0x%x, \"%.*s\" @ \"%s\": returning \"%.*s\"", + flags, (int)inlen, in, user_realm, (int)(*out_len), out); return out ? SASL_OK : SASL_FAIL; } -static void rd_kafka_sasl_cyrus_close (struct rd_kafka_transport_s *rktrans) { +static void rd_kafka_sasl_cyrus_close(struct rd_kafka_transport_s *rktrans) { rd_kafka_sasl_cyrus_state_t *state = rktrans->rktrans_sasl.state; if (!state) @@ -479,37 +484,42 @@ static void rd_kafka_sasl_cyrus_close (struct rd_kafka_transport_s *rktrans) { * * Locality: broker thread */ -static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_cyrus_client_new(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size) { int r; rd_kafka_sasl_cyrus_state_t *state; - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + rd_kafka_t *rk = rkb->rkb_rk; sasl_callback_t callbacks[16] = { - // { SASL_CB_GETOPT, (void *)rd_kafka_sasl_cyrus_cb_getopt, rktrans }, - { SASL_CB_LOG, (void *)rd_kafka_sasl_cyrus_cb_log, rktrans }, - { SASL_CB_AUTHNAME, (void *)rd_kafka_sasl_cyrus_cb_getsimple, rktrans }, - { SASL_CB_PASS, (void *)rd_kafka_sasl_cyrus_cb_getsecret, rktrans }, - { SASL_CB_ECHOPROMPT, (void 
*)rd_kafka_sasl_cyrus_cb_chalprompt, rktrans }, - { SASL_CB_GETREALM, (void *)rd_kafka_sasl_cyrus_cb_getrealm, rktrans }, - { SASL_CB_CANON_USER, (void *)rd_kafka_sasl_cyrus_cb_canon, rktrans }, - { SASL_CB_LIST_END } - }; - - state = rd_calloc(1, sizeof(*state)); + // { SASL_CB_GETOPT, (void *)rd_kafka_sasl_cyrus_cb_getopt, rktrans + // }, + {SASL_CB_LOG, (void *)rd_kafka_sasl_cyrus_cb_log, rktrans}, + {SASL_CB_AUTHNAME, (void *)rd_kafka_sasl_cyrus_cb_getsimple, + rktrans}, + {SASL_CB_PASS, (void *)rd_kafka_sasl_cyrus_cb_getsecret, rktrans}, + {SASL_CB_ECHOPROMPT, (void *)rd_kafka_sasl_cyrus_cb_chalprompt, + rktrans}, + {SASL_CB_GETREALM, (void *)rd_kafka_sasl_cyrus_cb_getrealm, + rktrans}, + {SASL_CB_CANON_USER, (void *)rd_kafka_sasl_cyrus_cb_canon, rktrans}, + {SASL_CB_LIST_END}}; + + state = rd_calloc(1, sizeof(*state)); rktrans->rktrans_sasl.state = state; /* SASL_CB_USER is needed for PLAIN but breaks GSSAPI */ if (!strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")) { int endidx; /* Find end of callbacks array */ - for (endidx = 0 ; - callbacks[endidx].id != SASL_CB_LIST_END ; endidx++) + for (endidx = 0; callbacks[endidx].id != SASL_CB_LIST_END; + endidx++) ; callbacks[endidx].id = SASL_CB_USER; - callbacks[endidx].proc = (void *)rd_kafka_sasl_cyrus_cb_getsimple; + callbacks[endidx].proc = + (void *)rd_kafka_sasl_cyrus_cb_getsimple; callbacks[endidx].context = rktrans; endidx++; callbacks[endidx].id = SASL_CB_LIST_END; @@ -517,8 +527,8 @@ static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t *rktrans, memcpy(state->callbacks, callbacks, sizeof(callbacks)); - r = sasl_client_new(rk->rk_conf.sasl.service_name, hostname, - NULL, NULL, /* no local & remote IP checks */ + r = sasl_client_new(rk->rk_conf.sasl.service_name, hostname, NULL, + NULL, /* no local & remote IP checks */ state->callbacks, 0, &state->conn); if (r != SASL_OK) { rd_snprintf(errstr, errstr_size, "%s", @@ -528,8 +538,8 @@ static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t 
*rktrans, if (rk->rk_conf.debug & RD_KAFKA_DBG_SECURITY) { const char *avail_mechs; - sasl_listmech(state->conn, NULL, NULL, " ", NULL, - &avail_mechs, NULL, NULL); + sasl_listmech(state->conn, NULL, NULL, " ", NULL, &avail_mechs, + NULL, NULL); rd_rkb_dbg(rkb, SECURITY, "SASL", "My supported SASL mechanisms: %s", avail_mechs); } @@ -539,27 +549,26 @@ static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t *rktrans, unsigned int outlen; const char *mech = NULL; - r = sasl_client_start(state->conn, - rk->rk_conf.sasl.mechanisms, + r = sasl_client_start(state->conn, rk->rk_conf.sasl.mechanisms, NULL, &out, &outlen, &mech); if (r >= 0) - if (rd_kafka_sasl_send(rktrans, out, outlen, - errstr, errstr_size)) + if (rd_kafka_sasl_send(rktrans, out, outlen, errstr, + errstr_size)) return -1; } while (r == SASL_INTERACT); if (r == SASL_OK) { - /* PLAIN is appearantly done here, but we still need to make sure - * the PLAIN frame is sent and we get a response back (but we must - * not pass the response to libsasl or it will fail). */ + /* PLAIN is appearantly done here, but we still need to make + * sure the PLAIN frame is sent and we get a response back (but + * we must not pass the response to libsasl or it will fail). */ rktrans->rktrans_sasl.complete = 1; return 0; } else if (r != SASL_CONTINUE) { rd_snprintf(errstr, errstr_size, - "SASL handshake failed (start (%d)): %s", - r, sasl_errdetail(state->conn)); + "SASL handshake failed (start (%d)): %s", r, + sasl_errdetail(state->conn)); return -1; } @@ -571,7 +580,7 @@ static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t *rktrans, * @brief SASL/GSSAPI is ready when at least one kinit command has been * executed (regardless of exit status). 
*/ -static rd_bool_t rd_kafka_sasl_cyrus_ready (rd_kafka_t *rk) { +static rd_bool_t rd_kafka_sasl_cyrus_ready(rd_kafka_t *rk) { rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle; if (!rk->rk_conf.sasl.relogin_min_time) return rd_true; @@ -584,16 +593,15 @@ static rd_bool_t rd_kafka_sasl_cyrus_ready (rd_kafka_t *rk) { /** * @brief Per-client-instance initializer */ -static int rd_kafka_sasl_cyrus_init (rd_kafka_t *rk, - char *errstr, size_t errstr_size) { +static int +rd_kafka_sasl_cyrus_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) { rd_kafka_sasl_cyrus_handle_t *handle; - if (!rk->rk_conf.sasl.relogin_min_time || - !rk->rk_conf.sasl.kinit_cmd || + if (!rk->rk_conf.sasl.relogin_min_time || !rk->rk_conf.sasl.kinit_cmd || strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) return 0; /* kinit not configured, no need to start timer */ - handle = rd_calloc(1, sizeof(*handle)); + handle = rd_calloc(1, sizeof(*handle)); rk->rk_sasl.handle = handle; rd_kafka_timer_start(&rk->rk_timers, &handle->kinit_refresh_tmr, @@ -603,7 +611,7 @@ static int rd_kafka_sasl_cyrus_init (rd_kafka_t *rk, /* Kick off the timer immediately to refresh the ticket. * (Timer is triggered from the main loop). 
*/ rd_kafka_timer_override_once(&rk->rk_timers, &handle->kinit_refresh_tmr, - 0/*immediately*/); + 0 /*immediately*/); return 0; } @@ -612,7 +620,7 @@ static int rd_kafka_sasl_cyrus_init (rd_kafka_t *rk, /** * @brief Per-client-instance destructor */ -static void rd_kafka_sasl_cyrus_term (rd_kafka_t *rk) { +static void rd_kafka_sasl_cyrus_term(rd_kafka_t *rk) { rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle; if (!handle) @@ -624,20 +632,19 @@ static void rd_kafka_sasl_cyrus_term (rd_kafka_t *rk) { } -static int rd_kafka_sasl_cyrus_conf_validate (rd_kafka_t *rk, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_cyrus_conf_validate(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { if (strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) return 0; - if (rk->rk_conf.sasl.relogin_min_time && - rk->rk_conf.sasl.kinit_cmd) { + if (rk->rk_conf.sasl.relogin_min_time && rk->rk_conf.sasl.kinit_cmd) { char *cmd; char tmperr[128]; - cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, - tmperr, sizeof(tmperr), - render_callback, rk); + cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, tmperr, + sizeof(tmperr), render_callback, rk); if (!cmd) { rd_snprintf(errstr, errstr_size, @@ -656,8 +663,9 @@ static int rd_kafka_sasl_cyrus_conf_validate (rd_kafka_t *rk, /** * Global SASL termination. */ -void rd_kafka_sasl_cyrus_global_term (void) { - /* NOTE: Should not be called since the application may be using SASL too*/ +void rd_kafka_sasl_cyrus_global_term(void) { + /* NOTE: Should not be called since the application may be using SASL + * too*/ /* sasl_done(); */ mtx_destroy(&rd_kafka_sasl_cyrus_kinit_lock); } @@ -666,7 +674,7 @@ void rd_kafka_sasl_cyrus_global_term (void) { /** * Global SASL init, called once per runtime. 
*/ -int rd_kafka_sasl_cyrus_global_init (void) { +int rd_kafka_sasl_cyrus_global_init(void) { int r; mtx_init(&rd_kafka_sasl_cyrus_kinit_lock, mtx_plain); @@ -683,12 +691,11 @@ int rd_kafka_sasl_cyrus_global_init (void) { const struct rd_kafka_sasl_provider rd_kafka_sasl_cyrus_provider = { - .name = "Cyrus", - .init = rd_kafka_sasl_cyrus_init, - .term = rd_kafka_sasl_cyrus_term, - .client_new = rd_kafka_sasl_cyrus_client_new, - .recv = rd_kafka_sasl_cyrus_recv, - .close = rd_kafka_sasl_cyrus_close, - .ready = rd_kafka_sasl_cyrus_ready, - .conf_validate = rd_kafka_sasl_cyrus_conf_validate -}; + .name = "Cyrus", + .init = rd_kafka_sasl_cyrus_init, + .term = rd_kafka_sasl_cyrus_term, + .client_new = rd_kafka_sasl_cyrus_client_new, + .recv = rd_kafka_sasl_cyrus_recv, + .close = rd_kafka_sasl_cyrus_close, + .ready = rd_kafka_sasl_cyrus_ready, + .conf_validate = rd_kafka_sasl_cyrus_conf_validate}; diff --git a/src/rdkafka_sasl_int.h b/src/rdkafka_sasl_int.h index 583e76f19c..33e3bdd05f 100644 --- a/src/rdkafka_sasl_int.h +++ b/src/rdkafka_sasl_int.h @@ -33,28 +33,30 @@ struct rd_kafka_sasl_provider { const char *name; /** Per client-instance (rk) initializer */ - int (*init) (rd_kafka_t *rk, char *errstr, size_t errstr_size); + int (*init)(rd_kafka_t *rk, char *errstr, size_t errstr_size); /** Per client-instance (rk) destructor */ - void (*term) (rd_kafka_t *rk); + void (*term)(rd_kafka_t *rk); /** Returns rd_true if provider is ready to be used, else rd_false */ - rd_bool_t (*ready) (rd_kafka_t *rk); + rd_bool_t (*ready)(rd_kafka_t *rk); - int (*client_new) (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size); + int (*client_new)(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size); - int (*recv) (struct rd_kafka_transport_s *s, - const void *buf, size_t size, - char *errstr, size_t errstr_size); - void (*close) (struct rd_kafka_transport_s *); + int (*recv)(struct rd_kafka_transport_s *s, + 
const void *buf, + size_t size, + char *errstr, + size_t errstr_size); + void (*close)(struct rd_kafka_transport_s *); - void (*broker_init) (rd_kafka_broker_t *rkb); - void (*broker_term) (rd_kafka_broker_t *rkb); + void (*broker_init)(rd_kafka_broker_t *rkb); + void (*broker_term)(rd_kafka_broker_t *rkb); - int (*conf_validate) (rd_kafka_t *rk, - char *errstr, size_t errstr_size); + int (*conf_validate)(rd_kafka_t *rk, char *errstr, size_t errstr_size); }; #ifdef _WIN32 @@ -63,8 +65,8 @@ extern const struct rd_kafka_sasl_provider rd_kafka_sasl_win32_provider; #if WITH_SASL_CYRUS extern const struct rd_kafka_sasl_provider rd_kafka_sasl_cyrus_provider; -void rd_kafka_sasl_cyrus_global_term (void); -int rd_kafka_sasl_cyrus_global_init (void); +void rd_kafka_sasl_cyrus_global_term(void); +int rd_kafka_sasl_cyrus_global_init(void); #endif extern const struct rd_kafka_sasl_provider rd_kafka_sasl_plain_provider; @@ -77,9 +79,11 @@ extern const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider; extern const struct rd_kafka_sasl_provider rd_kafka_sasl_oauthbearer_provider; #endif -void rd_kafka_sasl_auth_done (rd_kafka_transport_t *rktrans); -int rd_kafka_sasl_send (rd_kafka_transport_t *rktrans, - const void *payload, int len, - char *errstr, size_t errstr_size); +void rd_kafka_sasl_auth_done(rd_kafka_transport_t *rktrans); +int rd_kafka_sasl_send(rd_kafka_transport_t *rktrans, + const void *payload, + int len, + char *errstr, + size_t errstr_size); #endif /* _RDKAFKA_SASL_INT_H_ */ diff --git a/src/rdkafka_sasl_oauthbearer.c b/src/rdkafka_sasl_oauthbearer.c index 3bff8908df..5ec3b34d50 100644 --- a/src/rdkafka_sasl_oauthbearer.c +++ b/src/rdkafka_sasl_oauthbearer.c @@ -120,12 +120,11 @@ struct rd_kafka_sasl_oauthbearer_token { * @brief Per-connection state */ struct rd_kafka_sasl_oauthbearer_state { - enum { - RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE, - RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG, - 
RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL, + enum { RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE, + RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG, + RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL, } state; - char * server_error_msg; + char *server_error_msg; /* * A place to store a consistent view of the token and extensions @@ -142,14 +141,14 @@ struct rd_kafka_sasl_oauthbearer_state { /** * @brief free memory inside the given token */ -static void rd_kafka_sasl_oauthbearer_token_free ( - struct rd_kafka_sasl_oauthbearer_token *token) { +static void rd_kafka_sasl_oauthbearer_token_free( + struct rd_kafka_sasl_oauthbearer_token *token) { size_t i; RD_IF_FREE(token->token_value, rd_free); RD_IF_FREE(token->md_principal_name, rd_free); - for (i = 0 ; i < token->extension_size ; i++) + for (i = 0; i < token->extension_size; i++) rd_free(token->extensions[i]); RD_IF_FREE(token->extensions, rd_free); @@ -163,10 +162,9 @@ static void rd_kafka_sasl_oauthbearer_token_free ( * * @locality Application thread */ -static rd_kafka_op_res_t -rd_kafka_oauthbearer_refresh_op (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_oauthbearer_refresh_op(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { /* The op callback is invoked when the op is destroyed via * rd_kafka_op_destroy() or rd_kafka_event_destroy(), so * make sure we don't refresh upon destruction since @@ -175,8 +173,8 @@ rd_kafka_oauthbearer_refresh_op (rd_kafka_t *rk, if (rko->rko_err != RD_KAFKA_RESP_ERR__DESTROY && rk->rk_conf.sasl.oauthbearer.token_refresh_cb) rk->rk_conf.sasl.oauthbearer.token_refresh_cb( - rk, rk->rk_conf.sasl.oauthbearer_config, - rk->rk_conf.opaque); + rk, rk->rk_conf.sasl.oauthbearer_config, + rk->rk_conf.opaque); return RD_KAFKA_OP_RES_HANDLED; } @@ -184,8 +182,8 @@ rd_kafka_oauthbearer_refresh_op (rd_kafka_t *rk, * @brief Enqueue a token refresh. 
* @locks rwlock_wrlock(&handle->lock) MUST be held */ -static void rd_kafka_oauthbearer_enqueue_token_refresh ( - rd_kafka_sasl_oauthbearer_handle_t *handle) { +static void rd_kafka_oauthbearer_enqueue_token_refresh( + rd_kafka_sasl_oauthbearer_handle_t *handle) { rd_kafka_op_t *rko; rko = rd_kafka_op_new_cb(handle->rk, RD_KAFKA_OP_OAUTHBEARER_REFRESH, @@ -210,9 +208,8 @@ static void rd_kafka_oauthbearer_enqueue_token_refresh ( * if necessary; the required lock is acquired and released. This method * returns immediately when SASL/OAUTHBEARER is not in use by the client. */ -static void -rd_kafka_oauthbearer_enqueue_token_refresh_if_necessary ( - rd_kafka_sasl_oauthbearer_handle_t *handle) { +static void rd_kafka_oauthbearer_enqueue_token_refresh_if_necessary( + rd_kafka_sasl_oauthbearer_handle_t *handle) { rd_ts_t now_wallclock; now_wallclock = rd_uclock(); @@ -233,7 +230,7 @@ rd_kafka_oauthbearer_enqueue_token_refresh_if_necessary ( * @locality any */ static rd_bool_t -rd_kafka_oauthbearer_has_token (rd_kafka_sasl_oauthbearer_handle_t *handle) { +rd_kafka_oauthbearer_has_token(rd_kafka_sasl_oauthbearer_handle_t *handle) { rd_bool_t retval_has_token; rwlock_rdlock(&handle->lock); @@ -247,8 +244,9 @@ rd_kafka_oauthbearer_has_token (rd_kafka_sasl_oauthbearer_handle_t *handle) { * @brief Verify that the provided \p key is valid. * @returns 0 on success or -1 if \p key is invalid. 
*/ -static int check_oauthbearer_extension_key (const char *key, - char *errstr, size_t errstr_size) { +static int check_oauthbearer_extension_key(const char *key, + char *errstr, + size_t errstr_size) { const char *c; if (!strcmp(key, "auth")) { @@ -272,7 +270,7 @@ static int check_oauthbearer_extension_key (const char *key, return -1; } - for (c = key ; *c ; c++) { + for (c = key; *c; c++) { if (!(*c >= 'A' && *c <= 'Z') && !(*c >= 'a' && *c <= 'z')) { rd_snprintf(errstr, errstr_size, "SASL/OAUTHBEARER extension keys must " @@ -290,9 +288,9 @@ static int check_oauthbearer_extension_key (const char *key, * @brief Verify that the provided \p value is valid. * @returns 0 on success or -1 if \p value is invalid. */ -static int -check_oauthbearer_extension_value (const char *value, - char *errstr, size_t errstr_size) { +static int check_oauthbearer_extension_value(const char *value, + char *errstr, + size_t errstr_size) { const char *c; /* @@ -306,9 +304,9 @@ check_oauthbearer_extension_value (const char *value, * CR = %x0D ; carriage return * LF = %x0A ; linefeed */ - for (c = value ; *c ; c++) { - if (!(*c >= '\x21' && *c <= '\x7E') && *c != '\x20' - && *c != '\x09' && *c != '\x0D' && *c != '\x0A') { + for (c = value; *c; c++) { + if (!(*c >= '\x21' && *c <= '\x7E') && *c != '\x20' && + *c != '\x09' && *c != '\x0D' && *c != '\x0A') { rd_snprintf(errstr, errstr_size, "SASL/OAUTHBEARER extension values must " "only consist of space, horizontal tab, " @@ -361,13 +359,14 @@ check_oauthbearer_extension_value (const char *value, * @sa rd_kafka_oauthbearer_set_token_failure0 */ rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, - const char *token_value, - int64_t md_lifetime_ms, - const char *md_principal_name, - const char **extensions, - size_t extension_size, - char *errstr, size_t errstr_size) { +rd_kafka_oauthbearer_set_token0(rd_kafka_t *rk, + const char *token_value, + int64_t md_lifetime_ms, + const char *md_principal_name, + const char 
**extensions, + size_t extension_size, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle; size_t i; rd_ts_t now_wallclock; @@ -376,15 +375,17 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, /* Check if SASL/OAUTHBEARER is the configured auth mechanism */ if (rk->rk_conf.sasl.provider != &rd_kafka_sasl_oauthbearer_provider || !handle) { - rd_snprintf(errstr, errstr_size, "SASL/OAUTHBEARER is not the " + rd_snprintf(errstr, errstr_size, + "SASL/OAUTHBEARER is not the " "configured authentication mechanism"); return RD_KAFKA_RESP_ERR__STATE; } /* Check if there is an odd number of extension keys + values */ if (extension_size & 1) { - rd_snprintf(errstr, errstr_size, "Incorrect extension size " - "(must be a non-negative multiple of 2): %"PRIusz, + rd_snprintf(errstr, errstr_size, + "Incorrect extension size " + "(must be a non-negative multiple of 2): %" PRIusz, extension_size); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -394,8 +395,8 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, if (wts_md_lifetime <= now_wallclock) { rd_snprintf(errstr, errstr_size, "Must supply an unexpired token: " - "now=%"PRId64"ms, exp=%"PRId64"ms", - now_wallclock/1000, wts_md_lifetime/1000); + "now=%" PRId64 "ms, exp=%" PRId64 "ms", + now_wallclock / 1000, wts_md_lifetime / 1000); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -406,8 +407,7 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, for (i = 0; i + 1 < extension_size; i += 2) { if (check_oauthbearer_extension_key(extensions[i], errstr, errstr_size) == -1 || - check_oauthbearer_extension_value(extensions[i + 1], - errstr, + check_oauthbearer_extension_value(extensions[i + 1], errstr, errstr_size) == -1) return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -424,8 +424,7 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, /* Schedule a refresh 80% through its remaining lifetime */ handle->wts_refresh_after = - (rd_ts_t)(now_wallclock + 0.8 * - (wts_md_lifetime - now_wallclock)); + 
(rd_ts_t)(now_wallclock + 0.8 * (wts_md_lifetime - now_wallclock)); rd_list_clear(&handle->extensions); for (i = 0; i + 1 < extension_size; i += 2) @@ -464,7 +463,7 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, * @sa rd_kafka_oauthbearer_set_token0 */ rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token_failure0 (rd_kafka_t *rk, const char *errstr) { +rd_kafka_oauthbearer_set_token_failure0(rd_kafka_t *rk, const char *errstr) { rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle; rd_bool_t error_changed; @@ -477,13 +476,12 @@ rd_kafka_oauthbearer_set_token_failure0 (rd_kafka_t *rk, const char *errstr) { return RD_KAFKA_RESP_ERR__INVALID_ARG; rwlock_wrlock(&handle->lock); - error_changed = !handle->errstr || - strcmp(handle->errstr, errstr); + error_changed = !handle->errstr || strcmp(handle->errstr, errstr); RD_IF_FREE(handle->errstr, rd_free); handle->errstr = rd_strdup(errstr); /* Leave any existing token because it may have some life left, * schedule a refresh for 10 seconds later. */ - handle->wts_refresh_after = rd_uclock() + (10*1000*1000); + handle->wts_refresh_after = rd_uclock() + (10 * 1000 * 1000); rwlock_wrunlock(&handle->lock); /* Trigger an ERR__AUTHENTICATION error if the error changed. */ @@ -502,12 +500,12 @@ rd_kafka_oauthbearer_set_token_failure0 (rd_kafka_t *rk, const char *errstr) { * @returns -1 if string pointed to by \p value is non-empty (\p errstr set, no * memory allocated), else 0 (caller must free allocated memory). 
*/ -static int -parse_ujws_config_value_for_prefix (char **loc, - const char *prefix, - const char value_end_char, - char **value, - char *errstr, size_t errstr_size) { +static int parse_ujws_config_value_for_prefix(char **loc, + const char *prefix, + const char value_end_char, + char **value, + char *errstr, + size_t errstr_size) { if (*value) { rd_snprintf(errstr, errstr_size, "Invalid sasl.oauthbearer.config: " @@ -540,9 +538,10 @@ parse_ujws_config_value_for_prefix (char **loc, * @returns -1 on failure (\p errstr set), else 0. */ static int -parse_ujws_config (const char *cfg, - struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed, - char *errstr, size_t errstr_size) { +parse_ujws_config(const char *cfg, + struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed, + char *errstr, + size_t errstr_size) { /* * Extensions: * @@ -560,15 +559,15 @@ parse_ujws_config (const char *cfg, */ static const char *prefix_principal_claim_name = "principalClaimName="; - static const char *prefix_principal = "principal="; - static const char *prefix_scope_claim_name = "scopeClaimName="; - static const char *prefix_scope = "scope="; - static const char *prefix_life_seconds = "lifeSeconds="; - static const char *prefix_extension = "extension_"; + static const char *prefix_principal = "principal="; + static const char *prefix_scope_claim_name = "scopeClaimName="; + static const char *prefix_scope = "scope="; + static const char *prefix_life_seconds = "lifeSeconds="; + static const char *prefix_extension = "extension_"; char *cfg_copy = rd_strdup(cfg); - char *loc = cfg_copy; - int r = 0; + char *loc = cfg_copy; + int r = 0; while (*loc != '\0' && !r) { if (*loc == ' ') @@ -576,10 +575,8 @@ parse_ujws_config (const char *cfg, else if (!strncmp(prefix_principal_claim_name, loc, strlen(prefix_principal_claim_name))) { r = parse_ujws_config_value_for_prefix( - &loc, - prefix_principal_claim_name, ' ', - &parsed->principal_claim_name, - errstr, errstr_size); + &loc, 
prefix_principal_claim_name, ' ', + &parsed->principal_claim_name, errstr, errstr_size); if (!r && !*parsed->principal_claim_name) { rd_snprintf(errstr, errstr_size, @@ -592,9 +589,8 @@ parse_ujws_config (const char *cfg, } else if (!strncmp(prefix_principal, loc, strlen(prefix_principal))) { r = parse_ujws_config_value_for_prefix( - &loc, - prefix_principal, ' ', &parsed->principal, - errstr, errstr_size); + &loc, prefix_principal, ' ', &parsed->principal, + errstr, errstr_size); if (!r && !*parsed->principal) { rd_snprintf(errstr, errstr_size, @@ -607,10 +603,8 @@ parse_ujws_config (const char *cfg, } else if (!strncmp(prefix_scope_claim_name, loc, strlen(prefix_scope_claim_name))) { r = parse_ujws_config_value_for_prefix( - &loc, - prefix_scope_claim_name, ' ', - &parsed->scope_claim_name, - errstr, errstr_size); + &loc, prefix_scope_claim_name, ' ', + &parsed->scope_claim_name, errstr, errstr_size); if (!r && !*parsed->scope_claim_name) { rd_snprintf(errstr, errstr_size, @@ -622,9 +616,8 @@ parse_ujws_config (const char *cfg, } else if (!strncmp(prefix_scope, loc, strlen(prefix_scope))) { r = parse_ujws_config_value_for_prefix( - &loc, - prefix_scope, ' ', &parsed->scope_csv_text, - errstr, errstr_size); + &loc, prefix_scope, ' ', &parsed->scope_csv_text, + errstr, errstr_size); if (!r && !*parsed->scope_csv_text) { rd_snprintf(errstr, errstr_size, @@ -639,9 +632,8 @@ parse_ujws_config (const char *cfg, char *life_seconds_text = NULL; r = parse_ujws_config_value_for_prefix( - &loc, - prefix_life_seconds, ' ', &life_seconds_text, - errstr, errstr_size); + &loc, prefix_life_seconds, ' ', &life_seconds_text, + errstr, errstr_size); if (!r && !*life_seconds_text) { rd_snprintf(errstr, errstr_size, @@ -653,8 +645,8 @@ parse_ujws_config (const char *cfg, } else if (!r) { long long life_seconds_long; char *end_ptr; - life_seconds_long = strtoll( - life_seconds_text, &end_ptr, 10); + life_seconds_long = + strtoll(life_seconds_text, &end_ptr, 10); if (*end_ptr != '\0') 
{ rd_snprintf(errstr, errstr_size, "Invalid " @@ -675,7 +667,7 @@ parse_ujws_config (const char *cfg, r = -1; } else { parsed->life_seconds = - (int)life_seconds_long; + (int)life_seconds_long; } } @@ -686,9 +678,8 @@ parse_ujws_config (const char *cfg, char *extension_key = NULL; r = parse_ujws_config_value_for_prefix( - &loc, - prefix_extension, '=', &extension_key, errstr, - errstr_size); + &loc, prefix_extension, '=', &extension_key, errstr, + errstr_size); if (!r && !*extension_key) { rd_snprintf(errstr, errstr_size, @@ -700,13 +691,13 @@ parse_ujws_config (const char *cfg, } else if (!r) { char *extension_value = NULL; r = parse_ujws_config_value_for_prefix( - &loc, "", ' ', &extension_value, - errstr, errstr_size); + &loc, "", ' ', &extension_value, errstr, + errstr_size); if (!r) { - rd_list_add(&parsed->extensions, - rd_strtup_new( - extension_key, - extension_value)); + rd_list_add( + &parsed->extensions, + rd_strtup_new(extension_key, + extension_value)); rd_free(extension_value); } } @@ -732,11 +723,11 @@ parse_ujws_config (const char *cfg, * from the given information. * @returns allocated memory that the caller must free. */ -static char *create_jws_compact_serialization ( - const struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed, - rd_ts_t now_wallclock) { +static char *create_jws_compact_serialization( + const struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed, + rd_ts_t now_wallclock) { static const char *jose_header_encoded = - "eyJhbGciOiJub25lIn0"; // {"alg":"none"} + "eyJhbGciOiJub25lIn0"; // {"alg":"none"} int scope_json_length = 0; int max_json_length; double now_wallclock_seconds; @@ -757,7 +748,7 @@ static char *create_jws_compact_serialization ( /* Convert from csv to rd_list_t and * calculate json length. */ char *start = parsed->scope_csv_text; - char *curr = start; + char *curr = start; while (*curr != '\0') { /* Ignore empty elements (e.g. 
",,") */ @@ -778,20 +769,19 @@ static char *create_jws_compact_serialization ( } if (!rd_list_find(&scope, start, (void *)strcmp)) - rd_list_add(&scope, - rd_strdup(start)); + rd_list_add(&scope, rd_strdup(start)); if (scope_json_length == 0) { - scope_json_length = 2 + // ," - (int)strlen(parsed->scope_claim_name) + - 4 + // ":[" - (int)strlen(start) + - 1 + // " - 1; // ] + scope_json_length = + 2 + // ," + (int)strlen(parsed->scope_claim_name) + + 4 + // ":[" + (int)strlen(start) + 1 + // " + 1; // ] } else { - scope_json_length += 2; // ," + scope_json_length += 2; // ," scope_json_length += (int)strlen(start); - scope_json_length += 1; // " + scope_json_length += 1; // " } start = curr; @@ -801,30 +791,27 @@ static char *create_jws_compact_serialization ( now_wallclock_seconds = now_wallclock / 1000000.0; /* Generate json */ - max_json_length = 2 + // {" - (int)strlen(parsed->principal_claim_name) + - 3 + // ":" - (int)strlen(parsed->principal) + - 8 + // ","iat": - 14 + // iat NumericDate (e.g. 1549251467.546) - 7 + // ,"exp": - 14 + // exp NumericDate (e.g. 1549252067.546) - scope_json_length + - 1; // } + max_json_length = 2 + // {" + (int)strlen(parsed->principal_claim_name) + + 3 + // ":" + (int)strlen(parsed->principal) + 8 + // ","iat": + 14 + // iat NumericDate (e.g. 1549251467.546) + 7 + // ,"exp": + 14 + // exp NumericDate (e.g. 
1549252067.546) + scope_json_length + 1; // } /* Generate scope portion of json */ - scope_json = rd_malloc(scope_json_length + 1); + scope_json = rd_malloc(scope_json_length + 1); *scope_json = '\0'; - scope_curr = scope_json; + scope_curr = scope_json; for (i = 0; i < rd_list_cnt(&scope); i++) { if (i == 0) - scope_curr += rd_snprintf(scope_curr, - (size_t)(scope_json - + scope_json_length - + 1 - scope_curr), - ",\"%s\":[\"", - parsed->scope_claim_name); + scope_curr += rd_snprintf( + scope_curr, + (size_t)(scope_json + scope_json_length + 1 - + scope_curr), + ",\"%s\":[\"", parsed->scope_claim_name); else scope_curr += sprintf(scope_curr, "%s", ",\""); scope_curr += sprintf(scope_curr, "%s\"", @@ -836,22 +823,20 @@ static char *create_jws_compact_serialization ( claims_json = rd_malloc(max_json_length + 1); rd_snprintf(claims_json, max_json_length + 1, "{\"%s\":\"%s\",\"iat\":%.3f,\"exp\":%.3f%s}", - parsed->principal_claim_name, - parsed->principal, + parsed->principal_claim_name, parsed->principal, now_wallclock_seconds, - now_wallclock_seconds + parsed->life_seconds, - scope_json); + now_wallclock_seconds + parsed->life_seconds, scope_json); rd_free(scope_json); /* Convert to base64URL format, first to base64, then to base64URL */ retval_size = strlen(jose_header_encoded) + 1 + - (((max_json_length + 2) / 3) * 4) + 1 + 1; + (((max_json_length + 2) / 3) * 4) + 1 + 1; retval_jws = rd_malloc(retval_size); rd_snprintf(retval_jws, retval_size, "%s.", jose_header_encoded); jws_claims = retval_jws + strlen(retval_jws); - encode_len = EVP_EncodeBlock((uint8_t *)jws_claims, - (uint8_t *)claims_json, - (int)strlen(claims_json)); + encode_len = + EVP_EncodeBlock((uint8_t *)jws_claims, (uint8_t *)claims_json, + (int)strlen(claims_json)); rd_free(claims_json); jws_last_char = jws_claims + encode_len - 1; @@ -859,12 +844,12 @@ static char *create_jws_compact_serialization ( * and eliminate any padding. 
*/ while (jws_last_char >= jws_claims && *jws_last_char == '=') --jws_last_char; - *(++jws_last_char) = '.'; + *(++jws_last_char) = '.'; *(jws_last_char + 1) = '\0'; /* Convert the 2 differing encode characters */ - for (jws_maybe_non_url_char = retval_jws; - *jws_maybe_non_url_char; jws_maybe_non_url_char++) + for (jws_maybe_non_url_char = retval_jws; *jws_maybe_non_url_char; + jws_maybe_non_url_char++) if (*jws_maybe_non_url_char == '+') *jws_maybe_non_url_char = '-'; else if (*jws_maybe_non_url_char == '/') @@ -885,14 +870,13 @@ static char *create_jws_compact_serialization ( * (and by implication, the `exp` claim) * @returns -1 on failure (\p errstr set), else 0. */ -static int -rd_kafka_oauthbearer_unsecured_token0 ( - struct rd_kafka_sasl_oauthbearer_token *token, - const char *cfg, - int64_t now_wallclock_ms, - char *errstr, size_t errstr_size) { - struct rd_kafka_sasl_oauthbearer_parsed_ujws parsed = - RD_ZERO_INIT; +static int rd_kafka_oauthbearer_unsecured_token0( + struct rd_kafka_sasl_oauthbearer_token *token, + const char *cfg, + int64_t now_wallclock_ms, + char *errstr, + size_t errstr_size) { + struct rd_kafka_sasl_oauthbearer_parsed_ujws parsed = RD_ZERO_INIT; int r; int i; @@ -951,23 +935,25 @@ rd_kafka_oauthbearer_unsecured_token0 ( char **extensionv; int extension_pair_count; char *jws = create_jws_compact_serialization( - &parsed, now_wallclock_ms * 1000); + &parsed, now_wallclock_ms * 1000); extension_pair_count = rd_list_cnt(&parsed.extensions); extensionv = rd_malloc(sizeof(*extensionv) * 2 * extension_pair_count); for (i = 0; i < extension_pair_count; ++i) { - rd_strtup_t *strtup = (rd_strtup_t *) - rd_list_elem(&parsed.extensions, i); - extensionv[2*i] = rd_strdup(strtup->name); - extensionv[2*i+1] = rd_strdup(strtup->value); + rd_strtup_t *strtup = + (rd_strtup_t *)rd_list_elem( + &parsed.extensions, i); + extensionv[2 * i] = rd_strdup(strtup->name); + extensionv[2 * i + 1] = + rd_strdup(strtup->value); } token->token_value = jws; - 
token->md_lifetime_ms = now_wallclock_ms + - parsed.life_seconds * 1000; + token->md_lifetime_ms = + now_wallclock_ms + parsed.life_seconds * 1000; token->md_principal_name = rd_strdup(parsed.principal); - token->extensions = extensionv; - token->extension_size = 2 * extension_pair_count; + token->extensions = extensionv; + token->extension_size = 2 * extension_pair_count; } } RD_IF_FREE(parsed.principal_claim_name, rd_free); @@ -1015,23 +1001,21 @@ rd_kafka_oauthbearer_unsecured_token0 ( * testing and development purposess -- so while the inflexibility of the * parsing rules is acknowledged, it is assumed that this is not problematic. */ -void -rd_kafka_oauthbearer_unsecured_token (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque) { +void rd_kafka_oauthbearer_unsecured_token(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque) { char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT; rd_kafka_dbg(rk, SECURITY, "OAUTHBEARER", "Creating unsecured token"); - if (rd_kafka_oauthbearer_unsecured_token0( - &token, oauthbearer_config, - rd_uclock() / 1000, errstr, sizeof(errstr)) == -1 || + if (rd_kafka_oauthbearer_unsecured_token0(&token, oauthbearer_config, + rd_uclock() / 1000, errstr, + sizeof(errstr)) == -1 || rd_kafka_oauthbearer_set_token( - rk, token.token_value, - token.md_lifetime_ms, token.md_principal_name, - (const char **)token.extensions, token.extension_size, - errstr, sizeof(errstr)) == -1) { + rk, token.token_value, token.md_lifetime_ms, + token.md_principal_name, (const char **)token.extensions, + token.extension_size, errstr, sizeof(errstr)) == -1) { rd_kafka_oauthbearer_set_token_failure(rk, errstr); } @@ -1041,9 +1025,9 @@ rd_kafka_oauthbearer_unsecured_token (rd_kafka_t *rk, /** * @brief Close and free authentication state */ -static void rd_kafka_sasl_oauthbearer_close (rd_kafka_transport_t *rktrans) { +static void rd_kafka_sasl_oauthbearer_close(rd_kafka_transport_t *rktrans) { struct 
rd_kafka_sasl_oauthbearer_state *state = - rktrans->rktrans_sasl.state; + rktrans->rktrans_sasl.state; if (!state) return; @@ -1060,12 +1044,11 @@ static void rd_kafka_sasl_oauthbearer_close (rd_kafka_transport_t *rktrans) { /** * @brief Build client-first-message */ -static void -rd_kafka_sasl_oauthbearer_build_client_first_message ( - rd_kafka_transport_t *rktrans, - rd_chariov_t *out) { +static void rd_kafka_sasl_oauthbearer_build_client_first_message( + rd_kafka_transport_t *rktrans, + rd_chariov_t *out) { struct rd_kafka_sasl_oauthbearer_state *state = - rktrans->rktrans_sasl.state; + rktrans->rktrans_sasl.state; /* * https://tools.ietf.org/html/rfc7628#section-3.1 @@ -1078,49 +1061,47 @@ rd_kafka_sasl_oauthbearer_build_client_first_message ( */ static const char *gs2_header = "n,,"; - static const char *kvsep = "\x01"; - const int kvsep_size = (int)strlen(kvsep); - int extension_size = 0; + static const char *kvsep = "\x01"; + const int kvsep_size = (int)strlen(kvsep); + int extension_size = 0; int i; char *buf; int size_written; unsigned long r; - for (i = 0 ; i < rd_list_cnt(&state->extensions) ; i++) { + for (i = 0; i < rd_list_cnt(&state->extensions); i++) { rd_strtup_t *extension = rd_list_elem(&state->extensions, i); // kvpair = key "=" value kvsep - extension_size += (int)strlen(extension->name) + 1 // "=" - + (int)strlen(extension->value) + kvsep_size; + extension_size += (int)strlen(extension->name) + 1 // "=" + + (int)strlen(extension->value) + kvsep_size; } // client-resp = (gs2-header kvsep *kvpair kvsep) / kvsep - out->size = strlen(gs2_header) + kvsep_size - + strlen("auth=Bearer ") + strlen(state->token_value) - + kvsep_size + extension_size + kvsep_size; - out->ptr = rd_malloc(out->size+1); + out->size = strlen(gs2_header) + kvsep_size + strlen("auth=Bearer ") + + strlen(state->token_value) + kvsep_size + extension_size + + kvsep_size; + out->ptr = rd_malloc(out->size + 1); - buf = out->ptr; + buf = out->ptr; size_written = 0; - r = 
rd_snprintf(buf, out->size+1 - size_written, - "%s%sauth=Bearer %s%s", - gs2_header, kvsep, state->token_value, - kvsep); - rd_assert(r < out->size+1 - size_written); + r = rd_snprintf(buf, out->size + 1 - size_written, + "%s%sauth=Bearer %s%s", gs2_header, kvsep, + state->token_value, kvsep); + rd_assert(r < out->size + 1 - size_written); size_written += r; buf = out->ptr + size_written; - for (i = 0 ; i < rd_list_cnt(&state->extensions) ; i++) { + for (i = 0; i < rd_list_cnt(&state->extensions); i++) { rd_strtup_t *extension = rd_list_elem(&state->extensions, i); - r = rd_snprintf(buf, out->size+1 - size_written, - "%s=%s%s", + r = rd_snprintf(buf, out->size + 1 - size_written, "%s=%s%s", extension->name, extension->value, kvsep); - rd_assert(r < out->size+1 - size_written); + rd_assert(r < out->size + 1 - size_written); size_written += r; buf = out->ptr + size_written; } - r = rd_snprintf(buf, out->size+1 - size_written, "%s", kvsep); - rd_assert(r < out->size+1 - size_written); + r = rd_snprintf(buf, out->size + 1 - size_written, "%s", kvsep); + rd_assert(r < out->size + 1 - size_written); rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "OAUTHBEARER", "Built client first message"); @@ -1132,32 +1113,31 @@ rd_kafka_sasl_oauthbearer_build_client_first_message ( * @brief SASL OAUTHBEARER client state machine * @returns -1 on failure (\p errstr set), else 0. 
*/ -static int rd_kafka_sasl_oauthbearer_fsm (rd_kafka_transport_t *rktrans, - const rd_chariov_t *in, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_oauthbearer_fsm(rd_kafka_transport_t *rktrans, + const rd_chariov_t *in, + char *errstr, + size_t errstr_size) { static const char *state_names[] = { - "client-first-message", - "server-first-message", - "server-failure-message", + "client-first-message", + "server-first-message", + "server-failure-message", }; struct rd_kafka_sasl_oauthbearer_state *state = - rktrans->rktrans_sasl.state; + rktrans->rktrans_sasl.state; rd_chariov_t out = RD_ZERO_INIT; - int r = -1; + int r = -1; rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "OAUTHBEARER", "SASL OAUTHBEARER client in state %s", state_names[state->state]); - switch (state->state) - { + switch (state->state) { case RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE: rd_dassert(!in); /* Not expecting any server-input */ rd_kafka_sasl_oauthbearer_build_client_first_message(rktrans, &out); - state->state = - RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG; + state->state = RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG; break; @@ -1186,11 +1166,11 @@ static int rd_kafka_sasl_oauthbearer_fsm (rd_kafka_transport_t *rktrans, * Send final kvsep (CTRL-A) character */ out.size = 1; - out.ptr = rd_malloc(out.size + 1); - rd_snprintf(out.ptr, out.size+1, "\x01"); + out.ptr = rd_malloc(out.size + 1); + rd_snprintf(out.ptr, out.size + 1, "\x01"); state->state = - RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL; - r = 0; // Will fail later in next state after sending response + RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL; + r = 0; // Will fail later in next state after sending response break; case RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL: @@ -1198,17 +1178,16 @@ static int rd_kafka_sasl_oauthbearer_fsm (rd_kafka_transport_t *rktrans, rd_snprintf(errstr, errstr_size, "SASL OAUTHBEARER authentication failed " "(principal=%s): %s", - 
state->md_principal_name, - state->server_error_msg); - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY|RD_KAFKA_DBG_BROKER, + state->md_principal_name, state->server_error_msg); + rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER, "OAUTHBEARER", "%s", errstr); r = -1; break; } if (out.ptr) { - r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, - errstr, errstr_size); + r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, errstr, + errstr_size); rd_free(out.ptr); } @@ -1219,12 +1198,13 @@ static int rd_kafka_sasl_oauthbearer_fsm (rd_kafka_transport_t *rktrans, /** * @brief Handle received frame from broker. */ -static int rd_kafka_sasl_oauthbearer_recv (rd_kafka_transport_t *rktrans, - const void *buf, size_t size, - char *errstr, size_t errstr_size) { - const rd_chariov_t in = { .ptr = (char *)buf, .size = size }; - return rd_kafka_sasl_oauthbearer_fsm(rktrans, &in, - errstr, errstr_size); +static int rd_kafka_sasl_oauthbearer_recv(rd_kafka_transport_t *rktrans, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size) { + const rd_chariov_t in = {.ptr = (char *)buf, .size = size}; + return rd_kafka_sasl_oauthbearer_fsm(rktrans, &in, errstr, errstr_size); } @@ -1235,15 +1215,15 @@ static int rd_kafka_sasl_oauthbearer_recv (rd_kafka_transport_t *rktrans, * * @locality broker thread */ -static int -rd_kafka_sasl_oauthbearer_client_new (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_oauthbearer_client_new(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_oauthbearer_handle_t *handle = - rktrans->rktrans_rkb->rkb_rk->rk_sasl.handle; + rktrans->rktrans_rkb->rkb_rk->rk_sasl.handle; struct rd_kafka_sasl_oauthbearer_state *state; - state = rd_calloc(1, sizeof(*state)); + state = rd_calloc(1, sizeof(*state)); state->state = RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE; /* @@ -1263,13 +1243,13 @@ 
rd_kafka_sasl_oauthbearer_client_new (rd_kafka_transport_t *rktrans, rd_snprintf(errstr, errstr_size, "OAUTHBEARER cannot log in because there " "is no token available; last error: %s", - handle->errstr ? - handle->errstr : "(not available)"); + handle->errstr ? handle->errstr + : "(not available)"); rwlock_rdunlock(&handle->lock); return -1; } - state->token_value = rd_strdup(handle->token_value); + state->token_value = rd_strdup(handle->token_value); state->md_principal_name = rd_strdup(handle->md_principal_name); rd_list_copy_to(&state->extensions, &handle->extensions, rd_strtup_list_copy, NULL); @@ -1277,8 +1257,8 @@ rd_kafka_sasl_oauthbearer_client_new (rd_kafka_transport_t *rktrans, rwlock_rdunlock(&handle->lock); /* Kick off the FSM */ - return rd_kafka_sasl_oauthbearer_fsm(rktrans, NULL, - errstr, errstr_size); + return rd_kafka_sasl_oauthbearer_fsm(rktrans, NULL, errstr, + errstr_size); } @@ -1288,9 +1268,9 @@ rd_kafka_sasl_oauthbearer_client_new (rd_kafka_transport_t *rktrans, * @locality rdkafka main thread */ static void -rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_t *rk = arg; +rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_t *rk = arg; rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle; /* Enqueue a token refresh if necessary */ @@ -1301,11 +1281,12 @@ rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb (rd_kafka_timers_t *rkts, /** * @brief Per-client-instance initializer */ -static int rd_kafka_sasl_oauthbearer_init (rd_kafka_t *rk, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_oauthbearer_init(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_oauthbearer_handle_t *handle; - handle = rd_calloc(1, sizeof(*handle)); + handle = rd_calloc(1, sizeof(*handle)); rk->rk_sasl.handle = handle; rwlock_init(&handle->lock); @@ -1315,10 +1296,9 @@ static int rd_kafka_sasl_oauthbearer_init (rd_kafka_t *rk, 
rd_list_init(&handle->extensions, 0, (void (*)(void *))rd_strtup_destroy); - rd_kafka_timer_start(&rk->rk_timers, &handle->token_refresh_tmr, - 1 * 1000 * 1000, - rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb, - rk); + rd_kafka_timer_start( + &rk->rk_timers, &handle->token_refresh_tmr, 1 * 1000 * 1000, + rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb, rk); /* Automatically refresh the token if using the builtin * unsecure JWS token refresher, to avoid an initial connection @@ -1326,8 +1306,8 @@ static int rd_kafka_sasl_oauthbearer_init (rd_kafka_t *rk, if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb == rd_kafka_oauthbearer_unsecured_token) { rk->rk_conf.sasl.oauthbearer.token_refresh_cb( - rk, rk->rk_conf.sasl.oauthbearer_config, - rk->rk_conf.opaque); + rk, rk->rk_conf.sasl.oauthbearer_config, + rk->rk_conf.opaque); return 0; } @@ -1342,15 +1322,15 @@ static int rd_kafka_sasl_oauthbearer_init (rd_kafka_t *rk, } if (rk->rk_conf.sasl.oauthbearer.method == - RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC && + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC && #if FIXME /************************ FIXME when .._oidc.c is added ****/ rk->rk_conf.sasl.oauthbearer.token_refresh_cb == - rd_kafka_sasl_oauthbearer_oidc_token_refresh_cb + rd_kafka_sasl_oauthbearer_oidc_token_refresh_cb #else 1 #endif - ) /* move this paren up on the .._refresh_cb - * line when FIXME is fixed. */ + ) /* move this paren up on the .._refresh_cb + * line when FIXME is fixed. */ handle->internal_refresh = rd_true; /* Otherwise enqueue a refresh callback for the application. 
*/ @@ -1363,7 +1343,7 @@ static int rd_kafka_sasl_oauthbearer_init (rd_kafka_t *rk, /** * @brief Per-client-instance destructor */ -static void rd_kafka_sasl_oauthbearer_term (rd_kafka_t *rk) { +static void rd_kafka_sasl_oauthbearer_term(rd_kafka_t *rk) { rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle; if (!handle) @@ -1382,7 +1362,6 @@ static void rd_kafka_sasl_oauthbearer_term (rd_kafka_t *rk) { rwlock_destroy(&handle->lock); rd_free(handle); - } @@ -1392,7 +1371,7 @@ static void rd_kafka_sasl_oauthbearer_term (rd_kafka_t *rk) { * available unless/until an initial token retrieval * succeeds, so wait for this precondition if necessary. */ -static rd_bool_t rd_kafka_sasl_oauthbearer_ready (rd_kafka_t *rk) { +static rd_bool_t rd_kafka_sasl_oauthbearer_ready(rd_kafka_t *rk) { rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle; if (!handle) @@ -1406,9 +1385,9 @@ static rd_bool_t rd_kafka_sasl_oauthbearer_ready (rd_kafka_t *rk) { * @brief Validate OAUTHBEARER config, which is a no-op * (we rely on initial token retrieval) */ -static int rd_kafka_sasl_oauthbearer_conf_validate (rd_kafka_t *rk, - char *errstr, - size_t errstr_size) { +static int rd_kafka_sasl_oauthbearer_conf_validate(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { /* * We must rely on the initial token retrieval as a proxy * for configuration validation because the configuration is @@ -1421,16 +1400,15 @@ static int rd_kafka_sasl_oauthbearer_conf_validate (rd_kafka_t *rk, - const struct rd_kafka_sasl_provider rd_kafka_sasl_oauthbearer_provider = { - .name = "OAUTHBEARER (builtin)", - .init = rd_kafka_sasl_oauthbearer_init, - .term = rd_kafka_sasl_oauthbearer_term, - .ready = rd_kafka_sasl_oauthbearer_ready, - .client_new = rd_kafka_sasl_oauthbearer_client_new, - .recv = rd_kafka_sasl_oauthbearer_recv, - .close = rd_kafka_sasl_oauthbearer_close, - .conf_validate = rd_kafka_sasl_oauthbearer_conf_validate, + .name = "OAUTHBEARER (builtin)", + .init = 
rd_kafka_sasl_oauthbearer_init, + .term = rd_kafka_sasl_oauthbearer_term, + .ready = rd_kafka_sasl_oauthbearer_ready, + .client_new = rd_kafka_sasl_oauthbearer_client_new, + .recv = rd_kafka_sasl_oauthbearer_recv, + .close = rd_kafka_sasl_oauthbearer_close, + .conf_validate = rd_kafka_sasl_oauthbearer_conf_validate, }; @@ -1445,39 +1423,39 @@ const struct rd_kafka_sasl_provider rd_kafka_sasl_oauthbearer_provider = { * @brief `sasl.oauthbearer.config` test: * should generate correct default values. */ -static int do_unittest_config_defaults (void) { - static const char *sasl_oauthbearer_config = "principal=fubar " - "scopeClaimName=whatever"; +static int do_unittest_config_defaults(void) { + static const char *sasl_oauthbearer_config = + "principal=fubar " + "scopeClaimName=whatever"; // default scope is empty, default lifetime is 3600 seconds // {"alg":"none"} // . // {"sub":"fubar","iat":1.000,"exp":3601.000} // - static const char *expected_token_value = "eyJhbGciOiJub25lIn0" - "." - "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6MzYwMS4wMDB9" - "."; + static const char *expected_token_value = + "eyJhbGciOiJub25lIn0" + "." 
+ "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6MzYwMS4wMDB9" + "."; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r == -1) RD_UT_FAIL("Failed to create a token: %s: %s", sasl_oauthbearer_config, errstr); - RD_UT_ASSERT(token.md_lifetime_ms == - now_wallclock_ms + 3600 * 1000, - "Invalid md_lifetime_ms %"PRId64, token.md_lifetime_ms); + RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 3600 * 1000, + "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms); RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"), "Invalid md_principal_name %s", token.md_principal_name); RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value), - "Invalid token_value %s, expected %s", - token.token_value, expected_token_value); + "Invalid token_value %s, expected %s", token.token_value, + expected_token_value); rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1488,38 +1466,39 @@ static int do_unittest_config_defaults (void) { * @brief `sasl.oauthbearer.config` test: * should generate correct token for explicit scope and lifeSeconds values. */ -static int do_unittest_config_explicit_scope_and_life (void) { - static const char *sasl_oauthbearer_config = "principal=fubar " - "scope=role1,role2 lifeSeconds=60"; +static int do_unittest_config_explicit_scope_and_life(void) { + static const char *sasl_oauthbearer_config = + "principal=fubar " + "scope=role1,role2 lifeSeconds=60"; // {"alg":"none"} // . // {"sub":"fubar","iat":1.000,"exp":61.000,"scope":["role1","role2"]} // - static const char *expected_token_value = "eyJhbGciOiJub25lIn0" - "." 
- "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJzY29wZ" - "SI6WyJyb2xlMSIsInJvbGUyIl19" - "."; + static const char *expected_token_value = + "eyJhbGciOiJub25lIn0" + "." + "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJzY29wZ" + "SI6WyJyb2xlMSIsInJvbGUyIl19" + "."; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r == -1) RD_UT_FAIL("Failed to create a token: %s: %s", sasl_oauthbearer_config, errstr); RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 60 * 1000, - "Invalid md_lifetime_ms %"PRId64, token.md_lifetime_ms); + "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms); RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"), "Invalid md_principal_name %s", token.md_principal_name); RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value), - "Invalid token_value %s, expected %s", - token.token_value, expected_token_value); + "Invalid token_value %s, expected %s", token.token_value, + expected_token_value); rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1530,39 +1509,40 @@ static int do_unittest_config_explicit_scope_and_life (void) { * @brief `sasl.oauthbearer.config` test: * should generate correct token when all values are provided explicitly. */ -static int do_unittest_config_all_explicit_values (void) { - static const char *sasl_oauthbearer_config = "principal=fubar " - "principalClaimName=azp scope=role1,role2 " - "scopeClaimName=roles lifeSeconds=60"; +static int do_unittest_config_all_explicit_values(void) { + static const char *sasl_oauthbearer_config = + "principal=fubar " + "principalClaimName=azp scope=role1,role2 " + "scopeClaimName=roles lifeSeconds=60"; // {"alg":"none"} // . 
// {"azp":"fubar","iat":1.000,"exp":61.000,"roles":["role1","role2"]} // - static const char *expected_token_value = "eyJhbGciOiJub25lIn0" - "." - "eyJhenAiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJyb2xlc" - "yI6WyJyb2xlMSIsInJvbGUyIl19" - "."; + static const char *expected_token_value = + "eyJhbGciOiJub25lIn0" + "." + "eyJhenAiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJyb2xlc" + "yI6WyJyb2xlMSIsInJvbGUyIl19" + "."; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r == -1) RD_UT_FAIL("Failed to create a token: %s: %s", sasl_oauthbearer_config, errstr); RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 60 * 1000, - "Invalid md_lifetime_ms %"PRId64, token.md_lifetime_ms); + "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms); RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"), "Invalid md_principal_name %s", token.md_principal_name); RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value), - "Invalid token_value %s, expected %s", - token.token_value, expected_token_value); + "Invalid token_value %s, expected %s", token.token_value, + expected_token_value); rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1573,20 +1553,20 @@ static int do_unittest_config_all_explicit_values (void) { * @brief `sasl.oauthbearer.config` test: * should fail when no principal specified. 
*/ -static int do_unittest_config_no_principal_should_fail (void) { - static const char *expected_msg = "Invalid sasl.oauthbearer.config: " - "no principal="; +static int do_unittest_config_no_principal_should_fail(void) { + static const char *expected_msg = + "Invalid sasl.oauthbearer.config: " + "no principal="; static const char *sasl_oauthbearer_config = - "extension_notaprincipal=hi"; + "extension_notaprincipal=hi"; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r != -1) rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1594,7 +1574,8 @@ static int do_unittest_config_no_principal_should_fail (void) { RD_UT_ASSERT(!strcmp(errstr, expected_msg), "Incorrect error message when no principal: " - "expected=%s received=%s", expected_msg, errstr); + "expected=%s received=%s", + expected_msg, errstr); RD_UT_PASS(); } @@ -1602,19 +1583,19 @@ static int do_unittest_config_no_principal_should_fail (void) { * @brief `sasl.oauthbearer.config` test: * should fail when no sasl.oauthbearer.config is specified. 
*/ -static int do_unittest_config_empty_should_fail (void) { - static const char *expected_msg = "Invalid sasl.oauthbearer.config: " - "must not be empty"; +static int do_unittest_config_empty_should_fail(void) { + static const char *expected_msg = + "Invalid sasl.oauthbearer.config: " + "must not be empty"; static const char *sasl_oauthbearer_config = ""; - rd_ts_t now_wallclock_ms = 1000; + rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r != -1) rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1622,7 +1603,8 @@ static int do_unittest_config_empty_should_fail (void) { RD_UT_ASSERT(!strcmp(errstr, expected_msg), "Incorrect error message with empty config: " - "expected=%s received=%s", expected_msg, errstr); + "expected=%s received=%s", + expected_msg, errstr); RD_UT_PASS(); } @@ -1631,19 +1613,19 @@ static int do_unittest_config_empty_should_fail (void) { * should fail when something unrecognized is specified. 
*/ static int do_unittest_config_unrecognized_should_fail(void) { - static const char *expected_msg = "Unrecognized " - "sasl.oauthbearer.config beginning at: unrecognized"; + static const char *expected_msg = + "Unrecognized " + "sasl.oauthbearer.config beginning at: unrecognized"; static const char *sasl_oauthbearer_config = - "principal=fubar unrecognized"; + "principal=fubar unrecognized"; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r != -1) rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1651,7 +1633,8 @@ static int do_unittest_config_unrecognized_should_fail(void) { RD_UT_ASSERT(!strcmp(errstr, expected_msg), "Incorrect error message with something unrecognized: " - "expected=%s received=%s", expected_msg, errstr); + "expected=%s received=%s", + expected_msg, errstr); RD_UT_PASS(); } @@ -1661,39 +1644,33 @@ static int do_unittest_config_unrecognized_should_fail(void) { */ static int do_unittest_config_empty_value_should_fail(void) { static const char *sasl_oauthbearer_configs[] = { - "principal=", - "principal=fubar principalClaimName=", - "principal=fubar scope=", - "principal=fubar scopeClaimName=", - "principal=fubar lifeSeconds=" - }; + "principal=", "principal=fubar principalClaimName=", + "principal=fubar scope=", "principal=fubar scopeClaimName=", + "principal=fubar lifeSeconds="}; static const char *expected_prefix = - "Invalid sasl.oauthbearer.config: empty"; + "Invalid sasl.oauthbearer.config: empty"; size_t i; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; int r; - for (i = 0; - i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *); + for (i = 0; i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *); i++) { struct rd_kafka_sasl_oauthbearer_token token; r = 
rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_configs[i], now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_configs[i], now_wallclock_ms, + errstr, sizeof(errstr)); if (r != -1) rd_kafka_sasl_oauthbearer_token_free(&token); RD_UT_ASSERT(r == -1, "Did not fail with an empty value: %s", sasl_oauthbearer_configs[i]); - RD_UT_ASSERT(!strncmp(expected_prefix, - errstr, strlen(expected_prefix)), - "Incorrect error message prefix when empty " - "(%s): expected=%s received=%s", - sasl_oauthbearer_configs[i], expected_prefix, - errstr); + RD_UT_ASSERT( + !strncmp(expected_prefix, errstr, strlen(expected_prefix)), + "Incorrect error message prefix when empty " + "(%s): expected=%s received=%s", + sasl_oauthbearer_configs[i], expected_prefix, errstr); } RD_UT_PASS(); } @@ -1704,38 +1681,34 @@ static int do_unittest_config_empty_value_should_fail(void) { */ static int do_unittest_config_value_with_quote_should_fail(void) { static const char *sasl_oauthbearer_configs[] = { - "principal=\"fu", - "principal=fubar principalClaimName=\"bar", - "principal=fubar scope=\"a,b,c", - "principal=fubar scopeClaimName=\"baz" - }; - static const char *expected_prefix = "Invalid " - "sasl.oauthbearer.config: '\"' cannot appear in "; + "principal=\"fu", "principal=fubar principalClaimName=\"bar", + "principal=fubar scope=\"a,b,c", + "principal=fubar scopeClaimName=\"baz"}; + static const char *expected_prefix = + "Invalid " + "sasl.oauthbearer.config: '\"' cannot appear in "; size_t i; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; int r; - for (i = 0; - i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *); + for (i = 0; i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *); i++) { struct rd_kafka_sasl_oauthbearer_token token; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_configs[i], now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_configs[i], now_wallclock_ms, + errstr, 
sizeof(errstr)); if (r != -1) rd_kafka_sasl_oauthbearer_token_free(&token); RD_UT_ASSERT(r == -1, "Did not fail with embedded quote: %s", sasl_oauthbearer_configs[i]); - RD_UT_ASSERT(!strncmp(expected_prefix, - errstr, strlen(expected_prefix)), - "Incorrect error message prefix with " - "embedded quote (%s): expected=%s received=%s", - sasl_oauthbearer_configs[i], expected_prefix, - errstr); + RD_UT_ASSERT( + !strncmp(expected_prefix, errstr, strlen(expected_prefix)), + "Incorrect error message prefix with " + "embedded quote (%s): expected=%s received=%s", + sasl_oauthbearer_configs[i], expected_prefix, errstr); } RD_UT_PASS(); } @@ -1745,30 +1718,30 @@ static int do_unittest_config_value_with_quote_should_fail(void) { * should generate correct extensions. */ static int do_unittest_config_extensions(void) { - static const char *sasl_oauthbearer_config = "principal=fubar " - "extension_a=b extension_yz=yzval"; + static const char *sasl_oauthbearer_config = + "principal=fubar " + "extension_a=b extension_yz=yzval"; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r == -1) RD_UT_FAIL("Failed to create a token: %s: %s", sasl_oauthbearer_config, errstr); RD_UT_ASSERT(token.extension_size == 4, - "Incorrect extensions: expected 4, received %"PRIusz, + "Incorrect extensions: expected 4, received %" PRIusz, token.extension_size); RD_UT_ASSERT(!strcmp(token.extensions[0], "a") && - !strcmp(token.extensions[1], "b") && - !strcmp(token.extensions[2], "yz") && - !strcmp(token.extensions[3], "yzval"), + !strcmp(token.extensions[1], "b") && + !strcmp(token.extensions[2], "yz") && + !strcmp(token.extensions[3], "yzval"), "Incorrect extensions: expected a=b and " "yz=yzval but received %s=%s and %s=%s", 
token.extensions[0], token.extensions[1], @@ -1783,19 +1756,14 @@ static int do_unittest_config_extensions(void) { * @brief make sure illegal extensions keys are rejected */ static int do_unittest_illegal_extension_keys_should_fail(void) { - static const char *illegal_keys[] = { - "", - "auth", - "a1", - " a" - }; + static const char *illegal_keys[] = {"", "auth", "a1", " a"}; size_t i; char errstr[512]; int r; for (i = 0; i < sizeof(illegal_keys) / sizeof(const char *); i++) { - r = check_oauthbearer_extension_key(illegal_keys[i], - errstr, sizeof(errstr)); + r = check_oauthbearer_extension_key(illegal_keys[i], errstr, + sizeof(errstr)); RD_UT_ASSERT(r == -1, "Did not recognize illegal extension key: %s", illegal_keys[i]); @@ -1807,20 +1775,21 @@ static int do_unittest_illegal_extension_keys_should_fail(void) { * @brief make sure illegal extensions keys are rejected */ static int do_unittest_odd_extension_size_should_fail(void) { - static const char *expected_errstr = "Incorrect extension size " - "(must be a non-negative multiple of 2): 1"; + static const char *expected_errstr = + "Incorrect extension size " + "(must be a non-negative multiple of 2): 1"; char errstr[512]; rd_kafka_resp_err_t err; - rd_kafka_t rk = RD_ZERO_INIT; + rd_kafka_t rk = RD_ZERO_INIT; rd_kafka_sasl_oauthbearer_handle_t handle = RD_ZERO_INIT; rk.rk_conf.sasl.provider = &rd_kafka_sasl_oauthbearer_provider; - rk.rk_sasl.handle = &handle; + rk.rk_sasl.handle = &handle; rwlock_init(&handle.lock); - err = rd_kafka_oauthbearer_set_token0(&rk, "abcd", 1000, "fubar", - NULL, 1, errstr, sizeof(errstr)); + err = rd_kafka_oauthbearer_set_token0(&rk, "abcd", 1000, "fubar", NULL, + 1, errstr, sizeof(errstr)); rwlock_destroy(&handle.lock); @@ -1835,7 +1804,7 @@ static int do_unittest_odd_extension_size_should_fail(void) { RD_UT_PASS(); } -int unittest_sasl_oauthbearer (void) { +int unittest_sasl_oauthbearer(void) { int fails = 0; fails += do_unittest_config_no_principal_should_fail(); diff --git 
a/src/rdkafka_sasl_oauthbearer.h b/src/rdkafka_sasl_oauthbearer.h index 8f1ae51c77..75ab51d02f 100644 --- a/src/rdkafka_sasl_oauthbearer.h +++ b/src/rdkafka_sasl_oauthbearer.h @@ -29,23 +29,24 @@ #ifndef _RDKAFKA_SASL_OAUTHBEARER_H_ #define _RDKAFKA_SASL_OAUTHBEARER_H_ -void rd_kafka_oauthbearer_unsecured_token (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque); +void rd_kafka_oauthbearer_unsecured_token(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque); rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, - const char *token_value, - int64_t md_lifetime_ms, - const char *md_principal_name, - const char **extensions, - size_t extension_size, - char *errstr, size_t errstr_size); +rd_kafka_oauthbearer_set_token0(rd_kafka_t *rk, + const char *token_value, + int64_t md_lifetime_ms, + const char *md_principal_name, + const char **extensions, + size_t extension_size, + char *errstr, + size_t errstr_size); -rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token_failure0 (rd_kafka_t *rk, const char *errstr); +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure0(rd_kafka_t *rk, + const char *errstr); -int unittest_sasl_oauthbearer (void); +int unittest_sasl_oauthbearer(void); #endif /* _RDKAFKA_SASL_OAUTHBEARER_H_ */ diff --git a/src/rdkafka_sasl_plain.c b/src/rdkafka_sasl_plain.c index bdf4222dae..d99f22962b 100644 --- a/src/rdkafka_sasl_plain.c +++ b/src/rdkafka_sasl_plain.c @@ -40,13 +40,16 @@ /** * @brief Handle received frame from broker. 
*/ -static int rd_kafka_sasl_plain_recv (struct rd_kafka_transport_s *rktrans, - const void *buf, size_t size, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_plain_recv(struct rd_kafka_transport_s *rktrans, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size) { if (size) rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLPLAIN", "Received non-empty SASL PLAIN (builtin) " - "response from broker (%"PRIusz" bytes)", size); + "response from broker (%" PRIusz " bytes)", + size); rd_kafka_sasl_auth_done(rktrans); @@ -61,19 +64,22 @@ static int rd_kafka_sasl_plain_recv (struct rd_kafka_transport_s *rktrans, * * @locality broker thread */ -int rd_kafka_sasl_plain_client_new (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size) { +int rd_kafka_sasl_plain_client_new(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size) { rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_t *rk = rkb->rkb_rk; /* [authzid] UTF8NUL authcid UTF8NUL passwd */ char *buf; - int of = 0; + int of = 0; int zidlen = 0; - int cidlen = rk->rk_conf.sasl.username ? - (int)strlen(rk->rk_conf.sasl.username) : 0; - int pwlen = rk->rk_conf.sasl.password ? - (int)strlen(rk->rk_conf.sasl.password) : 0; + int cidlen = rk->rk_conf.sasl.username + ? (int)strlen(rk->rk_conf.sasl.username) + : 0; + int pwlen = rk->rk_conf.sasl.password + ? 
(int)strlen(rk->rk_conf.sasl.password) + : 0; buf = rd_alloca(zidlen + 1 + cidlen + 1 + pwlen + 1); @@ -93,8 +99,7 @@ int rd_kafka_sasl_plain_client_new (rd_kafka_transport_t *rktrans, rd_rkb_dbg(rkb, SECURITY, "SASLPLAIN", "Sending SASL PLAIN (builtin) authentication token"); - if (rd_kafka_sasl_send(rktrans, buf, of, - errstr, errstr_size)) + if (rd_kafka_sasl_send(rktrans, buf, of, errstr, errstr_size)) return -1; /* PLAIN is appearantly done here, but we still need to make sure @@ -107,9 +112,9 @@ int rd_kafka_sasl_plain_client_new (rd_kafka_transport_t *rktrans, /** * @brief Validate PLAIN config */ -static int rd_kafka_sasl_plain_conf_validate (rd_kafka_t *rk, - char *errstr, - size_t errstr_size) { +static int rd_kafka_sasl_plain_conf_validate(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { if (!rk->rk_conf.sasl.username || !rk->rk_conf.sasl.password) { rd_snprintf(errstr, errstr_size, "sasl.username and sasl.password must be set"); @@ -121,8 +126,7 @@ static int rd_kafka_sasl_plain_conf_validate (rd_kafka_t *rk, const struct rd_kafka_sasl_provider rd_kafka_sasl_plain_provider = { - .name = "PLAIN (builtin)", - .client_new = rd_kafka_sasl_plain_client_new, - .recv = rd_kafka_sasl_plain_recv, - .conf_validate = rd_kafka_sasl_plain_conf_validate -}; + .name = "PLAIN (builtin)", + .client_new = rd_kafka_sasl_plain_client_new, + .recv = rd_kafka_sasl_plain_recv, + .conf_validate = rd_kafka_sasl_plain_conf_validate}; diff --git a/src/rdkafka_sasl_scram.c b/src/rdkafka_sasl_scram.c index 0eec5c6af3..a71091993e 100644 --- a/src/rdkafka_sasl_scram.c +++ b/src/rdkafka_sasl_scram.c @@ -52,22 +52,21 @@ * @brief Per-connection state */ struct rd_kafka_sasl_scram_state { - enum { - RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE, - RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE, - RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE, + enum { RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE, + RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE, + 
RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE, } state; rd_chariov_t cnonce; /* client c-nonce */ rd_chariov_t first_msg_bare; /* client-first-message-bare */ char *ServerSignatureB64; /* ServerSignature in Base64 */ - const EVP_MD *evp; /* Hash function pointer */ + const EVP_MD *evp; /* Hash function pointer */ }; /** * @brief Close and free authentication state */ -static void rd_kafka_sasl_scram_close (rd_kafka_transport_t *rktrans) { +static void rd_kafka_sasl_scram_close(rd_kafka_transport_t *rktrans) { struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; if (!state) @@ -85,12 +84,12 @@ static void rd_kafka_sasl_scram_close (rd_kafka_transport_t *rktrans) { * @brief Generates a nonce string (a random printable string) * @remark dst->ptr will be allocated and must be freed. */ -static void rd_kafka_sasl_scram_generate_nonce (rd_chariov_t *dst) { +static void rd_kafka_sasl_scram_generate_nonce(rd_chariov_t *dst) { int i; dst->size = 32; - dst->ptr = rd_malloc(dst->size+1); - for (i = 0 ; i < (int)dst->size ; i++) - dst->ptr[i] = (char)rd_jitter(0x2d/*-*/, 0x7e/*~*/); + dst->ptr = rd_malloc(dst->size + 1); + for (i = 0; i < (int)dst->size; i++) + dst->ptr[i] = (char)rd_jitter(0x2d /*-*/, 0x7e /*~*/); dst->ptr[i] = 0; } @@ -101,12 +100,14 @@ static void rd_kafka_sasl_scram_generate_nonce (rd_chariov_t *dst) { * on failure in which case an error is written to \p errstr * prefixed by \p description. 
*/ -static char *rd_kafka_sasl_scram_get_attr (const rd_chariov_t *inbuf, char attr, - const char *description, - char *errstr, size_t errstr_size) { +static char *rd_kafka_sasl_scram_get_attr(const rd_chariov_t *inbuf, + char attr, + const char *description, + char *errstr, + size_t errstr_size) { size_t of = 0; - for (of = 0 ; of < inbuf->size ; ) { + for (of = 0; of < inbuf->size;) { const char *td; size_t len; @@ -118,23 +119,22 @@ static char *rd_kafka_sasl_scram_get_attr (const rd_chariov_t *inbuf, char attr, len = inbuf->size - of; /* Check if attr "x=" matches */ - if (inbuf->ptr[of] == attr && inbuf->size > of+1 && - inbuf->ptr[of+1] == '=') { + if (inbuf->ptr[of] == attr && inbuf->size > of + 1 && + inbuf->ptr[of + 1] == '=') { char *ret; of += 2; /* past = */ ret = rd_malloc(len - 2 + 1); memcpy(ret, &inbuf->ptr[of], len - 2); - ret[len-2] = '\0'; + ret[len - 2] = '\0'; return ret; } /* Not the attr we are looking for, skip * past the next delimiter and continue looking. */ - of += len+1; + of += len + 1; } - rd_snprintf(errstr, errstr_size, - "%s: could not find attribute (%c)", + rd_snprintf(errstr, errstr_size, "%s: could not find attribute (%c)", description, attr); return NULL; } @@ -144,7 +144,7 @@ static char *rd_kafka_sasl_scram_get_attr (const rd_chariov_t *inbuf, char attr, * @brief Base64 encode binary input \p in * @returns a newly allocated, base64-encoded string or NULL on error. */ -static char *rd_base64_encode (const rd_chariov_t *in) { +static char *rd_base64_encode(const rd_chariov_t *in) { char *ret; size_t ret_len, max_len; @@ -155,12 +155,13 @@ static char *rd_base64_encode (const rd_chariov_t *in) { /* This does not overflow given the |INT_MAX| bound, above. 
*/ max_len = (((in->size + 2) / 3) * 4) + 1; - ret = rd_malloc(max_len); + ret = rd_malloc(max_len); if (ret == NULL) { return NULL; } - ret_len = EVP_EncodeBlock((uint8_t*)ret, (uint8_t*)in->ptr, (int)in->size); + ret_len = + EVP_EncodeBlock((uint8_t *)ret, (uint8_t *)in->ptr, (int)in->size); assert(ret_len < max_len); ret[ret_len] = 0; @@ -174,7 +175,7 @@ static char *rd_base64_encode (const rd_chariov_t *in) { * @returns -1 on invalid Base64, or 0 on successes in which case a * newly allocated binary string is set in out (and size). */ -static int rd_base64_decode (const rd_chariov_t *in, rd_chariov_t *out) { +static int rd_base64_decode(const rd_chariov_t *in, rd_chariov_t *out) { size_t ret_len; /* OpenSSL takes an |int| argument, so |in->size| must not exceed @@ -183,10 +184,10 @@ static int rd_base64_decode (const rd_chariov_t *in, rd_chariov_t *out) { return -1; } - ret_len = ((in->size / 4) * 3); - out->ptr = rd_malloc(ret_len+1); + ret_len = ((in->size / 4) * 3); + out->ptr = rd_malloc(ret_len + 1); - if (EVP_DecodeBlock((uint8_t*)out->ptr, (uint8_t*)in->ptr, + if (EVP_DecodeBlock((uint8_t *)out->ptr, (uint8_t *)in->ptr, (int)in->size) == -1) { rd_free(out->ptr); out->ptr = NULL; @@ -195,16 +196,16 @@ static int rd_base64_decode (const rd_chariov_t *in, rd_chariov_t *out) { /* EVP_DecodeBlock will pad the output with trailing NULs and count * them in the return value. */ - if (in->size > 1 && in->ptr[in->size-1] == '=') { - if (in->size > 2 && in->ptr[in->size-2] == '=') { - ret_len -= 2; - } else { - ret_len -= 1; - } + if (in->size > 1 && in->ptr[in->size - 1] == '=') { + if (in->size > 2 && in->ptr[in->size - 2] == '=') { + ret_len -= 2; + } else { + ret_len -= 1; + } } out->ptr[ret_len] = 0; - out->size = ret_len; + out->size = ret_len; return 0; } @@ -215,14 +216,13 @@ static int rd_base64_decode (const rd_chariov_t *in, rd_chariov_t *out) { * which must be at least EVP_MAX_MD_SIZE. 
* @returns 0 on success, else -1 */ -static int -rd_kafka_sasl_scram_H (rd_kafka_transport_t *rktrans, - const rd_chariov_t *str, - rd_chariov_t *out) { +static int rd_kafka_sasl_scram_H(rd_kafka_transport_t *rktrans, + const rd_chariov_t *str, + rd_chariov_t *out) { rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_H( - (const unsigned char *)str->ptr, str->size, - (unsigned char *)out->ptr); + (const unsigned char *)str->ptr, str->size, + (unsigned char *)out->ptr); out->size = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_H_size; return 0; @@ -233,17 +233,15 @@ rd_kafka_sasl_scram_H (rd_kafka_transport_t *rktrans, * which must be at least EVP_MAX_MD_SIZE. * @returns 0 on success, else -1 */ -static int -rd_kafka_sasl_scram_HMAC (rd_kafka_transport_t *rktrans, - const rd_chariov_t *key, - const rd_chariov_t *str, - rd_chariov_t *out) { +static int rd_kafka_sasl_scram_HMAC(rd_kafka_transport_t *rktrans, + const rd_chariov_t *key, + const rd_chariov_t *str, + rd_chariov_t *out) { const EVP_MD *evp = - rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp; + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp; unsigned int outsize; - if (!HMAC(evp, - (const unsigned char *)key->ptr, (int)key->size, + if (!HMAC(evp, (const unsigned char *)key->ptr, (int)key->size, (const unsigned char *)str->ptr, (int)str->size, (unsigned char *)out->ptr, &outsize)) { rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM", @@ -264,14 +262,14 @@ rd_kafka_sasl_scram_HMAC (rd_kafka_transport_t *rktrans, * at least EVP_MAX_MD_SIZE. Actual size is updated in \p *outsize. 
* @returns 0 on success, else -1 */ -static int -rd_kafka_sasl_scram_Hi (rd_kafka_transport_t *rktrans, - const rd_chariov_t *in, - const rd_chariov_t *salt, - int itcnt, rd_chariov_t *out) { +static int rd_kafka_sasl_scram_Hi(rd_kafka_transport_t *rktrans, + const rd_chariov_t *in, + const rd_chariov_t *salt, + int itcnt, + rd_chariov_t *out) { const EVP_MD *evp = - rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp; - unsigned int ressize = 0; + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp; + unsigned int ressize = 0; unsigned char tempres[EVP_MAX_MD_SIZE]; unsigned char *saltplus; int i; @@ -279,16 +277,14 @@ rd_kafka_sasl_scram_Hi (rd_kafka_transport_t *rktrans, /* U1 := HMAC(str, salt + INT(1)) */ saltplus = rd_alloca(salt->size + 4); memcpy(saltplus, salt->ptr, salt->size); - saltplus[salt->size] = 0; - saltplus[salt->size+1] = 0; - saltplus[salt->size+2] = 0; - saltplus[salt->size+3] = 1; + saltplus[salt->size] = 0; + saltplus[salt->size + 1] = 0; + saltplus[salt->size + 2] = 0; + saltplus[salt->size + 3] = 1; /* U1 := HMAC(str, salt + INT(1)) */ - if (!HMAC(evp, - (const unsigned char *)in->ptr, (int)in->size, - saltplus, salt->size+4, - tempres, &ressize)) { + if (!HMAC(evp, (const unsigned char *)in->ptr, (int)in->size, saltplus, + salt->size + 4, tempres, &ressize)) { rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM", "HMAC priming failed"); return -1; @@ -297,21 +293,20 @@ rd_kafka_sasl_scram_Hi (rd_kafka_transport_t *rktrans, memcpy(out->ptr, tempres, ressize); /* Ui-1 := HMAC(str, Ui-2) .. 
*/ - for (i = 1 ; i < itcnt ; i++) { + for (i = 1; i < itcnt; i++) { unsigned char tempdest[EVP_MAX_MD_SIZE]; int j; - if (unlikely(!HMAC(evp, - (const unsigned char *)in->ptr, (int)in->size, - tempres, ressize, - tempdest, NULL))) { + if (unlikely(!HMAC(evp, (const unsigned char *)in->ptr, + (int)in->size, tempres, ressize, tempdest, + NULL))) { rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM", "Hi() HMAC #%d/%d failed", i, itcnt); return -1; } /* U1 XOR U2 .. */ - for (j = 0 ; j < (int)ressize ; j++) { + for (j = 0; j < (int)ressize; j++) { out->ptr[j] ^= tempdest[j]; tempres[j] = tempdest[j]; } @@ -327,16 +322,16 @@ rd_kafka_sasl_scram_Hi (rd_kafka_transport_t *rktrans, * @returns a SASL value-safe-char encoded string, replacing "," and "=" * with their escaped counterparts in a newly allocated string. */ -static char *rd_kafka_sasl_safe_string (const char *str) { - char *safe = NULL, *d = NULL/*avoid warning*/; +static char *rd_kafka_sasl_safe_string(const char *str) { + char *safe = NULL, *d = NULL /*avoid warning*/; int pass; size_t len = 0; /* Pass #1: scan for needed length and allocate. * Pass #2: encode string */ - for (pass = 0 ; pass < 2 ; pass++) { + for (pass = 0; pass < 2; pass++) { const char *s; - for (s = str ; *s ; s++) { + for (s = str; *s; s++) { if (pass == 0) { /* If this byte needs to be escaped then * 3 output bytes are needed instead of 1. */ @@ -357,7 +352,7 @@ static char *rd_kafka_sasl_safe_string (const char *str) { } if (pass == 0) - d = safe = rd_malloc(len+1); + d = safe = rd_malloc(len + 1); } rd_assert(d == safe + (int)len); @@ -371,11 +366,10 @@ static char *rd_kafka_sasl_safe_string (const char *str) { * @brief Build client-final-message-without-proof * @remark out->ptr will be allocated and must be freed. 
*/ -static void -rd_kafka_sasl_scram_build_client_final_message_wo_proof ( - struct rd_kafka_sasl_scram_state *state, - const char *snonce, - rd_chariov_t *out) { +static void rd_kafka_sasl_scram_build_client_final_message_wo_proof( + struct rd_kafka_sasl_scram_state *state, + const char *snonce, + rd_chariov_t *out) { const char *attr_c = "biws"; /* base64 encode of "n,," */ /* @@ -383,11 +377,11 @@ rd_kafka_sasl_scram_build_client_final_message_wo_proof ( * channel-binding "," nonce ["," * extensions] */ - out->size = strlen("c=,r=") + strlen(attr_c) + - state->cnonce.size + strlen(snonce); - out->ptr = rd_malloc(out->size+1); - rd_snprintf(out->ptr, out->size+1, "c=%s,r=%.*s%s", - attr_c, (int)state->cnonce.size, state->cnonce.ptr, snonce); + out->size = strlen("c=,r=") + strlen(attr_c) + state->cnonce.size + + strlen(snonce); + out->ptr = rd_malloc(out->size + 1); + rd_snprintf(out->ptr, out->size + 1, "c=%s,r=%.*s%s", attr_c, + (int)state->cnonce.size, state->cnonce.ptr, snonce); } @@ -395,37 +389,29 @@ rd_kafka_sasl_scram_build_client_final_message_wo_proof ( * @brief Build client-final-message * @returns -1 on error. 
*/ -static int -rd_kafka_sasl_scram_build_client_final_message ( - rd_kafka_transport_t *rktrans, - const rd_chariov_t *salt, - const char *server_nonce, - const rd_chariov_t *server_first_msg, - int itcnt, rd_chariov_t *out) { +static int rd_kafka_sasl_scram_build_client_final_message( + rd_kafka_transport_t *rktrans, + const rd_chariov_t *salt, + const char *server_nonce, + const rd_chariov_t *server_first_msg, + int itcnt, + rd_chariov_t *out) { struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; - const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; - rd_chariov_t SaslPassword = - { .ptr = conf->sasl.password, - .size = strlen(conf->sasl.password) }; - rd_chariov_t SaltedPassword = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - rd_chariov_t ClientKey = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - rd_chariov_t ServerKey = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - rd_chariov_t StoredKey = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - rd_chariov_t AuthMessage = RD_ZERO_INIT; - rd_chariov_t ClientSignature = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - rd_chariov_t ServerSignature = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - const rd_chariov_t ClientKeyVerbatim = - { .ptr = "Client Key", .size = 10 }; - const rd_chariov_t ServerKeyVerbatim = - { .ptr = "Server Key", .size = 10 }; - rd_chariov_t ClientProof = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; + const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; + rd_chariov_t SaslPassword = {.ptr = conf->sasl.password, + .size = strlen(conf->sasl.password)}; + rd_chariov_t SaltedPassword = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + rd_chariov_t ClientKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + rd_chariov_t ServerKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + rd_chariov_t StoredKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + rd_chariov_t AuthMessage = RD_ZERO_INIT; + rd_chariov_t ClientSignature = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + rd_chariov_t ServerSignature = {.ptr = 
rd_alloca(EVP_MAX_MD_SIZE)}; + const rd_chariov_t ClientKeyVerbatim = {.ptr = "Client Key", + .size = 10}; + const rd_chariov_t ServerKeyVerbatim = {.ptr = "Server Key", + .size = 10}; + rd_chariov_t ClientProof = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; rd_chariov_t client_final_msg_wo_proof; char *ClientProofB64; int i; @@ -446,15 +432,13 @@ rd_kafka_sasl_scram_build_client_final_message ( */ /* SaltedPassword := Hi(Normalize(password), salt, i) */ - if (rd_kafka_sasl_scram_Hi( - rktrans, &SaslPassword, salt, - itcnt, &SaltedPassword) == -1) + if (rd_kafka_sasl_scram_Hi(rktrans, &SaslPassword, salt, itcnt, + &SaltedPassword) == -1) return -1; /* ClientKey := HMAC(SaltedPassword, "Client Key") */ - if (rd_kafka_sasl_scram_HMAC( - rktrans, &SaltedPassword, &ClientKeyVerbatim, - &ClientKey) == -1) + if (rd_kafka_sasl_scram_HMAC(rktrans, &SaltedPassword, + &ClientKeyVerbatim, &ClientKey) == -1) return -1; /* StoredKey := H(ClientKey) */ @@ -463,18 +447,16 @@ rd_kafka_sasl_scram_build_client_final_message ( /* client-final-message-without-proof */ rd_kafka_sasl_scram_build_client_final_message_wo_proof( - state, server_nonce, &client_final_msg_wo_proof); + state, server_nonce, &client_final_msg_wo_proof); /* AuthMessage := client-first-message-bare + "," + * server-first-message + "," + * client-final-message-without-proof */ - AuthMessage.size = - state->first_msg_bare.size + 1 + - server_first_msg->size + 1 + - client_final_msg_wo_proof.size; - AuthMessage.ptr = rd_alloca(AuthMessage.size+1); - rd_snprintf(AuthMessage.ptr, AuthMessage.size+1, - "%.*s,%.*s,%.*s", + AuthMessage.size = state->first_msg_bare.size + 1 + + server_first_msg->size + 1 + + client_final_msg_wo_proof.size; + AuthMessage.ptr = rd_alloca(AuthMessage.size + 1); + rd_snprintf(AuthMessage.ptr, AuthMessage.size + 1, "%.*s,%.*s,%.*s", (int)state->first_msg_bare.size, state->first_msg_bare.ptr, (int)server_first_msg->size, server_first_msg->ptr, (int)client_final_msg_wo_proof.size, @@ -486,16 +468,15 @@ 
rd_kafka_sasl_scram_build_client_final_message ( */ /* ServerKey := HMAC(SaltedPassword, "Server Key") */ - if (rd_kafka_sasl_scram_HMAC( - rktrans, &SaltedPassword, &ServerKeyVerbatim, - &ServerKey) == -1) { + if (rd_kafka_sasl_scram_HMAC(rktrans, &SaltedPassword, + &ServerKeyVerbatim, &ServerKey) == -1) { rd_free(client_final_msg_wo_proof.ptr); return -1; } /* ServerSignature := HMAC(ServerKey, AuthMessage) */ - if (rd_kafka_sasl_scram_HMAC(rktrans, &ServerKey, - &AuthMessage, &ServerSignature) == -1) { + if (rd_kafka_sasl_scram_HMAC(rktrans, &ServerKey, &AuthMessage, + &ServerSignature) == -1) { rd_free(client_final_msg_wo_proof.ptr); return -1; } @@ -512,15 +493,15 @@ rd_kafka_sasl_scram_build_client_final_message ( */ /* ClientSignature := HMAC(StoredKey, AuthMessage) */ - if (rd_kafka_sasl_scram_HMAC(rktrans, &StoredKey, - &AuthMessage, &ClientSignature) == -1) { + if (rd_kafka_sasl_scram_HMAC(rktrans, &StoredKey, &AuthMessage, + &ClientSignature) == -1) { rd_free(client_final_msg_wo_proof.ptr); return -1; } /* ClientProof := ClientKey XOR ClientSignature */ assert(ClientKey.size == ClientSignature.size); - for (i = 0 ; i < (int)ClientKey.size ; i++) + for (i = 0; i < (int)ClientKey.size; i++) ClientProof.ptr[i] = ClientKey.ptr[i] ^ ClientSignature.ptr[i]; ClientProof.size = ClientKey.size; @@ -533,15 +514,13 @@ rd_kafka_sasl_scram_build_client_final_message ( } /* Construct client-final-message */ - out->size = client_final_msg_wo_proof.size + - strlen(",p=") + strlen(ClientProofB64); + out->size = client_final_msg_wo_proof.size + strlen(",p=") + + strlen(ClientProofB64); out->ptr = rd_malloc(out->size + 1); - rd_snprintf(out->ptr, out->size+1, - "%.*s,p=%s", + rd_snprintf(out->ptr, out->size + 1, "%.*s,p=%s", (int)client_final_msg_wo_proof.size, - client_final_msg_wo_proof.ptr, - ClientProofB64); + client_final_msg_wo_proof.ptr, ClientProofB64); rd_free(ClientProofB64); rd_free(client_final_msg_wo_proof.ptr); @@ -558,11 +537,11 @@ 
rd_kafka_sasl_scram_build_client_final_message ( * @returns -1 on error. */ static int -rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, - const rd_chariov_t *in, - rd_chariov_t *out, - char *errstr, - size_t errstr_size) { +rd_kafka_sasl_scram_handle_server_first_message(rd_kafka_transport_t *rktrans, + const rd_chariov_t *in, + rd_chariov_t *out, + char *errstr, + size_t errstr_size) { struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; char *server_nonce; rd_chariov_t salt_b64, salt; @@ -572,8 +551,7 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, char *attr_m; /* Mandatory future extension check */ - if ((attr_m = rd_kafka_sasl_scram_get_attr( - in, 'm', NULL, NULL, 0))) { + if ((attr_m = rd_kafka_sasl_scram_get_attr(in, 'm', NULL, NULL, 0))) { rd_snprintf(errstr, errstr_size, "Unsupported mandatory SCRAM extension"); rd_free(attr_m); @@ -582,9 +560,8 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, /* Server nonce */ if (!(server_nonce = rd_kafka_sasl_scram_get_attr( - in, 'r', - "Server nonce in server-first-message", - errstr, errstr_size))) + in, 'r', "Server nonce in server-first-message", errstr, + errstr_size))) return -1; if (strlen(server_nonce) <= state->cnonce.size || @@ -598,9 +575,8 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, /* Salt (Base64) */ if (!(salt_b64.ptr = rd_kafka_sasl_scram_get_attr( - in, 's', - "Salt in server-first-message", - errstr, errstr_size))) { + in, 's', "Salt in server-first-message", errstr, + errstr_size))) { rd_free(server_nonce); return -1; } @@ -618,9 +594,8 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, /* Iteration count (as string) */ if (!(itcntstr = rd_kafka_sasl_scram_get_attr( - in, 'i', - "Iteration count in server-first-message", - errstr, errstr_size))) { + in, 'i', "Iteration count in server-first-message", errstr, + 
errstr_size))) { rd_free(server_nonce); rd_free(salt.ptr); return -1; @@ -643,7 +618,7 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, /* Build client-final-message */ if (rd_kafka_sasl_scram_build_client_final_message( - rktrans, &salt, server_nonce, in, itcnt, out) == -1) { + rktrans, &salt, server_nonce, in, itcnt, out) == -1) { rd_snprintf(errstr, errstr_size, "Failed to build SCRAM client-final-message"); rd_free(salt.ptr); @@ -659,7 +634,7 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, /** * @brief Handle server-final-message - * + * * This is the end of authentication and the SCRAM state * will be freed at the end of this function regardless of * authentication outcome. @@ -667,16 +642,16 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, * @returns -1 on failure */ static int -rd_kafka_sasl_scram_handle_server_final_message ( - rd_kafka_transport_t *rktrans, - const rd_chariov_t *in, - char *errstr, size_t errstr_size) { +rd_kafka_sasl_scram_handle_server_final_message(rd_kafka_transport_t *rktrans, + const rd_chariov_t *in, + char *errstr, + size_t errstr_size) { struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; char *attr_v, *attr_e; if ((attr_e = rd_kafka_sasl_scram_get_attr( - in, 'e', "server-error in server-final-message", - errstr, errstr_size))) { + in, 'e', "server-error in server-final-message", errstr, + errstr_size))) { /* Authentication failed */ rd_snprintf(errstr, errstr_size, @@ -687,8 +662,8 @@ rd_kafka_sasl_scram_handle_server_final_message ( return -1; } else if ((attr_v = rd_kafka_sasl_scram_get_attr( - in, 'v', "verifier in server-final-message", - errstr, errstr_size))) { + in, 'v', "verifier in server-final-message", errstr, + errstr_size))) { const rd_kafka_conf_t *conf; /* Authentication succesful on server, @@ -712,10 +687,8 @@ rd_kafka_sasl_scram_handle_server_final_message ( conf = 
&rktrans->rktrans_rkb->rkb_rk->rk_conf; rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER, - "SCRAMAUTH", - "Authenticated as %s using %s", - conf->sasl.username, - conf->sasl.mechanisms); + "SCRAMAUTH", "Authenticated as %s using %s", + conf->sasl.username, conf->sasl.mechanisms); rd_kafka_sasl_auth_done(rktrans); return 0; @@ -734,9 +707,8 @@ rd_kafka_sasl_scram_handle_server_final_message ( * @brief Build client-first-message */ static void -rd_kafka_sasl_scram_build_client_first_message ( - rd_kafka_transport_t *rktrans, - rd_chariov_t *out) { +rd_kafka_sasl_scram_build_client_first_message(rd_kafka_transport_t *rktrans, + rd_chariov_t *out) { char *sasl_username; struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; @@ -745,20 +717,18 @@ rd_kafka_sasl_scram_build_client_first_message ( sasl_username = rd_kafka_sasl_safe_string(conf->sasl.username); - out->size = strlen("n,,n=,r=") + strlen(sasl_username) + - state->cnonce.size; - out->ptr = rd_malloc(out->size+1); + out->size = + strlen("n,,n=,r=") + strlen(sasl_username) + state->cnonce.size; + out->ptr = rd_malloc(out->size + 1); - rd_snprintf(out->ptr, out->size+1, - "n,,n=%s,r=%.*s", - sasl_username, + rd_snprintf(out->ptr, out->size + 1, "n,,n=%s,r=%.*s", sasl_username, (int)state->cnonce.size, state->cnonce.ptr); rd_free(sasl_username); /* Save client-first-message-bare (skip gs2-header) */ - state->first_msg_bare.size = out->size-3; - state->first_msg_bare.ptr = rd_memdup(out->ptr+3, - state->first_msg_bare.size); + state->first_msg_bare.size = out->size - 3; + state->first_msg_bare.ptr = + rd_memdup(out->ptr + 3, state->first_msg_bare.size); } @@ -767,26 +737,25 @@ rd_kafka_sasl_scram_build_client_first_message ( * @brief SASL SCRAM client state machine * @returns -1 on failure (errstr set), else 0. 
*/ -static int rd_kafka_sasl_scram_fsm (rd_kafka_transport_t *rktrans, - const rd_chariov_t *in, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_scram_fsm(rd_kafka_transport_t *rktrans, + const rd_chariov_t *in, + char *errstr, + size_t errstr_size) { static const char *state_names[] = { - "client-first-message", - "server-first-message", - "client-final-message", + "client-first-message", + "server-first-message", + "client-final-message", }; struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; - rd_chariov_t out = RD_ZERO_INIT; - int r = -1; - rd_ts_t ts_start = rd_clock(); - int prev_state = state->state; + rd_chariov_t out = RD_ZERO_INIT; + int r = -1; + rd_ts_t ts_start = rd_clock(); + int prev_state = state->state; rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLSCRAM", - "SASL SCRAM client in state %s", - state_names[state->state]); + "SASL SCRAM client in state %s", state_names[state->state]); - switch (state->state) - { + switch (state->state) { case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE: rd_dassert(!in); /* Not expecting any server-input */ @@ -799,30 +768,30 @@ static int rd_kafka_sasl_scram_fsm (rd_kafka_transport_t *rktrans, rd_dassert(in); /* Requires server-input */ if (rd_kafka_sasl_scram_handle_server_first_message( - rktrans, in, &out, errstr, errstr_size) == -1) + rktrans, in, &out, errstr, errstr_size) == -1) return -1; state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE; break; case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE: - rd_dassert(in); /* Requires server-input */ + rd_dassert(in); /* Requires server-input */ r = rd_kafka_sasl_scram_handle_server_final_message( - rktrans, in, errstr, errstr_size); + rktrans, in, errstr, errstr_size); break; } if (out.ptr) { - r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, - errstr, errstr_size); + r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, errstr, + errstr_size); rd_free(out.ptr); } ts_start = (rd_clock() - ts_start) / 1000; 
if (ts_start >= 100) rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM", - "SASL SCRAM state %s handled in %"PRId64"ms", + "SASL SCRAM state %s handled in %" PRId64 "ms", state_names[prev_state], ts_start); @@ -833,10 +802,12 @@ static int rd_kafka_sasl_scram_fsm (rd_kafka_transport_t *rktrans, /** * @brief Handle received frame from broker. */ -static int rd_kafka_sasl_scram_recv (rd_kafka_transport_t *rktrans, - const void *buf, size_t size, - char *errstr, size_t errstr_size) { - const rd_chariov_t in = { .ptr = (char *)buf, .size = size }; +static int rd_kafka_sasl_scram_recv(rd_kafka_transport_t *rktrans, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size) { + const rd_chariov_t in = {.ptr = (char *)buf, .size = size}; return rd_kafka_sasl_scram_fsm(rktrans, &in, errstr, errstr_size); } @@ -848,12 +819,13 @@ static int rd_kafka_sasl_scram_recv (rd_kafka_transport_t *rktrans, * * @locality broker thread */ -static int rd_kafka_sasl_scram_client_new (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_scram_client_new(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size) { struct rd_kafka_sasl_scram_state *state; - state = rd_calloc(1, sizeof(*state)); + state = rd_calloc(1, sizeof(*state)); state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE; rktrans->rktrans_sasl.state = state; @@ -866,9 +838,9 @@ static int rd_kafka_sasl_scram_client_new (rd_kafka_transport_t *rktrans, /** * @brief Validate SCRAM config and look up the hash function */ -static int rd_kafka_sasl_scram_conf_validate (rd_kafka_t *rk, - char *errstr, - size_t errstr_size) { +static int rd_kafka_sasl_scram_conf_validate(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { const char *mech = rk->rk_conf.sasl.mechanisms; if (!rk->rk_conf.sasl.username || !rk->rk_conf.sasl.password) { @@ -878,16 +850,16 @@ static int rd_kafka_sasl_scram_conf_validate (rd_kafka_t 
*rk, } if (!strcmp(mech, "SCRAM-SHA-1")) { - rk->rk_conf.sasl.scram_evp = EVP_sha1(); - rk->rk_conf.sasl.scram_H = SHA1; + rk->rk_conf.sasl.scram_evp = EVP_sha1(); + rk->rk_conf.sasl.scram_H = SHA1; rk->rk_conf.sasl.scram_H_size = SHA_DIGEST_LENGTH; } else if (!strcmp(mech, "SCRAM-SHA-256")) { - rk->rk_conf.sasl.scram_evp = EVP_sha256(); - rk->rk_conf.sasl.scram_H = SHA256; + rk->rk_conf.sasl.scram_evp = EVP_sha256(); + rk->rk_conf.sasl.scram_H = SHA256; rk->rk_conf.sasl.scram_H_size = SHA256_DIGEST_LENGTH; } else if (!strcmp(mech, "SCRAM-SHA-512")) { - rk->rk_conf.sasl.scram_evp = EVP_sha512(); - rk->rk_conf.sasl.scram_H = SHA512; + rk->rk_conf.sasl.scram_evp = EVP_sha512(); + rk->rk_conf.sasl.scram_H = SHA512; rk->rk_conf.sasl.scram_H_size = SHA512_DIGEST_LENGTH; } else { rd_snprintf(errstr, errstr_size, @@ -902,13 +874,12 @@ static int rd_kafka_sasl_scram_conf_validate (rd_kafka_t *rk, - const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider = { - .name = "SCRAM (builtin)", - .client_new = rd_kafka_sasl_scram_client_new, - .recv = rd_kafka_sasl_scram_recv, - .close = rd_kafka_sasl_scram_close, - .conf_validate = rd_kafka_sasl_scram_conf_validate, + .name = "SCRAM (builtin)", + .client_new = rd_kafka_sasl_scram_client_new, + .recv = rd_kafka_sasl_scram_recv, + .close = rd_kafka_sasl_scram_close, + .conf_validate = rd_kafka_sasl_scram_conf_validate, }; @@ -920,7 +891,7 @@ const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider = { /** * @brief Verify that a random nonce is generated. */ -static int unittest_scram_nonce (void) { +static int unittest_scram_nonce(void) { rd_chariov_t out1 = RD_ZERO_INIT; rd_chariov_t out2 = RD_ZERO_INIT; @@ -945,29 +916,28 @@ static int unittest_scram_nonce (void) { * Needs to be run with ASAN (which is done in release-tests) for * proper verification. 
*/ -static int unittest_scram_safe (void) { +static int unittest_scram_safe(void) { const char *inout[] = { - "just a string", - "just a string", + "just a string", + "just a string", - "another,one,that,needs=escaping!", - "another=2Cone=2Cthat=2Cneeds=3Descaping!", + "another,one,that,needs=escaping!", + "another=2Cone=2Cthat=2Cneeds=3Descaping!", - "overflow?============================", - "overflow?=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D" - "=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D", + "overflow?============================", + "overflow?=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D" + "=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D", - "=3D=3D=3D the mind boggles", - "=3D3D=3D3D=3D3D the mind boggles", + "=3D=3D=3D the mind boggles", + "=3D3D=3D3D=3D3D the mind boggles", - NULL, - NULL - }; + NULL, + NULL}; int i; - for (i = 0 ; inout[i] ; i += 2) { - char *out = rd_kafka_sasl_safe_string(inout[i]); - const char *expected = inout[i+1]; + for (i = 0; inout[i]; i += 2) { + char *out = rd_kafka_sasl_safe_string(inout[i]); + const char *expected = inout[i + 1]; RD_UT_ASSERT(!strcmp(out, expected), "Expected sasl_safe_string(%s) => %s, not %s\n", @@ -980,7 +950,7 @@ static int unittest_scram_safe (void) { } -int unittest_scram (void) { +int unittest_scram(void) { int fails = 0; fails += unittest_scram_nonce(); diff --git a/src/rdkafka_sasl_win32.c b/src/rdkafka_sasl_win32.c index 06e198560d..b07e1808d0 100644 --- a/src/rdkafka_sasl_win32.c +++ b/src/rdkafka_sasl_win32.c @@ -47,12 +47,12 @@ #include -#define RD_KAFKA_SASL_SSPI_CTX_ATTRS \ - (ISC_REQ_CONFIDENTIALITY | ISC_REQ_REPLAY_DETECT | \ - ISC_REQ_SEQUENCE_DETECT | ISC_REQ_CONNECTION) +#define RD_KAFKA_SASL_SSPI_CTX_ATTRS \ + (ISC_REQ_CONFIDENTIALITY | ISC_REQ_REPLAY_DETECT | \ + ISC_REQ_SEQUENCE_DETECT | ISC_REQ_CONNECTION) - /* Default maximum kerberos token size for newer versions of Windows */ +/* Default maximum kerberos token size for newer versions of Windows */ #define RD_KAFKA_SSPI_MAX_TOKEN_SIZE 48000 
@@ -62,42 +62,41 @@ typedef struct rd_kafka_sasl_win32_state_s { CredHandle *cred; CtxtHandle *ctx; - wchar_t principal[512]; /* Broker service principal and hostname */ + wchar_t principal[512]; /* Broker service principal and hostname */ } rd_kafka_sasl_win32_state_t; /** * @returns the string representation of a SECURITY_STATUS error code */ -static const char *rd_kafka_sasl_sspi_err2str (SECURITY_STATUS sr) { - switch (sr) - { - case SEC_E_INSUFFICIENT_MEMORY: - return "Insufficient memory"; - case SEC_E_INTERNAL_ERROR: - return "Internal error"; - case SEC_E_INVALID_HANDLE: - return "Invalid handle"; - case SEC_E_INVALID_TOKEN: - return "Invalid token"; - case SEC_E_LOGON_DENIED: - return "Logon denied"; - case SEC_E_NO_AUTHENTICATING_AUTHORITY: - return "No authority could be contacted for authentication."; - case SEC_E_NO_CREDENTIALS: - return "No credentials"; - case SEC_E_TARGET_UNKNOWN: - return "Target unknown"; - case SEC_E_UNSUPPORTED_FUNCTION: - return "Unsupported functionality"; - case SEC_E_WRONG_CREDENTIAL_HANDLE: - return "The principal that received the authentication " - "request is not the same as the one passed " - "into the pszTargetName parameter. 
" - "This indicates a failure in mutual " - "authentication."; - default: - return "(no string representation)"; +static const char *rd_kafka_sasl_sspi_err2str(SECURITY_STATUS sr) { + switch (sr) { + case SEC_E_INSUFFICIENT_MEMORY: + return "Insufficient memory"; + case SEC_E_INTERNAL_ERROR: + return "Internal error"; + case SEC_E_INVALID_HANDLE: + return "Invalid handle"; + case SEC_E_INVALID_TOKEN: + return "Invalid token"; + case SEC_E_LOGON_DENIED: + return "Logon denied"; + case SEC_E_NO_AUTHENTICATING_AUTHORITY: + return "No authority could be contacted for authentication."; + case SEC_E_NO_CREDENTIALS: + return "No credentials"; + case SEC_E_TARGET_UNKNOWN: + return "Target unknown"; + case SEC_E_UNSUPPORTED_FUNCTION: + return "Unsupported functionality"; + case SEC_E_WRONG_CREDENTIAL_HANDLE: + return "The principal that received the authentication " + "request is not the same as the one passed " + "into the pszTargetName parameter. " + "This indicates a failure in mutual " + "authentication."; + default: + return "(no string representation)"; } } @@ -105,22 +104,23 @@ static const char *rd_kafka_sasl_sspi_err2str (SECURITY_STATUS sr) { /** * @brief Create new CredHandle */ -static CredHandle * -rd_kafka_sasl_sspi_cred_new (rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size) { - TimeStamp expiry = { 0, 0 }; +static CredHandle *rd_kafka_sasl_sspi_cred_new(rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size) { + TimeStamp expiry = {0, 0}; SECURITY_STATUS sr; CredHandle *cred = rd_calloc(1, sizeof(*cred)); - sr = AcquireCredentialsHandle( - NULL, __TEXT("Kerberos"), SECPKG_CRED_OUTBOUND, - NULL, NULL, NULL, NULL, cred, &expiry); + sr = AcquireCredentialsHandle(NULL, __TEXT("Kerberos"), + SECPKG_CRED_OUTBOUND, NULL, NULL, NULL, + NULL, cred, &expiry); if (sr != SEC_E_OK) { rd_free(cred); rd_snprintf(errstr, errstr_size, "Failed to acquire CredentialsHandle: " - "error code %d", sr); + "error code %d", + sr); return NULL; } @@ 
-133,16 +133,18 @@ rd_kafka_sasl_sspi_cred_new (rd_kafka_transport_t *rktrans, /** - * @brief Start or continue SSPI-based authentication processing. - */ -static int rd_kafka_sasl_sspi_continue (rd_kafka_transport_t *rktrans, - const void *inbuf, size_t insize, - char *errstr, size_t errstr_size) { + * @brief Start or continue SSPI-based authentication processing. + */ +static int rd_kafka_sasl_sspi_continue(rd_kafka_transport_t *rktrans, + const void *inbuf, + size_t insize, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state; SecBufferDesc outbufdesc, inbufdesc; SecBuffer outsecbuf, insecbuf; BYTE outbuf[RD_KAFKA_SSPI_MAX_TOKEN_SIZE]; - TimeStamp lifespan = { 0, 0 }; + TimeStamp lifespan = {0, 0}; ULONG ret_ctxattrs; CtxtHandle *ctx; SECURITY_STATUS sr; @@ -150,13 +152,15 @@ static int rd_kafka_sasl_sspi_continue (rd_kafka_transport_t *rktrans, if (inbuf) { if (insize > ULONG_MAX) { rd_snprintf(errstr, errstr_size, - "Input buffer length too large (%"PRIusz") " - "and would overflow", insize); + "Input buffer length too large (%" PRIusz + ") " + "and would overflow", + insize); return -1; } inbufdesc.ulVersion = SECBUFFER_VERSION; - inbufdesc.cBuffers = 1; + inbufdesc.cBuffers = 1; inbufdesc.pBuffers = &insecbuf; insecbuf.cbBuffer = (unsigned long)insize; @@ -179,48 +183,46 @@ static int rd_kafka_sasl_sspi_continue (rd_kafka_transport_t *rktrans, } sr = InitializeSecurityContext( - state->cred, state->ctx, state->principal, - RD_KAFKA_SASL_SSPI_CTX_ATTRS | + state->cred, state->ctx, state->principal, + RD_KAFKA_SASL_SSPI_CTX_ATTRS | (state->ctx ? 0 : ISC_REQ_MUTUAL_AUTH | ISC_REQ_IDENTIFY), - 0, SECURITY_NATIVE_DREP, - inbuf ? &inbufdesc : NULL, - 0, ctx, &outbufdesc, &ret_ctxattrs, &lifespan); + 0, SECURITY_NATIVE_DREP, inbuf ? 
&inbufdesc : NULL, 0, ctx, + &outbufdesc, &ret_ctxattrs, &lifespan); if (!state->ctx) state->ctx = ctx; - switch (sr) - { - case SEC_E_OK: - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH", - "Initialized security context"); - - rktrans->rktrans_sasl.complete = 1; - break; - case SEC_I_CONTINUE_NEEDED: - break; - case SEC_I_COMPLETE_NEEDED: - case SEC_I_COMPLETE_AND_CONTINUE: - rd_snprintf(errstr, errstr_size, - "CompleteAuthToken (Digest auth, %d) " - "not implemented", sr); - return -1; - case SEC_I_INCOMPLETE_CREDENTIALS: - rd_snprintf(errstr, errstr_size, - "Incomplete credentials: " - "invalid or untrusted certificate"); - return -1; - default: - rd_snprintf(errstr, errstr_size, - "InitializeSecurityContext " - "failed: %s (0x%x)", - rd_kafka_sasl_sspi_err2str(sr), sr); - return -1; + switch (sr) { + case SEC_E_OK: + rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH", + "Initialized security context"); + + rktrans->rktrans_sasl.complete = 1; + break; + case SEC_I_CONTINUE_NEEDED: + break; + case SEC_I_COMPLETE_NEEDED: + case SEC_I_COMPLETE_AND_CONTINUE: + rd_snprintf(errstr, errstr_size, + "CompleteAuthToken (Digest auth, %d) " + "not implemented", + sr); + return -1; + case SEC_I_INCOMPLETE_CREDENTIALS: + rd_snprintf(errstr, errstr_size, + "Incomplete credentials: " + "invalid or untrusted certificate"); + return -1; + default: + rd_snprintf(errstr, errstr_size, + "InitializeSecurityContext " + "failed: %s (0x%x)", + rd_kafka_sasl_sspi_err2str(sr), sr); + return -1; } - if (rd_kafka_sasl_send(rktrans, - outsecbuf.pvBuffer, outsecbuf.cbBuffer, - errstr, errstr_size) == -1) + if (rd_kafka_sasl_send(rktrans, outsecbuf.pvBuffer, outsecbuf.cbBuffer, + errstr, errstr_size) == -1) return -1; return 0; @@ -228,12 +230,12 @@ static int rd_kafka_sasl_sspi_continue (rd_kafka_transport_t *rktrans, /** -* @brief Sends the token response to the broker -*/ -static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans, - char *errstr, - size_t 
errstr_size, - SecBuffer *server_token) { + * @brief Sends the token response to the broker + */ +static int rd_kafka_sasl_win32_send_response(rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size, + SecBuffer *server_token) { rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state; SECURITY_STATUS sr; SecBuffer in_buffer; @@ -270,7 +272,8 @@ static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans, namelen = strlen(names.sUserName) + 1; if (namelen > ULONG_MAX) { rd_snprintf(errstr, errstr_size, - "User name length too large (%"PRIusz") " + "User name length too large (%" PRIusz + ") " "and would overflow"); return -1; } @@ -278,31 +281,32 @@ static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans, in_buffer.pvBuffer = (char *)names.sUserName; in_buffer.cbBuffer = (unsigned long)namelen; - buffer_desc.cBuffers = 4; - buffer_desc.pBuffers = buffers; + buffer_desc.cBuffers = 4; + buffer_desc.pBuffers = buffers; buffer_desc.ulVersion = SECBUFFER_VERSION; /* security trailer */ - buffers[0].cbBuffer = sizes.cbSecurityTrailer; + buffers[0].cbBuffer = sizes.cbSecurityTrailer; buffers[0].BufferType = SECBUFFER_TOKEN; - buffers[0].pvBuffer = rd_calloc(1, sizes.cbSecurityTrailer); + buffers[0].pvBuffer = rd_calloc(1, sizes.cbSecurityTrailer); /* protection level and buffer size received from the server */ - buffers[1].cbBuffer = server_token->cbBuffer; + buffers[1].cbBuffer = server_token->cbBuffer; buffers[1].BufferType = SECBUFFER_DATA; - buffers[1].pvBuffer = rd_calloc(1, server_token->cbBuffer); - memcpy(buffers[1].pvBuffer, server_token->pvBuffer, server_token->cbBuffer); + buffers[1].pvBuffer = rd_calloc(1, server_token->cbBuffer); + memcpy(buffers[1].pvBuffer, server_token->pvBuffer, + server_token->cbBuffer); /* user principal */ - buffers[2].cbBuffer = in_buffer.cbBuffer; + buffers[2].cbBuffer = in_buffer.cbBuffer; buffers[2].BufferType = SECBUFFER_DATA; - buffers[2].pvBuffer = rd_calloc(1, 
buffers[2].cbBuffer); + buffers[2].pvBuffer = rd_calloc(1, buffers[2].cbBuffer); memcpy(buffers[2].pvBuffer, in_buffer.pvBuffer, in_buffer.cbBuffer); /* padding */ - buffers[3].cbBuffer = sizes.cbBlockSize; + buffers[3].cbBuffer = sizes.cbBlockSize; buffers[3].BufferType = SECBUFFER_PADDING; - buffers[3].pvBuffer = rd_calloc(1, buffers[2].cbBuffer); + buffers[3].pvBuffer = rd_calloc(1, buffers[2].cbBuffer); sr = EncryptMessage(state->ctx, KERB_WRAP_NO_ENCRYPT, &buffer_desc, 0); if (sr != SEC_E_OK) { @@ -318,33 +322,29 @@ static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans, return -1; } - out_buffer.cbBuffer = buffers[0].cbBuffer + - buffers[1].cbBuffer + - buffers[2].cbBuffer + - buffers[3].cbBuffer; + out_buffer.cbBuffer = buffers[0].cbBuffer + buffers[1].cbBuffer + + buffers[2].cbBuffer + buffers[3].cbBuffer; - out_buffer.pvBuffer = rd_calloc(1, buffers[0].cbBuffer + - buffers[1].cbBuffer + - buffers[2].cbBuffer + - buffers[3].cbBuffer); + out_buffer.pvBuffer = + rd_calloc(1, buffers[0].cbBuffer + buffers[1].cbBuffer + + buffers[2].cbBuffer + buffers[3].cbBuffer); memcpy(out_buffer.pvBuffer, buffers[0].pvBuffer, buffers[0].cbBuffer); memcpy((unsigned char *)out_buffer.pvBuffer + (int)buffers[0].cbBuffer, buffers[1].pvBuffer, buffers[1].cbBuffer); - memcpy((unsigned char *)out_buffer.pvBuffer + - buffers[0].cbBuffer + buffers[1].cbBuffer, - buffers[2].pvBuffer, buffers[2].cbBuffer); + memcpy((unsigned char *)out_buffer.pvBuffer + buffers[0].cbBuffer + + buffers[1].cbBuffer, + buffers[2].pvBuffer, buffers[2].cbBuffer); - memcpy((unsigned char *)out_buffer.pvBuffer + - buffers[0].cbBuffer + buffers[1].cbBuffer + buffers[2].cbBuffer, - buffers[3].pvBuffer, buffers[3].cbBuffer); + memcpy((unsigned char *)out_buffer.pvBuffer + buffers[0].cbBuffer + + buffers[1].cbBuffer + buffers[2].cbBuffer, + buffers[3].pvBuffer, buffers[3].cbBuffer); - send_response = rd_kafka_sasl_send(rktrans, - out_buffer.pvBuffer, - out_buffer.cbBuffer, - errstr, 
errstr_size); + send_response = + rd_kafka_sasl_send(rktrans, out_buffer.pvBuffer, + out_buffer.cbBuffer, errstr, errstr_size); FreeContextBuffer(in_buffer.pvBuffer); rd_free(out_buffer.pvBuffer); @@ -358,13 +358,13 @@ static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans, /** -* @brief Unwrap and validate token response from broker. -*/ -static int rd_kafka_sasl_win32_validate_token (rd_kafka_transport_t *rktrans, - const void *inbuf, - size_t insize, - char *errstr, - size_t errstr_size) { + * @brief Unwrap and validate token response from broker. + */ +static int rd_kafka_sasl_win32_validate_token(rd_kafka_transport_t *rktrans, + const void *inbuf, + size_t insize, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state; SecBuffer buffers[2]; SecBufferDesc buffer_desc; @@ -373,22 +373,23 @@ static int rd_kafka_sasl_win32_validate_token (rd_kafka_transport_t *rktrans, if (insize > ULONG_MAX) { rd_snprintf(errstr, errstr_size, - "Input buffer length too large (%"PRIusz") " + "Input buffer length too large (%" PRIusz + ") " "and would overflow"); return -1; } - buffer_desc.cBuffers = 2; - buffer_desc.pBuffers = buffers; + buffer_desc.cBuffers = 2; + buffer_desc.pBuffers = buffers; buffer_desc.ulVersion = SECBUFFER_VERSION; - buffers[0].cbBuffer = (unsigned long)insize; + buffers[0].cbBuffer = (unsigned long)insize; buffers[0].BufferType = SECBUFFER_STREAM; - buffers[0].pvBuffer = (void *)inbuf; + buffers[0].pvBuffer = (void *)inbuf; - buffers[1].cbBuffer = 0; + buffers[1].cbBuffer = 0; buffers[1].BufferType = SECBUFFER_DATA; - buffers[1].pvBuffer = NULL; + buffers[1].pvBuffer = NULL; sr = DecryptMessage(state->ctx, &buffer_desc, 0, NULL); if (sr != SEC_E_OK) { @@ -416,17 +417,19 @@ static int rd_kafka_sasl_win32_validate_token (rd_kafka_transport_t *rktrans, rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH", "Validated server token"); - return rd_kafka_sasl_win32_send_response(rktrans, 
errstr, - errstr_size, &buffers[1]); + return rd_kafka_sasl_win32_send_response(rktrans, errstr, errstr_size, + &buffers[1]); } /** -* @brief Handle SASL frame received from broker. -*/ -static int rd_kafka_sasl_win32_recv (struct rd_kafka_transport_s *rktrans, - const void *buf, size_t size, - char *errstr, size_t errstr_size) { + * @brief Handle SASL frame received from broker. + */ +static int rd_kafka_sasl_win32_recv(struct rd_kafka_transport_s *rktrans, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state; if (rktrans->rktrans_sasl.complete) { @@ -442,7 +445,7 @@ static int rd_kafka_sasl_win32_recv (struct rd_kafka_transport_s *rktrans, int r; r = rd_kafka_sasl_win32_validate_token( - rktrans, buf, size, errstr, errstr_size); + rktrans, buf, size, errstr, errstr_size); if (r == -1) { rktrans->rktrans_sasl.complete = 0; @@ -464,15 +467,15 @@ static int rd_kafka_sasl_win32_recv (struct rd_kafka_transport_s *rktrans, return 0; } - return rd_kafka_sasl_sspi_continue(rktrans, buf, size, - errstr, errstr_size); + return rd_kafka_sasl_sspi_continue(rktrans, buf, size, errstr, + errstr_size); } /** -* @brief Decommission SSPI state -*/ -static void rd_kafka_sasl_win32_close (rd_kafka_transport_t *rktrans) { + * @brief Decommission SSPI state + */ +static void rd_kafka_sasl_win32_close(rd_kafka_transport_t *rktrans) { rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state; if (!state) @@ -490,9 +493,10 @@ static void rd_kafka_sasl_win32_close (rd_kafka_transport_t *rktrans) { } -static int rd_kafka_sasl_win32_client_new (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_win32_client_new(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size) { rd_kafka_t *rk = rktrans->rktrans_rkb->rkb_rk; rd_kafka_sasl_win32_state_t *state; @@ -503,21 +507,19 @@ static int 
rd_kafka_sasl_win32_client_new (rd_kafka_transport_t *rktrans, return -1; } - state = rd_calloc(1, sizeof(*state)); + state = rd_calloc(1, sizeof(*state)); rktrans->rktrans_sasl.state = state; - _snwprintf(state->principal, RD_ARRAYSIZE(state->principal), - L"%hs/%hs", + _snwprintf(state->principal, RD_ARRAYSIZE(state->principal), L"%hs/%hs", rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.service_name, hostname); - state->cred = rd_kafka_sasl_sspi_cred_new(rktrans, errstr, - errstr_size); + state->cred = rd_kafka_sasl_sspi_cred_new(rktrans, errstr, errstr_size); if (!state->cred) return -1; - if (rd_kafka_sasl_sspi_continue(rktrans, NULL, 0, - errstr, errstr_size) == -1) + if (rd_kafka_sasl_sspi_continue(rktrans, NULL, 0, errstr, + errstr_size) == -1) return -1; return 0; @@ -526,9 +528,9 @@ static int rd_kafka_sasl_win32_client_new (rd_kafka_transport_t *rktrans, /** * @brief Validate config */ -static int rd_kafka_sasl_win32_conf_validate (rd_kafka_t *rk, - char *errstr, - size_t errstr_size) { +static int rd_kafka_sasl_win32_conf_validate(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { if (!rk->rk_conf.sasl.service_name) { rd_snprintf(errstr, errstr_size, "sasl.kerberos.service.name must be set"); @@ -539,9 +541,8 @@ static int rd_kafka_sasl_win32_conf_validate (rd_kafka_t *rk, } const struct rd_kafka_sasl_provider rd_kafka_sasl_win32_provider = { - .name = "Win32 SSPI", - .client_new = rd_kafka_sasl_win32_client_new, - .recv = rd_kafka_sasl_win32_recv, - .close = rd_kafka_sasl_win32_close, - .conf_validate = rd_kafka_sasl_win32_conf_validate -}; + .name = "Win32 SSPI", + .client_new = rd_kafka_sasl_win32_client_new, + .recv = rd_kafka_sasl_win32_recv, + .close = rd_kafka_sasl_win32_close, + .conf_validate = rd_kafka_sasl_win32_conf_validate}; diff --git a/src/rdkafka_ssl.c b/src/rdkafka_ssl.c index 9d1f2d0a2b..2d5e138aa2 100644 --- a/src/rdkafka_ssl.c +++ b/src/rdkafka_ssl.c @@ -38,9 +38,9 @@ #ifdef _WIN32 #include -#pragma comment (lib, "crypt32.lib") 
-#pragma comment (lib, "libcrypto.lib") -#pragma comment (lib, "libssl.lib") +#pragma comment(lib, "crypt32.lib") +#pragma comment(lib, "libcrypto.lib") +#pragma comment(lib, "libssl.lib") #endif #include @@ -60,20 +60,20 @@ * We use in-code Valgrind macros to suppress those warnings. */ #include #else -#define VALGRIND_MAKE_MEM_DEFINED(A,B) +#define VALGRIND_MAKE_MEM_DEFINED(A, B) #endif #if OPENSSL_VERSION_NUMBER < 0x10100000L static mtx_t *rd_kafka_ssl_locks; -static int rd_kafka_ssl_locks_cnt; +static int rd_kafka_ssl_locks_cnt; #endif /** * @brief Close and destroy SSL session */ -void rd_kafka_transport_ssl_close (rd_kafka_transport_t *rktrans) { +void rd_kafka_transport_ssl_close(rd_kafka_transport_t *rktrans) { SSL_shutdown(rktrans->rktrans_ssl); SSL_free(rktrans->rktrans_ssl); rktrans->rktrans_ssl = NULL; @@ -85,7 +85,7 @@ void rd_kafka_transport_ssl_close (rd_kafka_transport_t *rktrans) { * the next SSL_*() operation fails. */ static RD_INLINE void -rd_kafka_transport_ssl_clear_error (rd_kafka_transport_t *rktrans) { +rd_kafka_transport_ssl_clear_error(rd_kafka_transport_t *rktrans) { ERR_clear_error(); #ifdef _WIN32 WSASetLastError(0); @@ -99,25 +99,22 @@ rd_kafka_transport_ssl_clear_error (rd_kafka_transport_t *rktrans) { * the last thread-local error in OpenSSL, or an empty string * if no error. */ -const char *rd_kafka_ssl_last_error_str (void) { +const char *rd_kafka_ssl_last_error_str(void) { static RD_TLS char errstr[256]; unsigned long l; const char *file, *data; int line, flags; - l = ERR_peek_last_error_line_data(&file, &line, - &data, &flags); + l = ERR_peek_last_error_line_data(&file, &line, &data, &flags); if (!l) return ""; - rd_snprintf(errstr, sizeof(errstr), - "%lu:%s:%s:%s:%d: %s", - l, - ERR_lib_error_string(l), - ERR_func_error_string(l), - file, line, - ((flags & ERR_TXT_STRING) && data && *data) ? 
- data : ERR_reason_error_string(l)); + rd_snprintf(errstr, sizeof(errstr), "%lu:%s:%s:%s:%d: %s", l, + ERR_lib_error_string(l), ERR_func_error_string(l), file, + line, + ((flags & ERR_TXT_STRING) && data && *data) + ? data + : ERR_reason_error_string(l)); return errstr; } @@ -129,8 +126,10 @@ const char *rd_kafka_ssl_last_error_str (void) { * If 'rkb' is non-NULL broker-specific logging will be used, * else it will fall back on global 'rk' debugging. */ -static char *rd_kafka_ssl_error (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - char *errstr, size_t errstr_size) { +static char *rd_kafka_ssl_error(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + char *errstr, + size_t errstr_size) { unsigned long l; const char *file, *data; int line, flags; @@ -141,8 +140,8 @@ static char *rd_kafka_ssl_error (rd_kafka_t *rk, rd_kafka_broker_t *rkb, rk = rkb->rkb_rk; } - while ((l = ERR_get_error_line_data(&file, &line, - &data, &flags)) != 0) { + while ((l = ERR_get_error_line_data(&file, &line, &data, &flags)) != + 0) { char buf[256]; if (cnt++ > 0) { @@ -160,15 +159,12 @@ static char *rd_kafka_ssl_error (rd_kafka_t *rk, rd_kafka_broker_t *rkb, /* Include openssl file:line if debugging is enabled */ if (rk->rk_conf.log_level >= LOG_DEBUG) - rd_snprintf(errstr, errstr_size, "%s:%d: %s%s%s", - file, line, buf, - data ? ": " : "", + rd_snprintf(errstr, errstr_size, "%s:%d: %s%s%s", file, + line, buf, data ? ": " : "", data ? data : ""); else - rd_snprintf(errstr, errstr_size, "%s%s%s", - buf, - data ? ": " : "", - data ? data : ""); + rd_snprintf(errstr, errstr_size, "%s%s%s", buf, + data ? ": " : "", data ? 
data : ""); } if (cnt == 0) @@ -188,13 +184,14 @@ static char *rd_kafka_ssl_error (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * Locality: broker thread */ static RD_INLINE int -rd_kafka_transport_ssl_io_update (rd_kafka_transport_t *rktrans, int ret, - char *errstr, size_t errstr_size) { +rd_kafka_transport_ssl_io_update(rd_kafka_transport_t *rktrans, + int ret, + char *errstr, + size_t errstr_size) { int serr = SSL_get_error(rktrans->rktrans_ssl, ret); int serr2; - switch (serr) - { + switch (serr) { case SSL_ERROR_WANT_READ: rd_kafka_transport_poll_set(rktrans, POLLIN); break; @@ -207,8 +204,8 @@ rd_kafka_transport_ssl_io_update (rd_kafka_transport_t *rktrans, int ret, case SSL_ERROR_SYSCALL: serr2 = ERR_peek_error(); if (serr2) - rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, - errstr, errstr_size); + rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr, + errstr_size); else if (!rd_socket_errno || rd_socket_errno == ECONNRESET) rd_snprintf(errstr, errstr_size, "Disconnected"); else @@ -222,17 +219,18 @@ rd_kafka_transport_ssl_io_update (rd_kafka_transport_t *rktrans, int ret, return -1; default: - rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, - errstr, errstr_size); + rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr, + errstr_size); return -1; } return 0; } -ssize_t rd_kafka_transport_ssl_send (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size) { +ssize_t rd_kafka_transport_ssl_send(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size) { ssize_t sum = 0; const void *p; size_t rlen; @@ -246,8 +244,7 @@ ssize_t rd_kafka_transport_ssl_send (rd_kafka_transport_t *rktrans, r = SSL_write(rktrans->rktrans_ssl, p, (int)rlen); if (unlikely(r <= 0)) { - if (rd_kafka_transport_ssl_io_update(rktrans, r, - errstr, + if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, errstr_size) == -1) return -1; else @@ -265,14 +262,14 @@ ssize_t rd_kafka_transport_ssl_send (rd_kafka_transport_t *rktrans, * 
the next SSL_write() call fail instead? */ if ((size_t)r < rlen) break; - } return sum; } -ssize_t rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans, - rd_buf_t *rbuf, - char *errstr, size_t errstr_size) { +ssize_t rd_kafka_transport_ssl_recv(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size) { ssize_t sum = 0; void *p; size_t len; @@ -285,8 +282,7 @@ ssize_t rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans, r = SSL_read(rktrans->rktrans_ssl, p, (int)len); if (unlikely(r <= 0)) { - if (rd_kafka_transport_ssl_io_update(rktrans, r, - errstr, + if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, errstr_size) == -1) return -1; else @@ -304,10 +300,8 @@ ssize_t rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans, * the next SSL_read() call fail instead? */ if ((size_t)r < len) break; - } return sum; - } @@ -316,8 +310,10 @@ ssize_t rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans, * * Locality: application thread */ -static int rd_kafka_transport_ssl_passwd_cb (char *buf, int size, int rwflag, - void *userdata) { +static int rd_kafka_transport_ssl_passwd_cb(char *buf, + int size, + int rwflag, + void *userdata) { rd_kafka_t *rk = userdata; int pwlen; @@ -332,7 +328,7 @@ static int rd_kafka_transport_ssl_passwd_cb (char *buf, int size, int rwflag, } - pwlen = (int) strlen(rk->rk_conf.ssl.key_password); + pwlen = (int)strlen(rk->rk_conf.ssl.key_password); memcpy(buf, rk->rk_conf.ssl.key_password, RD_MIN(pwlen, size)); return pwlen; @@ -348,23 +344,22 @@ static int rd_kafka_transport_ssl_passwd_cb (char *buf, int size, int rwflag, * * @sa SSL_CTX_set_verify() */ -static int -rd_kafka_transport_ssl_cert_verify_cb (int preverify_ok, - X509_STORE_CTX *x509_ctx) { +static int rd_kafka_transport_ssl_cert_verify_cb(int preverify_ok, + X509_STORE_CTX *x509_ctx) { rd_kafka_transport_t *rktrans = rd_kafka_curr_transport; rd_kafka_broker_t *rkb; rd_kafka_t *rk; X509 *cert; char *buf = NULL; - int buf_size; 
- int depth; - int x509_orig_error, x509_error; - char errstr[512]; - int ok; + int buf_size; + int depth; + int x509_orig_error, x509_error; + char errstr[512]; + int ok; rd_assert(rktrans != NULL); rkb = rktrans->rktrans_rkb; - rk = rkb->rkb_rk; + rk = rkb->rkb_rk; cert = X509_STORE_CTX_get_current_cert(x509_ctx); if (!cert) { @@ -387,14 +382,9 @@ rd_kafka_transport_ssl_cert_verify_cb (int preverify_ok, *errstr = '\0'; /* Call application's verification callback. */ - ok = rk->rk_conf.ssl.cert_verify_cb(rk, - rkb->rkb_nodename, - rkb->rkb_nodeid, - &x509_error, - depth, - buf, (size_t)buf_size, - errstr, sizeof(errstr), - rk->rk_conf.opaque); + ok = rk->rk_conf.ssl.cert_verify_cb( + rk, rkb->rkb_nodename, rkb->rkb_nodeid, &x509_error, depth, buf, + (size_t)buf_size, errstr, sizeof(errstr), rk->rk_conf.opaque); OPENSSL_free(buf); @@ -402,10 +392,10 @@ rd_kafka_transport_ssl_cert_verify_cb (int preverify_ok, char subject[128]; char issuer[128]; - X509_NAME_oneline(X509_get_subject_name(cert), - subject, sizeof(subject)); - X509_NAME_oneline(X509_get_issuer_name(cert), - issuer, sizeof(issuer)); + X509_NAME_oneline(X509_get_subject_name(cert), subject, + sizeof(subject)); + X509_NAME_oneline(X509_get_issuer_name(cert), issuer, + sizeof(issuer)); rd_rkb_log(rkb, LOG_ERR, "SSLCERTVRFY", "Certificate (subject=%s, issuer=%s) verification " "callback failed: %s", @@ -429,9 +419,9 @@ rd_kafka_transport_ssl_cert_verify_cb (int preverify_ok, * * @returns 0 on success or -1 on error. 
*/ -static int -rd_kafka_transport_ssl_set_endpoint_id (rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size) { +static int rd_kafka_transport_ssl_set_endpoint_id(rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size) { char name[RD_KAFKA_NODENAME_SIZE]; char *t; @@ -446,16 +436,16 @@ rd_kafka_transport_ssl_set_endpoint_id (rd_kafka_transport_t *rktrans, #if (OPENSSL_VERSION_NUMBER >= 0x0090806fL) && !defined(OPENSSL_NO_TLSEXT) /* If non-numerical hostname, send it for SNI */ - if (!(/*ipv6*/(strchr(name, ':') && - strspn(name, "0123456789abcdefABCDEF:.[]%") == - strlen(name)) || - /*ipv4*/strspn(name, "0123456789.") == strlen(name)) && + if (!(/*ipv6*/ (strchr(name, ':') && + strspn(name, "0123456789abcdefABCDEF:.[]%") == + strlen(name)) || + /*ipv4*/ strspn(name, "0123456789.") == strlen(name)) && !SSL_set_tlsext_host_name(rktrans->rktrans_ssl, name)) goto fail; #endif - if (rktrans->rktrans_rkb->rkb_rk->rk_conf. - ssl.endpoint_identification == RD_KAFKA_SSL_ENDPOINT_ID_NONE) + if (rktrans->rktrans_rkb->rkb_rk->rk_conf.ssl.endpoint_identification == + RD_KAFKA_SSL_ENDPOINT_ID_NONE) return 0; #if OPENSSL_VERSION_NUMBER >= 0x10100000 @@ -479,14 +469,12 @@ rd_kafka_transport_ssl_set_endpoint_id (rd_kafka_transport_t *rktrans, #endif rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "ENDPOINT", - "Enabled endpoint identification using hostname %s", - name); + "Enabled endpoint identification using hostname %s", name); return 0; - fail: - rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, - errstr, errstr_size); +fail: + rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr, errstr_size); return -1; } @@ -496,9 +484,10 @@ rd_kafka_transport_ssl_set_endpoint_id (rd_kafka_transport_t *rktrans, * * @returns -1 on failure, else 0. 
*/ -int rd_kafka_transport_ssl_connect (rd_kafka_broker_t *rkb, - rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size) { +int rd_kafka_transport_ssl_connect(rd_kafka_broker_t *rkb, + rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size) { int r; rktrans->rktrans_ssl = SSL_new(rkb->rkb_rk->rk_conf.ssl.ctx); @@ -522,20 +511,20 @@ int rd_kafka_transport_ssl_connect (rd_kafka_broker_t *rkb, return 0; } - if (rd_kafka_transport_ssl_io_update(rktrans, r, - errstr, errstr_size) == -1) + if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, errstr_size) == + -1) return -1; return 0; - fail: +fail: rd_kafka_ssl_error(NULL, rkb, errstr, errstr_size); return -1; } static RD_UNUSED int -rd_kafka_transport_ssl_io_event (rd_kafka_transport_t *rktrans, int events) { +rd_kafka_transport_ssl_io_event(rd_kafka_transport_t *rktrans, int events) { int r; char errstr[512]; @@ -543,19 +532,17 @@ rd_kafka_transport_ssl_io_event (rd_kafka_transport_t *rktrans, int events) { rd_kafka_transport_ssl_clear_error(rktrans); r = SSL_write(rktrans->rktrans_ssl, NULL, 0); - if (rd_kafka_transport_ssl_io_update(rktrans, r, - errstr, + if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, sizeof(errstr)) == -1) goto fail; } return 0; - fail: +fail: /* Permanent error */ rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__TRANSPORT, - "%s", errstr); + RD_KAFKA_RESP_ERR__TRANSPORT, "%s", errstr); return -1; } @@ -563,7 +550,7 @@ rd_kafka_transport_ssl_io_event (rd_kafka_transport_t *rktrans, int events) { /** * @brief Verify SSL handshake was valid. */ -static int rd_kafka_transport_ssl_verify (rd_kafka_transport_t *rktrans) { +static int rd_kafka_transport_ssl_verify(rd_kafka_transport_t *rktrans) { long int rl; X509 *cert; @@ -599,7 +586,7 @@ static int rd_kafka_transport_ssl_verify (rd_kafka_transport_t *rktrans) { * @returns -1 on error, 0 if handshake is still in progress, * or 1 on completion. 
*/ -int rd_kafka_transport_ssl_handshake (rd_kafka_transport_t *rktrans) { +int rd_kafka_transport_ssl_handshake(rd_kafka_transport_t *rktrans) { rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; char errstr[512]; int r; @@ -613,34 +600,37 @@ int rd_kafka_transport_ssl_handshake (rd_kafka_transport_t *rktrans) { rd_kafka_transport_connect_done(rktrans, NULL); return 1; - } else if (rd_kafka_transport_ssl_io_update(rktrans, r, - errstr, + } else if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, sizeof(errstr)) == -1) { - const char *extra = ""; + const char *extra = ""; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__SSL; if (strstr(errstr, "unexpected message")) - extra = ": client SSL authentication might be " - "required (see ssl.key.location and " - "ssl.certificate.location and consult the " - "broker logs for more information)"; - else if (strstr(errstr, "tls_process_server_certificate:" + extra = + ": client SSL authentication might be " + "required (see ssl.key.location and " + "ssl.certificate.location and consult the " + "broker logs for more information)"; + else if (strstr(errstr, + "tls_process_server_certificate:" "certificate verify failed") || - strstr(errstr, "get_server_certificate:" + strstr(errstr, + "get_server_certificate:" "certificate verify failed")) - extra = ": broker certificate could not be verified, " - "verify that ssl.ca.location is correctly " - "configured or root CA certificates are " - "installed" + extra = + ": broker certificate could not be verified, " + "verify that ssl.ca.location is correctly " + "configured or root CA certificates are " + "installed" #ifdef __APPLE__ - " (brew install openssl)" + " (brew install openssl)" #elif defined(_WIN32) - " (add broker's CA certificate to the Windows " - "Root certificate store)" + " (add broker's CA certificate to the Windows " + "Root certificate store)" #else - " (install ca-certificates package)" + " (install ca-certificates package)" #endif - ; + ; else if (!strcmp(errstr, 
"Disconnected")) { extra = ": connecting to a PLAINTEXT broker listener?"; /* Disconnects during handshake are most likely @@ -669,8 +659,8 @@ int rd_kafka_transport_ssl_handshake (rd_kafka_transport_t *rktrans) { * * @returns a new EVP_PKEY on success or NULL on error. */ -static EVP_PKEY *rd_kafka_ssl_PKEY_from_string (rd_kafka_t *rk, - const char *str) { +static EVP_PKEY *rd_kafka_ssl_PKEY_from_string(rd_kafka_t *rk, + const char *str) { BIO *bio = BIO_new_mem_buf((void *)str, -1); EVP_PKEY *pkey; @@ -689,12 +679,12 @@ static EVP_PKEY *rd_kafka_ssl_PKEY_from_string (rd_kafka_t *rk, * * @returns a new X509 on success or NULL on error. */ -static X509 *rd_kafka_ssl_X509_from_string (rd_kafka_t *rk, const char *str) { +static X509 *rd_kafka_ssl_X509_from_string(rd_kafka_t *rk, const char *str) { BIO *bio = BIO_new_mem_buf((void *)str, -1); X509 *x509; - x509 = PEM_read_bio_X509(bio, NULL, - rd_kafka_transport_ssl_passwd_cb, rk); + x509 = + PEM_read_bio_X509(bio, NULL, rd_kafka_transport_ssl_passwd_cb, rk); BIO_free(bio); @@ -707,8 +697,9 @@ static X509 *rd_kafka_ssl_X509_from_string (rd_kafka_t *rk, const char *str) { /** * @brief Attempt load CA certificates from a Windows Certificate store. 
*/ -static int rd_kafka_ssl_win_load_cert_store (rd_kafka_t *rk, SSL_CTX *ctx, - const char *store_name) { +static int rd_kafka_ssl_win_load_cert_store(rd_kafka_t *rk, + SSL_CTX *ctx, + const char *store_name) { HCERTSTORE w_store; PCCERT_CONTEXT w_cctx = NULL; X509_STORE *store; @@ -728,24 +719,22 @@ static int rd_kafka_ssl_win_load_cert_store (rd_kafka_t *rk, SSL_CTX *ctx, return -1; } wstore_name = rd_alloca(sizeof(*wstore_name) * wsize); - werr = mbstowcs_s(NULL, wstore_name, wsize, store_name, + werr = mbstowcs_s(NULL, wstore_name, wsize, store_name, strlen(store_name)); rd_assert(!werr); - w_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, - 0, - 0, - CERT_SYSTEM_STORE_CURRENT_USER| - CERT_STORE_READONLY_FLAG| - CERT_STORE_OPEN_EXISTING_FLAG, + w_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, 0, 0, + CERT_SYSTEM_STORE_CURRENT_USER | + CERT_STORE_READONLY_FLAG | + CERT_STORE_OPEN_EXISTING_FLAG, wstore_name); if (!w_store) { - rd_kafka_log(rk, LOG_ERR, "CERTSTORE", - "Failed to open Windows certificate " - "%s store: %s", - store_name, - rd_strerror_w32(GetLastError(), errstr, - sizeof(errstr))); + rd_kafka_log( + rk, LOG_ERR, "CERTSTORE", + "Failed to open Windows certificate " + "%s store: %s", + store_name, + rd_strerror_w32(GetLastError(), errstr, sizeof(errstr))); return -1; } @@ -767,9 +756,9 @@ static int rd_kafka_ssl_win_load_cert_store (rd_kafka_t *rk, SSL_CTX *ctx, /* Add cert to OpenSSL's trust store */ if (!X509_STORE_add_cert(store, x509)) - fail_cnt++; + fail_cnt++; else - cnt++; + cnt++; X509_free(x509); } @@ -785,7 +774,7 @@ static int rd_kafka_ssl_win_load_cert_store (rd_kafka_t *rk, SSL_CTX *ctx, cnt, store_name, fail_cnt); if (cnt == 0 && fail_cnt > 0) - return -1; + return -1; return cnt; } @@ -795,9 +784,9 @@ static int rd_kafka_ssl_win_load_cert_store (rd_kafka_t *rk, SSL_CTX *ctx, * * @returns the number of successfully loaded certificates, or -1 on error. 
*/ -static int rd_kafka_ssl_win_load_cert_stores (rd_kafka_t *rk, - SSL_CTX *ctx, - const char *store_names) { +static int rd_kafka_ssl_win_load_cert_stores(rd_kafka_t *rk, + SSL_CTX *ctx, + const char *store_names) { char *s; int cert_cnt = 0, fail_cnt = 0; @@ -824,9 +813,9 @@ static int rd_kafka_ssl_win_load_cert_stores (rd_kafka_t *rk, t = strchr(s, (int)','); if (t) { *t = '\0'; - s = t+1; - for (; t >= store_name && isspace((int)*t) ; t--) - *t = '\0'; + s = t + 1; + for (; t >= store_name && isspace((int)*t); t--) + *t = '\0'; } else { s = ""; } @@ -853,8 +842,8 @@ static int rd_kafka_ssl_win_load_cert_stores (rd_kafka_t *rk, * * @returns 0 if CA location was set, else -1. */ -static int rd_kafka_ssl_probe_and_set_default_ca_location (rd_kafka_t *rk, - SSL_CTX *ctx) { +static int rd_kafka_ssl_probe_and_set_default_ca_location(rd_kafka_t *rk, + SSL_CTX *ctx) { #if _WIN32 /* No standard location on Windows, CA certs are in the ROOT store. */ return -1; @@ -865,47 +854,47 @@ static int rd_kafka_ssl_probe_and_set_default_ca_location (rd_kafka_t *rk, * https://golang.org/search?q=certFiles and certDirectories */ static const char *paths[] = { - "/etc/pki/tls/certs/ca-bundle.crt", - "/etc/ssl/certs/ca-bundle.crt", - "/etc/pki/tls/certs/ca-bundle.trust.crt", - "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", + "/etc/pki/tls/certs/ca-bundle.crt", + "/etc/ssl/certs/ca-bundle.crt", + "/etc/pki/tls/certs/ca-bundle.trust.crt", + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", - "/etc/ssl/ca-bundle.pem", - "/etc/pki/tls/cacert.pem", - "/etc/ssl/cert.pem", - "/etc/ssl/cacert.pem", + "/etc/ssl/ca-bundle.pem", + "/etc/pki/tls/cacert.pem", + "/etc/ssl/cert.pem", + "/etc/ssl/cacert.pem", - "/etc/certs/ca-certificates.crt", - "/etc/ssl/certs/ca-certificates.crt", + "/etc/certs/ca-certificates.crt", + "/etc/ssl/certs/ca-certificates.crt", - "/etc/ssl/certs", + "/etc/ssl/certs", - "/usr/local/etc/ssl/cert.pem", - "/usr/local/etc/ssl/cacert.pem", + 
"/usr/local/etc/ssl/cert.pem", + "/usr/local/etc/ssl/cacert.pem", - "/usr/local/etc/ssl/certs/cert.pem", - "/usr/local/etc/ssl/certs/cacert.pem", + "/usr/local/etc/ssl/certs/cert.pem", + "/usr/local/etc/ssl/certs/cacert.pem", - /* BSD */ - "/usr/local/share/certs/ca-root-nss.crt", - "/etc/openssl/certs/ca-certificates.crt", + /* BSD */ + "/usr/local/share/certs/ca-root-nss.crt", + "/etc/openssl/certs/ca-certificates.crt", #ifdef __APPLE__ - "/private/etc/ssl/cert.pem", - "/private/etc/ssl/certs", - "/usr/local/etc/openssl@1.1/cert.pem", - "/usr/local/etc/openssl@1.0/cert.pem", - "/usr/local/etc/openssl/certs", - "/System/Library/OpenSSL", + "/private/etc/ssl/cert.pem", + "/private/etc/ssl/certs", + "/usr/local/etc/openssl@1.1/cert.pem", + "/usr/local/etc/openssl@1.0/cert.pem", + "/usr/local/etc/openssl/certs", + "/System/Library/OpenSSL", #endif #ifdef _AIX - "/var/ssl/certs/ca-bundle.crt", + "/var/ssl/certs/ca-bundle.crt", #endif - NULL, + NULL, }; const char *path = NULL; int i; - for (i = 0 ; (path = paths[i]) ; i++) { + for (i = 0; (path = paths[i]); i++) { struct stat st; rd_bool_t is_dir; int r; @@ -920,10 +909,10 @@ static int rd_kafka_ssl_probe_and_set_default_ca_location (rd_kafka_t *rk, rd_kafka_dbg(rk, SECURITY, "CACERTS", "Setting default CA certificate location " - "to %s, override with ssl.ca.location", path); + "to %s, override with ssl.ca.location", + path); - r = SSL_CTX_load_verify_locations(ctx, - is_dir ? NULL : path, + r = SSL_CTX_load_verify_locations(ctx, is_dir ? NULL : path, is_dir ? path : NULL); if (r != 1) { char errstr[512]; @@ -953,9 +942,11 @@ static int rd_kafka_ssl_probe_and_set_default_ca_location (rd_kafka_t *rk, * * @returns -1 on error, or 0 on success. 
*/ -static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, - char *errstr, size_t errstr_size) { - rd_bool_t ca_probe = rd_true; +static int rd_kafka_ssl_set_certs(rd_kafka_t *rk, + SSL_CTX *ctx, + char *errstr, + size_t errstr_size) { + rd_bool_t ca_probe = rd_true; rd_bool_t check_pkey = rd_false; int r; @@ -980,21 +971,17 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, if (rk->rk_conf.ssl.ca_location && strcmp(rk->rk_conf.ssl.ca_location, "probe")) { /* CA certificate location, either file or directory. */ - int is_dir = rd_kafka_path_is_dir( - rk->rk_conf.ssl.ca_location); + int is_dir = + rd_kafka_path_is_dir(rk->rk_conf.ssl.ca_location); rd_kafka_dbg(rk, SECURITY, "SSL", "Loading CA certificate(s) from %s %s", is_dir ? "directory" : "file", rk->rk_conf.ssl.ca_location); - r = SSL_CTX_load_verify_locations(ctx, - !is_dir ? - rk->rk_conf.ssl. - ca_location : NULL, - is_dir ? - rk->rk_conf.ssl. - ca_location : NULL); + r = SSL_CTX_load_verify_locations( + ctx, !is_dir ? rk->rk_conf.ssl.ca_location : NULL, + is_dir ? rk->rk_conf.ssl.ca_location : NULL); if (r != 1) { rd_snprintf(errstr, errstr_size, @@ -1018,7 +1005,7 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, "Loading CA certificate from string"); x509 = rd_kafka_ssl_X509_from_string( - rk, rk->rk_conf.ssl.ca_pem); + rk, rk->rk_conf.ssl.ca_pem); if (!x509) { rd_snprintf(errstr, errstr_size, "ssl.ca.pem failed: " @@ -1045,18 +1032,20 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, /* Attempt to load CA root certificates from the * configured Windows certificate stores. 
*/ r = rd_kafka_ssl_win_load_cert_stores( - rk, ctx, rk->rk_conf.ssl.ca_cert_stores); + rk, ctx, rk->rk_conf.ssl.ca_cert_stores); if (r == 0) { - rd_kafka_log(rk, LOG_NOTICE, "CERTSTORE", - "No CA certificates loaded from " - "Windows certificate stores: " - "falling back to default OpenSSL CA paths"); + rd_kafka_log( + rk, LOG_NOTICE, "CERTSTORE", + "No CA certificates loaded from " + "Windows certificate stores: " + "falling back to default OpenSSL CA paths"); r = -1; } else if (r == -1) - rd_kafka_log(rk, LOG_NOTICE, "CERTSTORE", - "Failed to load CA certificates from " - "Windows certificate stores: " - "falling back to default OpenSSL CA paths"); + rd_kafka_log( + rk, LOG_NOTICE, "CERTSTORE", + "Failed to load CA certificates from " + "Windows certificate stores: " + "falling back to default OpenSSL CA paths"); #else r = -1; #endif @@ -1066,7 +1055,7 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, #if WITH_STATIC_LIB_libcrypto || r == -1 #endif - ) { + ) { /* If OpenSSL was linked statically there is a risk * that the system installed CA certificate path * doesn't match the cert path of OpenSSL. @@ -1074,8 +1063,8 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, * of standard CA certificate paths and use the * first one that is found. * Ignore failures. */ - r = rd_kafka_ssl_probe_and_set_default_ca_location( - rk, ctx); + r = rd_kafka_ssl_probe_and_set_default_ca_location(rk, + ctx); } if (r == -1) { @@ -1086,25 +1075,24 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, if (r != 1) { char errstr2[512]; /* Read error and clear the error stack. 
*/ - rd_kafka_ssl_error(rk, NULL, - errstr2, sizeof(errstr2)); + rd_kafka_ssl_error(rk, NULL, errstr2, + sizeof(errstr2)); rd_kafka_dbg( - rk, SECURITY, "SSL", - "SSL_CTX_set_default_verify_paths() " - "failed: %s: ignoring", errstr2); + rk, SECURITY, "SSL", + "SSL_CTX_set_default_verify_paths() " + "failed: %s: ignoring", + errstr2); } r = 0; } } if (rk->rk_conf.ssl.crl_location) { - rd_kafka_dbg(rk, SECURITY, "SSL", - "Loading CRL from file %s", + rd_kafka_dbg(rk, SECURITY, "SSL", "Loading CRL from file %s", rk->rk_conf.ssl.crl_location); - r = SSL_CTX_load_verify_locations(ctx, - rk->rk_conf.ssl.crl_location, - NULL); + r = SSL_CTX_load_verify_locations( + ctx, rk->rk_conf.ssl.crl_location, NULL); if (r != 1) { rd_snprintf(errstr, errstr_size, @@ -1113,8 +1101,7 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, } - rd_kafka_dbg(rk, SECURITY, "SSL", - "Enabling CRL checks"); + rd_kafka_dbg(rk, SECURITY, "SSL", "Enabling CRL checks"); X509_STORE_set_flags(SSL_CTX_get_cert_store(ctx), X509_V_FLAG_CRL_CHECK); @@ -1131,8 +1118,7 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, rd_assert(rk->rk_conf.ssl.cert->x509); r = SSL_CTX_use_certificate(ctx, rk->rk_conf.ssl.cert->x509); if (r != 1) { - rd_snprintf(errstr, errstr_size, - "ssl_cert failed: "); + rd_snprintf(errstr, errstr_size, "ssl_cert failed: "); return -1; } } @@ -1142,9 +1128,8 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, "Loading public key from file %s", rk->rk_conf.ssl.cert_location); - r = SSL_CTX_use_certificate_chain_file(ctx, - rk->rk_conf. 
- ssl.cert_location); + r = SSL_CTX_use_certificate_chain_file( + ctx, rk->rk_conf.ssl.cert_location); if (r != 1) { rd_snprintf(errstr, errstr_size, @@ -1159,8 +1144,8 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, rd_kafka_dbg(rk, SECURITY, "SSL", "Loading public key from string"); - x509 = rd_kafka_ssl_X509_from_string(rk, - rk->rk_conf.ssl.cert_pem); + x509 = + rd_kafka_ssl_X509_from_string(rk, rk->rk_conf.ssl.cert_pem); if (!x509) { rd_snprintf(errstr, errstr_size, "ssl.certificate.pem failed: " @@ -1203,9 +1188,8 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, "Loading private key file from %s", rk->rk_conf.ssl.key_location); - r = SSL_CTX_use_PrivateKey_file(ctx, - rk->rk_conf.ssl.key_location, - SSL_FILETYPE_PEM); + r = SSL_CTX_use_PrivateKey_file( + ctx, rk->rk_conf.ssl.key_location, SSL_FILETYPE_PEM); if (r != 1) { rd_snprintf(errstr, errstr_size, "ssl.key.location failed: "); @@ -1221,8 +1205,8 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, rd_kafka_dbg(rk, SECURITY, "SSL", "Loading private key from string"); - pkey = rd_kafka_ssl_PKEY_from_string(rk, - rk->rk_conf.ssl.key_pem); + pkey = + rd_kafka_ssl_PKEY_from_string(rk, rk->rk_conf.ssl.key_pem); if (!pkey) { rd_snprintf(errstr, errstr_size, "ssl.key.pem failed: " @@ -1281,8 +1265,8 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, pkey = EVP_PKEY_new(); cert = X509_new(); - if (!PKCS12_parse(p12, rk->rk_conf.ssl.keystore_password, - &pkey, &cert, &ca)) { + if (!PKCS12_parse(p12, rk->rk_conf.ssl.keystore_password, &pkey, + &cert, &ca)) { EVP_PKEY_free(pkey); X509_free(cert); PKCS12_free(p12); @@ -1329,12 +1313,12 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, STACK_OF(X509_NAME) *cert_names = sk_X509_NAME_new_null(); STACK_OF(X509_OBJECT) *roots = X509_STORE_get0_objects(SSL_CTX_get_cert_store(ctx)); - X509 *x509 = NULL; + X509 *x509 = NULL; EVP_PKEY *pkey = NULL; - int i = 0; + int i = 0; for (i = 0; i 
< sk_X509_OBJECT_num(roots); i++) { - x509 = X509_OBJECT_get0_X509(sk_X509_OBJECT_value(roots, - i)); + x509 = X509_OBJECT_get0_X509( + sk_X509_OBJECT_value(roots, i)); if (x509) sk_X509_NAME_push(cert_names, @@ -1345,11 +1329,9 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, sk_X509_NAME_free(cert_names); x509 = NULL; - r = ENGINE_load_ssl_client_cert( - rk->rk_conf.ssl.engine, NULL, - cert_names, &x509, &pkey, - NULL, NULL, - rk->rk_conf.ssl.engine_callback_data); + r = ENGINE_load_ssl_client_cert( + rk->rk_conf.ssl.engine, NULL, cert_names, &x509, &pkey, + NULL, NULL, rk->rk_conf.ssl.engine_callback_data); sk_X509_NAME_free(cert_names); if (r == -1 || !x509 || !pkey) { @@ -1397,8 +1379,7 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, /* Check that a valid private/public key combo was set. */ if (check_pkey && SSL_CTX_check_private_key(ctx) != 1) { - rd_snprintf(errstr, errstr_size, - "Private key check failed: "); + rd_snprintf(errstr, errstr_size, "Private key check failed: "); return -1; } @@ -1413,7 +1394,7 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, * * @locks rd_kafka_wrlock() MUST be held */ -void rd_kafka_ssl_ctx_term (rd_kafka_t *rk) { +void rd_kafka_ssl_ctx_term(rd_kafka_t *rk) { SSL_CTX_free(rk->rk_conf.ssl.ctx); rk->rk_conf.ssl.ctx = NULL; @@ -1429,9 +1410,8 @@ void rd_kafka_ssl_ctx_term (rd_kafka_t *rk) { * * @returns true on success, false on error. 
*/ -static rd_bool_t rd_kafka_ssl_ctx_init_engine (rd_kafka_t *rk, - char *errstr, - size_t errstr_size) { +static rd_bool_t +rd_kafka_ssl_ctx_init_engine(rd_kafka_t *rk, char *errstr, size_t errstr_size) { ENGINE *engine; /* OpenSSL loads an engine as dynamic id and stores it in @@ -1451,8 +1431,7 @@ static rd_bool_t rd_kafka_ssl_ctx_init_engine (rd_kafka_t *rk, } if (!ENGINE_ctrl_cmd_string(engine, "SO_PATH", - rk->rk_conf.ssl.engine_location, - 0)) { + rk->rk_conf.ssl.engine_location, 0)) { ENGINE_free(engine); rd_snprintf(errstr, errstr_size, "OpenSSL engine initialization failed in" @@ -1460,8 +1439,7 @@ static rd_bool_t rd_kafka_ssl_ctx_init_engine (rd_kafka_t *rk, return rd_false; } - if (!ENGINE_ctrl_cmd_string(engine, "LIST_ADD", - "1", 0)) { + if (!ENGINE_ctrl_cmd_string(engine, "LIST_ADD", "1", 0)) { ENGINE_free(engine); rd_snprintf(errstr, errstr_size, "OpenSSL engine initialization failed in" @@ -1499,28 +1477,27 @@ static rd_bool_t rd_kafka_ssl_ctx_init_engine (rd_kafka_t *rk, * * @locks rd_kafka_wrlock() MUST be held */ -int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { +int rd_kafka_ssl_ctx_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) { int r; SSL_CTX *ctx = NULL; const char *linking = #if WITH_STATIC_LIB_libcrypto - "statically linked " + "statically linked " #else - "" + "" #endif - ; + ; #if OPENSSL_VERSION_NUMBER >= 0x10100000 - rd_kafka_dbg(rk, SECURITY, "OPENSSL", "Using %sOpenSSL version %s " + rd_kafka_dbg(rk, SECURITY, "OPENSSL", + "Using %sOpenSSL version %s " "(0x%lx, librdkafka built with 0x%lx)", - linking, - OpenSSL_version(OPENSSL_VERSION), - OpenSSL_version_num(), - OPENSSL_VERSION_NUMBER); + linking, OpenSSL_version(OPENSSL_VERSION), + OpenSSL_version_num(), OPENSSL_VERSION_NUMBER); #else rd_kafka_dbg(rk, SECURITY, "OPENSSL", - "librdkafka built with %sOpenSSL version 0x%lx", - linking, OPENSSL_VERSION_NUMBER); + "librdkafka built with %sOpenSSL version 0x%lx", linking, + 
OPENSSL_VERSION_NUMBER); #endif if (errstr_size > 0) @@ -1542,8 +1519,7 @@ int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { ctx = SSL_CTX_new(SSLv23_client_method()); #endif if (!ctx) { - rd_snprintf(errstr, errstr_size, - "SSL_CTX_new() failed: "); + rd_snprintf(errstr, errstr_size, "SSL_CTX_new() failed: "); goto fail; } @@ -1558,8 +1534,7 @@ int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { /* Ciphers */ if (rk->rk_conf.ssl.cipher_suites) { - rd_kafka_dbg(rk, SECURITY, "SSL", - "Setting cipher list: %s", + rd_kafka_dbg(rk, SECURITY, "SSL", "Setting cipher list: %s", rk->rk_conf.ssl.cipher_suites); if (!SSL_CTX_set_cipher_list(ctx, rk->rk_conf.ssl.cipher_suites)) { @@ -1574,16 +1549,16 @@ int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { /* Set up broker certificate verification. */ SSL_CTX_set_verify(ctx, - rk->rk_conf.ssl.enable_verify ? - SSL_VERIFY_PEER : SSL_VERIFY_NONE, - rk->rk_conf.ssl.cert_verify_cb ? - rd_kafka_transport_ssl_cert_verify_cb : NULL); + rk->rk_conf.ssl.enable_verify ? SSL_VERIFY_PEER + : SSL_VERIFY_NONE, + rk->rk_conf.ssl.cert_verify_cb + ? rd_kafka_transport_ssl_cert_verify_cb + : NULL); #if OPENSSL_VERSION_NUMBER >= 0x1000200fL && !defined(LIBRESSL_VERSION_NUMBER) /* Curves */ if (rk->rk_conf.ssl.curves_list) { - rd_kafka_dbg(rk, SECURITY, "SSL", - "Setting curves list: %s", + rd_kafka_dbg(rk, SECURITY, "SSL", "Setting curves list: %s", rk->rk_conf.ssl.curves_list); if (!SSL_CTX_set1_curves_list(ctx, rk->rk_conf.ssl.curves_list)) { @@ -1618,9 +1593,9 @@ int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { return 0; - fail: +fail: r = (int)strlen(errstr); - rd_kafka_ssl_error(rk, NULL, errstr+r, + rd_kafka_ssl_error(rk, NULL, errstr + r, (int)errstr_size > r ? 
(int)errstr_size - r : 0); RD_IF_FREE(ctx, SSL_CTX_free); #if OPENSSL_VERSION_NUMBER >= 0x10100000 @@ -1633,7 +1608,7 @@ int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { #if OPENSSL_VERSION_NUMBER < 0x10100000L static RD_UNUSED void -rd_kafka_transport_ssl_lock_cb (int mode, int i, const char *file, int line) { +rd_kafka_transport_ssl_lock_cb(int mode, int i, const char *file, int line) { if (mode & CRYPTO_LOCK) mtx_lock(&rd_kafka_ssl_locks[i]); else @@ -1641,7 +1616,7 @@ rd_kafka_transport_ssl_lock_cb (int mode, int i, const char *file, int line) { } #endif -static RD_UNUSED unsigned long rd_kafka_transport_ssl_threadid_cb (void) { +static RD_UNUSED unsigned long rd_kafka_transport_ssl_threadid_cb(void) { #ifdef _WIN32 /* Windows makes a distinction between thread handle * and thread id, which means we can't use the @@ -1653,8 +1628,8 @@ static RD_UNUSED unsigned long rd_kafka_transport_ssl_threadid_cb (void) { } #ifdef HAVE_OPENSSL_CRYPTO_THREADID_SET_CALLBACK -static void rd_kafka_transport_libcrypto_THREADID_callback(CRYPTO_THREADID *id) -{ +static void +rd_kafka_transport_libcrypto_THREADID_callback(CRYPTO_THREADID *id) { unsigned long thread_id = rd_kafka_transport_ssl_threadid_cb(); CRYPTO_THREADID_set_numeric(id, thread_id); @@ -1664,7 +1639,7 @@ static void rd_kafka_transport_libcrypto_THREADID_callback(CRYPTO_THREADID *id) /** * @brief Global OpenSSL cleanup. */ -void rd_kafka_ssl_term (void) { +void rd_kafka_ssl_term(void) { #if OPENSSL_VERSION_NUMBER < 0x10100000L int i; @@ -1676,7 +1651,7 @@ void rd_kafka_ssl_term (void) { CRYPTO_set_id_callback(NULL); #endif - for (i = 0 ; i < rd_kafka_ssl_locks_cnt ; i++) + for (i = 0; i < rd_kafka_ssl_locks_cnt; i++) mtx_destroy(&rd_kafka_ssl_locks[i]); rd_free(rd_kafka_ssl_locks); @@ -1688,21 +1663,22 @@ void rd_kafka_ssl_term (void) { /** * @brief Global (once per process) OpenSSL init. 
*/ -void rd_kafka_ssl_init (void) { +void rd_kafka_ssl_init(void) { #if OPENSSL_VERSION_NUMBER < 0x10100000L int i; if (!CRYPTO_get_locking_callback()) { rd_kafka_ssl_locks_cnt = CRYPTO_num_locks(); - rd_kafka_ssl_locks = rd_malloc(rd_kafka_ssl_locks_cnt * + rd_kafka_ssl_locks = rd_malloc(rd_kafka_ssl_locks_cnt * sizeof(*rd_kafka_ssl_locks)); - for (i = 0 ; i < rd_kafka_ssl_locks_cnt ; i++) + for (i = 0; i < rd_kafka_ssl_locks_cnt; i++) mtx_init(&rd_kafka_ssl_locks[i], mtx_plain); CRYPTO_set_locking_callback(rd_kafka_transport_ssl_lock_cb); #ifdef HAVE_OPENSSL_CRYPTO_THREADID_SET_CALLBACK - CRYPTO_THREADID_set_callback(rd_kafka_transport_libcrypto_THREADID_callback); + CRYPTO_THREADID_set_callback( + rd_kafka_transport_libcrypto_THREADID_callback); #else CRYPTO_set_id_callback(rd_kafka_transport_ssl_threadid_cb); #endif diff --git a/src/rdkafka_ssl.h b/src/rdkafka_ssl.h index 222d53767f..325abbe1d4 100644 --- a/src/rdkafka_ssl.h +++ b/src/rdkafka_ssl.h @@ -30,26 +30,28 @@ #ifndef _RDKAFKA_SSL_H_ #define _RDKAFKA_SSL_H_ -void rd_kafka_transport_ssl_close (rd_kafka_transport_t *rktrans); -int rd_kafka_transport_ssl_connect (rd_kafka_broker_t *rkb, - rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size); -int rd_kafka_transport_ssl_handshake (rd_kafka_transport_t *rktrans); -ssize_t rd_kafka_transport_ssl_send (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size); -ssize_t rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans, - rd_buf_t *rbuf, - char *errstr, size_t errstr_size); - - -void rd_kafka_ssl_ctx_term (rd_kafka_t *rk); -int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, - char *errstr, size_t errstr_size); - -void rd_kafka_ssl_term (void); +void rd_kafka_transport_ssl_close(rd_kafka_transport_t *rktrans); +int rd_kafka_transport_ssl_connect(rd_kafka_broker_t *rkb, + rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size); +int rd_kafka_transport_ssl_handshake(rd_kafka_transport_t *rktrans); +ssize_t 
rd_kafka_transport_ssl_send(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size); +ssize_t rd_kafka_transport_ssl_recv(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size); + + +void rd_kafka_ssl_ctx_term(rd_kafka_t *rk); +int rd_kafka_ssl_ctx_init(rd_kafka_t *rk, char *errstr, size_t errstr_size); + +void rd_kafka_ssl_term(void); void rd_kafka_ssl_init(void); -const char *rd_kafka_ssl_last_error_str (void); +const char *rd_kafka_ssl_last_error_str(void); #endif /* _RDKAFKA_SSL_H_ */ diff --git a/src/rdkafka_sticky_assignor.c b/src/rdkafka_sticky_assignor.c index 0cf760d4fe..d0a6e03167 100644 --- a/src/rdkafka_sticky_assignor.c +++ b/src/rdkafka_sticky_assignor.c @@ -34,7 +34,7 @@ #include "rdunittest.h" #include -#include /* abs() */ +#include /* abs() */ /** * @name KIP-54 and KIP-341 Sticky assignor. @@ -52,7 +52,7 @@ /** @brief Assignor state from last rebalance */ typedef struct rd_kafka_sticky_assignor_state_s { rd_kafka_topic_partition_list_t *prev_assignment; - int32_t generation_id; + int32_t generation_id; } rd_kafka_sticky_assignor_state_t; @@ -70,15 +70,15 @@ typedef struct rd_kafka_sticky_assignor_state_s { * @sa PartitionMovements_t */ typedef struct ConsumerPair_s { - const char *src; /**< Source member id */ - const char *dst; /**< Destination member id */ + const char *src; /**< Source member id */ + const char *dst; /**< Destination member id */ } ConsumerPair_t; -static ConsumerPair_t *ConsumerPair_new (const char *src, const char *dst) { +static ConsumerPair_t *ConsumerPair_new(const char *src, const char *dst) { ConsumerPair_t *cpair; - cpair = rd_malloc(sizeof(*cpair)); + cpair = rd_malloc(sizeof(*cpair)); cpair->src = src ? rd_strdup(src) : NULL; cpair->dst = dst ? 
rd_strdup(dst) : NULL; @@ -86,7 +86,7 @@ static ConsumerPair_t *ConsumerPair_new (const char *src, const char *dst) { } -static void ConsumerPair_free (void *p) { +static void ConsumerPair_free(void *p) { ConsumerPair_t *cpair = p; if (cpair->src) rd_free((void *)cpair->src); @@ -95,7 +95,7 @@ static void ConsumerPair_free (void *p) { rd_free(cpair); } -static int ConsumerPair_cmp (const void *_a, const void *_b) { +static int ConsumerPair_cmp(const void *_a, const void *_b) { const ConsumerPair_t *a = _a, *b = _b; int r = strcmp(a->src ? a->src : "", b->src ? b->src : ""); if (r) @@ -104,10 +104,10 @@ static int ConsumerPair_cmp (const void *_a, const void *_b) { } -static unsigned int ConsumerPair_hash (const void *_a) { +static unsigned int ConsumerPair_hash(const void *_a) { const ConsumerPair_t *a = _a; return 31 * (a->src ? rd_map_str_hash(a->src) : 1) + - (a->dst ? rd_map_str_hash(a->dst) : 1); + (a->dst ? rd_map_str_hash(a->dst) : 1); } @@ -117,7 +117,7 @@ typedef struct ConsumerGenerationPair_s { int generation; } ConsumerGenerationPair_t; -static void ConsumerGenerationPair_destroy (void *ptr) { +static void ConsumerGenerationPair_destroy(void *ptr) { ConsumerGenerationPair_t *cgpair = ptr; rd_free(cgpair); } @@ -127,22 +127,21 @@ static void ConsumerGenerationPair_destroy (void *ptr) { * outlive the ConsumerGenerationPair_t object. 
*/ static ConsumerGenerationPair_t * -ConsumerGenerationPair_new (const char *consumer, int generation) { +ConsumerGenerationPair_new(const char *consumer, int generation) { ConsumerGenerationPair_t *cgpair = rd_malloc(sizeof(*cgpair)); - cgpair->consumer = consumer; - cgpair->generation = generation; + cgpair->consumer = consumer; + cgpair->generation = generation; return cgpair; } -static int ConsumerGenerationPair_cmp_generation (const void *_a, - const void *_b) { +static int ConsumerGenerationPair_cmp_generation(const void *_a, + const void *_b) { const ConsumerGenerationPair_t *a = _a, *b = _b; return a->generation - b->generation; } - /** * Hash map types. * @@ -178,16 +177,16 @@ typedef RD_MAP_TYPE(const char *, /** Glue type helpers */ -static map_cpair_toppar_list_t *map_cpair_toppar_list_t_new (void) { +static map_cpair_toppar_list_t *map_cpair_toppar_list_t_new(void) { map_cpair_toppar_list_t *map = rd_calloc(1, sizeof(*map)); - RD_MAP_INIT(map, 0, ConsumerPair_cmp, ConsumerPair_hash, - NULL, rd_kafka_topic_partition_list_destroy_free); + RD_MAP_INIT(map, 0, ConsumerPair_cmp, ConsumerPair_hash, NULL, + rd_kafka_topic_partition_list_destroy_free); return map; } -static void map_cpair_toppar_list_t_free (void *ptr) { +static void map_cpair_toppar_list_t_free(void *ptr) { map_cpair_toppar_list_t *map = ptr; RD_MAP_DESTROY(map); rd_free(map); @@ -195,7 +194,6 @@ static void map_cpair_toppar_list_t_free (void *ptr) { - /** * @struct Provides current state of partition movements between consumers * for each topic, and possible movements for each partition. 
@@ -206,32 +204,25 @@ typedef struct PartitionMovements_s { } PartitionMovements_t; -static void PartitionMovements_init (PartitionMovements_t *pmov, - size_t topic_cnt) { - RD_MAP_INIT(&pmov->partitionMovements, - topic_cnt * 3, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - NULL, - ConsumerPair_free); +static void PartitionMovements_init(PartitionMovements_t *pmov, + size_t topic_cnt) { + RD_MAP_INIT(&pmov->partitionMovements, topic_cnt * 3, + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + NULL, ConsumerPair_free); - RD_MAP_INIT(&pmov->partitionMovementsByTopic, - topic_cnt, - rd_map_str_cmp, - rd_map_str_hash, - NULL, - map_cpair_toppar_list_t_free); + RD_MAP_INIT(&pmov->partitionMovementsByTopic, topic_cnt, rd_map_str_cmp, + rd_map_str_hash, NULL, map_cpair_toppar_list_t_free); } -static void PartitionMovements_destroy (PartitionMovements_t *pmov) { +static void PartitionMovements_destroy(PartitionMovements_t *pmov) { RD_MAP_DESTROY(&pmov->partitionMovementsByTopic); RD_MAP_DESTROY(&pmov->partitionMovements); } -static ConsumerPair_t *PartitionMovements_removeMovementRecordOfPartition ( - PartitionMovements_t *pmov, - const rd_kafka_topic_partition_t *toppar) { +static ConsumerPair_t *PartitionMovements_removeMovementRecordOfPartition( + PartitionMovements_t *pmov, + const rd_kafka_topic_partition_t *toppar) { ConsumerPair_t *cpair; map_cpair_toppar_list_t *partitionMovementsForThisTopic; @@ -241,13 +232,13 @@ static ConsumerPair_t *PartitionMovements_removeMovementRecordOfPartition ( rd_assert(cpair); partitionMovementsForThisTopic = - RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic); + RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic); plist = RD_MAP_GET(partitionMovementsForThisTopic, cpair); rd_assert(plist); - rd_kafka_topic_partition_list_del(plist, - toppar->topic, toppar->partition); + rd_kafka_topic_partition_list_del(plist, toppar->topic, + toppar->partition); if (plist->cnt == 0) 
RD_MAP_DELETE(partitionMovementsForThisTopic, cpair); if (RD_MAP_IS_EMPTY(partitionMovementsForThisTopic)) @@ -256,40 +247,39 @@ static ConsumerPair_t *PartitionMovements_removeMovementRecordOfPartition ( return cpair; } -static void PartitionMovements_addPartitionMovementRecord ( - PartitionMovements_t *pmov, - const rd_kafka_topic_partition_t *toppar, - ConsumerPair_t *cpair) { +static void PartitionMovements_addPartitionMovementRecord( + PartitionMovements_t *pmov, + const rd_kafka_topic_partition_t *toppar, + ConsumerPair_t *cpair) { map_cpair_toppar_list_t *partitionMovementsForThisTopic; rd_kafka_topic_partition_list_t *plist; RD_MAP_SET(&pmov->partitionMovements, toppar, cpair); partitionMovementsForThisTopic = - RD_MAP_GET_OR_SET(&pmov->partitionMovementsByTopic, - toppar->topic, - map_cpair_toppar_list_t_new()); + RD_MAP_GET_OR_SET(&pmov->partitionMovementsByTopic, toppar->topic, + map_cpair_toppar_list_t_new()); - plist = RD_MAP_GET_OR_SET(partitionMovementsForThisTopic, - cpair, + plist = RD_MAP_GET_OR_SET(partitionMovementsForThisTopic, cpair, rd_kafka_topic_partition_list_new(16)); - rd_kafka_topic_partition_list_add(plist, - toppar->topic, toppar->partition); + rd_kafka_topic_partition_list_add(plist, toppar->topic, + toppar->partition); } -static void PartitionMovements_movePartition ( - PartitionMovements_t *pmov, - const rd_kafka_topic_partition_t *toppar, - const char *old_consumer, const char *new_consumer) { +static void +PartitionMovements_movePartition(PartitionMovements_t *pmov, + const rd_kafka_topic_partition_t *toppar, + const char *old_consumer, + const char *new_consumer) { if (RD_MAP_GET(&pmov->partitionMovements, toppar)) { /* This partition has previously moved */ ConsumerPair_t *existing_cpair; existing_cpair = - PartitionMovements_removeMovementRecordOfPartition( - pmov, toppar); + PartitionMovements_removeMovementRecordOfPartition(pmov, + toppar); rd_assert(!rd_strcmp(existing_cpair->dst, old_consumer)); @@ -297,26 +287,25 @@ 
static void PartitionMovements_movePartition ( /* Partition is not moving back to its * previous consumer */ PartitionMovements_addPartitionMovementRecord( - pmov, toppar, - ConsumerPair_new(existing_cpair->src, - new_consumer)); + pmov, toppar, + ConsumerPair_new(existing_cpair->src, + new_consumer)); } } else { PartitionMovements_addPartitionMovementRecord( - pmov, toppar, - ConsumerPair_new(old_consumer, new_consumer)); + pmov, toppar, ConsumerPair_new(old_consumer, new_consumer)); } } static const rd_kafka_topic_partition_t * -PartitionMovements_getTheActualPartitionToBeMoved ( - PartitionMovements_t *pmov, - const rd_kafka_topic_partition_t *toppar, - const char *oldConsumer, const char *newConsumer) { +PartitionMovements_getTheActualPartitionToBeMoved( + PartitionMovements_t *pmov, + const rd_kafka_topic_partition_t *toppar, + const char *oldConsumer, + const char *newConsumer) { ConsumerPair_t *cpair; - ConsumerPair_t reverse_cpair = { .src = newConsumer, - .dst = oldConsumer }; + ConsumerPair_t reverse_cpair = {.src = newConsumer, .dst = oldConsumer}; map_cpair_toppar_list_t *partitionMovementsForThisTopic; rd_kafka_topic_partition_list_t *plist; @@ -332,7 +321,7 @@ PartitionMovements_getTheActualPartitionToBeMoved ( } partitionMovementsForThisTopic = - RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic); + RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic); plist = RD_MAP_GET(partitionMovementsForThisTopic, &reverse_cpair); if (!plist) @@ -343,16 +332,16 @@ PartitionMovements_getTheActualPartitionToBeMoved ( #if FIXME -static rd_bool_t hasCycles (map_cpair_toppar_list_t *pairs) { - return rd_true; // FIXME +static rd_bool_t hasCycles(map_cpair_toppar_list_t *pairs) { + return rd_true; // FIXME } /** * @remark This method is only used by the AbstractStickyAssignorTest * in the Java client. 
-*/ -static rd_bool_t PartitionMovements_isSticky (rd_kafka_t *rk, - PartitionMovements_t *pmov) { + */ +static rd_bool_t PartitionMovements_isSticky(rd_kafka_t *rk, + PartitionMovements_t *pmov) { const char *topic; map_cpair_toppar_list_t *topicMovementPairs; @@ -363,14 +352,13 @@ static rd_bool_t PartitionMovements_isSticky (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions; rd_kafka_log( - rk, LOG_ERR, "STICKY", - "Sticky assignor: Stickiness is violated for " - "topic %s: partition movements for this topic " - "occurred among the following consumers: ", - topic); + rk, LOG_ERR, "STICKY", + "Sticky assignor: Stickiness is violated for " + "topic %s: partition movements for this topic " + "occurred among the following consumers: ", + topic); RD_MAP_FOREACH(cpair, partitions, topicMovementPairs) { - rd_kafka_log(rk, LOG_ERR, "STICKY", - " %s -> %s", + rd_kafka_log(rk, LOG_ERR, "STICKY", " %s -> %s", cpair->src, cpair->dst); } @@ -395,8 +383,8 @@ static rd_bool_t PartitionMovements_isSticky (rd_kafka_t *rk, * elem.key is the consumer member id string, * elem.value is the partition list. */ -static int sort_by_map_elem_val_toppar_list_cnt (const void *_a, - const void *_b) { +static int sort_by_map_elem_val_toppar_list_cnt(const void *_a, + const void *_b) { const rd_map_elem_t *a = _a, *b = _b; const rd_kafka_topic_partition_list_t *al = a->value, *bl = b->value; int r = al->cnt - bl->cnt; @@ -413,11 +401,11 @@ static int sort_by_map_elem_val_toppar_list_cnt (const void *_a, * assignments to consumers. 
*/ static void -assignPartition (const rd_kafka_topic_partition_t *partition, - rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, - map_str_toppar_list_t *currentAssignment, - map_str_toppar_list_t *consumer2AllPotentialPartitions, - map_toppar_str_t *currentPartitionConsumer) { +assignPartition(const rd_kafka_topic_partition_t *partition, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_str_toppar_list_t *currentAssignment, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_str_t *currentPartitionConsumer) { const rd_map_elem_t *elem; int i; @@ -425,16 +413,15 @@ assignPartition (const rd_kafka_topic_partition_t *partition, const char *consumer = (const char *)elem->key; const rd_kafka_topic_partition_list_t *partitions; - partitions = RD_MAP_GET(consumer2AllPotentialPartitions, - consumer); - if (!rd_kafka_topic_partition_list_find(partitions, - partition->topic, - partition->partition)) + partitions = + RD_MAP_GET(consumer2AllPotentialPartitions, consumer); + if (!rd_kafka_topic_partition_list_find( + partitions, partition->topic, partition->partition)) continue; rd_kafka_topic_partition_list_add( - RD_MAP_GET(currentAssignment, consumer), - partition->topic, partition->partition); + RD_MAP_GET(currentAssignment, consumer), partition->topic, + partition->partition); RD_MAP_SET(currentPartitionConsumer, rd_kafka_topic_partition_copy(partition), consumer); @@ -451,14 +438,13 @@ assignPartition (const rd_kafka_topic_partition_t *partition, /** * @returns true if the partition has two or more potential consumers. 
*/ -static RD_INLINE rd_bool_t -partitionCanParticipateInReassignment ( - const rd_kafka_topic_partition_t *partition, - map_toppar_list_t *partition2AllPotentialConsumers) { +static RD_INLINE rd_bool_t partitionCanParticipateInReassignment( + const rd_kafka_topic_partition_t *partition, + map_toppar_list_t *partition2AllPotentialConsumers) { rd_list_t *consumers; - if (!(consumers = RD_MAP_GET(partition2AllPotentialConsumers, - partition))) + if (!(consumers = + RD_MAP_GET(partition2AllPotentialConsumers, partition))) return rd_false; return rd_list_cnt(consumers) >= 2; @@ -469,18 +455,17 @@ partitionCanParticipateInReassignment ( * @returns true if consumer can participate in reassignment based on * its current assignment. */ -static RD_INLINE rd_bool_t -consumerCanParticipateInReassignment ( - rd_kafka_t *rk, - const char *consumer, - map_str_toppar_list_t *currentAssignment, - map_str_toppar_list_t *consumer2AllPotentialPartitions, - map_toppar_list_t *partition2AllPotentialConsumers) { +static RD_INLINE rd_bool_t consumerCanParticipateInReassignment( + rd_kafka_t *rk, + const char *consumer, + map_str_toppar_list_t *currentAssignment, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_list_t *partition2AllPotentialConsumers) { const rd_kafka_topic_partition_list_t *currentPartitions = - RD_MAP_GET(currentAssignment, consumer); + RD_MAP_GET(currentAssignment, consumer); int currentAssignmentSize = currentPartitions->cnt; - int maxAssignmentSize = RD_MAP_GET(consumer2AllPotentialPartitions, - consumer)->cnt; + int maxAssignmentSize = + RD_MAP_GET(consumer2AllPotentialPartitions, consumer)->cnt; int i; /* FIXME: And then what? Is this a local error? If so, assert. */ @@ -499,12 +484,12 @@ consumerCanParticipateInReassignment ( /* If any of the partitions assigned to a consumer is subject to * reassignment the consumer itself is subject to reassignment. 
*/ - for (i = 0 ; i < currentPartitions->cnt ; i++) { + for (i = 0; i < currentPartitions->cnt; i++) { const rd_kafka_topic_partition_t *partition = - ¤tPartitions->elems[i]; + ¤tPartitions->elems[i]; if (partitionCanParticipateInReassignment( - partition, partition2AllPotentialConsumers)) + partition, partition2AllPotentialConsumers)) return rd_true; } @@ -515,30 +500,28 @@ consumerCanParticipateInReassignment ( /** * @brief Process moving partition from old consumer to new consumer. */ -static void processPartitionMovement ( - rd_kafka_t *rk, - PartitionMovements_t *partitionMovements, - const rd_kafka_topic_partition_t *partition, - const char *newConsumer, - map_str_toppar_list_t *currentAssignment, - rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, - map_toppar_str_t *currentPartitionConsumer) { - - const char *oldConsumer = RD_MAP_GET(currentPartitionConsumer, - partition); +static void processPartitionMovement( + rd_kafka_t *rk, + PartitionMovements_t *partitionMovements, + const rd_kafka_topic_partition_t *partition, + const char *newConsumer, + map_str_toppar_list_t *currentAssignment, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_toppar_str_t *currentPartitionConsumer) { + + const char *oldConsumer = + RD_MAP_GET(currentPartitionConsumer, partition); PartitionMovements_movePartition(partitionMovements, partition, oldConsumer, newConsumer); - rd_kafka_topic_partition_list_add(RD_MAP_GET(currentAssignment, - newConsumer), - partition->topic, - partition->partition); + rd_kafka_topic_partition_list_add( + RD_MAP_GET(currentAssignment, newConsumer), partition->topic, + partition->partition); - rd_kafka_topic_partition_list_del(RD_MAP_GET(currentAssignment, - oldConsumer), - partition->topic, - partition->partition); + rd_kafka_topic_partition_list_del( + RD_MAP_GET(currentAssignment, oldConsumer), partition->topic, + partition->partition); RD_MAP_SET(currentPartitionConsumer, rd_kafka_topic_partition_copy(partition), newConsumer); 
@@ -548,11 +531,9 @@ static void processPartitionMovement ( sort_by_map_elem_val_toppar_list_cnt); rd_kafka_dbg(rk, ASSIGNOR, "STICKY", - "%s [%"PRId32"] %sassigned to %s (from %s)", - partition->topic, - partition->partition, - oldConsumer ? "re" : "", - newConsumer, + "%s [%" PRId32 "] %sassigned to %s (from %s)", + partition->topic, partition->partition, + oldConsumer ? "re" : "", newConsumer, oldConsumer ? oldConsumer : "(none)"); } @@ -560,49 +541,40 @@ static void processPartitionMovement ( /** * @brief Reassign \p partition to \p newConsumer */ -static void -reassignPartitionToConsumer ( - rd_kafka_t *rk, - PartitionMovements_t *partitionMovements, - const rd_kafka_topic_partition_t *partition, - map_str_toppar_list_t *currentAssignment, - rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, - map_toppar_str_t *currentPartitionConsumer, - const char *newConsumer) { +static void reassignPartitionToConsumer( + rd_kafka_t *rk, + PartitionMovements_t *partitionMovements, + const rd_kafka_topic_partition_t *partition, + map_str_toppar_list_t *currentAssignment, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_toppar_str_t *currentPartitionConsumer, + const char *newConsumer) { const char *consumer = RD_MAP_GET(currentPartitionConsumer, partition); const rd_kafka_topic_partition_t *partitionToBeMoved; /* Find the correct partition movement considering * the stickiness requirement. 
*/ - partitionToBeMoved = - PartitionMovements_getTheActualPartitionToBeMoved( - partitionMovements, - partition, - consumer, - newConsumer); - - processPartitionMovement( - rk, - partitionMovements, - partitionToBeMoved, - newConsumer, - currentAssignment, - sortedCurrentSubscriptions, - currentPartitionConsumer); + partitionToBeMoved = PartitionMovements_getTheActualPartitionToBeMoved( + partitionMovements, partition, consumer, newConsumer); + + processPartitionMovement(rk, partitionMovements, partitionToBeMoved, + newConsumer, currentAssignment, + sortedCurrentSubscriptions, + currentPartitionConsumer); } /** * @brief Reassign \p partition to an eligible new consumer. */ -static void reassignPartition ( - rd_kafka_t *rk, - PartitionMovements_t *partitionMovements, - const rd_kafka_topic_partition_t *partition, - map_str_toppar_list_t *currentAssignment, - rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, - map_toppar_str_t *currentPartitionConsumer, - map_str_toppar_list_t *consumer2AllPotentialPartitions) { +static void +reassignPartition(rd_kafka_t *rk, + PartitionMovements_t *partitionMovements, + const rd_kafka_topic_partition_t *partition, + map_str_toppar_list_t *currentAssignment, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_toppar_str_t *currentPartitionConsumer, + map_str_toppar_list_t *consumer2AllPotentialPartitions) { const rd_map_elem_t *elem; int i; @@ -612,18 +584,13 @@ static void reassignPartition ( const char *newConsumer = (const char *)elem->key; if (rd_kafka_topic_partition_list_find( - RD_MAP_GET(consumer2AllPotentialPartitions, - newConsumer), - partition->topic, - partition->partition)) { + RD_MAP_GET(consumer2AllPotentialPartitions, + newConsumer), + partition->topic, partition->partition)) { reassignPartitionToConsumer( - rk, - partitionMovements, - partition, - currentAssignment, - sortedCurrentSubscriptions, - currentPartitionConsumer, - newConsumer); + rk, partitionMovements, partition, + currentAssignment, 
sortedCurrentSubscriptions, + currentPartitionConsumer, newConsumer); return; } @@ -655,29 +622,30 @@ static void reassignPartition ( * @returns true if the given assignment is balanced; false otherwise */ static rd_bool_t -isBalanced (rd_kafka_t *rk, - map_str_toppar_list_t *currentAssignment, - const rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, - map_str_toppar_list_t *consumer2AllPotentialPartitions, - map_toppar_list_t *partition2AllPotentialConsumers) { - - int minimum = - ((const rd_kafka_topic_partition_list_t *) - ((const rd_map_elem_t *)rd_list_first( - sortedCurrentSubscriptions))->value)->cnt; - int maximum = - ((const rd_kafka_topic_partition_list_t *) - ((const rd_map_elem_t *)rd_list_last( - sortedCurrentSubscriptions))->value)->cnt; +isBalanced(rd_kafka_t *rk, + map_str_toppar_list_t *currentAssignment, + const rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_list_t *partition2AllPotentialConsumers) { + + int minimum = ((const rd_kafka_topic_partition_list_t + *)((const rd_map_elem_t *)rd_list_first( + sortedCurrentSubscriptions)) + ->value) + ->cnt; + int maximum = ((const rd_kafka_topic_partition_list_t + *)((const rd_map_elem_t *)rd_list_last( + sortedCurrentSubscriptions)) + ->value) + ->cnt; /* Mapping from partitions to the consumer assigned to them */ // FIXME: don't create prior to min/max check below */ map_toppar_str_t allPartitions = RD_MAP_INITIALIZER( - RD_MAP_CNT(partition2AllPotentialConsumers), - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - NULL /* references currentAssignment */, - NULL /* references currentAssignment */); + RD_MAP_CNT(partition2AllPotentialConsumers), + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + NULL /* references currentAssignment */, + NULL /* references currentAssignment */); /* Iterators */ const rd_kafka_topic_partition_list_t *partitions; @@ -700,18 +668,19 @@ isBalanced 
(rd_kafka_t *rk, /* Create a mapping from partitions to the consumer assigned to them */ RD_MAP_FOREACH(consumer, partitions, currentAssignment) { - for (i = 0 ; i < partitions->cnt ; i++) { + for (i = 0; i < partitions->cnt; i++) { const rd_kafka_topic_partition_t *partition = - &partitions->elems[i]; + &partitions->elems[i]; const char *existing; if ((existing = RD_MAP_GET(&allPartitions, partition))) rd_kafka_log(rk, LOG_ERR, "STICKY", - "Sticky assignor: %s [%"PRId32"] " + "Sticky assignor: %s [%" PRId32 + "] " "is assigned to more than one " "consumer (%s and %s)", partition->topic, - partition->partition, - existing, consumer); + partition->partition, existing, + consumer); RD_MAP_SET(&allPartitions, partition, consumer); } @@ -730,11 +699,11 @@ isBalanced (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *potentialTopicPartitions; const rd_kafka_topic_partition_list_t *consumerPartitions; - consumerPartitions = (const rd_kafka_topic_partition_list_t *) - elem->value; + consumerPartitions = + (const rd_kafka_topic_partition_list_t *)elem->value; potentialTopicPartitions = - RD_MAP_GET(consumer2AllPotentialPartitions, consumer); + RD_MAP_GET(consumer2AllPotentialPartitions, consumer); /* Skip if this consumer already has all the topic partitions * it can get. 
*/ @@ -743,39 +712,36 @@ isBalanced (rd_kafka_t *rk, /* Otherwise make sure it can't get any more partitions */ - for (i = 0 ; i < potentialTopicPartitions->cnt ; i++) { + for (i = 0; i < potentialTopicPartitions->cnt; i++) { const rd_kafka_topic_partition_t *partition = - &potentialTopicPartitions->elems[i]; + &potentialTopicPartitions->elems[i]; const char *otherConsumer; int otherConsumerPartitionCount; if (rd_kafka_topic_partition_list_find( - consumerPartitions, - partition->topic, - partition->partition)) + consumerPartitions, partition->topic, + partition->partition)) continue; otherConsumer = RD_MAP_GET(&allPartitions, partition); - otherConsumerPartitionCount = RD_MAP_GET( - currentAssignment, otherConsumer)->cnt; + otherConsumerPartitionCount = + RD_MAP_GET(currentAssignment, otherConsumer)->cnt; if (consumerPartitions->cnt < otherConsumerPartitionCount) { - rd_kafka_dbg(rk, ASSIGNOR, "STICKY", - "%s [%"PRId32"] can be moved from " - "consumer %s (%d partition(s)) to " - "consumer %s (%d partition(s)) " - "for a more balanced assignment", - partition->topic, - partition->partition, - otherConsumer, - otherConsumerPartitionCount, - consumer, - consumerPartitions->cnt); + rd_kafka_dbg( + rk, ASSIGNOR, "STICKY", + "%s [%" PRId32 + "] can be moved from " + "consumer %s (%d partition(s)) to " + "consumer %s (%d partition(s)) " + "for a more balanced assignment", + partition->topic, partition->partition, + otherConsumer, otherConsumerPartitionCount, + consumer, consumerPartitions->cnt); RD_MAP_DESTROY(&allPartitions); return rd_false; } - } } @@ -790,16 +756,15 @@ isBalanced (rd_kafka_t *rk, * @returns true if reassignment was performed. 
*/ static rd_bool_t -performReassignments ( - rd_kafka_t *rk, - PartitionMovements_t *partitionMovements, - rd_kafka_topic_partition_list_t *reassignablePartitions, - map_str_toppar_list_t *currentAssignment, - map_toppar_cgpair_t *prevAssignment, - rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, - map_str_toppar_list_t *consumer2AllPotentialPartitions, - map_toppar_list_t *partition2AllPotentialConsumers, - map_toppar_str_t *currentPartitionConsumer) { +performReassignments(rd_kafka_t *rk, + PartitionMovements_t *partitionMovements, + rd_kafka_topic_partition_list_t *reassignablePartitions, + map_str_toppar_list_t *currentAssignment, + map_toppar_cgpair_t *prevAssignment, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_list_t *partition2AllPotentialConsumers, + map_toppar_str_t *currentPartitionConsumer) { rd_bool_t reassignmentPerformed = rd_false; rd_bool_t modified, saveIsBalanced = rd_false; int iterations = 0; @@ -817,18 +782,16 @@ performReassignments ( * partition with least potential consumers and if needed) * until the full list is processed or a balance is achieved. 
*/ - for (i = 0 ; i < reassignablePartitions->cnt && - !isBalanced(rk, - currentAssignment, - sortedCurrentSubscriptions, - consumer2AllPotentialPartitions, - partition2AllPotentialConsumers) ; + for (i = 0; i < reassignablePartitions->cnt && + !isBalanced(rk, currentAssignment, + sortedCurrentSubscriptions, + consumer2AllPotentialPartitions, + partition2AllPotentialConsumers); i++) { const rd_kafka_topic_partition_t *partition = - &reassignablePartitions->elems[i]; - const rd_list_t *consumers = - RD_MAP_GET(partition2AllPotentialConsumers, - partition); + &reassignablePartitions->elems[i]; + const rd_list_t *consumers = RD_MAP_GET( + partition2AllPotentialConsumers, partition); const char *consumer, *otherConsumer; const ConsumerGenerationPair_t *prevcgp; const rd_kafka_topic_partition_list_t *currAssignment; @@ -837,36 +800,34 @@ performReassignments ( /* FIXME: Is this a local error/bug? If so, assert */ if (rd_list_cnt(consumers) <= 1) rd_kafka_log( - rk, LOG_ERR, "STICKY", - "Sticky assignor: expected more than " - "one potential consumer for partition " - "%s [%"PRId32"]", - partition->topic, - partition->partition); + rk, LOG_ERR, "STICKY", + "Sticky assignor: expected more than " + "one potential consumer for partition " + "%s [%" PRId32 "]", + partition->topic, partition->partition); /* The partition must have a current consumer */ - consumer = RD_MAP_GET(currentPartitionConsumer, - partition); + consumer = + RD_MAP_GET(currentPartitionConsumer, partition); rd_assert(consumer); - currAssignment = RD_MAP_GET(currentAssignment, - consumer); + currAssignment = + RD_MAP_GET(currentAssignment, consumer); prevcgp = RD_MAP_GET(prevAssignment, partition); if (prevcgp && currAssignment->cnt > - RD_MAP_GET(currentAssignment, - prevcgp->consumer)->cnt + 1) { + RD_MAP_GET(currentAssignment, prevcgp->consumer) + ->cnt + + 1) { reassignPartitionToConsumer( - rk, - partitionMovements, - partition, - currentAssignment, - sortedCurrentSubscriptions, - 
currentPartitionConsumer, - prevcgp->consumer); + rk, partitionMovements, partition, + currentAssignment, + sortedCurrentSubscriptions, + currentPartitionConsumer, + prevcgp->consumer); reassignmentPerformed = rd_true; - modified = rd_true; + modified = rd_true; continue; } @@ -877,21 +838,20 @@ performReassignments ( continue; if (currAssignment->cnt <= - RD_MAP_GET(currentAssignment, - otherConsumer)->cnt + 1) + RD_MAP_GET(currentAssignment, otherConsumer) + ->cnt + + 1) continue; reassignPartition( - rk, - partitionMovements, - partition, - currentAssignment, - sortedCurrentSubscriptions, - currentPartitionConsumer, - consumer2AllPotentialPartitions); + rk, partitionMovements, partition, + currentAssignment, + sortedCurrentSubscriptions, + currentPartitionConsumer, + consumer2AllPotentialPartitions); reassignmentPerformed = rd_true; - modified = rd_true; + modified = rd_true; break; } } @@ -904,8 +864,8 @@ performReassignments ( rd_kafka_dbg(rk, ASSIGNOR, "STICKY", "Reassignment %sperformed after %d iteration(s) of %d " "reassignable partition(s)%s", - reassignmentPerformed ? "" : "not ", - iterations, reassignablePartitions->cnt, + reassignmentPerformed ? "" : "not ", iterations, + reassignablePartitions->cnt, saveIsBalanced ? ": assignment is balanced" : ""); return reassignmentPerformed; @@ -922,11 +882,11 @@ performReassignments ( * Lower balance score indicates a more balanced assignment. * FIXME: should be called imbalance score then? 
*/ -static int getBalanceScore (map_str_toppar_list_t *assignment) { +static int getBalanceScore(map_str_toppar_list_t *assignment) { const char *consumer; const rd_kafka_topic_partition_list_t *partitions; int *sizes; - int cnt = 0; + int cnt = 0; int score = 0; int i, next; @@ -934,13 +894,13 @@ static int getBalanceScore (map_str_toppar_list_t *assignment) { if (RD_MAP_CNT(assignment) < 2) return 0; - sizes = rd_malloc(sizeof(*sizes) * RD_MAP_CNT(assignment)); + sizes = rd_malloc(sizeof(*sizes) * RD_MAP_CNT(assignment)); RD_MAP_FOREACH(consumer, partitions, assignment) - sizes[cnt++] = partitions->cnt; + sizes[cnt++] = partitions->cnt; - for (next = 0 ; next < cnt ; next++) - for (i = next+1 ; i < cnt ; i++) + for (next = 0; next < cnt; next++) + for (i = next + 1; i < cnt; i++) score += abs(sizes[next] - sizes[i]); rd_free(sizes); @@ -956,28 +916,28 @@ static int getBalanceScore (map_str_toppar_list_t *assignment) { /** * @brief Balance the current assignment using the data structures * created in assign_cb(). 
*/ -static void -balance (rd_kafka_t *rk, - PartitionMovements_t *partitionMovements, - map_str_toppar_list_t *currentAssignment, - map_toppar_cgpair_t *prevAssignment, - rd_kafka_topic_partition_list_t *sortedPartitions, - rd_kafka_topic_partition_list_t *unassignedPartitions, - rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, - map_str_toppar_list_t *consumer2AllPotentialPartitions, - map_toppar_list_t *partition2AllPotentialConsumers, - map_toppar_str_t *currentPartitionConsumer, - rd_bool_t revocationRequired) { +static void balance(rd_kafka_t *rk, + PartitionMovements_t *partitionMovements, + map_str_toppar_list_t *currentAssignment, + map_toppar_cgpair_t *prevAssignment, + rd_kafka_topic_partition_list_t *sortedPartitions, + rd_kafka_topic_partition_list_t *unassignedPartitions, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_list_t *partition2AllPotentialConsumers, + map_toppar_str_t *currentPartitionConsumer, + rd_bool_t revocationRequired) { /* If the consumer with most assignments (thus the last element * in the ascendingly ordered sortedCurrentSubscriptions list) has * zero partitions assigned it means there is no current assignment * for any consumer and the group is thus initializing for the first * time. */ - rd_bool_t initializing = - ((const rd_kafka_topic_partition_list_t *) - ((const rd_map_elem_t *)rd_list_last( - sortedCurrentSubscriptions))->value)->cnt == 0; + rd_bool_t initializing = ((const rd_kafka_topic_partition_list_t + *)((const rd_map_elem_t *)rd_list_last( + sortedCurrentSubscriptions)) + ->value) + ->cnt == 0; rd_bool_t reassignmentPerformed = rd_false; map_str_toppar_list_t fixedAssignments = @@ -989,18 +949,15 @@ balance (rd_kafka_t *rk, * to currentAssignment at the end of * this function. 
*/); - map_str_toppar_list_t preBalanceAssignment = - RD_MAP_INITIALIZER(RD_MAP_CNT(currentAssignment), - rd_map_str_cmp, - rd_map_str_hash, - NULL /* references currentAssignment */, - rd_kafka_topic_partition_list_destroy_free); - map_toppar_str_t preBalancePartitionConsumers = - RD_MAP_INITIALIZER(RD_MAP_CNT(partition2AllPotentialConsumers), - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - NULL /* refs currentPartitionConsumer */); + map_str_toppar_list_t preBalanceAssignment = RD_MAP_INITIALIZER( + RD_MAP_CNT(currentAssignment), rd_map_str_cmp, rd_map_str_hash, + NULL /* references currentAssignment */, + rd_kafka_topic_partition_list_destroy_free); + map_toppar_str_t preBalancePartitionConsumers = RD_MAP_INITIALIZER( + RD_MAP_CNT(partition2AllPotentialConsumers), + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, + NULL /* refs currentPartitionConsumer */); int newScore, oldScore; /* Iterator variables */ const rd_kafka_topic_partition_t *partition; @@ -1009,7 +966,7 @@ balance (rd_kafka_t *rk, int i; /* Assign all unassigned partitions */ - for (i = 0 ; i < unassignedPartitions->cnt ; i++) { + for (i = 0; i < unassignedPartitions->cnt; i++) { partition = &unassignedPartitions->elems[i]; /* Skip if there is no potential consumer for the partition. @@ -1020,10 +977,9 @@ balance (rd_kafka_t *rk, continue; } - assignPartition(partition, sortedCurrentSubscriptions, - currentAssignment, - consumer2AllPotentialPartitions, - currentPartitionConsumer); + assignPartition( + partition, sortedCurrentSubscriptions, currentAssignment, + consumer2AllPotentialPartitions, currentPartitionConsumer); } @@ -1031,12 +987,11 @@ balance (rd_kafka_t *rk, * actually be reassigned. 
*/ RD_MAP_FOREACH(partition, ignore, partition2AllPotentialConsumers) { if (partitionCanParticipateInReassignment( - partition, partition2AllPotentialConsumers)) + partition, partition2AllPotentialConsumers)) continue; - rd_kafka_topic_partition_list_del(sortedPartitions, - partition->topic, - partition->partition); + rd_kafka_topic_partition_list_del( + sortedPartitions, partition->topic, partition->partition); rd_kafka_topic_partition_list_del(unassignedPartitions, partition->topic, partition->partition); @@ -1053,11 +1008,9 @@ balance (rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions; if (consumerCanParticipateInReassignment( - rk, - consumer, - currentAssignment, - consumer2AllPotentialPartitions, - partition2AllPotentialConsumers)) + rk, consumer, currentAssignment, + consumer2AllPotentialPartitions, + partition2AllPotentialConsumers)) continue; rd_list_remove_elem(sortedCurrentSubscriptions, i); @@ -1065,7 +1018,7 @@ balance (rd_kafka_t *rk, * to rewind the iterator. */ partitions = rd_kafka_topic_partition_list_copy( - RD_MAP_GET(currentAssignment, consumer)); + RD_MAP_GET(currentAssignment, consumer)); RD_MAP_DELETE(currentAssignment, consumer); RD_MAP_SET(&fixedAssignments, consumer, partitions); @@ -1078,10 +1031,9 @@ balance (rd_kafka_t *rk, "(initializing=%s, revocationRequired=%s, " "%d fixed assignments)", (int)RD_MAP_CNT(consumer2AllPotentialPartitions), - sortedPartitions->cnt, - unassignedPartitions->cnt, - initializing ? "true":"false", - revocationRequired ? "true":"false", + sortedPartitions->cnt, unassignedPartitions->cnt, + initializing ? "true" : "false", + revocationRequired ? "true" : "false", (int)RD_MAP_CNT(&fixedAssignments)); /* Create a deep copy of the current assignment so we can revert to it @@ -1098,26 +1050,17 @@ balance (rd_kafka_t *rk, * changes, first try to balance by only moving newly added partitions. 
*/ if (!revocationRequired && unassignedPartitions->cnt > 0) - performReassignments(rk, - partitionMovements, - unassignedPartitions, - currentAssignment, - prevAssignment, - sortedCurrentSubscriptions, - consumer2AllPotentialPartitions, - partition2AllPotentialConsumers, - currentPartitionConsumer); - - reassignmentPerformed = - performReassignments(rk, - partitionMovements, - sortedPartitions, - currentAssignment, - prevAssignment, - sortedCurrentSubscriptions, - consumer2AllPotentialPartitions, - partition2AllPotentialConsumers, - currentPartitionConsumer); + performReassignments( + rk, partitionMovements, unassignedPartitions, + currentAssignment, prevAssignment, + sortedCurrentSubscriptions, consumer2AllPotentialPartitions, + partition2AllPotentialConsumers, currentPartitionConsumer); + + reassignmentPerformed = performReassignments( + rk, partitionMovements, sortedPartitions, currentAssignment, + prevAssignment, sortedCurrentSubscriptions, + consumer2AllPotentialPartitions, partition2AllPotentialConsumers, + currentPartitionConsumer); /* If we are not preserving existing assignments and we have made * changes to the current assignment make sure we are getting a more @@ -1125,7 +1068,7 @@ balance (rd_kafka_t *rk, if (!initializing && reassignmentPerformed && (newScore = getBalanceScore(currentAssignment)) >= - (oldScore = getBalanceScore(&preBalanceAssignment))) { + (oldScore = getBalanceScore(&preBalanceAssignment))) { rd_kafka_dbg(rk, ASSIGNOR, "STICKY", "Reassignment performed but keeping previous " @@ -1136,9 +1079,10 @@ balance (rd_kafka_t *rk, newScore, (int)RD_MAP_CNT(currentAssignment), oldScore, (int)RD_MAP_CNT(&preBalanceAssignment)); - RD_MAP_COPY(currentAssignment, &preBalanceAssignment, - NULL /* just reference the key */, - (rd_map_copy_t*)rd_kafka_topic_partition_list_copy); + RD_MAP_COPY( + currentAssignment, &preBalanceAssignment, + NULL /* just reference the key */, + (rd_map_copy_t *)rd_kafka_topic_partition_list_copy); 
RD_MAP_CLEAR(currentPartitionConsumer); RD_MAP_COPY(currentPartitionConsumer, @@ -1157,7 +1101,7 @@ balance (rd_kafka_t *rk, RD_MAP_FOREACH_ELEM(elem, &fixedAssignments.rmap) { const char *consumer = elem->key; rd_kafka_topic_partition_list_t *partitions = - (rd_kafka_topic_partition_list_t *)elem->value; + (rd_kafka_topic_partition_list_t *)elem->value; RD_MAP_SET(currentAssignment, consumer, partitions); @@ -1174,28 +1118,20 @@ balance (rd_kafka_t *rk, - - - - - - - /** * @brief Populate subscriptions, current and previous assignments based on the * \p members assignments. */ -static void -prepopulateCurrentAssignments ( - rd_kafka_t *rk, - rd_kafka_group_member_t *members, - size_t member_cnt, - map_str_toppar_list_t *subscriptions, - map_str_toppar_list_t *currentAssignment, - map_toppar_cgpair_t *prevAssignment, - map_toppar_str_t *currentPartitionConsumer, - map_str_toppar_list_t *consumer2AllPotentialPartitions, - size_t estimated_partition_cnt) { +static void prepopulateCurrentAssignments( + rd_kafka_t *rk, + rd_kafka_group_member_t *members, + size_t member_cnt, + map_str_toppar_list_t *subscriptions, + map_str_toppar_list_t *currentAssignment, + map_toppar_cgpair_t *prevAssignment, + map_toppar_str_t *currentPartitionConsumer, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + size_t estimated_partition_cnt) { /* We need to process subscriptions' user data with each consumer's * reported generation in mind. @@ -1205,15 +1141,12 @@ prepopulateCurrentAssignments ( /* For each partition we create a sorted list (by generation) of * its consumers. 
*/ - RD_MAP_LOCAL_INITIALIZER(sortedPartitionConsumersByGeneration, - member_cnt * 10 /* FIXME */, - const rd_kafka_topic_partition_t *, - /* List of ConsumerGenerationPair_t */ - rd_list_t *, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - NULL, - rd_list_destroy_free); + RD_MAP_LOCAL_INITIALIZER( + sortedPartitionConsumersByGeneration, member_cnt * 10 /* FIXME */, + const rd_kafka_topic_partition_t *, + /* List of ConsumerGenerationPair_t */ + rd_list_t *, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, NULL, rd_list_destroy_free); const rd_kafka_topic_partition_t *partition; rd_list_t *consumers; int i; @@ -1222,7 +1155,7 @@ prepopulateCurrentAssignments ( * add the member and its generation to * sortedPartitionConsumersByGeneration (which is sorted afterwards) * indexed by the partition. */ - for (i = 0 ; i < (int)member_cnt ; i++) { + for (i = 0; i < (int)member_cnt; i++) { rd_kafka_group_member_t *consumer = &members[i]; int j; @@ -1235,42 +1168,40 @@ prepopulateCurrentAssignments ( RD_MAP_SET(consumer2AllPotentialPartitions, consumer->rkgm_member_id->str, rd_kafka_topic_partition_list_new( - (int)estimated_partition_cnt)); + (int)estimated_partition_cnt)); if (!consumer->rkgm_owned) continue; - for (j = 0 ; j < (int)consumer->rkgm_owned->cnt ; j++) { + for (j = 0; j < (int)consumer->rkgm_owned->cnt; j++) { partition = &consumer->rkgm_owned->elems[j]; consumers = RD_MAP_GET_OR_SET( - &sortedPartitionConsumersByGeneration, - partition, - rd_list_new(10, - ConsumerGenerationPair_destroy)); + &sortedPartitionConsumersByGeneration, partition, + rd_list_new(10, ConsumerGenerationPair_destroy)); if (consumer->rkgm_generation != -1 && rd_list_find( - consumers, &consumer->rkgm_generation, - ConsumerGenerationPair_cmp_generation)) { - rd_kafka_log(rk, LOG_WARNING, "STICKY", - "Sticky assignor: " - "%s [%"PRId32"] is assigned to " - "multiple consumers with same " - "generation %d: " - "skipping member %.*s", - partition->topic, - 
partition->partition, - consumer->rkgm_generation, - RD_KAFKAP_STR_PR(consumer-> - rkgm_member_id)); + consumers, &consumer->rkgm_generation, + ConsumerGenerationPair_cmp_generation)) { + rd_kafka_log( + rk, LOG_WARNING, "STICKY", + "Sticky assignor: " + "%s [%" PRId32 + "] is assigned to " + "multiple consumers with same " + "generation %d: " + "skipping member %.*s", + partition->topic, partition->partition, + consumer->rkgm_generation, + RD_KAFKAP_STR_PR(consumer->rkgm_member_id)); continue; } rd_list_add(consumers, ConsumerGenerationPair_new( - consumer->rkgm_member_id->str, - consumer->rkgm_generation)); + consumer->rkgm_member_id->str, + consumer->rkgm_generation)); RD_MAP_SET(currentPartitionConsumer, rd_kafka_topic_partition_copy(partition), @@ -1293,21 +1224,20 @@ prepopulateCurrentAssignments ( /* Add current (highest generation) consumer * to currentAssignment. */ - current = rd_list_elem(consumers, 0); + current = rd_list_elem(consumers, 0); partitions = RD_MAP_GET(currentAssignment, current->consumer); - rd_kafka_topic_partition_list_add(partitions, - partition->topic, + rd_kafka_topic_partition_list_add(partitions, partition->topic, partition->partition); /* Add previous (next highest generation) consumer, if any, * to prevAssignment. */ previous = rd_list_elem(consumers, 1); if (previous) - RD_MAP_SET(prevAssignment, - rd_kafka_topic_partition_copy(partition), - ConsumerGenerationPair_new( - previous->consumer, - previous->generation)); + RD_MAP_SET( + prevAssignment, + rd_kafka_topic_partition_copy(partition), + ConsumerGenerationPair_new(previous->consumer, + previous->generation)); } RD_MAP_DESTROY(&sortedPartitionConsumersByGeneration); @@ -1317,11 +1247,11 @@ prepopulateCurrentAssignments ( /** * @brief Populate maps for potential partitions per consumer and vice-versa. 
*/ -static void populatePotentialMaps ( - const rd_kafka_assignor_topic_t *atopic, - map_toppar_list_t *partition2AllPotentialConsumers, - map_str_toppar_list_t *consumer2AllPotentialPartitions, - size_t estimated_partition_cnt) { +static void +populatePotentialMaps(const rd_kafka_assignor_topic_t *atopic, + map_toppar_list_t *partition2AllPotentialConsumers, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + size_t estimated_partition_cnt) { int i; const rd_kafka_group_member_t *rkgm; @@ -1336,32 +1266,31 @@ static void populatePotentialMaps ( RD_LIST_FOREACH(rkgm, &atopic->members, i) { const char *consumer = rkgm->rkgm_member_id->str; rd_kafka_topic_partition_list_t *partitions = - RD_MAP_GET(consumer2AllPotentialPartitions, consumer); + RD_MAP_GET(consumer2AllPotentialPartitions, consumer); int j; rd_assert(partitions != NULL); - for (j = 0 ; j < atopic->metadata->partition_cnt ; j++) { + for (j = 0; j < atopic->metadata->partition_cnt; j++) { rd_kafka_topic_partition_t *partition; rd_list_t *consumers; /* consumer2AllPotentialPartitions[consumer] += part */ partition = rd_kafka_topic_partition_list_add( - partitions, - atopic->metadata->topic, - atopic->metadata->partitions[j].id); + partitions, atopic->metadata->topic, + atopic->metadata->partitions[j].id); /* partition2AllPotentialConsumers[part] += consumer */ - if (!(consumers = RD_MAP_GET( - partition2AllPotentialConsumers, - partition))) { + if (!(consumers = + RD_MAP_GET(partition2AllPotentialConsumers, + partition))) { consumers = rd_list_new( - RD_MAX(2, - (int)estimated_partition_cnt/2), - NULL); - RD_MAP_SET(partition2AllPotentialConsumers, - rd_kafka_topic_partition_copy( - partition), consumers); + RD_MAX(2, (int)estimated_partition_cnt / 2), + NULL); + RD_MAP_SET( + partition2AllPotentialConsumers, + rd_kafka_topic_partition_copy(partition), + consumers); } rd_list_add(consumers, (void *)consumer); } @@ -1378,11 +1307,11 @@ static void populatePotentialMaps ( * are symmetrical we only 
check one of them. * ^ FIXME, but we do. */ -static rd_bool_t areSubscriptionsIdentical ( - map_toppar_list_t *partition2AllPotentialConsumers, - map_str_toppar_list_t *consumer2AllPotentialPartitions) { +static rd_bool_t areSubscriptionsIdentical( + map_toppar_list_t *partition2AllPotentialConsumers, + map_str_toppar_list_t *consumer2AllPotentialPartitions) { const void *ignore; - const rd_list_t *lcurr, *lprev = NULL; + const rd_list_t *lcurr, *lprev = NULL; const rd_kafka_topic_partition_list_t *pcurr, *pprev = NULL; RD_MAP_FOREACH(ignore, lcurr, partition2AllPotentialConsumers) { @@ -1393,7 +1322,7 @@ static rd_bool_t areSubscriptionsIdentical ( RD_MAP_FOREACH(ignore, pcurr, consumer2AllPotentialPartitions) { if (pprev && rd_kafka_topic_partition_list_cmp( - pcurr, pprev, rd_kafka_topic_partition_cmp)) + pcurr, pprev, rd_kafka_topic_partition_cmp)) return rd_false; pprev = pcurr; } @@ -1411,8 +1340,8 @@ static rd_bool_t areSubscriptionsIdentical ( * secondarily by the topic name. * Used by sortPartitions(). */ -static int toppar_sort_by_list_cnt (const void *_a, const void *_b, - void *opaque) { +static int +toppar_sort_by_list_cnt(const void *_a, const void *_b, void *opaque) { const rd_kafka_topic_partition_t *a = _a, *b = _b; const rd_list_t *al = a->opaque, *bl = b->opaque; int r = rd_list_cnt(al) - rd_list_cnt(bl); /* ascending order */ @@ -1430,32 +1359,30 @@ static int toppar_sort_by_list_cnt (const void *_a, const void *_b, * @returns The result of the partitions sort. 
*/ static rd_kafka_topic_partition_list_t * -sortPartitions (rd_kafka_t *rk, - map_str_toppar_list_t *currentAssignment, - map_toppar_cgpair_t *prevAssignment, - rd_bool_t isFreshAssignment, - map_toppar_list_t *partition2AllPotentialConsumers, - map_str_toppar_list_t *consumer2AllPotentialPartitions) { +sortPartitions(rd_kafka_t *rk, + map_str_toppar_list_t *currentAssignment, + map_toppar_cgpair_t *prevAssignment, + rd_bool_t isFreshAssignment, + map_toppar_list_t *partition2AllPotentialConsumers, + map_str_toppar_list_t *consumer2AllPotentialPartitions) { rd_kafka_topic_partition_list_t *sortedPartitions; - map_str_toppar_list_t assignments = - RD_MAP_INITIALIZER(RD_MAP_CNT(currentAssignment), - rd_map_str_cmp, - rd_map_str_hash, - NULL, - rd_kafka_topic_partition_list_destroy_free); + map_str_toppar_list_t assignments = RD_MAP_INITIALIZER( + RD_MAP_CNT(currentAssignment), rd_map_str_cmp, rd_map_str_hash, + NULL, rd_kafka_topic_partition_list_destroy_free); rd_kafka_topic_partition_list_t *partitions; const rd_kafka_topic_partition_t *partition; const rd_list_t *consumers; const char *consumer; - rd_list_t sortedConsumers; /* element is the (rd_map_elem_t *) from - * assignments. */ + rd_list_t sortedConsumers; /* element is the (rd_map_elem_t *) from + * assignments. */ const rd_map_elem_t *elem; rd_bool_t wasEmpty; int i; sortedPartitions = rd_kafka_topic_partition_list_new( - (int)RD_MAP_CNT(partition2AllPotentialConsumers));; + (int)RD_MAP_CNT(partition2AllPotentialConsumers)); + ; rd_kafka_dbg(rk, ASSIGNOR, "STICKY", "Sort %d partitions in %s assignment", @@ -1469,16 +1396,14 @@ sortPartitions (rd_kafka_t *rk, * how many consumers can potentially use them. 
*/ RD_MAP_FOREACH(partition, consumers, partition2AllPotentialConsumers) { - rd_kafka_topic_partition_list_add( - sortedPartitions, - partition->topic, - partition->partition)->opaque = - (void *)consumers; + rd_kafka_topic_partition_list_add(sortedPartitions, + partition->topic, + partition->partition) + ->opaque = (void *)consumers; } - rd_kafka_topic_partition_list_sort(sortedPartitions, - toppar_sort_by_list_cnt, - NULL); + rd_kafka_topic_partition_list_sort( + sortedPartitions, toppar_sort_by_list_cnt, NULL); RD_MAP_DESTROY(&assignments); @@ -1494,8 +1419,8 @@ sortPartitions (rd_kafka_t *rk, * partition count. The list element is the `rd_map_elem_t *` * of the assignments map. This allows us to get a sorted list * of consumers without too much data duplication. */ - rd_list_init(&sortedConsumers, - (int)RD_MAP_CNT(currentAssignment), NULL); + rd_list_init(&sortedConsumers, (int)RD_MAP_CNT(currentAssignment), + NULL); RD_MAP_FOREACH(consumer, partitions, currentAssignment) { rd_kafka_topic_partition_list_t *partitions2; @@ -1504,19 +1429,18 @@ sortPartitions (rd_kafka_t *rk, rd_kafka_topic_partition_list_sort(partitions, NULL, NULL); partitions2 = - rd_kafka_topic_partition_list_new(partitions->cnt); + rd_kafka_topic_partition_list_new(partitions->cnt); - for (i = 0 ; i < partitions->cnt ; i++) { + for (i = 0; i < partitions->cnt; i++) { partition = &partitions->elems[i]; /* Only add partitions from the current assignment * that still exist. 
*/ if (RD_MAP_GET(partition2AllPotentialConsumers, - partition)) + partition)) rd_kafka_topic_partition_list_add( - partitions2, - partition->topic, - partition->partition); + partitions2, partition->topic, + partition->partition); } if (partitions2->cnt > 0) { @@ -1536,63 +1460,58 @@ sortPartitions (rd_kafka_t *rk, while (!rd_list_empty(&sortedConsumers)) { /* Take consumer with most partitions */ const rd_map_elem_t *elem = rd_list_last(&sortedConsumers); - const char *consumer = (const char *)elem->key; + const char *consumer = (const char *)elem->key; /* Currently assigned partitions to this consumer */ rd_kafka_topic_partition_list_t *remainingPartitions = - RD_MAP_GET(&assignments, consumer); + RD_MAP_GET(&assignments, consumer); /* Partitions that were assigned to a different consumer * last time */ rd_kafka_topic_partition_list_t *prevPartitions = - rd_kafka_topic_partition_list_new( - (int)RD_MAP_CNT(prevAssignment)); + rd_kafka_topic_partition_list_new( + (int)RD_MAP_CNT(prevAssignment)); rd_bool_t reSort = rd_true; /* From the partitions that had a different consumer before, * keep only those that are assigned to this consumer now. */ - for (i = 0 ; i < remainingPartitions->cnt ; i++) { + for (i = 0; i < remainingPartitions->cnt; i++) { partition = &remainingPartitions->elems[i]; if (RD_MAP_GET(prevAssignment, partition)) rd_kafka_topic_partition_list_add( - prevPartitions, - partition->topic, - partition->partition); + prevPartitions, partition->topic, + partition->partition); } if (prevPartitions->cnt > 0) { /* If there is a partition of this consumer that was * assigned to another consumer before, then mark * it as a good option for reassignment. 
*/ - partition = - &prevPartitions->elems[0]; + partition = &prevPartitions->elems[0]; - rd_kafka_topic_partition_list_del( - remainingPartitions, - partition->topic, - partition->partition); + rd_kafka_topic_partition_list_del(remainingPartitions, + partition->topic, + partition->partition); - rd_kafka_topic_partition_list_add( - sortedPartitions, - partition->topic, - partition->partition); + rd_kafka_topic_partition_list_add(sortedPartitions, + partition->topic, + partition->partition); - rd_kafka_topic_partition_list_del_by_idx( - prevPartitions, 0); + rd_kafka_topic_partition_list_del_by_idx(prevPartitions, + 0); } else if (remainingPartitions->cnt > 0) { /* Otherwise mark any other one of the current * partitions as a reassignment candidate. */ partition = &remainingPartitions->elems[0]; - rd_kafka_topic_partition_list_add( - sortedPartitions, - partition->topic, - partition->partition); + rd_kafka_topic_partition_list_add(sortedPartitions, + partition->topic, + partition->partition); rd_kafka_topic_partition_list_del_by_idx( - remainingPartitions, 0); + remainingPartitions, 0); } else { rd_list_remove_elem(&sortedConsumers, - rd_list_cnt(&sortedConsumers)-1); + rd_list_cnt(&sortedConsumers) - 1); /* No need to re-sort the list (below) */ reSort = rd_false; } @@ -1613,17 +1532,16 @@ sortPartitions (rd_kafka_t *rk, wasEmpty = !sortedPartitions->cnt; RD_MAP_FOREACH(partition, consumers, partition2AllPotentialConsumers) - rd_kafka_topic_partition_list_upsert(sortedPartitions, - partition->topic, - partition->partition); + rd_kafka_topic_partition_list_upsert(sortedPartitions, partition->topic, + partition->partition); /* If all partitions were added in the foreach loop just above * it means there is no order to retain from the sorderConsumer loop * below and we sort the partitions according to their topic+partition * to get consistent results (mainly in tests). 
*/ if (wasEmpty) - rd_kafka_topic_partition_list_sort(sortedPartitions, - NULL, NULL); + rd_kafka_topic_partition_list_sort(sortedPartitions, NULL, + NULL); rd_list_destroy(&sortedConsumers); RD_MAP_DESTROY(&assignments); @@ -1635,21 +1553,20 @@ sortPartitions (rd_kafka_t *rk, /** * @brief Transfer currentAssignment to members array. */ -static void assignToMembers (map_str_toppar_list_t *currentAssignment, - rd_kafka_group_member_t *members, - size_t member_cnt) { +static void assignToMembers(map_str_toppar_list_t *currentAssignment, + rd_kafka_group_member_t *members, + size_t member_cnt) { size_t i; - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { rd_kafka_group_member_t *rkgm = &members[i]; const rd_kafka_topic_partition_list_t *partitions = - RD_MAP_GET(currentAssignment, - rkgm->rkgm_member_id->str); + RD_MAP_GET(currentAssignment, rkgm->rkgm_member_id->str); if (rkgm->rkgm_assignment) rd_kafka_topic_partition_list_destroy( - rkgm->rkgm_assignment); - rkgm->rkgm_assignment = rd_kafka_topic_partition_list_copy( - partitions); + rkgm->rkgm_assignment); + rkgm->rkgm_assignment = + rd_kafka_topic_partition_list_copy(partitions); } } @@ -1660,43 +1577,38 @@ static void assignToMembers (map_str_toppar_list_t *currentAssignment, * This code is closely mimicking the AK Java AbstractStickyAssignor.assign(). 
*/ rd_kafka_resp_err_t -rd_kafka_sticky_assignor_assign_cb (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas, - const char *member_id, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t - **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, size_t errstr_size, - void *opaque) { +rd_kafka_sticky_assignor_assign_cb(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque) { /* FIXME: Let the cgrp pass the actual eligible partition count */ size_t partition_cnt = member_cnt * 10; /* FIXME */ /* Map of subscriptions. This is \p member turned into a map. */ map_str_toppar_list_t subscriptions = - RD_MAP_INITIALIZER(member_cnt, - rd_map_str_cmp, - rd_map_str_hash, - NULL /* refs members.rkgm_member_id */, - NULL /* refs members.rkgm_subscription */); + RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash, + NULL /* refs members.rkgm_member_id */, + NULL /* refs members.rkgm_subscription */); /* Map member to current assignment */ map_str_toppar_list_t currentAssignment = - RD_MAP_INITIALIZER(member_cnt, - rd_map_str_cmp, - rd_map_str_hash, - NULL /* refs members.rkgm_member_id */, - rd_kafka_topic_partition_list_destroy_free); + RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash, + NULL /* refs members.rkgm_member_id */, + rd_kafka_topic_partition_list_destroy_free); /* Map partition to ConsumerGenerationPair */ map_toppar_cgpair_t prevAssignment = - RD_MAP_INITIALIZER(partition_cnt, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - ConsumerGenerationPair_destroy); + RD_MAP_INITIALIZER(partition_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + 
rd_kafka_topic_partition_destroy_free, + ConsumerGenerationPair_destroy); /* Partition assignment movements between consumers */ PartitionMovements_t partitionMovements; @@ -1707,29 +1619,24 @@ rd_kafka_sticky_assignor_assign_cb (rd_kafka_t *rk, * assigned to them. * Value is an rd_list_t* with elements referencing the \p members * \c rkgm_member_id->str. */ - map_toppar_list_t partition2AllPotentialConsumers = - RD_MAP_INITIALIZER(partition_cnt, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - rd_list_destroy_free); + map_toppar_list_t partition2AllPotentialConsumers = RD_MAP_INITIALIZER( + partition_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, rd_list_destroy_free); /* Mapping of all consumers to all potential topic partitions that * can be assigned to them. */ map_str_toppar_list_t consumer2AllPotentialPartitions = - RD_MAP_INITIALIZER(member_cnt, - rd_map_str_cmp, - rd_map_str_hash, - NULL, - rd_kafka_topic_partition_list_destroy_free); + RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash, + NULL, + rd_kafka_topic_partition_list_destroy_free); /* Mapping of partition to current consumer. 
*/ map_toppar_str_t currentPartitionConsumer = - RD_MAP_INITIALIZER(partition_cnt, - rd_kafka_topic_partition_cmp, - rd_kafka_topic_partition_hash, - rd_kafka_topic_partition_destroy_free, - NULL /* refs members.rkgm_member_id->str */); + RD_MAP_INITIALIZER(partition_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, + NULL /* refs members.rkgm_member_id->str */); rd_kafka_topic_partition_list_t *sortedPartitions; rd_kafka_topic_partition_list_t *unassignedPartitions; @@ -1747,39 +1654,31 @@ rd_kafka_sticky_assignor_assign_cb (rd_kafka_t *rk, PartitionMovements_init(&partitionMovements, eligible_topic_cnt); /* Prepopulate current and previous assignments */ - prepopulateCurrentAssignments(rk, - members, member_cnt, - &subscriptions, - ¤tAssignment, - &prevAssignment, - ¤tPartitionConsumer, - &consumer2AllPotentialPartitions, - partition_cnt); + prepopulateCurrentAssignments( + rk, members, member_cnt, &subscriptions, ¤tAssignment, + &prevAssignment, ¤tPartitionConsumer, + &consumer2AllPotentialPartitions, partition_cnt); isFreshAssignment = RD_MAP_IS_EMPTY(¤tAssignment); /* Populate partition2AllPotentialConsumers and * consumer2AllPotentialPartitions maps by each eligible topic. */ - for (i = 0 ; i < (int)eligible_topic_cnt ; i++) - populatePotentialMaps(eligible_topics[i], - &partition2AllPotentialConsumers, - &consumer2AllPotentialPartitions, - partition_cnt); + for (i = 0; i < (int)eligible_topic_cnt; i++) + populatePotentialMaps( + eligible_topics[i], &partition2AllPotentialConsumers, + &consumer2AllPotentialPartitions, partition_cnt); /* Sort valid partitions to minimize partition movements. 
*/ - sortedPartitions = sortPartitions(rk, - ¤tAssignment, - &prevAssignment, - isFreshAssignment, - &partition2AllPotentialConsumers, - &consumer2AllPotentialPartitions); + sortedPartitions = sortPartitions( + rk, ¤tAssignment, &prevAssignment, isFreshAssignment, + &partition2AllPotentialConsumers, &consumer2AllPotentialPartitions); /* All partitions that need to be assigned (initially set to all * partitions but adjusted in the following loop) */ unassignedPartitions = - rd_kafka_topic_partition_list_copy(sortedPartitions); + rd_kafka_topic_partition_list_copy(sortedPartitions); RD_MAP_FOREACH(consumer, partitions, ¤tAssignment) { if (!RD_MAP_GET(&subscriptions, consumer)) { @@ -1794,9 +1693,9 @@ rd_kafka_sticky_assignor_assign_cb (rd_kafka_t *rk, consumer, partitions->cnt); - for (i = 0 ; i < partitions->cnt ; i++) { + for (i = 0; i < partitions->cnt; i++) { const rd_kafka_topic_partition_t *partition = - &partitions->elems[i]; + &partitions->elems[i]; RD_MAP_DELETE(¤tPartitionConsumer, partition); } @@ -1808,14 +1707,14 @@ rd_kafka_sticky_assignor_assign_cb (rd_kafka_t *rk, } else { /* Otherwise (the consumer still exists) */ - for (i = 0 ; i < partitions->cnt ; i++) { + for (i = 0; i < partitions->cnt; i++) { const rd_kafka_topic_partition_t *partition = - &partitions->elems[i]; + &partitions->elems[i]; rd_bool_t remove_part = rd_false; if (!RD_MAP_GET( - &partition2AllPotentialConsumers, - partition)) { + &partition2AllPotentialConsumers, + partition)) { /* If this partition of this consumer * no longer exists remove it from * currentAssignment of the consumer */ @@ -1824,17 +1723,17 @@ rd_kafka_sticky_assignor_assign_cb (rd_kafka_t *rk, partition); } else if (!rd_kafka_topic_partition_list_find( - RD_MAP_GET(&subscriptions, - consumer), - partition->topic, - RD_KAFKA_PARTITION_UA)) { + RD_MAP_GET(&subscriptions, + consumer), + partition->topic, + RD_KAFKA_PARTITION_UA)) { /* If this partition cannot remain * assigned to its current consumer * because the 
consumer is no longer * subscribed to its topic, remove it * from the currentAssignment of the * consumer. */ - remove_part = rd_true; + remove_part = rd_true; revocationRequired = rd_true; } else { /* Otherwise, remove the topic partition @@ -1845,14 +1744,14 @@ rd_kafka_sticky_assignor_assign_cb (rd_kafka_t *rk, * to preserve that assignment as much * as possible). */ rd_kafka_topic_partition_list_del( - unassignedPartitions, - partition->topic, - partition->partition); + unassignedPartitions, + partition->topic, + partition->partition); } if (remove_part) { rd_kafka_topic_partition_list_del_by_idx( - partitions, i); + partitions, i); i--; /* Since the current element was * removed we need the next for * loop iteration to stay at the @@ -1876,22 +1775,16 @@ rd_kafka_sticky_assignor_assign_cb (rd_kafka_t *rk, (int)RD_MAP_CNT(¤tAssignment), NULL); RD_MAP_FOREACH_ELEM(elem, ¤tAssignment.rmap) - rd_list_add(&sortedCurrentSubscriptions, (void *)elem); + rd_list_add(&sortedCurrentSubscriptions, (void *)elem); rd_list_sort(&sortedCurrentSubscriptions, sort_by_map_elem_val_toppar_list_cnt); /* Balance the available partitions across consumers */ - balance(rk, - &partitionMovements, - ¤tAssignment, - &prevAssignment, - sortedPartitions, - unassignedPartitions, - &sortedCurrentSubscriptions, - &consumer2AllPotentialPartitions, - &partition2AllPotentialConsumers, - ¤tPartitionConsumer, + balance(rk, &partitionMovements, ¤tAssignment, &prevAssignment, + sortedPartitions, unassignedPartitions, + &sortedCurrentSubscriptions, &consumer2AllPotentialPartitions, + &partition2AllPotentialConsumers, ¤tPartitionConsumer, revocationRequired); /* Transfer currentAssignment (now updated) to each member's @@ -1919,15 +1812,14 @@ rd_kafka_sticky_assignor_assign_cb (rd_kafka_t *rk, /** @brief FIXME docstring */ -static -void rd_kafka_sticky_assignor_on_assignment_cb ( - const rd_kafka_assignor_t *rkas, - void **assignor_state, - const rd_kafka_topic_partition_list_t *partitions, - const 
rd_kafkap_bytes_t *assignment_userdata, - const rd_kafka_consumer_group_metadata_t *rkcgm) { +static void rd_kafka_sticky_assignor_on_assignment_cb( + const rd_kafka_assignor_t *rkas, + void **assignor_state, + const rd_kafka_topic_partition_list_t *partitions, + const rd_kafkap_bytes_t *assignment_userdata, + const rd_kafka_consumer_group_metadata_t *rkcgm) { rd_kafka_sticky_assignor_state_t *state = - (rd_kafka_sticky_assignor_state_t *)*assignor_state; + (rd_kafka_sticky_assignor_state_t *)*assignor_state; if (!state) state = rd_calloc(1, sizeof(*state)); @@ -1935,18 +1827,17 @@ void rd_kafka_sticky_assignor_on_assignment_cb ( rd_kafka_topic_partition_list_destroy(state->prev_assignment); state->prev_assignment = rd_kafka_topic_partition_list_copy(partitions); - state->generation_id = rkcgm->generation_id; + state->generation_id = rkcgm->generation_id; *assignor_state = state; } /** @brief FIXME docstring */ -static rd_kafkap_bytes_t * -rd_kafka_sticky_assignor_get_metadata (const rd_kafka_assignor_t *rkas, - void *assignor_state, - const rd_list_t *topics, - const rd_kafka_topic_partition_list_t - *owned_partitions) { +static rd_kafkap_bytes_t *rd_kafka_sticky_assignor_get_metadata( + const rd_kafka_assignor_t *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions) { rd_kafka_sticky_assignor_state_t *state; rd_kafka_buf_t *rkbuf; rd_kafkap_bytes_t *metadata; @@ -1966,7 +1857,7 @@ rd_kafka_sticky_assignor_get_metadata (const rd_kafka_assignor_t *rkas, if (!assignor_state) { return rd_kafka_consumer_protocol_member_metadata_new( - topics, NULL, 0, owned_partitions); + topics, NULL, 0, owned_partitions); } state = (rd_kafka_sticky_assignor_state_t *)assignor_state; @@ -1974,24 +1865,20 @@ rd_kafka_sticky_assignor_get_metadata (const rd_kafka_assignor_t *rkas, rkbuf = rd_kafka_buf_new(1, 100); rd_assert(state->prev_assignment != NULL); rd_kafka_buf_write_topic_partitions( - rkbuf, - 
state->prev_assignment, - rd_false /*skip invalid offsets*/, - rd_false /*any offset*/, - rd_false /*write offsets*/, - rd_false /*write epoch*/, - rd_false /*write metadata*/); + rkbuf, state->prev_assignment, rd_false /*skip invalid offsets*/, + rd_false /*any offset*/, rd_false /*write offsets*/, + rd_false /*write epoch*/, rd_false /*write metadata*/); rd_kafka_buf_write_i32(rkbuf, state->generation_id); /* Get binary buffer and allocate a new Kafka Bytes with a copy. */ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf); - len = rd_slice_remains(&rkbuf->rkbuf_reader); + len = rd_slice_remains(&rkbuf->rkbuf_reader); kbytes = rd_kafkap_bytes_new(NULL, (int32_t)len); rd_slice_read(&rkbuf->rkbuf_reader, (void *)kbytes->data, len); rd_kafka_buf_destroy(rkbuf); metadata = rd_kafka_consumer_protocol_member_metadata_new( - topics, kbytes->data, kbytes->len, owned_partitions); + topics, kbytes->data, kbytes->len, owned_partitions); rd_kafkap_bytes_destroy(kbytes); @@ -2002,9 +1889,9 @@ rd_kafka_sticky_assignor_get_metadata (const rd_kafka_assignor_t *rkas, /** * @brief Destroy assignor state */ -static void rd_kafka_sticky_assignor_state_destroy (void *assignor_state) { +static void rd_kafka_sticky_assignor_state_destroy(void *assignor_state) { rd_kafka_sticky_assignor_state_t *state = - (rd_kafka_sticky_assignor_state_t *)assignor_state; + (rd_kafka_sticky_assignor_state_t *)assignor_state; rd_assert(assignor_state); @@ -2033,12 +1920,12 @@ static void rd_kafka_sticky_assignor_state_destroy (void *assignor_state) { * its new assignment and including it in the next rebalance as its * owned-partitions. 
*/ -static void ut_set_owned (rd_kafka_group_member_t *rkgm) { +static void ut_set_owned(rd_kafka_group_member_t *rkgm) { if (rkgm->rkgm_owned) rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned); rkgm->rkgm_owned = - rd_kafka_topic_partition_list_copy(rkgm->rkgm_assignment); + rd_kafka_topic_partition_list_copy(rkgm->rkgm_assignment); } @@ -2048,51 +1935,50 @@ static void ut_set_owned (rd_kafka_group_member_t *rkgm) { * @remark Also updates the members owned partitions to the assignment. */ -static int verifyValidityAndBalance0 (const char *func, int line, - rd_kafka_group_member_t *members, - size_t member_cnt, - const rd_kafka_metadata_t *metadata) { +static int verifyValidityAndBalance0(const char *func, + int line, + rd_kafka_group_member_t *members, + size_t member_cnt, + const rd_kafka_metadata_t *metadata) { int fails = 0; int i; rd_bool_t verbose = rd_false; /* Enable for troubleshooting */ - RD_UT_SAY("%s:%d: verifying assignment for %d member(s):", - func, line, (int)member_cnt); + RD_UT_SAY("%s:%d: verifying assignment for %d member(s):", func, line, + (int)member_cnt); - for (i = 0 ; i < (int)member_cnt ; i++) { + for (i = 0; i < (int)member_cnt; i++) { const char *consumer = members[i].rkgm_member_id->str; const rd_kafka_topic_partition_list_t *partitions = - members[i].rkgm_assignment; + members[i].rkgm_assignment; int p, j; if (verbose) - RD_UT_SAY("%s:%d: " - "consumer \"%s\", %d subscribed topic(s), " - "%d assigned partition(s):", - func, line, consumer, - members[i].rkgm_subscription->cnt, - partitions->cnt); - - for (p = 0 ; p < partitions->cnt ; p++) { + RD_UT_SAY( + "%s:%d: " + "consumer \"%s\", %d subscribed topic(s), " + "%d assigned partition(s):", + func, line, consumer, + members[i].rkgm_subscription->cnt, partitions->cnt); + + for (p = 0; p < partitions->cnt; p++) { const rd_kafka_topic_partition_t *partition = - &partitions->elems[p]; + &partitions->elems[p]; if (verbose) - RD_UT_SAY("%s:%d: %s [%"PRId32"]", - func, line, - 
partition->topic, + RD_UT_SAY("%s:%d: %s [%" PRId32 "]", func, + line, partition->topic, partition->partition); if (!rd_kafka_topic_partition_list_find( - members[i].rkgm_subscription, - partition->topic, - RD_KAFKA_PARTITION_UA)) { - RD_UT_WARN("%s [%"PRId32"] is assigned to " + members[i].rkgm_subscription, partition->topic, + RD_KAFKA_PARTITION_UA)) { + RD_UT_WARN("%s [%" PRId32 + "] is assigned to " "%s but it is not subscribed to " "that topic", partition->topic, - partition->partition, - consumer); + partition->partition, consumer); fails++; } } @@ -2104,28 +1990,27 @@ static int verifyValidityAndBalance0 (const char *func, int line, if (i == (int)member_cnt - 1) continue; - for (j = i+1 ; j < (int)member_cnt ; j++) { + for (j = i + 1; j < (int)member_cnt; j++) { const char *otherConsumer = - members[j].rkgm_member_id->str; + members[j].rkgm_member_id->str; const rd_kafka_topic_partition_list_t *otherPartitions = - members[j].rkgm_assignment; - rd_bool_t balanced = abs(partitions->cnt - - otherPartitions->cnt) <= 1; + members[j].rkgm_assignment; + rd_bool_t balanced = + abs(partitions->cnt - otherPartitions->cnt) <= 1; - for (p = 0 ; p < partitions->cnt ; p++) { + for (p = 0; p < partitions->cnt; p++) { const rd_kafka_topic_partition_t *partition = - &partitions->elems[p]; + &partitions->elems[p]; if (rd_kafka_topic_partition_list_find( - otherPartitions, - partition->topic, - partition->partition)) { + otherPartitions, partition->topic, + partition->partition)) { RD_UT_WARN( - "Consumer %s and %s are both " - "assigned %s [%"PRId32"]", - consumer, otherConsumer, - partition->topic, - partition->partition); + "Consumer %s and %s are both " + "assigned %s [%" PRId32 "]", + consumer, otherConsumer, + partition->topic, + partition->partition); fails++; } @@ -2136,35 +2021,34 @@ static int verifyValidityAndBalance0 (const char *func, int line, * properly balance the partitions. 
*/ if (!balanced && rd_kafka_topic_partition_list_find_topic( - otherPartitions, - partition->topic)) { + otherPartitions, partition->topic)) { RD_UT_WARN( - "Some %s partition(s) can be " - "moved from " - "%s (%d partition(s)) to " - "%s (%d partition(s)) to " - "achieve a better balance", - partition->topic, - consumer, partitions->cnt, - otherConsumer, - otherPartitions->cnt); + "Some %s partition(s) can be " + "moved from " + "%s (%d partition(s)) to " + "%s (%d partition(s)) to " + "achieve a better balance", + partition->topic, consumer, + partitions->cnt, otherConsumer, + otherPartitions->cnt); fails++; } } } } - RD_UT_ASSERT(!fails, "%s:%d: See %d previous errors", - func, line, fails); + RD_UT_ASSERT(!fails, "%s:%d: See %d previous errors", func, line, + fails); return 0; } -#define verifyValidityAndBalance(members,member_cnt,metadata) do { \ - if (verifyValidityAndBalance0(__FUNCTION__,__LINE__, \ - members,member_cnt,metadata)) \ - return 1; \ +#define verifyValidityAndBalance(members, member_cnt, metadata) \ + do { \ + if (verifyValidityAndBalance0(__FUNCTION__, __LINE__, members, \ + member_cnt, metadata)) \ + return 1; \ } while (0) @@ -2173,14 +2057,15 @@ static int verifyValidityAndBalance0 (const char *func, int line, * * Only works for symmetrical subscriptions. 
*/ -static int isFullyBalanced0 (const char *function, int line, - const rd_kafka_group_member_t *members, - size_t member_cnt) { +static int isFullyBalanced0(const char *function, + int line, + const rd_kafka_group_member_t *members, + size_t member_cnt) { int min_assignment = INT_MAX; int max_assignment = -1; size_t i; - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { int size = members[i].rkgm_assignment->cnt; if (size < min_assignment) min_assignment = size; @@ -2189,31 +2074,31 @@ static int isFullyBalanced0 (const char *function, int line, } RD_UT_ASSERT(max_assignment - min_assignment <= 1, - "%s:%d: Assignment not balanced: min %d, max %d", - function, line, min_assignment, max_assignment); + "%s:%d: Assignment not balanced: min %d, max %d", function, + line, min_assignment, max_assignment); return 0; } -#define isFullyBalanced(members,member_cnt) do { \ - if (isFullyBalanced0(__FUNCTION__,__LINE__,members,member_cnt)) \ - return 1; \ +#define isFullyBalanced(members, member_cnt) \ + do { \ + if (isFullyBalanced0(__FUNCTION__, __LINE__, members, \ + member_cnt)) \ + return 1; \ } while (0) static void -ut_print_toppar_list (const rd_kafka_topic_partition_list_t *partitions) { +ut_print_toppar_list(const rd_kafka_topic_partition_list_t *partitions) { int i; - for (i = 0 ; i < partitions->cnt ; i++) - RD_UT_SAY(" %s [%"PRId32"]", - partitions->elems[i].topic, + for (i = 0; i < partitions->cnt; i++) + RD_UT_SAY(" %s [%" PRId32 "]", partitions->elems[i].topic, partitions->elems[i].partition); } - /** * @brief Verify that member's assignment matches the expected partitions. * @@ -2222,8 +2107,10 @@ ut_print_toppar_list (const rd_kafka_topic_partition_list_t *partitions) { * * @returns 0 on success, else raises a unittest error and returns 1. */ -static int verifyAssignment0 (const char *function, int line, - rd_kafka_group_member_t *rkgm, ...) 
{ +static int verifyAssignment0(const char *function, + int line, + rd_kafka_group_member_t *rkgm, + ...) { va_list ap; int cnt = 0; const char *topic; @@ -2236,22 +2123,23 @@ static int verifyAssignment0 (const char *function, int line, if (!rd_kafka_topic_partition_list_find(rkgm->rkgm_assignment, topic, partition)) { - RD_UT_WARN("%s:%d: Expected %s [%d] not found in %s's " - "assignment (%d partition(s))", - function, line, - topic, partition, rkgm->rkgm_member_id->str, - rkgm->rkgm_assignment->cnt); + RD_UT_WARN( + "%s:%d: Expected %s [%d] not found in %s's " + "assignment (%d partition(s))", + function, line, topic, partition, + rkgm->rkgm_member_id->str, + rkgm->rkgm_assignment->cnt); fails++; } } va_end(ap); if (cnt != rkgm->rkgm_assignment->cnt) { - RD_UT_WARN("%s:%d: " - "Expected %d assigned partition(s) for %s, not %d", - function, line, - cnt, rkgm->rkgm_member_id->str, - rkgm->rkgm_assignment->cnt); + RD_UT_WARN( + "%s:%d: " + "Expected %d assigned partition(s) for %s, not %d", + function, line, cnt, rkgm->rkgm_member_id->str, + rkgm->rkgm_assignment->cnt); fails++; } @@ -2263,9 +2151,11 @@ static int verifyAssignment0 (const char *function, int line, return 0; } -#define verifyAssignment(rkgm,...) do { \ - if (verifyAssignment0(__FUNCTION__,__LINE__,rkgm,__VA_ARGS__)) \ - return 1; \ +#define verifyAssignment(rkgm, ...) \ + do { \ + if (verifyAssignment0(__FUNCTION__, __LINE__, rkgm, \ + __VA_ARGS__)) \ + return 1; \ } while (0) @@ -2277,38 +2167,33 @@ static int verifyAssignment0 (const char *function, int line, * * Use rd_kafka_group_member_clear() to free fields. */ -static void ut_init_member (rd_kafka_group_member_t *rkgm, - const char *member_id, ...) { +static void +ut_init_member(rd_kafka_group_member_t *rkgm, const char *member_id, ...) 
{ va_list ap; const char *topic; memset(rkgm, 0, sizeof(*rkgm)); - rkgm->rkgm_member_id = rd_kafkap_str_new(member_id, -1); + rkgm->rkgm_member_id = rd_kafkap_str_new(member_id, -1); rkgm->rkgm_group_instance_id = rd_kafkap_str_new(member_id, -1); rd_list_init(&rkgm->rkgm_eligible, 0, NULL); - rkgm->rkgm_subscription = - rd_kafka_topic_partition_list_new(4); + rkgm->rkgm_subscription = rd_kafka_topic_partition_list_new(4); va_start(ap, member_id); while ((topic = va_arg(ap, const char *))) - rd_kafka_topic_partition_list_add( - rkgm->rkgm_subscription, topic, RD_KAFKA_PARTITION_UA); + rd_kafka_topic_partition_list_add(rkgm->rkgm_subscription, + topic, RD_KAFKA_PARTITION_UA); va_end(ap); rkgm->rkgm_assignment = - rd_kafka_topic_partition_list_new( - rkgm->rkgm_subscription->size); + rd_kafka_topic_partition_list_new(rkgm->rkgm_subscription->size); } - - - -static int ut_testOneConsumerNoTopic (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +static int ut_testOneConsumerNoTopic(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -2317,9 +2202,9 @@ static int ut_testOneConsumerNoTopic (rd_kafka_t *rk, metadata = rd_kafka_metadata_new_topic_mock(NULL, 0); ut_init_member(&members[0], "consumer1", "topic1", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyAssignment(&members[0], NULL); @@ -2333,9 +2218,8 @@ static int ut_testOneConsumerNoTopic (rd_kafka_t *rk, } -static int -ut_testOneConsumerNonexistentTopic (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +static int ut_testOneConsumerNonexistentTopic(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; 
@@ -2344,9 +2228,9 @@ ut_testOneConsumerNonexistentTopic (rd_kafka_t *rk, metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 0); ut_init_member(&members[0], "consumer1", "topic1", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyAssignment(&members[0], NULL); @@ -2361,8 +2245,8 @@ ut_testOneConsumerNonexistentTopic (rd_kafka_t *rk, -static int ut_testOneConsumerOneTopic (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +static int ut_testOneConsumerOneTopic(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -2371,19 +2255,16 @@ static int ut_testOneConsumerOneTopic (rd_kafka_t *rk, metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3); ut_init_member(&members[0], "consumer1", "topic1", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 3, "expected assignment of 3 partitions, got %d partition(s)", members[0].rkgm_assignment->cnt); - verifyAssignment(&members[0], - "topic1", 0, - "topic1", 1, - "topic1", 2, + verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2, NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); isFullyBalanced(members, RD_ARRAYSIZE(members)); @@ -2395,29 +2276,25 @@ static int ut_testOneConsumerOneTopic (rd_kafka_t *rk, } -static int ut_testOnlyAssignsPartitionsFromSubscribedTopics ( - rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +static int 
ut_testOnlyAssignsPartitionsFromSubscribedTopics( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; rd_kafka_group_member_t members[1]; - metadata = rd_kafka_metadata_new_topic_mockv(2, - "topic1", 3, - "topic2", 3); + metadata = + rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 3); ut_init_member(&members[0], "consumer1", "topic1", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic1", 0, - "topic1", 1, - "topic1", 2, + verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2, NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); @@ -2430,27 +2307,23 @@ static int ut_testOnlyAssignsPartitionsFromSubscribedTopics ( } -static int ut_testOneConsumerMultipleTopics (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +static int ut_testOneConsumerMultipleTopics(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; rd_kafka_group_member_t members[1]; - metadata = rd_kafka_metadata_new_topic_mockv(2, - "topic1", 1, - "topic2", 2); + metadata = + rd_kafka_metadata_new_topic_mockv(2, "topic1", 1, "topic2", 2); ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic1", 0, - "topic2", 0, - "topic2", 1, + verifyAssignment(&members[0], "topic1", 0, 
"topic2", 0, "topic2", 1, NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); @@ -2463,26 +2336,23 @@ static int ut_testOneConsumerMultipleTopics (rd_kafka_t *rk, } static int -ut_testTwoConsumersOneTopicOnePartition (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +ut_testTwoConsumersOneTopicOnePartition(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; rd_kafka_group_member_t members[2]; - metadata = rd_kafka_metadata_new_topic_mockv(1, - "topic1", 1); + metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 1); ut_init_member(&members[0], "consumer1", "topic1", NULL); ut_init_member(&members[1], "consumer2", "topic1", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic1", 0, - NULL); + verifyAssignment(&members[0], "topic1", 0, NULL); verifyAssignment(&members[1], NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); @@ -2497,29 +2367,24 @@ ut_testTwoConsumersOneTopicOnePartition (rd_kafka_t *rk, static int -ut_testTwoConsumersOneTopicTwoPartitions (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +ut_testTwoConsumersOneTopicTwoPartitions(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; rd_kafka_group_member_t members[2]; - metadata = rd_kafka_metadata_new_topic_mockv(1, - "topic1", 2); + metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 2); ut_init_member(&members[0], "consumer1", "topic1", NULL); ut_init_member(&members[1], "consumer2", "topic1", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, 
sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic1", 0, - NULL); - verifyAssignment(&members[1], - "topic1", 1, - NULL); + verifyAssignment(&members[0], "topic1", 0, NULL); + verifyAssignment(&members[1], "topic1", 1, NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); isFullyBalanced(members, RD_ARRAYSIZE(members)); @@ -2532,37 +2397,29 @@ ut_testTwoConsumersOneTopicTwoPartitions (rd_kafka_t *rk, } -static int ut_testMultipleConsumersMixedTopicSubscriptions ( - rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { +static int ut_testMultipleConsumersMixedTopicSubscriptions( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; rd_kafka_group_member_t members[3]; - metadata = rd_kafka_metadata_new_topic_mockv(2, - "topic1", 3, - "topic2", 2); + metadata = + rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 2); ut_init_member(&members[0], "consumer1", "topic1", NULL); ut_init_member(&members[1], "consumer2", "topic1", "topic2", NULL); ut_init_member(&members[2], "consumer3", "topic1", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic1", 0, - "topic1", 2, - NULL); - verifyAssignment(&members[1], - "topic2", 0, - "topic2", 1, - NULL); - verifyAssignment(&members[2], - "topic1", 1, - NULL); + verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL); + verifyAssignment(&members[1], "topic2", 0, "topic2", 1, NULL); + verifyAssignment(&members[2], "topic1", 1, NULL); 
verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); isFullyBalanced(members, RD_ARRAYSIZE(members)); @@ -2577,33 +2434,26 @@ static int ut_testMultipleConsumersMixedTopicSubscriptions ( static int -ut_testTwoConsumersTwoTopicsSixPartitions (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +ut_testTwoConsumersTwoTopicsSixPartitions(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; rd_kafka_group_member_t members[2]; - metadata = rd_kafka_metadata_new_topic_mockv(2, - "topic1", 3, - "topic2", 3); + metadata = + rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 3); ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL); ut_init_member(&members[1], "consumer2", "topic1", "topic2", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic1", 0, - "topic1", 2, - "topic2", 1, + verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1, NULL); - verifyAssignment(&members[1], - "topic1", 1, - "topic2", 0, - "topic2", 2, + verifyAssignment(&members[1], "topic1", 1, "topic2", 0, "topic2", 2, NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); @@ -2617,8 +2467,8 @@ ut_testTwoConsumersTwoTopicsSixPartitions (rd_kafka_t *rk, } -static int ut_testAddRemoveConsumerOneTopic (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +static int ut_testAddRemoveConsumerOneTopic(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -2627,15 +2477,11 @@ static int ut_testAddRemoveConsumerOneTopic (rd_kafka_t *rk, metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3); 
ut_init_member(&members[0], "consumer1", "topic1", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, 1, + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 1, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic1", 0, - "topic1", 1, - "topic1", 2, + verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2, NULL); verifyValidityAndBalance(members, 1, metadata); @@ -2644,18 +2490,13 @@ static int ut_testAddRemoveConsumerOneTopic (rd_kafka_t *rk, /* Add consumer2 */ ut_init_member(&members[1], "consumer2", "topic1", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic1", 1, - "topic1", 2, - NULL); - verifyAssignment(&members[1], - "topic1", 0, - NULL); + verifyAssignment(&members[0], "topic1", 1, "topic1", 2, NULL); + verifyAssignment(&members[1], "topic1", 0, NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); isFullyBalanced(members, RD_ARRAYSIZE(members)); @@ -2663,15 +2504,11 @@ static int ut_testAddRemoveConsumerOneTopic (rd_kafka_t *rk, /* Remove consumer1 */ - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - &members[1], 1, + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], 1, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[1], - "topic1", 0, - "topic1", 1, - "topic1", 2, + verifyAssignment(&members[1], "topic1", 0, "topic1", 1, "topic1", 2, NULL); verifyValidityAndBalance(&members[1], 1, metadata); @@ -2707,50 +2544,34 @@ static int ut_testAddRemoveConsumerOneTopic (rd_kafka_t *rk, * - consumer4: topic4-0, topic5-1 */ static int 
-ut_testPoorRoundRobinAssignmentScenario (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +ut_testPoorRoundRobinAssignmentScenario(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; rd_kafka_group_member_t members[4]; - metadata = rd_kafka_metadata_new_topic_mockv(5, - "topic1", 2, - "topic2", 1, - "topic3", 2, - "topic4", 1, - "topic5", 2); - - ut_init_member(&members[0], "consumer1", - "topic1", "topic2", "topic3", "topic4", "topic5", NULL); - ut_init_member(&members[1], "consumer2", - "topic1", "topic3", "topic5", NULL); - ut_init_member(&members[2], "consumer3", - "topic1", "topic3", "topic5", NULL); - ut_init_member(&members[3], "consumer4", - "topic1", "topic2", "topic3", "topic4", "topic5", NULL); - - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + metadata = rd_kafka_metadata_new_topic_mockv( + 5, "topic1", 2, "topic2", 1, "topic3", 2, "topic4", 1, "topic5", 2); + + ut_init_member(&members[0], "consumer1", "topic1", "topic2", "topic3", + "topic4", "topic5", NULL); + ut_init_member(&members[1], "consumer2", "topic1", "topic3", "topic5", + NULL); + ut_init_member(&members[2], "consumer3", "topic1", "topic3", "topic5", + NULL); + ut_init_member(&members[3], "consumer4", "topic1", "topic2", "topic3", + "topic4", "topic5", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic2", 0, - "topic3", 0, - NULL); - verifyAssignment(&members[1], - "topic1", 0, - "topic3", 1, - NULL); - verifyAssignment(&members[2], - "topic1", 1, - "topic5", 0, - NULL); - verifyAssignment(&members[3], - "topic4", 0, - "topic5", 1, - NULL); + verifyAssignment(&members[0], "topic2", 0, "topic3", 0, NULL); + verifyAssignment(&members[1], "topic1", 0, "topic3", 1, NULL); + 
verifyAssignment(&members[2], "topic1", 1, "topic5", 0, NULL); + verifyAssignment(&members[3], "topic4", 0, "topic5", 1, NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); isFullyBalanced(members, RD_ARRAYSIZE(members)); @@ -2766,8 +2587,8 @@ ut_testPoorRoundRobinAssignmentScenario (rd_kafka_t *rk, -static int ut_testAddRemoveTopicTwoConsumers (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +static int ut_testAddRemoveTopicTwoConsumers(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -2777,18 +2598,13 @@ static int ut_testAddRemoveTopicTwoConsumers (rd_kafka_t *rk, ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL); ut_init_member(&members[1], "consumer2", "topic1", "topic2", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic1", 0, - "topic1", 2, - NULL); - verifyAssignment(&members[1], - "topic1", 1, - NULL); + verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL); + verifyAssignment(&members[1], "topic1", 1, NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); isFullyBalanced(members, RD_ARRAYSIZE(members)); @@ -2798,24 +2614,17 @@ static int ut_testAddRemoveTopicTwoConsumers (rd_kafka_t *rk, */ RD_UT_SAY("Adding topic2"); rd_kafka_metadata_destroy(metadata); - metadata = rd_kafka_metadata_new_topic_mockv(2, - "topic1", 3, - "topic2", 3); + metadata = + rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 3); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + 
RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic1", 0, - "topic1", 2, - "topic2", 1, + verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1, NULL); - verifyAssignment(&members[1], - "topic1", 1, - "topic2", 2, - "topic2", 0, + verifyAssignment(&members[1], "topic1", 1, "topic2", 2, "topic2", 0, NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); @@ -2830,18 +2639,13 @@ static int ut_testAddRemoveTopicTwoConsumers (rd_kafka_t *rk, rd_kafka_metadata_destroy(metadata); metadata = rd_kafka_metadata_new_topic_mockv(1, "topic2", 3); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyAssignment(&members[0], - "topic2", 1, - NULL); - verifyAssignment(&members[1], - "topic2", 0, - "topic2", 2, - NULL); + verifyAssignment(&members[0], "topic2", 1, NULL); + verifyAssignment(&members[1], "topic2", 0, "topic2", 2, NULL); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); isFullyBalanced(members, RD_ARRAYSIZE(members)); @@ -2856,8 +2660,8 @@ static int ut_testAddRemoveTopicTwoConsumers (rd_kafka_t *rk, static int -ut_testReassignmentAfterOneConsumerLeaves (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +ut_testReassignmentAfterOneConsumerLeaves(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -2867,37 +2671,36 @@ ut_testReassignmentAfterOneConsumerLeaves (rd_kafka_t *rk, int topic_cnt = RD_ARRAYSIZE(mt); int i; - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { char topic[10]; - rd_snprintf(topic, sizeof(topic), "topic%d", i+1); + rd_snprintf(topic, sizeof(topic), "topic%d", 
i + 1); rd_strdupa(&mt[i].topic, topic); - mt[i].partition_cnt = i+1; + mt[i].partition_cnt = i + 1; } metadata = rd_kafka_metadata_new_topic_mock(mt, topic_cnt); - for (i = 1 ; i <= member_cnt ; i++) { + for (i = 1; i <= member_cnt; i++) { char name[20]; rd_kafka_topic_partition_list_t *subscription = - rd_kafka_topic_partition_list_new(i); + rd_kafka_topic_partition_list_new(i); int j; - for (j = 1 ; j <= i ; j++) { + for (j = 1; j <= i; j++) { char topic[16]; rd_snprintf(topic, sizeof(topic), "topic%d", j); rd_kafka_topic_partition_list_add( - subscription, topic, RD_KAFKA_PARTITION_UA); + subscription, topic, RD_KAFKA_PARTITION_UA); } rd_snprintf(name, sizeof(name), "consumer%d", i); - ut_init_member(&members[i-1], name, NULL); + ut_init_member(&members[i - 1], name, NULL); rd_kafka_topic_partition_list_destroy( - members[i-1].rkgm_subscription); - members[i-1].rkgm_subscription = subscription; + members[i - 1].rkgm_subscription); + members[i - 1].rkgm_subscription = subscription; } - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, member_cnt, metadata); @@ -2911,15 +2714,14 @@ ut_testReassignmentAfterOneConsumerLeaves (rd_kafka_t *rk, sizeof(*members) * (member_cnt - 10)); member_cnt--; - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, member_cnt, metadata); // FIXME: isSticky(); - for (i = 0 ; i < member_cnt ; i++) + for (i = 0; i < member_cnt; i++) rd_kafka_group_member_clear(&members[i]); rd_kafka_metadata_destroy(metadata); @@ -2928,8 +2730,8 @@ 
ut_testReassignmentAfterOneConsumerLeaves (rd_kafka_t *rk, static int -ut_testReassignmentAfterOneConsumerAdded (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +ut_testReassignmentAfterOneConsumerAdded(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -2939,23 +2741,22 @@ ut_testReassignmentAfterOneConsumerAdded (rd_kafka_t *rk, metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 20); - for (i = 1 ; i <= member_cnt ; i++) { + for (i = 1; i <= member_cnt; i++) { char name[20]; rd_kafka_topic_partition_list_t *subscription = - rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add( - subscription, "topic1", RD_KAFKA_PARTITION_UA); + rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(subscription, "topic1", + RD_KAFKA_PARTITION_UA); rd_snprintf(name, sizeof(name), "consumer%d", i); - ut_init_member(&members[i-1], name, NULL); + ut_init_member(&members[i - 1], name, NULL); rd_kafka_topic_partition_list_destroy( - members[i-1].rkgm_subscription); - members[i-1].rkgm_subscription = subscription; + members[i - 1].rkgm_subscription); + members[i - 1].rkgm_subscription = subscription; } member_cnt--; /* Skip one consumer */ - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, member_cnt, metadata); @@ -2966,15 +2767,14 @@ ut_testReassignmentAfterOneConsumerAdded (rd_kafka_t *rk, */ member_cnt++; - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, 
member_cnt, metadata); // FIXME: isSticky(); - for (i = 0 ; i < member_cnt ; i++) + for (i = 0; i < member_cnt; i++) rd_kafka_group_member_clear(&members[i]); rd_kafka_metadata_destroy(metadata); @@ -2982,8 +2782,8 @@ ut_testReassignmentAfterOneConsumerAdded (rd_kafka_t *rk, } -static int ut_testSameSubscriptions (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +static int ut_testSameSubscriptions(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -2992,33 +2792,32 @@ static int ut_testSameSubscriptions (rd_kafka_t *rk, rd_kafka_metadata_topic_t mt[15]; int topic_cnt = RD_ARRAYSIZE(mt); rd_kafka_topic_partition_list_t *subscription = - rd_kafka_topic_partition_list_new(topic_cnt); + rd_kafka_topic_partition_list_new(topic_cnt); int i; - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { char topic[10]; - rd_snprintf(topic, sizeof(topic), "topic%d", i+1); + rd_snprintf(topic, sizeof(topic), "topic%d", i + 1); rd_strdupa(&mt[i].topic, topic); - mt[i].partition_cnt = i+1; - rd_kafka_topic_partition_list_add( - subscription, topic, RD_KAFKA_PARTITION_UA); + mt[i].partition_cnt = i + 1; + rd_kafka_topic_partition_list_add(subscription, topic, + RD_KAFKA_PARTITION_UA); } metadata = rd_kafka_metadata_new_topic_mock(mt, topic_cnt); - for (i = 1 ; i <= member_cnt ; i++) { + for (i = 1; i <= member_cnt; i++) { char name[16]; rd_snprintf(name, sizeof(name), "consumer%d", i); - ut_init_member(&members[i-1], name, NULL); + ut_init_member(&members[i - 1], name, NULL); rd_kafka_topic_partition_list_destroy( - members[i-1].rkgm_subscription); - members[i-1].rkgm_subscription = - rd_kafka_topic_partition_list_copy(subscription); + members[i - 1].rkgm_subscription); + members[i - 1].rkgm_subscription = + rd_kafka_topic_partition_list_copy(subscription); } - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = 
rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, member_cnt, metadata); @@ -3027,18 +2826,17 @@ static int ut_testSameSubscriptions (rd_kafka_t *rk, * Remove consumer5 */ rd_kafka_group_member_clear(&members[5]); - memmove(&members[5], &members[6], sizeof(*members) * (member_cnt-6)); + memmove(&members[5], &members[6], sizeof(*members) * (member_cnt - 6)); member_cnt--; - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, member_cnt, metadata); // FIXME: isSticky(); - for (i = 0 ; i < member_cnt ; i++) + for (i = 0; i < member_cnt; i++) rd_kafka_group_member_clear(&members[i]); rd_kafka_metadata_destroy(metadata); rd_kafka_topic_partition_list_destroy(subscription); @@ -3047,8 +2845,9 @@ static int ut_testSameSubscriptions (rd_kafka_t *rk, } -static int ut_testLargeAssignmentWithMultipleConsumersLeaving ( - rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { +static int ut_testLargeAssignmentWithMultipleConsumersLeaving( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; @@ -3059,40 +2858,38 @@ static int ut_testLargeAssignmentWithMultipleConsumersLeaving ( int topic_cnt = RD_ARRAYSIZE(mt); int i; - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { char topic[10]; - rd_snprintf(topic, sizeof(topic), "topic%d", i+1); + rd_snprintf(topic, sizeof(topic), "topic%d", i + 1); rd_strdupa(&mt[i].topic, topic); - mt[i].partition_cnt = i+1; + mt[i].partition_cnt = i + 1; } metadata = rd_kafka_metadata_new_topic_mock(mt, topic_cnt); - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { /* Java tests use 
a random set, this is more deterministic. */ int sub_cnt = ((i + 1) * 17) % topic_cnt; rd_kafka_topic_partition_list_t *subscription = - rd_kafka_topic_partition_list_new(sub_cnt); + rd_kafka_topic_partition_list_new(sub_cnt); char name[16]; int j; /* Subscribe to a subset of topics */ - for (j = 0 ; j < sub_cnt ; j++) + for (j = 0; j < sub_cnt; j++) rd_kafka_topic_partition_list_add( - subscription, - metadata->topics[j].topic, - RD_KAFKA_PARTITION_UA); + subscription, metadata->topics[j].topic, + RD_KAFKA_PARTITION_UA); - rd_snprintf(name, sizeof(name), "consumer%d", i+1); + rd_snprintf(name, sizeof(name), "consumer%d", i + 1); ut_init_member(&members[i], name, NULL); rd_kafka_topic_partition_list_destroy( - members[i].rkgm_subscription); + members[i].rkgm_subscription); members[i].rkgm_subscription = subscription; } - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, member_cnt, metadata); @@ -3100,22 +2897,21 @@ static int ut_testLargeAssignmentWithMultipleConsumersLeaving ( /* * Remove every 4th consumer (~50) */ - for (i = member_cnt-1 ; i >= 0 ; i -= 4) { + for (i = member_cnt - 1; i >= 0; i -= 4) { rd_kafka_group_member_clear(&members[i]); - memmove(&members[i], &members[i+1], - sizeof(*members) * (member_cnt-(i+1))); + memmove(&members[i], &members[i + 1], + sizeof(*members) * (member_cnt - (i + 1))); member_cnt--; } - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, member_cnt, metadata); // FIXME: isSticky(); - for (i = 0 ; i < member_cnt ; i++) + for (i = 
0; i < member_cnt; i++) rd_kafka_group_member_clear(&members[i]); rd_kafka_metadata_destroy(metadata); @@ -3123,8 +2919,8 @@ static int ut_testLargeAssignmentWithMultipleConsumersLeaving ( } -static int ut_testNewSubscription (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +static int ut_testNewSubscription(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -3132,35 +2928,30 @@ static int ut_testNewSubscription (rd_kafka_t *rk, int member_cnt = RD_ARRAYSIZE(members); int i; - metadata = rd_kafka_metadata_new_topic_mockv(5, - "topic1", 1, - "topic2", 2, - "topic3", 3, - "topic4", 4, - "topic5", 5); + metadata = rd_kafka_metadata_new_topic_mockv( + 5, "topic1", 1, "topic2", 2, "topic3", 3, "topic4", 4, "topic5", 5); - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { char name[16]; int j; rd_snprintf(name, sizeof(name), "consumer%d", i); ut_init_member(&members[i], name, NULL); - rd_kafka_topic_partition_list_destroy(members[i]. 
- rkgm_subscription); + rd_kafka_topic_partition_list_destroy( + members[i].rkgm_subscription); members[i].rkgm_subscription = - rd_kafka_topic_partition_list_new(5); + rd_kafka_topic_partition_list_new(5); - for (j = metadata->topic_cnt - (1 + i) ; j >= 0 ; j--) + for (j = metadata->topic_cnt - (1 + i); j >= 0; j--) rd_kafka_topic_partition_list_add( - members[i].rkgm_subscription, - metadata->topics[j].topic, - RD_KAFKA_PARTITION_UA); + members[i].rkgm_subscription, + metadata->topics[j].topic, RD_KAFKA_PARTITION_UA); } - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); @@ -3173,16 +2964,16 @@ static int ut_testNewSubscription (rd_kafka_t *rk, rd_kafka_topic_partition_list_add(members[0].rkgm_subscription, "topic1", RD_KAFKA_PARTITION_UA); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); isFullyBalanced(members, RD_ARRAYSIZE(members)); // FIXME: isSticky(); - for (i = 0 ; i < member_cnt ; i++) + for (i = 0; i < member_cnt; i++) rd_kafka_group_member_clear(&members[i]); rd_kafka_metadata_destroy(metadata); @@ -3190,13 +2981,13 @@ static int ut_testNewSubscription (rd_kafka_t *rk, } -static int ut_testMoveExistingAssignments (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +static int ut_testMoveExistingAssignments(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; 
rd_kafka_group_member_t members[4]; - int member_cnt = RD_ARRAYSIZE(members); + int member_cnt = RD_ARRAYSIZE(members); rd_kafka_topic_partition_list_t *assignments[4] = RD_ZERO_INIT; int i; int fails = 0; @@ -3208,14 +2999,13 @@ static int ut_testMoveExistingAssignments (rd_kafka_t *rk, ut_init_member(&members[2], "consumer3", "topic1", NULL); ut_init_member(&members[3], "consumer4", "topic1", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, member_cnt, metadata); - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { if (members[i].rkgm_assignment->cnt > 1) { RD_UT_WARN("%s assigned %d partitions, expected <= 1", members[i].rkgm_member_id->str, @@ -3223,22 +3013,21 @@ static int ut_testMoveExistingAssignments (rd_kafka_t *rk, fails++; } else if (members[i].rkgm_assignment->cnt == 1) { assignments[i] = rd_kafka_topic_partition_list_copy( - members[i].rkgm_assignment); + members[i].rkgm_assignment); } } /* * Remove potential group leader consumer1 */ - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - &members[1], member_cnt-1, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], + member_cnt - 1, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); - verifyValidityAndBalance(&members[1], member_cnt-1, metadata); + verifyValidityAndBalance(&members[1], member_cnt - 1, metadata); // FIXME: isSticky() - for (i = 1 ; i < member_cnt ; i++) { + for (i = 1; i < member_cnt; i++) { if (members[i].rkgm_assignment->cnt != 1) { RD_UT_WARN("%s assigned %d partitions, expected 1", members[i].rkgm_member_id->str, @@ -3246,16 +3035,17 @@ static int ut_testMoveExistingAssignments (rd_kafka_t *rk, fails++; } else if 
(assignments[i] && !rd_kafka_topic_partition_list_find( - assignments[i], - members[i].rkgm_assignment->elems[0].topic, - members[i].rkgm_assignment-> - elems[0].partition)) { - RD_UT_WARN("Stickiness was not honored for %s, " - "%s [%"PRId32"] not in previous assignment", - members[i].rkgm_member_id->str, - members[i].rkgm_assignment->elems[0].topic, - members[i].rkgm_assignment-> - elems[0].partition); + assignments[i], + members[i].rkgm_assignment->elems[0].topic, + members[i] + .rkgm_assignment->elems[0] + .partition)) { + RD_UT_WARN( + "Stickiness was not honored for %s, " + "%s [%" PRId32 "] not in previous assignment", + members[i].rkgm_member_id->str, + members[i].rkgm_assignment->elems[0].topic, + members[i].rkgm_assignment->elems[0].partition); fails++; } } @@ -3263,7 +3053,7 @@ static int ut_testMoveExistingAssignments (rd_kafka_t *rk, RD_UT_ASSERT(!fails, "See previous errors"); - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { rd_kafka_group_member_clear(&members[i]); if (assignments[i]) rd_kafka_topic_partition_list_destroy(assignments[i]); @@ -3275,8 +3065,7 @@ static int ut_testMoveExistingAssignments (rd_kafka_t *rk, - -static int ut_testStickiness (rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { +static int ut_testStickiness(rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -3284,51 +3073,45 @@ static int ut_testStickiness (rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { int member_cnt = RD_ARRAYSIZE(members); int i; - metadata = rd_kafka_metadata_new_topic_mockv(6, - "topic1", 1, - "topic2", 1, - "topic3", 1, - "topic4", 1, - "topic5", 1, - "topic6", 1); + metadata = rd_kafka_metadata_new_topic_mockv( + 6, "topic1", 1, "topic2", 1, "topic3", 1, "topic4", 1, "topic5", 1, + "topic6", 1); - ut_init_member(&members[0], "consumer1", - "topic1", "topic2", NULL); + ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL); 
rd_kafka_topic_partition_list_destroy(members[0].rkgm_assignment); members[0].rkgm_assignment = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, - "topic1", 0); + rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1", + 0); - ut_init_member(&members[1], "consumer2", - "topic1", "topic2", "topic3", "topic4", NULL); + ut_init_member(&members[1], "consumer2", "topic1", "topic2", "topic3", + "topic4", NULL); rd_kafka_topic_partition_list_destroy(members[1].rkgm_assignment); members[1].rkgm_assignment = rd_kafka_topic_partition_list_new(2); - rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, - "topic2", 0); - rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, - "topic3", 0); + rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic2", + 0); + rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic3", + 0); - ut_init_member(&members[2], "consumer3", - "topic4", "topic5", "topic6", NULL); + ut_init_member(&members[2], "consumer3", "topic4", "topic5", "topic6", + NULL); rd_kafka_topic_partition_list_destroy(members[2].rkgm_assignment); members[2].rkgm_assignment = rd_kafka_topic_partition_list_new(3); - rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, - "topic4", 0); - rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, - "topic5", 0); - rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, - "topic6", 0); + rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic4", + 0); + rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic5", + 0); + rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic6", + 0); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); 
verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); - for (i = 0 ; i < member_cnt ; i++) + for (i = 0; i < member_cnt; i++) rd_kafka_group_member_clear(&members[i]); rd_kafka_metadata_destroy(metadata); @@ -3339,8 +3122,7 @@ static int ut_testStickiness (rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { /** * @brief Verify stickiness across three rebalances. */ -static int -ut_testStickiness2 (rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { +static int ut_testStickiness2(rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -3355,102 +3137,64 @@ ut_testStickiness2 (rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { ut_init_member(&members[2], "consumer3", "topic1", NULL); /* Just consumer1 */ - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, 1, + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 1, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, 1, metadata); isFullyBalanced(members, 1); - verifyAssignment(&members[0], - "topic1", 0, - "topic1", 1, - "topic1", 2, - "topic1", 3, - "topic1", 4, - "topic1", 5, - NULL); + verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2, + "topic1", 3, "topic1", 4, "topic1", 5, NULL); /* consumer1 and consumer2 */ - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, 2, + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 2, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, 2, metadata); isFullyBalanced(members, 2); - verifyAssignment(&members[0], - "topic1", 3, - "topic1", 4, - "topic1", 5, + verifyAssignment(&members[0], "topic1", 3, "topic1", 4, "topic1", 5, NULL); - verifyAssignment(&members[1], - "topic1", 0, - "topic1", 1, - "topic1", 2, + verifyAssignment(&members[1], "topic1", 0, "topic1", 1, "topic1", 2, NULL); /* Run it 
twice, should be stable. */ - for (i = 0 ; i < 2 ; i++) { + for (i = 0; i < 2; i++) { /* consumer1, consumer2, and consumer3 */ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, 3, - errstr, sizeof(errstr)); + members, 3, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, 3, metadata); isFullyBalanced(members, 3); - verifyAssignment(&members[0], - "topic1", 4, - "topic1", 5, - NULL); - verifyAssignment(&members[1], - "topic1", 1, - "topic1", 2, - NULL); - verifyAssignment(&members[2], - "topic1", 0, - "topic1", 3, - NULL); + verifyAssignment(&members[0], "topic1", 4, "topic1", 5, NULL); + verifyAssignment(&members[1], "topic1", 1, "topic1", 2, NULL); + verifyAssignment(&members[2], "topic1", 0, "topic1", 3, NULL); } /* Remove consumer1 */ - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - &members[1], 2, + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], 2, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(&members[1], 2, metadata); isFullyBalanced(&members[1], 2); - verifyAssignment(&members[1], - "topic1", 1, - "topic1", 2, - "topic1", 5, + verifyAssignment(&members[1], "topic1", 1, "topic1", 2, "topic1", 5, NULL); - verifyAssignment(&members[2], - "topic1", 0, - "topic1", 3, - "topic1", 4, + verifyAssignment(&members[2], "topic1", 0, "topic1", 3, "topic1", 4, NULL); /* Remove consumer2 */ - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - &members[2], 1, + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[2], 1, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(&members[2], 1, metadata); isFullyBalanced(&members[2], 1); - verifyAssignment(&members[2], - "topic1", 0, - "topic1", 1, - "topic1", 2, - "topic1", 3, - "topic1", 4, - "topic1", 5, - NULL); + verifyAssignment(&members[2], "topic1", 0, "topic1", 1, "topic1", 2, 
+ "topic1", 3, "topic1", 4, "topic1", 5, NULL); - for (i = 0 ; i < member_cnt ; i++) + for (i = 0; i < member_cnt; i++) rd_kafka_group_member_clear(&members[i]); rd_kafka_metadata_destroy(metadata); @@ -3459,30 +3203,29 @@ ut_testStickiness2 (rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { static int -ut_testAssignmentUpdatedForDeletedTopic (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +ut_testAssignmentUpdatedForDeletedTopic(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; rd_kafka_group_member_t members[1]; - metadata = rd_kafka_metadata_new_topic_mockv(2, - "topic1", 1, - "topic3", 100); - ut_init_member(&members[0], "consumer1", - "topic1", "topic2", "topic3", NULL); + metadata = + rd_kafka_metadata_new_topic_mockv(2, "topic1", 1, "topic3", 100); + ut_init_member(&members[0], "consumer1", "topic1", "topic2", "topic3", + NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); isFullyBalanced(members, RD_ARRAYSIZE(members)); RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 1 + 100, - "Expected %d assigned partitions, not %d", - 1 + 100, members[0].rkgm_assignment->cnt); + "Expected %d assigned partitions, not %d", 1 + 100, + members[0].rkgm_assignment->cnt); rd_kafka_group_member_clear(&members[0]); rd_kafka_metadata_destroy(metadata); @@ -3491,8 +3234,9 @@ ut_testAssignmentUpdatedForDeletedTopic (rd_kafka_t *rk, } -static int ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted ( - rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) { +static int ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { 
rd_kafka_resp_err_t err; char errstr[512]; @@ -3503,9 +3247,9 @@ static int ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted ( ut_init_member(&members[0], "consumer1", "topic", NULL); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); @@ -3517,9 +3261,9 @@ static int ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted ( rd_kafka_metadata_destroy(metadata); metadata = rd_kafka_metadata_new_topic_mock(NULL, 0); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, RD_ARRAYSIZE(members), - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); @@ -3533,8 +3277,8 @@ static int ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted ( static int -ut_testConflictingPreviousAssignments (rd_kafka_t *rk, - const rd_kafka_assignor_t *rkas) { +ut_testConflictingPreviousAssignments(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_metadata_t *metadata; @@ -3551,42 +3295,41 @@ ut_testConflictingPreviousAssignments (rd_kafka_t *rk, ut_init_member(&members[0], "consumer1", "topic1", NULL); rd_kafka_topic_partition_list_destroy(members[0].rkgm_assignment); members[0].rkgm_assignment = rd_kafka_topic_partition_list_new(2); - rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, - "topic1", 0); - rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, - "topic1", 1); + rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1", + 0); + 
rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1", + 1); ut_init_member(&members[1], "consumer2", "topic1", NULL); rd_kafka_topic_partition_list_destroy(members[1].rkgm_assignment); members[1].rkgm_assignment = rd_kafka_topic_partition_list_new(2); - rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, - "topic1", 0); - rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, - "topic1", 1); + rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic1", + 0); + rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic1", + 1); - err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 1 && - members[1].rkgm_assignment->cnt == 1, + members[1].rkgm_assignment->cnt == 1, "Expected consumers to have 1 partition each, " "not %d and %d", members[0].rkgm_assignment->cnt, members[1].rkgm_assignment->cnt); RD_UT_ASSERT(members[0].rkgm_assignment->elems[0].partition != - members[1].rkgm_assignment->elems[0].partition, + members[1].rkgm_assignment->elems[0].partition, "Expected consumers to have different partitions " - "assigned, not same partition %"PRId32, + "assigned, not same partition %" PRId32, members[0].rkgm_assignment->elems[0].partition); verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); isFullyBalanced(members, RD_ARRAYSIZE(members)); /* FIXME: isSticky() */ - for (i = 0 ; i < member_cnt ; i++) + for (i = 0; i < member_cnt; i++) rd_kafka_group_member_clear(&members[i]); rd_kafka_metadata_destroy(metadata); @@ -3597,51 +3340,50 @@ ut_testConflictingPreviousAssignments (rd_kafka_t *rk, * from Java since random tests don't provide meaningful test coverage. 
*/ -static int rd_kafka_sticky_assignor_unittest (void) { +static int rd_kafka_sticky_assignor_unittest(void) { rd_kafka_conf_t *conf; rd_kafka_t *rk; int fails = 0; char errstr[256]; rd_kafka_assignor_t *rkas; - static int (*tests[]) (rd_kafka_t *, const rd_kafka_assignor_t *) = { - ut_testOneConsumerNoTopic, - ut_testOneConsumerNonexistentTopic, - ut_testOneConsumerOneTopic, - ut_testOnlyAssignsPartitionsFromSubscribedTopics, - ut_testOneConsumerMultipleTopics, - ut_testTwoConsumersOneTopicOnePartition, - ut_testTwoConsumersOneTopicTwoPartitions, - ut_testMultipleConsumersMixedTopicSubscriptions, - ut_testTwoConsumersTwoTopicsSixPartitions, - ut_testAddRemoveConsumerOneTopic, - ut_testPoorRoundRobinAssignmentScenario, - ut_testAddRemoveTopicTwoConsumers, - ut_testReassignmentAfterOneConsumerLeaves, - ut_testReassignmentAfterOneConsumerAdded, - ut_testSameSubscriptions, - ut_testLargeAssignmentWithMultipleConsumersLeaving, - ut_testNewSubscription, - ut_testMoveExistingAssignments, - ut_testStickiness, - ut_testStickiness2, - ut_testAssignmentUpdatedForDeletedTopic, - ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted, - ut_testConflictingPreviousAssignments, - NULL, + static int (*tests[])(rd_kafka_t *, const rd_kafka_assignor_t *) = { + ut_testOneConsumerNoTopic, + ut_testOneConsumerNonexistentTopic, + ut_testOneConsumerOneTopic, + ut_testOnlyAssignsPartitionsFromSubscribedTopics, + ut_testOneConsumerMultipleTopics, + ut_testTwoConsumersOneTopicOnePartition, + ut_testTwoConsumersOneTopicTwoPartitions, + ut_testMultipleConsumersMixedTopicSubscriptions, + ut_testTwoConsumersTwoTopicsSixPartitions, + ut_testAddRemoveConsumerOneTopic, + ut_testPoorRoundRobinAssignmentScenario, + ut_testAddRemoveTopicTwoConsumers, + ut_testReassignmentAfterOneConsumerLeaves, + ut_testReassignmentAfterOneConsumerAdded, + ut_testSameSubscriptions, + ut_testLargeAssignmentWithMultipleConsumersLeaving, + ut_testNewSubscription, + ut_testMoveExistingAssignments, + ut_testStickiness, + 
ut_testStickiness2, + ut_testAssignmentUpdatedForDeletedTopic, + ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted, + ut_testConflictingPreviousAssignments, + NULL, }; int i; conf = rd_kafka_conf_new(); - if (rd_kafka_conf_set(conf, "group.id", "test", - errstr, sizeof(errstr)) || + if (rd_kafka_conf_set(conf, "group.id", "test", errstr, + sizeof(errstr)) || rd_kafka_conf_set(conf, "partition.assignment.strategy", - "cooperative-sticky", - errstr, sizeof(errstr))) + "cooperative-sticky", errstr, sizeof(errstr))) RD_UT_FAIL("sticky assignor conf failed: %s", errstr); - rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), - NULL, 0); + rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), NULL, + 0); rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); RD_UT_ASSERT(rk, "sticky assignor client instantiation failed: %s", @@ -3650,14 +3392,14 @@ static int rd_kafka_sticky_assignor_unittest (void) { rkas = rd_kafka_assignor_find(rk, "cooperative-sticky"); RD_UT_ASSERT(rkas, "sticky assignor not found"); - for (i = 0 ; tests[i] ; i++) { + for (i = 0; tests[i]; i++) { rd_ts_t ts = rd_clock(); int r; RD_UT_SAY("[ Test #%d ]", i); r = tests[i](rk, rkas); - RD_UT_SAY("[ Test #%d ran for %.3fms ]", - i, (double)(rd_clock() - ts) / 1000.0); + RD_UT_SAY("[ Test #%d ran for %.3fms ]", i, + (double)(rd_clock() - ts) / 1000.0); RD_UT_ASSERT(!r, "^ failed"); @@ -3673,14 +3415,12 @@ static int rd_kafka_sticky_assignor_unittest (void) { /** * @brief Initialzie and add sticky assignor. 
*/ -rd_kafka_resp_err_t rd_kafka_sticky_assignor_init (rd_kafka_t *rk) { - return rd_kafka_assignor_add( - rk, "consumer", "cooperative-sticky", - RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE, - rd_kafka_sticky_assignor_assign_cb, - rd_kafka_sticky_assignor_get_metadata, - rd_kafka_sticky_assignor_on_assignment_cb, - rd_kafka_sticky_assignor_state_destroy, - rd_kafka_sticky_assignor_unittest, - NULL); +rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk) { + return rd_kafka_assignor_add(rk, "consumer", "cooperative-sticky", + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE, + rd_kafka_sticky_assignor_assign_cb, + rd_kafka_sticky_assignor_get_metadata, + rd_kafka_sticky_assignor_on_assignment_cb, + rd_kafka_sticky_assignor_state_destroy, + rd_kafka_sticky_assignor_unittest, NULL); } diff --git a/src/rdkafka_subscription.c b/src/rdkafka_subscription.c index d9df76ad56..0805893587 100644 --- a/src/rdkafka_subscription.c +++ b/src/rdkafka_subscription.c @@ -36,20 +36,20 @@ #include "rdkafka_int.h" -rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk) { +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk) { rd_kafka_cgrp_t *rkcg; if (!(rkcg = rd_kafka_cgrp_get(rk))) return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; - return rd_kafka_op_err_destroy(rd_kafka_op_req2(rkcg->rkcg_ops, - RD_KAFKA_OP_SUBSCRIBE)); + return rd_kafka_op_err_destroy( + rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_SUBSCRIBE)); } /** @returns 1 if the topic is invalid (bad regex, empty), else 0 if valid. 
*/ -static size_t _invalid_topic_cb (const rd_kafka_topic_partition_t *rktpar, - void *opaque) { +static size_t _invalid_topic_cb(const rd_kafka_topic_partition_t *rktpar, + void *opaque) { rd_regex_t *re; char errstr[1]; @@ -69,8 +69,8 @@ static size_t _invalid_topic_cb (const rd_kafka_topic_partition_t *rktpar, rd_kafka_resp_err_t -rd_kafka_subscribe (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *topics) { +rd_kafka_subscribe(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *topics) { rd_kafka_op_t *rko; rd_kafka_cgrp_t *rkcg; @@ -80,30 +80,29 @@ rd_kafka_subscribe (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; /* Validate topics */ - if (topics->cnt == 0 || - rd_kafka_topic_partition_list_sum(topics, - _invalid_topic_cb, NULL) > 0) + if (topics->cnt == 0 || rd_kafka_topic_partition_list_sum( + topics, _invalid_topic_cb, NULL) > 0) return RD_KAFKA_RESP_ERR__INVALID_ARG; topics_cpy = rd_kafka_topic_partition_list_copy(topics); - if (rd_kafka_topic_partition_list_has_duplicates(topics_cpy, - rd_true/*ignore partition field*/)) { + if (rd_kafka_topic_partition_list_has_duplicates( + topics_cpy, rd_true /*ignore partition field*/)) { rd_kafka_topic_partition_list_destroy(topics_cpy); return RD_KAFKA_RESP_ERR__INVALID_ARG; } - rko = rd_kafka_op_new(RD_KAFKA_OP_SUBSCRIBE); - rko->rko_u.subscribe.topics = topics_cpy; + rko = rd_kafka_op_new(RD_KAFKA_OP_SUBSCRIBE); + rko->rko_u.subscribe.topics = topics_cpy; return rd_kafka_op_err_destroy( - rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE)); } rd_kafka_error_t * -rd_kafka_assign0 (rd_kafka_t *rk, - rd_kafka_assign_method_t assign_method, - const rd_kafka_topic_partition_list_t *partitions) { +rd_kafka_assign0(rd_kafka_t *rk, + rd_kafka_assign_method_t assign_method, + const rd_kafka_topic_partition_list_t *partitions) { rd_kafka_op_t *rko; rd_kafka_cgrp_t *rkcg; @@ -118,21 +117,20 @@ rd_kafka_assign0 (rd_kafka_t *rk, if 
(partitions) rko->rko_u.assign.partitions = - rd_kafka_topic_partition_list_copy(partitions); + rd_kafka_topic_partition_list_copy(partitions); return rd_kafka_op_error_destroy( - rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE)); } rd_kafka_resp_err_t -rd_kafka_assign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *partitions) { +rd_kafka_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions) { rd_kafka_error_t *error; rd_kafka_resp_err_t err; - error = rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_ASSIGN, - partitions); + error = rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_ASSIGN, partitions); if (!error) err = RD_KAFKA_RESP_ERR_NO_ERROR; @@ -146,9 +144,8 @@ rd_kafka_assign (rd_kafka_t *rk, rd_kafka_error_t * -rd_kafka_incremental_assign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t - *partitions) { +rd_kafka_incremental_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions) { if (!partitions) return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, "partitions must not be NULL"); @@ -158,10 +155,9 @@ rd_kafka_incremental_assign (rd_kafka_t *rk, } -rd_kafka_error_t * -rd_kafka_incremental_unassign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t - *partitions) { +rd_kafka_error_t *rd_kafka_incremental_unassign( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions) { if (!partitions) return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, "partitions must not be NULL"); @@ -171,8 +167,7 @@ rd_kafka_incremental_unassign (rd_kafka_t *rk, } -int -rd_kafka_assignment_lost (rd_kafka_t *rk) { +int rd_kafka_assignment_lost(rd_kafka_t *rk) { rd_kafka_cgrp_t *rkcg; if (!(rkcg = rd_kafka_cgrp_get(rk))) @@ -182,8 +177,7 @@ rd_kafka_assignment_lost (rd_kafka_t *rk) { } -const char * -rd_kafka_rebalance_protocol (rd_kafka_t *rk) { +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk) { rd_kafka_op_t *rko; rd_kafka_cgrp_t 
*rkcg; const char *result; @@ -210,8 +204,8 @@ rd_kafka_rebalance_protocol (rd_kafka_t *rk) { rd_kafka_resp_err_t -rd_kafka_assignment (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **partitions) { +rd_kafka_assignment(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t **partitions) { rd_kafka_op_t *rko; rd_kafka_resp_err_t err; rd_kafka_cgrp_t *rkcg; @@ -220,13 +214,13 @@ rd_kafka_assignment (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_GET_ASSIGNMENT); - if (!rko) - return RD_KAFKA_RESP_ERR__TIMED_OUT; + if (!rko) + return RD_KAFKA_RESP_ERR__TIMED_OUT; err = rko->rko_err; - *partitions = rko->rko_u.assign.partitions; - rko->rko_u.assign.partitions = NULL; + *partitions = rko->rko_u.assign.partitions; + rko->rko_u.assign.partitions = NULL; rd_kafka_op_destroy(rko); if (!*partitions && !err) { @@ -238,9 +232,9 @@ rd_kafka_assignment (rd_kafka_t *rk, } rd_kafka_resp_err_t -rd_kafka_subscription (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **topics){ - rd_kafka_op_t *rko; +rd_kafka_subscription(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t **topics) { + rd_kafka_op_t *rko; rd_kafka_resp_err_t err; rd_kafka_cgrp_t *rkcg; @@ -248,13 +242,13 @@ rd_kafka_subscription (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_GET_SUBSCRIPTION); - if (!rko) - return RD_KAFKA_RESP_ERR__TIMED_OUT; + if (!rko) + return RD_KAFKA_RESP_ERR__TIMED_OUT; err = rko->rko_err; - *topics = rko->rko_u.subscribe.topics; - rko->rko_u.subscribe.topics = NULL; + *topics = rko->rko_u.subscribe.topics; + rko->rko_u.subscribe.topics = NULL; rd_kafka_op_destroy(rko); if (!*topics && !err) { @@ -267,23 +261,18 @@ rd_kafka_subscription (rd_kafka_t *rk, rd_kafka_resp_err_t -rd_kafka_pause_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions) { - return rd_kafka_toppars_pause_resume(rk, - rd_true/*pause*/, - RD_SYNC, +rd_kafka_pause_partitions(rd_kafka_t 
*rk, + rd_kafka_topic_partition_list_t *partitions) { + return rd_kafka_toppars_pause_resume(rk, rd_true /*pause*/, RD_SYNC, RD_KAFKA_TOPPAR_F_APP_PAUSE, partitions); } rd_kafka_resp_err_t -rd_kafka_resume_partitions (rd_kafka_t *rk, +rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions) { - return rd_kafka_toppars_pause_resume(rk, - rd_false/*resume*/, - RD_SYNC, + return rd_kafka_toppars_pause_resume(rk, rd_false /*resume*/, RD_SYNC, RD_KAFKA_TOPPAR_F_APP_PAUSE, partitions); } - diff --git a/src/rdkafka_timer.c b/src/rdkafka_timer.c index ed88a1ba5e..cdc6cf3195 100644 --- a/src/rdkafka_timer.c +++ b/src/rdkafka_timer.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -33,34 +33,34 @@ #include "rdkafka_queue.h" -static RD_INLINE void rd_kafka_timers_lock (rd_kafka_timers_t *rkts) { +static RD_INLINE void rd_kafka_timers_lock(rd_kafka_timers_t *rkts) { mtx_lock(&rkts->rkts_lock); } -static RD_INLINE void rd_kafka_timers_unlock (rd_kafka_timers_t *rkts) { +static RD_INLINE void rd_kafka_timers_unlock(rd_kafka_timers_t *rkts) { mtx_unlock(&rkts->rkts_lock); } -static RD_INLINE int rd_kafka_timer_started (const rd_kafka_timer_t *rtmr) { - return rtmr->rtmr_interval ? 1 : 0; +static RD_INLINE int rd_kafka_timer_started(const rd_kafka_timer_t *rtmr) { + return rtmr->rtmr_interval ? 1 : 0; } -static RD_INLINE int rd_kafka_timer_scheduled (const rd_kafka_timer_t *rtmr) { - return rtmr->rtmr_next ? 1 : 0; +static RD_INLINE int rd_kafka_timer_scheduled(const rd_kafka_timer_t *rtmr) { + return rtmr->rtmr_next ? 
1 : 0; } -static int rd_kafka_timer_cmp (const void *_a, const void *_b) { - const rd_kafka_timer_t *a = _a, *b = _b; +static int rd_kafka_timer_cmp(const void *_a, const void *_b) { + const rd_kafka_timer_t *a = _a, *b = _b; return RD_CMP(a->rtmr_next, b->rtmr_next); } -static void rd_kafka_timer_unschedule (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr) { - TAILQ_REMOVE(&rkts->rkts_timers, rtmr, rtmr_link); - rtmr->rtmr_next = 0; +static void rd_kafka_timer_unschedule(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr) { + TAILQ_REMOVE(&rkts->rkts_timers, rtmr, rtmr_link); + rtmr->rtmr_next = 0; } @@ -71,9 +71,9 @@ static void rd_kafka_timer_unschedule (rd_kafka_timers_t *rkts, * * @locks_required timers_lock() */ -static void rd_kafka_timer_schedule_next (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, - rd_ts_t abs_time) { +static void rd_kafka_timer_schedule_next(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t abs_time) { rd_kafka_timer_t *first; rtmr->rtmr_next = abs_time; @@ -97,19 +97,20 @@ static void rd_kafka_timer_schedule_next (rd_kafka_timers_t *rkts, * * @locks_required timers_lock() */ -static void rd_kafka_timer_schedule (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, int extra_us) { +static void rd_kafka_timer_schedule(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + int extra_us) { - /* Timer has been stopped */ - if (!rtmr->rtmr_interval) - return; + /* Timer has been stopped */ + if (!rtmr->rtmr_interval) + return; /* Timers framework is terminating */ if (unlikely(!rkts->rkts_enabled)) return; rd_kafka_timer_schedule_next( - rkts, rtmr, rd_clock() + rtmr->rtmr_interval + extra_us); + rkts, rtmr, rd_clock() + rtmr->rtmr_interval + extra_us); } /** @@ -118,24 +119,25 @@ static void rd_kafka_timer_schedule (rd_kafka_timers_t *rkts, * * @returns 1 if the timer was started (before being stopped), else 0. 
*/ -int rd_kafka_timer_stop (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr, - int lock) { - if (lock) - rd_kafka_timers_lock(rkts); +int rd_kafka_timer_stop(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + int lock) { + if (lock) + rd_kafka_timers_lock(rkts); - if (!rd_kafka_timer_started(rtmr)) { - if (lock) - rd_kafka_timers_unlock(rkts); - return 0; - } + if (!rd_kafka_timer_started(rtmr)) { + if (lock) + rd_kafka_timers_unlock(rkts); + return 0; + } - if (rd_kafka_timer_scheduled(rtmr)) - rd_kafka_timer_unschedule(rkts, rtmr); + if (rd_kafka_timer_scheduled(rtmr)) + rd_kafka_timer_unschedule(rkts, rtmr); - rtmr->rtmr_interval = 0; + rtmr->rtmr_interval = 0; - if (lock) - rd_kafka_timers_unlock(rkts); + if (lock) + rd_kafka_timers_unlock(rkts); return 1; } @@ -144,8 +146,8 @@ int rd_kafka_timer_stop (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr, /** * @returns true if timer is started, else false. */ -rd_bool_t rd_kafka_timer_is_started (rd_kafka_timers_t *rkts, - const rd_kafka_timer_t *rtmr) { +rd_bool_t rd_kafka_timer_is_started(rd_kafka_timers_t *rkts, + const rd_kafka_timer_t *rtmr) { rd_bool_t ret; rd_kafka_timers_lock(rkts); ret = rtmr->rtmr_interval != 0; @@ -165,39 +167,40 @@ rd_bool_t rd_kafka_timer_is_started (rd_kafka_timers_t *rkts, * * Use rd_kafka_timer_stop() to stop a timer. 
*/ -void rd_kafka_timer_start0 (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, rd_ts_t interval, - rd_bool_t oneshot, rd_bool_t restart, - void (*callback) (rd_kafka_timers_t *rkts, - void *arg), - void *arg) { - rd_kafka_timers_lock(rkts); +void rd_kafka_timer_start0(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t interval, + rd_bool_t oneshot, + rd_bool_t restart, + void (*callback)(rd_kafka_timers_t *rkts, void *arg), + void *arg) { + rd_kafka_timers_lock(rkts); if (!restart && rd_kafka_timer_scheduled(rtmr)) { rd_kafka_timers_unlock(rkts); return; } - rd_kafka_timer_stop(rkts, rtmr, 0/*!lock*/); + rd_kafka_timer_stop(rkts, rtmr, 0 /*!lock*/); /* Make sure the timer interval is non-zero or the timer * won't be scheduled, which is not what the caller of .._start*() * would expect. */ rtmr->rtmr_interval = interval == 0 ? 1 : interval; - rtmr->rtmr_callback = callback; - rtmr->rtmr_arg = arg; + rtmr->rtmr_callback = callback; + rtmr->rtmr_arg = arg; rtmr->rtmr_oneshot = oneshot; - rd_kafka_timer_schedule(rkts, rtmr, 0); + rd_kafka_timer_schedule(rkts, rtmr, 0); - rd_kafka_timers_unlock(rkts); + rd_kafka_timers_unlock(rkts); } /** * Delay the next timer invocation by '2 * rtmr->rtmr_interval' */ -void rd_kafka_timer_exp_backoff (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr) { +void rd_kafka_timer_exp_backoff(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr) { rd_kafka_timers_lock(rkts); if (rd_kafka_timer_scheduled(rtmr)) { rtmr->rtmr_interval *= 2; @@ -213,9 +216,9 @@ void rd_kafka_timer_exp_backoff (rd_kafka_timers_t *rkts, * @locks_required none * @locks_acquired timers_lock */ -void rd_kafka_timer_override_once (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, - rd_ts_t interval) { +void rd_kafka_timer_override_once(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t interval) { rd_kafka_timers_lock(rkts); if (rd_kafka_timer_scheduled(rtmr)) rd_kafka_timer_unschedule(rkts, rtmr); @@ -228,9 +231,10 @@ void 
rd_kafka_timer_override_once (rd_kafka_timers_t *rkts, * @returns the delta time to the next time (>=0) this timer fires, or -1 * if timer is stopped. */ -rd_ts_t rd_kafka_timer_next (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr, - int do_lock) { - rd_ts_t now = rd_clock(); +rd_ts_t rd_kafka_timer_next(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + int do_lock) { + rd_ts_t now = rd_clock(); rd_ts_t delta = -1; if (do_lock) @@ -253,38 +257,38 @@ rd_ts_t rd_kafka_timer_next (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr, * Interrupt rd_kafka_timers_run(). * Used for termination. */ -void rd_kafka_timers_interrupt (rd_kafka_timers_t *rkts) { - rd_kafka_timers_lock(rkts); - cnd_signal(&rkts->rkts_cond); - rd_kafka_timers_unlock(rkts); +void rd_kafka_timers_interrupt(rd_kafka_timers_t *rkts) { + rd_kafka_timers_lock(rkts); + cnd_signal(&rkts->rkts_cond); + rd_kafka_timers_unlock(rkts); } /** * Returns the delta time to the next timer to fire, capped by 'timeout_ms'. */ -rd_ts_t rd_kafka_timers_next (rd_kafka_timers_t *rkts, int timeout_us, - int do_lock) { - rd_ts_t now = rd_clock(); - rd_ts_t sleeptime = 0; - rd_kafka_timer_t *rtmr; - - if (do_lock) - rd_kafka_timers_lock(rkts); - - if (likely((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) != NULL)) { - sleeptime = rtmr->rtmr_next - now; - if (sleeptime < 0) - sleeptime = 0; - else if (sleeptime > (rd_ts_t)timeout_us) - sleeptime = (rd_ts_t)timeout_us; - } else - sleeptime = (rd_ts_t)timeout_us; - - if (do_lock) - rd_kafka_timers_unlock(rkts); - - return sleeptime; +rd_ts_t +rd_kafka_timers_next(rd_kafka_timers_t *rkts, int timeout_us, int do_lock) { + rd_ts_t now = rd_clock(); + rd_ts_t sleeptime = 0; + rd_kafka_timer_t *rtmr; + + if (do_lock) + rd_kafka_timers_lock(rkts); + + if (likely((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) != NULL)) { + sleeptime = rtmr->rtmr_next - now; + if (sleeptime < 0) + sleeptime = 0; + else if (sleeptime > (rd_ts_t)timeout_us) + sleeptime = (rd_ts_t)timeout_us; + } else + sleeptime = 
(rd_ts_t)timeout_us; + + if (do_lock) + rd_kafka_timers_unlock(rkts); + + return sleeptime; } @@ -292,35 +296,33 @@ rd_ts_t rd_kafka_timers_next (rd_kafka_timers_t *rkts, int timeout_us, * Dispatch timers. * Will block up to 'timeout' microseconds before returning. */ -void rd_kafka_timers_run (rd_kafka_timers_t *rkts, int timeout_us) { - rd_ts_t now = rd_clock(); - rd_ts_t end = now + timeout_us; +void rd_kafka_timers_run(rd_kafka_timers_t *rkts, int timeout_us) { + rd_ts_t now = rd_clock(); + rd_ts_t end = now + timeout_us; rd_kafka_timers_lock(rkts); - while (!rd_kafka_terminating(rkts->rkts_rk) && now <= end) { - int64_t sleeptime; - rd_kafka_timer_t *rtmr; - - if (timeout_us != RD_POLL_NOWAIT) { - sleeptime = rd_kafka_timers_next(rkts, - timeout_us, - 0/*no-lock*/); + while (!rd_kafka_terminating(rkts->rkts_rk) && now <= end) { + int64_t sleeptime; + rd_kafka_timer_t *rtmr; - if (sleeptime > 0) { - cnd_timedwait_ms(&rkts->rkts_cond, - &rkts->rkts_lock, - (int)(sleeptime / 1000)); + if (timeout_us != RD_POLL_NOWAIT) { + sleeptime = rd_kafka_timers_next(rkts, timeout_us, + 0 /*no-lock*/); - } - } + if (sleeptime > 0) { + cnd_timedwait_ms(&rkts->rkts_cond, + &rkts->rkts_lock, + (int)(sleeptime / 1000)); + } + } - now = rd_clock(); + now = rd_clock(); - while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) && - rtmr->rtmr_next <= now) { + while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) && + rtmr->rtmr_next <= now) { - rd_kafka_timer_unschedule(rkts, rtmr); + rd_kafka_timer_unschedule(rkts, rtmr); /* If timer must only be fired once, * disable it now prior to callback. */ @@ -329,28 +331,28 @@ void rd_kafka_timers_run (rd_kafka_timers_t *rkts, int timeout_us) { rd_kafka_timers_unlock(rkts); - rtmr->rtmr_callback(rkts, rtmr->rtmr_arg); + rtmr->rtmr_callback(rkts, rtmr->rtmr_arg); rd_kafka_timers_lock(rkts); - /* Restart timer, unless it has been stopped, or - * already reschedueld (start()ed) from callback. 
*/ - if (rd_kafka_timer_started(rtmr) && - !rd_kafka_timer_scheduled(rtmr)) - rd_kafka_timer_schedule(rkts, rtmr, 0); - } - - if (timeout_us == RD_POLL_NOWAIT) { - /* Only iterate once, even if rd_clock doesn't change */ - break; - } - } + /* Restart timer, unless it has been stopped, or + * already reschedueld (start()ed) from callback. */ + if (rd_kafka_timer_started(rtmr) && + !rd_kafka_timer_scheduled(rtmr)) + rd_kafka_timer_schedule(rkts, rtmr, 0); + } + + if (timeout_us == RD_POLL_NOWAIT) { + /* Only iterate once, even if rd_clock doesn't change */ + break; + } + } - rd_kafka_timers_unlock(rkts); + rd_kafka_timers_unlock(rkts); } -void rd_kafka_timers_destroy (rd_kafka_timers_t *rkts) { +void rd_kafka_timers_destroy(rd_kafka_timers_t *rkts) { rd_kafka_timer_t *rtmr; rd_kafka_timers_lock(rkts); @@ -364,13 +366,14 @@ void rd_kafka_timers_destroy (rd_kafka_timers_t *rkts) { mtx_destroy(&rkts->rkts_lock); } -void rd_kafka_timers_init (rd_kafka_timers_t *rkts, rd_kafka_t *rk, - struct rd_kafka_q_s *wakeq) { +void rd_kafka_timers_init(rd_kafka_timers_t *rkts, + rd_kafka_t *rk, + struct rd_kafka_q_s *wakeq) { memset(rkts, 0, sizeof(*rkts)); rkts->rkts_rk = rk; TAILQ_INIT(&rkts->rkts_timers); mtx_init(&rkts->rkts_lock, mtx_plain); cnd_init(&rkts->rkts_cond); rkts->rkts_enabled = 1; - rkts->rkts_wakeq = wakeq; + rkts->rkts_wakeq = wakeq; } diff --git a/src/rdkafka_timer.h b/src/rdkafka_timer.h index 8a50b556ce..e3cadd7b9f 100644 --- a/src/rdkafka_timer.h +++ b/src/rdkafka_timer.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. 
+ * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -40,8 +40,8 @@ typedef struct rd_kafka_timers_s { struct rd_kafka_s *rkts_rk; - mtx_t rkts_lock; - cnd_t rkts_cond; + mtx_t rkts_lock; + cnd_t rkts_cond; /** Optional wake-up (q_yield()) to wake up when a new timer * is scheduled that will fire prior to any existing timers. 
@@ -49,59 +49,66 @@ typedef struct rd_kafka_timers_s { * in the same loop as timers_run(). */ struct rd_kafka_q_s *rkts_wakeq; - int rkts_enabled; + int rkts_enabled; } rd_kafka_timers_t; typedef struct rd_kafka_timer_s { - TAILQ_ENTRY(rd_kafka_timer_s) rtmr_link; + TAILQ_ENTRY(rd_kafka_timer_s) rtmr_link; - rd_ts_t rtmr_next; - rd_ts_t rtmr_interval; /* interval in microseconds */ - rd_bool_t rtmr_oneshot; /**< Only fire once. */ + rd_ts_t rtmr_next; + rd_ts_t rtmr_interval; /* interval in microseconds */ + rd_bool_t rtmr_oneshot; /**< Only fire once. */ - void (*rtmr_callback) (rd_kafka_timers_t *rkts, void *arg); - void *rtmr_arg; + void (*rtmr_callback)(rd_kafka_timers_t *rkts, void *arg); + void *rtmr_arg; } rd_kafka_timer_t; -int rd_kafka_timer_stop (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, int lock); -void rd_kafka_timer_start0 (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, rd_ts_t interval, - rd_bool_t oneshot, rd_bool_t restart, - void (*callback) (rd_kafka_timers_t *rkts, - void *arg), - void *arg); -#define rd_kafka_timer_start(rkts,rtmr,interval,callback,arg) \ - rd_kafka_timer_start0(rkts,rtmr,interval,rd_false,rd_true,callback,arg) -#define rd_kafka_timer_start_oneshot(rkts,rtmr,restart,interval,callback,arg) \ - rd_kafka_timer_start0(rkts,rtmr,interval,rd_true,restart,callback,arg) - -void rd_kafka_timer_exp_backoff (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr); -rd_ts_t rd_kafka_timer_next (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr, - int do_lock); - -void rd_kafka_timer_override_once (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, - rd_ts_t interval); +int rd_kafka_timer_stop(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + int lock); +void rd_kafka_timer_start0(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t interval, + rd_bool_t oneshot, + rd_bool_t restart, + void (*callback)(rd_kafka_timers_t *rkts, void *arg), + void *arg); +#define rd_kafka_timer_start(rkts, rtmr, interval, callback, arg) \ + 
rd_kafka_timer_start0(rkts, rtmr, interval, rd_false, rd_true, \ + callback, arg) +#define rd_kafka_timer_start_oneshot(rkts, rtmr, restart, interval, callback, \ + arg) \ + rd_kafka_timer_start0(rkts, rtmr, interval, rd_true, restart, \ + callback, arg) + +void rd_kafka_timer_exp_backoff(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr); +rd_ts_t rd_kafka_timer_next(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + int do_lock); + +void rd_kafka_timer_override_once(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t interval); /** * @returns true if timer is started. * * @remark Must only be called in the timer's thread (not thread-safe) */ -rd_bool_t rd_kafka_timer_is_started (rd_kafka_timers_t *rkts, - const rd_kafka_timer_t *rtmr); - -void rd_kafka_timers_interrupt (rd_kafka_timers_t *rkts); -rd_ts_t rd_kafka_timers_next (rd_kafka_timers_t *rkts, int timeout_ms, - int do_lock); -void rd_kafka_timers_run (rd_kafka_timers_t *rkts, int timeout_us); -void rd_kafka_timers_destroy (rd_kafka_timers_t *rkts); -void rd_kafka_timers_init (rd_kafka_timers_t *rkte, rd_kafka_t *rk, - struct rd_kafka_q_s *wakeq); +rd_bool_t rd_kafka_timer_is_started(rd_kafka_timers_t *rkts, + const rd_kafka_timer_t *rtmr); + +void rd_kafka_timers_interrupt(rd_kafka_timers_t *rkts); +rd_ts_t +rd_kafka_timers_next(rd_kafka_timers_t *rkts, int timeout_ms, int do_lock); +void rd_kafka_timers_run(rd_kafka_timers_t *rkts, int timeout_us); +void rd_kafka_timers_destroy(rd_kafka_timers_t *rkts); +void rd_kafka_timers_init(rd_kafka_timers_t *rkte, + rd_kafka_t *rk, + struct rd_kafka_q_s *wakeq); #endif /* _RDKAFKA_TIMER_H_ */ diff --git a/src/rdkafka_topic.c b/src/rdkafka_topic.c index ce9ba3f7ca..cbee469674 100644 --- a/src/rdkafka_topic.c +++ b/src/rdkafka_topic.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012,2013 Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -44,18 +44,14 @@ #endif -const char *rd_kafka_topic_state_names[] = { - "unknown", - "exists", - "notexists", - "error" -}; +const char *rd_kafka_topic_state_names[] = {"unknown", "exists", "notexists", + "error"}; static int -rd_kafka_topic_metadata_update (rd_kafka_topic_t *rkt, - const struct rd_kafka_metadata_topic *mdt, - rd_ts_t ts_insert); +rd_kafka_topic_metadata_update(rd_kafka_topic_t *rkt, + const struct rd_kafka_metadata_topic *mdt, + rd_ts_t ts_insert); /** @@ -69,16 +65,16 @@ rd_kafka_topic_metadata_update (rd_kafka_topic_t *rkt, * The topic_t exposed in rd_kafka_message_t is NOT covered and is handled * like a standard internal -> app pointer conversion (keep_a()). */ -static void rd_kafka_topic_keep_app (rd_kafka_topic_t *rkt) { - if (rd_refcnt_add(&rkt->rkt_app_refcnt) == 1) +static void rd_kafka_topic_keep_app(rd_kafka_topic_t *rkt) { + if (rd_refcnt_add(&rkt->rkt_app_refcnt) == 1) rd_kafka_topic_keep(rkt); } /** * @brief drop rkt app reference */ -static void rd_kafka_topic_destroy_app (rd_kafka_topic_t *app_rkt) { - rd_kafka_topic_t *rkt = app_rkt; +static void rd_kafka_topic_destroy_app(rd_kafka_topic_t *app_rkt) { + rd_kafka_topic_t *rkt = app_rkt; rd_assert(!rd_kafka_rkt_is_lw(app_rkt)); @@ -92,9 +88,9 @@ static void rd_kafka_topic_destroy_app (rd_kafka_topic_t *app_rkt) { /** * Final destructor for topic. Refcnt must be 0. 
*/ -void rd_kafka_topic_destroy_final (rd_kafka_topic_t *rkt) { +void rd_kafka_topic_destroy_final(rd_kafka_topic_t *rkt) { - rd_kafka_assert(rkt->rkt_rk, rd_refcnt_get(&rkt->rkt_refcnt) == 0); + rd_kafka_assert(rkt->rkt_rk, rd_refcnt_get(&rkt->rkt_refcnt) == 0); rd_kafka_wrlock(rkt->rkt_rk); TAILQ_REMOVE(&rkt->rkt_rk->rk_topics, rkt, rkt_link); @@ -107,16 +103,16 @@ void rd_kafka_topic_destroy_final (rd_kafka_topic_t *rkt) { rd_avg_destroy(&rkt->rkt_avg_batchsize); rd_avg_destroy(&rkt->rkt_avg_batchcnt); - if (rkt->rkt_topic) - rd_kafkap_str_destroy(rkt->rkt_topic); + if (rkt->rkt_topic) + rd_kafkap_str_destroy(rkt->rkt_topic); - rd_kafka_anyconf_destroy(_RK_TOPIC, &rkt->rkt_conf); + rd_kafka_anyconf_destroy(_RK_TOPIC, &rkt->rkt_conf); - rwlock_destroy(&rkt->rkt_lock); + rwlock_destroy(&rkt->rkt_lock); rd_refcnt_destroy(&rkt->rkt_app_refcnt); rd_refcnt_destroy(&rkt->rkt_refcnt); - rd_free(rkt); + rd_free(rkt); } /** @@ -124,7 +120,7 @@ void rd_kafka_topic_destroy_final (rd_kafka_topic_t *rkt) { * @warning MUST ONLY BE CALLED BY THE APPLICATION. * Use rd_kafka_topic_destroy0() for all internal use. 
*/ -void rd_kafka_topic_destroy (rd_kafka_topic_t *app_rkt) { +void rd_kafka_topic_destroy(rd_kafka_topic_t *app_rkt) { rd_kafka_lwtopic_t *lrkt; if (unlikely((lrkt = rd_kafka_rkt_get_lw(app_rkt)) != NULL)) rd_kafka_lwtopic_destroy(lrkt); @@ -141,51 +137,53 @@ void rd_kafka_topic_destroy (rd_kafka_topic_t *app_rkt) { * * Locality: any thread */ -rd_kafka_topic_t *rd_kafka_topic_find_fl (const char *func, int line, - rd_kafka_t *rk, - const char *topic, - int do_lock) { - rd_kafka_topic_t *rkt; +rd_kafka_topic_t *rd_kafka_topic_find_fl(const char *func, + int line, + rd_kafka_t *rk, + const char *topic, + int do_lock) { + rd_kafka_topic_t *rkt; if (do_lock) rd_kafka_rdlock(rk); - TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - if (!rd_kafkap_str_cmp_str(rkt->rkt_topic, topic)) { + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + if (!rd_kafkap_str_cmp_str(rkt->rkt_topic, topic)) { rd_kafka_topic_keep(rkt); - break; - } - } + break; + } + } if (do_lock) rd_kafka_rdunlock(rk); - return rkt; + return rkt; } /** * Same semantics as ..find() but takes a Kafka protocol string instead. */ -rd_kafka_topic_t *rd_kafka_topic_find0_fl (const char *func, int line, - rd_kafka_t *rk, - const rd_kafkap_str_t *topic) { - rd_kafka_topic_t *rkt; - - rd_kafka_rdlock(rk); - TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - if (!rd_kafkap_str_cmp(rkt->rkt_topic, topic)) { +rd_kafka_topic_t *rd_kafka_topic_find0_fl(const char *func, + int line, + rd_kafka_t *rk, + const rd_kafkap_str_t *topic) { + rd_kafka_topic_t *rkt; + + rd_kafka_rdlock(rk); + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + if (!rd_kafkap_str_cmp(rkt->rkt_topic, topic)) { rd_kafka_topic_keep(rkt); - break; - } - } - rd_kafka_rdunlock(rk); + break; + } + } + rd_kafka_rdunlock(rk); - return rkt; + return rkt; } /** * @brief rd_kafka_topic_t comparator. 
*/ -int rd_kafka_topic_cmp_rkt (const void *_a, const void *_b) { +int rd_kafka_topic_cmp_rkt(const void *_a, const void *_b) { rd_kafka_topic_t *rkt_a = (void *)_a, *rkt_b = (void *)_b; if (rkt_a == rkt_b) @@ -198,7 +196,7 @@ int rd_kafka_topic_cmp_rkt (const void *_a, const void *_b) { /** * @brief Destroy/free a light-weight topic object. */ -void rd_kafka_lwtopic_destroy (rd_kafka_lwtopic_t *lrkt) { +void rd_kafka_lwtopic_destroy(rd_kafka_lwtopic_t *lrkt) { rd_assert(rd_kafka_rkt_is_lw((const rd_kafka_topic_t *)lrkt)); if (rd_refcnt_sub(&lrkt->lrkt_refcnt) > 0) return; @@ -215,7 +213,7 @@ void rd_kafka_lwtopic_destroy (rd_kafka_lwtopic_t *lrkt) { * to the proper rd_kafka_itopic_t for outgoing APIs * (such as rd_kafka_message_t) when there is no full topic object available. */ -rd_kafka_lwtopic_t *rd_kafka_lwtopic_new (rd_kafka_t *rk, const char *topic) { +rd_kafka_lwtopic_t *rd_kafka_lwtopic_new(rd_kafka_t *rk, const char *topic) { rd_kafka_lwtopic_t *lrkt; size_t topic_len = strlen(topic); @@ -224,8 +222,8 @@ rd_kafka_lwtopic_t *rd_kafka_lwtopic_new (rd_kafka_t *rk, const char *topic) { memcpy(lrkt->lrkt_magic, "LRKT", 4); lrkt->lrkt_rk = rk; rd_refcnt_init(&lrkt->lrkt_refcnt, 1); - lrkt->lrkt_topic = (char *)(lrkt+1); - memcpy(lrkt->lrkt_topic, topic, topic_len+1); + lrkt->lrkt_topic = (char *)(lrkt + 1); + memcpy(lrkt->lrkt_topic, topic, topic_len + 1); return lrkt; } @@ -241,15 +239,15 @@ rd_kafka_lwtopic_t *rd_kafka_lwtopic_new (rd_kafka_t *rk, const char *topic) { * This allows the application to (unknowingly) pass a light-weight * topic object to any proper-aware public API. 
*/ -rd_kafka_topic_t *rd_kafka_topic_proper (rd_kafka_topic_t *app_rkt) { +rd_kafka_topic_t *rd_kafka_topic_proper(rd_kafka_topic_t *app_rkt) { rd_kafka_lwtopic_t *lrkt; if (likely(!(lrkt = rd_kafka_rkt_get_lw(app_rkt)))) return app_rkt; /* Create proper topic object */ - return rd_kafka_topic_new0(lrkt->lrkt_rk, lrkt->lrkt_topic, - NULL, NULL, 0); + return rd_kafka_topic_new0(lrkt->lrkt_rk, lrkt->lrkt_topic, NULL, NULL, + 0); } @@ -258,38 +256,37 @@ rd_kafka_topic_t *rd_kafka_topic_proper (rd_kafka_topic_t *app_rkt) { * * @locality any */ -rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, - const char *topic, - rd_kafka_topic_conf_t *conf, - int *existing, - int do_lock) { - rd_kafka_topic_t *rkt; +rd_kafka_topic_t *rd_kafka_topic_new0(rd_kafka_t *rk, + const char *topic, + rd_kafka_topic_conf_t *conf, + int *existing, + int do_lock) { + rd_kafka_topic_t *rkt; const struct rd_kafka_metadata_cache_entry *rkmce; const char *conf_err; const char *used_conf_str; - /* Verify configuration. - * Maximum topic name size + headers must never exceed message.max.bytes - * which is min-capped to 1000. - * See rd_kafka_broker_produce_toppar() and rdkafka_conf.c */ - if (!topic || strlen(topic) > 512) { - if (conf) - rd_kafka_topic_conf_destroy(conf); - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, - EINVAL); - return NULL; - } - - if (do_lock) + /* Verify configuration. + * Maximum topic name size + headers must never exceed message.max.bytes + * which is min-capped to 1000. 
+ * See rd_kafka_broker_produce_toppar() and rdkafka_conf.c */ + if (!topic || strlen(topic) > 512) { + if (conf) + rd_kafka_topic_conf_destroy(conf); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); + return NULL; + } + + if (do_lock) rd_kafka_wrlock(rk); - if ((rkt = rd_kafka_topic_find(rk, topic, 0/*no lock*/))) { + if ((rkt = rd_kafka_topic_find(rk, topic, 0 /*no lock*/))) { if (do_lock) rd_kafka_wrunlock(rk); - if (conf) - rd_kafka_topic_conf_destroy(conf); + if (conf) + rd_kafka_topic_conf_destroy(conf); if (existing) *existing = 1; - return rkt; + return rkt; } if (!conf) { @@ -297,7 +294,7 @@ rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, conf = rd_kafka_topic_conf_dup(rk->rk_conf.topic_conf); used_conf_str = "default_topic_conf"; } else { - conf = rd_kafka_topic_conf_new(); + conf = rd_kafka_topic_conf_new(); used_conf_str = "empty"; } } else { @@ -306,14 +303,15 @@ rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, /* Verify and finalize topic configuration */ - if ((conf_err = rd_kafka_topic_conf_finalize(rk->rk_type, - &rk->rk_conf, conf))) { + if ((conf_err = rd_kafka_topic_conf_finalize(rk->rk_type, &rk->rk_conf, + conf))) { if (do_lock) rd_kafka_wrunlock(rk); /* Incompatible configuration settings */ rd_kafka_log(rk, LOG_ERR, "TOPICCONF", "Incompatible configuration settings " - "for topic \"%s\": %s", topic, conf_err); + "for topic \"%s\": %s", + topic, conf_err); rd_kafka_topic_conf_destroy(conf); rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); return NULL; @@ -322,17 +320,17 @@ rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, if (existing) *existing = 0; - rkt = rd_calloc(1, sizeof(*rkt)); + rkt = rd_calloc(1, sizeof(*rkt)); memcpy(rkt->rkt_magic, "IRKT", 4); - rkt->rkt_topic = rd_kafkap_str_new(topic, -1); - rkt->rkt_rk = rk; + rkt->rkt_topic = rd_kafkap_str_new(topic, -1); + rkt->rkt_rk = rk; rkt->rkt_ts_create = rd_clock(); - rkt->rkt_conf = *conf; - rd_free(conf); /* explicitly not 
rd_kafka_topic_destroy() + rkt->rkt_conf = *conf; + rd_free(conf); /* explicitly not rd_kafka_topic_destroy() * since we dont want to rd_free internal members, * just the placeholder. The internal members * were copied on the line above. */ @@ -343,26 +341,21 @@ rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, const char *str; void *part; } part_map[] = { - { "random", - (void *)rd_kafka_msg_partitioner_random }, - { "consistent", - (void *)rd_kafka_msg_partitioner_consistent }, - { "consistent_random", - (void *)rd_kafka_msg_partitioner_consistent_random }, - { "murmur2", - (void *)rd_kafka_msg_partitioner_murmur2 }, - { "murmur2_random", - (void *)rd_kafka_msg_partitioner_murmur2_random }, - { "fnv1a", - (void *)rd_kafka_msg_partitioner_fnv1a }, - { "fnv1a_random", - (void *)rd_kafka_msg_partitioner_fnv1a_random }, - { NULL } - }; + {"random", (void *)rd_kafka_msg_partitioner_random}, + {"consistent", (void *)rd_kafka_msg_partitioner_consistent}, + {"consistent_random", + (void *)rd_kafka_msg_partitioner_consistent_random}, + {"murmur2", (void *)rd_kafka_msg_partitioner_murmur2}, + {"murmur2_random", + (void *)rd_kafka_msg_partitioner_murmur2_random}, + {"fnv1a", (void *)rd_kafka_msg_partitioner_fnv1a}, + {"fnv1a_random", + (void *)rd_kafka_msg_partitioner_fnv1a_random}, + {NULL}}; int i; /* Use "partitioner" configuration property string, if set */ - for (i = 0 ; rkt->rkt_conf.partitioner_str && part_map[i].str ; + for (i = 0; rkt->rkt_conf.partitioner_str && part_map[i].str; i++) { if (!strcmp(rkt->rkt_conf.partitioner_str, part_map[i].str)) { @@ -379,17 +372,14 @@ rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, assert(!rkt->rkt_conf.partitioner_str); rkt->rkt_conf.partitioner = - rd_kafka_msg_partitioner_consistent_random; + rd_kafka_msg_partitioner_consistent_random; } } if (rkt->rkt_rk->rk_conf.sticky_partition_linger_ms > 0 && - rkt->rkt_conf.partitioner != - rd_kafka_msg_partitioner_consistent && - rkt->rkt_conf.partitioner != - 
rd_kafka_msg_partitioner_murmur2 && - rkt->rkt_conf.partitioner != - rd_kafka_msg_partitioner_fnv1a) { + rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_consistent && + rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_murmur2 && + rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_fnv1a) { rkt->rkt_conf.random_partitioner = rd_false; } else { rkt->rkt_conf.random_partitioner = rd_true; @@ -403,37 +393,43 @@ rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, else rkt->rkt_conf.msg_order_cmp = rd_kafka_msg_cmp_msgid_lifo; - if (rkt->rkt_conf.compression_codec == RD_KAFKA_COMPRESSION_INHERIT) - rkt->rkt_conf.compression_codec = rk->rk_conf.compression_codec; + if (rkt->rkt_conf.compression_codec == RD_KAFKA_COMPRESSION_INHERIT) + rkt->rkt_conf.compression_codec = rk->rk_conf.compression_codec; /* Translate compression level to library-specific level and check * upper bound */ switch (rkt->rkt_conf.compression_codec) { #if WITH_ZLIB case RD_KAFKA_COMPRESSION_GZIP: - if (rkt->rkt_conf.compression_level == RD_KAFKA_COMPLEVEL_DEFAULT) + if (rkt->rkt_conf.compression_level == + RD_KAFKA_COMPLEVEL_DEFAULT) rkt->rkt_conf.compression_level = Z_DEFAULT_COMPRESSION; - else if (rkt->rkt_conf.compression_level > RD_KAFKA_COMPLEVEL_GZIP_MAX) + else if (rkt->rkt_conf.compression_level > + RD_KAFKA_COMPLEVEL_GZIP_MAX) rkt->rkt_conf.compression_level = - RD_KAFKA_COMPLEVEL_GZIP_MAX; + RD_KAFKA_COMPLEVEL_GZIP_MAX; break; #endif case RD_KAFKA_COMPRESSION_LZ4: - if (rkt->rkt_conf.compression_level == RD_KAFKA_COMPLEVEL_DEFAULT) + if (rkt->rkt_conf.compression_level == + RD_KAFKA_COMPLEVEL_DEFAULT) /* LZ4 has no notion of system-wide default compression * level, use zero in this case */ rkt->rkt_conf.compression_level = 0; - else if (rkt->rkt_conf.compression_level > RD_KAFKA_COMPLEVEL_LZ4_MAX) + else if (rkt->rkt_conf.compression_level > + RD_KAFKA_COMPLEVEL_LZ4_MAX) rkt->rkt_conf.compression_level = - RD_KAFKA_COMPLEVEL_LZ4_MAX; + RD_KAFKA_COMPLEVEL_LZ4_MAX; break; 
#if WITH_ZSTD case RD_KAFKA_COMPRESSION_ZSTD: - if (rkt->rkt_conf.compression_level == RD_KAFKA_COMPLEVEL_DEFAULT) + if (rkt->rkt_conf.compression_level == + RD_KAFKA_COMPLEVEL_DEFAULT) rkt->rkt_conf.compression_level = 3; - else if (rkt->rkt_conf.compression_level > RD_KAFKA_COMPLEVEL_ZSTD_MAX) + else if (rkt->rkt_conf.compression_level > + RD_KAFKA_COMPLEVEL_ZSTD_MAX) rkt->rkt_conf.compression_level = - RD_KAFKA_COMPLEVEL_ZSTD_MAX; + RD_KAFKA_COMPLEVEL_ZSTD_MAX; break; #endif case RD_KAFKA_COMPRESSION_SNAPPY: @@ -449,8 +445,8 @@ rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, rk->rk_conf.batch_num_messages, 2, rk->rk_conf.stats_interval_ms ? 1 : 0); - rd_kafka_dbg(rk, TOPIC, "TOPIC", "New local topic: %.*s", - RD_KAFKAP_STR_PR(rkt->rkt_topic)); + rd_kafka_dbg(rk, TOPIC, "TOPIC", "New local topic: %.*s", + RD_KAFKAP_STR_PR(rkt->rkt_topic)); rd_list_init(&rkt->rkt_desp, 16, NULL); rd_interval_init(&rkt->rkt_desp_refresh_intvl); @@ -459,16 +455,16 @@ rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, rd_kafka_topic_keep(rkt); - rwlock_init(&rkt->rkt_lock); + rwlock_init(&rkt->rkt_lock); - /* Create unassigned partition */ - rkt->rkt_ua = rd_kafka_toppar_new(rkt, RD_KAFKA_PARTITION_UA); + /* Create unassigned partition */ + rkt->rkt_ua = rd_kafka_toppar_new(rkt, RD_KAFKA_PARTITION_UA); - TAILQ_INSERT_TAIL(&rk->rk_topics, rkt, rkt_link); - rk->rk_topic_cnt++; + TAILQ_INSERT_TAIL(&rk->rk_topics, rkt, rkt_link); + rk->rk_topic_cnt++; /* Populate from metadata cache. 
*/ - if ((rkmce = rd_kafka_metadata_cache_find(rk, topic, 1/*valid*/)) && + if ((rkmce = rd_kafka_metadata_cache_find(rk, topic, 1 /*valid*/)) && !rkmce->rkmce_mtopic.err) { if (existing) *existing = 1; @@ -483,12 +479,12 @@ rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, if (rk->rk_conf.debug & RD_KAFKA_DBG_CONF) { char desc[256]; rd_snprintf(desc, sizeof(desc), - "Topic \"%s\" configuration (%s)", - topic, used_conf_str); + "Topic \"%s\" configuration (%s)", topic, + used_conf_str); rd_kafka_anyconf_dump_dbg(rk, _RK_TOPIC, &rkt->rkt_conf, desc); } - return rkt; + return rkt; } @@ -498,17 +494,18 @@ rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, * * @locality application thread */ -rd_kafka_topic_t *rd_kafka_topic_new (rd_kafka_t *rk, const char *topic, - rd_kafka_topic_conf_t *conf) { +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, + const char *topic, + rd_kafka_topic_conf_t *conf) { rd_kafka_topic_t *rkt; int existing; - rkt = rd_kafka_topic_new0(rk, topic, conf, &existing, 1/*lock*/); + rkt = rd_kafka_topic_new0(rk, topic, conf, &existing, 1 /*lock*/); if (!rkt) return NULL; /* Increase application refcount. */ - rd_kafka_topic_keep_app(rkt); + rd_kafka_topic_keep_app(rkt); /* Query for the topic leader (async) */ if (!existing) @@ -526,14 +523,13 @@ rd_kafka_topic_t *rd_kafka_topic_new (rd_kafka_t *rk, const char *topic, * Sets the state for topic. 
* NOTE: rd_kafka_topic_wrlock(rkt) MUST be held */ -static void rd_kafka_topic_set_state (rd_kafka_topic_t *rkt, int state) { +static void rd_kafka_topic_set_state(rd_kafka_topic_t *rkt, int state) { if ((int)rkt->rkt_state == state) return; rd_kafka_dbg(rkt->rkt_rk, TOPIC, "STATE", - "Topic %s changed state %s -> %s", - rkt->rkt_topic->str, + "Topic %s changed state %s -> %s", rkt->rkt_topic->str, rd_kafka_topic_state_names[rkt->rkt_state], rd_kafka_topic_state_names[state]); @@ -551,7 +547,7 @@ static void rd_kafka_topic_set_state (rd_kafka_topic_t *rkt, int state) { * we can use the topic's String directly. * This is not true for Kafka Strings read from the network. */ -const char *rd_kafka_topic_name (const rd_kafka_topic_t *app_rkt) { +const char *rd_kafka_topic_name(const rd_kafka_topic_t *app_rkt) { if (rd_kafka_rkt_is_lw(app_rkt)) return rd_kafka_rkt_lw_const(app_rkt)->lrkt_topic; else @@ -574,41 +570,41 @@ const char *rd_kafka_topic_name (const rd_kafka_topic_t *app_rkt) { * @locks caller must have rd_kafka_toppar_lock(rktp) * @locality any */ -int rd_kafka_toppar_broker_update (rd_kafka_toppar_t *rktp, - int32_t broker_id, - rd_kafka_broker_t *rkb, - const char *reason) { +int rd_kafka_toppar_broker_update(rd_kafka_toppar_t *rktp, + int32_t broker_id, + rd_kafka_broker_t *rkb, + const char *reason) { rktp->rktp_broker_id = broker_id; - if (!rkb) { - int had_broker = rktp->rktp_broker ? 1 : 0; - rd_kafka_toppar_broker_delegate(rktp, NULL); - return had_broker ? -1 : 0; - } - - if (rktp->rktp_broker) { - if (rktp->rktp_broker == rkb) { - /* No change in broker */ - return 0; - } - - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC|RD_KAFKA_DBG_FETCH, "TOPICUPD", - "Topic %s [%"PRId32"]: migrating from " - "broker %"PRId32" to %"PRId32" (leader is " - "%"PRId32"): %s", + if (!rkb) { + int had_broker = rktp->rktp_broker ? 1 : 0; + rd_kafka_toppar_broker_delegate(rktp, NULL); + return had_broker ? 
-1 : 0; + } + + if (rktp->rktp_broker) { + if (rktp->rktp_broker == rkb) { + /* No change in broker */ + return 0; + } + + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_FETCH, + "TOPICUPD", + "Topic %s [%" PRId32 + "]: migrating from " + "broker %" PRId32 " to %" PRId32 + " (leader is " + "%" PRId32 "): %s", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rktp->rktp_broker->rkb_nodeid, - rkb->rkb_nodeid, - rktp->rktp_leader_id, - reason); - } + rktp->rktp_broker->rkb_nodeid, rkb->rkb_nodeid, + rktp->rktp_leader_id, reason); + } - rd_kafka_toppar_broker_delegate(rktp, rkb); + rd_kafka_toppar_broker_delegate(rktp, rkb); - return 1; + return 1; } @@ -630,22 +626,23 @@ int rd_kafka_toppar_broker_update (rd_kafka_toppar_t *rktp, * AND NOT rd_kafka_toppar_lock(rktp) * @locality any */ -static int rd_kafka_toppar_leader_update (rd_kafka_topic_t *rkt, - int32_t partition, - int32_t leader_id, - rd_kafka_broker_t *leader) { - rd_kafka_toppar_t *rktp; +static int rd_kafka_toppar_leader_update(rd_kafka_topic_t *rkt, + int32_t partition, + int32_t leader_id, + rd_kafka_broker_t *leader) { + rd_kafka_toppar_t *rktp; rd_bool_t fetching_from_follower; int r = 0; - rktp = rd_kafka_toppar_get(rkt, partition, 0); + rktp = rd_kafka_toppar_get(rkt, partition, 0); if (unlikely(!rktp)) { /* Have only seen this in issue #132. * Probably caused by corrupt broker state. 
*/ rd_kafka_log(rkt->rkt_rk, LOG_WARNING, "BROKER", - "%s [%"PRId32"] is unknown " + "%s [%" PRId32 + "] is unknown " "(partition_cnt %i): " - "ignoring leader (%"PRId32") update", + "ignoring leader (%" PRId32 ") update", rkt->rkt_topic->str, partition, rkt->rkt_partition_cnt, leader_id); return -1; @@ -654,19 +651,18 @@ static int rd_kafka_toppar_leader_update (rd_kafka_topic_t *rkt, rd_kafka_toppar_lock(rktp); fetching_from_follower = - leader != NULL && - rktp->rktp_broker != NULL && - rktp->rktp_broker->rkb_source != RD_KAFKA_INTERNAL && - rktp->rktp_broker != leader; - - if (fetching_from_follower && - rktp->rktp_leader_id == leader_id) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER", - "Topic %s [%"PRId32"]: leader %"PRId32" unchanged, " - "not migrating away from preferred replica %"PRId32, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - leader_id, rktp->rktp_broker_id); + leader != NULL && rktp->rktp_broker != NULL && + rktp->rktp_broker->rkb_source != RD_KAFKA_INTERNAL && + rktp->rktp_broker != leader; + + if (fetching_from_follower && rktp->rktp_leader_id == leader_id) { + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER", + "Topic %s [%" PRId32 "]: leader %" PRId32 + " unchanged, " + "not migrating away from preferred replica %" PRId32, + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + leader_id, rktp->rktp_broker_id); r = 0; } else { @@ -689,9 +685,9 @@ static int rd_kafka_toppar_leader_update (rd_kafka_topic_t *rkt, rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(rktp); /* from get() */ + rd_kafka_toppar_destroy(rktp); /* from get() */ - return r; + return r; } @@ -705,7 +701,7 @@ static int rd_kafka_toppar_leader_update (rd_kafka_topic_t *rkt, * @locks none * @locality any */ -int rd_kafka_toppar_delegate_to_leader (rd_kafka_toppar_t *rktp) { +int rd_kafka_toppar_delegate_to_leader(rd_kafka_toppar_t *rktp) { rd_kafka_broker_t *leader; int r; @@ -715,10 +711,11 @@ int rd_kafka_toppar_delegate_to_leader 
(rd_kafka_toppar_t *rktp) { rd_assert(rktp->rktp_leader_id != rktp->rktp_broker_id); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER", - "Topic %s [%"PRId32"]: Reverting from preferred " - "replica %"PRId32" to leader %"PRId32, - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rktp->rktp_broker_id, rktp->rktp_leader_id); + "Topic %s [%" PRId32 + "]: Reverting from preferred " + "replica %" PRId32 " to leader %" PRId32, + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktp->rktp_broker_id, rktp->rktp_leader_id); leader = rd_kafka_broker_find_by_nodeid(rktp->rktp_rkt->rkt_rk, rktp->rktp_leader_id); @@ -728,8 +725,8 @@ int rd_kafka_toppar_delegate_to_leader (rd_kafka_toppar_t *rktp) { rd_kafka_toppar_lock(rktp); r = rd_kafka_toppar_broker_update( - rktp, rktp->rktp_leader_id, leader, - "reverting from preferred replica to leader"); + rktp, rktp->rktp_leader_id, leader, + "reverting from preferred replica to leader"); rd_kafka_toppar_unlock(rktp); if (leader) @@ -747,98 +744,101 @@ int rd_kafka_toppar_delegate_to_leader (rd_kafka_toppar_t *rktp) { * * @locks rd_kafka_topic_wrlock(rkt) MUST be held. 
*/ -static int rd_kafka_topic_partition_cnt_update (rd_kafka_topic_t *rkt, - int32_t partition_cnt) { - rd_kafka_t *rk = rkt->rkt_rk; - rd_kafka_toppar_t **rktps; - rd_kafka_toppar_t *rktp; - int32_t i; +static int rd_kafka_topic_partition_cnt_update(rd_kafka_topic_t *rkt, + int32_t partition_cnt) { + rd_kafka_t *rk = rkt->rkt_rk; + rd_kafka_toppar_t **rktps; + rd_kafka_toppar_t *rktp; + int32_t i; - if (likely(rkt->rkt_partition_cnt == partition_cnt)) - return 0; /* No change in partition count */ + if (likely(rkt->rkt_partition_cnt == partition_cnt)) + return 0; /* No change in partition count */ if (unlikely(rkt->rkt_partition_cnt != 0 && !rd_kafka_terminating(rkt->rkt_rk))) rd_kafka_log(rk, LOG_NOTICE, "PARTCNT", "Topic %s partition count changed " - "from %"PRId32" to %"PRId32, - rkt->rkt_topic->str, - rkt->rkt_partition_cnt, partition_cnt); + "from %" PRId32 " to %" PRId32, + rkt->rkt_topic->str, rkt->rkt_partition_cnt, + partition_cnt); else rd_kafka_dbg(rk, TOPIC, "PARTCNT", "Topic %s partition count changed " - "from %"PRId32" to %"PRId32, - rkt->rkt_topic->str, - rkt->rkt_partition_cnt, partition_cnt); + "from %" PRId32 " to %" PRId32, + rkt->rkt_topic->str, rkt->rkt_partition_cnt, + partition_cnt); - /* Create and assign new partition list */ - if (partition_cnt > 0) - rktps = rd_calloc(partition_cnt, sizeof(*rktps)); - else - rktps = NULL; + /* Create and assign new partition list */ + if (partition_cnt > 0) + rktps = rd_calloc(partition_cnt, sizeof(*rktps)); + else + rktps = NULL; - for (i = 0 ; i < partition_cnt ; i++) { - if (i >= rkt->rkt_partition_cnt) { - /* New partition. Check if its in the list of - * desired partitions first. */ + for (i = 0; i < partition_cnt; i++) { + if (i >= rkt->rkt_partition_cnt) { + /* New partition. Check if its in the list of + * desired partitions first. 
*/ rktp = rd_kafka_toppar_desired_get(rkt, i); if (rktp) { - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rktp->rktp_flags &= - ~(RD_KAFKA_TOPPAR_F_UNKNOWN | - RD_KAFKA_TOPPAR_F_REMOVE); + ~(RD_KAFKA_TOPPAR_F_UNKNOWN | + RD_KAFKA_TOPPAR_F_REMOVE); /* Remove from desp list since the * partition is now known. */ rd_kafka_toppar_desired_unlink(rktp); rd_kafka_toppar_unlock(rktp); - } else { - rktp = rd_kafka_toppar_new(rkt, i); + } else { + rktp = rd_kafka_toppar_new(rkt, i); rd_kafka_toppar_lock(rktp); rktp->rktp_flags &= - ~(RD_KAFKA_TOPPAR_F_UNKNOWN | - RD_KAFKA_TOPPAR_F_REMOVE); + ~(RD_KAFKA_TOPPAR_F_UNKNOWN | + RD_KAFKA_TOPPAR_F_REMOVE); rd_kafka_toppar_unlock(rktp); } - rktps[i] = rktp; - } else { - /* Existing partition, grab our own reference. */ - rktps[i] = rd_kafka_toppar_keep(rkt->rkt_p[i]); - /* Loose previous ref */ - rd_kafka_toppar_destroy(rkt->rkt_p[i]); - } - } + rktps[i] = rktp; + } else { + /* Existing partition, grab our own reference. */ + rktps[i] = rd_kafka_toppar_keep(rkt->rkt_p[i]); + /* Loose previous ref */ + rd_kafka_toppar_destroy(rkt->rkt_p[i]); + } + } /* Propagate notexist errors for desired partitions */ RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) { rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESIRED", - "%s [%"PRId32"]: " + "%s [%" PRId32 + "]: " "desired partition does not exist in cluster", rkt->rkt_topic->str, rktp->rktp_partition); - rd_kafka_toppar_enq_error(rktp, - rkt->rkt_err ? rkt->rkt_err : - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - "desired partition is not available"); + rd_kafka_toppar_enq_error( + rktp, + rkt->rkt_err ? 
rkt->rkt_err + : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + "desired partition is not available"); } - /* Remove excessive partitions */ - for (i = partition_cnt ; i < rkt->rkt_partition_cnt ; i++) { - rktp = rkt->rkt_p[i]; + /* Remove excessive partitions */ + for (i = partition_cnt; i < rkt->rkt_partition_cnt; i++) { + rktp = rkt->rkt_p[i]; - rd_kafka_dbg(rkt->rkt_rk, TOPIC, "REMOVE", - "%s [%"PRId32"] no longer reported in metadata", - rkt->rkt_topic->str, rktp->rktp_partition); + rd_kafka_dbg(rkt->rkt_rk, TOPIC, "REMOVE", + "%s [%" PRId32 "] no longer reported in metadata", + rkt->rkt_topic->str, rktp->rktp_partition); - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN; - if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) { + if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) { rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESIRED", - "Topic %s [%"PRId32"] is desired " + "Topic %s [%" PRId32 + "] is desired " "but no longer known: " "moving back on desired list", rkt->rkt_topic->str, rktp->rktp_partition); @@ -849,32 +849,33 @@ static int rd_kafka_topic_partition_cnt_update (rd_kafka_topic_t *rkt, if (!rd_kafka_terminating(rkt->rkt_rk)) rd_kafka_toppar_enq_error( - rktp, - rkt->rkt_err ? rkt->rkt_err : - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - "desired partition is no longer " - "available"); + rktp, + rkt->rkt_err + ? 
rkt->rkt_err + : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + "desired partition is no longer " + "available"); - rd_kafka_toppar_broker_delegate(rktp, NULL); + rd_kafka_toppar_broker_delegate(rktp, NULL); - } else { - /* Tell handling broker to let go of the toppar */ - rd_kafka_toppar_broker_leave_for_remove(rktp); - } + } else { + /* Tell handling broker to let go of the toppar */ + rd_kafka_toppar_broker_leave_for_remove(rktp); + } - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(rktp); - } + rd_kafka_toppar_destroy(rktp); + } - if (rkt->rkt_p) - rd_free(rkt->rkt_p); + if (rkt->rkt_p) + rd_free(rkt->rkt_p); - rkt->rkt_p = rktps; + rkt->rkt_p = rktps; - rkt->rkt_partition_cnt = partition_cnt; + rkt->rkt_partition_cnt = partition_cnt; - return 1; + return 1; } @@ -887,8 +888,8 @@ static int rd_kafka_topic_partition_cnt_update (rd_kafka_topic_t *rkt, * * Locks: rd_kafka_topic_*lock() must be held. */ -static void rd_kafka_topic_propagate_notexists (rd_kafka_topic_t *rkt, - rd_kafka_resp_err_t err) { +static void rd_kafka_topic_propagate_notexists(rd_kafka_topic_t *rkt, + rd_kafka_resp_err_t err) { rd_kafka_toppar_t *rktp; int i; @@ -898,7 +899,7 @@ static void rd_kafka_topic_propagate_notexists (rd_kafka_topic_t *rkt, /* Notify consumers that the topic doesn't exist. */ RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) - rd_kafka_toppar_enq_error(rktp, err, "topic does not exist"); + rd_kafka_toppar_enq_error(rktp, err, "topic does not exist"); } @@ -906,28 +907,28 @@ static void rd_kafka_topic_propagate_notexists (rd_kafka_topic_t *rkt, * Assign messages on the UA partition to available partitions. * Locks: rd_kafka_topic_*lock() must be held. 
*/ -static void rd_kafka_topic_assign_uas (rd_kafka_topic_t *rkt, - rd_kafka_resp_err_t err) { - rd_kafka_t *rk = rkt->rkt_rk; +static void rd_kafka_topic_assign_uas(rd_kafka_topic_t *rkt, + rd_kafka_resp_err_t err) { + rd_kafka_t *rk = rkt->rkt_rk; rd_kafka_toppar_t *rktp_ua; - rd_kafka_msg_t *rkm, *tmp; - rd_kafka_msgq_t uas = RD_KAFKA_MSGQ_INITIALIZER(uas); - rd_kafka_msgq_t failed = RD_KAFKA_MSGQ_INITIALIZER(failed); + rd_kafka_msg_t *rkm, *tmp; + rd_kafka_msgq_t uas = RD_KAFKA_MSGQ_INITIALIZER(uas); + rd_kafka_msgq_t failed = RD_KAFKA_MSGQ_INITIALIZER(failed); rd_kafka_resp_err_t err_all = RD_KAFKA_RESP_ERR_NO_ERROR; - int cnt; + int cnt; - if (rkt->rkt_rk->rk_type != RD_KAFKA_PRODUCER) - return; + if (rkt->rkt_rk->rk_type != RD_KAFKA_PRODUCER) + return; - rktp_ua = rd_kafka_toppar_get(rkt, RD_KAFKA_PARTITION_UA, 0); - if (unlikely(!rktp_ua)) { - rd_kafka_dbg(rk, TOPIC, "ASSIGNUA", - "No UnAssigned partition available for %s", - rkt->rkt_topic->str); - return; - } + rktp_ua = rd_kafka_toppar_get(rkt, RD_KAFKA_PARTITION_UA, 0); + if (unlikely(!rktp_ua)) { + rd_kafka_dbg(rk, TOPIC, "ASSIGNUA", + "No UnAssigned partition available for %s", + rkt->rkt_topic->str); + return; + } - /* Assign all unassigned messages to new topics. */ + /* Assign all unassigned messages to new topics. 
*/ rd_kafka_toppar_lock(rktp_ua); if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR) { @@ -949,32 +950,31 @@ static void rd_kafka_topic_assign_uas (rd_kafka_topic_t *rkt, } else { rd_kafka_dbg(rk, TOPIC, "PARTCNT", "Partitioning %i unassigned messages in " - "topic %.*s to %"PRId32" partitions", + "topic %.*s to %" PRId32 " partitions", rktp_ua->rktp_msgq.rkmq_msg_cnt, RD_KAFKAP_STR_PR(rkt->rkt_topic), rkt->rkt_partition_cnt); } - rd_kafka_msgq_move(&uas, &rktp_ua->rktp_msgq); - cnt = uas.rkmq_msg_cnt; - rd_kafka_toppar_unlock(rktp_ua); + rd_kafka_msgq_move(&uas, &rktp_ua->rktp_msgq); + cnt = uas.rkmq_msg_cnt; + rd_kafka_toppar_unlock(rktp_ua); - TAILQ_FOREACH_SAFE(rkm, &uas.rkmq_msgs, rkm_link, tmp) { + TAILQ_FOREACH_SAFE(rkm, &uas.rkmq_msgs, rkm_link, tmp) { /* Fast-path for failing messages with forced partition or * when all messages are to fail. */ - if (err_all || - (rkm->rkm_partition != RD_KAFKA_PARTITION_UA && - rkm->rkm_partition >= rkt->rkt_partition_cnt && - rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN)) { + if (err_all || (rkm->rkm_partition != RD_KAFKA_PARTITION_UA && + rkm->rkm_partition >= rkt->rkt_partition_cnt && + rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN)) { rd_kafka_msgq_enq(&failed, rkm); continue; } - if (unlikely(rd_kafka_msg_partitioner(rkt, rkm, 0) != 0)) { - /* Desired partition not available */ - rd_kafka_msgq_enq(&failed, rkm); - } - } + if (unlikely(rd_kafka_msg_partitioner(rkt, rkm, 0) != 0)) { + /* Desired partition not available */ + rd_kafka_msgq_enq(&failed, rkm); + } + } rd_kafka_dbg(rk, TOPIC, "UAS", "%i/%i messages were partitioned in topic %s", @@ -983,15 +983,16 @@ static void rd_kafka_topic_assign_uas (rd_kafka_topic_t *rkt, if (failed.rkmq_msg_cnt > 0) { /* Fail the messages */ rd_kafka_dbg(rk, TOPIC, "UAS", - "%"PRId32"/%i messages failed partitioning " + "%" PRId32 + "/%i messages failed partitioning " "in topic %s", failed.rkmq_msg_cnt, cnt, rkt->rkt_topic->str); - rd_kafka_dr_msgq(rkt, &failed, - err_all ? 
err_all : - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION); - } + rd_kafka_dr_msgq( + rkt, &failed, + err_all ? err_all : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION); + } - rd_kafka_toppar_destroy(rktp_ua); /* from get() */ + rd_kafka_toppar_destroy(rktp_ua); /* from get() */ } @@ -1007,8 +1008,8 @@ static void rd_kafka_topic_assign_uas (rd_kafka_topic_t *rkt, * * @locks topic_wrlock() MUST be held. */ -rd_bool_t rd_kafka_topic_set_notexists (rd_kafka_topic_t *rkt, - rd_kafka_resp_err_t err) { +rd_bool_t rd_kafka_topic_set_notexists(rd_kafka_topic_t *rkt, + rd_kafka_resp_err_t err) { rd_ts_t remains_us; rd_bool_t permanent = err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION; @@ -1020,20 +1021,19 @@ rd_bool_t rd_kafka_topic_set_notexists (rd_kafka_topic_t *rkt, rd_assert(err != RD_KAFKA_RESP_ERR_NO_ERROR); remains_us = - (rkt->rkt_ts_create + - (rkt->rkt_rk->rk_conf.metadata_propagation_max_ms * 1000)) - - rkt->rkt_ts_metadata; + (rkt->rkt_ts_create + + (rkt->rkt_rk->rk_conf.metadata_propagation_max_ms * 1000)) - + rkt->rkt_ts_metadata; - if (!permanent && - rkt->rkt_state == RD_KAFKA_TOPIC_S_UNKNOWN && remains_us > 0) { + if (!permanent && rkt->rkt_state == RD_KAFKA_TOPIC_S_UNKNOWN && + remains_us > 0) { /* Still allowing topic metadata to propagate. */ - rd_kafka_dbg(rkt->rkt_rk, TOPIC|RD_KAFKA_DBG_METADATA, - "TOPICPROP", - "Topic %.*s does not exist, allowing %dms " - "for metadata propagation before marking topic " - "as non-existent", - RD_KAFKAP_STR_PR(rkt->rkt_topic), - (int)(remains_us / 1000)); + rd_kafka_dbg( + rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_METADATA, "TOPICPROP", + "Topic %.*s does not exist, allowing %dms " + "for metadata propagation before marking topic " + "as non-existent", + RD_KAFKAP_STR_PR(rkt->rkt_topic), (int)(remains_us / 1000)); return rd_false; } @@ -1063,8 +1063,8 @@ rd_bool_t rd_kafka_topic_set_notexists (rd_kafka_topic_t *rkt, * @locality any * @locks topic_wrlock() MUST be held. 
*/ -rd_bool_t rd_kafka_topic_set_error (rd_kafka_topic_t *rkt, - rd_kafka_resp_err_t err) { +rd_bool_t rd_kafka_topic_set_error(rd_kafka_topic_t *rkt, + rd_kafka_resp_err_t err) { if (unlikely(rd_kafka_terminating(rkt->rkt_rk))) { /* Dont update metadata while terminating. */ @@ -1074,13 +1074,12 @@ rd_bool_t rd_kafka_topic_set_error (rd_kafka_topic_t *rkt, rd_assert(err != RD_KAFKA_RESP_ERR_NO_ERROR); /* Same error, ignore. */ - if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR && - rkt->rkt_err == err) + if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR && rkt->rkt_err == err) return rd_true; rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPICERROR", - "Topic %s has permanent error: %s", - rkt->rkt_topic->str, rd_kafka_err2str(err)); + "Topic %s has permanent error: %s", rkt->rkt_topic->str, + rd_kafka_err2str(err)); rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_ERROR); @@ -1108,22 +1107,22 @@ rd_bool_t rd_kafka_topic_set_error (rd_kafka_topic_t *rkt, * @locks rd_kafka_*lock() MUST be held. */ static int -rd_kafka_topic_metadata_update (rd_kafka_topic_t *rkt, - const struct rd_kafka_metadata_topic *mdt, - rd_ts_t ts_age) { +rd_kafka_topic_metadata_update(rd_kafka_topic_t *rkt, + const struct rd_kafka_metadata_topic *mdt, + rd_ts_t ts_age) { rd_kafka_t *rk = rkt->rkt_rk; - int upd = 0; - int j; + int upd = 0; + int j; rd_kafka_broker_t **partbrokers; int leader_cnt = 0; int old_state; - if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR) - rd_kafka_dbg(rk, TOPIC|RD_KAFKA_DBG_METADATA, "METADATA", - "Error in metadata reply for " - "topic %s (PartCnt %i): %s", - rkt->rkt_topic->str, mdt->partition_cnt, - rd_kafka_err2str(mdt->err)); + if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR) + rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_METADATA, "METADATA", + "Error in metadata reply for " + "topic %s (PartCnt %i): %s", + rkt->rkt_topic->str, mdt->partition_cnt, + rd_kafka_err2str(mdt->err)); if (unlikely(rd_kafka_terminating(rk))) { /* Dont update metadata while terminating, do this @@ -1134,27 +1133,25 @@ 
rd_kafka_topic_metadata_update (rd_kafka_topic_t *rkt, /* Look up brokers before acquiring rkt lock to preserve lock order */ partbrokers = rd_malloc(mdt->partition_cnt * sizeof(*partbrokers)); - for (j = 0 ; j < mdt->partition_cnt ; j++) { - if (mdt->partitions[j].leader == -1) { + for (j = 0; j < mdt->partition_cnt; j++) { + if (mdt->partitions[j].leader == -1) { partbrokers[j] = NULL; - continue; - } + continue; + } - partbrokers[j] = - rd_kafka_broker_find_by_nodeid(rk, - mdt->partitions[j]. - leader); - } + partbrokers[j] = rd_kafka_broker_find_by_nodeid( + rk, mdt->partitions[j].leader); + } - rd_kafka_topic_wrlock(rkt); + rd_kafka_topic_wrlock(rkt); - old_state = rkt->rkt_state; - rkt->rkt_ts_metadata = ts_age; + old_state = rkt->rkt_state; + rkt->rkt_ts_metadata = ts_age; /* Set topic state. * UNKNOWN_TOPIC_OR_PART may indicate that auto.create.topics failed */ - if (mdt->err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION/*invalid topic*/ || + if (mdt->err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION /*invalid topic*/ || mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) rd_kafka_topic_set_notexists(rkt, mdt->err); else if (mdt->partition_cnt > 0) @@ -1162,11 +1159,11 @@ rd_kafka_topic_metadata_update (rd_kafka_topic_t *rkt, else if (mdt->err) rd_kafka_topic_set_error(rkt, mdt->err); - /* Update number of partitions, but not if there are - * (possibly intermittent) errors (e.g., "Leader not available"). */ - if (mdt->err == RD_KAFKA_RESP_ERR_NO_ERROR) { - upd += rd_kafka_topic_partition_cnt_update(rkt, - mdt->partition_cnt); + /* Update number of partitions, but not if there are + * (possibly intermittent) errors (e.g., "Leader not available"). */ + if (mdt->err == RD_KAFKA_RESP_ERR_NO_ERROR) { + upd += rd_kafka_topic_partition_cnt_update(rkt, + mdt->partition_cnt); /* If the metadata times out for a topic (because all brokers * are down) the state will transition to S_UNKNOWN. 
@@ -1179,25 +1176,23 @@ rd_kafka_topic_metadata_update (rd_kafka_topic_t *rkt, upd++; } - /* Update leader for each partition */ - for (j = 0 ; j < mdt->partition_cnt ; j++) { + /* Update leader for each partition */ + for (j = 0; j < mdt->partition_cnt; j++) { int r; - rd_kafka_broker_t *leader; + rd_kafka_broker_t *leader; - rd_kafka_dbg(rk, TOPIC|RD_KAFKA_DBG_METADATA, "METADATA", - " Topic %s partition %i Leader %"PRId32, - rkt->rkt_topic->str, - mdt->partitions[j].id, - mdt->partitions[j].leader); + rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_METADATA, "METADATA", + " Topic %s partition %i Leader %" PRId32, + rkt->rkt_topic->str, mdt->partitions[j].id, + mdt->partitions[j].leader); - leader = partbrokers[j]; - partbrokers[j] = NULL; + leader = partbrokers[j]; + partbrokers[j] = NULL; - /* Update leader for partition */ - r = rd_kafka_toppar_leader_update(rkt, - mdt->partitions[j].id, - mdt->partitions[j].leader, - leader); + /* Update leader for partition */ + r = rd_kafka_toppar_leader_update(rkt, mdt->partitions[j].id, + mdt->partitions[j].leader, + leader); upd += (r != 0 ? 1 : 0); @@ -1213,13 +1208,13 @@ rd_kafka_topic_metadata_update (rd_kafka_topic_t *rkt, if (mdt->partition_cnt > 0 && leader_cnt == mdt->partition_cnt) rkt->rkt_flags &= ~RD_KAFKA_TOPIC_F_LEADER_UNAVAIL; - if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR && rkt->rkt_partition_cnt) { + if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR && rkt->rkt_partition_cnt) { /* (Possibly intermittent) topic-wide error: * remove leaders for partitions */ - for (j = 0 ; j < rkt->rkt_partition_cnt ; j++) { + for (j = 0; j < rkt->rkt_partition_cnt; j++) { rd_kafka_toppar_t *rktp; - if (!rkt->rkt_p[j]) + if (!rkt->rkt_p[j]) continue; rktp = rkt->rkt_p[j]; @@ -1232,20 +1227,20 @@ rd_kafka_topic_metadata_update (rd_kafka_topic_t *rkt, /* If there was an update to the partitions try to assign * unassigned messages to new partitions, or fail them */ if (upd > 0) - rd_kafka_topic_assign_uas(rkt, mdt->err ? 
- mdt->err : - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); + rd_kafka_topic_assign_uas( + rkt, + mdt->err ? mdt->err : RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); - rd_kafka_topic_wrunlock(rkt); + rd_kafka_topic_wrunlock(rkt); - /* Loose broker references */ - for (j = 0 ; j < mdt->partition_cnt ; j++) - if (partbrokers[j]) - rd_kafka_broker_destroy(partbrokers[j]); + /* Loose broker references */ + for (j = 0; j < mdt->partition_cnt; j++) + if (partbrokers[j]) + rd_kafka_broker_destroy(partbrokers[j]); rd_free(partbrokers); - return upd; + return upd; } /** @@ -1253,15 +1248,14 @@ rd_kafka_topic_metadata_update (rd_kafka_topic_t *rkt, * @sa rd_kafka_topic_metadata_update() * @locks none */ -int -rd_kafka_topic_metadata_update2 (rd_kafka_broker_t *rkb, - const struct rd_kafka_metadata_topic *mdt) { +int rd_kafka_topic_metadata_update2(rd_kafka_broker_t *rkb, + const struct rd_kafka_metadata_topic *mdt) { rd_kafka_topic_t *rkt; int r; rd_kafka_wrlock(rkb->rkb_rk); - if (!(rkt = rd_kafka_topic_find(rkb->rkb_rk, - mdt->topic, 0/*!lock*/))) { + if (!(rkt = + rd_kafka_topic_find(rkb->rkb_rk, mdt->topic, 0 /*!lock*/))) { rd_kafka_wrunlock(rkb->rkb_rk); return -1; /* Ignore topics that we dont have locally. */ } @@ -1281,70 +1275,70 @@ rd_kafka_topic_metadata_update2 (rd_kafka_broker_t *rkb, * @returns a list of all partitions (rktp's) for a topic. * @remark rd_kafka_topic_*lock() MUST be held. 
*/ -static rd_list_t *rd_kafka_topic_get_all_partitions (rd_kafka_topic_t *rkt) { - rd_list_t *list; - rd_kafka_toppar_t *rktp; - int i; +static rd_list_t *rd_kafka_topic_get_all_partitions(rd_kafka_topic_t *rkt) { + rd_list_t *list; + rd_kafka_toppar_t *rktp; + int i; list = rd_list_new(rkt->rkt_partition_cnt + - rd_list_cnt(&rkt->rkt_desp) + 1/*ua*/, NULL); + rd_list_cnt(&rkt->rkt_desp) + 1 /*ua*/, + NULL); - for (i = 0 ; i < rkt->rkt_partition_cnt ; i++) - rd_list_add(list, rd_kafka_toppar_keep(rkt->rkt_p[i])); + for (i = 0; i < rkt->rkt_partition_cnt; i++) + rd_list_add(list, rd_kafka_toppar_keep(rkt->rkt_p[i])); - RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) - rd_list_add(list, rd_kafka_toppar_keep(rktp)); + RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) + rd_list_add(list, rd_kafka_toppar_keep(rktp)); - if (rkt->rkt_ua) - rd_list_add(list, rd_kafka_toppar_keep(rkt->rkt_ua)); + if (rkt->rkt_ua) + rd_list_add(list, rd_kafka_toppar_keep(rkt->rkt_ua)); - return list; + return list; } - /** * Remove all partitions from a topic, including the ua. * Must only be called during rd_kafka_t termination. * * Locality: main thread */ -void rd_kafka_topic_partitions_remove (rd_kafka_topic_t *rkt) { +void rd_kafka_topic_partitions_remove(rd_kafka_topic_t *rkt) { rd_kafka_toppar_t *rktp; - rd_list_t *partitions; - int i; - - /* Purge messages for all partitions outside the topic_wrlock since - * a message can hold a reference to the topic_t and thus - * would trigger a recursive lock dead-lock. 
*/ - rd_kafka_topic_rdlock(rkt); - partitions = rd_kafka_topic_get_all_partitions(rkt); - rd_kafka_topic_rdunlock(rkt); - - RD_LIST_FOREACH(rktp, partitions, i) { - rd_kafka_toppar_lock(rktp); - rd_kafka_msgq_purge(rkt->rkt_rk, &rktp->rktp_msgq); - rd_kafka_toppar_purge_and_disable_queues(rktp); - rd_kafka_toppar_unlock(rktp); - - rd_kafka_toppar_destroy(rktp); - } - rd_list_destroy(partitions); - - rd_kafka_topic_keep(rkt); - rd_kafka_topic_wrlock(rkt); - - /* Setting the partition count to 0 moves all partitions to - * the desired list (rktp_desp). */ + rd_list_t *partitions; + int i; + + /* Purge messages for all partitions outside the topic_wrlock since + * a message can hold a reference to the topic_t and thus + * would trigger a recursive lock dead-lock. */ + rd_kafka_topic_rdlock(rkt); + partitions = rd_kafka_topic_get_all_partitions(rkt); + rd_kafka_topic_rdunlock(rkt); + + RD_LIST_FOREACH(rktp, partitions, i) { + rd_kafka_toppar_lock(rktp); + rd_kafka_msgq_purge(rkt->rkt_rk, &rktp->rktp_msgq); + rd_kafka_toppar_purge_and_disable_queues(rktp); + rd_kafka_toppar_unlock(rktp); + + rd_kafka_toppar_destroy(rktp); + } + rd_list_destroy(partitions); + + rd_kafka_topic_keep(rkt); + rd_kafka_topic_wrlock(rkt); + + /* Setting the partition count to 0 moves all partitions to + * the desired list (rktp_desp). */ rd_kafka_topic_partition_cnt_update(rkt, 0); /* Now clean out the desired partitions list. 
* Use reverse traversal to avoid excessive memory shuffling * in rd_list_remove() */ RD_LIST_FOREACH_REVERSE(rktp, &rkt->rkt_desp, i) { - /* Keep a reference while deleting from desired list */ - rd_kafka_toppar_keep(rktp); + /* Keep a reference while deleting from desired list */ + rd_kafka_toppar_keep(rktp); rd_kafka_toppar_lock(rktp); rd_kafka_toppar_desired_del(rktp); @@ -1355,20 +1349,20 @@ void rd_kafka_topic_partitions_remove (rd_kafka_topic_t *rkt) { rd_kafka_assert(rkt->rkt_rk, rkt->rkt_partition_cnt == 0); - if (rkt->rkt_p) - rd_free(rkt->rkt_p); + if (rkt->rkt_p) + rd_free(rkt->rkt_p); - rkt->rkt_p = NULL; - rkt->rkt_partition_cnt = 0; + rkt->rkt_p = NULL; + rkt->rkt_partition_cnt = 0; if ((rktp = rkt->rkt_ua)) { rkt->rkt_ua = NULL; rd_kafka_toppar_destroy(rktp); - } + } - rd_kafka_topic_wrunlock(rkt); + rd_kafka_topic_wrunlock(rkt); - rd_kafka_topic_destroy0(rkt); + rd_kafka_topic_destroy0(rkt); } @@ -1379,8 +1373,8 @@ void rd_kafka_topic_partitions_remove (rd_kafka_topic_t *rkt) { * @locality any * @locks rd_kafka_toppar_lock MUST be held */ -static const char *rd_kafka_toppar_needs_query (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp) { +static const char *rd_kafka_toppar_needs_query(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp) { int broker_state; if (!rktp->rktp_broker) @@ -1421,30 +1415,31 @@ static const char *rd_kafka_toppar_needs_query (rd_kafka_t *rk, * * @locality rdkafka main thread */ -void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { - rd_kafka_topic_t *rkt; - rd_kafka_toppar_t *rktp; +void rd_kafka_topic_scan_all(rd_kafka_t *rk, rd_ts_t now) { + rd_kafka_topic_t *rkt; + rd_kafka_toppar_t *rktp; rd_list_t query_topics; rd_list_init(&query_topics, 0, rd_free); - rd_kafka_rdlock(rk); - TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - int p; - int query_this = 0; + rd_kafka_rdlock(rk); + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + int p; + int query_this = 0; rd_kafka_msgq_t timedout = RD_KAFKA_MSGQ_INITIALIZER(timedout); - 
rd_kafka_topic_wrlock(rkt); + rd_kafka_topic_wrlock(rkt); /* Check if metadata information has timed out. */ if (rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN && - !rd_kafka_metadata_cache_topic_get( - rk, rkt->rkt_topic->str, 1/*only valid*/)) { + !rd_kafka_metadata_cache_topic_get(rk, rkt->rkt_topic->str, + 1 /*only valid*/)) { rd_kafka_dbg(rk, TOPIC, "NOINFO", "Topic %s metadata information timed out " - "(%"PRId64"ms old)", + "(%" PRId64 "ms old)", rkt->rkt_topic->str, - (rd_clock() - rkt->rkt_ts_metadata)/1000); + (rd_clock() - rkt->rkt_ts_metadata) / + 1000); rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_UNKNOWN); query_this = 1; @@ -1474,7 +1469,7 @@ void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { } else if (!rd_list_empty(&rkt->rkt_desp) && rd_interval_immediate(&rkt->rkt_desp_refresh_intvl, - 10*1000*1000, 0) > 0) { + 10 * 1000 * 1000, 0) > 0) { /* Query topic metadata if there are * desired (non-existent) partitions. * At most every 10 seconds. */ @@ -1487,31 +1482,32 @@ void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { query_this = 1; } - for (p = RD_KAFKA_PARTITION_UA ; - p < rkt->rkt_partition_cnt ; p++) { + for (p = RD_KAFKA_PARTITION_UA; p < rkt->rkt_partition_cnt; + p++) { if (!(rktp = rd_kafka_toppar_get( - rkt, p, - p == RD_KAFKA_PARTITION_UA ? - rd_true : rd_false))) + rkt, p, + p == RD_KAFKA_PARTITION_UA ? rd_true + : rd_false))) continue; - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); /* Check that partition is delegated to a broker that * is up, else add topic to query list. 
*/ if (p != RD_KAFKA_PARTITION_UA) { const char *leader_reason = - rd_kafka_toppar_needs_query(rk, rktp); + rd_kafka_toppar_needs_query(rk, rktp); if (leader_reason) { rd_kafka_dbg(rk, TOPIC, "QRYLEADER", - "Topic %s [%"PRId32"]: " + "Topic %s [%" PRId32 + "]: " "broker is %s: re-query", rkt->rkt_topic->str, rktp->rktp_partition, leader_reason); - query_this = 1; + query_this = 1; } } else { if (rk->rk_type == RD_KAFKA_PRODUCER) { @@ -1519,25 +1515,23 @@ void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { * timeouts. * Proper partitions are scanned by * their toppar broker thread. */ - rd_kafka_msgq_age_scan(rktp, - &rktp->rktp_msgq, - &timedout, now, - NULL); + rd_kafka_msgq_age_scan( + rktp, &rktp->rktp_msgq, &timedout, + now, NULL); } } - rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(rktp); - } + rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_destroy(rktp); + } rd_kafka_topic_rdunlock(rkt); /* Propagate delivery reports for timed out messages */ if (rd_kafka_msgq_len(&timedout) > 0) { - rd_kafka_dbg(rk, MSG, "TIMEOUT", - "%s: %d message(s) timed out", - rkt->rkt_topic->str, - rd_kafka_msgq_len(&timedout)); + rd_kafka_dbg( + rk, MSG, "TIMEOUT", "%s: %d message(s) timed out", + rkt->rkt_topic->str, rd_kafka_msgq_len(&timedout)); rd_kafka_dr_msgq(rkt, &timedout, RD_KAFKA_RESP_ERR__MSG_TIMED_OUT); } @@ -1548,18 +1542,16 @@ void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { (void *)strcmp)) rd_list_add(&query_topics, rd_strdup(rkt->rkt_topic->str)); - } rd_kafka_rdunlock(rk); if (!rd_list_empty(&query_topics)) rd_kafka_metadata_refresh_topics( - rk, NULL, &query_topics, - rd_true/*force even if cached - * info exists*/, - rk->rk_conf.allow_auto_create_topics, - rd_false/*!cgrp_update*/, - "refresh unavailable topics"); + rk, NULL, &query_topics, rd_true /*force even if cached + * info exists*/ + , + rk->rk_conf.allow_auto_create_topics, + rd_false /*!cgrp_update*/, "refresh unavailable topics"); rd_list_destroy(&query_topics); } @@ 
-1567,9 +1559,9 @@ void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { /** * Locks: rd_kafka_topic_*lock() must be held. */ -int rd_kafka_topic_partition_available (const rd_kafka_topic_t *app_rkt, - int32_t partition) { - int avail; +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *app_rkt, + int32_t partition) { + int avail; rd_kafka_toppar_t *rktp; rd_kafka_broker_t *rkb; @@ -1577,20 +1569,20 @@ int rd_kafka_topic_partition_available (const rd_kafka_topic_t *app_rkt, * partitioner is always passed a proper topic */ rd_assert(!rd_kafka_rkt_is_lw(app_rkt)); - rktp = rd_kafka_toppar_get(app_rkt, partition, 0/*no ua-on-miss*/); - if (unlikely(!rktp)) - return 0; + rktp = rd_kafka_toppar_get(app_rkt, partition, 0 /*no ua-on-miss*/); + if (unlikely(!rktp)) + return 0; - rkb = rd_kafka_toppar_broker(rktp, 1/*proper broker*/); + rkb = rd_kafka_toppar_broker(rktp, 1 /*proper broker*/); avail = rkb ? 1 : 0; if (rkb) rd_kafka_broker_destroy(rkb); - rd_kafka_toppar_destroy(rktp); - return avail; + rd_kafka_toppar_destroy(rktp); + return avail; } -void *rd_kafka_topic_opaque (const rd_kafka_topic_t *app_rkt) { +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *app_rkt) { const rd_kafka_lwtopic_t *lrkt; lrkt = rd_kafka_rkt_get_lw((rd_kafka_topic_t *)app_rkt); @@ -1598,8 +1590,8 @@ void *rd_kafka_topic_opaque (const rd_kafka_topic_t *app_rkt) { void *opaque; rd_kafka_topic_t *rkt; - if (!(rkt = rd_kafka_topic_find(lrkt->lrkt_rk, - lrkt->lrkt_topic, 1/*lock*/))) + if (!(rkt = rd_kafka_topic_find(lrkt->lrkt_rk, lrkt->lrkt_topic, + 1 /*lock*/))) return NULL; opaque = rkt->rkt_conf.opaque; @@ -1613,12 +1605,12 @@ void *rd_kafka_topic_opaque (const rd_kafka_topic_t *app_rkt) { } -int rd_kafka_topic_info_cmp (const void *_a, const void *_b) { - const rd_kafka_topic_info_t *a = _a, *b = _b; - int r; +int rd_kafka_topic_info_cmp(const void *_a, const void *_b) { + const rd_kafka_topic_info_t *a = _a, *b = _b; + int r; - if ((r = strcmp(a->topic, b->topic))) - 
return r; + if ((r = strcmp(a->topic, b->topic))) + return r; return RD_CMP(a->partition_cnt, b->partition_cnt); } @@ -1630,8 +1622,8 @@ int rd_kafka_topic_info_cmp (const void *_a, const void *_b) { * @param _a topic string (type char *) * @param _b rd_kafka_topic_info_t * pointer. */ -int rd_kafka_topic_info_topic_cmp (const void *_a, const void *_b) { - const char *a = _a; +int rd_kafka_topic_info_topic_cmp(const void *_a, const void *_b) { + const char *a = _a; const rd_kafka_topic_info_t *b = _b; return strcmp(a, b->topic); } @@ -1641,25 +1633,25 @@ int rd_kafka_topic_info_topic_cmp (const void *_a, const void *_b) { * Allocate new topic_info. * \p topic is copied. */ -rd_kafka_topic_info_t *rd_kafka_topic_info_new (const char *topic, - int partition_cnt) { - rd_kafka_topic_info_t *ti; - size_t tlen = strlen(topic) + 1; - - /* Allocate space for the topic along with the struct */ - ti = rd_malloc(sizeof(*ti) + tlen); - ti->topic = (char *)(ti+1); - memcpy((char *)ti->topic, topic, tlen); - ti->partition_cnt = partition_cnt; - - return ti; +rd_kafka_topic_info_t *rd_kafka_topic_info_new(const char *topic, + int partition_cnt) { + rd_kafka_topic_info_t *ti; + size_t tlen = strlen(topic) + 1; + + /* Allocate space for the topic along with the struct */ + ti = rd_malloc(sizeof(*ti) + tlen); + ti->topic = (char *)(ti + 1); + memcpy((char *)ti->topic, topic, tlen); + ti->partition_cnt = partition_cnt; + + return ti; } /** * Destroy/free topic_info */ -void rd_kafka_topic_info_destroy (rd_kafka_topic_info_t *ti) { - rd_free(ti); +void rd_kafka_topic_info_destroy(rd_kafka_topic_info_t *ti) { + rd_free(ti); } @@ -1671,47 +1663,42 @@ void rd_kafka_topic_info_destroy (rd_kafka_topic_info_t *ti) { * * @returns 1 on match, else 0. 
*/ -int rd_kafka_topic_match (rd_kafka_t *rk, const char *pattern, - const char *topic) { - char errstr[128]; - - if (*pattern == '^') { - int r = rd_regex_match(pattern, topic, errstr, sizeof(errstr)); - if (unlikely(r == -1)) - rd_kafka_dbg(rk, TOPIC, "TOPICREGEX", - "Topic \"%s\" regex \"%s\" " - "matching failed: %s", - topic, pattern, errstr); - return r == 1; - } else - return !strcmp(pattern, topic); +int rd_kafka_topic_match(rd_kafka_t *rk, + const char *pattern, + const char *topic) { + char errstr[128]; + + if (*pattern == '^') { + int r = rd_regex_match(pattern, topic, errstr, sizeof(errstr)); + if (unlikely(r == -1)) + rd_kafka_dbg(rk, TOPIC, "TOPICREGEX", + "Topic \"%s\" regex \"%s\" " + "matching failed: %s", + topic, pattern, errstr); + return r == 1; + } else + return !strcmp(pattern, topic); } - - - - - - /** * @brief Trigger broker metadata query for topic leader. * * @locks none */ -void rd_kafka_topic_leader_query0 (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - int do_rk_lock) { +void rd_kafka_topic_leader_query0(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + int do_rk_lock) { rd_list_t topics; rd_list_init(&topics, 1, rd_free); rd_list_add(&topics, rd_strdup(rkt->rkt_topic->str)); - rd_kafka_metadata_refresh_topics(rk, NULL, &topics, - rd_false/*dont force*/, - rk->rk_conf.allow_auto_create_topics, - rd_false/*!cgrp_update*/, - "leader query"); + rd_kafka_metadata_refresh_topics( + rk, NULL, &topics, rd_false /*dont force*/, + rk->rk_conf.allow_auto_create_topics, rd_false /*!cgrp_update*/, + "leader query"); rd_list_destroy(&topics); } @@ -1727,15 +1714,16 @@ void rd_kafka_topic_leader_query0 (rd_kafka_t *rk, rd_kafka_topic_t *rkt, * metadata cache. 
* @remark \p rk lock MUST NOT be held */ -void rd_kafka_local_topics_to_list (rd_kafka_t *rk, rd_list_t *topics, - int *cache_cntp) { +void rd_kafka_local_topics_to_list(rd_kafka_t *rk, + rd_list_t *topics, + int *cache_cntp) { rd_kafka_topic_t *rkt; int cache_cnt; rd_kafka_rdlock(rk); rd_list_grow(topics, rk->rk_topic_cnt); TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) - rd_list_add(topics, rd_strdup(rkt->rkt_topic->str)); + rd_list_add(topics, rd_strdup(rkt->rkt_topic->str)); cache_cnt = rd_kafka_metadata_cache_topics_to_list(rk, topics); if (cache_cntp) *cache_cntp = cache_cnt; @@ -1747,20 +1735,19 @@ void rd_kafka_local_topics_to_list (rd_kafka_t *rk, rd_list_t *topics, * @brief Unit test helper to set a topic's state to EXISTS * with the given number of partitions. */ -void rd_ut_kafka_topic_set_topic_exists (rd_kafka_topic_t *rkt, - int partition_cnt, - int32_t leader_id) { - struct rd_kafka_metadata_topic mdt = { - .topic = (char *)rkt->rkt_topic->str, - .partition_cnt = partition_cnt - }; +void rd_ut_kafka_topic_set_topic_exists(rd_kafka_topic_t *rkt, + int partition_cnt, + int32_t leader_id) { + struct rd_kafka_metadata_topic mdt = {.topic = + (char *)rkt->rkt_topic->str, + .partition_cnt = partition_cnt}; int i; mdt.partitions = rd_alloca(sizeof(*mdt.partitions) * partition_cnt); - for (i = 0 ; i < partition_cnt ; i++) { + for (i = 0; i < partition_cnt; i++) { memset(&mdt.partitions[i], 0, sizeof(mdt.partitions[i])); - mdt.partitions[i].id = i; + mdt.partitions[i].id = i; mdt.partitions[i].leader = leader_id; } diff --git a/src/rdkafka_topic.h b/src/rdkafka_topic.h index d6b0a84cbd..414cd66228 100644 --- a/src/rdkafka_topic.h +++ b/src/rdkafka_topic.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012,2013 Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -43,7 +43,7 @@ extern const char *rd_kafka_topic_state_names[]; * @remark lrkt_magic[4] MUST be the first field and be set to "LRKT". */ struct rd_kafka_lwtopic_s { - char lrkt_magic[4]; /**< "LRKT" */ + char lrkt_magic[4]; /**< "LRKT" */ rd_kafka_t *lrkt_rk; /**< Pointer to the client instance. */ rd_refcnt_t lrkt_refcnt; /**< Refcount */ char *lrkt_topic; /**< Points past this struct, allocated @@ -51,112 +51,109 @@ struct rd_kafka_lwtopic_s { }; /** Casts a topic_t to a light-weight lwtopic_t */ -#define rd_kafka_rkt_lw(rkt) \ - ((rd_kafka_lwtopic_t *)rkt) +#define rd_kafka_rkt_lw(rkt) ((rd_kafka_lwtopic_t *)rkt) -#define rd_kafka_rkt_lw_const(rkt) \ - ((const rd_kafka_lwtopic_t *)rkt) +#define rd_kafka_rkt_lw_const(rkt) ((const rd_kafka_lwtopic_t *)rkt) /** * @returns true if the topic object is a light-weight topic, else false. */ -static RD_UNUSED RD_INLINE -rd_bool_t rd_kafka_rkt_is_lw (const rd_kafka_topic_t *app_rkt) { +static RD_UNUSED RD_INLINE rd_bool_t +rd_kafka_rkt_is_lw(const rd_kafka_topic_t *app_rkt) { const rd_kafka_lwtopic_t *lrkt = rd_kafka_rkt_lw_const(app_rkt); return !memcmp(lrkt->lrkt_magic, "LRKT", 4); } /** @returns the lwtopic_t if \p rkt is a light-weight topic, else NULL. 
*/ -static RD_UNUSED RD_INLINE -rd_kafka_lwtopic_t *rd_kafka_rkt_get_lw (rd_kafka_topic_t *rkt) { +static RD_UNUSED RD_INLINE rd_kafka_lwtopic_t * +rd_kafka_rkt_get_lw(rd_kafka_topic_t *rkt) { if (rd_kafka_rkt_is_lw(rkt)) return rd_kafka_rkt_lw(rkt); return NULL; } -void rd_kafka_lwtopic_destroy (rd_kafka_lwtopic_t *lrkt); -rd_kafka_lwtopic_t *rd_kafka_lwtopic_new (rd_kafka_t *rk, const char *topic); +void rd_kafka_lwtopic_destroy(rd_kafka_lwtopic_t *lrkt); +rd_kafka_lwtopic_t *rd_kafka_lwtopic_new(rd_kafka_t *rk, const char *topic); -static RD_UNUSED RD_INLINE -void rd_kafka_lwtopic_keep (rd_kafka_lwtopic_t *lrkt) { +static RD_UNUSED RD_INLINE void +rd_kafka_lwtopic_keep(rd_kafka_lwtopic_t *lrkt) { rd_refcnt_add(&lrkt->lrkt_refcnt); } - /* * @struct Internal representation of a topic. * * @remark rkt_magic[4] MUST be the first field and be set to "IRKT". */ struct rd_kafka_topic_s { - char rkt_magic[4]; /**< "IRKT" */ + char rkt_magic[4]; /**< "IRKT" */ - TAILQ_ENTRY(rd_kafka_topic_s) rkt_link; + TAILQ_ENTRY(rd_kafka_topic_s) rkt_link; - rd_refcnt_t rkt_refcnt; + rd_refcnt_t rkt_refcnt; - rwlock_t rkt_lock; - rd_kafkap_str_t *rkt_topic; + rwlock_t rkt_lock; + rd_kafkap_str_t *rkt_topic; - rd_kafka_toppar_t *rkt_ua; /**< Unassigned partition (-1) */ - rd_kafka_toppar_t **rkt_p; /**< Partition array */ - int32_t rkt_partition_cnt; + rd_kafka_toppar_t *rkt_ua; /**< Unassigned partition (-1) */ + rd_kafka_toppar_t **rkt_p; /**< Partition array */ + int32_t rkt_partition_cnt; - int32_t rkt_sticky_partition; /**< Current sticky partition. - * @locks rkt_lock */ - rd_interval_t rkt_sticky_intvl; /**< Interval to assign new - * sticky partition. */ + int32_t rkt_sticky_partition; /**< Current sticky partition. + * @locks rkt_lock */ + rd_interval_t rkt_sticky_intvl; /**< Interval to assign new + * sticky partition. */ - rd_list_t rkt_desp; /* Desired partitions - * that are not yet seen - * in the cluster. 
*/ - rd_interval_t rkt_desp_refresh_intvl; /**< Rate-limiter for - * desired partition - * metadata refresh. */ + rd_list_t rkt_desp; /* Desired partitions + * that are not yet seen + * in the cluster. */ + rd_interval_t rkt_desp_refresh_intvl; /**< Rate-limiter for + * desired partition + * metadata refresh. */ - rd_ts_t rkt_ts_create; /**< Topic object creation time. */ - rd_ts_t rkt_ts_metadata; /* Timestamp of last metadata - * update for this topic. */ + rd_ts_t rkt_ts_create; /**< Topic object creation time. */ + rd_ts_t rkt_ts_metadata; /* Timestamp of last metadata + * update for this topic. */ - rd_refcnt_t rkt_app_refcnt; /**< Number of active rkt's new()ed - * by application. */ + rd_refcnt_t rkt_app_refcnt; /**< Number of active rkt's new()ed + * by application. */ - enum { - RD_KAFKA_TOPIC_S_UNKNOWN, /* No cluster information yet */ - RD_KAFKA_TOPIC_S_EXISTS, /* Topic exists in cluster */ - RD_KAFKA_TOPIC_S_NOTEXISTS, /* Topic is not known in cluster */ - RD_KAFKA_TOPIC_S_ERROR, /* Topic exists but is in an errored - * state, such as auth failure. */ - } rkt_state; + enum { RD_KAFKA_TOPIC_S_UNKNOWN, /* No cluster information yet */ + RD_KAFKA_TOPIC_S_EXISTS, /* Topic exists in cluster */ + RD_KAFKA_TOPIC_S_NOTEXISTS, /* Topic is not known in cluster */ + RD_KAFKA_TOPIC_S_ERROR, /* Topic exists but is in an errored + * state, such as auth failure. */ + } rkt_state; - int rkt_flags; -#define RD_KAFKA_TOPIC_F_LEADER_UNAVAIL 0x1 /* Leader lost/unavailable - * for at least one partition. */ + int rkt_flags; +#define RD_KAFKA_TOPIC_F_LEADER_UNAVAIL \ + 0x1 /* Leader lost/unavailable \ + * for at least one partition. */ - rd_kafka_resp_err_t rkt_err; /**< Permanent error. */ + rd_kafka_resp_err_t rkt_err; /**< Permanent error. 
*/ - rd_kafka_t *rkt_rk; + rd_kafka_t *rkt_rk; - rd_avg_t rkt_avg_batchsize; /**< Average batch size */ - rd_avg_t rkt_avg_batchcnt; /**< Average batch message count */ + rd_avg_t rkt_avg_batchsize; /**< Average batch size */ + rd_avg_t rkt_avg_batchcnt; /**< Average batch message count */ - rd_kafka_topic_conf_t rkt_conf; + rd_kafka_topic_conf_t rkt_conf; }; -#define rd_kafka_topic_rdlock(rkt) rwlock_rdlock(&(rkt)->rkt_lock) -#define rd_kafka_topic_wrlock(rkt) rwlock_wrlock(&(rkt)->rkt_lock) -#define rd_kafka_topic_rdunlock(rkt) rwlock_rdunlock(&(rkt)->rkt_lock) -#define rd_kafka_topic_wrunlock(rkt) rwlock_wrunlock(&(rkt)->rkt_lock) +#define rd_kafka_topic_rdlock(rkt) rwlock_rdlock(&(rkt)->rkt_lock) +#define rd_kafka_topic_wrlock(rkt) rwlock_wrlock(&(rkt)->rkt_lock) +#define rd_kafka_topic_rdunlock(rkt) rwlock_rdunlock(&(rkt)->rkt_lock) +#define rd_kafka_topic_wrunlock(rkt) rwlock_wrunlock(&(rkt)->rkt_lock) /** * @brief Increase refcount and return topic object. */ -static RD_INLINE RD_UNUSED -rd_kafka_topic_t *rd_kafka_topic_keep (rd_kafka_topic_t *rkt) { +static RD_INLINE RD_UNUSED rd_kafka_topic_t * +rd_kafka_topic_keep(rd_kafka_topic_t *rkt) { rd_kafka_lwtopic_t *lrkt; if (unlikely((lrkt = rd_kafka_rkt_get_lw(rkt)) != NULL)) rd_kafka_lwtopic_keep(lrkt); @@ -165,17 +162,16 @@ rd_kafka_topic_t *rd_kafka_topic_keep (rd_kafka_topic_t *rkt) { return rkt; } -void rd_kafka_topic_destroy_final (rd_kafka_topic_t *rkt); +void rd_kafka_topic_destroy_final(rd_kafka_topic_t *rkt); -rd_kafka_topic_t *rd_kafka_topic_proper (rd_kafka_topic_t *app_rkt); +rd_kafka_topic_t *rd_kafka_topic_proper(rd_kafka_topic_t *app_rkt); /** * @brief Loose reference to topic object as increased by ..topic_keep(). 
*/ -static RD_INLINE RD_UNUSED void -rd_kafka_topic_destroy0 (rd_kafka_topic_t *rkt) { +static RD_INLINE RD_UNUSED void rd_kafka_topic_destroy0(rd_kafka_topic_t *rkt) { rd_kafka_lwtopic_t *lrkt; if (unlikely((lrkt = rd_kafka_rkt_get_lw(rkt)) != NULL)) rd_kafka_lwtopic_destroy(lrkt); @@ -184,29 +180,33 @@ rd_kafka_topic_destroy0 (rd_kafka_topic_t *rkt) { } -rd_kafka_topic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, const char *topic, - rd_kafka_topic_conf_t *conf, - int *existing, int do_lock); +rd_kafka_topic_t *rd_kafka_topic_new0(rd_kafka_t *rk, + const char *topic, + rd_kafka_topic_conf_t *conf, + int *existing, + int do_lock); -rd_kafka_topic_t *rd_kafka_topic_find_fl (const char *func, int line, +rd_kafka_topic_t *rd_kafka_topic_find_fl(const char *func, + int line, + rd_kafka_t *rk, + const char *topic, + int do_lock); +rd_kafka_topic_t *rd_kafka_topic_find0_fl(const char *func, + int line, rd_kafka_t *rk, - const char *topic, - int do_lock); -rd_kafka_topic_t *rd_kafka_topic_find0_fl (const char *func, int line, - rd_kafka_t *rk, - const rd_kafkap_str_t *topic); -#define rd_kafka_topic_find(rk,topic,do_lock) \ - rd_kafka_topic_find_fl(__FUNCTION__,__LINE__,rk,topic,do_lock) -#define rd_kafka_topic_find0(rk,topic) \ - rd_kafka_topic_find0_fl(__FUNCTION__,__LINE__,rk,topic) -int rd_kafka_topic_cmp_rkt (const void *_a, const void *_b); - -void rd_kafka_topic_partitions_remove (rd_kafka_topic_t *rkt); - -rd_bool_t rd_kafka_topic_set_notexists (rd_kafka_topic_t *rkt, - rd_kafka_resp_err_t err); -rd_bool_t rd_kafka_topic_set_error (rd_kafka_topic_t *rkt, - rd_kafka_resp_err_t err); + const rd_kafkap_str_t *topic); +#define rd_kafka_topic_find(rk, topic, do_lock) \ + rd_kafka_topic_find_fl(__FUNCTION__, __LINE__, rk, topic, do_lock) +#define rd_kafka_topic_find0(rk, topic) \ + rd_kafka_topic_find0_fl(__FUNCTION__, __LINE__, rk, topic) +int rd_kafka_topic_cmp_rkt(const void *_a, const void *_b); + +void rd_kafka_topic_partitions_remove(rd_kafka_topic_t *rkt); + 
+rd_bool_t rd_kafka_topic_set_notexists(rd_kafka_topic_t *rkt, + rd_kafka_resp_err_t err); +rd_bool_t rd_kafka_topic_set_error(rd_kafka_topic_t *rkt, + rd_kafka_resp_err_t err); /** * @returns the topic's permanent error, if any. @@ -215,7 +215,7 @@ rd_bool_t rd_kafka_topic_set_error (rd_kafka_topic_t *rkt, * @locks_acquired rd_kafka_topic_rdlock(rkt) */ static RD_INLINE RD_UNUSED rd_kafka_resp_err_t -rd_kafka_topic_get_error (rd_kafka_topic_t *rkt) { +rd_kafka_topic_get_error(rd_kafka_topic_t *rkt) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; rd_kafka_topic_rdlock(rkt); if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR) @@ -224,48 +224,53 @@ rd_kafka_topic_get_error (rd_kafka_topic_t *rkt) { return err; } -int rd_kafka_topic_metadata_update2 (rd_kafka_broker_t *rkb, - const struct rd_kafka_metadata_topic *mdt); +int rd_kafka_topic_metadata_update2(rd_kafka_broker_t *rkb, + const struct rd_kafka_metadata_topic *mdt); -void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now); +void rd_kafka_topic_scan_all(rd_kafka_t *rk, rd_ts_t now); typedef struct rd_kafka_topic_info_s { - const char *topic; /**< Allocated along with struct */ - int partition_cnt; + const char *topic; /**< Allocated along with struct */ + int partition_cnt; } rd_kafka_topic_info_t; -int rd_kafka_topic_info_topic_cmp (const void *_a, const void *_b); -int rd_kafka_topic_info_cmp (const void *_a, const void *_b); -rd_kafka_topic_info_t *rd_kafka_topic_info_new (const char *topic, - int partition_cnt); -void rd_kafka_topic_info_destroy (rd_kafka_topic_info_t *ti); - -int rd_kafka_topic_match (rd_kafka_t *rk, const char *pattern, - const char *topic); - -int rd_kafka_toppar_broker_update (rd_kafka_toppar_t *rktp, - int32_t broker_id, rd_kafka_broker_t *rkb, - const char *reason); - -int rd_kafka_toppar_delegate_to_leader (rd_kafka_toppar_t *rktp); - -rd_kafka_resp_err_t -rd_kafka_topics_leader_query_sync (rd_kafka_t *rk, int all_topics, - const rd_list_t *topics, int timeout_ms); -void 
rd_kafka_topic_leader_query0 (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - int do_rk_lock); -#define rd_kafka_topic_leader_query(rk,rkt) \ - rd_kafka_topic_leader_query0(rk,rkt,1/*lock*/) - -#define rd_kafka_topic_fast_leader_query(rk) \ +int rd_kafka_topic_info_topic_cmp(const void *_a, const void *_b); +int rd_kafka_topic_info_cmp(const void *_a, const void *_b); +rd_kafka_topic_info_t *rd_kafka_topic_info_new(const char *topic, + int partition_cnt); +void rd_kafka_topic_info_destroy(rd_kafka_topic_info_t *ti); + +int rd_kafka_topic_match(rd_kafka_t *rk, + const char *pattern, + const char *topic); + +int rd_kafka_toppar_broker_update(rd_kafka_toppar_t *rktp, + int32_t broker_id, + rd_kafka_broker_t *rkb, + const char *reason); + +int rd_kafka_toppar_delegate_to_leader(rd_kafka_toppar_t *rktp); + +rd_kafka_resp_err_t rd_kafka_topics_leader_query_sync(rd_kafka_t *rk, + int all_topics, + const rd_list_t *topics, + int timeout_ms); +void rd_kafka_topic_leader_query0(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + int do_rk_lock); +#define rd_kafka_topic_leader_query(rk, rkt) \ + rd_kafka_topic_leader_query0(rk, rkt, 1 /*lock*/) + +#define rd_kafka_topic_fast_leader_query(rk) \ rd_kafka_metadata_fast_leader_query(rk) -void rd_kafka_local_topics_to_list (rd_kafka_t *rk, rd_list_t *topics, - int *cache_cntp); +void rd_kafka_local_topics_to_list(rd_kafka_t *rk, + rd_list_t *topics, + int *cache_cntp); -void rd_ut_kafka_topic_set_topic_exists (rd_kafka_topic_t *rkt, - int partition_cnt, - int32_t leader_id); +void rd_ut_kafka_topic_set_topic_exists(rd_kafka_topic_t *rkt, + int partition_cnt, + int32_t leader_id); #endif /* _RDKAFKA_TOPIC_H_ */ diff --git a/src/rdkafka_transport.c b/src/rdkafka_transport.c index 47ecabccda..732d1d3461 100644 --- a/src/rdkafka_transport.c +++ b/src/rdkafka_transport.c @@ -3,24 +3,24 @@ * * Copyright (c) 2015, Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -31,7 +31,7 @@ #define __need_IOV_MAX -#define _DARWIN_C_SOURCE /* MSG_DONTWAIT */ +#define _DARWIN_C_SOURCE /* MSG_DONTWAIT */ #include "rdkafka_int.h" #include "rdaddr.h" @@ -44,7 +44,7 @@ /* AIX doesn't have MSG_DONTWAIT */ #ifndef MSG_DONTWAIT -# define MSG_DONTWAIT MSG_NONBLOCK +#define MSG_DONTWAIT MSG_NONBLOCK #endif #if WITH_SSL @@ -68,7 +68,7 @@ static int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout); /** * Low-level socket close */ -static void rd_kafka_transport_close0 (rd_kafka_t *rk, rd_socket_t s) { +static void rd_kafka_transport_close0(rd_kafka_t *rk, rd_socket_t s) { if (rk->rk_conf.closesocket_cb) rk->rk_conf.closesocket_cb((int)s, rk->rk_conf.opaque); else @@ -78,7 +78,7 @@ static void rd_kafka_transport_close0 (rd_kafka_t *rk, rd_socket_t s) { /** * Close and destroy a transport handle */ -void rd_kafka_transport_close (rd_kafka_transport_t *rktrans) { +void rd_kafka_transport_close(rd_kafka_transport_t *rktrans) { #if WITH_SSL rd_kafka_curr_transport = rktrans; if (rktrans->rktrans_ssl) @@ -87,18 +87,18 @@ void rd_kafka_transport_close (rd_kafka_transport_t *rktrans) { rd_kafka_sasl_close(rktrans); - if (rktrans->rktrans_recv_buf) - rd_kafka_buf_destroy(rktrans->rktrans_recv_buf); + if (rktrans->rktrans_recv_buf) + rd_kafka_buf_destroy(rktrans->rktrans_recv_buf); #ifdef _WIN32 WSACloseEvent(rktrans->rktrans_wsaevent); #endif - if (rktrans->rktrans_s != -1) + if (rktrans->rktrans_s != -1) 
rd_kafka_transport_close0(rktrans->rktrans_rkb->rkb_rk, rktrans->rktrans_s); - rd_free(rktrans); + rd_free(rktrans); } /** @@ -107,14 +107,14 @@ void rd_kafka_transport_close (rd_kafka_transport_t *rktrans) { * This will prohibit further sends and receives. * rd_kafka_transport_close() must still be called to close the socket. */ -void rd_kafka_transport_shutdown (rd_kafka_transport_t *rktrans) { +void rd_kafka_transport_shutdown(rd_kafka_transport_t *rktrans) { shutdown(rktrans->rktrans_s, #ifdef _WIN32 SD_BOTH #else SHUT_RDWR #endif - ); + ); } @@ -123,12 +123,12 @@ void rd_kafka_transport_shutdown (rd_kafka_transport_t *rktrans) { * @brief sendmsg() abstraction, converting a list of segments to iovecs. * @remark should only be called if the number of segments is > 1. */ -static ssize_t -rd_kafka_transport_socket_sendmsg (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size) { +static ssize_t rd_kafka_transport_socket_sendmsg(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size) { struct iovec iov[IOV_MAX]; - struct msghdr msg = { .msg_iov = iov }; + struct msghdr msg = {.msg_iov = iov}; size_t iovlen; ssize_t r; size_t r2; @@ -143,11 +143,12 @@ rd_kafka_transport_socket_sendmsg (rd_kafka_transport_t *rktrans, rd_socket_errno = EAGAIN; #endif - r = sendmsg(rktrans->rktrans_s, &msg, MSG_DONTWAIT + r = sendmsg(rktrans->rktrans_s, &msg, + MSG_DONTWAIT #ifdef MSG_NOSIGNAL - | MSG_NOSIGNAL + | MSG_NOSIGNAL #endif - ); + ); if (r == -1) { if (rd_socket_errno == EAGAIN) @@ -169,10 +170,10 @@ rd_kafka_transport_socket_sendmsg (rd_kafka_transport_t *rktrans, /** * @brief Plain send() abstraction */ -static ssize_t -rd_kafka_transport_socket_send0 (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size) { +static ssize_t rd_kafka_transport_socket_send0(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size) { ssize_t sum = 0; const void 
*p; size_t rlen; @@ -194,14 +195,15 @@ rd_kafka_transport_socket_send0 (rd_kafka_transport_t *rktrans, if (sum > 0 || rd_socket_errno == WSAEWOULDBLOCK) { rktrans->rktrans_blocked = rd_true; return sum; - } else { - rd_snprintf(errstr, errstr_size, "%s", - rd_socket_strerror(rd_socket_errno)); + } else { + rd_snprintf( + errstr, errstr_size, "%s", + rd_socket_strerror(rd_socket_errno)); return -1; } } - rktrans->rktrans_blocked = rd_false; + rktrans->rktrans_blocked = rd_false; #else if (unlikely(r <= 0)) { if (r == 0 || rd_socket_errno == EAGAIN) @@ -230,19 +232,19 @@ rd_kafka_transport_socket_send0 (rd_kafka_transport_t *rktrans, } -static ssize_t -rd_kafka_transport_socket_send (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size) { +static ssize_t rd_kafka_transport_socket_send(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size) { #ifndef _WIN32 /* FIXME: Use sendmsg() with iovecs if there's more than one segment * remaining, otherwise (or if platform does not have sendmsg) * use plain send(). */ - return rd_kafka_transport_socket_sendmsg(rktrans, slice, - errstr, errstr_size); + return rd_kafka_transport_socket_sendmsg(rktrans, slice, errstr, + errstr_size); #endif - return rd_kafka_transport_socket_send0(rktrans, slice, - errstr, errstr_size); + return rd_kafka_transport_socket_send0(rktrans, slice, errstr, + errstr_size); } @@ -252,13 +254,13 @@ rd_kafka_transport_socket_send (rd_kafka_transport_t *rktrans, * @brief recvmsg() abstraction, converting a list of segments to iovecs. * @remark should only be called if the number of segments is > 1. 
*/ -static ssize_t -rd_kafka_transport_socket_recvmsg (rd_kafka_transport_t *rktrans, - rd_buf_t *rbuf, - char *errstr, size_t errstr_size) { +static ssize_t rd_kafka_transport_socket_recvmsg(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size) { ssize_t r; struct iovec iov[IOV_MAX]; - struct msghdr msg = { .msg_iov = iov }; + struct msghdr msg = {.msg_iov = iov}; size_t iovlen; rd_buf_get_write_iov(rbuf, msg.msg_iov, &iovlen, IOV_MAX, @@ -275,8 +277,7 @@ rd_kafka_transport_socket_recvmsg (rd_kafka_transport_t *rktrans, if (unlikely(r <= 0)) { if (r == -1 && rd_socket_errno == EAGAIN) return 0; - else if (r == 0 || - (r == -1 && rd_socket_errno == ECONNRESET)) { + else if (r == 0 || (r == -1 && rd_socket_errno == ECONNRESET)) { /* Receive 0 after POLLIN event means * connection closed. */ rd_snprintf(errstr, errstr_size, "Disconnected"); @@ -299,10 +300,10 @@ rd_kafka_transport_socket_recvmsg (rd_kafka_transport_t *rktrans, /** * @brief Plain recv() */ -static ssize_t -rd_kafka_transport_socket_recv0 (rd_kafka_transport_t *rktrans, - rd_buf_t *rbuf, - char *errstr, size_t errstr_size) { +static ssize_t rd_kafka_transport_socket_recv0(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size) { ssize_t sum = 0; void *p; size_t len; @@ -314,7 +315,7 @@ rd_kafka_transport_socket_recv0 (rd_kafka_transport_t *rktrans, #ifdef _WIN32 (int) #endif - len, + len, 0); if (unlikely(r == RD_SOCKET_ERROR)) { @@ -322,18 +323,18 @@ rd_kafka_transport_socket_recv0 (rd_kafka_transport_t *rktrans, #ifdef _WIN32 || rd_socket_errno == WSAEWOULDBLOCK #endif - ) + ) return sum; else { - rd_snprintf(errstr, errstr_size, "%s", - rd_socket_strerror(rd_socket_errno)); + rd_snprintf( + errstr, errstr_size, "%s", + rd_socket_strerror(rd_socket_errno)); return -1; } } else if (unlikely(r == 0)) { /* Receive 0 after POLLIN event means * connection closed. 
*/ - rd_snprintf(errstr, errstr_size, - "Disconnected"); + rd_snprintf(errstr, errstr_size, "Disconnected"); return -1; } @@ -351,30 +352,28 @@ rd_kafka_transport_socket_recv0 (rd_kafka_transport_t *rktrans, } -static ssize_t -rd_kafka_transport_socket_recv (rd_kafka_transport_t *rktrans, - rd_buf_t *buf, - char *errstr, size_t errstr_size) { +static ssize_t rd_kafka_transport_socket_recv(rd_kafka_transport_t *rktrans, + rd_buf_t *buf, + char *errstr, + size_t errstr_size) { #ifndef _WIN32 - return rd_kafka_transport_socket_recvmsg(rktrans, buf, - errstr, errstr_size); + return rd_kafka_transport_socket_recvmsg(rktrans, buf, errstr, + errstr_size); #endif - return rd_kafka_transport_socket_recv0(rktrans, buf, - errstr, errstr_size); + return rd_kafka_transport_socket_recv0(rktrans, buf, errstr, + errstr_size); } - - /** * CONNECT state is failed (errstr!=NULL) or done (TCP is up, SSL is working..). * From this state we either hand control back to the broker code, * or if authentication is configured we ente the AUTH state. 
*/ -void rd_kafka_transport_connect_done (rd_kafka_transport_t *rktrans, - char *errstr) { - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; +void rd_kafka_transport_connect_done(rd_kafka_transport_t *rktrans, + char *errstr) { + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; rd_kafka_curr_transport = rktrans; @@ -383,41 +382,40 @@ void rd_kafka_transport_connect_done (rd_kafka_transport_t *rktrans, - - - -ssize_t -rd_kafka_transport_send (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, char *errstr, size_t errstr_size) { +ssize_t rd_kafka_transport_send(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size) { ssize_t r; #if WITH_SSL if (rktrans->rktrans_ssl) { rd_kafka_curr_transport = rktrans; - r = rd_kafka_transport_ssl_send(rktrans, slice, - errstr, errstr_size); + r = rd_kafka_transport_ssl_send(rktrans, slice, errstr, + errstr_size); } else #endif - r = rd_kafka_transport_socket_send(rktrans, slice, - errstr, errstr_size); + r = rd_kafka_transport_socket_send(rktrans, slice, errstr, + errstr_size); return r; } -ssize_t -rd_kafka_transport_recv (rd_kafka_transport_t *rktrans, rd_buf_t *rbuf, - char *errstr, size_t errstr_size) { +ssize_t rd_kafka_transport_recv(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size) { ssize_t r; #if WITH_SSL if (rktrans->rktrans_ssl) { rd_kafka_curr_transport = rktrans; - r = rd_kafka_transport_ssl_recv(rktrans, rbuf, - errstr, errstr_size); + r = rd_kafka_transport_ssl_recv(rktrans, rbuf, errstr, + errstr_size); } else #endif - r = rd_kafka_transport_socket_recv(rktrans, rbuf, - errstr, errstr_size); + r = rd_kafka_transport_socket_recv(rktrans, rbuf, errstr, + errstr_size); return r; } @@ -427,24 +425,20 @@ rd_kafka_transport_recv (rd_kafka_transport_t *rktrans, rd_buf_t *rbuf, /** * @brief Notify transport layer of full request sent. 
*/ -void rd_kafka_transport_request_sent (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf) { +void rd_kafka_transport_request_sent(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf) { rd_kafka_transport_t *rktrans = rkb->rkb_transport; /* Call on_request_sent interceptors */ rd_kafka_interceptors_on_request_sent( - rkb->rkb_rk, - (int)rktrans->rktrans_s, - rkb->rkb_name, rkb->rkb_nodeid, - rkbuf->rkbuf_reqhdr.ApiKey, - rkbuf->rkbuf_reqhdr.ApiVersion, - rkbuf->rkbuf_corrid, - rd_slice_size(&rkbuf->rkbuf_reader)); + rkb->rkb_rk, (int)rktrans->rktrans_s, rkb->rkb_name, + rkb->rkb_nodeid, rkbuf->rkbuf_reqhdr.ApiKey, + rkbuf->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_corrid, + rd_slice_size(&rkbuf->rkbuf_reader)); } - /** * Length framed receive handling. * Currently only supports a the following framing: @@ -455,104 +449,106 @@ void rd_kafka_transport_request_sent (rd_kafka_broker_t *rkb, * 0: still waiting for data (*rkbufp remains unset) * 1: data complete, (buffer returned in *rkbufp) */ -int rd_kafka_transport_framed_recv (rd_kafka_transport_t *rktrans, - rd_kafka_buf_t **rkbufp, - char *errstr, size_t errstr_size) { - rd_kafka_buf_t *rkbuf = rktrans->rktrans_recv_buf; - ssize_t r; - const int log_decode_errors = LOG_ERR; - - /* States: - * !rktrans_recv_buf: initial state; set up buf to receive header. - * rkbuf_totlen == 0: awaiting header - * rkbuf_totlen > 0: awaiting payload - */ - - if (!rkbuf) { - rkbuf = rd_kafka_buf_new(1, 4/*length field's length*/); +int rd_kafka_transport_framed_recv(rd_kafka_transport_t *rktrans, + rd_kafka_buf_t **rkbufp, + char *errstr, + size_t errstr_size) { + rd_kafka_buf_t *rkbuf = rktrans->rktrans_recv_buf; + ssize_t r; + const int log_decode_errors = LOG_ERR; + + /* States: + * !rktrans_recv_buf: initial state; set up buf to receive header. 
+ * rkbuf_totlen == 0: awaiting header + * rkbuf_totlen > 0: awaiting payload + */ + + if (!rkbuf) { + rkbuf = rd_kafka_buf_new(1, 4 /*length field's length*/); /* Set up buffer reader for the length field */ rd_buf_write_ensure(&rkbuf->rkbuf_buf, 4, 4); - rktrans->rktrans_recv_buf = rkbuf; - } + rktrans->rktrans_recv_buf = rkbuf; + } - r = rd_kafka_transport_recv(rktrans, &rkbuf->rkbuf_buf, - errstr, errstr_size); - if (r == 0) - return 0; - else if (r == -1) - return -1; + r = rd_kafka_transport_recv(rktrans, &rkbuf->rkbuf_buf, errstr, + errstr_size); + if (r == 0) + return 0; + else if (r == -1) + return -1; - if (rkbuf->rkbuf_totlen == 0) { - /* Frame length not known yet. */ - int32_t frame_len; + if (rkbuf->rkbuf_totlen == 0) { + /* Frame length not known yet. */ + int32_t frame_len; - if (rd_buf_write_pos(&rkbuf->rkbuf_buf) < sizeof(frame_len)) { - /* Wait for entire frame header. */ - return 0; - } + if (rd_buf_write_pos(&rkbuf->rkbuf_buf) < sizeof(frame_len)) { + /* Wait for entire frame header. */ + return 0; + } /* Initialize reader */ rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0, 4); - /* Reader header: payload length */ - rd_kafka_buf_read_i32(rkbuf, &frame_len); - - if (frame_len < 0 || - frame_len > rktrans->rktrans_rkb-> - rkb_rk->rk_conf.recv_max_msg_size) { - rd_snprintf(errstr, errstr_size, - "Invalid frame size %"PRId32, frame_len); - return -1; - } - - rkbuf->rkbuf_totlen = 4 + frame_len; - if (frame_len == 0) { - /* Payload is empty, we're done. */ - rktrans->rktrans_recv_buf = NULL; - *rkbufp = rkbuf; - return 1; - } - - /* Allocate memory to hold entire frame payload in contigious - * memory. 
*/ + /* Reader header: payload length */ + rd_kafka_buf_read_i32(rkbuf, &frame_len); + + if (frame_len < 0 || + frame_len > rktrans->rktrans_rkb->rkb_rk->rk_conf + .recv_max_msg_size) { + rd_snprintf(errstr, errstr_size, + "Invalid frame size %" PRId32, frame_len); + return -1; + } + + rkbuf->rkbuf_totlen = 4 + frame_len; + if (frame_len == 0) { + /* Payload is empty, we're done. */ + rktrans->rktrans_recv_buf = NULL; + *rkbufp = rkbuf; + return 1; + } + + /* Allocate memory to hold entire frame payload in contigious + * memory. */ rd_buf_write_ensure_contig(&rkbuf->rkbuf_buf, frame_len); /* Try reading directly, there is probably more data available*/ - return rd_kafka_transport_framed_recv(rktrans, rkbufp, - errstr, errstr_size); - } + return rd_kafka_transport_framed_recv(rktrans, rkbufp, errstr, + errstr_size); + } - if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == rkbuf->rkbuf_totlen) { - /* Payload is complete. */ - rktrans->rktrans_recv_buf = NULL; - *rkbufp = rkbuf; - return 1; - } + if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == rkbuf->rkbuf_totlen) { + /* Payload is complete. */ + rktrans->rktrans_recv_buf = NULL; + *rkbufp = rkbuf; + return 1; + } - /* Wait for more data */ - return 0; + /* Wait for more data */ + return 0; - err_parse: +err_parse: rd_snprintf(errstr, errstr_size, "Frame header parsing failed: %s", rd_kafka_err2str(rkbuf->rkbuf_err)); - return -1; + return -1; } /** * @brief Final socket setup after a connection has been established */ -void rd_kafka_transport_post_connect_setup (rd_kafka_transport_t *rktrans) { +void rd_kafka_transport_post_connect_setup(rd_kafka_transport_t *rktrans) { rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; unsigned int slen; /* Set socket send & receive buffer sizes if configuerd */ if (rkb->rkb_rk->rk_conf.socket_sndbuf_size != 0) { - if (setsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF, - (void *)&rkb->rkb_rk->rk_conf.socket_sndbuf_size, - sizeof(rkb->rkb_rk->rk_conf. 
- socket_sndbuf_size)) == RD_SOCKET_ERROR) + if (setsockopt( + rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF, + (void *)&rkb->rkb_rk->rk_conf.socket_sndbuf_size, + sizeof(rkb->rkb_rk->rk_conf.socket_sndbuf_size)) == + RD_SOCKET_ERROR) rd_rkb_log(rkb, LOG_WARNING, "SNDBUF", "Failed to set socket send " "buffer size to %i: %s", @@ -561,10 +557,11 @@ void rd_kafka_transport_post_connect_setup (rd_kafka_transport_t *rktrans) { } if (rkb->rkb_rk->rk_conf.socket_rcvbuf_size != 0) { - if (setsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF, - (void *)&rkb->rkb_rk->rk_conf.socket_rcvbuf_size, - sizeof(rkb->rkb_rk->rk_conf. - socket_rcvbuf_size)) == RD_SOCKET_ERROR) + if (setsockopt( + rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF, + (void *)&rkb->rkb_rk->rk_conf.socket_rcvbuf_size, + sizeof(rkb->rkb_rk->rk_conf.socket_rcvbuf_size)) == + RD_SOCKET_ERROR) rd_rkb_log(rkb, LOG_WARNING, "RCVBUF", "Failed to set socket receive " "buffer size to %i: %s", @@ -583,9 +580,10 @@ void rd_kafka_transport_post_connect_setup (rd_kafka_transport_t *rktrans) { "Failed to get socket receive " "buffer size: %s: assuming 1MB", rd_socket_strerror(rd_socket_errno)); - rktrans->rktrans_rcvbuf_size = 1024*1024; + rktrans->rktrans_rcvbuf_size = 1024 * 1024; } else if (rktrans->rktrans_rcvbuf_size < 1024 * 64) - rktrans->rktrans_rcvbuf_size = 1024*64; /* Use at least 64KB */ + rktrans->rktrans_rcvbuf_size = + 1024 * 64; /* Use at least 64KB */ slen = sizeof(rktrans->rktrans_sndbuf_size); if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF, @@ -595,9 +593,10 @@ void rd_kafka_transport_post_connect_setup (rd_kafka_transport_t *rktrans) { "Failed to get socket send " "buffer size: %s: assuming 1MB", rd_socket_strerror(rd_socket_errno)); - rktrans->rktrans_sndbuf_size = 1024*1024; + rktrans->rktrans_sndbuf_size = 1024 * 1024; } else if (rktrans->rktrans_sndbuf_size < 1024 * 64) - rktrans->rktrans_sndbuf_size = 1024*64; /* Use at least 64KB */ + rktrans->rktrans_sndbuf_size = + 1024 * 64; /* Use at least 
64KB */ #ifdef TCP_NODELAY @@ -620,42 +619,40 @@ void rd_kafka_transport_post_connect_setup (rd_kafka_transport_t *rktrans) { * * Locality: broker thread */ -static void rd_kafka_transport_connected (rd_kafka_transport_t *rktrans) { - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; +static void rd_kafka_transport_connected(rd_kafka_transport_t *rktrans) { + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - rd_rkb_dbg(rkb, BROKER, "CONNECT", - "Connected to %s", - rd_sockaddr2str(rkb->rkb_addr_last, - RD_SOCKADDR2STR_F_PORT | - RD_SOCKADDR2STR_F_FAMILY)); + rd_rkb_dbg( + rkb, BROKER, "CONNECT", "Connected to %s", + rd_sockaddr2str(rkb->rkb_addr_last, + RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_FAMILY)); rd_kafka_transport_post_connect_setup(rktrans); #if WITH_SSL - if (rkb->rkb_proto == RD_KAFKA_PROTO_SSL || - rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL) { - char errstr[512]; + if (rkb->rkb_proto == RD_KAFKA_PROTO_SSL || + rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL) { + char errstr[512]; rd_kafka_broker_lock(rkb); rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE); rd_kafka_broker_unlock(rkb); - /* Set up SSL connection. - * This is also an asynchronous operation so dont - * propagate to broker_connect_done() just yet. */ - if (rd_kafka_transport_ssl_connect(rkb, rktrans, - errstr, - sizeof(errstr)) == -1) { - rd_kafka_transport_connect_done(rktrans, errstr); - return; - } - return; - } + /* Set up SSL connection. + * This is also an asynchronous operation so dont + * propagate to broker_connect_done() just yet. 
*/ + if (rd_kafka_transport_ssl_connect(rkb, rktrans, errstr, + sizeof(errstr)) == -1) { + rd_kafka_transport_connect_done(rktrans, errstr); + return; + } + return; + } #endif - /* Propagate connect success */ - rd_kafka_transport_connect_done(rktrans, NULL); + /* Propagate connect success */ + rd_kafka_transport_connect_done(rktrans, NULL); } @@ -665,19 +662,19 @@ static void rd_kafka_transport_connected (rd_kafka_transport_t *rktrans) { * @returns 0 if getsockopt() was succesful (and \p and errp can be trusted), * else -1 in which case \p errp 's value is undefined. */ -static int rd_kafka_transport_get_socket_error (rd_kafka_transport_t *rktrans, - int *errp) { - socklen_t intlen = sizeof(*errp); - - if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, - SO_ERROR, (void *)errp, &intlen) == -1) { - rd_rkb_dbg(rktrans->rktrans_rkb, BROKER, "SO_ERROR", - "Failed to get socket error: %s", - rd_socket_strerror(rd_socket_errno)); - return -1; - } - - return 0; +static int rd_kafka_transport_get_socket_error(rd_kafka_transport_t *rktrans, + int *errp) { + socklen_t intlen = sizeof(*errp); + + if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_ERROR, (void *)errp, + &intlen) == -1) { + rd_rkb_dbg(rktrans->rktrans_rkb, BROKER, "SO_ERROR", + "Failed to get socket error: %s", + rd_socket_strerror(rd_socket_errno)); + return -1; + } + + return 0; } @@ -689,54 +686,53 @@ static int rd_kafka_transport_get_socket_error (rd_kafka_transport_t *rktrans, * * Locality: broker thread */ -static void rd_kafka_transport_io_event (rd_kafka_transport_t *rktrans, - int events, - const char *socket_errstr) { - char errstr[512]; - int r; - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - - switch (rkb->rkb_state) - { - case RD_KAFKA_BROKER_STATE_CONNECT: - /* Asynchronous connect finished, read status. 
*/ - if (!(events & (POLLOUT|POLLERR|POLLHUP))) - return; +static void rd_kafka_transport_io_event(rd_kafka_transport_t *rktrans, + int events, + const char *socket_errstr) { + char errstr[512]; + int r; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + + switch (rkb->rkb_state) { + case RD_KAFKA_BROKER_STATE_CONNECT: + /* Asynchronous connect finished, read status. */ + if (!(events & (POLLOUT | POLLERR | POLLHUP))) + return; if (socket_errstr) rd_kafka_broker_fail( - rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, - "Connect to %s failed: %s", - rd_sockaddr2str(rkb->rkb_addr_last, - RD_SOCKADDR2STR_F_PORT | - RD_SOCKADDR2STR_F_FAMILY), - socket_errstr); - else if (rd_kafka_transport_get_socket_error(rktrans, &r) == -1) { - rd_kafka_broker_fail( - rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, - "Connect to %s failed: " - "unable to get status from " - "socket %d: %s", - rd_sockaddr2str(rkb->rkb_addr_last, - RD_SOCKADDR2STR_F_PORT | + rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, + "Connect to %s failed: %s", + rd_sockaddr2str(rkb->rkb_addr_last, + RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_FAMILY), - rktrans->rktrans_s, - rd_strerror(rd_socket_errno)); - } else if (r != 0) { - /* Connect failed */ - rd_snprintf(errstr, sizeof(errstr), - "Connect to %s failed: %s", - rd_sockaddr2str(rkb->rkb_addr_last, - RD_SOCKADDR2STR_F_PORT | - RD_SOCKADDR2STR_F_FAMILY), - rd_strerror(r)); - - rd_kafka_transport_connect_done(rktrans, errstr); - } else { - /* Connect succeeded */ - rd_kafka_transport_connected(rktrans); - } - break; + socket_errstr); + else if (rd_kafka_transport_get_socket_error(rktrans, &r) == + -1) { + rd_kafka_broker_fail( + rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, + "Connect to %s failed: " + "unable to get status from " + "socket %d: %s", + rd_sockaddr2str(rkb->rkb_addr_last, + RD_SOCKADDR2STR_F_PORT | + RD_SOCKADDR2STR_F_FAMILY), + rktrans->rktrans_s, rd_strerror(rd_socket_errno)); + } else if (r != 0) { + /* Connect failed */ + rd_snprintf( + errstr, 
sizeof(errstr), "Connect to %s failed: %s", + rd_sockaddr2str(rkb->rkb_addr_last, + RD_SOCKADDR2STR_F_PORT | + RD_SOCKADDR2STR_F_FAMILY), + rd_strerror(r)); + + rd_kafka_transport_connect_done(rktrans, errstr); + } else { + /* Connect succeeded */ + rd_kafka_transport_connected(rktrans); + } + break; case RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE: #if WITH_SSL @@ -749,8 +745,7 @@ static void rd_kafka_transport_io_event (rd_kafka_transport_t *rktrans, if (r == 0 /* handshake still in progress */ && (events & POLLHUP)) { rd_kafka_broker_conn_closed( - rkb, RD_KAFKA_RESP_ERR__TRANSPORT, - "Disconnected"); + rkb, RD_KAFKA_RESP_ERR__TRANSPORT, "Disconnected"); return; } @@ -763,62 +758,57 @@ static void rd_kafka_transport_io_event (rd_kafka_transport_t *rktrans, /* SASL authentication. * Prior to broker version v1.0.0 this is performed * directly on the socket without Kafka framing. */ - if (rd_kafka_sasl_io_event(rktrans, events, - errstr, + if (rd_kafka_sasl_io_event(rktrans, events, errstr, sizeof(errstr)) == -1) { rd_kafka_broker_fail( - rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__AUTHENTICATION, - "SASL authentication failure: %s", - errstr); + rkb, LOG_ERR, RD_KAFKA_RESP_ERR__AUTHENTICATION, + "SASL authentication failure: %s", errstr); return; } if (events & POLLHUP) { - rd_kafka_broker_fail( - rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__AUTHENTICATION, - "Disconnected"); + rd_kafka_broker_fail(rkb, LOG_ERR, + RD_KAFKA_RESP_ERR__AUTHENTICATION, + "Disconnected"); return; } break; - case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY: - case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE: - case RD_KAFKA_BROKER_STATE_AUTH_REQ: - case RD_KAFKA_BROKER_STATE_UP: - case RD_KAFKA_BROKER_STATE_UPDATE: + case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY: + case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE: + case RD_KAFKA_BROKER_STATE_AUTH_REQ: + case RD_KAFKA_BROKER_STATE_UP: + case RD_KAFKA_BROKER_STATE_UPDATE: - if (events & POLLIN) { - while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP && - rd_kafka_recv(rkb) > 0) - ; + if 
(events & POLLIN) { + while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP && + rd_kafka_recv(rkb) > 0) + ; /* If connection went down: bail out early */ if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_DOWN) return; - } + } if (events & POLLHUP) { rd_kafka_broker_conn_closed( - rkb, RD_KAFKA_RESP_ERR__TRANSPORT, - "Disconnected"); + rkb, RD_KAFKA_RESP_ERR__TRANSPORT, "Disconnected"); return; } - if (events & POLLOUT) { - while (rd_kafka_send(rkb) > 0) - ; - } - break; + if (events & POLLOUT) { + while (rd_kafka_send(rkb) > 0) + ; + } + break; - case RD_KAFKA_BROKER_STATE_INIT: - case RD_KAFKA_BROKER_STATE_DOWN: + case RD_KAFKA_BROKER_STATE_INIT: + case RD_KAFKA_BROKER_STATE_DOWN: case RD_KAFKA_BROKER_STATE_TRY_CONNECT: - rd_kafka_assert(rkb->rkb_rk, !*"bad state"); - } + rd_kafka_assert(rkb->rkb_rk, !*"bad state"); + } } @@ -827,7 +817,7 @@ static void rd_kafka_transport_io_event (rd_kafka_transport_t *rktrans, /** * @brief Convert WSA FD_.. events to POLL.. events. */ -static RD_INLINE int rd_kafka_transport_wsa2events (long wevents) { +static RD_INLINE int rd_kafka_transport_wsa2events(long wevents) { int events = 0; if (unlikely(wevents == 0)) @@ -848,8 +838,8 @@ static RD_INLINE int rd_kafka_transport_wsa2events (long wevents) { /** * @brief Convert POLL.. events to WSA FD_.. events. */ -static RD_INLINE int rd_kafka_transport_events2wsa (int events, - rd_bool_t is_connecting) { +static RD_INLINE int rd_kafka_transport_events2wsa(int events, + rd_bool_t is_connecting) { long wevents = FD_CLOSE; if (unlikely(is_connecting)) @@ -867,42 +857,38 @@ static RD_INLINE int rd_kafka_transport_events2wsa (int events, /** * @returns the WinSocket events (as POLL.. events) for the broker socket. 
*/ -static int rd_kafka_transport_get_wsa_events (rd_kafka_transport_t *rktrans) { - const int try_bits[4 * 2] = { - FD_READ_BIT, POLLIN, - FD_WRITE_BIT, POLLOUT, - FD_CONNECT_BIT, POLLOUT, - FD_CLOSE_BIT, POLLHUP - }; +static int rd_kafka_transport_get_wsa_events(rd_kafka_transport_t *rktrans) { + const int try_bits[4 * 2] = {FD_READ_BIT, POLLIN, FD_WRITE_BIT, + POLLOUT, FD_CONNECT_BIT, POLLOUT, + FD_CLOSE_BIT, POLLHUP}; int r, i; WSANETWORKEVENTS netevents; - int events = 0; + int events = 0; const char *socket_errstr = NULL; - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; /* Get Socket event */ - r = WSAEnumNetworkEvents(rktrans->rktrans_s, - rktrans->rktrans_wsaevent, - &netevents); + r = WSAEnumNetworkEvents(rktrans->rktrans_s, rktrans->rktrans_wsaevent, + &netevents); if (unlikely(r == SOCKET_ERROR)) { rd_rkb_log(rkb, LOG_ERR, "WSAWAIT", - "WSAEnumNetworkEvents() failed: %s", - rd_socket_strerror(rd_socket_errno)); + "WSAEnumNetworkEvents() failed: %s", + rd_socket_strerror(rd_socket_errno)); socket_errstr = rd_socket_strerror(rd_socket_errno); return POLLHUP | POLLERR; } /* Get fired events and errors for each event type */ for (i = 0; i < RD_ARRAYSIZE(try_bits); i += 2) { - const int bit = try_bits[i]; + const int bit = try_bits[i]; const int event = try_bits[i + 1]; if (!(netevents.lNetworkEvents & (1 << bit))) continue; if (unlikely(netevents.iErrorCode[bit])) { - socket_errstr = rd_socket_strerror( - netevents.iErrorCode[bit]); + socket_errstr = + rd_socket_strerror(netevents.iErrorCode[bit]); events |= POLLHUP; } else { events |= event; @@ -923,18 +909,19 @@ static int rd_kafka_transport_get_wsa_events (rd_kafka_transport_t *rktrans) { * * @returns the transport socket POLL.. event bits. 
*/ -static int rd_kafka_transport_io_serve_win32 (rd_kafka_transport_t *rktrans, - rd_kafka_q_t *rkq, int timeout_ms) { +static int rd_kafka_transport_io_serve_win32(rd_kafka_transport_t *rktrans, + rd_kafka_q_t *rkq, + int timeout_ms) { const DWORD wsaevent_cnt = 3; - WSAEVENT wsaevents[3] = { - rkq->rkq_cond.mEvents[0], /* rkq: cnd_signal */ - rkq->rkq_cond.mEvents[1], /* rkq: cnd_broadcast */ - rktrans->rktrans_wsaevent, /* socket */ + WSAEVENT wsaevents[3] = { + rkq->rkq_cond.mEvents[0], /* rkq: cnd_signal */ + rkq->rkq_cond.mEvents[1], /* rkq: cnd_broadcast */ + rktrans->rktrans_wsaevent, /* socket */ }; DWORD r; - int events = 0; - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - rd_bool_t set_pollout = rd_false; + int events = 0; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + rd_bool_t set_pollout = rd_false; rd_bool_t cnd_is_waiting = rd_false; /* WSA only sets FD_WRITE (e.g., POLLOUT) when the socket was @@ -943,9 +930,9 @@ static int rd_kafka_transport_io_serve_win32 (rd_kafka_transport_t *rktrans, * here and cut the timeout short if a write is wanted and the socket * is not currently blocked. 
*/ if (rktrans->rktrans_rkb->rkb_state != RD_KAFKA_BROKER_STATE_CONNECT && - !rktrans->rktrans_blocked && - (rktrans->rktrans_pfd[0].events & POLLOUT)) { - timeout_ms = 0; + !rktrans->rktrans_blocked && + (rktrans->rktrans_pfd[0].events & POLLOUT)) { + timeout_ms = 0; set_pollout = rd_true; } else { /* Check if the queue already has ops enqueued in which case we @@ -964,8 +951,8 @@ static int rd_kafka_transport_io_serve_win32 (rd_kafka_transport_t *rktrans, } /* Wait for IO and queue events */ - r = WSAWaitForMultipleEvents(wsaevent_cnt, wsaevents, FALSE, - timeout_ms, FALSE); + r = WSAWaitForMultipleEvents(wsaevent_cnt, wsaevents, FALSE, timeout_ms, + FALSE); if (cnd_is_waiting) { mtx_lock(&rkq->rkq_lock); @@ -973,10 +960,10 @@ static int rd_kafka_transport_io_serve_win32 (rd_kafka_transport_t *rktrans, mtx_unlock(&rkq->rkq_lock); } - if (unlikely(r == WSA_WAIT_FAILED)) { + if (unlikely(r == WSA_WAIT_FAILED)) { rd_rkb_log(rkb, LOG_CRIT, "WSAWAIT", - "WSAWaitForMultipleEvents failed: %s", - rd_socket_strerror(rd_socket_errno)); + "WSAWaitForMultipleEvents failed: %s", + rd_socket_strerror(rd_socket_errno)); return POLLERR; } else if (r != WSA_WAIT_TIMEOUT) { r -= WSA_WAIT_EVENT_0; @@ -1003,8 +990,9 @@ static int rd_kafka_transport_io_serve_win32 (rd_kafka_transport_t *rktrans, * * @locality broker thread */ -int rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans, - rd_kafka_q_t *rkq, int timeout_ms) { +int rd_kafka_transport_io_serve(rd_kafka_transport_t *rktrans, + rd_kafka_q_t *rkq, + int timeout_ms) { rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; int events; @@ -1012,13 +1000,13 @@ int rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans, if ( #ifndef _WIN32 - /* BSD sockets use POLLOUT to indicate success to connect. - * Windows has its own flag for this (FD_CONNECT). */ - rkb->rkb_state == RD_KAFKA_BROKER_STATE_CONNECT || + /* BSD sockets use POLLOUT to indicate success to connect. + * Windows has its own flag for this (FD_CONNECT). 
*/ + rkb->rkb_state == RD_KAFKA_BROKER_STATE_CONNECT || #endif - (rkb->rkb_state > RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE && - rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight && - rd_kafka_bufq_cnt(&rkb->rkb_outbufs) > 0)) + (rkb->rkb_state > RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE && + rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight && + rd_kafka_bufq_cnt(&rkb->rkb_outbufs) > 0)) rd_kafka_transport_poll_set(rkb->rkb_transport, POLLOUT); #ifdef _WIN32 @@ -1029,11 +1017,11 @@ int rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans, #endif rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN); - /* On Windows we can wait for both IO and condvars (rkq) - * simultaneously. - * - * On *nix/BSD sockets we use a local pipe (pfd[1]) to wake - * up the rkq. */ + /* On Windows we can wait for both IO and condvars (rkq) + * simultaneously. + * + * On *nix/BSD sockets we use a local pipe (pfd[1]) to wake + * up the rkq. */ #ifdef _WIN32 events = rd_kafka_transport_io_serve_win32(rktrans, rkq, timeout_ms); @@ -1046,7 +1034,7 @@ int rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans, #endif if (events) { - rd_kafka_transport_poll_clear(rktrans, POLLOUT|POLLIN); + rd_kafka_transport_poll_clear(rktrans, POLLOUT | POLLIN); rd_kafka_transport_io_event(rktrans, events, NULL); } @@ -1058,10 +1046,10 @@ int rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans, /** * @brief Create a new transport object using existing socket \p s. */ -rd_kafka_transport_t *rd_kafka_transport_new (rd_kafka_broker_t *rkb, - rd_socket_t s, - char *errstr, - size_t errstr_size) { +rd_kafka_transport_t *rd_kafka_transport_new(rd_kafka_broker_t *rkb, + rd_socket_t s, + char *errstr, + size_t errstr_size) { rd_kafka_transport_t *rktrans; int on = 1; int r; @@ -1077,8 +1065,8 @@ rd_kafka_transport_t *rd_kafka_transport_new (rd_kafka_broker_t *rkb, #ifdef SO_KEEPALIVE /* Enable TCP keep-alives, if configured. 
*/ if (rkb->rkb_rk->rk_conf.socket_keepalive) { - if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, - (void *)&on, sizeof(on)) == RD_SOCKET_ERROR) + if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, (void *)&on, + sizeof(on)) == RD_SOCKET_ERROR) rd_rkb_dbg(rkb, BROKER, "SOCKET", "Failed to set SO_KEEPALIVE: %s", rd_socket_strerror(rd_socket_errno)); @@ -1094,9 +1082,9 @@ rd_kafka_transport_t *rd_kafka_transport_new (rd_kafka_broker_t *rkb, } - rktrans = rd_calloc(1, sizeof(*rktrans)); + rktrans = rd_calloc(1, sizeof(*rktrans)); rktrans->rktrans_rkb = rkb; - rktrans->rktrans_s = s; + rktrans->rktrans_s = s; #ifdef _WIN32 rktrans->rktrans_wsaevent = WSACreateEvent(); @@ -1112,24 +1100,24 @@ rd_kafka_transport_t *rd_kafka_transport_new (rd_kafka_broker_t *rkb, * * Locality: broker thread */ -rd_kafka_transport_t *rd_kafka_transport_connect (rd_kafka_broker_t *rkb, - const rd_sockaddr_inx_t *sinx, - char *errstr, - size_t errstr_size) { - rd_kafka_transport_t *rktrans; - int s = -1; +rd_kafka_transport_t *rd_kafka_transport_connect(rd_kafka_broker_t *rkb, + const rd_sockaddr_inx_t *sinx, + char *errstr, + size_t errstr_size) { + rd_kafka_transport_t *rktrans; + int s = -1; int r; rkb->rkb_addr_last = sinx; - s = rkb->rkb_rk->rk_conf.socket_cb(sinx->in.sin_family, - SOCK_STREAM, IPPROTO_TCP, - rkb->rkb_rk->rk_conf.opaque); - if (s == -1) { - rd_snprintf(errstr, errstr_size, "Failed to create socket: %s", - rd_socket_strerror(rd_socket_errno)); - return NULL; - } + s = rkb->rkb_rk->rk_conf.socket_cb(sinx->in.sin_family, SOCK_STREAM, + IPPROTO_TCP, + rkb->rkb_rk->rk_conf.opaque); + if (s == -1) { + rd_snprintf(errstr, errstr_size, "Failed to create socket: %s", + rd_socket_strerror(rd_socket_errno)); + return NULL; + } rktrans = rd_kafka_transport_new(rkb, s, errstr, errstr_size); if (!rktrans) { @@ -1137,18 +1125,19 @@ rd_kafka_transport_t *rd_kafka_transport_connect (rd_kafka_broker_t *rkb, return NULL; } - rd_rkb_dbg(rkb, BROKER, "CONNECT", "Connecting to %s (%s) " - "with socket 
%i", - rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_FAMILY | - RD_SOCKADDR2STR_F_PORT), - rd_kafka_secproto_names[rkb->rkb_proto], s); + rd_rkb_dbg(rkb, BROKER, "CONNECT", + "Connecting to %s (%s) " + "with socket %i", + rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_FAMILY | + RD_SOCKADDR2STR_F_PORT), + rd_kafka_secproto_names[rkb->rkb_proto], s); - /* Connect to broker */ + /* Connect to broker */ if (rkb->rkb_rk->rk_conf.connect_cb) { rd_kafka_broker_lock(rkb); /* for rkb_nodename */ r = rkb->rkb_rk->rk_conf.connect_cb( - s, (struct sockaddr *)sinx, RD_SOCKADDR_INX_LEN(sinx), - rkb->rkb_nodename, rkb->rkb_rk->rk_conf.opaque); + s, (struct sockaddr *)sinx, RD_SOCKADDR_INX_LEN(sinx), + rkb->rkb_nodename, rkb->rkb_rk->rk_conf.opaque); rd_kafka_broker_unlock(rkb); } else { if (connect(s, (struct sockaddr *)sinx, @@ -1157,40 +1146,40 @@ rd_kafka_transport_t *rd_kafka_transport_connect (rd_kafka_broker_t *rkb, #ifdef _WIN32 && rd_socket_errno != WSAEWOULDBLOCK #endif - )) + )) r = rd_socket_errno; else r = 0; } if (r != 0) { - rd_rkb_dbg(rkb, BROKER, "CONNECT", - "Couldn't connect to %s: %s (%i)", - rd_sockaddr2str(sinx, - RD_SOCKADDR2STR_F_PORT | - RD_SOCKADDR2STR_F_FAMILY), - rd_socket_strerror(r), r); - rd_snprintf(errstr, errstr_size, - "Failed to connect to broker at %s: %s", - rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_NICE), - rd_socket_strerror(r)); + rd_rkb_dbg(rkb, BROKER, "CONNECT", + "Couldn't connect to %s: %s (%i)", + rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_PORT | + RD_SOCKADDR2STR_F_FAMILY), + rd_socket_strerror(r), r); + rd_snprintf(errstr, errstr_size, + "Failed to connect to broker at %s: %s", + rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_NICE), + rd_socket_strerror(r)); rd_kafka_transport_close(rktrans); return NULL; - } + } /* Set up transport handle */ rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = s; if (rkb->rkb_wakeup_fd[0] != -1) { rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt].events = POLLIN; - rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = 
rkb->rkb_wakeup_fd[0]; + rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = + rkb->rkb_wakeup_fd[0]; } - /* Poll writability to trigger on connection success/failure. */ - rd_kafka_transport_poll_set(rktrans, POLLOUT); + /* Poll writability to trigger on connection success/failure. */ + rd_kafka_transport_poll_set(rktrans, POLLOUT); - return rktrans; + return rktrans; } @@ -1198,19 +1187,18 @@ rd_kafka_transport_t *rd_kafka_transport_connect (rd_kafka_broker_t *rkb, /** * @brief Set the WinSocket event poll bit to \p events. */ -static void rd_kafka_transport_poll_set_wsa (rd_kafka_transport_t *rktrans, - int events) { +static void rd_kafka_transport_poll_set_wsa(rd_kafka_transport_t *rktrans, + int events) { int r; - r = WSAEventSelect(rktrans->rktrans_s, - rktrans->rktrans_wsaevent, - rd_kafka_transport_events2wsa( - rktrans->rktrans_pfd[0].events, - rktrans->rktrans_rkb->rkb_state == - RD_KAFKA_BROKER_STATE_CONNECT)); + r = WSAEventSelect( + rktrans->rktrans_s, rktrans->rktrans_wsaevent, + rd_kafka_transport_events2wsa(rktrans->rktrans_pfd[0].events, + rktrans->rktrans_rkb->rkb_state == + RD_KAFKA_BROKER_STATE_CONNECT)); if (unlikely(r != 0)) { rd_rkb_log(rktrans->rktrans_rkb, LOG_CRIT, "WSAEVENT", - "WSAEventSelect() failed: %s", - rd_socket_strerror(rd_socket_errno)); + "WSAEventSelect() failed: %s", + rd_socket_strerror(rd_socket_errno)); } } #endif @@ -1223,7 +1211,7 @@ void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event) { #ifdef _WIN32 rd_kafka_transport_poll_set_wsa(rktrans, - rktrans->rktrans_pfd[0].events); + rktrans->rktrans_pfd[0].events); #endif } @@ -1235,7 +1223,7 @@ void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event) { #ifdef _WIN32 rd_kafka_transport_poll_set_wsa(rktrans, - rktrans->rktrans_pfd[0].events); + rktrans->rktrans_pfd[0].events); #endif } @@ -1245,20 +1233,20 @@ void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event) { * * @returns 1 if an event was raised, else 0, 
or -1 on error. */ -static int rd_kafka_transport_poll (rd_kafka_transport_t *rktrans, int tmout) { +static int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) { int r; - r = poll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout); - if (r <= 0) - return r; + r = poll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout); + if (r <= 0) + return r; rd_atomic64_add(&rktrans->rktrans_rkb->rkb_c.wakeups, 1); if (rktrans->rktrans_pfd[1].revents & POLLIN) { /* Read wake-up fd data and throw away, just used for wake-ups*/ char buf[1024]; - while (rd_read((int)rktrans->rktrans_pfd[1].fd, - buf, sizeof(buf)) > 0) + while (rd_read((int)rktrans->rktrans_pfd[1].fd, buf, + sizeof(buf)) > 0) ; /* Read all buffered signalling bytes */ } @@ -1274,8 +1262,8 @@ static int rd_kafka_transport_poll (rd_kafka_transport_t *rktrans, int tmout) { * This is really only used on Windows where POLLOUT (FD_WRITE) is * edge-triggered rather than level-triggered. */ -void rd_kafka_transport_set_blocked (rd_kafka_transport_t *rktrans, - rd_bool_t blocked) { +void rd_kafka_transport_set_blocked(rd_kafka_transport_t *rktrans, + rd_bool_t blocked) { rktrans->rktrans_blocked = blocked; } #endif @@ -1290,14 +1278,14 @@ void rd_kafka_transport_set_blocked (rd_kafka_transport_t *rktrans, */ void rd_kafka_transport_term (void) { #ifdef _WIN32 - (void)WSACleanup(); /* FIXME: dangerous */ + (void)WSACleanup(); /* FIXME: dangerous */ #endif } #endif -void rd_kafka_transport_init (void) { +void rd_kafka_transport_init(void) { #ifdef _WIN32 - WSADATA d; - (void)WSAStartup(MAKEWORD(2, 2), &d); + WSADATA d; + (void)WSAStartup(MAKEWORD(2, 2), &d); #endif } diff --git a/src/rdkafka_transport.h b/src/rdkafka_transport.h index 17223984fc..83af5ae901 100644 --- a/src/rdkafka_transport.h +++ b/src/rdkafka_transport.h @@ -3,24 +3,24 @@ * * Copyright (c) 2015, Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -38,50 +38,57 @@ typedef struct rd_kafka_transport_s rd_kafka_transport_t; -int rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans, - rd_kafka_q_t *rkq, - int timeout_ms); +int rd_kafka_transport_io_serve(rd_kafka_transport_t *rktrans, + rd_kafka_q_t *rkq, + int timeout_ms); -ssize_t rd_kafka_transport_send (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size); -ssize_t rd_kafka_transport_recv (rd_kafka_transport_t *rktrans, - rd_buf_t *rbuf, - char *errstr, size_t errstr_size); +ssize_t rd_kafka_transport_send(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size); +ssize_t rd_kafka_transport_recv(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size); -void rd_kafka_transport_request_sent (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf); +void rd_kafka_transport_request_sent(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf); -int rd_kafka_transport_framed_recv (rd_kafka_transport_t *rktrans, - rd_kafka_buf_t **rkbufp, - char *errstr, size_t errstr_size); +int rd_kafka_transport_framed_recv(rd_kafka_transport_t *rktrans, + rd_kafka_buf_t **rkbufp, + char *errstr, + size_t errstr_size); -rd_kafka_transport_t *rd_kafka_transport_new (rd_kafka_broker_t *rkb, - rd_socket_t s, - char *errstr, - size_t errstr_size); +rd_kafka_transport_t *rd_kafka_transport_new(rd_kafka_broker_t *rkb, + rd_socket_t s, + char *errstr, + size_t 
errstr_size); struct rd_kafka_broker_s; -rd_kafka_transport_t *rd_kafka_transport_connect(struct rd_kafka_broker_s *rkb, const rd_sockaddr_inx_t *sinx, - char *errstr, size_t errstr_size); -void rd_kafka_transport_connect_done (rd_kafka_transport_t *rktrans, - char *errstr); +rd_kafka_transport_t *rd_kafka_transport_connect(struct rd_kafka_broker_s *rkb, + const rd_sockaddr_inx_t *sinx, + char *errstr, + size_t errstr_size); +void rd_kafka_transport_connect_done(rd_kafka_transport_t *rktrans, + char *errstr); -void rd_kafka_transport_post_connect_setup (rd_kafka_transport_t *rktrans); +void rd_kafka_transport_post_connect_setup(rd_kafka_transport_t *rktrans); void rd_kafka_transport_close(rd_kafka_transport_t *rktrans); -void rd_kafka_transport_shutdown (rd_kafka_transport_t *rktrans); +void rd_kafka_transport_shutdown(rd_kafka_transport_t *rktrans); void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event); void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event); #ifdef _WIN32 -void rd_kafka_transport_set_blocked (rd_kafka_transport_t *rktrans, - rd_bool_t blocked); +void rd_kafka_transport_set_blocked(rd_kafka_transport_t *rktrans, + rd_bool_t blocked); #else /* no-op on other platforms */ -#define rd_kafka_transport_set_blocked(rktrans,blocked) do {} while (0) +#define rd_kafka_transport_set_blocked(rktrans, blocked) \ + do { \ + } while (0) #endif -void rd_kafka_transport_init (void); +void rd_kafka_transport_init(void); #endif /* _RDKAFKA_TRANSPORT_H_ */ diff --git a/src/rdkafka_transport_int.h b/src/rdkafka_transport_int.h index 09f9603bb0..4b053b98fa 100644 --- a/src/rdkafka_transport_int.h +++ b/src/rdkafka_transport_int.h @@ -3,24 +3,24 @@ * * Copyright (c) 2015, Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -46,42 +46,42 @@ struct rd_kafka_transport_s { rd_socket_t rktrans_s; - rd_kafka_broker_t *rktrans_rkb; /* Not reference counted */ + rd_kafka_broker_t *rktrans_rkb; /* Not reference counted */ #if WITH_SSL - SSL *rktrans_ssl; + SSL *rktrans_ssl; #endif #ifdef _WIN32 WSAEVENT *rktrans_wsaevent; - rd_bool_t rktrans_blocked; /* Latest send() returned ..WOULDBLOCK. - * We need to poll for FD_WRITE which - * is edge-triggered rather than - * level-triggered. - * This behaviour differs from BSD - * sockets. */ + rd_bool_t rktrans_blocked; /* Latest send() returned ..WOULDBLOCK. + * We need to poll for FD_WRITE which + * is edge-triggered rather than + * level-triggered. + * This behaviour differs from BSD + * sockets. */ #endif - struct { - void *state; /* SASL implementation - * state handle */ + struct { + void *state; /* SASL implementation + * state handle */ - int complete; /* Auth was completed early - * from the client's perspective - * (but we might still have to - * wait for server reply). */ + int complete; /* Auth was completed early + * from the client's perspective + * (but we might still have to + * wait for server reply). */ /* SASL framing buffers */ - struct msghdr msg; - struct iovec iov[2]; + struct msghdr msg; + struct iovec iov[2]; - char *recv_buf; - int recv_of; /* Received byte count */ - int recv_len; /* Expected receive length for - * current frame. 
*/ - } rktrans_sasl; + char *recv_buf; + int recv_of; /* Received byte count */ + int recv_len; /* Expected receive length for + * current frame. */ + } rktrans_sasl; - rd_kafka_buf_t *rktrans_recv_buf; /* Used with framed_recvmsg */ + rd_kafka_buf_t *rktrans_recv_buf; /* Used with framed_recvmsg */ /* Two pollable fds: * - TCP socket @@ -90,8 +90,8 @@ struct rd_kafka_transport_s { rd_pollfd_t rktrans_pfd[2]; int rktrans_pfd_cnt; - size_t rktrans_rcvbuf_size; /**< Socket receive buffer size */ - size_t rktrans_sndbuf_size; /**< Socket send buffer size */ + size_t rktrans_rcvbuf_size; /**< Socket receive buffer size */ + size_t rktrans_sndbuf_size; /**< Socket send buffer size */ }; diff --git a/src/rdkafka_txnmgr.c b/src/rdkafka_txnmgr.c index f6a0fb18bd..13b8479866 100644 --- a/src/rdkafka_txnmgr.c +++ b/src/rdkafka_txnmgr.c @@ -43,20 +43,18 @@ #include "rdrand.h" -static void -rd_kafka_txn_curr_api_reply_error (rd_kafka_q_t *rkq, rd_kafka_error_t *error); -static void rd_kafka_txn_coord_timer_start (rd_kafka_t *rk, int timeout_ms); +static void rd_kafka_txn_curr_api_reply_error(rd_kafka_q_t *rkq, + rd_kafka_error_t *error); +static void rd_kafka_txn_coord_timer_start(rd_kafka_t *rk, int timeout_ms); /** * @return a normalized error code, this for instance abstracts different * fencing errors to return one single fencing error to the application. 
*/ -static rd_kafka_resp_err_t -rd_kafka_txn_normalize_err (rd_kafka_resp_err_t err) { +static rd_kafka_resp_err_t rd_kafka_txn_normalize_err(rd_kafka_resp_err_t err) { - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH: case RD_KAFKA_RESP_ERR_PRODUCER_FENCED: return RD_KAFKA_RESP_ERR__FENCED; @@ -74,18 +72,17 @@ rd_kafka_txn_normalize_err (rd_kafka_resp_err_t err) { * @locks none */ static RD_INLINE rd_kafka_error_t * -rd_kafka_ensure_transactional (const rd_kafka_t *rk) { +rd_kafka_ensure_transactional(const rd_kafka_t *rk) { if (unlikely(rk->rk_type != RD_KAFKA_PRODUCER)) return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__INVALID_ARG, - "The Transactional API can only be used " - "on producer instances"); + RD_KAFKA_RESP_ERR__INVALID_ARG, + "The Transactional API can only be used " + "on producer instances"); if (unlikely(!rk->rk_conf.eos.transactional_id)) - return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__NOT_CONFIGURED, - "The Transactional API requires " - "transactional.id to be configured"); + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__NOT_CONFIGURED, + "The Transactional API requires " + "transactional.id to be configured"); return NULL; } @@ -101,42 +98,39 @@ rd_kafka_ensure_transactional (const rd_kafka_t *rk) { * @locality any */ static RD_INLINE rd_kafka_error_t * -rd_kafka_txn_require_states0 (rd_kafka_t *rk, - rd_kafka_txn_state_t states[]) { +rd_kafka_txn_require_states0(rd_kafka_t *rk, rd_kafka_txn_state_t states[]) { rd_kafka_error_t *error; size_t i; if (unlikely((error = rd_kafka_ensure_transactional(rk)) != NULL)) return error; - for (i = 0 ; (int)states[i] != -1 ; i++) + for (i = 0; (int)states[i] != -1; i++) if (rk->rk_eos.txn_state == states[i]) return NULL; /* For fatal and abortable states return the last transactional * error, for all other states just return a state error. 
*/ if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_FATAL_ERROR) - error = rd_kafka_error_new_fatal(rk->rk_eos.txn_err, - "%s", rk->rk_eos.txn_errstr); + error = rd_kafka_error_new_fatal(rk->rk_eos.txn_err, "%s", + rk->rk_eos.txn_errstr); else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR) { - error = rd_kafka_error_new(rk->rk_eos.txn_err, - "%s", rk->rk_eos.txn_errstr); + error = rd_kafka_error_new(rk->rk_eos.txn_err, "%s", + rk->rk_eos.txn_errstr); rd_kafka_error_set_txn_requires_abort(error); } else error = rd_kafka_error_new( - RD_KAFKA_RESP_ERR__STATE, - "Operation not valid in state %s", - rd_kafka_txn_state2str(rk->rk_eos.txn_state)); + RD_KAFKA_RESP_ERR__STATE, "Operation not valid in state %s", + rd_kafka_txn_state2str(rk->rk_eos.txn_state)); return error; } /** @brief \p ... is a list of states */ -#define rd_kafka_txn_require_state(rk,...) \ - rd_kafka_txn_require_states0(rk, \ - (rd_kafka_txn_state_t[]){ \ - __VA_ARGS__, -1 }) +#define rd_kafka_txn_require_state(rk, ...) \ + rd_kafka_txn_require_states0( \ + rk, (rd_kafka_txn_state_t[]) {__VA_ARGS__, -1}) @@ -146,14 +140,13 @@ rd_kafka_txn_require_states0 (rd_kafka_t *rk, * @returns true if the state transition is valid, else false. */ static rd_bool_t -rd_kafka_txn_state_transition_is_valid (rd_kafka_txn_state_t curr, - rd_kafka_txn_state_t new_state, - rd_bool_t *ignore) { +rd_kafka_txn_state_transition_is_valid(rd_kafka_txn_state_t curr, + rd_kafka_txn_state_t new_state, + rd_bool_t *ignore) { *ignore = rd_false; - switch (new_state) - { + switch (new_state) { case RD_KAFKA_TXN_STATE_INIT: /* This is the initialized value and this transition will * never happen. 
*/ @@ -167,8 +160,8 @@ rd_kafka_txn_state_transition_is_valid (rd_kafka_txn_state_t curr, case RD_KAFKA_TXN_STATE_READY: return curr == RD_KAFKA_TXN_STATE_READY_NOT_ACKED || - curr == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED || - curr == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED; + curr == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED || + curr == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED; case RD_KAFKA_TXN_STATE_IN_TRANSACTION: return curr == RD_KAFKA_TXN_STATE_READY; @@ -181,11 +174,11 @@ rd_kafka_txn_state_transition_is_valid (rd_kafka_txn_state_t curr, case RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED: return curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT || - curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION; + curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION; case RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION: return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION || - curr == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR; + curr == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR; case RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED: return curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION; @@ -200,8 +193,8 @@ rd_kafka_txn_state_transition_is_valid (rd_kafka_txn_state_t curr, } return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION || - curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT || - curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION; + curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT || + curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION; case RD_KAFKA_TXN_STATE_FATAL_ERROR: /* Any state can transition to a fatal error */ @@ -225,8 +218,8 @@ rd_kafka_txn_state_transition_is_valid (rd_kafka_txn_state_t curr, * @locality rdkafka main thread * @locks rd_kafka_wrlock MUST be held */ -static void rd_kafka_txn_set_state (rd_kafka_t *rk, - rd_kafka_txn_state_t new_state) { +static void rd_kafka_txn_set_state(rd_kafka_t *rk, + rd_kafka_txn_state_t new_state) { rd_bool_t ignore; if (rk->rk_eos.txn_state == new_state) @@ -249,8 +242,7 @@ static void rd_kafka_txn_set_state (rd_kafka_t *rk, return; } - rd_kafka_dbg(rk, EOS, "TXNSTATE", - "Transaction state change %s -> %s", + 
rd_kafka_dbg(rk, EOS, "TXNSTATE", "Transaction state change %s -> %s", rd_kafka_txn_state2str(rk->rk_eos.txn_state), rd_kafka_txn_state2str(new_state)); @@ -273,9 +265,11 @@ static void rd_kafka_txn_set_state (rd_kafka_t *rk, * @locality any * @locks rd_kafka_wrlock MUST NOT be held */ -void rd_kafka_txn_set_fatal_error (rd_kafka_t *rk, rd_dolock_t do_lock, - rd_kafka_resp_err_t err, - const char *fmt, ...) { +void rd_kafka_txn_set_fatal_error(rd_kafka_t *rk, + rd_dolock_t do_lock, + rd_kafka_resp_err_t err, + const char *fmt, + ...) { char errstr[512]; va_list ap; @@ -284,8 +278,8 @@ void rd_kafka_txn_set_fatal_error (rd_kafka_t *rk, rd_dolock_t do_lock, va_end(ap); rd_kafka_log(rk, LOG_ALERT, "TXNERR", - "Fatal transaction error: %s (%s)", - errstr, rd_kafka_err2name(err)); + "Fatal transaction error: %s (%s)", errstr, + rd_kafka_err2name(err)); if (do_lock) rd_kafka_wrlock(rk); @@ -300,8 +294,8 @@ void rd_kafka_txn_set_fatal_error (rd_kafka_t *rk, rd_dolock_t do_lock, /* If application has called init_transactions() and * it has now failed, reply to the app. */ rd_kafka_txn_curr_api_reply_error( - rk->rk_eos.txn_init_rkq, - rd_kafka_error_new_fatal(err, "%s", errstr)); + rk->rk_eos.txn_init_rkq, + rd_kafka_error_new_fatal(err, "%s", errstr)); rk->rk_eos.txn_init_rkq = NULL; } @@ -321,10 +315,11 @@ void rd_kafka_txn_set_fatal_error (rd_kafka_t *rk, rd_dolock_t do_lock, * @locality rdkafka main thread * @locks rd_kafka_wrlock MUST NOT be held */ -void rd_kafka_txn_set_abortable_error0 (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_bool_t requires_epoch_bump, - const char *fmt, ...) { +void rd_kafka_txn_set_abortable_error0(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_bool_t requires_epoch_bump, + const char *fmt, + ...) 
{ char errstr[512]; va_list ap; @@ -351,8 +346,7 @@ void rd_kafka_txn_set_abortable_error0 (rd_kafka_t *rk, "Ignoring sub-sequent abortable transaction " "error: %s (%s): " "previous error (%s) already raised", - errstr, - rd_kafka_err2name(err), + errstr, rd_kafka_err2name(err), rd_kafka_err2name(rk->rk_eos.txn_err)); rd_kafka_wrunlock(rk); return; @@ -365,19 +359,16 @@ void rd_kafka_txn_set_abortable_error0 (rd_kafka_t *rk, rd_kafka_log(rk, LOG_ERR, "TXNERR", "Current transaction failed in state %s: %s (%s%s)", - rd_kafka_txn_state2str(rk->rk_eos.txn_state), - errstr, rd_kafka_err2name(err), + rd_kafka_txn_state2str(rk->rk_eos.txn_state), errstr, + rd_kafka_err2name(err), requires_epoch_bump ? ", requires epoch bump" : ""); rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORTABLE_ERROR); rd_kafka_wrunlock(rk); /* Purge all messages in queue/flight */ - rd_kafka_purge(rk, - RD_KAFKA_PURGE_F_QUEUE | - RD_KAFKA_PURGE_F_ABORT_TXN | - RD_KAFKA_PURGE_F_NON_BLOCKING); - + rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_ABORT_TXN | + RD_KAFKA_PURGE_F_NON_BLOCKING); } @@ -393,8 +384,8 @@ void rd_kafka_txn_set_abortable_error0 (rd_kafka_t *rk, * @locality rdkafka main thread * @locks any */ -static void -rd_kafka_txn_curr_api_reply_error (rd_kafka_q_t *rkq, rd_kafka_error_t *error) { +static void rd_kafka_txn_curr_api_reply_error(rd_kafka_q_t *rkq, + rd_kafka_error_t *error) { rd_kafka_op_t *rko; if (!rkq) { @@ -403,11 +394,11 @@ rd_kafka_txn_curr_api_reply_error (rd_kafka_q_t *rkq, rd_kafka_error_t *error) { return; } - rko = rd_kafka_op_new(RD_KAFKA_OP_TXN|RD_KAFKA_OP_REPLY); + rko = rd_kafka_op_new(RD_KAFKA_OP_TXN | RD_KAFKA_OP_REPLY); if (error) { rko->rko_error = error; - rko->rko_err = rd_kafka_error_code(error); + rko->rko_err = rd_kafka_error_code(error); } rd_kafka_q_enq(rkq, rko); @@ -431,18 +422,17 @@ rd_kafka_txn_curr_api_reply_error (rd_kafka_q_t *rkq, rd_kafka_error_t *error) { * @locality rdkafka main thread * @locks any */ -static void 
-rd_kafka_txn_curr_api_reply (rd_kafka_q_t *rkq, - int actions, - rd_kafka_resp_err_t err, - const char *errstr_fmt, ...) - RD_FORMAT(printf, 4, 5); - -static void -rd_kafka_txn_curr_api_reply (rd_kafka_q_t *rkq, - int actions, - rd_kafka_resp_err_t err, - const char *errstr_fmt, ...) { +static void rd_kafka_txn_curr_api_reply(rd_kafka_q_t *rkq, + int actions, + rd_kafka_resp_err_t err, + const char *errstr_fmt, + ...) RD_FORMAT(printf, 4, 5); + +static void rd_kafka_txn_curr_api_reply(rd_kafka_q_t *rkq, + int actions, + rd_kafka_resp_err_t err, + const char *errstr_fmt, + ...) { rd_kafka_error_t *error = NULL; if (err) { @@ -471,8 +461,8 @@ rd_kafka_txn_curr_api_reply (rd_kafka_q_t *rkq, * @locality any thread * @locks rd_kafka_wrlock(rk) MUST be held */ -void rd_kafka_txn_idemp_state_change (rd_kafka_t *rk, - rd_kafka_idemp_state_t idemp_state) { +void rd_kafka_txn_idemp_state_change(rd_kafka_t *rk, + rd_kafka_idemp_state_t idemp_state) { rd_bool_t reply_assigned = rd_false; if (idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED && @@ -484,7 +474,7 @@ void rd_kafka_txn_idemp_state_change (rd_kafka_t *rk, } else if (idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED && rk->rk_eos.txn_state == - RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) { + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) { /* Application is calling abort_transaction() as we're * recovering from a fatal idempotence error. */ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); @@ -500,17 +490,15 @@ void rd_kafka_txn_idemp_state_change (rd_kafka_t *rk, * abort_transaction() and it has now failed, * reply to the app. */ rd_kafka_txn_curr_api_reply_error( - rk->rk_eos.txn_init_rkq, - rd_kafka_error_new_fatal( - rk->rk_eos.txn_err ? - rk->rk_eos.txn_err : - RD_KAFKA_RESP_ERR__FATAL, - "Fatal error raised by " - "idempotent producer while " - "retrieving PID: %s", - rk->rk_eos.txn_errstr ? 
- rk->rk_eos.txn_errstr : - "see previous logs")); + rk->rk_eos.txn_init_rkq, + rd_kafka_error_new_fatal( + rk->rk_eos.txn_err ? rk->rk_eos.txn_err + : RD_KAFKA_RESP_ERR__FATAL, + "Fatal error raised by " + "idempotent producer while " + "retrieving PID: %s", + rk->rk_eos.txn_errstr ? rk->rk_eos.txn_errstr + : "see previous logs")); rk->rk_eos.txn_init_rkq = NULL; } } @@ -520,12 +508,9 @@ void rd_kafka_txn_idemp_state_change (rd_kafka_t *rk, * abort_transaction() and it is now complete, * reply to the app. */ rd_kafka_txn_curr_api_reply(rk->rk_eos.txn_init_rkq, 0, - RD_KAFKA_RESP_ERR_NO_ERROR, - NULL); + RD_KAFKA_RESP_ERR_NO_ERROR, NULL); rk->rk_eos.txn_init_rkq = NULL; } - - } @@ -535,15 +520,15 @@ void rd_kafka_txn_idemp_state_change (rd_kafka_t *rk, * @locality rdkafka main thread * @locks none */ -static void rd_kafka_txn_partition_registered (rd_kafka_toppar_t *rktp) { +static void rd_kafka_txn_partition_registered(rd_kafka_toppar_t *rktp) { rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; rd_kafka_toppar_lock(rktp); if (unlikely(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_PEND_TXN))) { - rd_kafka_dbg(rk, EOS|RD_KAFKA_DBG_PROTOCOL, - "ADDPARTS", - "\"%.*s\" [%"PRId32"] is not in pending " + rd_kafka_dbg(rk, EOS | RD_KAFKA_DBG_PROTOCOL, "ADDPARTS", + "\"%.*s\" [%" PRId32 + "] is not in pending " "list but returned in AddPartitionsToTxn " "response: ignoring", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), @@ -552,17 +537,17 @@ static void rd_kafka_txn_partition_registered (rd_kafka_toppar_t *rktp) { return; } - rd_kafka_dbg(rk, EOS|RD_KAFKA_DBG_TOPIC, "ADDPARTS", - "%.*s [%"PRId32"] registered with transaction", + rd_kafka_dbg(rk, EOS | RD_KAFKA_DBG_TOPIC, "ADDPARTS", + "%.*s [%" PRId32 "] registered with transaction", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition); - rd_assert((rktp->rktp_flags & (RD_KAFKA_TOPPAR_F_PEND_TXN| - RD_KAFKA_TOPPAR_F_IN_TXN)) == + rd_assert((rktp->rktp_flags & + (RD_KAFKA_TOPPAR_F_PEND_TXN | RD_KAFKA_TOPPAR_F_IN_TXN)) == 
RD_KAFKA_TOPPAR_F_PEND_TXN); rktp->rktp_flags = (rktp->rktp_flags & ~RD_KAFKA_TOPPAR_F_PEND_TXN) | - RD_KAFKA_TOPPAR_F_IN_TXN; + RD_KAFKA_TOPPAR_F_IN_TXN; rd_kafka_toppar_unlock(rktp); @@ -583,16 +568,16 @@ static void rd_kafka_txn_partition_registered (rd_kafka_toppar_t *rktp) { * @locality rdkafka main thread * @locks none */ -static void rd_kafka_txn_handle_AddPartitionsToTxn (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_txn_handle_AddPartitionsToTxn(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; int32_t TopicCnt; - int actions = 0; - int retry_backoff_ms = 500; /* retry backoff */ + int actions = 0; + int retry_backoff_ms = 500; /* retry backoff */ rd_kafka_resp_err_t reset_coord_err = RD_KAFKA_RESP_ERR_NO_ERROR; if (err) @@ -642,23 +627,21 @@ static void rd_kafka_txn_handle_AddPartitionsToTxn (rd_kafka_t *rk, rd_kafka_buf_read_i16(rkbuf, &ErrorCode); if (rkt) - rktp = rd_kafka_toppar_get(rkt, - Partition, + rktp = rd_kafka_toppar_get(rkt, Partition, rd_false); if (!rktp) { - rd_rkb_dbg(rkb, EOS|RD_KAFKA_DBG_PROTOCOL, + rd_rkb_dbg(rkb, EOS | RD_KAFKA_DBG_PROTOCOL, "ADDPARTS", "Unknown partition \"%.*s\" " - "[%"PRId32"] in AddPartitionsToTxn " + "[%" PRId32 + "] in AddPartitionsToTxn " "response: ignoring", - RD_KAFKAP_STR_PR(&Topic), - Partition); + RD_KAFKAP_STR_PR(&Topic), Partition); continue; } - switch (ErrorCode) - { + switch (ErrorCode) { case RD_KAFKA_RESP_ERR_NO_ERROR: /* Move rktp from pending to proper list */ rd_kafka_txn_partition_registered(rktp); @@ -672,7 +655,7 @@ static void rd_kafka_txn_handle_AddPartitionsToTxn (rd_kafka_t *rk, case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE: reset_coord_err = ErrorCode; p_actions |= RD_KAFKA_ERR_ACTION_RETRY; - err = ErrorCode; + err = ErrorCode; request_error = 
rd_true; break; @@ -682,7 +665,7 @@ static void rd_kafka_txn_handle_AddPartitionsToTxn (rd_kafka_t *rk, case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS: case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: p_actions |= RD_KAFKA_ERR_ACTION_RETRY; - err = ErrorCode; + err = ErrorCode; request_error = rd_true; break; @@ -692,14 +675,14 @@ static void rd_kafka_txn_handle_AddPartitionsToTxn (rd_kafka_t *rk, case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE: case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED: p_actions |= RD_KAFKA_ERR_ACTION_FATAL; - err = ErrorCode; + err = ErrorCode; request_error = rd_true; break; case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID: case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING: p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT; - err = ErrorCode; + err = ErrorCode; request_error = rd_true; break; @@ -732,25 +715,21 @@ static void rd_kafka_txn_handle_AddPartitionsToTxn (rd_kafka_t *rk, (RD_KAFKA_ERR_ACTION_FATAL | RD_KAFKA_ERR_ACTION_PERMANENT))) rd_rkb_dbg( - rkb, EOS, - "ADDPARTS", - "AddPartitionsToTxn response: " - "partition \"%.*s\": " - "[%"PRId32"]: %s", - RD_KAFKAP_STR_PR(&Topic), - Partition, - rd_kafka_err2str( - ErrorCode)); + rkb, EOS, "ADDPARTS", + "AddPartitionsToTxn response: " + "partition \"%.*s\": " + "[%" PRId32 "]: %s", + RD_KAFKAP_STR_PR(&Topic), Partition, + rd_kafka_err2str(ErrorCode)); else - rd_rkb_log(rkb, LOG_ERR, - "ADDPARTS", + rd_rkb_log(rkb, LOG_ERR, "ADDPARTS", "Failed to add partition " - "\"%.*s\" [%"PRId32"] to " + "\"%.*s\" [%" PRId32 + "] to " "transaction: %s", RD_KAFKAP_STR_PR(&Topic), Partition, - rd_kafka_err2str( - ErrorCode)); + rd_kafka_err2str(ErrorCode)); } rd_kafka_toppar_destroy(rktp); @@ -777,19 +756,18 @@ static void rd_kafka_txn_handle_AddPartitionsToTxn (rd_kafka_t *rk, goto done; - err_parse: +err_parse: err = rkbuf->rkbuf_err; actions |= RD_KAFKA_ERR_ACTION_PERMANENT; - done: +done: if (err) { rd_assert(rk->rk_eos.txn_req_cnt > 0); rk->rk_eos.txn_req_cnt--; } /* Handle local request-level errors */ - 
switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR_NO_ERROR: break; @@ -827,9 +805,8 @@ static void rd_kafka_txn_handle_AddPartitionsToTxn (rd_kafka_t *rk, */ mtx_lock(&rk->rk_eos.txn_pending_lock); TAILQ_CONCAT_SORTED(&rk->rk_eos.txn_pending_rktps, - &rk->rk_eos.txn_waitresp_rktps, - rd_kafka_toppar_t *, rktp_txnlink, - rd_kafka_toppar_topic_cmp); + &rk->rk_eos.txn_waitresp_rktps, rd_kafka_toppar_t *, + rktp_txnlink, rd_kafka_toppar_topic_cmp); mtx_unlock(&rk->rk_eos.txn_pending_lock); err = rd_kafka_txn_normalize_err(err); @@ -843,20 +820,18 @@ static void rd_kafka_txn_handle_AddPartitionsToTxn (rd_kafka_t *rk, } else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) { /* Treat all other permanent errors as abortable errors */ rd_kafka_txn_set_abortable_error( - rk, err, - "Failed to add partition(s) to transaction " - "on broker %s: %s (after %d ms)", - rd_kafka_broker_name(rkb), - rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent/1000)); + rk, err, + "Failed to add partition(s) to transaction " + "on broker %s: %s (after %d ms)", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent / 1000)); } else { /* Schedule registration of any new or remaining partitions */ rd_kafka_txn_schedule_register_partitions( - rk, - (actions & RD_KAFKA_ERR_ACTION_RETRY) ? - retry_backoff_ms : 1/*immediate*/); - + rk, (actions & RD_KAFKA_ERR_ACTION_RETRY) + ? 
retry_backoff_ms + : 1 /*immediate*/); } } @@ -867,7 +842,7 @@ static void rd_kafka_txn_handle_AddPartitionsToTxn (rd_kafka_t *rk, * @locality rdkafka main thread * @locks none */ -static void rd_kafka_txn_register_partitions (rd_kafka_t *rk) { +static void rd_kafka_txn_register_partitions(rd_kafka_t *rk) { char errstr[512]; rd_kafka_resp_err_t err; rd_kafka_error_t *error; @@ -875,9 +850,9 @@ static void rd_kafka_txn_register_partitions (rd_kafka_t *rk) { /* Require operational state */ rd_kafka_rdlock(rk); - error = rd_kafka_txn_require_state(rk, - RD_KAFKA_TXN_STATE_IN_TRANSACTION, - RD_KAFKA_TXN_STATE_BEGIN_COMMIT); + error = + rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION, + RD_KAFKA_TXN_STATE_BEGIN_COMMIT); if (unlikely(error != NULL)) { rd_kafka_rdunlock(rk); @@ -889,7 +864,7 @@ static void rd_kafka_txn_register_partitions (rd_kafka_t *rk) { } /* Get pid, checked later */ - pid = rd_kafka_idemp_get_pid0(rk, rd_false/*dont-lock*/); + pid = rd_kafka_idemp_get_pid0(rk, rd_false /*dont-lock*/); rd_kafka_rdunlock(rk); @@ -931,13 +906,10 @@ static void rd_kafka_txn_register_partitions (rd_kafka_t *rk) { /* Send request to coordinator */ err = rd_kafka_AddPartitionsToTxnRequest( - rk->rk_eos.txn_coord, - rk->rk_conf.eos.transactional_id, - pid, - &rk->rk_eos.txn_pending_rktps, - errstr, sizeof(errstr), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_txn_handle_AddPartitionsToTxn, NULL); + rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid, + &rk->rk_eos.txn_pending_rktps, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_txn_handle_AddPartitionsToTxn, NULL); if (err) { mtx_unlock(&rk->rk_eos.txn_pending_lock); rd_kafka_dbg(rk, EOS, "ADDPARTS", @@ -948,8 +920,7 @@ static void rd_kafka_txn_register_partitions (rd_kafka_t *rk) { /* Move all pending partitions to wait-response list. * No need to keep waitresp sorted. 
*/ TAILQ_CONCAT(&rk->rk_eos.txn_waitresp_rktps, - &rk->rk_eos.txn_pending_rktps, - rktp_txnlink); + &rk->rk_eos.txn_pending_rktps, rktp_txnlink); mtx_unlock(&rk->rk_eos.txn_pending_lock); @@ -960,8 +931,8 @@ static void rd_kafka_txn_register_partitions (rd_kafka_t *rk) { } -static void rd_kafka_txn_register_partitions_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_txn_register_partitions_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_t *rk = arg; rd_kafka_txn_register_partitions(rk); } @@ -973,14 +944,12 @@ static void rd_kafka_txn_register_partitions_tmr_cb (rd_kafka_timers_t *rkts, * @locality any * @locks any */ -void rd_kafka_txn_schedule_register_partitions (rd_kafka_t *rk, - int backoff_ms) { +void rd_kafka_txn_schedule_register_partitions(rd_kafka_t *rk, int backoff_ms) { rd_kafka_timer_start_oneshot( - &rk->rk_timers, - &rk->rk_eos.txn_register_parts_tmr, rd_false/*dont-restart*/, - backoff_ms ? backoff_ms * 1000 : 1 /* immediate */, - rd_kafka_txn_register_partitions_tmr_cb, - rk); + &rk->rk_timers, &rk->rk_eos.txn_register_parts_tmr, + rd_false /*dont-restart*/, + backoff_ms ? backoff_ms * 1000 : 1 /* immediate */, + rd_kafka_txn_register_partitions_tmr_cb, rk); } @@ -989,8 +958,8 @@ void rd_kafka_txn_schedule_register_partitions (rd_kafka_t *rk, * @brief Clears \p flag from all rktps and destroys them, emptying * and reinitializing the \p tqh. 
*/ -static void rd_kafka_txn_clear_partitions_flag (rd_kafka_toppar_tqhead_t *tqh, - int flag) { +static void rd_kafka_txn_clear_partitions_flag(rd_kafka_toppar_tqhead_t *tqh, + int flag) { rd_kafka_toppar_t *rktp, *tmp; TAILQ_FOREACH_SAFE(rktp, tqh, rktp_txnlink, tmp) { @@ -1010,7 +979,7 @@ static void rd_kafka_txn_clear_partitions_flag (rd_kafka_toppar_tqhead_t *tqh, * * @locks txn_pending_lock MUST be held */ -static void rd_kafka_txn_clear_pending_partitions (rd_kafka_t *rk) { +static void rd_kafka_txn_clear_pending_partitions(rd_kafka_t *rk) { rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_pending_rktps, RD_KAFKA_TOPPAR_F_PEND_TXN); rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_waitresp_rktps, @@ -1022,36 +991,33 @@ static void rd_kafka_txn_clear_pending_partitions (rd_kafka_t *rk) { * * @locks rd_kafka_wrlock(rk) MUST be held */ -static void rd_kafka_txn_clear_partitions (rd_kafka_t *rk) { +static void rd_kafka_txn_clear_partitions(rd_kafka_t *rk) { rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_rktps, RD_KAFKA_TOPPAR_F_IN_TXN); } - /** * @brief Op timeout callback which fails the current transaction. 
* * @locality rdkafka main thread * @locks none */ -static void -rd_kafka_txn_curr_api_abort_timeout_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_txn_curr_api_abort_timeout_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_q_t *rkq = arg; rd_kafka_txn_set_abortable_error( - rkts->rkts_rk, - RD_KAFKA_RESP_ERR__TIMED_OUT, - "Transactional API operation (%s) timed out", - rkq->rkq_rk->rk_eos.txn_curr_api.name); + rkts->rkts_rk, RD_KAFKA_RESP_ERR__TIMED_OUT, + "Transactional API operation (%s) timed out", + rkq->rkq_rk->rk_eos.txn_curr_api.name); rd_kafka_txn_curr_api_reply_error( - rkq, - rd_kafka_error_new_txn_requires_abort( - RD_KAFKA_RESP_ERR__TIMED_OUT, - "Transactional API operation (%s) timed out", - rkq->rkq_rk->rk_eos.txn_curr_api.name)); + rkq, rd_kafka_error_new_txn_requires_abort( + RD_KAFKA_RESP_ERR__TIMED_OUT, + "Transactional API operation (%s) timed out", + rkq->rkq_rk->rk_eos.txn_curr_api.name)); } /** @@ -1061,15 +1027,14 @@ rd_kafka_txn_curr_api_abort_timeout_cb (rd_kafka_timers_t *rkts, void *arg) { * @locality rdkafka main thread * @locks none */ -static void -rd_kafka_txn_curr_api_retriable_timeout_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_txn_curr_api_retriable_timeout_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_q_t *rkq = arg; rd_kafka_txn_curr_api_reply_error( - rkq, - rd_kafka_error_new_retriable( - RD_KAFKA_RESP_ERR__TIMED_OUT, - "Transactional operation timed out")); + rkq, + rd_kafka_error_new_retriable(RD_KAFKA_RESP_ERR__TIMED_OUT, + "Transactional operation timed out")); } @@ -1079,8 +1044,8 @@ rd_kafka_txn_curr_api_retriable_timeout_cb (rd_kafka_timers_t *rkts, void *arg) * @locality rdkafka main thread * @locks none */ -static void -rd_kafka_txn_curr_api_timeout_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_txn_curr_api_timeout_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_q_t *rkq = arg; rd_kafka_txn_curr_api_reply(rkq, 0, RD_KAFKA_RESP_ERR__TIMED_OUT, @@ 
-1094,8 +1059,8 @@ rd_kafka_txn_curr_api_timeout_cb (rd_kafka_timers_t *rkts, void *arg) { * @locality rdkafka main thread * @locks none */ -static void -rd_kafka_txn_curr_api_init_timeout_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_txn_curr_api_init_timeout_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_q_t *rkq = arg; rd_kafka_error_t *error; rd_kafka_resp_err_t err = rkts->rkts_rk->rk_eos.txn_init_err; @@ -1103,8 +1068,7 @@ rd_kafka_txn_curr_api_init_timeout_cb (rd_kafka_timers_t *rkts, void *arg) { if (!err) err = RD_KAFKA_RESP_ERR__TIMED_OUT; - error = rd_kafka_error_new(err, - "Failed to initialize Producer ID: %s", + error = rd_kafka_error_new(err, "Failed to initialize Producer ID: %s", rd_kafka_err2str(err)); /* init_transactions() timeouts are retriable */ @@ -1128,16 +1092,14 @@ rd_kafka_txn_curr_api_init_timeout_cb (rd_kafka_timers_t *rkts, void *arg) { * @locality rdkafka main thread * @locks rd_kafka_wrlock(rk) MUST be held */ -static void rd_kafka_txn_curr_api_reset (rd_kafka_t *rk, rd_bool_t for_reuse) { +static void rd_kafka_txn_curr_api_reset(rd_kafka_t *rk, rd_bool_t for_reuse) { rd_bool_t timer_was_stopped; rd_kafka_q_t *rkq; /* Always stop timer and loose refcnt to reply queue. 
*/ - rkq = rk->rk_eos.txn_curr_api.tmr.rtmr_arg; + rkq = rk->rk_eos.txn_curr_api.tmr.rtmr_arg; timer_was_stopped = rd_kafka_timer_stop( - &rk->rk_timers, - &rk->rk_eos.txn_curr_api.tmr, - RD_DO_LOCK); + &rk->rk_timers, &rk->rk_eos.txn_curr_api.tmr, RD_DO_LOCK); if (rkq && timer_was_stopped) { /* Remove the stopped timer's reply queue reference @@ -1179,14 +1141,15 @@ static void rd_kafka_txn_curr_api_reset (rd_kafka_t *rk, rd_bool_t for_reuse) { * @locality application thread * @locks none */ -static rd_kafka_error_t * -rd_kafka_txn_curr_api_req (rd_kafka_t *rk, const char *name, - rd_kafka_op_t *rko, - int timeout_ms, int flags) { +static rd_kafka_error_t *rd_kafka_txn_curr_api_req(rd_kafka_t *rk, + const char *name, + rd_kafka_op_t *rko, + int timeout_ms, + int flags) { rd_kafka_op_t *reply; rd_bool_t reuse = rd_false; rd_bool_t for_reuse; - rd_kafka_q_t *tmpq = NULL; + rd_kafka_q_t *tmpq = NULL; rd_kafka_error_t *error = NULL; /* Strip __FUNCTION__ name's rd_kafka_ prefix since it will @@ -1202,9 +1165,10 @@ rd_kafka_txn_curr_api_req (rd_kafka_t *rk, const char *name, rd_kafka_wrlock(rk); - rd_kafka_dbg(rk, EOS, "TXNAPI", "Transactional API called: %s " - "(in txn state %s, idemp state %s)", name, - rd_kafka_txn_state2str(rk->rk_eos.txn_state), + rd_kafka_dbg(rk, EOS, "TXNAPI", + "Transactional API called: %s " + "(in txn state %s, idemp state %s)", + name, rd_kafka_txn_state2str(rk->rk_eos.txn_state), rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); /* First set for_reuse to the current flags to match with @@ -1215,9 +1179,9 @@ rd_kafka_txn_curr_api_req (rd_kafka_t *rk, const char *name, if ((for_reuse && !reuse) || (!for_reuse && *rk->rk_eos.txn_curr_api.name)) { error = rd_kafka_error_new( - RD_KAFKA_RESP_ERR__STATE, - "Conflicting %s call already in progress", - rk->rk_eos.txn_curr_api.name); + RD_KAFKA_RESP_ERR__STATE, + "Conflicting %s call already in progress", + rk->rk_eos.txn_curr_api.name); rd_kafka_wrunlock(rk); rd_kafka_op_destroy(rko); return 
error; @@ -1226,8 +1190,7 @@ rd_kafka_txn_curr_api_req (rd_kafka_t *rk, const char *name, rd_assert(for_reuse == reuse); rd_snprintf(rk->rk_eos.txn_curr_api.name, - sizeof(rk->rk_eos.txn_curr_api.name), - "%s", name); + sizeof(rk->rk_eos.txn_curr_api.name), "%s", name); tmpq = rd_kafka_q_new(rk); @@ -1245,18 +1208,16 @@ rd_kafka_txn_curr_api_req (rd_kafka_t *rk, const char *name, if (timeout_ms >= 0) { rd_kafka_q_keep(tmpq); rd_kafka_timer_start_oneshot( - &rk->rk_timers, - &rk->rk_eos.txn_curr_api.tmr, - rd_true, - timeout_ms * 1000, - !strcmp(name, "init_transactions") ? - rd_kafka_txn_curr_api_init_timeout_cb : - (flags & RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT ? - rd_kafka_txn_curr_api_abort_timeout_cb : - (flags & RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT ? - rd_kafka_txn_curr_api_retriable_timeout_cb : - rd_kafka_txn_curr_api_timeout_cb)), - tmpq); + &rk->rk_timers, &rk->rk_eos.txn_curr_api.tmr, rd_true, + timeout_ms * 1000, + !strcmp(name, "init_transactions") + ? rd_kafka_txn_curr_api_init_timeout_cb + : (flags & RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT + ? rd_kafka_txn_curr_api_abort_timeout_cb + : (flags & RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT + ? 
rd_kafka_txn_curr_api_retriable_timeout_cb + : rd_kafka_txn_curr_api_timeout_cb)), + tmpq); } rd_kafka_wrunlock(rk); @@ -1267,7 +1228,7 @@ rd_kafka_txn_curr_api_req (rd_kafka_t *rk, const char *name, if ((error = reply->rko_error)) { reply->rko_error = NULL; - for_reuse = rd_false; + for_reuse = rd_false; } rd_kafka_op_destroy(reply); @@ -1284,10 +1245,9 @@ rd_kafka_txn_curr_api_req (rd_kafka_t *rk, const char *name, * @locks none * @locality rdkafka main thread */ -static rd_kafka_op_res_t -rd_kafka_txn_op_init_transactions (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_txn_op_init_transactions(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_error_t *error; if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) @@ -1295,10 +1255,8 @@ rd_kafka_txn_op_init_transactions (rd_kafka_t *rk, rd_kafka_wrlock(rk); if ((error = rd_kafka_txn_require_state( - rk, - RD_KAFKA_TXN_STATE_INIT, - RD_KAFKA_TXN_STATE_WAIT_PID, - RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) { + rk, RD_KAFKA_TXN_STATE_INIT, RD_KAFKA_TXN_STATE_WAIT_PID, + RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) { rd_kafka_wrunlock(rk); goto done; } @@ -1330,11 +1288,11 @@ rd_kafka_txn_op_init_transactions (rd_kafka_t *rk, rk->rk_eos.txn_init_err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Start idempotent producer to acquire PID */ - rd_kafka_idemp_start(rk, rd_true/*immediately*/); + rd_kafka_idemp_start(rk, rd_true /*immediately*/); return RD_KAFKA_OP_RES_HANDLED; - done: +done: rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), error); @@ -1350,9 +1308,9 @@ rd_kafka_txn_op_init_transactions (rd_kafka_t *rk, * @locality rdkafka main thread */ static rd_kafka_op_res_t -rd_kafka_txn_op_ack_init_transactions (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +rd_kafka_txn_op_ack_init_transactions(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_error_t *error; if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) @@ -1360,8 +1318,7 
@@ rd_kafka_txn_op_ack_init_transactions (rd_kafka_t *rk, rd_kafka_wrlock(rk); if ((error = rd_kafka_txn_require_state( - rk, - RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) { + rk, RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) { rd_kafka_wrunlock(rk); goto done; } @@ -1371,7 +1328,7 @@ rd_kafka_txn_op_ack_init_transactions (rd_kafka_t *rk, rd_kafka_wrunlock(rk); /* FALLTHRU */ - done: +done: rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), error); @@ -1380,8 +1337,7 @@ rd_kafka_txn_op_ack_init_transactions (rd_kafka_t *rk, -rd_kafka_error_t * -rd_kafka_init_transactions (rd_kafka_t *rk, int timeout_ms) { +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms) { rd_kafka_error_t *error; if ((error = rd_kafka_ensure_transactional(rk))) @@ -1404,11 +1360,11 @@ rd_kafka_init_transactions (rd_kafka_t *rk, int timeout_ms) { /* First call is to trigger initialization */ error = rd_kafka_txn_curr_api_req( - rk, __FUNCTION__, - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_init_transactions), - timeout_ms, - RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT| + rk, __FUNCTION__, + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rd_kafka_txn_op_init_transactions), + timeout_ms, + RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT | RD_KAFKA_TXN_CURR_API_F_FOR_REUSE); if (error) return error; @@ -1417,11 +1373,11 @@ rd_kafka_init_transactions (rd_kafka_t *rk, int timeout_ms) { /* Second call is to transition from READY_NOT_ACKED -> READY, * if necessary. 
*/ return rd_kafka_txn_curr_api_req( - rk, __FUNCTION__, - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_ack_init_transactions), - RD_POLL_INFINITE, /* immediate, no timeout needed */ - RD_KAFKA_TXN_CURR_API_F_REUSE); + rk, __FUNCTION__, + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rd_kafka_txn_op_ack_init_transactions), + RD_POLL_INFINITE, /* immediate, no timeout needed */ + RD_KAFKA_TXN_CURR_API_F_REUSE); } @@ -1432,10 +1388,9 @@ rd_kafka_init_transactions (rd_kafka_t *rk, int timeout_ms) { * @locks none * @locality rdkafka main thread */ -static rd_kafka_op_res_t -rd_kafka_txn_op_begin_transaction (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_txn_op_begin_transaction(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_error_t *error; rd_bool_t wakeup_brokers = rd_false; @@ -1443,8 +1398,8 @@ rd_kafka_txn_op_begin_transaction (rd_kafka_t *rk, return RD_KAFKA_OP_RES_HANDLED; rd_kafka_wrlock(rk); - if (!(error = rd_kafka_txn_require_state(rk, - RD_KAFKA_TXN_STATE_READY))) { + if (!(error = + rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_READY))) { rd_assert(TAILQ_EMPTY(&rk->rk_eos.txn_rktps)); rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION); @@ -1459,7 +1414,6 @@ rd_kafka_txn_op_begin_transaction (rd_kafka_t *rk, * that were waiting for this transaction state. * But needs to be done below with no lock held. 
*/ wakeup_brokers = rd_true; - } rd_kafka_wrunlock(rk); @@ -1473,7 +1427,7 @@ rd_kafka_txn_op_begin_transaction (rd_kafka_t *rk, } -rd_kafka_error_t *rd_kafka_begin_transaction (rd_kafka_t *rk) { +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk) { rd_kafka_op_t *reply; rd_kafka_error_t *error; @@ -1481,10 +1435,10 @@ rd_kafka_error_t *rd_kafka_begin_transaction (rd_kafka_t *rk) { return error; reply = rd_kafka_op_req( - rk->rk_ops, - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_begin_transaction), - RD_POLL_INFINITE); + rk->rk_ops, + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rd_kafka_txn_op_begin_transaction), + RD_POLL_INFINITE); if ((error = reply->rko_error)) reply->rko_error = NULL; @@ -1496,11 +1450,11 @@ rd_kafka_error_t *rd_kafka_begin_transaction (rd_kafka_t *rk) { static rd_kafka_resp_err_t -rd_kafka_txn_send_TxnOffsetCommitRequest (rd_kafka_broker_t *rkb, - rd_kafka_op_t *rko, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *reply_opaque); +rd_kafka_txn_send_TxnOffsetCommitRequest(rd_kafka_broker_t *rkb, + rd_kafka_op_t *rko, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *reply_opaque); /** * @brief Handle TxnOffsetCommitResponse @@ -1508,15 +1462,15 @@ rd_kafka_txn_send_TxnOffsetCommitRequest (rd_kafka_broker_t *rkb, * @locality rdkafka main thread * @locks none */ -static void rd_kafka_txn_handle_TxnOffsetCommit (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - const int log_decode_errors = LOG_ERR; - rd_kafka_op_t *rko = opaque; - int actions = 0; +static void rd_kafka_txn_handle_TxnOffsetCommit(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + const int log_decode_errors = LOG_ERR; + rd_kafka_op_t *rko = opaque; + int actions = 0; rd_kafka_topic_partition_list_t *partitions = NULL; char errstr[512]; @@ 
-1531,31 +1485,31 @@ static void rd_kafka_txn_handle_TxnOffsetCommit (rd_kafka_t *rk, rd_kafka_buf_read_throttle_time(rkbuf); - partitions = rd_kafka_buf_read_topic_partitions(rkbuf, 0, - rd_false, rd_true); + partitions = + rd_kafka_buf_read_topic_partitions(rkbuf, 0, rd_false, rd_true); if (!partitions) goto err_parse; err = rd_kafka_topic_partition_list_get_err(partitions); if (err) { char errparts[256]; - rd_kafka_topic_partition_list_str(partitions, - errparts, sizeof(errparts), + rd_kafka_topic_partition_list_str(partitions, errparts, + sizeof(errparts), RD_KAFKA_FMT_F_ONLY_ERR); rd_snprintf(errstr, sizeof(errstr), "Failed to commit offsets to transaction on " "broker %s: %s " "(after %dms)", - rd_kafka_broker_name(rkb), - errparts, (int)(request->rkbuf_ts_sent/1000)); + rd_kafka_broker_name(rkb), errparts, + (int)(request->rkbuf_ts_sent / 1000)); } goto done; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - done: +done: if (err) { if (!*errstr) { rd_snprintf(errstr, sizeof(errstr), @@ -1564,7 +1518,7 @@ static void rd_kafka_txn_handle_TxnOffsetCommit (rd_kafka_t *rk, "(after %d ms)", rkb ? 
rd_kafka_broker_name(rkb) : "(none)", rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent/1000)); + (int)(request->rkbuf_ts_sent / 1000)); } } @@ -1572,8 +1526,7 @@ static void rd_kafka_txn_handle_TxnOffsetCommit (rd_kafka_t *rk, if (partitions) rd_kafka_topic_partition_list_destroy(partitions); - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR_NO_ERROR: break; @@ -1632,25 +1585,20 @@ static void rd_kafka_txn_handle_TxnOffsetCommit (rd_kafka_t *rk, err = rd_kafka_txn_normalize_err(err); if (actions & RD_KAFKA_ERR_ACTION_FATAL) { - rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err, - "%s", errstr); + rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err, "%s", errstr); } else if (actions & RD_KAFKA_ERR_ACTION_RETRY) { int remains_ms = rd_timeout_remains(rko->rko_u.txn.abs_timeout); if (!rd_timeout_expired(remains_ms)) { rd_kafka_coord_req( - rk, - RD_KAFKA_COORD_GROUP, - rko->rko_u.txn.cgmetadata->group_id, - rd_kafka_txn_send_TxnOffsetCommitRequest, - rko, - rd_timeout_remains_limit0( - remains_ms, - rk->rk_conf.socket_timeout_ms), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_txn_handle_TxnOffsetCommit, - rko); + rk, RD_KAFKA_COORD_GROUP, + rko->rko_u.txn.cgmetadata->group_id, + rd_kafka_txn_send_TxnOffsetCommitRequest, rko, + rd_timeout_remains_limit0( + remains_ms, rk->rk_conf.socket_timeout_ms), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_txn_handle_TxnOffsetCommit, rko); return; } else if (!err) err = RD_KAFKA_RESP_ERR__TIMED_OUT; @@ -1680,17 +1628,17 @@ static void rd_kafka_txn_handle_TxnOffsetCommit (rd_kafka_t *rk, * @locks none */ static rd_kafka_resp_err_t -rd_kafka_txn_send_TxnOffsetCommitRequest (rd_kafka_broker_t *rkb, - rd_kafka_op_t *rko, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *reply_opaque) { +rd_kafka_txn_send_TxnOffsetCommitRequest(rd_kafka_broker_t *rkb, + rd_kafka_op_t *rko, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *reply_opaque) { rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_buf_t *rkbuf; 
int16_t ApiVersion; rd_kafka_pid_t pid; const rd_kafka_consumer_group_metadata_t *cgmetadata = - rko->rko_u.txn.cgmetadata; + rko->rko_u.txn.cgmetadata; int cnt; rd_kafka_rdlock(rk); @@ -1712,7 +1660,7 @@ rd_kafka_txn_send_TxnOffsetCommitRequest (rd_kafka_broker_t *rkb, } ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_TxnOffsetCommit, 0, 3, NULL); + rkb, RD_KAFKAP_TxnOffsetCommit, 0, 3, NULL); if (ApiVersion == -1) { /* Do not free the rko, it is passed as the reply_opaque * on the reply queue by coord_req_fsm() when we return @@ -1721,10 +1669,8 @@ rd_kafka_txn_send_TxnOffsetCommitRequest (rd_kafka_broker_t *rkb, } rkbuf = rd_kafka_buf_new_flexver_request( - rkb, - RD_KAFKAP_TxnOffsetCommit, 1, - rko->rko_u.txn.offsets->cnt * 50, - ApiVersion >= 3); + rkb, RD_KAFKAP_TxnOffsetCommit, 1, rko->rko_u.txn.offsets->cnt * 50, + ApiVersion >= 3); /* transactional_id */ rd_kafka_buf_write_str(rkbuf, rk->rk_conf.eos.transactional_id, -1); @@ -1748,13 +1694,9 @@ rd_kafka_txn_send_TxnOffsetCommitRequest (rd_kafka_broker_t *rkb, /* Write per-partition offsets list */ cnt = rd_kafka_buf_write_topic_partitions( - rkbuf, - rko->rko_u.txn.offsets, - rd_true /*skip invalid offsets*/, - rd_false /*any offset*/, - rd_true /*write offsets*/, - ApiVersion >= 2 /*write Epoch (-1) */, - rd_true /*write Metadata*/); + rkbuf, rko->rko_u.txn.offsets, rd_true /*skip invalid offsets*/, + rd_false /*any offset*/, rd_true /*write offsets*/, + ApiVersion >= 2 /*write Epoch (-1) */, rd_true /*write Metadata*/); if (!cnt) { /* No valid partition offsets, don't commit. 
*/ @@ -1769,8 +1711,8 @@ rd_kafka_txn_send_TxnOffsetCommitRequest (rd_kafka_broker_t *rkb, rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES; - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, - replyq, resp_cb, reply_opaque); + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, + reply_opaque); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -1782,14 +1724,14 @@ rd_kafka_txn_send_TxnOffsetCommitRequest (rd_kafka_broker_t *rkb, * @locality rdkafka main thread * @locks none */ -static void rd_kafka_txn_handle_AddOffsetsToTxn (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_txn_handle_AddOffsetsToTxn(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; - rd_kafka_op_t *rko = opaque; + rd_kafka_op_t *rko = opaque; int16_t ErrorCode; int actions = 0; int remains_ms; @@ -1811,10 +1753,10 @@ static void rd_kafka_txn_handle_AddOffsetsToTxn (rd_kafka_t *rk, err = ErrorCode; goto done; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - done: +done: if (err) { rd_assert(rk->rk_eos.txn_req_cnt > 0); rk->rk_eos.txn_req_cnt--; @@ -1825,8 +1767,7 @@ static void rd_kafka_txn_handle_AddOffsetsToTxn (rd_kafka_t *rk, if (rd_timeout_expired(remains_ms) && !err) err = RD_KAFKA_RESP_ERR__TIMED_OUT; - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR_NO_ERROR: break; @@ -1851,8 +1792,8 @@ static void rd_kafka_txn_handle_AddOffsetsToTxn (rd_kafka_t *rk, case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE: case RD_KAFKA_RESP_ERR_NOT_COORDINATOR: case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT: - actions |= RD_KAFKA_ERR_ACTION_RETRY| - RD_KAFKA_ERR_ACTION_REFRESH; + actions |= + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH; break; case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED: @@ -1885,8 +1826,7 @@ static void 
rd_kafka_txn_handle_AddOffsetsToTxn (rd_kafka_t *rk, rd_kafka_dbg(rk, EOS, "ADDOFFSETS", "AddOffsetsToTxn response from %s: %s (%s)", rkb ? rd_kafka_broker_name(rkb) : "(none)", - rd_kafka_err2name(err), - rd_kafka_actions2str(actions)); + rd_kafka_err2name(err), rd_kafka_actions2str(actions)); /* All unhandled errors are considered permanent */ if (err && !actions) @@ -1908,7 +1848,7 @@ static void rd_kafka_txn_handle_AddOffsetsToTxn (rd_kafka_t *rk, "error is retriable", rd_kafka_broker_name(rkb), rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent/1000)); + (int)(request->rkbuf_ts_sent / 1000)); if (!rd_timeout_expired(remains_ms) && rd_kafka_buf_retry(rk->rk_eos.txn_coord, request)) { @@ -1930,39 +1870,34 @@ static void rd_kafka_txn_handle_AddOffsetsToTxn (rd_kafka_t *rk, if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) rd_kafka_txn_set_abortable_error( - rk, err, - "Failed to add offsets to " - "transaction on broker %s: " - "%s (after %dms)", - rd_kafka_broker_name(rkb), - rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent/1000)); + rk, err, + "Failed to add offsets to " + "transaction on broker %s: " + "%s (after %dms)", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent / 1000)); if (!err) { /* Step 2: Commit offsets to transaction on the * group coordinator. 
*/ - rd_kafka_coord_req(rk, - RD_KAFKA_COORD_GROUP, - rko->rko_u.txn.cgmetadata->group_id, - rd_kafka_txn_send_TxnOffsetCommitRequest, - rko, - rd_timeout_remains_limit0( - remains_ms, - rk->rk_conf.socket_timeout_ms), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_txn_handle_TxnOffsetCommit, - rko); + rd_kafka_coord_req( + rk, RD_KAFKA_COORD_GROUP, + rko->rko_u.txn.cgmetadata->group_id, + rd_kafka_txn_send_TxnOffsetCommitRequest, rko, + rd_timeout_remains_limit0(remains_ms, + rk->rk_conf.socket_timeout_ms), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_txn_handle_TxnOffsetCommit, rko); } else { rd_kafka_txn_curr_api_reply( - rd_kafka_q_keep(rko->rko_replyq.q), actions, err, - "Failed to add offsets to transaction on broker %s: " - "%s (after %dms)", - rd_kafka_broker_name(rkb), - rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent/1000)); + rd_kafka_q_keep(rko->rko_replyq.q), actions, err, + "Failed to add offsets to transaction on broker %s: " + "%s (after %dms)", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent / 1000)); rd_kafka_op_destroy(rko); } @@ -1976,9 +1911,9 @@ static void rd_kafka_txn_handle_AddOffsetsToTxn (rd_kafka_t *rk, * @locality rdkafka main thread */ static rd_kafka_op_res_t -rd_kafka_txn_op_send_offsets_to_transaction (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +rd_kafka_txn_op_send_offsets_to_transaction(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; char errstr[512]; rd_kafka_error_t *error; @@ -1992,20 +1927,20 @@ rd_kafka_txn_op_send_offsets_to_transaction (rd_kafka_t *rk, rd_kafka_wrlock(rk); if ((error = rd_kafka_txn_require_state( - rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION))) { + rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION))) { rd_kafka_wrunlock(rk); goto err; } rd_kafka_wrunlock(rk); - pid = rd_kafka_idemp_get_pid0(rk, rd_false/*dont-lock*/); + pid = rd_kafka_idemp_get_pid0(rk, rd_false /*dont-lock*/); if 
(!rd_kafka_pid_valid(pid)) { rd_dassert(!*"BUG: No PID despite proper transaction state"); error = rd_kafka_error_new_retriable( - RD_KAFKA_RESP_ERR__STATE, - "No PID available (idempotence state %s)", - rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); + RD_KAFKA_RESP_ERR__STATE, + "No PID available (idempotence state %s)", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); goto err; } @@ -2014,14 +1949,10 @@ rd_kafka_txn_op_send_offsets_to_transaction (rd_kafka_t *rk, * 2) send TxnOffsetCommitRequest to group coordinator. */ err = rd_kafka_AddOffsetsToTxnRequest( - rk->rk_eos.txn_coord, - rk->rk_conf.eos.transactional_id, - pid, - rko->rko_u.txn.cgmetadata->group_id, - errstr, sizeof(errstr), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_txn_handle_AddOffsetsToTxn, - rko); + rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid, + rko->rko_u.txn.cgmetadata->group_id, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_AddOffsetsToTxn, + rko); if (err) { error = rd_kafka_error_new_retriable(err, "%s", errstr); @@ -2032,7 +1963,7 @@ rd_kafka_txn_op_send_offsets_to_transaction (rd_kafka_t *rk, return RD_KAFKA_OP_RES_KEEP; /* the rko is passed to AddOffsetsToTxn */ - err: +err: rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), error); @@ -2043,12 +1974,11 @@ rd_kafka_txn_op_send_offsets_to_transaction (rd_kafka_t *rk, * error returns: * ERR__TRANSPORT - retryable */ -rd_kafka_error_t * -rd_kafka_send_offsets_to_transaction ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - const rd_kafka_consumer_group_metadata_t *cgmetadata, - int timeout_ms) { +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + const rd_kafka_consumer_group_metadata_t *cgmetadata, + int timeout_ms) { rd_kafka_error_t *error; rd_kafka_op_t *rko; rd_kafka_topic_partition_list_t *valid_offsets; @@ -2058,11 +1988,11 @@ rd_kafka_send_offsets_to_transaction 
( if (!cgmetadata || !offsets) return rd_kafka_error_new( - RD_KAFKA_RESP_ERR__INVALID_ARG, - "cgmetadata and offsets are required parameters"); + RD_KAFKA_RESP_ERR__INVALID_ARG, + "cgmetadata and offsets are required parameters"); valid_offsets = rd_kafka_topic_partition_list_match( - offsets, rd_kafka_topic_partition_match_valid_offset, NULL); + offsets, rd_kafka_topic_partition_match_valid_offset, NULL); if (valid_offsets->cnt == 0) { /* No valid offsets, e.g., nothing was consumed, @@ -2073,25 +2003,23 @@ rd_kafka_send_offsets_to_transaction ( rd_kafka_topic_partition_list_sort_by_topic(valid_offsets); - rko = rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rko = rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, rd_kafka_txn_op_send_offsets_to_transaction); rko->rko_u.txn.offsets = valid_offsets; rko->rko_u.txn.cgmetadata = - rd_kafka_consumer_group_metadata_dup(cgmetadata); + rd_kafka_consumer_group_metadata_dup(cgmetadata); if (timeout_ms > rk->rk_conf.eos.transaction_timeout_ms) timeout_ms = rk->rk_conf.eos.transaction_timeout_ms; rko->rko_u.txn.abs_timeout = rd_timeout_init(timeout_ms); return rd_kafka_txn_curr_api_req( - rk, __FUNCTION__, rko, - RD_POLL_INFINITE, /* rely on background code to time out */ - RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT); + rk, __FUNCTION__, rko, + RD_POLL_INFINITE, /* rely on background code to time out */ + RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT); } - - /** * @brief Successfully complete the transaction. * @@ -2100,9 +2028,8 @@ rd_kafka_send_offsets_to_transaction ( * @locality rdkafka main thread * @locks rd_kafka_wrlock(rk) MUST be held */ -static void rd_kafka_txn_complete (rd_kafka_t *rk, rd_bool_t is_commit) { - rd_kafka_dbg(rk, EOS, "TXNCOMPLETE", - "Transaction successfully %s", +static void rd_kafka_txn_complete(rd_kafka_t *rk, rd_bool_t is_commit) { + rd_kafka_dbg(rk, EOS, "TXNCOMPLETE", "Transaction successfully %s", is_commit ? 
"committed" : "aborted"); /* Clear all transaction partition state */ @@ -2110,7 +2037,7 @@ static void rd_kafka_txn_complete (rd_kafka_t *rk, rd_bool_t is_commit) { rd_kafka_txn_clear_partitions(rk); rk->rk_eos.txn_requires_epoch_bump = rd_false; - rk->rk_eos.txn_req_cnt = 0; + rk->rk_eos.txn_req_cnt = 0; rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY); } @@ -2123,14 +2050,14 @@ static void rd_kafka_txn_complete (rd_kafka_t *rk, rd_bool_t is_commit) { * @locality rdkafka main thread * @locks none */ -static void rd_kafka_txn_handle_EndTxn (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_txn_handle_EndTxn(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; - rd_kafka_q_t *rkq = opaque; + rd_kafka_q_t *rkq = opaque; int16_t ErrorCode; int actions = 0; rd_bool_t is_commit, may_retry = rd_false; @@ -2150,11 +2077,11 @@ static void rd_kafka_txn_handle_EndTxn (rd_kafka_t *rk, err = ErrorCode; goto err; - err_parse: +err_parse: err = rkbuf->rkbuf_err; /* FALLTHRU */ - err: +err: rd_kafka_wrlock(rk); if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION) { @@ -2175,36 +2102,32 @@ static void rd_kafka_txn_handle_EndTxn (rd_kafka_t *rk, if (err) { rd_kafka_txn_curr_api_reply( - rkq, - RD_KAFKA_ERR_ACTION_PERMANENT, - rk->rk_eos.txn_err, - "EndTxn failed with %s but transaction " - "had already failed due to: %s", - rd_kafka_err2name(err), - rk->rk_eos.txn_errstr); + rkq, RD_KAFKA_ERR_ACTION_PERMANENT, + rk->rk_eos.txn_err, + "EndTxn failed with %s but transaction " + "had already failed due to: %s", + rd_kafka_err2name(err), rk->rk_eos.txn_errstr); } else { /* If the transaction has failed locally but * this EndTxn commit succeeded we'll raise * a fatal error. 
*/ if (is_commit) rd_kafka_txn_curr_api_reply( - rkq, - RD_KAFKA_ERR_ACTION_FATAL, - rk->rk_eos.txn_err, - "Transaction commit succeeded on the " - "broker but the transaction " - "had already failed locally due to: %s", - rk->rk_eos.txn_errstr); + rkq, RD_KAFKA_ERR_ACTION_FATAL, + rk->rk_eos.txn_err, + "Transaction commit succeeded on the " + "broker but the transaction " + "had already failed locally due to: %s", + rk->rk_eos.txn_errstr); else rd_kafka_txn_curr_api_reply( - rkq, - RD_KAFKA_ERR_ACTION_PERMANENT, - rk->rk_eos.txn_err, - "Transaction abort succeeded on the " - "broker but the transaction" - "had already failed locally due to: %s", - rk->rk_eos.txn_errstr); + rkq, RD_KAFKA_ERR_ACTION_PERMANENT, + rk->rk_eos.txn_err, + "Transaction abort succeeded on the " + "broker but the transaction" + "had already failed locally due to: %s", + rk->rk_eos.txn_errstr); } return; @@ -2225,16 +2148,15 @@ static void rd_kafka_txn_handle_EndTxn (rd_kafka_t *rk, /* EndTxn successful */ if (is_commit) rd_kafka_txn_set_state( - rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED); + rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED); else rd_kafka_txn_set_state( - rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); + rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); } rd_kafka_wrunlock(rk); - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR_NO_ERROR: break; @@ -2249,15 +2171,14 @@ static void rd_kafka_txn_handle_EndTxn (rd_kafka_t *rk, break; case RD_KAFKA_RESP_ERR__TRANSPORT: - actions |= RD_KAFKA_ERR_ACTION_RETRY| - RD_KAFKA_ERR_ACTION_REFRESH; + actions |= + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH; break; case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE: case RD_KAFKA_RESP_ERR_NOT_COORDINATOR: rd_kafka_wrlock(rk); - rd_kafka_txn_coord_set(rk, NULL, - "EndTxn failed: %s", + rd_kafka_txn_coord_set(rk, NULL, "EndTxn failed: %s", rd_kafka_err2str(err)); rd_kafka_wrunlock(rk); actions |= RD_KAFKA_ERR_ACTION_RETRY; @@ -2297,10 +2218,11 @@ static void rd_kafka_txn_handle_EndTxn (rd_kafka_t *rk, 
rd_kafka_txn_coord_timer_start(rk, 50); if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) - rd_kafka_txn_set_abortable_error(rk, err, - "Failed to end transaction: " - "%s", - rd_kafka_err2str(err)); + rd_kafka_txn_set_abortable_error( + rk, err, + "Failed to end transaction: " + "%s", + rd_kafka_err2str(err)); else if (may_retry && actions & RD_KAFKA_ERR_ACTION_RETRY && rd_kafka_buf_retry(rkb, request)) return; @@ -2308,9 +2230,8 @@ static void rd_kafka_txn_handle_EndTxn (rd_kafka_t *rk, if (err) rd_kafka_txn_curr_api_reply( - rkq, actions, err, - "EndTxn %s failed: %s", is_commit ? "commit" : "abort", - rd_kafka_err2str(err)); + rkq, actions, err, "EndTxn %s failed: %s", + is_commit ? "commit" : "abort", rd_kafka_err2str(err)); else rd_kafka_txn_curr_api_reply(rkq, 0, RD_KAFKA_RESP_ERR_NO_ERROR, NULL); @@ -2325,9 +2246,9 @@ static void rd_kafka_txn_handle_EndTxn (rd_kafka_t *rk, * @locality rdkafka main thread */ static rd_kafka_op_res_t -rd_kafka_txn_op_commit_transaction (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +rd_kafka_txn_op_commit_transaction(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_error_t *error; rd_kafka_resp_err_t err; char errstr[512]; @@ -2340,9 +2261,8 @@ rd_kafka_txn_op_commit_transaction (rd_kafka_t *rk, rd_kafka_wrlock(rk); if ((error = rd_kafka_txn_require_state( - rk, - RD_KAFKA_TXN_STATE_BEGIN_COMMIT, - RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) + rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT, + RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) goto done; if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED) { @@ -2357,10 +2277,11 @@ rd_kafka_txn_op_commit_transaction (rd_kafka_t *rk, dr_fails = rd_atomic64_get(&rk->rk_eos.txn_dr_fails); if (unlikely(dr_fails > 0)) { error = rd_kafka_error_new_txn_requires_abort( - RD_KAFKA_RESP_ERR__INCONSISTENT, - "%"PRId64" message(s) failed delivery " - "(see individual delivery reports)", - dr_fails); + RD_KAFKA_RESP_ERR__INCONSISTENT, + "%" PRId64 + " message(s) failed 
delivery " + "(see individual delivery reports)", + dr_fails); goto done; } @@ -2375,24 +2296,21 @@ rd_kafka_txn_op_commit_transaction (rd_kafka_t *rk, goto done; } - pid = rd_kafka_idemp_get_pid0(rk, rd_false/*dont-lock*/); + pid = rd_kafka_idemp_get_pid0(rk, rd_false /*dont-lock*/); if (!rd_kafka_pid_valid(pid)) { rd_dassert(!*"BUG: No PID despite proper transaction state"); error = rd_kafka_error_new_retriable( - RD_KAFKA_RESP_ERR__STATE, - "No PID available (idempotence state %s)", - rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); + RD_KAFKA_RESP_ERR__STATE, + "No PID available (idempotence state %s)", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); goto done; } - err = rd_kafka_EndTxnRequest(rk->rk_eos.txn_coord, - rk->rk_conf.eos.transactional_id, - pid, - rd_true /* commit */, - errstr, sizeof(errstr), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_txn_handle_EndTxn, - rd_kafka_q_keep(rko->rko_replyq.q)); + err = rd_kafka_EndTxnRequest( + rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid, + rd_true /* commit */, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, + rd_kafka_q_keep(rko->rko_replyq.q)); if (err) { error = rd_kafka_error_new_retriable(err, "%s", errstr); goto done; @@ -2404,16 +2322,15 @@ rd_kafka_txn_op_commit_transaction (rd_kafka_t *rk, return RD_KAFKA_OP_RES_HANDLED; - done: +done: rd_kafka_wrunlock(rk); /* If the returned error is an abortable error * also set the current transaction state accordingly. 
*/ if (rd_kafka_error_txn_requires_abort(error)) - rd_kafka_txn_set_abortable_error( - rk, - rd_kafka_error_code(error), - "%s", rd_kafka_error_string(error)); + rd_kafka_txn_set_abortable_error(rk, rd_kafka_error_code(error), + "%s", + rd_kafka_error_string(error)); rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), error); @@ -2428,10 +2345,9 @@ rd_kafka_txn_op_commit_transaction (rd_kafka_t *rk, * @locks none * @locality rdkafka main thread */ -static rd_kafka_op_res_t -rd_kafka_txn_op_begin_commit (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_txn_op_begin_commit(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_error_t *error; if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) @@ -2441,10 +2357,9 @@ rd_kafka_txn_op_begin_commit (rd_kafka_t *rk, rd_kafka_wrlock(rk); if ((error = rd_kafka_txn_require_state( - rk, - RD_KAFKA_TXN_STATE_IN_TRANSACTION, - RD_KAFKA_TXN_STATE_BEGIN_COMMIT, - RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) + rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION, + RD_KAFKA_TXN_STATE_BEGIN_COMMIT, + RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) goto done; if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED) @@ -2453,7 +2368,7 @@ rd_kafka_txn_op_begin_commit (rd_kafka_t *rk, rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT); /* FALLTHRU */ - done: +done: rd_kafka_wrunlock(rk); rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), error); @@ -2469,9 +2384,9 @@ rd_kafka_txn_op_begin_commit (rd_kafka_t *rk, * @locality rdkafka main thread */ static rd_kafka_op_res_t -rd_kafka_txn_op_commit_transaction_ack (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +rd_kafka_txn_op_commit_transaction_ack(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_error_t *error; if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) @@ -2480,16 +2395,15 @@ rd_kafka_txn_op_commit_transaction_ack (rd_kafka_t *rk, rd_kafka_wrlock(rk); if ((error = 
rd_kafka_txn_require_state( - rk, - RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) + rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) goto done; rd_kafka_dbg(rk, EOS, "TXNCOMMIT", "Committed transaction now acked by application"); - rd_kafka_txn_complete(rk, rd_true/*is commit*/); + rd_kafka_txn_complete(rk, rd_true /*is commit*/); /* FALLTHRU */ - done: +done: rd_kafka_wrunlock(rk); rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), @@ -2499,8 +2413,7 @@ rd_kafka_txn_op_commit_transaction_ack (rd_kafka_t *rk, } -rd_kafka_error_t * -rd_kafka_commit_transaction (rd_kafka_t *rk, int timeout_ms) { +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms) { rd_kafka_error_t *error; rd_kafka_resp_err_t err; rd_ts_t abs_timeout; @@ -2519,11 +2432,11 @@ rd_kafka_commit_transaction (rd_kafka_t *rk, int timeout_ms) { /* Begin commit */ error = rd_kafka_txn_curr_api_req( - rk, "commit_transaction (begin)", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_begin_commit), - rd_timeout_remains(abs_timeout), - RD_KAFKA_TXN_CURR_API_F_FOR_REUSE| + rk, "commit_transaction (begin)", + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rd_kafka_txn_op_begin_commit), + rd_timeout_remains(abs_timeout), + RD_KAFKA_TXN_CURR_API_F_FOR_REUSE | RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT); if (error) return error; @@ -2541,28 +2454,26 @@ rd_kafka_commit_transaction (rd_kafka_t *rk, int timeout_ms) { if (err == RD_KAFKA_RESP_ERR__TIMED_OUT) error = rd_kafka_error_new_retriable( - err, - "Failed to flush all outstanding messages " - "within the transaction timeout: " - "%d message(s) remaining%s", - rd_kafka_outq_len(rk), - /* In case event queue delivery reports - * are enabled and there is no dr callback - * we instruct the developer to poll - * the event queue separately, since we - * can't do it for them. */ - ((rk->rk_conf.enabled_events & - RD_KAFKA_EVENT_DR) && - !rk->rk_conf.dr_msg_cb && - !rk->rk_conf.dr_cb) ? 
- ": the event queue must be polled " - "for delivery report events in a separate " - "thread or prior to calling commit" : ""); + err, + "Failed to flush all outstanding messages " + "within the transaction timeout: " + "%d message(s) remaining%s", + rd_kafka_outq_len(rk), + /* In case event queue delivery reports + * are enabled and there is no dr callback + * we instruct the developer to poll + * the event queue separately, since we + * can't do it for them. */ + ((rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) && + !rk->rk_conf.dr_msg_cb && !rk->rk_conf.dr_cb) + ? ": the event queue must be polled " + "for delivery report events in a separate " + "thread or prior to calling commit" + : ""); else error = rd_kafka_error_new_retriable( - err, - "Failed to flush outstanding messages: %s", - rd_kafka_err2str(err)); + err, "Failed to flush outstanding messages: %s", + rd_kafka_err2str(err)); rd_kafka_txn_curr_api_reset(rk, rd_false); @@ -2576,23 +2487,22 @@ rd_kafka_commit_transaction (rd_kafka_t *rk, int timeout_ms) { /* Commit transaction */ error = rd_kafka_txn_curr_api_req( - rk, "commit_transaction", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_commit_transaction), - rd_timeout_remains(abs_timeout), - RD_KAFKA_TXN_CURR_API_F_REUSE| - RD_KAFKA_TXN_CURR_API_F_FOR_REUSE| + rk, "commit_transaction", + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rd_kafka_txn_op_commit_transaction), + rd_timeout_remains(abs_timeout), + RD_KAFKA_TXN_CURR_API_F_REUSE | RD_KAFKA_TXN_CURR_API_F_FOR_REUSE | RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT); if (error) return error; /* Last call is to transition from COMMIT_NOT_ACKED to READY */ return rd_kafka_txn_curr_api_req( - rk, "commit_transaction (ack)", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_commit_transaction_ack), - rd_timeout_remains(abs_timeout), - RD_KAFKA_TXN_CURR_API_F_REUSE| + rk, "commit_transaction (ack)", + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rd_kafka_txn_op_commit_transaction_ack), + 
rd_timeout_remains(abs_timeout), + RD_KAFKA_TXN_CURR_API_F_REUSE | RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT); } @@ -2604,10 +2514,9 @@ rd_kafka_commit_transaction (rd_kafka_t *rk, int timeout_ms) { * @locks none * @locality rdkafka main thread */ -static rd_kafka_op_res_t -rd_kafka_txn_op_begin_abort (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_txn_op_begin_abort(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_error_t *error; rd_bool_t clear_pending = rd_false; @@ -2616,22 +2525,20 @@ rd_kafka_txn_op_begin_abort (rd_kafka_t *rk, rd_kafka_wrlock(rk); if ((error = rd_kafka_txn_require_state( - rk, - RD_KAFKA_TXN_STATE_IN_TRANSACTION, - RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, - RD_KAFKA_TXN_STATE_ABORTABLE_ERROR, - RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) + rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION, + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, + RD_KAFKA_TXN_STATE_ABORTABLE_ERROR, + RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) goto done; if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED) goto done; - rd_kafka_txn_set_state( - rk, RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION); + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION); clear_pending = rd_true; /* FALLTHRU */ - done: +done: rd_kafka_wrunlock(rk); if (clear_pending) { @@ -2653,10 +2560,9 @@ rd_kafka_txn_op_begin_abort (rd_kafka_t *rk, * @locks none * @locality rdkafka main thread */ -static rd_kafka_op_res_t -rd_kafka_txn_op_abort_transaction (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_txn_op_abort_transaction(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_error_t *error; rd_kafka_resp_err_t err; char errstr[512]; @@ -2668,9 +2574,8 @@ rd_kafka_txn_op_abort_transaction (rd_kafka_t *rk, rd_kafka_wrlock(rk); if ((error = rd_kafka_txn_require_state( - rk, - RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, - RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) + rk, 
RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, + RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) goto done; if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED) { @@ -2698,10 +2603,9 @@ rd_kafka_txn_op_abort_transaction (rd_kafka_t *rk, */ if (rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED) { - rd_kafka_dbg(rk, EOS, "TXNABORT", - "PID already bumped"); + rd_kafka_dbg(rk, EOS, "TXNABORT", "PID already bumped"); rd_kafka_txn_set_state( - rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); + rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); goto done; } @@ -2732,24 +2636,21 @@ rd_kafka_txn_op_abort_transaction (rd_kafka_t *rk, goto done; } - pid = rd_kafka_idemp_get_pid0(rk, rd_false/*dont-lock*/); + pid = rd_kafka_idemp_get_pid0(rk, rd_false /*dont-lock*/); if (!rd_kafka_pid_valid(pid)) { rd_dassert(!*"BUG: No PID despite proper transaction state"); error = rd_kafka_error_new_retriable( - RD_KAFKA_RESP_ERR__STATE, - "No PID available (idempotence state %s)", - rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); + RD_KAFKA_RESP_ERR__STATE, + "No PID available (idempotence state %s)", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); goto done; } - err = rd_kafka_EndTxnRequest(rk->rk_eos.txn_coord, - rk->rk_conf.eos.transactional_id, - pid, - rd_false /* abort */, - errstr, sizeof(errstr), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_txn_handle_EndTxn, - rd_kafka_q_keep(rko->rko_replyq.q)); + err = rd_kafka_EndTxnRequest( + rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid, + rd_false /* abort */, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, + rd_kafka_q_keep(rko->rko_replyq.q)); if (err) { error = rd_kafka_error_new_retriable(err, "%s", errstr); goto done; @@ -2759,7 +2660,7 @@ rd_kafka_txn_op_abort_transaction (rd_kafka_t *rk, return RD_KAFKA_OP_RES_HANDLED; - done: +done: rd_kafka_wrunlock(rk); rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), @@ -2778,9 +2679,9 @@ rd_kafka_txn_op_abort_transaction (rd_kafka_t 
*rk, * @locality rdkafka main thread */ static rd_kafka_op_res_t -rd_kafka_txn_op_abort_transaction_ack (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +rd_kafka_txn_op_abort_transaction_ack(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_error_t *error; if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) @@ -2789,16 +2690,15 @@ rd_kafka_txn_op_abort_transaction_ack (rd_kafka_t *rk, rd_kafka_wrlock(rk); if ((error = rd_kafka_txn_require_state( - rk, - RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) + rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) goto done; rd_kafka_dbg(rk, EOS, "TXNABORT", "Aborted transaction now acked by application"); - rd_kafka_txn_complete(rk, rd_false/*is abort*/); + rd_kafka_txn_complete(rk, rd_false /*is abort*/); /* FALLTHRU */ - done: +done: rd_kafka_wrunlock(rk); rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), @@ -2809,9 +2709,7 @@ rd_kafka_txn_op_abort_transaction_ack (rd_kafka_t *rk, - -rd_kafka_error_t * -rd_kafka_abort_transaction (rd_kafka_t *rk, int timeout_ms) { +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms) { rd_kafka_error_t *error; rd_kafka_resp_err_t err; rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); @@ -2830,11 +2728,11 @@ rd_kafka_abort_transaction (rd_kafka_t *rk, int timeout_ms) { */ error = rd_kafka_txn_curr_api_req( - rk, "abort_transaction (begin)", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_begin_abort), - RD_POLL_INFINITE, /* begin_abort is immediate, no timeout */ - RD_KAFKA_TXN_CURR_API_F_FOR_REUSE| + rk, "abort_transaction (begin)", + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rd_kafka_txn_op_begin_abort), + RD_POLL_INFINITE, /* begin_abort is immediate, no timeout */ + RD_KAFKA_TXN_CURR_API_F_FOR_REUSE | RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT); if (error) return error; @@ -2847,31 +2745,29 @@ rd_kafka_abort_transaction (rd_kafka_t *rk, int timeout_ms) { /* Purge all queued messages. 
* Will need to wait for messages in-flight since purging these * messages may lead to gaps in the idempotent producer sequences. */ - err = rd_kafka_purge(rk, - RD_KAFKA_PURGE_F_QUEUE| - RD_KAFKA_PURGE_F_ABORT_TXN); + err = rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE | + RD_KAFKA_PURGE_F_ABORT_TXN); /* Serve delivery reports for the purged messages. */ if ((err = rd_kafka_flush(rk, rd_timeout_remains(abs_timeout)))) { /* FIXME: Not sure these errors matter that much */ if (err == RD_KAFKA_RESP_ERR__TIMED_OUT) error = rd_kafka_error_new_retriable( - err, - "Failed to flush all outstanding messages " - "within the transaction timeout: " - "%d message(s) remaining%s", - rd_kafka_outq_len(rk), - (rk->rk_conf.enabled_events & - RD_KAFKA_EVENT_DR) ? - ": the event queue must be polled " - "for delivery report events in a separate " - "thread or prior to calling abort" : ""); + err, + "Failed to flush all outstanding messages " + "within the transaction timeout: " + "%d message(s) remaining%s", + rd_kafka_outq_len(rk), + (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) + ? 
": the event queue must be polled " + "for delivery report events in a separate " + "thread or prior to calling abort" + : ""); else error = rd_kafka_error_new_retriable( - err, - "Failed to flush outstanding messages: %s", - rd_kafka_err2str(err)); + err, "Failed to flush outstanding messages: %s", + rd_kafka_err2str(err)); rd_kafka_txn_curr_api_reset(rk, rd_false); @@ -2884,24 +2780,21 @@ rd_kafka_abort_transaction (rd_kafka_t *rk, int timeout_ms) { "Transaction abort message purge and flush complete"); error = rd_kafka_txn_curr_api_req( - rk, "abort_transaction", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_abort_transaction), - rd_timeout_remains(abs_timeout), - RD_KAFKA_TXN_CURR_API_F_FOR_REUSE| - RD_KAFKA_TXN_CURR_API_F_REUSE| + rk, "abort_transaction", + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rd_kafka_txn_op_abort_transaction), + rd_timeout_remains(abs_timeout), + RD_KAFKA_TXN_CURR_API_F_FOR_REUSE | RD_KAFKA_TXN_CURR_API_F_REUSE | RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT); if (error) return error; /* Last call is to transition from ABORT_NOT_ACKED to READY. 
*/ return rd_kafka_txn_curr_api_req( - rk, "abort_transaction (ack)", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_abort_transaction_ack), - rd_timeout_remains(abs_timeout), - RD_KAFKA_TXN_CURR_API_F_REUSE); - + rk, "abort_transaction (ack)", + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rd_kafka_txn_op_abort_transaction_ack), + rd_timeout_remains(abs_timeout), RD_KAFKA_TXN_CURR_API_F_REUSE); } @@ -2913,7 +2806,7 @@ rd_kafka_abort_transaction (rd_kafka_t *rk, int timeout_ms) { * @locks none */ -static void rd_kafka_txn_coord_timer_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_txn_coord_timer_cb(rd_kafka_timers_t *rkts, void *arg) { rd_kafka_t *rk = arg; rd_kafka_wrlock(rk); @@ -2927,13 +2820,11 @@ static void rd_kafka_txn_coord_timer_cb (rd_kafka_timers_t *rkts, void *arg) { * @locality rdkafka main thread * @locks none */ -static void rd_kafka_txn_coord_timer_start (rd_kafka_t *rk, int timeout_ms) { +static void rd_kafka_txn_coord_timer_start(rd_kafka_t *rk, int timeout_ms) { rd_assert(rd_kafka_is_transactional(rk)); - rd_kafka_timer_start_oneshot(&rk->rk_timers, - &rk->rk_eos.txn_coord_tmr, + rd_kafka_timer_start_oneshot(&rk->rk_timers, &rk->rk_eos.txn_coord_tmr, /* don't restart if already started */ - rd_false, - 1000 * timeout_ms, + rd_false, 1000 * timeout_ms, rd_kafka_txn_coord_timer_cb, rk); } @@ -2944,13 +2835,12 @@ static void rd_kafka_txn_coord_timer_start (rd_kafka_t *rk, int timeout_ms) { * @locality rdkafka main thread * @locks none */ -static void -rd_kafka_txn_handle_FindCoordinator (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_txn_handle_FindCoordinator(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; int16_t ErrorCode; rd_kafkap_str_t Host; @@ -2973,8 +2863,8 @@ 
rd_kafka_txn_handle_FindCoordinator (rd_kafka_t *rk, rd_kafkap_str_t ErrorMsg; rd_kafka_buf_read_str(rkbuf, &ErrorMsg); if (ErrorCode) - rd_snprintf(errstr, sizeof(errstr), - "%.*s", RD_KAFKAP_STR_PR(&ErrorMsg)); + rd_snprintf(errstr, sizeof(errstr), "%.*s", + RD_KAFKAP_STR_PR(&ErrorMsg)); } if ((err = ErrorCode)) @@ -2986,7 +2876,7 @@ rd_kafka_txn_handle_FindCoordinator (rd_kafka_t *rk, rd_rkb_dbg(rkb, EOS, "TXNCOORD", "FindCoordinator response: " - "Transaction coordinator is broker %"PRId32" (%.*s:%d)", + "Transaction coordinator is broker %" PRId32 " (%.*s:%d)", NodeId, RD_KAFKAP_STR_PR(&Host), (int)Port); rd_kafka_rdlock(rk); @@ -2994,7 +2884,7 @@ rd_kafka_txn_handle_FindCoordinator (rd_kafka_t *rk, err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; else if (!(rkb = rd_kafka_broker_find_by_nodeid(rk, NodeId))) { rd_snprintf(errstr, sizeof(errstr), - "Transaction coordinator %"PRId32" is unknown", + "Transaction coordinator %" PRId32 " is unknown", NodeId); err = RD_KAFKA_RESP_ERR__UNKNOWN_BROKER; } @@ -3011,12 +2901,11 @@ rd_kafka_txn_handle_FindCoordinator (rd_kafka_t *rk, return; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__DESTROY: return; @@ -3024,11 +2913,10 @@ rd_kafka_txn_handle_FindCoordinator (rd_kafka_t *rk, case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED: rd_kafka_wrlock(rk); rd_kafka_txn_set_fatal_error( - rkb->rkb_rk, RD_DONT_LOCK, err, - "Failed to find transaction coordinator: %s: %s%s%s", - rd_kafka_broker_name(rkb), - rd_kafka_err2str(err), - *errstr ? ": " : "", errstr); + rkb->rkb_rk, RD_DONT_LOCK, err, + "Failed to find transaction coordinator: %s: %s%s%s", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + *errstr ? 
": " : "", errstr); rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR); rd_kafka_wrunlock(rk); @@ -3043,16 +2931,14 @@ rd_kafka_txn_handle_FindCoordinator (rd_kafka_t *rk, } rd_kafka_wrlock(rk); - rd_kafka_txn_coord_set(rk, NULL, - "Failed to find transaction coordinator: %s: %s", - rd_kafka_err2name(err), - *errstr ? errstr : rd_kafka_err2str(err)); + rd_kafka_txn_coord_set( + rk, NULL, "Failed to find transaction coordinator: %s: %s", + rd_kafka_err2name(err), *errstr ? errstr : rd_kafka_err2str(err)); rd_kafka_wrunlock(rk); } - /** * @brief Query for the transaction coordinator. * @@ -3061,7 +2947,7 @@ rd_kafka_txn_handle_FindCoordinator (rd_kafka_t *rk, * @locality rdkafka main thread * @locks rd_kafka_wrlock(rk) MUST be held. */ -rd_bool_t rd_kafka_txn_coord_query (rd_kafka_t *rk, const char *reason) { +rd_bool_t rd_kafka_txn_coord_query(rd_kafka_t *rk, const char *reason) { rd_kafka_resp_err_t err; char errstr[512]; rd_kafka_broker_t *rkb; @@ -3077,8 +2963,7 @@ rd_bool_t rd_kafka_txn_coord_query (rd_kafka_t *rk, const char *reason) { } /* Find usable broker to query for the txn coordinator */ - rkb = rd_kafka_idemp_broker_any(rk, &err, - errstr, sizeof(errstr)); + rkb = rd_kafka_idemp_broker_any(rk, &err, errstr, sizeof(errstr)); if (!rkb) { rd_kafka_dbg(rk, EOS, "TXNCOORD", "Unable to query for transaction coordinator: " @@ -3098,17 +2983,15 @@ rd_bool_t rd_kafka_txn_coord_query (rd_kafka_t *rk, const char *reason) { /* Send FindCoordinator request */ err = rd_kafka_FindCoordinatorRequest( - rkb, RD_KAFKA_COORD_TXN, - rk->rk_conf.eos.transactional_id, - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_txn_handle_FindCoordinator, NULL); + rkb, RD_KAFKA_COORD_TXN, rk->rk_conf.eos.transactional_id, + RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_FindCoordinator, + NULL); if (err) { rd_snprintf(errstr, sizeof(errstr), "Failed to send coordinator query to %s: " "%s", - rd_kafka_broker_name(rkb), - rd_kafka_err2str(err)); + rd_kafka_broker_name(rkb), 
rd_kafka_err2str(err)); rd_kafka_broker_destroy(rkb); @@ -3135,8 +3018,10 @@ rd_bool_t rd_kafka_txn_coord_query (rd_kafka_t *rk, const char *reason) { * @locality rdkafka main thread * @locks rd_kafka_wrlock(rk) MUST be held */ -rd_bool_t rd_kafka_txn_coord_set (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *fmt, ...) { +rd_bool_t rd_kafka_txn_coord_set(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *fmt, + ...) { char buf[256]; va_list ap; @@ -3156,11 +3041,10 @@ rd_bool_t rd_kafka_txn_coord_set (rd_kafka_t *rk, rd_kafka_broker_t *rkb, rd_kafka_dbg(rk, EOS, "TXNCOORD", "Transaction coordinator changed from %s -> %s: %s", - rk->rk_eos.txn_curr_coord ? - rd_kafka_broker_name(rk->rk_eos.txn_curr_coord) : - "(none)", - rkb ? rd_kafka_broker_name(rkb) : "(none)", - buf); + rk->rk_eos.txn_curr_coord + ? rd_kafka_broker_name(rk->rk_eos.txn_curr_coord) + : "(none)", + rkb ? rd_kafka_broker_name(rkb) : "(none)", buf); if (rk->rk_eos.txn_curr_coord) rd_kafka_broker_destroy(rk->rk_eos.txn_curr_coord); @@ -3190,16 +3074,15 @@ rd_bool_t rd_kafka_txn_coord_set (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * @locality rdkafka main thread * @locks none */ -void rd_kafka_txn_coord_monitor_cb (rd_kafka_broker_t *rkb) { - rd_kafka_t *rk = rkb->rkb_rk; +void rd_kafka_txn_coord_monitor_cb(rd_kafka_broker_t *rkb) { + rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_broker_state_t state = rd_kafka_broker_get_state(rkb); rd_bool_t is_up; rd_assert(rk->rk_eos.txn_coord == rkb); is_up = rd_kafka_broker_state_is_up(state); - rd_rkb_dbg(rkb, EOS, "COORD", - "Transaction coordinator is now %s", + rd_rkb_dbg(rkb, EOS, "COORD", "Transaction coordinator is now %s", is_up ? 
"up" : "down"); if (!is_up) { @@ -3221,7 +3104,7 @@ void rd_kafka_txn_coord_monitor_cb (rd_kafka_broker_t *rkb) { /* PID is already valid, continue transactional * operations by checking for partitions to register */ rd_kafka_txn_schedule_register_partitions(rk, - 1/*ASAP*/); + 1 /*ASAP*/); } rd_kafka_wrunlock(rk); @@ -3236,23 +3119,21 @@ void rd_kafka_txn_coord_monitor_cb (rd_kafka_broker_t *rkb) { * @locality rdkafka main thread * @locks none */ -void rd_kafka_txns_term (rd_kafka_t *rk) { +void rd_kafka_txns_term(rd_kafka_t *rk) { RD_IF_FREE(rk->rk_eos.txn_init_rkq, rd_kafka_q_destroy); RD_IF_FREE(rk->rk_eos.txn_errstr, rd_free); - rd_kafka_timer_stop(&rk->rk_timers, - &rk->rk_eos.txn_coord_tmr, 1); - rd_kafka_timer_stop(&rk->rk_timers, - &rk->rk_eos.txn_register_parts_tmr, 1); + rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.txn_coord_tmr, 1); + rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.txn_register_parts_tmr, + 1); if (rk->rk_eos.txn_curr_coord) rd_kafka_broker_destroy(rk->rk_eos.txn_curr_coord); /* Logical coordinator */ rd_kafka_broker_persistent_connection_del( - rk->rk_eos.txn_coord, - &rk->rk_eos.txn_coord->rkb_persistconn.coord); + rk->rk_eos.txn_coord, &rk->rk_eos.txn_coord->rkb_persistconn.coord); rd_kafka_broker_monitor_del(&rk->rk_eos.txn_coord_mon); rd_kafka_broker_destroy(rk->rk_eos.txn_coord); rk->rk_eos.txn_coord = NULL; @@ -3272,7 +3153,7 @@ void rd_kafka_txns_term (rd_kafka_t *rk) { * @locality application thread * @locks none */ -void rd_kafka_txns_init (rd_kafka_t *rk) { +void rd_kafka_txns_init(rd_kafka_t *rk) { rd_atomic32_init(&rk->rk_eos.txn_may_enq, 0); mtx_init(&rk->rk_eos.txn_pending_lock, mtx_plain); TAILQ_INIT(&rk->rk_eos.txn_pending_rktps); @@ -3281,16 +3162,14 @@ void rd_kafka_txns_init (rd_kafka_t *rk) { /* Logical coordinator */ rk->rk_eos.txn_coord = - rd_kafka_broker_add_logical(rk, "TxnCoordinator"); + rd_kafka_broker_add_logical(rk, "TxnCoordinator"); rd_kafka_broker_monitor_add(&rk->rk_eos.txn_coord_mon, - 
rk->rk_eos.txn_coord, - rk->rk_ops, + rk->rk_eos.txn_coord, rk->rk_ops, rd_kafka_txn_coord_monitor_cb); rd_kafka_broker_persistent_connection_add( - rk->rk_eos.txn_coord, - &rk->rk_eos.txn_coord->rkb_persistconn.coord); + rk->rk_eos.txn_coord, &rk->rk_eos.txn_coord->rkb_persistconn.coord); rd_atomic64_init(&rk->rk_eos.txn_dr_fails, 0); } diff --git a/src/rdkafka_txnmgr.h b/src/rdkafka_txnmgr.h index d9becac797..3c088d09a6 100644 --- a/src/rdkafka_txnmgr.h +++ b/src/rdkafka_txnmgr.h @@ -36,10 +36,9 @@ * @locality application thread * @locks none */ -static RD_INLINE RD_UNUSED rd_bool_t -rd_kafka_txn_may_enq_msg (rd_kafka_t *rk) { +static RD_INLINE RD_UNUSED rd_bool_t rd_kafka_txn_may_enq_msg(rd_kafka_t *rk) { return !rd_kafka_is_transactional(rk) || - rd_atomic32_get(&rk->rk_eos.txn_may_enq); + rd_atomic32_get(&rk->rk_eos.txn_may_enq); } @@ -50,8 +49,7 @@ rd_kafka_txn_may_enq_msg (rd_kafka_t *rk) { * @locality broker thread * @locks none */ -static RD_INLINE RD_UNUSED rd_bool_t -rd_kafka_txn_may_send_msg (rd_kafka_t *rk) { +static RD_INLINE RD_UNUSED rd_bool_t rd_kafka_txn_may_send_msg(rd_kafka_t *rk) { rd_bool_t ret; rd_kafka_rdlock(rk); @@ -71,7 +69,7 @@ rd_kafka_txn_may_send_msg (rd_kafka_t *rk) { * @locks toppar_lock MUST be held */ static RD_INLINE RD_UNUSED rd_bool_t -rd_kafka_txn_toppar_may_send_msg (rd_kafka_toppar_t *rktp) { +rd_kafka_txn_toppar_may_send_msg(rd_kafka_toppar_t *rktp) { if (likely(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_IN_TXN)) return rd_true; @@ -80,8 +78,7 @@ rd_kafka_txn_toppar_may_send_msg (rd_kafka_toppar_t *rktp) { -void rd_kafka_txn_schedule_register_partitions (rd_kafka_t *rk, - int backoff_ms); +void rd_kafka_txn_schedule_register_partitions(rd_kafka_t *rk, int backoff_ms); /** @@ -95,8 +92,8 @@ void rd_kafka_txn_schedule_register_partitions (rd_kafka_t *rk, * @locality application thread * @locks none */ -static RD_INLINE RD_UNUSED -void rd_kafka_txn_add_partition (rd_kafka_toppar_t *rktp) { +static RD_INLINE RD_UNUSED void 
+rd_kafka_txn_add_partition(rd_kafka_toppar_t *rktp) { rd_kafka_t *rk; rd_bool_t schedule = rd_false; @@ -127,48 +124,48 @@ void rd_kafka_txn_add_partition (rd_kafka_toppar_t *rktp) { mtx_unlock(&rk->rk_eos.txn_pending_lock); rd_kafka_dbg(rk, EOS, "ADDPARTS", - "Marked %.*s [%"PRId32"] as part of transaction: " + "Marked %.*s [%" PRId32 + "] as part of transaction: " "%sscheduling registration", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - schedule ? "" : "not "); + rktp->rktp_partition, schedule ? "" : "not "); /* Schedule registration of partitions by the rdkafka main thread */ if (unlikely(schedule)) - rd_kafka_txn_schedule_register_partitions( - rk, 1/*immediate*/); + rd_kafka_txn_schedule_register_partitions(rk, 1 /*immediate*/); } +void rd_kafka_txn_idemp_state_change(rd_kafka_t *rk, + rd_kafka_idemp_state_t state); -void rd_kafka_txn_idemp_state_change (rd_kafka_t *rk, - rd_kafka_idemp_state_t state); - -void rd_kafka_txn_set_abortable_error0 (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_bool_t requires_epoch_bump, - const char *fmt, ...) - RD_FORMAT(printf, 4, 5); -#define rd_kafka_txn_set_abortable_error(rk,err,...) \ - rd_kafka_txn_set_abortable_error0(rk,err,rd_false,__VA_ARGS__) +void rd_kafka_txn_set_abortable_error0(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_bool_t requires_epoch_bump, + const char *fmt, + ...) RD_FORMAT(printf, 4, 5); +#define rd_kafka_txn_set_abortable_error(rk, err, ...) \ + rd_kafka_txn_set_abortable_error0(rk, err, rd_false, __VA_ARGS__) -#define rd_kafka_txn_set_abortable_error_with_bump(rk,err,...) \ - rd_kafka_txn_set_abortable_error0(rk,err,rd_true,__VA_ARGS__) +#define rd_kafka_txn_set_abortable_error_with_bump(rk, err, ...) \ + rd_kafka_txn_set_abortable_error0(rk, err, rd_true, __VA_ARGS__) -void rd_kafka_txn_set_fatal_error (rd_kafka_t *rk, rd_dolock_t do_lock, - rd_kafka_resp_err_t err, - const char *fmt, ...) 
- RD_FORMAT(printf, 4, 5); +void rd_kafka_txn_set_fatal_error(rd_kafka_t *rk, + rd_dolock_t do_lock, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 4, 5); -rd_bool_t rd_kafka_txn_coord_query (rd_kafka_t *rk, const char *reason); +rd_bool_t rd_kafka_txn_coord_query(rd_kafka_t *rk, const char *reason); -rd_bool_t rd_kafka_txn_coord_set (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *fmt, ...) - RD_FORMAT(printf, 3, 4); +rd_bool_t rd_kafka_txn_coord_set(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *fmt, + ...) RD_FORMAT(printf, 3, 4); -void rd_kafka_txns_term (rd_kafka_t *rk); -void rd_kafka_txns_init (rd_kafka_t *rk); +void rd_kafka_txns_term(rd_kafka_t *rk); +void rd_kafka_txns_init(rd_kafka_t *rk); #endif /* _RDKAFKA_TXNMGR_H_ */ diff --git a/src/rdkafka_zstd.c b/src/rdkafka_zstd.c index 052cb7ca95..68b01a4e1c 100644 --- a/src/rdkafka_zstd.c +++ b/src/rdkafka_zstd.c @@ -37,10 +37,11 @@ #include #include -rd_kafka_resp_err_t -rd_kafka_zstd_decompress (rd_kafka_broker_t *rkb, - char *inbuf, size_t inlen, - void **outbuf, size_t *outlenp) { +rd_kafka_resp_err_t rd_kafka_zstd_decompress(rd_kafka_broker_t *rkb, + char *inbuf, + size_t inlen, + void **outbuf, + size_t *outlenp) { unsigned long long out_bufsize = ZSTD_getFrameContentSize(inbuf, inlen); switch (out_bufsize) { @@ -70,18 +71,18 @@ rd_kafka_zstd_decompress (rd_kafka_broker_t *rkb, if (!decompressed) { rd_rkb_dbg(rkb, MSG, "ZSTD", "Unable to allocate output buffer " - "(%llu bytes for %"PRIusz + "(%llu bytes for %" PRIusz " compressed bytes): %s", out_bufsize, inlen, rd_strerror(errno)); return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; } - ret = ZSTD_decompress(decompressed, (size_t)out_bufsize, - inbuf, inlen); + ret = ZSTD_decompress(decompressed, (size_t)out_bufsize, inbuf, + inlen); if (!ZSTD_isError(ret)) { *outlenp = ret; - *outbuf = decompressed; + *outbuf = decompressed; return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -107,7 +108,8 @@ rd_kafka_zstd_decompress 
(rd_kafka_broker_t *rkb, rd_rkb_dbg(rkb, MSG, "ZSTD", "Unable to decompress ZSTD " - "(input buffer %"PRIusz", output buffer %llu): " + "(input buffer %" PRIusz + ", output buffer %llu): " "output would exceed message.max.bytes (%d)", inlen, out_bufsize, rkb->rkb_rk->rk_conf.max_msg_size); @@ -115,24 +117,26 @@ rd_kafka_zstd_decompress (rd_kafka_broker_t *rkb, } -rd_kafka_resp_err_t -rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, - rd_slice_t *slice, void **outbuf, size_t *outlenp) { +rd_kafka_resp_err_t rd_kafka_zstd_compress(rd_kafka_broker_t *rkb, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp) { ZSTD_CStream *cctx; size_t r; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - size_t len = rd_slice_remains(slice); + size_t len = rd_slice_remains(slice); ZSTD_outBuffer out; ZSTD_inBuffer in; - *outbuf = NULL; - out.pos = 0; + *outbuf = NULL; + out.pos = 0; out.size = ZSTD_compressBound(len); - out.dst = rd_malloc(out.size); + out.dst = rd_malloc(out.size); if (!out.dst) { rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR", "Unable to allocate output buffer " - "(%"PRIusz" bytes): %s", + "(%" PRIusz " bytes): %s", out.size, rd_strerror(errno)); return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; } @@ -146,7 +150,8 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, goto done; } -#if defined(WITH_ZSTD_STATIC) && ZSTD_VERSION_NUMBER >= (1*100*100+2*100+1) /* v1.2.1 */ +#if defined(WITH_ZSTD_STATIC) && \ + ZSTD_VERSION_NUMBER >= (1 * 100 * 100 + 2 * 100 + 1) /* v1.2.1 */ r = ZSTD_initCStream_srcSize(cctx, comp_level, len); #else /* libzstd not linked statically (or zstd version < 1.2.1): @@ -157,7 +162,7 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, if (ZSTD_isError(r)) { rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR", "Unable to begin ZSTD compression " - "(out buffer is %"PRIusz" bytes): %s", + "(out buffer is %" PRIusz " bytes): %s", out.size, ZSTD_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; 
@@ -165,12 +170,14 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, while ((in.size = rd_slice_reader(slice, &in.src))) { in.pos = 0; - r = ZSTD_compressStream(cctx, &out, &in); + r = ZSTD_compressStream(cctx, &out, &in); if (unlikely(ZSTD_isError(r))) { rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR", "ZSTD compression failed " - "(at of %"PRIusz" bytes, with " - "%"PRIusz" bytes remaining in out buffer): " + "(at of %" PRIusz + " bytes, with " + "%" PRIusz + " bytes remaining in out buffer): " "%s", in.size, out.size - out.pos, ZSTD_getErrorName(r)); @@ -189,7 +196,7 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, if (rd_slice_remains(slice) != 0) { rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR", "Failed to finalize ZSTD compression " - "of %"PRIusz" bytes: %s", + "of %" PRIusz " bytes: %s", len, "Unexpected trailing data"); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; @@ -199,7 +206,7 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, if (unlikely(ZSTD_isError(r) || r > 0)) { rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR", "Failed to finalize ZSTD compression " - "of %"PRIusz" bytes: %s", + "of %" PRIusz " bytes: %s", len, ZSTD_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; @@ -208,7 +215,7 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, *outbuf = out.dst; *outlenp = out.pos; - done: +done: if (cctx) ZSTD_freeCStream(cctx); @@ -216,5 +223,4 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, rd_free(out.dst); return err; - } diff --git a/src/rdkafka_zstd.h b/src/rdkafka_zstd.h index 83ff7ab072..f87c4c6fbc 100644 --- a/src/rdkafka_zstd.h +++ b/src/rdkafka_zstd.h @@ -3,24 +3,24 @@ * * Copyright (c) 2018 Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. 
Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -34,10 +34,11 @@ * * @returns allocated buffer in \p *outbuf, length in \p *outlenp on success. 
*/ -rd_kafka_resp_err_t -rd_kafka_zstd_decompress (rd_kafka_broker_t *rkb, - char *inbuf, size_t inlen, - void **outbuf, size_t *outlenp); +rd_kafka_resp_err_t rd_kafka_zstd_decompress(rd_kafka_broker_t *rkb, + char *inbuf, + size_t inlen, + void **outbuf, + size_t *outlenp); /** * Allocate space for \p *outbuf and compress all \p iovlen buffers in \p iov. @@ -47,8 +48,10 @@ rd_kafka_zstd_decompress (rd_kafka_broker_t *rkb, * * @returns allocated buffer in \p *outbuf, length in \p *outlenp. */ -rd_kafka_resp_err_t -rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, - rd_slice_t *slice, void **outbuf, size_t *outlenp); +rd_kafka_resp_err_t rd_kafka_zstd_compress(rd_kafka_broker_t *rkb, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp); #endif /* _RDZSTD_H_ */ diff --git a/src/rdlist.c b/src/rdlist.c index 5ac224a149..c71e3004ad 100644 --- a/src/rdlist.c +++ b/src/rdlist.c @@ -30,79 +30,80 @@ #include "rdlist.h" -void rd_list_dump (const char *what, const rd_list_t *rl) { +void rd_list_dump(const char *what, const rd_list_t *rl) { int i; - printf("%s: (rd_list_t*)%p cnt %d, size %d, elems %p:\n", - what, rl, rl->rl_cnt, rl->rl_size, rl->rl_elems); - for (i = 0 ; i < rl->rl_cnt ; i++) - printf(" #%d: %p at &%p\n", i, - rl->rl_elems[i], &rl->rl_elems[i]); + printf("%s: (rd_list_t*)%p cnt %d, size %d, elems %p:\n", what, rl, + rl->rl_cnt, rl->rl_size, rl->rl_elems); + for (i = 0; i < rl->rl_cnt; i++) + printf(" #%d: %p at &%p\n", i, rl->rl_elems[i], + &rl->rl_elems[i]); } -void rd_list_grow (rd_list_t *rl, size_t size) { +void rd_list_grow(rd_list_t *rl, size_t size) { rd_assert(!(rl->rl_flags & RD_LIST_F_FIXED_SIZE)); rl->rl_size += (int)size; if (unlikely(rl->rl_size == 0)) return; /* avoid zero allocations */ - rl->rl_elems = rd_realloc(rl->rl_elems, - sizeof(*rl->rl_elems) * rl->rl_size); + rl->rl_elems = + rd_realloc(rl->rl_elems, sizeof(*rl->rl_elems) * rl->rl_size); } rd_list_t * -rd_list_init (rd_list_t *rl, int 
initial_size, void (*free_cb) (void *)) { +rd_list_init(rd_list_t *rl, int initial_size, void (*free_cb)(void *)) { memset(rl, 0, sizeof(*rl)); - if (initial_size > 0) - rd_list_grow(rl, initial_size); + if (initial_size > 0) + rd_list_grow(rl, initial_size); rl->rl_free_cb = free_cb; return rl; } -rd_list_t *rd_list_init_copy (rd_list_t *dst, const rd_list_t *src) { +rd_list_t *rd_list_init_copy(rd_list_t *dst, const rd_list_t *src) { if (src->rl_flags & RD_LIST_F_FIXED_SIZE) { /* Source was preallocated, prealloc new dst list */ rd_list_init(dst, 0, src->rl_free_cb); rd_list_prealloc_elems(dst, src->rl_elemsize, src->rl_size, - 1/*memzero*/); + 1 /*memzero*/); } else { /* Source is dynamic, initialize dst the same */ rd_list_init(dst, rd_list_cnt(src), src->rl_free_cb); - } return dst; } -static RD_INLINE rd_list_t *rd_list_alloc (void) { +static RD_INLINE rd_list_t *rd_list_alloc(void) { return rd_malloc(sizeof(rd_list_t)); } -rd_list_t *rd_list_new (int initial_size, void (*free_cb) (void *)) { - rd_list_t *rl = rd_list_alloc(); - rd_list_init(rl, initial_size, free_cb); - rl->rl_flags |= RD_LIST_F_ALLOCATED; - return rl; +rd_list_t *rd_list_new(int initial_size, void (*free_cb)(void *)) { + rd_list_t *rl = rd_list_alloc(); + rd_list_init(rl, initial_size, free_cb); + rl->rl_flags |= RD_LIST_F_ALLOCATED; + return rl; } -void rd_list_prealloc_elems (rd_list_t *rl, size_t elemsize, size_t cnt, - int memzero) { - size_t allocsize; - char *p; - size_t i; +void rd_list_prealloc_elems(rd_list_t *rl, + size_t elemsize, + size_t cnt, + int memzero) { + size_t allocsize; + char *p; + size_t i; - rd_assert(!rl->rl_elems); + rd_assert(!rl->rl_elems); - /* Allocation layout: - * void *ptrs[cnt]; - * elems[elemsize][cnt]; - */ + /* Allocation layout: + * void *ptrs[cnt]; + * elems[elemsize][cnt]; + */ - allocsize = (sizeof(void *) * cnt) + (elemsize * cnt); + allocsize = (sizeof(void *) * cnt) + (elemsize * cnt); if (memzero) rl->rl_elems = rd_calloc(1, allocsize); else @@ 
-114,47 +115,47 @@ void rd_list_prealloc_elems (rd_list_t *rl, size_t elemsize, size_t cnt, else p = rl->rl_p = NULL; - /* Pointer -> elem mapping */ - for (i = 0 ; i < cnt ; i++, p += elemsize) - rl->rl_elems[i] = p; + /* Pointer -> elem mapping */ + for (i = 0; i < cnt; i++, p += elemsize) + rl->rl_elems[i] = p; - rl->rl_size = (int)cnt; - rl->rl_cnt = 0; - rl->rl_flags |= RD_LIST_F_FIXED_SIZE; + rl->rl_size = (int)cnt; + rl->rl_cnt = 0; + rl->rl_flags |= RD_LIST_F_FIXED_SIZE; rl->rl_elemsize = (int)elemsize; } -void rd_list_set_cnt (rd_list_t *rl, size_t cnt) { +void rd_list_set_cnt(rd_list_t *rl, size_t cnt) { rd_assert(rl->rl_flags & RD_LIST_F_FIXED_SIZE); rd_assert((int)cnt <= rl->rl_size); rl->rl_cnt = (int)cnt; } -void rd_list_free_cb (rd_list_t *rl, void *ptr) { +void rd_list_free_cb(rd_list_t *rl, void *ptr) { if (rl->rl_free_cb && ptr) rl->rl_free_cb(ptr); } -void *rd_list_add (rd_list_t *rl, void *elem) { +void *rd_list_add(rd_list_t *rl, void *elem) { if (rl->rl_cnt == rl->rl_size) rd_list_grow(rl, rl->rl_size ? rl->rl_size * 2 : 16); - rl->rl_flags &= ~RD_LIST_F_SORTED; - if (elem) - rl->rl_elems[rl->rl_cnt] = elem; - return rl->rl_elems[rl->rl_cnt++]; + rl->rl_flags &= ~RD_LIST_F_SORTED; + if (elem) + rl->rl_elems[rl->rl_cnt] = elem; + return rl->rl_elems[rl->rl_cnt++]; } -void rd_list_set (rd_list_t *rl, int idx, void *ptr) { +void rd_list_set(rd_list_t *rl, int idx, void *ptr) { if (idx >= rl->rl_size) - rd_list_grow(rl, idx+1); + rd_list_grow(rl, idx + 1); if (idx >= rl->rl_cnt) { memset(&rl->rl_elems[rl->rl_cnt], 0, - sizeof(*rl->rl_elems) * (idx-rl->rl_cnt)); - rl->rl_cnt = idx+1; + sizeof(*rl->rl_elems) * (idx - rl->rl_cnt)); + rl->rl_cnt = idx + 1; } else { /* Not allowed to replace existing element. 
*/ rd_assert(!rl->rl_elems[idx]); @@ -165,17 +166,16 @@ void rd_list_set (rd_list_t *rl, int idx, void *ptr) { -void rd_list_remove_elem (rd_list_t *rl, int idx) { +void rd_list_remove_elem(rd_list_t *rl, int idx) { rd_assert(idx < rl->rl_cnt); if (idx + 1 < rl->rl_cnt) - memmove(&rl->rl_elems[idx], - &rl->rl_elems[idx+1], - sizeof(*rl->rl_elems) * (rl->rl_cnt - (idx+1))); + memmove(&rl->rl_elems[idx], &rl->rl_elems[idx + 1], + sizeof(*rl->rl_elems) * (rl->rl_cnt - (idx + 1))); rl->rl_cnt--; } -void *rd_list_remove (rd_list_t *rl, void *match_elem) { +void *rd_list_remove(rd_list_t *rl, void *match_elem) { void *elem; int i; @@ -190,14 +190,14 @@ void *rd_list_remove (rd_list_t *rl, void *match_elem) { } -void *rd_list_remove_cmp (rd_list_t *rl, void *match_elem, - int (*cmp) (void *_a, void *_b)) { +void *rd_list_remove_cmp(rd_list_t *rl, + void *match_elem, + int (*cmp)(void *_a, void *_b)) { void *elem; int i; RD_LIST_FOREACH(elem, rl, i) { - if (elem == match_elem || - !cmp(elem, match_elem)) { + if (elem == match_elem || !cmp(elem, match_elem)) { rd_list_remove_elem(rl, i); return elem; } @@ -207,8 +207,9 @@ void *rd_list_remove_cmp (rd_list_t *rl, void *match_elem, } -int rd_list_remove_multi_cmp (rd_list_t *rl, void *match_elem, - int (*cmp) (void *_a, void *_b)) { +int rd_list_remove_multi_cmp(rd_list_t *rl, + void *match_elem, + int (*cmp)(void *_a, void *_b)) { void *elem; int i; @@ -216,8 +217,7 @@ int rd_list_remove_multi_cmp (rd_list_t *rl, void *match_elem, /* Scan backwards to minimize memmoves */ RD_LIST_FOREACH_REVERSE(elem, rl, i) { - if (match_elem == cmp || - !cmp(elem, match_elem)) { + if (match_elem == cmp || !cmp(elem, match_elem)) { rd_list_remove_elem(rl, i); cnt++; } @@ -227,7 +227,7 @@ int rd_list_remove_multi_cmp (rd_list_t *rl, void *match_elem, } -void *rd_list_pop (rd_list_t *rl) { +void *rd_list_pop(rd_list_t *rl) { void *elem; int idx = rl->rl_cnt - 1; @@ -250,26 +250,25 @@ void *rd_list_pop (rd_list_t *rl) { * * This is true for 
all list comparator uses, i.e., both sort() and find(). */ -static RD_TLS int (*rd_list_cmp_curr) (const void *, const void *); +static RD_TLS int (*rd_list_cmp_curr)(const void *, const void *); -static RD_INLINE -int rd_list_cmp_trampoline (const void *_a, const void *_b) { - const void *a = *(const void **)_a, *b = *(const void **)_b; +static RD_INLINE int rd_list_cmp_trampoline(const void *_a, const void *_b) { + const void *a = *(const void **)_a, *b = *(const void **)_b; - return rd_list_cmp_curr(a, b); + return rd_list_cmp_curr(a, b); } -void rd_list_sort (rd_list_t *rl, int (*cmp) (const void *, const void *)) { +void rd_list_sort(rd_list_t *rl, int (*cmp)(const void *, const void *)) { if (unlikely(rl->rl_elems == NULL)) return; - rd_list_cmp_curr = cmp; + rd_list_cmp_curr = cmp; qsort(rl->rl_elems, rl->rl_cnt, sizeof(*rl->rl_elems), - rd_list_cmp_trampoline); - rl->rl_flags |= RD_LIST_F_SORTED; + rd_list_cmp_trampoline); + rl->rl_flags |= RD_LIST_F_SORTED; } -static void rd_list_destroy_elems (rd_list_t *rl) { +static void rd_list_destroy_elems(rd_list_t *rl) { int i; if (!rl->rl_elems) @@ -277,42 +276,43 @@ static void rd_list_destroy_elems (rd_list_t *rl) { if (rl->rl_free_cb) { /* Free in reverse order to allow deletions */ - for (i = rl->rl_cnt - 1 ; i >= 0 ; i--) + for (i = rl->rl_cnt - 1; i >= 0; i--) if (rl->rl_elems[i]) rl->rl_free_cb(rl->rl_elems[i]); } rd_free(rl->rl_elems); rl->rl_elems = NULL; - rl->rl_cnt = 0; - rl->rl_size = 0; + rl->rl_cnt = 0; + rl->rl_size = 0; rl->rl_flags &= ~RD_LIST_F_SORTED; } -void rd_list_clear (rd_list_t *rl) { +void rd_list_clear(rd_list_t *rl) { rd_list_destroy_elems(rl); } -void rd_list_destroy (rd_list_t *rl) { +void rd_list_destroy(rd_list_t *rl) { rd_list_destroy_elems(rl); if (rl->rl_flags & RD_LIST_F_ALLOCATED) rd_free(rl); } -void rd_list_destroy_free (void *rl) { +void rd_list_destroy_free(void *rl) { rd_list_destroy((rd_list_t *)rl); } -void *rd_list_elem (const rd_list_t *rl, int idx) { +void 
*rd_list_elem(const rd_list_t *rl, int idx) { if (likely(idx < rl->rl_cnt)) return (void *)rl->rl_elems[idx]; return NULL; } -int rd_list_index (const rd_list_t *rl, const void *match, - int (*cmp) (const void *, const void *)) { +int rd_list_index(const rd_list_t *rl, + const void *match, + int (*cmp)(const void *, const void *)) { int i; const void *elem; @@ -325,19 +325,20 @@ int rd_list_index (const rd_list_t *rl, const void *match, } -void *rd_list_find (const rd_list_t *rl, const void *match, - int (*cmp) (const void *, const void *)) { +void *rd_list_find(const rd_list_t *rl, + const void *match, + int (*cmp)(const void *, const void *)) { int i; const void *elem; - if (rl->rl_flags & RD_LIST_F_SORTED) { - void **r; - rd_list_cmp_curr = cmp; - r = bsearch(&match/*ptrptr to match elems*/, - rl->rl_elems, rl->rl_cnt, - sizeof(*rl->rl_elems), rd_list_cmp_trampoline); - return r ? *r : NULL; - } + if (rl->rl_flags & RD_LIST_F_SORTED) { + void **r; + rd_list_cmp_curr = cmp; + r = bsearch(&match /*ptrptr to match elems*/, rl->rl_elems, + rl->rl_cnt, sizeof(*rl->rl_elems), + rd_list_cmp_trampoline); + return r ? 
*r : NULL; + } RD_LIST_FOREACH(elem, rl, i) { if (!cmp(match, elem)) @@ -348,65 +349,66 @@ void *rd_list_find (const rd_list_t *rl, const void *match, } -void *rd_list_first (const rd_list_t *rl) { +void *rd_list_first(const rd_list_t *rl) { if (rl->rl_cnt == 0) return NULL; return rl->rl_elems[0]; } -void *rd_list_last (const rd_list_t *rl) { +void *rd_list_last(const rd_list_t *rl) { if (rl->rl_cnt == 0) return NULL; - return rl->rl_elems[rl->rl_cnt-1]; + return rl->rl_elems[rl->rl_cnt - 1]; } -void *rd_list_find_duplicate (const rd_list_t *rl, - int (*cmp) (const void *, const void *)) { +void *rd_list_find_duplicate(const rd_list_t *rl, + int (*cmp)(const void *, const void *)) { int i; rd_assert(rl->rl_flags & RD_LIST_F_SORTED); - for (i = 1 ; i < rl->rl_cnt ; i++) { - if (!cmp(rl->rl_elems[i-1], - rl->rl_elems[i])) + for (i = 1; i < rl->rl_cnt; i++) { + if (!cmp(rl->rl_elems[i - 1], rl->rl_elems[i])) return rl->rl_elems[i]; } return NULL; } -int rd_list_cmp (const rd_list_t *a, const rd_list_t *b, - int (*cmp) (const void *, const void *)) { - int i; +int rd_list_cmp(const rd_list_t *a, + const rd_list_t *b, + int (*cmp)(const void *, const void *)) { + int i; - i = RD_CMP(a->rl_cnt, b->rl_cnt); - if (i) - return i; + i = RD_CMP(a->rl_cnt, b->rl_cnt); + if (i) + return i; - for (i = 0 ; i < a->rl_cnt ; i++) { - int r = cmp(a->rl_elems[i], b->rl_elems[i]); - if (r) - return r; - } + for (i = 0; i < a->rl_cnt; i++) { + int r = cmp(a->rl_elems[i], b->rl_elems[i]); + if (r) + return r; + } - return 0; + return 0; } /** * @brief Simple element pointer comparator */ -int rd_list_cmp_ptr (const void *a, const void *b) { +int rd_list_cmp_ptr(const void *a, const void *b) { return RD_CMP(a, b); } -int rd_list_cmp_str (const void *a, const void *b) { +int rd_list_cmp_str(const void *a, const void *b) { return strcmp((const char *)a, (const char *)b); } -void rd_list_apply (rd_list_t *rl, - int (*cb) (void *elem, void *opaque), void *opaque) { +void 
rd_list_apply(rd_list_t *rl, + int (*cb)(void *elem, void *opaque), + void *opaque) { void *elem; int i; @@ -424,12 +426,12 @@ void rd_list_apply (rd_list_t *rl, /** * @brief Default element copier that simply assigns the original pointer. */ -static void *rd_list_nocopy_ptr (const void *elem, void *opaque) { +static void *rd_list_nocopy_ptr(const void *elem, void *opaque) { return (void *)elem; } -rd_list_t *rd_list_copy (const rd_list_t *src, - rd_list_copy_cb_t *copy_cb, void *opaque) { +rd_list_t * +rd_list_copy(const rd_list_t *src, rd_list_copy_cb_t *copy_cb, void *opaque) { rd_list_t *dst; dst = rd_list_new(src->rl_cnt, src->rl_free_cb); @@ -439,9 +441,10 @@ rd_list_t *rd_list_copy (const rd_list_t *src, } -void rd_list_copy_to (rd_list_t *dst, const rd_list_t *src, - void *(*copy_cb) (const void *elem, void *opaque), - void *opaque) { +void rd_list_copy_to(rd_list_t *dst, + const rd_list_t *src, + void *(*copy_cb)(const void *elem, void *opaque), + void *opaque) { void *elem; int i; @@ -466,8 +469,8 @@ void rd_list_copy_to (rd_list_t *dst, const rd_list_t *src, * * @returns \p dst */ -static rd_list_t *rd_list_copy_preallocated0 (rd_list_t *dst, - const rd_list_t *src) { +static rd_list_t *rd_list_copy_preallocated0(rd_list_t *dst, + const rd_list_t *src) { int dst_flags = dst->rl_flags & RD_LIST_F_ALLOCATED; rd_assert(dst != src); @@ -486,14 +489,14 @@ static rd_list_t *rd_list_copy_preallocated0 (rd_list_t *dst, return dst; } -void *rd_list_copy_preallocated (const void *elem, void *opaque) { +void *rd_list_copy_preallocated(const void *elem, void *opaque) { return rd_list_copy_preallocated0(rd_list_new(0, NULL), (const rd_list_t *)elem); } -void rd_list_move (rd_list_t *dst, rd_list_t *src) { +void rd_list_move(rd_list_t *dst, rd_list_t *src) { rd_list_init_copy(dst, src); if (src->rl_flags & RD_LIST_F_FIXED_SIZE) { @@ -513,15 +516,15 @@ void rd_list_move (rd_list_t *dst, rd_list_t *src) { * @{ * */ -rd_list_t *rd_list_init_int32 (rd_list_t *rl, int 
max_size) { +rd_list_t *rd_list_init_int32(rd_list_t *rl, int max_size) { int rl_flags = rl->rl_flags & RD_LIST_F_ALLOCATED; rd_list_init(rl, 0, NULL); rl->rl_flags |= rl_flags; - rd_list_prealloc_elems(rl, sizeof(int32_t), max_size, 1/*memzero*/); + rd_list_prealloc_elems(rl, sizeof(int32_t), max_size, 1 /*memzero*/); return rl; } -void rd_list_set_int32 (rd_list_t *rl, int idx, int32_t val) { +void rd_list_set_int32(rd_list_t *rl, int idx, int32_t val) { rd_assert((rl->rl_flags & RD_LIST_F_FIXED_SIZE) && rl->rl_elemsize == sizeof(int32_t)); rd_assert(idx < rl->rl_size); @@ -529,18 +532,15 @@ void rd_list_set_int32 (rd_list_t *rl, int idx, int32_t val) { memcpy(rl->rl_elems[idx], &val, sizeof(int32_t)); if (rl->rl_cnt <= idx) - rl->rl_cnt = idx+1; + rl->rl_cnt = idx + 1; } -int32_t rd_list_get_int32 (const rd_list_t *rl, int idx) { +int32_t rd_list_get_int32(const rd_list_t *rl, int idx) { rd_assert((rl->rl_flags & RD_LIST_F_FIXED_SIZE) && - rl->rl_elemsize == sizeof(int32_t) && - idx < rl->rl_cnt); + rl->rl_elemsize == sizeof(int32_t) && idx < rl->rl_cnt); return *(int32_t *)rl->rl_elems[idx]; } - /**@}*/ - diff --git a/src/rdlist.h b/src/rdlist.h index b7bfa4276a..db5295f6cf 100644 --- a/src/rdlist.h +++ b/src/rdlist.h @@ -37,23 +37,26 @@ */ typedef struct rd_list_s { - int rl_size; - int rl_cnt; + int rl_size; + int rl_cnt; void **rl_elems; - void (*rl_free_cb) (void *); - int rl_flags; -#define RD_LIST_F_ALLOCATED 0x1 /* The rd_list_t is allocated, - * will be free on destroy() */ -#define RD_LIST_F_SORTED 0x2 /* Set by sort(), cleared by any mutations. - * When this flag is set bsearch() is used - * by find(), otherwise a linear search. */ -#define RD_LIST_F_FIXED_SIZE 0x4 /* Assert on grow, when prealloc()ed */ -#define RD_LIST_F_UNIQUE 0x8 /* Don't allow duplicates: - * ONLY ENFORCED BY CALLER. 
*/ - int rl_elemsize; /**< Element size (when prealloc()ed) */ - void *rl_p; /**< Start of prealloced elements, - * the allocation itself starts at rl_elems - */ + void (*rl_free_cb)(void *); + int rl_flags; +#define RD_LIST_F_ALLOCATED \ + 0x1 /* The rd_list_t is allocated, \ + * will be free on destroy() */ +#define RD_LIST_F_SORTED \ + 0x2 /* Set by sort(), cleared by any mutations. \ + * When this flag is set bsearch() is used \ + * by find(), otherwise a linear search. */ +#define RD_LIST_F_FIXED_SIZE 0x4 /* Assert on grow, when prealloc()ed */ +#define RD_LIST_F_UNIQUE \ + 0x8 /* Don't allow duplicates: \ + * ONLY ENFORCED BY CALLER. */ + int rl_elemsize; /**< Element size (when prealloc()ed) */ + void *rl_p; /**< Start of prealloced elements, + * the allocation itself starts at rl_elems + */ } rd_list_t; @@ -65,14 +68,14 @@ typedef struct rd_list_s { * @returns \p rl */ rd_list_t * -rd_list_init (rd_list_t *rl, int initial_size, void (*free_cb) (void *)); +rd_list_init(rd_list_t *rl, int initial_size, void (*free_cb)(void *)); /** * @brief Same as rd_list_init() but uses initial_size and free_cb * from the provided \p src list. */ -rd_list_t *rd_list_init_copy (rd_list_t *rl, const rd_list_t *src); +rd_list_t *rd_list_init_copy(rd_list_t *rl, const rd_list_t *src); /** * @brief Allocate a new list pointer and initialize @@ -82,14 +85,14 @@ rd_list_t *rd_list_init_copy (rd_list_t *rl, const rd_list_t *src); * * Use rd_list_destroy() to free. */ -rd_list_t *rd_list_new (int initial_size, void (*free_cb) (void *)); +rd_list_t *rd_list_new(int initial_size, void (*free_cb)(void *)); /** * @brief Prepare list to for an additional \p size elements. * This is an optimization to avoid incremental grows. 
*/ -void rd_list_grow (rd_list_t *rl, size_t size); +void rd_list_grow(rd_list_t *rl, size_t size); /** * @brief Preallocate elements to avoid having to pass an allocated pointer to @@ -102,15 +105,17 @@ void rd_list_grow (rd_list_t *rl, size_t size); * * @remark Preallocated element lists can't grow past \p size. */ -void rd_list_prealloc_elems (rd_list_t *rl, size_t elemsize, size_t size, - int memzero); +void rd_list_prealloc_elems(rd_list_t *rl, + size_t elemsize, + size_t size, + int memzero); /** * @brief Set the number of valid elements, this must only be used * with prealloc_elems() to make the preallocated elements directly * usable. */ -void rd_list_set_cnt (rd_list_t *rl, size_t cnt); +void rd_list_set_cnt(rd_list_t *rl, size_t cnt); /** @@ -120,7 +125,7 @@ void rd_list_set_cnt (rd_list_t *rl, size_t cnt); * * Typical use is rd_list_free_cb(rd_list_remove_cmp(....)); */ -void rd_list_free_cb (rd_list_t *rl, void *ptr); +void rd_list_free_cb(rd_list_t *rl, void *ptr); /** @@ -129,7 +134,7 @@ void rd_list_free_cb (rd_list_t *rl, void *ptr); * @returns \p elem. If \p elem is NULL the default element for that index * will be returned (for use with set_elems). */ -void *rd_list_add (rd_list_t *rl, void *elem); +void *rd_list_add(rd_list_t *rl, void *elem); /** @@ -139,7 +144,7 @@ void *rd_list_add (rd_list_t *rl, void *elem); * @remark The list will be grown, if needed, any gaps between the current * highest element and \p idx will be set to NULL. */ -void rd_list_set (rd_list_t *rl, int idx, void *ptr); +void rd_list_set(rd_list_t *rl, int idx, void *ptr); /** @@ -147,14 +152,15 @@ void rd_list_set (rd_list_t *rl, int idx, void *ptr); * This is a slow O(n) + memmove operation. * Returns the removed element. */ -void *rd_list_remove (rd_list_t *rl, void *match_elem); +void *rd_list_remove(rd_list_t *rl, void *match_elem); /** * Remove element from list using comparator. 
* See rd_list_remove() */ -void *rd_list_remove_cmp (rd_list_t *rl, void *match_elem, - int (*cmp) (void *_a, void *_b)); +void *rd_list_remove_cmp(rd_list_t *rl, + void *match_elem, + int (*cmp)(void *_a, void *_b)); /** @@ -162,14 +168,14 @@ void *rd_list_remove_cmp (rd_list_t *rl, void *match_elem, * * This is a O(1) + memmove operation */ -void rd_list_remove_elem (rd_list_t *rl, int idx); +void rd_list_remove_elem(rd_list_t *rl, int idx); /** * @brief Remove and return the last element in the list. * * @returns the last element, or NULL if list is empty. */ -void *rd_list_pop (rd_list_t *rl); +void *rd_list_pop(rd_list_t *rl); /** @@ -179,8 +185,9 @@ void *rd_list_pop (rd_list_t *rl); * * @sa rd_list_remove() */ -int rd_list_remove_multi_cmp (rd_list_t *rl, void *match_elem, - int (*cmp) (void *_a, void *_b)); +int rd_list_remove_multi_cmp(rd_list_t *rl, + void *match_elem, + int (*cmp)(void *_a, void *_b)); /** @@ -189,13 +196,13 @@ int rd_list_remove_multi_cmp (rd_list_t *rl, void *match_elem, * To sort a list ascendingly the comparator should implement (a - b) * and for descending order implement (b - a). */ -void rd_list_sort (rd_list_t *rl, int (*cmp) (const void *, const void *)); +void rd_list_sort(rd_list_t *rl, int (*cmp)(const void *, const void *)); /** * Empties the list and frees elements (if there is a free_cb). */ -void rd_list_clear (rd_list_t *rl); +void rd_list_clear(rd_list_t *rl); /** @@ -204,13 +211,13 @@ void rd_list_clear (rd_list_t *rl); * * If the list was previously allocated with rd_list_new() it will be freed. */ -void rd_list_destroy (rd_list_t *rl); +void rd_list_destroy(rd_list_t *rl); /** * @brief Wrapper for rd_list_destroy() that has same signature as free(3), * allowing it to be used as free_cb for nested lists. 
*/ -void rd_list_destroy_free (void *rl); +void rd_list_destroy_free(void *rl); /** @@ -222,19 +229,19 @@ void rd_list_destroy_free (void *rl); * while ((obj = rd_list_elem(rl, i++))) * do_something(obj); */ -void *rd_list_elem (const rd_list_t *rl, int idx); +void *rd_list_elem(const rd_list_t *rl, int idx); -#define RD_LIST_FOREACH(elem,listp,idx) \ - for (idx = 0 ; (elem = rd_list_elem(listp, idx)) ; idx++) +#define RD_LIST_FOREACH(elem, listp, idx) \ + for (idx = 0; (elem = rd_list_elem(listp, idx)); idx++) -#define RD_LIST_FOREACH_REVERSE(elem,listp,idx) \ - for (idx = (listp)->rl_cnt-1 ; \ - idx >= 0 && (elem = rd_list_elem(listp, idx)) ; idx--) +#define RD_LIST_FOREACH_REVERSE(elem, listp, idx) \ + for (idx = (listp)->rl_cnt - 1; \ + idx >= 0 && (elem = rd_list_elem(listp, idx)); idx--) /** * Returns the number of elements in list. */ -static RD_INLINE RD_UNUSED int rd_list_cnt (const rd_list_t *rl) { +static RD_INLINE RD_UNUSED int rd_list_cnt(const rd_list_t *rl) { return rl->rl_cnt; } @@ -254,8 +261,9 @@ static RD_INLINE RD_UNUSED int rd_list_cnt (const rd_list_t *rl) { * @remark this is a O(n) scan. * @returns the first matching element or NULL. */ -int rd_list_index (const rd_list_t *rl, const void *match, - int (*cmp) (const void *, const void *)); +int rd_list_index(const rd_list_t *rl, + const void *match, + int (*cmp)(const void *, const void *)); /** * @brief Find element using comparator @@ -267,20 +275,21 @@ int rd_list_index (const rd_list_t *rl, const void *match, * * @returns the first matching element or NULL. */ -void *rd_list_find (const rd_list_t *rl, const void *match, - int (*cmp) (const void *, const void *)); +void *rd_list_find(const rd_list_t *rl, + const void *match, + int (*cmp)(const void *, const void *)); /** * @returns the first element of the list, or NULL if list is empty. 
*/ -void *rd_list_first (const rd_list_t *rl); +void *rd_list_first(const rd_list_t *rl); /** * @returns the last element of the list, or NULL if list is empty. */ -void *rd_list_last (const rd_list_t *rl); +void *rd_list_last(const rd_list_t *rl); /** @@ -288,8 +297,8 @@ void *rd_list_last (const rd_list_t *rl); * * @warning The list MUST be sorted. */ -void *rd_list_find_duplicate (const rd_list_t *rl, - int (*cmp) (const void *, const void *)); +void *rd_list_find_duplicate(const rd_list_t *rl, + int (*cmp)(const void *, const void *)); /** @@ -299,36 +308,38 @@ void *rd_list_find_duplicate (const rd_list_t *rl, * > 0 if a was "greater" than b, * 0 if a and b are equal. */ -int rd_list_cmp (const rd_list_t *a, const rd_list_t *b, - int (*cmp) (const void *, const void *)); +int rd_list_cmp(const rd_list_t *a, + const rd_list_t *b, + int (*cmp)(const void *, const void *)); /** * @brief Simple element pointer comparator */ -int rd_list_cmp_ptr (const void *a, const void *b); +int rd_list_cmp_ptr(const void *a, const void *b); /** * @brief strcmp comparator where the list elements are strings. */ -int rd_list_cmp_str (const void *a, const void *b); +int rd_list_cmp_str(const void *a, const void *b); /** * @brief Apply \p cb to each element in list, if \p cb returns 0 * the element will be removed (but not freed). 
*/ -void rd_list_apply (rd_list_t *rl, - int (*cb) (void *elem, void *opaque), void *opaque); +void rd_list_apply(rd_list_t *rl, + int (*cb)(void *elem, void *opaque), + void *opaque); -typedef void *(rd_list_copy_cb_t) (const void *elem, void *opaque); +typedef void *(rd_list_copy_cb_t)(const void *elem, void *opaque); /** * @brief Copy list \p src, returning a new list, * using optional \p copy_cb (per elem) */ -rd_list_t *rd_list_copy (const rd_list_t *src, - rd_list_copy_cb_t *copy_cb, void *opaque); +rd_list_t * +rd_list_copy(const rd_list_t *src, rd_list_copy_cb_t *copy_cb, void *opaque); /** @@ -337,22 +348,22 @@ rd_list_t *rd_list_copy (const rd_list_t *src, * @remark copy_cb() may return NULL in which case no element is added, * but the copy callback might have done so itself. */ -void rd_list_copy_to (rd_list_t *dst, const rd_list_t *src, - void *(*copy_cb) (const void *elem, void *opaque), - void *opaque); +void rd_list_copy_to(rd_list_t *dst, + const rd_list_t *src, + void *(*copy_cb)(const void *elem, void *opaque), + void *opaque); /** * @brief Copy callback to copy elements that are preallocated lists. */ -void *rd_list_copy_preallocated (const void *elem, void *opaque); +void *rd_list_copy_preallocated(const void *elem, void *opaque); /** * @brief String copier for rd_list_copy() */ -static RD_UNUSED -void *rd_list_string_copy (const void *elem, void *opaque) { +static RD_UNUSED void *rd_list_string_copy(const void *elem, void *opaque) { return rd_strdup((const char *)elem); } @@ -364,7 +375,7 @@ void *rd_list_string_copy (const void *elem, void *opaque) { * @remark \p dst will be initialized first. * @remark \p src will be emptied. */ -void rd_list_move (rd_list_t *dst, rd_list_t *src); +void rd_list_move(rd_list_t *dst, rd_list_t *src); /** @@ -380,13 +391,13 @@ void rd_list_move (rd_list_t *dst, rd_list_t *src); * @remark The allocation flag of the original \p rl is retained, * do not pass an uninitialized \p rl to this function. 
*/ -rd_list_t *rd_list_init_int32 (rd_list_t *rl, int max_size); +rd_list_t *rd_list_init_int32(rd_list_t *rl, int max_size); /** * Debugging: Print list to stdout. */ -void rd_list_dump (const char *what, const rd_list_t *rl); +void rd_list_dump(const char *what, const rd_list_t *rl); @@ -396,14 +407,14 @@ void rd_list_dump (const char *what, const rd_list_t *rl); * @remark Must only be used with preallocated int32_t lists. * @remark Allows values to be overwritten. */ -void rd_list_set_int32 (rd_list_t *rl, int idx, int32_t val); +void rd_list_set_int32(rd_list_t *rl, int idx, int32_t val); /** * @returns the int32_t element value at index \p idx * * @remark Must only be used with preallocated int32_t lists. */ -int32_t rd_list_get_int32 (const rd_list_t *rl, int idx); +int32_t rd_list_get_int32(const rd_list_t *rl, int idx); /**@}*/ diff --git a/src/rdlog.c b/src/rdlog.c index 3f0d29ab68..19fbbb1614 100644 --- a/src/rdlog.c +++ b/src/rdlog.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -35,55 +35,55 @@ +void rd_hexdump(FILE *fp, const char *name, const void *ptr, size_t len) { + const char *p = (const char *)ptr; + size_t of = 0; -void rd_hexdump (FILE *fp, const char *name, const void *ptr, size_t len) { - const char *p = (const char *)ptr; - size_t of = 0; + if (name) + fprintf(fp, "%s hexdump (%" PRIusz " bytes):\n", name, len); - if (name) - fprintf(fp, "%s hexdump (%"PRIusz" bytes):\n", name, len); + for (of = 0; of < len; of += 16) { + char hexen[16 * 3 + 1]; + char charen[16 + 1]; + int hof = 0; - for (of = 0 ; of < len ; of += 16) { - char hexen[16*3+1]; - char charen[16+1]; - int hof = 0; + int cof = 0; + unsigned int i; - int cof = 0; - unsigned int i; - - for (i = (unsigned int)of ; i < (unsigned int)of + 16 && i < len ; i++) { - hof += rd_snprintf(hexen+hof, sizeof(hexen)-hof, - "%02x ", - p[i] & 0xff); - cof += rd_snprintf(charen+cof, sizeof(charen)-cof, "%c", - isprint((int)p[i]) ? p[i] : '.'); - } - fprintf(fp, "%08zx: %-48s %-16s\n", - of, hexen, charen); - } + for (i = (unsigned int)of; i < (unsigned int)of + 16 && i < len; + i++) { + hof += rd_snprintf(hexen + hof, sizeof(hexen) - hof, + "%02x ", p[i] & 0xff); + cof += + rd_snprintf(charen + cof, sizeof(charen) - cof, + "%c", isprint((int)p[i]) ? 
p[i] : '.'); + } + fprintf(fp, "%08zx: %-48s %-16s\n", of, hexen, charen); + } } -void rd_iov_print (const char *what, int iov_idx, const struct iovec *iov, - int hexdump) { - printf("%s: iov #%i: %"PRIusz"\n", what, iov_idx, +void rd_iov_print(const char *what, + int iov_idx, + const struct iovec *iov, + int hexdump) { + printf("%s: iov #%i: %" PRIusz "\n", what, iov_idx, (size_t)iov->iov_len); if (hexdump) rd_hexdump(stdout, what, iov->iov_base, iov->iov_len); } -void rd_msghdr_print (const char *what, const struct msghdr *msg, - int hexdump) { +void rd_msghdr_print(const char *what, const struct msghdr *msg, int hexdump) { int i; size_t len = 0; - printf("%s: iovlen %"PRIusz"\n", what, (size_t)msg->msg_iovlen); + printf("%s: iovlen %" PRIusz "\n", what, (size_t)msg->msg_iovlen); - for (i = 0 ; i < (int)msg->msg_iovlen ; i++) { + for (i = 0; i < (int)msg->msg_iovlen; i++) { rd_iov_print(what, i, &msg->msg_iov[i], hexdump); len += msg->msg_iov[i].iov_len; } - printf("%s: ^ message was %"PRIusz" bytes in total\n", what, len); + printf("%s: ^ message was %" PRIusz " bytes in total\n", what, len); } diff --git a/src/rdlog.h b/src/rdlog.h index 3c07d7d460..f360a0b66e 100644 --- a/src/rdlog.h +++ b/src/rdlog.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -29,12 +29,13 @@ #ifndef _RDLOG_H_ #define _RDLOG_H_ -void rd_hexdump (FILE *fp, const char *name, const void *ptr, size_t len); +void rd_hexdump(FILE *fp, const char *name, const void *ptr, size_t len); -void rd_iov_print (const char *what, int iov_idx, const struct iovec *iov, - int hexdump); +void rd_iov_print(const char *what, + int iov_idx, + const struct iovec *iov, + int hexdump); struct msghdr; -void rd_msghdr_print (const char *what, const struct msghdr *msg, - int hexdump); +void rd_msghdr_print(const char *what, const struct msghdr *msg, int hexdump); #endif /* _RDLOG_H_ */ diff --git a/src/rdmap.c b/src/rdmap.c index d92706ed31..4b85470336 100644 --- a/src/rdmap.c +++ b/src/rdmap.c @@ -32,17 +32,16 @@ #include "rdmap.h" -static RD_INLINE -int rd_map_elem_cmp (const rd_map_elem_t *a, - const rd_map_elem_t *b, - const rd_map_t *rmap) { +static RD_INLINE int rd_map_elem_cmp(const rd_map_elem_t *a, + const rd_map_elem_t *b, + const rd_map_t *rmap) { int r = a->hash - b->hash; if (r != 0) return r; return rmap->rmap_cmp(a->key, b->key); } -static void rd_map_elem_destroy (rd_map_t *rmap, rd_map_elem_t *elem) { +static void rd_map_elem_destroy(rd_map_t *rmap, rd_map_elem_t *elem) { rd_assert(rmap->rmap_cnt > 0); rmap->rmap_cnt--; if (rmap->rmap_destroy_key) @@ -54,8 +53,8 @@ static void rd_map_elem_destroy (rd_map_t *rmap, rd_map_elem_t *elem) { rd_free(elem); } -static rd_map_elem_t *rd_map_find (const rd_map_t *rmap, int *bktp, - const rd_map_elem_t *skel) { +static rd_map_elem_t * +rd_map_find(const rd_map_t *rmap, int *bktp, const rd_map_elem_t *skel) { int bkt = skel->hash % rmap->rmap_buckets.cnt; rd_map_elem_t *elem; @@ -74,13 +73,13 @@ static rd_map_elem_t *rd_map_find (const rd_map_t *rmap, int *bktp, /** * @brief Create and return new element based on \p skel without value set. 
*/ -static rd_map_elem_t *rd_map_insert (rd_map_t *rmap, int bkt, - const rd_map_elem_t *skel) { +static rd_map_elem_t * +rd_map_insert(rd_map_t *rmap, int bkt, const rd_map_elem_t *skel) { rd_map_elem_t *elem; - elem = rd_calloc(1, sizeof(*elem)); + elem = rd_calloc(1, sizeof(*elem)); elem->hash = skel->hash; - elem->key = skel->key; /* takes ownership of key */ + elem->key = skel->key; /* takes ownership of key */ LIST_INSERT_HEAD(&rmap->rmap_buckets.p[bkt], elem, hlink); LIST_INSERT_HEAD(&rmap->rmap_iter, elem, link); rmap->rmap_cnt++; @@ -89,9 +88,8 @@ static rd_map_elem_t *rd_map_insert (rd_map_t *rmap, int bkt, } -rd_map_elem_t *rd_map_set (rd_map_t *rmap, void *key, void *value) { - rd_map_elem_t skel = { .key = key, - .hash = rmap->rmap_hash(key) }; +rd_map_elem_t *rd_map_set(rd_map_t *rmap, void *key, void *value) { + rd_map_elem_t skel = {.key = key, .hash = rmap->rmap_hash(key)}; rd_map_elem_t *elem; int bkt; @@ -110,9 +108,9 @@ rd_map_elem_t *rd_map_set (rd_map_t *rmap, void *key, void *value) { } -void *rd_map_get (const rd_map_t *rmap, const void *key) { - const rd_map_elem_t skel = { .key = (void *)key, - .hash = rmap->rmap_hash(key) }; +void *rd_map_get(const rd_map_t *rmap, const void *key) { + const rd_map_elem_t skel = {.key = (void *)key, + .hash = rmap->rmap_hash(key)}; rd_map_elem_t *elem; if (!(elem = rd_map_find(rmap, NULL, &skel))) @@ -122,9 +120,9 @@ void *rd_map_get (const rd_map_t *rmap, const void *key) { } -void rd_map_delete (rd_map_t *rmap, const void *key) { - const rd_map_elem_t skel = { .key = (void *)key, - .hash = rmap->rmap_hash(key) }; +void rd_map_delete(rd_map_t *rmap, const void *key) { + const rd_map_elem_t skel = {.key = (void *)key, + .hash = rmap->rmap_hash(key)}; rd_map_elem_t *elem; int bkt; @@ -135,30 +133,29 @@ void rd_map_delete (rd_map_t *rmap, const void *key) { } -void rd_map_copy (rd_map_t *dst, const rd_map_t *src, - rd_map_copy_t *key_copy, - rd_map_copy_t *value_copy) { +void rd_map_copy(rd_map_t *dst, + 
const rd_map_t *src, + rd_map_copy_t *key_copy, + rd_map_copy_t *value_copy) { const rd_map_elem_t *elem; RD_MAP_FOREACH_ELEM(elem, src) { - rd_map_set(dst, - key_copy ? - key_copy(elem->key) : (void *)elem->key, - value_copy ? - value_copy(elem->value) : (void *)elem->value); + rd_map_set( + dst, key_copy ? key_copy(elem->key) : (void *)elem->key, + value_copy ? value_copy(elem->value) : (void *)elem->value); } } -void rd_map_iter_begin (const rd_map_t *rmap, const rd_map_elem_t **elem) { +void rd_map_iter_begin(const rd_map_t *rmap, const rd_map_elem_t **elem) { *elem = LIST_FIRST(&rmap->rmap_iter); } -size_t rd_map_cnt (const rd_map_t *rmap) { +size_t rd_map_cnt(const rd_map_t *rmap) { return (size_t)rmap->rmap_cnt; } -rd_bool_t rd_map_is_empty (const rd_map_t *rmap) { +rd_bool_t rd_map_is_empty(const rd_map_t *rmap) { return rmap->rmap_cnt == 0; } @@ -167,27 +164,12 @@ rd_bool_t rd_map_is_empty (const rd_map_t *rmap) { * @brief Calculates the number of desired buckets and returns * a struct with pre-allocated buckets. */ -struct rd_map_buckets rd_map_alloc_buckets (size_t expected_cnt) { - static const int max_depth = 15; +struct rd_map_buckets rd_map_alloc_buckets(size_t expected_cnt) { + static const int max_depth = 15; static const int bucket_sizes[] = { - 5, - 11, - 23, - 47, - 97, - 199, /* default */ - 409, - 823, - 1741, - 3469, - 6949, - 14033, - 28411, - 57557, - 116731, - 236897, - -1 - }; + 5, 11, 23, 47, 97, 199, /* default */ + 409, 823, 1741, 3469, 6949, 14033, + 28411, 57557, 116731, 236897, -1}; struct rd_map_buckets buckets = RD_ZERO_INIT; int i; @@ -200,8 +182,8 @@ struct rd_map_buckets rd_map_alloc_buckets (size_t expected_cnt) { * When a real need arise we'll change this to a dynamically * growing hash map instead, but this will do for now. 
*/ buckets.cnt = bucket_sizes[0]; - for (i = 1 ; bucket_sizes[i] != -1 && - (int)expected_cnt / max_depth > bucket_sizes[i]; + for (i = 1; bucket_sizes[i] != -1 && + (int)expected_cnt / max_depth > bucket_sizes[i]; i++) buckets.cnt = bucket_sizes[i]; } @@ -214,41 +196,42 @@ struct rd_map_buckets rd_map_alloc_buckets (size_t expected_cnt) { } -void rd_map_init (rd_map_t *rmap, size_t expected_cnt, - int (*cmp) (const void *a, const void *b), - unsigned int (*hash) (const void *key), - void (*destroy_key) (void *key), - void (*destroy_value) (void *value)) { +void rd_map_init(rd_map_t *rmap, + size_t expected_cnt, + int (*cmp)(const void *a, const void *b), + unsigned int (*hash)(const void *key), + void (*destroy_key)(void *key), + void (*destroy_value)(void *value)) { memset(rmap, 0, sizeof(*rmap)); - rmap->rmap_buckets = rd_map_alloc_buckets(expected_cnt); - rmap->rmap_cmp = cmp; - rmap->rmap_hash = hash; - rmap->rmap_destroy_key = destroy_key; + rmap->rmap_buckets = rd_map_alloc_buckets(expected_cnt); + rmap->rmap_cmp = cmp; + rmap->rmap_hash = hash; + rmap->rmap_destroy_key = destroy_key; rmap->rmap_destroy_value = destroy_value; } -void rd_map_clear (rd_map_t *rmap) { +void rd_map_clear(rd_map_t *rmap) { rd_map_elem_t *elem; while ((elem = LIST_FIRST(&rmap->rmap_iter))) rd_map_elem_destroy(rmap, elem); } -void rd_map_destroy (rd_map_t *rmap) { +void rd_map_destroy(rd_map_t *rmap) { rd_map_clear(rmap); rd_free(rmap->rmap_buckets.p); } -int rd_map_str_cmp (const void *a, const void *b) { +int rd_map_str_cmp(const void *a, const void *b) { return strcmp((const char *)a, (const char *)b); } /** * @brief A djb2 string hasher. 
*/ -unsigned int rd_map_str_hash (const void *key) { +unsigned int rd_map_str_hash(const void *key) { const char *str = key; return rd_string_hash(str, -1); } @@ -271,17 +254,17 @@ unsigned int rd_map_str_hash (const void *key) { /* Complex key type */ struct mykey { int k; - int something_else; /* Ignored by comparator and hasher below */ + int something_else; /* Ignored by comparator and hasher below */ }; /* Key comparator */ -static int mykey_cmp (const void *_a, const void *_b) { +static int mykey_cmp(const void *_a, const void *_b) { const struct mykey *a = _a, *b = _b; return a->k - b->k; } /* Key hasher */ -static unsigned int mykey_hash (const void *_key) { +static unsigned int mykey_hash(const void *_key) { const struct mykey *key = _key; return (unsigned int)key->k; } @@ -293,23 +276,22 @@ struct person { }; /* Define typed hash map type */ -typedef RD_MAP_TYPE(const struct mykey *, const struct person *) - ut_my_typed_map_t; +typedef RD_MAP_TYPE(const struct mykey *, + const struct person *) ut_my_typed_map_t; /** * @brief Test typed hash map with pre-defined type. 
*/ -static int unittest_typed_map (void) { - ut_my_typed_map_t rmap = RD_MAP_INITIALIZER(0, - mykey_cmp, mykey_hash, - NULL, NULL); - ut_my_typed_map_t dup = RD_MAP_INITIALIZER(0, mykey_cmp, mykey_hash, - NULL, NULL); - struct mykey k1 = { 1 }; - struct mykey k2 = { 2 }; - struct person v1 = { "Roy", "McPhearsome" }; - struct person v2 = { "Hedvig", "Lindahl" }; +static int unittest_typed_map(void) { + ut_my_typed_map_t rmap = + RD_MAP_INITIALIZER(0, mykey_cmp, mykey_hash, NULL, NULL); + ut_my_typed_map_t dup = + RD_MAP_INITIALIZER(0, mykey_cmp, mykey_hash, NULL, NULL); + struct mykey k1 = {1}; + struct mykey k2 = {2}; + struct person v1 = {"Roy", "McPhearsome"}; + struct person v2 = {"Hedvig", "Lindahl"}; const struct mykey *key; const struct person *value; @@ -320,8 +302,8 @@ static int unittest_typed_map (void) { RD_UT_ASSERT(value == &v2, "mismatch"); RD_MAP_FOREACH(key, value, &rmap) { - RD_UT_SAY("enumerated key %d person %s %s", - key->k, value->name, value->surname); + RD_UT_SAY("enumerated key %d person %s %s", key->k, value->name, + value->surname); } RD_MAP_COPY(&dup, &rmap, NULL, NULL); @@ -342,14 +324,14 @@ static int unittest_typed_map (void) { } -static int person_cmp (const void *_a, const void *_b) { +static int person_cmp(const void *_a, const void *_b) { const struct person *a = _a, *b = _b; int r; if ((r = strcmp(a->name, b->name))) return r; return strcmp(a->surname, b->surname); } -static unsigned int person_hash (const void *_key) { +static unsigned int person_hash(const void *_key) { const struct person *key = _key; return 31 * rd_map_str_hash(key->name) + rd_map_str_hash(key->surname); } @@ -357,15 +339,15 @@ static unsigned int person_hash (const void *_key) { /** * @brief Test typed hash map with locally defined type. 
*/ -static int unittest_typed_map2 (void) { - RD_MAP_LOCAL_INITIALIZER(usermap, 3, - const char *, const struct person *, - rd_map_str_cmp, rd_map_str_hash, NULL, NULL); - RD_MAP_LOCAL_INITIALIZER(personmap, 3, - const struct person *, const char *, - person_cmp, person_hash, NULL, NULL); - struct person p1 = { "Magnus", "Lundstrom" }; - struct person p2 = { "Peppy", "Popperpappies" }; +static int unittest_typed_map2(void) { + RD_MAP_LOCAL_INITIALIZER(usermap, 3, const char *, + const struct person *, rd_map_str_cmp, + rd_map_str_hash, NULL, NULL); + RD_MAP_LOCAL_INITIALIZER(personmap, 3, const struct person *, + const char *, person_cmp, person_hash, NULL, + NULL); + struct person p1 = {"Magnus", "Lundstrom"}; + struct person p2 = {"Peppy", "Popperpappies"}; const char *user; const struct person *person; @@ -386,8 +368,9 @@ static int unittest_typed_map2 (void) { RD_MAP_FOREACH(person, user, &personmap) { /* Just reference the memory to catch memory errors.*/ RD_UT_ASSERT(strlen(person->name) > 0 && - strlen(person->surname) > 0 && - strlen(user) > 0, "bug"); + strlen(person->surname) > 0 && + strlen(user) > 0, + "bug"); } RD_MAP_DESTROY(&usermap); @@ -402,28 +385,25 @@ static int unittest_typed_map2 (void) { * * This is a more thorough test of the underlying hash map implementation. 
*/ -static int unittest_untyped_map (void) { +static int unittest_untyped_map(void) { rd_map_t rmap; int pass, i, r; - int cnt = 100000; + int cnt = 100000; int exp_cnt = 0, get_cnt = 0, iter_cnt = 0; const rd_map_elem_t *elem; - rd_ts_t ts = rd_clock(); + rd_ts_t ts = rd_clock(); rd_ts_t ts_get = 0; - rd_map_init(&rmap, cnt, - rd_map_str_cmp, - rd_map_str_hash, - rd_free, + rd_map_init(&rmap, cnt, rd_map_str_cmp, rd_map_str_hash, rd_free, rd_free); /* pass 0 is set,delete,overwrite,get * pass 1-5 is get */ - for (pass = 0 ; pass < 6 ; pass++) { + for (pass = 0; pass < 6; pass++) { if (pass == 1) ts_get = rd_clock(); - for (i = 1 ; i < cnt ; i++) { + for (i = 1; i < cnt; i++) { char key[10]; char val[64]; const char *val2; @@ -442,8 +422,8 @@ static int unittest_untyped_map (void) { } if (overwrite) { - rd_snprintf(val, sizeof(val), - "OVERWRITE=%d!", i); + rd_snprintf(val, sizeof(val), "OVERWRITE=%d!", + i); if (pass == 0) rd_map_set(&rmap, rd_strdup(key), rd_strdup(val)); @@ -452,7 +432,8 @@ static int unittest_untyped_map (void) { val2 = rd_map_get(&rmap, key); if (do_delete) - RD_UT_ASSERT(!val2, "map_get pass %d " + RD_UT_ASSERT(!val2, + "map_get pass %d " "returned value %s " "for deleted key %s", pass, val2, key); @@ -461,8 +442,8 @@ static int unittest_untyped_map (void) { "map_get pass %d: " "expected value %s, not %s, " "for key %s", - pass, val, - val2 ? val2 : "NULL", key); + pass, val, val2 ? 
val2 : "NULL", + key); if (pass == 0 && !do_delete) exp_cnt++; @@ -473,17 +454,16 @@ static int unittest_untyped_map (void) { } ts_get = rd_clock() - ts_get; - RD_UT_SAY("%d map_get iterations took %.3fms = %"PRId64"us/get", - get_cnt, (float)ts_get / 1000.0, - ts_get / get_cnt); + RD_UT_SAY("%d map_get iterations took %.3fms = %" PRId64 "us/get", + get_cnt, (float)ts_get / 1000.0, ts_get / get_cnt); RD_MAP_FOREACH_ELEM(elem, &rmap) { iter_cnt++; } r = (int)rd_map_cnt(&rmap); - RD_UT_ASSERT(r == exp_cnt, - "expected %d map entries, not %d", exp_cnt, r); + RD_UT_ASSERT(r == exp_cnt, "expected %d map entries, not %d", exp_cnt, + r); RD_UT_ASSERT(r == iter_cnt, "map_cnt() = %d, iteration gave %d elements", r, iter_cnt); @@ -491,14 +471,14 @@ static int unittest_untyped_map (void) { rd_map_destroy(&rmap); ts = rd_clock() - ts; - RD_UT_SAY("Total time over %d entries took %.3fms", - cnt, (float)ts / 1000.0); + RD_UT_SAY("Total time over %d entries took %.3fms", cnt, + (float)ts / 1000.0); RD_UT_PASS(); } -int unittest_map (void) { +int unittest_map(void) { int fails = 0; fails += unittest_untyped_map(); fails += unittest_typed_map(); diff --git a/src/rdmap.h b/src/rdmap.h index 458cd1b145..a79dcda06a 100644 --- a/src/rdmap.h +++ b/src/rdmap.h @@ -63,8 +63,8 @@ typedef struct rd_map_elem_s { * @struct Hash buckets (internal use). */ struct rd_map_buckets { - LIST_HEAD(, rd_map_elem_s) *p; /**< Hash buckets array */ - int cnt; /**< Bucket count */ + LIST_HEAD(, rd_map_elem_s) * p; /**< Hash buckets array */ + int cnt; /**< Bucket count */ }; @@ -72,23 +72,23 @@ struct rd_map_buckets { * @struct Hash map. */ typedef struct rd_map_s { - struct rd_map_buckets rmap_buckets; /**< Hash buckets */ - int rmap_cnt; /**< Element count */ + struct rd_map_buckets rmap_buckets; /**< Hash buckets */ + int rmap_cnt; /**< Element count */ - LIST_HEAD(, rd_map_elem_s) rmap_iter; /**< Element list for iterating - * over all elements. 
*/ + LIST_HEAD(, rd_map_elem_s) + rmap_iter; /**< Element list for iterating + * over all elements. */ - int (*rmap_cmp) (const void *a, const void *b); /**< Key comparator */ - unsigned int (*rmap_hash) (const void *key); /**< Key hash function */ - void (*rmap_destroy_key) (void *key); /**< Optional key free */ - void (*rmap_destroy_value) (void *value); /**< Optional value free */ + int (*rmap_cmp)(const void *a, const void *b); /**< Key comparator */ + unsigned int (*rmap_hash)(const void *key); /**< Key hash function */ + void (*rmap_destroy_key)(void *key); /**< Optional key free */ + void (*rmap_destroy_value)(void *value); /**< Optional value free */ void *rmap_opaque; } rd_map_t; - /** * @brief Set/overwrite value in map. * @@ -102,7 +102,7 @@ typedef struct rd_map_s { * * @returns the map element. */ -rd_map_elem_t *rd_map_set (rd_map_t *rmap, void *key, void *value); +rd_map_elem_t *rd_map_set(rd_map_t *rmap, void *key, void *value); /** @@ -111,7 +111,7 @@ rd_map_elem_t *rd_map_set (rd_map_t *rmap, void *key, void *value); * * The returned memory is still owned by the map. */ -void *rd_map_get (const rd_map_t *rmap, const void *key); +void *rd_map_get(const rd_map_t *rmap, const void *key); /** @@ -120,11 +120,11 @@ void *rd_map_get (const rd_map_t *rmap, const void *key); * The destroy_key and destroy_value functions (if set) will be used * to free the key and value memory. */ -void rd_map_delete (rd_map_t *rmap, const void *key); +void rd_map_delete(rd_map_t *rmap, const void *key); /** Key or Value Copy function signature. */ -typedef void *(rd_map_copy_t) (const void *key_or_value); +typedef void *(rd_map_copy_t)(const void *key_or_value); /** @@ -138,20 +138,21 @@ typedef void *(rd_map_copy_t) (const void *key_or_value); * @param value_copy Value copy callback. If NULL the \p dst value will just * reference the \p src value. 
*/ -void rd_map_copy (rd_map_t *dst, const rd_map_t *src, - rd_map_copy_t *key_copy, - rd_map_copy_t *value_copy); +void rd_map_copy(rd_map_t *dst, + const rd_map_t *src, + rd_map_copy_t *key_copy, + rd_map_copy_t *value_copy); /** * @returns the current number of elements in the map. */ -size_t rd_map_cnt (const rd_map_t *rmap); +size_t rd_map_cnt(const rd_map_t *rmap); /** * @returns true if map is empty, else false. */ -rd_bool_t rd_map_is_empty (const rd_map_t *rmap); +rd_bool_t rd_map_is_empty(const rd_map_t *rmap); /** @@ -161,30 +162,27 @@ rd_bool_t rd_map_is_empty (const rd_map_t *rmap); * * @remark This is part of the untyped generic API. */ -#define RD_MAP_FOREACH_ELEM(ELEM,RMAP) \ - for (rd_map_iter_begin((RMAP), &(ELEM)) ; \ - rd_map_iter(&(ELEM)) ; \ +#define RD_MAP_FOREACH_ELEM(ELEM, RMAP) \ + for (rd_map_iter_begin((RMAP), &(ELEM)); rd_map_iter(&(ELEM)); \ rd_map_iter_next(&(ELEM))) /** * @brief Begin iterating \p rmap, first element is set in \p *elem. */ -void rd_map_iter_begin (const rd_map_t *rmap, const rd_map_elem_t **elem); +void rd_map_iter_begin(const rd_map_t *rmap, const rd_map_elem_t **elem); /** * @returns 1 if \p *elem is a valid iteration element, else 0. */ -static RD_INLINE RD_UNUSED -int rd_map_iter (const rd_map_elem_t **elem) { +static RD_INLINE RD_UNUSED int rd_map_iter(const rd_map_elem_t **elem) { return *elem != NULL; } /** * @brief Advances the iteration to the next element. */ -static RD_INLINE RD_UNUSED -void rd_map_iter_next (const rd_map_elem_t **elem) { +static RD_INLINE RD_UNUSED void rd_map_iter_next(const rd_map_elem_t **elem) { *elem = LIST_NEXT(*elem, link); } @@ -208,23 +206,24 @@ void rd_map_iter_next (const rd_map_elem_t **elem) { * * @remarks The map is not thread-safe. 
*/ -void rd_map_init (rd_map_t *rmap, size_t expected_cnt, - int (*cmp) (const void *a, const void *b), - unsigned int (*hash) (const void *key), - void (*destroy_key) (void *key), - void (*destroy_value) (void *value)); +void rd_map_init(rd_map_t *rmap, + size_t expected_cnt, + int (*cmp)(const void *a, const void *b), + unsigned int (*hash)(const void *key), + void (*destroy_key)(void *key), + void (*destroy_value)(void *value)); /** * @brief Internal use */ -struct rd_map_buckets rd_map_alloc_buckets (size_t expected_cnt); +struct rd_map_buckets rd_map_alloc_buckets(size_t expected_cnt); /** * @brief Empty the map and free all elements. */ -void rd_map_clear (rd_map_t *rmap); +void rd_map_clear(rd_map_t *rmap); /** @@ -236,21 +235,19 @@ void rd_map_clear (rd_map_t *rmap); * * @sa rd_map_clear() */ -void rd_map_destroy (rd_map_t *rmap); +void rd_map_destroy(rd_map_t *rmap); /** * @brief String comparator for (const char *) keys. */ -int rd_map_str_cmp (const void *a, const void *b); +int rd_map_str_cmp(const void *a, const void *b); /** * @brief String hash function (djb2) for (const char *) keys. */ -unsigned int rd_map_str_hash (const void *a); - - +unsigned int rd_map_str_hash(const void *a); @@ -264,12 +261,12 @@ unsigned int rd_map_str_hash (const void *a); * @brief Define a typed map type which can later be used with * RD_MAP_INITIALIZER() and typed RD_MAP_*() API. 
*/ -#define RD_MAP_TYPE(KEY_TYPE,VALUE_TYPE) \ - struct { \ - rd_map_t rmap; \ - KEY_TYPE key; \ - VALUE_TYPE value; \ - const rd_map_elem_t *elem; \ +#define RD_MAP_TYPE(KEY_TYPE, VALUE_TYPE) \ + struct { \ + rd_map_t rmap; \ + KEY_TYPE key; \ + VALUE_TYPE value; \ + const rd_map_elem_t *elem; \ } /** @@ -292,15 +289,16 @@ unsigned int rd_map_str_hash (const void *a); * * @sa rd_map_init() */ -#define RD_MAP_INITIALIZER(EXPECTED_CNT,CMP,HASH,DESTROY_KEY,DESTROY_VALUE) \ - { \ - .rmap = { \ - .rmap_buckets = rd_map_alloc_buckets(EXPECTED_CNT), \ - .rmap_cmp = CMP, \ - .rmap_hash = HASH, \ - .rmap_destroy_key = DESTROY_KEY, \ - .rmap_destroy_value = DESTROY_VALUE \ - } \ +#define RD_MAP_INITIALIZER(EXPECTED_CNT, CMP, HASH, DESTROY_KEY, \ + DESTROY_VALUE) \ + { \ + .rmap = { \ + .rmap_buckets = rd_map_alloc_buckets(EXPECTED_CNT), \ + .rmap_cmp = CMP, \ + .rmap_hash = HASH, \ + .rmap_destroy_key = DESTROY_KEY, \ + .rmap_destroy_value = DESTROY_VALUE \ + } \ } @@ -315,16 +313,15 @@ unsigned int rd_map_str_hash (const void *a); * * @sa RD_MAP_INITIALIZER() */ -#define RD_MAP_LOCAL_INITIALIZER(RMAP, EXPECTED_CNT, \ - KEY_TYPE, VALUE_TYPE, \ - CMP, HASH, DESTROY_KEY, DESTROY_VALUE) \ - struct { \ - rd_map_t rmap; \ - KEY_TYPE key; \ - VALUE_TYPE value; \ - const rd_map_elem_t *elem; \ - } RMAP = RD_MAP_INITIALIZER(EXPECTED_CNT,CMP,HASH, \ - DESTROY_KEY,DESTROY_VALUE) +#define RD_MAP_LOCAL_INITIALIZER(RMAP, EXPECTED_CNT, KEY_TYPE, VALUE_TYPE, \ + CMP, HASH, DESTROY_KEY, DESTROY_VALUE) \ + struct { \ + rd_map_t rmap; \ + KEY_TYPE key; \ + VALUE_TYPE value; \ + const rd_map_elem_t *elem; \ + } RMAP = RD_MAP_INITIALIZER(EXPECTED_CNT, CMP, HASH, DESTROY_KEY, \ + DESTROY_VALUE) /** @@ -332,9 +329,9 @@ unsigned int rd_map_str_hash (const void *a); * * @sa rd_map_init() */ -#define RD_MAP_INIT(RMAP,EXPECTED_CNT,CMP,HASH,DESTROY_KEY,DESTROY_VALUE) \ - rd_map_init(&(RMAP)->rmap, EXPECTED_CNT, CMP, HASH, \ - DESTROY_KEY, DESTROY_VALUE) +#define RD_MAP_INIT(RMAP, EXPECTED_CNT, CMP, 
HASH, DESTROY_KEY, DESTROY_VALUE) \ + rd_map_init(&(RMAP)->rmap, EXPECTED_CNT, CMP, HASH, DESTROY_KEY, \ + DESTROY_VALUE) /** @@ -347,21 +344,19 @@ unsigned int rd_map_str_hash (const void *a); * * @sa rd_map_set() */ -#define RD_MAP_SET(RMAP,KEY,VALUE) \ - ((RMAP)->key = KEY, \ - (RMAP)->value = VALUE, \ - rd_map_set(&(RMAP)->rmap, \ - (void *)(RMAP)->key, \ - (void *)(RMAP)->value)) \ +#define RD_MAP_SET(RMAP, KEY, VALUE) \ + ((RMAP)->key = KEY, (RMAP)->value = VALUE, \ + rd_map_set(&(RMAP)->rmap, (void *)(RMAP)->key, \ + (void *)(RMAP)->value)) /** * @brief Typed hash map: Get value for key. * * @sa rd_map_get() */ -#define RD_MAP_GET(RMAP,KEY) \ - ((RMAP)->key = (KEY), \ - (RMAP)->value = rd_map_get(&(RMAP)->rmap, (RMAP)->key), \ +#define RD_MAP_GET(RMAP, KEY) \ + ((RMAP)->key = (KEY), \ + (RMAP)->value = rd_map_get(&(RMAP)->rmap, (RMAP)->key), \ (RMAP)->value) @@ -370,11 +365,10 @@ unsigned int rd_map_str_hash (const void *a); * @brief Get value for key. If key does not exist in map a new * entry is added using the DEFAULT_CODE. */ -#define RD_MAP_GET_OR_SET(RMAP,KEY,DEFAULT_CODE) \ - (RD_MAP_GET(RMAP,KEY) ? \ - (RMAP)->value : \ - (RD_MAP_SET(RMAP, (RMAP)->key, DEFAULT_CODE), \ - (RMAP)->value)) +#define RD_MAP_GET_OR_SET(RMAP, KEY, DEFAULT_CODE) \ + (RD_MAP_GET(RMAP, KEY) \ + ? (RMAP)->value \ + : (RD_MAP_SET(RMAP, (RMAP)->key, DEFAULT_CODE), (RMAP)->value)) /** @@ -385,9 +379,8 @@ unsigned int rd_map_str_hash (const void *a); * * @sa rd_map_delete() */ -#define RD_MAP_DELETE(RMAP,KEY) \ - ((RMAP)->key = (KEY), \ - rd_map_delete(&(RMAP)->rmap, (RMAP)->key)) \ +#define RD_MAP_DELETE(RMAP, KEY) \ + ((RMAP)->key = (KEY), rd_map_delete(&(RMAP)->rmap, (RMAP)->key)) /** @@ -401,10 +394,11 @@ unsigned int rd_map_str_hash (const void *a); * @param VALUE_COPY Value copy callback. If NULL the \p DST value will just * reference the \p SRC value. 
*/ -#define RD_MAP_COPY(DST,SRC,KEY_COPY,VALUE_COPY) do { \ - if ((DST) != (SRC))/*implicit type-check*/ \ - rd_map_copy(&(DST)->rmap, &(SRC)->rmap, \ - KEY_COPY, VALUE_COPY); \ +#define RD_MAP_COPY(DST, SRC, KEY_COPY, VALUE_COPY) \ + do { \ + if ((DST) != (SRC)) /*implicit type-check*/ \ + rd_map_copy(&(DST)->rmap, &(SRC)->rmap, KEY_COPY, \ + VALUE_COPY); \ } while (0) @@ -413,7 +407,7 @@ unsigned int rd_map_str_hash (const void *a); * * @sa rd_map_clear() */ -#define RD_MAP_CLEAR(RMAP) rd_map_clear(&(RMAP)->rmap) +#define RD_MAP_CLEAR(RMAP) rd_map_clear(&(RMAP)->rmap) /** @@ -421,7 +415,7 @@ unsigned int rd_map_str_hash (const void *a); * * @sa rd_map_destroy() */ -#define RD_MAP_DESTROY(RMAP) rd_map_destroy(&(RMAP)->rmap) +#define RD_MAP_DESTROY(RMAP) rd_map_destroy(&(RMAP)->rmap) /** @@ -429,10 +423,11 @@ unsigned int rd_map_str_hash (const void *a); * * @sa rd_map_destroy() */ -#define RD_MAP_DESTROY_AND_FREE(RMAP) do { \ - rd_map_destroy(&(RMAP)->rmap); \ - rd_free(RMAP); \ -} while (0) +#define RD_MAP_DESTROY_AND_FREE(RMAP) \ + do { \ + rd_map_destroy(&(RMAP)->rmap); \ + rd_free(RMAP); \ + } while (0) /** @@ -449,16 +444,13 @@ unsigned int rd_map_str_hash (const void *a); * * @remark The \p RMAP may not be const. 
*/ -#define RD_MAP_FOREACH(K,V,RMAP) \ - for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), \ - (K) = NULL, (V) = NULL ; \ - rd_map_iter(&(RMAP)->elem) && \ - ((RMAP)->key = (void *)(RMAP)->elem->key, \ - (K) = (RMAP)->key, \ - (RMAP)->value = (void *)(RMAP)->elem->value, \ - (V) = (RMAP)->value, \ - rd_map_iter_next(&(RMAP)->elem), \ - rd_true) ; ) \ +#define RD_MAP_FOREACH(K, V, RMAP) \ + for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), (K) = NULL, \ + (V) = NULL; \ + rd_map_iter(&(RMAP)->elem) && \ + ((RMAP)->key = (void *)(RMAP)->elem->key, (K) = (RMAP)->key, \ + (RMAP)->value = (void *)(RMAP)->elem->value, (V) = (RMAP)->value, \ + rd_map_iter_next(&(RMAP)->elem), rd_true);) /** @@ -475,24 +467,21 @@ unsigned int rd_map_str_hash (const void *a); * * @remark The \p RMAP may not be const. */ -#define RD_MAP_FOREACH_KEY(K,RMAP) \ - for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), \ - (K) = NULL ; \ - rd_map_iter(&(RMAP)->elem) && \ - ((RMAP)->key = (void *)(RMAP)->elem->key, \ - (K) = (RMAP)->key, \ - rd_map_iter_next(&(RMAP)->elem), \ - rd_true) ; ) \ +#define RD_MAP_FOREACH_KEY(K, RMAP) \ + for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), (K) = NULL; \ + rd_map_iter(&(RMAP)->elem) && \ + ((RMAP)->key = (void *)(RMAP)->elem->key, (K) = (RMAP)->key, \ + rd_map_iter_next(&(RMAP)->elem), rd_true);) /** * @returns the number of elements in the map. */ -#define RD_MAP_CNT(RMAP) rd_map_cnt(&(RMAP)->rmap) +#define RD_MAP_CNT(RMAP) rd_map_cnt(&(RMAP)->rmap) /** * @returns true if map is empty, else false. */ -#define RD_MAP_IS_EMPTY(RMAP) rd_map_is_empty(&(RMAP)->rmap) +#define RD_MAP_IS_EMPTY(RMAP) rd_map_is_empty(&(RMAP)->rmap) #endif /* _RDMAP_H_ */ diff --git a/src/rdmurmur2.c b/src/rdmurmur2.c index dfc99da9f2..c3e4095d4c 100644 --- a/src/rdmurmur2.c +++ b/src/rdmurmur2.c @@ -38,7 +38,14 @@ * into the same function. 
*/ -#define MM_MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; } +#define MM_MIX(h, k, m) \ + { \ + k *= m; \ + k ^= k >> r; \ + k *= m; \ + h *= m; \ + h ^= k; \ + } /*----------------------------------------------------------------------------- // Based on MurmurHashNeutral2, by Austin Appleby @@ -47,11 +54,11 @@ // Half the speed though, alas. // */ -uint32_t rd_murmur2 (const void *key, size_t len) { +uint32_t rd_murmur2(const void *key, size_t len) { const uint32_t seed = 0x9747b28c; - const uint32_t m = 0x5bd1e995; - const int r = 24; - uint32_t h = seed ^ (uint32_t)len; + const uint32_t m = 0x5bd1e995; + const int r = 24; + uint32_t h = seed ^ (uint32_t)len; const unsigned char *tail; if (likely(((intptr_t)key & 0x3) == 0)) { @@ -61,7 +68,7 @@ uint32_t rd_murmur2 (const void *key, size_t len) { while (len >= 4) { uint32_t k = htole32(*(uint32_t *)data); - MM_MIX(h,k,m); + MM_MIX(h, k, m); data++; len -= 4; @@ -76,12 +83,12 @@ uint32_t rd_murmur2 (const void *key, size_t len) { while (len >= 4) { uint32_t k; - k = data[0]; + k = data[0]; k |= data[1] << 8; k |= data[2] << 16; k |= data[3] << 24; - MM_MIX(h,k,m); + MM_MIX(h, k, m); data += 4; len -= 4; @@ -91,11 +98,13 @@ uint32_t rd_murmur2 (const void *key, size_t len) { } /* Read remaining sub-word */ - switch(len) - { - case 3: h ^= tail[2] << 16; - case 2: h ^= tail[1] << 8; - case 1: h ^= tail[0]; + switch (len) { + case 3: + h ^= tail[2] << 16; + case 2: + h ^= tail[1] << 8; + case 1: + h ^= tail[0]; h *= m; }; @@ -112,44 +121,43 @@ uint32_t rd_murmur2 (const void *key, size_t len) { /** * @brief Unittest for rd_murmur2() */ -int unittest_murmur2 (void) { +int unittest_murmur2(void) { const char *short_unaligned = "1234"; - const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs"; - const char *keysToTest[] = { - "kafka", - "giberish123456789", - short_unaligned, - short_unaligned+1, - short_unaligned+2, - short_unaligned+3, - unaligned, - unaligned+1, - unaligned+2, - unaligned+3, - 
"", - NULL, + const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs"; + const char *keysToTest[] = { + "kafka", + "giberish123456789", + short_unaligned, + short_unaligned + 1, + short_unaligned + 2, + short_unaligned + 3, + unaligned, + unaligned + 1, + unaligned + 2, + unaligned + 3, + "", + NULL, }; const int32_t java_murmur2_results[] = { - 0xd067cf64, // kafka - 0x8f552b0c, // giberish123456789 - 0x9fc97b14, // short_unaligned - 0xe7c009ca, // short_unaligned+1 - 0x873930da, // short_unaligned+2 - 0x5a4b5ca1, // short_unaligned+3 - 0x78424f1c, // unaligned - 0x4a62b377, // unaligned+1 - 0xe0e4e09e, // unaligned+2 - 0x62b8b43f, // unaligned+3 - 0x106e08d9, // "" - 0x106e08d9, // NULL + 0xd067cf64, // kafka + 0x8f552b0c, // giberish123456789 + 0x9fc97b14, // short_unaligned + 0xe7c009ca, // short_unaligned+1 + 0x873930da, // short_unaligned+2 + 0x5a4b5ca1, // short_unaligned+3 + 0x78424f1c, // unaligned + 0x4a62b377, // unaligned+1 + 0xe0e4e09e, // unaligned+2 + 0x62b8b43f, // unaligned+3 + 0x106e08d9, // "" + 0x106e08d9, // NULL }; size_t i; for (i = 0; i < RD_ARRAYSIZE(keysToTest); i++) { - uint32_t h = rd_murmur2(keysToTest[i], - keysToTest[i] ? - strlen(keysToTest[i]) : 0); + uint32_t h = rd_murmur2( + keysToTest[i], keysToTest[i] ? 
strlen(keysToTest[i]) : 0); RD_UT_ASSERT((int32_t)h == java_murmur2_results[i], "Calculated murmur2 hash 0x%x for \"%s\", " "expected 0x%x", diff --git a/src/rdmurmur2.h b/src/rdmurmur2.h index 40aa17b560..5991caa50c 100644 --- a/src/rdmurmur2.h +++ b/src/rdmurmur2.h @@ -29,7 +29,7 @@ #ifndef __RDMURMUR2___H__ #define __RDMURMUR2___H__ -uint32_t rd_murmur2 (const void *key, size_t len); -int unittest_murmur2 (void); +uint32_t rd_murmur2(const void *key, size_t len); +int unittest_murmur2(void); -#endif // __RDMURMUR2___H__ +#endif // __RDMURMUR2___H__ diff --git a/src/rdports.c b/src/rdports.c index a34195b9c4..15c57e9289 100644 --- a/src/rdports.c +++ b/src/rdports.c @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2016 Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2016 Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ /** * System portability @@ -41,17 +41,18 @@ * on Win32 (qsort_s), OSX/FreeBSD (qsort_r with diff args): * http://forum.theorex.tech/t/different-declarations-of-qsort-r-on-mac-and-linux/93/2 */ -static RD_TLS int (*rd_qsort_r_cmp) (const void *, const void *, void *); +static RD_TLS int (*rd_qsort_r_cmp)(const void *, const void *, void *); static RD_TLS void *rd_qsort_r_arg; -static RD_UNUSED -int rd_qsort_r_trampoline (const void *a, const void *b) { +static RD_UNUSED int rd_qsort_r_trampoline(const void *a, const void *b) { return rd_qsort_r_cmp(a, b, rd_qsort_r_arg); } -void rd_qsort_r (void *base, size_t nmemb, size_t size, - int (*compar)(const void *, const void *, void *), - void *arg) { +void rd_qsort_r(void *base, + size_t nmemb, + size_t size, + int (*compar)(const void *, const void *, void *), + void *arg) { rd_qsort_r_cmp = compar; rd_qsort_r_arg = arg; qsort(base, nmemb, size, rd_qsort_r_trampoline); diff --git a/src/rdports.h b/src/rdports.h index 3afe6c4c9a..0cdbcd85fc 100644 --- a/src/rdports.h +++ b/src/rdports.h @@ -1,36 +1,38 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2016 Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2016 Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ #ifndef _RDPORTS_H_ #define _RDPORTS_H_ -void rd_qsort_r (void *base, size_t nmemb, size_t size, - int (*compar)(const void *, const void *, void *), - void *arg); +void rd_qsort_r(void *base, + size_t nmemb, + size_t size, + int (*compar)(const void *, const void *, void *), + void *arg); #endif /* _RDPORTS_H_ */ diff --git a/src/rdposix.h b/src/rdposix.h index 5a2bbeb873..deb1fe009f 100644 --- a/src/rdposix.h +++ b/src/rdposix.h @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015 Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2015 Magnus Edenhill + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ /** * POSIX system support @@ -41,60 +41,60 @@ #include /** -* Types -*/ + * Types + */ /** * Annotations, attributes, optimizers */ #ifndef likely -#define likely(x) __builtin_expect((x),1) +#define likely(x) __builtin_expect((x), 1) #endif #ifndef unlikely -#define unlikely(x) __builtin_expect((x),0) +#define unlikely(x) __builtin_expect((x), 0) #endif -#define RD_UNUSED __attribute__((unused)) -#define RD_INLINE inline +#define RD_UNUSED __attribute__((unused)) +#define RD_INLINE inline #define RD_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) -#define RD_NORETURN __attribute__((noreturn)) -#define RD_IS_CONSTANT(p) __builtin_constant_p((p)) -#define RD_TLS __thread +#define RD_NORETURN __attribute__((noreturn)) +#define RD_IS_CONSTANT(p) __builtin_constant_p((p)) +#define RD_TLS __thread /** -* Allocation -*/ + * Allocation + */ #if !defined(__FreeBSD__) && !defined(__OpenBSD__) /* alloca(3) is in stdlib on FreeBSD */ #include #endif -#define rd_alloca(N) alloca(N) +#define rd_alloca(N) alloca(N) /** -* Strings, formatting, printf, .. -*/ + * Strings, formatting, printf, .. + */ /* size_t and ssize_t format strings */ -#define PRIusz "zu" -#define PRIdsz "zd" +#define PRIusz "zu" +#define PRIdsz "zd" #ifndef RD_FORMAT -#define RD_FORMAT(...) __attribute__((format (__VA_ARGS__))) +#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__))) #endif #define rd_snprintf(...) snprintf(__VA_ARGS__) #define rd_vsnprintf(...) 
vsnprintf(__VA_ARGS__) -#define rd_strcasecmp(A,B) strcasecmp(A,B) -#define rd_strncasecmp(A,B,N) strncasecmp(A,B,N) +#define rd_strcasecmp(A, B) strcasecmp(A, B) +#define rd_strncasecmp(A, B, N) strncasecmp(A, B, N) #ifdef HAVE_STRCASESTR -#define rd_strcasestr(HAYSTACK,NEEDLE) strcasestr(HAYSTACK,NEEDLE) +#define rd_strcasestr(HAYSTACK, NEEDLE) strcasestr(HAYSTACK, NEEDLE) #else -#define rd_strcasestr(HAYSTACK,NEEDLE) _rd_strcasestr(HAYSTACK,NEEDLE) +#define rd_strcasestr(HAYSTACK, NEEDLE) _rd_strcasestr(HAYSTACK, NEEDLE) #endif @@ -118,8 +118,8 @@ static RD_INLINE RD_UNUSED const char *rd_strerror(int err) { * picked up anyway. */ r = strerror_r(err, ret, sizeof(ret)); if (unlikely(r)) - rd_snprintf(ret, sizeof(ret), - "strerror_r(%d) failed (ret %d)", err, r); + rd_snprintf(ret, sizeof(ret), "strerror_r(%d) failed (ret %d)", + err, r); return ret; #endif } @@ -134,15 +134,14 @@ static RD_INLINE RD_UNUSED const char *rd_strerror(int err) { #include "rdatomic.h" /** -* Misc -*/ + * Misc + */ /** * Microsecond sleep. * Will retry on signal interrupt unless *terminate is true. */ -static RD_INLINE RD_UNUSED -void rd_usleep (int usec, rd_atomic32_t *terminate) { +static RD_INLINE RD_UNUSED void rd_usleep(int usec, rd_atomic32_t *terminate) { struct timespec req = {usec / 1000000, (long)(usec % 1000000) * 1000}; /* Retry until complete (issue #272), unless terminating. 
*/ @@ -153,23 +152,23 @@ void rd_usleep (int usec, rd_atomic32_t *terminate) { - -#define rd_gettimeofday(tv,tz) gettimeofday(tv,tz) +#define rd_gettimeofday(tv, tz) gettimeofday(tv, tz) #ifndef __COVERITY__ -#define rd_assert(EXPR) assert(EXPR) +#define rd_assert(EXPR) assert(EXPR) #else extern void __coverity_panic__(void); -#define rd_assert(EXPR) do { \ - if (!(EXPR)) \ - __coverity_panic__(); \ +#define rd_assert(EXPR) \ + do { \ + if (!(EXPR)) \ + __coverity_panic__(); \ } while (0) #endif -static RD_INLINE RD_UNUSED -const char *rd_getenv (const char *env, const char *def) { +static RD_INLINE RD_UNUSED const char *rd_getenv(const char *env, + const char *def) { const char *tmp; tmp = getenv(env); if (tmp && *tmp) @@ -181,13 +180,14 @@ const char *rd_getenv (const char *env, const char *def) { /** * Empty struct initializer */ -#define RD_ZERO_INIT {} +#define RD_ZERO_INIT \ + {} /** * Sockets, IO */ - /** @brief Socket type */ +/** @brief Socket type */ typedef int rd_socket_t; /** @brief Socket API error return value */ @@ -204,16 +204,16 @@ typedef int rd_socket_t; typedef struct pollfd rd_pollfd_t; /** @brief poll(2) */ -#define rd_socket_poll(POLLFD,FDCNT,TIMEOUT_MS) poll(POLLFD,FDCNT,TIMEOUT_MS) +#define rd_socket_poll(POLLFD, FDCNT, TIMEOUT_MS) \ + poll(POLLFD, FDCNT, TIMEOUT_MS) /** * @brief Set socket to non-blocking * @returns 0 on success or errno on failure. 
*/ -static RD_UNUSED int rd_fd_set_nonblocking (int fd) { +static RD_UNUSED int rd_fd_set_nonblocking(int fd) { int fl = fcntl(fd, F_GETFL, 0); - if (fl == -1 || - fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1) + if (fl == -1 || fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1) return errno; return 0; } @@ -222,15 +222,14 @@ static RD_UNUSED int rd_fd_set_nonblocking (int fd) { * @brief Create non-blocking pipe * @returns 0 on success or errno on failure */ -static RD_UNUSED int rd_pipe_nonblocking (rd_socket_t *fds) { - if (pipe(fds) == -1 || - rd_fd_set_nonblocking(fds[0]) == -1 || +static RD_UNUSED int rd_pipe_nonblocking(rd_socket_t *fds) { + if (pipe(fds) == -1 || rd_fd_set_nonblocking(fds[0]) == -1 || rd_fd_set_nonblocking(fds[1])) return errno; - /* Minimize buffer sizes to avoid a large number - * of signaling bytes to accumulate when - * io-signalled queue is not being served for a while. */ + /* Minimize buffer sizes to avoid a large number + * of signaling bytes to accumulate when + * io-signalled queue is not being served for a while. */ #ifdef F_SETPIPE_SZ /* Linux automatically rounds the pipe size up * to the minimum size. */ @@ -239,9 +238,9 @@ static RD_UNUSED int rd_pipe_nonblocking (rd_socket_t *fds) { #endif return 0; } -#define rd_pipe(fds) pipe(fds) -#define rd_read(fd,buf,sz) read(fd,buf,sz) -#define rd_write(fd,buf,sz) write(fd,buf,sz) -#define rd_close(fd) close(fd) +#define rd_pipe(fds) pipe(fds) +#define rd_read(fd, buf, sz) read(fd, buf, sz) +#define rd_write(fd, buf, sz) write(fd, buf, sz) +#define rd_close(fd) close(fd) #endif /* _RDPOSIX_H_ */ diff --git a/src/rdrand.c b/src/rdrand.c index f4e210f619..e36d79380b 100644 --- a/src/rdrand.c +++ b/src/rdrand.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012, Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -31,40 +31,40 @@ #include "rdtime.h" #include "tinycthread.h" -int rd_jitter (int low, int high) { - int rand_num; +int rd_jitter(int low, int high) { + int rand_num; #if HAVE_RAND_R - static RD_TLS unsigned int seed = 0; + static RD_TLS unsigned int seed = 0; - /* Initial seed with time+thread id */ - if (unlikely(seed == 0)) { - struct timeval tv; - rd_gettimeofday(&tv, NULL); - seed = (unsigned int)(tv.tv_usec / 1000); - seed ^= (unsigned int)(intptr_t)thrd_current(); - } + /* Initial seed with time+thread id */ + if (unlikely(seed == 0)) { + struct timeval tv; + rd_gettimeofday(&tv, NULL); + seed = (unsigned int)(tv.tv_usec / 1000); + seed ^= (unsigned int)(intptr_t)thrd_current(); + } - rand_num = rand_r(&seed); + rand_num = rand_r(&seed); #else - rand_num = rand(); + rand_num = rand(); #endif - return (low + (rand_num % ((high-low)+1))); + return (low + (rand_num % ((high - low) + 1))); } -void rd_array_shuffle (void *base, size_t nmemb, size_t entry_size) { - int i; - void *tmp = rd_alloca(entry_size); +void rd_array_shuffle(void *base, size_t nmemb, size_t entry_size) { + int i; + void *tmp = rd_alloca(entry_size); - /* FIXME: Optimized version for word-sized entries. */ + /* FIXME: Optimized version for word-sized entries. 
*/ - for (i = (int) nmemb - 1 ; i > 0 ; i--) { - int j = rd_jitter(0, i); - if (unlikely(i == j)) - continue; + for (i = (int)nmemb - 1; i > 0; i--) { + int j = rd_jitter(0, i); + if (unlikely(i == j)) + continue; - memcpy(tmp, (char *)base + (i*entry_size), entry_size); - memcpy((char *)base+(i*entry_size), - (char *)base+(j*entry_size), entry_size); - memcpy((char *)base+(j*entry_size), tmp, entry_size); - } + memcpy(tmp, (char *)base + (i * entry_size), entry_size); + memcpy((char *)base + (i * entry_size), + (char *)base + (j * entry_size), entry_size); + memcpy((char *)base + (j * entry_size), tmp, entry_size); + } } diff --git a/src/rdrand.h b/src/rdrand.h index ed2acd6fa3..0e3a927c2c 100644 --- a/src/rdrand.h +++ b/src/rdrand.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -33,11 +33,11 @@ /** * Returns a random (using rand(3)) number between 'low'..'high' (inclusive). */ -int rd_jitter (int low, int high); +int rd_jitter(int low, int high); /** * Shuffles (randomizes) an array using the modern Fisher-Yates algorithm. 
*/ -void rd_array_shuffle (void *base, size_t nmemb, size_t entry_size); +void rd_array_shuffle(void *base, size_t nmemb, size_t entry_size); #endif /* _RDRAND_H_ */ diff --git a/src/rdregex.c b/src/rdregex.c index 71e68478ca..0c70cb334b 100644 --- a/src/rdregex.c +++ b/src/rdregex.c @@ -34,14 +34,14 @@ #if HAVE_REGEX #include struct rd_regex_s { - regex_t re; + regex_t re; }; #else #include "regexp.h" struct rd_regex_s { - Reprog *re; + Reprog *re; }; #endif @@ -49,13 +49,13 @@ struct rd_regex_s { /** * @brief Destroy compiled regex */ -void rd_regex_destroy (rd_regex_t *re) { +void rd_regex_destroy(rd_regex_t *re) { #if HAVE_REGEX - regfree(&re->re); + regfree(&re->re); #else - re_regfree(re->re); + re_regfree(re->re); #endif - rd_free(re); + rd_free(re); } @@ -64,31 +64,31 @@ void rd_regex_destroy (rd_regex_t *re) { * @returns Compiled regex object on success on error. */ rd_regex_t * -rd_regex_comp (const char *pattern, char *errstr, size_t errstr_size) { - rd_regex_t *re = rd_calloc(1, sizeof(*re)); +rd_regex_comp(const char *pattern, char *errstr, size_t errstr_size) { + rd_regex_t *re = rd_calloc(1, sizeof(*re)); #if HAVE_REGEX - int r; - - r = regcomp(&re->re, pattern, REG_EXTENDED|REG_NOSUB); - if (r) { - if (errstr) - regerror(r, &re->re, errstr, errstr_size); - rd_free(re); - return NULL; - } + int r; + + r = regcomp(&re->re, pattern, REG_EXTENDED | REG_NOSUB); + if (r) { + if (errstr) + regerror(r, &re->re, errstr, errstr_size); + rd_free(re); + return NULL; + } #else - const char *errstr2; + const char *errstr2; - re->re = re_regcomp(pattern, 0, &errstr2); - if (!re->re) { + re->re = re_regcomp(pattern, 0, &errstr2); + if (!re->re) { if (errstr) rd_strlcpy(errstr, errstr2, errstr_size); - rd_free(re); - return NULL; - } + rd_free(re); + return NULL; + } #endif - return re; + return re; } @@ -96,11 +96,11 @@ rd_regex_comp (const char *pattern, char *errstr, size_t errstr_size) { * @brief Match \p str to pre-compiled regex \p re * @returns 1 on match, 
else 0 */ -int rd_regex_exec (rd_regex_t *re, const char *str) { +int rd_regex_exec(rd_regex_t *re, const char *str) { #if HAVE_REGEX - return regexec(&re->re, str, 0, NULL, 0) != REG_NOMATCH; + return regexec(&re->re, str, 0, NULL, 0) != REG_NOMATCH; #else - return !re_regexec(re->re, str, NULL, 0); + return !re_regexec(re->re, str, NULL, 0); #endif } @@ -112,43 +112,45 @@ int rd_regex_exec (rd_regex_t *re, const char *str) { * in which case a human readable error string is written to * \p errstr (if not NULL). */ -int rd_regex_match (const char *pattern, const char *str, - char *errstr, size_t errstr_size) { -#if HAVE_REGEX /* use libc regex */ - regex_t re; - int r; - - /* FIXME: cache compiled regex */ - r = regcomp(&re, pattern, REG_EXTENDED|REG_NOSUB); - if (r) { - if (errstr) - regerror(r, &re, errstr, errstr_size); - return 0; - } +int rd_regex_match(const char *pattern, + const char *str, + char *errstr, + size_t errstr_size) { +#if HAVE_REGEX /* use libc regex */ + regex_t re; + int r; + + /* FIXME: cache compiled regex */ + r = regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB); + if (r) { + if (errstr) + regerror(r, &re, errstr, errstr_size); + return 0; + } - r = regexec(&re, str, 0, NULL, 0) != REG_NOMATCH; + r = regexec(&re, str, 0, NULL, 0) != REG_NOMATCH; - regfree(&re); + regfree(&re); - return r; + return r; #else /* Using regexp.h from minilibs (included) */ - Reprog *re; - int r; - const char *errstr2; + Reprog *re; + int r; + const char *errstr2; - /* FIXME: cache compiled regex */ - re = re_regcomp(pattern, 0, &errstr2); - if (!re) { + /* FIXME: cache compiled regex */ + re = re_regcomp(pattern, 0, &errstr2); + if (!re) { if (errstr) rd_strlcpy(errstr, errstr2, errstr_size); - return -1; - } + return -1; + } - r = !re_regexec(re, str, NULL, 0); + r = !re_regexec(re, str, NULL, 0); - re_regfree(re); + re_regfree(re); - return r; + return r; #endif } diff --git a/src/rdregex.h b/src/rdregex.h index 26dbb30ae4..135229d626 100644 --- a/src/rdregex.h 
+++ b/src/rdregex.h @@ -30,11 +30,14 @@ typedef struct rd_regex_s rd_regex_t; -void rd_regex_destroy (rd_regex_t *re); -rd_regex_t *rd_regex_comp (const char *pattern, char *errstr, size_t errstr_size); -int rd_regex_exec (rd_regex_t *re, const char *str); +void rd_regex_destroy(rd_regex_t *re); +rd_regex_t * +rd_regex_comp(const char *pattern, char *errstr, size_t errstr_size); +int rd_regex_exec(rd_regex_t *re, const char *str); -int rd_regex_match (const char *pattern, const char *str, - char *errstr, size_t errstr_size); +int rd_regex_match(const char *pattern, + const char *str, + char *errstr, + size_t errstr_size); #endif /* _RDREGEX_H_ */ diff --git a/src/rdsignal.h b/src/rdsignal.h index c8e2344b5a..a2c0de1b0c 100644 --- a/src/rdsignal.h +++ b/src/rdsignal.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -31,27 +31,27 @@ #include -#define RD_SIG_ALL -1 -#define RD_SIG_END -2 +#define RD_SIG_ALL -1 +#define RD_SIG_END -2 extern sigset_t rd_intr_sigset; -extern int rd_intr_blocked; +extern int rd_intr_blocked; -static __inline void rd_intr_block (void) RD_UNUSED; -static __inline void rd_intr_block (void) { - if (rd_intr_blocked++) - return; +static __inline void rd_intr_block(void) RD_UNUSED; +static __inline void rd_intr_block(void) { + if (rd_intr_blocked++) + return; - sigprocmask(SIG_BLOCK, &rd_intr_sigset, NULL); + sigprocmask(SIG_BLOCK, &rd_intr_sigset, NULL); } -static __inline void rd_intr_unblock (void) RD_UNUSED; -static __inline void rd_intr_unblock (void) { - assert(rd_intr_blocked > 0); - if (--rd_intr_blocked) - return; +static __inline void rd_intr_unblock(void) RD_UNUSED; +static __inline void rd_intr_unblock(void) { + assert(rd_intr_blocked > 0); + if (--rd_intr_blocked) + return; - sigprocmask(SIG_UNBLOCK, &rd_intr_sigset, NULL); + sigprocmask(SIG_UNBLOCK, &rd_intr_sigset, NULL); } #endif /* _RDSIGNAL_H_ */ diff --git a/src/rdstring.c b/src/rdstring.c index c85ea0e25c..6096e52059 100644 --- a/src/rdstring.c +++ b/src/rdstring.c @@ -47,116 +47,120 @@ * @returns number of written bytes to \p dest, * or -1 on failure (errstr is written) */ -char *rd_string_render (const char *template, - char *errstr, size_t errstr_size, - ssize_t (*callback) (const char *key, - char *buf, size_t size, - void *opaque), - void *opaque) { - const char *s = template; - const char *tend = template + strlen(template); - size_t size = 256; - char *buf; - size_t of = 0; - - buf = rd_malloc(size); +char *rd_string_render( + const char *template, + char *errstr, + size_t errstr_size, + ssize_t (*callback)(const char *key, char *buf, size_t size, void *opaque), + void *opaque) { + const char *s = template; + const char *tend = template + strlen(template); + size_t size = 256; + char *buf; + size_t of = 0; + + buf = rd_malloc(size); #define _remain() (size - of - 1) -#define 
_assure_space(SZ) do { \ - if (of + (SZ) + 1 >= size) { \ - size = (size + (SZ) + 1) * 2; \ - buf = rd_realloc(buf, size); \ - } \ - } while (0) - -#define _do_write(PTR,SZ) do { \ - _assure_space(SZ); \ - memcpy(buf+of, (PTR), (SZ)); \ - of += (SZ); \ - } while (0) - - - - while (*s) { - const char *t; - size_t tof = (size_t)(s-template); - - t = strstr(s, "%{"); - if (t != s) { - /* Write "abc%{" - * ^^^ */ - size_t len = (size_t)((t ? t : tend)-s); - if (len) - _do_write(s, len); - } - - if (t) { - const char *te; - ssize_t r; - char *tmpkey; - - /* Find "abc%{key}" - * ^ */ - te = strchr(t+2, '}'); - if (!te) { - rd_snprintf(errstr, errstr_size, - "Missing close-brace } for " - "%.*s at %"PRIusz, - 15, t, tof); - rd_free(buf); - return NULL; - } - - rd_strndupa(&tmpkey, t+2, (int)(te-t-2)); - - /* Query callback for length of key's value. */ - r = callback(tmpkey, NULL, 0, opaque); - if (r == -1) { - rd_snprintf(errstr, errstr_size, - "Property not available: \"%s\"", - tmpkey); - rd_free(buf); - return NULL; - } - - _assure_space(r); - - /* Call again now providing a large enough buffer. */ - r = callback(tmpkey, buf+of, _remain(), opaque); - if (r == -1) { - rd_snprintf(errstr, errstr_size, - "Property not available: " - "\"%s\"", tmpkey); - rd_free(buf); - return NULL; - } - - assert(r < (ssize_t)_remain()); - of += r; - s = te+1; - - } else { - s = tend; - } - } - - buf[of] = '\0'; - return buf; -} +#define _assure_space(SZ) \ + do { \ + if (of + (SZ) + 1 >= size) { \ + size = (size + (SZ) + 1) * 2; \ + buf = rd_realloc(buf, size); \ + } \ + } while (0) + +#define _do_write(PTR, SZ) \ + do { \ + _assure_space(SZ); \ + memcpy(buf + of, (PTR), (SZ)); \ + of += (SZ); \ + } while (0) + + + + while (*s) { + const char *t; + size_t tof = (size_t)(s - template); + + t = strstr(s, "%{"); + if (t != s) { + /* Write "abc%{" + * ^^^ */ + size_t len = (size_t)((t ? 
t : tend) - s); + if (len) + _do_write(s, len); + } + if (t) { + const char *te; + ssize_t r; + char *tmpkey; + + /* Find "abc%{key}" + * ^ */ + te = strchr(t + 2, '}'); + if (!te) { + rd_snprintf(errstr, errstr_size, + "Missing close-brace } for " + "%.*s at %" PRIusz, + 15, t, tof); + rd_free(buf); + return NULL; + } + rd_strndupa(&tmpkey, t + 2, (int)(te - t - 2)); + + /* Query callback for length of key's value. */ + r = callback(tmpkey, NULL, 0, opaque); + if (r == -1) { + rd_snprintf(errstr, errstr_size, + "Property not available: \"%s\"", + tmpkey); + rd_free(buf); + return NULL; + } + + _assure_space(r); + + /* Call again now providing a large enough buffer. */ + r = callback(tmpkey, buf + of, _remain(), opaque); + if (r == -1) { + rd_snprintf(errstr, errstr_size, + "Property not available: " + "\"%s\"", + tmpkey); + rd_free(buf); + return NULL; + } + assert(r < (ssize_t)_remain()); + of += r; + s = te + 1; -void rd_strtup_destroy (rd_strtup_t *strtup) { + } else { + s = tend; + } + } + + buf[of] = '\0'; + return buf; +} + + + +void rd_strtup_destroy(rd_strtup_t *strtup) { rd_free(strtup); } -void rd_strtup_free (void *strtup) { +void rd_strtup_free(void *strtup) { rd_strtup_destroy((rd_strtup_t *)strtup); } -rd_strtup_t *rd_strtup_new0 (const char *name, ssize_t name_len, - const char *value, ssize_t value_len) { +rd_strtup_t *rd_strtup_new0(const char *name, + ssize_t name_len, + const char *value, + ssize_t value_len) { rd_strtup_t *strtup; /* Calculate lengths, if needed, and add space for \0 nul */ @@ -170,12 +174,12 @@ rd_strtup_t *rd_strtup_new0 (const char *name, ssize_t name_len, value_len = strlen(value); - strtup = rd_malloc(sizeof(*strtup) + - name_len + 1 + value_len + 1 - 1/*name[1]*/); + strtup = rd_malloc(sizeof(*strtup) + name_len + 1 + value_len + 1 - + 1 /*name[1]*/); memcpy(strtup->name, name, name_len); strtup->name[name_len] = '\0'; if (value) { - strtup->value = &strtup->name[name_len+1]; + strtup->value = &strtup->name[name_len + 1]; 
memcpy(strtup->value, value, value_len); strtup->value[value_len] = '\0'; } else { @@ -185,7 +189,7 @@ rd_strtup_t *rd_strtup_new0 (const char *name, ssize_t name_len, return strtup; } -rd_strtup_t *rd_strtup_new (const char *name, const char *value) { +rd_strtup_t *rd_strtup_new(const char *name, const char *value) { return rd_strtup_new0(name, -1, value, -1); } @@ -193,14 +197,14 @@ rd_strtup_t *rd_strtup_new (const char *name, const char *value) { /** * @returns a new copy of \p src */ -rd_strtup_t *rd_strtup_dup (const rd_strtup_t *src) { +rd_strtup_t *rd_strtup_dup(const rd_strtup_t *src) { return rd_strtup_new(src->name, src->value); } /** * @brief Wrapper for rd_strtup_dup() suitable rd_list_copy*() use */ -void *rd_strtup_list_copy (const void *elem, void *opaque) { +void *rd_strtup_list_copy(const void *elem, void *opaque) { const rd_strtup_t *src = elem; return (void *)rd_strtup_dup(src); } @@ -217,12 +221,11 @@ void *rd_strtup_list_copy (const void *elem, void *opaque) { * * @returns a null-terminated \p dst */ -char *rd_flags2str (char *dst, size_t size, - const char **desc, int flags) { - int bit = 0; +char *rd_flags2str(char *dst, size_t size, const char **desc, int flags) { + int bit = 0; size_t of = 0; - for ( ; *desc ; desc++, bit++) { + for (; *desc; desc++, bit++) { int r; if (!(flags & (1 << bit)) || !*desc) @@ -231,12 +234,12 @@ char *rd_flags2str (char *dst, size_t size, if (of >= size) { /* Dest buffer too small, indicate truncation */ if (size > 3) - rd_snprintf(dst+(size-3), 3, ".."); + rd_snprintf(dst + (size - 3), 3, ".."); break; } - r = rd_snprintf(dst+of, size-of, "%s%s", - !of ? "" : ",", *desc); + r = rd_snprintf(dst + of, size - of, "%s%s", !of ? "" : ",", + *desc); of += r; } @@ -255,15 +258,15 @@ char *rd_flags2str (char *dst, size_t size, * @param len If -1 the \p str will be hashed until nul is encountered, * else up to the \p len. 
*/ -unsigned int rd_string_hash (const char *str, ssize_t len) { +unsigned int rd_string_hash(const char *str, ssize_t len) { unsigned int hash = 5381; ssize_t i; if (len == -1) { - for (i = 0 ; str[i] != '\0' ; i++) + for (i = 0; str[i] != '\0'; i++) hash = ((hash << 5) + hash) + str[i]; } else { - for (i = 0 ; i < len ; i++) + for (i = 0; i < len; i++) hash = ((hash << 5) + hash) + str[i]; } @@ -274,7 +277,7 @@ unsigned int rd_string_hash (const char *str, ssize_t len) { /** * @brief Same as strcmp() but handles NULL values. */ -int rd_strcmp (const char *a, const char *b) { +int rd_strcmp(const char *a, const char *b) { if (a == b) return 0; else if (!a && b) @@ -291,7 +294,7 @@ int rd_strcmp (const char *a, const char *b) { * @brief Case-insensitive strstr() for platforms where strcasestr() * is not available. */ -char *_rd_strcasestr (const char *haystack, const char *needle) { +char *_rd_strcasestr(const char *haystack, const char *needle) { const char *h_rem, *n_last; size_t h_len = strlen(haystack); size_t n_len = strlen(needle); @@ -300,8 +303,8 @@ char *_rd_strcasestr (const char *haystack, const char *needle) { if (n_len == 0 || n_len > h_len) return NULL; else if (n_len == h_len) - return !rd_strcasecmp(haystack, needle) ? - (char *)haystack : NULL; + return !rd_strcasecmp(haystack, needle) ? (char *)haystack + : NULL; /* * Scan inspired by Boyer-Moore: @@ -319,16 +322,14 @@ char *_rd_strcasestr (const char *haystack, const char *needle) { * ^-n_last */ n_last = needle + n_len - 1; - h_rem = haystack + n_len - 1; + h_rem = haystack + n_len - 1; while (*h_rem) { const char *h, *n = n_last; /* Find first occurrence of last character in the needle in the remaining haystack. 
*/ - for (h = h_rem ; - *h && tolower((int)*h) != tolower((int)*n) ; - h++) + for (h = h_rem; *h && tolower((int)*h) != tolower((int)*n); h++) ; if (!*h) @@ -360,38 +361,38 @@ char *_rd_strcasestr (const char *haystack, const char *needle) { /** * @brief Unittests for rd_strcasestr() */ -static int ut_strcasestr (void) { +static int ut_strcasestr(void) { static const struct { const char *haystack; const char *needle; ssize_t exp; } strs[] = { - { "this is a haystack", "hays", 10 }, - { "abc", "a", 0 }, - { "abc", "b", 1 }, - { "abc", "c", 2 }, - { "AbcaBcabC", "ABC", 0 }, - { "abcabcaBC", "BcA", 1 }, - { "abcabcABc", "cAB", 2 }, - { "need to estart stART the tart ReStArT!", "REsTaRt", 30 }, - { "need to estart stART the tart ReStArT!", "?sTaRt", -1 }, - { "aaaabaaAb", "ab", 3 }, - { "0A!", "a", 1 }, - { "a", "A", 0 }, - { ".z", "Z", 1 }, - { "", "", -1 }, - { "", "a", -1 }, - { "a", "", -1 }, - { "peRfeCt", "peRfeCt", 0 }, - { "perfect", "perfect", 0 }, - { "PERFECT", "perfect", 0 }, - { NULL }, + {"this is a haystack", "hays", 10}, + {"abc", "a", 0}, + {"abc", "b", 1}, + {"abc", "c", 2}, + {"AbcaBcabC", "ABC", 0}, + {"abcabcaBC", "BcA", 1}, + {"abcabcABc", "cAB", 2}, + {"need to estart stART the tart ReStArT!", "REsTaRt", 30}, + {"need to estart stART the tart ReStArT!", "?sTaRt", -1}, + {"aaaabaaAb", "ab", 3}, + {"0A!", "a", 1}, + {"a", "A", 0}, + {".z", "Z", 1}, + {"", "", -1}, + {"", "a", -1}, + {"a", "", -1}, + {"peRfeCt", "peRfeCt", 0}, + {"perfect", "perfect", 0}, + {"PERFECT", "perfect", 0}, + {NULL}, }; int i; RD_UT_BEGIN(); - for (i = 0 ; strs[i].haystack ; i++) { + for (i = 0; strs[i].haystack; i++) { const char *ret; ssize_t of = -1; @@ -399,10 +400,10 @@ static int ut_strcasestr (void) { if (ret) of = ret - strs[i].haystack; RD_UT_ASSERT(of == strs[i].exp, - "#%d: '%s' in '%s': expected offset %"PRIdsz - ", not %"PRIdsz" (%s)", - i, strs[i].needle, strs[i].haystack, - strs[i].exp, of, ret ? 
ret : "(NULL)"); + "#%d: '%s' in '%s': expected offset %" PRIdsz + ", not %" PRIdsz " (%s)", + i, strs[i].needle, strs[i].haystack, strs[i].exp, + of, ret ? ret : "(NULL)"); } RD_UT_PASS(); @@ -410,7 +411,6 @@ static int ut_strcasestr (void) { - /** * @brief Split a character-separated string into an array. * @@ -430,22 +430,24 @@ static int ut_strcasestr (void) { * @returns the parsed fields in an array. The number of elements in the * array is returned in \p cntp */ -char **rd_string_split (const char *input, char sep, rd_bool_t skip_empty, - size_t *cntp) { - size_t fieldcnt = 1; +char **rd_string_split(const char *input, + char sep, + rd_bool_t skip_empty, + size_t *cntp) { + size_t fieldcnt = 1; rd_bool_t next_esc = rd_false; const char *s; char *p; char **arr; size_t inputlen; - size_t i = 0; + size_t i = 0; size_t elen = 0; *cntp = '\0'; /* First count the maximum number of fields so we know how large of * an array we need to allocate. Escapes are ignored. */ - for (s = input ; *s ; s++) { + for (s = input; *s; s++) { if (*s == sep) fieldcnt++; } @@ -454,9 +456,9 @@ char **rd_string_split (const char *input, char sep, rd_bool_t skip_empty, /* Allocate array and memory for the copied elements in one go. */ arr = rd_malloc((sizeof(*arr) * fieldcnt) + inputlen + 1); - p = (char *)(&arr[fieldcnt]); + p = (char *)(&arr[fieldcnt]); - for (s = input ; ; s++) { + for (s = input;; s++) { rd_bool_t at_end = *s == '\0'; rd_bool_t is_esc = next_esc; @@ -482,8 +484,7 @@ char **rd_string_split (const char *input, char sep, rd_bool_t skip_empty, /* Perform some common escape substitions. * If not known we'll just keep the escaped * character as is (probably the separator). 
*/ - switch (c) - { + switch (c) { case 't': c = '\t'; break; @@ -504,7 +505,7 @@ char **rd_string_split (const char *input, char sep, rd_bool_t skip_empty, done: /* Strip trailing whitespaces */ - while (elen > 0 && isspace((int)p[elen-1])) + while (elen > 0 && isspace((int)p[elen - 1])) elen--; /* End of field */ @@ -539,7 +540,7 @@ char **rd_string_split (const char *input, char sep, rd_bool_t skip_empty, /** * @brief Unittest for rd_string_split() */ -static int ut_string_split (void) { +static int ut_string_split(void) { static const struct { const char *input; const char sep; @@ -547,68 +548,65 @@ static int ut_string_split (void) { size_t exp_cnt; const char *exp[16]; } strs[] = { - { "just one field", ',', rd_true, 1, - { "just one field" } - }, - /* Empty with skip_empty */ - { "", ',', rd_true, 0 }, - /* Empty without skip_empty */ - { "", ',', rd_false, 1, - { "" } - }, - { ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v", - ',', rd_true, 11, - { - "a", "b", "c", "d", "e", "f", "ghijk", "lmn", "opq", - "r s t u", "v" - }, - }, - { ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v", - ',', rd_false, 13, - { - "", "a", "b", "", "c", "d", "e", "f", "ghijk", - "lmn", "opq", "r s t u", "v" - }, - }, - { " this is an \\,escaped comma,\\,,\\\\, " - "and this is an unbalanced escape: \\\\\\\\\\\\\\", - ',', rd_true, 4, - { - "this is an ,escaped comma", - ",", - "\\", - "and this is an unbalanced escape: \\\\\\" - } - }, - { "using|another ||\\|d|elimiter", '|', rd_false, 5, - { - "using", "another", "", "|d", "elimiter" - }, - }, - { NULL }, + {"just one field", ',', rd_true, 1, {"just one field"}}, + /* Empty with skip_empty */ + {"", ',', rd_true, 0}, + /* Empty without skip_empty */ + {"", ',', rd_false, 1, {""}}, + { + ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v", + ',', + rd_true, + 11, + {"a", "b", "c", "d", "e", "f", "ghijk", "lmn", "opq", + "r s t u", "v"}, + }, + { + ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v", + ',', + rd_false, + 13, + {"", "a", 
"b", "", "c", "d", "e", "f", "ghijk", "lmn", "opq", + "r s t u", "v"}, + }, + {" this is an \\,escaped comma,\\,,\\\\, " + "and this is an unbalanced escape: \\\\\\\\\\\\\\", + ',', + rd_true, + 4, + {"this is an ,escaped comma", ",", "\\", + "and this is an unbalanced escape: \\\\\\"}}, + { + "using|another ||\\|d|elimiter", + '|', + rd_false, + 5, + {"using", "another", "", "|d", "elimiter"}, + }, + {NULL}, }; size_t i; RD_UT_BEGIN(); - for (i = 0 ; strs[i].input ; i++) { + for (i = 0; strs[i].input; i++) { char **ret; size_t cnt = 12345; size_t j; ret = rd_string_split(strs[i].input, strs[i].sep, - strs[i].skip_empty, - &cnt); - RD_UT_ASSERT(ret != NULL, - "#%"PRIusz": Did not expect NULL", i); + strs[i].skip_empty, &cnt); + RD_UT_ASSERT(ret != NULL, "#%" PRIusz ": Did not expect NULL", + i); RD_UT_ASSERT(cnt == strs[i].exp_cnt, - "#%"PRIusz": " - "Expected %"PRIusz" elements, got %"PRIusz, + "#%" PRIusz + ": " + "Expected %" PRIusz " elements, got %" PRIusz, i, strs[i].exp_cnt, cnt); - for (j = 0 ; j < cnt ; j++) + for (j = 0; j < cnt; j++) RD_UT_ASSERT(!strcmp(strs[i].exp[j], ret[j]), - "#%"PRIusz": Expected string %"PRIusz + "#%" PRIusz ": Expected string %" PRIusz " to be \"%s\", not \"%s\"", i, j, strs[i].exp[j], ret[j]); @@ -621,7 +619,7 @@ static int ut_string_split (void) { /** * @brief Unittests for strings */ -int unittest_string (void) { +int unittest_string(void) { int fails = 0; fails += ut_strcasestr(); diff --git a/src/rdstring.h b/src/rdstring.h index cd05dc4846..67ea19401b 100644 --- a/src/rdstring.h +++ b/src/rdstring.h @@ -30,14 +30,14 @@ #ifndef _RDSTRING_H_ #define _RDSTRING_H_ -static RD_INLINE RD_UNUSED -void rd_strlcpy (char *dst, const char *src, size_t dstsize) { +static RD_INLINE RD_UNUSED void +rd_strlcpy(char *dst, const char *src, size_t dstsize) { #if HAVE_STRLCPY (void)strlcpy(dst, src, dstsize); #else if (likely(dstsize > 0)) { - size_t srclen = strlen(src); - size_t copylen = RD_MIN(srclen, dstsize-1); + size_t srclen = 
strlen(src); + size_t copylen = RD_MIN(srclen, dstsize - 1); memcpy(dst, src, copylen); dst[copylen] = '\0'; } @@ -46,12 +46,12 @@ void rd_strlcpy (char *dst, const char *src, size_t dstsize) { -char *rd_string_render (const char *templ, - char *errstr, size_t errstr_size, - ssize_t (*callback) (const char *key, - char *buf, size_t size, - void *opaque), - void *opaque); +char *rd_string_render( + const char *templ, + char *errstr, + size_t errstr_size, + ssize_t (*callback)(const char *key, char *buf, size_t size, void *opaque), + void *opaque); @@ -61,28 +61,31 @@ char *rd_string_render (const char *templ, */ typedef struct rd_strtup_s { char *value; - char name[1]; /* Actual allocation of name + val here */ + char name[1]; /* Actual allocation of name + val here */ } rd_strtup_t; -void rd_strtup_destroy (rd_strtup_t *strtup); -void rd_strtup_free (void *strtup); -rd_strtup_t *rd_strtup_new0 (const char *name, ssize_t name_len, - const char *value, ssize_t value_len); -rd_strtup_t *rd_strtup_new (const char *name, const char *value); -rd_strtup_t *rd_strtup_dup (const rd_strtup_t *strtup); -void *rd_strtup_list_copy (const void *elem, void *opaque); +void rd_strtup_destroy(rd_strtup_t *strtup); +void rd_strtup_free(void *strtup); +rd_strtup_t *rd_strtup_new0(const char *name, + ssize_t name_len, + const char *value, + ssize_t value_len); +rd_strtup_t *rd_strtup_new(const char *name, const char *value); +rd_strtup_t *rd_strtup_dup(const rd_strtup_t *strtup); +void *rd_strtup_list_copy(const void *elem, void *opaque); -char *rd_flags2str (char *dst, size_t size, - const char **desc, int flags); +char *rd_flags2str(char *dst, size_t size, const char **desc, int flags); -unsigned int rd_string_hash (const char *str, ssize_t len); +unsigned int rd_string_hash(const char *str, ssize_t len); -int rd_strcmp (const char *a, const char *b); +int rd_strcmp(const char *a, const char *b); -char *_rd_strcasestr (const char *haystack, const char *needle); +char 
*_rd_strcasestr(const char *haystack, const char *needle); -char **rd_string_split (const char *input, char sep, rd_bool_t skip_empty, - size_t *cntp); +char **rd_string_split(const char *input, + char sep, + rd_bool_t skip_empty, + size_t *cntp); /** @returns "true" if EXPR is true, else "false" */ #define RD_STR_ToF(EXPR) ((EXPR) ? "true" : "false") diff --git a/src/rdsysqueue.h b/src/rdsysqueue.h index 6fa1fdb553..ecba4154eb 100644 --- a/src/rdsysqueue.h +++ b/src/rdsysqueue.h @@ -4,24 +4,24 @@ * Copyright (c) 2012-2013, Magnus Edenhill * Copyright (c) 2012-2013, Andreas Öman * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -63,75 +63,76 @@ */ #ifndef LIST_FOREACH -#define LIST_FOREACH(var, head, field) \ - for ((var) = ((head)->lh_first); \ - (var); \ - (var) = ((var)->field.le_next)) +#define LIST_FOREACH(var, head, field) \ + for ((var) = ((head)->lh_first); (var); (var) = ((var)->field.le_next)) #endif #ifndef LIST_EMPTY -#define LIST_EMPTY(head) ((head)->lh_first == NULL) +#define LIST_EMPTY(head) ((head)->lh_first == NULL) #endif #ifndef LIST_FIRST -#define LIST_FIRST(head) ((head)->lh_first) +#define LIST_FIRST(head) ((head)->lh_first) #endif #ifndef LIST_NEXT -#define LIST_NEXT(elm, field) ((elm)->field.le_next) +#define LIST_NEXT(elm, field) ((elm)->field.le_next) #endif #ifndef LIST_INSERT_BEFORE -#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ - (elm)->field.le_prev = (listelm)->field.le_prev; \ - (elm)->field.le_next = (listelm); \ - *(listelm)->field.le_prev = (elm); \ - (listelm)->field.le_prev = &(elm)->field.le_next; \ -} while (/*CONSTCOND*/0) +#define LIST_INSERT_BEFORE(listelm, elm, field) \ + do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ + } while (/*CONSTCOND*/ 0) #endif /* * Complete missing TAILQ-ops */ -#ifndef TAILQ_HEAD_INITIALIZER -#define TAILQ_HEAD_INITIALIZER(head) \ - { NULL, &(head).tqh_first } +#ifndef TAILQ_HEAD_INITIALIZER +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } #endif #ifndef TAILQ_INSERT_BEFORE -#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ - (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ - (elm)->field.tqe_next = (listelm); \ - *(listelm)->field.tqe_prev = (elm); \ - (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ -} while (0) +#define TAILQ_INSERT_BEFORE(listelm, elm, field) \ + do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + 
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ + } while (0) #endif #ifndef TAILQ_FOREACH -#define TAILQ_FOREACH(var, head, field) \ - for ((var) = ((head)->tqh_first); (var); (var) = ((var)->field.tqe_next)) +#define TAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->tqh_first); (var); \ + (var) = ((var)->field.tqe_next)) #endif #ifndef TAILQ_EMPTY -#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) +#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) #endif #ifndef TAILQ_FIRST -#define TAILQ_FIRST(head) ((head)->tqh_first) +#define TAILQ_FIRST(head) ((head)->tqh_first) #endif #ifndef TAILQ_NEXT -#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #endif #ifndef TAILQ_LAST -#define TAILQ_LAST(head, headname) \ +#define TAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) #endif #ifndef TAILQ_PREV -#define TAILQ_PREV(elm, headname, field) \ +#define TAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) #endif @@ -142,13 +143,13 @@ * It does not allow freeing or modifying any other element in the list, * at least not the next element. */ -#define TAILQ_FOREACH_SAFE(elm,head,field,tmpelm) \ - for ((elm) = TAILQ_FIRST(head) ; \ - (elm) && ((tmpelm) = TAILQ_NEXT((elm), field), 1) ; \ - (elm) = (tmpelm)) +#define TAILQ_FOREACH_SAFE(elm, head, field, tmpelm) \ + for ((elm) = TAILQ_FIRST(head); \ + (elm) && ((tmpelm) = TAILQ_NEXT((elm), field), 1); \ + (elm) = (tmpelm)) #endif -/* +/* * In Mac OS 10.4 and earlier TAILQ_FOREACH_REVERSE was defined * differently, redefined it. 
*/ @@ -159,10 +160,11 @@ #endif #ifndef TAILQ_FOREACH_REVERSE -#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ - for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ - (var); \ - (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ + (var); \ + (var) = \ + (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) #endif @@ -170,56 +172,57 @@ * Treat the TAILQ as a circular list and return the previous/next entry, * possibly wrapping to the end/beginning. */ -#define TAILQ_CIRC_PREV(var, head, headname, field) \ - ((var) != TAILQ_FIRST(head) ? \ - TAILQ_PREV(var, headname, field) : \ - TAILQ_LAST(head, headname)) +#define TAILQ_CIRC_PREV(var, head, headname, field) \ + ((var) != TAILQ_FIRST(head) ? TAILQ_PREV(var, headname, field) \ + : TAILQ_LAST(head, headname)) -#define TAILQ_CIRC_NEXT(var, head, headname, field) \ - ((var) != TAILQ_LAST(head, headname) ? \ - TAILQ_NEXT(var, field) : \ - TAILQ_FIRST(head)) +#define TAILQ_CIRC_NEXT(var, head, headname, field) \ + ((var) != TAILQ_LAST(head, headname) ? 
TAILQ_NEXT(var, field) \ + : TAILQ_FIRST(head)) /* * Some extra functions for LIST manipulation */ -#define LIST_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) do { \ - if(LIST_EMPTY(head)) { \ - LIST_INSERT_HEAD(head, elm, field); \ - } else { \ - elmtype _tmp; \ - LIST_FOREACH(_tmp,head,field) { \ - if(cmpfunc(elm,_tmp) < 0) { \ - LIST_INSERT_BEFORE(_tmp,elm,field); \ - break; \ - } \ - if(!LIST_NEXT(_tmp,field)) { \ - LIST_INSERT_AFTER(_tmp,elm,field); \ - break; \ - } \ - } \ - } \ -} while(0) +#define LIST_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) \ + do { \ + if (LIST_EMPTY(head)) { \ + LIST_INSERT_HEAD(head, elm, field); \ + } else { \ + elmtype _tmp; \ + LIST_FOREACH(_tmp, head, field) { \ + if (cmpfunc(elm, _tmp) < 0) { \ + LIST_INSERT_BEFORE(_tmp, elm, field); \ + break; \ + } \ + if (!LIST_NEXT(_tmp, field)) { \ + LIST_INSERT_AFTER(_tmp, elm, field); \ + break; \ + } \ + } \ + } \ + } while (0) #ifndef TAILQ_INSERT_SORTED -#define TAILQ_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) do { \ - if(TAILQ_FIRST(head) == NULL) { \ - TAILQ_INSERT_HEAD(head, elm, field); \ - } else { \ - elmtype _tmp; \ - TAILQ_FOREACH(_tmp,head,field) { \ - if(cmpfunc(elm,_tmp) < 0) { \ - TAILQ_INSERT_BEFORE(_tmp,elm,field); \ - break; \ - } \ - if(!TAILQ_NEXT(_tmp,field)) { \ - TAILQ_INSERT_AFTER(head,_tmp,elm,field); \ - break; \ - } \ - } \ - } \ -} while(0) +#define TAILQ_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) \ + do { \ + if (TAILQ_FIRST(head) == NULL) { \ + TAILQ_INSERT_HEAD(head, elm, field); \ + } else { \ + elmtype _tmp; \ + TAILQ_FOREACH(_tmp, head, field) { \ + if (cmpfunc(elm, _tmp) < 0) { \ + TAILQ_INSERT_BEFORE(_tmp, elm, field); \ + break; \ + } \ + if (!TAILQ_NEXT(_tmp, field)) { \ + TAILQ_INSERT_AFTER(head, _tmp, elm, \ + field); \ + break; \ + } \ + } \ + } \ + } while (0) #endif /** @@ -227,164 +230,175 @@ * comparator \p cmpfunc. * \p src will be re-initialized on completion. 
*/ -#define TAILQ_CONCAT_SORTED(dsthead,srchead,elmtype,field,cmpfunc) do { \ - elmtype _cstmp; \ - elmtype _cstmp2; \ - if (TAILQ_EMPTY(dsthead)) { \ - TAILQ_CONCAT(dsthead, srchead,field); \ - break; \ - } \ - TAILQ_FOREACH_SAFE(_cstmp, srchead, field, _cstmp2) { \ - TAILQ_INSERT_SORTED(dsthead, _cstmp, elmtype, \ - field, cmpfunc); \ - } \ - TAILQ_INIT(srchead); \ +#define TAILQ_CONCAT_SORTED(dsthead, srchead, elmtype, field, cmpfunc) \ + do { \ + elmtype _cstmp; \ + elmtype _cstmp2; \ + if (TAILQ_EMPTY(dsthead)) { \ + TAILQ_CONCAT(dsthead, srchead, field); \ + break; \ + } \ + TAILQ_FOREACH_SAFE(_cstmp, srchead, field, _cstmp2) { \ + TAILQ_INSERT_SORTED(dsthead, _cstmp, elmtype, field, \ + cmpfunc); \ + } \ + TAILQ_INIT(srchead); \ } while (0) -#define TAILQ_MOVE(newhead, oldhead, field) do { \ - if(TAILQ_FIRST(oldhead)) { \ - TAILQ_FIRST(oldhead)->field.tqe_prev = &(newhead)->tqh_first; \ - (newhead)->tqh_first = (oldhead)->tqh_first; \ - (newhead)->tqh_last = (oldhead)->tqh_last; \ - TAILQ_INIT(oldhead); \ - } else \ - TAILQ_INIT(newhead); \ - } while (/*CONSTCOND*/0) +#define TAILQ_MOVE(newhead, oldhead, field) \ + do { \ + if (TAILQ_FIRST(oldhead)) { \ + TAILQ_FIRST(oldhead)->field.tqe_prev = \ + &(newhead)->tqh_first; \ + (newhead)->tqh_first = (oldhead)->tqh_first; \ + (newhead)->tqh_last = (oldhead)->tqh_last; \ + TAILQ_INIT(oldhead); \ + } else \ + TAILQ_INIT(newhead); \ + } while (/*CONSTCOND*/ 0) /* @brief Prepend \p shead to \p dhead */ -#define TAILQ_PREPEND(dhead,shead,headname,field) do { \ - if (unlikely(TAILQ_EMPTY(dhead))) { \ - TAILQ_MOVE(dhead, shead, field); \ - } else if (likely(!TAILQ_EMPTY(shead))) { \ - TAILQ_LAST(shead,headname)->field.tqe_next = \ - TAILQ_FIRST(dhead); \ - TAILQ_FIRST(dhead)->field.tqe_prev = \ - &TAILQ_LAST(shead,headname)->field.tqe_next; \ - TAILQ_FIRST(shead)->field.tqe_prev = &(dhead)->tqh_first; \ - TAILQ_FIRST(dhead) = TAILQ_FIRST(shead); \ - TAILQ_INIT(shead); \ - } \ +#define TAILQ_PREPEND(dhead, shead, 
headname, field) \ + do { \ + if (unlikely(TAILQ_EMPTY(dhead))) { \ + TAILQ_MOVE(dhead, shead, field); \ + } else if (likely(!TAILQ_EMPTY(shead))) { \ + TAILQ_LAST(shead, headname)->field.tqe_next = \ + TAILQ_FIRST(dhead); \ + TAILQ_FIRST(dhead)->field.tqe_prev = \ + &TAILQ_LAST(shead, headname)->field.tqe_next; \ + TAILQ_FIRST(shead)->field.tqe_prev = \ + &(dhead)->tqh_first; \ + TAILQ_FIRST(dhead) = TAILQ_FIRST(shead); \ + TAILQ_INIT(shead); \ + } \ } while (0) /* @brief Insert \p shead after element \p listelm in \p dhead */ -#define TAILQ_INSERT_LIST(dhead,listelm,shead,headname,elmtype,field) do { \ - if (TAILQ_LAST(dhead, headname) == listelm) { \ - TAILQ_CONCAT(dhead, shead, field); \ - } else { \ - elmtype _elm = TAILQ_FIRST(shead); \ - elmtype _last = TAILQ_LAST(shead, headname); \ - elmtype _aft = TAILQ_NEXT(listelm, field); \ - (listelm)->field.tqe_next = _elm; \ - _elm->field.tqe_prev = &(listelm)->field.tqe_next; \ - _last->field.tqe_next = _aft; \ - _aft->field.tqe_prev = &_last->field.tqe_next; \ - TAILQ_INIT((shead)); \ - } \ +#define TAILQ_INSERT_LIST(dhead, listelm, shead, headname, elmtype, field) \ + do { \ + if (TAILQ_LAST(dhead, headname) == listelm) { \ + TAILQ_CONCAT(dhead, shead, field); \ + } else { \ + elmtype _elm = TAILQ_FIRST(shead); \ + elmtype _last = TAILQ_LAST(shead, headname); \ + elmtype _aft = TAILQ_NEXT(listelm, field); \ + (listelm)->field.tqe_next = _elm; \ + _elm->field.tqe_prev = &(listelm)->field.tqe_next; \ + _last->field.tqe_next = _aft; \ + _aft->field.tqe_prev = &_last->field.tqe_next; \ + TAILQ_INIT((shead)); \ + } \ } while (0) /* @brief Insert \p shead before element \p listelm in \p dhead */ -#define TAILQ_INSERT_LIST_BEFORE(dhead,insert_before,shead,headname,elmtype,field) \ - do { \ - if (TAILQ_FIRST(dhead) == insert_before) { \ - TAILQ_PREPEND(dhead, shead, headname, field); \ - } else { \ - elmtype _first = TAILQ_FIRST(shead); \ - elmtype _last = TAILQ_LAST(shead, headname); \ - elmtype _dprev = \ - 
TAILQ_PREV(insert_before, headname, field); \ - _last->field.tqe_next = insert_before; \ - _dprev->field.tqe_next = _first; \ - (insert_before)->field.tqe_prev = \ - &_last->field.tqe_next; \ - _first->field.tqe_prev = &(_dprev)->field.tqe_next; \ - TAILQ_INIT((shead)); \ - } \ +#define TAILQ_INSERT_LIST_BEFORE(dhead, insert_before, shead, headname, \ + elmtype, field) \ + do { \ + if (TAILQ_FIRST(dhead) == insert_before) { \ + TAILQ_PREPEND(dhead, shead, headname, field); \ + } else { \ + elmtype _first = TAILQ_FIRST(shead); \ + elmtype _last = TAILQ_LAST(shead, headname); \ + elmtype _dprev = \ + TAILQ_PREV(insert_before, headname, field); \ + _last->field.tqe_next = insert_before; \ + _dprev->field.tqe_next = _first; \ + (insert_before)->field.tqe_prev = \ + &_last->field.tqe_next; \ + _first->field.tqe_prev = &(_dprev)->field.tqe_next; \ + TAILQ_INIT((shead)); \ + } \ } while (0) #ifndef SIMPLEQ_HEAD -#define SIMPLEQ_HEAD(name, type) \ -struct name { \ -struct type *sqh_first; \ -struct type **sqh_last; \ -} +#define SIMPLEQ_HEAD(name, type) \ + struct name { \ + struct type *sqh_first; \ + struct type **sqh_last; \ + } #endif #ifndef SIMPLEQ_ENTRY -#define SIMPLEQ_ENTRY(type) \ -struct { \ -struct type *sqe_next; \ -} +#define SIMPLEQ_ENTRY(type) \ + struct { \ + struct type *sqe_next; \ + } #endif #ifndef SIMPLEQ_FIRST -#define SIMPLEQ_FIRST(head) ((head)->sqh_first) +#define SIMPLEQ_FIRST(head) ((head)->sqh_first) #endif #ifndef SIMPLEQ_REMOVE_HEAD -#define SIMPLEQ_REMOVE_HEAD(head, field) do { \ -if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ -(head)->sqh_last = &(head)->sqh_first; \ -} while (0) +#define SIMPLEQ_REMOVE_HEAD(head, field) \ + do { \ + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == \ + NULL) \ + (head)->sqh_last = &(head)->sqh_first; \ + } while (0) #endif #ifndef SIMPLEQ_INSERT_TAIL -#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ -(elm)->field.sqe_next = NULL; \ -*(head)->sqh_last = (elm); \ 
-(head)->sqh_last = &(elm)->field.sqe_next; \ -} while (0) +#define SIMPLEQ_INSERT_TAIL(head, elm, field) \ + do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + } while (0) #endif #ifndef SIMPLEQ_INIT -#define SIMPLEQ_INIT(head) do { \ -(head)->sqh_first = NULL; \ -(head)->sqh_last = &(head)->sqh_first; \ -} while (0) +#define SIMPLEQ_INIT(head) \ + do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ + } while (0) #endif #ifndef SIMPLEQ_INSERT_HEAD -#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ -if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ -(head)->sqh_last = &(elm)->field.sqe_next; \ -(head)->sqh_first = (elm); \ -} while (0) +#define SIMPLEQ_INSERT_HEAD(head, elm, field) \ + do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ + } while (0) #endif #ifndef SIMPLEQ_FOREACH -#define SIMPLEQ_FOREACH(var, head, field) \ -for((var) = SIMPLEQ_FIRST(head); \ -(var) != SIMPLEQ_END(head); \ -(var) = SIMPLEQ_NEXT(var, field)) +#define SIMPLEQ_FOREACH(var, head, field) \ + for ((var) = SIMPLEQ_FIRST(head); (var) != SIMPLEQ_END(head); \ + (var) = SIMPLEQ_NEXT(var, field)) #endif #ifndef SIMPLEQ_INSERT_AFTER -#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ -if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \ -(head)->sqh_last = &(elm)->field.sqe_next; \ -(listelm)->field.sqe_next = (elm); \ -} while (0) +#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) \ + do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == \ + NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ + } while (0) #endif #ifndef SIMPLEQ_END -#define SIMPLEQ_END(head) NULL +#define SIMPLEQ_END(head) NULL #endif #ifndef SIMPLEQ_NEXT -#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) +#define SIMPLEQ_NEXT(elm, 
field) ((elm)->field.sqe_next) #endif #ifndef SIMPLEQ_HEAD_INITIALIZER -#define SIMPLEQ_HEAD_INITIALIZER(head) \ -{ NULL, &(head).sqh_first } +#define SIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } #endif #ifndef SIMPLEQ_EMPTY -#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head)) +#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head)) #endif - - #endif /* _RDSYSQUEUE_H_ */ diff --git a/src/rdtime.h b/src/rdtime.h index 1f59f37e87..9caa60f9a0 100644 --- a/src/rdtime.h +++ b/src/rdtime.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -31,45 +31,49 @@ #ifndef TIMEVAL_TO_TIMESPEC -#define TIMEVAL_TO_TIMESPEC(tv,ts) do { \ - (ts)->tv_sec = (tv)->tv_sec; \ - (ts)->tv_nsec = (tv)->tv_usec * 1000; \ - } while (0) - -#define TIMESPEC_TO_TIMEVAL(tv, ts) do { \ - (tv)->tv_sec = (ts)->tv_sec; \ - (tv)->tv_usec = (ts)->tv_nsec / 1000; \ - } while (0) +#define TIMEVAL_TO_TIMESPEC(tv, ts) \ + do { \ + (ts)->tv_sec = (tv)->tv_sec; \ + (ts)->tv_nsec = (tv)->tv_usec * 1000; \ + } while (0) + +#define TIMESPEC_TO_TIMEVAL(tv, ts) \ + do { \ + (tv)->tv_sec = (ts)->tv_sec; \ + (tv)->tv_usec = (ts)->tv_nsec / 1000; \ + } while (0) #endif -#define TIMESPEC_TO_TS(ts) \ - (((rd_ts_t)(ts)->tv_sec * 1000000LLU) + ((ts)->tv_nsec / 1000)) +#define TIMESPEC_TO_TS(ts) \ + (((rd_ts_t)(ts)->tv_sec * 1000000LLU) + ((ts)->tv_nsec / 1000)) -#define TS_TO_TIMESPEC(ts,tsx) do { \ - (ts)->tv_sec = (tsx) / 1000000; \ - (ts)->tv_nsec = ((tsx) % 1000000) * 1000; \ - if ((ts)->tv_nsec >= 1000000000LLU) { \ - (ts)->tv_sec++; \ - (ts)->tv_nsec -= 1000000000LLU; \ - } \ - } while (0) +#define TS_TO_TIMESPEC(ts, tsx) \ + do { \ + (ts)->tv_sec = (tsx) / 1000000; \ + (ts)->tv_nsec = ((tsx) % 1000000) * 1000; \ + if ((ts)->tv_nsec >= 1000000000LLU) { \ + (ts)->tv_sec++; \ + (ts)->tv_nsec -= 1000000000LLU; \ + } \ + } while (0) #define TIMESPEC_CLEAR(ts) ((ts)->tv_sec = (ts)->tv_nsec = 0LLU) -#define RD_POLL_INFINITE -1 -#define RD_POLL_NOWAIT 0 +#define RD_POLL_INFINITE -1 +#define RD_POLL_NOWAIT 0 #if RD_UNITTEST_QPC_OVERRIDES - /* Overrides for rd_clock() unittest using QPC on Windows */ -BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER * lpFrequency); -BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER * lpPerformanceCount); -#define rd_QueryPerformanceFrequency(IFREQ) rd_ut_QueryPerformanceFrequency(IFREQ) +/* Overrides for rd_clock() unittest using QPC on Windows */ +BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency); +BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount); +#define 
rd_QueryPerformanceFrequency(IFREQ) \ + rd_ut_QueryPerformanceFrequency(IFREQ) #define rd_QueryPerformanceCounter(PC) rd_ut_QueryPerformanceCounter(PC) #else #define rd_QueryPerformanceFrequency(IFREQ) QueryPerformanceFrequency(IFREQ) -#define rd_QueryPerformanceCounter(PC) QueryPerformanceCounter(PC) +#define rd_QueryPerformanceCounter(PC) QueryPerformanceCounter(PC) #endif /** @@ -77,13 +81,13 @@ BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER * lpPerformanceCount); * @remark There is no monotonic clock on OSX, the system time * is returned instead. */ -static RD_INLINE rd_ts_t rd_clock (void) RD_UNUSED; -static RD_INLINE rd_ts_t rd_clock (void) { +static RD_INLINE rd_ts_t rd_clock(void) RD_UNUSED; +static RD_INLINE rd_ts_t rd_clock(void) { #if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29) - /* No monotonic clock on Darwin */ - struct timeval tv; - gettimeofday(&tv, NULL); - return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec; + /* No monotonic clock on Darwin */ + struct timeval tv; + gettimeofday(&tv, NULL); + return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec; #elif defined(_WIN32) LARGE_INTEGER now; static RD_TLS double freq = 0.0; @@ -97,10 +101,10 @@ static RD_INLINE rd_ts_t rd_clock (void) { rd_QueryPerformanceCounter(&now); return (rd_ts_t)((double)now.QuadPart / freq); #else - struct timespec ts; - clock_gettime(CLOCK_MONOTONIC, &ts); - return ((rd_ts_t)ts.tv_sec * 1000000LLU) + - ((rd_ts_t)ts.tv_nsec / 1000LLU); + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return ((rd_ts_t)ts.tv_sec * 1000000LLU) + + ((rd_ts_t)ts.tv_nsec / 1000LLU); #endif } @@ -109,10 +113,10 @@ static RD_INLINE rd_ts_t rd_clock (void) { * @returns UTC wallclock time as number of microseconds since * beginning of the epoch. 
*/ -static RD_INLINE RD_UNUSED rd_ts_t rd_uclock (void) { - struct timeval tv; - rd_gettimeofday(&tv, NULL); - return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec; +static RD_INLINE RD_UNUSED rd_ts_t rd_uclock(void) { + struct timeval tv; + rd_gettimeofday(&tv, NULL); + return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec; } @@ -120,18 +124,18 @@ static RD_INLINE RD_UNUSED rd_ts_t rd_uclock (void) { /** * Thread-safe version of ctime() that strips the trailing newline. */ -static RD_INLINE const char *rd_ctime (const time_t *t) RD_UNUSED; -static RD_INLINE const char *rd_ctime (const time_t *t) { - static RD_TLS char ret[27]; +static RD_INLINE const char *rd_ctime(const time_t *t) RD_UNUSED; +static RD_INLINE const char *rd_ctime(const time_t *t) { + static RD_TLS char ret[27]; #ifndef _WIN32 - ctime_r(t, ret); + ctime_r(t, ret); #else - ctime_s(ret, sizeof(ret), t); + ctime_s(ret, sizeof(ret), t); #endif - ret[25] = '\0'; + ret[25] = '\0'; - return ret; + return ret; } @@ -139,7 +143,7 @@ static RD_INLINE const char *rd_ctime (const time_t *t) { * @brief Convert a relative millisecond timeout to microseconds, * properly handling RD_POLL_NOWAIT, et.al. */ -static RD_INLINE rd_ts_t rd_timeout_us (int timeout_ms) { +static RD_INLINE rd_ts_t rd_timeout_us(int timeout_ms) { if (timeout_ms <= 0) return (rd_ts_t)timeout_ms; else @@ -150,7 +154,7 @@ static RD_INLINE rd_ts_t rd_timeout_us (int timeout_ms) { * @brief Convert a relative microsecond timeout to milliseconds, * properly handling RD_POLL_NOWAIT, et.al. */ -static RD_INLINE int rd_timeout_ms (rd_ts_t timeout_us) { +static RD_INLINE int rd_timeout_ms(rd_ts_t timeout_us) { if (timeout_us <= 0) return (int)timeout_us; else @@ -171,12 +175,11 @@ static RD_INLINE int rd_timeout_ms (rd_ts_t timeout_us) { * @returns the absolute timeout which should later be passed * to rd_timeout_adjust(). 
*/ -static RD_INLINE rd_ts_t rd_timeout_init (int timeout_ms) { - if (timeout_ms == RD_POLL_INFINITE || - timeout_ms == RD_POLL_NOWAIT) - return timeout_ms; +static RD_INLINE rd_ts_t rd_timeout_init(int timeout_ms) { + if (timeout_ms == RD_POLL_INFINITE || timeout_ms == RD_POLL_NOWAIT) + return timeout_ms; - return rd_clock() + (timeout_ms * 1000); + return rd_clock() + (timeout_ms * 1000); } @@ -188,11 +191,10 @@ static RD_INLINE rd_ts_t rd_timeout_init (int timeout_ms) { * * Honours RD_POLL_INFITE and RD_POLL_NOWAIT (reflected in tspec.tv_sec). */ -static RD_INLINE void rd_timeout_init_timespec_us (struct timespec *tspec, - rd_ts_t timeout_us) { - if (timeout_us == RD_POLL_INFINITE || - timeout_us == RD_POLL_NOWAIT) { - tspec->tv_sec = timeout_us; +static RD_INLINE void rd_timeout_init_timespec_us(struct timespec *tspec, + rd_ts_t timeout_us) { + if (timeout_us == RD_POLL_INFINITE || timeout_us == RD_POLL_NOWAIT) { + tspec->tv_sec = timeout_us; tspec->tv_nsec = 0; } else { #if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29) @@ -202,7 +204,7 @@ static RD_INLINE void rd_timeout_init_timespec_us (struct timespec *tspec, #else timespec_get(tspec, TIME_UTC); #endif - tspec->tv_sec += timeout_us / 1000000; + tspec->tv_sec += timeout_us / 1000000; tspec->tv_nsec += (timeout_us % 1000000) * 1000; if (tspec->tv_nsec >= 1000000000) { tspec->tv_nsec -= 1000000000; @@ -219,11 +221,10 @@ static RD_INLINE void rd_timeout_init_timespec_us (struct timespec *tspec, * * Honours RD_POLL_INFITE and RD_POLL_NOWAIT (reflected in tspec.tv_sec). 
*/ -static RD_INLINE void rd_timeout_init_timespec (struct timespec *tspec, - int timeout_ms) { - if (timeout_ms == RD_POLL_INFINITE || - timeout_ms == RD_POLL_NOWAIT) { - tspec->tv_sec = timeout_ms; +static RD_INLINE void rd_timeout_init_timespec(struct timespec *tspec, + int timeout_ms) { + if (timeout_ms == RD_POLL_INFINITE || timeout_ms == RD_POLL_NOWAIT) { + tspec->tv_sec = timeout_ms; tspec->tv_nsec = 0; } else { #if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29) @@ -233,7 +234,7 @@ static RD_INLINE void rd_timeout_init_timespec (struct timespec *tspec, #else timespec_get(tspec, TIME_UTC); #endif - tspec->tv_sec += timeout_ms / 1000; + tspec->tv_sec += timeout_ms / 1000; tspec->tv_nsec += (timeout_ms % 1000) * 1000000; if (tspec->tv_nsec >= 1000000000) { tspec->tv_nsec -= 1000000000; @@ -246,11 +247,10 @@ static RD_INLINE void rd_timeout_init_timespec (struct timespec *tspec, /** * @brief Same as rd_timeout_remains() but with microsecond precision */ -static RD_INLINE rd_ts_t rd_timeout_remains_us (rd_ts_t abs_timeout) { +static RD_INLINE rd_ts_t rd_timeout_remains_us(rd_ts_t abs_timeout) { rd_ts_t timeout_us; - if (abs_timeout == RD_POLL_INFINITE || - abs_timeout == RD_POLL_NOWAIT) + if (abs_timeout == RD_POLL_INFINITE || abs_timeout == RD_POLL_NOWAIT) return (rd_ts_t)abs_timeout; timeout_us = abs_timeout - rd_clock(); @@ -272,7 +272,7 @@ static RD_INLINE rd_ts_t rd_timeout_remains_us (rd_ts_t abs_timeout) { * rd_timeout_expired() can be used to check the return value * in a bool fashion. */ -static RD_INLINE int rd_timeout_remains (rd_ts_t abs_timeout) { +static RD_INLINE int rd_timeout_remains(rd_ts_t abs_timeout) { return rd_timeout_ms(rd_timeout_remains_us(abs_timeout)); } @@ -282,19 +282,18 @@ static RD_INLINE int rd_timeout_remains (rd_ts_t abs_timeout) { * @brief Like rd_timeout_remains() but limits the maximum time to \p limit_ms, * and operates on the return value of rd_timeout_remains(). 
*/ -static RD_INLINE int -rd_timeout_remains_limit0 (int remains_ms, int limit_ms) { - if (remains_ms == RD_POLL_INFINITE || remains_ms > limit_ms) - return limit_ms; - else - return remains_ms; +static RD_INLINE int rd_timeout_remains_limit0(int remains_ms, int limit_ms) { + if (remains_ms == RD_POLL_INFINITE || remains_ms > limit_ms) + return limit_ms; + else + return remains_ms; } /** * @brief Like rd_timeout_remains() but limits the maximum time to \p limit_ms */ -static RD_INLINE int -rd_timeout_remains_limit (rd_ts_t abs_timeout, int limit_ms) { +static RD_INLINE int rd_timeout_remains_limit(rd_ts_t abs_timeout, + int limit_ms) { return rd_timeout_remains_limit0(rd_timeout_remains(abs_timeout), limit_ms); } @@ -303,8 +302,8 @@ rd_timeout_remains_limit (rd_ts_t abs_timeout, int limit_ms) { * @returns 1 if the **relative** timeout as returned by rd_timeout_remains() * has timed out / expired, else 0. */ -static RD_INLINE int rd_timeout_expired (int timeout_ms) { - return timeout_ms == RD_POLL_NOWAIT; +static RD_INLINE int rd_timeout_expired(int timeout_ms) { + return timeout_ms == RD_POLL_NOWAIT; } #endif /* _RDTIME_H_ */ diff --git a/src/rdtypes.h b/src/rdtypes.h index c843ead1bc..8f3625512d 100644 --- a/src/rdtypes.h +++ b/src/rdtypes.h @@ -3,24 +3,24 @@ * * Copyright (c) 2012, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -43,12 +43,12 @@ */ typedef int64_t rd_ts_t; -#define RD_TS_MAX INT64_MAX +#define RD_TS_MAX INT64_MAX typedef uint8_t rd_bool_t; -#define rd_true 1 -#define rd_false 0 +#define rd_true 1 +#define rd_false 0 /** @@ -64,8 +64,8 @@ typedef enum { * @enum Instruct function to acquire or not to acquire a lock */ typedef enum { - RD_DONT_LOCK = 0, /**< Do not acquire lock */ - RD_DO_LOCK = 1, /**< Do acquire lock */ + RD_DONT_LOCK = 0, /**< Do not acquire lock */ + RD_DO_LOCK = 1, /**< Do acquire lock */ } rd_dolock_t; @@ -80,7 +80,7 @@ typedef enum { * * @returns -1, 0 or 1. 
*/ -#define RD_CMP(A,B) (int)((A) < (B) ? -1 : ((A) > (B))) +#define RD_CMP(A, B) (int)((A) < (B) ? -1 : ((A) > (B))) #endif /* _RDTYPES_H_ */ diff --git a/src/rdunittest.c b/src/rdunittest.c index 006b165ee7..736365c249 100644 --- a/src/rdunittest.c +++ b/src/rdunittest.c @@ -51,8 +51,8 @@ #include "rdkafka_txnmgr.h" rd_bool_t rd_unittest_assert_on_failure = rd_false; -rd_bool_t rd_unittest_on_ci = rd_false; -rd_bool_t rd_unittest_slow = rd_false; +rd_bool_t rd_unittest_on_ci = rd_false; +rd_bool_t rd_unittest_slow = rd_false; #if ENABLE_CODECOV /** @@ -60,16 +60,16 @@ rd_bool_t rd_unittest_slow = rd_false; * @{ */ -static rd_atomic64_t rd_ut_covnrs[RD_UT_COVNR_MAX+1]; +static rd_atomic64_t rd_ut_covnrs[RD_UT_COVNR_MAX + 1]; -void rd_ut_coverage (const char *file, const char *func, int line, int covnr) { +void rd_ut_coverage(const char *file, const char *func, int line, int covnr) { rd_assert(covnr >= 0 && covnr <= RD_UT_COVNR_MAX); rd_atomic64_add(&rd_ut_covnrs[covnr], 1); } -int64_t rd_ut_coverage_check (const char *file, const char *func, int line, - int covnr) { +int64_t +rd_ut_coverage_check(const char *file, const char *func, int line, int covnr) { int64_t r; rd_assert(covnr >= 0 && covnr <= RD_UT_COVNR_MAX); @@ -93,7 +93,7 @@ int64_t rd_ut_coverage_check (const char *file, const char *func, int line, fprintf(stderr, "\033[34mRDUT: CCOV: %s:%d: %s: Code coverage nr %d: " - "PASS (%"PRId64" code path execution(s))\033[0m\n", + "PASS (%" PRId64 " code path execution(s))\033[0m\n", file, line, func, covnr, r); return r; @@ -121,9 +121,9 @@ struct ut_tq_args { int base; /**< Base value */ int cnt; /**< Number of elements to add */ int step; /**< Value step */ - } q[3]; /**< Queue element definition */ - int qcnt; /**< Number of defs in .q */ - int exp[16]; /**< Expected value order after join */ + } q[3]; /**< Queue element definition */ + int qcnt; /**< Number of defs in .q */ + int exp[16]; /**< Expected value order after join */ }; /** @@ -132,8 +132,8 @@ 
struct ut_tq_args { * the first element in \p head. * @remarks \p head must be ascending sorted. */ -static struct ut_tq *ut_tq_find_prev_pos (const struct ut_tq_head *head, - int val) { +static struct ut_tq *ut_tq_find_prev_pos(const struct ut_tq_head *head, + int val) { struct ut_tq *e, *prev = NULL; TAILQ_FOREACH(e, head, link) { @@ -145,9 +145,9 @@ static struct ut_tq *ut_tq_find_prev_pos (const struct ut_tq_head *head, return prev; } -static int ut_tq_test (const struct ut_tq_args *args) { +static int ut_tq_test(const struct ut_tq_args *args) { int totcnt = 0; - int fails = 0; + int fails = 0; struct ut_tq_head *tqh[3]; struct ut_tq *e, *insert_after; int i, qi; @@ -166,12 +166,12 @@ static int ut_tq_test (const struct ut_tq_args *args) { /* Use heap allocated heads to let valgrind/asan assist * in detecting corruption. */ - for (qi = 0 ; qi < args->qcnt ; qi++) { + for (qi = 0; qi < args->qcnt; qi++) { tqh[qi] = rd_calloc(1, sizeof(*tqh[qi])); TAILQ_INIT(tqh[qi]); - for (i = 0 ; i < args->q[qi].cnt ; i++) { - e = rd_malloc(sizeof(*e)); + for (i = 0; i < args->q[qi].cnt; i++) { + e = rd_malloc(sizeof(*e)); e->v = args->q[qi].base + (i * args->q[qi].step); TAILQ_INSERT_TAIL(tqh[qi], e, link); } @@ -179,7 +179,7 @@ static int ut_tq_test (const struct ut_tq_args *args) { totcnt += args->q[qi].cnt; } - for (qi = 1 ; qi < args->qcnt ; qi++) { + for (qi = 1; qi < args->qcnt; qi++) { insert_after = ut_tq_find_prev_pos(tqh[0], args->q[qi].base); if (!insert_after) { /* Insert position is head of list, @@ -187,25 +187,24 @@ static int ut_tq_test (const struct ut_tq_args *args) { TAILQ_PREPEND(tqh[0], tqh[qi], ut_tq_head, link); } else { TAILQ_INSERT_LIST(tqh[0], insert_after, tqh[qi], - ut_tq_head, - struct ut_tq *, link); + ut_tq_head, struct ut_tq *, link); } - RD_UT_ASSERT(TAILQ_EMPTY(tqh[qi]), - "expected empty tqh[%d]", qi); + RD_UT_ASSERT(TAILQ_EMPTY(tqh[qi]), "expected empty tqh[%d]", + qi); RD_UT_ASSERT(!TAILQ_EMPTY(tqh[0]), "expected non-empty tqh[0]"); 
memset(tqh[qi], (int)'A', sizeof(*tqh[qi])); rd_free(tqh[qi]); } - RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt-1], + RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt - 1], "TAILQ_LAST val %d, expected %d", - TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt-1]); + TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt - 1]); /* Add sentinel value to verify that INSERT_TAIL works * after INSERT_LIST */ - e = rd_malloc(sizeof(*e)); + e = rd_malloc(sizeof(*e)); e->v = 99; TAILQ_INSERT_TAIL(tqh[0], e, link); totcnt++; @@ -213,14 +212,16 @@ static int ut_tq_test (const struct ut_tq_args *args) { i = 0; TAILQ_FOREACH(e, tqh[0], link) { if (i >= totcnt) { - RD_UT_WARN("Too many elements in list tqh[0]: " - "idx %d > totcnt %d: element %p (value %d)", - i, totcnt, e, e->v); + RD_UT_WARN( + "Too many elements in list tqh[0]: " + "idx %d > totcnt %d: element %p (value %d)", + i, totcnt, e, e->v); fails++; } else if (e->v != args->exp[i]) { - RD_UT_WARN("Element idx %d/%d in tqh[0] has value %d, " - "expected %d", - i, totcnt, e->v, args->exp[i]); + RD_UT_WARN( + "Element idx %d/%d in tqh[0] has value %d, " + "expected %d", + i, totcnt, e->v, args->exp[i]); fails++; } else if (i == totcnt - 1 && e != TAILQ_LAST(tqh[0], ut_tq_head)) { @@ -235,14 +236,16 @@ static int ut_tq_test (const struct ut_tq_args *args) { i = totcnt - 1; TAILQ_FOREACH_REVERSE(e, tqh[0], ut_tq_head, link) { if (i < 0) { - RD_UT_WARN("REVERSE: Too many elements in list tqh[0]: " - "idx %d < 0: element %p (value %d)", - i, e, e->v); + RD_UT_WARN( + "REVERSE: Too many elements in list tqh[0]: " + "idx %d < 0: element %p (value %d)", + i, e, e->v); fails++; } else if (e->v != args->exp[i]) { - RD_UT_WARN("REVERSE: Element idx %d/%d in tqh[0] has " - "value %d, expected %d", - i, totcnt, e->v, args->exp[i]); + RD_UT_WARN( + "REVERSE: Element idx %d/%d in tqh[0] has " + "value %d, expected %d", + i, totcnt, e->v, args->exp[i]); fails++; } else if (i == totcnt - 1 && e != 
TAILQ_LAST(tqh[0], ut_tq_head)) { @@ -253,9 +256,9 @@ static int ut_tq_test (const struct ut_tq_args *args) { i--; } - RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt-1], + RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt - 1], "TAILQ_LAST val %d, expected %d", - TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt-1]); + TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt - 1]); while ((e = TAILQ_FIRST(tqh[0]))) { TAILQ_REMOVE(tqh[0], e, link); @@ -268,102 +271,70 @@ static int ut_tq_test (const struct ut_tq_args *args) { } -static int unittest_sysqueue (void) { +static int unittest_sysqueue(void) { const struct ut_tq_args args[] = { + {"empty tqh[0]", + {{0, 0, 0}, {0, 3, 1}}, + 2, + {0, 1, 2, 99 /*sentinel*/}}, + {"prepend 1,0", + {{10, 3, 1}, {0, 3, 1}}, + 2, + {0, 1, 2, 10, 11, 12, 99}}, + {"prepend 2,1,0", + { + {10, 3, 1}, /* 10, 11, 12 */ + {5, 3, 1}, /* 5, 6, 7 */ + {0, 2, 1} /* 0, 1 */ + }, + 3, + {0, 1, 5, 6, 7, 10, 11, 12, 99}}, + {"insert 1", {{0, 3, 2}, {1, 2, 2}}, 2, {0, 1, 3, 2, 4, 99}}, + {"insert 1,2", + { + {0, 3, 3}, /* 0, 3, 6 */ + {1, 2, 3}, /* 1, 4 */ + {2, 1, 3} /* 2 */ + }, + 3, + {0, 1, 2, 4, 3, 6, 99}}, + {"append 1", + {{0, 5, 1}, {5, 5, 1}}, + 2, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 99}}, + {"append 1,2", + { + {0, 5, 1}, /* 0, 1, 2, 3, 4 */ + {5, 5, 1}, /* 5, 6, 7, 8, 9 */ + {11, 2, 1} /* 11, 12 */ + }, + 3, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 99}}, + { + "insert 1,0,2", { - "empty tqh[0]", - { - { 0, 0, 0 }, - { 0, 3, 1 } - }, - 2, - { 0, 1, 2, 99 /*sentinel*/ } - }, - { - "prepend 1,0", - { - { 10, 3, 1 }, - { 0, 3, 1 } - }, - 2, - { 0, 1, 2, 10, 11, 12, 99 } - }, - { - "prepend 2,1,0", - { - { 10, 3, 1 }, /* 10, 11, 12 */ - { 5, 3, 1 }, /* 5, 6, 7 */ - { 0, 2, 1 } /* 0, 1 */ - }, - 3, - { 0, 1, 5, 6, 7, 10, 11, 12, 99 } - }, - { - "insert 1", - { - { 0, 3, 2 }, - { 1, 2, 2 } - }, - 2, - { 0, 1, 3, 2, 4, 99 } - }, - { - "insert 1,2", - { - { 0, 3, 3 }, /* 0, 3, 6 */ - { 1, 2, 3 }, /* 1, 4 */ - { 
2, 1, 3 } /* 2 */ - }, - 3, - { 0, 1, 2, 4, 3, 6, 99 } - }, - { - "append 1", - { - { 0, 5, 1 }, - { 5, 5, 1 } - }, - 2, - { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 99 } - }, - { - "append 1,2", - { - { 0, 5, 1 }, /* 0, 1, 2, 3, 4 */ - { 5, 5, 1 }, /* 5, 6, 7, 8, 9 */ - { 11, 2, 1 } /* 11, 12 */ - }, - 3, - { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 99 } - }, - { - "insert 1,0,2", - { - { 5, 3, 1 }, /* 5, 6, 7 */ - { 0, 1, 1 }, /* 0 */ - { 10, 2, 1 } /* 10, 11 */ - }, - 3, - { 0, 5, 6, 7, 10, 11, 99 }, + {5, 3, 1}, /* 5, 6, 7 */ + {0, 1, 1}, /* 0 */ + {10, 2, 1} /* 10, 11 */ }, + 3, + {0, 5, 6, 7, 10, 11, 99}, + }, + { + "insert 2,0,1", { - "insert 2,0,1", - { - { 5, 3, 1 }, /* 5, 6, 7 */ - { 10, 2, 1 }, /* 10, 11 */ - { 0, 1, 1 } /* 0 */ - }, - 3, - { 0, 5, 6, 7, 10, 11, 99 }, + {5, 3, 1}, /* 5, 6, 7 */ + {10, 2, 1}, /* 10, 11 */ + {0, 1, 1} /* 0 */ }, - { - NULL - } - }; + 3, + {0, 5, 6, 7, 10, 11, 99}, + }, + {NULL}}; int i; int fails = 0; - for (i = 0 ; args[i].name != NULL; i++) + for (i = 0; args[i].name != NULL; i++) fails += ut_tq_test(&args[i]); RD_UT_ASSERT(!fails, "See %d previous failure(s)", fails); @@ -392,41 +363,43 @@ static int unittest_sysqueue (void) { static const int64_t rd_ut_qpc_freq = 14318180; static int64_t rd_ut_qpc_now; -BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER * lpFrequency) { +BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency) { lpFrequency->QuadPart = rd_ut_qpc_freq; return TRUE; } -BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER * lpPerformanceCount) { +BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount) { lpPerformanceCount->QuadPart = rd_ut_qpc_now * rd_ut_qpc_freq; return TRUE; } -static int unittest_rdclock (void) { +static int unittest_rdclock(void) { rd_ts_t t1, t2; /* First let "uptime" be fresh boot (0). 
*/ rd_ut_qpc_now = 0; - t1 = rd_clock(); + t1 = rd_clock(); rd_ut_qpc_now++; t2 = rd_clock(); RD_UT_ASSERT(t2 == t1 + (1 * 1000000), - "Expected t2 %"PRId64" to be 1s more than t1 %"PRId64, + "Expected t2 %" PRId64 " to be 1s more than t1 %" PRId64, t2, t1); /* Then skip forward to 8 days, which should trigger the * overflow in a faulty implementation. */ rd_ut_qpc_now = 8 * 86400; - t2 = rd_clock(); + t2 = rd_clock(); RD_UT_ASSERT(t2 == t1 + (8LL * 86400 * 1000000), - "Expected t2 %"PRId64" to be 8 days larger than t1 %"PRId64, + "Expected t2 %" PRId64 + " to be 8 days larger than t1 %" PRId64, t2, t1); /* And make sure we can run on a system with 38 years of uptime.. */ rd_ut_qpc_now = 38 * 365 * 86400; - t2 = rd_clock(); + t2 = rd_clock(); RD_UT_ASSERT(t2 == t1 + (38LL * 365 * 86400 * 1000000), - "Expected t2 %"PRId64" to be 38 years larger than t1 %"PRId64, + "Expected t2 %" PRId64 + " to be 38 years larger than t1 %" PRId64, t2, t1); RD_UT_PASS(); @@ -437,58 +410,58 @@ static int unittest_rdclock (void) { /**@}*/ -extern int unittest_string (void); -extern int unittest_cgrp (void); +extern int unittest_string(void); +extern int unittest_cgrp(void); #if WITH_SASL_SCRAM -extern int unittest_scram (void); +extern int unittest_scram(void); #endif -extern int unittest_assignors (void); -extern int unittest_map (void); +extern int unittest_assignors(void); +extern int unittest_map(void); #if WITH_CURL -extern int unittest_http (void); +extern int unittest_http(void); #endif -int rd_unittest (void) { +int rd_unittest(void) { int fails = 0; const struct { const char *name; - int (*call) (void); + int (*call)(void); } unittests[] = { - { "sysqueue", unittest_sysqueue }, - { "string", unittest_string }, - { "map", unittest_map }, - { "rdbuf", unittest_rdbuf }, - { "rdvarint", unittest_rdvarint }, - { "crc32c", unittest_rd_crc32c }, - { "msg", unittest_msg }, - { "murmurhash", unittest_murmur2 }, - { "fnv1a", unittest_fnv1a }, + {"sysqueue", unittest_sysqueue}, + 
{"string", unittest_string}, + {"map", unittest_map}, + {"rdbuf", unittest_rdbuf}, + {"rdvarint", unittest_rdvarint}, + {"crc32c", unittest_rd_crc32c}, + {"msg", unittest_msg}, + {"murmurhash", unittest_murmur2}, + {"fnv1a", unittest_fnv1a}, #if WITH_HDRHISTOGRAM - { "rdhdrhistogram", unittest_rdhdrhistogram }, + {"rdhdrhistogram", unittest_rdhdrhistogram}, #endif #ifdef _WIN32 - { "rdclock", unittest_rdclock }, + {"rdclock", unittest_rdclock}, #endif - { "conf", unittest_conf }, - { "broker", unittest_broker }, - { "request", unittest_request }, + {"conf", unittest_conf}, + {"broker", unittest_broker}, + {"request", unittest_request}, #if WITH_SASL_OAUTHBEARER - { "sasl_oauthbearer", unittest_sasl_oauthbearer }, + {"sasl_oauthbearer", unittest_sasl_oauthbearer}, #endif - { "aborted_txns", unittest_aborted_txns }, - { "cgrp", unittest_cgrp }, + {"aborted_txns", unittest_aborted_txns}, + {"cgrp", unittest_cgrp}, #if WITH_SASL_SCRAM - { "scram", unittest_scram }, + {"scram", unittest_scram}, #endif - { "assignors", unittest_assignors }, + {"assignors", unittest_assignors}, #if WITH_CURL - { "http", unittest_http }, + {"http", unittest_http}, #endif - { NULL } + {NULL} }; int i; const char *match = rd_getenv("RD_UT_TEST", NULL); - int cnt = 0; + int cnt = 0; if (rd_getenv("RD_UT_ASSERT", NULL)) rd_unittest_assert_on_failure = rd_true; @@ -505,32 +478,31 @@ int rd_unittest (void) { rd_kafka_global_init(); #if ENABLE_CODECOV - for (i = 0 ; i < RD_UT_COVNR_MAX+1 ; i++) + for (i = 0; i < RD_UT_COVNR_MAX + 1; i++) rd_atomic64_init(&rd_ut_covnrs[i], 0); #endif - for (i = 0 ; unittests[i].name ; i++) { + for (i = 0; unittests[i].name; i++) { int f; if (match && !strstr(unittests[i].name, match)) continue; f = unittests[i].call(); - RD_UT_SAY("unittest: %s: %4s\033[0m", - unittests[i].name, + RD_UT_SAY("unittest: %s: %4s\033[0m", unittests[i].name, f ? 
"\033[31mFAIL" : "\033[32mPASS"); fails += f; cnt++; } #if ENABLE_CODECOV -#if FIXME /* This check only works if all tests that use coverage checks - * are run, which we can't really know, so disable until we +#if FIXME /* This check only works if all tests that use coverage checks \ + * are run, which we can't really know, so disable until we \ * know what to do with this. */ if (!match) { /* Verify all code paths were covered */ int cov_fails = 0; - for (i = 0 ; i < RD_UT_COVNR_MAX+1 ; i++) { + for (i = 0; i < RD_UT_COVNR_MAX + 1; i++) { if (!RD_UT_COVERAGE_CHECK(i)) cov_fails++; } diff --git a/src/rdunittest.h b/src/rdunittest.h index bff125e296..a154885680 100644 --- a/src/rdunittest.h +++ b/src/rdunittest.h @@ -43,62 +43,65 @@ extern rd_bool_t rd_unittest_slow; * @brief Begin single unit-test function (optional). * Currently only used for logging. */ -#define RD_UT_BEGIN() \ - fprintf(stderr, \ - "\033[34mRDUT: INFO: %s:%d: %s: BEGIN: \033[0m\n", \ +#define RD_UT_BEGIN() \ + fprintf(stderr, "\033[34mRDUT: INFO: %s:%d: %s: BEGIN: \033[0m\n", \ __FILE__, __LINE__, __FUNCTION__) /** * @brief Fail the current unit-test function. */ -#define RD_UT_FAIL(...) do { \ - fprintf(stderr, "\033[31mRDUT: FAIL: %s:%d: %s: ", \ - __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m\n"); \ - if (rd_unittest_assert_on_failure) \ - rd_assert(!*"unittest failure"); \ - return 1; \ +#define RD_UT_FAIL(...) 
\ + do { \ + fprintf(stderr, "\033[31mRDUT: FAIL: %s:%d: %s: ", __FILE__, \ + __LINE__, __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m\n"); \ + if (rd_unittest_assert_on_failure) \ + rd_assert(!*"unittest failure"); \ + return 1; \ } while (0) /** * @brief Pass the current unit-test function */ -#define RD_UT_PASS() do { \ - fprintf(stderr, "\033[32mRDUT: PASS: %s:%d: %s\033[0m\n", \ - __FILE__, __LINE__, __FUNCTION__); \ - return 0; \ +#define RD_UT_PASS() \ + do { \ + fprintf(stderr, "\033[32mRDUT: PASS: %s:%d: %s\033[0m\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return 0; \ } while (0) - /** - * @brief Skip the current unit-test function - */ -#define RD_UT_SKIP(...) do { \ - fprintf(stderr, "\033[33mRDUT: SKIP: %s:%d: %s: ", \ - __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m\n"); \ - return 0; \ +/** + * @brief Skip the current unit-test function + */ +#define RD_UT_SKIP(...) \ + do { \ + fprintf(stderr, "\033[33mRDUT: SKIP: %s:%d: %s: ", __FILE__, \ + __LINE__, __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m\n"); \ + return 0; \ } while (0) /** * @brief Fail unit-test if \p expr is false */ -#define RD_UT_ASSERT(expr,...) do { \ - if (!(expr)) { \ - fprintf(stderr, \ - "\033[31mRDUT: FAIL: %s:%d: %s: " \ - "assert failed: " # expr ": ", \ - __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m\n"); \ - if (rd_unittest_assert_on_failure) \ - rd_assert(expr); \ - return 1; \ - } \ - } while (0) +#define RD_UT_ASSERT(expr, ...) 
\ + do { \ + if (!(expr)) { \ + fprintf(stderr, \ + "\033[31mRDUT: FAIL: %s:%d: %s: " \ + "assert failed: " #expr ": ", \ + __FILE__, __LINE__, __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m\n"); \ + if (rd_unittest_assert_on_failure) \ + rd_assert(expr); \ + return 1; \ + } \ + } while (0) /** @@ -107,36 +110,38 @@ extern rd_bool_t rd_unittest_slow; * * @param VFMT is the printf formatter for \p V's type */ -#define RD_UT_ASSERT_RANGE(V,VMIN,VMAX,VFMT) \ - RD_UT_ASSERT((VMIN) <= (V) && (VMAX) >= (V), \ - VFMT" out of range "VFMT" .. "VFMT, \ - (V), (VMIN), (VMAX)) +#define RD_UT_ASSERT_RANGE(V, VMIN, VMAX, VFMT) \ + RD_UT_ASSERT((VMIN) <= (V) && (VMAX) >= (V), \ + VFMT " out of range " VFMT " .. " VFMT, (V), (VMIN), \ + (VMAX)) /** * @brief Log something from a unit-test */ -#define RD_UT_SAY(...) do { \ - fprintf(stderr, "RDUT: INFO: %s:%d: %s: ", \ - __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ +#define RD_UT_SAY(...) \ + do { \ + fprintf(stderr, "RDUT: INFO: %s:%d: %s: ", __FILE__, __LINE__, \ + __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ } while (0) /** * @brief Warn about something from a unit-test */ -#define RD_UT_WARN(...) do { \ - fprintf(stderr, "\033[33mRDUT: WARN: %s:%d: %s: ", \ - __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m\n"); \ +#define RD_UT_WARN(...) \ + do { \ + fprintf(stderr, "\033[33mRDUT: WARN: %s:%d: %s: ", __FILE__, \ + __LINE__, __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m\n"); \ } while (0) -int rd_unittest (void); +int rd_unittest(void); @@ -192,7 +197,7 @@ int rd_unittest (void); /** * @brief Register code as covered/executed. 
*/ -#define RD_UT_COVERAGE(COVNR) \ +#define RD_UT_COVERAGE(COVNR) \ rd_ut_coverage(__FILE__, __FUNCTION__, __LINE__, COVNR) /** @@ -200,18 +205,20 @@ int rd_unittest (void); * will fail the unit test (but not return) if code has not * been executed. */ -#define RD_UT_COVERAGE_CHECK(COVNR) \ +#define RD_UT_COVERAGE_CHECK(COVNR) \ rd_ut_coverage_check(__FILE__, __FUNCTION__, __LINE__, COVNR) -void rd_ut_coverage (const char *file, const char *func, int line, int covnr); -int64_t rd_ut_coverage_check (const char *file, const char *func, int line, - int covnr); +void rd_ut_coverage(const char *file, const char *func, int line, int covnr); +int64_t +rd_ut_coverage_check(const char *file, const char *func, int line, int covnr); #else /* Does nothing if ENABLE_CODECOV is not set */ -#define RD_UT_COVERAGE(COVNR) do {} while (0) +#define RD_UT_COVERAGE(COVNR) \ + do { \ + } while (0) #define RD_UT_COVERAGE_CHECK(COVNR) 1 #endif /* ENABLE_CODECOV */ diff --git a/src/rdvarint.c b/src/rdvarint.c index e718e8c9c4..fb0cbd0466 100644 --- a/src/rdvarint.c +++ b/src/rdvarint.c @@ -31,14 +31,14 @@ #include "rdunittest.h" -static int do_test_rd_uvarint_enc_i64 (const char *file, int line, - int64_t num, const char *exp, - size_t exp_size) { - char buf[16] = { 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff }; - size_t sz = rd_uvarint_enc_i64(buf, sizeof(buf), num); +static int do_test_rd_uvarint_enc_i64(const char *file, + int line, + int64_t num, + const char *exp, + size_t exp_size) { + char buf[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + size_t sz = rd_uvarint_enc_i64(buf, sizeof(buf), num); size_t r; int ir; rd_buf_t b; @@ -46,17 +46,19 @@ static int do_test_rd_uvarint_enc_i64 (const char *file, int line, int64_t ret_num; if (sz != exp_size || memcmp(buf, exp, exp_size)) - RD_UT_FAIL("i64 encode of %"PRId64": " - "expected size %"PRIusz" (got %"PRIusz")\n", + 
RD_UT_FAIL("i64 encode of %" PRId64 + ": " + "expected size %" PRIusz " (got %" PRIusz ")\n", num, exp_size, sz); /* Verify with standard decoder */ r = rd_varint_dec_i64(buf, sz, &ret_num); RD_UT_ASSERT(!RD_UVARINT_DEC_FAILED(r), - "varint decode failed: %"PRIusz, r); + "varint decode failed: %" PRIusz, r); RD_UT_ASSERT(ret_num == num, "varint decode returned wrong number: " - "%"PRId64" != %"PRId64, ret_num, num); + "%" PRId64 " != %" PRId64, + ret_num, num); /* Verify with slice decoder */ rd_buf_init(&b, 1, 0); @@ -66,35 +68,37 @@ static int do_test_rd_uvarint_enc_i64 (const char *file, int line, rd_slice_init_full(&slice, &b); /* Should fail for incomplete reads */ - ir = rd_slice_narrow_copy(&slice, &bad_slice, sz-1); + ir = rd_slice_narrow_copy(&slice, &bad_slice, sz - 1); RD_UT_ASSERT(ir, "narrow_copy failed"); ret_num = -1; - r = rd_slice_read_varint(&bad_slice, &ret_num); + r = rd_slice_read_varint(&bad_slice, &ret_num); RD_UT_ASSERT(RD_UVARINT_DEC_FAILED(r), "varint decode failed should have failed, " - "returned %"PRIusz, - r); + "returned %" PRIusz, + r); r = rd_slice_offset(&bad_slice); RD_UT_ASSERT(r == 0, - "expected slice position to not change, but got %"PRIusz, + "expected slice position to not change, but got %" PRIusz, r); /* Verify proper slice */ ret_num = -1; - r = rd_slice_read_varint(&slice, &ret_num); + r = rd_slice_read_varint(&slice, &ret_num); RD_UT_ASSERT(!RD_UVARINT_DEC_FAILED(r), - "varint decode failed: %"PRIusz, r); + "varint decode failed: %" PRIusz, r); RD_UT_ASSERT(ret_num == num, "varint decode returned wrong number: " - "%"PRId64" != %"PRId64, ret_num, num); + "%" PRId64 " != %" PRId64, + ret_num, num); RD_UT_ASSERT(r == sz, - "expected varint decoder to read %"PRIusz" bytes, " - "not %"PRIusz, + "expected varint decoder to read %" PRIusz + " bytes, " + "not %" PRIusz, sz, r); r = rd_slice_offset(&slice); RD_UT_ASSERT(r == sz, - "expected slice position to change to %"PRIusz - ", but got %"PRIusz, + "expected slice position 
to change to %" PRIusz + ", but got %" PRIusz, sz, r); @@ -104,41 +108,27 @@ static int do_test_rd_uvarint_enc_i64 (const char *file, int line, } -int unittest_rdvarint (void) { +int unittest_rdvarint(void) { int fails = 0; fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 0, - (const char[]){ 0 }, 1); + (const char[]) {0}, 1); fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 1, - (const char[]){ 0x2 }, 1); + (const char[]) {0x2}, 1); fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, -1, - (const char[]){ 0x1 }, 1); + (const char[]) {0x1}, 1); fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 23, - (const char[]){ 0x2e }, 1); + (const char[]) {0x2e}, 1); fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, -23, - (const char[]){ 0x2d }, 1); + (const char[]) {0x2d}, 1); fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 253, - (const char[]){ 0xfa, 3 }, 2); - fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, - 1234567890101112, - (const char[]){ 0xf0, - 0x8d, - 0xd3, - 0xc8, - 0xa7, - 0xb5, - 0xb1, - 0x04 }, 8); - fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, - -1234567890101112, - (const char[]){ 0xef, - 0x8d, - 0xd3, - 0xc8, - 0xa7, - 0xb5, - 0xb1, - 0x04 }, 8); + (const char[]) {0xfa, 3}, 2); + fails += do_test_rd_uvarint_enc_i64( + __FILE__, __LINE__, 1234567890101112, + (const char[]) {0xf0, 0x8d, 0xd3, 0xc8, 0xa7, 0xb5, 0xb1, 0x04}, 8); + fails += do_test_rd_uvarint_enc_i64( + __FILE__, __LINE__, -1234567890101112, + (const char[]) {0xef, 0x8d, 0xd3, 0xc8, 0xa7, 0xb5, 0xb1, 0x04}, 8); return fails; } diff --git a/src/rdvarint.h b/src/rdvarint.h index 496a9eb768..6fe112ba95 100644 --- a/src/rdvarint.h +++ b/src/rdvarint.h @@ -45,8 +45,9 @@ * @returns the number of bytes written to \p dst, or 0 if not enough space. 
*/ -static RD_INLINE RD_UNUSED -size_t rd_uvarint_enc_u64 (char *dst, size_t dstsize, uint64_t num) { +static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_u64(char *dst, + size_t dstsize, + uint64_t num) { size_t of = 0; do { @@ -64,14 +65,16 @@ size_t rd_uvarint_enc_u64 (char *dst, size_t dstsize, uint64_t num) { * @brief encodes a signed integer using zig-zag encoding. * @sa rd_uvarint_enc_u64 */ -static RD_INLINE RD_UNUSED -size_t rd_uvarint_enc_i64 (char *dst, size_t dstsize, int64_t num) { +static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_i64(char *dst, + size_t dstsize, + int64_t num) { return rd_uvarint_enc_u64(dst, dstsize, (num << 1) ^ (num >> 63)); } -static RD_INLINE RD_UNUSED -size_t rd_uvarint_enc_i32 (char *dst, size_t dstsize, int32_t num) { +static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_i32(char *dst, + size_t dstsize, + int32_t num) { return rd_uvarint_enc_i64(dst, dstsize, num); } @@ -96,7 +99,7 @@ size_t rd_uvarint_enc_i32 (char *dst, size_t dstsize, int32_t num) { * @returns 1 if varint decoding failed, else 0. * @warning \p DEC_RETVAL will be evaluated twice. */ -#define RD_UVARINT_DEC_FAILED(DEC_RETVAL) \ +#define RD_UVARINT_DEC_FAILED(DEC_RETVAL) \ (RD_UVARINT_UNDERFLOW(DEC_RETVAL) || RD_UVARINT_OVERFLOW(DEC_RETVAL)) @@ -111,11 +114,12 @@ size_t rd_uvarint_enc_i32 (char *dst, size_t dstsize, int32_t num) { * * @returns the number of bytes read from \p src. 
*/ -static RD_INLINE RD_UNUSED -size_t rd_uvarint_dec (const char *src, size_t srcsize, uint64_t *nump) { - size_t of = 0; +static RD_INLINE RD_UNUSED size_t rd_uvarint_dec(const char *src, + size_t srcsize, + uint64_t *nump) { + size_t of = 0; uint64_t num = 0; - int shift = 0; + int shift = 0; do { if (unlikely(srcsize-- == 0)) @@ -128,8 +132,9 @@ size_t rd_uvarint_dec (const char *src, size_t srcsize, uint64_t *nump) { return of; } -static RD_INLINE RD_UNUSED -size_t rd_varint_dec_i64 (const char *src, size_t srcsize, int64_t *nump) { +static RD_INLINE RD_UNUSED size_t rd_varint_dec_i64(const char *src, + size_t srcsize, + int64_t *nump) { uint64_t n; size_t r; @@ -144,8 +149,7 @@ size_t rd_varint_dec_i64 (const char *src, size_t srcsize, int64_t *nump) { /** * @returns the maximum encoded size for a type */ -#define RD_UVARINT_ENC_SIZEOF(TYPE) \ - (sizeof(TYPE) + 1 + (sizeof(TYPE)/7)) +#define RD_UVARINT_ENC_SIZEOF(TYPE) (sizeof(TYPE) + 1 + (sizeof(TYPE) / 7)) /** * @returns the encoding size of the value 0 @@ -153,7 +157,7 @@ size_t rd_varint_dec_i64 (const char *src, size_t srcsize, int64_t *nump) { #define RD_UVARINT_ENC_SIZE_0() ((size_t)1) -int unittest_rdvarint (void); +int unittest_rdvarint(void); /**@}*/ diff --git a/src/rdwin32.h b/src/rdwin32.h index 15d6ee9c5f..8ca0887f60 100644 --- a/src/rdwin32.h +++ b/src/rdwin32.h @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015 Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. 
-* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2015 Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ /** * Win32 (Visual Studio) support @@ -39,7 +39,7 @@ #include #define WIN32_MEAN_AND_LEAN -#include /* for sockets + struct timeval */ +#include /* for sockets + struct timeval */ #include #include @@ -54,31 +54,31 @@ typedef SSIZE_T ssize_t; typedef int socklen_t; struct iovec { - void *iov_base; - size_t iov_len; + void *iov_base; + size_t iov_len; }; struct msghdr { - struct iovec *msg_iov; - int msg_iovlen; + struct iovec *msg_iov; + int msg_iovlen; }; /** -* Annotations, attributes, optimizers -*/ + * Annotations, attributes, optimizers + */ #ifndef likely -#define likely(x) x +#define likely(x) x #endif #ifndef unlikely #define unlikely(x) x #endif #define RD_UNUSED -#define RD_INLINE __inline +#define RD_INLINE __inline #define RD_WARN_UNUSED_RESULT -#define RD_NORETURN __declspec(noreturn) -#define RD_IS_CONSTANT(p) (0) +#define RD_NORETURN __declspec(noreturn) +#define RD_IS_CONSTANT(p) (0) #ifdef _MSC_VER #define RD_TLS __declspec(thread) #elif defined(__MINGW32__) @@ -99,15 +99,15 @@ struct msghdr { */ /* size_t and ssize_t format strings */ -#define PRIusz "Iu" -#define PRIdsz "Id" +#define PRIusz "Iu" +#define PRIdsz "Id" #ifndef RD_FORMAT #define RD_FORMAT(...) 
#endif -static RD_UNUSED RD_INLINE -int rd_vsnprintf (char *str, size_t size, const char *format, va_list ap) { +static RD_UNUSED RD_INLINE int +rd_vsnprintf(char *str, size_t size, const char *format, va_list ap) { int cnt = -1; if (size != 0) @@ -118,8 +118,8 @@ int rd_vsnprintf (char *str, size_t size, const char *format, va_list ap) { return cnt; } -static RD_UNUSED RD_INLINE -int rd_snprintf (char *str, size_t size, const char *format, ...) { +static RD_UNUSED RD_INLINE int +rd_snprintf(char *str, size_t size, const char *format, ...) { int cnt; va_list ap; @@ -131,11 +131,11 @@ int rd_snprintf (char *str, size_t size, const char *format, ...) { } -#define rd_strcasecmp(A,B) _stricmp(A,B) -#define rd_strncasecmp(A,B,N) _strnicmp(A,B,N) +#define rd_strcasecmp(A, B) _stricmp(A, B) +#define rd_strncasecmp(A, B, N) _strnicmp(A, B, N) /* There is a StrStrIA() but it requires extra linking, so use our own * implementation instead. */ -#define rd_strcasestr(HAYSTACK,NEEDLE) _rd_strcasestr(HAYSTACK,NEEDLE) +#define rd_strcasestr(HAYSTACK, NEEDLE) _rd_strcasestr(HAYSTACK, NEEDLE) @@ -153,22 +153,21 @@ int rd_snprintf (char *str, size_t size, const char *format, ...) { #define rd_set_errno(err) _set_errno((err)) static RD_INLINE RD_UNUSED const char *rd_strerror(int err) { - static RD_TLS char ret[128]; + static RD_TLS char ret[128]; - strerror_s(ret, sizeof(ret) - 1, err); - return ret; + strerror_s(ret, sizeof(ret) - 1, err); + return ret; } /** * @brief strerror() for Win32 API errors as returned by GetLastError() et.al. 
*/ static RD_UNUSED char * -rd_strerror_w32 (DWORD errcode, char *dst, size_t dstsize) { +rd_strerror_w32(DWORD errcode, char *dst, size_t dstsize) { char *t; FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, errcode, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, errcode, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)dst, (DWORD)dstsize - 1, NULL); /* Remove newlines */ while ((t = strchr(dst, (int)'\r')) || (t = strchr(dst, (int)'\n'))) @@ -193,34 +192,33 @@ rd_strerror_w32 (DWORD errcode, char *dst, size_t dstsize) { * Microsecond sleep. * 'retry': if true, retry if sleep is interrupted (because of signal) */ -#define rd_usleep(usec,terminate) Sleep((usec) / 1000) +#define rd_usleep(usec, terminate) Sleep((usec) / 1000) /** * @brief gettimeofday() for win32 */ -static RD_UNUSED -int rd_gettimeofday (struct timeval *tv, struct timezone *tz) { - SYSTEMTIME st; - FILETIME ft; - ULARGE_INTEGER d; - - GetSystemTime(&st); - SystemTimeToFileTime(&st, &ft); - d.HighPart = ft.dwHighDateTime; - d.LowPart = ft.dwLowDateTime; - tv->tv_sec = (long)((d.QuadPart - 116444736000000000llu) / 10000000L); - tv->tv_usec = (long)(st.wMilliseconds * 1000); - - return 0; +static RD_UNUSED int rd_gettimeofday(struct timeval *tv, struct timezone *tz) { + SYSTEMTIME st; + FILETIME ft; + ULARGE_INTEGER d; + + GetSystemTime(&st); + SystemTimeToFileTime(&st, &ft); + d.HighPart = ft.dwHighDateTime; + d.LowPart = ft.dwLowDateTime; + tv->tv_sec = (long)((d.QuadPart - 116444736000000000llu) / 10000000L); + tv->tv_usec = (long)(st.wMilliseconds * 1000); + + return 0; } -#define rd_assert(EXPR) assert(EXPR) +#define rd_assert(EXPR) assert(EXPR) -static RD_INLINE RD_UNUSED -const char *rd_getenv (const char *env, const char *def) { +static RD_INLINE RD_UNUSED const char *rd_getenv(const char *env, + const char *def) { static RD_TLS char tmp[512]; DWORD r; r = GetEnvironmentVariableA(env, tmp, sizeof(tmp)); @@ -233,7 
+231,8 @@ const char *rd_getenv (const char *env, const char *def) { /** * Empty struct initializer */ -#define RD_ZERO_INIT {0} +#define RD_ZERO_INIT \ + { 0 } #ifndef __cplusplus /** @@ -250,24 +249,25 @@ typedef SOCKET rd_socket_t; #define rd_socket_errno WSAGetLastError() /** @brief String representation of socket error */ -static RD_UNUSED const char *rd_socket_strerror (int err) { - static RD_TLS char buf[256]; - rd_strerror_w32(err, buf, sizeof(buf)); - return buf; +static RD_UNUSED const char *rd_socket_strerror(int err) { + static RD_TLS char buf[256]; + rd_strerror_w32(err, buf, sizeof(buf)); + return buf; } /** @brief WSAPoll() struct type */ typedef WSAPOLLFD rd_pollfd_t; /** @brief poll(2) */ -#define rd_socket_poll(POLLFD,FDCNT,TIMEOUT_MS) WSAPoll(POLLFD,FDCNT,TIMEOUT_MS) +#define rd_socket_poll(POLLFD, FDCNT, TIMEOUT_MS) \ + WSAPoll(POLLFD, FDCNT, TIMEOUT_MS) /** * @brief Set socket to non-blocking * @returns 0 on success or -1 on failure (see rd_kafka_rd_socket_errno) */ -static RD_UNUSED int rd_fd_set_nonblocking (rd_socket_t fd) { +static RD_UNUSED int rd_fd_set_nonblocking(rd_socket_t fd) { u_long on = 1; if (ioctlsocket(fd, FIONBIO, &on) == SOCKET_ERROR) return (int)WSAGetLastError(); @@ -278,12 +278,12 @@ static RD_UNUSED int rd_fd_set_nonblocking (rd_socket_t fd) { * @brief Create non-blocking pipe * @returns 0 on success or errno on failure */ -static RD_UNUSED int rd_pipe_nonblocking (rd_socket_t *fds) { +static RD_UNUSED int rd_pipe_nonblocking(rd_socket_t *fds) { /* On windows, the "pipe" will be a tcp connection. 
- * This is to allow WSAPoll to be used to poll pipe events */ + * This is to allow WSAPoll to be used to poll pipe events */ - SOCKET listen_s = INVALID_SOCKET; - SOCKET accept_s = INVALID_SOCKET; + SOCKET listen_s = INVALID_SOCKET; + SOCKET accept_s = INVALID_SOCKET; SOCKET connect_s = INVALID_SOCKET; struct sockaddr_in listen_addr; @@ -296,15 +296,15 @@ static RD_UNUSED int rd_pipe_nonblocking (rd_socket_t *fds) { if (listen_s == INVALID_SOCKET) goto err; - listen_addr.sin_family = AF_INET; + listen_addr.sin_family = AF_INET; listen_addr.sin_addr.s_addr = ntohl(INADDR_LOOPBACK); - listen_addr.sin_port = 0; - if (bind(listen_s, (struct sockaddr*)&listen_addr, + listen_addr.sin_port = 0; + if (bind(listen_s, (struct sockaddr *)&listen_addr, sizeof(listen_addr)) != 0) goto err; sock_len = sizeof(connect_addr); - if (getsockname(listen_s, (struct sockaddr*)&connect_addr, + if (getsockname(listen_s, (struct sockaddr *)&connect_addr, &sock_len) != 0) goto err; @@ -316,7 +316,7 @@ static RD_UNUSED int rd_pipe_nonblocking (rd_socket_t *fds) { if (connect_s == INVALID_SOCKET) goto err; - if (connect(connect_s, (struct sockaddr*)&connect_addr, + if (connect(connect_s, (struct sockaddr *)&connect_addr, sizeof(connect_addr)) == SOCKET_ERROR) goto err; @@ -338,17 +338,17 @@ static RD_UNUSED int rd_pipe_nonblocking (rd_socket_t *fds) { * of signaling bytes to accumulate when * io-signalled queue is not being served for a while. 
*/ bufsz = 100; - setsockopt(accept_s, SOL_SOCKET, SO_SNDBUF, - (const char *)&bufsz, sizeof(bufsz)); + setsockopt(accept_s, SOL_SOCKET, SO_SNDBUF, (const char *)&bufsz, + sizeof(bufsz)); bufsz = 100; - setsockopt(accept_s, SOL_SOCKET, SO_RCVBUF, - (const char *)&bufsz, sizeof(bufsz)); + setsockopt(accept_s, SOL_SOCKET, SO_RCVBUF, (const char *)&bufsz, + sizeof(bufsz)); bufsz = 100; - setsockopt(connect_s, SOL_SOCKET, SO_SNDBUF, - (const char *)&bufsz, sizeof(bufsz)); + setsockopt(connect_s, SOL_SOCKET, SO_SNDBUF, (const char *)&bufsz, + sizeof(bufsz)); bufsz = 100; - setsockopt(connect_s, SOL_SOCKET, SO_RCVBUF, - (const char *)&bufsz, sizeof(bufsz)); + setsockopt(connect_s, SOL_SOCKET, SO_RCVBUF, (const char *)&bufsz, + sizeof(bufsz)); /* Store resulting sockets. * They are bidirectional, so it does not matter which is read or @@ -357,7 +357,7 @@ static RD_UNUSED int rd_pipe_nonblocking (rd_socket_t *fds) { fds[1] = connect_s; return 0; - err: +err: if (listen_s != INVALID_SOCKET) closesocket(listen_s); if (accept_s != INVALID_SOCKET) @@ -367,9 +367,9 @@ static RD_UNUSED int rd_pipe_nonblocking (rd_socket_t *fds) { return -1; } -#define rd_read(fd,buf,sz) recv(fd,buf,sz,0) -#define rd_write(fd,buf,sz) send(fd,buf,sz,0) -#define rd_close(fd) closesocket(fd) +#define rd_read(fd, buf, sz) recv(fd, buf, sz, 0) +#define rd_write(fd, buf, sz) send(fd, buf, sz, 0) +#define rd_close(fd) closesocket(fd) #endif /* !__cplusplus*/ diff --git a/src/rdxxhash.c b/src/rdxxhash.c index 186db2f664..89f7c8cf43 100644 --- a/src/rdxxhash.c +++ b/src/rdxxhash.c @@ -1,86 +1,92 @@ /* -* xxHash - Fast Hash algorithm -* Copyright (C) 2012-2016, Yann Collet -* -* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are -* met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of 
conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following disclaimer -* in the documentation and/or other materials provided with the -* distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -* -* You can contact the author at : -* - xxHash homepage: http://www.xxhash.com -* - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ + * xxHash - Fast Hash algorithm + * Copyright (C) 2012-2016, Yann Collet + * + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You can contact the author at : + * - xxHash homepage: http://www.xxhash.com + * - xxHash source repository : https://github.com/Cyan4973/xxHash + */ /* ************************************* -* Tuning parameters -***************************************/ + * Tuning parameters + ***************************************/ /*!XXH_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. - * It can generate buggy code on targets which do not support unaligned memory accesses. 
- * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://stackoverflow.com/a/32095106/646947 for details. - * Prefer these methods in priority order (0 > 1 > 2) + * By default, access to unaligned memory is controlled by `memcpy()`, which is + * safe and portable. Unfortunately, on some target/compiler combinations, the + * generated assembly is sub-optimal. The below switch allow to select different + * access method for improved performance. Method 0 (default) : use `memcpy()`. + * Safe and portable. Method 1 : `__packed` statement. It depends on compiler + * extension (ie, not portable). This method is safe if your compiler supports + * it, and *generally* as fast or faster than `memcpy`. Method 2 : direct + * access. This method doesn't depend on compiler but violate C standard. It can + * generate buggy code on targets which do not support unaligned memory + * accesses. But in some circumstances, it's the only known way to get the most + * performance (ie GCC + ARMv6) See http://stackoverflow.com/a/32095106/646947 + * for details. 
Prefer these methods in priority order (0 > 1 > 2) */ -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ - || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ - || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define XXH_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ - || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ - || defined(__ARM_ARCH_7S__) )) -# define XXH_FORCE_MEMORY_ACCESS 1 -# endif +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line \ + for example */ +#if defined(__GNUC__) && \ + (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \ + defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \ + defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)) +#define XXH_FORCE_MEMORY_ACCESS 2 +#elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ + (defined(__GNUC__) && \ + (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \ + defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \ + defined(__ARM_ARCH_7S__))) +#define XXH_FORCE_MEMORY_ACCESS 1 +#endif #endif /*!XXH_ACCEPT_NULL_INPUT_POINTER : - * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault. - * When this macro is enabled, xxHash actively checks input for null pointer. - * It it is, result for null input pointers is the same as a null-length input. + * If input pointer is NULL, xxHash default behavior is to dereference it, + * triggering a segfault. When this macro is enabled, xxHash actively checks + * input for null pointer. It it is, result for null input pointers is the same + * as a null-length input. 
*/ -#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ -# define XXH_ACCEPT_NULL_INPUT_POINTER 0 +#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ +#define XXH_ACCEPT_NULL_INPUT_POINTER 0 #endif /*!XXH_FORCE_NATIVE_FORMAT : - * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. - * Results are therefore identical for little-endian and big-endian CPU. - * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. - * Should endian-independence be of no importance for your application, you may set the #define below to 1, - * to improve speed for Big-endian CPU. - * This option has no impact on Little_Endian CPU. + * By default, xxHash library provides endian-independent Hash values, based on + * little-endian convention. Results are therefore identical for little-endian + * and big-endian CPU. This comes at a performance cost for big-endian CPU, + * since some swapping is required to emulate little-endian format. Should + * endian-independence be of no importance for your application, you may set the + * #define below to 1, to improve speed for Big-endian CPU. This option has no + * impact on Little_Endian CPU. */ -#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ -# define XXH_FORCE_NATIVE_FORMAT 0 +#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ +#define XXH_FORCE_NATIVE_FORMAT 0 #endif /*!XXH_FORCE_ALIGN_CHECK : @@ -91,306 +97,353 @@ * or when alignment doesn't matter for performance. 
*/ #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) -# define XXH_FORCE_ALIGN_CHECK 0 -# else -# define XXH_FORCE_ALIGN_CHECK 1 -# endif +#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || \ + defined(_M_X64) +#define XXH_FORCE_ALIGN_CHECK 0 +#else +#define XXH_FORCE_ALIGN_CHECK 1 +#endif #endif /* ************************************* -* Includes & Memory related functions -***************************************/ -/*! Modify the local functions below should you wish to use some other memory routines -* for malloc(), free() */ + * Includes & Memory related functions + ***************************************/ +/*! Modify the local functions below should you wish to use some other memory + * routines for malloc(), free() */ #include "rd.h" -static void* XXH_malloc(size_t s) { return rd_malloc(s); } -static void XXH_free (void* p) { rd_free(p); } +static void *XXH_malloc(size_t s) { + return rd_malloc(s); +} +static void XXH_free(void *p) { + rd_free(p); +} /*! 
and for memcpy() */ #include -static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } +static void *XXH_memcpy(void *dest, const void *src, size_t size) { + return memcpy(dest, src, size); +} -#include /* assert */ +#include /* assert */ #define XXH_STATIC_LINKING_ONLY #include "rdxxhash.h" /* ************************************* -* Compiler Specific Options -***************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# define FORCE_INLINE static __forceinline + * Compiler Specific Options + ***************************************/ +#ifdef _MSC_VER /* Visual Studio */ +#pragma warning( \ + disable : 4127) /* disable: C4127: conditional expression is constant */ +#define FORCE_INLINE static __forceinline #else -# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) -# else -# define FORCE_INLINE static inline -# endif -# else -# define FORCE_INLINE static -# endif /* __STDC_VERSION__ */ +#if defined(__cplusplus) || \ + defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +#ifdef __GNUC__ +#define FORCE_INLINE static inline __attribute__((always_inline)) +#else +#define FORCE_INLINE static inline +#endif +#else +#define FORCE_INLINE static +#endif /* __STDC_VERSION__ */ #endif /* ************************************* -* Basic Types -***************************************/ + * Basic Types + ***************************************/ #ifndef MEM_MODULE -# if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef uint32_t U32; -# else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; -# endif +#if 
!defined(__VMS) && \ + (defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)) +#include +typedef uint8_t BYTE; +typedef uint16_t U16; +typedef uint32_t U32; +#else +typedef unsigned char BYTE; +typedef unsigned short U16; +typedef unsigned int U32; +#endif #endif -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2)) -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } +/* Force direct memory access. Only works on CPU which support unaligned memory + * access in hardware */ +static U32 XXH_read32(const void *memPtr) { + return *(const U32 *)memPtr; +} -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1)) -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* __pack instructions are safer, but compiler specific, hence potentially + * problematic for some compilers */ /* currently only defined for gcc and icc */ -typedef union { U32 u32; } __attribute__((packed)) unalign; -static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +typedef union { + U32 u32; +} __attribute__((packed)) unalign; +static U32 XXH_read32(const void *ptr) { + return ((const unalign *)ptr)->u32; +} #else /* portable and safe solution. Generally efficient. 
* see : http://stackoverflow.com/a/32095106/646947 */ -static U32 XXH_read32(const void* memPtr) -{ - U32 val; - memcpy(&val, memPtr, sizeof(val)); - return val; +static U32 XXH_read32(const void *memPtr) { + U32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; } -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ /* **************************************** -* Compiler-specific Functions and Macros -******************************************/ + * Compiler-specific Functions and Macros + ******************************************/ #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +/* Note : although _rotl exists for minGW (GCC under windows), performance seems + * poor */ #if defined(_MSC_VER) -# define XXH_rotl32(x,r) _rotl(x,r) -# define XXH_rotl64(x,r) _rotl64(x,r) +#define XXH_rotl32(x, r) _rotl(x, r) +#define XXH_rotl64(x, r) _rotl64(x, r) #else -# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) -# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) +#define XXH_rotl32(x, r) ((x << r) | (x >> (32 - r))) +#define XXH_rotl64(x, r) ((x << r) | (x >> (64 - r))) #endif -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap32 _byteswap_ulong +#if defined(_MSC_VER) /* Visual Studio */ +#define XXH_swap32 _byteswap_ulong #elif XXH_GCC_VERSION >= 403 -# define XXH_swap32 __builtin_bswap32 +#define XXH_swap32 __builtin_bswap32 #else -static U32 XXH_swap32 (U32 x) -{ - return ((x << 24) & 0xff000000 ) | - ((x << 8) & 0x00ff0000 ) | - ((x >> 8) & 0x0000ff00 ) | - ((x >> 24) & 0x000000ff ); +static U32 XXH_swap32(U32 x) { + return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) | + ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff); } #endif /* ************************************* -* Architecture Macros -***************************************/ -typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + * 
Architecture Macros + ***************************************/ +typedef enum { XXH_bigEndian = 0, XXH_littleEndian = 1 } XXH_endianess; -/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler + * command line */ #ifndef XXH_CPU_LITTLE_ENDIAN -static int XXH_isLittleEndian(void) -{ - const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ - return one.c[0]; +static int XXH_isLittleEndian(void) { + const union { + U32 u; + BYTE c[4]; + } one = {1}; /* don't use static : performance detrimental */ + return one.c[0]; } -# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +#define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() #endif /* *************************** -* Memory reads -*****************************/ + * Memory reads + *****************************/ typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; -FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); - else - return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); +FORCE_INLINE U32 XXH_readLE32_align(const void *ptr, + XXH_endianess endian, + XXH_alignment align) { + if (align == XXH_unaligned) + return endian == XXH_littleEndian ? XXH_read32(ptr) + : XXH_swap32(XXH_read32(ptr)); + else + return endian == XXH_littleEndian + ? *(const U32 *)ptr + : XXH_swap32(*(const U32 *)ptr); } -FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE32_align(ptr, endian, XXH_unaligned); +FORCE_INLINE U32 XXH_readLE32(const void *ptr, XXH_endianess endian) { + return XXH_readLE32_align(ptr, endian, XXH_unaligned); } -static U32 XXH_readBE32(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +static U32 XXH_readBE32(const void *ptr) { + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) + : XXH_read32(ptr); } /* ************************************* -* Macros -***************************************/ -#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */ -XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + * Macros + ***************************************/ +#define XXH_STATIC_ASSERT(c) \ + { \ + enum { XXH_sa = 1 / (int)(!!(c)) }; \ + } /* use after variable declarations */ +XXH_PUBLIC_API unsigned XXH_versionNumber(void) { + return XXH_VERSION_NUMBER; +} /* ******************************************************************* -* 32-bit hash functions -*********************************************************************/ + * 32-bit hash functions + *********************************************************************/ static const U32 PRIME32_1 = 2654435761U; static const U32 PRIME32_2 = 2246822519U; static const U32 PRIME32_3 = 3266489917U; -static const U32 PRIME32_4 = 668265263U; -static const U32 PRIME32_5 = 374761393U; - -static U32 XXH32_round(U32 seed, U32 input) -{ - seed += input * PRIME32_2; - seed = XXH_rotl32(seed, 13); - seed *= PRIME32_1; - return seed; +static const U32 PRIME32_4 = 668265263U; +static const U32 PRIME32_5 = 374761393U; + +static U32 XXH32_round(U32 seed, U32 input) { + seed += input * PRIME32_2; + seed = XXH_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; } /* mix all bits */ -static U32 XXH32_avalanche(U32 h32) -{ - h32 ^= h32 >> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - return(h32); +static U32 XXH32_avalanche(U32 h32) { + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + return (h32); } #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) -static U32 -XXH32_finalize(U32 h32, 
const void* ptr, size_t len, - XXH_endianess endian, XXH_alignment align) +static U32 XXH32_finalize(U32 h32, + const void *ptr, + size_t len, + XXH_endianess endian, + XXH_alignment align) { - const BYTE* p = (const BYTE*)ptr; - -#define PROCESS1 \ - h32 += (*p++) * PRIME32_5; \ - h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; - -#define PROCESS4 \ - h32 += XXH_get32bits(p) * PRIME32_3; \ - p+=4; \ - h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; - - switch(len&15) /* or switch(bEnd - p) */ - { - case 12: PROCESS4; - /* fallthrough */ - case 8: PROCESS4; - /* fallthrough */ - case 4: PROCESS4; - return XXH32_avalanche(h32); - - case 13: PROCESS4; - /* fallthrough */ - case 9: PROCESS4; - /* fallthrough */ - case 5: PROCESS4; - PROCESS1; - return XXH32_avalanche(h32); - - case 14: PROCESS4; - /* fallthrough */ - case 10: PROCESS4; - /* fallthrough */ - case 6: PROCESS4; - PROCESS1; - PROCESS1; - return XXH32_avalanche(h32); - - case 15: PROCESS4; - /* fallthrough */ - case 11: PROCESS4; - /* fallthrough */ - case 7: PROCESS4; - /* fallthrough */ - case 3: PROCESS1; - /* fallthrough */ - case 2: PROCESS1; - /* fallthrough */ - case 1: PROCESS1; - /* fallthrough */ - case 0: return XXH32_avalanche(h32); - } - assert(0); - return h32; /* reaching this point is deemed impossible */ + const BYTE *p = (const BYTE *)ptr; + +#define PROCESS1 \ + h32 += (*p++) * PRIME32_5; \ + h32 = XXH_rotl32(h32, 11) * PRIME32_1; + +#define PROCESS4 \ + h32 += XXH_get32bits(p) * PRIME32_3; \ + p += 4; \ + h32 = XXH_rotl32(h32, 17) * PRIME32_4; + + switch (len & 15) /* or switch(bEnd - p) */ + { + case 12: + PROCESS4; + /* fallthrough */ + case 8: + PROCESS4; + /* fallthrough */ + case 4: + PROCESS4; + return XXH32_avalanche(h32); + + case 13: + PROCESS4; + /* fallthrough */ + case 9: + PROCESS4; + /* fallthrough */ + case 5: + PROCESS4; + PROCESS1; + return XXH32_avalanche(h32); + + case 14: + PROCESS4; + /* fallthrough */ + case 10: + PROCESS4; + /* fallthrough */ + case 6: + PROCESS4; + PROCESS1; + 
PROCESS1; + return XXH32_avalanche(h32); + + case 15: + PROCESS4; + /* fallthrough */ + case 11: + PROCESS4; + /* fallthrough */ + case 7: + PROCESS4; + /* fallthrough */ + case 3: + PROCESS1; + /* fallthrough */ + case 2: + PROCESS1; + /* fallthrough */ + case 1: + PROCESS1; + /* fallthrough */ + case 0: + return XXH32_avalanche(h32); + } + assert(0); + return h32; /* reaching this point is deemed impossible */ } -FORCE_INLINE U32 -XXH32_endian_align(const void* input, size_t len, U32 seed, - XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U32 h32; - -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)16; - } +FORCE_INLINE U32 XXH32_endian_align(const void *input, + size_t len, + U32 seed, + XXH_endianess endian, + XXH_alignment align) { + const BYTE *p = (const BYTE *)input; + const BYTE *bEnd = p + len; + U32 h32; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ + (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) + if (p == NULL) { + len = 0; + bEnd = p = (const BYTE *)(size_t)16; + } #endif - if (len>=16) { - const BYTE* const limit = bEnd - 15; - U32 v1 = seed + PRIME32_1 + PRIME32_2; - U32 v2 = seed + PRIME32_2; - U32 v3 = seed + 0; - U32 v4 = seed - PRIME32_1; - - do { - v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; - v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; - v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; - v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; - } while (p < limit); - - h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) - + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); - } else { - h32 = seed + PRIME32_5; - } - - h32 += (U32)len; - - return XXH32_finalize(h32, p, len&15, endian, align); + if (len >= 16) { + const BYTE *const limit = bEnd - 15; + U32 v1 = seed + PRIME32_1 + PRIME32_2; + U32 v2 = seed + PRIME32_2; + U32 v3 = seed + 0; + U32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(p)); + p 
+= 4; + v2 = XXH32_round(v2, XXH_get32bits(p)); + p += 4; + v3 = XXH32_round(v3, XXH_get32bits(p)); + p += 4; + v4 = XXH32_round(v4, XXH_get32bits(p)); + p += 4; + } while (p < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (U32)len; + + return XXH32_finalize(h32, p, len & 15, endian, align); } -XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) -{ +XXH_PUBLIC_API unsigned int +XXH32(const void *input, size_t len, unsigned int seed) { #if 0 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH32_state_t state; @@ -398,20 +451,30 @@ XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int s XXH32_update(&state, input, len); return XXH32_digest(&state); #else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == + 0) { /* Input is 4-bytes aligned, leverage the speed benefit + */ + if ((endian_detected == XXH_littleEndian) || + XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, + XXH_littleEndian, + XXH_aligned); + else + return XXH32_endian_align(input, len, seed, + XXH_bigEndian, + XXH_aligned); + } + } + + if 
((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, + XXH_unaligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, + XXH_unaligned); #endif } @@ -419,195 +482,223 @@ XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int s /*====== Hash streaming ======*/ -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) -{ - return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void) { + return (XXH32_state_t *)XXH_malloc(sizeof(XXH32_state_t)); } -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr) { + XXH_free(statePtr); + return XXH_OK; } -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dstState, + const XXH32_state_t *srcState) { + memcpy(dstState, srcState, sizeof(*dstState)); } -XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) -{ - XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)); - state.v1 = seed + PRIME32_1 + PRIME32_2; - state.v2 = seed + PRIME32_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME32_1; - /* do not write into reserved, planned to be removed in a future version */ - memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, + unsigned int seed) { + XXH32_state_t state; /* using a local state to memcpy() in order to + avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 
= seed - PRIME32_1; + /* do not write into reserved, planned to be removed in a future version + */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; } -FORCE_INLINE XXH_errorcode -XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - if (input==NULL) -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - return XXH_OK; +FORCE_INLINE XXH_errorcode XXH32_update_endian(XXH32_state_t *state, + const void *input, + size_t len, + XXH_endianess endian) { + if (input == NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ + (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) + return XXH_OK; #else - return XXH_ERROR; + return XXH_ERROR; #endif - { const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - - state->total_len_32 += (unsigned)len; - state->large_len |= (len>=16) | (state->total_len_32>=16); - - if (state->memsize + len < 16) { /* fill in tmp buffer */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); - state->memsize += (unsigned)len; - return XXH_OK; - } - - if (state->memsize) { /* some data left from previous update */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); - { const U32* p32 = state->mem32; - state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; - state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; - state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; - state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); - } - p += 16-state->memsize; - state->memsize = 0; - } - - if (p <= bEnd-16) { - const BYTE* const limit = bEnd - 16; - U32 v1 = state->v1; - U32 v2 = state->v2; - U32 v3 = state->v3; - U32 v4 = state->v4; - - do { - v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; - v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; - v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; - v4 = XXH32_round(v4, XXH_readLE32(p, endian)); 
p+=4; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; + { + const BYTE *p = (const BYTE *)input; + const BYTE *const bEnd = p + len; + + state->total_len_32 += (unsigned)len; + state->large_len |= (len >= 16) | (state->total_len_32 >= 16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE *)(state->mem32) + state->memsize, + input, len); + state->memsize += (unsigned)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE *)(state->mem32) + state->memsize, + input, 16 - state->memsize); + { + const U32 *p32 = state->mem32; + state->v1 = XXH32_round( + state->v1, XXH_readLE32(p32, endian)); + p32++; + state->v2 = XXH32_round( + state->v2, XXH_readLE32(p32, endian)); + p32++; + state->v3 = XXH32_round( + state->v3, XXH_readLE32(p32, endian)); + p32++; + state->v4 = XXH32_round( + state->v4, XXH_readLE32(p32, endian)); + } + p += 16 - state->memsize; + state->memsize = 0; + } + + if (p <= bEnd - 16) { + const BYTE *const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); + p += 4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); + p += 4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); + p += 4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); + p += 4; + } while (p <= limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd - p)); + state->memsize = (unsigned)(bEnd - p); + } } - if (p < bEnd) { - XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - } - - return XXH_OK; + return XXH_OK; } -XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; +XXH_PUBLIC_API XXH_errorcode 
XXH32_update(XXH32_state_t *state_in, + const void *input, + size_t len) { + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH32_update_endian(state_in, input, len, XXH_bigEndian); + if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_update_endian(state_in, input, len, + XXH_littleEndian); + else + return XXH32_update_endian(state_in, input, len, XXH_bigEndian); } -FORCE_INLINE U32 -XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) -{ - U32 h32; +FORCE_INLINE U32 XXH32_digest_endian(const XXH32_state_t *state, + XXH_endianess endian) { + U32 h32; - if (state->large_len) { - h32 = XXH_rotl32(state->v1, 1) - + XXH_rotl32(state->v2, 7) - + XXH_rotl32(state->v3, 12) - + XXH_rotl32(state->v4, 18); - } else { - h32 = state->v3 /* == seed */ + PRIME32_5; - } + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } - h32 += state->total_len_32; + h32 += state->total_len_32; - return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned); + return XXH32_finalize(h32, state->mem32, state->memsize, endian, + XXH_aligned); } -XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; +XXH_PUBLIC_API unsigned int XXH32_digest(const XXH32_state_t *state_in) { + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_digest_endian(state_in, XXH_littleEndian); - else - return XXH32_digest_endian(state_in, XXH_bigEndian); + if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return 
XXH32_digest_endian(state_in, XXH_littleEndian); + else + return XXH32_digest_endian(state_in, XXH_bigEndian); } /*====== Canonical representation ======*/ /*! Default XXH result types are basic unsigned 32 and 64 bits. -* The canonical representation follows human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file or buffer, remaining comparable across different systems. -*/ + * The canonical representation follows human-readable write convention, aka + * big-endian (large digits first). These functions allow transformation of hash + * result into and from its canonical format. This way, hash values can be + * written into a file or buffer, remaining comparable across different systems. + */ -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); - memcpy(dst, &hash, sizeof(*dst)); +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst, + XXH32_hash_t hash) { + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) + hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); } -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) -{ - return XXH_readBE32(src); +XXH_PUBLIC_API XXH32_hash_t +XXH32_hashFromCanonical(const XXH32_canonical_t *src) { + return XXH_readBE32(src); } #ifndef XXH_NO_LONG_LONG /* ******************************************************************* -* 64-bit hash functions -*********************************************************************/ + * 64-bit hash functions + *********************************************************************/ /*====== Memory access ======*/ #ifndef MEM_MODULE -# define MEM_MODULE -# if !defined (__VMS) \ - && 
(defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint64_t U64; -# else - /* if compiler doesn't support unsigned long long, replace by another 64-bit type */ - typedef unsigned long long U64; -# endif +#define MEM_MODULE +#if !defined(__VMS) && \ + (defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)) +#include +typedef uint64_t U64; +#else +/* if compiler doesn't support unsigned long long, replace by another 64-bit + * type */ +typedef unsigned long long U64; +#endif #endif -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2)) -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } +/* Force direct memory access. Only works on CPU which support unaligned memory + * access in hardware */ +static U64 XXH_read64(const void *memPtr) { + return *(const U64 *)memPtr; +} -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1)) -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* __pack instructions are safer, but compiler specific, hence potentially + * problematic for some compilers */ /* currently only defined for gcc and icc */ -typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; -static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } +typedef union { + U32 u32; + U64 u64; +} __attribute__((packed)) unalign64; +static U64 XXH_read64(const void *ptr) { + return ((const unalign64 *)ptr)->u64; +} #else @@ -615,49 +706,50 @@ static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } * see : 
http://stackoverflow.com/a/32095106/646947 */ -static U64 XXH_read64(const void* memPtr) -{ - U64 val; - memcpy(&val, memPtr, sizeof(val)); - return val; +static U64 XXH_read64(const void *memPtr) { + U64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; } -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap64 _byteswap_uint64 +#if defined(_MSC_VER) /* Visual Studio */ +#define XXH_swap64 _byteswap_uint64 #elif XXH_GCC_VERSION >= 403 -# define XXH_swap64 __builtin_bswap64 +#define XXH_swap64 __builtin_bswap64 #else -static U64 XXH_swap64 (U64 x) -{ - return ((x << 56) & 0xff00000000000000ULL) | - ((x << 40) & 0x00ff000000000000ULL) | - ((x << 24) & 0x0000ff0000000000ULL) | - ((x << 8) & 0x000000ff00000000ULL) | - ((x >> 8) & 0x00000000ff000000ULL) | - ((x >> 24) & 0x0000000000ff0000ULL) | - ((x >> 40) & 0x000000000000ff00ULL) | - ((x >> 56) & 0x00000000000000ffULL); +static U64 XXH_swap64(U64 x) { + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); } #endif -FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); - else - return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); +FORCE_INLINE U64 XXH_readLE64_align(const void *ptr, + XXH_endianess endian, + XXH_alignment align) { + if (align == XXH_unaligned) + return endian == XXH_littleEndian ? XXH_read64(ptr) + : XXH_swap64(XXH_read64(ptr)); + else + return endian == XXH_littleEndian + ? 
*(const U64 *)ptr + : XXH_swap64(*(const U64 *)ptr); } -FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE64_align(ptr, endian, XXH_unaligned); +FORCE_INLINE U64 XXH_readLE64(const void *ptr, XXH_endianess endian) { + return XXH_readLE64_align(ptr, endian, XXH_unaligned); } -static U64 XXH_readBE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +static U64 XXH_readBE64(const void *ptr) { + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) + : XXH_read64(ptr); } @@ -665,195 +757,233 @@ static U64 XXH_readBE64(const void* ptr) static const U64 PRIME64_1 = 11400714785074694791ULL; static const U64 PRIME64_2 = 14029467366897019727ULL; -static const U64 PRIME64_3 = 1609587929392839161ULL; -static const U64 PRIME64_4 = 9650029242287828579ULL; -static const U64 PRIME64_5 = 2870177450012600261ULL; - -static U64 XXH64_round(U64 acc, U64 input) -{ - acc += input * PRIME64_2; - acc = XXH_rotl64(acc, 31); - acc *= PRIME64_1; - return acc; +static const U64 PRIME64_3 = 1609587929392839161ULL; +static const U64 PRIME64_4 = 9650029242287828579ULL; +static const U64 PRIME64_5 = 2870177450012600261ULL; + +static U64 XXH64_round(U64 acc, U64 input) { + acc += input * PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; } -static U64 XXH64_mergeRound(U64 acc, U64 val) -{ - val = XXH64_round(0, val); - acc ^= val; - acc = acc * PRIME64_1 + PRIME64_4; - return acc; +static U64 XXH64_mergeRound(U64 acc, U64 val) { + val = XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; } -static U64 XXH64_avalanche(U64 h64) -{ - h64 ^= h64 >> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - return h64; +static U64 XXH64_avalanche(U64 h64) { + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + return h64; } #define XXH_get64bits(p) XXH_readLE64_align(p, endian, 
align) -static U64 -XXH64_finalize(U64 h64, const void* ptr, size_t len, - XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)ptr; - -#define PROCESS1_64 \ - h64 ^= (*p++) * PRIME64_5; \ - h64 = XXH_rotl64(h64, 11) * PRIME64_1; - -#define PROCESS4_64 \ - h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \ - p+=4; \ - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - -#define PROCESS8_64 { \ - U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \ - p+=8; \ - h64 ^= k1; \ - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \ -} +static U64 XXH64_finalize(U64 h64, + const void *ptr, + size_t len, + XXH_endianess endian, + XXH_alignment align) { + const BYTE *p = (const BYTE *)ptr; + +#define PROCESS1_64 \ + h64 ^= (*p++) * PRIME64_5; \ + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + +#define PROCESS4_64 \ + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \ + p += 4; \ + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + +#define PROCESS8_64 \ + { \ + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \ + p += 8; \ + h64 ^= k1; \ + h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; \ + } - switch(len&31) { - case 24: PROCESS8_64; - /* fallthrough */ - case 16: PROCESS8_64; - /* fallthrough */ - case 8: PROCESS8_64; - return XXH64_avalanche(h64); - - case 28: PROCESS8_64; - /* fallthrough */ - case 20: PROCESS8_64; - /* fallthrough */ - case 12: PROCESS8_64; - /* fallthrough */ - case 4: PROCESS4_64; - return XXH64_avalanche(h64); - - case 25: PROCESS8_64; - /* fallthrough */ - case 17: PROCESS8_64; - /* fallthrough */ - case 9: PROCESS8_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 29: PROCESS8_64; - /* fallthrough */ - case 21: PROCESS8_64; - /* fallthrough */ - case 13: PROCESS8_64; - /* fallthrough */ - case 5: PROCESS4_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 26: PROCESS8_64; - /* fallthrough */ - case 18: PROCESS8_64; - /* fallthrough */ - case 10: PROCESS8_64; - PROCESS1_64; - PROCESS1_64; - return XXH64_avalanche(h64); - 
- case 30: PROCESS8_64; - /* fallthrough */ - case 22: PROCESS8_64; - /* fallthrough */ - case 14: PROCESS8_64; - /* fallthrough */ - case 6: PROCESS4_64; - PROCESS1_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 27: PROCESS8_64; - /* fallthrough */ - case 19: PROCESS8_64; - /* fallthrough */ - case 11: PROCESS8_64; - PROCESS1_64; - PROCESS1_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 31: PROCESS8_64; - /* fallthrough */ - case 23: PROCESS8_64; - /* fallthrough */ - case 15: PROCESS8_64; - /* fallthrough */ - case 7: PROCESS4_64; - /* fallthrough */ - case 3: PROCESS1_64; - /* fallthrough */ - case 2: PROCESS1_64; - /* fallthrough */ - case 1: PROCESS1_64; - /* fallthrough */ - case 0: return XXH64_avalanche(h64); - } - - /* impossible to reach */ - assert(0); - return 0; /* unreachable, but some compilers complain without it */ + switch (len & 31) { + case 24: + PROCESS8_64; + /* fallthrough */ + case 16: + PROCESS8_64; + /* fallthrough */ + case 8: + PROCESS8_64; + return XXH64_avalanche(h64); + + case 28: + PROCESS8_64; + /* fallthrough */ + case 20: + PROCESS8_64; + /* fallthrough */ + case 12: + PROCESS8_64; + /* fallthrough */ + case 4: + PROCESS4_64; + return XXH64_avalanche(h64); + + case 25: + PROCESS8_64; + /* fallthrough */ + case 17: + PROCESS8_64; + /* fallthrough */ + case 9: + PROCESS8_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 29: + PROCESS8_64; + /* fallthrough */ + case 21: + PROCESS8_64; + /* fallthrough */ + case 13: + PROCESS8_64; + /* fallthrough */ + case 5: + PROCESS4_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 26: + PROCESS8_64; + /* fallthrough */ + case 18: + PROCESS8_64; + /* fallthrough */ + case 10: + PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 30: + PROCESS8_64; + /* fallthrough */ + case 22: + PROCESS8_64; + /* fallthrough */ + case 14: + PROCESS8_64; + /* fallthrough */ + case 6: + PROCESS4_64; + PROCESS1_64; + PROCESS1_64; + return 
XXH64_avalanche(h64); + + case 27: + PROCESS8_64; + /* fallthrough */ + case 19: + PROCESS8_64; + /* fallthrough */ + case 11: + PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 31: + PROCESS8_64; + /* fallthrough */ + case 23: + PROCESS8_64; + /* fallthrough */ + case 15: + PROCESS8_64; + /* fallthrough */ + case 7: + PROCESS4_64; + /* fallthrough */ + case 3: + PROCESS1_64; + /* fallthrough */ + case 2: + PROCESS1_64; + /* fallthrough */ + case 1: + PROCESS1_64; + /* fallthrough */ + case 0: + return XXH64_avalanche(h64); + } + + /* impossible to reach */ + assert(0); + return 0; /* unreachable, but some compilers complain without it */ } -FORCE_INLINE U64 -XXH64_endian_align(const void* input, size_t len, U64 seed, - XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U64 h64; - -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)32; - } +FORCE_INLINE U64 XXH64_endian_align(const void *input, + size_t len, + U64 seed, + XXH_endianess endian, + XXH_alignment align) { + const BYTE *p = (const BYTE *)input; + const BYTE *bEnd = p + len; + U64 h64; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ + (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) + if (p == NULL) { + len = 0; + bEnd = p = (const BYTE *)(size_t)32; + } #endif - if (len>=32) { - const BYTE* const limit = bEnd - 32; - U64 v1 = seed + PRIME64_1 + PRIME64_2; - U64 v2 = seed + PRIME64_2; - U64 v3 = seed + 0; - U64 v4 = seed - PRIME64_1; - - do { - v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; - v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; - v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; - v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; - } while (p<=limit); - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 
= XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - - } else { - h64 = seed + PRIME64_5; - } - - h64 += (U64) len; - - return XXH64_finalize(h64, p, len, endian, align); + if (len >= 32) { + const BYTE *const limit = bEnd - 32; + U64 v1 = seed + PRIME64_1 + PRIME64_2; + U64 v2 = seed + PRIME64_2; + U64 v3 = seed + 0; + U64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(p)); + p += 8; + v2 = XXH64_round(v2, XXH_get64bits(p)); + p += 8; + v3 = XXH64_round(v3, XXH_get64bits(p)); + p += 8; + v4 = XXH64_round(v4, XXH_get64bits(p)); + p += 8; + } while (p <= limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (U64)len; + + return XXH64_finalize(h64, p, len, endian, align); } -XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) -{ +XXH_PUBLIC_API unsigned long long +XXH64(const void *input, size_t len, unsigned long long seed) { #if 0 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH64_state_t state; @@ -861,170 +991,197 @@ XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned XXH64_update(&state, input, len); return XXH64_digest(&state); #else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, 
XXH_littleEndian, XXH_unaligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7) == + 0) { /* Input is aligned, let's leverage the speed advantage + */ + if ((endian_detected == XXH_littleEndian) || + XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, + XXH_littleEndian, + XXH_aligned); + else + return XXH64_endian_align(input, len, seed, + XXH_bigEndian, + XXH_aligned); + } + } + + if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, + XXH_unaligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, + XXH_unaligned); #endif } /*====== Hash Streaming ======*/ -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) -{ - return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void) { + return (XXH64_state_t *)XXH_malloc(sizeof(XXH64_state_t)); } -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr) { + XXH_free(statePtr); + return XXH_OK; } -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dstState, + const XXH64_state_t *srcState) { + memcpy(dstState, srcState, sizeof(*dstState)); } -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) -{ - XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)); - state.v1 = seed + PRIME64_1 + PRIME64_2; - state.v2 = seed + PRIME64_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME64_1; - /* do not write into 
reserved, planned to be removed in a future version */ - memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr, + unsigned long long seed) { + XXH64_state_t state; /* using a local state to memcpy() in order to + avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + /* do not write into reserved, planned to be removed in a future version + */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; } -FORCE_INLINE XXH_errorcode -XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - if (input==NULL) -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - return XXH_OK; +FORCE_INLINE XXH_errorcode XXH64_update_endian(XXH64_state_t *state, + const void *input, + size_t len, + XXH_endianess endian) { + if (input == NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ + (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) + return XXH_OK; #else - return XXH_ERROR; + return XXH_ERROR; #endif - { const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - - state->total_len += len; - - if (state->memsize + len < 32) { /* fill in tmp buffer */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); - state->memsize += (U32)len; - return XXH_OK; + { + const BYTE *p = (const BYTE *)input; + const BYTE *const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((BYTE *)state->mem64) + state->memsize, + input, len); + state->memsize += (U32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((BYTE *)state->mem64) + state->memsize, + input, 32 - state->memsize); + state->v1 = XXH64_round( + state->v1, 
XXH_readLE64(state->mem64 + 0, endian)); + state->v2 = XXH64_round( + state->v2, XXH_readLE64(state->mem64 + 1, endian)); + state->v3 = XXH64_round( + state->v3, XXH_readLE64(state->mem64 + 2, endian)); + state->v4 = XXH64_round( + state->v4, XXH_readLE64(state->mem64 + 3, endian)); + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p + 32 <= bEnd) { + const BYTE *const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); + p += 8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); + p += 8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); + p += 8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); + p += 8; + } while (p <= limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd - p)); + state->memsize = (unsigned)(bEnd - p); + } } - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); - state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); - state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); - state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); - state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); - p += 32-state->memsize; - state->memsize = 0; - } - - if (p+32 <= bEnd) { - const BYTE* const limit = bEnd - 32; - U64 v1 = state->v1; - U64 v2 = state->v2; - U64 v3 = state->v3; - U64 v4 = state->v4; - - do { - v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; - v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; - v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; - v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } + return XXH_OK; +} - if (p < bEnd) { - XXH_memcpy(state->mem64, p, 
(size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - } +XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *state_in, + const void *input, + size_t len) { + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - return XXH_OK; + if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_update_endian(state_in, input, len, + XXH_littleEndian); + else + return XXH64_update_endian(state_in, input, len, XXH_bigEndian); } -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; +FORCE_INLINE U64 XXH64_digest_endian(const XXH64_state_t *state, + XXH_endianess endian) { + U64 h64; + + if (state->total_len >= 32) { + U64 const v1 = state->v1; + U64 const v2 = state->v2; + U64 const v3 = state->v3; + U64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 /*seed*/ + PRIME64_5; + } - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH64_update_endian(state_in, input, len, XXH_bigEndian); -} + h64 += (U64)state->total_len; -FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) -{ - U64 h64; - - if (state->total_len >= 32) { - U64 const v1 = state->v1; - U64 const v2 = state->v2; - U64 const v3 = state->v3; - U64 const v4 = state->v4; - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - } else { - h64 = state->v3 /*seed*/ + PRIME64_5; - } - - h64 += (U64) 
state->total_len; - - return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned); + return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, + endian, XXH_aligned); } -XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; +XXH_PUBLIC_API unsigned long long XXH64_digest(const XXH64_state_t *state_in) { + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_digest_endian(state_in, XXH_littleEndian); - else - return XXH64_digest_endian(state_in, XXH_bigEndian); + if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_digest_endian(state_in, XXH_littleEndian); + else + return XXH64_digest_endian(state_in, XXH_bigEndian); } /*====== Canonical representation ======*/ -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - memcpy(dst, &hash, sizeof(*dst)); +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst, + XXH64_hash_t hash) { + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) + hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); } -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) -{ - return XXH_readBE64(src); +XXH_PUBLIC_API XXH64_hash_t +XXH64_hashFromCanonical(const XXH64_canonical_t *src) { + return XXH_readBE64(src); } -#endif /* XXH_NO_LONG_LONG */ +#endif /* XXH_NO_LONG_LONG */ diff --git a/src/rdxxhash.h b/src/rdxxhash.h index bc06d292cf..1dad7a1119 100644 --- a/src/rdxxhash.h +++ b/src/rdxxhash.h @@ -37,7 +37,8 @@ xxHash is an extremely fast Hash algorithm, running at RAM speed limits. 
It also successfully passes all tests from the SMHasher suite. -Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo +@3GHz) Name Speed Q.Score Author xxHash 5.4 GB/s 10 @@ -67,16 +68,16 @@ XXH32 6.8 GB/s 6.0 GB/s #ifndef XXHASH_H_5627135585666179 #define XXHASH_H_5627135585666179 1 -#if defined (__cplusplus) +#if defined(__cplusplus) extern "C" { #endif /* **************************** -* Definitions -******************************/ -#include /* size_t */ -typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + * Definitions + ******************************/ +#include /* size_t */ +typedef enum { XXH_OK = 0, XXH_ERROR } XXH_errorcode; /* **************************** @@ -93,153 +94,191 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; * It's not useful to compile and link it as a separate module. */ #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) -# ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY -# endif -# if defined(__GNUC__) -# define XXH_PUBLIC_API static __inline __attribute__((unused)) -# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define XXH_PUBLIC_API static inline -# elif defined(_MSC_VER) -# define XXH_PUBLIC_API static __inline -# else - /* this version may generate warnings for unused static functions */ -# define XXH_PUBLIC_API static -# endif +#ifndef XXH_STATIC_LINKING_ONLY +#define XXH_STATIC_LINKING_ONLY +#endif +#if defined(__GNUC__) +#define XXH_PUBLIC_API static __inline __attribute__((unused)) +#elif defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +#define XXH_PUBLIC_API static inline +#elif defined(_MSC_VER) +#define XXH_PUBLIC_API static __inline +#else +/* this version may generate warnings for unused static functions */ +#define XXH_PUBLIC_API static +#endif #else -# define XXH_PUBLIC_API 
/* do nothing */ -#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ +#define XXH_PUBLIC_API /* do nothing */ +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ /*! XXH_NAMESPACE, aka Namespace Emulation : * - * If you want to include _and expose_ xxHash functions from within your own library, - * but also want to avoid symbol collisions with other libraries which may also include xxHash, + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries which + * may also include xxHash, * - * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library - * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). + * you can use XXH_NAMESPACE, to automatically prefix any public symbol from + * xxhash library with the value of XXH_NAMESPACE (therefore, avoid NULL and + * numeric values). * - * Note that no change is required within the calling program as long as it includes `xxhash.h` : - * regular symbol name will be automatically translated by this header. + * Note that no change is required within the calling program as long as it + * includes `xxhash.h` : regular symbol name will be automatically translated by + * this header. 
*/ #ifdef XXH_NAMESPACE -# define XXH_CAT(A,B) A##B -# define XXH_NAME2(A,B) XXH_CAT(A,B) -# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) -# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) -# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) -# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) -# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) -# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) -# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) -# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) -# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) -# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) -# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) -# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) -# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) -# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) -# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) -# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) -# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) -# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) -# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +#define XXH_CAT(A, B) A##B +#define XXH_NAME2(A, B) XXH_CAT(A, B) +#define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +#define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +#define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +#define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +#define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +#define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +#define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +#define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +#define XXH32_canonicalFromHash \ + 
XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +#define XXH32_hashFromCanonical \ + XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +#define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +#define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +#define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +#define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +#define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +#define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +#define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +#define XXH64_canonicalFromHash \ + XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +#define XXH64_hashFromCanonical \ + XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) #endif /* ************************************* -* Version -***************************************/ -#define XXH_VERSION_MAJOR 0 -#define XXH_VERSION_MINOR 6 -#define XXH_VERSION_RELEASE 5 -#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) -XXH_PUBLIC_API unsigned XXH_versionNumber (void); + * Version + ***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 6 +#define XXH_VERSION_RELEASE 5 +#define XXH_VERSION_NUMBER \ + (XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + \ + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber(void); /*-********************************************************************** -* 32-bit hash -************************************************************************/ + * 32-bit hash + ************************************************************************/ typedef unsigned int XXH32_hash_t; /*! XXH32() : - Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". - The memory between input & input+length must be valid (allocated and read-accessible). - "seed" can be used to alter the result predictably. 
- Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ -XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); + Calculate the 32-bit hash of sequence "length" bytes stored at memory + address "input". The memory between input & input+length must be valid + (allocated and read-accessible). "seed" can be used to alter the result + predictably. + Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s + */ +XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, + size_t length, + unsigned int seed); /*====== Streaming ======*/ -typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); - -XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); -XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr); +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dst_state, + const XXH32_state_t *src_state); + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, + unsigned int seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr, + const void *input, + size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *statePtr); /* - * Streaming functions generate the xxHash of an input provided in multiple segments. - * Note that, for small input, they are slower than single-call functions, due to state management. 
- * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * Streaming functions generate the xxHash of an input provided in multiple + * segments. Note that, for small input, they are slower than single-call + * functions, due to state management. For small inputs, prefer `XXH32()` and + * `XXH64()`, which are better optimized. * * XXH state must first be allocated, using XXH*_createState() . * * Start a new hash by initializing state with a seed, using XXH*_reset(). * - * Then, feed the hash state by calling XXH*_update() as many times as necessary. - * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + * Then, feed the hash state by calling XXH*_update() as many times as + * necessary. The function returns an error code, with 0 meaning OK, and any + * other value meaning there is an error. * * Finally, a hash value can be produced anytime, by using XXH*_digest(). * This function returns the nn-bits hash as an int or long long. * - * It's still possible to continue inserting input into the hash state after a digest, - * and generate some new hashes later on, by calling again XXH*_digest(). + * It's still possible to continue inserting input into the hash state after a + * digest, and generate some new hashes later on, by calling again + * XXH*_digest(). * * When done, free XXH state space if it was allocated dynamically. 
*/ /*====== Canonical representation ======*/ -typedef struct { unsigned char digest[4]; } XXH32_canonical_t; -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); +typedef struct { + unsigned char digest[4]; +} XXH32_canonical_t; +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst, + XXH32_hash_t hash); +XXH_PUBLIC_API XXH32_hash_t +XXH32_hashFromCanonical(const XXH32_canonical_t *src); /* Default result type for XXH functions are primitive unsigned 32 and 64 bits. - * The canonical representation uses human-readable write convention, aka big-endian (large digits first). - * These functions allow transformation of hash result into and from its canonical format. - * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. + * The canonical representation uses human-readable write convention, aka + * big-endian (large digits first). These functions allow transformation of hash + * result into and from its canonical format. This way, hash values can be + * written into a file / memory, and remain comparable on different systems and + * programs. */ #ifndef XXH_NO_LONG_LONG /*-********************************************************************** -* 64-bit hash -************************************************************************/ + * 64-bit hash + ************************************************************************/ typedef unsigned long long XXH64_hash_t; /*! XXH64() : - Calculate the 64-bit hash of sequence of length "len" stored at memory address "input". - "seed" can be used to alter the result predictably. - This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). + Calculate the 64-bit hash of sequence of length "len" stored at memory + address "input". "seed" can be used to alter the result predictably. 
This + function runs faster on 64-bit systems, but slower on 32-bit systems (see + benchmark). */ -XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); +XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input, + size_t length, + unsigned long long seed); /*====== Streaming ======*/ -typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); - -XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dst_state, + const XXH64_state_t *src_state); + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr, + unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *statePtr, + const void *input, + size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *statePtr); /*====== Canonical representation ======*/ -typedef struct { unsigned char digest[8]; } XXH64_canonical_t; -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); -#endif /* XXH_NO_LONG_LONG */ +typedef struct { + unsigned char digest[8]; +} XXH64_canonical_t; +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst, + XXH64_hash_t hash); +XXH_PUBLIC_API XXH64_hash_t +XXH64_hashFromCanonical(const 
XXH64_canonical_t *src); +#endif /* XXH_NO_LONG_LONG */ @@ -247,81 +286,86 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src /* ================================================================================================ This section contains declarations which are not guaranteed to remain stable. - They may change in future versions, becoming incompatible with a different version of the library. - These declarations should only be used with static linking. - Never use them in association with dynamic linking ! -=================================================================================================== */ + They may change in future versions, becoming incompatible with a different +version of the library. These declarations should only be used with static +linking. Never use them in association with dynamic linking ! +=================================================================================================== +*/ /* These definitions are only present to allow * static allocation of XXH state, on stack or in a struct for example. * Never **ever** use members directly. 
*/ -#if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include +#if !defined(__VMS) && \ + (defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)) +#include struct XXH32_state_s { - uint32_t total_len_32; - uint32_t large_len; - uint32_t v1; - uint32_t v2; - uint32_t v3; - uint32_t v4; - uint32_t mem32[4]; - uint32_t memsize; - uint32_t reserved; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH32_state_t */ + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; + uint32_t reserved; /* never read nor write, might be removed in a future + version */ +}; /* typedef'd to XXH32_state_t */ struct XXH64_state_s { - uint64_t total_len; - uint64_t v1; - uint64_t v2; - uint64_t v3; - uint64_t v4; - uint64_t mem64[4]; - uint32_t memsize; - uint32_t reserved[2]; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH64_state_t */ - -# else + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; + uint32_t reserved[2]; /* never read nor write, might be removed in a + future version */ +}; /* typedef'd to XXH64_state_t */ + +#else struct XXH32_state_s { - unsigned total_len_32; - unsigned large_len; - unsigned v1; - unsigned v2; - unsigned v3; - unsigned v4; - unsigned mem32[4]; - unsigned memsize; - unsigned reserved; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH32_state_t */ - -# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ + unsigned total_len_32; + unsigned large_len; + unsigned v1; + unsigned v2; + unsigned v3; + unsigned v4; + unsigned mem32[4]; + unsigned memsize; + unsigned reserved; /* never read nor write, might be removed in a future + version */ +}; /* typedef'd 
to XXH32_state_t */ + +#ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ struct XXH64_state_s { - unsigned long long total_len; - unsigned long long v1; - unsigned long long v2; - unsigned long long v3; - unsigned long long v4; - unsigned long long mem64[4]; - unsigned memsize; - unsigned reserved[2]; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH64_state_t */ -# endif + unsigned long long total_len; + unsigned long long v1; + unsigned long long v2; + unsigned long long v3; + unsigned long long v4; + unsigned long long mem64[4]; + unsigned memsize; + unsigned reserved[2]; /* never read nor write, might be removed in a + future version */ +}; /* typedef'd to XXH64_state_t */ +#endif -# endif +#endif #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) -# include "rdxxhash.c" /* include xxhash function bodies as `static`, for inlining */ +#include "rdxxhash.c" /* include xxhash function bodies as `static`, for inlining */ #endif #endif /* XXH_STATIC_LINKING_ONLY */ -#if defined (__cplusplus) +#if defined(__cplusplus) } #endif diff --git a/src/regexp.c b/src/regexp.c index 123b71c584..603546c478 100644 --- a/src/regexp.c +++ b/src/regexp.c @@ -1,11 +1,12 @@ /** * Copyright: public domain * - * From https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684: + * From https://github.com/ccxvii/minilibs sha + * 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684: * - * These libraries are in the public domain (or the equivalent where that is not possible). - * You can do anything you want with them. You have no legal obligation to do anything else, - * although I appreciate attribution. + * These libraries are in the public domain (or the equivalent where that is not + * possible). You can do anything you want with them. You have no legal + * obligation to do anything else, although I appreciate attribution. 
*/ #include "rd.h" @@ -17,34 +18,31 @@ #include "regexp.h" -#define nelem(a) (sizeof (a) / sizeof (a)[0]) +#define nelem(a) (sizeof(a) / sizeof(a)[0]) typedef unsigned int Rune; -static int isalpharune(Rune c) -{ - /* TODO: Add unicode support */ - return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); +static int isalpharune(Rune c) { + /* TODO: Add unicode support */ + return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); } -static Rune toupperrune(Rune c) -{ - /* TODO: Add unicode support */ - if (c >= 'a' && c <= 'z') - return c - 'a' + 'A'; - return c; +static Rune toupperrune(Rune c) { + /* TODO: Add unicode support */ + if (c >= 'a' && c <= 'z') + return c - 'a' + 'A'; + return c; } -static int chartorune(Rune *r, const char *s) -{ - /* TODO: Add UTF-8 decoding */ - *r = *s; - return 1; +static int chartorune(Rune *r, const char *s) { + /* TODO: Add UTF-8 decoding */ + *r = *s; + return 1; } -#define REPINF 255 +#define REPINF 255 #define MAXTHREAD 1000 -#define MAXSUB REG_MAXSUB +#define MAXSUB REG_MAXSUB typedef struct Reclass Reclass; typedef struct Renode Renode; @@ -53,795 +51,957 @@ typedef struct Rethread Rethread; typedef struct Restate Restate; struct Reclass { - Rune *end; - Rune spans[64]; + Rune *end; + Rune spans[64]; }; struct Restate { - Reprog *prog; - Renode *pstart, *pend; + Reprog *prog; + Renode *pstart, *pend; - const char *source; - unsigned int ncclass; - unsigned int nsub; - Renode *sub[MAXSUB]; + const char *source; + unsigned int ncclass; + unsigned int nsub; + Renode *sub[MAXSUB]; - int lookahead; - Rune yychar; - Reclass *yycc; - int yymin, yymax; + int lookahead; + Rune yychar; + Reclass *yycc; + int yymin, yymax; - const char *error; - jmp_buf kaboom; + const char *error; + jmp_buf kaboom; }; struct Reprog { - Reinst *start, *end; - int flags; - unsigned int nsub; - Reclass cclass[16]; - Restate g; /**< Upstream has this as a global variable */ + Reinst *start, *end; + int flags; + unsigned int nsub; + Reclass cclass[16]; + 
Restate g; /**< Upstream has this as a global variable */ }; -static void die(Restate *g, const char *message) -{ - g->error = message; - longjmp(g->kaboom, 1); +static void die(Restate *g, const char *message) { + g->error = message; + longjmp(g->kaboom, 1); } -static Rune canon(Rune c) -{ - Rune u = toupperrune(c); - if (c >= 128 && u < 128) - return c; - return u; +static Rune canon(Rune c) { + Rune u = toupperrune(c); + if (c >= 128 && u < 128) + return c; + return u; } /* Scan */ -enum { - L_CHAR = 256, - L_CCLASS, /* character class */ - L_NCCLASS, /* negative character class */ - L_NC, /* "(?:" no capture */ - L_PLA, /* "(?=" positive lookahead */ - L_NLA, /* "(?!" negative lookahead */ - L_WORD, /* "\b" word boundary */ - L_NWORD, /* "\B" non-word boundary */ - L_REF, /* "\1" back-reference */ - L_COUNT /* {M,N} */ +enum { L_CHAR = 256, + L_CCLASS, /* character class */ + L_NCCLASS, /* negative character class */ + L_NC, /* "(?:" no capture */ + L_PLA, /* "(?=" positive lookahead */ + L_NLA, /* "(?!" 
negative lookahead */ + L_WORD, /* "\b" word boundary */ + L_NWORD, /* "\B" non-word boundary */ + L_REF, /* "\1" back-reference */ + L_COUNT /* {M,N} */ }; -static int hex(Restate *g, int c) -{ - if (c >= '0' && c <= '9') return c - '0'; - if (c >= 'a' && c <= 'f') return c - 'a' + 0xA; - if (c >= 'A' && c <= 'F') return c - 'A' + 0xA; - die(g, "invalid escape sequence"); - return 0; +static int hex(Restate *g, int c) { + if (c >= '0' && c <= '9') + return c - '0'; + if (c >= 'a' && c <= 'f') + return c - 'a' + 0xA; + if (c >= 'A' && c <= 'F') + return c - 'A' + 0xA; + die(g, "invalid escape sequence"); + return 0; } -static int dec(Restate *g, int c) -{ - if (c >= '0' && c <= '9') return c - '0'; - die(g, "invalid quantifier"); - return 0; +static int dec(Restate *g, int c) { + if (c >= '0' && c <= '9') + return c - '0'; + die(g, "invalid quantifier"); + return 0; } #define ESCAPES "BbDdSsWw^$\\.*+?()[]{}|0123456789" -static int nextrune(Restate *g) -{ - g->source += chartorune(&g->yychar, g->source); - if (g->yychar == '\\') { - g->source += chartorune(&g->yychar, g->source); - switch (g->yychar) { - case 0: die(g, "unterminated escape sequence"); - case 'f': g->yychar = '\f'; return 0; - case 'n': g->yychar = '\n'; return 0; - case 'r': g->yychar = '\r'; return 0; - case 't': g->yychar = '\t'; return 0; - case 'v': g->yychar = '\v'; return 0; - case 'c': - g->yychar = (*g->source++) & 31; - return 0; - case 'x': - g->yychar = hex(g, *g->source++) << 4; - g->yychar += hex(g, *g->source++); - if (g->yychar == 0) { - g->yychar = '0'; - return 1; - } - return 0; - case 'u': - g->yychar = hex(g, *g->source++) << 12; - g->yychar += hex(g, *g->source++) << 8; - g->yychar += hex(g, *g->source++) << 4; - g->yychar += hex(g, *g->source++); - if (g->yychar == 0) { - g->yychar = '0'; - return 1; - } - return 0; - } - if (strchr(ESCAPES, g->yychar)) - return 1; - if (isalpharune(g->yychar) || g->yychar == '_') /* check identity escape */ - die(g, "invalid escape 
character"); - return 0; - } - return 0; +static int nextrune(Restate *g) { + g->source += chartorune(&g->yychar, g->source); + if (g->yychar == '\\') { + g->source += chartorune(&g->yychar, g->source); + switch (g->yychar) { + case 0: + die(g, "unterminated escape sequence"); + case 'f': + g->yychar = '\f'; + return 0; + case 'n': + g->yychar = '\n'; + return 0; + case 'r': + g->yychar = '\r'; + return 0; + case 't': + g->yychar = '\t'; + return 0; + case 'v': + g->yychar = '\v'; + return 0; + case 'c': + g->yychar = (*g->source++) & 31; + return 0; + case 'x': + g->yychar = hex(g, *g->source++) << 4; + g->yychar += hex(g, *g->source++); + if (g->yychar == 0) { + g->yychar = '0'; + return 1; + } + return 0; + case 'u': + g->yychar = hex(g, *g->source++) << 12; + g->yychar += hex(g, *g->source++) << 8; + g->yychar += hex(g, *g->source++) << 4; + g->yychar += hex(g, *g->source++); + if (g->yychar == 0) { + g->yychar = '0'; + return 1; + } + return 0; + } + if (strchr(ESCAPES, g->yychar)) + return 1; + if (isalpharune(g->yychar) || + g->yychar == '_') /* check identity escape */ + die(g, "invalid escape character"); + return 0; + } + return 0; } -static int lexcount(Restate *g) -{ - g->yychar = *g->source++; - - g->yymin = dec(g, g->yychar); - g->yychar = *g->source++; - while (g->yychar != ',' && g->yychar != '}') { - g->yymin = g->yymin * 10 + dec(g, g->yychar); - g->yychar = *g->source++; - } - if (g->yymin >= REPINF) - die(g, "numeric overflow"); - - if (g->yychar == ',') { - g->yychar = *g->source++; - if (g->yychar == '}') { - g->yymax = REPINF; - } else { - g->yymax = dec(g, g->yychar); - g->yychar = *g->source++; - while (g->yychar != '}') { - g->yymax = g->yymax * 10 + dec(g, g->yychar); - g->yychar = *g->source++; - } - if (g->yymax >= REPINF) - die(g, "numeric overflow"); - } - } else { - g->yymax = g->yymin; - } - - return L_COUNT; +static int lexcount(Restate *g) { + g->yychar = *g->source++; + + g->yymin = dec(g, g->yychar); + g->yychar = *g->source++; 
+ while (g->yychar != ',' && g->yychar != '}') { + g->yymin = g->yymin * 10 + dec(g, g->yychar); + g->yychar = *g->source++; + } + if (g->yymin >= REPINF) + die(g, "numeric overflow"); + + if (g->yychar == ',') { + g->yychar = *g->source++; + if (g->yychar == '}') { + g->yymax = REPINF; + } else { + g->yymax = dec(g, g->yychar); + g->yychar = *g->source++; + while (g->yychar != '}') { + g->yymax = g->yymax * 10 + dec(g, g->yychar); + g->yychar = *g->source++; + } + if (g->yymax >= REPINF) + die(g, "numeric overflow"); + } + } else { + g->yymax = g->yymin; + } + + return L_COUNT; } -static void newcclass(Restate *g) -{ - if (g->ncclass >= nelem(g->prog->cclass)) - die(g, "too many character classes"); - g->yycc = g->prog->cclass + g->ncclass++; - g->yycc->end = g->yycc->spans; +static void newcclass(Restate *g) { + if (g->ncclass >= nelem(g->prog->cclass)) + die(g, "too many character classes"); + g->yycc = g->prog->cclass + g->ncclass++; + g->yycc->end = g->yycc->spans; } -static void addrange(Restate *g, Rune a, Rune b) -{ - if (a > b) - die(g, "invalid character class range"); - if (g->yycc->end + 2 == g->yycc->spans + nelem(g->yycc->spans)) - die(g, "too many character class ranges"); - *g->yycc->end++ = a; - *g->yycc->end++ = b; +static void addrange(Restate *g, Rune a, Rune b) { + if (a > b) + die(g, "invalid character class range"); + if (g->yycc->end + 2 == g->yycc->spans + nelem(g->yycc->spans)) + die(g, "too many character class ranges"); + *g->yycc->end++ = a; + *g->yycc->end++ = b; } -static void addranges_d(Restate *g) -{ - addrange(g, '0', '9'); +static void addranges_d(Restate *g) { + addrange(g, '0', '9'); } -static void addranges_D(Restate *g) -{ - addrange(g, 0, '0'-1); - addrange(g, '9'+1, 0xFFFF); +static void addranges_D(Restate *g) { + addrange(g, 0, '0' - 1); + addrange(g, '9' + 1, 0xFFFF); } -static void addranges_s(Restate *g) -{ - addrange(g, 0x9, 0x9); - addrange(g, 0xA, 0xD); - addrange(g, 0x20, 0x20); - addrange(g, 0xA0, 0xA0); - 
addrange(g, 0x2028, 0x2029); - addrange(g, 0xFEFF, 0xFEFF); +static void addranges_s(Restate *g) { + addrange(g, 0x9, 0x9); + addrange(g, 0xA, 0xD); + addrange(g, 0x20, 0x20); + addrange(g, 0xA0, 0xA0); + addrange(g, 0x2028, 0x2029); + addrange(g, 0xFEFF, 0xFEFF); } -static void addranges_S(Restate *g) -{ - addrange(g, 0, 0x9-1); - addrange(g, 0x9+1, 0xA-1); - addrange(g, 0xD+1, 0x20-1); - addrange(g, 0x20+1, 0xA0-1); - addrange(g, 0xA0+1, 0x2028-1); - addrange(g, 0x2029+1, 0xFEFF-1); - addrange(g, 0xFEFF+1, 0xFFFF); +static void addranges_S(Restate *g) { + addrange(g, 0, 0x9 - 1); + addrange(g, 0x9 + 1, 0xA - 1); + addrange(g, 0xD + 1, 0x20 - 1); + addrange(g, 0x20 + 1, 0xA0 - 1); + addrange(g, 0xA0 + 1, 0x2028 - 1); + addrange(g, 0x2029 + 1, 0xFEFF - 1); + addrange(g, 0xFEFF + 1, 0xFFFF); } -static void addranges_w(Restate *g) -{ - addrange(g, '0', '9'); - addrange(g, 'A', 'Z'); - addrange(g, '_', '_'); - addrange(g, 'a', 'z'); +static void addranges_w(Restate *g) { + addrange(g, '0', '9'); + addrange(g, 'A', 'Z'); + addrange(g, '_', '_'); + addrange(g, 'a', 'z'); } -static void addranges_W(Restate *g) -{ - addrange(g, 0, '0'-1); - addrange(g, '9'+1, 'A'-1); - addrange(g, 'Z'+1, '_'-1); - addrange(g, '_'+1, 'a'-1); - addrange(g, 'z'+1, 0xFFFF); +static void addranges_W(Restate *g) { + addrange(g, 0, '0' - 1); + addrange(g, '9' + 1, 'A' - 1); + addrange(g, 'Z' + 1, '_' - 1); + addrange(g, '_' + 1, 'a' - 1); + addrange(g, 'z' + 1, 0xFFFF); } -static int lexclass(Restate *g) -{ - int type = L_CCLASS; - int quoted, havesave, havedash; - Rune save = 0; - - newcclass(g); - - quoted = nextrune(g); - if (!quoted && g->yychar == '^') { - type = L_NCCLASS; - quoted = nextrune(g); - } - - havesave = havedash = 0; - for (;;) { - if (g->yychar == 0) - die(g, "unterminated character class"); - if (!quoted && g->yychar == ']') - break; - - if (!quoted && g->yychar == '-') { - if (havesave) { - if (havedash) { - addrange(g, save, '-'); - havesave = havedash = 0; - } else { - 
havedash = 1; - } - } else { - save = '-'; - havesave = 1; - } - } else if (quoted && strchr("DSWdsw", g->yychar)) { - if (havesave) { - addrange(g, save, save); - if (havedash) - addrange(g, '-', '-'); - } - switch (g->yychar) { - case 'd': addranges_d(g); break; - case 's': addranges_s(g); break; - case 'w': addranges_w(g); break; - case 'D': addranges_D(g); break; - case 'S': addranges_S(g); break; - case 'W': addranges_W(g); break; - } - havesave = havedash = 0; - } else { - if (quoted) { - if (g->yychar == 'b') - g->yychar = '\b'; - else if (g->yychar == '0') - g->yychar = 0; - /* else identity escape */ - } - if (havesave) { - if (havedash) { - addrange(g, save, g->yychar); - havesave = havedash = 0; - } else { - addrange(g, save, save); - save = g->yychar; - } - } else { - save = g->yychar; - havesave = 1; - } - } - - quoted = nextrune(g); - } - - if (havesave) { - addrange(g, save, save); - if (havedash) - addrange(g, '-', '-'); - } - - return type; +static int lexclass(Restate *g) { + int type = L_CCLASS; + int quoted, havesave, havedash; + Rune save = 0; + + newcclass(g); + + quoted = nextrune(g); + if (!quoted && g->yychar == '^') { + type = L_NCCLASS; + quoted = nextrune(g); + } + + havesave = havedash = 0; + for (;;) { + if (g->yychar == 0) + die(g, "unterminated character class"); + if (!quoted && g->yychar == ']') + break; + + if (!quoted && g->yychar == '-') { + if (havesave) { + if (havedash) { + addrange(g, save, '-'); + havesave = havedash = 0; + } else { + havedash = 1; + } + } else { + save = '-'; + havesave = 1; + } + } else if (quoted && strchr("DSWdsw", g->yychar)) { + if (havesave) { + addrange(g, save, save); + if (havedash) + addrange(g, '-', '-'); + } + switch (g->yychar) { + case 'd': + addranges_d(g); + break; + case 's': + addranges_s(g); + break; + case 'w': + addranges_w(g); + break; + case 'D': + addranges_D(g); + break; + case 'S': + addranges_S(g); + break; + case 'W': + addranges_W(g); + break; + } + havesave = havedash = 0; + } 
else { + if (quoted) { + if (g->yychar == 'b') + g->yychar = '\b'; + else if (g->yychar == '0') + g->yychar = 0; + /* else identity escape */ + } + if (havesave) { + if (havedash) { + addrange(g, save, g->yychar); + havesave = havedash = 0; + } else { + addrange(g, save, save); + save = g->yychar; + } + } else { + save = g->yychar; + havesave = 1; + } + } + + quoted = nextrune(g); + } + + if (havesave) { + addrange(g, save, save); + if (havedash) + addrange(g, '-', '-'); + } + + return type; } -static int lex(Restate *g) -{ - int quoted = nextrune(g); - if (quoted) { - switch (g->yychar) { - case 'b': return L_WORD; - case 'B': return L_NWORD; - case 'd': newcclass(g); addranges_d(g); return L_CCLASS; - case 's': newcclass(g); addranges_s(g); return L_CCLASS; - case 'w': newcclass(g); addranges_w(g); return L_CCLASS; - case 'D': newcclass(g); addranges_d(g); return L_NCCLASS; - case 'S': newcclass(g); addranges_s(g); return L_NCCLASS; - case 'W': newcclass(g); addranges_w(g); return L_NCCLASS; - case '0': g->yychar = 0; return L_CHAR; - } - if (g->yychar >= '0' && g->yychar <= '9') { - g->yychar -= '0'; - if (*g->source >= '0' && *g->source <= '9') - g->yychar = g->yychar * 10 + *g->source++ - '0'; - return L_REF; - } - return L_CHAR; - } - - switch (g->yychar) { - case 0: - case '$': case ')': case '*': case '+': - case '.': case '?': case '^': case '|': - return g->yychar; - } - - if (g->yychar == '{') - return lexcount(g); - if (g->yychar == '[') - return lexclass(g); - if (g->yychar == '(') { - if (g->source[0] == '?') { - if (g->source[1] == ':') { - g->source += 2; - return L_NC; - } - if (g->source[1] == '=') { - g->source += 2; - return L_PLA; - } - if (g->source[1] == '!') { - g->source += 2; - return L_NLA; - } - } - return '('; - } - - return L_CHAR; +static int lex(Restate *g) { + int quoted = nextrune(g); + if (quoted) { + switch (g->yychar) { + case 'b': + return L_WORD; + case 'B': + return L_NWORD; + case 'd': + newcclass(g); + addranges_d(g); + 
return L_CCLASS; + case 's': + newcclass(g); + addranges_s(g); + return L_CCLASS; + case 'w': + newcclass(g); + addranges_w(g); + return L_CCLASS; + case 'D': + newcclass(g); + addranges_d(g); + return L_NCCLASS; + case 'S': + newcclass(g); + addranges_s(g); + return L_NCCLASS; + case 'W': + newcclass(g); + addranges_w(g); + return L_NCCLASS; + case '0': + g->yychar = 0; + return L_CHAR; + } + if (g->yychar >= '0' && g->yychar <= '9') { + g->yychar -= '0'; + if (*g->source >= '0' && *g->source <= '9') + g->yychar = g->yychar * 10 + *g->source++ - '0'; + return L_REF; + } + return L_CHAR; + } + + switch (g->yychar) { + case 0: + case '$': + case ')': + case '*': + case '+': + case '.': + case '?': + case '^': + case '|': + return g->yychar; + } + + if (g->yychar == '{') + return lexcount(g); + if (g->yychar == '[') + return lexclass(g); + if (g->yychar == '(') { + if (g->source[0] == '?') { + if (g->source[1] == ':') { + g->source += 2; + return L_NC; + } + if (g->source[1] == '=') { + g->source += 2; + return L_PLA; + } + if (g->source[1] == '!') { + g->source += 2; + return L_NLA; + } + } + return '('; + } + + return L_CHAR; } /* Parse */ -enum { - P_CAT, P_ALT, P_REP, - P_BOL, P_EOL, P_WORD, P_NWORD, - P_PAR, P_PLA, P_NLA, - P_ANY, P_CHAR, P_CCLASS, P_NCCLASS, - P_REF -}; +enum { P_CAT, + P_ALT, + P_REP, + P_BOL, + P_EOL, + P_WORD, + P_NWORD, + P_PAR, + P_PLA, + P_NLA, + P_ANY, + P_CHAR, + P_CCLASS, + P_NCCLASS, + P_REF }; struct Renode { - unsigned char type; - unsigned char ng, m, n; - Rune c; - Reclass *cc; - Renode *x; - Renode *y; + unsigned char type; + unsigned char ng, m, n; + Rune c; + Reclass *cc; + Renode *x; + Renode *y; }; -static Renode *newnode(Restate *g, int type) -{ - Renode *node = g->pend++; - node->type = type; - node->cc = NULL; - node->c = 0; - node->ng = 0; - node->m = 0; - node->n = 0; - node->x = node->y = NULL; - return node; +static Renode *newnode(Restate *g, int type) { + Renode *node = g->pend++; + node->type = type; + node->cc = 
NULL; + node->c = 0; + node->ng = 0; + node->m = 0; + node->n = 0; + node->x = node->y = NULL; + return node; } -static int empty(Renode *node) -{ - if (!node) return 1; - switch (node->type) { - default: return 1; - case P_CAT: return empty(node->x) && empty(node->y); - case P_ALT: return empty(node->x) || empty(node->y); - case P_REP: return empty(node->x) || node->m == 0; - case P_PAR: return empty(node->x); - case P_REF: return empty(node->x); - case P_ANY: case P_CHAR: case P_CCLASS: case P_NCCLASS: return 0; - } +static int empty(Renode *node) { + if (!node) + return 1; + switch (node->type) { + default: + return 1; + case P_CAT: + return empty(node->x) && empty(node->y); + case P_ALT: + return empty(node->x) || empty(node->y); + case P_REP: + return empty(node->x) || node->m == 0; + case P_PAR: + return empty(node->x); + case P_REF: + return empty(node->x); + case P_ANY: + case P_CHAR: + case P_CCLASS: + case P_NCCLASS: + return 0; + } } -static Renode *newrep(Restate *g, Renode *atom, int ng, int min, int max) -{ - Renode *rep = newnode(g, P_REP); - if (max == REPINF && empty(atom)) - die(g, "infinite loop matching the empty string"); - rep->ng = ng; - rep->m = min; - rep->n = max; - rep->x = atom; - return rep; +static Renode *newrep(Restate *g, Renode *atom, int ng, int min, int max) { + Renode *rep = newnode(g, P_REP); + if (max == REPINF && empty(atom)) + die(g, "infinite loop matching the empty string"); + rep->ng = ng; + rep->m = min; + rep->n = max; + rep->x = atom; + return rep; } -static void next(Restate *g) -{ - g->lookahead = lex(g); +static void next(Restate *g) { + g->lookahead = lex(g); } -static int re_accept(Restate *g, int t) -{ - if (g->lookahead == t) { - next(g); - return 1; - } - return 0; +static int re_accept(Restate *g, int t) { + if (g->lookahead == t) { + next(g); + return 1; + } + return 0; } static Renode *parsealt(Restate *g); -static Renode *parseatom(Restate *g) -{ - Renode *atom; - if (g->lookahead == L_CHAR) { - atom = 
newnode(g, P_CHAR); - atom->c = g->yychar; - next(g); - return atom; - } - if (g->lookahead == L_CCLASS) { - atom = newnode(g, P_CCLASS); - atom->cc = g->yycc; - next(g); - return atom; - } - if (g->lookahead == L_NCCLASS) { - atom = newnode(g, P_NCCLASS); - atom->cc = g->yycc; - next(g); - return atom; - } - if (g->lookahead == L_REF) { - atom = newnode(g, P_REF); - if (g->yychar == 0 || g->yychar > g->nsub || !g->sub[g->yychar]) - die(g, "invalid back-reference"); - atom->n = g->yychar; - atom->x = g->sub[g->yychar]; - next(g); - return atom; - } - if (re_accept(g, '.')) - return newnode(g, P_ANY); - if (re_accept(g, '(')) { - atom = newnode(g, P_PAR); - if (g->nsub == MAXSUB) - die(g, "too many captures"); - atom->n = g->nsub++; - atom->x = parsealt(g); - g->sub[atom->n] = atom; - if (!re_accept(g, ')')) - die(g, "unmatched '('"); - return atom; - } - if (re_accept(g, L_NC)) { - atom = parsealt(g); - if (!re_accept(g, ')')) - die(g, "unmatched '('"); - return atom; - } - if (re_accept(g, L_PLA)) { - atom = newnode(g, P_PLA); - atom->x = parsealt(g); - if (!re_accept(g, ')')) - die(g, "unmatched '('"); - return atom; - } - if (re_accept(g, L_NLA)) { - atom = newnode(g, P_NLA); - atom->x = parsealt(g); - if (!re_accept(g, ')')) - die(g, "unmatched '('"); - return atom; - } - die(g, "syntax error"); - return NULL; +static Renode *parseatom(Restate *g) { + Renode *atom; + if (g->lookahead == L_CHAR) { + atom = newnode(g, P_CHAR); + atom->c = g->yychar; + next(g); + return atom; + } + if (g->lookahead == L_CCLASS) { + atom = newnode(g, P_CCLASS); + atom->cc = g->yycc; + next(g); + return atom; + } + if (g->lookahead == L_NCCLASS) { + atom = newnode(g, P_NCCLASS); + atom->cc = g->yycc; + next(g); + return atom; + } + if (g->lookahead == L_REF) { + atom = newnode(g, P_REF); + if (g->yychar == 0 || g->yychar > g->nsub || !g->sub[g->yychar]) + die(g, "invalid back-reference"); + atom->n = g->yychar; + atom->x = g->sub[g->yychar]; + next(g); + return atom; + } + if 
(re_accept(g, '.')) + return newnode(g, P_ANY); + if (re_accept(g, '(')) { + atom = newnode(g, P_PAR); + if (g->nsub == MAXSUB) + die(g, "too many captures"); + atom->n = g->nsub++; + atom->x = parsealt(g); + g->sub[atom->n] = atom; + if (!re_accept(g, ')')) + die(g, "unmatched '('"); + return atom; + } + if (re_accept(g, L_NC)) { + atom = parsealt(g); + if (!re_accept(g, ')')) + die(g, "unmatched '('"); + return atom; + } + if (re_accept(g, L_PLA)) { + atom = newnode(g, P_PLA); + atom->x = parsealt(g); + if (!re_accept(g, ')')) + die(g, "unmatched '('"); + return atom; + } + if (re_accept(g, L_NLA)) { + atom = newnode(g, P_NLA); + atom->x = parsealt(g); + if (!re_accept(g, ')')) + die(g, "unmatched '('"); + return atom; + } + die(g, "syntax error"); + return NULL; } -static Renode *parserep(Restate *g) -{ - Renode *atom; - - if (re_accept(g, '^')) return newnode(g, P_BOL); - if (re_accept(g, '$')) return newnode(g, P_EOL); - if (re_accept(g, L_WORD)) return newnode(g, P_WORD); - if (re_accept(g, L_NWORD)) return newnode(g, P_NWORD); - - atom = parseatom(g); - if (g->lookahead == L_COUNT) { - int min = g->yymin, max = g->yymax; - next(g); - if (max < min) - die(g, "invalid quantifier"); - return newrep(g, atom, re_accept(g, '?'), min, max); - } - if (re_accept(g, '*')) return newrep(g, atom, re_accept(g, '?'), 0, REPINF); - if (re_accept(g, '+')) return newrep(g, atom, re_accept(g, '?'), 1, REPINF); - if (re_accept(g, '?')) return newrep(g, atom, re_accept(g, '?'), 0, 1); - return atom; +static Renode *parserep(Restate *g) { + Renode *atom; + + if (re_accept(g, '^')) + return newnode(g, P_BOL); + if (re_accept(g, '$')) + return newnode(g, P_EOL); + if (re_accept(g, L_WORD)) + return newnode(g, P_WORD); + if (re_accept(g, L_NWORD)) + return newnode(g, P_NWORD); + + atom = parseatom(g); + if (g->lookahead == L_COUNT) { + int min = g->yymin, max = g->yymax; + next(g); + if (max < min) + die(g, "invalid quantifier"); + return newrep(g, atom, re_accept(g, '?'), min, 
max); + } + if (re_accept(g, '*')) + return newrep(g, atom, re_accept(g, '?'), 0, REPINF); + if (re_accept(g, '+')) + return newrep(g, atom, re_accept(g, '?'), 1, REPINF); + if (re_accept(g, '?')) + return newrep(g, atom, re_accept(g, '?'), 0, 1); + return atom; } -static Renode *parsecat(Restate *g) -{ - Renode *cat, *x; - if (g->lookahead && g->lookahead != '|' && g->lookahead != ')') { - cat = parserep(g); - while (g->lookahead && g->lookahead != '|' && g->lookahead != ')') { - x = cat; - cat = newnode(g, P_CAT); - cat->x = x; - cat->y = parserep(g); - } - return cat; - } - return NULL; +static Renode *parsecat(Restate *g) { + Renode *cat, *x; + if (g->lookahead && g->lookahead != '|' && g->lookahead != ')') { + cat = parserep(g); + while (g->lookahead && g->lookahead != '|' && + g->lookahead != ')') { + x = cat; + cat = newnode(g, P_CAT); + cat->x = x; + cat->y = parserep(g); + } + return cat; + } + return NULL; } -static Renode *parsealt(Restate *g) -{ - Renode *alt, *x; - alt = parsecat(g); - while (re_accept(g, '|')) { - x = alt; - alt = newnode(g, P_ALT); - alt->x = x; - alt->y = parsecat(g); - } - return alt; +static Renode *parsealt(Restate *g) { + Renode *alt, *x; + alt = parsecat(g); + while (re_accept(g, '|')) { + x = alt; + alt = newnode(g, P_ALT); + alt->x = x; + alt->y = parsecat(g); + } + return alt; } /* Compile */ -enum { - I_END, I_JUMP, I_SPLIT, I_PLA, I_NLA, - I_ANYNL, I_ANY, I_CHAR, I_CCLASS, I_NCCLASS, I_REF, - I_BOL, I_EOL, I_WORD, I_NWORD, - I_LPAR, I_RPAR -}; +enum { I_END, + I_JUMP, + I_SPLIT, + I_PLA, + I_NLA, + I_ANYNL, + I_ANY, + I_CHAR, + I_CCLASS, + I_NCCLASS, + I_REF, + I_BOL, + I_EOL, + I_WORD, + I_NWORD, + I_LPAR, + I_RPAR }; struct Reinst { - unsigned char opcode; - unsigned char n; - Rune c; - Reclass *cc; - Reinst *x; - Reinst *y; + unsigned char opcode; + unsigned char n; + Rune c; + Reclass *cc; + Reinst *x; + Reinst *y; }; -static unsigned int count(Renode *node) -{ - unsigned int min, max; - if (!node) return 0; - switch 
(node->type) { - default: return 1; - case P_CAT: return count(node->x) + count(node->y); - case P_ALT: return count(node->x) + count(node->y) + 2; - case P_REP: - min = node->m; - max = node->n; - if (min == max) return count(node->x) * min; - if (max < REPINF) return count(node->x) * max + (max - min); - return count(node->x) * (min + 1) + 2; - case P_PAR: return count(node->x) + 2; - case P_PLA: return count(node->x) + 2; - case P_NLA: return count(node->x) + 2; - } +static unsigned int count(Renode *node) { + unsigned int min, max; + if (!node) + return 0; + switch (node->type) { + default: + return 1; + case P_CAT: + return count(node->x) + count(node->y); + case P_ALT: + return count(node->x) + count(node->y) + 2; + case P_REP: + min = node->m; + max = node->n; + if (min == max) + return count(node->x) * min; + if (max < REPINF) + return count(node->x) * max + (max - min); + return count(node->x) * (min + 1) + 2; + case P_PAR: + return count(node->x) + 2; + case P_PLA: + return count(node->x) + 2; + case P_NLA: + return count(node->x) + 2; + } } -static Reinst *emit(Reprog *prog, int opcode) -{ - Reinst *inst = prog->end++; - inst->opcode = opcode; - inst->n = 0; - inst->c = 0; - inst->cc = NULL; - inst->x = inst->y = NULL; - return inst; +static Reinst *emit(Reprog *prog, int opcode) { + Reinst *inst = prog->end++; + inst->opcode = opcode; + inst->n = 0; + inst->c = 0; + inst->cc = NULL; + inst->x = inst->y = NULL; + return inst; } -static void compile(Reprog *prog, Renode *node) -{ - Reinst *inst, *split, *jump; - unsigned int i; - - if (!node) - return; - - switch (node->type) { - case P_CAT: - compile(prog, node->x); - compile(prog, node->y); - break; - - case P_ALT: - split = emit(prog, I_SPLIT); - compile(prog, node->x); - jump = emit(prog, I_JUMP); - compile(prog, node->y); - split->x = split + 1; - split->y = jump + 1; - jump->x = prog->end; - break; - - case P_REP: - for (i = 0; i < node->m; ++i) { - inst = prog->end; - compile(prog, node->x); - } - 
if (node->m == node->n) - break; - if (node->n < REPINF) { - for (i = node->m; i < node->n; ++i) { - split = emit(prog, I_SPLIT); - compile(prog, node->x); - if (node->ng) { - split->y = split + 1; - split->x = prog->end; - } else { - split->x = split + 1; - split->y = prog->end; - } - } - } else if (node->m == 0) { - split = emit(prog, I_SPLIT); - compile(prog, node->x); - jump = emit(prog, I_JUMP); - if (node->ng) { - split->y = split + 1; - split->x = prog->end; - } else { - split->x = split + 1; - split->y = prog->end; - } - jump->x = split; - } else { - split = emit(prog, I_SPLIT); - if (node->ng) { - split->y = inst; - split->x = prog->end; - } else { - split->x = inst; - split->y = prog->end; - } - } - break; - - case P_BOL: emit(prog, I_BOL); break; - case P_EOL: emit(prog, I_EOL); break; - case P_WORD: emit(prog, I_WORD); break; - case P_NWORD: emit(prog, I_NWORD); break; - - case P_PAR: - inst = emit(prog, I_LPAR); - inst->n = node->n; - compile(prog, node->x); - inst = emit(prog, I_RPAR); - inst->n = node->n; - break; - case P_PLA: - split = emit(prog, I_PLA); - compile(prog, node->x); - emit(prog, I_END); - split->x = split + 1; - split->y = prog->end; - break; - case P_NLA: - split = emit(prog, I_NLA); - compile(prog, node->x); - emit(prog, I_END); - split->x = split + 1; - split->y = prog->end; - break; - - case P_ANY: - emit(prog, I_ANY); - break; - case P_CHAR: - inst = emit(prog, I_CHAR); - inst->c = (prog->flags & REG_ICASE) ? 
canon(node->c) : node->c; - break; - case P_CCLASS: - inst = emit(prog, I_CCLASS); - inst->cc = node->cc; - break; - case P_NCCLASS: - inst = emit(prog, I_NCCLASS); - inst->cc = node->cc; - break; - case P_REF: - inst = emit(prog, I_REF); - inst->n = node->n; - break; - } +static void compile(Reprog *prog, Renode *node) { + Reinst *inst, *split, *jump; + unsigned int i; + + if (!node) + return; + + switch (node->type) { + case P_CAT: + compile(prog, node->x); + compile(prog, node->y); + break; + + case P_ALT: + split = emit(prog, I_SPLIT); + compile(prog, node->x); + jump = emit(prog, I_JUMP); + compile(prog, node->y); + split->x = split + 1; + split->y = jump + 1; + jump->x = prog->end; + break; + + case P_REP: + for (i = 0; i < node->m; ++i) { + inst = prog->end; + compile(prog, node->x); + } + if (node->m == node->n) + break; + if (node->n < REPINF) { + for (i = node->m; i < node->n; ++i) { + split = emit(prog, I_SPLIT); + compile(prog, node->x); + if (node->ng) { + split->y = split + 1; + split->x = prog->end; + } else { + split->x = split + 1; + split->y = prog->end; + } + } + } else if (node->m == 0) { + split = emit(prog, I_SPLIT); + compile(prog, node->x); + jump = emit(prog, I_JUMP); + if (node->ng) { + split->y = split + 1; + split->x = prog->end; + } else { + split->x = split + 1; + split->y = prog->end; + } + jump->x = split; + } else { + split = emit(prog, I_SPLIT); + if (node->ng) { + split->y = inst; + split->x = prog->end; + } else { + split->x = inst; + split->y = prog->end; + } + } + break; + + case P_BOL: + emit(prog, I_BOL); + break; + case P_EOL: + emit(prog, I_EOL); + break; + case P_WORD: + emit(prog, I_WORD); + break; + case P_NWORD: + emit(prog, I_NWORD); + break; + + case P_PAR: + inst = emit(prog, I_LPAR); + inst->n = node->n; + compile(prog, node->x); + inst = emit(prog, I_RPAR); + inst->n = node->n; + break; + case P_PLA: + split = emit(prog, I_PLA); + compile(prog, node->x); + emit(prog, I_END); + split->x = split + 1; + split->y = 
prog->end; + break; + case P_NLA: + split = emit(prog, I_NLA); + compile(prog, node->x); + emit(prog, I_END); + split->x = split + 1; + split->y = prog->end; + break; + + case P_ANY: + emit(prog, I_ANY); + break; + case P_CHAR: + inst = emit(prog, I_CHAR); + inst->c = (prog->flags & REG_ICASE) ? canon(node->c) : node->c; + break; + case P_CCLASS: + inst = emit(prog, I_CCLASS); + inst->cc = node->cc; + break; + case P_NCCLASS: + inst = emit(prog, I_NCCLASS); + inst->cc = node->cc; + break; + case P_REF: + inst = emit(prog, I_REF); + inst->n = node->n; + break; + } } #ifdef TEST -static void dumpnode(Renode *node) -{ - Rune *p; - if (!node) { printf("Empty"); return; } - switch (node->type) { - case P_CAT: printf("Cat("); dumpnode(node->x); printf(", "); dumpnode(node->y); printf(")"); break; - case P_ALT: printf("Alt("); dumpnode(node->x); printf(", "); dumpnode(node->y); printf(")"); break; - case P_REP: - printf(node->ng ? "NgRep(%d,%d," : "Rep(%d,%d,", node->m, node->n); - dumpnode(node->x); - printf(")"); - break; - case P_BOL: printf("Bol"); break; - case P_EOL: printf("Eol"); break; - case P_WORD: printf("Word"); break; - case P_NWORD: printf("NotWord"); break; - case P_PAR: printf("Par(%d,", node->n); dumpnode(node->x); printf(")"); break; - case P_PLA: printf("PLA("); dumpnode(node->x); printf(")"); break; - case P_NLA: printf("NLA("); dumpnode(node->x); printf(")"); break; - case P_ANY: printf("Any"); break; - case P_CHAR: printf("Char(%c)", node->c); break; - case P_CCLASS: - printf("Class("); - for (p = node->cc->spans; p < node->cc->end; p += 2) printf("%02X-%02X,", p[0], p[1]); - printf(")"); - break; - case P_NCCLASS: - printf("NotClass("); - for (p = node->cc->spans; p < node->cc->end; p += 2) printf("%02X-%02X,", p[0], p[1]); - printf(")"); - break; - case P_REF: printf("Ref(%d)", node->n); break; - } +static void dumpnode(Renode *node) { + Rune *p; + if (!node) { + printf("Empty"); + return; + } + switch (node->type) { + case P_CAT: + 
printf("Cat("); + dumpnode(node->x); + printf(", "); + dumpnode(node->y); + printf(")"); + break; + case P_ALT: + printf("Alt("); + dumpnode(node->x); + printf(", "); + dumpnode(node->y); + printf(")"); + break; + case P_REP: + printf(node->ng ? "NgRep(%d,%d," : "Rep(%d,%d,", node->m, + node->n); + dumpnode(node->x); + printf(")"); + break; + case P_BOL: + printf("Bol"); + break; + case P_EOL: + printf("Eol"); + break; + case P_WORD: + printf("Word"); + break; + case P_NWORD: + printf("NotWord"); + break; + case P_PAR: + printf("Par(%d,", node->n); + dumpnode(node->x); + printf(")"); + break; + case P_PLA: + printf("PLA("); + dumpnode(node->x); + printf(")"); + break; + case P_NLA: + printf("NLA("); + dumpnode(node->x); + printf(")"); + break; + case P_ANY: + printf("Any"); + break; + case P_CHAR: + printf("Char(%c)", node->c); + break; + case P_CCLASS: + printf("Class("); + for (p = node->cc->spans; p < node->cc->end; p += 2) + printf("%02X-%02X,", p[0], p[1]); + printf(")"); + break; + case P_NCCLASS: + printf("NotClass("); + for (p = node->cc->spans; p < node->cc->end; p += 2) + printf("%02X-%02X,", p[0], p[1]); + printf(")"); + break; + case P_REF: + printf("Ref(%d)", node->n); + break; + } } -static void dumpprog(Reprog *prog) -{ - Reinst *inst; - int i; - for (i = 0, inst = prog->start; inst < prog->end; ++i, ++inst) { - printf("% 5d: ", i); - switch (inst->opcode) { - case I_END: puts("end"); break; - case I_JUMP: printf("jump %d\n", (int)(inst->x - prog->start)); break; - case I_SPLIT: printf("split %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break; - case I_PLA: printf("pla %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break; - case I_NLA: printf("nla %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break; - case I_ANY: puts("any"); break; - case I_ANYNL: puts("anynl"); break; - case I_CHAR: printf(inst->c >= 32 && inst->c < 127 ? 
"char '%c'\n" : "char U+%04X\n", inst->c); break; - case I_CCLASS: puts("cclass"); break; - case I_NCCLASS: puts("ncclass"); break; - case I_REF: printf("ref %d\n", inst->n); break; - case I_BOL: puts("bol"); break; - case I_EOL: puts("eol"); break; - case I_WORD: puts("word"); break; - case I_NWORD: puts("nword"); break; - case I_LPAR: printf("lpar %d\n", inst->n); break; - case I_RPAR: printf("rpar %d\n", inst->n); break; - } - } +static void dumpprog(Reprog *prog) { + Reinst *inst; + int i; + for (i = 0, inst = prog->start; inst < prog->end; ++i, ++inst) { + printf("% 5d: ", i); + switch (inst->opcode) { + case I_END: + puts("end"); + break; + case I_JUMP: + printf("jump %d\n", (int)(inst->x - prog->start)); + break; + case I_SPLIT: + printf("split %d %d\n", (int)(inst->x - prog->start), + (int)(inst->y - prog->start)); + break; + case I_PLA: + printf("pla %d %d\n", (int)(inst->x - prog->start), + (int)(inst->y - prog->start)); + break; + case I_NLA: + printf("nla %d %d\n", (int)(inst->x - prog->start), + (int)(inst->y - prog->start)); + break; + case I_ANY: + puts("any"); + break; + case I_ANYNL: + puts("anynl"); + break; + case I_CHAR: + printf(inst->c >= 32 && inst->c < 127 ? 
"char '%c'\n" + : "char U+%04X\n", + inst->c); + break; + case I_CCLASS: + puts("cclass"); + break; + case I_NCCLASS: + puts("ncclass"); + break; + case I_REF: + printf("ref %d\n", inst->n); + break; + case I_BOL: + puts("bol"); + break; + case I_EOL: + puts("eol"); + break; + case I_WORD: + puts("word"); + break; + case I_NWORD: + puts("nword"); + break; + case I_LPAR: + printf("lpar %d\n", inst->n); + break; + case I_RPAR: + printf("rpar %d\n", inst->n); + break; + } + } } #endif -Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp) -{ +Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp) { Reprog *prog; Restate *g; - Renode *node; - Reinst *split, *jump; - int i; + Renode *node; + Reinst *split, *jump; + int i; unsigned int ncount; size_t pattern_len = strlen(pattern); @@ -852,333 +1012,336 @@ Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp) return NULL; } - prog = rd_calloc(1, sizeof (Reprog)); - g = &prog->g; - g->prog = prog; - g->pstart = g->pend = rd_malloc(sizeof (Renode) * pattern_len * 2); - - if (setjmp(g->kaboom)) { - if (errorp) *errorp = g->error; - rd_free(g->pstart); - rd_free(prog); - return NULL; - } - - g->source = pattern; - g->ncclass = 0; - g->nsub = 1; - for (i = 0; i < MAXSUB; ++i) - g->sub[i] = 0; - - g->prog->flags = cflags; - - next(g); - node = parsealt(g); - if (g->lookahead == ')') - die(g, "unmatched ')'"); - if (g->lookahead != 0) - die(g, "syntax error"); - - g->prog->nsub = g->nsub; - ncount = count(node); + prog = rd_calloc(1, sizeof(Reprog)); + g = &prog->g; + g->prog = prog; + g->pstart = g->pend = rd_malloc(sizeof(Renode) * pattern_len * 2); + + if (setjmp(g->kaboom)) { + if (errorp) + *errorp = g->error; + rd_free(g->pstart); + rd_free(prog); + return NULL; + } + + g->source = pattern; + g->ncclass = 0; + g->nsub = 1; + for (i = 0; i < MAXSUB; ++i) + g->sub[i] = 0; + + g->prog->flags = cflags; + + next(g); + node = parsealt(g); + if (g->lookahead == ')') + die(g, 
"unmatched ')'"); + if (g->lookahead != 0) + die(g, "syntax error"); + + g->prog->nsub = g->nsub; + ncount = count(node); if (ncount > 10000) die(g, "regexp graph too large"); - g->prog->start = g->prog->end = rd_malloc((ncount + 6) * sizeof (Reinst)); - - split = emit(g->prog, I_SPLIT); - split->x = split + 3; - split->y = split + 1; - emit(g->prog, I_ANYNL); - jump = emit(g->prog, I_JUMP); - jump->x = split; - emit(g->prog, I_LPAR); - compile(g->prog, node); - emit(g->prog, I_RPAR); - emit(g->prog, I_END); + g->prog->start = g->prog->end = + rd_malloc((ncount + 6) * sizeof(Reinst)); + + split = emit(g->prog, I_SPLIT); + split->x = split + 3; + split->y = split + 1; + emit(g->prog, I_ANYNL); + jump = emit(g->prog, I_JUMP); + jump->x = split; + emit(g->prog, I_LPAR); + compile(g->prog, node); + emit(g->prog, I_RPAR); + emit(g->prog, I_END); #ifdef TEST - dumpnode(node); - putchar('\n'); - dumpprog(g->prog); + dumpnode(node); + putchar('\n'); + dumpprog(g->prog); #endif - rd_free(g->pstart); + rd_free(g->pstart); - if (errorp) *errorp = NULL; - return g->prog; + if (errorp) + *errorp = NULL; + return g->prog; } -void re_regfree(Reprog *prog) -{ - if (prog) { - rd_free(prog->start); - rd_free(prog); - } +void re_regfree(Reprog *prog) { + if (prog) { + rd_free(prog->start); + rd_free(prog); + } } /* Match */ -static int isnewline(int c) -{ - return c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029; +static int isnewline(int c) { + return c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029; } -static int iswordchar(int c) -{ - return c == '_' || - (c >= 'a' && c <= 'z') || - (c >= 'A' && c <= 'Z') || - (c >= '0' && c <= '9'); +static int iswordchar(int c) { + return c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9'); } -static int incclass(Reclass *cc, Rune c) -{ - Rune *p; - for (p = cc->spans; p < cc->end; p += 2) - if (p[0] <= c && c <= p[1]) - return 1; - return 0; +static int incclass(Reclass *cc, Rune c) { + Rune *p; + for (p 
= cc->spans; p < cc->end; p += 2) + if (p[0] <= c && c <= p[1]) + return 1; + return 0; } -static int incclasscanon(Reclass *cc, Rune c) -{ - Rune *p, r; - for (p = cc->spans; p < cc->end; p += 2) - for (r = p[0]; r <= p[1]; ++r) - if (c == canon(r)) - return 1; - return 0; +static int incclasscanon(Reclass *cc, Rune c) { + Rune *p, r; + for (p = cc->spans; p < cc->end; p += 2) + for (r = p[0]; r <= p[1]; ++r) + if (c == canon(r)) + return 1; + return 0; } -static int strncmpcanon(const char *a, const char *b, unsigned int n) -{ - Rune ra, rb; - int c; - while (n--) { - if (!*a) return -1; - if (!*b) return 1; - a += chartorune(&ra, a); - b += chartorune(&rb, b); - c = canon(ra) - canon(rb); - if (c) - return c; - } - return 0; +static int strncmpcanon(const char *a, const char *b, unsigned int n) { + Rune ra, rb; + int c; + while (n--) { + if (!*a) + return -1; + if (!*b) + return 1; + a += chartorune(&ra, a); + b += chartorune(&rb, b); + c = canon(ra) - canon(rb); + if (c) + return c; + } + return 0; } struct Rethread { - Reinst *pc; - const char *sp; - Resub sub; + Reinst *pc; + const char *sp; + Resub sub; }; -static void spawn(Rethread *t, Reinst *pc, const char *sp, Resub *sub) -{ - t->pc = pc; - t->sp = sp; - memcpy(&t->sub, sub, sizeof t->sub); +static void spawn(Rethread *t, Reinst *pc, const char *sp, Resub *sub) { + t->pc = pc; + t->sp = sp; + memcpy(&t->sub, sub, sizeof t->sub); } -static int match(Reinst *pc, const char *sp, const char *bol, int flags, Resub *out) -{ - Rethread ready[MAXTHREAD]; - Resub scratch; - Resub sub; - Rune c; - unsigned int nready; - int i; - - /* queue initial thread */ - spawn(ready + 0, pc, sp, out); - nready = 1; - - /* run threads in stack order */ - while (nready > 0) { - --nready; - pc = ready[nready].pc; - sp = ready[nready].sp; - memcpy(&sub, &ready[nready].sub, sizeof sub); - for (;;) { - switch (pc->opcode) { - case I_END: - for (i = 0; i < MAXSUB; ++i) { - out->sub[i].sp = sub.sub[i].sp; - out->sub[i].ep = 
sub.sub[i].ep; - } - return 1; - case I_JUMP: - pc = pc->x; - continue; - case I_SPLIT: - if (nready >= MAXTHREAD) { - fprintf(stderr, "regexec: backtrack overflow!\n"); - return 0; - } - spawn(&ready[nready++], pc->y, sp, &sub); - pc = pc->x; - continue; - - case I_PLA: - if (!match(pc->x, sp, bol, flags, &sub)) - goto dead; - pc = pc->y; - continue; - case I_NLA: - memcpy(&scratch, &sub, sizeof scratch); - if (match(pc->x, sp, bol, flags, &scratch)) - goto dead; - pc = pc->y; - continue; - - case I_ANYNL: - sp += chartorune(&c, sp); - if (c == 0) - goto dead; - break; - case I_ANY: - sp += chartorune(&c, sp); - if (c == 0) - goto dead; - if (isnewline(c)) - goto dead; - break; - case I_CHAR: - sp += chartorune(&c, sp); - if (c == 0) - goto dead; - if (flags & REG_ICASE) - c = canon(c); - if (c != pc->c) - goto dead; - break; - case I_CCLASS: - sp += chartorune(&c, sp); - if (c == 0) - goto dead; - if (flags & REG_ICASE) { - if (!incclasscanon(pc->cc, canon(c))) - goto dead; - } else { - if (!incclass(pc->cc, c)) - goto dead; - } - break; - case I_NCCLASS: - sp += chartorune(&c, sp); - if (c == 0) - goto dead; - if (flags & REG_ICASE) { - if (incclasscanon(pc->cc, canon(c))) - goto dead; - } else { - if (incclass(pc->cc, c)) - goto dead; - } - break; - case I_REF: - i = (int)(sub.sub[pc->n].ep - sub.sub[pc->n].sp); - if (flags & REG_ICASE) { - if (strncmpcanon(sp, sub.sub[pc->n].sp, i)) - goto dead; - } else { - if (strncmp(sp, sub.sub[pc->n].sp, i)) - goto dead; - } - if (i > 0) - sp += i; - break; - - case I_BOL: - if (sp == bol && !(flags & REG_NOTBOL)) - break; - if (flags & REG_NEWLINE) - if (sp > bol && isnewline(sp[-1])) - break; - goto dead; - case I_EOL: - if (*sp == 0) - break; - if (flags & REG_NEWLINE) - if (isnewline(*sp)) - break; - goto dead; - case I_WORD: - i = sp > bol && iswordchar(sp[-1]); - i ^= iswordchar(sp[0]); - if (i) - break; - goto dead; - case I_NWORD: - i = sp > bol && iswordchar(sp[-1]); - i ^= iswordchar(sp[0]); - if (!i) - break; - 
goto dead; - - case I_LPAR: - sub.sub[pc->n].sp = sp; - break; - case I_RPAR: - sub.sub[pc->n].ep = sp; - break; - default: - goto dead; - } - pc = pc + 1; - } -dead: ; - } - return 0; +static int +match(Reinst *pc, const char *sp, const char *bol, int flags, Resub *out) { + Rethread ready[MAXTHREAD]; + Resub scratch; + Resub sub; + Rune c; + unsigned int nready; + int i; + + /* queue initial thread */ + spawn(ready + 0, pc, sp, out); + nready = 1; + + /* run threads in stack order */ + while (nready > 0) { + --nready; + pc = ready[nready].pc; + sp = ready[nready].sp; + memcpy(&sub, &ready[nready].sub, sizeof sub); + for (;;) { + switch (pc->opcode) { + case I_END: + for (i = 0; i < MAXSUB; ++i) { + out->sub[i].sp = sub.sub[i].sp; + out->sub[i].ep = sub.sub[i].ep; + } + return 1; + case I_JUMP: + pc = pc->x; + continue; + case I_SPLIT: + if (nready >= MAXTHREAD) { + fprintf( + stderr, + "regexec: backtrack overflow!\n"); + return 0; + } + spawn(&ready[nready++], pc->y, sp, &sub); + pc = pc->x; + continue; + + case I_PLA: + if (!match(pc->x, sp, bol, flags, &sub)) + goto dead; + pc = pc->y; + continue; + case I_NLA: + memcpy(&scratch, &sub, sizeof scratch); + if (match(pc->x, sp, bol, flags, &scratch)) + goto dead; + pc = pc->y; + continue; + + case I_ANYNL: + sp += chartorune(&c, sp); + if (c == 0) + goto dead; + break; + case I_ANY: + sp += chartorune(&c, sp); + if (c == 0) + goto dead; + if (isnewline(c)) + goto dead; + break; + case I_CHAR: + sp += chartorune(&c, sp); + if (c == 0) + goto dead; + if (flags & REG_ICASE) + c = canon(c); + if (c != pc->c) + goto dead; + break; + case I_CCLASS: + sp += chartorune(&c, sp); + if (c == 0) + goto dead; + if (flags & REG_ICASE) { + if (!incclasscanon(pc->cc, canon(c))) + goto dead; + } else { + if (!incclass(pc->cc, c)) + goto dead; + } + break; + case I_NCCLASS: + sp += chartorune(&c, sp); + if (c == 0) + goto dead; + if (flags & REG_ICASE) { + if (incclasscanon(pc->cc, canon(c))) + goto dead; + } else { + if 
(incclass(pc->cc, c)) + goto dead; + } + break; + case I_REF: + i = (int)(sub.sub[pc->n].ep - + sub.sub[pc->n].sp); + if (flags & REG_ICASE) { + if (strncmpcanon(sp, sub.sub[pc->n].sp, + i)) + goto dead; + } else { + if (strncmp(sp, sub.sub[pc->n].sp, i)) + goto dead; + } + if (i > 0) + sp += i; + break; + + case I_BOL: + if (sp == bol && !(flags & REG_NOTBOL)) + break; + if (flags & REG_NEWLINE) + if (sp > bol && isnewline(sp[-1])) + break; + goto dead; + case I_EOL: + if (*sp == 0) + break; + if (flags & REG_NEWLINE) + if (isnewline(*sp)) + break; + goto dead; + case I_WORD: + i = sp > bol && iswordchar(sp[-1]); + i ^= iswordchar(sp[0]); + if (i) + break; + goto dead; + case I_NWORD: + i = sp > bol && iswordchar(sp[-1]); + i ^= iswordchar(sp[0]); + if (!i) + break; + goto dead; + + case I_LPAR: + sub.sub[pc->n].sp = sp; + break; + case I_RPAR: + sub.sub[pc->n].ep = sp; + break; + default: + goto dead; + } + pc = pc + 1; + } + dead:; + } + return 0; } -int re_regexec(Reprog *prog, const char *sp, Resub *sub, int eflags) -{ - Resub scratch; - int i; +int re_regexec(Reprog *prog, const char *sp, Resub *sub, int eflags) { + Resub scratch; + int i; - if (!sub) - sub = &scratch; + if (!sub) + sub = &scratch; - sub->nsub = prog->nsub; - for (i = 0; i < MAXSUB; ++i) - sub->sub[i].sp = sub->sub[i].ep = NULL; + sub->nsub = prog->nsub; + for (i = 0; i < MAXSUB; ++i) + sub->sub[i].sp = sub->sub[i].ep = NULL; - return !match(prog->start, sp, sp, prog->flags | eflags, sub); + return !match(prog->start, sp, sp, prog->flags | eflags, sub); } #ifdef TEST -int main(int argc, char **argv) -{ - const char *error; - const char *s; - Reprog *p; - Resub m; - unsigned int i; - - if (argc > 1) { - p = regcomp(argv[1], 0, &error); - if (!p) { - fprintf(stderr, "regcomp: %s\n", error); - return 1; - } - - if (argc > 2) { - s = argv[2]; - printf("nsub = %d\n", p->nsub); - if (!regexec(p, s, &m, 0)) { - for (i = 0; i < m.nsub; ++i) { - int n = m.sub[i].ep - m.sub[i].sp; - if (n > 0) - 
printf("match %d: s=%d e=%d n=%d '%.*s'\n", i, (int)(m.sub[i].sp - s), (int)(m.sub[i].ep - s), n, n, m.sub[i].sp); - else - printf("match %d: n=0 ''\n", i); - } - } else { - printf("no match\n"); - } - } - } - - return 0; +int main(int argc, char **argv) { + const char *error; + const char *s; + Reprog *p; + Resub m; + unsigned int i; + + if (argc > 1) { + p = regcomp(argv[1], 0, &error); + if (!p) { + fprintf(stderr, "regcomp: %s\n", error); + return 1; + } + + if (argc > 2) { + s = argv[2]; + printf("nsub = %d\n", p->nsub); + if (!regexec(p, s, &m, 0)) { + for (i = 0; i < m.nsub; ++i) { + int n = m.sub[i].ep - m.sub[i].sp; + if (n > 0) + printf( + "match %d: s=%d e=%d n=%d " + "'%.*s'\n", + i, (int)(m.sub[i].sp - s), + (int)(m.sub[i].ep - s), n, + n, m.sub[i].sp); + else + printf("match %d: n=0 ''\n", i); + } + } else { + printf("no match\n"); + } + } + } + + return 0; } #endif diff --git a/src/tinycthread_extra.c b/src/tinycthread_extra.c index d48de04bc7..58049448ce 100644 --- a/src/tinycthread_extra.c +++ b/src/tinycthread_extra.c @@ -36,7 +36,7 @@ #include "tinycthread.h" -int thrd_setname (const char *name) { +int thrd_setname(const char *name) { #if HAVE_PTHREAD_SETNAME_GNU if (!pthread_setname_np(pthread_self(), name)) return thrd_success; @@ -60,14 +60,14 @@ int thrd_is_current(thrd_t thr) { #ifdef _WIN32 -void cnd_wait_enter (cnd_t *cond) { +void cnd_wait_enter(cnd_t *cond) { /* Increment number of waiters */ EnterCriticalSection(&cond->mWaitersCountLock); ++cond->mWaitersCount; LeaveCriticalSection(&cond->mWaitersCountLock); } -void cnd_wait_exit (cnd_t *cond) { +void cnd_wait_exit(cnd_t *cond) { /* Increment number of waiters */ EnterCriticalSection(&cond->mWaitersCountLock); --cond->mWaitersCount; @@ -77,7 +77,6 @@ void cnd_wait_exit (cnd_t *cond) { - int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms) { if (timeout_ms == -1 /* INFINITE*/) return cnd_wait(cnd, mtx); @@ -88,10 +87,10 @@ int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int 
timeout_ms) { struct timespec ts; gettimeofday(&tv, NULL); - ts.tv_sec = tv.tv_sec; + ts.tv_sec = tv.tv_sec; ts.tv_nsec = tv.tv_usec * 1000; - ts.tv_sec += timeout_ms / 1000; + ts.tv_sec += timeout_ms / 1000; ts.tv_nsec += (timeout_ms % 1000) * 1000000; if (ts.tv_nsec >= 1000000000) { @@ -103,18 +102,18 @@ int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms) { #endif } -int cnd_timedwait_msp (cnd_t *cnd, mtx_t *mtx, int *timeout_msp) { +int cnd_timedwait_msp(cnd_t *cnd, mtx_t *mtx, int *timeout_msp) { rd_ts_t pre = rd_clock(); int r; r = cnd_timedwait_ms(cnd, mtx, *timeout_msp); if (r != thrd_timedout) { /* Subtract spent time */ - (*timeout_msp) -= (int)(rd_clock()-pre) / 1000; + (*timeout_msp) -= (int)(rd_clock() - pre) / 1000; } return r; } -int cnd_timedwait_abs (cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec) { +int cnd_timedwait_abs(cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec) { if (tspec->tv_sec == RD_POLL_INFINITE) return cnd_wait(cnd, mtx); else if (tspec->tv_sec == RD_POLL_NOWAIT) @@ -129,7 +128,7 @@ int cnd_timedwait_abs (cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec) { * @{ */ #ifndef _WIN32 -int rwlock_init (rwlock_t *rwl) { +int rwlock_init(rwlock_t *rwl) { int r = pthread_rwlock_init(rwl, NULL); if (r) { errno = r; @@ -138,7 +137,7 @@ int rwlock_init (rwlock_t *rwl) { return thrd_success; } -int rwlock_destroy (rwlock_t *rwl) { +int rwlock_destroy(rwlock_t *rwl) { int r = pthread_rwlock_destroy(rwl); if (r) { errno = r; @@ -147,25 +146,25 @@ int rwlock_destroy (rwlock_t *rwl) { return thrd_success; } -int rwlock_rdlock (rwlock_t *rwl) { +int rwlock_rdlock(rwlock_t *rwl) { int r = pthread_rwlock_rdlock(rwl); assert(r == 0); return thrd_success; } -int rwlock_wrlock (rwlock_t *rwl) { +int rwlock_wrlock(rwlock_t *rwl) { int r = pthread_rwlock_wrlock(rwl); assert(r == 0); return thrd_success; } -int rwlock_rdunlock (rwlock_t *rwl) { +int rwlock_rdunlock(rwlock_t *rwl) { int r = pthread_rwlock_unlock(rwl); assert(r == 0); 
return thrd_success; } -int rwlock_wrunlock (rwlock_t *rwl) { +int rwlock_wrunlock(rwlock_t *rwl) { int r = pthread_rwlock_unlock(rwl); assert(r == 0); return thrd_success; diff --git a/src/tinycthread_extra.h b/src/tinycthread_extra.h index fc08a5bb8d..e5f6731739 100644 --- a/src/tinycthread_extra.h +++ b/src/tinycthread_extra.h @@ -45,7 +45,7 @@ * @brief Set thread system name if platform supports it (pthreads) * @return thrd_success or thrd_error */ -int thrd_setname (const char *name); +int thrd_setname(const char *name); /** * @brief Checks if passed thread is the current thread. @@ -63,12 +63,12 @@ int thrd_is_current(thrd_t thr); * * @sa cnd_wait_exit() */ -void cnd_wait_enter (cnd_t *cond); +void cnd_wait_enter(cnd_t *cond); /** * @brief Mark the current thread as no longer waiting on cnd. */ -void cnd_wait_exit (cnd_t *cond); +void cnd_wait_exit(cnd_t *cond); #endif @@ -79,8 +79,8 @@ int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms); /** * @brief Same as cnd_timedwait_ms() but updates the remaining time. -*/ -int cnd_timedwait_msp (cnd_t *cnd, mtx_t *mtx, int *timeout_msp); + */ +int cnd_timedwait_msp(cnd_t *cnd, mtx_t *mtx, int *timeout_msp); /** * @brief Same as cnd_timedwait() but honours @@ -89,8 +89,7 @@ int cnd_timedwait_msp (cnd_t *cnd, mtx_t *mtx, int *timeout_msp); * * @remark Set up \p tspec with rd_timeout_init_timespec(). 
*/ -int cnd_timedwait_abs (cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec); - +int cnd_timedwait_abs(cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec); @@ -100,32 +99,108 @@ int cnd_timedwait_abs (cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec); #if defined(_TTHREAD_WIN32_) typedef struct rwlock_t { - SRWLOCK lock; - LONG rcnt; - LONG wcnt; + SRWLOCK lock; + LONG rcnt; + LONG wcnt; } rwlock_t; -#define rwlock_init(rwl) do { (rwl)->rcnt = (rwl)->wcnt = 0; InitializeSRWLock(&(rwl)->lock); } while (0) +#define rwlock_init(rwl) \ + do { \ + (rwl)->rcnt = (rwl)->wcnt = 0; \ + InitializeSRWLock(&(rwl)->lock); \ + } while (0) #define rwlock_destroy(rwl) -#define rwlock_rdlock(rwl) do { if (0) printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockShared(&(rwl)->lock); InterlockedIncrement(&(rwl)->rcnt); } while (0) -#define rwlock_wrlock(rwl) do { if (0) printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockExclusive(&(rwl)->lock); InterlockedIncrement(&(rwl)->wcnt); } while (0) -#define rwlock_rdunlock(rwl) do { if (0) printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); ReleaseSRWLockShared(&(rwl)->lock); InterlockedDecrement(&(rwl)->rcnt); } while (0) -#define rwlock_wrunlock(rwl) do { if (0) printf("Thr %i: at %i: RWUNLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); ReleaseSRWLockExclusive(&(rwl)->lock); InterlockedDecrement(&(rwl)->wcnt); } while (0) - -#define rwlock_rdlock_d(rwl) do { if (1) printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, 
rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockShared(&(rwl)->lock); InterlockedIncrement(&(rwl)->rcnt); } while (0) -#define rwlock_wrlock_d(rwl) do { if (1) printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockExclusive(&(rwl)->lock); InterlockedIncrement(&(rwl)->wcnt); } while (0) -#define rwlock_rdunlock_d(rwl) do { if (1) printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); ReleaseSRWLockShared(&(rwl)->lock); InterlockedDecrement(&(rwl)->rcnt); } while (0) -#define rwlock_wrunlock_d(rwl) do { if (1) printf("Thr %i: at %i: RWUNLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); ReleaseSRWLockExclusive(&(rwl)->lock); InterlockedDecrement(&(rwl)->wcnt); } while (0) +#define rwlock_rdlock(rwl) \ + do { \ + if (0) \ + printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \ + AcquireSRWLockShared(&(rwl)->lock); \ + InterlockedIncrement(&(rwl)->rcnt); \ + } while (0) +#define rwlock_wrlock(rwl) \ + do { \ + if (0) \ + printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \ + AcquireSRWLockExclusive(&(rwl)->lock); \ + InterlockedIncrement(&(rwl)->wcnt); \ + } while (0) +#define rwlock_rdunlock(rwl) \ + do { \ + if (0) \ + printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); \ + 
ReleaseSRWLockShared(&(rwl)->lock); \ + InterlockedDecrement(&(rwl)->rcnt); \ + } while (0) +#define rwlock_wrunlock(rwl) \ + do { \ + if (0) \ + printf("Thr %i: at %i: RWUNLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); \ + ReleaseSRWLockExclusive(&(rwl)->lock); \ + InterlockedDecrement(&(rwl)->wcnt); \ + } while (0) + +#define rwlock_rdlock_d(rwl) \ + do { \ + if (1) \ + printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \ + AcquireSRWLockShared(&(rwl)->lock); \ + InterlockedIncrement(&(rwl)->rcnt); \ + } while (0) +#define rwlock_wrlock_d(rwl) \ + do { \ + if (1) \ + printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \ + AcquireSRWLockExclusive(&(rwl)->lock); \ + InterlockedIncrement(&(rwl)->wcnt); \ + } while (0) +#define rwlock_rdunlock_d(rwl) \ + do { \ + if (1) \ + printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); \ + ReleaseSRWLockShared(&(rwl)->lock); \ + InterlockedDecrement(&(rwl)->rcnt); \ + } while (0) +#define rwlock_wrunlock_d(rwl) \ + do { \ + if (1) \ + printf("Thr %i: at %i: RWUNLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); \ + ReleaseSRWLockExclusive(&(rwl)->lock); \ + InterlockedDecrement(&(rwl)->wcnt); \ + } while (0) #else typedef pthread_rwlock_t rwlock_t; -int rwlock_init (rwlock_t *rwl); -int rwlock_destroy (rwlock_t *rwl); -int rwlock_rdlock (rwlock_t *rwl); -int rwlock_wrlock (rwlock_t *rwl); -int rwlock_rdunlock (rwlock_t 
*rwl); -int rwlock_wrunlock (rwlock_t *rwl); +int rwlock_init(rwlock_t *rwl); +int rwlock_destroy(rwlock_t *rwl); +int rwlock_rdlock(rwlock_t *rwl); +int rwlock_wrlock(rwlock_t *rwl); +int rwlock_rdunlock(rwlock_t *rwl); +int rwlock_wrunlock(rwlock_t *rwl); #endif diff --git a/src/win32_config.h b/src/win32_config.h index 79ec1943b2..36997cfbcb 100644 --- a/src/win32_config.h +++ b/src/win32_config.h @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015 Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2015 Magnus Edenhill + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ /** * Hand-crafted config header file for Win32 builds. 
@@ -33,23 +33,25 @@ #define _RD_WIN32_CONFIG_H_ #ifndef WITHOUT_WIN32_CONFIG -#define WITH_SSL 1 -#define WITH_ZLIB 1 +#define WITH_SSL 1 +#define WITH_ZLIB 1 #define WITH_SNAPPY 1 -#define WITH_ZSTD 1 -#define WITH_CURL 1 +#define WITH_ZSTD 1 +#define WITH_CURL 1 /* zstd is linked dynamically on Windows, but the dynamic library provides * the experimental/advanced API, just as the static builds on *nix */ -#define WITH_ZSTD_STATIC 1 -#define WITH_SASL_SCRAM 1 +#define WITH_ZSTD_STATIC 1 +#define WITH_SASL_SCRAM 1 #define WITH_SASL_OAUTHBEARER 1 -#define ENABLE_DEVEL 0 -#define WITH_PLUGINS 1 -#define WITH_HDRHISTOGRAM 1 +#define ENABLE_DEVEL 0 +#define WITH_PLUGINS 1 +#define WITH_HDRHISTOGRAM 1 #endif #define SOLIB_EXT ".dll" /* Notice: Keep up to date */ -#define BUILT_WITH "SSL ZLIB SNAPPY ZSTD CURL SASL_SCRAM SASL_OAUTHBEARER PLUGINS HDRHISTOGRAM" +#define BUILT_WITH \ + "SSL ZLIB SNAPPY ZSTD CURL SASL_SCRAM SASL_OAUTHBEARER PLUGINS " \ + "HDRHISTOGRAM" #endif /* _RD_WIN32_CONFIG_H_ */ diff --git a/tests/0000-unittests.c b/tests/0000-unittests.c index 09b6e4397b..e0a02fb625 100644 --- a/tests/0000-unittests.c +++ b/tests/0000-unittests.c @@ -35,7 +35,7 @@ * build options, OpenSSL version, etc. * Useful for manually verifying build options in CI logs. */ -static void show_build_opts (void) { +static void show_build_opts(void) { rd_kafka_conf_t *conf = rd_kafka_conf_new(); rd_kafka_t *rk; char errstr[512]; @@ -46,8 +46,8 @@ static void show_build_opts (void) { test_conf_set(conf, "debug", "generic,security"); /* Try with SSL first, which may or may not be a build option. 
*/ - if (rd_kafka_conf_set(conf, "security.protocol", "SSL", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) + if (rd_kafka_conf_set(conf, "security.protocol", "SSL", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_SAY("Failed to security.protocol=SSL: %s\n", errstr); rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); @@ -60,7 +60,7 @@ static void show_build_opts (void) { /** * @brief Call librdkafka built-in unit-tests */ -int main_0000_unittests (int argc, char **argv) { +int main_0000_unittests(int argc, char **argv) { int fails = 0; show_build_opts(); diff --git a/tests/0001-multiobj.c b/tests/0001-multiobj.c index d62c95b268..c2a4eb57af 100644 --- a/tests/0001-multiobj.c +++ b/tests/0001-multiobj.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,55 +35,56 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ -int main_0001_multiobj (int argc, char **argv) { - int partition = RD_KAFKA_PARTITION_UA; /* random */ - int i; - int NUM_ITER = test_quick ? 2 : 5; +int main_0001_multiobj(int argc, char **argv) { + int partition = RD_KAFKA_PARTITION_UA; /* random */ + int i; + int NUM_ITER = test_quick ? 2 : 5; const char *topic = NULL; - TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER); + TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER); - /* Create, use and destroy NUM_ITER kafka instances. 
*/ - for (i = 0 ; i < NUM_ITER ; i++) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char msg[128]; + /* Create, use and destroy NUM_ITER kafka instances. */ + for (i = 0; i < NUM_ITER; i++) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char msg[128]; test_timing_t t_full, t_destroy; - test_conf_init(&conf, &topic_conf, 30); + test_conf_init(&conf, &topic_conf, 30); if (!topic) topic = test_mk_topic_name("0001", 0); TIMING_START(&t_full, "full create-produce-destroy cycle"); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL( + "Failed to create topic for " + "rdkafka instance #%i: %s\n", + i, rd_kafka_err2str(rd_kafka_last_error())); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic for " - "rdkafka instance #%i: %s\n", - i, rd_kafka_err2str(rd_kafka_last_error())); + rd_snprintf(msg, sizeof(msg), + "%s test message for iteration #%i", argv[0], i); - rd_snprintf(msg, sizeof(msg), "%s test message for iteration #%i", - argv[0], i); + /* Produce a message */ + rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, NULL); - /* Produce a message */ - rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, NULL); - - /* Wait for it to be sent (and possibly acked) */ - rd_kafka_flush(rk, -1); + /* Wait for it to be sent (and possibly acked) */ + rd_kafka_flush(rk, -1); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ + /* Destroy rdkafka instance */ TIMING_START(&t_destroy, "rd_kafka_destroy()"); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); TIMING_STOP(&t_destroy); TIMING_STOP(&t_full); @@ -91,7 +92,7 @@ int main_0001_multiobj 
(int argc, char **argv) { /* Topic is created on the first iteration. */ if (i > 0) TIMING_ASSERT(&t_full, 0, 999); - } + } - return 0; + return 0; } diff --git a/tests/0002-unkpart.c b/tests/0002-unkpart.c index cc8bd59e7f..087e37ae62 100644 --- a/tests/0002-unkpart.c +++ b/tests/0002-unkpart.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,7 +35,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgs_wait = 0; /* bitmask */ @@ -44,90 +44,101 @@ static int msgs_wait = 0; /* bitmask */ * Delivery report callback. * Called for each message once to signal its delivery status. */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - int msgid = *(int *)msg_opaque; - - free(msg_opaque); - - if (!(msgs_wait & (1 << msgid))) - TEST_FAIL("Unwanted delivery report for message #%i " - "(waiting for 0x%x)\n", msgid, msgs_wait); - - TEST_SAY("Delivery report for message #%i: %s\n", - msgid, rd_kafka_err2str(err)); - - msgs_wait &= ~(1 << msgid); - - if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - TEST_FAIL("Message #%i failed with unexpected error %s\n", - msgid, rd_kafka_err2str(err)); +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + int msgid = *(int *)msg_opaque; + + free(msg_opaque); + + if (!(msgs_wait & (1 << msgid))) + TEST_FAIL( + "Unwanted delivery report for message #%i " + "(waiting for 0x%x)\n", + msgid, msgs_wait); + + TEST_SAY("Delivery report for message #%i: %s\n", msgid, + rd_kafka_err2str(err)); + + msgs_wait &= ~(1 << msgid); + + 
if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + TEST_FAIL("Message #%i failed with unexpected error %s\n", + msgid, rd_kafka_err2str(err)); } -static void do_test_unkpart (void) { - int partition = 99; /* non-existent */ - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char msg[128]; - int msgcnt = 10; - int i; - int fails = 0; +static void do_test_unkpart(void) { + int partition = 99; /* non-existent */ + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char msg[128]; + int msgcnt = 10; + int i; + int fails = 0; const struct rd_kafka_metadata *metadata; TEST_SAY(_C_BLU "%s\n" _C_CLR, __FUNCTION__); test_conf_init(&conf, &topic_conf, 10); - /* Set delivery report callback */ - rd_kafka_conf_set_dr_cb(conf, dr_cb); + /* Set delivery report callback */ + rd_kafka_conf_set_dr_cb(conf, dr_cb); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0002", 0), - topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0002", 0), topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); /* Request metadata so that we know the cluster is up before producing * messages, otherwise erroneous partitions will not fail immediately.*/ if ((r = rd_kafka_metadata(rk, 0, rkt, &metadata, - tmout_multip(15000))) != + tmout_multip(15000))) != RD_KAFKA_RESP_ERR_NO_ERROR) TEST_FAIL("Failed to acquire metadata: %s\n", rd_kafka_err2str(r)); rd_kafka_metadata_destroy(metadata); - /* Produce a message */ - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; + /* Produce a message */ + for (i = 0; i < msgcnt; i++) { + int *msgidp = 
malloc(sizeof(*msgidp)); + *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s test message #%i", __FUNCTION__, i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); if (r == -1) { - if (rd_kafka_last_error() == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - TEST_SAY("Failed to produce message #%i: " - "unknown partition: good!\n", i); - else - TEST_FAIL("Failed to produce message #%i: %s\n", - i, rd_kafka_err2str(rd_kafka_last_error())); + if (rd_kafka_last_error() == + RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + TEST_SAY( + "Failed to produce message #%i: " + "unknown partition: good!\n", + i); + else + TEST_FAIL( + "Failed to produce message #%i: %s\n", i, + rd_kafka_err2str(rd_kafka_last_error())); free(msgidp); - } else { - if (i > 5) { - fails++; - TEST_SAY("Message #%i produced: " - "should've failed\n", i); - } - msgs_wait |= (1 << i); - } + } else { + if (i > 5) { + fails++; + TEST_SAY( + "Message #%i produced: " + "should've failed\n", + i); + } + msgs_wait |= (1 << i); + } /* After half the messages: forcibly refresh metadata * to update the actual partition count: @@ -135,27 +146,27 @@ static void do_test_unkpart (void) { */ if (i == 5) { r = test_get_partition_count( - rk, rd_kafka_topic_name(rkt), 15000); + rk, rd_kafka_topic_name(rkt), 15000); TEST_ASSERT(r != -1, "failed to get partition count"); } } - /* Wait for messages to time out */ - rd_kafka_flush(rk, -1); + /* Wait for messages to time out */ + rd_kafka_flush(rk, -1); - if (msgs_wait != 0) - TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); + if (msgs_wait != 0) + TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); - if (fails > 0) - TEST_FAIL("See previous error(s)\n"); + if (fails > 0) + TEST_FAIL("See previous error(s)\n"); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy 
rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); TEST_SAY(_C_GRN "%s PASSED\n" _C_CLR, __FUNCTION__); } @@ -170,7 +181,7 @@ static void do_test_unkpart (void) { * This test is a copy of confluent-kafka-python's * test_Producer.test_basic_api() test that surfaced this issue. */ -static void do_test_unkpart_timeout_nobroker (void) { +static void do_test_unkpart_timeout_nobroker(void) { const char *topic = test_mk_topic_name("0002_unkpart_tmout", 0); rd_kafka_conf_t *conf; rd_kafka_t *rk; @@ -188,22 +199,20 @@ static void do_test_unkpart_timeout_nobroker (void) { rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); rkt = rd_kafka_topic_new(rk, topic, NULL); - err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, - RD_KAFKA_MSG_F_COPY, NULL, 0, NULL, 0, - &remains); + err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, + NULL, 0, NULL, 0, &remains); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); remains++; - err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, - RD_KAFKA_MSG_F_COPY, "hi", 2, "hello", 5, - &remains); + err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, + "hi", 2, "hello", 5, &remains); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); remains++; - err = rd_kafka_produce(rkt, 9/* explicit, but unknown, partition */, + err = rd_kafka_produce(rkt, 9 /* explicit, but unknown, partition */, RD_KAFKA_MSG_F_COPY, "three", 5, NULL, 0, &remains); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); @@ -218,8 +227,8 @@ static void do_test_unkpart_timeout_nobroker (void) { "expected no more messages in queue, got %d", rd_kafka_outq_len(rk)); - 
TEST_ASSERT(remains == 0, - "expected no messages remaining, got %d", remains); + TEST_ASSERT(remains == 0, "expected no messages remaining, got %d", + remains); rd_kafka_topic_destroy(rkt); rd_kafka_destroy(rk); @@ -228,7 +237,7 @@ static void do_test_unkpart_timeout_nobroker (void) { } -int main_0002_unkpart (int argc, char **argv) { +int main_0002_unkpart(int argc, char **argv) { do_test_unkpart(); do_test_unkpart_timeout_nobroker(); return 0; diff --git a/tests/0003-msgmaxsize.c b/tests/0003-msgmaxsize.c index 037fc5e2e5..97b5111258 100644 --- a/tests/0003-msgmaxsize.c +++ b/tests/0003-msgmaxsize.c @@ -35,7 +35,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgs_wait = 0; /* bitmask */ @@ -44,8 +44,12 @@ static int msgs_wait = 0; /* bitmask */ * Delivery report callback. * Called for each message once to signal its delivery status. 
*/ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { int msgid = *(int *)msg_opaque; free(msg_opaque); @@ -55,17 +59,19 @@ static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, msgid, rd_kafka_err2str(err)); if (!(msgs_wait & (1 << msgid))) - TEST_FAIL("Unwanted delivery report for message #%i " - "(waiting for 0x%x)\n", msgid, msgs_wait); + TEST_FAIL( + "Unwanted delivery report for message #%i " + "(waiting for 0x%x)\n", + msgid, msgs_wait); - TEST_SAY("Delivery report for message #%i: %s\n", - msgid, rd_kafka_err2str(err)); + TEST_SAY("Delivery report for message #%i: %s\n", msgid, + rd_kafka_err2str(err)); msgs_wait &= ~(1 << msgid); } -int main_0003_msgmaxsize (int argc, char **argv) { +int main_0003_msgmaxsize(int argc, char **argv) { int partition = 0; int r; rd_kafka_t *rk; @@ -78,24 +84,22 @@ int main_0003_msgmaxsize (int argc, char **argv) { ssize_t keylen; ssize_t len; rd_kafka_resp_err_t exp_err; - } sizes[] = { - /* message.max.bytes is including framing */ - { -1, 5000, RD_KAFKA_RESP_ERR_NO_ERROR }, - { 0, 99900, RD_KAFKA_RESP_ERR_NO_ERROR }, - { 0, 100000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE }, - { 100000, 0, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE }, - { 1000, 100000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE }, - { 0, 101000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE }, - { 99000, -1, RD_KAFKA_RESP_ERR_NO_ERROR }, - { -1, -1, RD_KAFKA_RESP_ERR__END } - }; + } sizes[] = {/* message.max.bytes is including framing */ + {-1, 5000, RD_KAFKA_RESP_ERR_NO_ERROR}, + {0, 99900, RD_KAFKA_RESP_ERR_NO_ERROR}, + {0, 100000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE}, + {100000, 0, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE}, + {1000, 100000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE}, + {0, 101000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE}, + {99000, -1, RD_KAFKA_RESP_ERR_NO_ERROR}, + 
{-1, -1, RD_KAFKA_RESP_ERR__END}}; int i; test_conf_init(&conf, &topic_conf, 10); /* Set a small maximum message size. */ - if (rd_kafka_conf_set(conf, "message.max.bytes", "100000", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) + if (rd_kafka_conf_set(conf, "message.max.bytes", "100000", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s\n", errstr); /* Set delivery report callback */ @@ -104,47 +108,40 @@ int main_0003_msgmaxsize (int argc, char **argv) { /* Create kafka instance */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0003", 0), - topic_conf); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0003", 0), topic_conf); if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); - for (i = 0 ; sizes[i].exp_err != RD_KAFKA_RESP_ERR__END ; i++) { - void *value = sizes[i].len != -1 ? - calloc(1, sizes[i].len) : NULL; + for (i = 0; sizes[i].exp_err != RD_KAFKA_RESP_ERR__END; i++) { + void *value = + sizes[i].len != -1 ? calloc(1, sizes[i].len) : NULL; size_t len = sizes[i].len != -1 ? sizes[i].len : 0; - void *key = sizes[i].keylen != -1 ? - calloc(1, sizes[i].keylen) : NULL; + void *key = + sizes[i].keylen != -1 ? calloc(1, sizes[i].keylen) : NULL; size_t keylen = sizes[i].keylen != -1 ? 
sizes[i].keylen : 0; - int *msgidp = malloc(sizeof(*msgidp)); + int *msgidp = malloc(sizeof(*msgidp)); rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; *msgidp = i; - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - value, len, - key, keylen, - msgidp); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, value, + len, key, keylen, msgidp); if (r == -1) err = rd_kafka_last_error(); if (err != sizes[i].exp_err) { - TEST_FAIL("Msg #%d produce(len=%"PRIdsz - ", keylen=%"PRIdsz"): got %s, expected %s", - i, - sizes[i].len, - sizes[i].keylen, + TEST_FAIL("Msg #%d produce(len=%" PRIdsz + ", keylen=%" PRIdsz "): got %s, expected %s", + i, sizes[i].len, sizes[i].keylen, rd_kafka_err2name(err), rd_kafka_err2name(sizes[i].exp_err)); } else { - TEST_SAY("Msg #%d produce() returned expected %s " - "for value size %"PRIdsz - " and key size %"PRIdsz"\n", - i, - rd_kafka_err2name(err), - sizes[i].len, - sizes[i].keylen); + TEST_SAY( + "Msg #%d produce() returned expected %s " + "for value size %" PRIdsz " and key size %" PRIdsz + "\n", + i, rd_kafka_err2name(err), sizes[i].len, + sizes[i].keylen); if (!sizes[i].exp_err) msgs_wait |= (1 << i); diff --git a/tests/0004-conf.c b/tests/0004-conf.c index 4cd7ed4dda..52f6a0204d 100644 --- a/tests/0004-conf.c +++ b/tests/0004-conf.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,78 +35,84 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { } -static void error_cb (rd_kafka_t *rk, int err, const char *reason, - void *opaque) { - +static void +error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) { } -static int32_t partitioner (const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { - return 0; +static int32_t partitioner(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + return 0; } -static void conf_verify (int line, - const char **arr, size_t cnt, const char **confs) { - int i, j; - - - for (i = 0 ; confs[i] ; i += 2) { - for (j = 0 ; j < (int)cnt ; j += 2) { - if (!strcmp(confs[i], arr[j])) { - if (strcmp(confs[i+1], arr[j+1])) - TEST_FAIL("%i: Property %s mismatch: " - "expected %s != retrieved %s", - line, - confs[i], - confs[i+1], arr[j+1]); - } - if (j == (int)cnt) - TEST_FAIL("%i: " - "Property %s not found in config\n", - line, - confs[i]); - } - } +static void +conf_verify(int line, const char **arr, size_t cnt, const char **confs) { + int i, j; + + + for (i = 0; confs[i]; i += 2) { + for (j = 0; j < (int)cnt; j += 2) { + if (!strcmp(confs[i], arr[j])) { + if (strcmp(confs[i + 1], arr[j + 1])) + TEST_FAIL( + "%i: Property %s mismatch: " + "expected %s != retrieved %s", + line, confs[i], confs[i + 1], + arr[j + 1]); + } + if (j == (int)cnt) + TEST_FAIL( + "%i: " + "Property %s not found in config\n", + line, confs[i]); + } + } } -static void conf_cmp (const char *desc, - const char **a, size_t acnt, - const char **b, size_t bcnt) { - int i; +static void conf_cmp(const char 
*desc, + const char **a, + size_t acnt, + const char **b, + size_t bcnt) { + int i; - if (acnt != bcnt) - TEST_FAIL("%s config compare: count %"PRIusz" != %"PRIusz" mismatch", - desc, acnt, bcnt); + if (acnt != bcnt) + TEST_FAIL("%s config compare: count %" PRIusz " != %" PRIusz + " mismatch", + desc, acnt, bcnt); - for (i = 0 ; i < (int)acnt ; i += 2) { - if (strcmp(a[i], b[i])) - TEST_FAIL("%s conf mismatch: %s != %s", - desc, a[i], b[i]); - else if (strcmp(a[i+1], b[i+1])) { + for (i = 0; i < (int)acnt; i += 2) { + if (strcmp(a[i], b[i])) + TEST_FAIL("%s conf mismatch: %s != %s", desc, a[i], + b[i]); + else if (strcmp(a[i + 1], b[i + 1])) { /* The default_topic_conf will be auto-created * when global->topic fallthru is used, so its * value will not match here. */ if (!strcmp(a[i], "default_topic_conf")) continue; TEST_FAIL("%s conf value mismatch for %s: %s != %s", - desc, a[i], a[i+1], b[i+1]); + desc, a[i], a[i + 1], b[i + 1]); } - } + } } @@ -114,10 +120,11 @@ static void conf_cmp (const char *desc, * @brief Not called, just used for config */ static int on_new_call_cnt; -static rd_kafka_resp_err_t my_on_new (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t my_on_new(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { TEST_SAY("%s: on_new() called\n", rd_kafka_name(rk)); on_new_call_cnt++; return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -130,7 +137,7 @@ static rd_kafka_resp_err_t my_on_new (rd_kafka_t *rk, * but when it fails the config object remains in application custody. 
* These tests makes sure that's the case (preferably run with valgrind) */ -static void do_test_kafka_new_failures (void) { +static void do_test_kafka_new_failures(void) { rd_kafka_conf_t *conf; rd_kafka_t *rk; char errstr[512]; @@ -145,8 +152,8 @@ static void do_test_kafka_new_failures (void) { * by conf_set() but by rd_kafka_new() */ conf = rd_kafka_conf_new(); if (rd_kafka_conf_set(conf, "partition.assignment.strategy", - "range,thiswillfail", errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) + "range,thiswillfail", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); @@ -155,8 +162,7 @@ static void do_test_kafka_new_failures (void) { /* config object should still belong to us, * correct the erroneous config and try again. */ if (rd_kafka_conf_set(conf, "partition.assignment.strategy", NULL, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) + errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); @@ -178,48 +184,53 @@ static void do_test_kafka_new_failures (void) { * @brief Verify that INVALID properties (such as for Java SSL properties) * work, as well as INTERNAL properties. */ -static void do_test_special_invalid_conf (void) { +static void do_test_special_invalid_conf(void) { rd_kafka_conf_t *conf; char errstr[512]; rd_kafka_conf_res_t res; conf = rd_kafka_conf_new(); - res = rd_kafka_conf_set(conf, "ssl.truststore.location", "abc", - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, "ssl.truststore.location", "abc", errstr, + sizeof(errstr)); /* Existing apps might not print the error string when conf_set * returns UNKNOWN, only on INVALID, so make sure that is * what is being returned. 
*/ TEST_ASSERT(res == RD_KAFKA_CONF_INVALID, "expected ssl.truststore.location to fail with INVALID, " - "not %d", res); + "not %d", + res); /* Make sure there is a link to documentation */ TEST_ASSERT(strstr(errstr, "http"), "expected ssl.truststore.location to provide link to " - "documentation, not \"%s\"", errstr); + "documentation, not \"%s\"", + errstr); TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr); - res = rd_kafka_conf_set(conf, "sasl.jaas.config", "abc", - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, "sasl.jaas.config", "abc", errstr, + sizeof(errstr)); /* Existing apps might not print the error string when conf_set * returns UNKNOWN, only on INVALID, so make sure that is * what is being returned. */ TEST_ASSERT(res == RD_KAFKA_CONF_INVALID, "expected sasl.jaas.config to fail with INVALID, " - "not %d", res); + "not %d", + res); /* Make sure there is a link to documentation */ TEST_ASSERT(strstr(errstr, "http"), "expected sasl.jaas.config to provide link to " - "documentation, not \"%s\"", errstr); + "documentation, not \"%s\"", + errstr); TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr); - res = rd_kafka_conf_set(conf, "interceptors", "1", - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, "interceptors", "1", errstr, + sizeof(errstr)); TEST_ASSERT(res == RD_KAFKA_CONF_INVALID, "expected interceptors to fail with INVALID, " - "not %d", res); + "not %d", + res); TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr); rd_kafka_conf_destroy(conf); @@ -229,27 +240,25 @@ static void do_test_special_invalid_conf (void) { /** * @brief Verify idempotence configuration constraints */ -static void do_test_idempotence_conf (void) { +static void do_test_idempotence_conf(void) { static const struct { const char *prop; const char *val; rd_bool_t topic_conf; rd_bool_t exp_rk_fail; rd_bool_t exp_rkt_fail; - } check[] = { - { "acks", "1", rd_true, rd_false, rd_true }, - { "acks", "all", rd_true, rd_false, rd_false }, - { "queuing.strategy", "lifo", rd_true, rd_false, 
rd_true }, - { NULL } - }; + } check[] = {{"acks", "1", rd_true, rd_false, rd_true}, + {"acks", "all", rd_true, rd_false, rd_false}, + {"queuing.strategy", "lifo", rd_true, rd_false, rd_true}, + {NULL}}; int i; - for (i = 0 ; check[i].prop ; i++) { + for (i = 0; check[i].prop; i++) { int j; - for (j = 0 ; j < 1 + (check[i].topic_conf ? 1 : 0) ; j++) { + for (j = 0; j < 1 + (check[i].topic_conf ? 1 : 0); j++) { /* j = 0: set on global config - * j = 1: set on topic config */ + * j = 1: set on topic config */ rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf = NULL; rd_kafka_t *rk; @@ -260,17 +269,19 @@ static void do_test_idempotence_conf (void) { test_conf_set(conf, "enable.idempotence", "true"); if (j == 0) - test_conf_set(conf, check[i].prop, check[i].val); + test_conf_set(conf, check[i].prop, + check[i].val); - rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)); + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)); if (!rk) { /* default topic config (j=0) will fail. 
*/ TEST_ASSERT(check[i].exp_rk_fail || - (j == 0 && check[i].exp_rkt_fail && - check[i].topic_conf), + (j == 0 && + check[i].exp_rkt_fail && + check[i].topic_conf), "Did not expect config #%d.%d " "to fail: %s", i, j, errstr); @@ -280,8 +291,8 @@ static void do_test_idempotence_conf (void) { } else { TEST_ASSERT(!check[i].exp_rk_fail, - "Expect config #%d.%d to fail", - i, j); + "Expect config #%d.%d to fail", i, + j); } if (j == 1) { @@ -292,18 +303,19 @@ static void do_test_idempotence_conf (void) { rkt = rd_kafka_topic_new(rk, "mytopic", tconf); if (!rkt) { - TEST_ASSERT(check[i].exp_rkt_fail, - "Did not expect topic config " - "#%d.%d to fail: %s", - i, j, - rd_kafka_err2str( - rd_kafka_last_error())); + TEST_ASSERT( + check[i].exp_rkt_fail, + "Did not expect topic config " + "#%d.%d to fail: %s", + i, j, + rd_kafka_err2str(rd_kafka_last_error())); } else { TEST_ASSERT(!check[i].exp_rkt_fail, "Expect topic config " - "#%d.%d to fail", i, j); + "#%d.%d to fail", + i, j); rd_kafka_topic_destroy(rkt); } @@ -317,23 +329,21 @@ static void do_test_idempotence_conf (void) { * @brief Verify that configuration properties can be extract * from the instance config object. 
*/ -static void do_test_instance_conf (void) { +static void do_test_instance_conf(void) { rd_kafka_conf_t *conf; const rd_kafka_conf_t *iconf; rd_kafka_t *rk; rd_kafka_conf_res_t res; static const char *props[] = { - "linger.ms", "123", - "group.id", "test1", - "enable.auto.commit", "false", - NULL, + "linger.ms", "123", "group.id", "test1", + "enable.auto.commit", "false", NULL, }; const char **p; conf = rd_kafka_conf_new(); - for (p = props ; *p ; p += 2) { - res = rd_kafka_conf_set(conf, *p, *(p+1), NULL, 0); + for (p = props; *p; p += 2) { + res = rd_kafka_conf_set(conf, *p, *(p + 1), NULL, 0); TEST_ASSERT(res == RD_KAFKA_CONF_OK, "failed to set %s", *p); } @@ -343,7 +353,7 @@ static void do_test_instance_conf (void) { iconf = rd_kafka_conf(rk); TEST_ASSERT(conf, "failed to get instance config"); - for (p = props ; *p ; p += 2) { + for (p = props; *p; p += 2) { char dest[512]; size_t destsz = sizeof(dest); @@ -352,9 +362,8 @@ static void do_test_instance_conf (void) { "failed to get %s: result %d", *p, res); TEST_SAY("Instance config %s=%s\n", *p, dest); - TEST_ASSERT(!strcmp(*(p+1), dest), - "Expected %s=%s, not %s", - *p, *(p+1), dest); + TEST_ASSERT(!strcmp(*(p + 1), dest), "Expected %s=%s, not %s", + *p, *(p + 1), dest); } rd_kafka_destroy(rk); @@ -364,7 +373,7 @@ static void do_test_instance_conf (void) { /** * @brief Verify that setting and retrieving the default topic config works. */ -static void do_test_default_topic_conf (void) { +static void do_test_default_topic_conf(void) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf; const char *val, *exp_val; @@ -384,8 +393,8 @@ static void do_test_default_topic_conf (void) { /* Get value from global config by fall-thru */ val = test_conf_get(conf, "message.timeout.ms"); TEST_ASSERT(val && !strcmp(val, exp_val), - "Expected (conf) message.timeout.ms=%s, not %s", - exp_val, val ? val : "(NULL)"); + "Expected (conf) message.timeout.ms=%s, not %s", exp_val, + val ? 
val : "(NULL)"); /* Get value from default topic config */ val = test_topic_conf_get(tconf, "message.timeout.ms"); @@ -400,8 +409,8 @@ static void do_test_default_topic_conf (void) { /* Get value from global config by fall-thru */ val = test_conf_get(conf, "message.timeout.ms"); TEST_ASSERT(val && !strcmp(val, exp_val), - "Expected (conf) message.timeout.ms=%s, not %s", - exp_val, val ? val : "(NULL)"); + "Expected (conf) message.timeout.ms=%s, not %s", exp_val, + val ? val : "(NULL)"); /* Get value from default topic config */ val = test_topic_conf_get(tconf, "message.timeout.ms"); @@ -416,172 +425,177 @@ static void do_test_default_topic_conf (void) { } -int main_0004_conf (int argc, char **argv) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *ignore_conf, *conf, *conf2; - rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2; - char errstr[512]; +int main_0004_conf(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *ignore_conf, *conf, *conf2; + rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2; + char errstr[512]; rd_kafka_resp_err_t err; - const char **arr_orig, **arr_dup; - size_t cnt_orig, cnt_dup; - int i; + const char **arr_orig, **arr_dup; + size_t cnt_orig, cnt_dup; + int i; const char *topic; - static const char *gconfs[] = { - "message.max.bytes", "12345", /* int property */ - "client.id", "my id", /* string property */ - "debug", "topic,metadata,interceptor", /* S2F property */ - "topic.blacklist", "__.*", /* #778 */ - "auto.offset.reset", "earliest", /* Global->Topic fallthru */ + static const char *gconfs[] = { + "message.max.bytes", + "12345", /* int property */ + "client.id", + "my id", /* string property */ + "debug", + "topic,metadata,interceptor", /* S2F property */ + "topic.blacklist", + "__.*", /* #778 */ + "auto.offset.reset", + "earliest", /* Global->Topic fallthru */ #if WITH_ZLIB - "compression.codec", "gzip", /* S2I property */ + "compression.codec", + "gzip", /* S2I property 
*/ #endif #if defined(_WIN32) - "ssl.ca.certificate.stores", "Intermediate ,, Root ,", + "ssl.ca.certificate.stores", + "Intermediate ,, Root ,", #endif - NULL - }; - static const char *tconfs[] = { - "request.required.acks", "-1", /* int */ - "auto.commit.enable", "false", /* bool */ - "auto.offset.reset", "error", /* S2I */ - "offset.store.path", "my/path", /* string */ - NULL - }; - - test_conf_init(&ignore_conf, &ignore_topic_conf, 10); - rd_kafka_conf_destroy(ignore_conf); - rd_kafka_topic_conf_destroy(ignore_topic_conf); + NULL + }; + static const char *tconfs[] = {"request.required.acks", + "-1", /* int */ + "auto.commit.enable", + "false", /* bool */ + "auto.offset.reset", + "error", /* S2I */ + "offset.store.path", + "my/path", /* string */ + NULL}; + + test_conf_init(&ignore_conf, &ignore_topic_conf, 10); + rd_kafka_conf_destroy(ignore_conf); + rd_kafka_topic_conf_destroy(ignore_topic_conf); topic = test_mk_topic_name("0004", 0); - /* Set up a global config object */ - conf = rd_kafka_conf_new(); + /* Set up a global config object */ + conf = rd_kafka_conf_new(); - for (i = 0 ; gconfs[i] ; i += 2) { - if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i+1], - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) + for (i = 0; gconfs[i]; i += 2) { + if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i + 1], errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s\n", errstr); } - rd_kafka_conf_set_dr_cb(conf, dr_cb); - rd_kafka_conf_set_error_cb(conf, error_cb); + rd_kafka_conf_set_dr_cb(conf, dr_cb); + rd_kafka_conf_set_error_cb(conf, error_cb); /* interceptor configs are not exposed as strings or in dumps * so the dump verification step will not cover them, but valgrind * will help track down memory leaks/use-after-free etc. 
*/ - err = rd_kafka_conf_interceptor_add_on_new(conf, "testic", - my_on_new, NULL); + err = rd_kafka_conf_interceptor_add_on_new(conf, "testic", my_on_new, + NULL); TEST_ASSERT(!err, "add_on_new() failed: %s", rd_kafka_err2str(err)); - /* Set up a topic config object */ - tconf = rd_kafka_topic_conf_new(); + /* Set up a topic config object */ + tconf = rd_kafka_topic_conf_new(); - rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner); - rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef); + rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner); + rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef); - for (i = 0 ; tconfs[i] ; i += 2) { - if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i+1], - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) - TEST_FAIL("%s\n", errstr); - } + for (i = 0; tconfs[i]; i += 2) { + if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i + 1], + errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) + TEST_FAIL("%s\n", errstr); + } - /* Verify global config */ - arr_orig = rd_kafka_conf_dump(conf, &cnt_orig); - conf_verify(__LINE__, arr_orig, cnt_orig, gconfs); + /* Verify global config */ + arr_orig = rd_kafka_conf_dump(conf, &cnt_orig); + conf_verify(__LINE__, arr_orig, cnt_orig, gconfs); - /* Verify copied global config */ - conf2 = rd_kafka_conf_dup(conf); - arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup); - conf_verify(__LINE__, arr_dup, cnt_dup, gconfs); - conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup); - rd_kafka_conf_dump_free(arr_orig, cnt_orig); - rd_kafka_conf_dump_free(arr_dup, cnt_dup); + /* Verify copied global config */ + conf2 = rd_kafka_conf_dup(conf); + arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup); + conf_verify(__LINE__, arr_dup, cnt_dup, gconfs); + conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup); + rd_kafka_conf_dump_free(arr_orig, cnt_orig); + rd_kafka_conf_dump_free(arr_dup, cnt_dup); - /* Verify topic config */ - arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig); - conf_verify(__LINE__, 
arr_orig, cnt_orig, tconfs); + /* Verify topic config */ + arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig); + conf_verify(__LINE__, arr_orig, cnt_orig, tconfs); - /* Verify copied topic config */ - tconf2 = rd_kafka_topic_conf_dup(tconf); - arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup); - conf_verify(__LINE__, arr_dup, cnt_dup, tconfs); - conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup); - rd_kafka_conf_dump_free(arr_orig, cnt_orig); - rd_kafka_conf_dump_free(arr_dup, cnt_dup); + /* Verify copied topic config */ + tconf2 = rd_kafka_topic_conf_dup(tconf); + arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup); + conf_verify(__LINE__, arr_dup, cnt_dup, tconfs); + conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup); + rd_kafka_conf_dump_free(arr_orig, cnt_orig); + rd_kafka_conf_dump_free(arr_dup, cnt_dup); - /* - * Create kafka instances using original and copied confs - */ + /* + * Create kafka instances using original and copied confs + */ - /* original */ + /* original */ TEST_ASSERT(on_new_call_cnt == 0, "expected 0 on_new call, not %d", on_new_call_cnt); on_new_call_cnt = 0; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); TEST_ASSERT(on_new_call_cnt == 1, "expected 1 on_new call, not %d", - on_new_call_cnt); + on_new_call_cnt); - rkt = rd_kafka_topic_new(rk, topic, tconf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + rkt = rd_kafka_topic_new(rk, topic, tconf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - /* copied */ + /* copied */ on_new_call_cnt = 0; /* interceptors are not copied. 
*/ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf2); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf2); TEST_ASSERT(on_new_call_cnt == 0, "expected 0 on_new call, not %d", - on_new_call_cnt); + on_new_call_cnt); - rkt = rd_kafka_topic_new(rk, topic, tconf2); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rkt = rd_kafka_topic_new(rk, topic, tconf2); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - /* Incremental S2F property. - * NOTE: The order of fields returned in get() is hardcoded here. */ - { - static const char *s2fs[] = { - "generic,broker,queue,cgrp", - "generic,broker,queue,cgrp", + /* Incremental S2F property. + * NOTE: The order of fields returned in get() is hardcoded here. */ + { + static const char *s2fs[] = {"generic,broker,queue,cgrp", + "generic,broker,queue,cgrp", - "-broker,+queue,topic", - "generic,topic,queue,cgrp", + "-broker,+queue,topic", + "generic,topic,queue,cgrp", - "-all,security,-fetch,+metadata", - "metadata,security", + "-all,security,-fetch,+metadata", + "metadata,security", - NULL - }; + NULL}; - TEST_SAY("Incremental S2F tests\n"); - conf = rd_kafka_conf_new(); + TEST_SAY("Incremental S2F tests\n"); + conf = rd_kafka_conf_new(); - for (i = 0 ; s2fs[i] ; i += 2) { - const char *val; + for (i = 0; s2fs[i]; i += 2) { + const char *val; - TEST_SAY(" Set: %s\n", s2fs[i]); - test_conf_set(conf, "debug", s2fs[i]); - val = test_conf_get(conf, "debug"); - TEST_SAY(" Now: %s\n", val); + TEST_SAY(" Set: %s\n", s2fs[i]); + test_conf_set(conf, "debug", s2fs[i]); + val = test_conf_get(conf, "debug"); + TEST_SAY(" Now: %s\n", val); - if (strcmp(val, s2fs[i+1])) - TEST_FAIL_LATER("\n" - "Expected: %s\n" - " Got: %s", - s2fs[i+1], val); - } - rd_kafka_conf_destroy(conf); - } + if (strcmp(val, s2fs[i + 1])) + TEST_FAIL_LATER( + "\n" + "Expected: %s\n" + " Got: %s", + 
s2fs[i + 1], val); + } + rd_kafka_conf_destroy(conf); + } { rd_kafka_conf_res_t res; @@ -589,19 +603,23 @@ int main_0004_conf (int argc, char **argv) { TEST_SAY("Error reporting for S2F properties\n"); conf = rd_kafka_conf_new(); - res = rd_kafka_conf_set(conf, "debug", - "cgrp,invalid-value,topic", errstr, sizeof(errstr)); + res = + rd_kafka_conf_set(conf, "debug", "cgrp,invalid-value,topic", + errstr, sizeof(errstr)); - TEST_ASSERT(res == RD_KAFKA_CONF_INVALID, - "expected 'debug=invalid-value' to fail with INVALID, " - "not %d", res); - TEST_ASSERT(strstr(errstr, "invalid-value"), - "expected invalid value to be mentioned in error, " - "not \"%s\"", errstr); TEST_ASSERT( - !strstr(errstr, "cgrp") && !strstr(errstr, "topic"), - "expected only invalid value to be mentioned, " - "not \"%s\"", errstr); + res == RD_KAFKA_CONF_INVALID, + "expected 'debug=invalid-value' to fail with INVALID, " + "not %d", + res); + TEST_ASSERT(strstr(errstr, "invalid-value"), + "expected invalid value to be mentioned in error, " + "not \"%s\"", + errstr); + TEST_ASSERT(!strstr(errstr, "cgrp") && !strstr(errstr, "topic"), + "expected only invalid value to be mentioned, " + "not \"%s\"", + errstr); TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr); rd_kafka_conf_destroy(conf); @@ -609,111 +627,108 @@ int main_0004_conf (int argc, char **argv) { #if WITH_SSL { - TEST_SAY("Verifying that ssl.ca.location is not " - "overwritten (#3566)\n"); + TEST_SAY( + "Verifying that ssl.ca.location is not " + "overwritten (#3566)\n"); conf = rd_kafka_conf_new(); test_conf_set(conf, "security.protocol", "SSL"); test_conf_set(conf, "ssl.ca.location", "/?/does/!/not/exist!"); - rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)); + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)); TEST_ASSERT(!rk, "Expected rd_kafka_new() to fail with " "invalid ssl.ca.location"); - TEST_SAY("rd_kafka_new() failed as expected: %s\n", - errstr); + TEST_SAY("rd_kafka_new() failed as expected: 
%s\n", errstr); } #endif - /* Canonical int values, aliases, s2i-verified strings, doubles */ - { - static const struct { - const char *prop; - const char *val; - const char *exp; - int is_global; - } props[] = { - { "request.required.acks", "0", "0" }, - { "request.required.acks", "-1", "-1" }, - { "request.required.acks", "1", "1" }, - { "acks", "3", "3" }, /* alias test */ - { "request.required.acks", "393", "393" }, - { "request.required.acks", "bad", NULL }, - { "request.required.acks", "all", "-1" }, - { "request.required.acks", "all", "-1", 1/*fallthru*/ }, - { "acks", "0", "0" }, /* alias test */ + /* Canonical int values, aliases, s2i-verified strings, doubles */ + { + static const struct { + const char *prop; + const char *val; + const char *exp; + int is_global; + } props[] = { + {"request.required.acks", "0", "0"}, + {"request.required.acks", "-1", "-1"}, + {"request.required.acks", "1", "1"}, + {"acks", "3", "3"}, /* alias test */ + {"request.required.acks", "393", "393"}, + {"request.required.acks", "bad", NULL}, + {"request.required.acks", "all", "-1"}, + {"request.required.acks", "all", "-1", 1 /*fallthru*/}, + {"acks", "0", "0"}, /* alias test */ #if WITH_SASL - { "sasl.mechanisms", "GSSAPI", "GSSAPI", 1 }, - { "sasl.mechanisms", "PLAIN", "PLAIN", 1 }, - { "sasl.mechanisms", "GSSAPI,PLAIN", NULL, 1 }, - { "sasl.mechanisms", "", NULL, 1 }, + {"sasl.mechanisms", "GSSAPI", "GSSAPI", 1}, + {"sasl.mechanisms", "PLAIN", "PLAIN", 1}, + {"sasl.mechanisms", "GSSAPI,PLAIN", NULL, 1}, + {"sasl.mechanisms", "", NULL, 1}, #endif - { "linger.ms", "12555.3", "12555.3", 1 }, - { "linger.ms", "1500.000", "1500", 1 }, - { "linger.ms", "0.0001", "0.0001", 1 }, - { NULL } - }; - - TEST_SAY("Canonical tests\n"); - tconf = rd_kafka_topic_conf_new(); - conf = rd_kafka_conf_new(); - - for (i = 0 ; props[i].prop ; i++) { - char dest[64]; - size_t destsz; - rd_kafka_conf_res_t res; - - TEST_SAY(" Set: %s=%s expect %s (%s)\n", - props[i].prop, props[i].val, props[i].exp, - 
props[i].is_global ? "global":"topic"); - - - /* Set value */ - if (props[i].is_global) - res = rd_kafka_conf_set(conf, - props[i].prop, - props[i].val, - errstr, sizeof(errstr)); - else - res = rd_kafka_topic_conf_set(tconf, - props[i].prop, - props[i].val, - errstr, - sizeof(errstr)); - if ((res == RD_KAFKA_CONF_OK ? 1:0) != - (props[i].exp ? 1:0)) - TEST_FAIL("Expected %s, got %s", - props[i].exp ? "success" : "failure", - (res == RD_KAFKA_CONF_OK ? "OK" : - (res == RD_KAFKA_CONF_INVALID ? "INVALID" : - "UNKNOWN"))); - - if (!props[i].exp) - continue; - - /* Get value and compare to expected result */ - destsz = sizeof(dest); - if (props[i].is_global) - res = rd_kafka_conf_get(conf, - props[i].prop, - dest, &destsz); - else - res = rd_kafka_topic_conf_get(tconf, - props[i].prop, - dest, &destsz); - TEST_ASSERT(res == RD_KAFKA_CONF_OK, - ".._conf_get(%s) returned %d", + {"linger.ms", "12555.3", "12555.3", 1}, + {"linger.ms", "1500.000", "1500", 1}, + {"linger.ms", "0.0001", "0.0001", 1}, + {NULL} + }; + + TEST_SAY("Canonical tests\n"); + tconf = rd_kafka_topic_conf_new(); + conf = rd_kafka_conf_new(); + + for (i = 0; props[i].prop; i++) { + char dest[64]; + size_t destsz; + rd_kafka_conf_res_t res; + + TEST_SAY(" Set: %s=%s expect %s (%s)\n", props[i].prop, + props[i].val, props[i].exp, + props[i].is_global ? "global" : "topic"); + + + /* Set value */ + if (props[i].is_global) + res = rd_kafka_conf_set(conf, props[i].prop, + props[i].val, errstr, + sizeof(errstr)); + else + res = rd_kafka_topic_conf_set( + tconf, props[i].prop, props[i].val, errstr, + sizeof(errstr)); + if ((res == RD_KAFKA_CONF_OK ? 1 : 0) != + (props[i].exp ? 1 : 0)) + TEST_FAIL("Expected %s, got %s", + props[i].exp ? "success" : "failure", + (res == RD_KAFKA_CONF_OK + ? "OK" + : (res == RD_KAFKA_CONF_INVALID + ? 
"INVALID" + : "UNKNOWN"))); + + if (!props[i].exp) + continue; + + /* Get value and compare to expected result */ + destsz = sizeof(dest); + if (props[i].is_global) + res = rd_kafka_conf_get(conf, props[i].prop, + dest, &destsz); + else + res = rd_kafka_topic_conf_get( + tconf, props[i].prop, dest, &destsz); + TEST_ASSERT(res == RD_KAFKA_CONF_OK, + ".._conf_get(%s) returned %d", props[i].prop, res); - TEST_ASSERT(!strcmp(props[i].exp, dest), - "Expected \"%s\", got \"%s\"", - props[i].exp, dest); - } - rd_kafka_topic_conf_destroy(tconf); - rd_kafka_conf_destroy(conf); - } + TEST_ASSERT(!strcmp(props[i].exp, dest), + "Expected \"%s\", got \"%s\"", props[i].exp, + dest); + } + rd_kafka_topic_conf_destroy(tconf); + rd_kafka_conf_destroy(conf); + } do_test_kafka_new_failures(); @@ -725,5 +740,5 @@ int main_0004_conf (int argc, char **argv) { do_test_default_topic_conf(); - return 0; + return 0; } diff --git a/tests/0005-order.c b/tests/0005-order.c index ac0dad8d84..722cef3b06 100644 --- a/tests/0005-order.c +++ b/tests/0005-order.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,97 +35,99 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgid_next = 0; -static int fails = 0; +static int fails = 0; /** * Delivery reported callback. * Called for each message once to signal its delivery status. 
*/ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - int msgid = *(int *)msg_opaque; - - free(msg_opaque); - - if (err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(err)); - - if (msgid != msgid_next) { - fails++; - TEST_FAIL("Delivered msg %i, expected %i\n", - msgid, msgid_next); - return; - } - - msgid_next = msgid+1; +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + int msgid = *(int *)msg_opaque; + + free(msg_opaque); + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(err)); + + if (msgid != msgid_next) { + fails++; + TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next); + return; + } + + msgid_next = msgid + 1; } -int main_0005_order (int argc, char **argv) { - int partition = 0; - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char msg[128]; - int msgcnt = test_quick ? 500 : 50000; - int i; +int main_0005_order(int argc, char **argv) { + int partition = 0; + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char msg[128]; + int msgcnt = test_quick ? 
500 : 50000; + int i; test_timing_t t_produce, t_delivery; - test_conf_init(&conf, &topic_conf, 10); + test_conf_init(&conf, &topic_conf, 10); - /* Set delivery report callback */ - rd_kafka_conf_set_dr_cb(conf, dr_cb); + /* Set delivery report callback */ + rd_kafka_conf_set_dr_cb(conf, dr_cb); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), - topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); - /* Produce messages */ + /* Produce messages */ TIMING_START(&t_produce, "PRODUCE"); - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; - rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); - if (r == -1) - TEST_FAIL("Failed to produce message #%i: %s\n", - i, rd_strerror(errno)); - } + for (i = 0; i < msgcnt; i++) { + int *msgidp = malloc(sizeof(*msgidp)); + *msgidp = i; + rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], + i); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); + if (r == -1) + TEST_FAIL("Failed to produce message #%i: %s\n", i, + rd_strerror(errno)); + } TIMING_STOP(&t_produce); - TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt); + TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt); - /* Wait for messages to be delivered */ + /* Wait for messages to be delivered */ TIMING_START(&t_delivery, "DELIVERY"); - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 50); + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 50); TIMING_STOP(&t_delivery); - 
if (fails) - TEST_FAIL("%i failures, see previous errors", fails); + if (fails) + TEST_FAIL("%i failures, see previous errors", fails); - if (msgid_next != msgcnt) - TEST_FAIL("Still waiting for messages: next %i != end %i\n", - msgid_next, msgcnt); + if (msgid_next != msgcnt) + TEST_FAIL("Still waiting for messages: next %i != end %i\n", + msgid_next, msgcnt); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); - return 0; + return 0; } diff --git a/tests/0006-symbols.c b/tests/0006-symbols.c index 64cf62a133..8a25f6a1d2 100644 --- a/tests/0006-symbols.c +++ b/tests/0006-symbols.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,19 +35,19 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ -int main_0006_symbols (int argc, char **argv) { +int main_0006_symbols(int argc, char **argv) { if (argc < 0 /* always false */) { rd_kafka_version(); rd_kafka_version_str(); - rd_kafka_get_debug_contexts(); - rd_kafka_get_err_descs(NULL, NULL); + rd_kafka_get_debug_contexts(); + rd_kafka_get_err_descs(NULL, NULL); rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR); - rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR); - rd_kafka_last_error(); + rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR); + rd_kafka_last_error(); rd_kafka_conf_new(); rd_kafka_conf_destroy(NULL); rd_kafka_conf_dup(NULL); @@ -58,15 +58,15 @@ int main_0006_symbols (int argc, char **argv) { rd_kafka_conf_set_stats_cb(NULL, NULL); rd_kafka_conf_set_log_cb(NULL, NULL); rd_kafka_conf_set_socket_cb(NULL, NULL); - rd_kafka_conf_set_rebalance_cb(NULL, NULL); - rd_kafka_conf_set_offset_commit_cb(NULL, NULL); - rd_kafka_conf_set_throttle_cb(NULL, NULL); - rd_kafka_conf_set_default_topic_conf(NULL, NULL); - rd_kafka_conf_get(NULL, NULL, NULL, NULL); + rd_kafka_conf_set_rebalance_cb(NULL, NULL); + rd_kafka_conf_set_offset_commit_cb(NULL, NULL); + rd_kafka_conf_set_throttle_cb(NULL, NULL); + rd_kafka_conf_set_default_topic_conf(NULL, NULL); + rd_kafka_conf_get(NULL, NULL, NULL, NULL); #ifndef _WIN32 - rd_kafka_conf_set_open_cb(NULL, NULL); + rd_kafka_conf_set_open_cb(NULL, NULL); #endif - rd_kafka_conf_set_opaque(NULL, NULL); + rd_kafka_conf_set_opaque(NULL, NULL); rd_kafka_opaque(NULL); rd_kafka_conf_dump(NULL, NULL); rd_kafka_topic_conf_dump(NULL, NULL); @@ -77,24 +77,26 @@ int main_0006_symbols (int argc, char **argv) { rd_kafka_topic_conf_destroy(NULL); rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0); rd_kafka_topic_conf_set_opaque(NULL, NULL); - rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL); + rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL); rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL); 
rd_kafka_topic_partition_available(NULL, 0); - rd_kafka_topic_opaque(NULL); + rd_kafka_topic_opaque(NULL); rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL); - rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL); - rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL); + rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, + NULL); + rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, + NULL, NULL); rd_kafka_new(0, NULL, NULL, 0); rd_kafka_destroy(NULL); - rd_kafka_flush(NULL, 0); + rd_kafka_flush(NULL, 0); rd_kafka_name(NULL); - rd_kafka_memberid(NULL); + rd_kafka_memberid(NULL); rd_kafka_topic_new(NULL, NULL, NULL); rd_kafka_topic_destroy(NULL); rd_kafka_topic_name(NULL); rd_kafka_message_destroy(NULL); rd_kafka_message_errstr(NULL); - rd_kafka_message_timestamp(NULL, NULL); + rd_kafka_message_timestamp(NULL, NULL); rd_kafka_consume_start(NULL, 0, 0); rd_kafka_consume_stop(NULL, 0); rd_kafka_consume(NULL, 0, 0); @@ -129,33 +131,33 @@ int main_0006_symbols (int argc, char **argv) { rd_kafka_list_groups(NULL, NULL, NULL, 0); rd_kafka_group_list_destroy(NULL); - /* KafkaConsumer API */ - rd_kafka_subscribe(NULL, NULL); - rd_kafka_unsubscribe(NULL); - rd_kafka_subscription(NULL, NULL); - rd_kafka_consumer_poll(NULL, 0); - rd_kafka_consumer_close(NULL); - rd_kafka_assign(NULL, NULL); - rd_kafka_assignment(NULL, NULL); - rd_kafka_commit(NULL, NULL, 0); - rd_kafka_commit_message(NULL, NULL, 0); + /* KafkaConsumer API */ + rd_kafka_subscribe(NULL, NULL); + rd_kafka_unsubscribe(NULL); + rd_kafka_subscription(NULL, NULL); + rd_kafka_consumer_poll(NULL, 0); + rd_kafka_consumer_close(NULL); + rd_kafka_assign(NULL, NULL); + rd_kafka_assignment(NULL, NULL); + rd_kafka_commit(NULL, NULL, 0); + rd_kafka_commit_message(NULL, NULL, 0); rd_kafka_committed(NULL, NULL, 0); - rd_kafka_position(NULL, NULL); + rd_kafka_position(NULL, NULL); - /* TopicPartition */ - rd_kafka_topic_partition_list_new(0); - 
rd_kafka_topic_partition_list_destroy(NULL); - rd_kafka_topic_partition_list_add(NULL, NULL, 0); - rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0); - rd_kafka_topic_partition_list_del(NULL, NULL, 0); - rd_kafka_topic_partition_list_del_by_idx(NULL, 0); - rd_kafka_topic_partition_list_copy(NULL); - rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0); - rd_kafka_topic_partition_list_find(NULL, NULL, 0); - rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0); - rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL); + /* TopicPartition */ + rd_kafka_topic_partition_list_new(0); + rd_kafka_topic_partition_list_destroy(NULL); + rd_kafka_topic_partition_list_add(NULL, NULL, 0); + rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0); + rd_kafka_topic_partition_list_del(NULL, NULL, 0); + rd_kafka_topic_partition_list_del_by_idx(NULL, 0); + rd_kafka_topic_partition_list_copy(NULL); + rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0); + rd_kafka_topic_partition_list_find(NULL, NULL, 0); + rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0); + rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL); } - return 0; + return 0; } diff --git a/tests/0007-autotopic.c b/tests/0007-autotopic.c index 2869a00b70..cf196d60c2 100644 --- a/tests/0007-autotopic.c +++ b/tests/0007-autotopic.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -38,7 +38,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgs_wait = 0; /* bitmask */ @@ -47,83 +47,90 @@ static int msgs_wait = 0; /* bitmask */ * Delivery report callback. 
* Called for each message once to signal its delivery status. */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - int msgid = *(int *)msg_opaque; - - free(msg_opaque); - - if (!(msgs_wait & (1 << msgid))) - TEST_FAIL("Unwanted delivery report for message #%i " - "(waiting for 0x%x)\n", msgid, msgs_wait); - - TEST_SAY("Delivery report for message #%i: %s\n", - msgid, rd_kafka_err2str(err)); - - msgs_wait &= ~(1 << msgid); - - if (err) - TEST_FAIL("Message #%i failed with unexpected error %s\n", - msgid, rd_kafka_err2str(err)); +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + int msgid = *(int *)msg_opaque; + + free(msg_opaque); + + if (!(msgs_wait & (1 << msgid))) + TEST_FAIL( + "Unwanted delivery report for message #%i " + "(waiting for 0x%x)\n", + msgid, msgs_wait); + + TEST_SAY("Delivery report for message #%i: %s\n", msgid, + rd_kafka_err2str(err)); + + msgs_wait &= ~(1 << msgid); + + if (err) + TEST_FAIL("Message #%i failed with unexpected error %s\n", + msgid, rd_kafka_err2str(err)); } -int main_0007_autotopic (int argc, char **argv) { - int partition = 0; - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char msg[128]; - int msgcnt = 10; - int i; +int main_0007_autotopic(int argc, char **argv) { + int partition = 0; + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char msg[128]; + int msgcnt = 10; + int i; - /* Generate unique topic name */ - test_conf_init(&conf, &topic_conf, 10); + /* Generate unique topic name */ + test_conf_init(&conf, &topic_conf, 10); - TEST_SAY("\033[33mNOTE! This test requires " - "auto.create.topics.enable=true to be configured on " - "the broker!\033[0m\n"); + TEST_SAY( + "\033[33mNOTE! 
This test requires " + "auto.create.topics.enable=true to be configured on " + "the broker!\033[0m\n"); - /* Set delivery report callback */ - rd_kafka_conf_set_dr_cb(conf, dr_cb); + /* Set delivery report callback */ + rd_kafka_conf_set_dr_cb(conf, dr_cb); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0007_autotopic", 1), + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0007_autotopic", 1), topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); - - /* Produce a message */ - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; - rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); - if (r == -1) - TEST_FAIL("Failed to produce message #%i: %s\n", - i, rd_strerror(errno)); - msgs_wait |= (1 << i); - } - - /* Wait for messages to time out */ - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 50); - - if (msgs_wait != 0) - TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); - - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); - - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); - - return 0; + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); + + /* Produce a message */ + for (i = 0; i < msgcnt; i++) { + int *msgidp = malloc(sizeof(*msgidp)); + *msgidp = i; + rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], + i); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); + if (r == -1) + TEST_FAIL("Failed to produce message #%i: %s\n", i, + rd_strerror(errno)); + msgs_wait |= (1 << i); + } + + /* Wait for messages to time out */ + while (rd_kafka_outq_len(rk) > 
0) + rd_kafka_poll(rk, 50); + + if (msgs_wait != 0) + TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); + + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); + + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); + + return 0; } diff --git a/tests/0008-reqacks.c b/tests/0008-reqacks.c index b2fafd2f12..d52081b758 100644 --- a/tests/0008-reqacks.c +++ b/tests/0008-reqacks.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,20 +35,20 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgid_next = 0; -static int fails = 0; +static int fails = 0; static rd_kafka_msg_status_t exp_status; /** * Delivery reported callback. * Called for each message once to signal its delivery status. 
*/ -static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { - int msgid = *(int *)rkmessage->_private; +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { + int msgid = *(int *)rkmessage->_private; rd_kafka_msg_status_t status = rd_kafka_message_status(rkmessage); free(rkmessage->_private); @@ -57,45 +57,46 @@ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, TEST_FAIL("Message delivery failed: %s (status %d)\n", rd_kafka_err2str(rkmessage->err), status); - if (msgid != msgid_next) { - fails++; - TEST_FAIL("Delivered msg %i, expected %i\n", - msgid, msgid_next); - return; - } + if (msgid != msgid_next) { + fails++; + TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next); + return; + } TEST_ASSERT(status == exp_status, - "For msgid #%d: expected status %d, got %d", - msgid, exp_status, status); + "For msgid #%d: expected status %d, got %d", msgid, + exp_status, status); - msgid_next = msgid+1; + msgid_next = msgid + 1; } -int main_0008_reqacks (int argc, char **argv) { - int partition = 0; - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - char msg[128]; - int msgcnt = test_quick ? 20 : 100; - int i; +int main_0008_reqacks(int argc, char **argv) { + int partition = 0; + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + char msg[128]; + int msgcnt = test_quick ? 20 : 100; + int i; int reqacks; - int idbase = 0; + int idbase = 0; const char *topic = NULL; - TEST_SAY("\033[33mNOTE! This test requires at " - "least 3 brokers!\033[0m\n"); + TEST_SAY( + "\033[33mNOTE! This test requires at " + "least 3 brokers!\033[0m\n"); - TEST_SAY("\033[33mNOTE! This test requires " - "default.replication.factor=3 to be configured on " - "all brokers!\033[0m\n"); + TEST_SAY( + "\033[33mNOTE! 
This test requires " + "default.replication.factor=3 to be configured on " + "all brokers!\033[0m\n"); /* Try different request.required.acks settings (issue #75) */ - for (reqacks = -1 ; reqacks <= 1 ; reqacks++) { + for (reqacks = -1; reqacks <= 1; reqacks++) { char tmp[10]; test_conf_init(&conf, &topic_conf, 10); @@ -109,8 +110,8 @@ int main_0008_reqacks (int argc, char **argv) { rd_snprintf(tmp, sizeof(tmp), "%i", reqacks); if (rd_kafka_topic_conf_set(topic_conf, "request.required.acks", - tmp, errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) + tmp, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); /* Set delivery report callback */ @@ -124,9 +125,10 @@ int main_0008_reqacks (int argc, char **argv) { /* Create kafka instance */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - TEST_SAY("Created kafka instance %s with required acks %d, " - "expecting status %d\n", - rd_kafka_name(rk), reqacks, exp_status); + TEST_SAY( + "Created kafka instance %s with required acks %d, " + "expecting status %d\n", + rd_kafka_name(rk), reqacks, exp_status); rkt = rd_kafka_topic_new(rk, topic, topic_conf); if (!rkt) @@ -134,15 +136,15 @@ int main_0008_reqacks (int argc, char **argv) { rd_strerror(errno)); /* Produce messages */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = idbase + i; + *msgidp = idbase + i; rd_snprintf(msg, sizeof(msg), - "%s test message #%i (acks=%i)", - argv[0], *msgidp, reqacks); + "%s test message #%i (acks=%i)", argv[0], + *msgidp, reqacks); r = rd_kafka_produce(rkt, partition, - RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); + RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); if (r == -1) TEST_FAIL("Failed to produce message #%i: %s\n", *msgidp, rd_strerror(errno)); @@ -159,9 +161,10 @@ int main_0008_reqacks (int argc, char **argv) { TEST_FAIL("%i failures, see previous errors", fails); if (msgid_next != idbase + msgcnt) - TEST_FAIL("Still 
waiting for messages: " - "next %i != end %i\n", - msgid_next, msgcnt); + TEST_FAIL( + "Still waiting for messages: " + "next %i != end %i\n", + msgid_next, msgcnt); idbase += i; /* Destroy topic */ @@ -172,5 +175,5 @@ int main_0008_reqacks (int argc, char **argv) { rd_kafka_destroy(rk); } - return 0; + return 0; } diff --git a/tests/0009-mock_cluster.c b/tests/0009-mock_cluster.c index b6f48b14a9..32590820e7 100644 --- a/tests/0009-mock_cluster.c +++ b/tests/0009-mock_cluster.c @@ -36,7 +36,7 @@ -int main_0009_mock_cluster (int argc, char **argv) { +int main_0009_mock_cluster(int argc, char **argv) { const char *topic = test_mk_topic_name("0009_mock_cluster", 1); rd_kafka_mock_cluster_t *mcluster; rd_kafka_t *p, *c; @@ -69,12 +69,11 @@ int main_0009_mock_cluster (int argc, char **argv) { rkt = test_create_producer_topic(p, topic, NULL); /* Produce */ - test_produce_msgs(p, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, - NULL, 0); + test_produce_msgs(p, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, NULL, 0); /* Produce tiny messages */ - test_produce_msgs(p, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, - "hello", 5); + test_produce_msgs(p, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, "hello", + 5); rd_kafka_topic_destroy(rkt); diff --git a/tests/0011-produce_batch.c b/tests/0011-produce_batch.c index ed5e776f6f..584d37bc63 100644 --- a/tests/0011-produce_batch.c +++ b/tests/0011-produce_batch.c @@ -35,23 +35,27 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ -static int msgid_next = 0; -static int fails = 0; -static int msgcounter = 0; -static int *dr_partition_count = NULL; -static const int topic_num_partitions = 4; -static int msg_partition_wo_flag = 2; +static int msgid_next = 0; +static int fails = 0; +static int msgcounter = 0; +static int *dr_partition_count = NULL; +static const int topic_num_partitions = 4; +static int msg_partition_wo_flag = 2; static int msg_partition_wo_flag_success = 0; /** * Delivery reported callback. * Called for each message once to signal its delivery status. */ -static void dr_single_partition_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { +static void dr_single_partition_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { int msgid = *(int *)msg_opaque; free(msg_opaque); @@ -62,17 +66,16 @@ static void dr_single_partition_cb (rd_kafka_t *rk, void *payload, size_t len, if (msgid != msgid_next) { fails++; - TEST_FAIL("Delivered msg %i, expected %i\n", - msgid, msgid_next); + TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next); return; } - msgid_next = msgid+1; + msgid_next = msgid + 1; msgcounter--; } /* Produce a batch of messages to a single partition. */ -static void test_single_partition (void) { +static void test_single_partition(void) { int partition = 0; int r; rd_kafka_t *rk; @@ -80,7 +83,7 @@ static void test_single_partition (void) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char msg[128]; - int msgcnt = test_quick ? 100 : 100000; + int msgcnt = test_quick ? 
100 : 100000; int failcnt = 0; int i; rd_kafka_message_t *rkmessages; @@ -98,23 +101,21 @@ static void test_single_partition (void) { TEST_SAY("test_single_partition: Created kafka instance %s\n", rd_kafka_name(rk)); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), - topic_conf); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf); if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Create messages */ rkmessages = calloc(sizeof(*rkmessages), msgcnt); - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; + *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i", - __FILE__, __FUNCTION__, i); + __FILE__, __FUNCTION__, i); - rkmessages[i].payload = rd_strdup(msg); - rkmessages[i].len = strlen(msg); - rkmessages[i]._private = msgidp; + rkmessages[i].payload = rd_strdup(msg); + rkmessages[i].len = strlen(msg); + rkmessages[i]._private = msgidp; rkmessages[i].partition = 2; /* Will be ignored since * RD_KAFKA_MSG_F_PARTITION * is not supplied. */ @@ -124,30 +125,34 @@ static void test_single_partition (void) { rkmessages, msgcnt); /* Scan through messages to check for errors. */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { if (rkmessages[i].err) { failcnt++; if (failcnt < 100) - TEST_SAY("Message #%i failed: %s\n", - i, + TEST_SAY("Message #%i failed: %s\n", i, rd_kafka_err2str(rkmessages[i].err)); } } /* All messages should've been produced. 
*/ if (r < msgcnt) { - TEST_SAY("Not all messages were accepted " - "by produce_batch(): %i < %i\n", r, msgcnt); + TEST_SAY( + "Not all messages were accepted " + "by produce_batch(): %i < %i\n", + r, msgcnt); if (msgcnt - r != failcnt) - TEST_SAY("Discrepency between failed messages (%i) " - "and return value %i (%i - %i)\n", - failcnt, msgcnt - r, msgcnt, r); + TEST_SAY( + "Discrepency between failed messages (%i) " + "and return value %i (%i - %i)\n", + failcnt, msgcnt - r, msgcnt, r); TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt); } free(rkmessages); - TEST_SAY("Single partition: " - "Produced %i messages, waiting for deliveries\n", r); + TEST_SAY( + "Single partition: " + "Produced %i messages, waiting for deliveries\n", + r); msgcounter = msgcnt; @@ -177,8 +182,12 @@ static void test_single_partition (void) { * Delivery reported callback. * Called for each message once to signal its delivery status. */ -static void dr_partitioner_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { +static void dr_partitioner_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { int msgid = *(int *)msg_opaque; free(msg_opaque); @@ -188,13 +197,15 @@ static void dr_partitioner_cb (rd_kafka_t *rk, void *payload, size_t len, rd_kafka_err2str(err)); if (msgcounter <= 0) - TEST_FAIL("Too many message dr_cb callback calls " - "(at msgid #%i)\n", msgid); + TEST_FAIL( + "Too many message dr_cb callback calls " + "(at msgid #%i)\n", + msgid); msgcounter--; } /* Produce a batch of messages using random (default) partitioner */ -static void test_partitioner (void) { +static void test_partitioner(void) { int partition = RD_KAFKA_PARTITION_UA; int r; rd_kafka_t *rk; @@ -202,7 +213,7 @@ static void test_partitioner (void) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char msg[128]; - int msgcnt = test_quick ? 100 : 100000; + int msgcnt = test_quick ? 
100 : 100000; int failcnt = 0; int i; rd_kafka_message_t *rkmessages; @@ -218,22 +229,20 @@ static void test_partitioner (void) { TEST_SAY("test_partitioner: Created kafka instance %s\n", rd_kafka_name(rk)); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), - topic_conf); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf); if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Create messages */ rkmessages = calloc(sizeof(*rkmessages), msgcnt); - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; + *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i", - __FILE__, __FUNCTION__, i); + __FILE__, __FUNCTION__, i); - rkmessages[i].payload = rd_strdup(msg); - rkmessages[i].len = strlen(msg); + rkmessages[i].payload = rd_strdup(msg); + rkmessages[i].len = strlen(msg); rkmessages[i]._private = msgidp; } @@ -241,30 +250,34 @@ static void test_partitioner (void) { rkmessages, msgcnt); /* Scan through messages to check for errors. */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { if (rkmessages[i].err) { failcnt++; if (failcnt < 100) - TEST_SAY("Message #%i failed: %s\n", - i, + TEST_SAY("Message #%i failed: %s\n", i, rd_kafka_err2str(rkmessages[i].err)); } } /* All messages should've been produced. 
*/ if (r < msgcnt) { - TEST_SAY("Not all messages were accepted " - "by produce_batch(): %i < %i\n", r, msgcnt); + TEST_SAY( + "Not all messages were accepted " + "by produce_batch(): %i < %i\n", + r, msgcnt); if (msgcnt - r != failcnt) - TEST_SAY("Discrepency between failed messages (%i) " - "and return value %i (%i - %i)\n", - failcnt, msgcnt - r, msgcnt, r); + TEST_SAY( + "Discrepency between failed messages (%i) " + "and return value %i (%i - %i)\n", + failcnt, msgcnt - r, msgcnt, r); TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt); } free(rkmessages); - TEST_SAY("Partitioner: " - "Produced %i messages, waiting for deliveries\n", r); + TEST_SAY( + "Partitioner: " + "Produced %i messages, waiting for deliveries\n", + r); msgcounter = msgcnt; /* Wait for messages to be delivered */ @@ -274,8 +287,8 @@ static void test_partitioner (void) { TEST_FAIL("%i failures, see previous errors", fails); if (msgcounter != 0) - TEST_FAIL("Still waiting for %i/%i messages\n", - msgcounter, msgcnt); + TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter, + msgcnt); /* Destroy topic */ rd_kafka_topic_destroy(rkt); @@ -287,20 +300,21 @@ static void test_partitioner (void) { return; } -static void -dr_per_message_partition_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, - void *opaque) { +static void dr_per_message_partition_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { free(rkmessage->_private); if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(rkmessage->err)); + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(rkmessage->err)); if (msgcounter <= 0) - TEST_FAIL("Too many message dr_cb callback calls " - "(at msg offset #%"PRId64")\n", rkmessage->offset); + TEST_FAIL( + "Too many message dr_cb callback calls " + "(at msg offset #%" PRId64 ")\n", + rkmessage->offset); TEST_ASSERT(rkmessage->partition < topic_num_partitions); msgcounter--; @@ -309,7 
+323,7 @@ dr_per_message_partition_cb (rd_kafka_t *rk, } /* Produce a batch of messages using with per message partition flag */ -static void test_per_message_partition_flag (void) { +static void test_per_message_partition_flag(void) { int partition = 0; int r; rd_kafka_t *rk; @@ -317,7 +331,7 @@ static void test_per_message_partition_flag (void) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char msg[128 + sizeof(__FILE__) + sizeof(__FUNCTION__)]; - int msgcnt = test_quick ? 100 : 1000; + int msgcnt = test_quick ? 100 : 1000; int failcnt = 0; int i; int *rkpartition_counts; @@ -337,75 +351,77 @@ static void test_per_message_partition_flag (void) { topic_name = test_mk_topic_name("0011_per_message_flag", 1); test_create_topic(rk, topic_name, topic_num_partitions, 1); - rkt = rd_kafka_topic_new(rk, topic_name, - topic_conf); + rkt = rd_kafka_topic_new(rk, topic_name, topic_conf); if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Create messages */ rkpartition_counts = calloc(sizeof(int), topic_num_partitions); dr_partition_count = calloc(sizeof(int), topic_num_partitions); - rkmessages = calloc(sizeof(*rkmessages), msgcnt); - for (i = 0 ; i < msgcnt ; i++) { + rkmessages = calloc(sizeof(*rkmessages), msgcnt); + for (i = 0; i < msgcnt; i++) { int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; + *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i", __FILE__, __FUNCTION__, i); - rkmessages[i].payload = rd_strdup(msg); - rkmessages[i].len = strlen(msg); - rkmessages[i]._private = msgidp; + rkmessages[i].payload = rd_strdup(msg); + rkmessages[i].len = strlen(msg); + rkmessages[i]._private = msgidp; rkmessages[i].partition = jitter(0, topic_num_partitions - 1); rkpartition_counts[rkmessages[i].partition]++; } - r = rd_kafka_produce_batch(rkt, partition, - RD_KAFKA_MSG_F_PARTITION|RD_KAFKA_MSG_F_FREE, - rkmessages, msgcnt); + r = 
rd_kafka_produce_batch( + rkt, partition, RD_KAFKA_MSG_F_PARTITION | RD_KAFKA_MSG_F_FREE, + rkmessages, msgcnt); /* Scan through messages to check for errors. */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { if (rkmessages[i].err) { failcnt++; if (failcnt < 100) - TEST_SAY("Message #%i failed: %s\n", - i, + TEST_SAY("Message #%i failed: %s\n", i, rd_kafka_err2str(rkmessages[i].err)); } } /* All messages should've been produced. */ if (r < msgcnt) { - TEST_SAY("Not all messages were accepted " - "by produce_batch(): %i < %i\n", r, msgcnt); + TEST_SAY( + "Not all messages were accepted " + "by produce_batch(): %i < %i\n", + r, msgcnt); if (msgcnt - r != failcnt) - TEST_SAY("Discrepency between failed messages (%i) " - "and return value %i (%i - %i)\n", - failcnt, msgcnt - r, msgcnt, r); + TEST_SAY( + "Discrepency between failed messages (%i) " + "and return value %i (%i - %i)\n", + failcnt, msgcnt - r, msgcnt, r); TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt); } free(rkmessages); - TEST_SAY("Per-message partition: " - "Produced %i messages, waiting for deliveries\n", r); + TEST_SAY( + "Per-message partition: " + "Produced %i messages, waiting for deliveries\n", + r); msgcounter = msgcnt; /* Wait for messages to be delivered */ test_wait_delivery(rk, &msgcounter); if (msgcounter != 0) - TEST_FAIL("Still waiting for %i/%i messages\n", - msgcounter, msgcnt); + TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter, + msgcnt); for (i = 0; i < topic_num_partitions; i++) { if (dr_partition_count[i] != rkpartition_counts[i]) { - TEST_FAIL("messages were not sent to designated " - "partitions expected messages %i in " - "partition %i, but only " - "%i messages were sent", - rkpartition_counts[i], - i, dr_partition_count[i]); + TEST_FAIL( + "messages were not sent to designated " + "partitions expected messages %i in " + "partition %i, but only " + "%i messages were sent", + rkpartition_counts[i], i, dr_partition_count[i]); } } @@ -423,17 
+439,19 @@ static void test_per_message_partition_flag (void) { } static void -dr_partitioner_wo_per_message_flag_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, - void *opaque) { +dr_partitioner_wo_per_message_flag_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { free(rkmessage->_private); if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR) TEST_FAIL("Message delivery failed: %s\n", rd_kafka_err2str(rkmessage->err)); if (msgcounter <= 0) - TEST_FAIL("Too many message dr_cb callback calls " - "(at msg offset #%"PRId64")\n", rkmessage->offset); + TEST_FAIL( + "Too many message dr_cb callback calls " + "(at msg offset #%" PRId64 ")\n", + rkmessage->offset); if (rkmessage->partition != msg_partition_wo_flag) msg_partition_wo_flag_success = 1; msgcounter--; @@ -443,7 +461,7 @@ dr_partitioner_wo_per_message_flag_cb (rd_kafka_t *rk, * @brief Produce a batch of messages using partitioner * without per message partition flag */ -static void test_message_partitioner_wo_per_message_flag (void) { +static void test_message_partitioner_wo_per_message_flag(void) { int partition = RD_KAFKA_PARTITION_UA; int r; rd_kafka_t *rk; @@ -451,7 +469,7 @@ static void test_message_partitioner_wo_per_message_flag (void) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char msg[128 + sizeof(__FILE__) + sizeof(__FUNCTION__)]; - int msgcnt = test_quick ? 100 : 1000; + int msgcnt = test_quick ? 
100 : 1000; int failcnt = 0; int i; rd_kafka_message_t *rkmessages; @@ -469,23 +487,21 @@ static void test_message_partitioner_wo_per_message_flag (void) { TEST_SAY("test_partitioner: Created kafka instance %s\n", rd_kafka_name(rk)); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), - topic_conf); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf); if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Create messages */ rkmessages = calloc(sizeof(*rkmessages), msgcnt); - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; + *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i", __FILE__, __FUNCTION__, i); - rkmessages[i].payload = rd_strdup(msg); - rkmessages[i].len = strlen(msg); - rkmessages[i]._private = msgidp; + rkmessages[i].payload = rd_strdup(msg); + rkmessages[i].len = strlen(msg); + rkmessages[i]._private = msgidp; rkmessages[i].partition = msg_partition_wo_flag; } @@ -493,30 +509,34 @@ static void test_message_partitioner_wo_per_message_flag (void) { rkmessages, msgcnt); /* Scan through messages to check for errors. */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { if (rkmessages[i].err) { failcnt++; if (failcnt < 100) - TEST_SAY("Message #%i failed: %s\n", - i, + TEST_SAY("Message #%i failed: %s\n", i, rd_kafka_err2str(rkmessages[i].err)); } } /* All messages should've been produced. 
*/ if (r < msgcnt) { - TEST_SAY("Not all messages were accepted " - "by produce_batch(): %i < %i\n", r, msgcnt); + TEST_SAY( + "Not all messages were accepted " + "by produce_batch(): %i < %i\n", + r, msgcnt); if (msgcnt - r != failcnt) - TEST_SAY("Discrepency between failed messages (%i) " - "and return value %i (%i - %i)\n", - failcnt, msgcnt - r, msgcnt, r); + TEST_SAY( + "Discrepency between failed messages (%i) " + "and return value %i (%i - %i)\n", + failcnt, msgcnt - r, msgcnt, r); TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt); } free(rkmessages); - TEST_SAY("Partitioner: " - "Produced %i messages, waiting for deliveries\n", r); + TEST_SAY( + "Partitioner: " + "Produced %i messages, waiting for deliveries\n", + r); msgcounter = msgcnt; /* Wait for messages to be delivered */ @@ -526,11 +546,13 @@ static void test_message_partitioner_wo_per_message_flag (void) { TEST_FAIL("%i failures, see previous errors", fails); if (msgcounter != 0) - TEST_FAIL("Still waiting for %i/%i messages\n", - msgcounter, msgcnt); + TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter, + msgcnt); if (msg_partition_wo_flag_success == 0) { - TEST_FAIL("partitioner was not used, all messages were sent to " - "message specified partition %i", i); + TEST_FAIL( + "partitioner was not used, all messages were sent to " + "message specified partition %i", + i); } /* Destroy topic */ @@ -544,7 +566,7 @@ static void test_message_partitioner_wo_per_message_flag (void) { } -int main_0011_produce_batch (int argc, char **argv) { +int main_0011_produce_batch(int argc, char **argv) { test_message_partitioner_wo_per_message_flag(); test_single_partition(); test_partitioner(); diff --git a/tests/0012-produce_consume.c b/tests/0012-produce_consume.c index 187071c01e..30ff392c42 100644 --- a/tests/0012-produce_consume.c +++ b/tests/0012-produce_consume.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -36,429 +36,452 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int prod_msg_remains = 0; -static int fails = 0; +static int fails = 0; /** * Delivery reported callback. * Called for each message once to signal its delivery status. */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - - if (err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(err)); - - if (prod_msg_remains == 0) - TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", - prod_msg_remains); - - prod_msg_remains--; +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(err)); + + if (prod_msg_remains == 0) + TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", + prod_msg_remains); + + prod_msg_remains--; } /** * Produces 'msgcnt' messages split over 'partition_cnt' partitions. 
*/ -static void produce_messages (uint64_t testid, const char *topic, - int partition_cnt, int msgcnt) { - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - char msg[128]; - int failcnt = 0; - int i; +static void produce_messages(uint64_t testid, + const char *topic, + int partition_cnt, + int msgcnt) { + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + char msg[128]; + int failcnt = 0; + int i; rd_kafka_message_t *rkmessages; - int32_t partition; - int msgid = 0; + int32_t partition; + int msgid = 0; - test_conf_init(&conf, &topic_conf, 20); + test_conf_init(&conf, &topic_conf, 20); - rd_kafka_conf_set_dr_cb(conf, dr_cb); + rd_kafka_conf_set_dr_cb(conf, dr_cb); /* Make sure all replicas are in-sync after producing * so that consume test wont fail. */ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1", errstr, sizeof(errstr)); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Create messages. 
*/ - prod_msg_remains = msgcnt; - rkmessages = calloc(sizeof(*rkmessages), msgcnt / partition_cnt); - for (partition = 0 ; partition < partition_cnt ; partition++) { - int batch_cnt = msgcnt / partition_cnt; - - for (i = 0 ; i < batch_cnt ; i++) { - rd_snprintf(msg, sizeof(msg), - "testid=%"PRIu64", partition=%i, msg=%i", - testid, (int)partition, msgid); - rkmessages[i].payload = rd_strdup(msg); - rkmessages[i].len = strlen(msg); - msgid++; - } - - TEST_SAY("Start produce to partition %i: msgs #%d..%d\n", - (int)partition, msgid-batch_cnt, msgid); - /* Produce batch for this partition */ - r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE, - rkmessages, batch_cnt); - if (r == -1) - TEST_FAIL("Failed to produce " - "batch for partition %i: %s", - (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - - /* Scan through messages to check for errors. */ - for (i = 0 ; i < batch_cnt ; i++) { - if (rkmessages[i].err) { - failcnt++; - if (failcnt < 100) - TEST_SAY("Message #%i failed: %s\n", - i, - rd_kafka_err2str(rkmessages[i]. - err)); - } - } - - /* All messages should've been produced. 
*/ - if (r < batch_cnt) { - TEST_SAY("Not all messages were accepted " - "by produce_batch(): %i < %i\n", r, batch_cnt); - - if (batch_cnt - r != failcnt) - TEST_SAY("Discrepency between failed " - "messages (%i) " - "and return value %i (%i - %i)\n", - failcnt, batch_cnt - r, batch_cnt, r); - TEST_FAIL("%i/%i messages failed\n", - batch_cnt - r, batch_cnt); - } - - TEST_SAY("Produced %i messages to partition %i, " - "waiting for deliveries\n", r, partition); - } + prod_msg_remains = msgcnt; + rkmessages = calloc(sizeof(*rkmessages), msgcnt / partition_cnt); + for (partition = 0; partition < partition_cnt; partition++) { + int batch_cnt = msgcnt / partition_cnt; + + for (i = 0; i < batch_cnt; i++) { + rd_snprintf(msg, sizeof(msg), + "testid=%" PRIu64 ", partition=%i, msg=%i", + testid, (int)partition, msgid); + rkmessages[i].payload = rd_strdup(msg); + rkmessages[i].len = strlen(msg); + msgid++; + } + + TEST_SAY("Start produce to partition %i: msgs #%d..%d\n", + (int)partition, msgid - batch_cnt, msgid); + /* Produce batch for this partition */ + r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE, + rkmessages, batch_cnt); + if (r == -1) + TEST_FAIL( + "Failed to produce " + "batch for partition %i: %s", + (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + + /* Scan through messages to check for errors. */ + for (i = 0; i < batch_cnt; i++) { + if (rkmessages[i].err) { + failcnt++; + if (failcnt < 100) + TEST_SAY("Message #%i failed: %s\n", i, + rd_kafka_err2str( + rkmessages[i].err)); + } + } + + /* All messages should've been produced. 
*/ + if (r < batch_cnt) { + TEST_SAY( + "Not all messages were accepted " + "by produce_batch(): %i < %i\n", + r, batch_cnt); + + if (batch_cnt - r != failcnt) + TEST_SAY( + "Discrepency between failed " + "messages (%i) " + "and return value %i (%i - %i)\n", + failcnt, batch_cnt - r, batch_cnt, r); + TEST_FAIL("%i/%i messages failed\n", batch_cnt - r, + batch_cnt); + } + + TEST_SAY( + "Produced %i messages to partition %i, " + "waiting for deliveries\n", + r, partition); + } free(rkmessages); - /* Wait for messages to be delivered */ - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 100); + /* Wait for messages to be delivered */ + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 100); - if (fails) - TEST_FAIL("%i failures, see previous errors", fails); + if (fails) + TEST_FAIL("%i failures, see previous errors", fails); - if (prod_msg_remains != 0) - TEST_FAIL("Still waiting for %i messages to be produced", - prod_msg_remains); + if (prod_msg_remains != 0) + TEST_FAIL("Still waiting for %i messages to be produced", + prod_msg_remains); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } static int *cons_msgs; -static int cons_msgs_size; -static int cons_msgs_cnt; - -static void verify_consumed_msg_reset (int msgcnt) { - TEST_SAY("Resetting consumed_msgs (msgcnt %d)\n", msgcnt); - if (cons_msgs) { - free(cons_msgs); - cons_msgs = NULL; - } - - if (msgcnt) { - int i; - - cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); - for (i = 0 ; i < msgcnt ; i++) - cons_msgs[i] = -1; - } - - cons_msgs_size = msgcnt; - cons_msgs_cnt = 0; +static int cons_msgs_size; +static int cons_msgs_cnt; + +static void verify_consumed_msg_reset(int msgcnt) { + TEST_SAY("Resetting 
consumed_msgs (msgcnt %d)\n", msgcnt); + if (cons_msgs) { + free(cons_msgs); + cons_msgs = NULL; + } + + if (msgcnt) { + int i; + + cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); + for (i = 0; i < msgcnt; i++) + cons_msgs[i] = -1; + } + + cons_msgs_size = msgcnt; + cons_msgs_cnt = 0; } -static int int_cmp (const void *_a, const void *_b) { - int a = *(int *)_a; - int b = *(int *)_b; - return RD_CMP(a, b); +static int int_cmp(const void *_a, const void *_b) { + int a = *(int *)_a; + int b = *(int *)_b; + return RD_CMP(a, b); } -static void verify_consumed_msg_check0 (const char *func, int line) { - int i; - int fails = 0; - - if (cons_msgs_cnt < cons_msgs_size) { - TEST_SAY("Missing %i messages in consumer\n", - cons_msgs_size - cons_msgs_cnt); - fails++; - } - - qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); - - for (i = 0 ; i < cons_msgs_size ; i++) { - if (cons_msgs[i] != i) { - TEST_SAY("Consumed message #%i is wrong, " - "expected #%i\n", - cons_msgs[i], i); - fails++; - } - } +static void verify_consumed_msg_check0(const char *func, int line) { + int i; + int fails = 0; + + if (cons_msgs_cnt < cons_msgs_size) { + TEST_SAY("Missing %i messages in consumer\n", + cons_msgs_size - cons_msgs_cnt); + fails++; + } + + qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); + + for (i = 0; i < cons_msgs_size; i++) { + if (cons_msgs[i] != i) { + TEST_SAY( + "Consumed message #%i is wrong, " + "expected #%i\n", + cons_msgs[i], i); + fails++; + } + } - if (fails) - TEST_FAIL("See above error(s)"); + if (fails) + TEST_FAIL("See above error(s)"); - verify_consumed_msg_reset(0); + verify_consumed_msg_reset(0); } -#define verify_consumed_msg_check() \ - verify_consumed_msg_check0(__FUNCTION__,__LINE__) - - - -static void verify_consumed_msg0 (const char *func, int line, - uint64_t testid, int32_t partition, - int msgnum, - rd_kafka_message_t *rkmessage) { - uint64_t in_testid; - int in_part; - int in_msgnum; - char buf[1024]; - - if (rkmessage->len 
+1 >= sizeof(buf)) - TEST_FAIL("Incoming message too large (%i): " - "not sourced by this test", - (int)rkmessage->len); - - rd_snprintf(buf, sizeof(buf), "%.*s", - (int)rkmessage->len, (char *)rkmessage->payload); - - if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i", - &in_testid, &in_part, &in_msgnum) != 3) - TEST_FAIL("Incorrect message format: %s", buf); - - if (test_level > 2) { - TEST_SAY("%s:%i: Our testid %"PRIu64", part %i =? %i, " - "msg %i =? %i " - ", message's: \"%s\"\n", - func, line, - testid, (int)partition, (int)rkmessage->partition, - msgnum, in_msgnum, buf); - } - - if (testid != in_testid || - (partition != -1 && partition != in_part) || - (msgnum != -1 && msgnum != in_msgnum) || - (in_msgnum < 0 || in_msgnum > cons_msgs_size)) - goto fail_match; - - if (cons_msgs_cnt == cons_msgs_size) { - TEST_SAY("Too many messages in cons_msgs (%i) while reading " - "message \"%s\"\n", - cons_msgs_cnt, buf); - verify_consumed_msg_check(); - TEST_FAIL("See above error(s)"); - } - - cons_msgs[cons_msgs_cnt++] = in_msgnum; - - return; - - fail_match: - TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i/%i did " - "not match message's: \"%s\"\n", - func, line, - testid, (int)partition, msgnum, cons_msgs_size, buf); +#define verify_consumed_msg_check() \ + verify_consumed_msg_check0(__FUNCTION__, __LINE__) + + + +static void verify_consumed_msg0(const char *func, + int line, + uint64_t testid, + int32_t partition, + int msgnum, + rd_kafka_message_t *rkmessage) { + uint64_t in_testid; + int in_part; + int in_msgnum; + char buf[1024]; + + if (rkmessage->len + 1 >= sizeof(buf)) + TEST_FAIL( + "Incoming message too large (%i): " + "not sourced by this test", + (int)rkmessage->len); + + rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->len, + (char *)rkmessage->payload); + + if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid, + &in_part, &in_msgnum) != 3) + TEST_FAIL("Incorrect message format: %s", buf); + + if (test_level > 2) { + 
TEST_SAY("%s:%i: Our testid %" PRIu64 + ", part %i =? %i, " + "msg %i =? %i " + ", message's: \"%s\"\n", + func, line, testid, (int)partition, + (int)rkmessage->partition, msgnum, in_msgnum, buf); + } + + if (testid != in_testid || (partition != -1 && partition != in_part) || + (msgnum != -1 && msgnum != in_msgnum) || + (in_msgnum < 0 || in_msgnum > cons_msgs_size)) + goto fail_match; + + if (cons_msgs_cnt == cons_msgs_size) { + TEST_SAY( + "Too many messages in cons_msgs (%i) while reading " + "message \"%s\"\n", + cons_msgs_cnt, buf); + verify_consumed_msg_check(); + TEST_FAIL("See above error(s)"); + } + + cons_msgs[cons_msgs_cnt++] = in_msgnum; + + return; + +fail_match: + TEST_FAIL("%s:%i: Our testid %" PRIu64 + ", part %i, msg %i/%i did " + "not match message's: \"%s\"\n", + func, line, testid, (int)partition, msgnum, cons_msgs_size, + buf); } -#define verify_consumed_msg(testid,part,msgnum,rkmessage) \ - verify_consumed_msg0(__FUNCTION__,__LINE__,testid,part,msgnum,rkmessage) - - -static void consume_messages (uint64_t testid, const char *topic, - int32_t partition, int msg_base, int batch_cnt, - int msgcnt) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - int i; - - test_conf_init(&conf, &topic_conf, 20); - - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - - TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk)); - - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); - - TEST_SAY("Consuming %i messages from partition %i\n", - batch_cnt, partition); - - /* Consume messages */ - if (rd_kafka_consume_start(rkt, partition, - RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1) - TEST_FAIL("consume_start(%i, -%i) failed: %s", - (int)partition, batch_cnt, - rd_kafka_err2str(rd_kafka_last_error())); - - for (i = 0 ; i < batch_cnt ; ) { - rd_kafka_message_t *rkmessage; - - rkmessage = rd_kafka_consume(rkt, 
partition, - tmout_multip(5000)); - if (!rkmessage) - TEST_FAIL("Failed to consume message %i/%i from " - "partition %i: %s", - i, batch_cnt, (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF){ +#define verify_consumed_msg(testid, part, msgnum, rkmessage) \ + verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \ + rkmessage) + + +static void consume_messages(uint64_t testid, + const char *topic, + int32_t partition, + int msg_base, + int batch_cnt, + int msgcnt) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + int i; + + test_conf_init(&conf, &topic_conf, 20); + + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk)); + + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); + + TEST_SAY("Consuming %i messages from partition %i\n", batch_cnt, + partition); + + /* Consume messages */ + if (rd_kafka_consume_start(rkt, partition, + RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1) + TEST_FAIL("consume_start(%i, -%i) failed: %s", (int)partition, + batch_cnt, rd_kafka_err2str(rd_kafka_last_error())); + + for (i = 0; i < batch_cnt;) { + rd_kafka_message_t *rkmessage; + + rkmessage = + rd_kafka_consume(rkt, partition, tmout_multip(5000)); + if (!rkmessage) + TEST_FAIL( + "Failed to consume message %i/%i from " + "partition %i: %s", + i, batch_cnt, (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + if (rkmessage->err) { + if (rkmessage->err == + RD_KAFKA_RESP_ERR__PARTITION_EOF) { rd_kafka_message_destroy(rkmessage); continue; } - TEST_FAIL("Consume message %i/%i from partition %i " - "has error: %s: %s", - i, batch_cnt, (int)partition, - rd_kafka_err2str(rkmessage->err), - rd_kafka_message_errstr(rkmessage)); + TEST_FAIL( + "Consume message %i/%i 
from partition %i " + "has error: %s: %s", + i, batch_cnt, (int)partition, + rd_kafka_err2str(rkmessage->err), + rd_kafka_message_errstr(rkmessage)); } - verify_consumed_msg(testid, partition, msg_base+i, rkmessage); + verify_consumed_msg(testid, partition, msg_base + i, rkmessage); - rd_kafka_message_destroy(rkmessage); + rd_kafka_message_destroy(rkmessage); i++; - } + } - rd_kafka_consume_stop(rkt, partition); + rd_kafka_consume_stop(rkt, partition); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } -static void consume_messages_with_queues (uint64_t testid, const char *topic, - int partition_cnt, int msgcnt) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - rd_kafka_queue_t *rkqu; - int i; - int32_t partition; - int batch_cnt = msgcnt / partition_cnt; +static void consume_messages_with_queues(uint64_t testid, + const char *topic, + int partition_cnt, + int msgcnt) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + rd_kafka_queue_t *rkqu; + int i; + int32_t partition; + int batch_cnt = msgcnt / partition_cnt; - test_conf_init(&conf, &topic_conf, 20); + test_conf_init(&conf, &topic_conf, 20); test_conf_set(conf, "enable.partition.eof", "true"); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - - /* Create queue */ - rkqu = rd_kafka_queue_new(rk); - - - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); - - TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", - msgcnt, partition_cnt); - - /* Start consuming each partition */ 
- for (partition = 0 ; partition < partition_cnt ; partition++) { - /* Consume messages */ - TEST_SAY("Start consuming partition %i at offset -%i\n", - partition, batch_cnt); - if (rd_kafka_consume_start_queue(rkt, partition, - RD_KAFKA_OFFSET_TAIL(batch_cnt), - rkqu) == -1) - TEST_FAIL("consume_start_queue(%i) failed: %s", - (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - } - - - /* Consume messages from queue */ - for (i = 0 ; i < msgcnt ; ) { - rd_kafka_message_t *rkmessage; - - rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000)); - if (!rkmessage) - TEST_FAIL("Failed to consume message %i/%i from " - "queue: %s", - i, msgcnt, - rd_kafka_err2str(rd_kafka_last_error())); - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF){ - TEST_SAY("Topic %s [%"PRId32"] reached " - "EOF at offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + /* Create queue */ + rkqu = rd_kafka_queue_new(rk); + + + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); + + TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", + msgcnt, partition_cnt); + + /* Start consuming each partition */ + for (partition = 0; partition < partition_cnt; partition++) { + /* Consume messages */ + TEST_SAY("Start consuming partition %i at offset -%i\n", + partition, batch_cnt); + if (rd_kafka_consume_start_queue( + rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt), + rkqu) == -1) + TEST_FAIL("consume_start_queue(%i) failed: %s", + (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + } + + + /* Consume messages from queue */ + for (i = 0; i < msgcnt;) { + rd_kafka_message_t *rkmessage; + + rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000)); + if (!rkmessage) + TEST_FAIL( + "Failed to consume message %i/%i from " + 
"queue: %s", + i, msgcnt, rd_kafka_err2str(rd_kafka_last_error())); + if (rkmessage->err) { + if (rkmessage->err == + RD_KAFKA_RESP_ERR__PARTITION_EOF) { + TEST_SAY("Topic %s [%" PRId32 + "] reached " + "EOF at offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, + rkmessage->offset); rd_kafka_message_destroy(rkmessage); - continue; + continue; } - TEST_FAIL("Consume message %i/%i from queue " - "has error (offset %"PRId64 - ", partition %"PRId32"): %s", - i, msgcnt, - rkmessage->offset, rkmessage->partition, - rd_kafka_err2str(rkmessage->err)); + TEST_FAIL( + "Consume message %i/%i from queue " + "has error (offset %" PRId64 ", partition %" PRId32 + "): %s", + i, msgcnt, rkmessage->offset, rkmessage->partition, + rd_kafka_err2str(rkmessage->err)); } - verify_consumed_msg(testid, -1, -1, rkmessage); + verify_consumed_msg(testid, -1, -1, rkmessage); - rd_kafka_message_destroy(rkmessage); + rd_kafka_message_destroy(rkmessage); i++; - } + } - /* Stop consuming each partition */ - for (partition = 0 ; partition < partition_cnt ; partition++) - rd_kafka_consume_stop(rkt, partition); + /* Stop consuming each partition */ + for (partition = 0; partition < partition_cnt; partition++) + rd_kafka_consume_stop(rkt, partition); - /* Destroy queue */ - rd_kafka_queue_destroy(rkqu); + /* Destroy queue */ + rd_kafka_queue_destroy(rkqu); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } @@ -467,49 +490,48 @@ static void consume_messages_with_queues (uint64_t testid, const char *topic, * Consume with standard interface from both, one after the other. * Consume with queue interface from both, simultanously. 
*/ -static void test_produce_consume (void) { - int msgcnt = test_quick ? 100 : 1000; - int partition_cnt = 2; - int i; - uint64_t testid; - int msg_base = 0; +static void test_produce_consume(void) { + int msgcnt = test_quick ? 100 : 1000; + int partition_cnt = 2; + int i; + uint64_t testid; + int msg_base = 0; const char *topic; - /* Generate a testid so we can differentiate messages - * from other tests */ - testid = test_id_generate(); + /* Generate a testid so we can differentiate messages + * from other tests */ + testid = test_id_generate(); /* Read test.conf to configure topic name */ test_conf_init(NULL, NULL, 20); topic = test_mk_topic_name("0012", 1); - TEST_SAY("Topic %s, testid %"PRIu64"\n", topic, testid); + TEST_SAY("Topic %s, testid %" PRIu64 "\n", topic, testid); - /* Produce messages */ - produce_messages(testid, topic, partition_cnt, msgcnt); + /* Produce messages */ + produce_messages(testid, topic, partition_cnt, msgcnt); - /* Consume messages with standard interface */ - verify_consumed_msg_reset(msgcnt); - for (i = 0 ; i < partition_cnt ; i++) { - consume_messages(testid, topic, i, - msg_base, msgcnt / partition_cnt, msgcnt); - msg_base += msgcnt / partition_cnt; - } - verify_consumed_msg_check(); + /* Consume messages with standard interface */ + verify_consumed_msg_reset(msgcnt); + for (i = 0; i < partition_cnt; i++) { + consume_messages(testid, topic, i, msg_base, + msgcnt / partition_cnt, msgcnt); + msg_base += msgcnt / partition_cnt; + } + verify_consumed_msg_check(); - /* Consume messages with queue interface */ - verify_consumed_msg_reset(msgcnt); - consume_messages_with_queues(testid, topic, partition_cnt, msgcnt); - verify_consumed_msg_check(); + /* Consume messages with queue interface */ + verify_consumed_msg_reset(msgcnt); + consume_messages_with_queues(testid, topic, partition_cnt, msgcnt); + verify_consumed_msg_check(); - return; + return; } - -int main_0012_produce_consume (int argc, char **argv) { - test_produce_consume(); - 
return 0; +int main_0012_produce_consume(int argc, char **argv) { + test_produce_consume(); + return 0; } diff --git a/tests/0013-null-msgs.c b/tests/0013-null-msgs.c index 12a3b61af4..26a7ac070d 100644 --- a/tests/0013-null-msgs.c +++ b/tests/0013-null-msgs.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -34,420 +34,440 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int prod_msg_remains = 0; -static int fails = 0; +static int fails = 0; /** * Delivery reported callback. * Called for each message once to signal its delivery status. */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - - if (err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(err)); - - if (prod_msg_remains == 0) - TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", - prod_msg_remains); - - prod_msg_remains--; +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(err)); + + if (prod_msg_remains == 0) + TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", + prod_msg_remains); + + prod_msg_remains--; } /** * Produces 'msgcnt' messages split over 'partition_cnt' partitions. 
*/ -static void produce_null_messages (uint64_t testid, const char *topic, - int partition_cnt, int msgcnt) { - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - int i; - int32_t partition; - int msgid = 0; - - test_conf_init(&conf, &topic_conf, 20); - - rd_kafka_conf_set_dr_cb(conf, dr_cb); +static void produce_null_messages(uint64_t testid, + const char *topic, + int partition_cnt, + int msgcnt) { + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + int i; + int32_t partition; + int msgid = 0; + + test_conf_init(&conf, &topic_conf, 20); + + rd_kafka_conf_set_dr_cb(conf, dr_cb); /* Make sure all replicas are in-sync after producing * so that consume test wont fail. */ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1", errstr, sizeof(errstr)); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); /* Produce messages */ - prod_msg_remains = msgcnt; - for (partition = 0 ; partition < partition_cnt ; partition++) { - int batch_cnt = msgcnt / partition_cnt; + prod_msg_remains = msgcnt; + for (partition = 0; partition < partition_cnt; partition++) { + int batch_cnt = msgcnt / partition_cnt; - for (i = 0 ; i < batch_cnt ; i++) { + for (i = 0; i < batch_cnt; i++) { char key[128]; - rd_snprintf(key, sizeof(key), - "testid=%"PRIu64", partition=%i, msg=%i", - testid, (int)partition, msgid); - r = rd_kafka_produce(rkt, partition, 0, - NULL, 0, - key, strlen(key), - NULL); + rd_snprintf(key, 
sizeof(key), + "testid=%" PRIu64 ", partition=%i, msg=%i", + testid, (int)partition, msgid); + r = rd_kafka_produce(rkt, partition, 0, NULL, 0, key, + strlen(key), NULL); if (r == -1) - TEST_FAIL("Failed to produce message %i " - "to partition %i: %s", - msgid, (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - msgid++; - } + TEST_FAIL( + "Failed to produce message %i " + "to partition %i: %s", + msgid, (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + msgid++; + } } - TEST_SAY("Produced %d messages to %d partition(s), " - "waiting for deliveries\n", msgcnt, partition_cnt); - /* Wait for messages to be delivered */ - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 100); + TEST_SAY( + "Produced %d messages to %d partition(s), " + "waiting for deliveries\n", + msgcnt, partition_cnt); + /* Wait for messages to be delivered */ + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 100); - if (fails) - TEST_FAIL("%i failures, see previous errors", fails); + if (fails) + TEST_FAIL("%i failures, see previous errors", fails); - if (prod_msg_remains != 0) - TEST_FAIL("Still waiting for %i messages to be produced", - prod_msg_remains); + if (prod_msg_remains != 0) + TEST_FAIL("Still waiting for %i messages to be produced", + prod_msg_remains); else TEST_SAY("All messages delivered\n"); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } static int *cons_msgs; -static int cons_msgs_size; -static int cons_msgs_cnt; +static int cons_msgs_size; +static int cons_msgs_cnt; -static void verify_consumed_msg_reset (int msgcnt) { - if (cons_msgs) { - free(cons_msgs); - cons_msgs = NULL; - } +static void verify_consumed_msg_reset(int msgcnt) { + if (cons_msgs) { 
+ free(cons_msgs); + cons_msgs = NULL; + } - if (msgcnt) { - int i; + if (msgcnt) { + int i; - cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); - for (i = 0 ; i < msgcnt ; i++) - cons_msgs[i] = -1; - } + cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); + for (i = 0; i < msgcnt; i++) + cons_msgs[i] = -1; + } - cons_msgs_size = msgcnt; - cons_msgs_cnt = 0; + cons_msgs_size = msgcnt; + cons_msgs_cnt = 0; } -static int int_cmp (const void *_a, const void *_b) { - int a = *(int *)_a; - int b = *(int *)_b; - return RD_CMP(a, b); +static int int_cmp(const void *_a, const void *_b) { + int a = *(int *)_a; + int b = *(int *)_b; + return RD_CMP(a, b); } -static void verify_consumed_msg_check0 (const char *func, int line) { - int i; - int fails = 0; +static void verify_consumed_msg_check0(const char *func, int line) { + int i; + int fails = 0; - if (cons_msgs_cnt < cons_msgs_size) { - TEST_SAY("Missing %i messages in consumer\n", - cons_msgs_size - cons_msgs_cnt); - fails++; - } + if (cons_msgs_cnt < cons_msgs_size) { + TEST_SAY("Missing %i messages in consumer\n", + cons_msgs_size - cons_msgs_cnt); + fails++; + } - qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); + qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); - for (i = 0 ; i < cons_msgs_size ; i++) { - if (cons_msgs[i] != i) { - TEST_SAY("Consumed message #%i is wrong, " - "expected #%i\n", - cons_msgs[i], i); - fails++; - } - } + for (i = 0; i < cons_msgs_size; i++) { + if (cons_msgs[i] != i) { + TEST_SAY( + "Consumed message #%i is wrong, " + "expected #%i\n", + cons_msgs[i], i); + fails++; + } + } - if (fails) - TEST_FAIL("See above error(s)"); + if (fails) + TEST_FAIL("See above error(s)"); - verify_consumed_msg_reset(0); + verify_consumed_msg_reset(0); } -#define verify_consumed_msg_check() \ - verify_consumed_msg_check0(__FUNCTION__,__LINE__) +#define verify_consumed_msg_check() \ + verify_consumed_msg_check0(__FUNCTION__, __LINE__) -static void verify_consumed_msg0 (const char *func, 
int line, - uint64_t testid, int32_t partition, - int msgnum, - rd_kafka_message_t *rkmessage) { - uint64_t in_testid; - int in_part; - int in_msgnum; - char buf[128]; +static void verify_consumed_msg0(const char *func, + int line, + uint64_t testid, + int32_t partition, + int msgnum, + rd_kafka_message_t *rkmessage) { + uint64_t in_testid; + int in_part; + int in_msgnum; + char buf[128]; if (rkmessage->len != 0) TEST_FAIL("Incoming message not NULL: %i bytes", (int)rkmessage->len); - if (rkmessage->key_len +1 >= sizeof(buf)) - TEST_FAIL("Incoming message key too large (%i): " - "not sourced by this test", - (int)rkmessage->key_len); - - rd_snprintf(buf, sizeof(buf), "%.*s", - (int)rkmessage->key_len, (char *)rkmessage->key); - - if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i", - &in_testid, &in_part, &in_msgnum) != 3) - TEST_FAIL("Incorrect key format: %s", buf); - - if (testid != in_testid || - (partition != -1 && partition != in_part) || - (msgnum != -1 && msgnum != in_msgnum) || - (in_msgnum < 0 || in_msgnum > cons_msgs_size)) - goto fail_match; - - if (test_level > 2) { - TEST_SAY("%s:%i: Our testid %"PRIu64", part %i (%i), " - "msg %i/%i did " - ", key's: \"%s\"\n", - func, line, - testid, (int)partition, (int)rkmessage->partition, - msgnum, cons_msgs_size, buf); - } - - if (cons_msgs_cnt == cons_msgs_size) { - TEST_SAY("Too many messages in cons_msgs (%i) while reading " - "message key \"%s\"\n", - cons_msgs_cnt, buf); - verify_consumed_msg_check(); - TEST_FAIL("See above error(s)"); - } - - cons_msgs[cons_msgs_cnt++] = in_msgnum; - - return; - - fail_match: - TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i/%i did " - "not match message's key: \"%s\"\n", - func, line, - testid, (int)partition, msgnum, cons_msgs_size, buf); -} - -#define verify_consumed_msg(testid,part,msgnum,rkmessage) \ - verify_consumed_msg0(__FUNCTION__,__LINE__,testid,part,msgnum,rkmessage) + if (rkmessage->key_len + 1 >= sizeof(buf)) + TEST_FAIL( + "Incoming message key 
too large (%i): " + "not sourced by this test", + (int)rkmessage->key_len); + + rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->key_len, + (char *)rkmessage->key); + + if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid, + &in_part, &in_msgnum) != 3) + TEST_FAIL("Incorrect key format: %s", buf); + + if (testid != in_testid || (partition != -1 && partition != in_part) || + (msgnum != -1 && msgnum != in_msgnum) || + (in_msgnum < 0 || in_msgnum > cons_msgs_size)) + goto fail_match; + + if (test_level > 2) { + TEST_SAY("%s:%i: Our testid %" PRIu64 + ", part %i (%i), " + "msg %i/%i did " + ", key's: \"%s\"\n", + func, line, testid, (int)partition, + (int)rkmessage->partition, msgnum, cons_msgs_size, + buf); + } + if (cons_msgs_cnt == cons_msgs_size) { + TEST_SAY( + "Too many messages in cons_msgs (%i) while reading " + "message key \"%s\"\n", + cons_msgs_cnt, buf); + verify_consumed_msg_check(); + TEST_FAIL("See above error(s)"); + } -static void consume_messages (uint64_t testid, const char *topic, - int32_t partition, int msg_base, int batch_cnt, - int msgcnt) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - int i; + cons_msgs[cons_msgs_cnt++] = in_msgnum; - test_conf_init(&conf, &topic_conf, 20); + return; - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); +fail_match: + TEST_FAIL("%s:%i: Our testid %" PRIu64 + ", part %i, msg %i/%i did " + "not match message's key: \"%s\"\n", + func, line, testid, (int)partition, msgnum, cons_msgs_size, + buf); +} - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); +#define verify_consumed_msg(testid, part, msgnum, rkmessage) \ + verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \ + rkmessage) - TEST_SAY("Consuming %i messages from partition %i\n", - batch_cnt, partition); - /* Consume messages */ - if 
(rd_kafka_consume_start(rkt, partition, - RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1) - TEST_FAIL("consume_start(%i, -%i) failed: %s", - (int)partition, batch_cnt, - rd_kafka_err2str(rd_kafka_last_error())); +static void consume_messages(uint64_t testid, + const char *topic, + int32_t partition, + int msg_base, + int batch_cnt, + int msgcnt) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + int i; - for (i = 0 ; i < batch_cnt ; i++) { - rd_kafka_message_t *rkmessage; + test_conf_init(&conf, &topic_conf, 20); - rkmessage = rd_kafka_consume(rkt, partition, tmout_multip(5000)); - if (!rkmessage) - TEST_FAIL("Failed to consume message %i/%i from " - "partition %i: %s", - i, batch_cnt, (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - if (rkmessage->err) - TEST_FAIL("Consume message %i/%i from partition %i " - "has error: %s", - i, batch_cnt, (int)partition, - rd_kafka_err2str(rkmessage->err)); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - verify_consumed_msg(testid, partition, msg_base+i, rkmessage); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); - rd_kafka_message_destroy(rkmessage); - } + TEST_SAY("Consuming %i messages from partition %i\n", batch_cnt, + partition); + + /* Consume messages */ + if (rd_kafka_consume_start(rkt, partition, + RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1) + TEST_FAIL("consume_start(%i, -%i) failed: %s", (int)partition, + batch_cnt, rd_kafka_err2str(rd_kafka_last_error())); + + for (i = 0; i < batch_cnt; i++) { + rd_kafka_message_t *rkmessage; + + rkmessage = + rd_kafka_consume(rkt, partition, tmout_multip(5000)); + if (!rkmessage) + TEST_FAIL( + "Failed to consume message %i/%i from " + "partition %i: %s", + i, batch_cnt, (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + if (rkmessage->err) + TEST_FAIL( + "Consume message %i/%i 
from partition %i " + "has error: %s", + i, batch_cnt, (int)partition, + rd_kafka_err2str(rkmessage->err)); + + verify_consumed_msg(testid, partition, msg_base + i, rkmessage); + + rd_kafka_message_destroy(rkmessage); + } - rd_kafka_consume_stop(rkt, partition); + rd_kafka_consume_stop(rkt, partition); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } -static void consume_messages_with_queues (uint64_t testid, const char *topic, - int partition_cnt, int msgcnt) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - rd_kafka_queue_t *rkqu; - int i; - int32_t partition; - int batch_cnt = msgcnt / partition_cnt; +static void consume_messages_with_queues(uint64_t testid, + const char *topic, + int partition_cnt, + int msgcnt) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + rd_kafka_queue_t *rkqu; + int i; + int32_t partition; + int batch_cnt = msgcnt / partition_cnt; - test_conf_init(&conf, &topic_conf, 20); + test_conf_init(&conf, &topic_conf, 20); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - /* Create queue */ - rkqu = rd_kafka_queue_new(rk); + /* Create queue */ + rkqu = rd_kafka_queue_new(rk); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); if (!rkt) TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); - TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", - msgcnt, partition_cnt); - - /* Start consuming each partition */ - for 
(partition = 0 ; partition < partition_cnt ; partition++) { - /* Consume messages */ - TEST_SAY("Start consuming partition %i at tail offset -%i\n", - partition, batch_cnt); - if (rd_kafka_consume_start_queue(rkt, partition, - RD_KAFKA_OFFSET_TAIL(batch_cnt), - rkqu) == -1) - TEST_FAIL("consume_start_queue(%i) failed: %s", - (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - } - - - /* Consume messages from queue */ - for (i = 0 ; i < msgcnt ; i++) { - rd_kafka_message_t *rkmessage; - - rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000)); - if (!rkmessage) - TEST_FAIL("Failed to consume message %i/%i from " - "queue: %s", - i, msgcnt, - rd_kafka_err2str(rd_kafka_last_error())); - if (rkmessage->err) - TEST_FAIL("Consume message %i/%i from queue " - "has error (partition %"PRId32"): %s", - i, msgcnt, - rkmessage->partition, - rd_kafka_err2str(rkmessage->err)); - - verify_consumed_msg(testid, -1, -1, rkmessage); - - rd_kafka_message_destroy(rkmessage); - } - - /* Stop consuming each partition */ - for (partition = 0 ; partition < partition_cnt ; partition++) - rd_kafka_consume_stop(rkt, partition); - - /* Destroy queue */ - rd_kafka_queue_destroy(rkqu); - - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); - - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", + msgcnt, partition_cnt); + + /* Start consuming each partition */ + for (partition = 0; partition < partition_cnt; partition++) { + /* Consume messages */ + TEST_SAY("Start consuming partition %i at tail offset -%i\n", + partition, batch_cnt); + if (rd_kafka_consume_start_queue( + rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt), + rkqu) == -1) + TEST_FAIL("consume_start_queue(%i) failed: %s", + (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + } + + + /* Consume messages from queue */ + for (i = 0; i < msgcnt; i++) { + 
rd_kafka_message_t *rkmessage; + + rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000)); + if (!rkmessage) + TEST_FAIL( + "Failed to consume message %i/%i from " + "queue: %s", + i, msgcnt, rd_kafka_err2str(rd_kafka_last_error())); + if (rkmessage->err) + TEST_FAIL( + "Consume message %i/%i from queue " + "has error (partition %" PRId32 "): %s", + i, msgcnt, rkmessage->partition, + rd_kafka_err2str(rkmessage->err)); + + verify_consumed_msg(testid, -1, -1, rkmessage); + + rd_kafka_message_destroy(rkmessage); + } + + /* Stop consuming each partition */ + for (partition = 0; partition < partition_cnt; partition++) + rd_kafka_consume_stop(rkt, partition); + + /* Destroy queue */ + rd_kafka_queue_destroy(rkqu); + + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); + + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } -static void test_produce_consume (void) { - int msgcnt = test_quick ? 100 : 1000; +static void test_produce_consume(void) { + int msgcnt = test_quick ? 
100 : 1000; int partition_cnt = 1; - int i; - uint64_t testid; - int msg_base = 0; + int i; + uint64_t testid; + int msg_base = 0; const char *topic; - /* Generate a testid so we can differentiate messages - * from other tests */ - testid = test_id_generate(); + /* Generate a testid so we can differentiate messages + * from other tests */ + testid = test_id_generate(); /* Read test.conf to configure topic name */ test_conf_init(NULL, NULL, 20); topic = test_mk_topic_name("0013", 0); - TEST_SAY("Topic %s, testid %"PRIu64"\n", topic, testid); + TEST_SAY("Topic %s, testid %" PRIu64 "\n", topic, testid); - /* Produce messages */ - produce_null_messages(testid, topic, partition_cnt, msgcnt); + /* Produce messages */ + produce_null_messages(testid, topic, partition_cnt, msgcnt); - /* Consume messages with standard interface */ - verify_consumed_msg_reset(msgcnt); - for (i = 0 ; i < partition_cnt ; i++) { - consume_messages(testid, topic, i, - msg_base, msgcnt / partition_cnt, msgcnt); - msg_base += msgcnt / partition_cnt; - } - verify_consumed_msg_check(); + /* Consume messages with standard interface */ + verify_consumed_msg_reset(msgcnt); + for (i = 0; i < partition_cnt; i++) { + consume_messages(testid, topic, i, msg_base, + msgcnt / partition_cnt, msgcnt); + msg_base += msgcnt / partition_cnt; + } + verify_consumed_msg_check(); - /* Consume messages with queue interface */ - verify_consumed_msg_reset(msgcnt); - consume_messages_with_queues(testid, topic, partition_cnt, msgcnt); - verify_consumed_msg_check(); + /* Consume messages with queue interface */ + verify_consumed_msg_reset(msgcnt); + consume_messages_with_queues(testid, topic, partition_cnt, msgcnt); + verify_consumed_msg_check(); - return; + return; } - -int main_0013_null_msgs (int argc, char **argv) { - test_produce_consume(); - return 0; +int main_0013_null_msgs(int argc, char **argv) { + test_produce_consume(); + return 0; } diff --git a/tests/0014-reconsume-191.c b/tests/0014-reconsume-191.c index 
a6635fd31d..edae85f5cd 100644 --- a/tests/0014-reconsume-191.c +++ b/tests/0014-reconsume-191.c @@ -30,138 +30,145 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int prod_msg_remains = 0; -static int fails = 0; +static int fails = 0; /** * Delivery reported callback. * Called for each message once to signal its delivery status. */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - - if (err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(err)); - - if (prod_msg_remains == 0) - TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", - prod_msg_remains); - - prod_msg_remains--; +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(err)); + + if (prod_msg_remains == 0) + TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", + prod_msg_remains); + + prod_msg_remains--; } /** * Produces 'msgcnt' messages split over 'partition_cnt' partitions. 
*/ -static void produce_messages (uint64_t testid, const char *topic, - int partition_cnt, int msg_base, int msgcnt) { - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - int i; - int32_t partition; - int msgid = msg_base; - - test_conf_init(&conf, &topic_conf, 20); - - rd_kafka_conf_set_dr_cb(conf, dr_cb); +static void produce_messages(uint64_t testid, + const char *topic, + int partition_cnt, + int msg_base, + int msgcnt) { + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + int i; + int32_t partition; + int msgid = msg_base; + + test_conf_init(&conf, &topic_conf, 20); + + rd_kafka_conf_set_dr_cb(conf, dr_cb); /* Make sure all replicas are in-sync after producing * so that consume test wont fail. */ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1", errstr, sizeof(errstr)); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); /* Produce messages */ - prod_msg_remains = msgcnt; - for (partition = 0 ; partition < partition_cnt ; partition++) { - int batch_cnt = msgcnt / partition_cnt; + prod_msg_remains = msgcnt; + for (partition = 0; partition < partition_cnt; partition++) { + int batch_cnt = msgcnt / partition_cnt; - for (i = 0 ; i < batch_cnt ; i++) { + for (i = 0; i < batch_cnt; i++) { char key[128]; char buf[128]; - rd_snprintf(key, sizeof(key), - "testid=%"PRIu64", partition=%i, msg=%i", - testid, (int)partition, msgid); + rd_snprintf(key, sizeof(key), + "testid=%" PRIu64 ", partition=%i, msg=%i", + testid, 
(int)partition, msgid); rd_snprintf(buf, sizeof(buf), - "data: testid=%"PRIu64", partition=%i, msg=%i", - testid, (int)partition, msgid); - - r = rd_kafka_produce(rkt, partition, - RD_KAFKA_MSG_F_COPY, - buf, strlen(buf), - key, strlen(key), - NULL); + "data: testid=%" PRIu64 + ", partition=%i, msg=%i", + testid, (int)partition, msgid); + + r = rd_kafka_produce( + rkt, partition, RD_KAFKA_MSG_F_COPY, buf, + strlen(buf), key, strlen(key), NULL); if (r == -1) - TEST_FAIL("Failed to produce message %i " - "to partition %i: %s", - msgid, (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - msgid++; - } + TEST_FAIL( + "Failed to produce message %i " + "to partition %i: %s", + msgid, (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + msgid++; + } } - /* Wait for messages to be delivered */ - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 100); + /* Wait for messages to be delivered */ + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 100); - if (fails) - TEST_FAIL("%i failures, see previous errors", fails); + if (fails) + TEST_FAIL("%i failures, see previous errors", fails); - if (prod_msg_remains != 0) - TEST_FAIL("Still waiting for %i messages to be produced", - prod_msg_remains); + if (prod_msg_remains != 0) + TEST_FAIL("Still waiting for %i messages to be produced", + prod_msg_remains); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } static int *cons_msgs; -static int cons_msgs_size; -static int cons_msgs_cnt; -static int cons_msg_next; -static int cons_msg_stop = -1; -static int64_t cons_last_offset = -1; /* last offset received */ - -static void verify_consumed_msg_reset (int msgcnt) { - if (cons_msgs) { - free(cons_msgs); - cons_msgs 
= NULL; - } - - if (msgcnt) { - int i; - - cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); - for (i = 0 ; i < msgcnt ; i++) - cons_msgs[i] = -1; - } - - cons_msgs_size = msgcnt; - cons_msgs_cnt = 0; - cons_msg_next = 0; - cons_msg_stop = -1; +static int cons_msgs_size; +static int cons_msgs_cnt; +static int cons_msg_next; +static int cons_msg_stop = -1; +static int64_t cons_last_offset = -1; /* last offset received */ + +static void verify_consumed_msg_reset(int msgcnt) { + if (cons_msgs) { + free(cons_msgs); + cons_msgs = NULL; + } + + if (msgcnt) { + int i; + + cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); + for (i = 0; i < msgcnt; i++) + cons_msgs[i] = -1; + } + + cons_msgs_size = msgcnt; + cons_msgs_cnt = 0; + cons_msg_next = 0; + cons_msg_stop = -1; cons_last_offset = -1; TEST_SAY("Reset consumed_msg stats, making room for %d new messages\n", @@ -169,174 +176,186 @@ static void verify_consumed_msg_reset (int msgcnt) { } -static int int_cmp (const void *_a, const void *_b) { - int a = *(int *)_a; - int b = *(int *)_b; +static int int_cmp(const void *_a, const void *_b) { + int a = *(int *)_a; + int b = *(int *)_b; /* Sort -1 (non-received msgs) at the end */ - return (a == -1 ? 100000000 : a) - (b == -1 ? 10000000 : b); + return (a == -1 ? 100000000 : a) - (b == -1 ? 
10000000 : b); } -static void verify_consumed_msg_check0 (const char *func, int line, - const char *desc, - int expected_cnt) { - int i; - int fails = 0; +static void verify_consumed_msg_check0(const char *func, + int line, + const char *desc, + int expected_cnt) { + int i; + int fails = 0; int not_recvd = 0; - TEST_SAY("%s: received %d/%d/%d messages\n", - desc, cons_msgs_cnt, expected_cnt, cons_msgs_size); + TEST_SAY("%s: received %d/%d/%d messages\n", desc, cons_msgs_cnt, + expected_cnt, cons_msgs_size); if (expected_cnt > cons_msgs_size) - TEST_FAIL("expected_cnt %d > cons_msgs_size %d\n", - expected_cnt, cons_msgs_size); + TEST_FAIL("expected_cnt %d > cons_msgs_size %d\n", expected_cnt, + cons_msgs_size); - if (cons_msgs_cnt < expected_cnt) { - TEST_SAY("%s: Missing %i messages in consumer\n", - desc,expected_cnt - cons_msgs_cnt); - fails++; - } + if (cons_msgs_cnt < expected_cnt) { + TEST_SAY("%s: Missing %i messages in consumer\n", desc, + expected_cnt - cons_msgs_cnt); + fails++; + } - qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); + qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); - for (i = 0 ; i < expected_cnt ; i++) { - if (cons_msgs[i] != i) { + for (i = 0; i < expected_cnt; i++) { + if (cons_msgs[i] != i) { if (cons_msgs[i] == -1) { not_recvd++; - TEST_SAY("%s: msg %d/%d not received\n", - desc, i, expected_cnt); + TEST_SAY("%s: msg %d/%d not received\n", desc, + i, expected_cnt); } else - TEST_SAY("%s: Consumed message #%i is wrong, " - "expected #%i\n", - desc, cons_msgs[i], i); - fails++; - } - } + TEST_SAY( + "%s: Consumed message #%i is wrong, " + "expected #%i\n", + desc, cons_msgs[i], i); + fails++; + } + } if (not_recvd) - TEST_SAY("%s: %d messages not received at all\n", - desc, not_recvd); + TEST_SAY("%s: %d messages not received at all\n", desc, + not_recvd); - if (fails) - TEST_FAIL("%s: See above error(s)", desc); + if (fails) + TEST_FAIL("%s: See above error(s)", desc); else - TEST_SAY("%s: message range 
check: %d/%d messages consumed: " - "succeeded\n", desc, cons_msgs_cnt, expected_cnt); - + TEST_SAY( + "%s: message range check: %d/%d messages consumed: " + "succeeded\n", + desc, cons_msgs_cnt, expected_cnt); } -#define verify_consumed_msg_check(desc,expected_cnt) \ - verify_consumed_msg_check0(__FUNCTION__,__LINE__, desc, expected_cnt) +#define verify_consumed_msg_check(desc, expected_cnt) \ + verify_consumed_msg_check0(__FUNCTION__, __LINE__, desc, expected_cnt) -static void verify_consumed_msg0 (const char *func, int line, - uint64_t testid, int32_t partition, - int msgnum, - rd_kafka_message_t *rkmessage) { - uint64_t in_testid; - int in_part; - int in_msgnum; - char buf[128]; +static void verify_consumed_msg0(const char *func, + int line, + uint64_t testid, + int32_t partition, + int msgnum, + rd_kafka_message_t *rkmessage) { + uint64_t in_testid; + int in_part; + int in_msgnum; + char buf[128]; - if (rkmessage->key_len +1 >= sizeof(buf)) - TEST_FAIL("Incoming message key too large (%i): " - "not sourced by this test", - (int)rkmessage->key_len); + if (rkmessage->key_len + 1 >= sizeof(buf)) + TEST_FAIL( + "Incoming message key too large (%i): " + "not sourced by this test", + (int)rkmessage->key_len); - rd_snprintf(buf, sizeof(buf), "%.*s", - (int)rkmessage->key_len, (char *)rkmessage->key); + rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->key_len, + (char *)rkmessage->key); - if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i", - &in_testid, &in_part, &in_msgnum) != 3) - TEST_FAIL("Incorrect key format: %s", buf); + if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid, + &in_part, &in_msgnum) != 3) + TEST_FAIL("Incorrect key format: %s", buf); if (test_level > 2) { - TEST_SAY("%s:%i: Our testid %"PRIu64", part %i (%i), " - "msg %i/%i, key's: \"%s\"\n", - func, line, - testid, (int)partition, (int)rkmessage->partition, - msgnum, cons_msgs_size, buf); - } - - if (testid != in_testid || - (partition != -1 && partition != in_part) 
|| - (msgnum != -1 && msgnum != in_msgnum) || - (in_msgnum < 0 || in_msgnum > cons_msgs_size)) - goto fail_match; - - if (cons_msgs_cnt == cons_msgs_size) { - TEST_SAY("Too many messages in cons_msgs (%i) while reading " - "message key \"%s\"\n", - cons_msgs_cnt, buf); - verify_consumed_msg_check("?", cons_msgs_size); - TEST_FAIL("See above error(s)"); - } - - cons_msgs[cons_msgs_cnt++] = in_msgnum; - cons_last_offset = rkmessage->offset; - - return; - - fail_match: - TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i/%i did " - "not match message's key: \"%s\"\n", - func, line, - testid, (int)partition, msgnum, cons_msgs_size, buf); + TEST_SAY("%s:%i: Our testid %" PRIu64 + ", part %i (%i), " + "msg %i/%i, key's: \"%s\"\n", + func, line, testid, (int)partition, + (int)rkmessage->partition, msgnum, cons_msgs_size, + buf); + } + + if (testid != in_testid || (partition != -1 && partition != in_part) || + (msgnum != -1 && msgnum != in_msgnum) || + (in_msgnum < 0 || in_msgnum > cons_msgs_size)) + goto fail_match; + + if (cons_msgs_cnt == cons_msgs_size) { + TEST_SAY( + "Too many messages in cons_msgs (%i) while reading " + "message key \"%s\"\n", + cons_msgs_cnt, buf); + verify_consumed_msg_check("?", cons_msgs_size); + TEST_FAIL("See above error(s)"); + } + + cons_msgs[cons_msgs_cnt++] = in_msgnum; + cons_last_offset = rkmessage->offset; + + return; + +fail_match: + TEST_FAIL("%s:%i: Our testid %" PRIu64 + ", part %i, msg %i/%i did " + "not match message's key: \"%s\"\n", + func, line, testid, (int)partition, msgnum, cons_msgs_size, + buf); } -#define verify_consumed_msg(testid,part,msgnum,rkmessage) \ - verify_consumed_msg0(__FUNCTION__,__LINE__,testid,part,msgnum,rkmessage) +#define verify_consumed_msg(testid, part, msgnum, rkmessage) \ + verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \ + rkmessage) -static void consume_cb (rd_kafka_message_t *rkmessage, void *opaque) { +static void consume_cb(rd_kafka_message_t *rkmessage, void *opaque) { 
int64_t testid = *(int64_t *)opaque; - if (test_level > 2) - TEST_SAY("Consumed message #%d? at offset %"PRId64": %s\n", - cons_msg_next, rkmessage->offset, - rd_kafka_err2str(rkmessage->err)); + if (test_level > 2) + TEST_SAY("Consumed message #%d? at offset %" PRId64 ": %s\n", + cons_msg_next, rkmessage->offset, + rd_kafka_err2str(rkmessage->err)); if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("EOF at offset %"PRId64"\n", rkmessage->offset); + TEST_SAY("EOF at offset %" PRId64 "\n", rkmessage->offset); return; } if (rkmessage->err) - TEST_FAIL("Consume message from partition %i " - "has error: %s", - (int)rkmessage->partition, - rd_kafka_err2str(rkmessage->err)); + TEST_FAIL( + "Consume message from partition %i " + "has error: %s", + (int)rkmessage->partition, + rd_kafka_err2str(rkmessage->err)); - verify_consumed_msg(testid, rkmessage->partition, - cons_msg_next, rkmessage); + verify_consumed_msg(testid, rkmessage->partition, cons_msg_next, + rkmessage); if (cons_msg_next == cons_msg_stop) { - rd_kafka_yield(NULL/*FIXME*/); + rd_kafka_yield(NULL /*FIXME*/); } cons_msg_next++; } -static void consume_messages_callback_multi (const char *desc, - uint64_t testid, const char *topic, - int32_t partition, - const char *offset_store_method, - int msg_base, - int msg_cnt, - int64_t initial_offset, - int iterations) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - int i; - - TEST_SAY("%s: Consume messages %d+%d from %s [%"PRId32"] " - "from offset %"PRId64" in %d iterations\n", - desc, msg_base, msg_cnt, topic, partition, - initial_offset, iterations); - - test_conf_init(&conf, &topic_conf, 20); +static void consume_messages_callback_multi(const char *desc, + uint64_t testid, + const char *topic, + int32_t partition, + const char *offset_store_method, + int msg_base, + int msg_cnt, + int64_t initial_offset, + int iterations) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t 
*conf; + rd_kafka_topic_conf_t *topic_conf; + int i; + + TEST_SAY("%s: Consume messages %d+%d from %s [%" PRId32 + "] " + "from offset %" PRId64 " in %d iterations\n", + desc, msg_base, msg_cnt, topic, partition, initial_offset, + iterations); + + test_conf_init(&conf, &topic_conf, 20); test_topic_conf_set(topic_conf, "offset.store.method", offset_store_method); @@ -348,35 +367,38 @@ static void consume_messages_callback_multi (const char *desc, test_conf_set(conf, "enable.partition.eof", "true"); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); rd_kafka_topic_conf_set(topic_conf, "auto.offset.reset", "smallest", NULL, 0); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("%s: Failed to create topic: %s\n", - desc, rd_kafka_err2str(rd_kafka_last_error())); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("%s: Failed to create topic: %s\n", desc, + rd_kafka_err2str(rd_kafka_last_error())); - cons_msg_stop = cons_msg_next + msg_cnt - 1; + cons_msg_stop = cons_msg_next + msg_cnt - 1; /* Consume the same batch of messages multiple times to * make sure back-to-back start&stops work. 
*/ - for (i = 0 ; i < iterations ; i++) { + for (i = 0; i < iterations; i++) { int cnta; test_timing_t t_stop; - TEST_SAY("%s: Iteration #%i: Consuming from " - "partition %i at offset %"PRId64", " - "msgs range %d..%d\n", - desc, i, partition, initial_offset, - cons_msg_next, cons_msg_stop); + TEST_SAY( + "%s: Iteration #%i: Consuming from " + "partition %i at offset %" PRId64 + ", " + "msgs range %d..%d\n", + desc, i, partition, initial_offset, cons_msg_next, + cons_msg_stop); /* Consume messages */ - if (rd_kafka_consume_start(rkt, partition, initial_offset) == -1) - TEST_FAIL("%s: consume_start(%i) failed: %s", - desc, (int)partition, + if (rd_kafka_consume_start(rkt, partition, initial_offset) == + -1) + TEST_FAIL("%s: consume_start(%i) failed: %s", desc, + (int)partition, rd_kafka_err2str(rd_kafka_last_error())); @@ -388,8 +410,8 @@ static void consume_messages_callback_multi (const char *desc, consume_cb, &testid); } while (cons_msg_next < cons_msg_stop); - TEST_SAY("%s: Iteration #%i: consumed %i messages\n", - desc, i, cons_msg_next - cnta); + TEST_SAY("%s: Iteration #%i: consumed %i messages\n", desc, i, + cons_msg_next - cnta); TIMING_START(&t_stop, "rd_kafka_consume_stop()"); rd_kafka_consume_stop(rkt, partition); @@ -398,75 +420,71 @@ static void consume_messages_callback_multi (const char *desc, /* Advance next offset so we dont reconsume * messages on the next run. 
*/ if (initial_offset != RD_KAFKA_OFFSET_STORED) { - initial_offset = cons_last_offset+1; - cons_msg_stop = cons_msg_next + msg_cnt - 1; - } + initial_offset = cons_last_offset + 1; + cons_msg_stop = cons_msg_next + msg_cnt - 1; + } } - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("%s: Destroying kafka instance %s\n", desc, rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("%s: Destroying kafka instance %s\n", desc, rd_kafka_name(rk)); + rd_kafka_destroy(rk); } -static void test_produce_consume (const char *offset_store_method) { - int msgcnt = 100; +static void test_produce_consume(const char *offset_store_method) { + int msgcnt = 100; int partition_cnt = 1; - int i; - uint64_t testid; - int msg_base = 0; + int i; + uint64_t testid; + int msg_base = 0; const char *topic; - /* Generate a testid so we can differentiate messages - * from other tests */ - testid = test_id_generate(); + /* Generate a testid so we can differentiate messages + * from other tests */ + testid = test_id_generate(); /* Read test.conf to configure topic name */ test_conf_init(NULL, NULL, 20); - topic = test_mk_topic_name("0014", 1/*random*/); + topic = test_mk_topic_name("0014", 1 /*random*/); - TEST_SAY("Topic %s, testid %"PRIu64", offset.store.method=%s\n", + TEST_SAY("Topic %s, testid %" PRIu64 ", offset.store.method=%s\n", topic, testid, offset_store_method); - /* Produce messages */ - produce_messages(testid, topic, partition_cnt, msg_base, msgcnt); + /* Produce messages */ + produce_messages(testid, topic, partition_cnt, msg_base, msgcnt); /* 100% of messages */ verify_consumed_msg_reset(msgcnt); - /* Consume 50% of messages with callbacks: stored offsets with no prior + /* Consume 50% of messages with callbacks: stored offsets with no prior * offset stored. 
*/ - for (i = 0 ; i < partition_cnt ; i++) - consume_messages_callback_multi("STORED.1/2", testid, topic, i, - offset_store_method, - msg_base, + for (i = 0; i < partition_cnt; i++) + consume_messages_callback_multi("STORED.1/2", testid, topic, i, + offset_store_method, msg_base, (msgcnt / partition_cnt) / 2, - RD_KAFKA_OFFSET_STORED, - 1); + RD_KAFKA_OFFSET_STORED, 1); verify_consumed_msg_check("STORED.1/2", msgcnt / 2); /* Consume the rest using the now stored offset */ - for (i = 0 ; i < partition_cnt ; i++) - consume_messages_callback_multi("STORED.2/2", testid, topic, i, - offset_store_method, - msg_base, + for (i = 0; i < partition_cnt; i++) + consume_messages_callback_multi("STORED.2/2", testid, topic, i, + offset_store_method, msg_base, (msgcnt / partition_cnt) / 2, - RD_KAFKA_OFFSET_STORED, - 1); + RD_KAFKA_OFFSET_STORED, 1); verify_consumed_msg_check("STORED.2/2", msgcnt); - /* Consume messages with callbacks: logical offsets */ - verify_consumed_msg_reset(msgcnt); - for (i = 0 ; i < partition_cnt ; i++) { - int p_msg_cnt = msgcnt / partition_cnt; + /* Consume messages with callbacks: logical offsets */ + verify_consumed_msg_reset(msgcnt); + for (i = 0; i < partition_cnt; i++) { + int p_msg_cnt = msgcnt / partition_cnt; int64_t initial_offset = RD_KAFKA_OFFSET_TAIL(p_msg_cnt); - const int iterations = 4; - consume_messages_callback_multi("TAIL+", testid, topic, i, + const int iterations = 4; + consume_messages_callback_multi("TAIL+", testid, topic, i, offset_store_method, /* start here (msgid) */ msg_base, @@ -474,23 +492,21 @@ static void test_produce_consume (const char *offset_store_method) { * per iteration. 
*/ p_msg_cnt / iterations, /* start here (offset) */ - initial_offset, - iterations); + initial_offset, iterations); } verify_consumed_msg_check("TAIL+", msgcnt); verify_consumed_msg_reset(0); - return; + return; } - -int main_0014_reconsume_191 (int argc, char **argv) { - if (test_broker_version >= TEST_BRKVER(0,8,2,0)) - test_produce_consume("broker"); +int main_0014_reconsume_191(int argc, char **argv) { + if (test_broker_version >= TEST_BRKVER(0, 8, 2, 0)) + test_produce_consume("broker"); test_produce_consume("file"); - return 0; + return 0; } diff --git a/tests/0015-offset_seeks.c b/tests/0015-offset_seeks.c index cff8b19590..a551a0b531 100644 --- a/tests/0015-offset_seeks.c +++ b/tests/0015-offset_seeks.c @@ -30,65 +30,67 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ -static void do_legacy_seek (const char *topic, uint64_t testid, int msg_cnt) { +static void do_legacy_seek(const char *topic, uint64_t testid, int msg_cnt) { rd_kafka_t *rk_c; - rd_kafka_topic_t *rkt_c; - int32_t partition = 0; - int i; - int64_t offset_last, offset_base; - int dance_iterations = 10; - int msgs_per_dance = 10; - const int msg_base = 0; + rd_kafka_topic_t *rkt_c; + int32_t partition = 0; + int i; + int64_t offset_last, offset_base; + int dance_iterations = 10; + int msgs_per_dance = 10; + const int msg_base = 0; SUB_TEST_QUICK(); - rk_c = test_create_consumer(NULL, NULL, NULL, NULL); - rkt_c = test_create_consumer_topic(rk_c, topic); + rk_c = test_create_consumer(NULL, NULL, NULL, NULL); + rkt_c = test_create_consumer_topic(rk_c, topic); - /* Start consumer tests */ - test_consumer_start("verify.all", rkt_c, partition, + /* Start consumer tests */ + test_consumer_start("verify.all", rkt_c, partition, RD_KAFKA_OFFSET_BEGINNING); - /* Make sure all messages are available */ - offset_last = 
test_consume_msgs("verify.all", rkt_c, - testid, partition, TEST_NO_SEEK, - msg_base, msg_cnt, 1/* parse format*/); - - /* Rewind offset back to its base. */ - offset_base = offset_last - msg_cnt + 1; - - TEST_SAY("%s [%"PRId32"]: Do random seek&consume for msgs #%d+%d with " - "offsets %"PRId64"..%"PRId64"\n", - rd_kafka_topic_name(rkt_c), partition, - msg_base, msg_cnt, offset_base, offset_last); - - /* Now go dancing over the entire range with offset seeks. */ - for (i = 0 ; i < dance_iterations ; i++) { - int64_t offset = jitter((int)offset_base, - (int)offset_base+msg_cnt); - - test_consume_msgs("dance", rkt_c, - testid, partition, offset, - msg_base + (int)(offset - offset_base), - RD_MIN(msgs_per_dance, - (int)(offset_last - offset)), - 1 /* parse format */); - } + /* Make sure all messages are available */ + offset_last = test_consume_msgs("verify.all", rkt_c, testid, partition, + TEST_NO_SEEK, msg_base, msg_cnt, + 1 /* parse format*/); + + /* Rewind offset back to its base. */ + offset_base = offset_last - msg_cnt + 1; + + TEST_SAY("%s [%" PRId32 + "]: Do random seek&consume for msgs #%d+%d with " + "offsets %" PRId64 "..%" PRId64 "\n", + rd_kafka_topic_name(rkt_c), partition, msg_base, msg_cnt, + offset_base, offset_last); + + /* Now go dancing over the entire range with offset seeks. 
*/ + for (i = 0; i < dance_iterations; i++) { + int64_t offset = + jitter((int)offset_base, (int)offset_base + msg_cnt); + + test_consume_msgs( + "dance", rkt_c, testid, partition, offset, + msg_base + (int)(offset - offset_base), + RD_MIN(msgs_per_dance, (int)(offset_last - offset)), + 1 /* parse format */); + } - test_consumer_stop("1", rkt_c, partition); + test_consumer_stop("1", rkt_c, partition); - rd_kafka_topic_destroy(rkt_c); - rd_kafka_destroy(rk_c); + rd_kafka_topic_destroy(rkt_c); + rd_kafka_destroy(rk_c); SUB_TEST_PASS(); } -static void do_seek (const char *topic, uint64_t testid, - int msg_cnt, rd_bool_t with_timeout) { +static void do_seek(const char *topic, + uint64_t testid, + int msg_cnt, + rd_bool_t with_timeout) { rd_kafka_t *c; rd_kafka_topic_partition_list_t *partitions; char errstr[512]; @@ -99,9 +101,9 @@ static void do_seek (const char *topic, uint64_t testid, c = test_create_consumer(topic, NULL, NULL, NULL); partitions = rd_kafka_topic_partition_list_new(3); - for (i = 0 ; i < 3 ; i++) - rd_kafka_topic_partition_list_add(partitions, topic, i)-> - offset = RD_KAFKA_OFFSET_END; + for (i = 0; i < 3; i++) + rd_kafka_topic_partition_list_add(partitions, topic, i) + ->offset = RD_KAFKA_OFFSET_END; TEST_CALL__(rd_kafka_assign(c, partitions)); @@ -109,24 +111,25 @@ static void do_seek (const char *topic, uint64_t testid, test_consumer_poll_no_msgs("NO.MSGS", c, testid, 3000); /* Seek to beginning */ - for (i = 0 ; i < 3 ; i++) { + for (i = 0; i < 3; i++) { /* Sentinel to verify that this field is reset by * seek_partitions() */ partitions->elems[i].err = RD_KAFKA_RESP_ERR__BAD_MSG; - partitions->elems[i].offset = i == 0 ? - /* Logical and absolute offsets for the same thing */ - RD_KAFKA_OFFSET_BEGINNING : 0; + partitions->elems[i].offset = + i == 0 ? + /* Logical and absolute offsets for the same thing */ + RD_KAFKA_OFFSET_BEGINNING + : 0; } TEST_SAY("Seeking\n"); - TEST_CALL_ERROR__(rd_kafka_seek_partitions(c, partitions, - with_timeout ? 
7000 : -1)); + TEST_CALL_ERROR__( + rd_kafka_seek_partitions(c, partitions, with_timeout ? 7000 : -1)); /* Verify that there are no per-partition errors */ - for (i = 0 ; i < 3 ; i++) + for (i = 0; i < 3; i++) TEST_ASSERT_LATER(!partitions->elems[i].err, - "Partition #%d has unexpected error: %s", - i, + "Partition #%d has unexpected error: %s", i, rd_kafka_err2name(partitions->elems[i].err)); TEST_LATER_CHECK(); @@ -145,27 +148,25 @@ static void do_seek (const char *topic, uint64_t testid, } -int main_0015_offsets_seek (int argc, char **argv) { - const char *topic = test_mk_topic_name("0015", 1); +int main_0015_offsets_seek(int argc, char **argv) { + const char *topic = test_mk_topic_name("0015", 1); int msg_cnt_per_part = test_quick ? 100 : 1000; - int msg_cnt = 3 * msg_cnt_per_part; + int msg_cnt = 3 * msg_cnt_per_part; uint64_t testid; testid = test_id_generate(); test_produce_msgs_easy_multi( - testid, - topic, 0, 0*msg_cnt_per_part, msg_cnt_per_part, - topic, 1, 1*msg_cnt_per_part, msg_cnt_per_part, - topic, 2, 2*msg_cnt_per_part, msg_cnt_per_part, - NULL); + testid, topic, 0, 0 * msg_cnt_per_part, msg_cnt_per_part, topic, 1, + 1 * msg_cnt_per_part, msg_cnt_per_part, topic, 2, + 2 * msg_cnt_per_part, msg_cnt_per_part, NULL); /* legacy seek: only reads partition 0 */ do_legacy_seek(topic, testid, msg_cnt_per_part); - do_seek(topic, testid, msg_cnt, rd_true/*with timeout*/); + do_seek(topic, testid, msg_cnt, rd_true /*with timeout*/); - do_seek(topic, testid, msg_cnt, rd_true/*without timeout*/); + do_seek(topic, testid, msg_cnt, rd_true /*without timeout*/); return 0; } diff --git a/tests/0016-client_swname.c b/tests/0016-client_swname.c index db044d277a..2d0605b887 100644 --- a/tests/0016-client_swname.c +++ b/tests/0016-client_swname.c @@ -40,12 +40,12 @@ static char jmx_cmd[512]; * @brief Verify that the expected software name and version is reported * in JMX metrics. 
*/ -static void jmx_verify (const char *exp_swname, const char *exp_swversion) { +static void jmx_verify(const char *exp_swname, const char *exp_swversion) { #if _WIN32 return; #else int r; - char cmd[512+256]; + char cmd[512 + 256]; if (!*jmx_cmd) return; @@ -53,32 +53,39 @@ static void jmx_verify (const char *exp_swname, const char *exp_swversion) { rd_snprintf(cmd, sizeof(cmd), "%s | " "grep -F 'clientSoftwareName=%s,clientSoftwareVersion=%s'", - jmx_cmd, - exp_swname, exp_swversion ? exp_swversion : ""); + jmx_cmd, exp_swname, exp_swversion ? exp_swversion : ""); r = system(cmd); if (WEXITSTATUS(r) == 1) - TEST_FAIL("Expected software name and version not found in " - "JMX metrics with command \"%s\"", cmd); + TEST_FAIL( + "Expected software name and version not found in " + "JMX metrics with command \"%s\"", + cmd); else if (r == -1 || WIFSIGNALED(r) || WEXITSTATUS(r)) - TEST_FAIL("Failed to execute JmxTool command \"%s\": " - "exit code %d", cmd, r); - - TEST_SAY("Expected software name \"%s\" and version \"%s\" " - "found in JMX metrics\n", - exp_swname, exp_swversion); + TEST_FAIL( + "Failed to execute JmxTool command \"%s\": " + "exit code %d", + cmd, r); + + TEST_SAY( + "Expected software name \"%s\" and version \"%s\" " + "found in JMX metrics\n", + exp_swname, exp_swversion); #endif /* !_WIN32 */ } -static void do_test_swname (const char *broker, - const char *swname, const char *swversion, - const char *exp_swname, const char *exp_swversion) { +static void do_test_swname(const char *broker, + const char *swname, + const char *swversion, + const char *exp_swname, + const char *exp_swversion) { rd_kafka_t *rk; rd_kafka_conf_t *conf; const rd_kafka_metadata_t *md; rd_kafka_resp_err_t err; - TEST_SAY(_C_MAG "[ Test client.software.name=%s, " + TEST_SAY(_C_MAG + "[ Test client.software.name=%s, " "client.software.version=%s ]\n", swname ? swname : "NULL", swversion ? 
swversion : "NULL"); @@ -101,12 +108,13 @@ static void do_test_swname (const char *broker, rd_kafka_destroy(rk); - TEST_SAY(_C_GRN "[ Test client.software.name=%s, " + TEST_SAY(_C_GRN + "[ Test client.software.name=%s, " "client.software.version=%s: PASS ]\n", swname ? swname : "NULL", swversion ? swversion : "NULL"); } -int main_0016_client_swname (int argc, char **argv) { +int main_0016_client_swname(int argc, char **argv) { const char *broker; const char *kafka_path; const char *jmx_port; @@ -115,16 +123,19 @@ int main_0016_client_swname (int argc, char **argv) { /* If available, use the Kafka JmxTool to query software name * in broker JMX metrics */ if (!(broker = test_getenv("BROKER_ADDRESS_2", NULL))) - reason = "Env var BROKER_ADDRESS_2 missing " - "(not running in trivup or trivup too old?)"; - else if (test_broker_version < TEST_BRKVER(2,5,0,0)) - reason = "Client software JMX metrics not exposed prior to " - "Apache Kafka 2.5.0.0"; + reason = + "Env var BROKER_ADDRESS_2 missing " + "(not running in trivup or trivup too old?)"; + else if (test_broker_version < TEST_BRKVER(2, 5, 0, 0)) + reason = + "Client software JMX metrics not exposed prior to " + "Apache Kafka 2.5.0.0"; else if (!(kafka_path = test_getenv("KAFKA_PATH", NULL))) reason = "Env var KAFKA_PATH missing (not running in trivup?)"; else if (!(jmx_port = test_getenv("BROKER_JMX_PORT_2", NULL))) - reason = "Env var BROKER_JMX_PORT_2 missing " - "(not running in trivup or trivup too old?)"; + reason = + "Env var BROKER_JMX_PORT_2 missing " + "(not running in trivup or trivup too old?)"; else rd_snprintf(jmx_cmd, sizeof(jmx_cmd), "%s/bin/kafka-run-class.sh kafka.tools.JmxTool " @@ -144,14 +155,12 @@ int main_0016_client_swname (int argc, char **argv) { * protocol safe. 
*/ do_test_swname(broker, NULL, NULL, "librdkafka", NULL); /* Properly formatted */ - do_test_swname(broker, - "my-little-version", "1.2.3.4", + do_test_swname(broker, "my-little-version", "1.2.3.4", "my-little-version", "1.2.3.4"); - /* Containing invalid characters, verify that safing the strings works */ - do_test_swname(broker, - "?1?this needs! ESCAPING?", "--v99.11 ~b~", + /* Containing invalid characters, verify that safing the strings works + */ + do_test_swname(broker, "?1?this needs! ESCAPING?", "--v99.11 ~b~", "1-this-needs--ESCAPING", "v99.11--b"); return 0; } - diff --git a/tests/0017-compression.c b/tests/0017-compression.c index 2ccb2b65f2..f28f63f244 100644 --- a/tests/0017-compression.c +++ b/tests/0017-compression.c @@ -1,50 +1,50 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015, Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2015, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ #include "test.h" /* Typical include path would be , but this program -* is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ + * is built from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" /* for Kafka driver */ /** -* Basic compression tests, with rather lacking verification. -*/ + * Basic compression tests, with rather lacking verification. + */ int main_0017_compression(int argc, char **argv) { rd_kafka_t *rk_p, *rk_c; const int msg_cnt = 1000; - int msg_base = 0; + int msg_base = 0; uint64_t testid; #define CODEC_CNT 5 - const char *codecs[CODEC_CNT+1] = { + const char *codecs[CODEC_CNT + 1] = { "none", #if WITH_ZLIB "gzip", @@ -67,26 +67,26 @@ int main_0017_compression(int argc, char **argv) { /* Produce messages */ rk_p = test_create_producer(); - for (i = 0; codecs[i] != NULL ; i++) { + for (i = 0; codecs[i] != NULL; i++) { rd_kafka_topic_t *rkt_p; topics[i] = rd_strdup(test_mk_topic_name(codecs[i], 1)); - TEST_SAY("Produce %d messages with %s compression to " - "topic %s\n", - msg_cnt, codecs[i], topics[i]); - rkt_p = test_create_producer_topic(rk_p, topics[i], - "compression.codec", codecs[i], NULL); + TEST_SAY( + "Produce %d messages with %s compression to " + "topic %s\n", + msg_cnt, codecs[i], topics[i]); + rkt_p = test_create_producer_topic( + rk_p, topics[i], "compression.codec", codecs[i], NULL); /* Produce small message that will not decrease with * compression (issue #781) */ test_produce_msgs(rk_p, rkt_p, testid, partition, - msg_base + (partition*msg_cnt), 1, - NULL, 5); + msg_base + (partition * msg_cnt), 1, NULL, 5); /* Produce standard sized messages */ test_produce_msgs(rk_p, rkt_p, testid, partition, - msg_base + (partition*msg_cnt) + 1, msg_cnt-1, - NULL, 512); + msg_base + (partition * msg_cnt) + 1, + msg_cnt - 1, NULL, 512); rd_kafka_topic_destroy(rkt_p); } @@ -97,8 +97,8 @@ int main_0017_compression(int argc, char **argv) { 
test_timeout_set(30); /* Consume messages: Without and with CRC checking */ - for (crc = 0 ; crc < 2 ; crc++) { - const char *crc_tof = crc ? "true":"false"; + for (crc = 0; crc < 2; crc++) { + const char *crc_tof = crc ? "true" : "false"; rd_kafka_conf_t *conf; test_conf_init(&conf, NULL, 0); @@ -106,10 +106,9 @@ int main_0017_compression(int argc, char **argv) { rk_c = test_create_consumer(NULL, NULL, conf, NULL); - for (i = 0; codecs[i] != NULL ; i++) { - rd_kafka_topic_t *rkt_c = rd_kafka_topic_new(rk_c, - topics[i], - NULL); + for (i = 0; codecs[i] != NULL; i++) { + rd_kafka_topic_t *rkt_c = + rd_kafka_topic_new(rk_c, topics[i], NULL); TEST_SAY("Consume %d messages from topic %s (crc=%s)\n", msg_cnt, topics[i], crc_tof); @@ -119,14 +118,13 @@ int main_0017_compression(int argc, char **argv) { /* Consume messages */ test_consume_msgs( - codecs[i], rkt_c, testid, partition, - /* Use offset 0 here, which is wrong, should - * be TEST_NO_SEEK, but it exposed a bug - * where the Offset query was postponed - * till after the seek, causing messages - * to be replayed. */ - 0, - msg_base, msg_cnt, 1 /* parse format */); + codecs[i], rkt_c, testid, partition, + /* Use offset 0 here, which is wrong, should + * be TEST_NO_SEEK, but it exposed a bug + * where the Offset query was postponed + * till after the seek, causing messages + * to be replayed. */ + 0, msg_base, msg_cnt, 1 /* parse format */); test_consumer_stop(codecs[i], rkt_c, partition); @@ -136,7 +134,7 @@ int main_0017_compression(int argc, char **argv) { rd_kafka_destroy(rk_c); } - for (i = 0 ; codecs[i] != NULL ; i++) + for (i = 0; codecs[i] != NULL; i++) rd_free(topics[i]); diff --git a/tests/0018-cgrp_term.c b/tests/0018-cgrp_term.c index 7edd177887..181fd7b00a 100644 --- a/tests/0018-cgrp_term.c +++ b/tests/0018-cgrp_term.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -41,190 +41,191 @@ */ -static int assign_cnt = 0; +static int assign_cnt = 0; static int consumed_msg_cnt = 0; -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque) { char *memberid = rd_kafka_memberid(rk); - TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n", - rd_kafka_name(rk), memberid, rd_kafka_err2str(err)); + TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n", + rd_kafka_name(rk), memberid, rd_kafka_err2str(err)); if (memberid) free(memberid); - test_print_partition_list(partitions); - - switch (err) - { - case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: - assign_cnt++; - rd_kafka_assign(rk, partitions); - break; - - case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: - if (assign_cnt == 0) - TEST_FAIL("asymetric rebalance_cb\n"); - assign_cnt--; - rd_kafka_assign(rk, NULL); - break; - - default: - TEST_FAIL("rebalance failed: %s\n", - rd_kafka_err2str(err)); - break; - } + test_print_partition_list(partitions); + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + assign_cnt++; + rd_kafka_assign(rk, partitions); + break; + + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + if (assign_cnt == 0) + TEST_FAIL("asymetric rebalance_cb\n"); + assign_cnt--; + rd_kafka_assign(rk, NULL); + break; + + default: + TEST_FAIL("rebalance failed: %s\n", rd_kafka_err2str(err)); + break; + } } -static void consume_all (rd_kafka_t **rk_c, int rk_cnt, int exp_msg_cnt, - int max_time/*ms*/) { - int64_t ts_start = test_clock(); - int i; - - max_time *= 1000; - while (ts_start + max_time > test_clock()) { - for (i = 0 ; i < rk_cnt ; i++) { - rd_kafka_message_t *rkmsg; - - if (!rk_c[i]) - continue; - - rkmsg = rd_kafka_consumer_poll(rk_c[i], 500); - - if 
(!rkmsg) - continue; - else if (rkmsg->err) - TEST_SAY("Message error " - "(at offset %"PRId64" after " - "%d/%d messages and %dms): %s\n", - rkmsg->offset, - consumed_msg_cnt, exp_msg_cnt, - (int)(test_clock() - ts_start)/1000, - rd_kafka_message_errstr(rkmsg)); - else - consumed_msg_cnt++; - - rd_kafka_message_destroy(rkmsg); - - if (consumed_msg_cnt >= exp_msg_cnt) { - static int once = 0; - if (!once++) - TEST_SAY("All messages consumed\n"); - return; - } - } - } +static void consume_all(rd_kafka_t **rk_c, + int rk_cnt, + int exp_msg_cnt, + int max_time /*ms*/) { + int64_t ts_start = test_clock(); + int i; + + max_time *= 1000; + while (ts_start + max_time > test_clock()) { + for (i = 0; i < rk_cnt; i++) { + rd_kafka_message_t *rkmsg; + + if (!rk_c[i]) + continue; + + rkmsg = rd_kafka_consumer_poll(rk_c[i], 500); + + if (!rkmsg) + continue; + else if (rkmsg->err) + TEST_SAY( + "Message error " + "(at offset %" PRId64 + " after " + "%d/%d messages and %dms): %s\n", + rkmsg->offset, consumed_msg_cnt, + exp_msg_cnt, + (int)(test_clock() - ts_start) / 1000, + rd_kafka_message_errstr(rkmsg)); + else + consumed_msg_cnt++; + + rd_kafka_message_destroy(rkmsg); + + if (consumed_msg_cnt >= exp_msg_cnt) { + static int once = 0; + if (!once++) + TEST_SAY("All messages consumed\n"); + return; + } + } + } } -int main_0018_cgrp_term (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); +int main_0018_cgrp_term(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); #define _CONS_CNT 2 - rd_kafka_t *rk_p, *rk_c[_CONS_CNT]; + rd_kafka_t *rk_p, *rk_c[_CONS_CNT]; rd_kafka_topic_t *rkt_p; - int msg_cnt = test_quick ? 100 : 1000; - int msg_base = 0; + int msg_cnt = test_quick ? 
100 : 1000; + int msg_base = 0; int partition_cnt = 2; int partition; - uint64_t testid; + uint64_t testid; rd_kafka_topic_conf_t *default_topic_conf; - rd_kafka_topic_partition_list_t *topics; - rd_kafka_resp_err_t err; - test_timing_t t_assign, t_consume; - char errstr[512]; - int i; + rd_kafka_topic_partition_list_t *topics; + rd_kafka_resp_err_t err; + test_timing_t t_assign, t_consume; + char errstr[512]; + int i; - testid = test_id_generate(); + testid = test_id_generate(); - /* Produce messages */ - rk_p = test_create_producer(); - rkt_p = test_create_producer_topic(rk_p, topic, NULL); + /* Produce messages */ + rk_p = test_create_producer(); + rkt_p = test_create_producer_topic(rk_p, topic, NULL); - for (partition = 0 ; partition < partition_cnt ; partition++) { + for (partition = 0; partition < partition_cnt; partition++) { test_produce_msgs(rk_p, rkt_p, testid, partition, - msg_base+(partition*msg_cnt), msg_cnt, - NULL, 0); + msg_base + (partition * msg_cnt), msg_cnt, + NULL, 0); } - rd_kafka_topic_destroy(rkt_p); - rd_kafka_destroy(rk_p); + rd_kafka_topic_destroy(rkt_p); + rd_kafka_destroy(rk_p); test_conf_init(NULL, &default_topic_conf, 5 + ((test_session_timeout_ms * 3) / 1000)); if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset", - "smallest", errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) - TEST_FAIL("%s\n", errstr); - - /* Fill in topic subscription set */ - topics = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(topics, topic, -1); - - /* Create consumers and start subscription */ - for (i = 0 ; i < _CONS_CNT ; i++) { - rk_c[i] = test_create_consumer(topic/*group_id*/, - rebalance_cb, NULL, - rd_kafka_topic_conf_dup( - default_topic_conf)); - - err = rd_kafka_poll_set_consumer(rk_c[i]); - if (err) - TEST_FAIL("poll_set_consumer: %s\n", - rd_kafka_err2str(err)); - - err = rd_kafka_subscribe(rk_c[i], topics); - if (err) - TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err)); - } + "smallest", errstr, + 
sizeof(errstr)) != RD_KAFKA_CONF_OK) + TEST_FAIL("%s\n", errstr); + + /* Fill in topic subscription set */ + topics = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(topics, topic, -1); + + /* Create consumers and start subscription */ + for (i = 0; i < _CONS_CNT; i++) { + rk_c[i] = test_create_consumer( + topic /*group_id*/, rebalance_cb, NULL, + rd_kafka_topic_conf_dup(default_topic_conf)); + + err = rd_kafka_poll_set_consumer(rk_c[i]); + if (err) + TEST_FAIL("poll_set_consumer: %s\n", + rd_kafka_err2str(err)); + + err = rd_kafka_subscribe(rk_c[i], topics); + if (err) + TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err)); + } rd_kafka_topic_conf_destroy(default_topic_conf); rd_kafka_topic_partition_list_destroy(topics); - /* Wait for both consumers to get an assignment */ + /* Wait for both consumers to get an assignment */ TEST_SAY("Awaiting assignments for %d consumer(s)\n", _CONS_CNT); - TIMING_START(&t_assign, "WAIT.ASSIGN"); - while (assign_cnt < _CONS_CNT) - consume_all(rk_c, _CONS_CNT, msg_cnt, - test_session_timeout_ms + 3000); - TIMING_STOP(&t_assign); - - /* Now close one of the consumers, this will cause a rebalance. */ - TEST_SAY("Closing down 1/%d consumer(s): %s\n", _CONS_CNT, - rd_kafka_name(rk_c[0])); - err = rd_kafka_consumer_close(rk_c[0]); - if (err) - TEST_FAIL("consumer_close failed: %s\n", rd_kafka_err2str(err)); - rd_kafka_destroy(rk_c[0]); - rk_c[0] = NULL; - - /* Let remaining consumers run for a while to take over the now - * lost partitions. 
*/ - - if (assign_cnt != _CONS_CNT-1) - TEST_FAIL("assign_cnt %d, should be %d\n", - assign_cnt, _CONS_CNT-1); - - TIMING_START(&t_consume, "CONSUME.WAIT"); - consume_all(rk_c, _CONS_CNT, msg_cnt, test_session_timeout_ms + 3000); - TIMING_STOP(&t_consume); - - TEST_SAY("Closing remaining consumers\n"); - for (i = 0 ; i < _CONS_CNT ; i++) { - test_timing_t t_close; + TIMING_START(&t_assign, "WAIT.ASSIGN"); + while (assign_cnt < _CONS_CNT) + consume_all(rk_c, _CONS_CNT, msg_cnt, + test_session_timeout_ms + 3000); + TIMING_STOP(&t_assign); + + /* Now close one of the consumers, this will cause a rebalance. */ + TEST_SAY("Closing down 1/%d consumer(s): %s\n", _CONS_CNT, + rd_kafka_name(rk_c[0])); + err = rd_kafka_consumer_close(rk_c[0]); + if (err) + TEST_FAIL("consumer_close failed: %s\n", rd_kafka_err2str(err)); + rd_kafka_destroy(rk_c[0]); + rk_c[0] = NULL; + + /* Let remaining consumers run for a while to take over the now + * lost partitions. */ + + if (assign_cnt != _CONS_CNT - 1) + TEST_FAIL("assign_cnt %d, should be %d\n", assign_cnt, + _CONS_CNT - 1); + + TIMING_START(&t_consume, "CONSUME.WAIT"); + consume_all(rk_c, _CONS_CNT, msg_cnt, test_session_timeout_ms + 3000); + TIMING_STOP(&t_consume); + + TEST_SAY("Closing remaining consumers\n"); + for (i = 0; i < _CONS_CNT; i++) { + test_timing_t t_close; rd_kafka_topic_partition_list_t *sub; int j; - if (!rk_c[i]) - continue; + if (!rk_c[i]) + continue; /* Query subscription */ err = rd_kafka_subscription(rk_c[i], &sub); @@ -232,9 +233,9 @@ int main_0018_cgrp_term (int argc, char **argv) { TEST_FAIL("%s: subscription() failed: %s\n", rd_kafka_name(rk_c[i]), rd_kafka_err2str(err)); - TEST_SAY("%s: subscription (%d):\n", - rd_kafka_name(rk_c[i]), sub->cnt); - for (j = 0 ; j < sub->cnt ; j++) + TEST_SAY("%s: subscription (%d):\n", rd_kafka_name(rk_c[i]), + sub->cnt); + for (j = 0; j < sub->cnt; j++) TEST_SAY(" %s\n", sub->elems[j].topic); rd_kafka_topic_partition_list_destroy(sub); @@ -247,26 +248,27 @@ int 
main_0018_cgrp_term (int argc, char **argv) { rd_kafka_name(rk_c[i]), rd_kafka_err2str(err)); - TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i])); - TIMING_START(&t_close, "CONSUMER.CLOSE"); - err = rd_kafka_consumer_close(rk_c[i]); - TIMING_STOP(&t_close); - if (err) - TEST_FAIL("consumer_close failed: %s\n", - rd_kafka_err2str(err)); - - rd_kafka_destroy(rk_c[i]); - rk_c[i] = NULL; - } - - TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, msg_cnt); - if (consumed_msg_cnt < msg_cnt) - TEST_FAIL("Only %d/%d messages were consumed\n", - consumed_msg_cnt, msg_cnt); - else if (consumed_msg_cnt > msg_cnt) - TEST_SAY("At least %d/%d messages were consumed " - "multiple times\n", - consumed_msg_cnt - msg_cnt, msg_cnt); - - return 0; + TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i])); + TIMING_START(&t_close, "CONSUMER.CLOSE"); + err = rd_kafka_consumer_close(rk_c[i]); + TIMING_STOP(&t_close); + if (err) + TEST_FAIL("consumer_close failed: %s\n", + rd_kafka_err2str(err)); + + rd_kafka_destroy(rk_c[i]); + rk_c[i] = NULL; + } + + TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, msg_cnt); + if (consumed_msg_cnt < msg_cnt) + TEST_FAIL("Only %d/%d messages were consumed\n", + consumed_msg_cnt, msg_cnt); + else if (consumed_msg_cnt > msg_cnt) + TEST_SAY( + "At least %d/%d messages were consumed " + "multiple times\n", + consumed_msg_cnt - msg_cnt, msg_cnt); + + return 0; } diff --git a/tests/0019-list_groups.c b/tests/0019-list_groups.c index a1ddc6c559..ba982edcf7 100644 --- a/tests/0019-list_groups.c +++ b/tests/0019-list_groups.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -45,30 +45,32 @@ * Verify that all groups in 'groups' are seen, if so returns group_cnt, * else returns -1. 
*/ -static int verify_groups (const struct rd_kafka_group_list *grplist, - char **groups, int group_cnt) { +static int verify_groups(const struct rd_kafka_group_list *grplist, + char **groups, + int group_cnt) { int i; int seen = 0; - for (i = 0 ; i < grplist->group_cnt ; i++) { + for (i = 0; i < grplist->group_cnt; i++) { const struct rd_kafka_group_info *gi = &grplist->groups[i]; int j; - for (j = 0 ; j < group_cnt ; j++) { + for (j = 0; j < group_cnt; j++) { if (strcmp(gi->group, groups[j])) continue; if (gi->err) - TEST_SAY("Group %s has broker-reported " - "error: %s\n", gi->group, - rd_kafka_err2str(gi->err)); + TEST_SAY( + "Group %s has broker-reported " + "error: %s\n", + gi->group, rd_kafka_err2str(gi->err)); seen++; } } - TEST_SAY("Found %d/%d desired groups in list of %d groups\n", - seen, group_cnt, grplist->group_cnt); + TEST_SAY("Found %d/%d desired groups in list of %d groups\n", seen, + group_cnt, grplist->group_cnt); if (seen != group_cnt) return -1; @@ -85,31 +87,31 @@ static int verify_groups (const struct rd_kafka_group_list *grplist, * Returns 'group_cnt' if all groups in 'groups' were seen by both * methods, else 0, or -1 on error. */ -static int list_groups (rd_kafka_t *rk, char **groups, int group_cnt, - const char *desc) { +static int +list_groups(rd_kafka_t *rk, char **groups, int group_cnt, const char *desc) { rd_kafka_resp_err_t err = 0; const struct rd_kafka_group_list *grplist; int i, r; - int fails = 0; - int seen = 0; + int fails = 0; + int seen = 0; int seen_all = 0; - int retries = 5; + int retries = 5; TEST_SAY("List groups (expect %d): %s\n", group_cnt, desc); - /* FIXME: Wait for broker to come up. This should really be abstracted - * by librdkafka. */ - do { - if (err) { - TEST_SAY("Retrying group list in 1s because of: %s\n", - rd_kafka_err2str(err)); - rd_sleep(1); - } - err = rd_kafka_list_groups(rk, NULL, &grplist, + /* FIXME: Wait for broker to come up. This should really be abstracted + * by librdkafka. 
*/ + do { + if (err) { + TEST_SAY("Retrying group list in 1s because of: %s\n", + rd_kafka_err2str(err)); + rd_sleep(1); + } + err = rd_kafka_list_groups(rk, NULL, &grplist, tmout_multip(5000)); - } while ((err == RD_KAFKA_RESP_ERR__TRANSPORT || - err == RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS) && - retries-- > 0); + } while ((err == RD_KAFKA_RESP_ERR__TRANSPORT || + err == RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS) && + retries-- > 0); if (err) { TEST_SAY("Failed to list all groups: %s\n", @@ -120,11 +122,11 @@ static int list_groups (rd_kafka_t *rk, char **groups, int group_cnt, seen_all = verify_groups(grplist, groups, group_cnt); rd_kafka_group_list_destroy(grplist); - for (i = 0 ; i < group_cnt ; i++) { + for (i = 0; i < group_cnt; i++) { err = rd_kafka_list_groups(rk, groups[i], &grplist, 5000); if (err) { - TEST_SAY("Failed to list group %s: %s\n", - groups[i], rd_kafka_err2str(err)); + TEST_SAY("Failed to list group %s: %s\n", groups[i], + rd_kafka_err2str(err)); fails++; continue; } @@ -144,53 +146,54 @@ static int list_groups (rd_kafka_t *rk, char **groups, int group_cnt, -int main_0019_list_groups (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); +int main_0019_list_groups(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); #define _CONS_CNT 2 char *groups[_CONS_CNT]; - rd_kafka_t *rk, *rk_c[_CONS_CNT]; - rd_kafka_topic_partition_list_t *topics; - rd_kafka_resp_err_t err; + rd_kafka_t *rk, *rk_c[_CONS_CNT]; + rd_kafka_topic_partition_list_t *topics; + rd_kafka_resp_err_t err; test_timing_t t_grps; - int i; + int i; int groups_seen; - rd_kafka_topic_t *rkt; + rd_kafka_topic_t *rkt; const struct rd_kafka_group_list *grplist; /* Handle for group listings */ rk = test_create_producer(); - /* Produce messages so that topic is auto created */ - rkt = test_create_topic_object(rk, topic, NULL); - test_produce_msgs(rk, rkt, 0, 0, 0, 10, NULL, 64); - rd_kafka_topic_destroy(rkt); + /* Produce 
messages so that topic is auto created */ + rkt = test_create_topic_object(rk, topic, NULL); + test_produce_msgs(rk, rkt, 0, 0, 0, 10, NULL, 64); + rd_kafka_topic_destroy(rkt); /* Query groups before creation, should not list our groups. */ groups_seen = list_groups(rk, NULL, 0, "should be none"); if (groups_seen != 0) - TEST_FAIL("Saw %d groups when there wasn't " - "supposed to be any\n", groups_seen); + TEST_FAIL( + "Saw %d groups when there wasn't " + "supposed to be any\n", + groups_seen); - /* Fill in topic subscription set */ - topics = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(topics, topic, -1); + /* Fill in topic subscription set */ + topics = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(topics, topic, -1); - /* Create consumers and start subscription */ - for (i = 0 ; i < _CONS_CNT ; i++) { + /* Create consumers and start subscription */ + for (i = 0; i < _CONS_CNT; i++) { groups[i] = malloc(32); test_str_id_generate(groups[i], 32); - rk_c[i] = test_create_consumer(groups[i], - NULL, NULL, NULL); + rk_c[i] = test_create_consumer(groups[i], NULL, NULL, NULL); - err = rd_kafka_poll_set_consumer(rk_c[i]); - if (err) - TEST_FAIL("poll_set_consumer: %s\n", - rd_kafka_err2str(err)); + err = rd_kafka_poll_set_consumer(rk_c[i]); + if (err) + TEST_FAIL("poll_set_consumer: %s\n", + rd_kafka_err2str(err)); - err = rd_kafka_subscribe(rk_c[i], topics); - if (err) - TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err)); - } + err = rd_kafka_subscribe(rk_c[i], topics); + if (err) + TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err)); + } rd_kafka_topic_partition_list_destroy(topics); @@ -199,7 +202,7 @@ int main_0019_list_groups (int argc, char **argv) { /* Query groups again until both groups are seen. 
*/ while (1) { groups_seen = list_groups(rk, (char **)groups, _CONS_CNT, - "should see my groups"); + "should see my groups"); if (groups_seen == _CONS_CNT) break; rd_sleep(1); @@ -215,28 +218,29 @@ int main_0019_list_groups (int argc, char **argv) { grplist ? grplist->group_cnt : -1, rd_kafka_err2str(err)); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, "expected list_groups(timeout=0) to fail " - "with timeout, got %s", rd_kafka_err2str(err)); + "with timeout, got %s", + rd_kafka_err2str(err)); - TEST_SAY("Closing remaining consumers\n"); - for (i = 0 ; i < _CONS_CNT ; i++) { - test_timing_t t_close; - if (!rk_c[i]) - continue; + TEST_SAY("Closing remaining consumers\n"); + for (i = 0; i < _CONS_CNT; i++) { + test_timing_t t_close; + if (!rk_c[i]) + continue; - TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i])); - TIMING_START(&t_close, "CONSUMER.CLOSE"); - err = rd_kafka_consumer_close(rk_c[i]); - TIMING_STOP(&t_close); - if (err) - TEST_FAIL("consumer_close failed: %s\n", - rd_kafka_err2str(err)); + TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i])); + TIMING_START(&t_close, "CONSUMER.CLOSE"); + err = rd_kafka_consumer_close(rk_c[i]); + TIMING_STOP(&t_close); + if (err) + TEST_FAIL("consumer_close failed: %s\n", + rd_kafka_err2str(err)); - rd_kafka_destroy(rk_c[i]); - rk_c[i] = NULL; + rd_kafka_destroy(rk_c[i]); + rk_c[i] = NULL; free(groups[i]); - } + } rd_kafka_destroy(rk); diff --git a/tests/0020-destroy_hang.c b/tests/0020-destroy_hang.c index 332f6274a1..a8a6552fa8 100644 --- a/tests/0020-destroy_hang.c +++ b/tests/0020-destroy_hang.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -39,90 +39,90 @@ - /** * Request offset for nonexisting partition. * Will cause rd_kafka_destroy() to hang. 
*/ -static int nonexist_part (void) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - rd_kafka_t *rk; - rd_kafka_topic_partition_list_t *parts; - rd_kafka_resp_err_t err; +static int nonexist_part(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_t *rk; + rd_kafka_topic_partition_list_t *parts; + rd_kafka_resp_err_t err; test_timing_t t_pos; const int msgcnt = 100; uint64_t testid; int i; - int it, iterations = 5; + int it, iterations = 5; /* Produce messages */ - testid = test_produce_msgs_easy(topic, 0, - RD_KAFKA_PARTITION_UA, msgcnt); + testid = + test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt); - for (it = 0 ; it < iterations ; it++) { - char group_id[32]; + for (it = 0; it < iterations; it++) { + char group_id[32]; test_conf_init(NULL, NULL, 15); - test_str_id_generate(group_id, sizeof(group_id)); - - TEST_SAY("Iteration %d/%d, using group.id %s\n", it, iterations, - group_id); - - /* Consume messages */ - test_consume_msgs_easy(group_id, topic, testid, -1, - msgcnt, NULL); - - /* - * Now start a new consumer and query stored offsets (positions) - */ - - rk = test_create_consumer(group_id, NULL, NULL, NULL); - - /* Fill in partition set */ - parts = rd_kafka_topic_partition_list_new(2); - /* existing */ - rd_kafka_topic_partition_list_add(parts, topic, 0); - /* non-existing */ - rd_kafka_topic_partition_list_add(parts, topic, 123); - - - TIMING_START(&t_pos, "COMMITTED"); - err = rd_kafka_committed(rk, parts, tmout_multip(5000)); - TIMING_STOP(&t_pos); - if (err) - TEST_FAIL("Failed to acquire committed offsets: %s\n", - rd_kafka_err2str(err)); - - for (i = 0 ; i < parts->cnt ; i++) { - TEST_SAY("%s [%"PRId32"] returned offset %"PRId64 - ": %s\n", - parts->elems[i].topic, - parts->elems[i].partition, - parts->elems[i].offset, - rd_kafka_err2str(parts->elems[i].err)); - if (parts->elems[i].partition == 0 && - parts->elems[i].offset <= 0) - TEST_FAIL("Partition %"PRId32" should have a " - "proper offset, 
not %"PRId64"\n", - parts->elems[i].partition, - parts->elems[i].offset); - else if (parts->elems[i].partition == 123 && - parts->elems[i].offset != - RD_KAFKA_OFFSET_INVALID) - TEST_FAIL("Partition %"PRId32 - " should have failed\n", - parts->elems[i].partition); - } - - rd_kafka_topic_partition_list_destroy(parts); - - test_consumer_close(rk); - - /* Hangs if bug isn't fixed */ - rd_kafka_destroy(rk); - } + test_str_id_generate(group_id, sizeof(group_id)); + + TEST_SAY("Iteration %d/%d, using group.id %s\n", it, iterations, + group_id); + + /* Consume messages */ + test_consume_msgs_easy(group_id, topic, testid, -1, msgcnt, + NULL); + + /* + * Now start a new consumer and query stored offsets (positions) + */ + + rk = test_create_consumer(group_id, NULL, NULL, NULL); + + /* Fill in partition set */ + parts = rd_kafka_topic_partition_list_new(2); + /* existing */ + rd_kafka_topic_partition_list_add(parts, topic, 0); + /* non-existing */ + rd_kafka_topic_partition_list_add(parts, topic, 123); + + + TIMING_START(&t_pos, "COMMITTED"); + err = rd_kafka_committed(rk, parts, tmout_multip(5000)); + TIMING_STOP(&t_pos); + if (err) + TEST_FAIL("Failed to acquire committed offsets: %s\n", + rd_kafka_err2str(err)); + + for (i = 0; i < parts->cnt; i++) { + TEST_SAY("%s [%" PRId32 "] returned offset %" PRId64 + ": %s\n", + parts->elems[i].topic, + parts->elems[i].partition, + parts->elems[i].offset, + rd_kafka_err2str(parts->elems[i].err)); + if (parts->elems[i].partition == 0 && + parts->elems[i].offset <= 0) + TEST_FAIL("Partition %" PRId32 + " should have a " + "proper offset, not %" PRId64 "\n", + parts->elems[i].partition, + parts->elems[i].offset); + else if (parts->elems[i].partition == 123 && + parts->elems[i].offset != + RD_KAFKA_OFFSET_INVALID) + TEST_FAIL("Partition %" PRId32 + " should have failed\n", + parts->elems[i].partition); + } + + rd_kafka_topic_partition_list_destroy(parts); + + test_consumer_close(rk); + + /* Hangs if bug isn't fixed */ + 
rd_kafka_destroy(rk); + } return 0; } @@ -131,30 +131,30 @@ static int nonexist_part (void) { /** * Issue #691: Producer hangs on destroy if group.id is configured. */ -static int producer_groupid (void) { - rd_kafka_conf_t *conf; - rd_kafka_t *rk; +static int producer_groupid(void) { + rd_kafka_conf_t *conf; + rd_kafka_t *rk; - TEST_SAY("producer_groupid hang test\n"); - test_conf_init(&conf, NULL, 10); + TEST_SAY("producer_groupid hang test\n"); + test_conf_init(&conf, NULL, 10); - test_conf_set(conf, "group.id", "dummy"); + test_conf_set(conf, "group.id", "dummy"); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - TEST_SAY("Destroying producer\n"); - rd_kafka_destroy(rk); + TEST_SAY("Destroying producer\n"); + rd_kafka_destroy(rk); - return 0; + return 0; } -int main_0020_destroy_hang (int argc, char **argv) { +int main_0020_destroy_hang(int argc, char **argv) { int fails = 0; - test_conf_init(NULL, NULL, 30); + test_conf_init(NULL, NULL, 30); - fails += nonexist_part(); - fails += producer_groupid(); + fails += nonexist_part(); + fails += producer_groupid(); if (fails > 0) TEST_FAIL("See %d previous error(s)\n", fails); diff --git a/tests/0021-rkt_destroy.c b/tests/0021-rkt_destroy.c index 3b247bd916..76b4dd16b3 100644 --- a/tests/0021-rkt_destroy.c +++ b/tests/0021-rkt_destroy.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -42,12 +42,9 @@ - - - -int main_0021_rkt_destroy (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 0); - rd_kafka_t *rk; +int main_0021_rkt_destroy(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 0); + rd_kafka_t *rk; rd_kafka_topic_t *rkt; const int msgcnt = 1000; uint64_t testid; @@ -57,12 +54,12 @@ int main_0021_rkt_destroy (int argc, char **argv) { testid = test_id_generate(); - rk = test_create_producer(); - rkt = test_create_producer_topic(rk, topic, NULL); + rk = test_create_producer(); + rkt = test_create_producer_topic(rk, topic, NULL); - test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, - 0, msgcnt, NULL, 0, 0, &remains); + test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, 0, + msgcnt, NULL, 0, 0, &remains); rd_kafka_topic_destroy(rkt); diff --git a/tests/0022-consume_batch.c b/tests/0022-consume_batch.c index fc04967fd2..2298ade2e5 100644 --- a/tests/0022-consume_batch.c +++ b/tests/0022-consume_batch.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -39,14 +39,14 @@ */ -static int do_test_consume_batch (void) { +static int do_test_consume_batch(void) { #define topic_cnt 2 - char *topics[topic_cnt]; + char *topics[topic_cnt]; const int partition_cnt = 2; - rd_kafka_t *rk; + rd_kafka_t *rk; rd_kafka_queue_t *rkq; rd_kafka_topic_t *rkts[topic_cnt]; - rd_kafka_resp_err_t err; + rd_kafka_resp_err_t err; const int msgcnt = test_quick ? 
1000 : 10000; uint64_t testid; int i, p; @@ -56,12 +56,12 @@ static int do_test_consume_batch (void) { testid = test_id_generate(); /* Produce messages */ - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); - for (p = 0 ; p < partition_cnt ; p++) + for (p = 0; p < partition_cnt; p++) test_produce_msgs_easy(topics[i], testid, p, msgcnt / topic_cnt / - partition_cnt); + partition_cnt); } @@ -71,12 +71,10 @@ static int do_test_consume_batch (void) { /* Create generic consume queue */ rkq = rd_kafka_queue_new(rk); - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { /* Create topic object */ - rkts[i] = test_create_topic_object(rk, topics[i], - "auto.offset.reset", - "smallest", - NULL); + rkts[i] = test_create_topic_object( + rk, topics[i], "auto.offset.reset", "smallest", NULL); /* Start consuming each partition and redirect * messages to queue */ @@ -84,9 +82,9 @@ static int do_test_consume_batch (void) { TEST_SAY("Start consuming topic %s partitions 0..%d\n", rd_kafka_topic_name(rkts[i]), partition_cnt); - for (p = 0 ; p < partition_cnt ; p++) { + for (p = 0; p < partition_cnt; p++) { err = rd_kafka_consume_start_queue( - rkts[i], p, RD_KAFKA_OFFSET_BEGINNING, rkq); + rkts[i], p, RD_KAFKA_OFFSET_BEGINNING, rkq); if (err) TEST_FAIL("Failed to start consuming: %s\n", rd_kafka_err2str(err)); @@ -106,8 +104,9 @@ static int do_test_consume_batch (void) { r = rd_kafka_consume_batch_queue(rkq, 1000, rkmessage, 1000); TIMING_STOP(&t_batch); - TEST_SAY("Batch consume iteration #%d: Consumed %"PRIdsz - "/1000 messages\n", batch_cnt, r); + TEST_SAY("Batch consume iteration #%d: Consumed %" PRIdsz + "/1000 messages\n", + batch_cnt, r); if (r == -1) TEST_FAIL("Failed to consume messages: %s\n", @@ -115,7 +114,7 @@ static int do_test_consume_batch (void) { remains -= (int)r; - for (i = 0 ; i < r ; i++) + for (i = 0; i < r; i++) rd_kafka_message_destroy(rkmessage[i]); 
batch_cnt++; @@ -123,8 +122,8 @@ static int do_test_consume_batch (void) { TEST_SAY("Stopping consumer\n"); - for (i = 0 ; i < topic_cnt ; i++) { - for (p = 0 ; p < partition_cnt ; p++) { + for (i = 0; i < topic_cnt; i++) { + for (p = 0; p < partition_cnt; p++) { err = rd_kafka_consume_stop(rkts[i], p); if (err) TEST_FAIL("Failed to stop consuming: %s\n", @@ -144,8 +143,7 @@ static int do_test_consume_batch (void) { - -int main_0022_consume_batch (int argc, char **argv) { +int main_0022_consume_batch(int argc, char **argv) { int fails = 0; fails += do_test_consume_batch(); diff --git a/tests/0025-timers.c b/tests/0025-timers.c index 7d69c2ce75..318fc0a1b4 100644 --- a/tests/0025-timers.c +++ b/tests/0025-timers.c @@ -46,24 +46,25 @@ struct state { struct state state; -static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len, - void *opaque) { +static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) { const int64_t now = test_clock(); /* Fake the first elapsed time since we dont really know how * long rd_kafka_new() takes and at what time the timer is started. */ - const int64_t elapsed = state.ts_last ? - now - state.ts_last : state.interval; + const int64_t elapsed = + state.ts_last ? now - state.ts_last : state.interval; const int64_t overshoot = elapsed - state.interval; - const int wiggleroom_up = (int)((double)state.interval * - (!strcmp(test_mode, "bare") ? 0.2 : 1.0)); - const int wiggleroom_down = (int)((double)state.interval * 0.1); - - TEST_SAY("Call #%d: after %"PRId64"ms, %.0f%% outside " - "interval %"PRId64" >-%d <+%d\n", + const int wiggleroom_up = + (int)((double)state.interval * + (!strcmp(test_mode, "bare") ? 
0.2 : 1.0)); + const int wiggleroom_down = (int)((double)state.interval * 0.1); + + TEST_SAY("Call #%d: after %" PRId64 + "ms, %.0f%% outside " + "interval %" PRId64 " >-%d <+%d\n", state.calls, elapsed / 1000, ((double)overshoot / state.interval) * 100.0, - (int64_t)state.interval / 1000, - wiggleroom_down / 1000, wiggleroom_up / 1000); + (int64_t)state.interval / 1000, wiggleroom_down / 1000, + wiggleroom_up / 1000); if (overshoot < -wiggleroom_down || overshoot > wiggleroom_up) { TEST_WARN("^ outside range\n"); @@ -81,7 +82,7 @@ static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len, * Enable statistics with a set interval, make sure the stats callbacks are * called within reasonable intervals. */ -static void do_test_stats_timer (void) { +static void do_test_stats_timer(void) { rd_kafka_t *rk; rd_kafka_conf_t *conf; const int exp_calls = 10; @@ -89,7 +90,7 @@ static void do_test_stats_timer (void) { memset(&state, 0, sizeof(state)); - state.interval = 600*1000; + state.interval = 600 * 1000; test_conf_init(&conf, NULL, 200); @@ -101,9 +102,10 @@ static void do_test_stats_timer (void) { rk = test_create_handle(RD_KAFKA_CONSUMER, conf); TIMING_STOP(&t_new); - TEST_SAY("Starting wait loop for %d expected stats_cb calls " - "with an interval of %dms\n", - exp_calls, state.interval/1000); + TEST_SAY( + "Starting wait loop for %d expected stats_cb calls " + "with an interval of %dms\n", + exp_calls, state.interval / 1000); while (state.calls < exp_calls) { @@ -112,33 +114,34 @@ static void do_test_stats_timer (void) { rd_kafka_poll(rk, 100); TIMING_STOP(&t_poll); - if (TIMING_DURATION(&t_poll) > 150*1000) - TEST_WARN("rd_kafka_poll(rk,100) " - "took more than 50%% extra\n"); + if (TIMING_DURATION(&t_poll) > 150 * 1000) + TEST_WARN( + "rd_kafka_poll(rk,100) " + "took more than 50%% extra\n"); } rd_kafka_destroy(rk); if (state.calls > exp_calls) - TEST_SAY("Got more calls than expected: %d > %d\n", - state.calls, exp_calls); + TEST_SAY("Got more calls than 
expected: %d > %d\n", state.calls, + exp_calls); if (state.fails) { /* We can't rely on CIs giving our test job enough CPU to finish * in time, so don't error out even if the time is outside * the window */ if (test_on_ci) - TEST_WARN("%d/%d intervals failed\n", - state.fails, state.calls); + TEST_WARN("%d/%d intervals failed\n", state.fails, + state.calls); else - TEST_FAIL("%d/%d intervals failed\n", - state.fails, state.calls); + TEST_FAIL("%d/%d intervals failed\n", state.fails, + state.calls); } else TEST_SAY("All %d intervals okay\n", state.calls); } -int main_0025_timers (int argc, char **argv) { +int main_0025_timers(int argc, char **argv) { do_test_stats_timer(); return 0; } diff --git a/tests/0026-consume_pause.c b/tests/0026-consume_pause.c index 38167dc4ce..09da61e537 100644 --- a/tests/0026-consume_pause.c +++ b/tests/0026-consume_pause.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -40,161 +40,164 @@ -static int consume_pause (void) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); +static int consume_pause(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); const int partition_cnt = 3; - rd_kafka_t *rk; + rd_kafka_t *rk; rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *tconf; - rd_kafka_topic_partition_list_t *topics; - rd_kafka_resp_err_t err; + rd_kafka_topic_conf_t *tconf; + rd_kafka_topic_partition_list_t *topics; + rd_kafka_resp_err_t err; const int msgcnt = 1000; uint64_t testid; - int it, iterations = 3; - int msg_base = 0; - int fails = 0; + int it, iterations = 3; + int msg_base = 0; + int fails = 0; char group_id[32]; test_conf_init(&conf, &tconf, 60 + (test_session_timeout_ms * 3 / 1000)); test_conf_set(conf, "enable.partition.eof", "true"); - test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); + test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); test_create_topic(NULL, topic, partition_cnt, 1); /* Produce messages */ - testid = test_produce_msgs_easy(topic, 0, - RD_KAFKA_PARTITION_UA, msgcnt); + testid = + test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt); - topics = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(topics, topic, -1); + topics = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(topics, topic, -1); - for (it = 0 ; it < iterations ; it++) { - const int pause_cnt = 5; - int per_pause_msg_cnt = msgcnt / pause_cnt; - const int pause_time = 1200 /* 1.2s */; - int eof_cnt = -1; - int pause; - rd_kafka_topic_partition_list_t *parts; - test_msgver_t mv_all; - int j; + for (it = 0; it < iterations; it++) { + const int pause_cnt = 5; + int per_pause_msg_cnt = msgcnt / pause_cnt; + const int pause_time = 1200 /* 1.2s */; + int eof_cnt = -1; + int pause; + rd_kafka_topic_partition_list_t *parts; + test_msgver_t mv_all; + 
int j; - test_msgver_init(&mv_all, testid); /* All messages */ + test_msgver_init(&mv_all, testid); /* All messages */ /* On the last iteration reuse the previous group.id * to make consumer start at committed offsets which should * also be EOF. This to trigger #1307. */ - if (it < iterations-1) + if (it < iterations - 1) test_str_id_generate(group_id, sizeof(group_id)); else { TEST_SAY("Reusing previous group.id %s\n", group_id); per_pause_msg_cnt = 0; - eof_cnt = partition_cnt; + eof_cnt = partition_cnt; } - TEST_SAY("Iteration %d/%d, using group.id %s, " - "expecting %d messages/pause and %d EOFs\n", - it, iterations-1, group_id, - per_pause_msg_cnt, eof_cnt); + TEST_SAY( + "Iteration %d/%d, using group.id %s, " + "expecting %d messages/pause and %d EOFs\n", + it, iterations - 1, group_id, per_pause_msg_cnt, eof_cnt); rk = test_create_consumer(group_id, NULL, rd_kafka_conf_dup(conf), rd_kafka_topic_conf_dup(tconf)); - TEST_SAY("Subscribing to %d topic(s): %s\n", - topics->cnt, topics->elems[0].topic); - if ((err = rd_kafka_subscribe(rk, topics))) - TEST_FAIL("Failed to subscribe: %s\n", - rd_kafka_err2str(err)); - - - for (pause = 0 ; pause < pause_cnt ; pause++) { - int rcnt; - test_timing_t t_assignment; - test_msgver_t mv; - - test_msgver_init(&mv, testid); - mv.fwd = &mv_all; - - /* Consume sub-part of the messages. */ - TEST_SAY("Pause-Iteration #%d: Consume %d messages at " - "msg_base %d\n", pause, per_pause_msg_cnt, - msg_base); - rcnt = test_consumer_poll("consume.part", rk, testid, - eof_cnt, - msg_base, - per_pause_msg_cnt == 0 ? 
- -1 : per_pause_msg_cnt, - &mv); - - TEST_ASSERT(rcnt == per_pause_msg_cnt, - "expected %d messages, got %d", - per_pause_msg_cnt, rcnt); - - test_msgver_verify("pause.iteration", - &mv, TEST_MSGVER_PER_PART, - msg_base, per_pause_msg_cnt); - test_msgver_clear(&mv); - - msg_base += per_pause_msg_cnt; - - TIMING_START(&t_assignment, "rd_kafka_assignment()"); - if ((err = rd_kafka_assignment(rk, &parts))) - TEST_FAIL("failed to get assignment: %s\n", - rd_kafka_err2str(err)); - TIMING_STOP(&t_assignment); - - TEST_ASSERT(parts->cnt > 0, - "parts->cnt %d, expected > 0", parts->cnt); - - TEST_SAY("Now pausing %d partition(s) for %dms\n", - parts->cnt, pause_time); - if ((err = rd_kafka_pause_partitions(rk, parts))) - TEST_FAIL("Failed to pause: %s\n", - rd_kafka_err2str(err)); - - /* Check per-partition errors */ - for (j = 0 ; j < parts->cnt ; j++) { - if (parts->elems[j].err) { - TEST_WARN("pause failure for " - "%s %"PRId32"]: %s\n", - parts->elems[j].topic, - parts->elems[j].partition, - rd_kafka_err2str( - parts->elems[j].err)); - fails++; - } - } - TEST_ASSERT(fails == 0, "See previous warnings\n"); - - TEST_SAY("Waiting for %dms, should not receive any " - "messages during this time\n", pause_time); - - test_consumer_poll_no_msgs("silence.while.paused", - rk, testid, pause_time); - - TEST_SAY("Resuming %d partitions\n", parts->cnt); - if ((err = rd_kafka_resume_partitions(rk, parts))) - TEST_FAIL("Failed to resume: %s\n", - rd_kafka_err2str(err)); - - /* Check per-partition errors */ - for (j = 0 ; j < parts->cnt ; j++) { - if (parts->elems[j].err) { - TEST_WARN("resume failure for " - "%s %"PRId32"]: %s\n", - parts->elems[j].topic, - parts->elems[j].partition, - rd_kafka_err2str( - parts->elems[j].err)); - fails++; - } - } - TEST_ASSERT(fails == 0, "See previous warnings\n"); - - rd_kafka_topic_partition_list_destroy(parts); - } + TEST_SAY("Subscribing to %d topic(s): %s\n", topics->cnt, + topics->elems[0].topic); + if ((err = rd_kafka_subscribe(rk, topics))) 
+ TEST_FAIL("Failed to subscribe: %s\n", + rd_kafka_err2str(err)); + + + for (pause = 0; pause < pause_cnt; pause++) { + int rcnt; + test_timing_t t_assignment; + test_msgver_t mv; + + test_msgver_init(&mv, testid); + mv.fwd = &mv_all; + + /* Consume sub-part of the messages. */ + TEST_SAY( + "Pause-Iteration #%d: Consume %d messages at " + "msg_base %d\n", + pause, per_pause_msg_cnt, msg_base); + rcnt = test_consumer_poll( + "consume.part", rk, testid, eof_cnt, msg_base, + per_pause_msg_cnt == 0 ? -1 : per_pause_msg_cnt, + &mv); + + TEST_ASSERT(rcnt == per_pause_msg_cnt, + "expected %d messages, got %d", + per_pause_msg_cnt, rcnt); + + test_msgver_verify("pause.iteration", &mv, + TEST_MSGVER_PER_PART, msg_base, + per_pause_msg_cnt); + test_msgver_clear(&mv); + + msg_base += per_pause_msg_cnt; + + TIMING_START(&t_assignment, "rd_kafka_assignment()"); + if ((err = rd_kafka_assignment(rk, &parts))) + TEST_FAIL("failed to get assignment: %s\n", + rd_kafka_err2str(err)); + TIMING_STOP(&t_assignment); + + TEST_ASSERT(parts->cnt > 0, + "parts->cnt %d, expected > 0", parts->cnt); + + TEST_SAY("Now pausing %d partition(s) for %dms\n", + parts->cnt, pause_time); + if ((err = rd_kafka_pause_partitions(rk, parts))) + TEST_FAIL("Failed to pause: %s\n", + rd_kafka_err2str(err)); + + /* Check per-partition errors */ + for (j = 0; j < parts->cnt; j++) { + if (parts->elems[j].err) { + TEST_WARN( + "pause failure for " + "%s %" PRId32 "]: %s\n", + parts->elems[j].topic, + parts->elems[j].partition, + rd_kafka_err2str( + parts->elems[j].err)); + fails++; + } + } + TEST_ASSERT(fails == 0, "See previous warnings\n"); + + TEST_SAY( + "Waiting for %dms, should not receive any " + "messages during this time\n", + pause_time); + + test_consumer_poll_no_msgs("silence.while.paused", rk, + testid, pause_time); + + TEST_SAY("Resuming %d partitions\n", parts->cnt); + if ((err = rd_kafka_resume_partitions(rk, parts))) + TEST_FAIL("Failed to resume: %s\n", + rd_kafka_err2str(err)); + + /* Check 
per-partition errors */ + for (j = 0; j < parts->cnt; j++) { + if (parts->elems[j].err) { + TEST_WARN( + "resume failure for " + "%s %" PRId32 "]: %s\n", + parts->elems[j].topic, + parts->elems[j].partition, + rd_kafka_err2str( + parts->elems[j].err)); + fails++; + } + } + TEST_ASSERT(fails == 0, "See previous warnings\n"); + + rd_kafka_topic_partition_list_destroy(parts); + } if (per_pause_msg_cnt > 0) test_msgver_verify("all.msgs", &mv_all, @@ -202,20 +205,20 @@ static int consume_pause (void) { else test_msgver_verify("all.msgs", &mv_all, TEST_MSGVER_ALL_PART, 0, 0); - test_msgver_clear(&mv_all); - - /* Should now not see any more messages. */ - test_consumer_poll_no_msgs("end.exp.no.msgs", rk, testid, 3000); - - test_consumer_close(rk); - - /* Hangs if bug isn't fixed */ - rd_kafka_destroy(rk); - } - - rd_kafka_topic_partition_list_destroy(topics); + test_msgver_clear(&mv_all); + + /* Should now not see any more messages. */ + test_consumer_poll_no_msgs("end.exp.no.msgs", rk, testid, 3000); + + test_consumer_close(rk); + + /* Hangs if bug isn't fixed */ + rd_kafka_destroy(rk); + } + + rd_kafka_topic_partition_list_destroy(topics); rd_kafka_conf_destroy(conf); - rd_kafka_topic_conf_destroy(tconf); + rd_kafka_topic_conf_destroy(tconf); return 0; } @@ -234,10 +237,10 @@ static int consume_pause (void) { * 6. Assign partitions again * 7. 
Verify that consumption starts at N/2 and not N/4 */ -static int consume_pause_resume_after_reassign (void) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); +static int consume_pause_resume_after_reassign(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); const int32_t partition = 0; - const int msgcnt = 4000; + const int msgcnt = 4000; rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_partition_list_t *partitions, *pos; @@ -251,15 +254,15 @@ static int consume_pause_resume_after_reassign (void) { test_conf_init(&conf, NULL, 60); - test_create_topic(NULL, topic, (int)partition+1, 1); + test_create_topic(NULL, topic, (int)partition + 1, 1); /* Produce messages */ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); /* Set start offset to beginning */ partitions = rd_kafka_topic_partition_list_new(1); - toppar = rd_kafka_topic_partition_list_add(partitions, topic, - partition); + toppar = + rd_kafka_topic_partition_list_add(partitions, topic, partition); toppar->offset = RD_KAFKA_OFFSET_BEGINNING; @@ -272,13 +275,13 @@ static int consume_pause_resume_after_reassign (void) { test_consumer_assign("assign", rk, partitions); - exp_msg_cnt = msgcnt/4; + exp_msg_cnt = msgcnt / 4; TEST_SAY("Consuming first quarter (%d) of messages\n", exp_msg_cnt); test_msgver_init(&mv, testid); - r = test_consumer_poll("consume.first.quarter", rk, testid, 0, - msg_base, exp_msg_cnt, &mv); - TEST_ASSERT(r == exp_msg_cnt, - "expected %d messages, got %d", exp_msg_cnt, r); + r = test_consumer_poll("consume.first.quarter", rk, testid, 0, msg_base, + exp_msg_cnt, &mv); + TEST_ASSERT(r == exp_msg_cnt, "expected %d messages, got %d", + exp_msg_cnt, r); TEST_SAY("Pausing partitions\n"); @@ -288,8 +291,8 @@ static int consume_pause_resume_after_reassign (void) { TEST_SAY("Verifying pause, should see no new messages...\n"); test_consumer_poll_no_msgs("silence.while.paused", rk, testid, 3000); - test_msgver_verify("first.quarter", &mv, TEST_MSGVER_ALL_PART, - 
msg_base, exp_msg_cnt); + test_msgver_verify("first.quarter", &mv, TEST_MSGVER_ALL_PART, msg_base, + exp_msg_cnt); test_msgver_clear(&mv); @@ -301,17 +304,18 @@ static int consume_pause_resume_after_reassign (void) { TEST_ASSERT(!pos->elems[0].err, "position() returned error for our partition: %s", rd_kafka_err2str(pos->elems[0].err)); - TEST_SAY("Current application consume position is %"PRId64"\n", + TEST_SAY("Current application consume position is %" PRId64 "\n", pos->elems[0].offset); TEST_ASSERT(pos->elems[0].offset == (int64_t)exp_msg_cnt, - "expected position %"PRId64", not %"PRId64, + "expected position %" PRId64 ", not %" PRId64, (int64_t)exp_msg_cnt, pos->elems[0].offset); rd_kafka_topic_partition_list_destroy(pos); - toppar->offset = (int64_t)(msgcnt/2); - TEST_SAY("Committing (yet unread) offset %"PRId64"\n", toppar->offset); - if ((err = rd_kafka_commit(rk, partitions, 0/*sync*/))) + toppar->offset = (int64_t)(msgcnt / 2); + TEST_SAY("Committing (yet unread) offset %" PRId64 "\n", + toppar->offset); + if ((err = rd_kafka_commit(rk, partitions, 0 /*sync*/))) TEST_FAIL("Commit failed: %s", rd_kafka_err2str(err)); @@ -330,18 +334,18 @@ static int consume_pause_resume_after_reassign (void) { if ((err = rd_kafka_resume_partitions(rk, partitions))) TEST_FAIL("Failed to resume: %s", rd_kafka_err2str(err)); - msg_base = msgcnt / 2; + msg_base = msgcnt / 2; exp_msg_cnt = msgcnt / 2; TEST_SAY("Consuming second half (%d) of messages at msg_base %d\n", exp_msg_cnt, msg_base); test_msgver_init(&mv, testid); - r = test_consumer_poll("consume.second.half", rk, testid, 1/*exp eof*/, + r = test_consumer_poll("consume.second.half", rk, testid, 1 /*exp eof*/, msg_base, exp_msg_cnt, &mv); - TEST_ASSERT(r == exp_msg_cnt, - "expected %d messages, got %d", exp_msg_cnt, r); + TEST_ASSERT(r == exp_msg_cnt, "expected %d messages, got %d", + exp_msg_cnt, r); - test_msgver_verify("second.half", &mv, TEST_MSGVER_ALL_PART, - msg_base, exp_msg_cnt); + 
test_msgver_verify("second.half", &mv, TEST_MSGVER_ALL_PART, msg_base, + exp_msg_cnt); test_msgver_clear(&mv); @@ -355,14 +359,13 @@ static int consume_pause_resume_after_reassign (void) { } -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { rd_kafka_resp_err_t err2; - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: /* Set start offset to beginning, * while auto.offset.reset is default at `latest`. */ @@ -393,10 +396,10 @@ static void rebalance_cb (rd_kafka_t *rk, * and relying on auto.offset.reset=latest (default) to catch the failure case * where the assigned offset was not honoured. */ -static int consume_subscribe_assign_pause_resume (void) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); +static int consume_subscribe_assign_pause_resume(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); const int32_t partition = 0; - const int msgcnt = 1; + const int msgcnt = 1; rd_kafka_t *rk; rd_kafka_conf_t *conf; uint64_t testid; @@ -407,7 +410,7 @@ static int consume_subscribe_assign_pause_resume (void) { test_conf_init(&conf, NULL, 20); - test_create_topic(NULL, topic, (int)partition+1, 1); + test_create_topic(NULL, topic, (int)partition + 1, 1); /* Produce messages */ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); @@ -423,10 +426,9 @@ static int consume_subscribe_assign_pause_resume (void) { test_consumer_subscribe(rk, topic); test_msgver_init(&mv, testid); - r = test_consumer_poll("consume", rk, testid, 1/*exp eof*/, - 0, msgcnt, &mv); - TEST_ASSERT(r == msgcnt, - "expected %d messages, got %d", msgcnt, r); + r = test_consumer_poll("consume", rk, testid, 1 /*exp eof*/, 0, msgcnt, + &mv); + TEST_ASSERT(r == msgcnt, "expected %d messages, got %d", msgcnt, r); test_msgver_verify("consumed", &mv, 
TEST_MSGVER_ALL_PART, 0, msgcnt); test_msgver_clear(&mv); @@ -440,7 +442,7 @@ static int consume_subscribe_assign_pause_resume (void) { } -int main_0026_consume_pause (int argc, char **argv) { +int main_0026_consume_pause(int argc, char **argv) { int fails = 0; if (test_can_create_topics(1)) { diff --git a/tests/0028-long_topicnames.c b/tests/0028-long_topicnames.c index f0d8d5705b..999d8f135f 100644 --- a/tests/0028-long_topicnames.c +++ b/tests/0028-long_topicnames.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -40,40 +40,40 @@ */ -int main_0028_long_topicnames (int argc, char **argv) { +int main_0028_long_topicnames(int argc, char **argv) { const int msgcnt = 1000; uint64_t testid; - char topic[256]; - rd_kafka_t *rk_c; + char topic[256]; + rd_kafka_t *rk_c; - if (!test_can_create_topics(1)) - return 0; + if (!test_can_create_topics(1)) + return 0; - memset(topic, 'a', sizeof(topic)-1); - topic[sizeof(topic)-1] = '\0'; + memset(topic, 'a', sizeof(topic) - 1); + topic[sizeof(topic) - 1] = '\0'; - strncpy(topic, test_mk_topic_name(topic, 1), sizeof(topic)-1); + strncpy(topic, test_mk_topic_name(topic, 1), sizeof(topic) - 1); - TEST_SAY("Using topic name of %d bytes: %s\n", - (int)strlen(topic), topic); + TEST_SAY("Using topic name of %d bytes: %s\n", (int)strlen(topic), + topic); - /* First try a non-verifying consumer. The consumer has been known - * to crash when the broker bug kicks in. */ - rk_c = test_create_consumer(topic, NULL, NULL, NULL); + /* First try a non-verifying consumer. The consumer has been known + * to crash when the broker bug kicks in. 
*/ + rk_c = test_create_consumer(topic, NULL, NULL, NULL); /* Create topic */ test_create_topic(rk_c, topic, 1, 1); - test_consumer_subscribe(rk_c, topic); - test_consumer_poll_no_msgs("consume.nomsgs", rk_c, 0, 5000); - test_consumer_close(rk_c); + test_consumer_subscribe(rk_c, topic); + test_consumer_poll_no_msgs("consume.nomsgs", rk_c, 0, 5000); + test_consumer_close(rk_c); /* Produce messages */ - testid = test_produce_msgs_easy(topic, 0, - RD_KAFKA_PARTITION_UA, msgcnt); + testid = + test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt); - /* Consume messages */ - test_consume_msgs_easy(NULL, topic, testid, -1, msgcnt, NULL); + /* Consume messages */ + test_consume_msgs_easy(NULL, topic, testid, -1, msgcnt, NULL); return 0; } diff --git a/tests/0029-assign_offset.c b/tests/0029-assign_offset.c index af32947a2b..5b3595baf0 100644 --- a/tests/0029-assign_offset.c +++ b/tests/0029-assign_offset.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -38,159 +38,161 @@ */ -static const int msgcnt = 100; /* per-partition msgcnt */ +static const int msgcnt = 100; /* per-partition msgcnt */ static const int partitions = 4; /* method 1: lower half of partitions use fixed offset * upper half uses END */ -#define REB_METHOD_1 1 +#define REB_METHOD_1 1 /* method 2: first two partitions: fixed offset, * rest: INVALID (== stored == END) * issue #583 */ -#define REB_METHOD_2 2 +#define REB_METHOD_2 2 static int reb_method; -static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, void *opaque){ - int i; - - TEST_SAY("rebalance_cb: %s:\n", rd_kafka_err2str(err)); - test_print_partition_list(parts); - - if (parts->cnt < partitions) - TEST_FAIL("rebalance_cb: Expected %d partitions, not %d", - partitions, parts->cnt); - - switch (err) - { - case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: - for (i = 0 ; i < parts->cnt ; i++) { - if (i >= partitions) { - /* Dont assign() partitions we dont want. 
*/ - rd_kafka_topic_partition_list_del_by_idx(parts, - i); - continue; - } - - if (reb_method == REB_METHOD_1) { - if (i < partitions) - parts->elems[i].offset = msgcnt / 2; - else - parts->elems[i].offset = RD_KAFKA_OFFSET_END; - } else if (reb_method == REB_METHOD_2) { - if (i < 2) - parts->elems[i].offset = msgcnt / 2; - else - parts->elems[i].offset = RD_KAFKA_OFFSET_INVALID; - } - } - TEST_SAY("Use these offsets:\n"); - test_print_partition_list(parts); - test_consumer_assign("HL.REBALANCE", rk, parts); - break; - - case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: - test_consumer_unassign("HL.REBALANCE", rk); - break; - - default: - TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err)); - } +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + int i; + + TEST_SAY("rebalance_cb: %s:\n", rd_kafka_err2str(err)); + test_print_partition_list(parts); + + if (parts->cnt < partitions) + TEST_FAIL("rebalance_cb: Expected %d partitions, not %d", + partitions, parts->cnt); + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + for (i = 0; i < parts->cnt; i++) { + if (i >= partitions) { + /* Dont assign() partitions we dont want. 
*/ + rd_kafka_topic_partition_list_del_by_idx(parts, + i); + continue; + } + + if (reb_method == REB_METHOD_1) { + if (i < partitions) + parts->elems[i].offset = msgcnt / 2; + else + parts->elems[i].offset = + RD_KAFKA_OFFSET_END; + } else if (reb_method == REB_METHOD_2) { + if (i < 2) + parts->elems[i].offset = msgcnt / 2; + else + parts->elems[i].offset = + RD_KAFKA_OFFSET_INVALID; + } + } + TEST_SAY("Use these offsets:\n"); + test_print_partition_list(parts); + test_consumer_assign("HL.REBALANCE", rk, parts); + break; + + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + test_consumer_unassign("HL.REBALANCE", rk); + break; + + default: + TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err)); + } } -int main_0029_assign_offset (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_topic_partition_list_t *parts; +int main_0029_assign_offset(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_topic_partition_list_t *parts; uint64_t testid; - int i; - test_timing_t t_simple, t_hl; - test_msgver_t mv; + int i; + test_timing_t t_simple, t_hl; + test_msgver_t mv; - test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000)); + test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000)); - /* Produce X messages to Y partitions so we get a - * nice seekable 0..X offset one each partition. */ + /* Produce X messages to Y partitions so we get a + * nice seekable 0..X offset one each partition. 
*/ /* Produce messages */ - testid = test_id_generate(); - rk = test_create_producer(); - rkt = test_create_producer_topic(rk, topic, NULL); - - parts = rd_kafka_topic_partition_list_new(partitions); - - for (i = 0 ; i < partitions ; i++) { - test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0); - /* Set start offset */ - rd_kafka_topic_partition_list_add(parts, topic, i)->offset = - msgcnt / 2; - } - - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); - - - /* Simple consumer */ - TIMING_START(&t_simple, "SIMPLE.CONSUMER"); - rk = test_create_consumer(topic, NULL, NULL, NULL); - test_msgver_init(&mv, testid); - test_consumer_assign("SIMPLE.ASSIGN", rk, parts); - test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0, - partitions * (msgcnt / 2), &mv); - for (i = 0 ; i < partitions ; i++) - test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART, - topic, i, msgcnt/2, msgcnt/2); - test_msgver_clear(&mv); - test_consumer_close(rk); - rd_kafka_destroy(rk); - TIMING_STOP(&t_simple); - - rd_kafka_topic_partition_list_destroy(parts); - - - /* High-level consumer: method 1 - * Offsets are set in rebalance callback. */ - if (test_broker_version >= TEST_BRKVER(0,9,0,0)) { - reb_method = REB_METHOD_1; - TIMING_START(&t_hl, "HL.CONSUMER"); - test_msgver_init(&mv, testid); - rk = test_create_consumer(topic, rebalance_cb, NULL, NULL); - test_consumer_subscribe(rk, topic); - test_consumer_poll("HL.CONSUME", rk, testid, -1, 0, - partitions * (msgcnt / 2), &mv); - for (i = 0 ; i < partitions ; i++) - test_msgver_verify_part("HL.MSGS", &mv, - TEST_MSGVER_ALL_PART, - topic, i, msgcnt/2, msgcnt/2); - test_msgver_clear(&mv); - test_consumer_close(rk); - rd_kafka_destroy(rk); - TIMING_STOP(&t_hl); - - - /* High-level consumer: method 2: - * first two partitions are with fixed absolute offset, rest are - * auto offset (stored, which is now at end). - * Offsets are set in rebalance callback. 
*/ - reb_method = REB_METHOD_2; - TIMING_START(&t_hl, "HL.CONSUMER2"); - test_msgver_init(&mv, testid); - rk = test_create_consumer(topic, rebalance_cb, NULL, NULL); - test_consumer_subscribe(rk, topic); - test_consumer_poll("HL.CONSUME2", rk, testid, partitions, 0, - 2 * (msgcnt / 2), &mv); - for (i = 0 ; i < partitions ; i++) { - if (i < 2) - test_msgver_verify_part("HL.MSGS2.A", &mv, - TEST_MSGVER_ALL_PART, - topic, i, msgcnt/2, - msgcnt/2); - } - test_msgver_clear(&mv); - test_consumer_close(rk); - rd_kafka_destroy(rk); - TIMING_STOP(&t_hl); - } + testid = test_id_generate(); + rk = test_create_producer(); + rkt = test_create_producer_topic(rk, topic, NULL); + + parts = rd_kafka_topic_partition_list_new(partitions); + + for (i = 0; i < partitions; i++) { + test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0); + /* Set start offset */ + rd_kafka_topic_partition_list_add(parts, topic, i)->offset = + msgcnt / 2; + } + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); + + + /* Simple consumer */ + TIMING_START(&t_simple, "SIMPLE.CONSUMER"); + rk = test_create_consumer(topic, NULL, NULL, NULL); + test_msgver_init(&mv, testid); + test_consumer_assign("SIMPLE.ASSIGN", rk, parts); + test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0, + partitions * (msgcnt / 2), &mv); + for (i = 0; i < partitions; i++) + test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART, + topic, i, msgcnt / 2, msgcnt / 2); + test_msgver_clear(&mv); + test_consumer_close(rk); + rd_kafka_destroy(rk); + TIMING_STOP(&t_simple); + + rd_kafka_topic_partition_list_destroy(parts); + + + /* High-level consumer: method 1 + * Offsets are set in rebalance callback. 
*/ + if (test_broker_version >= TEST_BRKVER(0, 9, 0, 0)) { + reb_method = REB_METHOD_1; + TIMING_START(&t_hl, "HL.CONSUMER"); + test_msgver_init(&mv, testid); + rk = test_create_consumer(topic, rebalance_cb, NULL, NULL); + test_consumer_subscribe(rk, topic); + test_consumer_poll("HL.CONSUME", rk, testid, -1, 0, + partitions * (msgcnt / 2), &mv); + for (i = 0; i < partitions; i++) + test_msgver_verify_part("HL.MSGS", &mv, + TEST_MSGVER_ALL_PART, topic, i, + msgcnt / 2, msgcnt / 2); + test_msgver_clear(&mv); + test_consumer_close(rk); + rd_kafka_destroy(rk); + TIMING_STOP(&t_hl); + + + /* High-level consumer: method 2: + * first two partitions are with fixed absolute offset, rest are + * auto offset (stored, which is now at end). + * Offsets are set in rebalance callback. */ + reb_method = REB_METHOD_2; + TIMING_START(&t_hl, "HL.CONSUMER2"); + test_msgver_init(&mv, testid); + rk = test_create_consumer(topic, rebalance_cb, NULL, NULL); + test_consumer_subscribe(rk, topic); + test_consumer_poll("HL.CONSUME2", rk, testid, partitions, 0, + 2 * (msgcnt / 2), &mv); + for (i = 0; i < partitions; i++) { + if (i < 2) + test_msgver_verify_part( + "HL.MSGS2.A", &mv, TEST_MSGVER_ALL_PART, + topic, i, msgcnt / 2, msgcnt / 2); + } + test_msgver_clear(&mv); + test_consumer_close(rk); + rd_kafka_destroy(rk); + TIMING_STOP(&t_hl); + } return 0; } diff --git a/tests/0030-offset_commit.c b/tests/0030-offset_commit.c index e4c2987fdd..9b05cb420b 100644 --- a/tests/0030-offset_commit.c +++ b/tests/0030-offset_commit.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -39,112 +39,122 @@ */ static char *topic; -static const int msgcnt = 100; +static const int msgcnt = 100; static const int partition = 0; static uint64_t testid; -static int64_t expected_offset = 0; +static int64_t expected_offset = 0; static int64_t committed_offset = -1; -static void offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { - rd_kafka_topic_partition_t *rktpar; +static void offset_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { + rd_kafka_topic_partition_t *rktpar; - TEST_SAYL(3, "Offset committed: %s:\n", rd_kafka_err2str(err)); - if (err == RD_KAFKA_RESP_ERR__NO_OFFSET) - return; + TEST_SAYL(3, "Offset committed: %s:\n", rd_kafka_err2str(err)); + if (err == RD_KAFKA_RESP_ERR__NO_OFFSET) + return; - test_print_partition_list(offsets); - if (err) - TEST_FAIL("Offset commit failed: %s", rd_kafka_err2str(err)); - if (offsets->cnt == 0) - TEST_FAIL("Expected at least one partition in offset_commit_cb"); + test_print_partition_list(offsets); + if (err) + TEST_FAIL("Offset commit failed: %s", rd_kafka_err2str(err)); + if (offsets->cnt == 0) + TEST_FAIL( + "Expected at least one partition in offset_commit_cb"); - /* Find correct partition */ - if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, - topic, partition))) - return; + /* Find correct partition */ + if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, topic, + partition))) + return; - if (rktpar->err) - TEST_FAIL("Offset commit failed for partitioń : %s", - rd_kafka_err2str(rktpar->err)); + if (rktpar->err) + TEST_FAIL("Offset commit failed for partitioń : %s", + rd_kafka_err2str(rktpar->err)); - if (rktpar->offset > expected_offset) - TEST_FAIL("Offset committed %"PRId64 - " > expected offset %"PRId64, - rktpar->offset, expected_offset); + if 
(rktpar->offset > expected_offset) + TEST_FAIL("Offset committed %" PRId64 + " > expected offset %" PRId64, + rktpar->offset, expected_offset); if (rktpar->offset < committed_offset) - TEST_FAIL("Old offset %"PRId64" (re)committed: " - "should be above committed_offset %"PRId64, + TEST_FAIL("Old offset %" PRId64 + " (re)committed: " + "should be above committed_offset %" PRId64, rktpar->offset, committed_offset); else if (rktpar->offset == committed_offset) - TEST_SAYL(1, "Current offset re-committed: %"PRId64"\n", + TEST_SAYL(1, "Current offset re-committed: %" PRId64 "\n", rktpar->offset); else committed_offset = rktpar->offset; - if (rktpar->offset < expected_offset) { - TEST_SAYL(3, "Offset committed %"PRId64 - " < expected offset %"PRId64"\n", - rktpar->offset, expected_offset); - return; - } + if (rktpar->offset < expected_offset) { + TEST_SAYL(3, + "Offset committed %" PRId64 + " < expected offset %" PRId64 "\n", + rktpar->offset, expected_offset); + return; + } - TEST_SAYL(3, "Expected offset committed: %"PRId64"\n", rktpar->offset); + TEST_SAYL(3, "Expected offset committed: %" PRId64 "\n", + rktpar->offset); } -static void do_offset_test (const char *what, int auto_commit, int auto_store, - int async, int subscribe) { - test_timing_t t_all; - char groupid[64]; - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *tconf; - int cnt = 0; - const int extra_cnt = 5; - rd_kafka_resp_err_t err; - rd_kafka_topic_partition_list_t *parts; - rd_kafka_topic_partition_t *rktpar; - int64_t next_offset = -1; +static void do_offset_test(const char *what, + int auto_commit, + int auto_store, + int async, + int subscribe) { + test_timing_t t_all; + char groupid[64]; + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + int cnt = 0; + const int extra_cnt = 5; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *parts; + rd_kafka_topic_partition_t *rktpar; + int64_t next_offset = -1; SUB_TEST_QUICK("%s", what); - 
test_conf_init(&conf, &tconf, subscribe ? 30 : 10); + test_conf_init(&conf, &tconf, subscribe ? 30 : 10); test_conf_set(conf, "session.timeout.ms", "6000"); - test_conf_set(conf, "enable.auto.commit", auto_commit ? "true":"false"); - test_conf_set(conf, "enable.auto.offset.store", auto_store ?"true":"false"); - test_conf_set(conf, "auto.commit.interval.ms", "500"); - rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb); - test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); - test_str_id_generate(groupid, sizeof(groupid)); - test_conf_set(conf, "group.id", groupid); - rd_kafka_conf_set_default_topic_conf(conf, tconf); - - TIMING_START(&t_all, "%s", what); - - expected_offset = 0; - committed_offset = -1; - - /* MO: - * - Create consumer. - * - Start consuming from beginning - * - Perform store & commits according to settings - * - Stop storing&committing when half of the messages are consumed, - * - but consume 5 more to check against. - * - Query position. - * - Destroy consumer. - * - Create new consumer with same group.id using stored offsets - * - Should consume the expected message. - */ - - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf)); - - rd_kafka_poll_set_consumer(rk); + test_conf_set(conf, "enable.auto.commit", + auto_commit ? "true" : "false"); + test_conf_set(conf, "enable.auto.offset.store", + auto_store ? "true" : "false"); + test_conf_set(conf, "auto.commit.interval.ms", "500"); + rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb); + test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); + test_str_id_generate(groupid, sizeof(groupid)); + test_conf_set(conf, "group.id", groupid); + rd_kafka_conf_set_default_topic_conf(conf, tconf); + + TIMING_START(&t_all, "%s", what); + + expected_offset = 0; + committed_offset = -1; + + /* MO: + * - Create consumer. 
+ * - Start consuming from beginning + * - Perform store & commits according to settings + * - Stop storing&committing when half of the messages are consumed, + * - but consume 5 more to check against. + * - Query position. + * - Destroy consumer. + * - Create new consumer with same group.id using stored offsets + * - Should consume the expected message. + */ + + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf)); + + rd_kafka_poll_set_consumer(rk); if (subscribe) { test_consumer_subscribe(rk, topic); @@ -155,109 +165,112 @@ static void do_offset_test (const char *what, int auto_commit, int auto_store, rd_kafka_topic_partition_list_destroy(parts); } - while (cnt - extra_cnt < msgcnt / 2) { - rd_kafka_message_t *rkm; + while (cnt - extra_cnt < msgcnt / 2) { + rd_kafka_message_t *rkm; - rkm = rd_kafka_consumer_poll(rk, 10*1000); - if (!rkm) - continue; + rkm = rd_kafka_consumer_poll(rk, 10 * 1000); + if (!rkm) + continue; - if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT) - TEST_FAIL("%s: Timed out waiting for message %d", what,cnt); + if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT) + TEST_FAIL("%s: Timed out waiting for message %d", what, + cnt); else if (rkm->err) - TEST_FAIL("%s: Consumer error: %s", - what, rd_kafka_message_errstr(rkm)); - - /* Offset of next message. */ - next_offset = rkm->offset + 1; - - if (cnt < msgcnt / 2) { - if (!auto_store) { - err = rd_kafka_offset_store(rkm->rkt,rkm->partition, - rkm->offset); - if (err) - TEST_FAIL("%s: offset_store failed: %s\n", - what, rd_kafka_err2str(err)); - } - expected_offset = rkm->offset+1; - if (!auto_commit) { + TEST_FAIL("%s: Consumer error: %s", what, + rd_kafka_message_errstr(rkm)); + + /* Offset of next message. 
*/ + next_offset = rkm->offset + 1; + + if (cnt < msgcnt / 2) { + if (!auto_store) { + err = rd_kafka_offset_store( + rkm->rkt, rkm->partition, rkm->offset); + if (err) + TEST_FAIL( + "%s: offset_store failed: %s\n", + what, rd_kafka_err2str(err)); + } + expected_offset = rkm->offset + 1; + if (!auto_commit) { test_timing_t t_commit; - TIMING_START(&t_commit, - "%s @ %"PRId64, - async? - "commit.async": - "commit.sync", - rkm->offset+1); - err = rd_kafka_commit_message(rk, rkm, async); - TIMING_STOP(&t_commit); - if (err) - TEST_FAIL("%s: commit failed: %s\n", - what, rd_kafka_err2str(err)); - } - - } else if (auto_store && auto_commit) - expected_offset = rkm->offset+1; - - rd_kafka_message_destroy(rkm); - cnt++; - } - - TEST_SAY("%s: done consuming after %d messages, at offset %"PRId64 - ", next_offset %"PRId64"\n", - what, cnt, expected_offset, next_offset); - - if ((err = rd_kafka_assignment(rk, &parts))) - TEST_FAIL("%s: failed to get assignment(): %s\n", - what, rd_kafka_err2str(err)); - - /* Verify position */ - if ((err = rd_kafka_position(rk, parts))) - TEST_FAIL("%s: failed to get position(): %s\n", - what, rd_kafka_err2str(err)); - if (!(rktpar = rd_kafka_topic_partition_list_find(parts, - topic, partition))) - TEST_FAIL("%s: position(): topic lost\n", what); - if (rktpar->offset != next_offset) - TEST_FAIL("%s: Expected position() offset %"PRId64", got %"PRId64, - what, next_offset, rktpar->offset); - TEST_SAY("%s: Position is at %"PRId64", good!\n", - what, rktpar->offset); - - /* Pause messages while waiting so we can serve callbacks - * without having more messages received. 
*/ - if ((err = rd_kafka_pause_partitions(rk, parts))) - TEST_FAIL("%s: failed to pause partitions: %s\n", - what, rd_kafka_err2str(err)); - rd_kafka_topic_partition_list_destroy(parts); - - /* Fire off any enqueued offset_commit_cb */ - test_consumer_poll_no_msgs(what, rk, testid, 0); - - TEST_SAY("%s: committed_offset %"PRId64", expected_offset %"PRId64"\n", - what, committed_offset, expected_offset); - - if (!auto_commit && !async) { - /* Sync commits should be up to date at this point. */ - if (committed_offset != expected_offset) - TEST_FAIL("%s: Sync commit: committed offset %"PRId64 - " should be same as expected offset " - "%"PRId64, - what, committed_offset, expected_offset); - } else { - - /* Wait for offset commits to catch up */ - while (committed_offset < expected_offset) { - TEST_SAYL(2, "%s: Wait for committed offset %"PRId64 - " to reach expected offset %"PRId64"\n", - what, committed_offset, expected_offset); - test_consumer_poll_no_msgs(what, rk, testid, 1000); - } - - } - - TEST_SAY("%s: phase 1 complete, %d messages consumed, " - "next expected offset is %"PRId64"\n", - what, cnt, expected_offset); + TIMING_START(&t_commit, "%s @ %" PRId64, + async ? 
"commit.async" + : "commit.sync", + rkm->offset + 1); + err = rd_kafka_commit_message(rk, rkm, async); + TIMING_STOP(&t_commit); + if (err) + TEST_FAIL("%s: commit failed: %s\n", + what, rd_kafka_err2str(err)); + } + + } else if (auto_store && auto_commit) + expected_offset = rkm->offset + 1; + + rd_kafka_message_destroy(rkm); + cnt++; + } + + TEST_SAY("%s: done consuming after %d messages, at offset %" PRId64 + ", next_offset %" PRId64 "\n", + what, cnt, expected_offset, next_offset); + + if ((err = rd_kafka_assignment(rk, &parts))) + TEST_FAIL("%s: failed to get assignment(): %s\n", what, + rd_kafka_err2str(err)); + + /* Verify position */ + if ((err = rd_kafka_position(rk, parts))) + TEST_FAIL("%s: failed to get position(): %s\n", what, + rd_kafka_err2str(err)); + if (!(rktpar = + rd_kafka_topic_partition_list_find(parts, topic, partition))) + TEST_FAIL("%s: position(): topic lost\n", what); + if (rktpar->offset != next_offset) + TEST_FAIL("%s: Expected position() offset %" PRId64 + ", got %" PRId64, + what, next_offset, rktpar->offset); + TEST_SAY("%s: Position is at %" PRId64 ", good!\n", what, + rktpar->offset); + + /* Pause messages while waiting so we can serve callbacks + * without having more messages received. */ + if ((err = rd_kafka_pause_partitions(rk, parts))) + TEST_FAIL("%s: failed to pause partitions: %s\n", what, + rd_kafka_err2str(err)); + rd_kafka_topic_partition_list_destroy(parts); + + /* Fire off any enqueued offset_commit_cb */ + test_consumer_poll_no_msgs(what, rk, testid, 0); + + TEST_SAY("%s: committed_offset %" PRId64 ", expected_offset %" PRId64 + "\n", + what, committed_offset, expected_offset); + + if (!auto_commit && !async) { + /* Sync commits should be up to date at this point. 
*/ + if (committed_offset != expected_offset) + TEST_FAIL("%s: Sync commit: committed offset %" PRId64 + " should be same as expected offset " + "%" PRId64, + what, committed_offset, expected_offset); + } else { + + /* Wait for offset commits to catch up */ + while (committed_offset < expected_offset) { + TEST_SAYL(2, + "%s: Wait for committed offset %" PRId64 + " to reach expected offset %" PRId64 "\n", + what, committed_offset, expected_offset); + test_consumer_poll_no_msgs(what, rk, testid, 1000); + } + } + + TEST_SAY( + "%s: phase 1 complete, %d messages consumed, " + "next expected offset is %" PRId64 "\n", + what, cnt, expected_offset); /* Issue #827: cause committed() to return prematurely by specifying * low timeout. The bug (use after free) will only @@ -276,32 +289,35 @@ static void do_offset_test (const char *what, int auto_commit, int auto_store, rd_kafka_err2str(err)); } while (err != RD_KAFKA_RESP_ERR__TIMED_OUT); - /* Query position */ - parts = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(parts, topic, partition); - - err = rd_kafka_committed(rk, parts, tmout_multip(5*1000)); - if (err) - TEST_FAIL("%s: committed() failed: %s", what, rd_kafka_err2str(err)); - if (!(rktpar = rd_kafka_topic_partition_list_find(parts, - topic, partition))) - TEST_FAIL("%s: committed(): topic lost\n", what); - if (rktpar->offset != expected_offset) - TEST_FAIL("%s: Expected committed() offset %"PRId64", got %"PRId64, - what, expected_offset, rktpar->offset); - TEST_SAY("%s: Committed offset is at %"PRId64", good!\n", - what, rktpar->offset); - - rd_kafka_topic_partition_list_destroy(parts); - test_consumer_close(rk); - rd_kafka_destroy(rk); - - - - /* Fire up a new consumer and continue from where we left off. 
*/ - TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n",what); - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - rd_kafka_poll_set_consumer(rk); + /* Query position */ + parts = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(parts, topic, partition); + + err = rd_kafka_committed(rk, parts, tmout_multip(5 * 1000)); + if (err) + TEST_FAIL("%s: committed() failed: %s", what, + rd_kafka_err2str(err)); + if (!(rktpar = + rd_kafka_topic_partition_list_find(parts, topic, partition))) + TEST_FAIL("%s: committed(): topic lost\n", what); + if (rktpar->offset != expected_offset) + TEST_FAIL("%s: Expected committed() offset %" PRId64 + ", got %" PRId64, + what, expected_offset, rktpar->offset); + TEST_SAY("%s: Committed offset is at %" PRId64 ", good!\n", what, + rktpar->offset); + + rd_kafka_topic_partition_list_destroy(parts); + test_consumer_close(rk); + rd_kafka_destroy(rk); + + + + /* Fire up a new consumer and continue from where we left off. */ + TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n", + what); + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + rd_kafka_poll_set_consumer(rk); if (subscribe) { test_consumer_subscribe(rk, topic); @@ -312,129 +328,126 @@ static void do_offset_test (const char *what, int auto_commit, int auto_store, rd_kafka_topic_partition_list_destroy(parts); } - while (cnt < msgcnt) { - rd_kafka_message_t *rkm; + while (cnt < msgcnt) { + rd_kafka_message_t *rkm; - rkm = rd_kafka_consumer_poll(rk, 10*1000); - if (!rkm) - continue; + rkm = rd_kafka_consumer_poll(rk, 10 * 1000); + if (!rkm) + continue; - if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT) - TEST_FAIL("%s: Timed out waiting for message %d", what,cnt); + if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT) + TEST_FAIL("%s: Timed out waiting for message %d", what, + cnt); else if (rkm->err) - TEST_FAIL("%s: Consumer error: %s", - what, rd_kafka_message_errstr(rkm)); - - if (rkm->offset != expected_offset) - TEST_FAIL("%s: 
Received message offset %"PRId64 - ", expected %"PRId64" at msgcnt %d/%d\n", - what, rkm->offset, expected_offset, - cnt, msgcnt); - - rd_kafka_message_destroy(rkm); - expected_offset++; - cnt++; - } + TEST_FAIL("%s: Consumer error: %s", what, + rd_kafka_message_errstr(rkm)); + + if (rkm->offset != expected_offset) + TEST_FAIL("%s: Received message offset %" PRId64 + ", expected %" PRId64 " at msgcnt %d/%d\n", + what, rkm->offset, expected_offset, cnt, + msgcnt); + + rd_kafka_message_destroy(rkm); + expected_offset++; + cnt++; + } - TEST_SAY("%s: phase 2: complete\n", what); - test_consumer_close(rk); - rd_kafka_destroy(rk); + TEST_SAY("%s: phase 2: complete\n", what); + test_consumer_close(rk); + rd_kafka_destroy(rk); - TIMING_STOP(&t_all); + TIMING_STOP(&t_all); SUB_TEST_PASS(); } -static void empty_offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { - rd_kafka_resp_err_t expected = *(rd_kafka_resp_err_t *)opaque; - int valid_offsets = 0; - int i; - - TEST_SAY("Offset commit callback for %d partitions: %s (expecting %s)\n", - offsets ? offsets->cnt : 0, - rd_kafka_err2str(err), - rd_kafka_err2str(expected)); - - if (expected != err) - TEST_FAIL("Offset commit cb: expected %s, got %s", - rd_kafka_err2str(expected), - rd_kafka_err2str(err)); - - for (i = 0 ; i < offsets->cnt ; i++) { - TEST_SAY("committed: %s [%"PRId32"] offset %"PRId64 - ": %s\n", - offsets->elems[i].topic, - offsets->elems[i].partition, - offsets->elems[i].offset, - rd_kafka_err2str(offsets->elems[i].err)); - - if (expected == RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_ASSERT(offsets->elems[i].err == expected); - if (offsets->elems[i].offset > 0) - valid_offsets++; - } - - if (expected == RD_KAFKA_RESP_ERR_NO_ERROR) { - /* If no error is expected we instead expect one proper offset - * to have been committed. 
*/ - TEST_ASSERT(valid_offsets > 0); - } +static void empty_offset_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { + rd_kafka_resp_err_t expected = *(rd_kafka_resp_err_t *)opaque; + int valid_offsets = 0; + int i; + + TEST_SAY( + "Offset commit callback for %d partitions: %s (expecting %s)\n", + offsets ? offsets->cnt : 0, rd_kafka_err2str(err), + rd_kafka_err2str(expected)); + + if (expected != err) + TEST_FAIL("Offset commit cb: expected %s, got %s", + rd_kafka_err2str(expected), rd_kafka_err2str(err)); + + for (i = 0; i < offsets->cnt; i++) { + TEST_SAY("committed: %s [%" PRId32 "] offset %" PRId64 ": %s\n", + offsets->elems[i].topic, offsets->elems[i].partition, + offsets->elems[i].offset, + rd_kafka_err2str(offsets->elems[i].err)); + + if (expected == RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_ASSERT(offsets->elems[i].err == expected); + if (offsets->elems[i].offset > 0) + valid_offsets++; + } + + if (expected == RD_KAFKA_RESP_ERR_NO_ERROR) { + /* If no error is expected we instead expect one proper offset + * to have been committed. 
*/ + TEST_ASSERT(valid_offsets > 0); + } } /** * Trigger an empty cgrp commit (issue #803) */ -static void do_empty_commit (void) { - rd_kafka_t *rk; - char group_id[64]; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *tconf; - rd_kafka_resp_err_t err, expect; +static void do_empty_commit(void) { + rd_kafka_t *rk; + char group_id[64]; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + rd_kafka_resp_err_t err, expect; SUB_TEST_QUICK(); - test_conf_init(&conf, &tconf, 20); - test_conf_set(conf, "enable.auto.commit", "false"); - test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); - test_str_id_generate(group_id, sizeof(group_id)); + test_conf_init(&conf, &tconf, 20); + test_conf_set(conf, "enable.auto.commit", "false"); + test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); + test_str_id_generate(group_id, sizeof(group_id)); - TEST_SAY(_C_MAG "[ do_empty_commit group.id %s ]\n", group_id); + TEST_SAY(_C_MAG "[ do_empty_commit group.id %s ]\n", group_id); - rk = test_create_consumer(group_id, NULL, conf, tconf); + rk = test_create_consumer(group_id, NULL, conf, tconf); - test_consumer_subscribe(rk, topic); + test_consumer_subscribe(rk, topic); - test_consumer_poll("consume", rk, testid, -1, -1, 100, NULL); + test_consumer_poll("consume", rk, testid, -1, -1, 100, NULL); - TEST_SAY("First commit\n"); - expect = RD_KAFKA_RESP_ERR_NO_ERROR; - err = rd_kafka_commit_queue(rk, NULL, NULL, - empty_offset_commit_cb, &expect); - if (err != expect) - TEST_FAIL("commit failed: %s", rd_kafka_err2str(err)); - else - TEST_SAY("First commit returned %s\n", - rd_kafka_err2str(err)); - - TEST_SAY("Second commit, should be empty\n"); - expect = RD_KAFKA_RESP_ERR__NO_OFFSET; - err = rd_kafka_commit_queue(rk, NULL, NULL, - empty_offset_commit_cb, &expect); - if (err != RD_KAFKA_RESP_ERR__NO_OFFSET) - TEST_FAIL("unexpected commit result, wanted NO_OFFSET, got: %s", - rd_kafka_err2str(err)); - else - TEST_SAY("Second commit returned %s\n", - rd_kafka_err2str(err)); 
+ TEST_SAY("First commit\n"); + expect = RD_KAFKA_RESP_ERR_NO_ERROR; + err = rd_kafka_commit_queue(rk, NULL, NULL, empty_offset_commit_cb, + &expect); + if (err != expect) + TEST_FAIL("commit failed: %s", rd_kafka_err2str(err)); + else + TEST_SAY("First commit returned %s\n", rd_kafka_err2str(err)); + + TEST_SAY("Second commit, should be empty\n"); + expect = RD_KAFKA_RESP_ERR__NO_OFFSET; + err = rd_kafka_commit_queue(rk, NULL, NULL, empty_offset_commit_cb, + &expect); + if (err != RD_KAFKA_RESP_ERR__NO_OFFSET) + TEST_FAIL("unexpected commit result, wanted NO_OFFSET, got: %s", + rd_kafka_err2str(err)); + else + TEST_SAY("Second commit returned %s\n", rd_kafka_err2str(err)); - test_consumer_close(rk); + test_consumer_close(rk); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); SUB_TEST_PASS(); } @@ -443,48 +456,47 @@ static void do_empty_commit (void) { /** * Commit non-existent topic (issue #704) */ -static void nonexist_offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { - int i; - int failed_offsets = 0; - - TEST_SAY("Offset commit callback for %d partitions: %s\n", - offsets ? offsets->cnt : 0, - rd_kafka_err2str(err)); - - TEST_ASSERT(offsets != NULL); - - for (i = 0 ; i < offsets->cnt ; i++) { - TEST_SAY("committed: %s [%"PRId32"] offset %"PRId64 - ": %s\n", - offsets->elems[i].topic, - offsets->elems[i].partition, - offsets->elems[i].offset, - rd_kafka_err2str(offsets->elems[i].err)); - failed_offsets += offsets->elems[i].err ? 
1 : 0; - } - - TEST_ASSERT(err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, - "expected unknown Topic or partition, not %s", rd_kafka_err2str(err)); - TEST_ASSERT(offsets->cnt == 2, "expected %d offsets", offsets->cnt); - TEST_ASSERT(failed_offsets == offsets->cnt, - "expected %d offsets to have failed, got %d", - offsets->cnt, failed_offsets); +static void nonexist_offset_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { + int i; + int failed_offsets = 0; + + TEST_SAY("Offset commit callback for %d partitions: %s\n", + offsets ? offsets->cnt : 0, rd_kafka_err2str(err)); + + TEST_ASSERT(offsets != NULL); + + for (i = 0; i < offsets->cnt; i++) { + TEST_SAY("committed: %s [%" PRId32 "] offset %" PRId64 ": %s\n", + offsets->elems[i].topic, offsets->elems[i].partition, + offsets->elems[i].offset, + rd_kafka_err2str(offsets->elems[i].err)); + failed_offsets += offsets->elems[i].err ? 1 : 0; + } + + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + "expected unknown Topic or partition, not %s", + rd_kafka_err2str(err)); + TEST_ASSERT(offsets->cnt == 2, "expected %d offsets", offsets->cnt); + TEST_ASSERT(failed_offsets == offsets->cnt, + "expected %d offsets to have failed, got %d", offsets->cnt, + failed_offsets); } -static void do_nonexist_commit (void) { - rd_kafka_t *rk; - char group_id[64]; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *tconf; - rd_kafka_topic_partition_list_t *offsets; - const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1); - rd_kafka_resp_err_t err; +static void do_nonexist_commit(void) { + rd_kafka_t *rk; + char group_id[64]; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + rd_kafka_topic_partition_list_t *offsets; + const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_resp_err_t err; SUB_TEST_QUICK(); - test_conf_init(&conf, &tconf, 20); + test_conf_init(&conf, &tconf, 20); /* Offset commit deferrals when the broker is down is 
limited to * session.timeout.ms. With 0.9 brokers and api.version.request=true * the initial connect to all brokers will take 10*2 seconds @@ -492,7 +504,7 @@ static void do_nonexist_commit (void) { * Set the session timeout high here to avoid it. */ test_conf_set(conf, "session.timeout.ms", "60000"); - test_str_id_generate(group_id, sizeof(group_id)); + test_str_id_generate(group_id, sizeof(group_id)); test_conf_set(conf, "group.id", group_id); rd_kafka_conf_set_default_topic_conf(conf, tconf); @@ -502,53 +514,49 @@ static void do_nonexist_commit (void) { rk = test_create_handle(RD_KAFKA_CONSUMER, conf); rd_kafka_poll_set_consumer(rk); - TEST_SAY("Try nonexist commit\n"); - offsets = rd_kafka_topic_partition_list_new(2); - rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123; - rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456; + TEST_SAY("Try nonexist commit\n"); + offsets = rd_kafka_topic_partition_list_new(2); + rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123; + rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456; - err = rd_kafka_commit_queue(rk, offsets, NULL, - nonexist_offset_commit_cb, NULL); - TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err)); - if (err != RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) - TEST_FAIL("commit() should give UnknownTopicOrPart, not: %s", - rd_kafka_err2str(err)); + err = rd_kafka_commit_queue(rk, offsets, NULL, + nonexist_offset_commit_cb, NULL); + TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err)); + if (err != RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) + TEST_FAIL("commit() should give UnknownTopicOrPart, not: %s", + rd_kafka_err2str(err)); - rd_kafka_topic_partition_list_destroy(offsets); + rd_kafka_topic_partition_list_destroy(offsets); - test_consumer_close(rk); + test_consumer_close(rk); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); SUB_TEST_PASS(); } -int main_0030_offset_commit (int argc, char **argv) { +int 
main_0030_offset_commit(int argc, char **argv) { - topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); - testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); + topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); do_empty_commit(); do_nonexist_commit(); - do_offset_test("AUTO.COMMIT & AUTO.STORE", - 1 /* enable.auto.commit */, - 1 /* enable.auto.offset.store */, - 0 /* not used. */, + do_offset_test("AUTO.COMMIT & AUTO.STORE", 1 /* enable.auto.commit */, + 1 /* enable.auto.offset.store */, 0 /* not used. */, 1 /* use subscribe */); - do_offset_test("MANUAL.COMMIT.ASYNC & AUTO.STORE", - 0 /* enable.auto.commit */, - 1 /* enable.auto.offset.store */, - 1 /* async */, + do_offset_test("MANUAL.COMMIT.ASYNC & AUTO.STORE", + 0 /* enable.auto.commit */, + 1 /* enable.auto.offset.store */, 1 /* async */, 1 /* use subscribe */); do_offset_test("AUTO.COMMIT.ASYNC & AUTO.STORE & ASSIGN", 1 /* enable.auto.commit */, - 1 /* enable.auto.offset.store */, - 0 /* not used. */, + 1 /* enable.auto.offset.store */, 0 /* not used. 
*/, 0 /* use assign */); if (test_quick) { @@ -556,28 +564,23 @@ int main_0030_offset_commit (int argc, char **argv) { return 0; } - do_offset_test("AUTO.COMMIT & MANUAL.STORE", - 1 /* enable.auto.commit */, - 0 /* enable.auto.offset.store */, - 0 /* not used */, + do_offset_test("AUTO.COMMIT & MANUAL.STORE", 1 /* enable.auto.commit */, + 0 /* enable.auto.offset.store */, 0 /* not used */, 1 /* use subscribe */); - do_offset_test("MANUAL.COMMIT.SYNC & AUTO.STORE", - 0 /* enable.auto.commit */, - 1 /* enable.auto.offset.store */, - 0 /* async */, + do_offset_test("MANUAL.COMMIT.SYNC & AUTO.STORE", + 0 /* enable.auto.commit */, + 1 /* enable.auto.offset.store */, 0 /* async */, 1 /* use subscribe */); - do_offset_test("MANUAL.COMMIT.ASYNC & MANUAL.STORE", - 0 /* enable.auto.commit */, - 0 /* enable.auto.offset.store */, - 1 /* sync */, + do_offset_test("MANUAL.COMMIT.ASYNC & MANUAL.STORE", + 0 /* enable.auto.commit */, + 0 /* enable.auto.offset.store */, 1 /* sync */, 1 /* use subscribe */); - do_offset_test("MANUAL.COMMIT.SYNC & MANUAL.STORE", - 0 /* enable.auto.commit */, - 0 /* enable.auto.offset.store */, - 0 /* sync */, + do_offset_test("MANUAL.COMMIT.SYNC & MANUAL.STORE", + 0 /* enable.auto.commit */, + 0 /* enable.auto.offset.store */, 0 /* sync */, 1 /* use subscribe */); rd_free(topic); diff --git a/tests/0031-get_offsets.c b/tests/0031-get_offsets.c index cb26a698fe..327be43df4 100644 --- a/tests/0031-get_offsets.c +++ b/tests/0031-get_offsets.c @@ -31,7 +31,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -39,78 +39,81 @@ */ -int main_0031_get_offsets (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int msgcnt = test_quick ? 
10 : 100; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - int64_t qry_low = -1234, qry_high = -1235; - int64_t get_low = -1234, get_high = -1235; - rd_kafka_resp_err_t err; - test_timing_t t_qry, t_get; - uint64_t testid; +int main_0031_get_offsets(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int msgcnt = test_quick ? 10 : 100; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + int64_t qry_low = -1234, qry_high = -1235; + int64_t get_low = -1234, get_high = -1235; + rd_kafka_resp_err_t err; + test_timing_t t_qry, t_get; + uint64_t testid; /* Produce messages */ testid = test_produce_msgs_easy(topic, 0, 0, msgcnt); - /* Get offsets */ - rk = test_create_consumer(NULL, NULL, NULL, NULL -); - - TIMING_START(&t_qry, "query_watermark_offsets"); - err = rd_kafka_query_watermark_offsets(rk, topic, 0, - &qry_low, &qry_high, - tmout_multip(10*1000)); - TIMING_STOP(&t_qry); - if (err) - TEST_FAIL("query_watermark_offsets failed: %s\n", - rd_kafka_err2str(err)); - - if (qry_low != 0 && qry_high != msgcnt) - TEST_FAIL("Expected low,high %d,%d, but got " - "%"PRId64",%"PRId64, - 0, msgcnt, qry_low, qry_high); - - TEST_SAY("query_watermark_offsets: " - "offsets %"PRId64", %"PRId64"\n", qry_low, qry_high); - - /* Now start consuming to update the offset cache, then query it - * with the get_ API. */ - rkt = test_create_topic_object(rk, topic, NULL); - - test_consumer_start("get", rkt, 0, RD_KAFKA_OFFSET_BEGINNING); - test_consume_msgs("get", rkt, testid, 0, TEST_NO_SEEK, - 0, msgcnt, 0); - /* After at least one message has been consumed the - * watermarks are cached. 
*/ - - TIMING_START(&t_get, "get_watermark_offsets"); - err = rd_kafka_get_watermark_offsets(rk, topic, 0, - &get_low, &get_high); - TIMING_STOP(&t_get); - if (err) - TEST_FAIL("get_watermark_offsets failed: %s\n", - rd_kafka_err2str(err)); - - TEST_SAY("get_watermark_offsets: " - "offsets %"PRId64", %"PRId64"\n", get_low, get_high); - - if (get_high != qry_high) - TEST_FAIL("query/get discrepancies: " - "low: %"PRId64"/%"PRId64", high: %"PRId64"/%"PRId64, - qry_low, get_low, qry_high, get_high); - if (get_low >= get_high) - TEST_FAIL("get_watermark_offsets: " - "low %"PRId64" >= high %"PRId64, - get_low, get_high); - - /* FIXME: We currently dont bother checking the get_low offset - * since it requires stats to be enabled. */ - - test_consumer_stop("get", rkt, 0); - - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + /* Get offsets */ + rk = test_create_consumer(NULL, NULL, NULL, NULL); + + TIMING_START(&t_qry, "query_watermark_offsets"); + err = rd_kafka_query_watermark_offsets( + rk, topic, 0, &qry_low, &qry_high, tmout_multip(10 * 1000)); + TIMING_STOP(&t_qry); + if (err) + TEST_FAIL("query_watermark_offsets failed: %s\n", + rd_kafka_err2str(err)); + + if (qry_low != 0 && qry_high != msgcnt) + TEST_FAIL( + "Expected low,high %d,%d, but got " + "%" PRId64 ",%" PRId64, + 0, msgcnt, qry_low, qry_high); + + TEST_SAY( + "query_watermark_offsets: " + "offsets %" PRId64 ", %" PRId64 "\n", + qry_low, qry_high); + + /* Now start consuming to update the offset cache, then query it + * with the get_ API. */ + rkt = test_create_topic_object(rk, topic, NULL); + + test_consumer_start("get", rkt, 0, RD_KAFKA_OFFSET_BEGINNING); + test_consume_msgs("get", rkt, testid, 0, TEST_NO_SEEK, 0, msgcnt, 0); + /* After at least one message has been consumed the + * watermarks are cached. 
*/ + + TIMING_START(&t_get, "get_watermark_offsets"); + err = rd_kafka_get_watermark_offsets(rk, topic, 0, &get_low, &get_high); + TIMING_STOP(&t_get); + if (err) + TEST_FAIL("get_watermark_offsets failed: %s\n", + rd_kafka_err2str(err)); + + TEST_SAY( + "get_watermark_offsets: " + "offsets %" PRId64 ", %" PRId64 "\n", + get_low, get_high); + + if (get_high != qry_high) + TEST_FAIL( + "query/get discrepancies: " + "low: %" PRId64 "/%" PRId64 ", high: %" PRId64 "/%" PRId64, + qry_low, get_low, qry_high, get_high); + if (get_low >= get_high) + TEST_FAIL( + "get_watermark_offsets: " + "low %" PRId64 " >= high %" PRId64, + get_low, get_high); + + /* FIXME: We currently dont bother checking the get_low offset + * since it requires stats to be enabled. */ + + test_consumer_stop("get", rkt, 0); + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); return 0; } diff --git a/tests/0033-regex_subscribe.c b/tests/0033-regex_subscribe.c index 791df52f98..f31d33ebcb 100644 --- a/tests/0033-regex_subscribe.c +++ b/tests/0033-regex_subscribe.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -40,136 +40,138 @@ struct expect { - char *name; /* sub-test name */ - const char *sub[4]; /* subscriptions */ - const char *exp[4]; /* expected topics */ - int exp_err; /* expected error from subscribe() */ - int stat[4]; /* per exp status */ - int fails; - enum { - _EXP_NONE, - _EXP_FAIL, - _EXP_OK, - _EXP_ASSIGN, - _EXP_REVOKE, - _EXP_ASSIGNED, - _EXP_REVOKED, - } result; + char *name; /* sub-test name */ + const char *sub[4]; /* subscriptions */ + const char *exp[4]; /* expected topics */ + int exp_err; /* expected error from subscribe() */ + int stat[4]; /* per exp status */ + int fails; + enum { _EXP_NONE, + _EXP_FAIL, + _EXP_OK, + _EXP_ASSIGN, + _EXP_REVOKE, + _EXP_ASSIGNED, + _EXP_REVOKED, + } result; }; static struct expect *exp_curr; static uint64_t testid; -static void expect_match (struct expect *exp, - const rd_kafka_topic_partition_list_t *parts) { - int i; - int e = 0; - int fails = 0; - - memset(exp->stat, 0, sizeof(exp->stat)); - - for (i = 0 ; i < parts->cnt ; i++) { - int found = 0; - e = 0; - while (exp->exp[e]) { - if (!strcmp(parts->elems[i].topic, exp->exp[e])) { - exp->stat[e]++; - found++; - } - e++; - } - - if (!found) { - TEST_WARN("%s: got unexpected topic match: %s\n", - exp->name, parts->elems[i].topic); - fails++; - } - } - - - e = 0; - while (exp->exp[e]) { - if (!exp->stat[e]) { - TEST_WARN("%s: expected topic not " - "found in assignment: %s\n", - exp->name, exp->exp[e]); - fails++; - } else { - TEST_SAY("%s: expected topic %s seen in assignment\n", - exp->name, exp->exp[e]); - } - e++; - } - - exp->fails += fails; - if (fails) { - TEST_WARN("%s: see %d previous failures\n", exp->name, fails); - exp->result = _EXP_FAIL; - } else { - TEST_SAY(_C_MAG "[ %s: assignment matched ]\n", exp->name); - exp->result = _EXP_OK; - } +static void expect_match(struct expect *exp, + const rd_kafka_topic_partition_list_t *parts) { + int i; + int e = 
0; + int fails = 0; + + memset(exp->stat, 0, sizeof(exp->stat)); + + for (i = 0; i < parts->cnt; i++) { + int found = 0; + e = 0; + while (exp->exp[e]) { + if (!strcmp(parts->elems[i].topic, exp->exp[e])) { + exp->stat[e]++; + found++; + } + e++; + } + + if (!found) { + TEST_WARN("%s: got unexpected topic match: %s\n", + exp->name, parts->elems[i].topic); + fails++; + } + } + + e = 0; + while (exp->exp[e]) { + if (!exp->stat[e]) { + TEST_WARN( + "%s: expected topic not " + "found in assignment: %s\n", + exp->name, exp->exp[e]); + fails++; + } else { + TEST_SAY("%s: expected topic %s seen in assignment\n", + exp->name, exp->exp[e]); + } + e++; + } + + exp->fails += fails; + if (fails) { + TEST_WARN("%s: see %d previous failures\n", exp->name, fails); + exp->result = _EXP_FAIL; + } else { + TEST_SAY(_C_MAG "[ %s: assignment matched ]\n", exp->name); + exp->result = _EXP_OK; + } } -static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, void *opaque){ - struct expect *exp = exp_curr; - - TEST_ASSERT(exp_curr, "exp_curr not set"); - - TEST_SAY("rebalance_cb: %s with %d partition(s)\n", - rd_kafka_err2str(err), parts->cnt); - test_print_partition_list(parts); - - switch (err) - { - case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: - /* Check that provided partitions match our expectations */ - if (exp->result != _EXP_ASSIGN) { - TEST_WARN("%s: rebalance called while expecting %d: " - "too many or undesired assignment(s?\n", - exp->name, exp->result); - } - expect_match(exp, parts); - test_consumer_assign("rebalance", rk, parts); - exp->result = _EXP_ASSIGNED; - break; - - case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: - if (exp->result != _EXP_REVOKE) { - TEST_WARN("%s: rebalance called while expecting %d: " - "too many or undesired assignment(s?\n", - exp->name, exp->result); - } - - test_consumer_unassign("rebalance", rk); - exp->result = _EXP_REVOKED; - break; - - default: - TEST_FAIL("rebalance_cb: error: %s", 
rd_kafka_err2str(err)); - } +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + struct expect *exp = exp_curr; + + TEST_ASSERT(exp_curr, "exp_curr not set"); + + TEST_SAY("rebalance_cb: %s with %d partition(s)\n", + rd_kafka_err2str(err), parts->cnt); + test_print_partition_list(parts); + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + /* Check that provided partitions match our expectations */ + if (exp->result != _EXP_ASSIGN) { + TEST_WARN( + "%s: rebalance called while expecting %d: " + "too many or undesired assignment(s?\n", + exp->name, exp->result); + } + expect_match(exp, parts); + test_consumer_assign("rebalance", rk, parts); + exp->result = _EXP_ASSIGNED; + break; + + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + if (exp->result != _EXP_REVOKE) { + TEST_WARN( + "%s: rebalance called while expecting %d: " + "too many or undesired assignment(s?\n", + exp->name, exp->result); + } + + test_consumer_unassign("rebalance", rk); + exp->result = _EXP_REVOKED; + break; + + default: + TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err)); + } } /** * @brief Poll the consumer once. 
*/ -static void consumer_poll_once (rd_kafka_t *rk) { - rd_kafka_message_t *rkmessage; +static void consumer_poll_once(rd_kafka_t *rk) { + rd_kafka_message_t *rkmessage; - rkmessage = rd_kafka_consumer_poll(rk, 1000); - if (!rkmessage) + rkmessage = rd_kafka_consumer_poll(rk, 1000); + if (!rkmessage) return; - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("%s [%"PRId32"] reached EOF at " - "offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset); + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + TEST_SAY("%s [%" PRId32 + "] reached EOF at " + "offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); } else if (rkmessage->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) { if (strstr(rd_kafka_topic_name(rkmessage->rkt), "NONEXIST")) @@ -177,287 +179,276 @@ static void consumer_poll_once (rd_kafka_t *rk) { rd_kafka_topic_name(rkmessage->rkt), rd_kafka_message_errstr(rkmessage)); else - TEST_FAIL("%s [%"PRId32"] error (offset %"PRId64"): %s", - rkmessage->rkt ? - rd_kafka_topic_name(rkmessage->rkt) : - "(no-topic)", - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); - } - - rd_kafka_message_destroy(rkmessage); + TEST_FAIL( + "%s [%" PRId32 "] error (offset %" PRId64 "): %s", + rkmessage->rkt ? 
rd_kafka_topic_name(rkmessage->rkt) + : "(no-topic)", + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); + } + + rd_kafka_message_destroy(rkmessage); } -static int test_subscribe (rd_kafka_t *rk, struct expect *exp) { - rd_kafka_resp_err_t err; - rd_kafka_topic_partition_list_t *tlist; - int i; - test_timing_t t_sub, t_assign, t_unsub; +static int test_subscribe(rd_kafka_t *rk, struct expect *exp) { + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *tlist; + int i; + test_timing_t t_sub, t_assign, t_unsub; - exp_curr = exp; + exp_curr = exp; - test_timeout_set((test_session_timeout_ms/1000) * 3); + test_timeout_set((test_session_timeout_ms / 1000) * 3); - tlist = rd_kafka_topic_partition_list_new(4); - TEST_SAY(_C_MAG "[ %s: begin ]\n", exp->name); - i = 0; - TEST_SAY("Topic subscription:\n"); - while (exp->sub[i]) { - TEST_SAY("%s: %s\n", exp->name, exp->sub[i]); - rd_kafka_topic_partition_list_add(tlist, exp->sub[i], - RD_KAFKA_PARTITION_UA); - i++; - } + tlist = rd_kafka_topic_partition_list_new(4); + TEST_SAY(_C_MAG "[ %s: begin ]\n", exp->name); + i = 0; + TEST_SAY("Topic subscription:\n"); + while (exp->sub[i]) { + TEST_SAY("%s: %s\n", exp->name, exp->sub[i]); + rd_kafka_topic_partition_list_add(tlist, exp->sub[i], + RD_KAFKA_PARTITION_UA); + i++; + } - /* Subscribe */ - TIMING_START(&t_sub, "subscribe"); - err = rd_kafka_subscribe(rk, tlist); - TIMING_STOP(&t_sub); - TEST_ASSERT(err == exp->exp_err, - "subscribe() failed: %s (expected %s)", + /* Subscribe */ + TIMING_START(&t_sub, "subscribe"); + err = rd_kafka_subscribe(rk, tlist); + TIMING_STOP(&t_sub); + TEST_ASSERT(err == exp->exp_err, "subscribe() failed: %s (expected %s)", rd_kafka_err2str(err), rd_kafka_err2str(exp->exp_err)); - if (exp->exp[0]) { - /* Wait for assignment, actual messages are ignored. 
*/ - exp->result = _EXP_ASSIGN; - TEST_SAY("%s: waiting for assignment\n", exp->name); - TIMING_START(&t_assign, "assignment"); - while (exp->result == _EXP_ASSIGN) - consumer_poll_once(rk); - TIMING_STOP(&t_assign); - TEST_ASSERT(exp->result == _EXP_ASSIGNED, - "got %d instead of assignment", exp->result); - - } else { - /* Not expecting any assignment */ - int64_t ts_end = test_clock() + 5000; - exp->result = _EXP_NONE; /* Not expecting a rebalance */ - while (exp->result == _EXP_NONE && test_clock() < ts_end) - consumer_poll_once(rk); - TEST_ASSERT(exp->result == _EXP_NONE); - } - - /* Unsubscribe */ - TIMING_START(&t_unsub, "unsubscribe"); - err = rd_kafka_unsubscribe(rk); - TIMING_STOP(&t_unsub); - TEST_ASSERT(!err, "unsubscribe() failed: %s", rd_kafka_err2str(err)); - - rd_kafka_topic_partition_list_destroy(tlist); - - if (exp->exp[0]) { - /* Wait for revoke, actual messages are ignored. */ - TEST_SAY("%s: waiting for revoke\n", exp->name); - exp->result = _EXP_REVOKE; - TIMING_START(&t_assign, "revoke"); - while (exp->result != _EXP_REVOKED) - consumer_poll_once(rk); - TIMING_STOP(&t_assign); - TEST_ASSERT(exp->result == _EXP_REVOKED, - "got %d instead of revoke", exp->result); - } else { - /* Not expecting any revoke */ - int64_t ts_end = test_clock() + 5000; - exp->result = _EXP_NONE; /* Not expecting a rebalance */ - while (exp->result == _EXP_NONE && test_clock() < ts_end) - consumer_poll_once(rk); - TEST_ASSERT(exp->result == _EXP_NONE); - } - - TEST_SAY(_C_MAG "[ %s: done with %d failures ]\n", exp->name, exp->fails); - - return exp->fails; + if (exp->exp[0]) { + /* Wait for assignment, actual messages are ignored. 
*/ + exp->result = _EXP_ASSIGN; + TEST_SAY("%s: waiting for assignment\n", exp->name); + TIMING_START(&t_assign, "assignment"); + while (exp->result == _EXP_ASSIGN) + consumer_poll_once(rk); + TIMING_STOP(&t_assign); + TEST_ASSERT(exp->result == _EXP_ASSIGNED, + "got %d instead of assignment", exp->result); + + } else { + /* Not expecting any assignment */ + int64_t ts_end = test_clock() + 5000; + exp->result = _EXP_NONE; /* Not expecting a rebalance */ + while (exp->result == _EXP_NONE && test_clock() < ts_end) + consumer_poll_once(rk); + TEST_ASSERT(exp->result == _EXP_NONE); + } + + /* Unsubscribe */ + TIMING_START(&t_unsub, "unsubscribe"); + err = rd_kafka_unsubscribe(rk); + TIMING_STOP(&t_unsub); + TEST_ASSERT(!err, "unsubscribe() failed: %s", rd_kafka_err2str(err)); + + rd_kafka_topic_partition_list_destroy(tlist); + + if (exp->exp[0]) { + /* Wait for revoke, actual messages are ignored. */ + TEST_SAY("%s: waiting for revoke\n", exp->name); + exp->result = _EXP_REVOKE; + TIMING_START(&t_assign, "revoke"); + while (exp->result != _EXP_REVOKED) + consumer_poll_once(rk); + TIMING_STOP(&t_assign); + TEST_ASSERT(exp->result == _EXP_REVOKED, + "got %d instead of revoke", exp->result); + } else { + /* Not expecting any revoke */ + int64_t ts_end = test_clock() + 5000; + exp->result = _EXP_NONE; /* Not expecting a rebalance */ + while (exp->result == _EXP_NONE && test_clock() < ts_end) + consumer_poll_once(rk); + TEST_ASSERT(exp->result == _EXP_NONE); + } + + TEST_SAY(_C_MAG "[ %s: done with %d failures ]\n", exp->name, + exp->fails); + + return exp->fails; } -static int do_test (const char *assignor) { - static char topics[3][128]; - static char nonexist_topic[128]; - const int topic_cnt = 3; - rd_kafka_t *rk; - const int msgcnt = 10; - int i; - char groupid[64]; - int fails = 0; - rd_kafka_conf_t *conf; - - if (!test_check_builtin("regex")) { - TEST_SKIP("regex support not built in\n"); - return 0; - } - - testid = test_id_generate(); - 
test_str_id_generate(groupid, sizeof(groupid)); - - rd_snprintf(topics[0], sizeof(topics[0]), - "%s_%s", - test_mk_topic_name("regex_subscribe_TOPIC_0001_UNO", 0), - groupid); - rd_snprintf(topics[1], sizeof(topics[1]), - "%s_%s", - test_mk_topic_name("regex_subscribe_topic_0002_dup", 0), - groupid); - rd_snprintf(topics[2], sizeof(topics[2]), - "%s_%s", - test_mk_topic_name("regex_subscribe_TOOTHPIC_0003_3", 0), - groupid); +static int do_test(const char *assignor) { + static char topics[3][128]; + static char nonexist_topic[128]; + const int topic_cnt = 3; + rd_kafka_t *rk; + const int msgcnt = 10; + int i; + char groupid[64]; + int fails = 0; + rd_kafka_conf_t *conf; + + if (!test_check_builtin("regex")) { + TEST_SKIP("regex support not built in\n"); + return 0; + } + + testid = test_id_generate(); + test_str_id_generate(groupid, sizeof(groupid)); + + rd_snprintf(topics[0], sizeof(topics[0]), "%s_%s", + test_mk_topic_name("regex_subscribe_TOPIC_0001_UNO", 0), + groupid); + rd_snprintf(topics[1], sizeof(topics[1]), "%s_%s", + test_mk_topic_name("regex_subscribe_topic_0002_dup", 0), + groupid); + rd_snprintf(topics[2], sizeof(topics[2]), "%s_%s", + test_mk_topic_name("regex_subscribe_TOOTHPIC_0003_3", 0), + groupid); /* To avoid auto topic creation to kick in we use * an invalid topic name. */ - rd_snprintf(nonexist_topic, sizeof(nonexist_topic), - "%s_%s", - test_mk_topic_name("regex_subscribe_NONEXISTENT_0004_IV#!", - 0), - groupid); - - /* Produce messages to topics to ensure creation. 
*/ - for (i = 0 ; i < topic_cnt ; i++) - test_produce_msgs_easy(topics[i], testid, - RD_KAFKA_PARTITION_UA, msgcnt); - - test_conf_init(&conf, NULL, 20); - test_conf_set(conf, "partition.assignment.strategy", assignor); - /* Speed up propagation of new topics */ - test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); + rd_snprintf( + nonexist_topic, sizeof(nonexist_topic), "%s_%s", + test_mk_topic_name("regex_subscribe_NONEXISTENT_0004_IV#!", 0), + groupid); + + /* Produce messages to topics to ensure creation. */ + for (i = 0; i < topic_cnt; i++) + test_produce_msgs_easy(topics[i], testid, RD_KAFKA_PARTITION_UA, + msgcnt); + + test_conf_init(&conf, NULL, 20); + test_conf_set(conf, "partition.assignment.strategy", assignor); + /* Speed up propagation of new topics */ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); test_conf_set(conf, "allow.auto.create.topics", "true"); - /* Create a single consumer to handle all subscriptions. - * Has the nice side affect of testing multiple subscriptions. 
*/ - rk = test_create_consumer(groupid, rebalance_cb, conf, NULL); - - /* - * Test cases - */ - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: no regexps (0&1)", - assignor)), - .sub = { topics[0], topics[1], NULL }, - .exp = { topics[0], topics[1], NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: no regexps " - "(no matches)", - assignor)), - .sub = { nonexist_topic, NULL }, - .exp = { NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: regex all", assignor)), - .sub = { rd_strdup(tsprintf("^.*_%s", groupid)), NULL }, - .exp = { topics[0], topics[1], topics[2], NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - rd_free((void*)expect.sub[0]); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: regex 0&1", assignor)), - .sub = { rd_strdup(tsprintf("^.*[tToOpPiIcC]_0+[12]_[^_]+_%s", - groupid)), NULL }, - .exp = { topics[0], topics[1], NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - rd_free((void*)expect.sub[0]); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: regex 2", assignor)), - .sub = { rd_strdup(tsprintf("^.*TOOTHPIC_000._._%s", - groupid)), NULL }, - .exp = { topics[2], NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - rd_free((void *)expect.sub[0]); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: regex 2 and " - "nonexistent(not seen)", - assignor)), - .sub = { rd_strdup(tsprintf("^.*_000[34]_..?_%s", - groupid)), NULL }, - .exp = { topics[2], NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - rd_free((void *)expect.sub[0]); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: broken regex (no matches)", - assignor)), - .sub = { "^.*[0", NULL }, - 
.exp = { NULL }, - .exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - } - - - test_consumer_close(rk); - - rd_kafka_destroy(rk); - - if (fails) - TEST_FAIL("See %d previous failures", fails); + /* Create a single consumer to handle all subscriptions. + * Has the nice side affect of testing multiple subscriptions. */ + rk = test_create_consumer(groupid, rebalance_cb, conf, NULL); + + /* + * Test cases + */ + { + struct expect expect = {.name = rd_strdup(tsprintf( + "%s: no regexps (0&1)", assignor)), + .sub = {topics[0], topics[1], NULL}, + .exp = {topics[0], topics[1], NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + } + + { + struct expect expect = {.name = + rd_strdup(tsprintf("%s: no regexps " + "(no matches)", + assignor)), + .sub = {nonexist_topic, NULL}, + .exp = {NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + } + + { + struct expect expect = { + .name = rd_strdup(tsprintf("%s: regex all", assignor)), + .sub = {rd_strdup(tsprintf("^.*_%s", groupid)), NULL}, + .exp = {topics[0], topics[1], topics[2], NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + rd_free((void *)expect.sub[0]); + } + + { + struct expect expect = { + .name = rd_strdup(tsprintf("%s: regex 0&1", assignor)), + .sub = {rd_strdup(tsprintf( + "^.*[tToOpPiIcC]_0+[12]_[^_]+_%s", groupid)), + NULL}, + .exp = {topics[0], topics[1], NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + rd_free((void *)expect.sub[0]); + } + + { + struct expect expect = { + .name = rd_strdup(tsprintf("%s: regex 2", assignor)), + .sub = {rd_strdup( + tsprintf("^.*TOOTHPIC_000._._%s", groupid)), + NULL}, + .exp = {topics[2], NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + rd_free((void *)expect.sub[0]); + } + + { + struct expect expect = { + .name = rd_strdup(tsprintf("%s: regex 2 and " + "nonexistent(not seen)", + assignor)), + 
.sub = {rd_strdup(tsprintf("^.*_000[34]_..?_%s", groupid)), + NULL}, + .exp = {topics[2], NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + rd_free((void *)expect.sub[0]); + } + + { + struct expect expect = { + .name = rd_strdup( + tsprintf("%s: broken regex (no matches)", assignor)), + .sub = {"^.*[0", NULL}, + .exp = {NULL}, + .exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + } + + + test_consumer_close(rk); + + rd_kafka_destroy(rk); + + if (fails) + TEST_FAIL("See %d previous failures", fails); return 0; } -int main_0033_regex_subscribe (int argc, char **argv) { - do_test("range"); - do_test("roundrobin"); - return 0; +int main_0033_regex_subscribe(int argc, char **argv) { + do_test("range"); + do_test("roundrobin"); + return 0; } /** * @brief Subscription API tests that dont require a broker */ -int main_0033_regex_subscribe_local (int argc, char **argv) { - rd_kafka_topic_partition_list_t *valids, *invalids, *none, - *empty, *alot; +int main_0033_regex_subscribe_local(int argc, char **argv) { + rd_kafka_topic_partition_list_t *valids, *invalids, *none, *empty, + *alot; rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_resp_err_t err; char errstr[256]; int i; - valids = rd_kafka_topic_partition_list_new(0); + valids = rd_kafka_topic_partition_list_new(0); invalids = rd_kafka_topic_partition_list_new(100); - none = rd_kafka_topic_partition_list_new(1000); - empty = rd_kafka_topic_partition_list_new(5); - alot = rd_kafka_topic_partition_list_new(1); + none = rd_kafka_topic_partition_list_new(1000); + empty = rd_kafka_topic_partition_list_new(5); + alot = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(valids, "not_a_regex", 0); rd_kafka_topic_partition_list_add(valids, "^My[vV]alid..regex+", 0); @@ -471,7 +462,7 @@ int main_0033_regex_subscribe_local (int argc, char **argv) { rd_kafka_topic_partition_list_add(empty, "", 0); 
rd_kafka_topic_partition_list_add(empty, "^ok", 0); - for (i = 0 ; i < 10000 ; i++) { + for (i = 0; i < 10000; i++) { char topic[32]; rd_snprintf(topic, sizeof(topic), "^Va[lLid]_regex_%d$", i); rd_kafka_topic_partition_list_add(alot, topic, i); diff --git a/tests/0034-offset_reset.c b/tests/0034-offset_reset.c index aae8fdb508..9276764c8e 100644 --- a/tests/0034-offset_reset.c +++ b/tests/0034-offset_reset.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ #include "../src/rdkafka_protocol.h" @@ -40,114 +40,112 @@ */ -static void do_test_reset (const char *topic, int partition, - const char *reset, int64_t initial_offset, - int exp_eofcnt, int exp_msgcnt, int exp_errcnt, - int exp_resetcnt) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - int eofcnt = 0, msgcnt = 0, errcnt = 0, resetcnt = 0; +static void do_test_reset(const char *topic, + int partition, + const char *reset, + int64_t initial_offset, + int exp_eofcnt, + int exp_msgcnt, + int exp_errcnt, + int exp_resetcnt) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + int eofcnt = 0, msgcnt = 0, errcnt = 0, resetcnt = 0; rd_kafka_conf_t *conf; - TEST_SAY("Test auto.offset.reset=%s, " - "expect %d msgs, %d EOFs, %d errors, %d resets\n", - reset, exp_msgcnt, exp_eofcnt, exp_errcnt, exp_resetcnt); + TEST_SAY( + "Test auto.offset.reset=%s, " + "expect %d msgs, %d EOFs, %d errors, %d resets\n", + reset, exp_msgcnt, exp_eofcnt, exp_errcnt, exp_resetcnt); test_conf_init(&conf, NULL, 60); test_conf_set(conf, "enable.partition.eof", "true"); - rk = test_create_consumer(NULL, NULL, conf, NULL); - rkt = test_create_topic_object(rk, topic, "auto.offset.reset", reset, - NULL); - - test_consumer_start(reset, rkt, partition, initial_offset); - while (1) { - rd_kafka_message_t *rkm; - - rkm = rd_kafka_consume(rkt, partition, tmout_multip(1000*10)); - if 
(!rkm) - TEST_FAIL("%s: no message for 10s: " - "%d/%d messages, %d/%d EOFs, %d/%d errors\n", - reset, msgcnt, exp_msgcnt, - eofcnt, exp_eofcnt, - errcnt, exp_errcnt); - - if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("%s: received EOF at offset %"PRId64"\n", - reset, rkm->offset); - eofcnt++; + rk = test_create_consumer(NULL, NULL, conf, NULL); + rkt = test_create_topic_object(rk, topic, "auto.offset.reset", reset, + NULL); + + test_consumer_start(reset, rkt, partition, initial_offset); + while (1) { + rd_kafka_message_t *rkm; + + rkm = rd_kafka_consume(rkt, partition, tmout_multip(1000 * 10)); + if (!rkm) + TEST_FAIL( + "%s: no message for 10s: " + "%d/%d messages, %d/%d EOFs, %d/%d errors\n", + reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt, + errcnt, exp_errcnt); + + if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + TEST_SAY("%s: received EOF at offset %" PRId64 "\n", + reset, rkm->offset); + eofcnt++; } else if (rkm->err == RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) { - TEST_SAY("%s: auto.offset.reset error at offset %"PRId64 - ": %s: %s\n", - reset, rkm->offset, - rd_kafka_err2name(rkm->err), - rd_kafka_message_errstr(rkm)); + TEST_SAY( + "%s: auto.offset.reset error at offset %" PRId64 + ": %s: %s\n", + reset, rkm->offset, rd_kafka_err2name(rkm->err), + rd_kafka_message_errstr(rkm)); resetcnt++; - } else if (rkm->err) { - TEST_SAY("%s: consume error at offset %"PRId64": %s\n", - reset, rkm->offset, - rd_kafka_message_errstr(rkm)); - errcnt++; - } else { - msgcnt++; - } - - rd_kafka_message_destroy(rkm); - - if (eofcnt == exp_eofcnt && - errcnt == exp_errcnt && - msgcnt == exp_msgcnt && - resetcnt == exp_resetcnt) - break; - else if (eofcnt > exp_eofcnt || - errcnt > exp_errcnt || - msgcnt > exp_msgcnt || - resetcnt > exp_resetcnt) - TEST_FAIL("%s: unexpected: " - "%d/%d messages, %d/%d EOFs, %d/%d errors, " - "%d/%d resets\n", - reset, - msgcnt, exp_msgcnt, - eofcnt, exp_eofcnt, - errcnt, exp_errcnt, - resetcnt, exp_resetcnt); - } - - 
TEST_SAY("%s: Done: " - "%d/%d messages, %d/%d EOFs, %d/%d errors, %d/%d resets\n", - reset, - msgcnt, exp_msgcnt, - eofcnt, exp_eofcnt, - errcnt, exp_errcnt, - resetcnt, exp_resetcnt); - - test_consumer_stop(reset, rkt, partition); - - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + } else if (rkm->err) { + TEST_SAY( + "%s: consume error at offset %" PRId64 ": %s\n", + reset, rkm->offset, rd_kafka_message_errstr(rkm)); + errcnt++; + } else { + msgcnt++; + } + + rd_kafka_message_destroy(rkm); + + if (eofcnt == exp_eofcnt && errcnt == exp_errcnt && + msgcnt == exp_msgcnt && resetcnt == exp_resetcnt) + break; + else if (eofcnt > exp_eofcnt || errcnt > exp_errcnt || + msgcnt > exp_msgcnt || resetcnt > exp_resetcnt) + TEST_FAIL( + "%s: unexpected: " + "%d/%d messages, %d/%d EOFs, %d/%d errors, " + "%d/%d resets\n", + reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt, + errcnt, exp_errcnt, resetcnt, exp_resetcnt); + } + + TEST_SAY( + "%s: Done: " + "%d/%d messages, %d/%d EOFs, %d/%d errors, %d/%d resets\n", + reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt, errcnt, exp_errcnt, + resetcnt, exp_resetcnt); + + test_consumer_stop(reset, rkt, partition); + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); } -int main_0034_offset_reset (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int partition = 0; - const int msgcnt = test_quick ? 20 : 100; +int main_0034_offset_reset(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int partition = 0; + const int msgcnt = test_quick ? 20 : 100; + + /* Produce messages */ + test_produce_msgs_easy(topic, 0, partition, msgcnt); - /* Produce messages */ - test_produce_msgs_easy(topic, 0, partition, msgcnt); + /* auto.offset.reset=latest: Consume messages from invalid offset: + * Should return EOF. 
*/ + do_test_reset(topic, partition, "latest", msgcnt + 5, 1, 0, 0, 0); - /* auto.offset.reset=latest: Consume messages from invalid offset: - * Should return EOF. */ - do_test_reset(topic, partition, "latest", msgcnt+5, 1, 0, 0, 0); - - /* auto.offset.reset=earliest: Consume messages from invalid offset: - * Should return messages from beginning. */ - do_test_reset(topic, partition, "earliest", msgcnt+5, 1, msgcnt, 0, 0); + /* auto.offset.reset=earliest: Consume messages from invalid offset: + * Should return messages from beginning. */ + do_test_reset(topic, partition, "earliest", msgcnt + 5, 1, msgcnt, 0, + 0); - /* auto.offset.reset=error: Consume messages from invalid offset: - * Should return error. */ - do_test_reset(topic, partition, "error", msgcnt+5, 0, 0, 0, 1); + /* auto.offset.reset=error: Consume messages from invalid offset: + * Should return error. */ + do_test_reset(topic, partition, "error", msgcnt + 5, 0, 0, 0, 1); - return 0; + return 0; } @@ -155,15 +153,15 @@ int main_0034_offset_reset (int argc, char **argv) { * @brief Verify auto.offset.reset=error behaviour for a range of different * error cases. 
*/ -static void offset_reset_errors (void) { +static void offset_reset_errors(void) { rd_kafka_t *c; rd_kafka_conf_t *conf; rd_kafka_mock_cluster_t *mcluster; const char *bootstraps; - const char *topic = "topic"; + const char *topic = "topic"; const int32_t partition = 0; - const int msgcnt = 10; - const int broker_id = 1; + const int msgcnt = 10; + const int broker_id = 1; rd_kafka_queue_t *queue; int i; struct { @@ -176,29 +174,27 @@ static void offset_reset_errors (void) { int64_t expect_offset; rd_bool_t broker_down; /**< Bring the broker down */ } test[] = { - { RD_KAFKA_RESP_ERR__TRANSPORT, - RD_KAFKA_RESP_ERR_NO_ERROR, - RD_KAFKA_OFFSET_TAIL(msgcnt), - 0, - .broker_down = rd_true, - }, - { RD_KAFKA_RESP_ERR__TRANSPORT, - RD_KAFKA_RESP_ERR_NO_ERROR, - RD_KAFKA_OFFSET_TAIL(msgcnt), - 0, - /* only disconnect on the ListOffsets request */ - .broker_down = rd_false, - }, - { RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, - RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, - RD_KAFKA_OFFSET_TAIL(msgcnt), - -1 - }, - { RD_KAFKA_RESP_ERR_NO_ERROR, - RD_KAFKA_RESP_ERR__NO_OFFSET, - RD_KAFKA_OFFSET_STORED, /* There's no committed offset */ - -1 - }, + { + RD_KAFKA_RESP_ERR__TRANSPORT, + RD_KAFKA_RESP_ERR_NO_ERROR, + RD_KAFKA_OFFSET_TAIL(msgcnt), + 0, + .broker_down = rd_true, + }, + { + RD_KAFKA_RESP_ERR__TRANSPORT, + RD_KAFKA_RESP_ERR_NO_ERROR, + RD_KAFKA_OFFSET_TAIL(msgcnt), + 0, + /* only disconnect on the ListOffsets request */ + .broker_down = rd_false, + }, + {RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + RD_KAFKA_OFFSET_TAIL(msgcnt), -1}, + {RD_KAFKA_RESP_ERR_NO_ERROR, RD_KAFKA_RESP_ERR__NO_OFFSET, + RD_KAFKA_OFFSET_STORED, /* There's no committed offset */ + -1}, }; @@ -210,10 +206,9 @@ static void offset_reset_errors (void) { * between beginning and end. 
*/ test_produce_msgs_easy_v(topic, 0, partition, 0, msgcnt, 10, "security.protocol", "plaintext", - "bootstrap.servers", bootstraps, - NULL); + "bootstrap.servers", bootstraps, NULL); - test_conf_init(&conf, NULL, 60*5); + test_conf_init(&conf, NULL, 60 * 5); test_conf_set(conf, "security.protocol", "plaintext"); test_conf_set(conf, "bootstrap.servers", bootstraps); @@ -232,15 +227,14 @@ static void offset_reset_errors (void) { queue = rd_kafka_queue_get_consumer(c); - for (i = 0 ; i < (int)RD_ARRAYSIZE(test) ; i++) { + for (i = 0; i < (int)RD_ARRAYSIZE(test); i++) { rd_kafka_event_t *ev; rd_bool_t broker_down = rd_false; /* Make sure consumer is connected */ test_wait_topic_exists(c, topic, 5000); - TEST_SAY(_C_YEL "#%d: injecting %s, expecting %s\n", - i, + TEST_SAY(_C_YEL "#%d: injecting %s, expecting %s\n", i, rd_kafka_err2name(test[i].inject), rd_kafka_err2name(test[i].expect)); @@ -252,19 +246,14 @@ static void offset_reset_errors (void) { } else if (test[i].inject) { rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_ListOffsets, 5, - test[i].inject, - test[i].inject, - test[i].inject, - test[i].inject, - test[i].inject); + mcluster, RD_KAFKAP_ListOffsets, 5, test[i].inject, + test[i].inject, test[i].inject, test[i].inject, + test[i].inject); /* mock handler will close the connection on this * request */ if (test[i].inject == RD_KAFKA_RESP_ERR__TRANSPORT) broker_down = rd_true; - } test_consumer_assign_partition("ASSIGN", c, topic, partition, @@ -295,21 +284,19 @@ static void offset_reset_errors (void) { if (rd_kafka_event_error(ev) != RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) { - TEST_SAY("#%d: Ignoring %s event: %s\n", - i, - rd_kafka_event_name(ev), - rd_kafka_event_error_string( - ev)); + TEST_SAY( + "#%d: Ignoring %s event: %s\n", i, + rd_kafka_event_name(ev), + rd_kafka_event_error_string(ev)); rd_kafka_event_destroy(ev); continue; } - TEST_SAY("#%d: injected %s, got error %s: %s\n", - i, - rd_kafka_err2name(test[i].inject), - rd_kafka_err2name( - 
rd_kafka_event_error(ev)), - rd_kafka_event_error_string(ev)); + TEST_SAY( + "#%d: injected %s, got error %s: %s\n", i, + rd_kafka_err2name(test[i].inject), + rd_kafka_err2name(rd_kafka_event_error(ev)), + rd_kafka_event_error_string(ev)); /* The auto reset error code is always * ERR__AUTO_OFFSET_RESET, and the original @@ -317,14 +304,12 @@ static void offset_reset_errors (void) { * So use err2str() to compare the error * string to the expected error. */ TEST_ASSERT( - strstr(rd_kafka_event_error_string(ev), - rd_kafka_err2str( - test[i].expect)), - "#%d: expected %s, got %s", - i, - rd_kafka_err2name(test[i].expect), - rd_kafka_err2name( - rd_kafka_event_error(ev))); + strstr(rd_kafka_event_error_string(ev), + rd_kafka_err2str(test[i].expect)), + "#%d: expected %s, got %s", i, + rd_kafka_err2name(test[i].expect), + rd_kafka_err2name( + rd_kafka_event_error(ev))); rd_kafka_event_destroy(ev); break; @@ -332,42 +317,38 @@ static void offset_reset_errors (void) { } else if (rd_kafka_event_type(ev) == RD_KAFKA_EVENT_FETCH) { const rd_kafka_message_t *rkm = - rd_kafka_event_message_next(ev); + rd_kafka_event_message_next(ev); TEST_ASSERT(rkm, "#%d: got null message", i); - TEST_SAY("#%d: message at offset %"PRId64 + TEST_SAY("#%d: message at offset %" PRId64 " (%s)\n", - i, - rkm->offset, + i, rkm->offset, rd_kafka_err2name(rkm->err)); TEST_ASSERT(!test[i].expect, "#%d: got message when expecting " - "error", i); - - TEST_ASSERT(test[i].expect_offset == - rkm->offset, - "#%d: expected message offset " - "%"PRId64", got %"PRId64 - " (%s)", - i, - test[i].expect_offset, - rkm->offset, - rd_kafka_err2name(rkm->err)); - - TEST_SAY("#%d: got expected message at " - "offset %"PRId64" (%s)\n", - i, - rkm->offset, - rd_kafka_err2name(rkm->err)); + "error", + i); + + TEST_ASSERT( + test[i].expect_offset == rkm->offset, + "#%d: expected message offset " + "%" PRId64 ", got %" PRId64 " (%s)", + i, test[i].expect_offset, rkm->offset, + rd_kafka_err2name(rkm->err)); + + TEST_SAY( + 
"#%d: got expected message at " + "offset %" PRId64 " (%s)\n", + i, rkm->offset, + rd_kafka_err2name(rkm->err)); rd_kafka_event_destroy(ev); break; } else { - TEST_SAY("#%d: Ignoring %s event: %s\n", - i, + TEST_SAY("#%d: Ignoring %s event: %s\n", i, rd_kafka_event_name(ev), rd_kafka_event_error_string(ev)); rd_kafka_event_destroy(ev); @@ -389,7 +370,7 @@ static void offset_reset_errors (void) { SUB_TEST_PASS(); } -int main_0034_offset_reset_mock (int argc, char **argv) { +int main_0034_offset_reset_mock(int argc, char **argv) { offset_reset_errors(); return 0; diff --git a/tests/0035-api_version.c b/tests/0035-api_version.c index e10f34038d..d005b1e9ec 100644 --- a/tests/0035-api_version.c +++ b/tests/0035-api_version.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -41,34 +41,33 @@ */ -int main_0035_api_version (int argc, char **argv) { - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - const struct rd_kafka_metadata *metadata; - rd_kafka_resp_err_t err; - test_timing_t t_meta; +int main_0035_api_version(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + const struct rd_kafka_metadata *metadata; + rd_kafka_resp_err_t err; + test_timing_t t_meta; - test_conf_init(&conf, NULL, 30); - test_conf_set(conf, "socket.timeout.ms", "12000"); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "socket.timeout.ms", "12000"); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - TEST_SAY("Querying for metadata\n"); - TIMING_START(&t_meta, "metadata()"); - err = rd_kafka_metadata(rk, 0, NULL, &metadata, tmout_multip(5*1000)); - TIMING_STOP(&t_meta); - if (err) - TEST_FAIL("metadata() failed: %s", - rd_kafka_err2str(err)); + TEST_SAY("Querying for metadata\n"); + TIMING_START(&t_meta, "metadata()"); + err = 
rd_kafka_metadata(rk, 0, NULL, &metadata, tmout_multip(5 * 1000)); + TIMING_STOP(&t_meta); + if (err) + TEST_FAIL("metadata() failed: %s", rd_kafka_err2str(err)); - if (TIMING_DURATION(&t_meta) / 1000 > 15*1000) - TEST_FAIL("metadata() took too long: %.3fms", - (float)TIMING_DURATION(&t_meta) / 1000.0f); + if (TIMING_DURATION(&t_meta) / 1000 > 15 * 1000) + TEST_FAIL("metadata() took too long: %.3fms", + (float)TIMING_DURATION(&t_meta) / 1000.0f); - rd_kafka_metadata_destroy(metadata); + rd_kafka_metadata_destroy(metadata); - TEST_SAY("Metadata succeeded\n"); + TEST_SAY("Metadata succeeded\n"); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); - return 0; + return 0; } diff --git a/tests/0036-partial_fetch.c b/tests/0036-partial_fetch.c index 9851c217af..69ee9864c8 100644 --- a/tests/0036-partial_fetch.c +++ b/tests/0036-partial_fetch.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -44,43 +44,43 @@ */ -int main_0036_partial_fetch (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int partition = 0; - const int msgcnt = 100; - const int msgsize = 1000; - uint64_t testid; - rd_kafka_conf_t *conf; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; +int main_0036_partial_fetch(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int partition = 0; + const int msgcnt = 100; + const int msgsize = 1000; + uint64_t testid; + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; - TEST_SAY("Producing %d messages of size %d to %s [%d]\n", - msgcnt, (int)msgsize, topic, partition); - testid = test_id_generate(); - rk = test_create_producer(); - rkt = test_create_producer_topic(rk, topic, NULL); + TEST_SAY("Producing %d messages of size %d to %s [%d]\n", msgcnt, + (int)msgsize, topic, partition); + testid = 
test_id_generate(); + rk = test_create_producer(); + rkt = test_create_producer_topic(rk, topic, NULL); - test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, msgsize); + test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, msgsize); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - TEST_SAY("Creating consumer\n"); - test_conf_init(&conf, NULL, 0); - /* This should fetch 1.5 messages per fetch, thus resulting in - * partial fetches, hopefully. */ - test_conf_set(conf, "fetch.message.max.bytes", "1500"); - rk = test_create_consumer(NULL, NULL, conf, NULL); - rkt = rd_kafka_topic_new(rk, topic, NULL); + TEST_SAY("Creating consumer\n"); + test_conf_init(&conf, NULL, 0); + /* This should fetch 1.5 messages per fetch, thus resulting in + * partial fetches, hopefully. */ + test_conf_set(conf, "fetch.message.max.bytes", "1500"); + rk = test_create_consumer(NULL, NULL, conf, NULL); + rkt = rd_kafka_topic_new(rk, topic, NULL); - test_consumer_start("CONSUME", rkt, partition, - RD_KAFKA_OFFSET_BEGINNING); - test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, - 0, msgcnt, 1); - test_consumer_stop("CONSUME", rkt, partition); + test_consumer_start("CONSUME", rkt, partition, + RD_KAFKA_OFFSET_BEGINNING); + test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0, + msgcnt, 1); + test_consumer_stop("CONSUME", rkt, partition); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - return 0; + return 0; } diff --git a/tests/0037-destroy_hang_local.c b/tests/0037-destroy_hang_local.c index 950cc284b2..3b543fb6f4 100644 --- a/tests/0037-destroy_hang_local.c +++ b/tests/0037-destroy_hang_local.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -39,47 +39,44 @@ - - /** * Issue #530: * "Legacy Consumer. Delete hangs if done right after RdKafka::Consumer::create. * But If I put a start and stop in between, there is no issue." */ -static int legacy_consumer_early_destroy (void) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - int pass; - const char *topic = test_mk_topic_name(__FUNCTION__, 0); - - for (pass = 0 ; pass < 2 ; pass++) { - TEST_SAY("%s: pass #%d\n", __FUNCTION__, pass); - - rk = test_create_handle(RD_KAFKA_CONSUMER, NULL); - - if (pass == 1) { - /* Second pass, create a topic too. */ - rkt = rd_kafka_topic_new(rk, topic, NULL); - TEST_ASSERT(rkt, "failed to create topic: %s", - rd_kafka_err2str( - rd_kafka_last_error())); - rd_sleep(1); - rd_kafka_topic_destroy(rkt); - } - - rd_kafka_destroy(rk); - } - - return 0; +static int legacy_consumer_early_destroy(void) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + int pass; + const char *topic = test_mk_topic_name(__FUNCTION__, 0); + + for (pass = 0; pass < 2; pass++) { + TEST_SAY("%s: pass #%d\n", __FUNCTION__, pass); + + rk = test_create_handle(RD_KAFKA_CONSUMER, NULL); + + if (pass == 1) { + /* Second pass, create a topic too. 
*/ + rkt = rd_kafka_topic_new(rk, topic, NULL); + TEST_ASSERT(rkt, "failed to create topic: %s", + rd_kafka_err2str(rd_kafka_last_error())); + rd_sleep(1); + rd_kafka_topic_destroy(rkt); + } + + rd_kafka_destroy(rk); + } + + return 0; } -int main_0037_destroy_hang_local (int argc, char **argv) { +int main_0037_destroy_hang_local(int argc, char **argv) { int fails = 0; - test_conf_init(NULL, NULL, 30); + test_conf_init(NULL, NULL, 30); - fails += legacy_consumer_early_destroy(); + fails += legacy_consumer_early_destroy(); if (fails > 0) TEST_FAIL("See %d previous error(s)\n", fails); diff --git a/tests/0038-performance.c b/tests/0038-performance.c index c03f02c3e4..674964dc9c 100644 --- a/tests/0038-performance.c +++ b/tests/0038-performance.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -41,78 +41,80 @@ */ -int main_0038_performance (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int partition = 0; - const int msgsize = 100; - uint64_t testid; - rd_kafka_conf_t *conf; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - test_timing_t t_create, t_produce, t_consume; - int totsize = 1024 * 1024 * (test_quick ? 8 : 128); - int msgcnt; - - if (!strcmp(test_mode, "valgrind") || !strcmp(test_mode, "helgrind") || - !strcmp(test_mode, "drd")) - totsize = 1024*1024*8; /* 8 meg, valgrind is slow. 
*/ - - msgcnt = totsize / msgsize; - - TEST_SAY("Producing %d messages of size %d to %s [%d]\n", - msgcnt, (int)msgsize, topic, partition); - testid = test_id_generate(); - test_conf_init(&conf, NULL, 120); - rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - test_conf_set(conf, "queue.buffering.max.messages", "10000000"); +int main_0038_performance(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int partition = 0; + const int msgsize = 100; + uint64_t testid; + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + test_timing_t t_create, t_produce, t_consume; + int totsize = 1024 * 1024 * (test_quick ? 8 : 128); + int msgcnt; + + if (!strcmp(test_mode, "valgrind") || !strcmp(test_mode, "helgrind") || + !strcmp(test_mode, "drd")) + totsize = 1024 * 1024 * 8; /* 8 meg, valgrind is slow. */ + + msgcnt = totsize / msgsize; + + TEST_SAY("Producing %d messages of size %d to %s [%d]\n", msgcnt, + (int)msgsize, topic, partition); + testid = test_id_generate(); + test_conf_init(&conf, NULL, 120); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + test_conf_set(conf, "queue.buffering.max.messages", "10000000"); test_conf_set(conf, "linger.ms", "100"); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_producer_topic(rk, topic, "acks", "1", NULL); - - /* First produce one message to create the topic, etc, this might take - * a while and we dont want this to affect the throughput timing. 
*/ - TIMING_START(&t_create, "CREATE TOPIC"); - test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, msgsize); - TIMING_STOP(&t_create); - - TIMING_START(&t_produce, "PRODUCE"); - test_produce_msgs(rk, rkt, testid, partition, 1, msgcnt-1, NULL, msgsize); - TIMING_STOP(&t_produce); - - TEST_SAY("Destroying producer\n"); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); - - TEST_SAY("Creating consumer\n"); - test_conf_init(&conf, NULL, 120); - rk = test_create_consumer(NULL, NULL, conf, NULL); - rkt = rd_kafka_topic_new(rk, topic, NULL); - - test_consumer_start("CONSUME", rkt, partition, - RD_KAFKA_OFFSET_BEGINNING); - TIMING_START(&t_consume, "CONSUME"); - test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, - 0, msgcnt, 1); - TIMING_STOP(&t_consume); - test_consumer_stop("CONSUME", rkt, partition); - - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); - - TEST_REPORT("{ \"producer\": " - " { \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f }," - " \"consumer\": " - "{ \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f } " - "}", - (double) - (totsize/((double)TIMING_DURATION(&t_produce)/1000000.0f)) / - 1000000.0f, - (float) - (msgcnt/((double)TIMING_DURATION(&t_produce)/1000000.0f)), - (double) - (totsize/((double)TIMING_DURATION(&t_consume)/1000000.0f)) / - 1000000.0f, - (float) - (msgcnt/((double)TIMING_DURATION(&t_consume)/1000000.0f))); - return 0; + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(rk, topic, "acks", "1", NULL); + + /* First produce one message to create the topic, etc, this might take + * a while and we dont want this to affect the throughput timing. 
*/ + TIMING_START(&t_create, "CREATE TOPIC"); + test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, msgsize); + TIMING_STOP(&t_create); + + TIMING_START(&t_produce, "PRODUCE"); + test_produce_msgs(rk, rkt, testid, partition, 1, msgcnt - 1, NULL, + msgsize); + TIMING_STOP(&t_produce); + + TEST_SAY("Destroying producer\n"); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); + + TEST_SAY("Creating consumer\n"); + test_conf_init(&conf, NULL, 120); + rk = test_create_consumer(NULL, NULL, conf, NULL); + rkt = rd_kafka_topic_new(rk, topic, NULL); + + test_consumer_start("CONSUME", rkt, partition, + RD_KAFKA_OFFSET_BEGINNING); + TIMING_START(&t_consume, "CONSUME"); + test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0, + msgcnt, 1); + TIMING_STOP(&t_consume); + test_consumer_stop("CONSUME", rkt, partition); + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); + + TEST_REPORT( + "{ \"producer\": " + " { \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f }," + " \"consumer\": " + "{ \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f } " + "}", + (double)(totsize / + ((double)TIMING_DURATION(&t_produce) / 1000000.0f)) / + 1000000.0f, + (float)(msgcnt / + ((double)TIMING_DURATION(&t_produce) / 1000000.0f)), + (double)(totsize / + ((double)TIMING_DURATION(&t_consume) / 1000000.0f)) / + 1000000.0f, + (float)(msgcnt / + ((double)TIMING_DURATION(&t_consume) / 1000000.0f))); + return 0; } diff --git a/tests/0039-event.c b/tests/0039-event.c index f11cb591f6..9ddfacc080 100644 --- a/tests/0039-event.c +++ b/tests/0039-event.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. 
Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,144 +35,143 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgid_next = 0; -static int fails = 0; +static int fails = 0; /** * Handle delivery reports */ -static void handle_drs (rd_kafka_event_t *rkev) { - const rd_kafka_message_t *rkmessage; +static void handle_drs(rd_kafka_event_t *rkev) { + const rd_kafka_message_t *rkmessage; - while ((rkmessage = rd_kafka_event_message_next(rkev))) { + while ((rkmessage = rd_kafka_event_message_next(rkev))) { int32_t broker_id = rd_kafka_message_broker_id(rkmessage); - int msgid = *(int *)rkmessage->_private; - free(rkmessage->_private); - - TEST_SAYL(3,"Got rkmessage %s [%"PRId32"] @ %"PRId64": " - "from broker %"PRId32": %s\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, rkmessage->offset, - broker_id, - rd_kafka_err2str(rkmessage->err)); - - - if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(rkmessage->err)); - - if (msgid != msgid_next) { - fails++; - TEST_FAIL("Delivered msg %i, expected %i\n", - msgid, msgid_next); - return; - } + int msgid = *(int *)rkmessage->_private; + free(rkmessage->_private); + + TEST_SAYL(3, + "Got rkmessage %s [%" PRId32 "] @ %" PRId64 + ": " + "from broker %" PRId32 ": %s\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset, broker_id, + rd_kafka_err2str(rkmessage->err)); + + + if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(rkmessage->err)); + + if (msgid != msgid_next) { + fails++; + TEST_FAIL("Delivered msg %i, expected %i\n", msgid, + msgid_next); + return; + } - TEST_ASSERT(broker_id >= 0, - "Message %d has no broker id set", msgid); + TEST_ASSERT(broker_id >= 0, "Message %d has no broker id set", + msgid); - msgid_next = msgid+1; - } + msgid_next = msgid + 1; + } } /** * @brief Test delivery report events */ -int main_0039_event_dr (int argc, char **argv) { - int 
partition = 0; - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char msg[128]; - int msgcnt = test_quick ? 500 : 50000; - int i; +int main_0039_event_dr(int argc, char **argv) { + int partition = 0; + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char msg[128]; + int msgcnt = test_quick ? 500 : 50000; + int i; test_timing_t t_produce, t_delivery; - rd_kafka_queue_t *eventq; + rd_kafka_queue_t *eventq; - test_conf_init(&conf, &topic_conf, 10); + test_conf_init(&conf, &topic_conf, 10); - /* Set delivery report callback */ - rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + /* Set delivery report callback */ + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR); + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - eventq = rd_kafka_queue_get_main(rk); + eventq = rd_kafka_queue_get_main(rk); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), - topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); - /* Produce messages */ + /* Produce messages */ TIMING_START(&t_produce, "PRODUCE"); - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; - rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); - if (r == -1) - TEST_FAIL("Failed to produce message #%i: %s\n", - i, rd_strerror(errno)); - } + for (i = 0; i < msgcnt; i++) { + int *msgidp = malloc(sizeof(*msgidp)); + *msgidp = i; + 
rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], + i); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); + if (r == -1) + TEST_FAIL("Failed to produce message #%i: %s\n", i, + rd_strerror(errno)); + } TIMING_STOP(&t_produce); - TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt); + TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt); - /* Wait for messages to be delivered */ + /* Wait for messages to be delivered */ TIMING_START(&t_delivery, "DELIVERY"); - while (rd_kafka_outq_len(rk) > 0) { - rd_kafka_event_t *rkev; - rkev = rd_kafka_queue_poll(eventq, 1000); - switch (rd_kafka_event_type(rkev)) - { - case RD_KAFKA_EVENT_DR: - TEST_SAYL(3, "%s event with %"PRIusz" messages\n", + while (rd_kafka_outq_len(rk) > 0) { + rd_kafka_event_t *rkev; + rkev = rd_kafka_queue_poll(eventq, 1000); + switch (rd_kafka_event_type(rkev)) { + case RD_KAFKA_EVENT_DR: + TEST_SAYL(3, "%s event with %" PRIusz " messages\n", rd_kafka_event_name(rkev), rd_kafka_event_message_count(rkev)); - handle_drs(rkev); - break; - default: - TEST_SAY("Unhandled event: %s\n", - rd_kafka_event_name(rkev)); - break; - } - rd_kafka_event_destroy(rkev); - } + handle_drs(rkev); + break; + default: + TEST_SAY("Unhandled event: %s\n", + rd_kafka_event_name(rkev)); + break; + } + rd_kafka_event_destroy(rkev); + } TIMING_STOP(&t_delivery); - if (fails) - TEST_FAIL("%i failures, see previous errors", fails); + if (fails) + TEST_FAIL("%i failures, see previous errors", fails); - if (msgid_next != msgcnt) - TEST_FAIL("Still waiting for messages: next %i != end %i\n", - msgid_next, msgcnt); + if (msgid_next != msgcnt) + TEST_FAIL("Still waiting for messages: next %i != end %i\n", + msgid_next, msgcnt); - rd_kafka_queue_destroy(eventq); + rd_kafka_queue_destroy(eventq); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - 
TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); - return 0; + return 0; } /** * @brief Local test: test log events */ -int main_0039_event_log (int argc, char **argv) { +int main_0039_event_log(int argc, char **argv) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_queue_t *eventq; @@ -189,7 +188,7 @@ int main_0039_event_log (int argc, char **argv) { rd_kafka_conf_set(conf, "debug", "all", NULL, 0); /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); eventq = rd_kafka_queue_get_main(rk); rd_kafka_set_log_queue(rk, eventq); @@ -200,17 +199,18 @@ int main_0039_event_log (int argc, char **argv) { rd_kafka_event_t *rkev; rkev = rd_kafka_queue_poll(eventq, 1000); - switch (rd_kafka_event_type(rkev)) - { + switch (rd_kafka_event_type(rkev)) { case RD_KAFKA_EVENT_LOG: rd_kafka_event_log(rkev, &fac, &msg, &level); rd_kafka_event_debug_contexts(rkev, ctx, sizeof(ctx)); - TEST_SAY("Got log event: " - "level: %d ctx: %s fac: %s: msg: %s\n", - level, ctx, fac, msg); + TEST_SAY( + "Got log event: " + "level: %d ctx: %s fac: %s: msg: %s\n", + level, ctx, fac, msg); if (strchr(ctx, '$')) { - TEST_FAIL("ctx was not set by " - "rd_kafka_event_debug_contexts()"); + TEST_FAIL( + "ctx was not set by " + "rd_kafka_event_debug_contexts()"); } waitevent = 0; break; @@ -233,7 +233,7 @@ int main_0039_event_log (int argc, char **argv) { /** * @brief Local test: test event generation */ -int main_0039_event (int argc, char **argv) { +int main_0039_event(int argc, char **argv) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_queue_t *eventq; @@ -256,12 +256,11 @@ int main_0039_event (int argc, char **argv) { while (waitevent) { rd_kafka_event_t *rkev; rkev = rd_kafka_queue_poll(eventq, 1000); - switch (rd_kafka_event_type(rkev)) - { + switch 
(rd_kafka_event_type(rkev)) { case RD_KAFKA_EVENT_ERROR: TEST_SAY("Got %s%s event: %s: %s\n", - rd_kafka_event_error_is_fatal(rkev) ? - "FATAL " : "", + rd_kafka_event_error_is_fatal(rkev) ? "FATAL " + : "", rd_kafka_event_name(rkev), rd_kafka_err2name(rd_kafka_event_error(rkev)), rd_kafka_event_error_string(rkev)); diff --git a/tests/0040-io_event.c b/tests/0040-io_event.c index 7ae9f46d13..d47da52060 100644 --- a/tests/0040-io_event.c +++ b/tests/0040-io_event.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,7 +35,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ #include #ifdef _WIN32 @@ -48,52 +48,48 @@ -int main_0040_io_event (int argc, char **argv) { - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *tconf; - rd_kafka_t *rk_p, *rk_c; - const char *topic; - rd_kafka_topic_t *rkt_p; - rd_kafka_queue_t *queue; - uint64_t testid; +int main_0040_io_event(int argc, char **argv) { + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + rd_kafka_t *rk_p, *rk_c; + const char *topic; + rd_kafka_topic_t *rkt_p; + rd_kafka_queue_t *queue; + uint64_t testid; int msgcnt = test_quick ? 
10 : 100; - int recvd = 0; - int fds[2]; - int wait_multiplier = 1; - struct pollfd pfd; + int recvd = 0; + int fds[2]; + int wait_multiplier = 1; + struct pollfd pfd; int r; rd_kafka_resp_err_t err; - enum { - _NOPE, - _YEP, - _REBALANCE - } expecting_io = _REBALANCE; + enum { _NOPE, _YEP, _REBALANCE } expecting_io = _REBALANCE; #ifdef _WIN32 TEST_SKIP("WSAPoll and pipes are not reliable on Win32 (FIXME)\n"); return 0; #endif - testid = test_id_generate(); - topic = test_mk_topic_name(__FUNCTION__, 1); + testid = test_id_generate(); + topic = test_mk_topic_name(__FUNCTION__, 1); - rk_p = test_create_producer(); - rkt_p = test_create_producer_topic(rk_p, topic, NULL); - err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000)); + rk_p = test_create_producer(); + rkt_p = test_create_producer_topic(rk_p, topic, NULL); + err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000)); TEST_ASSERT(!err, "Topic auto creation failed: %s", rd_kafka_err2str(err)); - test_conf_init(&conf, &tconf, 0); - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); - test_conf_set(conf, "session.timeout.ms", "6000"); - test_conf_set(conf, "enable.partition.eof", "false"); - /* Speed up propagation of new topics */ - test_conf_set(conf, "metadata.max.age.ms", "1000"); - test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); - rk_c = test_create_consumer(topic, NULL, conf, tconf); + test_conf_init(&conf, &tconf, 0); + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); + test_conf_set(conf, "session.timeout.ms", "6000"); + test_conf_set(conf, "enable.partition.eof", "false"); + /* Speed up propagation of new topics */ + test_conf_set(conf, "metadata.max.age.ms", "1000"); + test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); + rk_c = test_create_consumer(topic, NULL, conf, tconf); - queue = rd_kafka_queue_get_consumer(rk_c); + queue = rd_kafka_queue_get_consumer(rk_c); - test_consumer_subscribe(rk_c, topic); + test_consumer_subscribe(rk_c, topic); 
#ifndef _WIN32 r = pipe(fds); @@ -101,137 +97,155 @@ int main_0040_io_event (int argc, char **argv) { r = _pipe(fds, 2, _O_BINARY); #endif if (r == -1) - TEST_FAIL("pipe() failed: %s\n", strerror(errno)); - - rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1); - - pfd.fd = fds[0]; - pfd.events = POLLIN; - pfd.revents = 0; - - /** - * 1) Wait for rebalance event - * 2) Wait 1 interval (1s) expecting no IO (nothing produced). - * 3) Produce half the messages - * 4) Expect IO - * 5) Consume the available messages - * 6) Wait 1 interval expecting no IO. - * 7) Produce remaing half - * 8) Expect IO - * 9) Done. - */ - while (recvd < msgcnt) { + TEST_FAIL("pipe() failed: %s\n", strerror(errno)); + + rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1); + + pfd.fd = fds[0]; + pfd.events = POLLIN; + pfd.revents = 0; + + /** + * 1) Wait for rebalance event + * 2) Wait 1 interval (1s) expecting no IO (nothing produced). + * 3) Produce half the messages + * 4) Expect IO + * 5) Consume the available messages + * 6) Wait 1 interval expecting no IO. + * 7) Produce remaing half + * 8) Expect IO + * 9) Done. 
+ */ + while (recvd < msgcnt) { #ifndef _WIN32 - r = poll(&pfd, 1, 1000 * wait_multiplier); + r = poll(&pfd, 1, 1000 * wait_multiplier); #else r = WSAPoll(&pfd, 1, 1000 * wait_multiplier); #endif - if (r == -1) { - TEST_FAIL("poll() failed: %s", strerror(errno)); - - } else if (r == 1) { - rd_kafka_event_t *rkev; - char b; - int eventcnt = 0; - - if (pfd.events & POLLERR) - TEST_FAIL("Poll error\n"); - if (!(pfd.events & POLLIN)) { - TEST_SAY("Stray event 0x%x\n", (int)pfd.events); - continue; - } - - TEST_SAY("POLLIN\n"); + if (r == -1) { + TEST_FAIL("poll() failed: %s", strerror(errno)); + + } else if (r == 1) { + rd_kafka_event_t *rkev; + char b; + int eventcnt = 0; + + if (pfd.events & POLLERR) + TEST_FAIL("Poll error\n"); + if (!(pfd.events & POLLIN)) { + TEST_SAY("Stray event 0x%x\n", (int)pfd.events); + continue; + } + + TEST_SAY("POLLIN\n"); /* Read signaling token to purge socket queue and * eventually silence POLLIN */ #ifndef _WIN32 - r = read(pfd.fd, &b, 1); + r = read(pfd.fd, &b, 1); #else - r = _read((int)pfd.fd, &b, 1); + r = _read((int)pfd.fd, &b, 1); #endif - if (r == -1) - TEST_FAIL("read failed: %s\n", strerror(errno)); - - if (!expecting_io) - TEST_WARN("Got unexpected IO after %d/%d msgs\n", - recvd, msgcnt); - - while ((rkev = rd_kafka_queue_poll(queue, 0))) { - eventcnt++; - switch (rd_kafka_event_type(rkev)) - { - case RD_KAFKA_EVENT_REBALANCE: - TEST_SAY("Got %s: %s\n", rd_kafka_event_name(rkev), - rd_kafka_err2str(rd_kafka_event_error(rkev))); - if (expecting_io != _REBALANCE) - TEST_FAIL("Got Rebalance when expecting message\n"); - if (rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { - rd_kafka_assign(rk_c, rd_kafka_event_topic_partition_list(rkev)); - expecting_io = _NOPE; - } else - rd_kafka_assign(rk_c, NULL); - break; - - case RD_KAFKA_EVENT_FETCH: - if (expecting_io != _YEP) - TEST_FAIL("Did not expect more messages at %d/%d\n", - recvd, msgcnt); - recvd++; - if (recvd == (msgcnt / 2) || recvd == msgcnt) - 
expecting_io = _NOPE; - break; - - case RD_KAFKA_EVENT_ERROR: - TEST_FAIL("Error: %s\n", rd_kafka_event_error_string(rkev)); - break; - - default: - TEST_SAY("Ignoring event %s\n", rd_kafka_event_name(rkev)); - } - - rd_kafka_event_destroy(rkev); - } - TEST_SAY("%d events, Consumed %d/%d messages\n", eventcnt, recvd, msgcnt); - - wait_multiplier = 1; - - } else { - if (expecting_io == _REBALANCE) { - continue; - } else if (expecting_io == _YEP) { - TEST_FAIL("Did not see expected IO after %d/%d msgs\n", - recvd, msgcnt); - } - - TEST_SAY("IO poll timeout (good)\n"); - - TEST_SAY("Got idle period, producing\n"); - test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, msgcnt/2, - NULL, 10); - - expecting_io = _YEP; - /* When running slowly (e.g., valgrind) it might take - * some time before the first message is received - * after producing. */ - wait_multiplier = 3; - } - } - TEST_SAY("Done\n"); - - rd_kafka_topic_destroy(rkt_p); - rd_kafka_destroy(rk_p); - - rd_kafka_queue_destroy(queue); - rd_kafka_consumer_close(rk_c); - rd_kafka_destroy(rk_c); + if (r == -1) + TEST_FAIL("read failed: %s\n", strerror(errno)); + + if (!expecting_io) + TEST_WARN( + "Got unexpected IO after %d/%d msgs\n", + recvd, msgcnt); + + while ((rkev = rd_kafka_queue_poll(queue, 0))) { + eventcnt++; + switch (rd_kafka_event_type(rkev)) { + case RD_KAFKA_EVENT_REBALANCE: + TEST_SAY( + "Got %s: %s\n", + rd_kafka_event_name(rkev), + rd_kafka_err2str( + rd_kafka_event_error(rkev))); + if (expecting_io != _REBALANCE) + TEST_FAIL( + "Got Rebalance when " + "expecting message\n"); + if (rd_kafka_event_error(rkev) == + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { + rd_kafka_assign( + rk_c, + rd_kafka_event_topic_partition_list( + rkev)); + expecting_io = _NOPE; + } else + rd_kafka_assign(rk_c, NULL); + break; + + case RD_KAFKA_EVENT_FETCH: + if (expecting_io != _YEP) + TEST_FAIL( + "Did not expect more " + "messages at %d/%d\n", + recvd, msgcnt); + recvd++; + if (recvd == (msgcnt / 2) || + recvd == msgcnt) + 
expecting_io = _NOPE; + break; + + case RD_KAFKA_EVENT_ERROR: + TEST_FAIL( + "Error: %s\n", + rd_kafka_event_error_string(rkev)); + break; + + default: + TEST_SAY("Ignoring event %s\n", + rd_kafka_event_name(rkev)); + } + + rd_kafka_event_destroy(rkev); + } + TEST_SAY("%d events, Consumed %d/%d messages\n", + eventcnt, recvd, msgcnt); + + wait_multiplier = 1; + + } else { + if (expecting_io == _REBALANCE) { + continue; + } else if (expecting_io == _YEP) { + TEST_FAIL( + "Did not see expected IO after %d/%d " + "msgs\n", + recvd, msgcnt); + } + + TEST_SAY("IO poll timeout (good)\n"); + + TEST_SAY("Got idle period, producing\n"); + test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, + msgcnt / 2, NULL, 10); + + expecting_io = _YEP; + /* When running slowly (e.g., valgrind) it might take + * some time before the first message is received + * after producing. */ + wait_multiplier = 3; + } + } + TEST_SAY("Done\n"); + + rd_kafka_topic_destroy(rkt_p); + rd_kafka_destroy(rk_p); + + rd_kafka_queue_destroy(queue); + rd_kafka_consumer_close(rk_c); + rd_kafka_destroy(rk_c); #ifndef _WIN32 - close(fds[0]); - close(fds[1]); + close(fds[0]); + close(fds[1]); #else _close(fds[0]); _close(fds[1]); #endif - return 0; + return 0; } diff --git a/tests/0041-fetch_max_bytes.c b/tests/0041-fetch_max_bytes.c index ae0e6bedfa..e243dc8ac8 100644 --- a/tests/0041-fetch_max_bytes.c +++ b/tests/0041-fetch_max_bytes.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -46,48 +46,51 @@ */ -int main_0041_fetch_max_bytes (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int partition = 0; - const int msgcnt = 2*1000; - const int MAX_BYTES = 100000; - uint64_t testid; - rd_kafka_conf_t *conf; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; +int main_0041_fetch_max_bytes(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int partition = 0; + const int msgcnt = 2 * 1000; + const int MAX_BYTES = 100000; + uint64_t testid; + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; - test_conf_init(NULL, NULL, 60); - - testid = test_id_generate(); - rk = test_create_producer(); - rkt = test_create_producer_topic(rk, topic, NULL); + test_conf_init(NULL, NULL, 60); - test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt/2, NULL, MAX_BYTES/10); - test_produce_msgs(rk, rkt, testid, partition, msgcnt/2, msgcnt/2, NULL, MAX_BYTES*5); + testid = test_id_generate(); + rk = test_create_producer(); + rkt = test_create_producer_topic(rk, topic, NULL); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt / 2, NULL, + MAX_BYTES / 10); + test_produce_msgs(rk, rkt, testid, partition, msgcnt / 2, msgcnt / 2, + NULL, MAX_BYTES * 5); - TEST_SAY("Creating consumer\n"); - test_conf_init(&conf, NULL, 0); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - test_conf_set(conf, "fetch.message.max.bytes", tsprintf("%d", MAX_BYTES)); + TEST_SAY("Creating consumer\n"); + test_conf_init(&conf, NULL, 0); + + test_conf_set(conf, "fetch.message.max.bytes", + tsprintf("%d", MAX_BYTES)); /* This test may be slower when running with SSL or Helgrind, * restart the timeout. 
*/ test_timeout_set(60); - rk = test_create_consumer(NULL, NULL, conf, NULL); - rkt = rd_kafka_topic_new(rk, topic, NULL); + rk = test_create_consumer(NULL, NULL, conf, NULL); + rkt = rd_kafka_topic_new(rk, topic, NULL); - test_consumer_start("CONSUME", rkt, partition, - RD_KAFKA_OFFSET_BEGINNING); - test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, - 0, msgcnt, 1); - test_consumer_stop("CONSUME", rkt, partition); + test_consumer_start("CONSUME", rkt, partition, + RD_KAFKA_OFFSET_BEGINNING); + test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0, + msgcnt, 1); + test_consumer_stop("CONSUME", rkt, partition); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - return 0; + return 0; } diff --git a/tests/0042-many_topics.c b/tests/0042-many_topics.c index ab380fc55b..6ea5aa6695 100644 --- a/tests/0042-many_topics.c +++ b/tests/0042-many_topics.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -48,206 +48,205 @@ const int msgs_per_topic = 100; -static void produce_many (char **topics, int topic_cnt, uint64_t testid) { - rd_kafka_t *rk; +static void produce_many(char **topics, int topic_cnt, uint64_t testid) { + rd_kafka_t *rk; test_timing_t t_rkt_create; int i; - rd_kafka_topic_t **rkts; - - TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); - - rk = test_create_producer(); - - TEST_SAY("Creating %d topic objects\n", topic_cnt); - - rkts = malloc(sizeof(*rkts) * topic_cnt); - TIMING_START(&t_rkt_create, "Topic object create"); - for (i = 0 ; i < topic_cnt ; i++) { - rkts[i] = test_create_topic_object(rk, topics[i], - "acks", "all", NULL); - } - TIMING_STOP(&t_rkt_create); - - TEST_SAY("Producing %d messages to each %d topics\n", - msgs_per_topic, topic_cnt); + rd_kafka_topic_t **rkts; + + TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); + + rk = test_create_producer(); + + TEST_SAY("Creating %d topic objects\n", topic_cnt); + + rkts = malloc(sizeof(*rkts) * topic_cnt); + TIMING_START(&t_rkt_create, "Topic object create"); + for (i = 0; i < topic_cnt; i++) { + rkts[i] = test_create_topic_object(rk, topics[i], "acks", "all", + NULL); + } + TIMING_STOP(&t_rkt_create); + + TEST_SAY("Producing %d messages to each %d topics\n", msgs_per_topic, + topic_cnt); /* Produce messages to each topic (so they are created) */ - for (i = 0 ; i < topic_cnt ; i++) { - test_produce_msgs(rk, rkts[i], testid, 0, - i * msgs_per_topic, msgs_per_topic, - NULL, 100); - } + for (i = 0; i < topic_cnt; i++) { + test_produce_msgs(rk, rkts[i], testid, 0, i * msgs_per_topic, + msgs_per_topic, NULL, 100); + } - TEST_SAY("Destroying %d topic objects\n", topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) { - rd_kafka_topic_destroy(rkts[i]); - } - free(rkts); + TEST_SAY("Destroying %d topic objects\n", topic_cnt); + for (i = 0; i < topic_cnt; i++) { + rd_kafka_topic_destroy(rkts[i]); + } + free(rkts); - 
test_flush(rk, 30000); + test_flush(rk, 30000); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); } -static void legacy_consume_many (char **topics, int topic_cnt, uint64_t testid){ - rd_kafka_t *rk; +static void legacy_consume_many(char **topics, int topic_cnt, uint64_t testid) { + rd_kafka_t *rk; test_timing_t t_rkt_create; int i; - rd_kafka_topic_t **rkts; - int msg_base = 0; + rd_kafka_topic_t **rkts; + int msg_base = 0; - TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); + TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); - test_conf_init(NULL, NULL, 60); + test_conf_init(NULL, NULL, 60); - rk = test_create_consumer(NULL, NULL, NULL, NULL); + rk = test_create_consumer(NULL, NULL, NULL, NULL); - TEST_SAY("Creating %d topic objects\n", topic_cnt); - - rkts = malloc(sizeof(*rkts) * topic_cnt); - TIMING_START(&t_rkt_create, "Topic object create"); - for (i = 0 ; i < topic_cnt ; i++) - rkts[i] = test_create_topic_object(rk, topics[i], NULL); - TIMING_STOP(&t_rkt_create); + TEST_SAY("Creating %d topic objects\n", topic_cnt); - TEST_SAY("Start consumer for %d topics\n", topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - test_consumer_start("legacy", rkts[i], 0, - RD_KAFKA_OFFSET_BEGINNING); - - TEST_SAY("Consuming from %d messages from each %d topics\n", - msgs_per_topic, topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) { - test_consume_msgs("legacy", rkts[i], testid, 0, TEST_NO_SEEK, - msg_base, msgs_per_topic, 1); - msg_base += msgs_per_topic; - } + rkts = malloc(sizeof(*rkts) * topic_cnt); + TIMING_START(&t_rkt_create, "Topic object create"); + for (i = 0; i < topic_cnt; i++) + rkts[i] = test_create_topic_object(rk, topics[i], NULL); + TIMING_STOP(&t_rkt_create); - TEST_SAY("Stopping consumers\n"); - for (i = 0 ; i < topic_cnt ; i++) - test_consumer_stop("legacy", rkts[i], 0); + TEST_SAY("Start consumer for %d topics\n", topic_cnt); + for (i = 0; i < topic_cnt; i++) + test_consumer_start("legacy", rkts[i], 0, + RD_KAFKA_OFFSET_BEGINNING); + TEST_SAY("Consuming from %d 
messages from each %d topics\n", + msgs_per_topic, topic_cnt); + for (i = 0; i < topic_cnt; i++) { + test_consume_msgs("legacy", rkts[i], testid, 0, TEST_NO_SEEK, + msg_base, msgs_per_topic, 1); + msg_base += msgs_per_topic; + } - TEST_SAY("Destroying %d topic objects\n", topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - rd_kafka_topic_destroy(rkts[i]); + TEST_SAY("Stopping consumers\n"); + for (i = 0; i < topic_cnt; i++) + test_consumer_stop("legacy", rkts[i], 0); - free(rkts); - rd_kafka_destroy(rk); + TEST_SAY("Destroying %d topic objects\n", topic_cnt); + for (i = 0; i < topic_cnt; i++) + rd_kafka_topic_destroy(rkts[i]); + + free(rkts); + + rd_kafka_destroy(rk); } -static void subscribe_consume_many (char **topics, int topic_cnt, - uint64_t testid) { - rd_kafka_t *rk; +static void +subscribe_consume_many(char **topics, int topic_cnt, uint64_t testid) { + rd_kafka_t *rk; int i; - rd_kafka_topic_conf_t *tconf; - rd_kafka_topic_partition_list_t *parts; - rd_kafka_resp_err_t err; - test_msgver_t mv; + rd_kafka_topic_conf_t *tconf; + rd_kafka_topic_partition_list_t *parts; + rd_kafka_resp_err_t err; + test_msgver_t mv; - TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); + TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); - test_conf_init(NULL, &tconf, 60); - test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); - rk = test_create_consumer(__FUNCTION__, NULL, NULL, tconf); + test_conf_init(NULL, &tconf, 60); + test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); + rk = test_create_consumer(__FUNCTION__, NULL, NULL, tconf); - parts = rd_kafka_topic_partition_list_new(topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - rd_kafka_topic_partition_list_add(parts, topics[i], - RD_KAFKA_PARTITION_UA); + parts = rd_kafka_topic_partition_list_new(topic_cnt); + for (i = 0; i < topic_cnt; i++) + rd_kafka_topic_partition_list_add(parts, topics[i], + RD_KAFKA_PARTITION_UA); - TEST_SAY("Subscribing to %d topics\n", topic_cnt); - err = rd_kafka_subscribe(rk, parts); - if (err) 
- TEST_FAIL("subscribe() failed: %s\n", rd_kafka_err2str(err)); + TEST_SAY("Subscribing to %d topics\n", topic_cnt); + err = rd_kafka_subscribe(rk, parts); + if (err) + TEST_FAIL("subscribe() failed: %s\n", rd_kafka_err2str(err)); - rd_kafka_topic_partition_list_destroy(parts); + rd_kafka_topic_partition_list_destroy(parts); - test_msgver_init(&mv, testid); - test_consumer_poll("consume.subscribe", rk, testid, - -1, 0, msgs_per_topic * topic_cnt, &mv); + test_msgver_init(&mv, testid); + test_consumer_poll("consume.subscribe", rk, testid, -1, 0, + msgs_per_topic * topic_cnt, &mv); - for (i = 0 ; i < topic_cnt ; i++) - test_msgver_verify_part("subscribe", &mv, TEST_MSGVER_ALL_PART, - topics[i], 0, i * msgs_per_topic, - msgs_per_topic); - test_msgver_clear(&mv); + for (i = 0; i < topic_cnt; i++) + test_msgver_verify_part("subscribe", &mv, TEST_MSGVER_ALL_PART, + topics[i], 0, i * msgs_per_topic, + msgs_per_topic); + test_msgver_clear(&mv); - test_consumer_close(rk); + test_consumer_close(rk); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); } -static void assign_consume_many (char **topics, int topic_cnt, uint64_t testid){ - rd_kafka_t *rk; - rd_kafka_topic_partition_list_t *parts; - int i; - test_msgver_t mv; +static void assign_consume_many(char **topics, int topic_cnt, uint64_t testid) { + rd_kafka_t *rk; + rd_kafka_topic_partition_list_t *parts; + int i; + test_msgver_t mv; - TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); + TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); - test_conf_init(NULL, NULL, 60); - rk = test_create_consumer(__FUNCTION__, NULL, NULL, NULL); + test_conf_init(NULL, NULL, 60); + rk = test_create_consumer(__FUNCTION__, NULL, NULL, NULL); - parts = rd_kafka_topic_partition_list_new(topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - rd_kafka_topic_partition_list_add(parts, topics[i], 0)-> - offset = RD_KAFKA_OFFSET_TAIL(msgs_per_topic); + parts = rd_kafka_topic_partition_list_new(topic_cnt); + for (i = 0; i < topic_cnt; i++) + 
rd_kafka_topic_partition_list_add(parts, topics[i], 0)->offset = + RD_KAFKA_OFFSET_TAIL(msgs_per_topic); - test_consumer_assign("consume.assign", rk, parts); - rd_kafka_topic_partition_list_destroy(parts); + test_consumer_assign("consume.assign", rk, parts); + rd_kafka_topic_partition_list_destroy(parts); - test_msgver_init(&mv, testid); - test_consumer_poll("consume.assign", rk, testid, - -1, 0, msgs_per_topic * topic_cnt, &mv); + test_msgver_init(&mv, testid); + test_consumer_poll("consume.assign", rk, testid, -1, 0, + msgs_per_topic * topic_cnt, &mv); - for (i = 0 ; i < topic_cnt ; i++) - test_msgver_verify_part("assign", &mv, TEST_MSGVER_ALL_PART, - topics[i], 0, i * msgs_per_topic, - msgs_per_topic); - test_msgver_clear(&mv); + for (i = 0; i < topic_cnt; i++) + test_msgver_verify_part("assign", &mv, TEST_MSGVER_ALL_PART, + topics[i], 0, i * msgs_per_topic, + msgs_per_topic); + test_msgver_clear(&mv); - test_consumer_close(rk); + test_consumer_close(rk); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); } -int main_0042_many_topics (int argc, char **argv) { - char **topics; +int main_0042_many_topics(int argc, char **argv) { + char **topics; int topic_cnt = test_quick ? 4 : 20; /* up this as needed, * topic creation takes time so * unless hunting a bug * we keep this low to keep the * test suite run time down. 
*/ - uint64_t testid; - int i; + uint64_t testid; + int i; - test_conf_init(NULL, NULL, 60); + test_conf_init(NULL, NULL, 60); - testid = test_id_generate(); + testid = test_id_generate(); - /* Generate unique topic names */ - topics = malloc(sizeof(*topics) * topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + /* Generate unique topic names */ + topics = malloc(sizeof(*topics) * topic_cnt); + for (i = 0; i < topic_cnt; i++) + topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); - produce_many(topics, topic_cnt, testid); - legacy_consume_many(topics, topic_cnt, testid); - if (test_broker_version >= TEST_BRKVER(0,9,0,0)) { - subscribe_consume_many(topics, topic_cnt, testid); - assign_consume_many(topics, topic_cnt, testid); - } + produce_many(topics, topic_cnt, testid); + legacy_consume_many(topics, topic_cnt, testid); + if (test_broker_version >= TEST_BRKVER(0, 9, 0, 0)) { + subscribe_consume_many(topics, topic_cnt, testid); + assign_consume_many(topics, topic_cnt, testid); + } - for (i = 0 ; i < topic_cnt ; i++) - free(topics[i]); - free(topics); + for (i = 0; i < topic_cnt; i++) + free(topics[i]); + free(topics); return 0; } diff --git a/tests/0043-no_connection.c b/tests/0043-no_connection.c index 95f6a8adb5..3470c4ae13 100644 --- a/tests/0043-no_connection.c +++ b/tests/0043-no_connection.c @@ -36,42 +36,42 @@ -static void test_producer_no_connection (void) { - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_topic_t *rkt; - int i; - const int partition_cnt = 2; - int msgcnt = 0; - test_timing_t t_destroy; +static void test_producer_no_connection(void) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_topic_t *rkt; + int i; + const int partition_cnt = 2; + int msgcnt = 0; + test_timing_t t_destroy; - test_conf_init(&conf, NULL, 20); + test_conf_init(&conf, NULL, 20); - test_conf_set(conf, "bootstrap.servers", NULL); + test_conf_set(conf, "bootstrap.servers", NULL); - rk = 
test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_topic_object(rk, __FUNCTION__, - "message.timeout.ms", "5000", NULL); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_topic_object(rk, __FUNCTION__, "message.timeout.ms", + "5000", NULL); - test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 100, - NULL, 100, 0, &msgcnt); - for (i = 0 ; i < partition_cnt ; i++) - test_produce_msgs_nowait(rk, rkt, 0, i, - 0, 100, NULL, 100, 0, &msgcnt); + test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 100, + NULL, 100, 0, &msgcnt); + for (i = 0; i < partition_cnt; i++) + test_produce_msgs_nowait(rk, rkt, 0, i, 0, 100, NULL, 100, 0, + &msgcnt); - rd_kafka_poll(rk, 1000); + rd_kafka_poll(rk, 1000); - TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk)); + TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk)); - rd_kafka_topic_destroy(rkt); + rd_kafka_topic_destroy(rkt); - TIMING_START(&t_destroy, "rd_kafka_destroy()"); - rd_kafka_destroy(rk); - TIMING_STOP(&t_destroy); + TIMING_START(&t_destroy, "rd_kafka_destroy()"); + rd_kafka_destroy(rk); + TIMING_STOP(&t_destroy); } -int main_0043_no_connection (int argc, char **argv) { - test_producer_no_connection(); +int main_0043_no_connection(int argc, char **argv) { + test_producer_no_connection(); return 0; } diff --git a/tests/0044-partition_cnt.c b/tests/0044-partition_cnt.c index 29933a5bcb..51ef318c35 100644 --- a/tests/0044-partition_cnt.c +++ b/tests/0044-partition_cnt.c @@ -45,51 +45,49 @@ * - Wait for DRs * - Close */ - -static void test_producer_partition_cnt_change (void) { - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_topic_t *rkt; - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int partition_cnt = 4; - int msgcnt = test_quick ? 
500 : 100000; - test_timing_t t_destroy; - int produced = 0; - - test_conf_init(&conf, NULL, 20); + +static void test_producer_partition_cnt_change(void) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_topic_t *rkt; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int partition_cnt = 4; + int msgcnt = test_quick ? 500 : 100000; + test_timing_t t_destroy; + int produced = 0; + + test_conf_init(&conf, NULL, 20); rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - test_create_topic(rk, topic, partition_cnt/2, 1); + test_create_topic(rk, topic, partition_cnt / 2, 1); - rkt = test_create_topic_object(rk, __FUNCTION__, - "message.timeout.ms", - tsprintf("%d", tmout_multip(10000)), - NULL); + rkt = + test_create_topic_object(rk, __FUNCTION__, "message.timeout.ms", + tsprintf("%d", tmout_multip(10000)), NULL); - test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt/2, - NULL, 100, 0, &produced); + test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt / 2, NULL, 100, 0, &produced); test_create_partitions(rk, topic, partition_cnt); - test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, - msgcnt/2, msgcnt/2, - NULL, 100, 0, &produced); + test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, msgcnt / 2, + msgcnt / 2, NULL, 100, 0, &produced); - test_wait_delivery(rk, &produced); + test_wait_delivery(rk, &produced); - rd_kafka_topic_destroy(rkt); + rd_kafka_topic_destroy(rkt); - TIMING_START(&t_destroy, "rd_kafka_destroy()"); - rd_kafka_destroy(rk); - TIMING_STOP(&t_destroy); + TIMING_START(&t_destroy, "rd_kafka_destroy()"); + rd_kafka_destroy(rk); + TIMING_STOP(&t_destroy); } -int main_0044_partition_cnt (int argc, char **argv) { - if (!test_can_create_topics(1)) - return 0; +int main_0044_partition_cnt(int argc, char **argv) { + if (!test_can_create_topics(1)) + return 0; - 
test_producer_partition_cnt_change(); + test_producer_partition_cnt_change(); return 0; } diff --git a/tests/0045-subscribe_update.c b/tests/0045-subscribe_update.c index f387fa3b29..f804613d72 100644 --- a/tests/0045-subscribe_update.c +++ b/tests/0045-subscribe_update.c @@ -46,228 +46,226 @@ * Va-args are \p topic_cnt tuples of the expected assignment: * { const char *topic, int partition_cnt } */ -static void await_assignment (const char *pfx, rd_kafka_t *rk, - rd_kafka_queue_t *queue, - int topic_cnt, ...) { - rd_kafka_event_t *rkev; - rd_kafka_topic_partition_list_t *tps; - int i; - va_list ap; - int fails = 0; - int exp_part_cnt = 0; - - TEST_SAY("%s: waiting for assignment\n", pfx); - rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000); - if (!rkev) - TEST_FAIL("timed out waiting for assignment"); - TEST_ASSERT(rd_kafka_event_error(rkev) == - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - "expected ASSIGN, got %s", - rd_kafka_err2str(rd_kafka_event_error(rkev))); - tps = rd_kafka_event_topic_partition_list(rkev); - - TEST_SAY("%s: assignment:\n", pfx); - test_print_partition_list(tps); - - va_start(ap, topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) { - const char *topic = va_arg(ap, const char *); - int partition_cnt = va_arg(ap, int); - int p; - TEST_SAY("%s: expecting %s with %d partitions\n", - pfx, topic, partition_cnt); - for (p = 0 ; p < partition_cnt ; p++) { - if (!rd_kafka_topic_partition_list_find(tps, topic, p)) { - TEST_FAIL_LATER("%s: expected partition %s [%d] " - "not found in assginment", - pfx, topic, p); - fails++; - } - } - exp_part_cnt += partition_cnt; - } - va_end(ap); - - TEST_ASSERT(exp_part_cnt == tps->cnt, - "expected assignment of %d partitions, got %d", - exp_part_cnt, tps->cnt); - - if (fails > 0) - TEST_FAIL("%s: assignment mismatch: see above", pfx); - - rd_kafka_assign(rk, tps); - rd_kafka_event_destroy(rkev); +static void await_assignment(const char *pfx, + rd_kafka_t *rk, + rd_kafka_queue_t *queue, + int topic_cnt, + 
...) { + rd_kafka_event_t *rkev; + rd_kafka_topic_partition_list_t *tps; + int i; + va_list ap; + int fails = 0; + int exp_part_cnt = 0; + + TEST_SAY("%s: waiting for assignment\n", pfx); + rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000); + if (!rkev) + TEST_FAIL("timed out waiting for assignment"); + TEST_ASSERT(rd_kafka_event_error(rkev) == + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + "expected ASSIGN, got %s", + rd_kafka_err2str(rd_kafka_event_error(rkev))); + tps = rd_kafka_event_topic_partition_list(rkev); + + TEST_SAY("%s: assignment:\n", pfx); + test_print_partition_list(tps); + + va_start(ap, topic_cnt); + for (i = 0; i < topic_cnt; i++) { + const char *topic = va_arg(ap, const char *); + int partition_cnt = va_arg(ap, int); + int p; + TEST_SAY("%s: expecting %s with %d partitions\n", pfx, topic, + partition_cnt); + for (p = 0; p < partition_cnt; p++) { + if (!rd_kafka_topic_partition_list_find(tps, topic, + p)) { + TEST_FAIL_LATER( + "%s: expected partition %s [%d] " + "not found in assginment", + pfx, topic, p); + fails++; + } + } + exp_part_cnt += partition_cnt; + } + va_end(ap); + + TEST_ASSERT(exp_part_cnt == tps->cnt, + "expected assignment of %d partitions, got %d", + exp_part_cnt, tps->cnt); + + if (fails > 0) + TEST_FAIL("%s: assignment mismatch: see above", pfx); + + rd_kafka_assign(rk, tps); + rd_kafka_event_destroy(rkev); } /** * Wait for REBALANCE REVOKE event and perform unassignment. 
*/ -static void await_revoke (const char *pfx, rd_kafka_t *rk, - rd_kafka_queue_t *queue) { - rd_kafka_event_t *rkev; - - TEST_SAY("%s: waiting for revoke\n", pfx); - rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000); - if (!rkev) - TEST_FAIL("timed out waiting for revoke"); - TEST_ASSERT(rd_kafka_event_error(rkev) == - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - "expected REVOKE, got %s", - rd_kafka_err2str(rd_kafka_event_error(rkev))); - rd_kafka_assign(rk, NULL); - rd_kafka_event_destroy(rkev); +static void +await_revoke(const char *pfx, rd_kafka_t *rk, rd_kafka_queue_t *queue) { + rd_kafka_event_t *rkev; + + TEST_SAY("%s: waiting for revoke\n", pfx); + rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000); + if (!rkev) + TEST_FAIL("timed out waiting for revoke"); + TEST_ASSERT(rd_kafka_event_error(rkev) == + RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + "expected REVOKE, got %s", + rd_kafka_err2str(rd_kafka_event_error(rkev))); + rd_kafka_assign(rk, NULL); + rd_kafka_event_destroy(rkev); } /** * Wait \p timeout_ms to make sure no rebalance was triggered. 
*/ -static void await_no_rebalance (const char *pfx, rd_kafka_t *rk, - rd_kafka_queue_t *queue, int timeout_ms) { - rd_kafka_event_t *rkev; - - TEST_SAY("%s: waiting for %d ms to not see rebalance\n", - pfx, timeout_ms); - rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, timeout_ms); - if (!rkev) - return; - TEST_ASSERT(rkev, "did not expect %s: %s", - rd_kafka_event_name(rkev), - rd_kafka_err2str(rd_kafka_event_error(rkev))); - rd_kafka_event_destroy(rkev); - +static void await_no_rebalance(const char *pfx, + rd_kafka_t *rk, + rd_kafka_queue_t *queue, + int timeout_ms) { + rd_kafka_event_t *rkev; + + TEST_SAY("%s: waiting for %d ms to not see rebalance\n", pfx, + timeout_ms); + rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, timeout_ms); + if (!rkev) + return; + TEST_ASSERT(rkev, "did not expect %s: %s", rd_kafka_event_name(rkev), + rd_kafka_err2str(rd_kafka_event_error(rkev))); + rd_kafka_event_destroy(rkev); } -static void do_test_non_exist_and_partchange (void) { - char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1)); - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_queue_t *queue; +static void do_test_non_exist_and_partchange(void) { + char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1)); + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *queue; - /** - * Test #1: - * - Subscribe to non-existing topic. - * - Verify empty assignment - * - Create topic - * - Verify new assignment containing topic - */ + /** + * Test #1: + * - Subscribe to non-existing topic. + * - Verify empty assignment + * - Create topic + * - Verify new assignment containing topic + */ SUB_TEST(); - test_conf_init(&conf, NULL, 60); + test_conf_init(&conf, NULL, 60); - /* Decrease metadata interval to speed up topic change discovery. */ - test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); + /* Decrease metadata interval to speed up topic change discovery. 
*/ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); - rk = test_create_consumer(test_str_id_generate_tmp(), - NULL, conf, NULL); - queue = rd_kafka_queue_get_consumer(rk); + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); + rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL); + queue = rd_kafka_queue_get_consumer(rk); - TEST_SAY("#1: Subscribing to %s\n", topic_a); - test_consumer_subscribe(rk, topic_a); + TEST_SAY("#1: Subscribing to %s\n", topic_a); + test_consumer_subscribe(rk, topic_a); - /* Should not see a rebalance since no topics are matched. */ - await_no_rebalance("#1: empty", rk, queue, 10000); + /* Should not see a rebalance since no topics are matched. */ + await_no_rebalance("#1: empty", rk, queue, 10000); - TEST_SAY("#1: creating topic %s\n", topic_a); - test_create_topic(NULL, topic_a, 2, 1); + TEST_SAY("#1: creating topic %s\n", topic_a); + test_create_topic(NULL, topic_a, 2, 1); - await_assignment("#1: proper", rk, queue, 1, - topic_a, 2); + await_assignment("#1: proper", rk, queue, 1, topic_a, 2); - /** - * Test #2 (continue with #1 consumer) - * - Increase the partition count - * - Verify updated assignment - */ - test_kafka_topics("--alter --topic %s --partitions 4", - topic_a); - await_revoke("#2", rk, queue); + /** + * Test #2 (continue with #1 consumer) + * - Increase the partition count + * - Verify updated assignment + */ + test_kafka_topics("--alter --topic %s --partitions 4", topic_a); + await_revoke("#2", rk, queue); - await_assignment("#2: more partitions", rk, queue, 1, - topic_a, 4); + await_assignment("#2: more partitions", rk, queue, 1, topic_a, 4); - test_consumer_close(rk); - rd_kafka_queue_destroy(queue); - rd_kafka_destroy(rk); + test_consumer_close(rk); + rd_kafka_queue_destroy(queue); + rd_kafka_destroy(rk); - rd_free(topic_a); + rd_free(topic_a); SUB_TEST_PASS(); } -static void do_test_regex (void) { - char 
*base_topic = rd_strdup(test_mk_topic_name("topic", 1)); - char *topic_b = rd_strdup(tsprintf("%s_b", base_topic)); - char *topic_c = rd_strdup(tsprintf("%s_c", base_topic)); - char *topic_d = rd_strdup(tsprintf("%s_d", base_topic)); - char *topic_e = rd_strdup(tsprintf("%s_e", base_topic)); - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_queue_t *queue; - - /** - * Regex test: - * - Create topic b - * - Subscribe to b & d & e - * - Verify b assignment - * - Create topic c - * - Verify no rebalance - * - Create topic d - * - Verify b & d assignment - */ +static void do_test_regex(void) { + char *base_topic = rd_strdup(test_mk_topic_name("topic", 1)); + char *topic_b = rd_strdup(tsprintf("%s_b", base_topic)); + char *topic_c = rd_strdup(tsprintf("%s_c", base_topic)); + char *topic_d = rd_strdup(tsprintf("%s_d", base_topic)); + char *topic_e = rd_strdup(tsprintf("%s_e", base_topic)); + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *queue; + + /** + * Regex test: + * - Create topic b + * - Subscribe to b & d & e + * - Verify b assignment + * - Create topic c + * - Verify no rebalance + * - Create topic d + * - Verify b & d assignment + */ SUB_TEST(); - test_conf_init(&conf, NULL, 60); + test_conf_init(&conf, NULL, 60); - /* Decrease metadata interval to speed up topic change discovery. */ - test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); + /* Decrease metadata interval to speed up topic change discovery. 
*/ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); - rk = test_create_consumer(test_str_id_generate_tmp(), - NULL, conf, NULL); - queue = rd_kafka_queue_get_consumer(rk); + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); + rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL); + queue = rd_kafka_queue_get_consumer(rk); - TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b); - test_create_topic(NULL, topic_b, 2, 1); - rd_sleep(1); // FIXME: do check&wait loop instead + TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b); + test_create_topic(NULL, topic_b, 2, 1); + rd_sleep(1); // FIXME: do check&wait loop instead - TEST_SAY("Regex: Subscribing to %s & %s & %s\n", - topic_b, topic_d, topic_e); - test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic)); + TEST_SAY("Regex: Subscribing to %s & %s & %s\n", topic_b, topic_d, + topic_e); + test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic)); - await_assignment("Regex: just one topic exists", rk, queue, 1, - topic_b, 2); + await_assignment("Regex: just one topic exists", rk, queue, 1, topic_b, + 2); - TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c); - test_create_topic(NULL, topic_c, 4, 1); + TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c); + test_create_topic(NULL, topic_c, 4, 1); - /* Should not see a rebalance since no topics are matched. */ - await_no_rebalance("Regex: empty", rk, queue, 10000); + /* Should not see a rebalance since no topics are matched. 
*/ + await_no_rebalance("Regex: empty", rk, queue, 10000); - TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d); - test_create_topic(NULL, topic_d, 1, 1); + TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d); + test_create_topic(NULL, topic_d, 1, 1); - await_revoke("Regex: rebalance after topic creation", rk, queue); + await_revoke("Regex: rebalance after topic creation", rk, queue); - await_assignment("Regex: two topics exist", rk, queue, 2, - topic_b, 2, - topic_d, 1); + await_assignment("Regex: two topics exist", rk, queue, 2, topic_b, 2, + topic_d, 1); - test_consumer_close(rk); - rd_kafka_queue_destroy(queue); - rd_kafka_destroy(rk); + test_consumer_close(rk); + rd_kafka_queue_destroy(queue); + rd_kafka_destroy(rk); - rd_free(base_topic); - rd_free(topic_b); - rd_free(topic_c); - rd_free(topic_d); - rd_free(topic_e); + rd_free(base_topic); + rd_free(topic_b); + rd_free(topic_c); + rd_free(topic_d); + rd_free(topic_e); SUB_TEST_PASS(); } @@ -275,84 +273,84 @@ static void do_test_regex (void) { /** * @remark Requires scenario=noautocreate. 
*/ -static void do_test_topic_remove (void) { - char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1)); - char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1)); - int parts_f = 5; - int parts_g = 9; - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_queue_t *queue; - rd_kafka_topic_partition_list_t *topics; - rd_kafka_resp_err_t err; - - /** - * Topic removal test: - * - Create topic f & g - * - Subscribe to f & g - * - Verify f & g assignment - * - Remove topic f - * - Verify g assignment - * - Remove topic g - * - Verify empty assignment - */ +static void do_test_topic_remove(void) { + char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1)); + char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1)); + int parts_f = 5; + int parts_g = 9; + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *queue; + rd_kafka_topic_partition_list_t *topics; + rd_kafka_resp_err_t err; + + /** + * Topic removal test: + * - Create topic f & g + * - Subscribe to f & g + * - Verify f & g assignment + * - Remove topic f + * - Verify g assignment + * - Remove topic g + * - Verify empty assignment + */ SUB_TEST("Topic removal testing"); - test_conf_init(&conf, NULL, 60); + test_conf_init(&conf, NULL, 60); - /* Decrease metadata interval to speed up topic change discovery. */ - test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); + /* Decrease metadata interval to speed up topic change discovery. 
*/ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); - rk = test_create_consumer(test_str_id_generate_tmp(), - NULL, conf, NULL); - queue = rd_kafka_queue_get_consumer(rk); + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); + rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL); + queue = rd_kafka_queue_get_consumer(rk); - TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f); - test_create_topic(NULL, topic_f, parts_f, 1); + TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f); + test_create_topic(NULL, topic_f, parts_f, 1); - TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g); - test_create_topic(NULL, topic_g, parts_g, 1); + TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g); + test_create_topic(NULL, topic_g, parts_g, 1); - rd_sleep(1); // FIXME: do check&wait loop instead + rd_sleep(1); // FIXME: do check&wait loop instead - TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g); - topics = rd_kafka_topic_partition_list_new(2); - rd_kafka_topic_partition_list_add(topics, topic_f, RD_KAFKA_PARTITION_UA); - rd_kafka_topic_partition_list_add(topics, topic_g, RD_KAFKA_PARTITION_UA); - err = rd_kafka_subscribe(rk, topics); - TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, - "%s", rd_kafka_err2str(err)); - rd_kafka_topic_partition_list_destroy(topics); + TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g); + topics = rd_kafka_topic_partition_list_new(2); + rd_kafka_topic_partition_list_add(topics, topic_f, + RD_KAFKA_PARTITION_UA); + rd_kafka_topic_partition_list_add(topics, topic_g, + RD_KAFKA_PARTITION_UA); + err = rd_kafka_subscribe(rk, topics); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, "%s", + rd_kafka_err2str(err)); + rd_kafka_topic_partition_list_destroy(topics); - await_assignment("Topic removal: both topics exist", rk, queue, 2, - topic_f, parts_f, 
- topic_g, parts_g); + await_assignment("Topic removal: both topics exist", rk, queue, 2, + topic_f, parts_f, topic_g, parts_g); - TEST_SAY("Topic removal: removing %s\n", topic_f); - test_kafka_topics("--delete --topic %s", topic_f); + TEST_SAY("Topic removal: removing %s\n", topic_f); + test_kafka_topics("--delete --topic %s", topic_f); - await_revoke("Topic removal: rebalance after topic removal", rk, queue); + await_revoke("Topic removal: rebalance after topic removal", rk, queue); - await_assignment("Topic removal: one topic exists", rk, queue, 1, - topic_g, parts_g); - - TEST_SAY("Topic removal: removing %s\n", topic_g); - test_kafka_topics("--delete --topic %s", topic_g); + await_assignment("Topic removal: one topic exists", rk, queue, 1, + topic_g, parts_g); - await_revoke("Topic removal: rebalance after 2nd topic removal", - rk, queue); + TEST_SAY("Topic removal: removing %s\n", topic_g); + test_kafka_topics("--delete --topic %s", topic_g); - /* Should not see another rebalance since all topics now removed */ - await_no_rebalance("Topic removal: empty", rk, queue, 10000); + await_revoke("Topic removal: rebalance after 2nd topic removal", rk, + queue); - test_consumer_close(rk); - rd_kafka_queue_destroy(queue); - rd_kafka_destroy(rk); + /* Should not see another rebalance since all topics now removed */ + await_no_rebalance("Topic removal: empty", rk, queue, 10000); - rd_free(topic_f); - rd_free(topic_g); + test_consumer_close(rk); + rd_kafka_queue_destroy(queue); + rd_kafka_destroy(rk); + + rd_free(topic_f); + rd_free(topic_g); SUB_TEST_PASS(); } @@ -366,21 +364,21 @@ static void do_test_topic_remove (void) { * This is using the mock cluster. 
* */ -static void do_test_regex_many_mock (const char *assignment_strategy, - rd_bool_t lots_of_topics) { +static void do_test_regex_many_mock(const char *assignment_strategy, + rd_bool_t lots_of_topics) { const char *base_topic = "topic"; rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_mock_cluster_t *mcluster; const char *bootstraps; - int topic_cnt = lots_of_topics ? 300 : 50; + int topic_cnt = lots_of_topics ? 300 : 50; int await_assignment_every = lots_of_topics ? 150 : 15; int i; SUB_TEST("%s with %d topics", assignment_strategy, topic_cnt); mcluster = test_mock_cluster_new(3, &bootstraps); - test_conf_init(&conf, NULL, 60*5); + test_conf_init(&conf, NULL, 60 * 5); test_conf_set(conf, "security.protocol", "plaintext"); test_conf_set(conf, "bootstrap.servers", bootstraps); @@ -393,16 +391,15 @@ static void do_test_regex_many_mock (const char *assignment_strategy, test_consumer_subscribe(rk, tsprintf("^%s_.*", base_topic)); - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { char topic[256]; rd_snprintf(topic, sizeof(topic), "%s_%d", base_topic, i); TEST_SAY("Creating topic %s\n", topic); - TEST_CALL_ERR__(rd_kafka_mock_topic_create(mcluster, - topic, 1 + (i % 8), - 1)); + TEST_CALL_ERR__(rd_kafka_mock_topic_create(mcluster, topic, + 1 + (i % 8), 1)); test_consumer_poll_no_msgs("POLL", rk, 0, lots_of_topics ? 100 : 300); @@ -410,7 +407,7 @@ static void do_test_regex_many_mock (const char *assignment_strategy, /* Wait for an assignment to let the consumer catch up on * all rebalancing. 
*/ if (i % await_assignment_every == await_assignment_every - 1) - test_consumer_wait_assignment(rk, rd_true/*poll*/); + test_consumer_wait_assignment(rk, rd_true /*poll*/); else if (!lots_of_topics) rd_usleep(100 * 1000, NULL); } @@ -425,8 +422,7 @@ static void do_test_regex_many_mock (const char *assignment_strategy, - -int main_0045_subscribe_update (int argc, char **argv) { +int main_0045_subscribe_update(int argc, char **argv) { if (!test_can_create_topics(1)) return 0; @@ -436,14 +432,14 @@ int main_0045_subscribe_update (int argc, char **argv) { return 0; } -int main_0045_subscribe_update_non_exist_and_partchange (int argc, char **argv){ +int main_0045_subscribe_update_non_exist_and_partchange(int argc, char **argv) { do_test_non_exist_and_partchange(); return 0; } -int main_0045_subscribe_update_topic_remove (int argc, char **argv) { +int main_0045_subscribe_update_topic_remove(int argc, char **argv) { if (!test_can_create_topics(1)) return 0; @@ -454,7 +450,7 @@ int main_0045_subscribe_update_topic_remove (int argc, char **argv) { } -int main_0045_subscribe_update_mock (int argc, char **argv) { +int main_0045_subscribe_update_mock(int argc, char **argv) { do_test_regex_many_mock("range", rd_false); do_test_regex_many_mock("cooperative-sticky", rd_false); do_test_regex_many_mock("cooperative-sticky", rd_true); diff --git a/tests/0046-rkt_cache.c b/tests/0046-rkt_cache.c index da960b1dc4..541c030376 100644 --- a/tests/0046-rkt_cache.c +++ b/tests/0046-rkt_cache.c @@ -39,27 +39,27 @@ */ -int main_0046_rkt_cache (int argc, char **argv) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - const char *topic = test_mk_topic_name(__FUNCTION__, 0); - int i; +int main_0046_rkt_cache(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + const char *topic = test_mk_topic_name(__FUNCTION__, 0); + int i; - rk = test_create_producer(); + rk = test_create_producer(); - rkt = test_create_producer_topic(rk, topic, NULL); + rkt = test_create_producer_topic(rk, 
topic, NULL); - for (i = 0 ; i < 100 ; i++) { - rd_kafka_topic_t *rkt2; + for (i = 0; i < 100; i++) { + rd_kafka_topic_t *rkt2; - rkt2 = rd_kafka_topic_new(rk, topic, NULL); - TEST_ASSERT(rkt2 != NULL); + rkt2 = rd_kafka_topic_new(rk, topic, NULL); + TEST_ASSERT(rkt2 != NULL); - rd_kafka_topic_destroy(rkt2); - } + rd_kafka_topic_destroy(rkt2); + } - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); return 0; } diff --git a/tests/0047-partial_buf_tmout.c b/tests/0047-partial_buf_tmout.c index e17cde1c93..d90004a3aa 100644 --- a/tests/0047-partial_buf_tmout.c +++ b/tests/0047-partial_buf_tmout.c @@ -51,48 +51,47 @@ static int got_timeout_err = 0; -static void my_error_cb (rd_kafka_t *rk, int err, - const char *reason, void *opaque) { - got_timeout_err += (err == RD_KAFKA_RESP_ERR__TIMED_OUT); +static void +my_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) { + got_timeout_err += (err == RD_KAFKA_RESP_ERR__TIMED_OUT); - if (err == RD_KAFKA_RESP_ERR__TIMED_OUT || - err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) - TEST_SAY("Expected error: %s: %s\n", - rd_kafka_err2str(err), reason); - else - TEST_FAIL("Unexpected error: %s: %s", - rd_kafka_err2str(err), reason); + if (err == RD_KAFKA_RESP_ERR__TIMED_OUT || + err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) + TEST_SAY("Expected error: %s: %s\n", rd_kafka_err2str(err), + reason); + else + TEST_FAIL("Unexpected error: %s: %s", rd_kafka_err2str(err), + reason); } -int main_0047_partial_buf_tmout (int argc, char **argv) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - const char *topic = test_mk_topic_name(__FUNCTION__, 0); - rd_kafka_conf_t *conf; - const size_t msg_size = 10000; - int msgcounter = 0; +int main_0047_partial_buf_tmout(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + const char *topic = test_mk_topic_name(__FUNCTION__, 0); + rd_kafka_conf_t *conf; + const size_t msg_size = 10000; + int msgcounter = 0; - 
test_conf_init(&conf, NULL, 30); - test_conf_set(conf, "socket.send.buffer.bytes", "1000"); - test_conf_set(conf, "batch.num.messages", "100"); - test_conf_set(conf, "queue.buffering.max.messages", "10000000"); - rd_kafka_conf_set_error_cb(conf, my_error_cb); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "socket.send.buffer.bytes", "1000"); + test_conf_set(conf, "batch.num.messages", "100"); + test_conf_set(conf, "queue.buffering.max.messages", "10000000"); + rd_kafka_conf_set_error_cb(conf, my_error_cb); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_producer_topic(rk, topic, - "message.timeout.ms", "300", NULL); + rkt = test_create_producer_topic(rk, topic, "message.timeout.ms", "300", + NULL); - while (got_timeout_err == 0) { - test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, - 10000, NULL, msg_size, 0, - &msgcounter); - rd_kafka_flush(rk, 100); - } + while (got_timeout_err == 0) { + test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, + 10000, NULL, msg_size, 0, &msgcounter); + rd_kafka_flush(rk, 100); + } - TEST_ASSERT(got_timeout_err > 0); + TEST_ASSERT(got_timeout_err > 0); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); return 0; } diff --git a/tests/0048-partitioner.c b/tests/0048-partitioner.c index 51c9b1b259..84efee7dbd 100644 --- a/tests/0048-partitioner.c +++ b/tests/0048-partitioner.c @@ -38,27 +38,28 @@ * - Verify that partitioning works across partitioners. 
*/ -int32_t my_invalid_partitioner (const rd_kafka_topic_t *rkt, - const void *keydata, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { - int32_t partition = partition_cnt + 10; - TEST_SAYL(4, "partition \"%.*s\" to %"PRId32"\n", - (int)keylen, (const char *)keydata, partition); - return partition; +int32_t my_invalid_partitioner(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + int32_t partition = partition_cnt + 10; + TEST_SAYL(4, "partition \"%.*s\" to %" PRId32 "\n", (int)keylen, + (const char *)keydata, partition); + return partition; } /* FIXME: This doesn't seem to trigger the bug in #797. * Still a useful test though. */ -static void do_test_failed_partitioning (void) { - rd_kafka_t *rk; +static void do_test_failed_partitioning(void) { + rd_kafka_t *rk; rd_kafka_conf_t *conf; - rd_kafka_topic_t *rkt; - rd_kafka_topic_conf_t *tconf; - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - int i; + rd_kafka_topic_t *rkt; + rd_kafka_topic_conf_t *tconf; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + int i; int msgcnt = test_quick ? 
100 : 10000; test_conf_init(&conf, &tconf, 0); @@ -66,37 +67,39 @@ static void do_test_failed_partitioning (void) { test_conf_set(conf, "sticky.partitioning.linger.ms", "0"); rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rd_kafka_topic_conf_set_partitioner_cb(tconf, my_invalid_partitioner); - test_topic_conf_set(tconf, "message.timeout.ms", + rd_kafka_topic_conf_set_partitioner_cb(tconf, my_invalid_partitioner); + test_topic_conf_set(tconf, "message.timeout.ms", tsprintf("%d", tmout_multip(10000))); - rkt = rd_kafka_topic_new(rk, topic, tconf); - TEST_ASSERT(rkt != NULL, "%s", rd_kafka_err2str(rd_kafka_last_error())); - - /* Produce some messages (to p 0) to create topic */ - test_produce_msgs(rk, rkt, 0, 0, 0, 2, NULL, 0); - - /* Now use partitioner */ - for (i = 0 ; i < msgcnt ; i++) { - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, - 0, NULL, 0, NULL, 0, NULL) == -1) - err = rd_kafka_last_error(); - if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - TEST_FAIL("produce(): " - "Expected UNKNOWN_PARTITION, got %s\n", - rd_kafka_err2str(err)); - } - test_flush(rk, 5000); - - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rkt = rd_kafka_topic_new(rk, topic, tconf); + TEST_ASSERT(rkt != NULL, "%s", rd_kafka_err2str(rd_kafka_last_error())); + + /* Produce some messages (to p 0) to create topic */ + test_produce_msgs(rk, rkt, 0, 0, 0, 2, NULL, 0); + + /* Now use partitioner */ + for (i = 0; i < msgcnt; i++) { + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, 0, NULL, 0, + NULL, 0, NULL) == -1) + err = rd_kafka_last_error(); + if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + TEST_FAIL( + "produce(): " + "Expected UNKNOWN_PARTITION, got %s\n", + rd_kafka_err2str(err)); + } + test_flush(rk, 5000); + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); } -static void part_dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void 
*opaque) { +static void part_dr_msg_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { int32_t *partp = rkmessage->_private; - int *remainsp = opaque; + int *remainsp = opaque; if (rkmessage->err) { /* Will fail later */ @@ -113,16 +116,18 @@ static void part_dr_msg_cb (rd_kafka_t *rk, /** * @brief Test single \p partitioner */ -static void do_test_partitioner (const char *topic, const char *partitioner, - int msgcnt, const char **keys, - const int32_t *exp_part) { +static void do_test_partitioner(const char *topic, + const char *partitioner, + int msgcnt, + const char **keys, + const int32_t *exp_part) { rd_kafka_t *rk; rd_kafka_conf_t *conf; int i; int32_t *parts; int remains = msgcnt; int randcnt = 0; - int fails = 0; + int fails = 0; TEST_SAY(_C_MAG "Test partitioner \"%s\"\n", partitioner); @@ -135,32 +140,28 @@ static void do_test_partitioner (const char *topic, const char *partitioner, rk = test_create_handle(RD_KAFKA_PRODUCER, conf); parts = malloc(msgcnt * sizeof(*parts)); - for (i = 0 ; i < msgcnt ; i++) + for (i = 0; i < msgcnt; i++) parts[i] = -1; /* * Produce messages */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { rd_kafka_resp_err_t err; - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_KEY(keys[i], - keys[i] ? - strlen(keys[i]) : 0), - RD_KAFKA_V_OPAQUE(&parts[i]), - RD_KAFKA_V_END); - TEST_ASSERT(!err, - "producev() failed: %s", rd_kafka_err2str(err)); + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_KEY(keys[i], keys[i] ? 
strlen(keys[i]) : 0), + RD_KAFKA_V_OPAQUE(&parts[i]), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() failed: %s", + rd_kafka_err2str(err)); randcnt += exp_part[i] == -1; } rd_kafka_flush(rk, tmout_multip(10000)); - TEST_ASSERT(remains == 0, - "Expected remains=%d, not %d for %d messages", + TEST_ASSERT(remains == 0, "Expected remains=%d, not %d for %d messages", 0, remains, msgcnt); /* @@ -168,9 +169,10 @@ static void do_test_partitioner (const char *topic, const char *partitioner, */ /* First look for produce failures */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { if (parts[i] == -1) { - TEST_WARN("Message #%d (exp part %"PRId32") " + TEST_WARN("Message #%d (exp part %" PRId32 + ") " "was not successfully produced\n", i, exp_part[i]); fails++; @@ -185,24 +187,23 @@ static void do_test_partitioner (const char *topic, const char *partitioner, * the produced partitions have some form of * random distribution */ int32_t last_part = parts[0]; - int samecnt = 0; + int samecnt = 0; - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { samecnt += parts[i] == last_part; last_part = parts[i]; } TEST_ASSERT(samecnt < msgcnt, - "No random distribution, all on partition %"PRId32, + "No random distribution, all on partition %" PRId32, last_part); } else { - for (i = 0 ; i < msgcnt ; i++) { - if (exp_part[i] != -1 && - parts[i] != exp_part[i]) { - TEST_WARN("Message #%d expected partition " - "%"PRId32" but got %"PRId32": %s\n", - i, exp_part[i], parts[i], - keys[i]); + for (i = 0; i < msgcnt; i++) { + if (exp_part[i] != -1 && parts[i] != exp_part[i]) { + TEST_WARN( + "Message #%d expected partition " + "%" PRId32 " but got %" PRId32 ": %s\n", + i, exp_part[i], parts[i], keys[i]); fails++; } } @@ -218,91 +219,65 @@ static void do_test_partitioner (const char *topic, const char *partitioner, TEST_SAY(_C_GRN "Test partitioner \"%s\": PASS\n", partitioner); } -extern uint32_t rd_crc32 (const char *, size_t); +extern uint32_t rd_crc32(const 
char *, size_t); /** * @brief Test all builtin partitioners */ -static void do_test_partitioners (void) { +static void do_test_partitioners(void) { int part_cnt = test_quick ? 7 : 17; #define _MSG_CNT 5 const char *unaligned = "123456"; /* Message keys */ const char *keys[_MSG_CNT] = { - NULL, - "", // empty - unaligned+1, - "this is another string with more length to it perhaps", - "hejsan" - }; + NULL, + "", // empty + unaligned + 1, + "this is another string with more length to it perhaps", "hejsan"}; struct { const char *partitioner; /* Expected partition per message (see keys above) */ int32_t exp_part[_MSG_CNT]; - } ptest[] = { - { "random", { -1, -1, -1, -1, -1 } }, - { "consistent", { - /* These constants were acquired using - * the 'crc32' command on OSX */ - 0x0 % part_cnt, - 0x0 % part_cnt, - 0xb1b451d7 % part_cnt, - 0xb0150df7 % part_cnt, - 0xd077037e % part_cnt - } }, - { "consistent_random", { - -1, - -1, - 0xb1b451d7 % part_cnt, - 0xb0150df7 % part_cnt, - 0xd077037e % part_cnt - } }, - { "murmur2", { - /* .. using tests/java/Murmur2Cli */ - 0x106e08d9 % part_cnt, - 0x106e08d9 % part_cnt, - 0x058d780f % part_cnt, - 0x4f7703da % part_cnt, - 0x5ec19395 % part_cnt - } }, - { "murmur2_random", { - -1, - 0x106e08d9 % part_cnt, - 0x058d780f % part_cnt, - 0x4f7703da % part_cnt, - 0x5ec19395 % part_cnt - } }, - { "fnv1a", { - /* .. 
using https://play.golang.org/p/hRkA4xtYyJ6 */ - 0x7ee3623b % part_cnt, - 0x7ee3623b % part_cnt, - 0x27e6f469 % part_cnt, - 0x155e3e5f % part_cnt, - 0x17b1e27a % part_cnt - } }, - { "fnv1a_random", { - -1, - 0x7ee3623b % part_cnt, - 0x27e6f469 % part_cnt, - 0x155e3e5f % part_cnt, - 0x17b1e27a % part_cnt - } }, - { NULL } - }; + } ptest[] = {{"random", {-1, -1, -1, -1, -1}}, + {"consistent", + {/* These constants were acquired using + * the 'crc32' command on OSX */ + 0x0 % part_cnt, 0x0 % part_cnt, 0xb1b451d7 % part_cnt, + 0xb0150df7 % part_cnt, 0xd077037e % part_cnt}}, + {"consistent_random", + {-1, -1, 0xb1b451d7 % part_cnt, 0xb0150df7 % part_cnt, + 0xd077037e % part_cnt}}, + {"murmur2", + {/* .. using tests/java/Murmur2Cli */ + 0x106e08d9 % part_cnt, 0x106e08d9 % part_cnt, + 0x058d780f % part_cnt, 0x4f7703da % part_cnt, + 0x5ec19395 % part_cnt}}, + {"murmur2_random", + {-1, 0x106e08d9 % part_cnt, 0x058d780f % part_cnt, + 0x4f7703da % part_cnt, 0x5ec19395 % part_cnt}}, + {"fnv1a", + {/* .. 
using https://play.golang.org/p/hRkA4xtYyJ6 */ + 0x7ee3623b % part_cnt, 0x7ee3623b % part_cnt, + 0x27e6f469 % part_cnt, 0x155e3e5f % part_cnt, + 0x17b1e27a % part_cnt}}, + {"fnv1a_random", + {-1, 0x7ee3623b % part_cnt, 0x27e6f469 % part_cnt, + 0x155e3e5f % part_cnt, 0x17b1e27a % part_cnt}}, + {NULL}}; int pi; const char *topic = test_mk_topic_name(__FUNCTION__, 1); test_create_topic(NULL, topic, part_cnt, 1); - for (pi = 0 ; ptest[pi].partitioner ; pi++) { - do_test_partitioner(topic, ptest[pi].partitioner, - _MSG_CNT, keys, ptest[pi].exp_part); + for (pi = 0; ptest[pi].partitioner; pi++) { + do_test_partitioner(topic, ptest[pi].partitioner, _MSG_CNT, + keys, ptest[pi].exp_part); } } -int main_0048_partitioner (int argc, char **argv) { +int main_0048_partitioner(int argc, char **argv) { if (test_can_create_topics(0)) do_test_partitioners(); - do_test_failed_partitioning(); - return 0; + do_test_failed_partitioning(); + return 0; } diff --git a/tests/0049-consume_conn_close.c b/tests/0049-consume_conn_close.c index 34e32c29bd..6083a1a764 100644 --- a/tests/0049-consume_conn_close.c +++ b/tests/0049-consume_conn_close.c @@ -43,7 +43,7 @@ static int simulate_network_down = 0; * @brief Sockem connect, called from **internal librdkafka thread** through * librdkafka's connect_cb */ -static int connect_cb (struct test *test, sockem_t *skm, const char *id) { +static int connect_cb(struct test *test, sockem_t *skm, const char *id) { int r; TEST_LOCK(); @@ -61,8 +61,8 @@ static int connect_cb (struct test *test, sockem_t *skm, const char *id) { return 0; } -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. 
* SASL auther will think a connection-down even in the auth @@ -75,7 +75,7 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, } -int main_0049_consume_conn_close (int argc, char **argv) { +int main_0049_consume_conn_close(int argc, char **argv) { rd_kafka_t *rk; const char *topic = test_mk_topic_name("0049_consume_conn_close", 1); uint64_t testid; @@ -87,8 +87,9 @@ int main_0049_consume_conn_close (int argc, char **argv) { rd_kafka_resp_err_t err; if (!test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) { - TEST_SKIP("KNOWN ISSUE: ApiVersionRequest+SaslHandshake " - "will not play well with sudden disconnects\n"); + TEST_SKIP( + "KNOWN ISSUE: ApiVersionRequest+SaslHandshake " + "will not play well with sudden disconnects\n"); return 0; } @@ -101,7 +102,7 @@ int main_0049_consume_conn_close (int argc, char **argv) { test_socket_enable(conf); - test_curr->connect_cb = connect_cb; + test_curr->connect_cb = connect_cb; test_curr->is_fatal_cb = is_fatal_cb; test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); @@ -112,7 +113,7 @@ int main_0049_consume_conn_close (int argc, char **argv) { test_msgver_init(&mv, testid); - test_consumer_poll("consume.up", rk, testid, -1, 0, msgcnt/2, &mv); + test_consumer_poll("consume.up", rk, testid, -1, 0, msgcnt / 2, &mv); err = rd_kafka_assignment(rk, &assignment); TEST_ASSERT(!err, "assignment() failed: %s", rd_kafka_err2str(err)); @@ -123,7 +124,7 @@ int main_0049_consume_conn_close (int argc, char **argv) { TEST_LOCK(); simulate_network_down = 1; TEST_UNLOCK(); - test_socket_close_all(test_curr, 1/*reinit*/); + test_socket_close_all(test_curr, 1 /*reinit*/); TEST_SAY("Waiting for session timeout to expire (6s), and then some\n"); @@ -131,7 +132,7 @@ int main_0049_consume_conn_close (int argc, char **argv) { * callback fallback (CONSUMER_ERR) */ assignment->elems[0].offset = 123456789; TEST_SAY("Committing offsets while down, should fail eventually\n"); - err = rd_kafka_commit(rk, assignment, 1/*async*/); + 
err = rd_kafka_commit(rk, assignment, 1 /*async*/); TEST_ASSERT(!err, "async commit failed: %s", rd_kafka_err2str(err)); rd_kafka_topic_partition_list_destroy(assignment); @@ -143,10 +144,10 @@ int main_0049_consume_conn_close (int argc, char **argv) { TEST_UNLOCK(); TEST_SAY("Continuing to consume..\n"); - test_consumer_poll("consume.up2", rk, testid, -1, msgcnt/2, msgcnt/2, - &mv); + test_consumer_poll("consume.up2", rk, testid, -1, msgcnt / 2, + msgcnt / 2, &mv); - test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER|TEST_MSGVER_DUP, + test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP, 0, msgcnt); test_msgver_clear(&mv); diff --git a/tests/0050-subscribe_adds.c b/tests/0050-subscribe_adds.c index efe3618fdd..d55e6e09a2 100644 --- a/tests/0050-subscribe_adds.c +++ b/tests/0050-subscribe_adds.c @@ -43,13 +43,13 @@ * * Verify that there were no duplicate messages. */ -int main_0050_subscribe_adds (int argc, char **argv) { +int main_0050_subscribe_adds(int argc, char **argv) { rd_kafka_t *rk; - #define TOPIC_CNT 3 +#define TOPIC_CNT 3 char *topic[TOPIC_CNT] = { - rd_strdup(test_mk_topic_name("0050_subscribe_adds_1", 1)), - rd_strdup(test_mk_topic_name("0050_subscribe_adds_2", 1)), - rd_strdup(test_mk_topic_name("0050_subscribe_adds_3", 1)), + rd_strdup(test_mk_topic_name("0050_subscribe_adds_1", 1)), + rd_strdup(test_mk_topic_name("0050_subscribe_adds_2", 1)), + rd_strdup(test_mk_topic_name("0050_subscribe_adds_3", 1)), }; uint64_t testid; int msgcnt = test_quick ? 
100 : 10000; @@ -64,7 +64,7 @@ int main_0050_subscribe_adds (int argc, char **argv) { testid = test_id_generate(); rk = test_create_producer(); - for (i = 0 ; i < TOPIC_CNT ; i++) { + for (i = 0; i < TOPIC_CNT; i++) { rd_kafka_topic_t *rkt; rkt = test_create_producer_topic(rk, topic[i], NULL); @@ -84,7 +84,7 @@ int main_0050_subscribe_adds (int argc, char **argv) { rk = test_create_consumer(topic[0], NULL, conf, tconf); tlist = rd_kafka_topic_partition_list_new(TOPIC_CNT); - for (i = 0 ; i < TOPIC_CNT ; i++) { + for (i = 0; i < TOPIC_CNT; i++) { rd_kafka_topic_partition_list_add(tlist, topic[i], RD_KAFKA_PARTITION_UA); TEST_SAY("Subscribe to %d topic(s):\n", tlist->cnt); @@ -100,15 +100,15 @@ int main_0050_subscribe_adds (int argc, char **argv) { test_consumer_poll("consume", rk, testid, -1, 0, msgcnt, &mv); /* Now remove T2 */ - rd_kafka_topic_partition_list_del(tlist, topic[1], RD_KAFKA_PARTITION_UA); + rd_kafka_topic_partition_list_del(tlist, topic[1], + RD_KAFKA_PARTITION_UA); err = rd_kafka_subscribe(rk, tlist); - TEST_ASSERT(!err, "subscribe() failed: %s", - rd_kafka_err2str(err)); + TEST_ASSERT(!err, "subscribe() failed: %s", rd_kafka_err2str(err)); - test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000*1.5)); + test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000 * 1.5)); - test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER|TEST_MSGVER_DUP, + test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP, 0, msgcnt); test_msgver_clear(&mv); @@ -117,7 +117,7 @@ int main_0050_subscribe_adds (int argc, char **argv) { test_consumer_close(rk); rd_kafka_destroy(rk); - for (i = 0 ; i < TOPIC_CNT ; i++) + for (i = 0; i < TOPIC_CNT; i++) rd_free(topic[i]); return 0; diff --git a/tests/0051-assign_adds.c b/tests/0051-assign_adds.c index ee7e8e99ee..6f97b2ee49 100644 --- a/tests/0051-assign_adds.c +++ b/tests/0051-assign_adds.c @@ -43,13 +43,13 @@ * * Verify that there were no duplicate messages. 
*/ -int main_0051_assign_adds (int argc, char **argv) { +int main_0051_assign_adds(int argc, char **argv) { rd_kafka_t *rk; - #define TOPIC_CNT 3 +#define TOPIC_CNT 3 char *topic[TOPIC_CNT] = { - rd_strdup(test_mk_topic_name("0051_assign_adds_1", 1)), - rd_strdup(test_mk_topic_name("0051_assign_adds_2", 1)), - rd_strdup(test_mk_topic_name("0051_assign_adds_3", 1)), + rd_strdup(test_mk_topic_name("0051_assign_adds_1", 1)), + rd_strdup(test_mk_topic_name("0051_assign_adds_2", 1)), + rd_strdup(test_mk_topic_name("0051_assign_adds_3", 1)), }; uint64_t testid; int msgcnt = test_quick ? 100 : 1000; @@ -64,13 +64,12 @@ int main_0051_assign_adds (int argc, char **argv) { testid = test_id_generate(); rk = test_create_producer(); - for (i = 0 ; i < TOPIC_CNT ; i++) { + for (i = 0; i < TOPIC_CNT; i++) { rd_kafka_topic_t *rkt; rkt = test_create_producer_topic(rk, topic[i], NULL); - test_produce_msgs(rk, rkt, testid, 0, - (msgcnt / TOPIC_CNT) * i, + test_produce_msgs(rk, rkt, testid, 0, (msgcnt / TOPIC_CNT) * i, (msgcnt / TOPIC_CNT), NULL, 100); rd_kafka_topic_destroy(rkt); @@ -84,14 +83,13 @@ int main_0051_assign_adds (int argc, char **argv) { rk = test_create_consumer(topic[0], NULL, conf, tconf); tlist = rd_kafka_topic_partition_list_new(TOPIC_CNT); - for (i = 0 ; i < TOPIC_CNT ; i++) { + for (i = 0; i < TOPIC_CNT; i++) { rd_kafka_topic_partition_list_add(tlist, topic[i], 0); TEST_SAY("Assign %d topic(s):\n", tlist->cnt); test_print_partition_list(tlist); err = rd_kafka_assign(rk, tlist); - TEST_ASSERT(!err, "assign() failed: %s", - rd_kafka_err2str(err)); + TEST_ASSERT(!err, "assign() failed: %s", rd_kafka_err2str(err)); } test_msgver_init(&mv, testid); @@ -104,13 +102,13 @@ int main_0051_assign_adds (int argc, char **argv) { /* Now remove T2 */ rd_kafka_topic_partition_list_del(tlist, topic[1], 0); err = rd_kafka_assign(rk, tlist); - TEST_ASSERT(!err, "assign() failed: %s", - rd_kafka_err2str(err)); + TEST_ASSERT(!err, "assign() failed: %s", rd_kafka_err2str(err)); - 
TEST_SAY("Should not see any messages for session.timeout.ms+some more\n"); - test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000*1.5)); + TEST_SAY( + "Should not see any messages for session.timeout.ms+some more\n"); + test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000 * 1.5)); - test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER|TEST_MSGVER_DUP, + test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP, 0, msgcnt); test_msgver_clear(&mv); @@ -120,7 +118,7 @@ int main_0051_assign_adds (int argc, char **argv) { test_consumer_close(rk); rd_kafka_destroy(rk); - for (i = 0 ; i < TOPIC_CNT ; i++) + for (i = 0; i < TOPIC_CNT; i++) rd_free(topic[i]); return 0; diff --git a/tests/0052-msg_timestamps.c b/tests/0052-msg_timestamps.c index 02f5a1eb33..b18d14aa6d 100644 --- a/tests/0052-msg_timestamps.c +++ b/tests/0052-msg_timestamps.c @@ -38,11 +38,11 @@ struct timestamp_range { int64_t max; }; -static const struct timestamp_range invalid_timestamp = { -1, -1 }; +static const struct timestamp_range invalid_timestamp = {-1, -1}; static struct timestamp_range broker_timestamp; static struct timestamp_range my_timestamp; -static void prepare_timestamps (void) { +static void prepare_timestamps(void) { struct timeval ts; rd_gettimeofday(&ts, NULL); @@ -53,15 +53,18 @@ static void prepare_timestamps (void) { /* client timestamps: set in the future (24 hours) * to be outside of broker timestamps */ my_timestamp.min = my_timestamp.max = - (int64_t)ts.tv_sec + (24 * 3600 * 1000LLU); + (int64_t)ts.tv_sec + (24 * 3600 * 1000LLU); } /** * @brief Produce messages according to compress \p codec */ -static void produce_msgs (const char *topic, int partition, uint64_t testid, - int msgcnt, const char *broker_version, - const char *codec) { +static void produce_msgs(const char *topic, + int partition, + uint64_t testid, + int msgcnt, + const char *broker_version, + const char *codec) { rd_kafka_conf_t *conf; rd_kafka_t *rk; int i; @@ -79,26 +82,25 @@ 
static void produce_msgs (const char *topic, int partition, uint64_t testid, } /* Make sure to trigger a bunch of MessageSets */ - test_conf_set(conf, "batch.num.messages", tsprintf("%d", msgcnt/5)); + test_conf_set(conf, "batch.num.messages", tsprintf("%d", msgcnt / 5)); rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { rd_kafka_resp_err_t err; - test_prepare_msg(testid, partition, i, - buf, sizeof(buf), key, sizeof(key)); - - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(buf, sizeof(buf)), - RD_KAFKA_V_KEY(key, sizeof(key)), - RD_KAFKA_V_TIMESTAMP(my_timestamp.min), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(&msgcounter), - RD_KAFKA_V_END); + test_prepare_msg(testid, partition, i, buf, sizeof(buf), key, + sizeof(key)); + + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_VALUE(buf, sizeof(buf)), + RD_KAFKA_V_KEY(key, sizeof(key)), + RD_KAFKA_V_TIMESTAMP(my_timestamp.min), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END); if (err) - TEST_FAIL("producev() failed at msg #%d/%d: %s", - i, msgcnt, rd_kafka_err2str(err)); + TEST_FAIL("producev() failed at msg #%d/%d: %s", i, + msgcnt, rd_kafka_err2str(err)); } TEST_SAY("Waiting for %d messages to be produced\n", msgcounter); @@ -109,45 +111,48 @@ static void produce_msgs (const char *topic, int partition, uint64_t testid, } static void -consume_msgs_verify_timestamps (const char *topic, int partition, - uint64_t testid, int msgcnt, - const struct timestamp_range *exp_timestamp) { +consume_msgs_verify_timestamps(const char *topic, + int partition, + uint64_t testid, + int msgcnt, + const struct timestamp_range *exp_timestamp) { test_msgver_t mv; test_msgver_init(&mv, testid); - test_consume_msgs_easy_mv(topic, topic, -1, - testid, -1, msgcnt, NULL, &mv); - - test_msgver_verify0(__FUNCTION__, __LINE__, - topic, &mv, - TEST_MSGVER_RANGE| - 
TEST_MSGVER_BY_MSGID|TEST_MSGVER_BY_TIMESTAMP, - (struct test_mv_vs){ .msg_base = 0, - .exp_cnt = msgcnt, - .timestamp_min = exp_timestamp->min, - .timestamp_max = exp_timestamp->max - }); + test_consume_msgs_easy_mv(topic, topic, -1, testid, -1, msgcnt, NULL, + &mv); + + test_msgver_verify0( + __FUNCTION__, __LINE__, topic, &mv, + TEST_MSGVER_RANGE | TEST_MSGVER_BY_MSGID | TEST_MSGVER_BY_TIMESTAMP, + (struct test_mv_vs) {.msg_base = 0, + .exp_cnt = msgcnt, + .timestamp_min = exp_timestamp->min, + .timestamp_max = exp_timestamp->max}); test_msgver_clear(&mv); } -static void test_timestamps (const char *broker_tstype, - const char *broker_version, - const char *codec, - const struct timestamp_range *exp_timestamps) { - const char *topic = test_mk_topic_name( - tsprintf("0052_msg_timestamps_%s_%s_%s", - broker_tstype, broker_version, codec), 1); +static void test_timestamps(const char *broker_tstype, + const char *broker_version, + const char *codec, + const struct timestamp_range *exp_timestamps) { + const char *topic = + test_mk_topic_name(tsprintf("0052_msg_timestamps_%s_%s_%s", + broker_tstype, broker_version, codec), + 1); const int msgcnt = 20; - uint64_t testid = test_id_generate(); + uint64_t testid = test_id_generate(); if ((!strncmp(broker_version, "0.9", 3) || !strncmp(broker_version, "0.8", 3)) && !test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) { - TEST_SAY(_C_YEL "Skipping %s, %s test: " - "SaslHandshake not supported by broker v%s" _C_CLR "\n", + TEST_SAY(_C_YEL + "Skipping %s, %s test: " + "SaslHandshake not supported by broker v%s" _C_CLR + "\n", broker_tstype, codec, broker_version); return; } @@ -155,25 +160,26 @@ static void test_timestamps (const char *broker_tstype, TEST_SAY(_C_MAG "Timestamp test using %s\n", topic); test_timeout_set(30); - test_kafka_topics("--create --topic \"%s\" " - "--replication-factor 1 --partitions 1 " - "--config message.timestamp.type=%s", - topic, broker_tstype); + test_kafka_topics( + "--create --topic \"%s\" " 
+ "--replication-factor 1 --partitions 1 " + "--config message.timestamp.type=%s", + topic, broker_tstype); TEST_SAY(_C_MAG "Producing %d messages to %s\n", msgcnt, topic); produce_msgs(topic, 0, testid, msgcnt, broker_version, codec); - TEST_SAY(_C_MAG "Consuming and verifying %d messages from %s " - "with expected timestamps %"PRId64"..%"PRId64"\n", - msgcnt, topic, - exp_timestamps->min, exp_timestamps->max); + TEST_SAY(_C_MAG + "Consuming and verifying %d messages from %s " + "with expected timestamps %" PRId64 "..%" PRId64 "\n", + msgcnt, topic, exp_timestamps->min, exp_timestamps->max); consume_msgs_verify_timestamps(topic, 0, testid, msgcnt, exp_timestamps); } -int main_0052_msg_timestamps (int argc, char **argv) { +int main_0052_msg_timestamps(int argc, char **argv) { if (!test_can_create_topics(1)) return 0; @@ -194,15 +200,15 @@ int main_0052_msg_timestamps (int argc, char **argv) { */ prepare_timestamps(); - test_timestamps("CreateTime", "0.10.1.0", "none", &my_timestamp); + test_timestamps("CreateTime", "0.10.1.0", "none", &my_timestamp); test_timestamps("LogAppendTime", "0.10.1.0", "none", &broker_timestamp); - test_timestamps("CreateTime", "0.9.0.0", "none", &invalid_timestamp); - test_timestamps("LogAppendTime", "0.9.0.0", "none", &broker_timestamp); + test_timestamps("CreateTime", "0.9.0.0", "none", &invalid_timestamp); + test_timestamps("LogAppendTime", "0.9.0.0", "none", &broker_timestamp); #if WITH_ZLIB - test_timestamps("CreateTime", "0.10.1.0", "gzip", &my_timestamp); + test_timestamps("CreateTime", "0.10.1.0", "gzip", &my_timestamp); test_timestamps("LogAppendTime", "0.10.1.0", "gzip", &broker_timestamp); - test_timestamps("CreateTime", "0.9.0.0", "gzip", &invalid_timestamp); - test_timestamps("LogAppendTime", "0.9.0.0", "gzip", &broker_timestamp); + test_timestamps("CreateTime", "0.9.0.0", "gzip", &invalid_timestamp); + test_timestamps("LogAppendTime", "0.9.0.0", "gzip", &broker_timestamp); #endif return 0; diff --git 
a/tests/0053-stats_cb.cpp b/tests/0053-stats_cb.cpp index f07d6b68ee..a61755c30b 100644 --- a/tests/0053-stats_cb.cpp +++ b/tests/0053-stats_cb.cpp @@ -49,32 +49,31 @@ static const char *stats_schema_path = "../src/statistics_schema.json"; */ class TestSchemaValidator { public: - TestSchemaValidator () { - + TestSchemaValidator() { } - TestSchemaValidator (const std::string schema_path) { + TestSchemaValidator(const std::string schema_path) { /* Read schema from file */ schema_path_ = schema_path; std::ifstream f(schema_path.c_str()); if (!f.is_open()) - Test::Fail(tostr() << "Failed to open schema " << schema_path << - ": " << strerror(errno)); + Test::Fail(tostr() << "Failed to open schema " << schema_path << ": " + << strerror(errno)); std::string schema_str((std::istreambuf_iterator(f)), (std::istreambuf_iterator())); /* Parse schema */ sd_ = new rapidjson::Document(); if (sd_->Parse(schema_str.c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse statistics schema: " << - rapidjson::GetParseError_En(sd_->GetParseError()) << - " at " << sd_->GetErrorOffset()); + Test::Fail(tostr() << "Failed to parse statistics schema: " + << rapidjson::GetParseError_En(sd_->GetParseError()) + << " at " << sd_->GetErrorOffset()); - schema_ = new rapidjson::SchemaDocument(*sd_); + schema_ = new rapidjson::SchemaDocument(*sd_); validator_ = new rapidjson::SchemaValidator(*schema_); } - ~TestSchemaValidator () { + ~TestSchemaValidator() { if (sd_) delete sd_; if (schema_) @@ -83,29 +82,30 @@ class TestSchemaValidator { delete validator_; } - void validate (const std::string &json_doc) { + void validate(const std::string &json_doc) { /* Parse JSON to validate */ rapidjson::Document d; if (d.Parse(json_doc.c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse stats JSON: " << - rapidjson::GetParseError_En(d.GetParseError()) << - " at " << d.GetErrorOffset()); + Test::Fail(tostr() << "Failed to parse stats JSON: " + << 
rapidjson::GetParseError_En(d.GetParseError()) + << " at " << d.GetErrorOffset()); /* Validate using schema */ if (!d.Accept(*validator_)) { - rapidjson::StringBuffer sb; validator_->GetInvalidSchemaPointer().StringifyUriFragment(sb); Test::Say(tostr() << "Schema: " << sb.GetString() << "\n"); - Test::Say(tostr() << "Invalid keyword: " << validator_->GetInvalidSchemaKeyword() << "\n"); + Test::Say(tostr() << "Invalid keyword: " + << validator_->GetInvalidSchemaKeyword() << "\n"); sb.Clear(); validator_->GetInvalidDocumentPointer().StringifyUriFragment(sb); Test::Say(tostr() << "Invalid document: " << sb.GetString() << "\n"); sb.Clear(); - Test::Fail(tostr() << "JSON validation using schema " << schema_path_ << " failed"); + Test::Fail(tostr() << "JSON validation using schema " << schema_path_ + << " failed"); } Test::Say(3, "JSON document validated using schema " + schema_path_ + "\n"); @@ -124,16 +124,15 @@ class TestSchemaValidator { /* Dummy validator doing nothing when RapidJSON is unavailable */ class TestSchemaValidator { public: - TestSchemaValidator () { - + TestSchemaValidator() { } - TestSchemaValidator (const std::string schema_path) { + TestSchemaValidator(const std::string schema_path) { } - ~TestSchemaValidator () { + ~TestSchemaValidator() { } - void validate (const std::string &json_doc) { + void validate(const std::string &json_doc) { } }; @@ -141,28 +140,27 @@ class TestSchemaValidator { class myEventCb : public RdKafka::EventCb { public: - myEventCb(const std::string schema_path): + myEventCb(const std::string schema_path) : validator_(TestSchemaValidator(schema_path)) { stats_cnt = 0; } int stats_cnt; - std::string last; /**< Last stats document */ - - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_STATS: - if (!(stats_cnt % 10)) - Test::Say(tostr() << "Stats (#" << stats_cnt << "): " << - event.str() << "\n"); - if (event.str().length() > 20) - stats_cnt += 1; - validator_.validate(event.str()); 
- last = event.str(); - break; - default: - break; + std::string last; /**< Last stats document */ + + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_STATS: + if (!(stats_cnt % 10)) + Test::Say(tostr() << "Stats (#" << stats_cnt << "): " << event.str() + << "\n"); + if (event.str().length() > 20) + stats_cnt += 1; + validator_.validate(event.str()); + last = event.str(); + break; + default: + break; } } @@ -174,20 +172,21 @@ class myEventCb : public RdKafka::EventCb { /** * @brief Verify that stats are emitted according to statistics.interval.ms */ -void test_stats_timing () { +void test_stats_timing() { RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - myEventCb my_event = myEventCb(stats_schema_path); + myEventCb my_event = myEventCb(stats_schema_path); std::string errstr; - if (conf->set("statistics.interval.ms", "100", errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + if (conf->set("statistics.interval.ms", "100", errstr) != + RdKafka::Conf::CONF_OK) + Test::Fail(errstr); if (conf->set("event_cb", &my_event, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + Test::Fail(errstr); RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); if (!p) - Test::Fail("Failed to create Producer: " + errstr); + Test::Fail("Failed to create Producer: " + errstr); delete conf; int64_t t_start = test_clock(); @@ -195,22 +194,24 @@ void test_stats_timing () { while (my_event.stats_cnt < 12) p->poll(1000); - int elapsed = (int)((test_clock() - t_start) / 1000); + int elapsed = (int)((test_clock() - t_start) / 1000); const int expected_time = 1200; - Test::Say(tostr() << my_event.stats_cnt << " (expected 12) stats callbacks received in " << - elapsed << "ms (expected " << expected_time << "ms +-25%)\n"); + Test::Say(tostr() << my_event.stats_cnt + << " (expected 12) stats callbacks received in " << elapsed + << "ms (expected " << expected_time << "ms +-25%)\n"); - if (elapsed < 
expected_time * 0.75 || - elapsed > expected_time * 1.25) { + if (elapsed < expected_time * 0.75 || elapsed > expected_time * 1.25) { /* We can't rely on CIs giving our test job enough CPU to finish * in time, so don't error out even if the time is outside the window */ if (test_on_ci) - Test::Say(tostr() << "WARNING: Elapsed time " << elapsed << "ms outside +-25% window (" << - expected_time << "ms), cnt " << my_event.stats_cnt); + Test::Say(tostr() << "WARNING: Elapsed time " << elapsed + << "ms outside +-25% window (" << expected_time + << "ms), cnt " << my_event.stats_cnt); else - Test::Fail(tostr() << "Elapsed time " << elapsed << "ms outside +-25% window (" << - expected_time << "ms), cnt " << my_event.stats_cnt); + Test::Fail(tostr() << "Elapsed time " << elapsed + << "ms outside +-25% window (" << expected_time + << "ms), cnt " << my_event.stats_cnt); } delete p; } @@ -223,67 +224,68 @@ void test_stats_timing () { * @brief Expected partition stats */ struct exp_part_stats { - std::string topic; /**< Topic */ - int32_t part; /**< Partition id */ - int msgcnt; /**< Expected message count */ - int msgsize; /**< Expected per message size. - * This includes both key and value lengths */ + std::string topic; /**< Topic */ + int32_t part; /**< Partition id */ + int msgcnt; /**< Expected message count */ + int msgsize; /**< Expected per message size. + * This includes both key and value lengths */ /* Calculated */ - int64_t totsize; /**< Message size sum */ + int64_t totsize; /**< Message size sum */ }; /** * @brief Verify end-to-end producer and consumer stats. */ -static void verify_e2e_stats (const std::string &prod_stats, - const std::string &cons_stats, - struct exp_part_stats *exp_parts, int partcnt) { +static void verify_e2e_stats(const std::string &prod_stats, + const std::string &cons_stats, + struct exp_part_stats *exp_parts, + int partcnt) { /** * Parse JSON stats * These documents are already validated in the Event callback. 
*/ rapidjson::Document p; - if (p.Parse(prod_stats.c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse producer stats JSON: " << - rapidjson::GetParseError_En(p.GetParseError()) << - " at " << p.GetErrorOffset()); + if (p.Parse(prod_stats.c_str()) + .HasParseError()) + Test::Fail(tostr() << "Failed to parse producer stats JSON: " + << rapidjson::GetParseError_En(p.GetParseError()) + << " at " << p.GetErrorOffset()); rapidjson::Document c; - if (c.Parse(cons_stats.c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse consumer stats JSON: " << - rapidjson::GetParseError_En(c.GetParseError()) << - " at " << c.GetErrorOffset()); + if (c.Parse(cons_stats.c_str()) + .HasParseError()) + Test::Fail(tostr() << "Failed to parse consumer stats JSON: " + << rapidjson::GetParseError_En(c.GetParseError()) + << " at " << c.GetErrorOffset()); assert(p.HasMember("name")); assert(c.HasMember("name")); assert(p.HasMember("type")); assert(c.HasMember("type")); - Test::Say(tostr() << "Verifying stats from Producer " << p["name"].GetString() << - " and Consumer " << c["name"].GetString() << "\n"); + Test::Say(tostr() << "Verifying stats from Producer " << p["name"].GetString() + << " and Consumer " << c["name"].GetString() << "\n"); assert(!strcmp(p["type"].GetString(), "producer")); assert(!strcmp(c["type"].GetString(), "consumer")); - int64_t exp_tot_txmsgs = 0; + int64_t exp_tot_txmsgs = 0; int64_t exp_tot_txmsg_bytes = 0; - int64_t exp_tot_rxmsgs = 0; + int64_t exp_tot_rxmsgs = 0; int64_t exp_tot_rxmsg_bytes = 0; - for (int part = 0 ; part < partcnt ; part++) { - + for (int part = 0; part < partcnt; part++) { /* * Find partition stats. */ /* Construct the partition path. 
*/ char path[256]; - rd_snprintf(path, sizeof(path), - "/topics/%s/partitions/%d", + rd_snprintf(path, sizeof(path), "/topics/%s/partitions/%d", exp_parts[part].topic.c_str(), exp_parts[part].part); - Test::Say(tostr() << "Looking up partition " << exp_parts[part].part << - " with path " << path << "\n"); + Test::Say(tostr() << "Looking up partition " << exp_parts[part].part + << " with path " << path << "\n"); /* Even though GetValueByPointer() takes a "char[]" it can only be used * with perfectly sized char buffers or string literals since it @@ -293,13 +295,13 @@ static void verify_e2e_stats (const std::string &prod_stats, rapidjson::Value *pp = rapidjson::GetValueByPointer(p, jpath); if (!pp) - Test::Fail(tostr() << "Producer: could not find " << path << - " in " << prod_stats << "\n"); + Test::Fail(tostr() << "Producer: could not find " << path << " in " + << prod_stats << "\n"); rapidjson::Value *cp = rapidjson::GetValueByPointer(c, jpath); if (!pp) - Test::Fail(tostr() << "Consumer: could not find " << path << - " in " << cons_stats << "\n"); + Test::Fail(tostr() << "Consumer: could not find " << path << " in " + << cons_stats << "\n"); assert(pp->HasMember("partition")); assert(pp->HasMember("txmsgs")); @@ -311,9 +313,9 @@ static void verify_e2e_stats (const std::string &prod_stats, Test::Say(tostr() << "partition: " << (*pp)["partition"].GetInt() << "\n"); - int64_t txmsgs = (*pp)["txmsgs"].GetInt(); + int64_t txmsgs = (*pp)["txmsgs"].GetInt(); int64_t txbytes = (*pp)["txbytes"].GetInt(); - int64_t rxmsgs = (*cp)["rxmsgs"].GetInt(); + int64_t rxmsgs = (*cp)["rxmsgs"].GetInt(); int64_t rxbytes = (*cp)["rxbytes"].GetInt(); exp_tot_txmsgs += txmsgs; @@ -321,12 +323,18 @@ static void verify_e2e_stats (const std::string &prod_stats, exp_tot_rxmsgs += rxmsgs; exp_tot_rxmsg_bytes += rxbytes; - Test::Say(tostr() << "Producer partition: " << (*pp)["partition"].GetInt() << ": " << - "txmsgs: " << txmsgs << " vs " << exp_parts[part].msgcnt << ", " << - "txbytes: " << 
txbytes << " vs " << exp_parts[part].totsize << "\n"); - Test::Say(tostr() << "Consumer partition: " << (*cp)["partition"].GetInt() << ": " << - "rxmsgs: " << rxmsgs << " vs " << exp_parts[part].msgcnt << ", " << - "rxbytes: " << rxbytes << " vs " << exp_parts[part].totsize << "\n"); + Test::Say(tostr() << "Producer partition: " << (*pp)["partition"].GetInt() + << ": " + << "txmsgs: " << txmsgs << " vs " + << exp_parts[part].msgcnt << ", " + << "txbytes: " << txbytes << " vs " + << exp_parts[part].totsize << "\n"); + Test::Say(tostr() << "Consumer partition: " << (*cp)["partition"].GetInt() + << ": " + << "rxmsgs: " << rxmsgs << " vs " + << exp_parts[part].msgcnt << ", " + << "rxbytes: " << rxbytes << " vs " + << exp_parts[part].totsize << "\n"); } /* Check top-level total stats */ @@ -336,18 +344,21 @@ static void verify_e2e_stats (const std::string &prod_stats, assert(p.HasMember("rxmsgs")); assert(p.HasMember("rxmsg_bytes")); - int64_t tot_txmsgs = p["txmsgs"].GetInt(); + int64_t tot_txmsgs = p["txmsgs"].GetInt(); int64_t tot_txmsg_bytes = p["txmsg_bytes"].GetInt(); - int64_t tot_rxmsgs = c["rxmsgs"].GetInt(); + int64_t tot_rxmsgs = c["rxmsgs"].GetInt(); int64_t tot_rxmsg_bytes = c["rxmsg_bytes"].GetInt(); - Test::Say(tostr() << "Producer total: " << - "txmsgs: " << tot_txmsgs << " vs " << exp_tot_txmsgs << ", " << - "txbytes: " << tot_txmsg_bytes << " vs " << exp_tot_txmsg_bytes << "\n"); - Test::Say(tostr() << "Consumer total: " << - "rxmsgs: " << tot_rxmsgs << " vs " << exp_tot_rxmsgs << ", " << - "rxbytes: " << tot_rxmsg_bytes << " vs " << exp_tot_rxmsg_bytes << "\n"); - + Test::Say(tostr() << "Producer total: " + << "txmsgs: " << tot_txmsgs << " vs " << exp_tot_txmsgs + << ", " + << "txbytes: " << tot_txmsg_bytes << " vs " + << exp_tot_txmsg_bytes << "\n"); + Test::Say(tostr() << "Consumer total: " + << "rxmsgs: " << tot_rxmsgs << " vs " << exp_tot_rxmsgs + << ", " + << "rxbytes: " << tot_rxmsg_bytes << " vs " + << exp_tot_rxmsg_bytes << "\n"); } /** @@ 
-359,7 +370,7 @@ static void verify_e2e_stats (const std::string &prod_stats, * * Requires RapidJSON (for parsing the stats). */ -static void test_stats () { +static void test_stats() { std::string errstr; RdKafka::Conf *conf; myEventCb producer_event(stats_schema_path); @@ -368,26 +379,27 @@ static void test_stats () { std::string topic = Test::mk_topic_name("0053_stats", 1); const int partcnt = 2; - int msgcnt = (test_quick ? 10 : 100) * partcnt; - const int msgsize = 6*1024; + int msgcnt = (test_quick ? 10 : 100) * partcnt; + const int msgsize = 6 * 1024; /* * Common config for producer and consumer */ Test::conf_init(&conf, NULL, 60); - if (conf->set("statistics.interval.ms", "1000", errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + if (conf->set("statistics.interval.ms", "1000", errstr) != + RdKafka::Conf::CONF_OK) + Test::Fail(errstr); /* * Create Producer */ if (conf->set("event_cb", &producer_event, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + Test::Fail(errstr); RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); if (!p) - Test::Fail("Failed to create Producer: " + errstr); + Test::Fail("Failed to create Producer: " + errstr); /* @@ -397,7 +409,7 @@ static void test_stats () { conf->set("auto.offset.reset", "earliest", errstr); conf->set("enable.partition.eof", "false", errstr); if (conf->set("event_cb", &consumer_event, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + Test::Fail(errstr); RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); if (!c) @@ -409,15 +421,15 @@ static void test_stats () { * since there will be no topics now) and expected partitions * for later verification. 
*/ - std::vector toppars; + std::vector toppars; struct exp_part_stats exp_parts[partcnt] = {}; - for (int32_t part = 0 ; part < (int32_t)partcnt ; part++) { - toppars.push_back(RdKafka::TopicPartition::create(topic, part, - RdKafka::Topic::OFFSET_BEGINNING)); - exp_parts[part].topic = topic; - exp_parts[part].part = part; - exp_parts[part].msgcnt = msgcnt / partcnt; + for (int32_t part = 0; part < (int32_t)partcnt; part++) { + toppars.push_back(RdKafka::TopicPartition::create( + topic, part, RdKafka::Topic::OFFSET_BEGINNING)); + exp_parts[part].topic = topic; + exp_parts[part].part = part; + exp_parts[part].msgcnt = msgcnt / partcnt; exp_parts[part].msgsize = msgsize; exp_parts[part].totsize = 0; } @@ -430,13 +442,12 @@ static void test_stats () { char key[256]; char *buf = (char *)malloc(msgsize); - for (int32_t part = 0 ; part < (int32_t)partcnt ; part++) { - for (int i = 0 ; i < msgcnt / partcnt ; i++) { + for (int32_t part = 0; part < (int32_t)partcnt; part++) { + for (int i = 0; i < msgcnt / partcnt; i++) { test_prepare_msg(testid, part, i, buf, msgsize, key, sizeof(key)); - RdKafka::ErrorCode err = p->produce(topic, part, - RdKafka::Producer::RK_MSG_COPY, - buf, msgsize, key, sizeof(key), - -1, NULL); + RdKafka::ErrorCode err = + p->produce(topic, part, RdKafka::Producer::RK_MSG_COPY, buf, msgsize, + key, sizeof(key), -1, NULL); if (err) Test::Fail("Produce failed: " + RdKafka::err2str(err)); exp_parts[part].totsize += msgsize + sizeof(key); @@ -448,11 +459,11 @@ static void test_stats () { Test::Say("Waiting for final message delivery\n"); /* Wait for delivery */ - p->flush(15*1000); + p->flush(15 * 1000); /* - * Start consuming partitions - */ + * Start consuming partitions + */ c->assign(toppars); RdKafka::TopicPartition::destroy(toppars); @@ -490,14 +501,14 @@ static void test_stats () { */ prev_cnt = consumer_event.stats_cnt; while (prev_cnt + 2 >= consumer_event.stats_cnt) { - Test::Say(tostr() << "Waiting for final consumer stats event: " << - 
consumer_event.stats_cnt << "\n"); + Test::Say(tostr() << "Waiting for final consumer stats event: " + << consumer_event.stats_cnt << "\n"); c->poll(100); } - verify_e2e_stats(producer_event.last, consumer_event.last, - exp_parts, partcnt); + verify_e2e_stats(producer_event.last, consumer_event.last, exp_parts, + partcnt); c->close(); @@ -508,17 +519,17 @@ static void test_stats () { #endif extern "C" { - int main_0053_stats_timing (int argc, char **argv) { - test_stats_timing(); - return 0; - } +int main_0053_stats_timing(int argc, char **argv) { + test_stats_timing(); + return 0; +} - int main_0053_stats (int argc, char **argv) { +int main_0053_stats(int argc, char **argv) { #if WITH_RAPIDJSON - test_stats(); + test_stats(); #else - Test::Skip("RapidJSON >=1.1.0 not available\n"); + Test::Skip("RapidJSON >=1.1.0 not available\n"); #endif - return 0; - } + return 0; +} } diff --git a/tests/0054-offset_time.cpp b/tests/0054-offset_time.cpp index 55b9dbecde..58c88b4a13 100644 --- a/tests/0054-offset_time.cpp +++ b/tests/0054-offset_time.cpp @@ -34,23 +34,24 @@ */ -static int verify_offset (const RdKafka::TopicPartition *tp, - int64_t timestamp, int64_t exp_offset, - RdKafka::ErrorCode exp_err) { +static int verify_offset(const RdKafka::TopicPartition *tp, + int64_t timestamp, + int64_t exp_offset, + RdKafka::ErrorCode exp_err) { int fails = 0; if (tp->err() != exp_err) { - Test::FailLater(tostr() << " " << tp->topic() << - " [" << tp->partition() << "] " << - "expected error " << RdKafka::err2str(exp_err) << ", got " << - RdKafka::err2str(tp->err()) << "\n"); + Test::FailLater(tostr() + << " " << tp->topic() << " [" << tp->partition() << "] " + << "expected error " << RdKafka::err2str(exp_err) + << ", got " << RdKafka::err2str(tp->err()) << "\n"); fails++; } if (!exp_err && tp->offset() != exp_offset) { - Test::FailLater(tostr() << " " << tp->topic() << - " [" << tp->partition() << "] " << - "expected offset " << exp_offset << " for timestamp " << - timestamp << ", 
got " << tp->offset() << "\n"); + Test::FailLater(tostr() + << " " << tp->topic() << " [" << tp->partition() << "] " + << "expected offset " << exp_offset << " for timestamp " + << timestamp << ", got " << tp->offset() << "\n"); fails++; } @@ -58,17 +59,19 @@ static int verify_offset (const RdKafka::TopicPartition *tp, } -static void test_offset_time (void) { - std::vector query_parts; +static void test_offset_time(void) { + std::vector query_parts; std::string topic = Test::mk_topic_name("0054-offset_time", 1); RdKafka::Conf *conf, *tconf; int64_t timestamps[] = { - /* timestamp, expected offset */ - 1234, 0, - 999999999999, 1, + /* timestamp, expected offset */ + 1234, + 0, + 999999999999, + 1, }; const int timestamp_cnt = 2; - int fails = 0; + int fails = 0; std::string errstr; Test::conf_init(&conf, &tconf, 0); @@ -84,9 +87,12 @@ static void test_offset_time (void) { if (!p) Test::Fail("Failed to create Producer: " + errstr); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 97, timestamps[0])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 98, timestamps[0])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 99, timestamps[0])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 97, timestamps[0])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 98, timestamps[0])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 99, timestamps[0])); /* First query timestamps before topic exists, should fail. 
*/ Test::Say("Attempting first offsetsForTimes() query (should fail)\n"); @@ -96,12 +102,14 @@ static void test_offset_time (void) { Test::print_TopicPartitions("offsetsForTimes #1", query_parts); if (err != RdKafka::ERR__UNKNOWN_PARTITION) - Test::Fail("offsetsForTimes #1 should have failed with UNKNOWN_PARTITION, " - "not " + RdKafka::err2str(err)); + Test::Fail( + "offsetsForTimes #1 should have failed with UNKNOWN_PARTITION, " + "not " + + RdKafka::err2str(err)); Test::Say("Producing to " + topic + "\n"); - for (int partition = 0 ; partition < 2 ; partition++) { - for (int ti = 0 ; ti < timestamp_cnt*2 ; ti += 2) { + for (int partition = 0; partition < 2; partition++) { + for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) { err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, (void *)topic.c_str(), topic.size(), NULL, 0, timestamps[ti], NULL); @@ -114,74 +122,90 @@ static void test_offset_time (void) { Test::Fail("Not all messages flushed"); - for (int ti = 0 ; ti < timestamp_cnt*2 ; ti += 2) { + for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) { RdKafka::TopicPartition::destroy(query_parts); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); - Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " << timestamps[ti] << "\n"); + Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " + << timestamps[ti] << "\n"); err = p->offsetsForTimes(query_parts, tmout_multip(5000)); Test::print_TopicPartitions("offsetsForTimes", query_parts); if (err != RdKafka::ERR_NO_ERROR) Test::Fail("offsetsForTimes failed: " + RdKafka::err2str(err)); - fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti+1], RdKafka::ERR_NO_ERROR); - fails += 
verify_offset(query_parts[1], timestamps[ti], timestamps[ti+1], RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1], + RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1], + RdKafka::ERR_NO_ERROR); } - /* repeat test with -1 timeout */ - for (int ti = 0 ; ti < timestamp_cnt*2 ; ti += 2) { + /* repeat test with -1 timeout */ + for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) { RdKafka::TopicPartition::destroy(query_parts); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); - Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " << timestamps[ti] << " with a timeout of -1\n"); + Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " + << timestamps[ti] << " with a timeout of -1\n"); err = p->offsetsForTimes(query_parts, -1); Test::print_TopicPartitions("offsetsForTimes", query_parts); if (err != RdKafka::ERR_NO_ERROR) Test::Fail("offsetsForTimes failed: " + RdKafka::err2str(err)); - fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti+1], RdKafka::ERR_NO_ERROR); - fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti+1], RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1], + RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1], + RdKafka::ERR_NO_ERROR); } /* And a negative test with a request that should timeout instantly. 
*/ - for (int ti = 0 ; ti < timestamp_cnt*2 ; ti += 2) { + for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) { RdKafka::TopicPartition::destroy(query_parts); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); - - Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " << timestamps[ti] << " with minimal timeout (should fail)\n"); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); + + Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " + << timestamps[ti] + << " with minimal timeout (should fail)\n"); err = p->offsetsForTimes(query_parts, 0); Test::print_TopicPartitions("offsetsForTimes", query_parts); if (err != RdKafka::ERR__TIMED_OUT) - Test::Fail("expected offsetsForTimes(timeout=0) to fail with TIMED_OUT, not " + RdKafka::err2str(err)); + Test::Fail( + "expected offsetsForTimes(timeout=0) to fail with TIMED_OUT, not " + + RdKafka::err2str(err)); } /* Include non-existent partitions */ - for (int ti = 0 ; ti < timestamp_cnt*2 ; ti += 2) { + for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) { RdKafka::TopicPartition::destroy(query_parts); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 0, - timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 1, - timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 2, - timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 20, - timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 3, - timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 21, - timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 1, 
timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 2, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 20, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 3, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 21, timestamps[ti])); Test::Say("Attempting offsetsForTimes() with non-existent partitions\n"); err = p->offsetsForTimes(query_parts, -1); Test::print_TopicPartitions("offsetsForTimes", query_parts); if (err != RdKafka::ERR_NO_ERROR) Test::Fail("expected offsetsForTimes(timeout=0) to succeed, not " + RdKafka::err2str(err)); - fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti+1], + fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1], RdKafka::ERR_NO_ERROR); - fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti+1], + fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1], RdKafka::ERR_NO_ERROR); fails += verify_offset(query_parts[2], timestamps[ti], -1, RdKafka::ERR_NO_ERROR); @@ -205,8 +229,8 @@ static void test_offset_time (void) { } extern "C" { - int main_0054_offset_time (int argc, char **argv) { - test_offset_time(); - return 0; - } +int main_0054_offset_time(int argc, char **argv) { + test_offset_time(); + return 0; +} } diff --git a/tests/0055-producer_latency.c b/tests/0055-producer_latency.c index ec1363d382..2759e098f9 100644 --- a/tests/0055-producer_latency.c +++ b/tests/0055-producer_latency.c @@ -34,30 +34,30 @@ struct latconf { const char *name; const char *conf[16]; - int min; /* Minimum expected latency */ - int max; /* Maximum expected latency */ + int min; /* Minimum expected latency */ + int max; /* Maximum expected latency */ - float rtt; /* Network+broker latency */ + float rtt; /* Network+broker latency */ - char linger_ms_conf[32]; /**< Read back to show actual value */ + char linger_ms_conf[32]; /**< Read back to show actual 
value */ /* Result vector */ float latency[_MSG_COUNT]; float sum; - int cnt; + int cnt; }; -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { struct latconf *latconf = opaque; - int64_t *ts_send = (int64_t *)rkmessage->_private; + int64_t *ts_send = (int64_t *)rkmessage->_private; float delivery_time; if (rkmessage->err) - TEST_FAIL("%s: delivery failed: %s\n", - latconf->name, rd_kafka_err2str(rkmessage->err)); + TEST_FAIL("%s: delivery failed: %s\n", latconf->name, + rd_kafka_err2str(rkmessage->err)); if (!rkmessage->_private) return; /* Priming message, ignore. */ @@ -68,19 +68,19 @@ static void dr_msg_cb (rd_kafka_t *rk, TEST_ASSERT(latconf->cnt < _MSG_COUNT, ""); - TEST_SAY("%s: Message %d delivered in %.3fms\n", - latconf->name, latconf->cnt, delivery_time); + TEST_SAY("%s: Message %d delivered in %.3fms\n", latconf->name, + latconf->cnt, delivery_time); latconf->latency[latconf->cnt++] = delivery_time; latconf->sum += delivery_time; } -static int verify_latency (struct latconf *latconf) { +static int verify_latency(struct latconf *latconf) { float avg; int fails = 0; - double ext_overhead = latconf->rtt + - 5.0 /* broker ProduceRequest handling time, maybe */; + double ext_overhead = + latconf->rtt + 5.0 /* broker ProduceRequest handling time, maybe */; ext_overhead *= test_timeout_multiplier; @@ -91,17 +91,18 @@ static int verify_latency (struct latconf *latconf) { if (avg < (float)latconf->min || avg > (float)latconf->max + ext_overhead) { - TEST_FAIL_LATER("%s: average latency %.3fms is " - "outside range %d..%d +%.0fms", - latconf->name, avg, latconf->min, latconf->max, - ext_overhead); + TEST_FAIL_LATER( + "%s: average latency %.3fms is " + "outside range %d..%d +%.0fms", + latconf->name, avg, latconf->min, latconf->max, + ext_overhead); fails++; } return fails; } -static void measure_rtt (struct latconf *latconf, 
rd_kafka_t *rk) { +static void measure_rtt(struct latconf *latconf, rd_kafka_t *rk) { rd_kafka_resp_err_t err; const struct rd_kafka_metadata *md; int64_t ts = test_clock(); @@ -110,13 +111,12 @@ static void measure_rtt (struct latconf *latconf, rd_kafka_t *rk) { TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); latconf->rtt = (float)(test_clock() - ts) / 1000.0f; - TEST_SAY("%s: broker base RTT is %.3fms\n", - latconf->name, latconf->rtt); + TEST_SAY("%s: broker base RTT is %.3fms\n", latconf->name, + latconf->rtt); rd_kafka_metadata_destroy(md); } -static int test_producer_latency (const char *topic, - struct latconf *latconf) { +static int test_producer_latency(const char *topic, struct latconf *latconf) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_resp_err_t err; @@ -129,10 +129,10 @@ static int test_producer_latency (const char *topic, rd_kafka_conf_set_opaque(conf, latconf); TEST_SAY(_C_BLU "[%s: begin]\n" _C_CLR, latconf->name); - for (i = 0 ; latconf->conf[i] ; i += 2) { - TEST_SAY("%s: set conf %s = %s\n", - latconf->name, latconf->conf[i], latconf->conf[i+1]); - test_conf_set(conf, latconf->conf[i], latconf->conf[i+1]); + for (i = 0; latconf->conf[i]; i += 2) { + TEST_SAY("%s: set conf %s = %s\n", latconf->name, + latconf->conf[i], latconf->conf[i + 1]); + test_conf_set(conf, latconf->conf[i], latconf->conf[i + 1]); } sz = sizeof(latconf->linger_ms_conf); @@ -143,15 +143,13 @@ static int test_producer_latency (const char *topic, TEST_SAY("%s: priming producer\n", latconf->name); /* Send a priming message to make sure everything is up * and functional before starting measurements */ - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("priming", 7), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_END); + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("priming", 7), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); if (err) - 
TEST_FAIL("%s: priming producev failed: %s", - latconf->name, rd_kafka_err2str(err)); + TEST_FAIL("%s: priming producev failed: %s", latconf->name, + rd_kafka_err2str(err)); /* Await delivery */ rd_kafka_flush(rk, tmout_multip(5000)); @@ -160,22 +158,20 @@ static int test_producer_latency (const char *topic, measure_rtt(latconf, rk); TEST_SAY("%s: producing %d messages\n", latconf->name, _MSG_COUNT); - for (i = 0 ; i < _MSG_COUNT ; i++) { + for (i = 0; i < _MSG_COUNT; i++) { int64_t *ts_send; - ts_send = malloc(sizeof(*ts_send)); + ts_send = malloc(sizeof(*ts_send)); *ts_send = test_clock(); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(ts_send), - RD_KAFKA_V_END); + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_OPAQUE(ts_send), RD_KAFKA_V_END); if (err) - TEST_FAIL("%s: producev #%d failed: %s", - latconf->name, i, rd_kafka_err2str(err)); + TEST_FAIL("%s: producev #%d failed: %s", latconf->name, + i, rd_kafka_err2str(err)); /* Await delivery */ rd_kafka_poll(rk, 5000); @@ -187,48 +183,56 @@ static int test_producer_latency (const char *topic, } -static float find_min (const struct latconf *latconf) { +static float find_min(const struct latconf *latconf) { int i; float v = 1000000; - for (i = 0 ; i < latconf->cnt ; i++) + for (i = 0; i < latconf->cnt; i++) if (latconf->latency[i] < v) v = latconf->latency[i]; return v; } -static float find_max (const struct latconf *latconf) { +static float find_max(const struct latconf *latconf) { int i; float v = 0; - for (i = 0 ; i < latconf->cnt ; i++) + for (i = 0; i < latconf->cnt; i++) if (latconf->latency[i] > v) v = latconf->latency[i]; return v; } -int main_0055_producer_latency (int argc, char **argv) { +int main_0055_producer_latency(int argc, char **argv) { struct latconf 
latconfs[] = { - { "standard settings", {NULL}, 5, 5 }, /* default is now 5ms */ - { "low queue.buffering.max.ms", - {"queue.buffering.max.ms", "0", NULL}, 0, 0 }, - { "microsecond queue.buffering.max.ms", - {"queue.buffering.max.ms", "0.001", NULL}, 0, 1 }, - { "high queue.buffering.max.ms", - {"queue.buffering.max.ms", "3000", NULL}, 3000, 3100}, - { "queue.buffering.max.ms < 1000", /* internal block_max_ms */ - {"queue.buffering.max.ms", "500", NULL}, 500, 600 }, - { "no acks", - {"queue.buffering.max.ms", "0", - "acks", "0", - "enable.idempotence", "false", NULL}, 0, 0 }, - { NULL } - }; + {"standard settings", {NULL}, 5, 5}, /* default is now 5ms */ + {"low queue.buffering.max.ms", + {"queue.buffering.max.ms", "0", NULL}, + 0, + 0}, + {"microsecond queue.buffering.max.ms", + {"queue.buffering.max.ms", "0.001", NULL}, + 0, + 1}, + {"high queue.buffering.max.ms", + {"queue.buffering.max.ms", "3000", NULL}, + 3000, + 3100}, + {"queue.buffering.max.ms < 1000", /* internal block_max_ms */ + {"queue.buffering.max.ms", "500", NULL}, + 500, + 600}, + {"no acks", + {"queue.buffering.max.ms", "0", "acks", "0", "enable.idempotence", + "false", NULL}, + 0, + 0}, + {NULL}}; struct latconf *latconf; const char *topic = test_mk_topic_name("0055_producer_latency", 0); - int fails = 0; + int fails = 0; if (test_on_ci) { TEST_SKIP("Latency measurements not reliable on CI\n"); @@ -238,25 +242,22 @@ int main_0055_producer_latency (int argc, char **argv) { /* Create topic without replicas to keep broker-side latency down */ test_create_topic(NULL, topic, 4, 1); - for (latconf = latconfs ; latconf->name ; latconf++) + for (latconf = latconfs; latconf->name; latconf++) fails += test_producer_latency(topic, latconf); if (fails) TEST_FAIL("See %d previous failure(s)", fails); TEST_SAY(_C_YEL "Latency tests summary:\n" _C_CLR); - TEST_SAY("%-40s %9s %6s..%-6s %7s %9s %9s %9s\n", - "Name", "linger.ms", - "MinExp", "MaxExp", "RTT", "Min", "Average", "Max"); + TEST_SAY("%-40s %9s 
%6s..%-6s %7s %9s %9s %9s\n", "Name", + "linger.ms", "MinExp", "MaxExp", "RTT", "Min", "Average", + "Max"); - for (latconf = latconfs ; latconf->name ; latconf++) + for (latconf = latconfs; latconf->name; latconf++) TEST_SAY("%-40s %9s %6d..%-6d %7g %9g %9g %9g\n", - latconf->name, latconf->linger_ms_conf, - latconf->min, latconf->max, - latconf->rtt, - find_min(latconf), - latconf->sum / latconf->cnt, - find_max(latconf)); + latconf->name, latconf->linger_ms_conf, latconf->min, + latconf->max, latconf->rtt, find_min(latconf), + latconf->sum / latconf->cnt, find_max(latconf)); return 0; } diff --git a/tests/0056-balanced_group_mt.c b/tests/0056-balanced_group_mt.c index 3ba1eae4e7..e6205ddb63 100644 --- a/tests/0056-balanced_group_mt.c +++ b/tests/0056-balanced_group_mt.c @@ -41,8 +41,8 @@ #define MAX_THRD_CNT 4 -static int assign_cnt = 0; -static int consumed_msg_cnt = 0; +static int assign_cnt = 0; +static int consumed_msg_cnt = 0; static int consumers_running = 0; static int exp_msg_cnt; @@ -50,11 +50,11 @@ static mtx_t lock; static thrd_t tids[MAX_THRD_CNT]; typedef struct part_consume_info_s { - rd_kafka_queue_t * rkqu; + rd_kafka_queue_t *rkqu; int partition; } part_consume_info_t; -static int is_consuming () { +static int is_consuming() { int result; mtx_lock(&lock); result = consumers_running; @@ -62,13 +62,13 @@ static int is_consuming () { return result; } -static int partition_consume (void *args) { +static int partition_consume(void *args) { part_consume_info_t *info = (part_consume_info_t *)args; - rd_kafka_queue_t *rkqu = info->rkqu; - int partition = info->partition; - int64_t ts_start = test_clock(); - int max_time = (test_session_timeout_ms + 3000) * 1000; - int running = 1; + rd_kafka_queue_t *rkqu = info->rkqu; + int partition = info->partition; + int64_t ts_start = test_clock(); + int max_time = (test_session_timeout_ms + 3000) * 1000; + int running = 1; free(args); /* Free the parameter struct dynamically allocated for us */ @@ -84,19 +84,22 @@ 
static int partition_consume (void *args) { running = 0; else if (rkmsg->err) { mtx_lock(&lock); - TEST_FAIL("Message error " - "(at offset %" PRId64 " after " - "%d/%d messages and %dms): %s", - rkmsg->offset, consumed_msg_cnt, exp_msg_cnt, - (int)(test_clock() - ts_start) / 1000, - rd_kafka_message_errstr(rkmsg)); + TEST_FAIL( + "Message error " + "(at offset %" PRId64 + " after " + "%d/%d messages and %dms): %s", + rkmsg->offset, consumed_msg_cnt, exp_msg_cnt, + (int)(test_clock() - ts_start) / 1000, + rd_kafka_message_errstr(rkmsg)); mtx_unlock(&lock); } else { if (rkmsg->partition != partition) { mtx_lock(&lock); - TEST_FAIL("Message consumed has partition %d " - "but we expected partition %d.", - rkmsg->partition, partition); + TEST_FAIL( + "Message consumed has partition %d " + "but we expected partition %d.", + rkmsg->partition, partition); mtx_unlock(&lock); } } @@ -115,11 +118,11 @@ static int partition_consume (void *args) { return thrd_success; } -static thrd_t spawn_thread (rd_kafka_queue_t *rkqu, int partition) { +static thrd_t spawn_thread(rd_kafka_queue_t *rkqu, int partition) { thrd_t thr; part_consume_info_t *info = malloc(sizeof(part_consume_info_t)); - info->rkqu = rkqu; + info->rkqu = rkqu; info->partition = partition; if (thrd_create(&thr, &partition_consume, info) != thrd_success) { @@ -130,7 +133,8 @@ static thrd_t spawn_thread (rd_kafka_queue_t *rkqu, int partition) { static int rebalanced = 0; -static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque) { int i; @@ -161,8 +165,8 @@ static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, part.partition); rd_kafka_queue_forward(rkqu, NULL); - tids[part.partition] = spawn_thread(rkqu, - part.partition); + tids[part.partition] = + spawn_thread(rkqu, part.partition); } rebalanced = 1; @@ -186,7 +190,7 @@ static void rebalance_cb (rd_kafka_t *rk, 
rd_kafka_resp_err_t err, } } -static void get_assignment (rd_kafka_t *rk_c) { +static void get_assignment(rd_kafka_t *rk_c) { while (!rebalanced) { rd_kafka_message_t *rkmsg; rkmsg = rd_kafka_consumer_poll(rk_c, 500); @@ -195,12 +199,12 @@ static void get_assignment (rd_kafka_t *rk_c) { } } -int main_0056_balanced_group_mt (int argc, char **argv) { +int main_0056_balanced_group_mt(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__, 1); rd_kafka_t *rk_p, *rk_c; rd_kafka_topic_t *rkt_p; - int msg_cnt = test_quick ? 100 : 1000; - int msg_base = 0; + int msg_cnt = test_quick ? 100 : 1000; + int msg_base = 0; int partition_cnt = 2; int partition; uint64_t testid; @@ -216,7 +220,7 @@ int main_0056_balanced_group_mt (int argc, char **argv) { testid = test_id_generate(); /* Produce messages */ - rk_p = test_create_producer(); + rk_p = test_create_producer(); rkt_p = test_create_producer_topic(rk_p, topic, NULL); for (partition = 0; partition < partition_cnt; partition++) { @@ -244,9 +248,8 @@ int main_0056_balanced_group_mt (int argc, char **argv) { rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA); /* Create consumers and start subscription */ - rk_c = test_create_consumer( - topic /*group_id*/, rebalance_cb, - conf, default_topic_conf); + rk_c = test_create_consumer(topic /*group_id*/, rebalance_cb, conf, + default_topic_conf); test_consumer_subscribe(rk_c, topic); @@ -297,9 +300,10 @@ int main_0056_balanced_group_mt (int argc, char **argv) { exp_msg_cnt); if (consumed_msg_cnt > exp_msg_cnt) - TEST_SAY("At least %d/%d messages were consumed " - "multiple times\n", - consumed_msg_cnt - exp_msg_cnt, exp_msg_cnt); + TEST_SAY( + "At least %d/%d messages were consumed " + "multiple times\n", + consumed_msg_cnt - exp_msg_cnt, exp_msg_cnt); mtx_destroy(&lock); diff --git a/tests/0057-invalid_topic.cpp b/tests/0057-invalid_topic.cpp index d95ada65c3..0b50b40ad7 100644 --- a/tests/0057-invalid_topic.cpp +++ 
b/tests/0057-invalid_topic.cpp @@ -38,26 +38,27 @@ -#define check_err(ERR,EXP) do { \ - if ((ERR) != (EXP)) \ - Test::Fail(tostr() << __FUNCTION__ << ":" << __LINE__ << ": " << \ - "Expected " << RdKafka::err2str(EXP) << ", got " << \ - RdKafka::err2str(ERR)); \ +#define check_err(ERR, EXP) \ + do { \ + if ((ERR) != (EXP)) \ + Test::Fail(tostr() << __FUNCTION__ << ":" << __LINE__ << ": " \ + << "Expected " << RdKafka::err2str(EXP) << ", got " \ + << RdKafka::err2str(ERR)); \ } while (0) class DrCb0057 : public RdKafka::DeliveryReportCb { public: - void dr_cb (RdKafka::Message &msg) { + void dr_cb(RdKafka::Message &msg) { std::string val((const char *)msg.payload()); - Test::Say(tostr() << "DeliveryReport for " << val << " message on " << - msg.topic_name() << " [" << msg.partition() << "]: " << - msg.errstr() << "\n"); + Test::Say(tostr() << "DeliveryReport for " << val << " message on " + << msg.topic_name() << " [" << msg.partition() + << "]: " << msg.errstr() << "\n"); if (val == "good") check_err(msg.err(), RdKafka::ERR_NO_ERROR); else if (val == "bad") { - if (test_broker_version >= TEST_BRKVER(0,8,2,2)) + if (test_broker_version >= TEST_BRKVER(0, 8, 2, 2)) check_err(msg.err(), RdKafka::ERR_TOPIC_EXCEPTION); else check_err(msg.err(), RdKafka::ERR_UNKNOWN); @@ -65,9 +66,9 @@ class DrCb0057 : public RdKafka::DeliveryReportCb { } }; -static void test_invalid_topic (void) { - std::string topic_bad = Test::mk_topic_name("0057-invalid_topic$#!", 1); - std::string topic_good =Test::mk_topic_name("0057-invalid_topic_good", 1); +static void test_invalid_topic(void) { + std::string topic_bad = Test::mk_topic_name("0057-invalid_topic$#!", 1); + std::string topic_good = Test::mk_topic_name("0057-invalid_topic_good", 1); RdKafka::Conf *conf; std::string errstr; @@ -82,15 +83,13 @@ static void test_invalid_topic (void) { RdKafka::ErrorCode err; - for (int i = -1 ; i < 3 ; i++) { - err = p->produce(topic_bad, i, - RdKafka::Producer::RK_MSG_COPY, + for (int i = -1; i < 3; 
i++) { + err = p->produce(topic_bad, i, RdKafka::Producer::RK_MSG_COPY, (void *)"bad", 4, NULL, 0, 0, NULL); if (err) /* Error is probably delayed until delivery report */ check_err(err, RdKafka::ERR_TOPIC_EXCEPTION); - err = p->produce(topic_good, i, - RdKafka::Producer::RK_MSG_COPY, + err = p->produce(topic_good, i, RdKafka::Producer::RK_MSG_COPY, (void *)"good", 5, NULL, 0, 0, NULL); check_err(err, RdKafka::ERR_NO_ERROR); } @@ -98,17 +97,16 @@ static void test_invalid_topic (void) { p->flush(tmout_multip(10000)); if (p->outq_len() > 0) - Test::Fail(tostr() << "Expected producer to be flushed, " << - p->outq_len() << " messages remain"); + Test::Fail(tostr() << "Expected producer to be flushed, " << p->outq_len() + << " messages remain"); delete p; delete conf; - } extern "C" { - int main_0057_invalid_topic (int argc, char **argv) { - test_invalid_topic(); - return 0; - } +int main_0057_invalid_topic(int argc, char **argv) { + test_invalid_topic(); + return 0; +} } diff --git a/tests/0058-log.cpp b/tests/0058-log.cpp index 803a907175..4da46e7f76 100644 --- a/tests/0058-log.cpp +++ b/tests/0058-log.cpp @@ -30,95 +30,94 @@ #include "testcpp.h" - /** - * @brief Test log callbacks and log queues - */ +/** + * @brief Test log callbacks and log queues + */ class myLogCb : public RdKafka::EventCb { -private: - enum { - _EXP_NONE, - _EXP_LOG - } state_; - int cnt_; -public: - myLogCb (): state_(_EXP_NONE), cnt_(0) {} - void expecting (bool b) { - state_ = b ? 
_EXP_LOG : _EXP_NONE; - } - int count () { - return cnt_; - } - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_LOG: - cnt_++; - Test::Say(tostr() << "Log: " << - "level " << event.severity() << - ", facility " << event.fac() << - ", str " << event.str() << "\n"); - if (state_ != _EXP_LOG) - Test::Fail("Received unexpected " - "log message"); - break; - default: - break; - } - } + private: + enum { _EXP_NONE, _EXP_LOG } state_; + int cnt_; + + public: + myLogCb() : state_(_EXP_NONE), cnt_(0) { + } + void expecting(bool b) { + state_ = b ? _EXP_LOG : _EXP_NONE; + } + int count() { + return cnt_; + } + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_LOG: + cnt_++; + Test::Say(tostr() << "Log: " + << "level " << event.severity() << ", facility " + << event.fac() << ", str " << event.str() << "\n"); + if (state_ != _EXP_LOG) + Test::Fail( + "Received unexpected " + "log message"); + break; + default: + break; + } + } }; -static void test_log (std::string what, bool main_queue) { - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - myLogCb my_log; - std::string errstr; +static void test_log(std::string what, bool main_queue) { + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + myLogCb my_log; + std::string errstr; - Test::conf_set(conf, "client.id", test_curr_name()); - Test::conf_set(conf, "debug", "generic"); // generate some logs - Test::conf_set(conf, "log.queue", "true"); + Test::conf_set(conf, "client.id", test_curr_name()); + Test::conf_set(conf, "debug", "generic"); // generate some logs + Test::conf_set(conf, "log.queue", "true"); - if (conf->set("event_cb", &my_log, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + if (conf->set("event_cb", &my_log, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail(errstr); - Test::Say(what + "Creating producer, not expecting any log messages\n"); - my_log.expecting(false); - 
RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); - if (!p) - Test::Fail(what + "Failed to create Producer: " + errstr); - delete conf; + Test::Say(what + "Creating producer, not expecting any log messages\n"); + my_log.expecting(false); + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail(what + "Failed to create Producer: " + errstr); + delete conf; - RdKafka::Queue *queue = NULL; - if (!main_queue) { - queue = RdKafka::Queue::create(p); - queue->poll(1000); - } else { - p->poll(1000); - } + RdKafka::Queue *queue = NULL; + if (!main_queue) { + queue = RdKafka::Queue::create(p); + queue->poll(1000); + } else { + p->poll(1000); + } - Test::Say(what + "Setting log queue\n"); - p->set_log_queue(queue); /* Redirect logs to main queue */ + Test::Say(what + "Setting log queue\n"); + p->set_log_queue(queue); /* Redirect logs to main queue */ - Test::Say(what + "Expecting at least one log message\n"); - my_log.expecting(true); - if (queue) - queue->poll(1000); - else - p->poll(1000); /* Should not spontaneously call logs */ + Test::Say(what + "Expecting at least one log message\n"); + my_log.expecting(true); + if (queue) + queue->poll(1000); + else + p->poll(1000); /* Should not spontaneously call logs */ - Test::Say(tostr() << what << "Saw " << my_log.count() << " logs\n"); - if (my_log.count() < 1) - Test::Fail(what + "No logs seen: expected at least one broker " - "failure"); + Test::Say(tostr() << what << "Saw " << my_log.count() << " logs\n"); + if (my_log.count() < 1) + Test::Fail(what + + "No logs seen: expected at least one broker " + "failure"); - if (queue) - delete queue; - delete(p); + if (queue) + delete queue; + delete (p); } extern "C" { - int main_0058_log (int argc, char **argv) { - test_log("main.queue: ", true); - test_log("local.queue: ", false); - return 0; - } +int main_0058_log(int argc, char **argv) { + test_log("main.queue: ", true); + test_log("local.queue: ", false); + return 0; +} } diff --git 
a/tests/0059-bsearch.cpp b/tests/0059-bsearch.cpp index 20f598efef..67508ff824 100644 --- a/tests/0059-bsearch.cpp +++ b/tests/0059-bsearch.cpp @@ -35,19 +35,20 @@ static std::string topic; -static const int partition = 0; +static const int partition = 0; static int64_t golden_timestamp = -1; -static int64_t golden_offset = -1; +static int64_t golden_offset = -1; /** * @brief Seek to offset and consume that message. * * Asserts on failure. */ -static RdKafka::Message *get_msg (RdKafka::KafkaConsumer *c, int64_t offset, - bool use_seek) { +static RdKafka::Message *get_msg(RdKafka::KafkaConsumer *c, + int64_t offset, + bool use_seek) { RdKafka::TopicPartition *next = - RdKafka::TopicPartition::create(topic, partition, offset); + RdKafka::TopicPartition::create(topic, partition, offset); RdKafka::ErrorCode err; /* Since seek() can only be used to change the currently consumed @@ -57,7 +58,7 @@ static RdKafka::Message *get_msg (RdKafka::KafkaConsumer *c, int64_t offset, test_timing_t t_seek; TIMING_START(&t_seek, "seek"); if (!use_seek) { - std::vector parts; + std::vector parts; parts.push_back(next); err = c->assign(parts); if (err) @@ -82,15 +83,15 @@ static RdKafka::Message *get_msg (RdKafka::KafkaConsumer *c, int64_t offset, Test::Fail("consume() returned error: " + msg->errstr()); if (msg->offset() != offset) - Test::Fail(tostr() << "seek()ed to offset " << offset << - " but consume() returned offset " << msg->offset()); + Test::Fail(tostr() << "seek()ed to offset " << offset + << " but consume() returned offset " << msg->offset()); return msg; } class MyDeliveryReportCb : public RdKafka::DeliveryReportCb { public: - void dr_cb (RdKafka::Message &msg) { + void dr_cb(RdKafka::Message &msg) { if (msg.err()) Test::Fail("Delivery failed: " + msg.errstr()); @@ -102,11 +103,11 @@ class MyDeliveryReportCb : public RdKafka::DeliveryReportCb { Test::Fail(tostr() << "Dr msg timestamp type wrong: " << ts.type); golden_timestamp = ts.timestamp; - golden_offset = 
msg.offset(); + golden_offset = msg.offset(); } }; -static void do_test_bsearch (void) { +static void do_test_bsearch(void) { RdKafka::Conf *conf, *tconf; int msgcnt = 1000; int64_t timestamp; @@ -128,22 +129,21 @@ static void do_test_bsearch (void) { delete tconf; timestamp = 1000; - for (int i = 0 ; i < msgcnt ; i++) { + for (int i = 0; i < msgcnt; i++) { err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, - (void *)topic.c_str(), topic.size(), NULL, 0, - timestamp, + (void *)topic.c_str(), topic.size(), NULL, 0, timestamp, i == 357 ? (void *)1 /*golden*/ : NULL); - if (err != RdKafka::ERR_NO_ERROR) - Test::Fail("Produce failed: " + RdKafka::err2str(err)); - timestamp += 100 + (timestamp % 9); + if (err != RdKafka::ERR_NO_ERROR) + Test::Fail("Produce failed: " + RdKafka::err2str(err)); + timestamp += 100 + (timestamp % 9); } if (p->flush(tmout_multip(5000)) != 0) Test::Fail("Not all messages flushed"); - Test::Say(tostr() << "Produced " << msgcnt << " messages, " << - "golden message with timestamp " << golden_timestamp << - " at offset " << golden_offset << "\n"); + Test::Say(tostr() << "Produced " << msgcnt << " messages, " + << "golden message with timestamp " << golden_timestamp + << " at offset " << golden_offset << "\n"); delete p; @@ -184,8 +184,8 @@ static void do_test_bsearch (void) { mid = low + ((high - low) / 2); - Test::Say(1, tostr() << "Get message at mid point of " << low << - ".." << high << " -> " << mid << "\n"); + Test::Say(1, tostr() << "Get message at mid point of " << low << ".." 
+ << high << " -> " << mid << "\n"); RdKafka::Message *msg = get_msg(c, mid, /* use assign() on first iteration, @@ -194,25 +194,25 @@ static void do_test_bsearch (void) { RdKafka::MessageTimestamp ts = msg->timestamp(); if (ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME) - Test::Fail(tostr() << "Expected CreateTime timestamp, not " << - ts.type << " at offset " << msg->offset()); + Test::Fail(tostr() << "Expected CreateTime timestamp, not " << ts.type + << " at offset " << msg->offset()); - Test::Say(1, tostr() << "Message at offset " << msg->offset() << - " with timestamp " << ts.timestamp << "\n"); + Test::Say(1, tostr() << "Message at offset " << msg->offset() + << " with timestamp " << ts.timestamp << "\n"); if (ts.timestamp == golden_timestamp) { - Test::Say(1, tostr() << "Found golden timestamp " << ts.timestamp << - " at offset " << msg->offset() << " in " << itcnt+1 << - " iterations\n"); + Test::Say(1, tostr() << "Found golden timestamp " << ts.timestamp + << " at offset " << msg->offset() << " in " + << itcnt + 1 << " iterations\n"); delete msg; break; } if (low == high) { - Test::Fail(tostr() << "Search exhausted at offset " << msg->offset() << - " with timestamp " << ts.timestamp << - " without finding golden timestamp " << golden_timestamp << - " at offset " << golden_offset); + Test::Fail(tostr() << "Search exhausted at offset " << msg->offset() + << " with timestamp " << ts.timestamp + << " without finding golden timestamp " + << golden_timestamp << " at offset " << golden_offset); } else if (ts.timestamp < golden_timestamp) low = msg->offset() + 1; @@ -230,8 +230,8 @@ static void do_test_bsearch (void) { } extern "C" { - int main_0059_bsearch (int argc, char **argv) { - do_test_bsearch(); - return 0; - } +int main_0059_bsearch(int argc, char **argv) { + do_test_bsearch(); + return 0; +} } diff --git a/tests/0060-op_prio.cpp b/tests/0060-op_prio.cpp index b7027f6536..156b8a57a9 100644 --- a/tests/0060-op_prio.cpp +++ 
b/tests/0060-op_prio.cpp @@ -51,8 +51,8 @@ class MyCbs : public RdKafka::OffsetCommitCb, public RdKafka::EventCb { int seen_commit; int seen_stats; - void offset_commit_cb (RdKafka::ErrorCode err, - std::vector&offsets) { + void offset_commit_cb(RdKafka::ErrorCode err, + std::vector &offsets) { if (err) Test::Fail("Offset commit failed: " + RdKafka::err2str(err)); @@ -60,22 +60,21 @@ class MyCbs : public RdKafka::OffsetCommitCb, public RdKafka::EventCb { Test::Say("Got commit callback!\n"); } - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_STATS: - Test::Say("Got stats callback!\n"); - seen_stats++; - break; - default: - break; + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_STATS: + Test::Say("Got stats callback!\n"); + seen_stats++; + break; + default: + break; } } }; -static void do_test_commit_cb (void) { +static void do_test_commit_cb(void) { const int msgcnt = test_quick ? 100 : 1000; std::string errstr; RdKafka::ErrorCode err; @@ -128,8 +127,11 @@ static void do_test_commit_cb (void) { Test::Say(tostr() << "Received message #" << cnt << "\n"); if (cnt > 10) Test::Fail(tostr() << "Should've seen the " - "offset commit (" << cbs.seen_commit << ") and " - "stats callbacks (" << cbs.seen_stats << ") by now"); + "offset commit (" + << cbs.seen_commit + << ") and " + "stats callbacks (" + << cbs.seen_stats << ") by now"); /* Commit the first message to trigger the offset commit_cb */ if (cnt == 1) { @@ -154,8 +156,8 @@ static void do_test_commit_cb (void) { } extern "C" { - int main_0060_op_prio (int argc, char **argv) { - do_test_commit_cb(); - return 0; - } +int main_0060_op_prio(int argc, char **argv) { + do_test_commit_cb(); + return 0; +} } diff --git a/tests/0061-consumer_lag.cpp b/tests/0061-consumer_lag.cpp index 0c5ec1a200..7595415834 100644 --- a/tests/0061-consumer_lag.cpp +++ b/tests/0061-consumer_lag.cpp @@ -39,21 +39,21 @@ static std::string topic; 
class StatsCb : public RdKafka::EventCb { public: - int64_t calc_lag; //calculated lag - int lag_valid; // number of times lag has been valid + int64_t calc_lag; // calculated lag + int lag_valid; // number of times lag has been valid StatsCb() { - calc_lag = -1; + calc_lag = -1; lag_valid = 0; } /** * @brief Event callback */ - void event_cb (RdKafka::Event &event) { + void event_cb(RdKafka::Event &event) { if (event.type() == RdKafka::Event::EVENT_LOG) { - Test::Say(tostr() << "LOG-" << event.severity() << "-" << event.fac() << - ": " << event.str() << "\n"); + Test::Say(tostr() << "LOG-" << event.severity() << "-" << event.fac() + << ": " << event.str() << "\n"); return; } else if (event.type() != RdKafka::Event::EVENT_STATS) { Test::Say(tostr() << "Dropping event " << event.type() << "\n"); @@ -67,7 +67,8 @@ class StatsCb : public RdKafka::EventCb { Test::Say(2, "Skipping old stats with invalid consumer_lag\n"); return; /* Old stats generated before first message consumed */ } else if (consumer_lag != calc_lag) - Test::Fail(tostr() << "Stats consumer_lag " << consumer_lag << ", expected " << calc_lag << "\n"); + Test::Fail(tostr() << "Stats consumer_lag " << consumer_lag + << ", expected " << calc_lag << "\n"); else lag_valid++; } @@ -77,22 +78,20 @@ class StatsCb : public RdKafka::EventCb { * @brief Naiive JSON parsing, find the consumer_lag for partition 0 * and return it. 
*/ - static int64_t parse_json (const char *json_doc) { + static int64_t parse_json(const char *json_doc) { const std::string match_topic(std::string("\"") + topic + "\":"); - const char *search[] = { "\"topics\":", - match_topic.c_str(), - "\"partitions\":", - "\"0\":", - "\"consumer_lag_stored\":", - NULL }; + const char *search[] = { + "\"topics\":", match_topic.c_str(), "\"partitions\":", + "\"0\":", "\"consumer_lag_stored\":", NULL}; const char *remain = json_doc; - for (const char **sp = search ; *sp ; sp++) { + for (const char **sp = search; *sp; sp++) { const char *t = strstr(remain, *sp); if (!t) - Test::Fail(tostr() << "Couldnt find " << *sp << - " in remaining stats output:\n" << remain << - "\n====================\n" << json_doc << "\n"); + Test::Fail(tostr() << "Couldnt find " << *sp + << " in remaining stats output:\n" + << remain << "\n====================\n" + << json_doc << "\n"); remain = t + strlen(*sp); } @@ -115,14 +114,15 @@ class StatsCb : public RdKafka::EventCb { /** * @brief Produce \p msgcnt in a transaction that is aborted. 
*/ -static void produce_aborted_txns (const std::string &topic, - int32_t partition, int msgcnt) { +static void produce_aborted_txns(const std::string &topic, + int32_t partition, + int msgcnt) { RdKafka::Producer *p; RdKafka::Conf *conf; RdKafka::Error *error; - Test::Say(tostr() << "Producing " << msgcnt << " transactional messages " << - "which will be aborted\n"); + Test::Say(tostr() << "Producing " << msgcnt << " transactional messages " + << "which will be aborted\n"); Test::conf_init(&conf, NULL, 0); Test::conf_set(conf, "transactional.id", "txn_id_" + topic); @@ -141,13 +141,11 @@ static void produce_aborted_txns (const std::string &topic, if (error) Test::Fail("begin_transaction() failed: " + error->str()); - for (int i = 0 ; i < msgcnt ; i++) { + for (int i = 0; i < msgcnt; i++) { RdKafka::ErrorCode err; - err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, - &i, sizeof(i), - NULL, 0, - 0, NULL); + err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, &i, + sizeof(i), NULL, 0, 0, NULL); if (err) Test::Fail("produce() failed: " + RdKafka::err2str(err)); } @@ -168,14 +166,14 @@ static void produce_aborted_txns (const std::string &topic, } -static void do_test_consumer_lag (bool with_txns) { - int msgcnt = test_quick ? 5 : 10; +static void do_test_consumer_lag(bool with_txns) { + int msgcnt = test_quick ? 5 : 10; int txn_msgcnt = 3; - int addcnt = 0; + int addcnt = 0; std::string errstr; RdKafka::ErrorCode err; - SUB_TEST("Test consumer lag %s transactions", with_txns ? "with":"without"); + SUB_TEST("Test consumer lag %s transactions", with_txns ? 
"with" : "without"); topic = Test::mk_topic_name("0061-consumer_lag", 1); @@ -210,7 +208,7 @@ static void do_test_consumer_lag (bool with_txns) { delete conf; /* Assign partitions */ - std::vector parts; + std::vector parts; parts.push_back(RdKafka::TopicPartition::create(topic, 0)); if ((err = c->assign(parts))) Test::Fail("assign failed: " + RdKafka::err2str(err)); @@ -222,42 +220,42 @@ static void do_test_consumer_lag (bool with_txns) { while (cnt < msgcnt + addcnt) { RdKafka::Message *msg = c->consume(1000); - switch (msg->err()) - { - case RdKafka::ERR__TIMED_OUT: - if (with_txns && cnt >= msgcnt && stats.calc_lag == 0) - addcnt = 0; /* done */ - break; - case RdKafka::ERR__PARTITION_EOF: - Test::Fail(tostr() << "Unexpected PARTITION_EOF (not enbaled) after " - << cnt << "/" << msgcnt << " messages: " << msg->errstr()); - break; - - case RdKafka::ERR_NO_ERROR: - /* Proper message. Update calculated lag for later - * checking in stats callback */ - if (msg->offset()+1 >= msgcnt && with_txns) - stats.calc_lag = 0; - else - stats.calc_lag = (msgcnt+addcnt) - (msg->offset()+1); - cnt++; - Test::Say(2, tostr() << "Received message #" << cnt << "/" << msgcnt << - " at offset " << msg->offset() << " (calc lag " << stats.calc_lag << ")\n"); - /* Slow down message "processing" to make sure we get - * at least one stats callback per message. */ - if (cnt < msgcnt) - rd_sleep(1); - break; - - default: - Test::Fail("Consume error: " + msg->errstr()); - break; - } + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + if (with_txns && cnt >= msgcnt && stats.calc_lag == 0) + addcnt = 0; /* done */ + break; + case RdKafka::ERR__PARTITION_EOF: + Test::Fail(tostr() << "Unexpected PARTITION_EOF (not enbaled) after " + << cnt << "/" << msgcnt + << " messages: " << msg->errstr()); + break; + + case RdKafka::ERR_NO_ERROR: + /* Proper message. 
Update calculated lag for later + * checking in stats callback */ + if (msg->offset() + 1 >= msgcnt && with_txns) + stats.calc_lag = 0; + else + stats.calc_lag = (msgcnt + addcnt) - (msg->offset() + 1); + cnt++; + Test::Say(2, tostr() << "Received message #" << cnt << "/" << msgcnt + << " at offset " << msg->offset() << " (calc lag " + << stats.calc_lag << ")\n"); + /* Slow down message "processing" to make sure we get + * at least one stats callback per message. */ + if (cnt < msgcnt) + rd_sleep(1); + break; + + default: + Test::Fail("Consume error: " + msg->errstr()); + break; + } delete msg; } - Test::Say(tostr() << "Done, lag was valid " << - stats.lag_valid << " times\n"); + Test::Say(tostr() << "Done, lag was valid " << stats.lag_valid << " times\n"); if (stats.lag_valid == 0) Test::Fail("No valid consumer_lag in statistics seen"); @@ -268,10 +266,10 @@ static void do_test_consumer_lag (bool with_txns) { } extern "C" { - int main_0061_consumer_lag (int argc, char **argv) { - do_test_consumer_lag(false/*no txns*/); - if (test_broker_version >= TEST_BRKVER(0,11,0,0)) - do_test_consumer_lag(true/*txns*/); - return 0; - } +int main_0061_consumer_lag(int argc, char **argv) { + do_test_consumer_lag(false /*no txns*/); + if (test_broker_version >= TEST_BRKVER(0, 11, 0, 0)) + do_test_consumer_lag(true /*txns*/); + return 0; +} } diff --git a/tests/0062-stats_event.c b/tests/0062-stats_event.c index 88de287518..bdddda5e08 100644 --- a/tests/0062-stats_event.c +++ b/tests/0062-stats_event.c @@ -35,7 +35,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int stats_count = 0; @@ -43,82 +43,84 @@ static int stats_count = 0; /** * Handle stats */ -static void handle_stats (rd_kafka_event_t *rkev) { - const char *stats_json = NULL; - stats_json = rd_kafka_event_stats(rkev); - if (stats_json != NULL) { - TEST_SAY("Stats: %s\n", stats_json); - stats_count++; - } else { - TEST_FAIL("Stats: failed to get stats\n"); - } +static void handle_stats(rd_kafka_event_t *rkev) { + const char *stats_json = NULL; + stats_json = rd_kafka_event_stats(rkev); + if (stats_json != NULL) { + TEST_SAY("Stats: %s\n", stats_json); + stats_count++; + } else { + TEST_FAIL("Stats: failed to get stats\n"); + } } -int main_0062_stats_event (int argc, char **argv) { - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - test_timing_t t_delivery; - rd_kafka_queue_t *eventq; - const int iterations = 5; - int i; - test_conf_init(NULL, NULL, 10); - - /* Set up a global config object */ - conf = rd_kafka_conf_new(); - rd_kafka_conf_set(conf,"statistics.interval.ms", "100", NULL, 0); - - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_STATS); - - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - - eventq = rd_kafka_queue_get_main(rk); - - /* Wait for stats event */ - for (i = 0 ; i < iterations ; i++) { - TIMING_START(&t_delivery, "STATS_EVENT"); - stats_count = 0; - while (stats_count == 0) { - rd_kafka_event_t *rkev; - rkev = rd_kafka_queue_poll(eventq, 100); - switch (rd_kafka_event_type(rkev)) - { - case RD_KAFKA_EVENT_STATS: - TEST_SAY("%s event\n", rd_kafka_event_name(rkev)); - handle_stats(rkev); - break; - case RD_KAFKA_EVENT_NONE: - break; - default: - TEST_SAY("Ignore event: %s\n", - rd_kafka_event_name(rkev)); - break; - } - rd_kafka_event_destroy(rkev); - } - TIMING_STOP(&t_delivery); - - if (TIMING_DURATION(&t_delivery) < 1000 * 100 * 0.5 || - TIMING_DURATION(&t_delivery) > 1000 * 100 * 1.5) { - /* CIs and valgrind are too 
flaky/slow to - * make this failure meaningful. */ - if (!test_on_ci && !strcmp(test_mode, "bare")) { - TEST_FAIL("Stats duration %.3fms is >= 50%% " - "outside statistics.interval.ms 100", - (float)TIMING_DURATION(&t_delivery)/ - 1000.0f); - } else { - TEST_WARN("Stats duration %.3fms is >= 50%% " - "outside statistics.interval.ms 100\n", - (float)TIMING_DURATION(&t_delivery)/ - 1000.0f); - } - } - } - - rd_kafka_queue_destroy(eventq); - - rd_kafka_destroy(rk); - - return 0; +int main_0062_stats_event(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + test_timing_t t_delivery; + rd_kafka_queue_t *eventq; + const int iterations = 5; + int i; + test_conf_init(NULL, NULL, 10); + + /* Set up a global config object */ + conf = rd_kafka_conf_new(); + rd_kafka_conf_set(conf, "statistics.interval.ms", "100", NULL, 0); + + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_STATS); + + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + eventq = rd_kafka_queue_get_main(rk); + + /* Wait for stats event */ + for (i = 0; i < iterations; i++) { + TIMING_START(&t_delivery, "STATS_EVENT"); + stats_count = 0; + while (stats_count == 0) { + rd_kafka_event_t *rkev; + rkev = rd_kafka_queue_poll(eventq, 100); + switch (rd_kafka_event_type(rkev)) { + case RD_KAFKA_EVENT_STATS: + TEST_SAY("%s event\n", + rd_kafka_event_name(rkev)); + handle_stats(rkev); + break; + case RD_KAFKA_EVENT_NONE: + break; + default: + TEST_SAY("Ignore event: %s\n", + rd_kafka_event_name(rkev)); + break; + } + rd_kafka_event_destroy(rkev); + } + TIMING_STOP(&t_delivery); + + if (TIMING_DURATION(&t_delivery) < 1000 * 100 * 0.5 || + TIMING_DURATION(&t_delivery) > 1000 * 100 * 1.5) { + /* CIs and valgrind are too flaky/slow to + * make this failure meaningful. 
*/ + if (!test_on_ci && !strcmp(test_mode, "bare")) { + TEST_FAIL( + "Stats duration %.3fms is >= 50%% " + "outside statistics.interval.ms 100", + (float)TIMING_DURATION(&t_delivery) / + 1000.0f); + } else { + TEST_WARN( + "Stats duration %.3fms is >= 50%% " + "outside statistics.interval.ms 100\n", + (float)TIMING_DURATION(&t_delivery) / + 1000.0f); + } + } + } + + rd_kafka_queue_destroy(eventq); + + rd_kafka_destroy(rk); + + return 0; } diff --git a/tests/0063-clusterid.cpp b/tests/0063-clusterid.cpp index 0aeac2c79e..dda8d6ddb2 100644 --- a/tests/0063-clusterid.cpp +++ b/tests/0063-clusterid.cpp @@ -35,8 +35,7 @@ * Test Handle::clusterid() and Handle::controllerid() */ -static void do_test_clusterid (void) { - +static void do_test_clusterid(void) { Test::Say("[ do_test_clusterid ]\n"); /* @@ -107,8 +106,7 @@ static void do_test_clusterid (void) { * This instantiates its own client to avoid having the value cached * from do_test_clusterid(), but they are basically the same tests. */ -static void do_test_controllerid (void) { - +static void do_test_controllerid(void) { Test::Say("[ do_test_controllerid ]\n"); /* @@ -142,39 +140,41 @@ static void do_test_controllerid (void) { int32_t controllerid_good_1 = p_good->controllerid(tmout_multip(2000)); if (controllerid_good_1 == -1) Test::Fail("good producer(w timeout): Controllerid is -1"); - Test::Say(tostr() << "good producer(w timeout): Controllerid " << controllerid_good_1 << "\n"); + Test::Say(tostr() << "good producer(w timeout): Controllerid " + << controllerid_good_1 << "\n"); /* Then retrieve a cached copy. 
*/ int32_t controllerid_good_2 = p_good->controllerid(0); if (controllerid_good_2 == -1) Test::Fail("good producer(0): Controllerid is -1"); - Test::Say(tostr() << "good producer(0): Controllerid " << controllerid_good_2 << "\n"); + Test::Say(tostr() << "good producer(0): Controllerid " << controllerid_good_2 + << "\n"); if (controllerid_good_1 != controllerid_good_2) - Test::Fail(tostr() << "Good Controllerid mismatch: " << - controllerid_good_1 << " != " << controllerid_good_2); + Test::Fail(tostr() << "Good Controllerid mismatch: " << controllerid_good_1 + << " != " << controllerid_good_2); /* * Try bad producer, should return -1 */ int32_t controllerid_bad_1 = p_bad->controllerid(tmout_multip(2000)); if (controllerid_bad_1 != -1) - Test::Fail(tostr() << - "bad producer(w timeout): Controllerid should be -1, not " << - controllerid_bad_1); + Test::Fail( + tostr() << "bad producer(w timeout): Controllerid should be -1, not " + << controllerid_bad_1); int32_t controllerid_bad_2 = p_bad->controllerid(0); if (controllerid_bad_2 != -1) - Test::Fail(tostr() << "bad producer(0): Controllerid should be -1, not " << - controllerid_bad_2); + Test::Fail(tostr() << "bad producer(0): Controllerid should be -1, not " + << controllerid_bad_2); delete p_good; delete p_bad; } extern "C" { - int main_0063_clusterid (int argc, char **argv) { - do_test_clusterid(); - do_test_controllerid(); - return 0; - } +int main_0063_clusterid(int argc, char **argv) { + do_test_clusterid(); + do_test_controllerid(); + return 0; +} } diff --git a/tests/0064-interceptors.c b/tests/0064-interceptors.c index 2e3c744f5e..e5c5b047a7 100644 --- a/tests/0064-interceptors.c +++ b/tests/0064-interceptors.c @@ -56,10 +56,10 @@ static const int producer_ic_cnt = 5; static const int consumer_ic_cnt = 10; /* The base values help differentiating opaque values between interceptors */ -static const int on_send_base = 1<<24; -static const int on_ack_base = 1<<25; -static const int on_consume_base = 1<<26; 
-static const int on_commit_base = 1<<27; +static const int on_send_base = 1 << 24; +static const int on_ack_base = 1 << 25; +static const int on_consume_base = 1 << 26; +static const int on_commit_base = 1 << 27; static const int base_mask = 0xff << 24; #define _ON_SEND 0 @@ -68,7 +68,7 @@ static const int base_mask = 0xff << 24; #define _ON_CNT 3 struct msg_state { int id; - int bits[_ON_CNT]; /* Bit field, one bit per interceptor */ + int bits[_ON_CNT]; /* Bit field, one bit per interceptor */ mtx_t lock; }; @@ -87,30 +87,34 @@ static int on_commit_bits = 0; * must be reflected here, meaning that all lower bits must be set, * and no higher ones. */ -static void msg_verify_ic_cnt (const struct msg_state *msg, const char *what, - int bits, int exp_cnt) { - int exp_bits = exp_cnt ? (1 << exp_cnt)-1 : 0; +static void msg_verify_ic_cnt(const struct msg_state *msg, + const char *what, + int bits, + int exp_cnt) { + int exp_bits = exp_cnt ? (1 << exp_cnt) - 1 : 0; TEST_ASSERT(bits == exp_bits, - "msg #%d: %s: expected bits 0x%x (%d), got 0x%x", - msg->id, what, exp_bits, exp_cnt, bits); + "msg #%d: %s: expected bits 0x%x (%d), got 0x%x", msg->id, + what, exp_bits, exp_cnt, bits); } /* * @brief Same as msg_verify_ic_cnt() without the msg reliance */ -static void verify_ic_cnt (const char *what, int bits, int exp_cnt) { - int exp_bits = exp_cnt ? (1 << exp_cnt)-1 : 0; +static void verify_ic_cnt(const char *what, int bits, int exp_cnt) { + int exp_bits = exp_cnt ? 
(1 << exp_cnt) - 1 : 0; - TEST_ASSERT(bits == exp_bits, - "%s: expected bits 0x%x (%d), got 0x%x", + TEST_ASSERT(bits == exp_bits, "%s: expected bits 0x%x (%d), got 0x%x", what, exp_bits, exp_cnt, bits); } -static void verify_msg (const char *what, int base, int bitid, - rd_kafka_message_t *rkmessage, void *ic_opaque) { +static void verify_msg(const char *what, + int base, + int bitid, + rd_kafka_message_t *rkmessage, + void *ic_opaque) { const char *id_str = rkmessage->key; struct msg_state *msg; int id; @@ -122,25 +126,24 @@ static void verify_msg (const char *what, int base, int bitid, /* Find message by id */ TEST_ASSERT(rkmessage->key && rkmessage->key_len > 0 && - id_str[(int)rkmessage->key_len-1] == '\0' && + id_str[(int)rkmessage->key_len - 1] == '\0' && strlen(id_str) > 0 && isdigit(*id_str)); id = atoi(id_str); - TEST_ASSERT(id >= 0 && id < msgcnt, - "%s: bad message id %s", what, id_str); + TEST_ASSERT(id >= 0 && id < msgcnt, "%s: bad message id %s", what, + id_str); msg = &msgs[id]; mtx_lock(&msg->lock); - TEST_ASSERT(msg->id == id, "expected msg #%d has wrong id %d", - id, msg->id); + TEST_ASSERT(msg->id == id, "expected msg #%d has wrong id %d", id, + msg->id); /* Verify message opaque */ - if (!strcmp(what, "on_send") || - !strncmp(what, "on_ack", 6)) + if (!strcmp(what, "on_send") || !strncmp(what, "on_ack", 6)) TEST_ASSERT(rkmessage->_private == (void *)msg); - TEST_SAYL(3, "%s: interceptor #%d called for message #%d (%d)\n", - what, ic_id, id, msg->id); + TEST_SAYL(3, "%s: interceptor #%d called for message #%d (%d)\n", what, + ic_id, id, msg->id); msg_verify_ic_cnt(msg, what, msg->bits[bitid], ic_id); @@ -151,25 +154,22 @@ static void verify_msg (const char *what, int base, int bitid, } -static rd_kafka_resp_err_t on_send (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_send(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { TEST_ASSERT(ic_opaque != NULL); verify_msg("on_send", 
on_send_base, _ON_SEND, rkmessage, ic_opaque); return RD_KAFKA_RESP_ERR_NO_ERROR; } -static rd_kafka_resp_err_t on_ack (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_ack(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { TEST_ASSERT(ic_opaque != NULL); verify_msg("on_ack", on_ack_base, _ON_ACK, rkmessage, ic_opaque); return RD_KAFKA_RESP_ERR_NO_ERROR; } -static rd_kafka_resp_err_t on_consume (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_consume(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { TEST_ASSERT(ic_opaque != NULL); verify_msg("on_consume", on_consume_base, _ON_CONSUME, rkmessage, ic_opaque); @@ -177,9 +177,11 @@ static rd_kafka_resp_err_t on_consume (rd_kafka_t *rk, } -static rd_kafka_resp_err_t on_commit ( - rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err, void *ic_opaque) { +static rd_kafka_resp_err_t +on_commit(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err, + void *ic_opaque) { int ic_id = (int)(intptr_t)ic_opaque; /* Since on_commit is triggered a bit randomly and not per @@ -216,9 +218,12 @@ static rd_kafka_resp_err_t on_commit ( } -static void do_test_produce (rd_kafka_t *rk, const char *topic, - int32_t partition, int msgid, int exp_fail, - int exp_ic_cnt) { +static void do_test_produce(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int msgid, + int exp_fail, + int exp_ic_cnt) { rd_kafka_resp_err_t err; char key[16]; struct msg_state *msg = &msgs[msgid]; @@ -226,28 +231,27 @@ static void do_test_produce (rd_kafka_t *rk, const char *topic, /* Message state should be empty, no interceptors should have * been called yet.. 
*/ - for (i = 0 ; i < _ON_CNT ; i++) + for (i = 0; i < _ON_CNT; i++) TEST_ASSERT(msg->bits[i] == 0); mtx_init(&msg->lock, mtx_plain); msg->id = msgid; rd_snprintf(key, sizeof(key), "%d", msgid); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_KEY(key, strlen(key)+1), + RD_KAFKA_V_KEY(key, strlen(key) + 1), RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(msg), - RD_KAFKA_V_END); + RD_KAFKA_V_OPAQUE(msg), RD_KAFKA_V_END); mtx_lock(&msg->lock); msg_verify_ic_cnt(msg, "on_send", msg->bits[_ON_SEND], exp_ic_cnt); if (err) { - msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], exp_ic_cnt); - TEST_ASSERT(exp_fail, - "producev() failed: %s", rd_kafka_err2str(err)); + msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], + exp_ic_cnt); + TEST_ASSERT(exp_fail, "producev() failed: %s", + rd_kafka_err2str(err)); } else { msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], 0); TEST_ASSERT(!exp_fail, @@ -259,24 +263,25 @@ static void do_test_produce (rd_kafka_t *rk, const char *topic, -static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { int i; - for (i = 0 ; i < producer_ic_cnt ; i++) { + for (i = 0; i < producer_ic_cnt; i++) { rd_kafka_resp_err_t err; err = rd_kafka_interceptor_add_on_send( - rk, tsprintf("on_send:%d",i), - on_send, (void *)(intptr_t)(on_send_base | i)); + rk, tsprintf("on_send:%d", i), on_send, + (void *)(intptr_t)(on_send_base | i)); TEST_ASSERT(!err, "add_on_send failed: %s", rd_kafka_err2str(err)); err = rd_kafka_interceptor_add_on_acknowledgement( - rk, tsprintf("on_acknowledgement:%d",i), - on_ack, (void *)(intptr_t)(on_ack_base | i)); + rk, tsprintf("on_acknowledgement:%d", i), on_ack, + 
(void *)(intptr_t)(on_ack_base | i)); TEST_ASSERT(!err, "add_on_ack.. failed: %s", rd_kafka_err2str(err)); @@ -284,15 +289,13 @@ static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, /* Add consumer interceptors as well to make sure * they are not called. */ err = rd_kafka_interceptor_add_on_consume( - rk, tsprintf("on_consume:%d",i), - on_consume, NULL); + rk, tsprintf("on_consume:%d", i), on_consume, NULL); TEST_ASSERT(!err, "add_on_consume failed: %s", rd_kafka_err2str(err)); err = rd_kafka_interceptor_add_on_commit( - rk, tsprintf("on_commit:%d",i), - on_commit, NULL); + rk, tsprintf("on_commit:%d", i), on_commit, NULL); TEST_ASSERT(!err, "add_on_commit failed: %s", rd_kafka_err2str(err)); } @@ -300,7 +303,7 @@ static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR_NO_ERROR; } -static void do_test_producer (const char *topic) { +static void do_test_producer(const char *topic) { rd_kafka_conf_t *conf; int i; rd_kafka_t *rk; @@ -315,7 +318,7 @@ static void do_test_producer (const char *topic) { /* Create producer */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - for (i = 0 ; i < msgcnt-1 ; i++) + for (i = 0; i < msgcnt - 1; i++) do_test_produce(rk, topic, RD_KAFKA_PARTITION_UA, i, 0, producer_ic_cnt); @@ -328,7 +331,7 @@ static void do_test_producer (const char *topic) { /* Verify acks */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { struct msg_state *msg = &msgs[i]; mtx_lock(&msg->lock); msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], @@ -340,30 +343,29 @@ static void do_test_producer (const char *topic) { } -static rd_kafka_resp_err_t on_new_consumer (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new_consumer(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { int i; - for (i = 0 ; i < consumer_ic_cnt ; i++) { + for (i = 0; i < consumer_ic_cnt; i++) { 
rd_kafka_interceptor_add_on_consume( - rk, tsprintf("on_consume:%d",i), - on_consume, (void *)(intptr_t)(on_consume_base | i)); + rk, tsprintf("on_consume:%d", i), on_consume, + (void *)(intptr_t)(on_consume_base | i)); rd_kafka_interceptor_add_on_commit( - rk, tsprintf("on_commit:%d",i), - on_commit, (void *)(intptr_t)(on_commit_base | i)); + rk, tsprintf("on_commit:%d", i), on_commit, + (void *)(intptr_t)(on_commit_base | i)); /* Add producer interceptors as well to make sure they * are not called. */ - rd_kafka_interceptor_add_on_send( - rk, tsprintf("on_send:%d",i), - on_send, NULL); + rd_kafka_interceptor_add_on_send(rk, tsprintf("on_send:%d", i), + on_send, NULL); rd_kafka_interceptor_add_on_acknowledgement( - rk, tsprintf("on_acknowledgement:%d",i), - on_ack, NULL); + rk, tsprintf("on_acknowledgement:%d", i), on_ack, NULL); } @@ -371,7 +373,7 @@ static rd_kafka_resp_err_t on_new_consumer (rd_kafka_t *rk, } -static void do_test_consumer (const char *topic) { +static void do_test_consumer(const char *topic) { rd_kafka_conf_t *conf; int i; @@ -392,11 +394,11 @@ static void do_test_consumer (const char *topic) { test_consumer_subscribe(rk, topic); /* Consume messages (-1 for the one that failed producing) */ - test_consumer_poll("interceptors.consume", rk, 0, -1, -1, msgcnt-1, + test_consumer_poll("interceptors.consume", rk, 0, -1, -1, msgcnt - 1, NULL); /* Verify on_consume */ - for (i = 0 ; i < msgcnt-1 ; i++) { + for (i = 0; i < msgcnt - 1; i++) { struct msg_state *msg = &msgs[i]; mtx_lock(&msg->lock); msg_verify_ic_cnt(msg, "on_consume", msg->bits[_ON_CONSUME], @@ -406,10 +408,10 @@ static void do_test_consumer (const char *topic) { /* Verify that the produce-failed message didnt have * interceptors called */ - mtx_lock(&msgs[msgcnt-1].lock); - msg_verify_ic_cnt(&msgs[msgcnt-1], "on_consume", - msgs[msgcnt-1].bits[_ON_CONSUME], 0); - mtx_unlock(&msgs[msgcnt-1].lock); + mtx_lock(&msgs[msgcnt - 1].lock); + msg_verify_ic_cnt(&msgs[msgcnt - 1], "on_consume", + 
msgs[msgcnt - 1].bits[_ON_CONSUME], 0); + mtx_unlock(&msgs[msgcnt - 1].lock); test_consumer_close(rk); @@ -425,7 +427,7 @@ static void do_test_consumer (const char *topic) { * is not duplicated without the interceptor's knowledge or * assistance. */ -static void do_test_conf_copy (const char *topic) { +static void do_test_conf_copy(const char *topic) { rd_kafka_conf_t *conf, *conf2; int i; rd_kafka_t *rk; @@ -442,20 +444,20 @@ static void do_test_conf_copy (const char *topic) { /* Now copy the configuration to verify that interceptors are * NOT copied. */ conf2 = conf; - conf = rd_kafka_conf_dup(conf2); + conf = rd_kafka_conf_dup(conf2); rd_kafka_conf_destroy(conf2); /* Create producer */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - for (i = 0 ; i < msgcnt-1 ; i++) + for (i = 0; i < msgcnt - 1; i++) do_test_produce(rk, topic, RD_KAFKA_PARTITION_UA, i, 0, 0); /* Wait for messages to be delivered */ test_flush(rk, -1); /* Verify acks */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { struct msg_state *msg = &msgs[i]; mtx_lock(&msg->lock); msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], 0); @@ -466,7 +468,7 @@ static void do_test_conf_copy (const char *topic) { } -int main_0064_interceptors (int argc, char **argv) { +int main_0064_interceptors(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__, 1); do_test_producer(topic); @@ -477,4 +479,3 @@ int main_0064_interceptors (int argc, char **argv) { return 0; } - diff --git a/tests/0065-yield.cpp b/tests/0065-yield.cpp index 8bdaa34a61..6f2dbb0acb 100644 --- a/tests/0065-yield.cpp +++ b/tests/0065-yield.cpp @@ -44,13 +44,14 @@ class DrCb0065 : public RdKafka::DeliveryReportCb { public: - int cnt; // dr messages seen - bool do_yield; // whether to yield for each message or not + int cnt; // dr messages seen + bool do_yield; // whether to yield for each message or not RdKafka::Producer *p; - DrCb0065(bool yield): cnt(0), do_yield(yield), p(NULL) {} + DrCb0065(bool 
yield) : cnt(0), do_yield(yield), p(NULL) { + } - void dr_cb (RdKafka::Message &message) { + void dr_cb(RdKafka::Message &message) { if (message.err()) Test::Fail("DR: message failed: " + RdKafka::err2str(message.err())); @@ -63,7 +64,7 @@ class DrCb0065 : public RdKafka::DeliveryReportCb { }; -static void do_test_producer (bool do_yield) { +static void do_test_producer(bool do_yield) { int msgcnt = 100; std::string errstr; RdKafka::ErrorCode err; @@ -88,12 +89,12 @@ static void do_test_producer (bool do_yield) { dr.p = p; - Test::Say(tostr() << (do_yield ? "Yield: " : "Dont Yield: ") << - "Producing " << msgcnt << " messages to " << topic << "\n"); + Test::Say(tostr() << (do_yield ? "Yield: " : "Dont Yield: ") << "Producing " + << msgcnt << " messages to " << topic << "\n"); - for (int i = 0 ; i < msgcnt ; i++) { - err = p->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, - (void *)"hi", 2, NULL, 0, 0, NULL); + for (int i = 0; i < msgcnt; i++) { + err = p->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"hi", 2, + NULL, 0, 0, NULL); if (err) Test::Fail("produce() failed: " + RdKafka::err2str(err)); } @@ -114,8 +115,8 @@ static void do_test_producer (bool do_yield) { } if (this_dr_cnt != exp_msgs_per_poll) - Test::Fail(tostr() << "Expected " << exp_msgs_per_poll << - " DRs per poll() call, got " << this_dr_cnt); + Test::Fail(tostr() << "Expected " << exp_msgs_per_poll + << " DRs per poll() call, got " << this_dr_cnt); else Test::Say(3, tostr() << dr.cnt << "/" << msgcnt << "\n"); } @@ -123,17 +124,17 @@ static void do_test_producer (bool do_yield) { if (dr.cnt != msgcnt) Test::Fail(tostr() << "Expected " << msgcnt << " DRs, got " << dr.cnt); - Test::Say(tostr() << (do_yield ? "Yield: " : "Dont Yield: ") << - "Success: " << dr.cnt << " DRs received in batches of " << - exp_msgs_per_poll << "\n"); + Test::Say(tostr() << (do_yield ? 
"Yield: " : "Dont Yield: ") + << "Success: " << dr.cnt << " DRs received in batches of " + << exp_msgs_per_poll << "\n"); delete p; } extern "C" { - int main_0065_yield (int argc, char **argv) { - do_test_producer(1/*yield*/); - do_test_producer(0/*dont yield*/); - return 0; - } +int main_0065_yield(int argc, char **argv) { + do_test_producer(1 /*yield*/); + do_test_producer(0 /*dont yield*/); + return 0; +} } diff --git a/tests/0066-plugins.cpp b/tests/0066-plugins.cpp index 828aab9890..9f9f312400 100644 --- a/tests/0066-plugins.cpp +++ b/tests/0066-plugins.cpp @@ -50,24 +50,30 @@ struct ictest ictest; */ -static void do_test_plugin () { +static void do_test_plugin() { std::string errstr; - std::string topic = Test::mk_topic_name("0066_plugins", 1); + std::string topic = Test::mk_topic_name("0066_plugins", 1); static const char *config[] = { - "session.timeout.ms", "6000", /* Before plugin */ - "plugin.library.paths", "interceptor_test/interceptor_test", - "socket.timeout.ms", "12", /* After plugin */ - "interceptor_test.config1", "one", - "interceptor_test.config2", "two", - "topic.metadata.refresh.interval.ms", "1234", - NULL, + "session.timeout.ms", + "6000", /* Before plugin */ + "plugin.library.paths", + "interceptor_test/interceptor_test", + "socket.timeout.ms", + "12", /* After plugin */ + "interceptor_test.config1", + "one", + "interceptor_test.config2", + "two", + "topic.metadata.refresh.interval.ms", + "1234", + NULL, }; char cwd[512], *pcwd; #ifdef _WIN32 - pcwd = _getcwd(cwd, sizeof(cwd)-1); + pcwd = _getcwd(cwd, sizeof(cwd) - 1); #else - pcwd = getcwd(cwd, sizeof(cwd)-1); + pcwd = getcwd(cwd, sizeof(cwd) - 1); #endif if (pcwd) Test::Say(tostr() << "running test from cwd " << cwd << "\n"); @@ -80,9 +86,9 @@ static void do_test_plugin () { /* Config for intercepted client */ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - for (int i = 0 ; config[i] ; i += 2) { - Test::Say(tostr() << "set(" << config[i] << ", " << config[i+1] 
<< ")\n"); - if (conf->set(config[i], config[i+1], errstr)) + for (int i = 0; config[i]; i += 2) { + Test::Say(tostr() << "set(" << config[i] << ", " << config[i + 1] << ")\n"); + if (conf->set(config[i], config[i + 1], errstr)) Test::Fail(tostr() << "set(" << config[i] << ") failed: " << errstr); } @@ -93,9 +99,9 @@ static void do_test_plugin () { if (ictest.on_new.cnt < ictest.on_new.min || ictest.on_new.cnt > ictest.on_new.max) - Test::Fail(tostr() << "on_new.cnt " << ictest.on_new.cnt << - " not within range " << ictest.on_new.min << ".." << - ictest.on_new.max); + Test::Fail(tostr() << "on_new.cnt " << ictest.on_new.cnt + << " not within range " << ictest.on_new.min << ".." + << ictest.on_new.max); /* Verification */ if (!ictest.config1 || strcmp(ictest.config1, "one")) @@ -103,7 +109,8 @@ static void do_test_plugin () { if (!ictest.config2 || strcmp(ictest.config2, "two")) Test::Fail(tostr() << "config2 was " << ictest.config2); if (!ictest.session_timeout_ms || strcmp(ictest.session_timeout_ms, "6000")) - Test::Fail(tostr() << "session.timeout.ms was " << ictest.session_timeout_ms); + Test::Fail(tostr() << "session.timeout.ms was " + << ictest.session_timeout_ms); if (!ictest.socket_timeout_ms || strcmp(ictest.socket_timeout_ms, "12")) Test::Fail(tostr() << "socket.timeout.ms was " << ictest.socket_timeout_ms); @@ -115,8 +122,8 @@ static void do_test_plugin () { } extern "C" { - int main_0066_plugins (int argc, char **argv) { - do_test_plugin(); - return 0; - } +int main_0066_plugins(int argc, char **argv) { + do_test_plugin(); + return 0; +} } diff --git a/tests/0067-empty_topic.cpp b/tests/0067-empty_topic.cpp index d965e299d1..f71489fa16 100644 --- a/tests/0067-empty_topic.cpp +++ b/tests/0067-empty_topic.cpp @@ -38,9 +38,9 @@ */ -static void do_test_empty_topic_consumer () { +static void do_test_empty_topic_consumer() { std::string errstr; - std::string topic = Test::mk_topic_name("0067_empty_topic", 1); + std::string topic = 
Test::mk_topic_name("0067_empty_topic", 1); const int32_t partition = 0; RdKafka::Conf *conf; @@ -53,39 +53,42 @@ static void do_test_empty_topic_consumer () { /* Create simple consumer */ RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr); if (!consumer) - Test::Fail("Failed to create Consumer: " + errstr); + Test::Fail("Failed to create Consumer: " + errstr); RdKafka::Topic *rkt = RdKafka::Topic::create(consumer, topic, NULL, errstr); if (!rkt) - Test::Fail("Simple Topic failed: " + errstr); + Test::Fail("Simple Topic failed: " + errstr); /* Create the topic through a metadata request. */ Test::Say("Creating empty topic " + topic + "\n"); RdKafka::Metadata *md; - RdKafka::ErrorCode err = consumer->metadata(false, rkt, &md, - tmout_multip(10*1000)); + RdKafka::ErrorCode err = + consumer->metadata(false, rkt, &md, tmout_multip(10 * 1000)); if (err) - Test::Fail("Failed to create topic " + topic + ": " + RdKafka::err2str(err)); + Test::Fail("Failed to create topic " + topic + ": " + + RdKafka::err2str(err)); delete md; /* Start consumer */ err = consumer->start(rkt, partition, RdKafka::Topic::OFFSET_BEGINNING); if (err) - Test::Fail("Consume start() failed: " + RdKafka::err2str(err)); + Test::Fail("Consume start() failed: " + RdKafka::err2str(err)); /* Consume using legacy consumer, should give an EOF and nothing else. 
*/ Test::Say("Simple Consumer: consuming\n"); - RdKafka::Message *msg = consumer->consume(rkt, partition, - tmout_multip(10 * 1000)); + RdKafka::Message *msg = + consumer->consume(rkt, partition, tmout_multip(10 * 1000)); if (msg->err() != RdKafka::ERR__PARTITION_EOF) - Test::Fail("Simple consume() expected EOF, got " + RdKafka::err2str(msg->err())); + Test::Fail("Simple consume() expected EOF, got " + + RdKafka::err2str(msg->err())); delete msg; /* Nothing else should come now, just a consume() timeout */ msg = consumer->consume(rkt, partition, 1 * 1000); if (msg->err() != RdKafka::ERR__TIMED_OUT) - Test::Fail("Simple consume() expected timeout, got " + RdKafka::err2str(msg->err())); + Test::Fail("Simple consume() expected timeout, got " + + RdKafka::err2str(msg->err())); delete msg; consumer->stop(rkt, partition); @@ -103,29 +106,32 @@ static void do_test_empty_topic_consumer () { Test::conf_set(conf, "enable.partition.eof", "true"); Test::conf_set(conf, "allow.auto.create.topics", "true"); - RdKafka::KafkaConsumer *kconsumer = RdKafka::KafkaConsumer::create(conf, errstr); + RdKafka::KafkaConsumer *kconsumer = + RdKafka::KafkaConsumer::create(conf, errstr); if (!kconsumer) - Test::Fail("Failed to create KafkaConsumer: " + errstr); + Test::Fail("Failed to create KafkaConsumer: " + errstr); - std::vector part; + std::vector part; part.push_back(RdKafka::TopicPartition::create(topic, partition)); err = kconsumer->assign(part); if (err) - Test::Fail("assign() failed: " + RdKafka::err2str(err)); + Test::Fail("assign() failed: " + RdKafka::err2str(err)); RdKafka::TopicPartition::destroy(part); Test::Say("KafkaConsumer: consuming\n"); msg = kconsumer->consume(tmout_multip(5 * 1000)); if (msg->err() != RdKafka::ERR__PARTITION_EOF) - Test::Fail("KafkaConsumer consume() expected EOF, got " + RdKafka::err2str(msg->err())); + Test::Fail("KafkaConsumer consume() expected EOF, got " + + RdKafka::err2str(msg->err())); delete msg; /* Nothing else should come now, just a 
consume() timeout */ msg = kconsumer->consume(1 * 1000); if (msg->err() != RdKafka::ERR__TIMED_OUT) - Test::Fail("KafkaConsumer consume() expected timeout, got " + RdKafka::err2str(msg->err())); + Test::Fail("KafkaConsumer consume() expected timeout, got " + + RdKafka::err2str(msg->err())); delete msg; kconsumer->close(); @@ -135,8 +141,8 @@ static void do_test_empty_topic_consumer () { } extern "C" { - int main_0067_empty_topic (int argc, char **argv) { - do_test_empty_topic_consumer(); - return 0; - } +int main_0067_empty_topic(int argc, char **argv) { + do_test_empty_topic_consumer(); + return 0; +} } diff --git a/tests/0068-produce_timeout.c b/tests/0068-produce_timeout.c index 0fcc88abb9..a7ad37e164 100644 --- a/tests/0068-produce_timeout.c +++ b/tests/0068-produce_timeout.c @@ -41,15 +41,15 @@ * @brief Sockem connect, called from **internal librdkafka thread** through * librdkafka's connect_cb */ -static int connect_cb (struct test *test, sockem_t *skm, const char *id) { +static int connect_cb(struct test *test, sockem_t *skm, const char *id) { /* Let delay be high to trigger the local timeout */ sockem_set(skm, "delay", 10000, NULL); return 0; } -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. 
* SASL auther will think a connection-down even in the auth @@ -63,19 +63,20 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, return 1; } -static int msg_dr_cnt = 0; +static int msg_dr_cnt = 0; static int msg_dr_fail_cnt = 0; -static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { msg_dr_cnt++; if (rkmessage->err != RD_KAFKA_RESP_ERR__MSG_TIMED_OUT) - TEST_FAIL_LATER("Expected message to fail with MSG_TIMED_OUT, " - "got: %s", - rd_kafka_err2str(rkmessage->err)); + TEST_FAIL_LATER( + "Expected message to fail with MSG_TIMED_OUT, " + "got: %s", + rd_kafka_err2str(rkmessage->err)); else { TEST_ASSERT_LATER(rd_kafka_message_status(rkmessage) == - RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED, + RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED, "Message should have status " "PossiblyPersisted (%d), not %d", RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED, @@ -86,7 +87,7 @@ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, -int main_0068_produce_timeout (int argc, char **argv) { +int main_0068_produce_timeout(int argc, char **argv) { rd_kafka_t *rk; const char *topic = test_mk_topic_name("0068_produce_timeout", 1); uint64_t testid; @@ -101,19 +102,19 @@ int main_0068_produce_timeout (int argc, char **argv) { rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); test_socket_enable(conf); - test_curr->connect_cb = connect_cb; + test_curr->connect_cb = connect_cb; test_curr->is_fatal_cb = is_fatal_cb; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_producer_topic(rk, topic, - "message.timeout.ms", "2000", NULL); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(rk, topic, "message.timeout.ms", + "2000", NULL); TEST_SAY("Auto-creating topic %s\n", topic); test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000)); TEST_SAY("Producing %d messages that should timeout\n", 
msgcnt); - test_produce_msgs_nowait(rk, rkt, testid, 0, 0, msgcnt, - NULL, 0, 0, &msgcounter); + test_produce_msgs_nowait(rk, rkt, testid, 0, 0, msgcnt, NULL, 0, 0, + &msgcounter); TEST_SAY("Flushing..\n"); @@ -122,10 +123,10 @@ int main_0068_produce_timeout (int argc, char **argv) { TEST_SAY("%d/%d delivery reports, where of %d with proper error\n", msg_dr_cnt, msgcnt, msg_dr_fail_cnt); - TEST_ASSERT(msg_dr_cnt == msgcnt, - "expected %d, got %d", msgcnt, msg_dr_cnt); - TEST_ASSERT(msg_dr_fail_cnt == msgcnt, - "expected %d, got %d", msgcnt, msg_dr_fail_cnt); + TEST_ASSERT(msg_dr_cnt == msgcnt, "expected %d, got %d", msgcnt, + msg_dr_cnt); + TEST_ASSERT(msg_dr_fail_cnt == msgcnt, "expected %d, got %d", msgcnt, + msg_dr_fail_cnt); rd_kafka_topic_destroy(rkt); rd_kafka_destroy(rk); diff --git a/tests/0069-consumer_add_parts.c b/tests/0069-consumer_add_parts.c index a57176df17..933e53775b 100644 --- a/tests/0069-consumer_add_parts.c +++ b/tests/0069-consumer_add_parts.c @@ -41,8 +41,10 @@ static rd_kafka_t *c1, *c2; static rd_kafka_resp_err_t state1, state2; -static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { rd_kafka_resp_err_t *statep = NULL; if (rk == c1) @@ -52,7 +54,8 @@ static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, else TEST_FAIL("Invalid rk %p", rk); - TEST_SAY("Rebalance for %s: %s:\n", rd_kafka_name(rk), rd_kafka_err2str(err)); + TEST_SAY("Rebalance for %s: %s:\n", rd_kafka_name(rk), + rd_kafka_err2str(err)); test_print_partition_list(parts); if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) @@ -64,7 +67,7 @@ static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, } -int main_0069_consumer_add_parts (int argc, char **argv) { +int main_0069_consumer_add_parts(int argc, char **argv) { const char *topic = 
test_mk_topic_name(__FUNCTION__ + 5, 1); int64_t ts_start; int wait_sec; @@ -78,7 +81,7 @@ int main_0069_consumer_add_parts (int argc, char **argv) { TEST_SAY("Creating topic %s with 2 partitions\n", topic); test_create_topic(c1, topic, 2, 1); - test_wait_topic_exists(c1, topic, 10*1000); + test_wait_topic_exists(c1, topic, 10 * 1000); TEST_SAY("Subscribing\n"); test_consumer_subscribe(c1, topic); @@ -96,7 +99,9 @@ int main_0069_consumer_add_parts (int argc, char **argv) { TEST_SAY("Changing partition count for topic %s\n", topic); test_create_partitions(NULL, topic, 4); - TEST_SAY("Closing consumer 1 (to quickly trigger rebalance with new partitions)\n"); + TEST_SAY( + "Closing consumer 1 (to quickly trigger rebalance with new " + "partitions)\n"); test_consumer_close(c1); rd_kafka_destroy(c1); diff --git a/tests/0070-null_empty.cpp b/tests/0070-null_empty.cpp index 5e46eb9b06..fac48185c3 100644 --- a/tests/0070-null_empty.cpp +++ b/tests/0070-null_empty.cpp @@ -35,30 +35,35 @@ */ -static int check_equal (const char *exp, - const char *actual, size_t len, - std::string what) { +static int check_equal(const char *exp, + const char *actual, + size_t len, + std::string what) { size_t exp_len = exp ? 
strlen(exp) : 0; - int failures = 0; + int failures = 0; if (!actual && len != 0) { - Test::FailLater(tostr() << what << ": expected length 0 for Null, not " << len); + Test::FailLater(tostr() + << what << ": expected length 0 for Null, not " << len); failures++; } if (exp) { if (!actual) { - Test::FailLater(tostr() << what << ": expected \"" << exp << "\", not Null"); + Test::FailLater(tostr() + << what << ": expected \"" << exp << "\", not Null"); failures++; } else if (len != exp_len || strncmp(exp, actual, exp_len)) { - Test::FailLater(tostr() << what << ": expected \"" << exp << "\", not \"" << actual << "\" (" << len << " bytes)"); + Test::FailLater(tostr() << what << ": expected \"" << exp << "\", not \"" + << actual << "\" (" << len << " bytes)"); failures++; } } else { if (actual) { - Test::FailLater(tostr() << what << ": expected Null, not \"" << actual << "\" (" << len << " bytes)"); + Test::FailLater(tostr() << what << ": expected Null, not \"" << actual + << "\" (" << len << " bytes)"); failures++; } } @@ -70,11 +75,13 @@ static int check_equal (const char *exp, } -static void do_test_null_empty (bool api_version_request) { - std::string topic = Test::mk_topic_name("0070_null_empty", 1); +static void do_test_null_empty(bool api_version_request) { + std::string topic = Test::mk_topic_name("0070_null_empty", 1); const int partition = 0; - Test::Say(tostr() << "Testing with api.version.request=" << api_version_request << " on topic " << topic << " partition " << partition << "\n"); + Test::Say(tostr() << "Testing with api.version.request=" + << api_version_request << " on topic " << topic + << " partition " << partition << "\n"); RdKafka::Conf *conf; Test::conf_init(&conf, NULL, 0); @@ -89,37 +96,31 @@ static void do_test_null_empty (bool api_version_request) { Test::Fail("Failed to create Producer: " + errstr); delete conf; - const int msgcnt = 8; - static const char *msgs[msgcnt*2] = { - NULL, NULL, - "key2", NULL, - "key3", "val3", - NULL, "val4", - 
"", NULL, - NULL, "", - "", "" - }; + const int msgcnt = 8; + static const char *msgs[msgcnt * 2] = {NULL, NULL, "key2", NULL, "key3", + "val3", NULL, "val4", "", NULL, + NULL, "", "", ""}; RdKafka::ErrorCode err; - for (int i = 0 ; i < msgcnt * 2 ; i += 2) { - Test::Say(3, tostr() << "Produce message #" << (i/2) << - ": key=\"" << (msgs[i] ? msgs[i] : "Null") << - "\", value=\"" << (msgs[i+1] ? msgs[i+1] : "Null") << "\"\n"); + for (int i = 0; i < msgcnt * 2; i += 2) { + Test::Say(3, tostr() << "Produce message #" << (i / 2) << ": key=\"" + << (msgs[i] ? msgs[i] : "Null") << "\", value=\"" + << (msgs[i + 1] ? msgs[i + 1] : "Null") << "\"\n"); err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, /* Value */ - (void *)msgs[i+1], msgs[i+1] ? strlen(msgs[i+1]) : 0, + (void *)msgs[i + 1], msgs[i + 1] ? strlen(msgs[i + 1]) : 0, /* Key */ - (void *)msgs[i], msgs[i] ? strlen(msgs[i]) : 0, - 0, NULL); + (void *)msgs[i], msgs[i] ? strlen(msgs[i]) : 0, 0, NULL); if (err != RdKafka::ERR_NO_ERROR) Test::Fail("Produce failed: " + RdKafka::err2str(err)); } - if (p->flush(tmout_multip(3*5000)) != 0) + if (p->flush(tmout_multip(3 * 5000)) != 0) Test::Fail("Not all messages flushed"); - Test::Say(tostr() << "Produced " << msgcnt << " messages to " << topic << "\n"); + Test::Say(tostr() << "Produced " << msgcnt << " messages to " << topic + << "\n"); delete p; @@ -141,9 +142,9 @@ static void do_test_null_empty (bool api_version_request) { delete conf; /* Assign the partition */ - std::vector parts; - parts.push_back(RdKafka::TopicPartition::create(topic, partition, - RdKafka::Topic::OFFSET_BEGINNING)); + std::vector parts; + parts.push_back(RdKafka::TopicPartition::create( + topic, partition, RdKafka::Topic::OFFSET_BEGINNING)); err = c->assign(parts); if (err != RdKafka::ERR_NO_ERROR) Test::Fail("assign() failed: " + RdKafka::err2str(err)); @@ -151,26 +152,33 @@ static void do_test_null_empty (bool api_version_request) { /* Start consuming */ int failures = 0; - for 
(int i = 0 ; i < msgcnt * 2 ; i += 2) { + for (int i = 0; i < msgcnt * 2; i += 2) { RdKafka::Message *msg = c->consume(tmout_multip(5000)); if (msg->err()) - Test::Fail(tostr() << "consume() failed at message " << (i/2) << ": " << - msg->errstr()); + Test::Fail(tostr() << "consume() failed at message " << (i / 2) << ": " + << msg->errstr()); /* verify key */ - failures += check_equal(msgs[i], msg->key() ? msg->key()->c_str() : NULL, msg->key_len(), - tostr() << "message #" << (i/2) << " (offset " << msg->offset() << ") key"); + failures += check_equal(msgs[i], msg->key() ? msg->key()->c_str() : NULL, + msg->key_len(), + tostr() << "message #" << (i / 2) << " (offset " + << msg->offset() << ") key"); /* verify key_pointer() API as too */ - failures += check_equal(msgs[i], (const char *)msg->key_pointer(), msg->key_len(), - tostr() << "message #" << (i/2) << " (offset " << msg->offset() << ") key"); + failures += + check_equal(msgs[i], (const char *)msg->key_pointer(), msg->key_len(), + tostr() << "message #" << (i / 2) << " (offset " + << msg->offset() << ") key"); /* verify value */ - failures += check_equal(msgs[i+1], (const char *)msg->payload(), msg->len(), - tostr() << "message #" << (i/2) << " (offset " << msg->offset() << ") value"); + failures += + check_equal(msgs[i + 1], (const char *)msg->payload(), msg->len(), + tostr() << "message #" << (i / 2) << " (offset " + << msg->offset() << ") value"); delete msg; } - Test::Say(tostr() << "Done consuming, closing. " << failures << " test failures\n"); + Test::Say(tostr() << "Done consuming, closing. 
" << failures + << " test failures\n"); if (failures) Test::Fail(tostr() << "See " << failures << " previous test failure(s)"); @@ -180,10 +188,10 @@ static void do_test_null_empty (bool api_version_request) { extern "C" { - int main_0070_null_empty (int argc, char **argv) { - if (test_broker_version >= TEST_BRKVER(0,10,0,0)) - do_test_null_empty(true); - do_test_null_empty(false); - return 0; - } +int main_0070_null_empty(int argc, char **argv) { + if (test_broker_version >= TEST_BRKVER(0, 10, 0, 0)) + do_test_null_empty(true); + do_test_null_empty(false); + return 0; +} } diff --git a/tests/0072-headers_ut.c b/tests/0072-headers_ut.c index fc3d0894ae..0576d611ae 100644 --- a/tests/0072-headers_ut.c +++ b/tests/0072-headers_ut.c @@ -45,8 +45,9 @@ struct expect { /** * @brief returns the message id */ -static int expect_check (const char *what, const struct expect *expected, - const rd_kafka_message_t *rkmessage) { +static int expect_check(const char *what, + const struct expect *expected, + const rd_kafka_message_t *rkmessage) { const struct expect *exp; rd_kafka_resp_err_t err; size_t idx = 0; @@ -57,7 +58,7 @@ static int expect_check (const char *what, const struct expect *expected, int msgid; if (rkmessage->len != sizeof(msgid)) - TEST_FAIL("%s: expected message len %"PRIusz" == sizeof(int)", + TEST_FAIL("%s: expected message len %" PRIusz " == sizeof(int)", what, rkmessage->len); memcpy(&msgid, rkmessage->payload, rkmessage->len); @@ -75,20 +76,20 @@ static int expect_check (const char *what, const struct expect *expected, /* msgid should always be first and has a variable value so hard to * match with the expect struct. 
*/ - for (idx = 0, exp = expected ; - !rd_kafka_header_get_all(hdrs, idx, &name, - (const void **)&value, &size) ; + for (idx = 0, exp = expected; !rd_kafka_header_get_all( + hdrs, idx, &name, (const void **)&value, &size); idx++, exp++) { - TEST_SAYL(3, "%s: Msg #%d: " - "Header #%"PRIusz": %s='%s' (expecting %s='%s')\n", + TEST_SAYL(3, + "%s: Msg #%d: " + "Header #%" PRIusz ": %s='%s' (expecting %s='%s')\n", what, msgid, idx, name, value ? value : "(NULL)", exp->name, exp->value ? exp->value : "(NULL)"); if (strcmp(name, exp->name)) - TEST_FAIL("%s: Expected header %s at idx #%"PRIusz + TEST_FAIL("%s: Expected header %s at idx #%" PRIusz ", not %s", - what, exp->name, idx-1, name); + what, exp->name, idx - 1, name); if (!strcmp(name, "msgid")) { int vid; @@ -96,10 +97,11 @@ static int expect_check (const char *what, const struct expect *expected, /* Special handling: compare msgid header value * to message body, should be identical */ if (size != rkmessage->len || size != sizeof(int)) - TEST_FAIL("%s: " - "Expected msgid/int-sized payload " - "%"PRIusz", got %"PRIusz, - what, size, rkmessage->len); + TEST_FAIL( + "%s: " + "Expected msgid/int-sized payload " + "%" PRIusz ", got %" PRIusz, + what, size, rkmessage->len); /* Copy to avoid unaligned access (by cast) */ memcpy(&vid, value, size); @@ -109,8 +111,8 @@ static int expect_check (const char *what, const struct expect *expected, what, vid, msgid); if (exp_msgid != vid) - TEST_FAIL("%s: Expected msgid %d, not %d", - what, exp_msgid, vid); + TEST_FAIL("%s: Expected msgid %d, not %d", what, + exp_msgid, vid); continue; } @@ -127,8 +129,9 @@ static int expect_check (const char *what, const struct expect *expected, what, exp->name); TEST_ASSERT(size == strlen(exp->value), - "%s: Expected size %"PRIusz" for %s, " - "not %"PRIusz, + "%s: Expected size %" PRIusz + " for %s, " + "not %" PRIusz, what, strlen(exp->value), exp->name, size); TEST_ASSERT(value[size] == '\0', @@ -155,25 +158,16 @@ static int expect_check 
(const char *what, const struct expect *expected, /** * @brief Delivery report callback */ -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "null", NULL }, - { "empty", "" }, - { "send1", "1" }, - { "multi", "multi5" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, {"null", NULL}, {"empty", ""}, + {"send1", "1"}, {"multi", "multi5"}, {NULL}}; const struct expect replace_expected[] = { - { "msgid", NULL }, - { "new", "one" }, - { "this is the", NULL }, - { "replaced headers\"", "" }, - { "new", "right?" }, - { NULL } - }; + {"msgid", NULL}, {"new", "one"}, + {"this is the", NULL}, {"replaced headers\"", ""}, + {"new", "right?"}, {NULL}}; const struct expect *exp; rd_kafka_headers_t *new_hdrs; int msgid; @@ -187,11 +181,11 @@ static void dr_msg_cb (rd_kafka_t *rk, /* Replace entire headers list */ if (msgid > 0) { new_hdrs = rd_kafka_headers_new(1); - rd_kafka_header_add(new_hdrs, "msgid", -1, - &msgid, sizeof(msgid)); - for (exp = &replace_expected[1] ; exp->name ; exp++) - rd_kafka_header_add(new_hdrs, - exp->name, -1, exp->value, -1); + rd_kafka_header_add(new_hdrs, "msgid", -1, &msgid, + sizeof(msgid)); + for (exp = &replace_expected[1]; exp->name; exp++) + rd_kafka_header_add(new_hdrs, exp->name, -1, exp->value, + -1); rd_kafka_message_set_headers((rd_kafka_message_t *)rkmessage, new_hdrs); @@ -200,37 +194,41 @@ static void dr_msg_cb (rd_kafka_t *rk, } exp_msgid++; - } -static void expect_iter (const char *what, - const rd_kafka_headers_t *hdrs, const char *name, - const char **expected, size_t cnt) { +static void expect_iter(const char *what, + const rd_kafka_headers_t *hdrs, + const char *name, + const char **expected, + size_t cnt) { size_t idx; rd_kafka_resp_err_t err; const void 
*value; size_t size; - for (idx = 0 ; - !(err = rd_kafka_header_get(hdrs, idx, name, &value, &size)) ;\ + for (idx = 0; + !(err = rd_kafka_header_get(hdrs, idx, name, &value, &size)); idx++) { TEST_ASSERT(idx < cnt, "%s: too many headers matching '%s', " - "expected %"PRIusz, + "expected %" PRIusz, what, name, cnt); - TEST_SAYL(3, "%s: get(%"PRIusz", '%s') " + TEST_SAYL(3, + "%s: get(%" PRIusz + ", '%s') " "expecting '%s' =? '%s'\n", what, idx, name, expected[idx], (const char *)value); - TEST_ASSERT(!strcmp((const char *)value, expected[idx]), - "%s: get(%"PRIusz", '%s') expected '%s', not '%s'", - what, idx, name, expected[idx], - (const char *)value); + TEST_ASSERT( + !strcmp((const char *)value, expected[idx]), + "%s: get(%" PRIusz ", '%s') expected '%s', not '%s'", what, + idx, name, expected[idx], (const char *)value); } TEST_ASSERT(idx == cnt, - "%s: expected %"PRIusz" headers matching '%s', not %"PRIusz, + "%s: expected %" PRIusz + " headers matching '%s', not %" PRIusz, what, cnt, name, idx); } @@ -239,28 +237,21 @@ static void expect_iter (const char *what, /** * @brief First on_send() interceptor */ -static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_send1(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "multi", "multi1" }, - { "multi", "multi2" }, - { "multi", "multi3" }, - { "null", NULL }, - { "empty", "" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, + {"multi", "multi1"}, + {"multi", "multi2"}, + {"multi", "multi3"}, + {"null", NULL}, + {"empty", ""}, + {NULL}}; const char *expect_iter_multi[4] = { - "multi1", - "multi2", - "multi3", - "multi4" /* added below */ - }; - const char *expect_iter_static[1] = { - "hey" + "multi1", "multi2", "multi3", "multi4" /* added below */ }; + const char 
*expect_iter_static[1] = {"hey"}; rd_kafka_headers_t *hdrs; size_t header_cnt; rd_kafka_resp_err_t err; @@ -274,14 +265,14 @@ static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR_NO_ERROR; header_cnt = rd_kafka_header_cnt(hdrs); - TEST_ASSERT(header_cnt == 7, - "Expected 7 length got %"PRIusz"", header_cnt); + TEST_ASSERT(header_cnt == 7, "Expected 7 length got %" PRIusz "", + header_cnt); rd_kafka_header_add(hdrs, "multi", -1, "multi4", -1); header_cnt = rd_kafka_header_cnt(hdrs); - TEST_ASSERT(header_cnt == 8, - "Expected 8 length got %"PRIusz"", header_cnt); + TEST_ASSERT(header_cnt == 8, "Expected 8 length got %" PRIusz "", + header_cnt); /* test iter() */ expect_iter(__FUNCTION__, hdrs, "multi", expect_iter_multi, 4); @@ -291,28 +282,27 @@ static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, rd_kafka_header_add(hdrs, "send1", -1, "1", -1); header_cnt = rd_kafka_header_cnt(hdrs); - TEST_ASSERT(header_cnt == 9, - "Expected 9 length got %"PRIusz"", header_cnt); + TEST_ASSERT(header_cnt == 9, "Expected 9 length got %" PRIusz "", + header_cnt); rd_kafka_header_remove(hdrs, "multi"); header_cnt = rd_kafka_header_cnt(hdrs); - TEST_ASSERT(header_cnt == 5, - "Expected 5 length got %"PRIusz"", header_cnt); + TEST_ASSERT(header_cnt == 5, "Expected 5 length got %" PRIusz "", + header_cnt); rd_kafka_header_add(hdrs, "multi", -1, "multi5", -1); header_cnt = rd_kafka_header_cnt(hdrs); - TEST_ASSERT(header_cnt == 6, - "Expected 6 length got %"PRIusz"", header_cnt); + TEST_ASSERT(header_cnt == 6, "Expected 6 length got %" PRIusz "", + header_cnt); /* test get_last() */ err = rd_kafka_header_get_last(hdrs, "multi", &value, &size); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); TEST_ASSERT(size == strlen("multi5") && - !strcmp((const char *)value, "multi5"), - "expected 'multi5', not '%s'", - (const char *)value); + !strcmp((const char *)value, "multi5"), + "expected 'multi5', not '%s'", (const char *)value); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ 
-321,18 +311,12 @@ static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, /** * @brief Second on_send() interceptor */ -static rd_kafka_resp_err_t on_send2 (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_send2(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "null", NULL }, - { "empty", "" }, - { "send1", "1" }, - { "multi", "multi5" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, {"null", NULL}, {"empty", ""}, + {"send1", "1"}, {"multi", "multi5"}, {NULL}}; expect_check(__FUNCTION__, expected, rkmessage); @@ -343,16 +327,18 @@ static rd_kafka_resp_err_t on_send2 (rd_kafka_t *rk, * @brief on_new() interceptor to set up message interceptors * from rd_kafka_new(). */ -static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send1, NULL); rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send2, NULL); return RD_KAFKA_RESP_ERR_NO_ERROR; } -int main_0072_headers_ut (int argc, char **argv) { +int main_0072_headers_ut(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 0); rd_kafka_t *rk; rd_kafka_conf_t *conf; @@ -370,25 +356,22 @@ int main_0072_headers_ut (int argc, char **argv) { rk = test_create_handle(RD_KAFKA_PRODUCER, conf); /* First message is without headers (negative testing) */ - i = 0; + i = 0; err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_END); - TEST_ASSERT(!err, - "producev() failed: %s", rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), 
RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() failed: %s", rd_kafka_err2str(err)); exp_msgid++; - for (i = 1 ; i < msgcnt ; i++, exp_msgid++) { + for (i = 1; i < msgcnt; i++, exp_msgid++) { /* Use headers list on one message */ if (i == 3) { rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4); header_cnt = rd_kafka_header_cnt(hdrs); TEST_ASSERT(header_cnt == 0, - "Expected 0 length got %"PRIusz"", header_cnt); + "Expected 0 length got %" PRIusz "", + header_cnt); rd_kafka_headers_t *copied; @@ -396,7 +379,8 @@ int main_0072_headers_ut (int argc, char **argv) { rd_kafka_header_add(hdrs, "static", -1, "hey", -1); rd_kafka_header_add(hdrs, "multi", -1, "multi1", -1); rd_kafka_header_add(hdrs, "multi", -1, "multi2", 6); - rd_kafka_header_add(hdrs, "multi", -1, "multi3", strlen("multi3")); + rd_kafka_header_add(hdrs, "multi", -1, "multi3", + strlen("multi3")); rd_kafka_header_add(hdrs, "null", -1, NULL, 0); /* Make a copy of the headers to verify copy() */ @@ -404,7 +388,8 @@ int main_0072_headers_ut (int argc, char **argv) { header_cnt = rd_kafka_header_cnt(hdrs); TEST_ASSERT(header_cnt == 6, - "Expected 6 length got %"PRIusz"", header_cnt); + "Expected 6 length got %" PRIusz "", + header_cnt); rd_kafka_headers_destroy(hdrs); @@ -413,14 +398,12 @@ int main_0072_headers_ut (int argc, char **argv) { /* Try unsupported _V_HEADER() and _V_HEADERS() mix, * must fail with CONFLICT */ err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_HEADER("will_be_removed", "yep", -1), - RD_KAFKA_V_HEADERS(copied), - RD_KAFKA_V_HEADER("empty", "", 0), - RD_KAFKA_V_END); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_HEADER("will_be_removed", "yep", -1), + RD_KAFKA_V_HEADERS(copied), + RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END); 
TEST_ASSERT(err == RD_KAFKA_RESP_ERR__CONFLICT, "producev(): expected CONFLICT, got %s", rd_kafka_err2str(err)); @@ -428,31 +411,28 @@ int main_0072_headers_ut (int argc, char **argv) { /* Proper call using only _V_HEADERS() */ rd_kafka_header_add(copied, "empty", -1, "", -1); err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_HEADERS(copied), - RD_KAFKA_V_END); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_HEADERS(copied), RD_KAFKA_V_END); TEST_ASSERT(!err, "producev() failed: %s", rd_kafka_err2str(err)); } else { err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)), - RD_KAFKA_V_HEADER("static", "hey", -1), - RD_KAFKA_V_HEADER("multi", "multi1", -1), - RD_KAFKA_V_HEADER("multi", "multi2", 6), - RD_KAFKA_V_HEADER("multi", "multi3", strlen("multi3")), - RD_KAFKA_V_HEADER("null", NULL, 0), - RD_KAFKA_V_HEADER("empty", "", 0), - RD_KAFKA_V_END); - TEST_ASSERT(!err, - "producev() failed: %s", rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)), + RD_KAFKA_V_HEADER("static", "hey", -1), + RD_KAFKA_V_HEADER("multi", "multi1", -1), + RD_KAFKA_V_HEADER("multi", "multi2", 6), + RD_KAFKA_V_HEADER("multi", "multi3", + strlen("multi3")), + RD_KAFKA_V_HEADER("null", NULL, 0), + RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() failed: %s", + rd_kafka_err2str(err)); } } diff --git a/tests/0073-headers.c b/tests/0073-headers.c index fb7644c437..e7e5c4074d 100644 --- a/tests/0073-headers.c +++ b/tests/0073-headers.c @@ -44,8 +44,10 @@ struct expect { -static void expect_check (const char *what, const struct expect *expected, 
- rd_kafka_message_t *rkmessage, int is_const) { +static void expect_check(const char *what, + const struct expect *expected, + rd_kafka_message_t *rkmessage, + int is_const) { const struct expect *exp; rd_kafka_resp_err_t err; size_t idx = 0; @@ -56,7 +58,7 @@ static void expect_check (const char *what, const struct expect *expected, int msgid; if (rkmessage->len != sizeof(msgid)) - TEST_FAIL("%s: expected message len %"PRIusz" == sizeof(int)", + TEST_FAIL("%s: expected message len %" PRIusz " == sizeof(int)", what, rkmessage->len); memcpy(&msgid, rkmessage->payload, rkmessage->len); @@ -64,10 +66,11 @@ static void expect_check (const char *what, const struct expect *expected, if ((err = rd_kafka_message_headers(rkmessage, &hdrs))) { if (msgid == 0) { rd_kafka_resp_err_t err2; - TEST_SAYL(3, "%s: Msg #%d: no headers, good\n", - what, msgid); + TEST_SAYL(3, "%s: Msg #%d: no headers, good\n", what, + msgid); - err2 = rd_kafka_message_detach_headers(rkmessage, &hdrs); + err2 = + rd_kafka_message_detach_headers(rkmessage, &hdrs); TEST_ASSERT(err == err2, "expected detach_headers() error %s " "to match headers() error %s", @@ -86,22 +89,22 @@ static void expect_check (const char *what, const struct expect *expected, test_headers_dump(what, 3, hdrs); - for (idx = 0, exp = expected ; - !rd_kafka_header_get_all(hdrs, idx, &name, - (const void **)&value, &size) ; + for (idx = 0, exp = expected; !rd_kafka_header_get_all( + hdrs, idx, &name, (const void **)&value, &size); idx++, exp++) { - TEST_SAYL(3, "%s: Msg #%d: " - "Header #%"PRIusz": %s='%s' (expecting %s='%s')\n", + TEST_SAYL(3, + "%s: Msg #%d: " + "Header #%" PRIusz ": %s='%s' (expecting %s='%s')\n", what, msgid, idx, name, value ? value : "(NULL)", exp->name, exp->value ? 
exp->value : "(NULL)"); if (strcmp(name, exp->name)) - TEST_FAIL("%s: Msg #%d: " - "Expected header %s at idx #%"PRIusz - ", not '%s' (%"PRIusz")", - what, msgid, exp->name, idx, name, - strlen(name)); + TEST_FAIL( + "%s: Msg #%d: " + "Expected header %s at idx #%" PRIusz + ", not '%s' (%" PRIusz ")", + what, msgid, exp->name, idx, name, strlen(name)); if (!strcmp(name, "msgid")) { int vid; @@ -109,10 +112,11 @@ static void expect_check (const char *what, const struct expect *expected, /* Special handling: compare msgid header value * to message body, should be identical */ if (size != rkmessage->len || size != sizeof(int)) - TEST_FAIL("%s: " - "Expected msgid/int-sized payload " - "%"PRIusz", got %"PRIusz, - what, size, rkmessage->len); + TEST_FAIL( + "%s: " + "Expected msgid/int-sized payload " + "%" PRIusz ", got %" PRIusz, + what, size, rkmessage->len); /* Copy to avoid unaligned access (by cast) */ memcpy(&vid, value, size); @@ -122,8 +126,8 @@ static void expect_check (const char *what, const struct expect *expected, what, vid, msgid); if (exp_msgid != vid) - TEST_FAIL("%s: Expected msgid %d, not %d", - what, exp_msgid, vid); + TEST_FAIL("%s: Expected msgid %d, not %d", what, + exp_msgid, vid); continue; } @@ -140,8 +144,9 @@ static void expect_check (const char *what, const struct expect *expected, what, exp->name); TEST_ASSERT(size == strlen(exp->value), - "%s: Expected size %"PRIusz" for %s, " - "not %"PRIusz, + "%s: Expected size %" PRIusz + " for %s, " + "not %" PRIusz, what, strlen(exp->value), exp->name, size); TEST_ASSERT(value[size] == '\0', @@ -166,8 +171,7 @@ static void expect_check (const char *what, const struct expect *expected, rd_kafka_headers_t *dhdrs; err = rd_kafka_message_detach_headers(rkmessage, &dhdrs); - TEST_ASSERT(!err, - "detach_headers() should not fail, got %s", + TEST_ASSERT(!err, "detach_headers() should not fail, got %s", rd_kafka_err2str(err)); TEST_ASSERT(hdrs == dhdrs); @@ -177,48 +181,40 @@ static void expect_check (const 
char *what, const struct expect *expected, TEST_ASSERT(hdrs != dhdrs); rd_kafka_headers_destroy(dhdrs); - expect_check("post_detach_headers", expected, - rkmessage, is_const); - } + expect_check("post_detach_headers", expected, rkmessage, + is_const); + } } /** * @brief Final (as in no more header modifications) message check. */ -static void msg_final_check (const char *what, - rd_kafka_message_t *rkmessage, int is_const) { +static void +msg_final_check(const char *what, rd_kafka_message_t *rkmessage, int is_const) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "null", NULL }, - { "empty", "" }, - { "send1", "1" }, - { "multi", "multi5" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, {"null", NULL}, {"empty", ""}, + {"send1", "1"}, {"multi", "multi5"}, {NULL}}; expect_check(what, expected, rkmessage, is_const); exp_msgid++; - - } /** * @brief Handle consumed message, must be identical to dr_msg_cb */ -static void handle_consumed_msg (rd_kafka_message_t *rkmessage) { +static void handle_consumed_msg(rd_kafka_message_t *rkmessage) { msg_final_check(__FUNCTION__, rkmessage, 0); } /** * @brief Delivery report callback */ -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { - TEST_ASSERT(!rkmessage->err, - "Message delivery failed: %s", +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { + TEST_ASSERT(!rkmessage->err, "Message delivery failed: %s", rd_kafka_err2str(rkmessage->err)); msg_final_check(__FUNCTION__, (rd_kafka_message_t *)rkmessage, 1); @@ -228,19 +224,17 @@ static void dr_msg_cb (rd_kafka_t *rk, /** * @brief First on_send() interceptor */ -static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_send1(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { const struct expect expected[] = { 
- { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "multi", "multi1" }, - { "multi", "multi2" }, - { "multi", "multi3" }, - { "null", NULL }, - { "empty", "" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, + {"multi", "multi1"}, + {"multi", "multi2"}, + {"multi", "multi3"}, + {"null", NULL}, + {"empty", ""}, + {NULL}}; rd_kafka_headers_t *hdrs; rd_kafka_resp_err_t err; @@ -262,18 +256,12 @@ static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, /** * @brief Second on_send() interceptor */ -static rd_kafka_resp_err_t on_send2 (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_send2(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "null", NULL }, - { "empty", "" }, - { "send1", "1" }, - { "multi", "multi5" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, {"null", NULL}, {"empty", ""}, + {"send1", "1"}, {"multi", "multi5"}, {NULL}}; expect_check(__FUNCTION__, expected, rkmessage, 0); @@ -284,16 +272,18 @@ static rd_kafka_resp_err_t on_send2 (rd_kafka_t *rk, * @brief on_new() interceptor to set up message interceptors * from rd_kafka_new(). 
*/ -static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send1, NULL); rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send2, NULL); return RD_KAFKA_RESP_ERR_NO_ERROR; } -static void do_produce (const char *topic, int msgcnt) { +static void do_produce(const char *topic, int msgcnt) { rd_kafka_t *rk; rd_kafka_conf_t *conf; int i; @@ -308,35 +298,28 @@ static void do_produce (const char *topic, int msgcnt) { rk = test_create_handle(RD_KAFKA_PRODUCER, conf); /* First message is without headers (negative testing) */ - i = 0; + i = 0; err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_END); - TEST_ASSERT(!err, - "producev() failed: %s", rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() failed: %s", rd_kafka_err2str(err)); exp_msgid++; - for (i = 1 ; i < msgcnt ; i++, exp_msgid++) { + for (i = 1; i < msgcnt; i++, exp_msgid++) { err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)), - RD_KAFKA_V_HEADER("static", "hey", -1), - RD_KAFKA_V_HEADER("multi", "multi1", -1), - RD_KAFKA_V_HEADER("multi", "multi2", 6), - RD_KAFKA_V_HEADER("multi", "multi3", strlen("multi3")), - RD_KAFKA_V_HEADER("null", NULL, 0), - RD_KAFKA_V_HEADER("empty", "", 0), - RD_KAFKA_V_END); - TEST_ASSERT(!err, - "producev() failed: %s", rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), 
RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)), + RD_KAFKA_V_HEADER("static", "hey", -1), + RD_KAFKA_V_HEADER("multi", "multi1", -1), + RD_KAFKA_V_HEADER("multi", "multi2", 6), + RD_KAFKA_V_HEADER("multi", "multi3", strlen("multi3")), + RD_KAFKA_V_HEADER("null", NULL, 0), + RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() failed: %s", + rd_kafka_err2str(err)); } /* Reset expected message id for dr */ @@ -348,7 +331,7 @@ static void do_produce (const char *topic, int msgcnt) { rd_kafka_destroy(rk); } -static void do_consume (const char *topic, int msgcnt) { +static void do_consume(const char *topic, int msgcnt) { rd_kafka_t *rk; rd_kafka_topic_partition_list_t *parts; @@ -356,7 +339,7 @@ static void do_consume (const char *topic, int msgcnt) { parts = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(parts, topic, 0)->offset = - RD_KAFKA_OFFSET_BEGINNING; + RD_KAFKA_OFFSET_BEGINNING; test_consumer_assign("assign", rk, parts); @@ -372,10 +355,10 @@ static void do_consume (const char *topic, int msgcnt) { continue; if (rkm->err) - TEST_FAIL("consume error while expecting msgid %d/%d: " - "%s", - exp_msgid, msgcnt, - rd_kafka_message_errstr(rkm)); + TEST_FAIL( + "consume error while expecting msgid %d/%d: " + "%s", + exp_msgid, msgcnt, rd_kafka_message_errstr(rkm)); handle_consumed_msg(rkm); @@ -387,9 +370,9 @@ static void do_consume (const char *topic, int msgcnt) { } -int main_0073_headers (int argc, char **argv) { +int main_0073_headers(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1); - const int msgcnt = 10; + const int msgcnt = 10; do_produce(topic, msgcnt); do_consume(topic, msgcnt); diff --git a/tests/0074-producev.c b/tests/0074-producev.c index 09a64282a3..544a847348 100644 --- a/tests/0074-producev.c +++ b/tests/0074-producev.c @@ -37,7 +37,7 @@ * @brief Verify #1478: 
The internal shared rkt reference was not destroyed * when producev() failed. */ -static void do_test_srkt_leak (void) { +static void do_test_srkt_leak(void) { rd_kafka_conf_t *conf; char buf[2000]; rd_kafka_t *rk; @@ -50,28 +50,27 @@ static void do_test_srkt_leak (void) { rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("test"), + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("test"), RD_KAFKA_V_VALUE(buf, sizeof(buf)), RD_KAFKA_V_END); TEST_ASSERT(err == RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, "expected MSG_SIZE_TOO_LARGE, not %s", rd_kafka_err2str(err)); - vus[0].vtype = RD_KAFKA_VTYPE_TOPIC; - vus[0].u.cstr = "test"; - vus[1].vtype = RD_KAFKA_VTYPE_VALUE; - vus[1].u.mem.ptr = buf; - vus[1].u.mem.size = sizeof(buf); - vus[2].vtype = RD_KAFKA_VTYPE_HEADER; + vus[0].vtype = RD_KAFKA_VTYPE_TOPIC; + vus[0].u.cstr = "test"; + vus[1].vtype = RD_KAFKA_VTYPE_VALUE; + vus[1].u.mem.ptr = buf; + vus[1].u.mem.size = sizeof(buf); + vus[2].vtype = RD_KAFKA_VTYPE_HEADER; vus[2].u.header.name = "testheader"; - vus[2].u.header.val = "test value"; + vus[2].u.header.val = "test value"; vus[2].u.header.size = -1; error = rd_kafka_produceva(rk, vus, 3); TEST_ASSERT(error, "expected failure"); TEST_ASSERT(rd_kafka_error_code(error) == - RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, + RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, "expected MSG_SIZE_TOO_LARGE, not %s", rd_kafka_error_string(error)); TEST_SAY("produceva() error (expected): %s\n", @@ -82,7 +81,7 @@ static void do_test_srkt_leak (void) { } -int main_0074_producev (int argc, char **argv) { +int main_0074_producev(int argc, char **argv) { do_test_srkt_leak(); return 0; } diff --git a/tests/0075-retry.c b/tests/0075-retry.c index 8606de438b..7e1e4f0f58 100644 --- a/tests/0075-retry.c +++ b/tests/0075-retry.c @@ -42,24 +42,24 @@ * reject all the rest (connection refused) to make sure we're only * playing with one single broker for this test. 
*/ static struct { - mtx_t lock; - cnd_t cnd; + mtx_t lock; + cnd_t cnd; sockem_t *skm; - thrd_t thrd; + thrd_t thrd; struct { - int64_t ts_at; /* to ctrl thread: at this time, set delay */ - int delay; - int ack; /* from ctrl thread: new delay acked */ + int64_t ts_at; /* to ctrl thread: at this time, set delay */ + int delay; + int ack; /* from ctrl thread: new delay acked */ } cmd; struct { - int64_t ts_at; /* to ctrl thread: at this time, set delay */ - int delay; + int64_t ts_at; /* to ctrl thread: at this time, set delay */ + int delay; } next; - int term; + int term; } ctrl; -static int ctrl_thrd_main (void *arg) { +static int ctrl_thrd_main(void *arg) { mtx_lock(&ctrl.lock); @@ -71,21 +71,21 @@ static int ctrl_thrd_main (void *arg) { if (ctrl.cmd.ts_at) { ctrl.next.ts_at = ctrl.cmd.ts_at; ctrl.next.delay = ctrl.cmd.delay; - ctrl.cmd.ts_at = 0; - ctrl.cmd.ack = 1; - printf(_C_CYA "## %s: sockem: " + ctrl.cmd.ts_at = 0; + ctrl.cmd.ack = 1; + printf(_C_CYA + "## %s: sockem: " "receieved command to set delay " "to %d in %dms\n" _C_CLR, - __FILE__, - ctrl.next.delay, + __FILE__, ctrl.next.delay, (int)(ctrl.next.ts_at - test_clock()) / 1000); - } now = test_clock(); if (ctrl.next.ts_at && now > ctrl.next.ts_at) { assert(ctrl.skm); - printf(_C_CYA "## %s: " + printf(_C_CYA + "## %s: " "sockem: setting socket delay to %d\n" _C_CLR, __FILE__, ctrl.next.delay); sockem_set(ctrl.skm, "delay", ctrl.next.delay, NULL); @@ -103,7 +103,7 @@ static int ctrl_thrd_main (void *arg) { * @brief Sockem connect, called from **internal librdkafka thread** through * librdkafka's connect_cb */ -static int connect_cb (struct test *test, sockem_t *skm, const char *id) { +static int connect_cb(struct test *test, sockem_t *skm, const char *id) { mtx_lock(&ctrl.lock); if (ctrl.skm) { @@ -121,8 +121,8 @@ static int connect_cb (struct test *test, sockem_t *skm, const char *id) { return 0; } -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int 
+is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. * SASL auther will think a connection-down even in the auth @@ -139,13 +139,13 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, /** * @brief Set socket delay to kick in after \p after ms */ -static void set_delay (int after, int delay) { +static void set_delay(int after, int delay) { TEST_SAY("Set delay to %dms (after %dms)\n", delay, after); mtx_lock(&ctrl.lock); - ctrl.cmd.ts_at = test_clock() + (after*1000); + ctrl.cmd.ts_at = test_clock() + (after * 1000); ctrl.cmd.delay = delay; - ctrl.cmd.ack = 0; + ctrl.cmd.ack = 0; cnd_broadcast(&ctrl.cnd); /* Wait for ack from sockem thread */ @@ -160,7 +160,7 @@ static void set_delay (int after, int delay) { * @brief Test that Metadata requests are retried properly when * timing out due to high broker rtt. */ -static void do_test_low_socket_timeout (const char *topic) { +static void do_test_low_socket_timeout(const char *topic) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_t *rkt; @@ -181,10 +181,10 @@ static void do_test_low_socket_timeout (const char *topic) { * the way of our test */ test_conf_set(conf, "api.version.request", "false"); test_socket_enable(conf); - test_curr->connect_cb = connect_cb; + test_curr->connect_cb = connect_cb; test_curr->is_fatal_cb = is_fatal_cb; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); rkt = test_create_producer_topic(rk, topic, NULL); TEST_SAY("Waiting for sockem connect..\n"); @@ -193,8 +193,9 @@ static void do_test_low_socket_timeout (const char *topic) { cnd_wait(&ctrl.cnd, &ctrl.lock); mtx_unlock(&ctrl.lock); - TEST_SAY("Connected, fire off a undelayed metadata() to " - "make sure connection is up\n"); + TEST_SAY( + "Connected, fire off a undelayed metadata() to " + "make sure connection is up\n"); err = rd_kafka_metadata(rk, 0, rkt, &md, 
tmout_multip(2000)); TEST_ASSERT(!err, "metadata(undelayed) failed: %s", @@ -208,15 +209,19 @@ static void do_test_low_socket_timeout (const char *topic) { /* After two retries, remove the delay, the third retry * should kick in and work. */ - set_delay(((1000 /*socket.timeout.ms*/ + - 5000 /*retry.backoff.ms*/) * 2) - 2000, 0); - - TEST_SAY("Calling metadata() again which should succeed after " - "3 internal retries\n"); + set_delay( + ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) - + 2000, + 0); + + TEST_SAY( + "Calling metadata() again which should succeed after " + "3 internal retries\n"); /* Metadata should be returned after the third retry */ - err = rd_kafka_metadata(rk, 0, rkt, &md, - ((1000 /*socket.timeout.ms*/ + - 5000 /*retry.backoff.ms*/) * 2) + 5000); + err = rd_kafka_metadata( + rk, 0, rkt, &md, + ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) + + 5000); TEST_SAY("metadata() returned %s\n", rd_kafka_err2str(err)); TEST_ASSERT(!err, "metadata(undelayed) failed: %s", rd_kafka_err2str(err)); @@ -235,7 +240,7 @@ static void do_test_low_socket_timeout (const char *topic) { mtx_destroy(&ctrl.lock); } -int main_0075_retry (int argc, char **argv) { +int main_0075_retry(int argc, char **argv) { const char *topic = test_mk_topic_name("0075_retry", 1); do_test_low_socket_timeout(topic); diff --git a/tests/0076-produce_retry.c b/tests/0076-produce_retry.c index b6663c291d..16d6f602c6 100644 --- a/tests/0076-produce_retry.c +++ b/tests/0076-produce_retry.c @@ -32,8 +32,8 @@ #include #include -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. 
* SASL auther will think a connection-down even in the auth @@ -65,10 +65,10 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, * * @param should_fail If true, do negative testing which should fail. */ -static void do_test_produce_retries (const char *topic, - int idempotence, - int try_fail, - int should_fail) { +static void do_test_produce_retries(const char *topic, + int idempotence, + int try_fail, + int should_fail) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_t *rkt; @@ -77,7 +77,8 @@ static void do_test_produce_retries (const char *topic, int msgcnt = 1; sockem_ctrl_t ctrl; - TEST_SAY(_C_BLU "Test produce retries " + TEST_SAY(_C_BLU + "Test produce retries " "(idempotence=%d,try_fail=%d,should_fail=%d)\n", idempotence, try_fail, should_fail); @@ -86,10 +87,10 @@ static void do_test_produce_retries (const char *topic, test_conf_init(&conf, NULL, 60); if (should_fail && - !strcmp(test_conf_get(conf, "enable.sparse.connections"), - "true")) { + !strcmp(test_conf_get(conf, "enable.sparse.connections"), "true")) { rd_kafka_conf_destroy(conf); - TEST_SAY(_C_YEL "Sparse connections enabled: " + TEST_SAY(_C_YEL + "Sparse connections enabled: " "skipping connection-timing related test\n"); return; } @@ -99,8 +100,9 @@ static void do_test_produce_retries (const char *topic, test_conf_set(conf, "socket.timeout.ms", "1000"); /* Avoid disconnects on request timeouts */ test_conf_set(conf, "socket.max.fails", "100"); - test_conf_set(conf, "enable.idempotence", idempotence?"true":"false"); - test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; + test_conf_set(conf, "enable.idempotence", + idempotence ? 
"true" : "false"); + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_PERSISTED; if (!try_fail) { test_conf_set(conf, "retries", "5"); @@ -112,8 +114,10 @@ static void do_test_produce_retries (const char *topic, else test_conf_set(conf, "retries", "0"); if (should_fail) { - test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; - test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; + test_curr->exp_dr_err = + RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; + test_curr->exp_dr_status = + RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; } } test_conf_set(conf, "retry.backoff.ms", "5000"); @@ -121,7 +125,7 @@ static void do_test_produce_retries (const char *topic, test_socket_enable(conf); test_curr->is_fatal_cb = is_fatal_cb; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); rkt = test_create_producer_topic(rk, topic, NULL); /* Create the topic to make sure connections are up and ready. */ @@ -133,12 +137,14 @@ static void do_test_produce_retries (const char *topic, /* After two retries, remove the delay, the third retry * should kick in and work. 
*/ - sockem_ctrl_set_delay(&ctrl, - ((1000 /*socket.timeout.ms*/ + - 5000 /*retry.backoff.ms*/) * 2) - 2000, 0); + sockem_ctrl_set_delay( + &ctrl, + ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) - + 2000, + 0); - test_produce_msgs(rk, rkt, testid, RD_KAFKA_PARTITION_UA, - 0, msgcnt, NULL, 0); + test_produce_msgs(rk, rkt, testid, RD_KAFKA_PARTITION_UA, 0, msgcnt, + NULL, 0); rd_kafka_topic_destroy(rkt); @@ -151,7 +157,8 @@ static void do_test_produce_retries (const char *topic, sockem_ctrl_term(&ctrl); - TEST_SAY(_C_GRN "Test produce retries " + TEST_SAY(_C_GRN + "Test produce retries " "(idempotence=%d,try_fail=%d,should_fail=%d): PASS\n", idempotence, try_fail, should_fail); } @@ -159,7 +166,6 @@ static void do_test_produce_retries (const char *topic, - /** * @brief Simple on_request_sent interceptor that simply disconnects * the socket when first ProduceRequest is seen. @@ -168,15 +174,15 @@ static void do_test_produce_retries (const char *topic, */ static mtx_t produce_disconnect_lock; static int produce_disconnects = 0; -static rd_kafka_resp_err_t on_request_sent (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - void *ic_opaque) { +static rd_kafka_resp_err_t on_request_sent(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + void *ic_opaque) { /* Ignore if not a ProduceRequest */ if (ApiKey != 0) @@ -198,8 +204,9 @@ static rd_kafka_resp_err_t on_request_sent (rd_kafka_t *rk, * socket recv buffer to make sure librdkafka does not see * the response. 
*/ while ((r = recv(sockfd, buf, sizeof(buf), 0)) > 0) - printf(_C_CYA "%s:%d: " - "purged %"PRIdsz" bytes from socket\n", + printf(_C_CYA + "%s:%d: " + "purged %" PRIdsz " bytes from socket\n", __FILE__, __LINE__, r); produce_disconnects = 1; } @@ -209,13 +216,13 @@ static rd_kafka_resp_err_t on_request_sent (rd_kafka_t *rk, } -static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { return rd_kafka_interceptor_add_on_request_sent( - rk, "disconnect_on_send", - on_request_sent, NULL); + rk, "disconnect_on_send", on_request_sent, NULL); } /** @@ -224,10 +231,10 @@ static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, * * @param should_fail If true, do negative testing which should fail. */ -static void do_test_produce_retries_disconnect (const char *topic, - int idempotence, - int try_fail, - int should_fail) { +static void do_test_produce_retries_disconnect(const char *topic, + int idempotence, + int try_fail, + int should_fail) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_t *rkt; @@ -236,7 +243,8 @@ static void do_test_produce_retries_disconnect (const char *topic, int msgcnt = 1; int partition_cnt; - TEST_SAY(_C_BLU "Test produce retries by disconnect " + TEST_SAY(_C_BLU + "Test produce retries by disconnect " "(idempotence=%d,try_fail=%d,should_fail=%d)\n", idempotence, try_fail, should_fail); @@ -246,9 +254,11 @@ static void do_test_produce_retries_disconnect (const char *topic, test_conf_init(&conf, NULL, 60); rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - test_conf_set(conf, "socket.timeout.ms", test_quick ? "3000":"10000"); - test_conf_set(conf, "message.timeout.ms", test_quick ? 
"9000":"30000"); - test_conf_set(conf, "enable.idempotence", idempotence?"true":"false"); + test_conf_set(conf, "socket.timeout.ms", test_quick ? "3000" : "10000"); + test_conf_set(conf, "message.timeout.ms", + test_quick ? "9000" : "30000"); + test_conf_set(conf, "enable.idempotence", + idempotence ? "true" : "false"); if (!try_fail) { test_conf_set(conf, "retries", "1"); } else { @@ -264,7 +274,7 @@ static void do_test_produce_retries_disconnect (const char *topic, rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer", on_new_producer, NULL); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); rkt = test_create_producer_topic(rk, topic, NULL); err = test_produce_sync(rk, rkt, testid, 0); @@ -284,8 +294,8 @@ static void do_test_produce_retries_disconnect (const char *topic, } mtx_lock(&produce_disconnect_lock); - TEST_ASSERT(produce_disconnects == 1, - "expected %d disconnects, not %d", 1, produce_disconnects); + TEST_ASSERT(produce_disconnects == 1, "expected %d disconnects, not %d", + 1, produce_disconnects); mtx_unlock(&produce_disconnect_lock); @@ -304,16 +314,17 @@ static void do_test_produce_retries_disconnect (const char *topic, * count (-1). */ should_fail ? 
-1 : msgcnt, NULL); - TEST_SAY(_C_GRN "Test produce retries by disconnect " + TEST_SAY(_C_GRN + "Test produce retries by disconnect " "(idempotence=%d,try_fail=%d,should_fail=%d): PASS\n", idempotence, try_fail, should_fail); } -int main_0076_produce_retry (int argc, char **argv) { +int main_0076_produce_retry(int argc, char **argv) { const char *topic = test_mk_topic_name("0076_produce_retry", 1); const rd_bool_t has_idempotence = - test_broker_version >= TEST_BRKVER(0,11,0,0); + test_broker_version >= TEST_BRKVER(0, 11, 0, 0); #if WITH_SOCKEM if (has_idempotence) { @@ -337,5 +348,3 @@ int main_0076_produce_retry (int argc, char **argv) { return 0; } - - diff --git a/tests/0077-compaction.c b/tests/0077-compaction.c index da4791c814..3f4bfe7718 100644 --- a/tests/0077-compaction.c +++ b/tests/0077-compaction.c @@ -48,17 +48,16 @@ * @brief Get low watermark in partition, we use this see if compaction * has kicked in. */ -static int64_t get_low_wmark (rd_kafka_t *rk, const char *topic, - int32_t partition) { +static int64_t +get_low_wmark(rd_kafka_t *rk, const char *topic, int32_t partition) { rd_kafka_resp_err_t err; int64_t low, high; - err = rd_kafka_query_watermark_offsets(rk, topic, partition, - &low, &high, - tmout_multip(10000)); + err = rd_kafka_query_watermark_offsets(rk, topic, partition, &low, + &high, tmout_multip(10000)); - TEST_ASSERT(!err, "query_warmark_offsets(%s, %d) failed: %s", - topic, (int)partition, rd_kafka_err2str(err)); + TEST_ASSERT(!err, "query_warmark_offsets(%s, %d) failed: %s", topic, + (int)partition, rd_kafka_err2str(err)); return low; } @@ -67,22 +66,25 @@ static int64_t get_low_wmark (rd_kafka_t *rk, const char *topic, /** * @brief Wait for compaction by checking for * partition low-watermark increasing */ -static void wait_compaction (rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t low_offset, - int timeout_ms) { - int64_t low = -1; +static void wait_compaction(rd_kafka_t *rk, + const char *topic, + int32_t 
partition, + int64_t low_offset, + int timeout_ms) { + int64_t low = -1; int64_t ts_start = test_clock(); - TEST_SAY("Waiting for compaction to kick in and increase the " - "Low watermark offset from %"PRId64" on %s [%"PRId32"]\n", - low_offset, topic, partition); + TEST_SAY( + "Waiting for compaction to kick in and increase the " + "Low watermark offset from %" PRId64 " on %s [%" PRId32 "]\n", + low_offset, topic, partition); while (1) { low = get_low_wmark(rk, topic, partition); - TEST_SAY("Low watermark offset for %s [%"PRId32"] is " - "%"PRId64" (want > %"PRId64")\n", + TEST_SAY("Low watermark offset for %s [%" PRId32 + "] is " + "%" PRId64 " (want > %" PRId64 ")\n", topic, partition, low, low_offset); if (low > low_offset) @@ -95,9 +97,11 @@ static void wait_compaction (rd_kafka_t *rk, } } -static void produce_compactable_msgs (const char *topic, int32_t partition, - uint64_t testid, - int msgcnt, size_t msgsize) { +static void produce_compactable_msgs(const char *topic, + int32_t partition, + uint64_t testid, + int msgcnt, + size_t msgsize) { rd_kafka_t *rk; rd_kafka_conf_t *conf; int i; @@ -113,8 +117,10 @@ static void produce_compactable_msgs (const char *topic, int32_t partition, val = calloc(1, msgsize); - TEST_SAY("Producing %d messages (total of %"PRIusz" bytes) of " - "compactable messages\n", msgcnt, (size_t)msgcnt*msgsize); + TEST_SAY("Producing %d messages (total of %" PRIusz + " bytes) of " + "compactable messages\n", + msgcnt, (size_t)msgcnt * msgsize); test_conf_init(&conf, NULL, 0); rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); @@ -124,11 +130,10 @@ static void produce_compactable_msgs (const char *topic, int32_t partition, rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - for (i = 0 ; i < msgcnt-1 ; i++) { - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), + for (i = 0; i < msgcnt - 1; i++) { + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_KEY(key, sizeof(key)-1), + 
RD_KAFKA_V_KEY(key, sizeof(key) - 1), RD_KAFKA_V_VALUE(val, msgsize), RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END); @@ -136,12 +141,10 @@ static void produce_compactable_msgs (const char *topic, int32_t partition, } /* Final message is the tombstone */ - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_KEY(key, sizeof(key)-1), - RD_KAFKA_V_OPAQUE(&msgcounter), - RD_KAFKA_V_END); + RD_KAFKA_V_KEY(key, sizeof(key) - 1), + RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END); TEST_ASSERT(!err, "producev(): %s", rd_kafka_err2str(err)); test_flush(rk, tmout_multip(10000)); @@ -154,37 +157,41 @@ static void produce_compactable_msgs (const char *topic, int32_t partition, -static void do_test_compaction (int msgs_per_key, const char *compression) { +static void do_test_compaction(int msgs_per_key, const char *compression) { const char *topic = test_mk_topic_name(__FILE__, 1); #define _KEY_CNT 4 - const char *keys[_KEY_CNT] = { "k1", "k2", "k3", NULL/*generate unique*/ }; - int msgcnt = msgs_per_key * _KEY_CNT; + const char *keys[_KEY_CNT] = {"k1", "k2", "k3", + NULL /*generate unique*/}; + int msgcnt = msgs_per_key * _KEY_CNT; rd_kafka_conf_t *conf; rd_kafka_t *rk; rd_kafka_topic_t *rkt; uint64_t testid; int32_t partition = 0; - int cnt = 0; + int cnt = 0; test_msgver_t mv; test_msgver_t mv_correct; - int msgcounter = 0; + int msgcounter = 0; const int fillcnt = 20; testid = test_id_generate(); - TEST_SAY(_C_MAG "Test compaction on topic %s with %s compression (%d messages)\n", - topic, compression ? 
compression : "no", msgcnt); - - test_kafka_topics("--create --topic \"%s\" " - "--partitions %d " - "--replication-factor 1 " - "--config cleanup.policy=compact " - "--config segment.ms=10000 " - "--config segment.bytes=10000 " - "--config min.cleanable.dirty.ratio=0.01 " - "--config delete.retention.ms=86400 " - "--config file.delete.delay.ms=10000", - topic, partition+1); + TEST_SAY( + _C_MAG + "Test compaction on topic %s with %s compression (%d messages)\n", + topic, compression ? compression : "no", msgcnt); + + test_kafka_topics( + "--create --topic \"%s\" " + "--partitions %d " + "--replication-factor 1 " + "--config cleanup.policy=compact " + "--config segment.ms=10000 " + "--config segment.bytes=10000 " + "--config min.cleanable.dirty.ratio=0.01 " + "--config delete.retention.ms=86400 " + "--config file.delete.delay.ms=10000", + topic, partition + 1); test_conf_init(&conf, NULL, 120); rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); @@ -194,7 +201,7 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { * to accumulate into a batch that will be rejected by the broker. 
*/ test_conf_set(conf, "message.max.bytes", "6000"); test_conf_set(conf, "linger.ms", "10"); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); rkt = rd_kafka_topic_new(rk, topic, NULL); /* The low watermark is not updated on message deletion(compaction) @@ -206,10 +213,10 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { test_msgver_init(&mv_correct, testid); TEST_SAY("Producing %d messages for %d keys\n", msgcnt, _KEY_CNT); - for (cnt = 0 ; cnt < msgcnt ; ) { + for (cnt = 0; cnt < msgcnt;) { int k; - for (k = 0 ; k < _KEY_CNT ; k++) { + for (k = 0; k < _KEY_CNT; k++) { rd_kafka_resp_err_t err; int is_last = cnt + _KEY_CNT >= msgcnt; /* Let keys[0] have some tombstones */ @@ -222,14 +229,14 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { size_t keysize; int64_t offset = fillcnt + cnt; - test_msg_fmt(rdk_msgid, sizeof(rdk_msgid), - testid, partition, cnt); + test_msg_fmt(rdk_msgid, sizeof(rdk_msgid), testid, + partition, cnt); if (is_tombstone) { - valp = NULL; + valp = NULL; valsize = 0; } else { - valp = rdk_msgid; + valp = rdk_msgid; valsize = strlen(valp); } @@ -247,32 +254,29 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { "Add to correct msgvec: " "msgid: %d: %s is_last=%d, " "is_tomb=%d\n", - cnt, (const char *)key, - is_last, is_tombstone); - test_msgver_add_msg00(__FUNCTION__, __LINE__, - rd_kafka_name(rk), - &mv_correct, testid, - topic, partition, - offset, -1, -1, 0, cnt); + cnt, (const char *)key, is_last, + is_tombstone); + test_msgver_add_msg00( + __FUNCTION__, __LINE__, rd_kafka_name(rk), + &mv_correct, testid, topic, partition, + offset, -1, -1, 0, cnt); } msgcounter++; err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_KEY(key, keysize), - RD_KAFKA_V_VALUE(valp, valsize), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_HEADER("rdk_msgid", 
rdk_msgid, -1), - /* msgcounter as msg_opaque is used - * by test delivery report callback to - * count number of messages. */ - RD_KAFKA_V_OPAQUE(&msgcounter), - RD_KAFKA_V_END); - TEST_ASSERT(!err, "producev(#%d) failed: %s", - cnt, rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_KEY(key, keysize), + RD_KAFKA_V_VALUE(valp, valsize), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_HEADER("rdk_msgid", rdk_msgid, -1), + /* msgcounter as msg_opaque is used + * by test delivery report callback to + * count number of messages. */ + RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev(#%d) failed: %s", cnt, + rd_kafka_err2str(err)); cnt++; } @@ -296,7 +300,7 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { * is not updated on compaction if the first segment is not deleted. * But it serves as a pause to let compaction kick in * which is triggered by the dummy produce above. */ - wait_compaction(rk, topic, partition, 0, 20*1000); + wait_compaction(rk, topic, partition, 0, 20 * 1000); TEST_SAY(_C_YEL "Verify messages after compaction\n"); /* After compaction we expect the following messages: @@ -305,7 +309,8 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { mv.msgid_hdr = "rdk_msgid"; test_consume_msgs_easy_mv(NULL, topic, -1, testid, 1, -1, NULL, &mv); test_msgver_verify_compare("post-compaction", &mv, &mv_correct, - TEST_MSGVER_BY_MSGID|TEST_MSGVER_BY_OFFSET); + TEST_MSGVER_BY_MSGID | + TEST_MSGVER_BY_OFFSET); test_msgver_clear(&mv); test_msgver_clear(&mv_correct); @@ -317,7 +322,7 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { compression ? 
compression : "no"); } -int main_0077_compaction (int argc, char **argv) { +int main_0077_compaction(int argc, char **argv) { if (!test_can_create_topics(1)) return 0; @@ -325,8 +330,9 @@ int main_0077_compaction (int argc, char **argv) { do_test_compaction(10, NULL); if (test_quick) { - TEST_SAY("Skipping further compaction tests " - "due to quick mode\n"); + TEST_SAY( + "Skipping further compaction tests " + "due to quick mode\n"); return 0; } diff --git a/tests/0078-c_from_cpp.cpp b/tests/0078-c_from_cpp.cpp index 58d7c662a6..41d6886cb9 100644 --- a/tests/0078-c_from_cpp.cpp +++ b/tests/0078-c_from_cpp.cpp @@ -38,57 +38,59 @@ extern "C" { - int main_0078_c_from_cpp (int argc, char **argv) { - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); +int main_0078_c_from_cpp(int argc, char **argv) { + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - std::string errstr; + std::string errstr; - if (conf->set("client.id", "myclient", errstr)) - Test::Fail("conf->set() failed: " + errstr); + if (conf->set("client.id", "myclient", errstr)) + Test::Fail("conf->set() failed: " + errstr); - RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); - if (!p) - Test::Fail("Failed to create Producer: " + errstr); + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); - delete conf; + delete conf; - /* - * Acquire rd_kafka_t and compare its name to the configured client.id - */ - rd_kafka_t *rk = p->c_ptr(); - if (!rk) - Test::Fail("Failed to acquire c_ptr"); + /* + * Acquire rd_kafka_t and compare its name to the configured client.id + */ + rd_kafka_t *rk = p->c_ptr(); + if (!rk) + Test::Fail("Failed to acquire c_ptr"); - std::string name = p->name(); - std::string c_name = rd_kafka_name(rk); + std::string name = p->name(); + std::string c_name = rd_kafka_name(rk); - Test::Say("Compare C name " + c_name + " to C++ name " + name + "\n"); - if (c_name != 
name) - Test::Fail("Expected C client name " + c_name + " to match C++ " + name); + Test::Say("Compare C name " + c_name + " to C++ name " + name + "\n"); + if (c_name != name) + Test::Fail("Expected C client name " + c_name + " to match C++ " + name); - /* - * Create topic object, acquire rd_kafka_topic_t and compare - * its topic name. - */ + /* + * Create topic object, acquire rd_kafka_topic_t and compare + * its topic name. + */ - RdKafka::Topic *topic = RdKafka::Topic::create(p, "mytopic", NULL, errstr); - if (!topic) - Test::Fail("Failed to create Topic: " + errstr); + RdKafka::Topic *topic = RdKafka::Topic::create(p, "mytopic", NULL, errstr); + if (!topic) + Test::Fail("Failed to create Topic: " + errstr); - rd_kafka_topic_t *rkt = topic->c_ptr(); - if (!rkt) - Test::Fail("Failed to acquire topic c_ptr"); + rd_kafka_topic_t *rkt = topic->c_ptr(); + if (!rkt) + Test::Fail("Failed to acquire topic c_ptr"); - std::string topicname = topic->name(); - std::string c_topicname = rd_kafka_topic_name(rkt); + std::string topicname = topic->name(); + std::string c_topicname = rd_kafka_topic_name(rkt); - Test::Say("Compare C topic " + c_topicname + " to C++ topic " + topicname + "\n"); - if (c_topicname != topicname) - Test::Fail("Expected C topic " + c_topicname + " to match C++ topic " + topicname); + Test::Say("Compare C topic " + c_topicname + " to C++ topic " + topicname + + "\n"); + if (c_topicname != topicname) + Test::Fail("Expected C topic " + c_topicname + " to match C++ topic " + + topicname); - delete topic; - delete p; + delete topic; + delete p; - return 0; - } + return 0; +} } diff --git a/tests/0079-fork.c b/tests/0079-fork.c index c1b6880927..506dd62a31 100644 --- a/tests/0079-fork.c +++ b/tests/0079-fork.c @@ -41,10 +41,12 @@ * in the child process, but it should not crash on destruction: #1674 */ -int main_0079_fork (int argc, char **argv) { +int main_0079_fork(int argc, char **argv) { #if __SANITIZE_ADDRESS__ - TEST_SKIP("AddressSanitizer is enabled: 
this test leaks memory (due to fork())\n"); + TEST_SKIP( + "AddressSanitizer is enabled: this test leaks memory (due to " + "fork())\n"); return 0; #endif #ifdef _WIN32 @@ -57,10 +59,8 @@ int main_0079_fork (int argc, char **argv) { rk = test_create_producer(); - rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("atopic"), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END); + rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("atopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); pid = fork(); TEST_ASSERT(pid != 1, "fork() failed: %s", strerror(errno)); @@ -70,10 +70,8 @@ int main_0079_fork (int argc, char **argv) { /* This call will enqueue the message on a queue * which is not served by any thread, but it should not crash */ - rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("atopic"), - RD_KAFKA_V_VALUE("hello", 5), - RD_KAFKA_V_END); + rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("atopic"), + RD_KAFKA_V_VALUE("hello", 5), RD_KAFKA_V_END); /* Don't crash on us */ rd_kafka_destroy(rk); @@ -85,8 +83,7 @@ int main_0079_fork (int argc, char **argv) { if (waitpid(pid, &status, 0) == -1) TEST_FAIL("waitpid(%d) failed: %s", (int)pid, strerror(errno)); - if (!WIFEXITED(status) || - WEXITSTATUS(status) != 0) + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) TEST_FAIL("child exited with status %d", WEXITSTATUS(status)); rd_kafka_destroy(rk); diff --git a/tests/0080-admin_ut.c b/tests/0080-admin_ut.c index 09b0301977..6f80154c07 100644 --- a/tests/0080-admin_ut.c +++ b/tests/0080-admin_ut.c @@ -33,7 +33,7 @@ * @brief Admin API local dry-run unit-tests. */ -#define MY_SOCKET_TIMEOUT_MS 100 +#define MY_SOCKET_TIMEOUT_MS 100 #define MY_SOCKET_TIMEOUT_MS_STR "100" @@ -46,10 +46,11 @@ static rd_kafka_event_t *last_event = NULL; * @brief The background event callback is called automatically * by librdkafka from a background thread. 
*/ -static void background_event_cb (rd_kafka_t *rk, rd_kafka_event_t *rkev, - void *opaque) { +static void +background_event_cb(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque) { mtx_lock(&last_event_lock); - TEST_ASSERT(!last_event, "Multiple events seen in background_event_cb " + TEST_ASSERT(!last_event, + "Multiple events seen in background_event_cb " "(existing %s, new %s)", rd_kafka_event_name(last_event), rd_kafka_event_name(rkev)); last_event = rkev; @@ -58,7 +59,7 @@ static void background_event_cb (rd_kafka_t *rk, rd_kafka_event_t *rkev, rd_sleep(1); } -static rd_kafka_event_t *wait_background_event_cb (void) { +static rd_kafka_event_t *wait_background_event_cb(void) { rd_kafka_event_t *rkev; mtx_lock(&last_event_lock); while (!(rkev = last_event)) @@ -76,15 +77,16 @@ static rd_kafka_event_t *wait_background_event_cb (void) { * * */ -static void do_test_CreateTopics (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int with_background_event_cb, - int with_options) { +static void do_test_CreateTopics(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_background_event_cb, + int with_options) { rd_kafka_queue_t *q; #define MY_NEW_TOPICS_CNT 6 rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT]; rd_kafka_AdminOptions_t *options = NULL; - int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; int i; char errstr[512]; const char *errstr2; @@ -105,18 +107,16 @@ static void do_test_CreateTopics (const char *what, * Construct NewTopic array with different properties for * different partitions. 
*/ - for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) { + for (i = 0; i < MY_NEW_TOPICS_CNT; i++) { const char *topic = test_mk_topic_name(__FUNCTION__, 1); - int num_parts = i * 51 + 1; - int num_replicas = jitter(1, MY_NEW_TOPICS_CNT-1); - int set_config = (i & 2); - int set_replicas = !(i % 1); + int num_parts = i * 51 + 1; + int num_replicas = jitter(1, MY_NEW_TOPICS_CNT - 1); + int set_config = (i & 2); + int set_replicas = !(i % 1); - new_topics[i] = rd_kafka_NewTopic_new(topic, - num_parts, - set_replicas ? -1 : - num_replicas, - NULL, 0); + new_topics[i] = rd_kafka_NewTopic_new( + topic, num_parts, set_replicas ? -1 : num_replicas, NULL, + 0); if (set_config) { /* @@ -128,9 +128,8 @@ static void do_test_CreateTopics (const char *what, "to verify that"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); - err = rd_kafka_NewTopic_set_config(new_topics[i], - "try.a.null.value", - NULL); + err = rd_kafka_NewTopic_set_config( + new_topics[i], "try.a.null.value", NULL); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); err = rd_kafka_NewTopic_set_config(new_topics[i], @@ -144,59 +143,60 @@ static void do_test_CreateTopics (const char *what, int32_t replicas[MY_NEW_TOPICS_CNT]; int j; - for (j = 0 ; j < num_replicas ; j++) + for (j = 0; j < num_replicas; j++) replicas[j] = j; /* * Set valid replica assignments */ - for (p = 0 ; p < num_parts ; p++) { + for (p = 0; p < num_parts; p++) { /* Try adding an existing out of order, * should fail */ if (p == 1) { - err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], p+1, - replicas, num_replicas, - errstr, sizeof(errstr)); - TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, - "%s", rd_kafka_err2str(err)); + err = + rd_kafka_NewTopic_set_replica_assignment( + new_topics[i], p + 1, replicas, + num_replicas, errstr, + sizeof(errstr)); + TEST_ASSERT( + err == + RD_KAFKA_RESP_ERR__INVALID_ARG, + "%s", rd_kafka_err2str(err)); } err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], p, - replicas, num_replicas, - 
errstr, sizeof(errstr)); + new_topics[i], p, replicas, num_replicas, + errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", errstr); } /* Try to add an existing partition, should fail */ err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], 0, - replicas, num_replicas, NULL, 0); - TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, - "%s", rd_kafka_err2str(err)); + new_topics[i], 0, replicas, num_replicas, NULL, 0); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, "%s", + rd_kafka_err2str(err)); } else { int32_t dummy_replicas[1] = {1}; /* Test invalid partition */ err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], num_parts+1, dummy_replicas, 1, - errstr, sizeof(errstr)); + new_topics[i], num_parts + 1, dummy_replicas, 1, + errstr, sizeof(errstr)); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, "%s: %s", rd_kafka_err2str(err), - err == RD_KAFKA_RESP_ERR_NO_ERROR ? - "" : errstr); + err == RD_KAFKA_RESP_ERR_NO_ERROR ? "" + : errstr); /* Setting replicas with with default replicas != -1 * is an error. */ err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], 0, dummy_replicas, 1, - errstr, sizeof(errstr)); + new_topics[i], 0, dummy_replicas, 1, errstr, + sizeof(errstr)); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, "%s: %s", rd_kafka_err2str(err), - err == RD_KAFKA_RESP_ERR_NO_ERROR ? - "" : errstr); + err == RD_KAFKA_RESP_ERR_NO_ERROR ? 
"" + : errstr); } } @@ -204,8 +204,8 @@ static void do_test_CreateTopics (const char *what, options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; - err = rd_kafka_AdminOptions_set_request_timeout( - options, exp_timeout, errstr, sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); my_opaque = (void *)123; @@ -214,8 +214,7 @@ static void do_test_CreateTopics (const char *what, TIMING_START(&timing, "CreateTopics"); TEST_SAY("Call CreateTopics, timeout is %dms\n", exp_timeout); - rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, - options, q); + rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); if (with_background_event_cb) { @@ -229,11 +228,9 @@ static void do_test_CreateTopics (const char *what, rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); } - TIMING_ASSERT_LATER(&timing, exp_timeout-100, exp_timeout+100); - TEST_ASSERT(rkev != NULL, "expected result in %dms", - exp_timeout); - TEST_SAY("CreateTopics: got %s in %.3fs\n", - rd_kafka_event_name(rkev), + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("CreateTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); /* Convert event to proper result */ @@ -246,19 +243,18 @@ static void do_test_CreateTopics (const char *what, my_opaque, opaque); /* Expecting error */ - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, "expected CreateTopics to return error %s, not %s (%s)", rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), - rd_kafka_err2str(err), - err ? errstr2 : "n/a"); + rd_kafka_err2str(err), err ? 
errstr2 : "n/a"); /* Attempt to extract topics anyway, should return NULL. */ restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt); TEST_ASSERT(!restopics && restopic_cnt == 0, - "expected no result_topics, got %p cnt %"PRIusz, - restopics, restopic_cnt); + "expected no result_topics, got %p cnt %" PRIusz, restopics, + restopic_cnt); rd_kafka_event_destroy(rkev); @@ -275,23 +271,21 @@ static void do_test_CreateTopics (const char *what, - - - /** * @brief DeleteTopics tests * * * */ -static void do_test_DeleteTopics (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int with_options) { +static void do_test_DeleteTopics(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options) { rd_kafka_queue_t *q; #define MY_DEL_TOPICS_CNT 4 rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT]; rd_kafka_AdminOptions_t *options = NULL; - int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; int i; char errstr[512]; const char *errstr2; @@ -308,16 +302,17 @@ static void do_test_DeleteTopics (const char *what, q = useq ? 
useq : rd_kafka_queue_new(rk); - for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) - del_topics[i] = rd_kafka_DeleteTopic_new(test_mk_topic_name(__FUNCTION__, 1)); + for (i = 0; i < MY_DEL_TOPICS_CNT; i++) + del_topics[i] = rd_kafka_DeleteTopic_new( + test_mk_topic_name(__FUNCTION__, 1)); if (with_options) { options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_DELETETOPICS); + rk, RD_KAFKA_ADMIN_OP_DELETETOPICS); exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; - err = rd_kafka_AdminOptions_set_request_timeout( - options, exp_timeout, errstr, sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); if (useq) { @@ -328,17 +323,16 @@ static void do_test_DeleteTopics (const char *what, TIMING_START(&timing, "DeleteTopics"); TEST_SAY("Call DeleteTopics, timeout is %dms\n", exp_timeout); - rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, - options, q); + rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); /* Poll result queue */ TIMING_START(&timing, "DeleteTopics.queue_poll"); rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); - TIMING_ASSERT_LATER(&timing, exp_timeout-100, exp_timeout+100); + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); - TEST_SAY("DeleteTopics: got %s in %.3fs\n", - rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + TEST_SAY("DeleteTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); /* Convert event to proper result */ res = rd_kafka_event_DeleteTopics_result(rkev); @@ -350,19 +344,18 @@ static void do_test_DeleteTopics (const char *what, my_opaque, opaque); /* Expecting error */ - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, 
"expected DeleteTopics to return error %s, not %s (%s)", rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), - rd_kafka_err2str(err), - err ? errstr2 : "n/a"); + rd_kafka_err2str(err), err ? errstr2 : "n/a"); /* Attempt to extract topics anyway, should return NULL. */ restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt); TEST_ASSERT(!restopics && restopic_cnt == 0, - "expected no result_topics, got %p cnt %"PRIusz, - restopics, restopic_cnt); + "expected no result_topics, got %p cnt %" PRIusz, restopics, + restopic_cnt); rd_kafka_event_destroy(rkev); @@ -384,16 +377,17 @@ static void do_test_DeleteTopics (const char *what, * * */ -static void do_test_DeleteGroups (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int with_options, - rd_bool_t destroy) { +static void do_test_DeleteGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options, + rd_bool_t destroy) { rd_kafka_queue_t *q; #define MY_DEL_GROUPS_CNT 4 char *group_names[MY_DEL_GROUPS_CNT]; rd_kafka_DeleteGroup_t *del_groups[MY_DEL_GROUPS_CNT]; rd_kafka_AdminOptions_t *options = NULL; - int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; int i; char errstr[512]; const char *errstr2; @@ -410,18 +404,18 @@ static void do_test_DeleteGroups (const char *what, q = useq ? 
useq : rd_kafka_queue_new(rk); - for (i = 0 ; i < MY_DEL_GROUPS_CNT ; i++) { + for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { group_names[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); - del_groups[i] = rd_kafka_DeleteGroup_new(group_names[i]); + del_groups[i] = rd_kafka_DeleteGroup_new(group_names[i]); } if (with_options) { options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_DELETEGROUPS); + rk, RD_KAFKA_ADMIN_OP_DELETEGROUPS); exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; - err = rd_kafka_AdminOptions_set_request_timeout( - options, exp_timeout, errstr, sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); if (useq) { @@ -432,8 +426,7 @@ static void do_test_DeleteGroups (const char *what, TIMING_START(&timing, "DeleteGroups"); TEST_SAY("Call DeleteGroups, timeout is %dms\n", exp_timeout); - rd_kafka_DeleteGroups(rk, del_groups, MY_DEL_GROUPS_CNT, - options, q); + rd_kafka_DeleteGroups(rk, del_groups, MY_DEL_GROUPS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); if (destroy) @@ -442,10 +435,10 @@ static void do_test_DeleteGroups (const char *what, /* Poll result queue */ TIMING_START(&timing, "DeleteGroups.queue_poll"); rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); - TIMING_ASSERT_LATER(&timing, exp_timeout-100, exp_timeout+100); + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); - TEST_SAY("DeleteGroups: got %s in %.3fs\n", - rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + TEST_SAY("DeleteGroups: got %s in %.3fs\n", rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); /* Convert event to proper result */ res = rd_kafka_event_DeleteGroups_result(rkev); @@ -457,18 +450,17 @@ static void do_test_DeleteGroups (const char *what, my_opaque, opaque); /* Expecting no error (errors will be per-group) */ - err = 
rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, "expected DeleteGroups to return error %s, not %s (%s)", rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR), - rd_kafka_err2str(err), - err ? errstr2 : "n/a"); + rd_kafka_err2str(err), err ? errstr2 : "n/a"); /* Extract groups, should return MY_DEL_GROUPS_CNT groups. */ resgroups = rd_kafka_DeleteGroups_result_groups(res, &resgroup_cnt); TEST_ASSERT(resgroups && resgroup_cnt == MY_DEL_GROUPS_CNT, - "expected %d result_groups, got %p cnt %"PRIusz, + "expected %d result_groups, got %p cnt %" PRIusz, MY_DEL_GROUPS_CNT, resgroups, resgroup_cnt); /* The returned groups should be in the original order, and @@ -480,17 +472,16 @@ static void do_test_DeleteGroups (const char *what, group_names[i], i, rd_kafka_group_result_name(resgroups[i])); TEST_ASSERT(rd_kafka_error_code(rd_kafka_group_result_error( - resgroups[i])) == - RD_KAFKA_RESP_ERR__TIMED_OUT, + resgroups[i])) == RD_KAFKA_RESP_ERR__TIMED_OUT, "expected group '%s' to have timed out, got %s", group_names[i], rd_kafka_error_string( - rd_kafka_group_result_error(resgroups[i]))); + rd_kafka_group_result_error(resgroups[i]))); } rd_kafka_event_destroy(rkev); - destroy: +destroy: for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { rd_kafka_DeleteGroup_destroy(del_groups[i]); rd_free(group_names[i]); @@ -506,12 +497,14 @@ static void do_test_DeleteGroups (const char *what, SUB_TEST_QUICK(); } -static void do_test_DeleteRecords (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int with_options, rd_bool_t destroy) { +static void do_test_DeleteRecords(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options, + rd_bool_t destroy) { rd_kafka_queue_t *q; #define MY_DEL_RECORDS_CNT 4 - rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_AdminOptions_t *options = NULL; rd_kafka_topic_partition_list_t *offsets = NULL; rd_kafka_DeleteRecords_t *del_records; 
const rd_kafka_DeleteRecords_result_t *res; @@ -529,18 +522,18 @@ static void do_test_DeleteRecords (const char *what, q = useq ? useq : rd_kafka_queue_new(rk); - for (i = 0 ; i < MY_DEL_RECORDS_CNT ; i++) { + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) { topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); } if (with_options) { options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_DELETERECORDS); + rk, RD_KAFKA_ADMIN_OP_DELETERECORDS); exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; err = rd_kafka_AdminOptions_set_request_timeout( - options, exp_timeout, errstr, sizeof(errstr)); + options, exp_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); if (useq) { @@ -552,8 +545,8 @@ static void do_test_DeleteRecords (const char *what, offsets = rd_kafka_topic_partition_list_new(MY_DEL_RECORDS_CNT); for (i = 0; i < MY_DEL_RECORDS_CNT; i++) - rd_kafka_topic_partition_list_add(offsets,topics[i], i)-> - offset = RD_KAFKA_OFFSET_END; + rd_kafka_topic_partition_list_add(offsets, topics[i], i) + ->offset = RD_KAFKA_OFFSET_END; del_records = rd_kafka_DeleteRecords_new(offsets); rd_kafka_topic_partition_list_destroy(offsets); @@ -571,10 +564,10 @@ static void do_test_DeleteRecords (const char *what, /* Poll result queue */ TIMING_START(&timing, "DeleteRecords.queue_poll"); rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); - TIMING_ASSERT(&timing, exp_timeout-100, exp_timeout+100); + TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100); TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); - TEST_SAY("DeleteRecords: got %s in %.3fs\n", - rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + TEST_SAY("DeleteRecords: got %s in %.3fs\n", rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); /* Convert event to proper result */ res = rd_kafka_event_DeleteRecords_result(rkev); @@ -591,7 +584,7 @@ static void do_test_DeleteRecords (const char *what, rd_kafka_event_destroy(rkev); - destroy: +destroy: if 
(options) rd_kafka_AdminOptions_destroy(options); @@ -599,7 +592,7 @@ static void do_test_DeleteRecords (const char *what, if (!useq) rd_kafka_queue_destroy(q); - for (i = 0 ; i < MY_DEL_RECORDS_CNT ; i++) + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) rd_free(topics[i]); #undef MY_DEL_RECORDS_CNT @@ -608,10 +601,10 @@ static void do_test_DeleteRecords (const char *what, } -static void do_test_DeleteConsumerGroupOffsets (const char *what, - rd_kafka_t *rk, - rd_kafka_queue_t *useq, - int with_options) { +static void do_test_DeleteConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options) { rd_kafka_queue_t *q; #define MY_DEL_CGRPOFFS_CNT 1 rd_kafka_AdminOptions_t *options = NULL; @@ -630,25 +623,25 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, q = useq ? useq : rd_kafka_queue_new(rk); - for (i = 0 ; i < MY_DEL_CGRPOFFS_CNT ; i++) { + for (i = 0; i < MY_DEL_CGRPOFFS_CNT; i++) { rd_kafka_topic_partition_list_t *partitions = - rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_new(3); rd_kafka_topic_partition_list_add(partitions, "topic1", 9); rd_kafka_topic_partition_list_add(partitions, "topic3", 15); rd_kafka_topic_partition_list_add(partitions, "topic1", 1); cgoffsets[i] = rd_kafka_DeleteConsumerGroupOffsets_new( - "mygroup", partitions); + "mygroup", partitions); rd_kafka_topic_partition_list_destroy(partitions); } if (with_options) { options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS); + rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS); exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; err = rd_kafka_AdminOptions_set_request_timeout( - options, exp_timeout, errstr, sizeof(errstr)); + options, exp_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); if (useq) { @@ -660,15 +653,14 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, TIMING_START(&timing, "DeleteConsumerGroupOffsets"); TEST_SAY("Call 
DeleteConsumerGroupOffsets, timeout is %dms\n", exp_timeout); - rd_kafka_DeleteConsumerGroupOffsets(rk, cgoffsets, - MY_DEL_CGRPOFFS_CNT, + rd_kafka_DeleteConsumerGroupOffsets(rk, cgoffsets, MY_DEL_CGRPOFFS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 10); /* Poll result queue */ TIMING_START(&timing, "DeleteConsumerGroupOffsets.queue_poll"); rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); - TIMING_ASSERT(&timing, exp_timeout-100, exp_timeout+100); + TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100); TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); TEST_SAY("DeleteConsumerGroupOffsets: got %s in %.3fs\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); @@ -694,8 +686,8 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, if (!useq) rd_kafka_queue_destroy(q); - rd_kafka_DeleteConsumerGroupOffsets_destroy_array( - cgoffsets, MY_DEL_CGRPOFFS_CNT); + rd_kafka_DeleteConsumerGroupOffsets_destroy_array(cgoffsets, + MY_DEL_CGRPOFFS_CNT); #undef MY_DEL_CGRPOFFSETS_CNT @@ -714,9 +706,9 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, * - Delete records from A,B,C * - Create extra partitions for topic D */ -static void do_test_mix (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { - char *topics[] = { "topicA", "topicB", "topicC" }; - int cnt = 0; +static void do_test_mix(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { + char *topics[] = {"topicA", "topicB", "topicC"}; + int cnt = 0; struct waiting { rd_kafka_event_type_t evtype; int seen; @@ -737,11 +729,11 @@ static void do_test_mix (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { offsets = rd_kafka_topic_partition_list_new(3); rd_kafka_topic_partition_list_add(offsets, topics[0], 0)->offset = - RD_KAFKA_OFFSET_END; + RD_KAFKA_OFFSET_END; rd_kafka_topic_partition_list_add(offsets, topics[1], 0)->offset = - RD_KAFKA_OFFSET_END; + RD_KAFKA_OFFSET_END; rd_kafka_topic_partition_list_add(offsets, topics[2], 0)->offset = - RD_KAFKA_OFFSET_END; + 
RD_KAFKA_OFFSET_END; test_CreateTopics_simple(rk, rkqu, topics, 2, 1, &id1); test_DeleteTopics_simple(rk, rkqu, &topics[1], 1, &id2); @@ -764,16 +756,15 @@ static void do_test_mix (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { rkev = rd_kafka_queue_poll(rkqu, -1); TEST_ASSERT(rkev); - TEST_SAY("Got event %s: %s\n", - rd_kafka_event_name(rkev), + TEST_SAY("Got event %s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); w = rd_kafka_event_opaque(rkev); TEST_ASSERT(w); TEST_ASSERT(w->evtype == rd_kafka_event_type(rkev), - "Expected evtype %d, not %d (%s)", - w->evtype, rd_kafka_event_type(rkev), + "Expected evtype %d, not %d (%s)", w->evtype, + rd_kafka_event_type(rkev), rd_kafka_event_name(rkev)); TEST_ASSERT(w->seen == 0, "Duplicate results"); @@ -791,7 +782,7 @@ static void do_test_mix (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /** * @brief Test AlterConfigs and DescribeConfigs */ -static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { +static void do_test_configs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { #define MY_CONFRES_CNT RD_KAFKA_RESOURCE__CNT + 2 rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT]; rd_kafka_AdminOptions_t *options; @@ -806,22 +797,22 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { SUB_TEST_QUICK(); /* Check invalids */ - configs[0] = rd_kafka_ConfigResource_new( - (rd_kafka_ResourceType_t)-1, "something"); + configs[0] = rd_kafka_ConfigResource_new((rd_kafka_ResourceType_t)-1, + "something"); TEST_ASSERT(!configs[0]); - configs[0] = rd_kafka_ConfigResource_new( - (rd_kafka_ResourceType_t)0, NULL); + configs[0] = + rd_kafka_ConfigResource_new((rd_kafka_ResourceType_t)0, NULL); TEST_ASSERT(!configs[0]); - for (i = 0 ; i < MY_CONFRES_CNT ; i++) { + for (i = 0; i < MY_CONFRES_CNT; i++) { int set_config = !(i % 2); /* librdkafka shall not limit the use of illogical * or unknown settings, they are enforced by the broker. 
*/ configs[i] = rd_kafka_ConfigResource_new( - (rd_kafka_ResourceType_t)i, "3"); + (rd_kafka_ResourceType_t)i, "3"); TEST_ASSERT(configs[i] != NULL); if (set_config) { @@ -829,9 +820,8 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { "some.conf", "which remains " "unchecked"); - rd_kafka_ConfigResource_set_config(configs[i], - "some.conf.null", - NULL); + rd_kafka_ConfigResource_set_config( + configs[i], "some.conf.null", NULL); } } @@ -842,8 +832,7 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { TEST_ASSERT(!err, "%s", errstr); /* AlterConfigs */ - rd_kafka_AlterConfigs(rk, configs, MY_CONFRES_CNT, - options, rkqu); + rd_kafka_AlterConfigs(rk, configs, MY_CONFRES_CNT, options, rkqu); rkev = test_wait_admin_result(rkqu, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, 2000); @@ -857,21 +846,18 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { rconfigs = rd_kafka_AlterConfigs_result_resources(res, &rconfig_cnt); TEST_ASSERT(!rconfigs && !rconfig_cnt, - "Expected no result resources, got %"PRIusz, - rconfig_cnt); + "Expected no result resources, got %" PRIusz, rconfig_cnt); rd_kafka_event_destroy(rkev); /* DescribeConfigs: reuse same configs and options */ - rd_kafka_DescribeConfigs(rk, configs, MY_CONFRES_CNT, - options, rkqu); + rd_kafka_DescribeConfigs(rk, configs, MY_CONFRES_CNT, options, rkqu); rd_kafka_AdminOptions_destroy(options); rd_kafka_ConfigResource_destroy_array(configs, MY_CONFRES_CNT); - rkev = test_wait_admin_result(rkqu, - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, - 2000); + rkev = test_wait_admin_result( + rkqu, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 2000); TEST_ASSERT(rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__TIMED_OUT, "Expected timeout, not %s", @@ -882,8 +868,7 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { rconfigs = rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt); TEST_ASSERT(!rconfigs && !rconfig_cnt, - "Expected no result resources, got 
%"PRIusz, - rconfig_cnt); + "Expected no result resources, got %" PRIusz, rconfig_cnt); rd_kafka_event_destroy(rkev); @@ -894,7 +879,7 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /** * @brief Verify that an unclean rd_kafka_destroy() does not hang or crash. */ -static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { +static void do_test_unclean_destroy(rd_kafka_type_t cltype, int with_mainq) { rd_kafka_t *rk; char errstr[512]; rd_kafka_conf_t *conf; @@ -932,8 +917,9 @@ static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { rd_kafka_queue_destroy(q); - TEST_SAY("Giving rd_kafka_destroy() 5s to finish, " - "despite Admin API request being processed\n"); + TEST_SAY( + "Giving rd_kafka_destroy() 5s to finish, " + "despite Admin API request being processed\n"); test_timeout_set(5); TIMING_START(&t_destroy, "rd_kafka_destroy()"); rd_kafka_destroy(rk); @@ -949,77 +935,83 @@ static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { /** * @brief Test AdminOptions */ -static void do_test_options (rd_kafka_t *rk) { -#define _all_apis { RD_KAFKA_ADMIN_OP_CREATETOPICS, \ - RD_KAFKA_ADMIN_OP_DELETETOPICS, \ - RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, \ - RD_KAFKA_ADMIN_OP_ALTERCONFIGS, \ - RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, \ - RD_KAFKA_ADMIN_OP_DELETEGROUPS, \ - RD_KAFKA_ADMIN_OP_DELETERECORDS, \ - RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, \ - RD_KAFKA_ADMIN_OP_ANY /* Must be last */} +static void do_test_options(rd_kafka_t *rk) { +#define _all_apis \ + { \ + RD_KAFKA_ADMIN_OP_CREATETOPICS, \ + RD_KAFKA_ADMIN_OP_DELETETOPICS, \ + RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, \ + RD_KAFKA_ADMIN_OP_ALTERCONFIGS, \ + RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, \ + RD_KAFKA_ADMIN_OP_DELETEGROUPS, \ + RD_KAFKA_ADMIN_OP_DELETERECORDS, \ + RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, \ + RD_KAFKA_ADMIN_OP_ANY /* Must be last */ \ + } struct { const char *setter; const rd_kafka_admin_op_t valid_apis[9]; } 
matrix[] = { - { "request_timeout", _all_apis }, - { "operation_timeout", { RD_KAFKA_ADMIN_OP_CREATETOPICS, - RD_KAFKA_ADMIN_OP_DELETETOPICS, - RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, - RD_KAFKA_ADMIN_OP_DELETERECORDS } }, - { "validate_only", { RD_KAFKA_ADMIN_OP_CREATETOPICS, - RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, - RD_KAFKA_ADMIN_OP_ALTERCONFIGS } }, - { "broker", _all_apis }, - { "opaque", _all_apis }, - { NULL }, + {"request_timeout", _all_apis}, + {"operation_timeout", + {RD_KAFKA_ADMIN_OP_CREATETOPICS, RD_KAFKA_ADMIN_OP_DELETETOPICS, + RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, + RD_KAFKA_ADMIN_OP_DELETERECORDS}}, + {"validate_only", + {RD_KAFKA_ADMIN_OP_CREATETOPICS, + RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, + RD_KAFKA_ADMIN_OP_ALTERCONFIGS}}, + {"broker", _all_apis}, + {"opaque", _all_apis}, + {NULL}, }; int i; rd_kafka_AdminOptions_t *options; SUB_TEST_QUICK(); - for (i = 0 ; matrix[i].setter ; i++) { + for (i = 0; matrix[i].setter; i++) { static const rd_kafka_admin_op_t all_apis[] = _all_apis; const rd_kafka_admin_op_t *for_api; - for (for_api = all_apis ; ; for_api++) { + for (for_api = all_apis;; for_api++) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_resp_err_t exp_err = + RD_KAFKA_RESP_ERR_NO_ERROR; char errstr[512]; int fi; options = rd_kafka_AdminOptions_new(rk, *for_api); - TEST_ASSERT(options, - "AdminOptions_new(%d) failed", *for_api); + TEST_ASSERT(options, "AdminOptions_new(%d) failed", + *for_api); if (!strcmp(matrix[i].setter, "request_timeout")) err = rd_kafka_AdminOptions_set_request_timeout( - options, 1234, errstr, sizeof(errstr)); + options, 1234, errstr, sizeof(errstr)); else if (!strcmp(matrix[i].setter, "operation_timeout")) - err = rd_kafka_AdminOptions_set_operation_timeout( + err = + rd_kafka_AdminOptions_set_operation_timeout( options, 12345, errstr, sizeof(errstr)); else if (!strcmp(matrix[i].setter, "validate_only")) err = rd_kafka_AdminOptions_set_validate_only( 
- options, 1, errstr, sizeof(errstr)); + options, 1, errstr, sizeof(errstr)); else if (!strcmp(matrix[i].setter, "broker")) err = rd_kafka_AdminOptions_set_broker( - options, 5, errstr, sizeof(errstr)); + options, 5, errstr, sizeof(errstr)); else if (!strcmp(matrix[i].setter, "opaque")) { rd_kafka_AdminOptions_set_opaque( - options, (void *)options); + options, (void *)options); err = RD_KAFKA_RESP_ERR_NO_ERROR; } else TEST_FAIL("Invalid setter: %s", matrix[i].setter); - TEST_SAYL(3, "AdminOptions_set_%s on " + TEST_SAYL(3, + "AdminOptions_set_%s on " "RD_KAFKA_ADMIN_OP_%d options " "returned %s: %s\n", - matrix[i].setter, - *for_api, + matrix[i].setter, *for_api, rd_kafka_err2name(err), err ? errstr : "success"); @@ -1030,24 +1022,25 @@ static void do_test_options (rd_kafka_t *rk) { } else if (*for_api != RD_KAFKA_ADMIN_OP_ANY) { exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG; - for (fi = 0 ; matrix[i].valid_apis[fi] ; fi++) { + for (fi = 0; matrix[i].valid_apis[fi]; fi++) { if (matrix[i].valid_apis[fi] == *for_api) - exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + exp_err = + RD_KAFKA_RESP_ERR_NO_ERROR; } } else { exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; } if (err != exp_err) - TEST_FAIL_LATER("Expected AdminOptions_set_%s " - "for RD_KAFKA_ADMIN_OP_%d " - "options to return %s, " - "not %s", - matrix[i].setter, - *for_api, - rd_kafka_err2name(exp_err), - rd_kafka_err2name(err)); + TEST_FAIL_LATER( + "Expected AdminOptions_set_%s " + "for RD_KAFKA_ADMIN_OP_%d " + "options to return %s, " + "not %s", + matrix[i].setter, *for_api, + rd_kafka_err2name(exp_err), + rd_kafka_err2name(err)); rd_kafka_AdminOptions_destroy(options); @@ -1058,7 +1051,8 @@ static void do_test_options (rd_kafka_t *rk) { /* Try an invalid for_api */ options = rd_kafka_AdminOptions_new(rk, (rd_kafka_admin_op_t)1234); - TEST_ASSERT(!options, "Expected AdminOptions_new() to fail " + TEST_ASSERT(!options, + "Expected AdminOptions_new() to fail " "with an invalid for_api, didn't."); TEST_LATER_CHECK(); @@ -1067,7 
+1061,7 @@ static void do_test_options (rd_kafka_t *rk) { } -static rd_kafka_t *create_admin_client (rd_kafka_type_t cltype) { +static rd_kafka_t *create_admin_client(rd_kafka_type_t cltype) { rd_kafka_t *rk; char errstr[512]; rd_kafka_conf_t *conf; @@ -1087,26 +1081,26 @@ static rd_kafka_t *create_admin_client (rd_kafka_type_t cltype) { } -static void do_test_apis (rd_kafka_type_t cltype) { +static void do_test_apis(rd_kafka_type_t cltype) { rd_kafka_t *rk; rd_kafka_queue_t *mainq, *backgroundq; mtx_init(&last_event_lock, mtx_plain); cnd_init(&last_event_cnd); - do_test_unclean_destroy(cltype, 0/*tempq*/); - do_test_unclean_destroy(cltype, 1/*mainq*/); + do_test_unclean_destroy(cltype, 0 /*tempq*/); + do_test_unclean_destroy(cltype, 1 /*mainq*/); rk = create_admin_client(cltype); - mainq = rd_kafka_queue_get_main(rk); + mainq = rd_kafka_queue_get_main(rk); backgroundq = rd_kafka_queue_get_background(rk); do_test_options(rk); do_test_CreateTopics("temp queue, no options", rk, NULL, 0, 0); - do_test_CreateTopics("temp queue, no options, background_event_cb", - rk, backgroundq, 1, 0); + do_test_CreateTopics("temp queue, no options, background_event_cb", rk, + backgroundq, 1, 0); do_test_CreateTopics("temp queue, options", rk, NULL, 0, 1); do_test_CreateTopics("main queue, options", rk, mainq, 0, 1); @@ -1122,8 +1116,8 @@ static void do_test_apis (rd_kafka_type_t cltype) { do_test_DeleteRecords("temp queue, options", rk, NULL, 1, rd_false); do_test_DeleteRecords("main queue, options", rk, mainq, 1, rd_false); - do_test_DeleteConsumerGroupOffsets("temp queue, no options", - rk, NULL, 0); + do_test_DeleteConsumerGroupOffsets("temp queue, no options", rk, NULL, + 0); do_test_DeleteConsumerGroupOffsets("temp queue, options", rk, NULL, 1); do_test_DeleteConsumerGroupOffsets("main queue, options", rk, mainq, 1); @@ -1139,17 +1133,17 @@ static void do_test_apis (rd_kafka_type_t cltype) { /* * Tests which require a unique unused client instance. 
*/ - rk = create_admin_client(cltype); + rk = create_admin_client(cltype); mainq = rd_kafka_queue_get_main(rk); do_test_DeleteRecords("main queue, options, destroy", rk, mainq, 1, - rd_true/*destroy instance before finishing*/); + rd_true /*destroy instance before finishing*/); rd_kafka_queue_destroy(mainq); rd_kafka_destroy(rk); - rk = create_admin_client(cltype); + rk = create_admin_client(cltype); mainq = rd_kafka_queue_get_main(rk); do_test_DeleteGroups("main queue, options, destroy", rk, mainq, 1, - rd_true/*destroy instance before finishing*/); + rd_true /*destroy instance before finishing*/); rd_kafka_queue_destroy(mainq); rd_kafka_destroy(rk); @@ -1160,7 +1154,7 @@ static void do_test_apis (rd_kafka_type_t cltype) { } -int main_0080_admin_ut (int argc, char **argv) { +int main_0080_admin_ut(int argc, char **argv) { do_test_apis(RD_KAFKA_PRODUCER); do_test_apis(RD_KAFKA_CONSUMER); return 0; diff --git a/tests/0081-admin.c b/tests/0081-admin.c index ea029e3e4f..00971d3bcc 100644 --- a/tests/0081-admin.c +++ b/tests/0081-admin.c @@ -39,23 +39,24 @@ static size_t avail_broker_cnt; - -static void do_test_CreateTopics (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int op_timeout, rd_bool_t validate_only) { +static void do_test_CreateTopics(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int op_timeout, + rd_bool_t validate_only) { rd_kafka_queue_t *q; #define MY_NEW_TOPICS_CNT 7 char *topics[MY_NEW_TOPICS_CNT]; rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT]; - rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_AdminOptions_t *options = NULL; rd_kafka_resp_err_t exp_topicerr[MY_NEW_TOPICS_CNT] = {0}; rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Expected topics in metadata */ rd_kafka_metadata_topic_t exp_mdtopics[MY_NEW_TOPICS_CNT] = {{0}}; - int exp_mdtopic_cnt = 0; + int exp_mdtopic_cnt = 0; /* Not expected topics in metadata */ rd_kafka_metadata_topic_t exp_not_mdtopics[MY_NEW_TOPICS_CNT] = {{0}}; - int 
exp_not_mdtopic_cnt = 0; + int exp_not_mdtopic_cnt = 0; int i; char errstr[512]; const char *errstr2; @@ -65,71 +66,69 @@ static void do_test_CreateTopics (const char *what, const rd_kafka_CreateTopics_result_t *res; const rd_kafka_topic_result_t **restopics; size_t restopic_cnt; - int metadata_tmout ; + int metadata_tmout; int num_replicas = (int)avail_broker_cnt; int32_t *replicas; - SUB_TEST_QUICK("%s CreateTopics with %s, " - "op_timeout %d, validate_only %d", - rd_kafka_name(rk), what, op_timeout, validate_only); + SUB_TEST_QUICK( + "%s CreateTopics with %s, " + "op_timeout %d, validate_only %d", + rd_kafka_name(rk), what, op_timeout, validate_only); q = useq ? useq : rd_kafka_queue_new(rk); /* Set up replicas */ replicas = rd_alloca(sizeof(*replicas) * num_replicas); - for (i = 0 ; i < num_replicas ; i++) + for (i = 0; i < num_replicas; i++) replicas[i] = avail_brokers[i]; /** * Construct NewTopic array with different properties for * different partitions. */ - for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) { + for (i = 0; i < MY_NEW_TOPICS_CNT; i++) { char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); - int use_defaults = i == 6 && - test_broker_version >= TEST_BRKVER(2,4,0,0); - int num_parts = !use_defaults ? (i * 7 + 1) : -1; - int set_config = (i & 1); + int use_defaults = + i == 6 && test_broker_version >= TEST_BRKVER(2, 4, 0, 0); + int num_parts = !use_defaults ? (i * 7 + 1) : -1; + int set_config = (i & 1); int add_invalid_config = (i == 1); - int set_replicas = !use_defaults && !(i % 3); + int set_replicas = !use_defaults && !(i % 3); rd_kafka_resp_err_t this_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; - topics[i] = topic; - new_topics[i] = rd_kafka_NewTopic_new(topic, - num_parts, - set_replicas ? -1 : - num_replicas, - NULL, 0); + topics[i] = topic; + new_topics[i] = rd_kafka_NewTopic_new( + topic, num_parts, set_replicas ? 
-1 : num_replicas, NULL, + 0); if (set_config) { /* * Add various configuration properties */ err = rd_kafka_NewTopic_set_config( - new_topics[i], "compression.type", "lz4"); + new_topics[i], "compression.type", "lz4"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); err = rd_kafka_NewTopic_set_config( - new_topics[i], "delete.retention.ms", "900"); + new_topics[i], "delete.retention.ms", "900"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } if (add_invalid_config) { /* Add invalid config property */ err = rd_kafka_NewTopic_set_config( - new_topics[i], - "dummy.doesntexist", - "broker is verifying this"); + new_topics[i], "dummy.doesntexist", + "broker is verifying this"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); this_exp_err = RD_KAFKA_RESP_ERR_INVALID_CONFIG; } - TEST_SAY("Expecting result for topic #%d: %s " - "(set_config=%d, add_invalid_config=%d, " - "set_replicas=%d, use_defaults=%d)\n", - i, rd_kafka_err2name(this_exp_err), - set_config, add_invalid_config, set_replicas, - use_defaults); + TEST_SAY( + "Expecting result for topic #%d: %s " + "(set_config=%d, add_invalid_config=%d, " + "set_replicas=%d, use_defaults=%d)\n", + i, rd_kafka_err2name(this_exp_err), set_config, + add_invalid_config, set_replicas, use_defaults); if (set_replicas) { int32_t p; @@ -137,11 +136,10 @@ static void do_test_CreateTopics (const char *what, /* * Set valid replica assignments */ - for (p = 0 ; p < num_parts ; p++) { + for (p = 0; p < num_parts; p++) { err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], p, - replicas, num_replicas, - errstr, sizeof(errstr)); + new_topics[i], p, replicas, num_replicas, + errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", errstr); } } @@ -151,34 +149,32 @@ static void do_test_CreateTopics (const char *what, exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic; } else { - exp_mdtopics[exp_mdtopic_cnt].topic = topic; - exp_mdtopics[exp_mdtopic_cnt].partition_cnt = - num_parts; + exp_mdtopics[exp_mdtopic_cnt].topic = topic; 
+ exp_mdtopics[exp_mdtopic_cnt].partition_cnt = num_parts; exp_mdtopic_cnt++; } } if (op_timeout != -1 || validate_only) { options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_CREATETOPICS); + rk, RD_KAFKA_ADMIN_OP_CREATETOPICS); if (op_timeout != -1) { err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } if (validate_only) { err = rd_kafka_AdminOptions_set_validate_only( - options, validate_only, errstr, sizeof(errstr)); + options, validate_only, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } } TIMING_START(&timing, "CreateTopics"); TEST_SAY("Call CreateTopics\n"); - rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, - options, q); + rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); /* Poll result queue for CreateTopics result. @@ -186,13 +182,12 @@ static void do_test_CreateTopics (const char *what, * (typically generic Error events). 
*/ TIMING_START(&timing, "CreateTopics.queue_poll"); do { - rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000)); + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); TEST_SAY("CreateTopics: got %s in %.3fms\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); if (rd_kafka_event_error(rkev)) - TEST_SAY("%s: %s\n", - rd_kafka_event_name(rkev), + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); } while (rd_kafka_event_type(rkev) != RD_KAFKA_EVENT_CREATETOPICS_RESULT); @@ -203,44 +198,41 @@ static void do_test_CreateTopics (const char *what, rd_kafka_event_name(rkev)); /* Expecting error */ - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == exp_err, "expected CreateTopics to return %s, not %s (%s)", - rd_kafka_err2str(exp_err), - rd_kafka_err2str(err), + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), err ? errstr2 : "n/a"); - TEST_SAY("CreateTopics: returned %s (%s)\n", - rd_kafka_err2str(err), err ? errstr2 : "n/a"); + TEST_SAY("CreateTopics: returned %s (%s)\n", rd_kafka_err2str(err), + err ? errstr2 : "n/a"); /* Extract topics */ restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt); /* Scan topics for proper fields and expected failures. */ - for (i = 0 ; i < (int)restopic_cnt ; i++) { + for (i = 0; i < (int)restopic_cnt; i++) { const rd_kafka_topic_result_t *terr = restopics[i]; /* Verify that topic order matches our request. 
*/ if (strcmp(rd_kafka_topic_result_name(terr), topics[i])) - TEST_FAIL_LATER("Topic result order mismatch at #%d: " - "expected %s, got %s", - i, topics[i], - rd_kafka_topic_result_name(terr)); + TEST_FAIL_LATER( + "Topic result order mismatch at #%d: " + "expected %s, got %s", + i, topics[i], rd_kafka_topic_result_name(terr)); - TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n", - i, + TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n", i, rd_kafka_topic_result_name(terr), rd_kafka_err2name(rd_kafka_topic_result_error(terr)), rd_kafka_topic_result_error_string(terr)); if (rd_kafka_topic_result_error(terr) != exp_topicerr[i]) - TEST_FAIL_LATER( - "Expected %s, not %d: %s", - rd_kafka_err2name(exp_topicerr[i]), - rd_kafka_topic_result_error(terr), - rd_kafka_err2name(rd_kafka_topic_result_error( - terr))); + TEST_FAIL_LATER("Expected %s, not %d: %s", + rd_kafka_err2name(exp_topicerr[i]), + rd_kafka_topic_result_error(terr), + rd_kafka_err2name( + rd_kafka_topic_result_error(terr))); } /** @@ -259,16 +251,13 @@ static void do_test_CreateTopics (const char *what, metadata_tmout = 10 * 1000; } - test_wait_metadata_update(rk, - exp_mdtopics, - exp_mdtopic_cnt, - exp_not_mdtopics, - exp_not_mdtopic_cnt, + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, + exp_not_mdtopics, exp_not_mdtopic_cnt, metadata_tmout); rd_kafka_event_destroy(rkev); - for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) { + for (i = 0; i < MY_NEW_TOPICS_CNT; i++) { rd_kafka_NewTopic_destroy(new_topics[i]); rd_free(topics[i]); } @@ -287,29 +276,29 @@ static void do_test_CreateTopics (const char *what, - /** * @brief Test deletion of topics * * */ -static void do_test_DeleteTopics (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int op_timeout) { +static void do_test_DeleteTopics(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int op_timeout) { rd_kafka_queue_t *q; const int skip_topic_cnt = 2; #define MY_DEL_TOPICS_CNT 9 char *topics[MY_DEL_TOPICS_CNT]; 
rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT]; - rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_AdminOptions_t *options = NULL; rd_kafka_resp_err_t exp_topicerr[MY_DEL_TOPICS_CNT] = {0}; rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Expected topics in metadata */ rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_TOPICS_CNT] = {{0}}; - int exp_mdtopic_cnt = 0; + int exp_mdtopic_cnt = 0; /* Not expected topics in metadata */ rd_kafka_metadata_topic_t exp_not_mdtopics[MY_DEL_TOPICS_CNT] = {{0}}; - int exp_not_mdtopic_cnt = 0; + int exp_not_mdtopic_cnt = 0; int i; char errstr[512]; const char *errstr2; @@ -329,7 +318,7 @@ static void do_test_DeleteTopics (const char *what, /** * Construct DeleteTopic array */ - for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) { + for (i = 0; i < MY_DEL_TOPICS_CNT; i++) { char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); int notexist_topic = i >= MY_DEL_TOPICS_CNT - skip_topic_cnt; @@ -339,10 +328,9 @@ static void do_test_DeleteTopics (const char *what, if (notexist_topic) exp_topicerr[i] = - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; else { - exp_topicerr[i] = - RD_KAFKA_RESP_ERR_NO_ERROR; + exp_topicerr[i] = RD_KAFKA_RESP_ERR_NO_ERROR; exp_mdtopics[exp_mdtopic_cnt++].topic = topic; } @@ -351,31 +339,26 @@ static void do_test_DeleteTopics (const char *what, } if (op_timeout != -1) { - options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_ANY); + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } /* Create the topics first, minus the skip count. 
*/ test_CreateTopics_simple(rk, NULL, topics, - MY_DEL_TOPICS_CNT-skip_topic_cnt, - 2/*num_partitions*/, - NULL); + MY_DEL_TOPICS_CNT - skip_topic_cnt, + 2 /*num_partitions*/, NULL); /* Verify that topics are reported by metadata */ - test_wait_metadata_update(rk, - exp_mdtopics, exp_mdtopic_cnt, - NULL, 0, - 15*1000); + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, + 15 * 1000); TIMING_START(&timing, "DeleteTopics"); TEST_SAY("Call DeleteTopics\n"); - rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, - options, q); + rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); /* Poll result queue for DeleteTopics result. @@ -383,13 +366,12 @@ static void do_test_DeleteTopics (const char *what, * (typically generic Error events). */ TIMING_START(&timing, "DeleteTopics.queue_poll"); while (1) { - rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000)); + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); TEST_SAY("DeleteTopics: got %s in %.3fms\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); if (rd_kafka_event_error(rkev)) - TEST_SAY("%s: %s\n", - rd_kafka_event_name(rkev), + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); if (rd_kafka_event_type(rkev) == @@ -405,44 +387,41 @@ static void do_test_DeleteTopics (const char *what, rd_kafka_event_name(rkev)); /* Expecting error */ - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == exp_err, "expected DeleteTopics to return %s, not %s (%s)", - rd_kafka_err2str(exp_err), - rd_kafka_err2str(err), + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), err ? errstr2 : "n/a"); - TEST_SAY("DeleteTopics: returned %s (%s)\n", - rd_kafka_err2str(err), err ? errstr2 : "n/a"); + TEST_SAY("DeleteTopics: returned %s (%s)\n", rd_kafka_err2str(err), + err ? 
errstr2 : "n/a"); /* Extract topics */ restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt); /* Scan topics for proper fields and expected failures. */ - for (i = 0 ; i < (int)restopic_cnt ; i++) { + for (i = 0; i < (int)restopic_cnt; i++) { const rd_kafka_topic_result_t *terr = restopics[i]; /* Verify that topic order matches our request. */ if (strcmp(rd_kafka_topic_result_name(terr), topics[i])) - TEST_FAIL_LATER("Topic result order mismatch at #%d: " - "expected %s, got %s", - i, topics[i], - rd_kafka_topic_result_name(terr)); + TEST_FAIL_LATER( + "Topic result order mismatch at #%d: " + "expected %s, got %s", + i, topics[i], rd_kafka_topic_result_name(terr)); - TEST_SAY("DeleteTopics result: #%d: %s: %s: %s\n", - i, + TEST_SAY("DeleteTopics result: #%d: %s: %s: %s\n", i, rd_kafka_topic_result_name(terr), rd_kafka_err2name(rd_kafka_topic_result_error(terr)), rd_kafka_topic_result_error_string(terr)); if (rd_kafka_topic_result_error(terr) != exp_topicerr[i]) - TEST_FAIL_LATER( - "Expected %s, not %d: %s", - rd_kafka_err2name(exp_topicerr[i]), - rd_kafka_topic_result_error(terr), - rd_kafka_err2name(rd_kafka_topic_result_error( - terr))); + TEST_FAIL_LATER("Expected %s, not %d: %s", + rd_kafka_err2name(exp_topicerr[i]), + rd_kafka_topic_result_error(terr), + rd_kafka_err2name( + rd_kafka_topic_result_error(terr))); } /** @@ -454,15 +433,12 @@ static void do_test_DeleteTopics (const char *what, else metadata_tmout = 10 * 1000; - test_wait_metadata_update(rk, - NULL, 0, - exp_not_mdtopics, - exp_not_mdtopic_cnt, - metadata_tmout); + test_wait_metadata_update(rk, NULL, 0, exp_not_mdtopics, + exp_not_mdtopic_cnt, metadata_tmout); rd_kafka_event_destroy(rkev); - for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) { + for (i = 0; i < MY_DEL_TOPICS_CNT; i++) { rd_kafka_DeleteTopic_destroy(del_topics[i]); rd_free(topics[i]); } @@ -486,9 +462,10 @@ static void do_test_DeleteTopics (const char *what, * * */ -static void do_test_CreatePartitions (const char *what, - 
rd_kafka_t *rk, rd_kafka_queue_t *useq, - int op_timeout) { +static void do_test_CreatePartitions(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int op_timeout) { rd_kafka_queue_t *q; #define MY_CRP_TOPICS_CNT 9 char *topics[MY_CRP_TOPICS_CNT]; @@ -497,8 +474,8 @@ static void do_test_CreatePartitions (const char *what, rd_kafka_AdminOptions_t *options = NULL; /* Expected topics in metadata */ rd_kafka_metadata_topic_t exp_mdtopics[MY_CRP_TOPICS_CNT] = {{0}}; - rd_kafka_metadata_partition_t exp_mdparts[2] = {{0}}; - int exp_mdtopic_cnt = 0; + rd_kafka_metadata_partition_t exp_mdparts[2] = {{0}}; + int exp_mdtopic_cnt = 0; int i; char errstr[512]; rd_kafka_resp_err_t err; @@ -517,102 +494,104 @@ static void do_test_CreatePartitions (const char *what, * use exp_mdparts[1]. */ /* Set valid replica assignments (even, and odd (reverse) ) */ - exp_mdparts[0].replicas = rd_alloca(sizeof(*exp_mdparts[0].replicas) * - num_replicas); - exp_mdparts[1].replicas = rd_alloca(sizeof(*exp_mdparts[1].replicas) * - num_replicas); + exp_mdparts[0].replicas = + rd_alloca(sizeof(*exp_mdparts[0].replicas) * num_replicas); + exp_mdparts[1].replicas = + rd_alloca(sizeof(*exp_mdparts[1].replicas) * num_replicas); exp_mdparts[0].replica_cnt = num_replicas; exp_mdparts[1].replica_cnt = num_replicas; - for (i = 0 ; i < num_replicas ; i++) { + for (i = 0; i < num_replicas; i++) { exp_mdparts[0].replicas[i] = avail_brokers[i]; - exp_mdparts[1].replicas[i] = avail_brokers[num_replicas-i-1]; + exp_mdparts[1].replicas[i] = + avail_brokers[num_replicas - i - 1]; } /** * Construct CreatePartitions array */ - for (i = 0 ; i < MY_CRP_TOPICS_CNT ; i++) { + for (i = 0; i < MY_CRP_TOPICS_CNT; i++) { char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); int initial_part_cnt = 1 + (i * 2); - int new_part_cnt = 1 + (i / 2); - int final_part_cnt = initial_part_cnt + new_part_cnt; - int set_replicas = !(i % 2); + int new_part_cnt = 1 + (i / 2); + int final_part_cnt = initial_part_cnt + 
new_part_cnt; + int set_replicas = !(i % 2); int pi; topics[i] = topic; /* Topic to create with initial partition count */ - new_topics[i] = rd_kafka_NewTopic_new(topic, initial_part_cnt, - set_replicas ? - -1 : num_replicas, - NULL, 0); + new_topics[i] = rd_kafka_NewTopic_new( + topic, initial_part_cnt, set_replicas ? -1 : num_replicas, + NULL, 0); /* .. and later add more partitions to */ - crp_topics[i] = rd_kafka_NewPartitions_new(topic, - final_part_cnt, - errstr, - sizeof(errstr)); + crp_topics[i] = rd_kafka_NewPartitions_new( + topic, final_part_cnt, errstr, sizeof(errstr)); if (set_replicas) { - exp_mdtopics[exp_mdtopic_cnt].partitions = - rd_alloca(final_part_cnt * - sizeof(*exp_mdtopics[exp_mdtopic_cnt]. - partitions)); + exp_mdtopics[exp_mdtopic_cnt].partitions = rd_alloca( + final_part_cnt * + sizeof(*exp_mdtopics[exp_mdtopic_cnt].partitions)); - for (pi = 0 ; pi < final_part_cnt ; pi++) { + for (pi = 0; pi < final_part_cnt; pi++) { const rd_kafka_metadata_partition_t *exp_mdp = - &exp_mdparts[pi & 1]; + &exp_mdparts[pi & 1]; - exp_mdtopics[exp_mdtopic_cnt]. - partitions[pi] = *exp_mdp; /* copy */ + exp_mdtopics[exp_mdtopic_cnt].partitions[pi] = + *exp_mdp; /* copy */ - exp_mdtopics[exp_mdtopic_cnt]. 
- partitions[pi].id = pi; + exp_mdtopics[exp_mdtopic_cnt] + .partitions[pi] + .id = pi; if (pi < initial_part_cnt) { /* Set replica assignment * for initial partitions */ - err = rd_kafka_NewTopic_set_replica_assignment( + err = + rd_kafka_NewTopic_set_replica_assignment( new_topics[i], pi, exp_mdp->replicas, (size_t)exp_mdp->replica_cnt, errstr, sizeof(errstr)); - TEST_ASSERT(!err, "NewTopic_set_replica_assignment: %s", - errstr); + TEST_ASSERT(!err, + "NewTopic_set_replica_" + "assignment: %s", + errstr); } else { /* Set replica assignment for new * partitions */ - err = rd_kafka_NewPartitions_set_replica_assignment( + err = + rd_kafka_NewPartitions_set_replica_assignment( crp_topics[i], pi - initial_part_cnt, exp_mdp->replicas, (size_t)exp_mdp->replica_cnt, errstr, sizeof(errstr)); - TEST_ASSERT(!err, "NewPartitions_set_replica_assignment: %s", - errstr); + TEST_ASSERT(!err, + "NewPartitions_set_replica_" + "assignment: %s", + errstr); } - } } - TEST_SAY(_C_YEL "Topic %s with %d initial partitions will grow " + TEST_SAY(_C_YEL + "Topic %s with %d initial partitions will grow " "by %d to %d total partitions with%s replicas set\n", - topics[i], - initial_part_cnt, new_part_cnt, final_part_cnt, - set_replicas ? "" : "out"); + topics[i], initial_part_cnt, new_part_cnt, + final_part_cnt, set_replicas ? 
"" : "out"); - exp_mdtopics[exp_mdtopic_cnt].topic = topic; + exp_mdtopics[exp_mdtopic_cnt].topic = topic; exp_mdtopics[exp_mdtopic_cnt].partition_cnt = final_part_cnt; exp_mdtopic_cnt++; } if (op_timeout != -1) { - options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_ANY); + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } @@ -621,13 +600,11 @@ static void do_test_CreatePartitions (const char *what, */ TIMING_START(&timing, "CreateTopics"); TEST_SAY("Creating topics with initial partition counts\n"); - rd_kafka_CreateTopics(rk, new_topics, MY_CRP_TOPICS_CNT, - options, q); + rd_kafka_CreateTopics(rk, new_topics, MY_CRP_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); - err = test_wait_topic_admin_result(q, - RD_KAFKA_EVENT_CREATETOPICS_RESULT, - NULL, 15000); + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_CREATETOPICS_RESULT, NULL, 15000); TEST_ASSERT(!err, "CreateTopics failed: %s", rd_kafka_err2str(err)); rd_kafka_NewTopic_destroy_array(new_topics, MY_CRP_TOPICS_CNT); @@ -638,13 +615,12 @@ static void do_test_CreatePartitions (const char *what, */ TIMING_START(&timing, "CreatePartitions"); TEST_SAY("Creating partitions\n"); - rd_kafka_CreatePartitions(rk, crp_topics, MY_CRP_TOPICS_CNT, - options, q); + rd_kafka_CreatePartitions(rk, crp_topics, MY_CRP_TOPICS_CNT, options, + q); TIMING_ASSERT_LATER(&timing, 0, 50); - err = test_wait_topic_admin_result(q, - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, - NULL, 15000); + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, 15000); TEST_ASSERT(!err, "CreatePartitions failed: %s", rd_kafka_err2str(err)); rd_kafka_NewPartitions_destroy_array(crp_topics, MY_CRP_TOPICS_CNT); @@ -659,13 +635,10 @@ static void do_test_CreatePartitions (const char *what, 
else metadata_tmout = 10 * 1000; - test_wait_metadata_update(rk, - exp_mdtopics, - exp_mdtopic_cnt, - NULL, 0, + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, metadata_tmout); - for (i = 0 ; i < MY_CRP_TOPICS_CNT ; i++) + for (i = 0; i < MY_CRP_TOPICS_CNT; i++) rd_free(topics[i]); if (options) @@ -685,13 +658,13 @@ static void do_test_CreatePartitions (const char *what, /** * @brief Print the ConfigEntrys in the provided array. */ -static void -test_print_ConfigEntry_array (const rd_kafka_ConfigEntry_t **entries, - size_t entry_cnt, unsigned int depth) { +static void test_print_ConfigEntry_array(const rd_kafka_ConfigEntry_t **entries, + size_t entry_cnt, + unsigned int depth) { const char *indent = &" "[4 - (depth > 4 ? 4 : depth)]; size_t ei; - for (ei = 0 ; ei < entry_cnt ; ei++) { + for (ei = 0; ei < entry_cnt; ei++) { const rd_kafka_ConfigEntry_t *e = entries[ei]; const rd_kafka_ConfigEntry_t **syns; size_t syn_cnt; @@ -699,28 +672,27 @@ test_print_ConfigEntry_array (const rd_kafka_ConfigEntry_t **entries, syns = rd_kafka_ConfigEntry_synonyms(e, &syn_cnt); #define YN(v) ((v) ? "y" : "n") - TEST_SAYL(3, - "%s#%"PRIusz"/%"PRIusz - ": Source %s (%d): \"%s\"=\"%s\" " - "[is read-only=%s, default=%s, sensitive=%s, " - "synonym=%s] with %"PRIusz" synonym(s)\n", - indent, - ei, entry_cnt, - rd_kafka_ConfigSource_name( - rd_kafka_ConfigEntry_source(e)), - rd_kafka_ConfigEntry_source(e), - rd_kafka_ConfigEntry_name(e), - rd_kafka_ConfigEntry_value(e) ? 
- rd_kafka_ConfigEntry_value(e) : "(NULL)", - YN(rd_kafka_ConfigEntry_is_read_only(e)), - YN(rd_kafka_ConfigEntry_is_default(e)), - YN(rd_kafka_ConfigEntry_is_sensitive(e)), - YN(rd_kafka_ConfigEntry_is_synonym(e)), - syn_cnt); + TEST_SAYL( + 3, + "%s#%" PRIusz "/%" PRIusz + ": Source %s (%d): \"%s\"=\"%s\" " + "[is read-only=%s, default=%s, sensitive=%s, " + "synonym=%s] with %" PRIusz " synonym(s)\n", + indent, ei, entry_cnt, + rd_kafka_ConfigSource_name(rd_kafka_ConfigEntry_source(e)), + rd_kafka_ConfigEntry_source(e), + rd_kafka_ConfigEntry_name(e), + rd_kafka_ConfigEntry_value(e) + ? rd_kafka_ConfigEntry_value(e) + : "(NULL)", + YN(rd_kafka_ConfigEntry_is_read_only(e)), + YN(rd_kafka_ConfigEntry_is_default(e)), + YN(rd_kafka_ConfigEntry_is_sensitive(e)), + YN(rd_kafka_ConfigEntry_is_synonym(e)), syn_cnt); #undef YN if (syn_cnt > 0) - test_print_ConfigEntry_array(syns, syn_cnt, depth+1); + test_print_ConfigEntry_array(syns, syn_cnt, depth + 1); } } @@ -728,7 +700,7 @@ test_print_ConfigEntry_array (const rd_kafka_ConfigEntry_t **entries, /** * @brief Test AlterConfigs */ -static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { +static void do_test_AlterConfigs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { #define MY_CONFRES_CNT 3 char *topics[MY_CONFRES_CNT]; rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT]; @@ -750,7 +722,7 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /* * Only create one topic, the others will be non-existent. 
*/ - for (i = 0 ; i < MY_CONFRES_CNT ; i++) + for (i = 0; i < MY_CONFRES_CNT; i++) rd_strdupa(&topics[i], test_mk_topic_name(__FUNCTION__, 1)); test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL); @@ -760,15 +732,15 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /* * ConfigResource #0: valid topic config */ - configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_TOPIC, topics[ci]); + configs[ci] = + rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]); err = rd_kafka_ConfigResource_set_config(configs[ci], "compression.type", "gzip"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); - err = rd_kafka_ConfigResource_set_config(configs[ci], - "flush.ms", "12345678"); + err = rd_kafka_ConfigResource_set_config(configs[ci], "flush.ms", + "12345678"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR; @@ -780,34 +752,34 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { * ConfigResource #1: valid broker config */ configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_BROKER, - tsprintf("%"PRId32, avail_brokers[0])); + RD_KAFKA_RESOURCE_BROKER, + tsprintf("%" PRId32, avail_brokers[0])); err = rd_kafka_ConfigResource_set_config( - configs[ci], - "sasl.kerberos.min.time.before.relogin", "58000"); + configs[ci], "sasl.kerberos.min.time.before.relogin", + "58000"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR; ci++; } else { - TEST_WARN("Skipping RESOURCE_BROKER test on unsupported " - "broker version\n"); + TEST_WARN( + "Skipping RESOURCE_BROKER test on unsupported " + "broker version\n"); } /* * ConfigResource #2: valid topic config, non-existent topic */ - configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_TOPIC, topics[ci]); + configs[ci] = + rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]); err = rd_kafka_ConfigResource_set_config(configs[ci], "compression.type", "lz4"); 
TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); - err = rd_kafka_ConfigResource_set_config(configs[ci], - "offset.metadata.max.bytes", - "12345"); + err = rd_kafka_ConfigResource_set_config( + configs[ci], "offset.metadata.max.bytes", "12345"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); if (test_broker_version >= TEST_BRKVER(2, 7, 0, 0)) @@ -837,7 +809,7 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { * Wait for result */ rkev = test_wait_admin_result(rkqu, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, - 10000+1000); + 10000 + 1000); /* * Extract result @@ -846,68 +818,67 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { TEST_ASSERT(res, "Expected AlterConfigs result, not %s", rd_kafka_event_name(rkev)); - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); - TEST_ASSERT(!err, - "Expected success, not %s: %s", + TEST_ASSERT(!err, "Expected success, not %s: %s", rd_kafka_err2name(err), errstr2); rconfigs = rd_kafka_AlterConfigs_result_resources(res, &rconfig_cnt); TEST_ASSERT((int)rconfig_cnt == ci, - "Expected %d result resources, got %"PRIusz"\n", - ci, rconfig_cnt); + "Expected %d result resources, got %" PRIusz "\n", ci, + rconfig_cnt); /* * Verify status per resource */ - for (i = 0 ; i < (int)rconfig_cnt ; i++) { + for (i = 0; i < (int)rconfig_cnt; i++) { const rd_kafka_ConfigEntry_t **entries; size_t entry_cnt; - err = rd_kafka_ConfigResource_error(rconfigs[i]); + err = rd_kafka_ConfigResource_error(rconfigs[i]); errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[i]); - entries = rd_kafka_ConfigResource_configs(rconfigs[i], - &entry_cnt); + entries = + rd_kafka_ConfigResource_configs(rconfigs[i], &entry_cnt); - TEST_SAY("ConfigResource #%d: type %s (%d), \"%s\": " - "%"PRIusz" ConfigEntries, error %s (%s)\n", - i, - rd_kafka_ResourceType_name( - rd_kafka_ConfigResource_type(rconfigs[i])), - rd_kafka_ConfigResource_type(rconfigs[i]), - 
rd_kafka_ConfigResource_name(rconfigs[i]), - entry_cnt, - rd_kafka_err2name(err), errstr2 ? errstr2 : ""); + TEST_SAY( + "ConfigResource #%d: type %s (%d), \"%s\": " + "%" PRIusz " ConfigEntries, error %s (%s)\n", + i, + rd_kafka_ResourceType_name( + rd_kafka_ConfigResource_type(rconfigs[i])), + rd_kafka_ConfigResource_type(rconfigs[i]), + rd_kafka_ConfigResource_name(rconfigs[i]), entry_cnt, + rd_kafka_err2name(err), errstr2 ? errstr2 : ""); test_print_ConfigEntry_array(entries, entry_cnt, 1); if (rd_kafka_ConfigResource_type(rconfigs[i]) != - rd_kafka_ConfigResource_type(configs[i]) || + rd_kafka_ConfigResource_type(configs[i]) || strcmp(rd_kafka_ConfigResource_name(rconfigs[i]), rd_kafka_ConfigResource_name(configs[i]))) { TEST_FAIL_LATER( - "ConfigResource #%d: " - "expected type %s name %s, " - "got type %s name %s", - i, - rd_kafka_ResourceType_name(rd_kafka_ConfigResource_type(configs[i])), - rd_kafka_ConfigResource_name(configs[i]), - rd_kafka_ResourceType_name(rd_kafka_ConfigResource_type(rconfigs[i])), - rd_kafka_ConfigResource_name(rconfigs[i])); + "ConfigResource #%d: " + "expected type %s name %s, " + "got type %s name %s", + i, + rd_kafka_ResourceType_name( + rd_kafka_ConfigResource_type(configs[i])), + rd_kafka_ConfigResource_name(configs[i]), + rd_kafka_ResourceType_name( + rd_kafka_ConfigResource_type(rconfigs[i])), + rd_kafka_ConfigResource_name(rconfigs[i])); fails++; continue; } if (err != exp_err[i]) { - TEST_FAIL_LATER("ConfigResource #%d: " - "expected %s (%d), got %s (%s)", - i, - rd_kafka_err2name(exp_err[i]), - exp_err[i], - rd_kafka_err2name(err), - errstr2 ? errstr2 : ""); + TEST_FAIL_LATER( + "ConfigResource #%d: " + "expected %s (%d), got %s (%s)", + i, rd_kafka_err2name(exp_err[i]), exp_err[i], + rd_kafka_err2name(err), errstr2 ? 
errstr2 : ""); fails++; } } @@ -929,7 +900,7 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /** * @brief Test DescribeConfigs */ -static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { +static void do_test_DescribeConfigs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { #define MY_CONFRES_CNT 3 char *topics[MY_CONFRES_CNT]; rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT]; @@ -944,7 +915,7 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { const char *errstr2; int ci = 0; int i; - int fails = 0; + int fails = 0; int max_retry_describe = 3; SUB_TEST_QUICK(); @@ -953,7 +924,7 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { * Only create one topic, the others will be non-existent. */ rd_strdupa(&topics[0], test_mk_topic_name("DescribeConfigs_exist", 1)); - for (i = 1 ; i < MY_CONFRES_CNT ; i++) + for (i = 1; i < MY_CONFRES_CNT; i++) rd_strdupa(&topics[i], test_mk_topic_name("DescribeConfigs_notexist", 1)); @@ -962,8 +933,8 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /* * ConfigResource #0: topic config, no config entries. */ - configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_TOPIC, topics[ci]); + configs[ci] = + rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]); exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR; ci++; @@ -971,8 +942,7 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { * ConfigResource #1:broker config, no config entries */ configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_BROKER, - tsprintf("%"PRId32, avail_brokers[0])); + RD_KAFKA_RESOURCE_BROKER, tsprintf("%" PRId32, avail_brokers[0])); exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR; ci++; @@ -980,20 +950,20 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /* * ConfigResource #2: topic config, non-existent topic, no config entr. 
*/ - configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_TOPIC, topics[ci]); - /* FIXME: This is a bug in the broker ( 0) { - TEST_WARN("ConfigResource #%d: " - "expected %s (%d), got %s (%s): " - "this is typically a temporary " - "error while the new resource " - "is propagating: retrying", - i, - rd_kafka_err2name(exp_err[i]), - exp_err[i], - rd_kafka_err2name(err), - errstr2 ? errstr2 : ""); + TEST_WARN( + "ConfigResource #%d: " + "expected %s (%d), got %s (%s): " + "this is typically a temporary " + "error while the new resource " + "is propagating: retrying", + i, rd_kafka_err2name(exp_err[i]), + exp_err[i], rd_kafka_err2name(err), + errstr2 ? errstr2 : ""); rd_kafka_event_destroy(rkev); rd_sleep(1); goto retry_describe; } - TEST_FAIL_LATER("ConfigResource #%d: " - "expected %s (%d), got %s (%s)", - i, - rd_kafka_err2name(exp_err[i]), - exp_err[i], - rd_kafka_err2name(err), - errstr2 ? errstr2 : ""); + TEST_FAIL_LATER( + "ConfigResource #%d: " + "expected %s (%d), got %s (%s)", + i, rd_kafka_err2name(exp_err[i]), exp_err[i], + rd_kafka_err2name(err), errstr2 ? errstr2 : ""); fails++; } } @@ -1124,7 +1091,7 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /** * @brief Verify that an unclean rd_kafka_destroy() does not hang. 
*/ -static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { +static void do_test_unclean_destroy(rd_kafka_type_t cltype, int with_mainq) { rd_kafka_t *rk; char errstr[512]; rd_kafka_conf_t *conf; @@ -1145,15 +1112,16 @@ static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { else q = rd_kafka_queue_new(rk); - topic = rd_kafka_NewTopic_new(test_mk_topic_name(__FUNCTION__, 1), - 3, 1, NULL, 0); + topic = rd_kafka_NewTopic_new(test_mk_topic_name(__FUNCTION__, 1), 3, 1, + NULL, 0); rd_kafka_CreateTopics(rk, &topic, 1, NULL, q); rd_kafka_NewTopic_destroy(topic); rd_kafka_queue_destroy(q); - TEST_SAY("Giving rd_kafka_destroy() 5s to finish, " - "despite Admin API request being processed\n"); + TEST_SAY( + "Giving rd_kafka_destroy() 5s to finish, " + "despite Admin API request being processed\n"); test_timeout_set(5); TIMING_START(&t_destroy, "rd_kafka_destroy()"); rd_kafka_destroy(rk); @@ -1167,19 +1135,19 @@ static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { - /** - * @brief Test deletion of records - * - * - */ -static void do_test_DeleteRecords (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int op_timeout) { + * @brief Test deletion of records + * + * + */ +static void do_test_DeleteRecords(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int op_timeout) { rd_kafka_queue_t *q; - rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_AdminOptions_t *options = NULL; rd_kafka_topic_partition_list_t *offsets = NULL; - rd_kafka_event_t *rkev = NULL; + rd_kafka_event_t *rkev = NULL; rd_kafka_resp_err_t err; char errstr[512]; const char *errstr2; @@ -1187,10 +1155,10 @@ static void do_test_DeleteRecords (const char *what, rd_kafka_topic_partition_list_t *results = NULL; int i; const int partitions_cnt = 3; - const int msgs_cnt = 100; + const int msgs_cnt = 100; char *topics[MY_DEL_RECORDS_CNT]; rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_RECORDS_CNT] = {{0}}; - int 
exp_mdtopic_cnt = 0; + int exp_mdtopic_cnt = 0; test_timing_t timing; rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; rd_kafka_DeleteRecords_t *del_records; @@ -1202,42 +1170,37 @@ static void do_test_DeleteRecords (const char *what, q = useq ? useq : rd_kafka_queue_new(rk); if (op_timeout != -1) { - options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_ANY); + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } - for (i = 0 ; i < MY_DEL_RECORDS_CNT ; i++) { + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) { char pfx[32]; char *topic; rd_snprintf(pfx, sizeof(pfx), "DeleteRecords-topic%d", i); topic = rd_strdup(test_mk_topic_name(pfx, 1)); - topics[i] = topic; + topics[i] = topic; exp_mdtopics[exp_mdtopic_cnt++].topic = topic; } /* Create the topics first. */ - test_CreateTopics_simple(rk, NULL, topics, - MY_DEL_RECORDS_CNT, - partitions_cnt /*num_partitions*/, - NULL); + test_CreateTopics_simple(rk, NULL, topics, MY_DEL_RECORDS_CNT, + partitions_cnt /*num_partitions*/, NULL); /* Verify that topics are reported by metadata */ - test_wait_metadata_update(rk, - exp_mdtopics, exp_mdtopic_cnt, - NULL, 0, - 15*1000); + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, + 15 * 1000); /* Produce 100 msgs / partition */ - for (i = 0 ; i < MY_DEL_RECORDS_CNT; i++ ) { + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) { int32_t partition; - for (partition = 0 ; partition < partitions_cnt; partition++ ) { + for (partition = 0; partition < partitions_cnt; partition++) { test_produce_msgs_easy(topics[i], 0, partition, msgs_cnt); } @@ -1246,24 +1209,24 @@ static void do_test_DeleteRecords (const char *what, offsets = rd_kafka_topic_partition_list_new(10); /* Wipe all data from topic 0 */ - for (i = 0 ; i < partitions_cnt; i++) - 
rd_kafka_topic_partition_list_add(offsets, topics[0], i)-> - offset = RD_KAFKA_OFFSET_END; + for (i = 0; i < partitions_cnt; i++) + rd_kafka_topic_partition_list_add(offsets, topics[0], i) + ->offset = RD_KAFKA_OFFSET_END; /* Wipe all data from partition 0 in topic 1 */ - rd_kafka_topic_partition_list_add(offsets, topics[1], 0)-> - offset = RD_KAFKA_OFFSET_END; + rd_kafka_topic_partition_list_add(offsets, topics[1], 0)->offset = + RD_KAFKA_OFFSET_END; /* Wipe some data from partition 2 in topic 1 */ - rd_kafka_topic_partition_list_add(offsets, topics[1], 2)-> - offset = msgs_cnt / 2; + rd_kafka_topic_partition_list_add(offsets, topics[1], 2)->offset = + msgs_cnt / 2; /* Not changing the offset (out of range) for topic 2 partition 0 */ rd_kafka_topic_partition_list_add(offsets, topics[2], 0); /* Offset out of range for topic 2 partition 1 */ - rd_kafka_topic_partition_list_add(offsets, topics[2], 1)-> - offset = msgs_cnt + 1; + rd_kafka_topic_partition_list_add(offsets, topics[2], 1)->offset = + msgs_cnt + 1; del_records = rd_kafka_DeleteRecords_new(offsets); @@ -1280,15 +1243,14 @@ static void do_test_DeleteRecords (const char *what, * Print but otherwise ignore other event types * (typically generic Error events). 
*/ while (1) { - rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000)); + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); TEST_SAY("DeleteRecords: got %s in %.3fms\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); if (rkev == NULL) continue; if (rd_kafka_event_error(rkev)) - TEST_SAY("%s: %s\n", - rd_kafka_event_name(rkev), + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); if (rd_kafka_event_type(rkev) == @@ -1304,19 +1266,18 @@ static void do_test_DeleteRecords (const char *what, rd_kafka_event_name(rkev)); /* Expecting error */ - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == exp_err, "expected DeleteRecords to return %s, not %s (%s)", - rd_kafka_err2str(exp_err), - rd_kafka_err2str(err), + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), err ? errstr2 : "n/a"); - TEST_SAY("DeleteRecords: returned %s (%s)\n", - rd_kafka_err2str(err), err ? errstr2 : "n/a"); + TEST_SAY("DeleteRecords: returned %s (%s)\n", rd_kafka_err2str(err), + err ? 
errstr2 : "n/a"); results = rd_kafka_topic_partition_list_copy( - rd_kafka_DeleteRecords_result_offsets(res)); + rd_kafka_DeleteRecords_result_offsets(res)); /* Sort both input and output list */ rd_kafka_topic_partition_list_sort(offsets, NULL, NULL); @@ -1330,14 +1291,13 @@ static void do_test_DeleteRecords (const char *what, TEST_ASSERT(offsets->cnt == results->cnt, "expected DeleteRecords_result_offsets to return %d items, " "not %d", - offsets->cnt, - results->cnt); + offsets->cnt, results->cnt); - for (i = 0 ; i < results->cnt ; i++) { - const rd_kafka_topic_partition_t *input =&offsets->elems[i]; + for (i = 0; i < results->cnt; i++) { + const rd_kafka_topic_partition_t *input = &offsets->elems[i]; const rd_kafka_topic_partition_t *output = &results->elems[i]; - int64_t expected_offset = input->offset; - rd_kafka_resp_err_t expected_err = 0; + int64_t expected_offset = input->offset; + rd_kafka_resp_err_t expected_err = 0; if (expected_offset == RD_KAFKA_OFFSET_END) expected_offset = msgs_cnt; @@ -1347,56 +1307,52 @@ static void do_test_DeleteRecords (const char *what, input->offset > msgs_cnt) expected_err = 1; - TEST_SAY("DeleteRecords Returned %s for %s [%"PRId32"] " + TEST_SAY("DeleteRecords Returned %s for %s [%" PRId32 + "] " "low-watermark = %d\n", - rd_kafka_err2name(output->err), - output->topic, - output->partition, - (int)output->offset); + rd_kafka_err2name(output->err), output->topic, + output->partition, (int)output->offset); if (strcmp(output->topic, input->topic)) - TEST_FAIL_LATER("Result order mismatch at #%d: " - "expected topic %s, got %s", - i, - input->topic, - output->topic); + TEST_FAIL_LATER( + "Result order mismatch at #%d: " + "expected topic %s, got %s", + i, input->topic, output->topic); if (output->partition != input->partition) - TEST_FAIL_LATER("Result order mismatch at #%d: " - "expected partition %d, got %d", - i, - input->partition, - output->partition); + TEST_FAIL_LATER( + "Result order mismatch at #%d: " + "expected 
partition %d, got %d", + i, input->partition, output->partition); if (output->err != expected_err) - TEST_FAIL_LATER("%s [%"PRId32"]: " - "expected error code %d (%s), " - "got %d (%s)", - output->topic, - output->partition, - expected_err, - rd_kafka_err2str(expected_err), - output->err, - rd_kafka_err2str(output->err)); + TEST_FAIL_LATER( + "%s [%" PRId32 + "]: " + "expected error code %d (%s), " + "got %d (%s)", + output->topic, output->partition, expected_err, + rd_kafka_err2str(expected_err), output->err, + rd_kafka_err2str(output->err)); if (output->err == 0 && output->offset != expected_offset) - TEST_FAIL_LATER("%s [%"PRId32"]: " - "expected offset %"PRId64", " - "got %"PRId64, - output->topic, - output->partition, - expected_offset, - output->offset); + TEST_FAIL_LATER("%s [%" PRId32 + "]: " + "expected offset %" PRId64 + ", " + "got %" PRId64, + output->topic, output->partition, + expected_offset, output->offset); } /* Check watermarks for partitions */ - for (i = 0 ; i < MY_DEL_RECORDS_CNT; i++ ) { + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) { int32_t partition; - for (partition = 0 ; partition < partitions_cnt; partition++ ) { + for (partition = 0; partition < partitions_cnt; partition++) { const rd_kafka_topic_partition_t *del = - rd_kafka_topic_partition_list_find( - results, topics[i], partition); - int64_t expected_low = 0; + rd_kafka_topic_partition_list_find( + results, topics[i], partition); + int64_t expected_low = 0; int64_t expected_high = msgs_cnt; int64_t low, high; @@ -1405,36 +1361,35 @@ static void do_test_DeleteRecords (const char *what, } err = rd_kafka_query_watermark_offsets( - rk, topics[i], partition, - &low, &high, tmout_multip(10000)); + rk, topics[i], partition, &low, &high, + tmout_multip(10000)); if (err) - TEST_FAIL("query_watermark_offsets failed: " - "%s\n", - rd_kafka_err2str(err)); + TEST_FAIL( + "query_watermark_offsets failed: " + "%s\n", + rd_kafka_err2str(err)); if (low != expected_low) - TEST_FAIL_LATER("For %s 
[%"PRId32"] expected " - "a low watermark of %"PRId64 - ", got %"PRId64, - topics[i], - partition, - expected_low, - low); + TEST_FAIL_LATER("For %s [%" PRId32 + "] expected " + "a low watermark of %" PRId64 + ", got %" PRId64, + topics[i], partition, + expected_low, low); if (high != expected_high) - TEST_FAIL_LATER("For %s [%"PRId32"] expected " - "a high watermark of %"PRId64 - ", got %"PRId64, - topics[i], - partition, - expected_high, - high); + TEST_FAIL_LATER("For %s [%" PRId32 + "] expected " + "a high watermark of %" PRId64 + ", got %" PRId64, + topics[i], partition, + expected_high, high); } } rd_kafka_event_destroy(rkev); - for (i = 0 ; i < MY_DEL_RECORDS_CNT ; i++) + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) rd_free(topics[i]); if (results) @@ -1456,22 +1411,23 @@ static void do_test_DeleteRecords (const char *what, } /** - * @brief Test deletion of groups - * - * - */ + * @brief Test deletion of groups + * + * + */ typedef struct expected_group_result { char *group; rd_kafka_resp_err_t err; } expected_group_result_t; -static void do_test_DeleteGroups (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int op_timeout) { +static void do_test_DeleteGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int op_timeout) { rd_kafka_queue_t *q; rd_kafka_AdminOptions_t *options = NULL; - rd_kafka_event_t *rkev = NULL; + rd_kafka_event_t *rkev = NULL; rd_kafka_resp_err_t err; char errstr[512]; const char *errstr2; @@ -1479,12 +1435,12 @@ static void do_test_DeleteGroups (const char *what, int known_groups = MY_DEL_GROUPS_CNT - 1; int i; const int partitions_cnt = 1; - const int msgs_cnt = 100; + const int msgs_cnt = 100; char *topic; rd_kafka_metadata_topic_t exp_mdtopic = {0}; - int64_t testid = test_id_generate(); + int64_t testid = test_id_generate(); test_timing_t timing; - rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; const rd_kafka_group_result_t **results = 
NULL; expected_group_result_t expected[MY_DEL_GROUPS_CNT] = {{0}}; rd_kafka_DeleteGroup_t *del_groups[MY_DEL_GROUPS_CNT]; @@ -1496,28 +1452,22 @@ static void do_test_DeleteGroups (const char *what, q = useq ? useq : rd_kafka_queue_new(rk); if (op_timeout != -1) { - options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_ANY); + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } - topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); exp_mdtopic.topic = topic; /* Create the topics first. */ - test_CreateTopics_simple(rk, NULL, &topic, 1, - partitions_cnt, - NULL); + test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL); /* Verify that topics are reported by metadata */ - test_wait_metadata_update(rk, - &exp_mdtopic, 1, - NULL, 0, - 15*1000); + test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000); /* Produce 100 msgs */ test_produce_msgs_easy(topic, testid, 0, msgs_cnt); @@ -1525,9 +1475,10 @@ static void do_test_DeleteGroups (const char *what, for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { char *group = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); if (i < known_groups) { - test_consume_msgs_easy(group, topic, testid, -1, msgs_cnt, NULL); + test_consume_msgs_easy(group, topic, testid, -1, + msgs_cnt, NULL); expected[i].group = group; - expected[i].err = RD_KAFKA_RESP_ERR_NO_ERROR; + expected[i].err = RD_KAFKA_RESP_ERR_NO_ERROR; } else { expected[i].group = group; expected[i].err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND; @@ -1545,16 +1496,15 @@ static void do_test_DeleteGroups (const char *what, /* Poll result queue for DeleteGroups result. * Print but otherwise ignore other event types * (typically generic Error events). 
*/ - while(1) { - rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000)); + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); TEST_SAY("DeleteGroups: got %s in %.3fms\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); if (rkev == NULL) continue; if (rd_kafka_event_error(rkev)) - TEST_SAY("%s: %s\n", - rd_kafka_event_name(rkev), + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); if (rd_kafka_event_type(rkev) == @@ -1570,44 +1520,44 @@ static void do_test_DeleteGroups (const char *what, rd_kafka_event_name(rkev)); /* Expecting error */ - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == exp_err, "expected DeleteGroups to return %s, not %s (%s)", - rd_kafka_err2str(exp_err), - rd_kafka_err2str(err), + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), err ? errstr2 : "n/a"); - TEST_SAY("DeleteGroups: returned %s (%s)\n", - rd_kafka_err2str(err), err ? errstr2 : "n/a"); + TEST_SAY("DeleteGroups: returned %s (%s)\n", rd_kafka_err2str(err), + err ? 
errstr2 : "n/a"); size_t cnt = 0; - results = rd_kafka_DeleteGroups_result_groups(res, &cnt); + results = rd_kafka_DeleteGroups_result_groups(res, &cnt); TEST_ASSERT(MY_DEL_GROUPS_CNT == cnt, - "expected DeleteGroups_result_groups to return %d items, not %"PRIusz, - MY_DEL_GROUPS_CNT, - cnt); + "expected DeleteGroups_result_groups to return %d items, " + "not %" PRIusz, + MY_DEL_GROUPS_CNT, cnt); - for (i = 0 ; i < MY_DEL_GROUPS_CNT ; i++) { + for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { const expected_group_result_t *exp = &expected[i]; - rd_kafka_resp_err_t exp_err = exp->err; + rd_kafka_resp_err_t exp_err = exp->err; const rd_kafka_group_result_t *act = results[i]; - rd_kafka_resp_err_t act_err = rd_kafka_error_code(rd_kafka_group_result_error(act)); - TEST_ASSERT(strcmp(exp->group, rd_kafka_group_result_name(act)) == 0, - "Result order mismatch at #%d: expected group name to be %s, not %s", - i, exp->group, rd_kafka_group_result_name(act)); + rd_kafka_resp_err_t act_err = + rd_kafka_error_code(rd_kafka_group_result_error(act)); + TEST_ASSERT( + strcmp(exp->group, rd_kafka_group_result_name(act)) == 0, + "Result order mismatch at #%d: expected group name to be " + "%s, not %s", + i, exp->group, rd_kafka_group_result_name(act)); TEST_ASSERT(exp_err == act_err, "expected err=%d for group %s, not %d (%s)", - exp_err, - exp->group, - act_err, + exp_err, exp->group, act_err, rd_kafka_err2str(act_err)); } rd_kafka_event_destroy(rkev); - for (i = 0 ; i < MY_DEL_GROUPS_CNT ; i++) { + for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { rd_kafka_DeleteGroup_destroy(del_groups[i]); rd_free(expected[i].group); } @@ -1628,19 +1578,19 @@ static void do_test_DeleteGroups (const char *what, /** - * @brief Test deletion of committed offsets. - * - * - */ -static void do_test_DeleteConsumerGroupOffsets (const char *what, - rd_kafka_t *rk, - rd_kafka_queue_t *useq, - int op_timeout, - rd_bool_t sub_consumer) { + * @brief Test deletion of committed offsets. 
+ * + * + */ +static void do_test_DeleteConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int op_timeout, + rd_bool_t sub_consumer) { rd_kafka_queue_t *q; rd_kafka_AdminOptions_t *options = NULL; - rd_kafka_topic_partition_list_t *orig_offsets, *offsets, - *to_delete, *committed, *deleted, *subscription = NULL; + rd_kafka_topic_partition_list_t *orig_offsets, *offsets, *to_delete, + *committed, *deleted, *subscription = NULL; rd_kafka_event_t *rkev = NULL; rd_kafka_resp_err_t err; char errstr[512]; @@ -1650,7 +1600,7 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, const int partitions_cnt = 3; char *topics[MY_TOPIC_CNT]; rd_kafka_metadata_topic_t exp_mdtopics[MY_TOPIC_CNT] = {{0}}; - int exp_mdtopic_cnt = 0; + int exp_mdtopic_cnt = 0; test_timing_t timing; rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets; @@ -1670,26 +1620,24 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, q = useq ? 
useq : rd_kafka_queue_new(rk); if (op_timeout != -1) { - options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_ANY); + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } subscription = rd_kafka_topic_partition_list_new(MY_TOPIC_CNT); - for (i = 0 ; i < MY_TOPIC_CNT ; i++) { + for (i = 0; i < MY_TOPIC_CNT; i++) { char pfx[64]; char *topic; - rd_snprintf(pfx, sizeof(pfx), - "DCGO-topic%d", i); + rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i); topic = rd_strdup(test_mk_topic_name(pfx, 1)); - topics[i] = topic; + topics[i] = topic; exp_mdtopics[exp_mdtopic_cnt++].topic = topic; rd_kafka_topic_partition_list_add(subscription, topic, @@ -1699,14 +1647,12 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, groupid = topics[0]; /* Create the topics first. */ - test_CreateTopics_simple(rk, NULL, topics, MY_TOPIC_CNT, - partitions_cnt, NULL); + test_CreateTopics_simple(rk, NULL, topics, MY_TOPIC_CNT, partitions_cnt, + NULL); /* Verify that topics are reported by metadata */ - test_wait_metadata_update(rk, - exp_mdtopics, exp_mdtopic_cnt, - NULL, 0, - 15*1000); + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, + 15 * 1000); rd_sleep(1); /* Additional wait time for cluster propagation */ @@ -1719,17 +1665,17 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, /* Commit some offsets */ orig_offsets = rd_kafka_topic_partition_list_new(MY_TOPIC_CNT * 2); - for (i = 0 ; i < MY_TOPIC_CNT * 2 ; i++) - rd_kafka_topic_partition_list_add( - orig_offsets, topics[i/2], - i % MY_TOPIC_CNT)->offset = (i+1)*10; + for (i = 0; i < MY_TOPIC_CNT * 2; i++) + rd_kafka_topic_partition_list_add(orig_offsets, topics[i / 2], + i % MY_TOPIC_CNT) + ->offset = (i + 1) * 10; - TEST_CALL_ERR__(rd_kafka_commit(consumer, orig_offsets, 
0/*sync*/)); + TEST_CALL_ERR__(rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/)); /* Verify committed offsets match */ committed = rd_kafka_topic_partition_list_copy(orig_offsets); - TEST_CALL_ERR__(rd_kafka_committed(consumer, committed, - tmout_multip(5*1000))); + TEST_CALL_ERR__( + rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000))); if (test_partition_list_cmp(committed, orig_offsets)) { TEST_SAY("commit() list:\n"); @@ -1742,26 +1688,22 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, rd_kafka_topic_partition_list_destroy(committed); /* Now delete second half of the commits */ - offsets = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2); + offsets = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2); to_delete = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2); - for (i = 0 ; i < orig_offsets->cnt ; i++) { + for (i = 0; i < orig_offsets->cnt; i++) { if (i < orig_offsets->cnt / 2) rd_kafka_topic_partition_list_add( - offsets, - orig_offsets->elems[i].topic, - orig_offsets->elems[i].partition); + offsets, orig_offsets->elems[i].topic, + orig_offsets->elems[i].partition); else { rd_kafka_topic_partition_list_add( - to_delete, - orig_offsets->elems[i].topic, - orig_offsets->elems[i].partition); + to_delete, orig_offsets->elems[i].topic, + orig_offsets->elems[i].partition); rd_kafka_topic_partition_list_add( - offsets, - orig_offsets->elems[i].topic, - orig_offsets->elems[i].partition)->offset = - RD_KAFKA_OFFSET_INVALID; + offsets, orig_offsets->elems[i].topic, + orig_offsets->elems[i].partition) + ->offset = RD_KAFKA_OFFSET_INVALID; } - } cgoffsets = rd_kafka_DeleteConsumerGroupOffsets_new(groupid, to_delete); @@ -1778,15 +1720,14 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, * Print but otherwise ignore other event types * (typically generic Error events). 
*/ while (1) { - rkev = rd_kafka_queue_poll(q, tmout_multip(10*1000)); + rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000)); TEST_SAY("DeleteConsumerGroupOffsets: got %s in %.3fms\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); if (rkev == NULL) continue; if (rd_kafka_event_error(rkev)) - TEST_SAY("%s: %s\n", - rd_kafka_event_name(rkev), + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); if (rd_kafka_event_type(rkev) == @@ -1802,24 +1743,23 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, rd_kafka_event_name(rkev)); /* Expecting error */ - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(!err, "expected DeleteConsumerGroupOffsets to succeed, " "got %s (%s)", - rd_kafka_err2name(err), - err ? errstr2 : "n/a"); + rd_kafka_err2name(err), err ? errstr2 : "n/a"); TEST_SAY("DeleteConsumerGroupOffsets: returned %s (%s)\n", rd_kafka_err2str(err), err ? 
errstr2 : "n/a"); - gres = rd_kafka_DeleteConsumerGroupOffsets_result_groups(res, - &gres_cnt); + gres = + rd_kafka_DeleteConsumerGroupOffsets_result_groups(res, &gres_cnt); TEST_ASSERT(gres && gres_cnt == 1, - "expected gres_cnt == 1, not %"PRIusz, gres_cnt); + "expected gres_cnt == 1, not %" PRIusz, gres_cnt); deleted = rd_kafka_topic_partition_list_copy( - rd_kafka_group_result_partitions(gres[0])); + rd_kafka_group_result_partitions(gres[0])); if (test_partition_list_cmp(deleted, to_delete)) { TEST_SAY("Result list:\n"); @@ -1830,9 +1770,10 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, } /* Verify expected errors */ - for (i = 0 ; i < deleted->cnt ; i++) { + for (i = 0; i < deleted->cnt; i++) { TEST_ASSERT_LATER(deleted->elems[i].err == exp_err, - "Result %s [%"PRId32"] has error %s, " + "Result %s [%" PRId32 + "] has error %s, " "expected %s", deleted->elems[i].topic, deleted->elems[i].partition, @@ -1850,8 +1791,8 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, /* Verify committed offsets match */ committed = rd_kafka_topic_partition_list_copy(orig_offsets); - TEST_CALL_ERR__(rd_kafka_committed(consumer, committed, - tmout_multip(5*1000))); + TEST_CALL_ERR__( + rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000))); TEST_SAY("Original committed offsets:\n"); test_print_partition_list(orig_offsets); @@ -1872,7 +1813,7 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, rd_kafka_topic_partition_list_destroy(orig_offsets); rd_kafka_topic_partition_list_destroy(subscription); - for (i = 0 ; i < MY_TOPIC_CNT ; i++) + for (i = 0; i < MY_TOPIC_CNT; i++) rd_free(topics[i]); rd_kafka_destroy(consumer); @@ -1890,7 +1831,7 @@ static void do_test_DeleteConsumerGroupOffsets (const char *what, } -static void do_test_apis (rd_kafka_type_t cltype) { +static void do_test_apis(rd_kafka_type_t cltype) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_queue_t *mainq; @@ -1898,12 +1839,13 @@ static void 
do_test_apis (rd_kafka_type_t cltype) { /* Get the available brokers, but use a separate rd_kafka_t instance * so we don't jinx the tests by having up-to-date metadata. */ avail_brokers = test_get_broker_ids(NULL, &avail_broker_cnt); - TEST_SAY("%"PRIusz" brokers in cluster " + TEST_SAY("%" PRIusz + " brokers in cluster " "which will be used for replica sets\n", avail_broker_cnt); - do_test_unclean_destroy(cltype, 0/*tempq*/); - do_test_unclean_destroy(cltype, 1/*mainq*/); + do_test_unclean_destroy(cltype, 0 /*tempq*/); + do_test_unclean_destroy(cltype, 1 /*mainq*/); test_conf_init(&conf, NULL, 180); test_conf_set(conf, "socket.timeout.ms", "10000"); @@ -1912,27 +1854,27 @@ static void do_test_apis (rd_kafka_type_t cltype) { mainq = rd_kafka_queue_get_main(rk); /* Create topics */ - do_test_CreateTopics("temp queue, op timeout 0", - rk, NULL, 0, 0); - do_test_CreateTopics("temp queue, op timeout 15000", - rk, NULL, 15000, 0); - do_test_CreateTopics("temp queue, op timeout 300, " - "validate only", - rk, NULL, 300, rd_true); - do_test_CreateTopics("temp queue, op timeout 9000, validate_only", - rk, NULL, 9000, rd_true); + do_test_CreateTopics("temp queue, op timeout 0", rk, NULL, 0, 0); + do_test_CreateTopics("temp queue, op timeout 15000", rk, NULL, 15000, + 0); + do_test_CreateTopics( + "temp queue, op timeout 300, " + "validate only", + rk, NULL, 300, rd_true); + do_test_CreateTopics("temp queue, op timeout 9000, validate_only", rk, + NULL, 9000, rd_true); do_test_CreateTopics("main queue, options", rk, mainq, -1, 0); /* Delete topics */ do_test_DeleteTopics("temp queue, op timeout 0", rk, NULL, 0); do_test_DeleteTopics("main queue, op timeout 15000", rk, mainq, 1500); - if (test_broker_version >= TEST_BRKVER(1,0,0,0)) { + if (test_broker_version >= TEST_BRKVER(1, 0, 0, 0)) { /* Create Partitions */ - do_test_CreatePartitions("temp queue, op timeout 6500", - rk, NULL, 6500); - do_test_CreatePartitions("main queue, op timeout 0", - rk, mainq, 0); + 
do_test_CreatePartitions("temp queue, op timeout 6500", rk, + NULL, 6500); + do_test_CreatePartitions("main queue, op timeout 0", rk, mainq, + 0); } /* AlterConfigs */ @@ -1950,16 +1892,15 @@ static void do_test_apis (rd_kafka_type_t cltype) { do_test_DeleteGroups("main queue, op timeout 1500", rk, mainq, 1500); do_test_DeleteGroups("main queue, op timeout 1500", rk, mainq, 1500); - if (test_broker_version >= TEST_BRKVER(2,4,0,0)) { + if (test_broker_version >= TEST_BRKVER(2, 4, 0, 0)) { /* Delete committed offsets */ + do_test_DeleteConsumerGroupOffsets("temp queue, op timeout 0", + rk, NULL, 0, rd_false); do_test_DeleteConsumerGroupOffsets( - "temp queue, op timeout 0", rk, NULL, 0, rd_false); - do_test_DeleteConsumerGroupOffsets( - "main queue, op timeout 1500", rk, mainq, 1500, - rd_false); + "main queue, op timeout 1500", rk, mainq, 1500, rd_false); do_test_DeleteConsumerGroupOffsets( - "main queue, op timeout 1500", rk, mainq, 1500, - rd_true/*with subscribing consumer*/); + "main queue, op timeout 1500", rk, mainq, 1500, + rd_true /*with subscribing consumer*/); } rd_kafka_queue_destroy(mainq); @@ -1970,7 +1911,7 @@ static void do_test_apis (rd_kafka_type_t cltype) { } -int main_0081_admin (int argc, char **argv) { +int main_0081_admin(int argc, char **argv) { do_test_apis(RD_KAFKA_PRODUCER); diff --git a/tests/0082-fetch_max_bytes.cpp b/tests/0082-fetch_max_bytes.cpp index 1209991673..16eb5a21a1 100644 --- a/tests/0082-fetch_max_bytes.cpp +++ b/tests/0082-fetch_max_bytes.cpp @@ -41,18 +41,18 @@ */ -static void do_test_fetch_max_bytes (void) { +static void do_test_fetch_max_bytes(void) { const int partcnt = 3; - int msgcnt = 10 * partcnt; - const int msgsize = 900*1024; /* Less than 1 Meg to account - * for batch overhead */ + int msgcnt = 10 * partcnt; + const int msgsize = 900 * 1024; /* Less than 1 Meg to account + * for batch overhead */ std::string errstr; RdKafka::ErrorCode err; std::string topic = Test::mk_topic_name("0081-fetch_max_bytes", 1); /* 
Produce messages to partitions */ - for (int32_t p = 0 ; p < (int32_t)partcnt ; p++) + for (int32_t p = 0; p < (int32_t)partcnt; p++) test_produce_msgs_easy_size(topic.c_str(), 0, p, msgcnt, msgsize); /* Create consumer */ @@ -79,8 +79,8 @@ static void do_test_fetch_max_bytes (void) { * larger than fetch.max.bytes. */ Test::conf_set(conf, "max.partition.fetch.bytes", "20000000"); /* ~20MB */ - Test::conf_set(conf, "fetch.max.bytes", "1000000"); /* ~1MB */ - Test::conf_set(conf, "receive.message.max.bytes", "1000512"); /* ~1MB+512 */ + Test::conf_set(conf, "fetch.max.bytes", "1000000"); /* ~1MB */ + Test::conf_set(conf, "receive.message.max.bytes", "1000512"); /* ~1MB+512 */ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); if (!c) @@ -98,19 +98,18 @@ static void do_test_fetch_max_bytes (void) { int cnt = 0; while (cnt < msgcnt) { RdKafka::Message *msg = c->consume(tmout_multip(1000)); - switch (msg->err()) - { - case RdKafka::ERR__TIMED_OUT: - break; + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + break; - case RdKafka::ERR_NO_ERROR: - cnt++; - break; + case RdKafka::ERR_NO_ERROR: + cnt++; + break; - default: - Test::Fail("Consume error: " + msg->errstr()); - break; - } + default: + Test::Fail("Consume error: " + msg->errstr()); + break; + } delete msg; } @@ -121,14 +120,14 @@ static void do_test_fetch_max_bytes (void) { } extern "C" { - int main_0082_fetch_max_bytes (int argc, char **argv) { - if (test_quick) { - Test::Skip("Test skipped due to quick mode\n"); - return 0; - } - - do_test_fetch_max_bytes(); - +int main_0082_fetch_max_bytes(int argc, char **argv) { + if (test_quick) { + Test::Skip("Test skipped due to quick mode\n"); return 0; } + + do_test_fetch_max_bytes(); + + return 0; +} } diff --git a/tests/0083-cb_event.c b/tests/0083-cb_event.c index dd1aee57a3..23ce798208 100644 --- a/tests/0083-cb_event.c +++ b/tests/0083-cb_event.c @@ -3,24 +3,24 @@ * * Copyright (c) 2018, Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,7 +35,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -49,7 +49,7 @@ static struct { * @brief Event callback function. Check the opaque pointer and * increase the count of received event. */ static void event_cb(rd_kafka_t *rk_p, void *opaque) { - TEST_ASSERT(opaque == (void*)0x1234, + TEST_ASSERT(opaque == (void *)0x1234, "Opaque pointer is not as expected (got: %p)", opaque); mtx_lock(&event_receiver.lock); event_receiver.count += 1; @@ -63,7 +63,7 @@ static int wait_event_cb(int timeout_secs) { int event_count = 0; for (; timeout_secs >= 0; timeout_secs--) { mtx_lock(&event_receiver.lock); - event_count = event_receiver.count; + event_count = event_receiver.count; event_receiver.count = 0; mtx_unlock(&event_receiver.lock); if (event_count > 0 || timeout_secs == 0) @@ -74,7 +74,7 @@ static int wait_event_cb(int timeout_secs) { } -int main_0083_cb_event (int argc, char **argv) { +int main_0083_cb_event(int argc, char **argv) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf; rd_kafka_t *rk_p, *rk_c; @@ -82,15 +82,11 @@ int main_0083_cb_event (int argc, char **argv) { rd_kafka_topic_t *rkt_p; rd_kafka_queue_t *queue; uint64_t testid; - int msgcnt = 100; - int recvd = 0; + int msgcnt = 100; + int recvd = 0; int wait_multiplier = 1; rd_kafka_resp_err_t err; 
- enum { - _NOPE, - _YEP, - _REBALANCE - } expecting_io = _REBALANCE; + enum { _NOPE, _YEP, _REBALANCE } expecting_io = _REBALANCE; int callback_event_count; rd_kafka_event_t *rkev; int eventcnt = 0; @@ -98,11 +94,11 @@ int main_0083_cb_event (int argc, char **argv) { mtx_init(&event_receiver.lock, mtx_plain); testid = test_id_generate(); - topic = test_mk_topic_name(__FUNCTION__, 1); + topic = test_mk_topic_name(__FUNCTION__, 1); - rk_p = test_create_producer(); + rk_p = test_create_producer(); rkt_p = test_create_producer_topic(rk_p, topic, NULL); - err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000)); + err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000)); TEST_ASSERT(!err, "Topic auto creation failed: %s", rd_kafka_err2str(err)); @@ -135,22 +131,31 @@ int main_0083_cb_event (int argc, char **argv) { while (recvd < msgcnt) { TEST_SAY("Waiting for event\n"); callback_event_count = wait_event_cb(1 * wait_multiplier); - TEST_ASSERT(callback_event_count <= 1, "Event cb called %d times", callback_event_count); + TEST_ASSERT(callback_event_count <= 1, + "Event cb called %d times", callback_event_count); if (callback_event_count == 1) { TEST_SAY("Events received: %d\n", callback_event_count); while ((rkev = rd_kafka_queue_poll(queue, 0))) { eventcnt++; - switch (rd_kafka_event_type(rkev)) - { + switch (rd_kafka_event_type(rkev)) { case RD_KAFKA_EVENT_REBALANCE: - TEST_SAY("Got %s: %s\n", rd_kafka_event_name(rkev), - rd_kafka_err2str(rd_kafka_event_error(rkev))); + TEST_SAY( + "Got %s: %s\n", + rd_kafka_event_name(rkev), + rd_kafka_err2str( + rd_kafka_event_error(rkev))); if (expecting_io != _REBALANCE) - TEST_FAIL("Got Rebalance when expecting message\n"); - if (rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { - rd_kafka_assign(rk_c, rd_kafka_event_topic_partition_list(rkev)); + TEST_FAIL( + "Got Rebalance when " + "expecting message\n"); + if (rd_kafka_event_error(rkev) == + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { + 
rd_kafka_assign( + rk_c, + rd_kafka_event_topic_partition_list( + rkev)); expecting_io = _NOPE; } else rd_kafka_assign(rk_c, NULL); @@ -158,24 +163,31 @@ int main_0083_cb_event (int argc, char **argv) { case RD_KAFKA_EVENT_FETCH: if (expecting_io != _YEP) - TEST_FAIL("Did not expect more messages at %d/%d\n", - recvd, msgcnt); + TEST_FAIL( + "Did not expect more " + "messages at %d/%d\n", + recvd, msgcnt); recvd++; - if (recvd == (msgcnt / 2) || recvd == msgcnt) + if (recvd == (msgcnt / 2) || + recvd == msgcnt) expecting_io = _NOPE; break; case RD_KAFKA_EVENT_ERROR: - TEST_FAIL("Error: %s\n", rd_kafka_event_error_string(rkev)); + TEST_FAIL( + "Error: %s\n", + rd_kafka_event_error_string(rkev)); break; default: - TEST_SAY("Ignoring event %s\n", rd_kafka_event_name(rkev)); + TEST_SAY("Ignoring event %s\n", + rd_kafka_event_name(rkev)); } rd_kafka_event_destroy(rkev); } - TEST_SAY("%d events, Consumed %d/%d messages\n", eventcnt, recvd, msgcnt); + TEST_SAY("%d events, Consumed %d/%d messages\n", + eventcnt, recvd, msgcnt); wait_multiplier = 1; @@ -183,14 +195,16 @@ int main_0083_cb_event (int argc, char **argv) { if (expecting_io == _REBALANCE) { continue; } else if (expecting_io == _YEP) { - TEST_FAIL("Did not see expected IO after %d/%d msgs\n", - recvd, msgcnt); + TEST_FAIL( + "Did not see expected IO after %d/%d " + "msgs\n", + recvd, msgcnt); } TEST_SAY("Event wait timeout (good)\n"); TEST_SAY("Got idle period, producing\n"); - test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, msgcnt/2, - NULL, 10); + test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, + msgcnt / 2, NULL, 10); expecting_io = _YEP; /* When running slowly (e.g., valgrind) it might take diff --git a/tests/0084-destroy_flags.c b/tests/0084-destroy_flags.c index 606aa35ebd..008195f365 100644 --- a/tests/0084-destroy_flags.c +++ b/tests/0084-destroy_flags.c @@ -36,16 +36,16 @@ static RD_TLS int rebalance_cnt = 0; -static void destroy_flags_rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - 
rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void destroy_flags_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { rebalance_cnt++; TEST_SAY("rebalance_cb: %s with %d partition(s)\n", rd_kafka_err2str(err), parts->cnt); - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: test_consumer_assign("rebalance", rk, parts); break; @@ -66,20 +66,20 @@ struct df_args { int consumer_unsubscribe; }; -static void do_test_destroy_flags (const char *topic, - int destroy_flags, - int local_mode, - const struct df_args *args) { +static void do_test_destroy_flags(const char *topic, + int destroy_flags, + int local_mode, + const struct df_args *args) { rd_kafka_t *rk; rd_kafka_conf_t *conf; test_timing_t t_destroy; - TEST_SAY(_C_MAG "[ test destroy_flags 0x%x for client_type %d, " + TEST_SAY(_C_MAG + "[ test destroy_flags 0x%x for client_type %d, " "produce_cnt %d, subscribe %d, unsubscribe %d, " "%s mode ]\n" _C_CLR, - destroy_flags, args->client_type, - args->produce_cnt, args->consumer_subscribe, - args->consumer_unsubscribe, + destroy_flags, args->client_type, args->produce_cnt, + args->consumer_subscribe, args->consumer_unsubscribe, local_mode ? 
"local" : "broker"); test_conf_init(&conf, NULL, 20); @@ -96,11 +96,9 @@ static void do_test_destroy_flags (const char *topic, int msgcounter = 0; rkt = test_create_producer_topic(rk, topic, NULL); - test_produce_msgs_nowait(rk, rkt, 0, - RD_KAFKA_PARTITION_UA, - 0, args->produce_cnt, - NULL, 100, 0, - &msgcounter); + test_produce_msgs_nowait( + rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, + args->produce_cnt, NULL, 100, 0, &msgcounter); rd_kafka_topic_destroy(rkt); } @@ -122,14 +120,14 @@ static void do_test_destroy_flags (const char *topic, } } - for (i = 0 ; i < 5 ; i++) + for (i = 0; i < 5; i++) test_consumer_poll_once(rk, NULL, 100); if (args->consumer_unsubscribe) { /* Test that calling rd_kafka_unsubscribe immediately * prior to rd_kafka_destroy_flags doesn't cause the * latter to hang. */ - TEST_SAY(_C_YEL"Calling rd_kafka_unsubscribe\n"_C_CLR); + TEST_SAY(_C_YEL "Calling rd_kafka_unsubscribe\n"_C_CLR); rd_kafka_unsubscribe(rk); } } @@ -156,12 +154,12 @@ static void do_test_destroy_flags (const char *topic, "expected no rebalance callbacks, got %d", rebalance_cnt); - TEST_SAY(_C_GRN "[ test destroy_flags 0x%x for client_type %d, " + TEST_SAY(_C_GRN + "[ test destroy_flags 0x%x for client_type %d, " "produce_cnt %d, subscribe %d, unsubscribe %d, " "%s mode: PASS ]\n" _C_CLR, - destroy_flags, args->client_type, - args->produce_cnt, args->consumer_subscribe, - args->consumer_unsubscribe, + destroy_flags, args->client_type, args->produce_cnt, + args->consumer_subscribe, args->consumer_unsubscribe, local_mode ? "local" : "broker"); } @@ -169,19 +167,17 @@ static void do_test_destroy_flags (const char *topic, /** * @brief Destroy with flags */ -static void destroy_flags (int local_mode) { +static void destroy_flags(int local_mode) { const struct df_args args[] = { - { RD_KAFKA_PRODUCER, 0, 0, 0 }, - { RD_KAFKA_PRODUCER, test_quick ? 
100 : 10000, 0, 0 }, - { RD_KAFKA_CONSUMER, 0, 1, 0 }, - { RD_KAFKA_CONSUMER, 0, 1, 1 }, - { RD_KAFKA_CONSUMER, 0, 0, 0 } - }; - const int flag_combos[] = { 0, - RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE }; - const char *topic = test_mk_topic_name(__FUNCTION__, 1); + {RD_KAFKA_PRODUCER, 0, 0, 0}, + {RD_KAFKA_PRODUCER, test_quick ? 100 : 10000, 0, 0}, + {RD_KAFKA_CONSUMER, 0, 1, 0}, + {RD_KAFKA_CONSUMER, 0, 1, 1}, + {RD_KAFKA_CONSUMER, 0, 0, 0}}; + const int flag_combos[] = {0, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE}; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); const rd_bool_t can_subscribe = - test_broker_version >= TEST_BRKVER(0,9,0,0); + test_broker_version >= TEST_BRKVER(0, 9, 0, 0); int i, j; /* Create the topic to avoid not-yet-auto-created-topics being @@ -189,29 +185,25 @@ static void destroy_flags (int local_mode) { if (!local_mode) test_create_topic(NULL, topic, 3, 1); - for (i = 0 ; i < (int)RD_ARRAYSIZE(args) ; i++) { - for (j = 0 ; j < (int)RD_ARRAYSIZE(flag_combos) ; j++) { - if (!can_subscribe && - (args[i].consumer_subscribe || - args[i].consumer_unsubscribe)) + for (i = 0; i < (int)RD_ARRAYSIZE(args); i++) { + for (j = 0; j < (int)RD_ARRAYSIZE(flag_combos); j++) { + if (!can_subscribe && (args[i].consumer_subscribe || + args[i].consumer_unsubscribe)) continue; - do_test_destroy_flags(topic, - flag_combos[j], - local_mode, + do_test_destroy_flags(topic, flag_combos[j], local_mode, &args[i]); } } - } -int main_0084_destroy_flags_local (int argc, char **argv) { - destroy_flags(1/*no brokers*/); +int main_0084_destroy_flags_local(int argc, char **argv) { + destroy_flags(1 /*no brokers*/); return 0; } -int main_0084_destroy_flags (int argc, char **argv) { - destroy_flags(0/*with brokers*/); +int main_0084_destroy_flags(int argc, char **argv) { + destroy_flags(0 /*with brokers*/); return 0; } diff --git a/tests/0085-headers.cpp b/tests/0085-headers.cpp index 7bbec3558e..a342478c15 100644 --- a/tests/0085-headers.cpp +++ b/tests/0085-headers.cpp @@ 
-41,67 +41,61 @@ static void assert_all_headers_match(RdKafka::Headers *actual, } if (actual->size() != expected->size()) { Test::Fail(tostr() << "Expected headers length to equal " - << expected->size() << " instead equals " << actual->size() << "\n"); + << expected->size() << " instead equals " + << actual->size() << "\n"); } - std::vector actual_headers = actual->get_all(); + std::vector actual_headers = actual->get_all(); std::vector expected_headers = expected->get_all(); Test::Say(3, tostr() << "Header size " << actual_headers.size() << "\n"); - for(size_t i = 0; i < actual_headers.size(); i++) { - RdKafka::Headers::Header actual_header = actual_headers[i]; + for (size_t i = 0; i < actual_headers.size(); i++) { + RdKafka::Headers::Header actual_header = actual_headers[i]; const RdKafka::Headers::Header expected_header = expected_headers[i]; - std::string actual_key = actual_header.key(); - std::string actual_value = std::string( - actual_header.value_string(), - actual_header.value_size() - ); + std::string actual_key = actual_header.key(); + std::string actual_value = + std::string(actual_header.value_string(), actual_header.value_size()); std::string expected_key = expected_header.key(); - std::string expected_value = std::string( - actual_header.value_string(), - expected_header.value_size() - ); - - Test::Say(3, - tostr() << - "Expected Key " << expected_key << - ", Expected val " << expected_value << - ", Actual key " << actual_key << - ", Actual val " << actual_value << "\n"); + std::string expected_value = + std::string(actual_header.value_string(), expected_header.value_size()); + + Test::Say(3, tostr() << "Expected Key " << expected_key << ", Expected val " + << expected_value << ", Actual key " << actual_key + << ", Actual val " << actual_value << "\n"); if (actual_key != expected_key) { Test::Fail(tostr() << "Header key does not match, expected '" - << actual_key << "' but got '" << expected_key << "'\n"); + << actual_key << "' but got '" << 
expected_key + << "'\n"); } if (actual_value != expected_value) { Test::Fail(tostr() << "Header value does not match, expected '" - << actual_value << "' but got '" << expected_value << "'\n"); + << actual_value << "' but got '" << expected_value + << "'\n"); } } } -static void test_headers (RdKafka::Headers *produce_headers, - const RdKafka::Headers *compare_headers) { - +static void test_headers(RdKafka::Headers *produce_headers, + const RdKafka::Headers *compare_headers) { RdKafka::ErrorCode err; - err = producer->produce(topic, 0, - RdKafka::Producer::RK_MSG_COPY, - (void *)"message", 7, - (void *)"key", 3, 0, produce_headers, NULL); + err = producer->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, + (void *)"message", 7, (void *)"key", 3, 0, + produce_headers, NULL); if (err) Test::Fail("produce() failed: " + RdKafka::err2str(err)); - producer->flush(tmout_multip(10*1000)); + producer->flush(tmout_multip(10 * 1000)); if (producer->outq_len() > 0) - Test::Fail(tostr() << "Expected producer to be flushed, " << - producer->outq_len() << " messages remain"); + Test::Fail(tostr() << "Expected producer to be flushed, " + << producer->outq_len() << " messages remain"); - int cnt = 0; + int cnt = 0; bool running = true; while (running) { - RdKafka::Message *msg = consumer->consume(10*1000); + RdKafka::Message *msg = consumer->consume(10 * 1000); if (msg->err() == RdKafka::ERR_NO_ERROR) { cnt++; @@ -121,9 +115,9 @@ static void test_headers (RdKafka::Headers *produce_headers, } } -static void test_headers (int num_hdrs) { - Test::Say(tostr() << "Test " << num_hdrs << - " headers in consumed message.\n"); +static void test_headers(int num_hdrs) { + Test::Say(tostr() << "Test " << num_hdrs + << " headers in consumed message.\n"); RdKafka::Headers *produce_headers = RdKafka::Headers::create(); RdKafka::Headers *compare_headers = RdKafka::Headers::create(); for (int i = 0; i < num_hdrs; ++i) { @@ -158,9 +152,9 @@ static void test_headers (int num_hdrs) { delete 
compare_headers; } -static void test_duplicate_keys () { +static void test_duplicate_keys() { Test::Say("Test multiple headers with duplicate keys.\n"); - int num_hdrs = 4; + int num_hdrs = 4; RdKafka::Headers *produce_headers = RdKafka::Headers::create(); RdKafka::Headers *compare_headers = RdKafka::Headers::create(); for (int i = 0; i < num_hdrs; ++i) { @@ -175,7 +169,7 @@ static void test_duplicate_keys () { delete compare_headers; } -static void test_remove_after_add () { +static void test_remove_after_add() { Test::Say("Test removing after adding headers.\n"); RdKafka::Headers *headers = RdKafka::Headers::create(); @@ -192,9 +186,8 @@ static void test_remove_after_add () { // Assert header length is 2 size_t expected_size = 2; if (headers->size() != expected_size) { - Test::Fail(tostr() << "Expected header->size() to equal " - << expected_size << ", instead got " - << headers->size() << "\n"); + Test::Fail(tostr() << "Expected header->size() to equal " << expected_size + << ", instead got " << headers->size() << "\n"); } // Remove key_one and assert headers == 1 @@ -209,7 +202,7 @@ static void test_remove_after_add () { delete headers; } -static void test_remove_all_duplicate_keys () { +static void test_remove_all_duplicate_keys() { Test::Say("Test removing duplicate keys removes all headers.\n"); RdKafka::Headers *headers = RdKafka::Headers::create(); @@ -227,9 +220,8 @@ static void test_remove_all_duplicate_keys () { // Assert header length is 3 size_t expected_size = 3; if (headers->size() != expected_size) { - Test::Fail(tostr() << "Expected header->size() to equal " - << expected_size << ", instead got " - << headers->size() << "\n"); + Test::Fail(tostr() << "Expected header->size() to equal " << expected_size + << ", instead got " << headers->size() << "\n"); } // Remove key_one and assert headers == 1 @@ -244,14 +236,14 @@ static void test_remove_all_duplicate_keys () { delete headers; } -static void test_get_last_gives_last_added_val () { +static void 
test_get_last_gives_last_added_val() { Test::Say("Test get_last returns the last added value of duplicate keys.\n"); RdKafka::Headers *headers = RdKafka::Headers::create(); // Add two duplicate keys - std::string dup_key = "dup_key"; - std::string val_one = "val_one"; - std::string val_two = "val_two"; + std::string dup_key = "dup_key"; + std::string val_one = "val_one"; + std::string val_two = "val_two"; std::string val_three = "val_three"; headers->add(dup_key, val_one); headers->add(dup_key, val_two); @@ -260,33 +252,32 @@ static void test_get_last_gives_last_added_val () { // Assert header length is 3 size_t expected_size = 3; if (headers->size() != expected_size) { - Test::Fail(tostr() << "Expected header->size() to equal " - << expected_size << ", instead got " - << headers->size() << "\n"); + Test::Fail(tostr() << "Expected header->size() to equal " << expected_size + << ", instead got " << headers->size() << "\n"); } // Get last of duplicate key and assert it equals val_two RdKafka::Headers::Header last = headers->get_last(dup_key); - std::string value = std::string(last.value_string()); + std::string value = std::string(last.value_string()); if (value != val_three) { Test::Fail(tostr() << "Expected get_last to return " << val_two - << " as the value of the header instead got " - << value << "\n"); + << " as the value of the header instead got " << value + << "\n"); } delete headers; } -static void test_get_of_key_returns_all () { +static void test_get_of_key_returns_all() { Test::Say("Test get returns all the headers of a duplicate key.\n"); RdKafka::Headers *headers = RdKafka::Headers::create(); // Add two duplicate keys std::string unique_key = "unique"; - std::string dup_key = "dup_key"; - std::string val_one = "val_one"; - std::string val_two = "val_two"; - std::string val_three = "val_three"; + std::string dup_key = "dup_key"; + std::string val_one = "val_one"; + std::string val_two = "val_two"; + std::string val_three = "val_three"; 
headers->add(unique_key, val_one); headers->add(dup_key, val_one); headers->add(dup_key, val_two); @@ -295,14 +286,13 @@ static void test_get_of_key_returns_all () { // Assert header length is 4 size_t expected_size = 4; if (headers->size() != expected_size) { - Test::Fail(tostr() << "Expected header->size() to equal " - << expected_size << ", instead got " - << headers->size() << "\n"); + Test::Fail(tostr() << "Expected header->size() to equal " << expected_size + << ", instead got " << headers->size() << "\n"); } // Get all of the duplicate key std::vector get = headers->get(dup_key); - size_t expected_get_size = 3; + size_t expected_get_size = 3; if (get.size() != expected_get_size) { Test::Fail(tostr() << "Expected header->size() to equal " << expected_get_size << ", instead got " @@ -312,16 +302,14 @@ static void test_get_of_key_returns_all () { delete headers; } -static void test_failed_produce () { - +static void test_failed_produce() { RdKafka::Headers *headers = RdKafka::Headers::create(); headers->add("my", "header"); RdKafka::ErrorCode err; err = producer->produce(topic, 999 /* invalid partition */, - RdKafka::Producer::RK_MSG_COPY, - (void *)"message", 7, + RdKafka::Producer::RK_MSG_COPY, (void *)"message", 7, (void *)"key", 3, 0, headers, NULL); if (!err) Test::Fail("Expected produce() to fail"); @@ -329,7 +317,7 @@ static void test_failed_produce () { delete headers; } -static void test_assignment_op () { +static void test_assignment_op() { Test::Say("Test Header assignment operator\n"); RdKafka::Headers *headers = RdKafka::Headers::create(); @@ -337,65 +325,64 @@ static void test_assignment_op () { headers->add("abc", "123"); headers->add("def", "456"); - RdKafka::Headers::Header h = headers->get_last("abc"); - h = headers->get_last("def"); + RdKafka::Headers::Header h = headers->get_last("abc"); + h = headers->get_last("def"); RdKafka::Headers::Header h2 = h; - h = headers->get_last("nope"); + h = headers->get_last("nope"); RdKafka::Headers::Header 
h3 = h; - h = headers->get_last("def"); + h = headers->get_last("def"); delete headers; } extern "C" { - int main_0085_headers (int argc, char **argv) { - topic = Test::mk_topic_name("0085-headers", 1); - - RdKafka::Conf *conf; - std::string errstr; - - Test::conf_init(&conf, NULL, 0); - - RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); - if (!p) - Test::Fail("Failed to create Producer: " + errstr); - - Test::conf_set(conf, "group.id", topic); - - RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); - if (!c) - Test::Fail("Failed to create KafkaConsumer: " + errstr); - - delete conf; - - std::vector parts; - parts.push_back(RdKafka::TopicPartition::create(topic, 0, - RdKafka::Topic:: - OFFSET_BEGINNING)); - RdKafka::ErrorCode err = c->assign(parts); - if (err != RdKafka::ERR_NO_ERROR) - Test::Fail("assign() failed: " + RdKafka::err2str(err)); - RdKafka::TopicPartition::destroy(parts); - - producer = p; - consumer = c; - - test_headers(0); - test_headers(1); - test_headers(261); - test_duplicate_keys(); - test_remove_after_add(); - test_remove_all_duplicate_keys(); - test_get_last_gives_last_added_val(); - test_get_of_key_returns_all(); - test_failed_produce(); - test_assignment_op(); - - c->close(); - delete c; - delete p; - - return 0; - } +int main_0085_headers(int argc, char **argv) { + topic = Test::mk_topic_name("0085-headers", 1); + + RdKafka::Conf *conf; + std::string errstr; + + Test::conf_init(&conf, NULL, 0); + + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); + + Test::conf_set(conf, "group.id", topic); + + RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); + if (!c) + Test::Fail("Failed to create KafkaConsumer: " + errstr); + + delete conf; + + std::vector parts; + parts.push_back(RdKafka::TopicPartition::create( + topic, 0, RdKafka::Topic::OFFSET_BEGINNING)); + RdKafka::ErrorCode err = c->assign(parts); + if (err 
!= RdKafka::ERR_NO_ERROR) + Test::Fail("assign() failed: " + RdKafka::err2str(err)); + RdKafka::TopicPartition::destroy(parts); + + producer = p; + consumer = c; + + test_headers(0); + test_headers(1); + test_headers(261); + test_duplicate_keys(); + test_remove_after_add(); + test_remove_all_duplicate_keys(); + test_get_last_gives_last_added_val(); + test_get_of_key_returns_all(); + test_failed_produce(); + test_assignment_op(); + + c->close(); + delete c; + delete p; + + return 0; +} } diff --git a/tests/0086-purge.c b/tests/0086-purge.c index ee378638ba..594f3ee502 100644 --- a/tests/0086-purge.c +++ b/tests/0086-purge.c @@ -62,27 +62,27 @@ static int produce_req_cnt = 0; * @brief Sockem connect, called from **internal librdkafka thread** through * librdkafka's connect_cb */ -static int connect_cb (struct test *test, sockem_t *skm, const char *id) { +static int connect_cb(struct test *test, sockem_t *skm, const char *id) { sockem_set(skm, "delay", 500, NULL); return 0; } -static rd_kafka_resp_err_t on_request_sent (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - void *ic_opaque) { +static rd_kafka_resp_err_t on_request_sent(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + void *ic_opaque) { /* Ignore if not a ProduceRequest */ if (ApiKey != 0) return RD_KAFKA_RESP_ERR_NO_ERROR; - TEST_SAY("ProduceRequest sent to %s (%"PRId32")\n", - brokername, brokerid); + TEST_SAY("ProduceRequest sent to %s (%" PRId32 ")\n", brokername, + brokerid); mtx_lock(&produce_req_lock); produce_req_cnt++; @@ -95,20 +95,20 @@ static rd_kafka_resp_err_t on_request_sent (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR_NO_ERROR; } -static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static 
rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { return rd_kafka_interceptor_add_on_request_sent( - rk, "catch_producer_req", - on_request_sent, NULL); + rk, "catch_producer_req", on_request_sent, NULL); } #endif -static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { int msgid; struct waitmsgs *waitmsgs = rkmessage->_private; @@ -117,19 +117,19 @@ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, waitmsgs->cnt--; TEST_ASSERT(rkmessage->len == sizeof(msgid), - "invalid message size %"PRIusz", expected sizeof(int)", + "invalid message size %" PRIusz ", expected sizeof(int)", rkmessage->len); memcpy(&msgid, rkmessage->payload, rkmessage->len); - TEST_ASSERT(msgid >= 0 && msgid < msgcnt, - "msgid %d out of range 0..%d", msgid, msgcnt - 1); + TEST_ASSERT(msgid >= 0 && msgid < msgcnt, "msgid %d out of range 0..%d", + msgid, msgcnt - 1); TEST_ASSERT((int)waitmsgs->exp_err[msgid] != 12345, "msgid %d delivered twice", msgid); - TEST_SAY("DeliveryReport for msg #%d: %s\n", - msgid, rd_kafka_err2name(rkmessage->err)); + TEST_SAY("DeliveryReport for msg #%d: %s\n", msgid, + rd_kafka_err2name(rkmessage->err)); if (rkmessage->err != waitmsgs->exp_err[msgid]) { TEST_FAIL_LATER("Expected message #%d to fail with %s, not %s", @@ -144,44 +144,45 @@ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - - - - -static void purge_and_expect (const char *what, int line, - rd_kafka_t *rk, int purge_flags, - struct waitmsgs *waitmsgs, - int exp_remain, const char *reason) { +static void purge_and_expect(const char *what, + int line, + rd_kafka_t *rk, + int purge_flags, + struct waitmsgs *waitmsgs, + int exp_remain, + const char *reason) { test_timing_t t_purge; rd_kafka_resp_err_t err; - TEST_SAY("%s:%d: purge(0x%x): " 
- "expecting %d messages to remain when done\n", - what, line, purge_flags, exp_remain); + TEST_SAY( + "%s:%d: purge(0x%x): " + "expecting %d messages to remain when done\n", + what, line, purge_flags, exp_remain); TIMING_START(&t_purge, "%s:%d: purge(0x%x)", what, line, purge_flags); err = rd_kafka_purge(rk, purge_flags); TIMING_STOP(&t_purge); - TEST_ASSERT(!err, "purge(0x%x) at %d failed: %s", - purge_flags, line, rd_kafka_err2str(err)); + TEST_ASSERT(!err, "purge(0x%x) at %d failed: %s", purge_flags, line, + rd_kafka_err2str(err)); rd_kafka_poll(rk, 0); TEST_ASSERT(waitmsgs->cnt == exp_remain, - "%s:%d: expected %d messages remaining, not %d", - what, line, exp_remain, waitmsgs->cnt); + "%s:%d: expected %d messages remaining, not %d", what, line, + exp_remain, waitmsgs->cnt); } /** * @brief Don't treat ERR__GAPLESS_GUARANTEE as a fatal error */ -static int gapless_is_not_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int gapless_is_not_fatal_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason) { return err != RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE; } -static void do_test_purge (const char *what, int remote, - int idempotence, int gapless) { +static void +do_test_purge(const char *what, int remote, int idempotence, int gapless) { const char *topic = test_mk_topic_name("0086_purge", 0); rd_kafka_conf_t *conf; rd_kafka_t *rk; @@ -203,8 +204,10 @@ static void do_test_purge (const char *what, int remote, test_conf_set(conf, "batch.num.messages", "10"); test_conf_set(conf, "max.in.flight", "1"); test_conf_set(conf, "linger.ms", "500"); - test_conf_set(conf, "enable.idempotence", idempotence?"true":"false"); - test_conf_set(conf, "enable.gapless.guarantee", gapless?"true":"false"); + test_conf_set(conf, "enable.idempotence", + idempotence ? "true" : "false"); + test_conf_set(conf, "enable.gapless.guarantee", + gapless ? 
"true" : "false"); rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); if (remote) { @@ -228,7 +231,7 @@ static void do_test_purge (const char *what, int remote, TEST_SAY("Producing %d messages to topic %s\n", msgcnt, topic); - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { int32_t partition; if (remote) { @@ -240,19 +243,18 @@ static void do_test_purge (const char *what, int remote, partition = (i < 10 ? i % 3 : RD_KAFKA_PARTITION_UA); } - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_VALUE((void *)&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(&waitmsgs), - RD_KAFKA_V_END); - TEST_ASSERT(!err, "producev(#%d) failed: %s", - i, rd_kafka_err2str(err)); + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_PARTITION(partition), + RD_KAFKA_V_VALUE((void *)&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_OPAQUE(&waitmsgs), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev(#%d) failed: %s", i, + rd_kafka_err2str(err)); - waitmsgs.exp_err[i] = (remote && i < 10 ? - RD_KAFKA_RESP_ERR__PURGE_INFLIGHT : - RD_KAFKA_RESP_ERR__PURGE_QUEUE); + waitmsgs.exp_err[i] = + (remote && i < 10 ? 
RD_KAFKA_RESP_ERR__PURGE_INFLIGHT + : RD_KAFKA_RESP_ERR__PURGE_QUEUE); waitmsgs.cnt++; } @@ -261,7 +263,8 @@ static void do_test_purge (const char *what, int remote, if (remote) { /* Wait for ProduceRequest to be sent */ mtx_lock(&produce_req_lock); - cnd_timedwait_ms(&produce_req_cnd, &produce_req_lock, 15*1000); + cnd_timedwait_ms(&produce_req_cnd, &produce_req_lock, + 15 * 1000); TEST_ASSERT(produce_req_cnt > 0, "First Produce request should've been sent by now"); mtx_unlock(&produce_req_lock); @@ -270,11 +273,10 @@ static void do_test_purge (const char *what, int remote, &waitmsgs, 10, "in-flight messages should not be purged"); - purge_and_expect(what, __LINE__, rk, - RD_KAFKA_PURGE_F_INFLIGHT| - RD_KAFKA_PURGE_F_QUEUE, - &waitmsgs, 0, - "all messages should have been purged"); + purge_and_expect( + what, __LINE__, rk, + RD_KAFKA_PURGE_F_INFLIGHT | RD_KAFKA_PURGE_F_QUEUE, + &waitmsgs, 0, "all messages should have been purged"); } else { purge_and_expect(what, __LINE__, rk, RD_KAFKA_PURGE_F_INFLIGHT, &waitmsgs, msgcnt, @@ -292,23 +294,24 @@ static void do_test_purge (const char *what, int remote, } -int main_0086_purge_remote (int argc, char **argv) { +int main_0086_purge_remote(int argc, char **argv) { const rd_bool_t has_idempotence = - test_broker_version >= TEST_BRKVER(0,11,0,0); + test_broker_version >= TEST_BRKVER(0, 11, 0, 0); - do_test_purge("remote", 1/*remote*/, 0/*idempotence*/, 0/*!gapless*/); + do_test_purge("remote", 1 /*remote*/, 0 /*idempotence*/, + 0 /*!gapless*/); if (has_idempotence) { - do_test_purge("remote,idempotence", - 1/*remote*/, 1/*idempotence*/, 0/*!gapless*/); - do_test_purge("remote,idempotence,gapless", - 1/*remote*/, 1/*idempotence*/, 1/*!gapless*/); + do_test_purge("remote,idempotence", 1 /*remote*/, + 1 /*idempotence*/, 0 /*!gapless*/); + do_test_purge("remote,idempotence,gapless", 1 /*remote*/, + 1 /*idempotence*/, 1 /*!gapless*/); } return 0; } -int main_0086_purge_local (int argc, char **argv) { - do_test_purge("local", 
0/*local*/, 0, 0); +int main_0086_purge_local(int argc, char **argv) { + do_test_purge("local", 0 /*local*/, 0, 0); return 0; } diff --git a/tests/0088-produce_metadata_timeout.c b/tests/0088-produce_metadata_timeout.c index a877c6b831..c71b5a69fd 100644 --- a/tests/0088-produce_metadata_timeout.c +++ b/tests/0088-produce_metadata_timeout.c @@ -48,15 +48,15 @@ static rd_atomic32_t refuse_connect; * @brief Sockem connect, called from **internal librdkafka thread** through * librdkafka's connect_cb */ -static int connect_cb (struct test *test, sockem_t *skm, const char *id) { +static int connect_cb(struct test *test, sockem_t *skm, const char *id) { if (rd_atomic32_get(&refuse_connect) > 0) return -1; else return 0; } -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. * SASL auther will think a connection-down even in the auth @@ -70,14 +70,14 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, return 1; } -static int msg_dr_cnt = 0; +static int msg_dr_cnt = 0; static int msg_dr_fail_cnt = 0; -static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { msg_dr_cnt++; - TEST_SAYL(3, "Delivery for message %.*s: %s\n", - (int)rkmessage->len, (const char *)rkmessage->payload, + TEST_SAYL(3, "Delivery for message %.*s: %s\n", (int)rkmessage->len, + (const char *)rkmessage->payload, rd_kafka_err2name(rkmessage->err)); if (rkmessage->err) { @@ -89,12 +89,12 @@ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, -int main_0088_produce_metadata_timeout (int argc, char **argv) { +int main_0088_produce_metadata_timeout(int argc, char **argv) { int64_t testid; rd_kafka_t *rk; rd_kafka_topic_t *rkt; - const char 
*topic = test_mk_topic_name("0088_produce_metadata_timeout", - 1); + const char *topic = + test_mk_topic_name("0088_produce_metadata_timeout", 1); int msgcnt = 0; rd_kafka_conf_t *conf; @@ -108,7 +108,7 @@ int main_0088_produce_metadata_timeout (int argc, char **argv) { test_conf_set(conf, "batch.num.messages", "5"); test_socket_enable(conf); - test_curr->connect_cb = connect_cb; + test_curr->connect_cb = connect_cb; test_curr->is_fatal_cb = is_fatal_cb; rk = test_create_handle(RD_KAFKA_PRODUCER, conf); @@ -119,39 +119,40 @@ int main_0088_produce_metadata_timeout (int argc, char **argv) { rkt = rd_kafka_topic_new(rk, topic, NULL); /* Produce first set of messages and wait for delivery */ - test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, - msgcnt, 20, NULL, 0, 0, &msgcnt); + test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt, + 20, NULL, 0, 0, &msgcnt); while (msg_dr_cnt < 5) rd_kafka_poll(rk, 1000); - TEST_SAY(_C_YEL "Disconnecting sockets and " + TEST_SAY(_C_YEL + "Disconnecting sockets and " "refusing future connections\n"); rd_atomic32_set(&refuse_connect, 1); - test_socket_close_all(test_curr, 1/*reinit*/); + test_socket_close_all(test_curr, 1 /*reinit*/); /* Wait for metadata timeout */ TEST_SAY("Waiting for metadata timeout\n"); - rd_sleep(10+5); + rd_sleep(10 + 5); /* These messages will be put on the UA queue */ - test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, - msgcnt, 20, NULL, 0, 0, &msgcnt); + test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt, + 20, NULL, 0, 0, &msgcnt); /* Restore the connection(s) when metadata has timed out. 
*/ TEST_SAY(_C_YEL "Allowing connections\n"); rd_atomic32_set(&refuse_connect, 0); rd_sleep(3); - test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, - msgcnt, 20, NULL, 0, 0, &msgcnt); + test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt, + 20, NULL, 0, 0, &msgcnt); - test_flush(rk, 2*5*1000); /* linger.ms * 2 */ + test_flush(rk, 2 * 5 * 1000); /* linger.ms * 2 */ - TEST_ASSERT(msg_dr_cnt == msgcnt, - "expected %d, got %d", msgcnt, msg_dr_cnt); - TEST_ASSERT(msg_dr_fail_cnt == 0, - "expected %d dr failures, got %d", 0, msg_dr_fail_cnt); + TEST_ASSERT(msg_dr_cnt == msgcnt, "expected %d, got %d", msgcnt, + msg_dr_cnt); + TEST_ASSERT(msg_dr_fail_cnt == 0, "expected %d dr failures, got %d", 0, + msg_dr_fail_cnt); rd_kafka_topic_destroy(rkt); rd_kafka_destroy(rk); diff --git a/tests/0089-max_poll_interval.c b/tests/0089-max_poll_interval.c index f094d6ae60..5ae935d269 100644 --- a/tests/0089-max_poll_interval.c +++ b/tests/0089-max_poll_interval.c @@ -43,15 +43,15 @@ -int main_0089_max_poll_interval (int argc, char **argv) { +int main_0089_max_poll_interval(int argc, char **argv) { const char *topic = test_mk_topic_name("0089_max_poll_interval", 1); uint64_t testid; const int msgcnt = 10; rd_kafka_t *c[2]; rd_kafka_conf_t *conf; - int64_t ts_next[2] = { 0, 0 }; - int64_t ts_exp_msg[2] = { 0, 0 }; - int cmsgcnt = 0; + int64_t ts_next[2] = {0, 0}; + int64_t ts_exp_msg[2] = {0, 0}; + int cmsgcnt = 0; int i; int bad = -1; @@ -74,7 +74,7 @@ int main_0089_max_poll_interval (int argc, char **argv) { test_consumer_subscribe(c[1], topic); while (1) { - for (i = 0 ; i < 2 ; i++) { + for (i = 0; i < 2; i++) { int64_t now; rd_kafka_message_t *rkm; @@ -87,9 +87,10 @@ int main_0089_max_poll_interval (int argc, char **argv) { continue; if (rkm->err) { - TEST_WARN("Consumer %d error: %s: " - "ignoring\n", i, - rd_kafka_message_errstr(rkm)); + TEST_WARN( + "Consumer %d error: %s: " + "ignoring\n", + i, rd_kafka_message_errstr(rkm)); continue; } @@ 
-97,29 +98,30 @@ int main_0089_max_poll_interval (int argc, char **argv) { cmsgcnt++; - TEST_SAY("Consumer %d received message (#%d) " - "at offset %"PRId64"\n", - i, cmsgcnt, rkm->offset); + TEST_SAY( + "Consumer %d received message (#%d) " + "at offset %" PRId64 "\n", + i, cmsgcnt, rkm->offset); if (ts_exp_msg[i]) { /* This consumer is expecting a message * after a certain time, namely after the * rebalance following max.poll.. being * exceeded in the other consumer */ - TEST_ASSERT(now > ts_exp_msg[i], - "Consumer %d: did not expect " - "message for at least %dms", - i, - (int)((ts_exp_msg[i] - now)/1000)); - TEST_ASSERT(now < ts_exp_msg[i] + 10000*1000, - "Consumer %d: expected message " - "within 10s, not after %dms", - i, - (int)((now - ts_exp_msg[i])/1000)); - TEST_SAY("Consumer %d: received message " - "at offset %"PRId64 - " after rebalance\n", - i, rkm->offset); + TEST_ASSERT( + now > ts_exp_msg[i], + "Consumer %d: did not expect " + "message for at least %dms", + i, (int)((ts_exp_msg[i] - now) / 1000)); + TEST_ASSERT( + now < ts_exp_msg[i] + 10000 * 1000, + "Consumer %d: expected message " + "within 10s, not after %dms", + i, (int)((now - ts_exp_msg[i]) / 1000)); + TEST_SAY( + "Consumer %d: received message " + "at offset %" PRId64 " after rebalance\n", + i, rkm->offset); rd_kafka_message_destroy(rkm); goto done; @@ -130,25 +132,28 @@ int main_0089_max_poll_interval (int argc, char **argv) { /* Exp message on other consumer after * max.poll.interval.ms */ - ts_exp_msg[i^1] = now + (10000 * 1000); + ts_exp_msg[i ^ 1] = now + (10000 * 1000); /* This is the bad consumer */ bad = i; - TEST_SAY("Consumer %d processing message at " - "offset %"PRId64"\n", - i, rkm->offset); + TEST_SAY( + "Consumer %d processing message at " + "offset %" PRId64 "\n", + i, rkm->offset); rd_kafka_message_destroy(rkm); } else { rd_kafka_message_destroy(rkm); - TEST_FAIL("Consumer %d did not expect " - "a message", i); + TEST_FAIL( + "Consumer %d did not expect " + "a message", + 
i); } } } - done: +done: TEST_ASSERT(bad != -1, "Bad consumer not set"); @@ -174,7 +179,7 @@ int main_0089_max_poll_interval (int argc, char **argv) { } - for (i = 0 ; i < 2 ; i++) + for (i = 0; i < 2; i++) rd_kafka_destroy_flags(c[i], RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE); return 0; diff --git a/tests/0090-idempotence.c b/tests/0090-idempotence.c index dc117d4f62..02d16df565 100644 --- a/tests/0090-idempotence.c +++ b/tests/0090-idempotence.c @@ -51,10 +51,10 @@ static struct { * * @locality an internal rdkafka thread */ -static rd_kafka_resp_err_t handle_ProduceResponse (rd_kafka_t *rk, - int32_t brokerid, - uint64_t msgseq, - rd_kafka_resp_err_t err) { +static rd_kafka_resp_err_t handle_ProduceResponse(rd_kafka_t *rk, + int32_t brokerid, + uint64_t msgseq, + rd_kafka_resp_err_t err) { rd_kafka_resp_err_t new_err = err; int n; @@ -68,20 +68,20 @@ static rd_kafka_resp_err_t handle_ProduceResponse (rd_kafka_t *rk, * Do allow the first request through. */ if (n > 1 && n <= state.initial_fail_batch_cnt) { if (err) - TEST_WARN("First %d ProduceRequests should not " - "have failed, this is #%d with error %s for " - "brokerid %"PRId32" and msgseq %"PRIu64"\n", - state.initial_fail_batch_cnt, n, - rd_kafka_err2name(err), brokerid, msgseq); + TEST_WARN( + "First %d ProduceRequests should not " + "have failed, this is #%d with error %s for " + "brokerid %" PRId32 " and msgseq %" PRIu64 "\n", + state.initial_fail_batch_cnt, n, + rd_kafka_err2name(err), brokerid, msgseq); assert(!err && *"First N ProduceRequests should not have failed"); new_err = RD_KAFKA_RESP_ERR__TIMED_OUT; } - TEST_SAY("handle_ProduceResponse(broker %"PRId32 - ", MsgSeq %"PRId64", Error %s) -> new Error %s\n", - brokerid, msgseq, - rd_kafka_err2name(err), + TEST_SAY("handle_ProduceResponse(broker %" PRId32 ", MsgSeq %" PRId64 + ", Error %s) -> new Error %s\n", + brokerid, msgseq, rd_kafka_err2name(err), rd_kafka_err2name(new_err)); return new_err; @@ -95,13 +95,14 @@ static rd_kafka_resp_err_t 
handle_ProduceResponse (rd_kafka_t *rk, * @param initial_fail_batch_cnt How many of the initial batches should * fail with an emulated network timeout. */ -static void do_test_implicit_ack (const char *what, - int batch_cnt, int initial_fail_batch_cnt) { +static void do_test_implicit_ack(const char *what, + int batch_cnt, + int initial_fail_batch_cnt) { rd_kafka_t *rk; const char *topic = test_mk_topic_name("0090_idempotence_impl_ack", 1); const int32_t partition = 0; uint64_t testid; - int msgcnt = 10*batch_cnt; + int msgcnt = 10 * batch_cnt; rd_kafka_conf_t *conf; rd_kafka_topic_t *rkt; test_msgver_t mv; @@ -109,7 +110,7 @@ static void do_test_implicit_ack (const char *what, TEST_SAY(_C_MAG "[ Test implicit ack: %s ]\n", what); rd_atomic32_init(&state.produce_cnt, 0); - state.batch_cnt = batch_cnt; + state.batch_cnt = batch_cnt; state.initial_fail_batch_cnt = initial_fail_batch_cnt; testid = test_id_generate(); @@ -145,8 +146,8 @@ static void do_test_implicit_ack (const char *what, TEST_SAY("Verifying messages with consumer\n"); test_msgver_init(&mv, testid); - test_consume_msgs_easy_mv(NULL, topic, partition, - testid, 1, msgcnt, NULL, &mv); + test_consume_msgs_easy_mv(NULL, topic, partition, testid, 1, msgcnt, + NULL, &mv); test_msgver_verify("verify", &mv, TEST_MSGVER_ALL, 0, msgcnt); test_msgver_clear(&mv); @@ -154,7 +155,7 @@ static void do_test_implicit_ack (const char *what, } -int main_0090_idempotence (int argc, char **argv) { +int main_0090_idempotence(int argc, char **argv) { /* The broker maintains a window of the N last ProduceRequests * per partition and producer to allow ProduceRequest retries * for previously successful requests to return a non-error response. 
@@ -162,12 +163,10 @@ int main_0090_idempotence (int argc, char **argv) { const int broker_req_window = 5; do_test_implicit_ack("within broker request window", - broker_req_window * 2, - broker_req_window); + broker_req_window * 2, broker_req_window); do_test_implicit_ack("outside broker request window", - broker_req_window + 3, - broker_req_window + 3); + broker_req_window + 3, broker_req_window + 3); return 0; } diff --git a/tests/0091-max_poll_interval_timeout.c b/tests/0091-max_poll_interval_timeout.c index b624c2f8e1..c1506afd9b 100644 --- a/tests/0091-max_poll_interval_timeout.c +++ b/tests/0091-max_poll_interval_timeout.c @@ -47,7 +47,7 @@ */ -const int64_t processing_time = 31*1000*1000; /*31s*/ +const int64_t processing_time = 31 * 1000 * 1000; /*31s*/ struct _consumer { rd_kafka_t *rk; @@ -57,23 +57,21 @@ struct _consumer { int max_rebalance_cnt; }; -static void do_consume (struct _consumer *cons, int timeout_s) { +static void do_consume(struct _consumer *cons, int timeout_s) { rd_kafka_message_t *rkm; - rkm = rd_kafka_consumer_poll(cons->rk, timeout_s*1000); + rkm = rd_kafka_consumer_poll(cons->rk, timeout_s * 1000); if (!rkm) return; - TEST_ASSERT(!rkm->err, - "%s consumer error: %s (last poll was %dms ago)", - rd_kafka_name(cons->rk), - rd_kafka_message_errstr(rkm), - (int)((test_clock() - cons->last)/1000)); + TEST_ASSERT(!rkm->err, "%s consumer error: %s (last poll was %dms ago)", + rd_kafka_name(cons->rk), rd_kafka_message_errstr(rkm), + (int)((test_clock() - cons->last) / 1000)); - TEST_SAY("%s: processing message #%d from " - "partition %"PRId32" at offset %"PRId64"\n", - rd_kafka_name(cons->rk), cons->cnt, - rkm->partition, rkm->offset); + TEST_SAY( + "%s: processing message #%d from " + "partition %" PRId32 " at offset %" PRId64 "\n", + rd_kafka_name(cons->rk), cons->cnt, rkm->partition, rkm->offset); rd_kafka_message_destroy(rkm); @@ -86,24 +84,22 @@ static void do_consume (struct _consumer *cons, int timeout_s) { } -static void rebalance_cb 
(rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { struct _consumer *cons = opaque; cons->rebalance_cnt++; TEST_SAY(_C_BLU "%s rebalance #%d/%d: %s: %d partition(s)\n", - rd_kafka_name(cons->rk), - cons->rebalance_cnt, cons->max_rebalance_cnt, - rd_kafka_err2name(err), - parts->cnt); + rd_kafka_name(cons->rk), cons->rebalance_cnt, + cons->max_rebalance_cnt, rd_kafka_err2name(err), parts->cnt); TEST_ASSERT(cons->rebalance_cnt <= cons->max_rebalance_cnt, "%s rebalanced %d times, max was %d", - rd_kafka_name(cons->rk), - cons->rebalance_cnt, cons->max_rebalance_cnt); + rd_kafka_name(cons->rk), cons->rebalance_cnt, + cons->max_rebalance_cnt); if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) rd_kafka_assign(rk, parts); @@ -113,9 +109,9 @@ static void rebalance_cb (rd_kafka_t *rk, #define _CONSUMER_CNT 2 -static void do_test_with_subscribe (const char *topic) { +static void do_test_with_subscribe(const char *topic) { int64_t testid; - const int msgcnt = 3; + const int msgcnt = 3; struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT; rd_kafka_conf_t *conf; @@ -124,12 +120,12 @@ static void do_test_with_subscribe (const char *topic) { testid = test_id_generate(); test_conf_init(&conf, NULL, - 10 + (int)(processing_time/1000000) * msgcnt); + 10 + (int)(processing_time / 1000000) * msgcnt); /* Produce extra messages since we can't fully rely on the * random partitioner to provide exact distribution. 
*/ test_produce_msgs_easy(topic, testid, -1, msgcnt * _CONSUMER_CNT * 2); - test_produce_msgs_easy(topic, testid, 1, msgcnt/2); + test_produce_msgs_easy(topic, testid, 1, msgcnt / 2); test_conf_set(conf, "session.timeout.ms", "6000"); test_conf_set(conf, "max.poll.interval.ms", "20000" /*20s*/); @@ -141,8 +137,8 @@ static void do_test_with_subscribe (const char *topic) { rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); rd_kafka_conf_set_opaque(conf, &c[0]); - c[0].rk = test_create_consumer(topic, NULL, - rd_kafka_conf_dup(conf), NULL); + c[0].rk = + test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); rd_kafka_conf_set_opaque(conf, &c[1]); c[1].rk = test_create_consumer(topic, NULL, conf, NULL); @@ -158,10 +154,10 @@ static void do_test_with_subscribe (const char *topic) { while (1) { rd_kafka_topic_partition_list_t *parts = NULL; - do_consume(&c[0], 1/*1s*/); + do_consume(&c[0], 1 /*1s*/); if (rd_kafka_assignment(c[0].rk, &parts) != - RD_KAFKA_RESP_ERR_NO_ERROR || + RD_KAFKA_RESP_ERR_NO_ERROR || !parts || parts->cnt == 0) { if (parts) rd_kafka_topic_partition_list_destroy(parts); @@ -179,7 +175,7 @@ static void do_test_with_subscribe (const char *topic) { /* Poll until both consumers have finished reading N messages */ while (c[0].cnt < msgcnt && c[1].cnt < msgcnt) { do_consume(&c[0], 0); - do_consume(&c[1], 10/*10s*/); + do_consume(&c[1], 10 /*10s*/); } /* Allow the extra revoke rebalance on close() */ @@ -201,7 +197,7 @@ static void do_test_with_subscribe (const char *topic) { * @brief Verify that max.poll.interval.ms does NOT kick in * when just using assign() and not subscribe(). 
*/ -static void do_test_with_assign (const char *topic) { +static void do_test_with_assign(const char *topic) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_message_t *rkm; @@ -226,8 +222,7 @@ static void do_test_with_assign (const char *topic) { /* Make sure no error was raised */ while ((rkm = rd_kafka_consumer_poll(rk, 0))) { - TEST_ASSERT(!rkm->err, - "Unexpected consumer error: %s: %s", + TEST_ASSERT(!rkm->err, "Unexpected consumer error: %s: %s", rd_kafka_err2name(rkm->err), rd_kafka_message_errstr(rkm)); @@ -238,8 +233,7 @@ static void do_test_with_assign (const char *topic) { test_consumer_close(rk); rd_kafka_destroy(rk); - TEST_SAY(_C_GRN - "[ Test max.poll.interval.ms with assign(): PASS ]\n"); + TEST_SAY(_C_GRN "[ Test max.poll.interval.ms with assign(): PASS ]\n"); } @@ -247,7 +241,7 @@ static void do_test_with_assign (const char *topic) { * @brief Verify that max.poll.interval.ms kicks in even if * the application hasn't called poll once. */ -static void do_test_no_poll (const char *topic) { +static void do_test_no_poll(const char *topic) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_message_t *rkm; @@ -287,9 +281,9 @@ static void do_test_no_poll (const char *topic) { } -int main_0091_max_poll_interval_timeout (int argc, char **argv) { - const char *topic = test_mk_topic_name("0091_max_poll_interval_tmout", - 1); +int main_0091_max_poll_interval_timeout(int argc, char **argv) { + const char *topic = + test_mk_topic_name("0091_max_poll_interval_tmout", 1); test_create_topic(NULL, topic, 2, 1); diff --git a/tests/0092-mixed_msgver.c b/tests/0092-mixed_msgver.c index 2cc3adf222..46308ddf47 100644 --- a/tests/0092-mixed_msgver.c +++ b/tests/0092-mixed_msgver.c @@ -40,11 +40,11 @@ -int main_0092_mixed_msgver (int argc, char **argv) { +int main_0092_mixed_msgver(int argc, char **argv) { rd_kafka_t *rk; const char *topic = test_mk_topic_name("0092_mixed_msgver", 1); int32_t partition = 0; - const int msgcnt = 60; + const int msgcnt = 60; int cnt; 
int64_t testid; int msgcounter = msgcnt; @@ -59,38 +59,31 @@ int main_0092_mixed_msgver (int argc, char **argv) { rk = test_create_producer(); /* Produce messages */ - for (cnt = 0 ; cnt < msgcnt ; cnt++) { + for (cnt = 0; cnt < msgcnt; cnt++) { rd_kafka_resp_err_t err; char buf[230]; test_msg_fmt(buf, sizeof(buf), testid, partition, cnt); err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_VALUE(buf, sizeof(buf)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(&msgcounter), - RD_KAFKA_V_END); - TEST_ASSERT(!err, "producev() #%d failed: %s", - cnt, rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_PARTITION(partition), + RD_KAFKA_V_VALUE(buf, sizeof(buf)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() #%d failed: %s", cnt, + rd_kafka_err2str(err)); /* One message per batch */ - rd_kafka_flush(rk, 30*1000); + rd_kafka_flush(rk, 30 * 1000); if (cnt == msgcnt / 2) { - const char *msgconf[] = { - "message.format.version", - "0.10.0.0" - }; + const char *msgconf[] = {"message.format.version", + "0.10.0.0"}; TEST_SAY("Changing message.format.version\n"); err = test_AlterConfigs_simple( - rk, - RD_KAFKA_RESOURCE_TOPIC, topic, - msgconf, 1); - TEST_ASSERT(!err, - "AlterConfigs failed: %s", + rk, RD_KAFKA_RESOURCE_TOPIC, topic, msgconf, 1); + TEST_ASSERT(!err, "AlterConfigs failed: %s", rd_kafka_err2str(err)); } } diff --git a/tests/0093-holb.c b/tests/0093-holb.c index e46faf745f..366deca328 100644 --- a/tests/0093-holb.c +++ b/tests/0093-holb.c @@ -50,18 +50,16 @@ struct _consumer { int max_rebalance_cnt; }; -static void do_consume (struct _consumer *cons, int timeout_s) { +static void do_consume(struct _consumer *cons, int timeout_s) { rd_kafka_message_t *rkm; - rkm = rd_kafka_consumer_poll(cons->rk, 100+(timeout_s*1000)); + rkm = rd_kafka_consumer_poll(cons->rk, 100 + (timeout_s * 1000)); if (!rkm) 
return; - TEST_ASSERT(!rkm->err, - "%s consumer error: %s (last poll was %dms ago)", - rd_kafka_name(cons->rk), - rd_kafka_message_errstr(rkm), - (int)((test_clock() - cons->last)/1000)); + TEST_ASSERT(!rkm->err, "%s consumer error: %s (last poll was %dms ago)", + rd_kafka_name(cons->rk), rd_kafka_message_errstr(rkm), + (int)((test_clock() - cons->last) / 1000)); rd_kafka_message_destroy(rkm); @@ -76,24 +74,22 @@ static void do_consume (struct _consumer *cons, int timeout_s) { } -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { struct _consumer *cons = opaque; cons->rebalance_cnt++; TEST_SAY(_C_BLU "%s rebalance #%d/%d: %s: %d partition(s)\n", - rd_kafka_name(cons->rk), - cons->rebalance_cnt, cons->max_rebalance_cnt, - rd_kafka_err2name(err), - parts->cnt); + rd_kafka_name(cons->rk), cons->rebalance_cnt, + cons->max_rebalance_cnt, rd_kafka_err2name(err), parts->cnt); TEST_ASSERT(cons->rebalance_cnt <= cons->max_rebalance_cnt, "%s rebalanced %d times, max was %d", - rd_kafka_name(cons->rk), - cons->rebalance_cnt, cons->max_rebalance_cnt); + rd_kafka_name(cons->rk), cons->rebalance_cnt, + cons->max_rebalance_cnt); if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) rd_kafka_assign(rk, parts); @@ -103,10 +99,10 @@ static void rebalance_cb (rd_kafka_t *rk, #define _CONSUMER_CNT 2 -int main_0093_holb_consumer (int argc, char **argv) { +int main_0093_holb_consumer(int argc, char **argv) { const char *topic = test_mk_topic_name("0093_holb_consumer", 1); int64_t testid; - const int msgcnt = 100; + const int msgcnt = 100; struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT; rd_kafka_conf_t *conf; @@ -127,8 +123,8 @@ int main_0093_holb_consumer (int argc, char **argv) { rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); rd_kafka_conf_set_opaque(conf, &c[0]); - c[0].rk = 
test_create_consumer(topic, NULL, - rd_kafka_conf_dup(conf), NULL); + c[0].rk = + test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); rd_kafka_conf_set_opaque(conf, &c[1]); c[1].rk = test_create_consumer(topic, NULL, conf, NULL); @@ -145,10 +141,10 @@ int main_0093_holb_consumer (int argc, char **argv) { while (1) { rd_kafka_topic_partition_list_t *parts = NULL; - do_consume(&c[0], 1/*1s*/); + do_consume(&c[0], 1 /*1s*/); if (rd_kafka_assignment(c[0].rk, &parts) != - RD_KAFKA_RESP_ERR_NO_ERROR || + RD_KAFKA_RESP_ERR_NO_ERROR || !parts || parts->cnt == 0) { if (parts) rd_kafka_topic_partition_list_destroy(parts); @@ -162,14 +158,14 @@ int main_0093_holb_consumer (int argc, char **argv) { } TEST_SAY("c[0] got assignment, consuming..\n"); - do_consume(&c[0], 5/*5s*/); + do_consume(&c[0], 5 /*5s*/); TEST_SAY("Joining second consumer\n"); test_consumer_subscribe(c[1].rk, topic); /* Just poll second consumer for 10s, the rebalance will not * finish until the first consumer polls */ - do_consume(&c[1], 10/*10s*/); + do_consume(&c[1], 10 /*10s*/); /* c0: the next call to do_consume/poll will trigger * its rebalance callback, first revoke then assign. */ @@ -178,8 +174,8 @@ int main_0093_holb_consumer (int argc, char **argv) { c[1].max_rebalance_cnt++; TEST_SAY("Expected rebalances: c[0]: %d/%d, c[1]: %d/%d\n", - c[0].rebalance_cnt, c[0].max_rebalance_cnt, - c[1].rebalance_cnt, c[1].max_rebalance_cnt); + c[0].rebalance_cnt, c[0].max_rebalance_cnt, c[1].rebalance_cnt, + c[1].max_rebalance_cnt); /* Let rebalances kick in, then consume messages. */ while (c[0].cnt + c[1].cnt < msgcnt) { diff --git a/tests/0094-idempotence_msg_timeout.c b/tests/0094-idempotence_msg_timeout.c index bac7c969bf..8704adc09c 100644 --- a/tests/0094-idempotence_msg_timeout.c +++ b/tests/0094-idempotence_msg_timeout.c @@ -65,8 +65,8 @@ * 6b. 
Try to recover within the current epoch, the broker is expecting * sequence 2, 3, 4, or 5, depending on what it managed to persist * before the connection went down. - * The producer should produce msg 2 but it no longer exists due to timed out. - * If lucky, only 2 was persisted by the broker, which means the Producer + * The producer should produce msg 2 but it no longer exists due to timed + * out. If lucky, only 2 was persisted by the broker, which means the Producer * can successfully produce 3. * If 3 was persisted the producer would get a DuplicateSequence error * back, indicating that it was already produced, this would get @@ -101,8 +101,9 @@ static struct { } counters; -static void my_dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { +static void my_dr_msg_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { if (rd_kafka_message_status(rkmessage) >= RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED) @@ -116,8 +117,8 @@ static void my_dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, } } -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. 
* SASL auther will think a connection-down even in the auth @@ -132,21 +133,23 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, } -static void do_test_produce_timeout (const char *topic, const int msgrate) { +static void do_test_produce_timeout(const char *topic, const int msgrate) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_t *rkt; uint64_t testid; rd_kafka_resp_err_t err; const int partition = RD_KAFKA_PARTITION_UA; - int msgcnt = msgrate * 20; - const int msgsize = 100*1000; + int msgcnt = msgrate * 20; + const int msgsize = 100 * 1000; sockem_ctrl_t ctrl; int msgcounter = 0; test_msgver_t mv; - TEST_SAY(_C_BLU "Test idempotent producer " - "with message timeouts (%d msgs/s)\n", msgrate); + TEST_SAY(_C_BLU + "Test idempotent producer " + "with message timeouts (%d msgs/s)\n", + msgrate); testid = test_id_generate(); @@ -163,24 +166,24 @@ static void do_test_produce_timeout (const char *topic, const int msgrate) { test_socket_enable(conf); test_curr->is_fatal_cb = is_fatal_cb; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_producer_topic(rk, topic, - "message.timeout.ms", "5000", NULL); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(rk, topic, "message.timeout.ms", + "5000", NULL); /* Create the topic to make sure connections are up and ready. */ err = test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000)); TEST_ASSERT(!err, "topic creation failed: %s", rd_kafka_err2str(err)); /* After 1 seconds, set socket delay to 2*message.timeout.ms */ - sockem_ctrl_set_delay(&ctrl, 1000, 2*5000); + sockem_ctrl_set_delay(&ctrl, 1000, 2 * 5000); /* After 3*message.timeout.ms seconds, remove delay. 
*/ - sockem_ctrl_set_delay(&ctrl, 3*5000, 0); + sockem_ctrl_set_delay(&ctrl, 3 * 5000, 0); - test_produce_msgs_nowait(rk, rkt, testid, partition, 0, - msgcnt, NULL, msgsize, msgrate, &msgcounter); + test_produce_msgs_nowait(rk, rkt, testid, partition, 0, msgcnt, NULL, + msgsize, msgrate, &msgcounter); - test_flush(rk, 3*5000); + test_flush(rk, 3 * 5000); TEST_SAY("%d/%d messages produced, %d delivered, %d failed\n", msgcounter, msgcnt, counters.dr_ok, counters.dr_fail); @@ -194,21 +197,23 @@ static void do_test_produce_timeout (const char *topic, const int msgrate) { counters.dr_ok); test_msgver_init(&mv, testid); - test_consume_msgs_easy_mv(NULL, topic, partition, - testid, 1, -1, NULL, &mv); + test_consume_msgs_easy_mv(NULL, topic, partition, testid, 1, -1, NULL, + &mv); test_msgver_verify_compare("delivered", &mv, &counters.mv_delivered, - TEST_MSGVER_ORDER|TEST_MSGVER_DUP| - TEST_MSGVER_BY_MSGID| - TEST_MSGVER_SUBSET); + TEST_MSGVER_ORDER | TEST_MSGVER_DUP | + TEST_MSGVER_BY_MSGID | + TEST_MSGVER_SUBSET); test_msgver_clear(&mv); test_msgver_clear(&counters.mv_delivered); - TEST_SAY(_C_GRN "Test idempotent producer " - "with message timeouts (%d msgs/s): SUCCESS\n", msgrate); + TEST_SAY(_C_GRN + "Test idempotent producer " + "with message timeouts (%d msgs/s): SUCCESS\n", + msgrate); } -int main_0094_idempotence_msg_timeout (int argc, char **argv) { +int main_0094_idempotence_msg_timeout(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__, 1); do_test_produce_timeout(topic, 10); diff --git a/tests/0095-all_brokers_down.cpp b/tests/0095-all_brokers_down.cpp index be720be5ef..6ebd5f500e 100644 --- a/tests/0095-all_brokers_down.cpp +++ b/tests/0095-all_brokers_down.cpp @@ -31,24 +31,24 @@ class errorEventCb : public RdKafka::EventCb { -public: - errorEventCb(): error_seen(false) { } + public: + errorEventCb() : error_seen(false) { + } - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { + void event_cb(RdKafka::Event 
&event) { + switch (event.type()) { case RdKafka::Event::EVENT_ERROR: - Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << - ": " << event.str() << "\n"); + Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << ": " + << event.str() << "\n"); if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN) error_seen = true; break; - case RdKafka::Event::EVENT_LOG: - Test::Say(tostr() << "Log: " << event.str() << "\n"); - break; + case RdKafka::Event::EVENT_LOG: + Test::Say(tostr() << "Log: " << event.str() << "\n"); + break; - default: + default: break; } } @@ -58,65 +58,65 @@ class errorEventCb : public RdKafka::EventCb { extern "C" { - int main_0095_all_brokers_down (int argc, char **argv) { - RdKafka::Conf *conf; - std::string errstr; +int main_0095_all_brokers_down(int argc, char **argv) { + RdKafka::Conf *conf; + std::string errstr; - Test::conf_init(&conf, NULL, 20); - /* Two broker addresses that will quickly reject the connection */ - Test::conf_set(conf, "bootstrap.servers", "127.0.0.1:1,127.0.0.1:2"); + Test::conf_init(&conf, NULL, 20); + /* Two broker addresses that will quickly reject the connection */ + Test::conf_set(conf, "bootstrap.servers", "127.0.0.1:1,127.0.0.1:2"); - /* - * First test producer - */ - errorEventCb pEvent = errorEventCb(); + /* + * First test producer + */ + errorEventCb pEvent = errorEventCb(); - if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail(errstr); - Test::Say("Test Producer\n"); + Test::Say("Test Producer\n"); - RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); - if (!p) - Test::Fail("Failed to create Producer: " + errstr); + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); - /* Wait for all brokers down */ - while (!pEvent.error_seen) - p->poll(1000); + /* Wait for all brokers down */ + 
while (!pEvent.error_seen) + p->poll(1000); - delete p; + delete p; - /* - * Test high-level consumer that has a logical broker (group coord), - * which has caused AllBrokersDown generation problems (#2259) - */ - errorEventCb cEvent = errorEventCb(); + /* + * Test high-level consumer that has a logical broker (group coord), + * which has caused AllBrokersDown generation problems (#2259) + */ + errorEventCb cEvent = errorEventCb(); - Test::conf_set(conf, "group.id", "test"); + Test::conf_set(conf, "group.id", "test"); - if (conf->set("event_cb", &cEvent, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + if (conf->set("event_cb", &cEvent, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail(errstr); - Test::Say("Test KafkaConsumer\n"); + Test::Say("Test KafkaConsumer\n"); - RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); - if (!c) - Test::Fail("Failed to create KafkaConsumer: " + errstr); + RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); + if (!c) + Test::Fail("Failed to create KafkaConsumer: " + errstr); - delete conf; + delete conf; - /* Wait for all brokers down */ - while (!cEvent.error_seen) { - RdKafka::Message *m = c->consume(1000); - if (m) - delete m; - } + /* Wait for all brokers down */ + while (!cEvent.error_seen) { + RdKafka::Message *m = c->consume(1000); + if (m) + delete m; + } - c->close(); + c->close(); - delete c; + delete c; - return 0; - } + return 0; +} } diff --git a/tests/0097-ssl_verify.cpp b/tests/0097-ssl_verify.cpp index 9b77b4a98e..c2b0e51d98 100644 --- a/tests/0097-ssl_verify.cpp +++ b/tests/0097-ssl_verify.cpp @@ -34,30 +34,28 @@ #include "testcpp.h" #include "tinycthread.h" -static const std::string envname[RdKafka::CERT__CNT][RdKafka::CERT_ENC__CNT] = - { - /* [RdKafka::CERT_PUBLIC_KEY] = */ - { - "RDK_SSL_pkcs", - "RDK_SSL_pub_der", - "RDK_SSL_pub_pem", - }, - /* [RdKafka::CERT_PRIVATE_KEY] = */ - { - "RDK_SSL_pkcs", - "RDK_SSL_priv_der", - "RDK_SSL_priv_pem", - }, - /* 
[RdKafka::CERT_CA] = */ - { - "RDK_SSL_pkcs", - "RDK_SSL_ca_der", - "RDK_SSL_ca_pem", - } - }; - - -static std::vector read_file (const std::string path) { +static const std::string envname[RdKafka::CERT__CNT][RdKafka::CERT_ENC__CNT] = { + /* [RdKafka::CERT_PUBLIC_KEY] = */ + { + "RDK_SSL_pkcs", + "RDK_SSL_pub_der", + "RDK_SSL_pub_pem", + }, + /* [RdKafka::CERT_PRIVATE_KEY] = */ + { + "RDK_SSL_pkcs", + "RDK_SSL_priv_der", + "RDK_SSL_priv_pem", + }, + /* [RdKafka::CERT_CA] = */ + { + "RDK_SSL_pkcs", + "RDK_SSL_ca_der", + "RDK_SSL_ca_pem", + }}; + + +static std::vector read_file(const std::string path) { std::ifstream ifs(path.c_str(), std::ios::binary | std::ios::ate); if (ifs.fail()) Test::Fail("Failed to open " + path + ": " + strerror(errno)); @@ -80,10 +78,10 @@ static std::vector read_file (const std::string path) { class TestVerifyCb : public RdKafka::SslCertificateVerifyCb { public: bool verify_ok; - int cnt; //< Verify callbacks triggered. + int cnt; //< Verify callbacks triggered. 
mtx_t lock; - TestVerifyCb(bool verify_ok): verify_ok(verify_ok), cnt(0) { + TestVerifyCb(bool verify_ok) : verify_ok(verify_ok), cnt(0) { mtx_init(&lock, mtx_plain); } @@ -91,21 +89,20 @@ class TestVerifyCb : public RdKafka::SslCertificateVerifyCb { mtx_destroy(&lock); } - bool ssl_cert_verify_cb (const std::string &broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - std::string &errstr) { - + bool ssl_cert_verify_cb(const std::string &broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + std::string &errstr) { mtx_lock(&lock); - Test::Say(tostr() << "ssl_cert_verify_cb #" << cnt << - ": broker_name=" << broker_name << - ", broker_id=" << broker_id << - ", x509_error=" << *x509_error << - ", depth=" << depth << - ", buf size=" << size << ", verify_ok=" << verify_ok << "\n"); + Test::Say(tostr() << "ssl_cert_verify_cb #" << cnt << ": broker_name=" + << broker_name << ", broker_id=" << broker_id + << ", x509_error=" << *x509_error << ", depth=" << depth + << ", buf size=" << size << ", verify_ok=" << verify_ok + << "\n"); cnt++; mtx_unlock(&lock); @@ -113,7 +110,7 @@ class TestVerifyCb : public RdKafka::SslCertificateVerifyCb { if (verify_ok) return true; - errstr = "This test triggered a verification failure"; + errstr = "This test triggered a verification failure"; *x509_error = 26; /*X509_V_ERR_INVALID_PURPOSE*/ return false; @@ -121,9 +118,9 @@ class TestVerifyCb : public RdKafka::SslCertificateVerifyCb { }; -static void conf_location_to_pem (RdKafka::Conf *conf, - std::string loc_prop, - std::string pem_prop) { +static void conf_location_to_pem(RdKafka::Conf *conf, + std::string loc_prop, + std::string pem_prop) { std::string loc; @@ -153,15 +150,15 @@ static void conf_location_to_pem (RdKafka::Conf *conf, * @remark Requires a bunch of SSL_.. env vars to point out where * certs are found. These are set up by trivup. 
*/ -static void conf_location_to_setter (RdKafka::Conf *conf, - std::string loc_prop, - RdKafka::CertificateType cert_type, - RdKafka::CertificateEncoding encoding) { +static void conf_location_to_setter(RdKafka::Conf *conf, + std::string loc_prop, + RdKafka::CertificateType cert_type, + RdKafka::CertificateEncoding encoding) { std::string loc; static const std::string encnames[] = { - "PKCS#12", - "DER", - "PEM", + "PKCS#12", + "DER", + "PEM", }; /* Clear the config property (e.g., ssl.key.location) */ @@ -172,14 +169,16 @@ static void conf_location_to_setter (RdKafka::Conf *conf, const char *p; p = test_getenv(envname[cert_type][encoding].c_str(), NULL); if (!p) - Test::Fail("Invalid test environment: " - "Missing " + envname[cert_type][encoding] + - " env variable: make sure trivup is up to date"); + Test::Fail( + "Invalid test environment: " + "Missing " + + envname[cert_type][encoding] + + " env variable: make sure trivup is up to date"); loc = p; - Test::Say(tostr() << "Reading " << loc_prop << " file " << loc << - " as " << encnames[encoding] << "\n"); + Test::Say(tostr() << "Reading " << loc_prop << " file " << loc << " as " + << encnames[encoding] << "\n"); /* Read file */ std::ifstream ifs(loc.c_str(), std::ios::binary | std::ios::ate); @@ -194,40 +193,41 @@ static void conf_location_to_setter (RdKafka::Conf *conf, if (conf->set_ssl_cert(cert_type, encoding, buffer.data(), size, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(tostr() << "Failed to set cert from " << loc << - " as cert type " << cert_type << " with encoding " << encoding << - ": " << errstr << "\n"); + Test::Fail(tostr() << "Failed to set cert from " << loc << " as cert type " + << cert_type << " with encoding " << encoding << ": " + << errstr << "\n"); } typedef enum { - USE_LOCATION, /* use ssl.key.location */ - USE_CONF, /* use ssl.key.pem */ - USE_SETTER, /* use conf->set_ssl_cert(), this supports multiple formats */ + USE_LOCATION, /* use ssl.key.location */ + USE_CONF, /* use 
ssl.key.pem */ + USE_SETTER, /* use conf->set_ssl_cert(), this supports multiple formats */ } cert_load_t; static const std::string load_names[] = { - "location", - "conf", - "setter", + "location", + "conf", + "setter", }; -static void do_test_verify (const int line, bool verify_ok, - cert_load_t load_key, - RdKafka::CertificateEncoding key_enc, - cert_load_t load_pub, - RdKafka::CertificateEncoding pub_enc, - cert_load_t load_ca, - RdKafka::CertificateEncoding ca_enc) { +static void do_test_verify(const int line, + bool verify_ok, + cert_load_t load_key, + RdKafka::CertificateEncoding key_enc, + cert_load_t load_pub, + RdKafka::CertificateEncoding pub_enc, + cert_load_t load_ca, + RdKafka::CertificateEncoding ca_enc) { /* * Create any type of client */ - std::string teststr = tostr() << line << ": " << - "SSL cert verify: verify_ok=" << verify_ok << - ", load_key=" << load_names[load_key] << - ", load_pub=" << load_names[load_pub] << - ", load_ca=" << load_names[load_ca]; + std::string teststr = tostr() << line << ": " + << "SSL cert verify: verify_ok=" << verify_ok + << ", load_key=" << load_names[load_key] + << ", load_pub=" << load_names[load_pub] + << ", load_ca=" << load_names[load_ca]; Test::Say(_C_BLU "[ " + teststr + " ]\n" _C_CLR); @@ -247,8 +247,8 @@ static void do_test_verify (const int line, bool verify_ok, if (load_key == USE_CONF) conf_location_to_pem(conf, "ssl.key.location", "ssl.key.pem"); else if (load_key == USE_SETTER) - conf_location_to_setter(conf, "ssl.key.location", - RdKafka::CERT_PRIVATE_KEY, key_enc); + conf_location_to_setter(conf, "ssl.key.location", RdKafka::CERT_PRIVATE_KEY, + key_enc); if (load_pub == USE_CONF) conf_location_to_pem(conf, "ssl.certificate.location", @@ -260,8 +260,7 @@ static void do_test_verify (const int line, bool verify_ok, if (load_ca == USE_CONF) conf_location_to_pem(conf, "ssl.ca.location", "ssl.ca.pem"); else if (load_ca == USE_SETTER) - conf_location_to_setter(conf, "ssl.ca.location", - RdKafka::CERT_CA, 
ca_enc); + conf_location_to_setter(conf, "ssl.ca.location", RdKafka::CERT_CA, ca_enc); std::string errstr; @@ -278,19 +277,18 @@ static void do_test_verify (const int line, bool verify_ok, delete conf; bool run = true; - for (int i = 0 ; run && i < 10 ; i++) { + for (int i = 0; run && i < 10; i++) { p->poll(1000); mtx_lock(&verifyCb.lock); - if ((verify_ok && verifyCb.cnt > 0) || - (!verify_ok && verifyCb.cnt > 3)) + if ((verify_ok && verifyCb.cnt > 0) || (!verify_ok && verifyCb.cnt > 3)) run = false; mtx_unlock(&verifyCb.lock); } mtx_lock(&verifyCb.lock); if (!verifyCb.cnt) - Test::Fail("Expected at least one verifyCb invocation"); + Test::Fail("Expected at least one verifyCb invocation"); mtx_unlock(&verifyCb.lock); /* Retrieving the clusterid allows us to easily check if a @@ -305,7 +303,7 @@ static void do_test_verify (const int line, bool verify_ok, delete p; - Test::Say(_C_GRN "[ PASSED: " + teststr + " ]\n" _C_CLR); + Test::Say(_C_GRN "[ PASSED: " + teststr + " ]\n" _C_CLR); } @@ -313,7 +311,7 @@ static void do_test_verify (const int line, bool verify_ok, * @brief Verification that some bad combinations of calls behave as expected. * This is simply to verify #2904. 
*/ -static void do_test_bad_calls () { +static void do_test_bad_calls() { RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); std::string errstr; @@ -328,29 +326,26 @@ static void do_test_bad_calls () { errstr)) Test::Fail(errstr); - std::vector certBuffer = - read_file(test_getenv(envname[RdKafka::CERT_CA] - [RdKafka::CERT_ENC_PEM].c_str(), NULL)); + std::vector certBuffer = read_file(test_getenv( + envname[RdKafka::CERT_CA][RdKafka::CERT_ENC_PEM].c_str(), NULL)); if (conf->set_ssl_cert(RdKafka::CERT_CA, RdKafka::CERT_ENC_PEM, certBuffer.data(), certBuffer.size(), errstr)) Test::Fail(errstr); /* Set public-key as CA (over-writing the previous one) */ - std::vector userBuffer = - read_file(test_getenv(envname[RdKafka::CERT_PUBLIC_KEY] - [RdKafka::CERT_ENC_PEM].c_str(), NULL)); + std::vector userBuffer = read_file(test_getenv( + envname[RdKafka::CERT_PUBLIC_KEY][RdKafka::CERT_ENC_PEM].c_str(), NULL)); if (conf->set_ssl_cert(RdKafka::CERT_CA, RdKafka::CERT_ENC_PEM, - userBuffer.data(), userBuffer.size(), errstr)) + userBuffer.data(), userBuffer.size(), errstr)) Test::Fail(errstr); - std::vector keyBuffer = - read_file(test_getenv(envname[RdKafka::CERT_PRIVATE_KEY] - [RdKafka::CERT_ENC_PEM].c_str(), NULL)); + std::vector keyBuffer = read_file(test_getenv( + envname[RdKafka::CERT_PRIVATE_KEY][RdKafka::CERT_ENC_PEM].c_str(), NULL)); if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY, RdKafka::CERT_ENC_PEM, - keyBuffer.data(), keyBuffer.size(), errstr)) + keyBuffer.data(), keyBuffer.size(), errstr)) Test::Fail(errstr); // Create Kafka producer @@ -366,90 +361,79 @@ static void do_test_bad_calls () { } extern "C" { - int main_0097_ssl_verify (int argc, char **argv) { - - if (!test_check_builtin("ssl")) { - Test::Skip("Test requires SSL support\n"); - return 0; - } - - if (!test_getenv("RDK_SSL_pkcs", NULL)) { - Test::Skip("Test requires SSL_* env-vars set up by trivup\n"); - return 0; - } - - - do_test_bad_calls(); - - do_test_verify(__LINE__, true, - 
USE_LOCATION, RdKafka::CERT_ENC_PEM, - USE_LOCATION, RdKafka::CERT_ENC_PEM, - USE_LOCATION, RdKafka::CERT_ENC_PEM); - do_test_verify(__LINE__, false, - USE_LOCATION, RdKafka::CERT_ENC_PEM, - USE_LOCATION, RdKafka::CERT_ENC_PEM, - USE_LOCATION, RdKafka::CERT_ENC_PEM); - - /* Verify various priv and pub key and CA input formats */ - do_test_verify(__LINE__, true, - USE_CONF, RdKafka::CERT_ENC_PEM, - USE_CONF, RdKafka::CERT_ENC_PEM, - USE_LOCATION, RdKafka::CERT_ENC_PEM); - do_test_verify(__LINE__, true, - USE_CONF, RdKafka::CERT_ENC_PEM, - USE_CONF, RdKafka::CERT_ENC_PEM, - USE_CONF, RdKafka::CERT_ENC_PEM); - do_test_verify(__LINE__, true, - USE_SETTER, RdKafka::CERT_ENC_PEM, - USE_SETTER, RdKafka::CERT_ENC_PEM, - USE_SETTER, RdKafka::CERT_ENC_PKCS12); - do_test_verify(__LINE__, true, - USE_LOCATION, RdKafka::CERT_ENC_PEM, - USE_SETTER, RdKafka::CERT_ENC_DER, - USE_SETTER, RdKafka::CERT_ENC_DER); - do_test_verify(__LINE__, true, - USE_SETTER, RdKafka::CERT_ENC_PKCS12, - USE_SETTER, RdKafka::CERT_ENC_PKCS12, - USE_SETTER, RdKafka::CERT_ENC_PKCS12); +int main_0097_ssl_verify(int argc, char **argv) { + if (!test_check_builtin("ssl")) { + Test::Skip("Test requires SSL support\n"); + return 0; + } + if (!test_getenv("RDK_SSL_pkcs", NULL)) { + Test::Skip("Test requires SSL_* env-vars set up by trivup\n"); return 0; } - int main_0097_ssl_verify_local (int argc, char **argv) { - if (!test_check_builtin("ssl")) { - Test::Skip("Test requires SSL support\n"); - return 0; - } + do_test_bad_calls(); + + do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM, + USE_LOCATION, RdKafka::CERT_ENC_PEM, USE_LOCATION, + RdKafka::CERT_ENC_PEM); + do_test_verify(__LINE__, false, USE_LOCATION, RdKafka::CERT_ENC_PEM, + USE_LOCATION, RdKafka::CERT_ENC_PEM, USE_LOCATION, + RdKafka::CERT_ENC_PEM); + + /* Verify various priv and pub key and CA input formats */ + do_test_verify(__LINE__, true, USE_CONF, RdKafka::CERT_ENC_PEM, USE_CONF, + RdKafka::CERT_ENC_PEM, USE_LOCATION, 
RdKafka::CERT_ENC_PEM); + do_test_verify(__LINE__, true, USE_CONF, RdKafka::CERT_ENC_PEM, USE_CONF, + RdKafka::CERT_ENC_PEM, USE_CONF, RdKafka::CERT_ENC_PEM); + do_test_verify(__LINE__, true, USE_SETTER, RdKafka::CERT_ENC_PEM, USE_SETTER, + RdKafka::CERT_ENC_PEM, USE_SETTER, RdKafka::CERT_ENC_PKCS12); + do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM, + USE_SETTER, RdKafka::CERT_ENC_DER, USE_SETTER, + RdKafka::CERT_ENC_DER); + do_test_verify(__LINE__, true, USE_SETTER, RdKafka::CERT_ENC_PKCS12, + USE_SETTER, RdKafka::CERT_ENC_PKCS12, USE_SETTER, + RdKafka::CERT_ENC_PKCS12); + + return 0; +} - /* Check that creating a client with an invalid PEM string fails. */ - const std::string props[] = { "ssl.ca.pem", "ssl.key.pem", - "ssl.certificate.pem", "" }; +int main_0097_ssl_verify_local(int argc, char **argv) { + if (!test_check_builtin("ssl")) { + Test::Skip("Test requires SSL support\n"); + return 0; + } - for (int i = 0 ; props[i] != "" ; i++) { - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - std::string errstr; + /* Check that creating a client with an invalid PEM string fails. 
*/ + const std::string props[] = {"ssl.ca.pem", "ssl.key.pem", + "ssl.certificate.pem", ""}; - if (conf->set("security.protocol", "SSL", errstr)) - Test::Fail(errstr); - conf->set("debug", "security", errstr); - if (conf->set(props[i], "this is \n not a \t PEM!", errstr)) - Test::Fail("Setting " + props[i] + " to junk should work, " - "expecting failure on client creation"); + for (int i = 0; props[i] != ""; i++) { + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); - delete conf; - if (producer) - Test::Fail("Expected producer creation to fail with " + props[i] + - " set to junk"); - else - Test::Say("Failed to create producer with junk " + props[i] + - " (as expected): " + errstr + "\n"); - } + std::string errstr; - return 0; + if (conf->set("security.protocol", "SSL", errstr)) + Test::Fail(errstr); + conf->set("debug", "security", errstr); + if (conf->set(props[i], "this is \n not a \t PEM!", errstr)) + Test::Fail("Setting " + props[i] + + " to junk should work, " + "expecting failure on client creation"); + + RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); + delete conf; + if (producer) + Test::Fail("Expected producer creation to fail with " + props[i] + + " set to junk"); + else + Test::Say("Failed to create producer with junk " + props[i] + + " (as expected): " + errstr + "\n"); } + return 0; +} } diff --git a/tests/0098-consumer-txn.cpp b/tests/0098-consumer-txn.cpp index 26706ffc8d..1bdb46d0bf 100644 --- a/tests/0098-consumer-txn.cpp +++ b/tests/0098-consumer-txn.cpp @@ -58,58 +58,52 @@ class TestEventCb : public RdKafka::EventCb { public: - static bool should_capture_stats; static bool has_captured_stats; static int64_t partition_0_hi_offset; static int64_t partition_0_ls_offset; static std::string topic; - void event_cb (RdKafka::Event &event) { - - switch (event.type()) - { - case RdKafka::Event::EVENT_STATS: - if (should_capture_stats) 
{ - partition_0_hi_offset = -1; - partition_0_ls_offset = -1; - - has_captured_stats = true; - should_capture_stats = false; - char path[256]; - - /* Parse JSON to validate */ - rapidjson::Document d; - if (d.Parse(event.str().c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse stats JSON: " << - rapidjson::GetParseError_En(d.GetParseError()) << - " at " << d.GetErrorOffset()); - - rd_snprintf(path, sizeof(path), - "/topics/%s/partitions/0", - topic.c_str()); - - rapidjson::Pointer jpath((const char *)path); - rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath); - if (pp == NULL) - return; - - TEST_ASSERT(pp->HasMember("hi_offset"), - "hi_offset not found in stats"); - TEST_ASSERT(pp->HasMember("ls_offset"), - "ls_offset not found in stats"); - - partition_0_hi_offset = (*pp)["hi_offset"].GetInt(); - partition_0_ls_offset = (*pp)["ls_offset"].GetInt(); - } - break; + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_STATS: + if (should_capture_stats) { + partition_0_hi_offset = -1; + partition_0_ls_offset = -1; + + has_captured_stats = true; + should_capture_stats = false; + char path[256]; + + /* Parse JSON to validate */ + rapidjson::Document d; + if (d.Parse(event.str().c_str()).HasParseError()) + Test::Fail(tostr() << "Failed to parse stats JSON: " + << rapidjson::GetParseError_En(d.GetParseError()) + << " at " << d.GetErrorOffset()); + + rd_snprintf(path, sizeof(path), "/topics/%s/partitions/0", + topic.c_str()); + + rapidjson::Pointer jpath((const char *)path); + rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath); + if (pp == NULL) + return; + + TEST_ASSERT(pp->HasMember("hi_offset"), "hi_offset not found in stats"); + TEST_ASSERT(pp->HasMember("ls_offset"), "ls_offset not found in stats"); + + partition_0_hi_offset = (*pp)["hi_offset"].GetInt(); + partition_0_ls_offset = (*pp)["ls_offset"].GetInt(); + } + break; - case RdKafka::Event::EVENT_LOG: - std::cerr << event.str() << 
"\n"; - break; + case RdKafka::Event::EVENT_LOG: + std::cerr << event.str() << "\n"; + break; - default: - break; + default: + break; } } }; @@ -126,19 +120,19 @@ static TestEventCb ex_event_cb; static void execute_java_produce_cli(std::string &bootstrapServers, const std::string &topic, const std::string &testidstr, - const char **cmds, size_t cmd_cnt) { - const std::string topicCmd = "topic," + topic; + const char **cmds, + size_t cmd_cnt) { + const std::string topicCmd = "topic," + topic; const std::string testidCmd = "testid," + testidstr; const char **argv; size_t i = 0; - argv = (const char **)rd_alloca(sizeof(*argv) * - (1 + 1 + 1 + cmd_cnt + 1)); + argv = (const char **)rd_alloca(sizeof(*argv) * (1 + 1 + 1 + cmd_cnt + 1)); argv[i++] = bootstrapServers.c_str(); argv[i++] = topicCmd.c_str(); argv[i++] = testidCmd.c_str(); - for (size_t j = 0 ; j < cmd_cnt ; j++) + for (size_t j = 0; j < cmd_cnt; j++) argv[i++] = cmds[j]; argv[i] = NULL; @@ -147,41 +141,37 @@ static void execute_java_produce_cli(std::string &bootstrapServers, test_waitpid(pid); } -static std::vector consume_messages( - RdKafka::KafkaConsumer *c, - std::string topic, - int partition) { +static std::vector +consume_messages(RdKafka::KafkaConsumer *c, std::string topic, int partition) { RdKafka::ErrorCode err; /* Assign partitions */ - std::vector parts; + std::vector parts; parts.push_back(RdKafka::TopicPartition::create(topic, partition)); if ((err = c->assign(parts))) Test::Fail("assign failed: " + RdKafka::err2str(err)); RdKafka::TopicPartition::destroy(parts); - Test::Say(tostr() << "Consuming from topic " << topic << - " partition " << partition << "\n"); + Test::Say(tostr() << "Consuming from topic " << topic << " partition " + << partition << "\n"); std::vector result = std::vector(); while (true) { RdKafka::Message *msg = c->consume(tmout_multip(1000)); - switch (msg->err()) - { - case RdKafka::ERR__TIMED_OUT: - delete msg; - continue; - case RdKafka::ERR__PARTITION_EOF: - delete msg; - 
break; - case RdKafka::ERR_NO_ERROR: - result.push_back(msg); - continue; - default: - Test::Fail("Error consuming from topic " + - topic + ": " + msg->errstr()); - delete msg; - break; + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + delete msg; + continue; + case RdKafka::ERR__PARTITION_EOF: + delete msg; + break; + case RdKafka::ERR_NO_ERROR: + result.push_back(msg); + continue; + default: + Test::Fail("Error consuming from topic " + topic + ": " + msg->errstr()); + delete msg; + break; } break; } @@ -205,7 +195,7 @@ static std::vector consume_messages( static void delete_messages(std::vector &messages) { - for (size_t i=0; iset("event_cb", &ex_event_cb, errstr); TestEventCb::should_capture_stats = false; - TestEventCb::has_captured_stats = false; + TestEventCb::has_captured_stats = false; RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); if (!c) @@ -247,7 +236,7 @@ static RdKafka::KafkaConsumer *create_consumer( } -static std::vector csv_split (const std::string &input) { +static std::vector csv_split(const std::string &input) { std::stringstream ss(input); std::vector res; @@ -256,7 +245,7 @@ static std::vector csv_split (const std::string &input) { std::getline(ss, substr, ','); /* Trim */ substr.erase(0, substr.find_first_not_of(' ')); - substr.erase(substr.find_last_not_of(' ')+1); + substr.erase(substr.find_last_not_of(' ') + 1); res.push_back(substr); } @@ -275,10 +264,10 @@ enum TransactionType { TransactionType_ContinueOpen }; -static TransactionType TransactionType_from_string (std::string str) { -#define _CHKRET(NAME) \ - if (!str.compare(# NAME)) \ - return TransactionType_ ## NAME +static TransactionType TransactionType_from_string(std::string str) { +#define _CHKRET(NAME) \ + if (!str.compare(#NAME)) \ + return TransactionType_##NAME _CHKRET(None); _CHKRET(BeginAbort); @@ -290,24 +279,21 @@ static TransactionType TransactionType_from_string (std::string str) { Test::Fail("Unknown TransactionType: " + str); - return 
TransactionType_None; /* NOTREACHED */ + return TransactionType_None; /* NOTREACHED */ } -static void txn_producer_makeTestMessages (RdKafka::Producer *producer, - const std::string &topic, - const std::string &testidstr, - int partition, - int idStart, - int msgcount, - TransactionType tt, - bool do_flush) { - - +static void txn_producer_makeTestMessages(RdKafka::Producer *producer, + const std::string &topic, + const std::string &testidstr, + int partition, + int idStart, + int msgcount, + TransactionType tt, + bool do_flush) { RdKafka::Error *error; - if (tt != TransactionType_None && - tt != TransactionType_ContinueOpen && + if (tt != TransactionType_None && tt != TransactionType_ContinueOpen && tt != TransactionType_ContinueCommit && tt != TransactionType_ContinueAbort) { error = producer->begin_transaction(); @@ -317,15 +303,13 @@ static void txn_producer_makeTestMessages (RdKafka::Producer *producer, } } - for (int i = 0 ; i < msgcount ; i++) { - char key[] = { (char)((i + idStart) & 0xff) }; - char payload[] = { 0x10, 0x20, 0x30, 0x40 }; + for (int i = 0; i < msgcount; i++) { + char key[] = {(char)((i + idStart) & 0xff)}; + char payload[] = {0x10, 0x20, 0x30, 0x40}; RdKafka::ErrorCode err; - err = producer->produce(topic, partition, producer->RK_MSG_COPY, - payload, sizeof(payload), - key, sizeof(key), - 0, NULL); + err = producer->produce(topic, partition, producer->RK_MSG_COPY, payload, + sizeof(payload), key, sizeof(key), 0, NULL); if (err) Test::Fail("produce() failed: " + RdKafka::err2str(err)); } @@ -336,7 +320,7 @@ static void txn_producer_makeTestMessages (RdKafka::Producer *producer, switch (tt) { case TransactionType_BeginAbort: case TransactionType_ContinueAbort: - error = producer->abort_transaction(30*1000); + error = producer->abort_transaction(30 * 1000); if (error) { Test::Fail("abort_transaction() failed: " + error->str()); delete error; @@ -345,7 +329,7 @@ static void txn_producer_makeTestMessages (RdKafka::Producer *producer, case 
TransactionType_BeginCommit: case TransactionType_ContinueCommit: - error = producer->commit_transaction(30*1000); + error = producer->commit_transaction(30 * 1000); if (error) { Test::Fail("commit_transaction() failed: " + error->str()); delete error; @@ -360,7 +344,7 @@ static void txn_producer_makeTestMessages (RdKafka::Producer *producer, class txnDeliveryReportCb : public RdKafka::DeliveryReportCb { public: - void dr_cb (RdKafka::Message &msg) { + void dr_cb(RdKafka::Message &msg) { switch (msg.err()) { case RdKafka::ERR__PURGE_QUEUE: case RdKafka::ERR__PURGE_INFLIGHT: @@ -383,9 +367,11 @@ class txnDeliveryReportCb : public RdKafka::DeliveryReportCb { * This is the librdkafka counterpart of * java/TransactionProducerCli.java */ -static void txn_producer (const std::string &brokers, const std::string &topic, - const std::string &testidstr, - const char **cmds, size_t cmd_cnt) { +static void txn_producer(const std::string &brokers, + const std::string &topic, + const std::string &testidstr, + const char **cmds, + size_t cmd_cnt) { RdKafka::Conf *conf; txnDeliveryReportCb txn_dr; @@ -393,9 +379,9 @@ static void txn_producer (const std::string &brokers, const std::string &topic, Test::conf_set(conf, "bootstrap.servers", brokers); - std::map producers; + std::map producers; - for (size_t i = 0 ; i < cmd_cnt ; i++) { + for (size_t i = 0; i < cmd_cnt; i++) { std::string cmdstr = std::string(cmds[i]); Test::Say(_C_CLR "rdkafka txn producer command: " + cmdstr + "\n"); @@ -406,14 +392,14 @@ static void txn_producer (const std::string &brokers, const std::string &topic, rd_usleep(atoi(cmd[1].c_str()) * 1000, NULL); } else if (!cmd[0].compare("exit")) { - break; /* We can't really simulate the Java exit behaviour - * from in-process. */ + break; /* We can't really simulate the Java exit behaviour + * from in-process. 
*/ } else if (cmd[0].find("producer") == 0) { TransactionType txntype = TransactionType_from_string(cmd[4]); - std::map::iterator it = - producers.find(cmd[0]); + std::map::iterator it = + producers.find(cmd[0]); RdKafka::Producer *producer; @@ -421,9 +407,9 @@ static void txn_producer (const std::string &brokers, const std::string &topic, /* Create producer if it doesn't exist */ std::string errstr; - Test::Say(tostr() << "Creating producer " << cmd[0] << - " with transactiontype " << txntype << - " '" << cmd[4] << "'\n"); + Test::Say(tostr() << "Creating producer " << cmd[0] + << " with transactiontype " << txntype << " '" + << cmd[4] << "'\n"); /* Config */ Test::conf_set(conf, "enable.idempotence", "true"); @@ -442,7 +428,7 @@ static void txn_producer (const std::string &brokers, const std::string &topic, /* Init transactions if producer is transactional */ if (txntype != TransactionType_None) { - RdKafka::Error *error = producer->init_transactions(20*1000); + RdKafka::Error *error = producer->init_transactions(20 * 1000); if (error) { Test::Fail("init_transactions() failed: " + error->str()); delete error; @@ -455,15 +441,15 @@ static void txn_producer (const std::string &brokers, const std::string &topic, producer = it->second; } - txn_producer_makeTestMessages - (producer, /* producer */ - topic, /* topic */ - testidstr, /* testid */ - atoi(cmd[1].c_str()), /* partition */ - (int)strtol(cmd[2].c_str(), NULL, 0), /* idStart */ - atoi(cmd[3].c_str()), /* msg count */ - txntype, /* TransactionType */ - !cmd[5].compare("DoFlush") /* Flush */); + txn_producer_makeTestMessages( + producer, /* producer */ + topic, /* topic */ + testidstr, /* testid */ + atoi(cmd[1].c_str()), /* partition */ + (int)strtol(cmd[2].c_str(), NULL, 0), /* idStart */ + atoi(cmd[3].c_str()), /* msg count */ + txntype, /* TransactionType */ + !cmd[5].compare("DoFlush") /* Flush */); } else { Test::Fail("Unknown command: " + cmd[0]); @@ -473,14 +459,14 @@ static void txn_producer (const 
std::string &brokers, const std::string &topic, delete conf; for (std::map::iterator it = - producers.begin(); it != producers.end() ; it++) + producers.begin(); + it != producers.end(); it++) delete it->second; } - -static void do_test_consumer_txn_test (bool use_java_producer) { +static void do_test_consumer_txn_test(bool use_java_producer) { std::string errstr; std::string topic_name; RdKafka::KafkaConsumer *c; @@ -489,17 +475,18 @@ static void do_test_consumer_txn_test (bool use_java_producer) { std::string bootstrap_servers = get_bootstrap_servers(); - Test::Say(tostr() << _C_BLU "[ Consumer transaction tests using " << - (use_java_producer ? "java" : "librdkafka" ) << - " producer with testid " << testidstr << "]\n" _C_CLR); - -#define run_producer(CMDS...) do { \ - const char *_cmds[] = { CMDS }; \ - size_t _cmd_cnt = sizeof(_cmds) / sizeof(*_cmds); \ - if (use_java_producer) \ - execute_java_produce_cli(bootstrap_servers, topic_name, testidstr, \ - _cmds, _cmd_cnt); \ - else \ + Test::Say(tostr() << _C_BLU "[ Consumer transaction tests using " + << (use_java_producer ? "java" : "librdkafka") + << " producer with testid " << testidstr << "]\n" _C_CLR); + +#define run_producer(CMDS...) 
\ + do { \ + const char *_cmds[] = {CMDS}; \ + size_t _cmd_cnt = sizeof(_cmds) / sizeof(*_cmds); \ + if (use_java_producer) \ + execute_java_produce_cli(bootstrap_servers, topic_name, testidstr, \ + _cmds, _cmd_cnt); \ + else \ txn_producer(bootstrap_servers, topic_name, testidstr, _cmds, _cmd_cnt); \ } while (0) @@ -512,7 +499,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 0 - basic commit + abort\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-0", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer1, -1, 0x0, 5, BeginCommit, DoFlush", @@ -521,7 +508,8 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 5, "Consumed unexpected number of messages. " - "Expected 5, got: %d", (int)msgs.size()); + "Expected 5, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0], @@ -530,23 +518,24 @@ static void do_test_consumer_txn_test (bool use_java_producer) { c->close(); delete c; -#define expect_msgcnt(msgcnt) \ - TEST_ASSERT(msgs.size() == msgcnt, \ - "Expected %d messages, got %d", (int)msgs.size(), msgcnt) - -#define expect_key(msgidx,value) do { \ - TEST_ASSERT(msgs.size() > msgidx, \ - "Expected at least %d message(s), only got %d", \ - msgidx+1, (int)msgs.size()); \ - TEST_ASSERT(msgs[msgidx]->key_len() == 1, \ - "Expected msg #%d key to be of size 1, not %d\n", \ - msgidx, (int)msgs[msgidx]->key_len()); \ - TEST_ASSERT(value == (int)msgs[msgidx]->key()->c_str()[0], \ - "Expected msg #%d key 0x%x, not 0x%x", \ - msgidx, value, (int)msgs[msgidx]->key()->c_str()[0]); \ +#define expect_msgcnt(msgcnt) \ + TEST_ASSERT(msgs.size() == msgcnt, "Expected %d messages, got 
%d", \ + (int)msgs.size(), msgcnt) + +#define expect_key(msgidx, value) \ + do { \ + TEST_ASSERT(msgs.size() > msgidx, \ + "Expected at least %d message(s), only got %d", msgidx + 1, \ + (int)msgs.size()); \ + TEST_ASSERT(msgs[msgidx]->key_len() == 1, \ + "Expected msg #%d key to be of size 1, not %d\n", msgidx, \ + (int)msgs[msgidx]->key_len()); \ + TEST_ASSERT(value == (int)msgs[msgidx]->key()->c_str()[0], \ + "Expected msg #%d key 0x%x, not 0x%x", msgidx, value, \ + (int)msgs[msgidx]->key()->c_str()[0]); \ } while (0) - c = create_consumer(topic_name, "READ_UNCOMMITTED"); + c = create_consumer(topic_name, "READ_UNCOMMITTED"); msgs = consume_messages(c, topic_name, 0); expect_msgcnt(10); expect_key(0, 0x0); @@ -564,7 +553,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 0.1\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-0.1", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer1, -1, 0x0, 5, BeginCommit, DontFlush", @@ -573,7 +562,8 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 5, "Consumed unexpected number of messages. " - "Expected 5, got: %d", (int)msgs.size()); + "Expected 5, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0], @@ -582,11 +572,12 @@ static void do_test_consumer_txn_test (bool use_java_producer) { c->close(); delete c; - c = create_consumer(topic_name, "READ_UNCOMMITTED"); + c = create_consumer(topic_name, "READ_UNCOMMITTED"); msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 10, "Consumed unexpected number of messages. 
" - "Expected 10, got: %d", (int)msgs.size()); + "Expected 10, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0], @@ -606,7 +597,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 0.2\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-0.2", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer1, -1, 0x10, 5, BeginAbort, DoFlush", @@ -615,7 +606,8 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 5, "Consumed unexpected number of messages. " - "Expected 5, got: %d", (int)msgs.size()); + "Expected 5, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x30 == msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x34 == msgs[4]->key()->c_str()[0], @@ -624,11 +616,12 @@ static void do_test_consumer_txn_test (bool use_java_producer) { c->close(); delete c; - c = create_consumer(topic_name, "READ_UNCOMMITTED"); + c = create_consumer(topic_name, "READ_UNCOMMITTED"); msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 10, "Consumed unexpected number of messages. 
" - "Expected 10, got: %d", (int)msgs.size()); + "Expected 10, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0], @@ -648,7 +641,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 1 - mixed with non-transactional.\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-1", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); TestEventCb::topic = topic_name; @@ -659,8 +652,8 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(TestEventCb::partition_0_ls_offset != -1 && - TestEventCb::partition_0_ls_offset == - TestEventCb::partition_0_hi_offset, + TestEventCb::partition_0_ls_offset == + TestEventCb::partition_0_hi_offset, "Expected hi_offset to equal ls_offset but " "got hi_offset: %" PRId64 ", ls_offset: %" PRId64, TestEventCb::partition_0_hi_offset, @@ -668,18 +661,15 @@ static void do_test_consumer_txn_test (bool use_java_producer) { TEST_ASSERT(msgs.size() == 10, "Consumed unexpected number of messages. 
" - "Expected 10, got: %d", (int)msgs.size()); - TEST_ASSERT(msgs[0]->key_len() >= 1 && - 0x10 == msgs[0]->key()->c_str()[0], + "Expected 10, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0], "Unexpected key"); - TEST_ASSERT(msgs[4]->key_len() >= 1 && - 0x14 == msgs[4]->key()->c_str()[0], + TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0], "Unexpected key"); - TEST_ASSERT(msgs[5]->key_len() >= 1 && - 0x50 == msgs[5]->key()->c_str()[0], + TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x50 == msgs[5]->key()->c_str()[0], "Unexpected key"); - TEST_ASSERT(msgs[9]->key_len() >= 1 && - 0x54 == msgs[9]->key()->c_str()[0], + TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x54 == msgs[9]->key()->c_str()[0], "Unexpected key"); delete_messages(msgs); @@ -691,7 +681,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 1.1\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-1.1", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer1, -1, 0x30, 5, BeginAbort, DoFlush", @@ -702,7 +692,8 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 10, "Consumed unexpected number of messages. 
" - "Expected 10, got: %d", (int)msgs.size()); + "Expected 10, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x40 == msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x44 == msgs[4]->key()->c_str()[0], @@ -722,7 +713,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 1.2\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-1.2", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer1, -1, 0x10, 5, BeginCommit, DoFlush", @@ -732,7 +723,8 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 10, "Consumed unexpected number of messages. " - "Expected 10, got: %d", (int)msgs.size()); + "Expected 10, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0], @@ -753,7 +745,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { // note: aborted records never seem to make it to the broker when not flushed. topic_name = Test::mk_topic_name("0098-consumer_txn-2", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer1, -1, 0x10, 1, BeginAbort, DontFlush", @@ -772,27 +764,28 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 7, "Consumed unexpected number of messages. 
" - "Expected 7, got: %d", (int)msgs.size()); + "Expected 7, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && - 0x20 == (unsigned char)msgs[0]->key()->c_str()[0], + 0x20 == (unsigned char)msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[1]->key_len() >= 1 && - 0x40 == (unsigned char)msgs[1]->key()->c_str()[0], + 0x40 == (unsigned char)msgs[1]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[2]->key_len() >= 1 && - 0x60 == (unsigned char)msgs[2]->key()->c_str()[0], + 0x60 == (unsigned char)msgs[2]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[3]->key_len() >= 1 && - 0x80 == (unsigned char)msgs[3]->key()->c_str()[0], + 0x80 == (unsigned char)msgs[3]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && - 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0], + 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[5]->key_len() >= 1 && - 0xb0 == (unsigned char)msgs[5]->key()->c_str()[0], + 0xb0 == (unsigned char)msgs[5]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[6]->key_len() >= 1 && - 0xc0 == (unsigned char)msgs[6]->key()->c_str()[0], + 0xc0 == (unsigned char)msgs[6]->key()->c_str()[0], "Unexpected key"); delete_messages(msgs); @@ -805,7 +798,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 2.1\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-2.1", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer1, -1, 0x10, 1, BeginAbort, DoFlush", @@ -824,57 +817,59 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 7, "Consumed unexpected number of messages. 
" - "Expected 7, got: %d", (int)msgs.size()); + "Expected 7, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && - 0x20 == (unsigned char)msgs[0]->key()->c_str()[0], + 0x20 == (unsigned char)msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[1]->key_len() >= 1 && - 0x40 == (unsigned char)msgs[1]->key()->c_str()[0], + 0x40 == (unsigned char)msgs[1]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[2]->key_len() >= 1 && - 0x60 == (unsigned char)msgs[2]->key()->c_str()[0], + 0x60 == (unsigned char)msgs[2]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[3]->key_len() >= 1 && - 0x80 == (unsigned char)msgs[3]->key()->c_str()[0], + 0x80 == (unsigned char)msgs[3]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && - 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0], + 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[5]->key_len() >= 1 && - 0xb0 == (unsigned char)msgs[5]->key()->c_str()[0], + 0xb0 == (unsigned char)msgs[5]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[6]->key_len() >= 1 && - 0xc0 == (unsigned char)msgs[6]->key()->c_str()[0], + 0xc0 == (unsigned char)msgs[6]->key()->c_str()[0], "Unexpected key"); delete_messages(msgs); c->close(); delete c; - c = create_consumer(topic_name, "READ_UNCOMMITTED"); + c = create_consumer(topic_name, "READ_UNCOMMITTED"); msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 12, "Consumed unexpected number of messages. 
" - "Expected 12, got: %d", (int)msgs.size()); + "Expected 12, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && - 0x10 == (unsigned char)msgs[0]->key()->c_str()[0], + 0x10 == (unsigned char)msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[1]->key_len() >= 1 && - 0x20 == (unsigned char)msgs[1]->key()->c_str()[0], + 0x20 == (unsigned char)msgs[1]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[2]->key_len() >= 1 && - 0x30 == (unsigned char)msgs[2]->key()->c_str()[0], + 0x30 == (unsigned char)msgs[2]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[3]->key_len() >= 1 && - 0x40 == (unsigned char)msgs[3]->key()->c_str()[0], + 0x40 == (unsigned char)msgs[3]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && - 0x50 == (unsigned char)msgs[4]->key()->c_str()[0], + 0x50 == (unsigned char)msgs[4]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[5]->key_len() >= 1 && - 0x60 == (unsigned char)msgs[5]->key()->c_str()[0], + 0x60 == (unsigned char)msgs[5]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[6]->key_len() >= 1 && - 0x70 == (unsigned char)msgs[6]->key()->c_str()[0], + 0x70 == (unsigned char)msgs[6]->key()->c_str()[0], "Unexpected key"); delete_messages(msgs); @@ -887,7 +882,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 3 - cross partition (simple).\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-3", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 2, 3); run_producer("producer1, 0, 0x10, 3, BeginOpen, DoFlush", @@ -897,26 +892,30 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 6, "Consumed unexpected number of messages. 
" - "Expected 6, got: %d", (int)msgs.size()); + "Expected 6, got: %d", + (int)msgs.size()); delete_messages(msgs); msgs = consume_messages(c, topic_name, 1); TEST_ASSERT(msgs.size() == 3, "Consumed unexpected number of messages. " - "Expected 3, got: %d", (int)msgs.size()); + "Expected 3, got: %d", + (int)msgs.size()); delete_messages(msgs); c->close(); delete c; - c = create_consumer(topic_name, "READ_UNCOMMITTED"); + c = create_consumer(topic_name, "READ_UNCOMMITTED"); msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 6, "Consumed unexpected number of messages. " - "Expected 6, got: %d", (int)msgs.size()); + "Expected 6, got: %d", + (int)msgs.size()); delete_messages(msgs); msgs = consume_messages(c, topic_name, 1); TEST_ASSERT(msgs.size() == 3, "Consumed unexpected number of messages. " - "Expected 3, got: %d", (int)msgs.size()); + "Expected 3, got: %d", + (int)msgs.size()); delete_messages(msgs); Test::delete_topic(c, topic_name.c_str()); @@ -928,7 +927,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 3.1\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-3.1", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 2, 3); run_producer("producer1, 0, 0x55, 1, BeginCommit, DoFlush", @@ -940,21 +939,23 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 2, - "Consumed unexpected number of messages. " - "Expected 2, got: %d", (int)msgs.size()); + "Consumed unexpected number of messages. 
" + "Expected 2, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && - 0x55 == (unsigned char)msgs[0]->key()->c_str()[0], + 0x55 == (unsigned char)msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[1]->key_len() >= 1 && - 0x00 == (unsigned char)msgs[1]->key()->c_str()[0], + 0x00 == (unsigned char)msgs[1]->key()->c_str()[0], "Unexpected key"); delete_messages(msgs); msgs = consume_messages(c, topic_name, 1); TEST_ASSERT(msgs.size() == 1, "Consumed unexpected number of messages. " - "Expected 1, got: %d", (int)msgs.size()); + "Expected 1, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && - 0x44 == (unsigned char)msgs[0]->key()->c_str()[0], + 0x44 == (unsigned char)msgs[0]->key()->c_str()[0], "Unexpected key"); delete_messages(msgs); @@ -967,7 +968,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 4 - simultaneous transactions (simple).\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-4", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer3, 0, 0x10, 1, None, DoFlush", @@ -979,16 +980,18 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 7, "Consumed unexpected number of messages. " - "Expected 7, got: %d", (int)msgs.size()); + "Expected 7, got: %d", + (int)msgs.size()); delete_messages(msgs); c->close(); delete c; - c = create_consumer(topic_name, "READ_UNCOMMITTED"); + c = create_consumer(topic_name, "READ_UNCOMMITTED"); msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 13, "Consumed unexpected number of messages. 
" - "Expected 13, got: %d", (int)msgs.size()); + "Expected 13, got: %d", + (int)msgs.size()); delete_messages(msgs); Test::delete_topic(c, topic_name.c_str()); @@ -1000,7 +1003,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 4.1\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-4.1", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer3, 0, 0x10, 1, None, DoFlush", @@ -1012,16 +1015,18 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 7, "Consumed unexpected number of messages. " - "Expected 7, got: %d", (int)msgs.size()); + "Expected 7, got: %d", + (int)msgs.size()); delete_messages(msgs); c->close(); delete c; - c = create_consumer(topic_name, "READ_UNCOMMITTED"); + c = create_consumer(topic_name, "READ_UNCOMMITTED"); msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 13, "Consumed unexpected number of messages. " - "Expected 13, got: %d", (int)msgs.size()); + "Expected 13, got: %d", + (int)msgs.size()); delete_messages(msgs); Test::delete_topic(c, topic_name.c_str()); @@ -1033,7 +1038,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 4.2\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-4.2", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer3, 0, 0x10, 1, None, DoFlush", @@ -1045,16 +1050,18 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 13, "Consumed unexpected number of messages. 
" - "Expected 7, got: %d", (int)msgs.size()); + "Expected 7, got: %d", + (int)msgs.size()); delete_messages(msgs); c->close(); delete c; - c = create_consumer(topic_name, "READ_UNCOMMITTED"); + c = create_consumer(topic_name, "READ_UNCOMMITTED"); msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 13, "Consumed unexpected number of messages. " - "Expected 13, got: %d", (int)msgs.size()); + "Expected 13, got: %d", + (int)msgs.size()); delete_messages(msgs); Test::delete_topic(c, topic_name.c_str()); @@ -1066,7 +1073,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 4.3\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-4.3", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); run_producer("producer3, 0, 0x10, 1, None, DoFlush", @@ -1078,16 +1085,18 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 1, "Consumed unexpected number of messages. " - "Expected 7, got: %d", (int)msgs.size()); + "Expected 7, got: %d", + (int)msgs.size()); delete_messages(msgs); c->close(); delete c; - c = create_consumer(topic_name, "READ_UNCOMMITTED"); + c = create_consumer(topic_name, "READ_UNCOMMITTED"); msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 13, "Consumed unexpected number of messages. 
" - "Expected 13, got: %d", (int)msgs.size()); + "Expected 13, got: %d", + (int)msgs.size()); delete_messages(msgs); Test::delete_topic(c, topic_name.c_str()); @@ -1101,53 +1110,50 @@ static void do_test_consumer_txn_test (bool use_java_producer) { test5: topic_name = Test::mk_topic_name("0098-consumer_txn-5", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); - run_producer("producer1, 0, 0x10, 2, BeginOpen, DontFlush", - "sleep,200", + run_producer("producer1, 0, 0x10, 2, BeginOpen, DontFlush", "sleep,200", "producer1, 0, 0x20, 2, ContinueAbort, DontFlush", - "producer1, 0, 0x30, 2, BeginOpen, DontFlush", - "sleep,200", + "producer1, 0, 0x30, 2, BeginOpen, DontFlush", "sleep,200", "producer1, 0, 0x40, 2, ContinueCommit, DontFlush", - "producer1, 0, 0x50, 2, BeginOpen, DontFlush", - "sleep,200", + "producer1, 0, 0x50, 2, BeginOpen, DontFlush", "sleep,200", "producer1, 0, 0x60, 2, ContinueAbort, DontFlush", - "producer1, 0, 0xa0, 2, BeginOpen, DontFlush", - "sleep,200", + "producer1, 0, 0xa0, 2, BeginOpen, DontFlush", "sleep,200", "producer1, 0, 0xb0, 2, ContinueCommit, DontFlush", "producer3, 0, 0x70, 1, None, DoFlush"); msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 9, "Consumed unexpected number of messages. 
" - "Expected 9, got: %d", (int)msgs.size()); + "Expected 9, got: %d", + (int)msgs.size()); TEST_ASSERT(msgs[0]->key_len() >= 1 && - 0x30 == (unsigned char)msgs[0]->key()->c_str()[0], + 0x30 == (unsigned char)msgs[0]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[1]->key_len() >= 1 && - 0x31 == (unsigned char)msgs[1]->key()->c_str()[0], + 0x31 == (unsigned char)msgs[1]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[2]->key_len() >= 1 && - 0x40 == (unsigned char)msgs[2]->key()->c_str()[0], + 0x40 == (unsigned char)msgs[2]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[3]->key_len() >= 1 && - 0x41 == (unsigned char)msgs[3]->key()->c_str()[0], + 0x41 == (unsigned char)msgs[3]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[4]->key_len() >= 1 && - 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0], + 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[5]->key_len() >= 1 && - 0xa1 == (unsigned char)msgs[5]->key()->c_str()[0], + 0xa1 == (unsigned char)msgs[5]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[6]->key_len() >= 1 && - 0xb0 == (unsigned char)msgs[6]->key()->c_str()[0], + 0xb0 == (unsigned char)msgs[6]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[7]->key_len() >= 1 && - 0xb1 == (unsigned char)msgs[7]->key()->c_str()[0], + 0xb1 == (unsigned char)msgs[7]->key()->c_str()[0], "Unexpected key"); TEST_ASSERT(msgs[8]->key_len() >= 1 && - 0x70 == (unsigned char)msgs[8]->key()->c_str()[0], + 0x70 == (unsigned char)msgs[8]->key()->c_str()[0], "Unexpected key"); delete_messages(msgs); @@ -1160,7 +1166,7 @@ static void do_test_consumer_txn_test (bool use_java_producer) { Test::Say(_C_BLU "Test 6 - transaction left open\n" _C_CLR); topic_name = Test::mk_topic_name("0098-consumer_txn-0", 1); - c = create_consumer(topic_name, "READ_COMMITTED"); + c = create_consumer(topic_name, "READ_COMMITTED"); Test::create_topic(c, topic_name.c_str(), 1, 3); TestEventCb::topic = topic_name; @@ -1172,10 
+1178,11 @@ static void do_test_consumer_txn_test (bool use_java_producer) { msgs = consume_messages(c, topic_name, 0); TEST_ASSERT(msgs.size() == 1, "Consumed unexpected number of messages. " - "Expected 1, got: %d", (int)msgs.size()); + "Expected 1, got: %d", + (int)msgs.size()); TEST_ASSERT(TestEventCb::partition_0_ls_offset + 3 == - TestEventCb::partition_0_hi_offset, + TestEventCb::partition_0_hi_offset, "Expected hi_offset to be 3 greater than ls_offset " "but got hi_offset: %" PRId64 ", ls_offset: %" PRId64, TestEventCb::partition_0_hi_offset, @@ -1192,19 +1199,20 @@ static void do_test_consumer_txn_test (bool use_java_producer) { extern "C" { - int main_0098_consumer_txn (int argc, char **argv) { - if (test_needs_auth()) { - Test::Skip("Authentication or security configuration " - "required on client: not supported in " - "Java transactional producer: skipping tests\n"); - return 0; - } +int main_0098_consumer_txn(int argc, char **argv) { + if (test_needs_auth()) { + Test::Skip( + "Authentication or security configuration " + "required on client: not supported in " + "Java transactional producer: skipping tests\n"); + return 0; + } #if WITH_RAPIDJSON - do_test_consumer_txn_test(true /* with java producer */); - do_test_consumer_txn_test(false /* with librdkafka producer */); + do_test_consumer_txn_test(true /* with java producer */); + do_test_consumer_txn_test(false /* with librdkafka producer */); #else - Test::Skip("RapidJSON >=1.1.0 not available\n"); + Test::Skip("RapidJSON >=1.1.0 not available\n"); #endif - return 0; - } + return 0; +} } diff --git a/tests/0099-commit_metadata.c b/tests/0099-commit_metadata.c index cfaea06890..902849fb24 100644 --- a/tests/0099-commit_metadata.c +++ b/tests/0099-commit_metadata.c @@ -28,39 +28,35 @@ #include "test.h" -static RD_UNUSED -void print_toppar_list (const rd_kafka_topic_partition_list_t *list) { +static RD_UNUSED void +print_toppar_list(const rd_kafka_topic_partition_list_t *list) { int i; TEST_SAY("List 
count: %d\n", list->cnt); - for (i = 0 ; i < list->cnt ; i++) { + for (i = 0; i < list->cnt; i++) { const rd_kafka_topic_partition_t *a = &list->elems[i]; - TEST_SAY(" #%d/%d: " - "%s [%"PRId32"] @ %"PRId64": " - "(%"PRIusz") \"%*s\"\n", - i, list->cnt, - a->topic, - a->partition, - a->offset, - a->metadata_size, - (int)a->metadata_size, - (const char *)a->metadata); + TEST_SAY( + " #%d/%d: " + "%s [%" PRId32 "] @ %" PRId64 + ": " + "(%" PRIusz ") \"%*s\"\n", + i, list->cnt, a->topic, a->partition, a->offset, + a->metadata_size, (int)a->metadata_size, + (const char *)a->metadata); } } -static void compare_toppar_lists ( - const rd_kafka_topic_partition_list_t *lista, - const rd_kafka_topic_partition_list_t *listb) { +static void compare_toppar_lists(const rd_kafka_topic_partition_list_t *lista, + const rd_kafka_topic_partition_list_t *listb) { int i; TEST_ASSERT(lista->cnt == listb->cnt, - "different list lengths: %d != %d", - lista->cnt, listb->cnt); + "different list lengths: %d != %d", lista->cnt, listb->cnt); - for (i = 0 ; i < lista->cnt ; i++) { + for (i = 0; i < lista->cnt; i++) { const rd_kafka_topic_partition_t *a = &lista->elems[i]; const rd_kafka_topic_partition_t *b = &listb->elems[i]; @@ -68,24 +64,19 @@ static void compare_toppar_lists ( a->metadata_size != b->metadata_size || memcmp(a->metadata, b->metadata, a->metadata_size)) TEST_FAIL_LATER( - "Lists did not match at element %d/%d:\n" - " a: %s [%"PRId32"] @ %"PRId64": " - "(%"PRIusz") \"%*s\"\n" - " b: %s [%"PRId32"] @ %"PRId64": " - "(%"PRIusz") \"%*s\"", - i, lista->cnt, - a->topic, - a->partition, - a->offset, - a->metadata_size, - (int)a->metadata_size, - (const char *)a->metadata, - b->topic, - b->partition, - b->offset, - b->metadata_size, - (int)b->metadata_size, - (const char *)b->metadata); + "Lists did not match at element %d/%d:\n" + " a: %s [%" PRId32 "] @ %" PRId64 + ": " + "(%" PRIusz + ") \"%*s\"\n" + " b: %s [%" PRId32 "] @ %" PRId64 + ": " + "(%" PRIusz ") \"%*s\"", + i, 
lista->cnt, a->topic, a->partition, a->offset, + a->metadata_size, (int)a->metadata_size, + (const char *)a->metadata, b->topic, b->partition, + b->offset, b->metadata_size, (int)b->metadata_size, + (const char *)b->metadata); } TEST_LATER_CHECK(); @@ -94,10 +85,10 @@ static void compare_toppar_lists ( static int commit_cb_cnt = 0; -static void offset_commit_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *list, - void *opaque) { +static void offset_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *list, + void *opaque) { commit_cb_cnt++; TEST_ASSERT(!err, "offset_commit_cb failure: %s", rd_kafka_err2str(err)); @@ -105,13 +96,13 @@ static void offset_commit_cb (rd_kafka_t *rk, static void -commit_metadata (const char *group_id, - const rd_kafka_topic_partition_list_t *toppar_to_commit) { +commit_metadata(const char *group_id, + const rd_kafka_topic_partition_list_t *toppar_to_commit) { rd_kafka_resp_err_t err; rd_kafka_t *rk; rd_kafka_conf_t *conf; - test_conf_init(&conf, NULL, 20/*timeout*/); + test_conf_init(&conf, NULL, 20 /*timeout*/); test_conf_set(conf, "group.id", group_id); @@ -134,15 +125,15 @@ commit_metadata (const char *group_id, static void -get_committed_metadata (const char *group_id, - const rd_kafka_topic_partition_list_t *toppar_to_check, - const rd_kafka_topic_partition_list_t *expected_toppar) { +get_committed_metadata(const char *group_id, + const rd_kafka_topic_partition_list_t *toppar_to_check, + const rd_kafka_topic_partition_list_t *expected_toppar) { rd_kafka_resp_err_t err; rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_partition_list_t *committed_toppar; - test_conf_init(&conf, NULL, 20/*timeout*/); + test_conf_init(&conf, NULL, 20 /*timeout*/); test_conf_set(conf, "group.id", group_id); @@ -162,13 +153,13 @@ get_committed_metadata (const char *group_id, rd_kafka_destroy(rk); } -int main_0099_commit_metadata (int argc, char **argv) { +int 
main_0099_commit_metadata(int argc, char **argv) { rd_kafka_topic_partition_list_t *origin_toppar; rd_kafka_topic_partition_list_t *expected_toppar; const char *topic = test_mk_topic_name("0099-commit_metadata", 0); char group_id[16]; - test_conf_init(NULL, NULL, 20/*timeout*/); + test_conf_init(NULL, NULL, 20 /*timeout*/); test_str_id_generate(group_id, sizeof(group_id)); @@ -180,10 +171,10 @@ int main_0099_commit_metadata (int argc, char **argv) { expected_toppar = rd_kafka_topic_partition_list_copy(origin_toppar); - expected_toppar->elems[0].offset = 42; + expected_toppar->elems[0].offset = 42; expected_toppar->elems[0].metadata = rd_strdup("Hello world!"); expected_toppar->elems[0].metadata_size = - strlen(expected_toppar->elems[0].metadata); + strlen(expected_toppar->elems[0].metadata); get_committed_metadata(group_id, origin_toppar, origin_toppar); @@ -196,5 +187,3 @@ int main_0099_commit_metadata (int argc, char **argv) { return 0; } - - diff --git a/tests/0100-thread_interceptors.cpp b/tests/0100-thread_interceptors.cpp index 6a44092c2c..a34ccac980 100644 --- a/tests/0100-thread_interceptors.cpp +++ b/tests/0100-thread_interceptors.cpp @@ -36,33 +36,33 @@ extern "C" { class myThreadCb { public: - myThreadCb(): startCnt_(0), exitCnt_(0) { + myThreadCb() : startCnt_(0), exitCnt_(0) { mtx_init(&lock_, mtx_plain); } ~myThreadCb() { mtx_destroy(&lock_); } - int startCount () { + int startCount() { int cnt; mtx_lock(&lock_); cnt = startCnt_; mtx_unlock(&lock_); return cnt; } - int exitCount () { + int exitCount() { int cnt; mtx_lock(&lock_); cnt = exitCnt_; mtx_unlock(&lock_); return cnt; } - virtual void thread_start_cb (const char *threadname) { + virtual void thread_start_cb(const char *threadname) { Test::Say(tostr() << "Started thread: " << threadname << "\n"); mtx_lock(&lock_); startCnt_++; mtx_unlock(&lock_); } - virtual void thread_exit_cb (const char *threadname) { + virtual void thread_exit_cb(const char *threadname) { Test::Say(tostr() << "Exiting from 
thread: " << threadname << "\n"); mtx_lock(&lock_); exitCnt_++; @@ -79,15 +79,15 @@ class myThreadCb { /** * @brief C to C++ callback trampoline. */ -static rd_kafka_resp_err_t -on_thread_start_trampoline (rd_kafka_t *rk, - rd_kafka_thread_type_t thread_type, - const char *threadname, - void *ic_opaque) { +static rd_kafka_resp_err_t on_thread_start_trampoline( + rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type, + const char *threadname, + void *ic_opaque) { myThreadCb *threadcb = (myThreadCb *)ic_opaque; - Test::Say(tostr() << "on_thread_start(" << thread_type << ", " << - threadname << ") called\n"); + Test::Say(tostr() << "on_thread_start(" << thread_type << ", " << threadname + << ") called\n"); threadcb->thread_start_cb(threadname); @@ -97,15 +97,15 @@ on_thread_start_trampoline (rd_kafka_t *rk, /** * @brief C to C++ callback trampoline. */ -static rd_kafka_resp_err_t -on_thread_exit_trampoline (rd_kafka_t *rk, - rd_kafka_thread_type_t thread_type, - const char *threadname, - void *ic_opaque) { +static rd_kafka_resp_err_t on_thread_exit_trampoline( + rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type, + const char *threadname, + void *ic_opaque) { myThreadCb *threadcb = (myThreadCb *)ic_opaque; - Test::Say(tostr() << "on_thread_exit(" << thread_type << ", " << - threadname << ") called\n"); + Test::Say(tostr() << "on_thread_exit(" << thread_type << ", " << threadname + << ") called\n"); threadcb->thread_exit_cb(threadname); @@ -117,16 +117,16 @@ on_thread_exit_trampoline (rd_kafka_t *rk, * prior to any threads being created. * We use it to set up the instance's thread interceptors. 
*/ -static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { Test::Say("on_new() interceptor called\n"); - rd_kafka_interceptor_add_on_thread_start(rk, "test:0100", - on_thread_start_trampoline, - ic_opaque); + rd_kafka_interceptor_add_on_thread_start( + rk, "test:0100", on_thread_start_trampoline, ic_opaque); rd_kafka_interceptor_add_on_thread_exit(rk, "test:0100", - on_thread_exit_trampoline, - ic_opaque); + on_thread_exit_trampoline, ic_opaque); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -135,19 +135,19 @@ static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, * in case the config object is copied, since interceptors are not * automatically copied. */ -static rd_kafka_resp_err_t on_conf_dup (rd_kafka_conf_t *new_conf, - const rd_kafka_conf_t *old_conf, - size_t filter_cnt, - const char **filter, - void *ic_opaque) { +static rd_kafka_resp_err_t on_conf_dup(rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter, + void *ic_opaque) { Test::Say("on_conf_dup() interceptor called\n"); - return rd_kafka_conf_interceptor_add_on_new(new_conf, "test:0100", - on_new, ic_opaque); + return rd_kafka_conf_interceptor_add_on_new(new_conf, "test:0100", on_new, + ic_opaque); } -static void test_thread_cbs () { +static void test_thread_cbs() { RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); std::string errstr; rd_kafka_conf_t *c_conf; @@ -163,7 +163,7 @@ static void test_thread_cbs () { * 4. In the on_new() interceptor, add the thread interceptors. 
*/ c_conf = conf->c_ptr_global(); rd_kafka_conf_interceptor_add_on_new(c_conf, "test:0100", on_new, - &my_threads); + &my_threads); rd_kafka_conf_interceptor_add_on_conf_dup(c_conf, "test:0100", on_conf_dup, &my_threads); @@ -174,8 +174,8 @@ static void test_thread_cbs () { delete conf; delete p; - Test::Say(tostr() << my_threads.startCount() << " thread start calls, " << - my_threads.exitCount() << " thread exit calls seen\n"); + Test::Say(tostr() << my_threads.startCount() << " thread start calls, " + << my_threads.exitCount() << " thread exit calls seen\n"); /* 3 = rdkafka main thread + internal broker + bootstrap broker */ if (my_threads.startCount() < 3) @@ -188,8 +188,8 @@ static void test_thread_cbs () { extern "C" { - int main_0100_thread_interceptors (int argc, char **argv) { - test_thread_cbs(); - return 0; - } +int main_0100_thread_interceptors(int argc, char **argv) { + test_thread_cbs(); + return 0; +} } diff --git a/tests/0101-fetch-from-follower.cpp b/tests/0101-fetch-from-follower.cpp index 8b4e3c7a01..0168ac55d3 100644 --- a/tests/0101-fetch-from-follower.cpp +++ b/tests/0101-fetch-from-follower.cpp @@ -55,13 +55,13 @@ * broker's `broker.rack` (and use * org.apache.kafka.common.replica.RackAwareReplicaSelector). * - consume the messages, and check they are as expected. - * - use rxbytes from the statistics event to confirm that + * - use rxbytes from the statistics event to confirm that * the messages were retrieved from the replica broker (not the * leader). 
*/ -static void test_assert (bool cond, std::string msg) { +static void test_assert(bool cond, std::string msg) { if (!cond) Test::Say(msg); assert(cond); @@ -74,44 +74,44 @@ class TestEvent2Cb : public RdKafka::EventCb { static bool has_captured_stats; static std::map rxbytes; - void event_cb (RdKafka::Event &event) { - - switch (event.type()) - { - case RdKafka::Event::EVENT_LOG: - Test::Say(event.str() + "\n"); - break; - case RdKafka::Event::EVENT_STATS: - if (should_capture_stats) { - - rapidjson::Document d; - if (d.Parse(event.str().c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse stats JSON: " << - rapidjson::GetParseError_En(d.GetParseError()) << - " at " << d.GetErrorOffset()); - - /* iterate over brokers. */ - rapidjson::Pointer jpath((const char *)"/brokers"); - rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath); - if (pp == NULL) - return; - - for (rapidjson::Value::ConstMemberIterator itr = pp->MemberBegin(); itr != pp->MemberEnd(); ++itr) { - std::string broker_name = itr->name.GetString(); - size_t broker_id_idx = broker_name.rfind('/'); - if (broker_id_idx == (size_t)-1) - continue; - std::string broker_id = broker_name.substr(broker_id_idx + 1, broker_name.size() - broker_id_idx - 1); - - int64_t broker_rxbytes = itr->value.FindMember("rxbytes")->value.GetInt64(); - rxbytes[atoi(broker_id.c_str())] = broker_rxbytes; - } - - has_captured_stats = true; - break; + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_LOG: + Test::Say(event.str() + "\n"); + break; + case RdKafka::Event::EVENT_STATS: + if (should_capture_stats) { + rapidjson::Document d; + if (d.Parse(event.str().c_str()).HasParseError()) + Test::Fail(tostr() << "Failed to parse stats JSON: " + << rapidjson::GetParseError_En(d.GetParseError()) + << " at " << d.GetErrorOffset()); + + /* iterate over brokers. 
*/ + rapidjson::Pointer jpath((const char *)"/brokers"); + rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath); + if (pp == NULL) + return; + + for (rapidjson::Value::ConstMemberIterator itr = pp->MemberBegin(); + itr != pp->MemberEnd(); ++itr) { + std::string broker_name = itr->name.GetString(); + size_t broker_id_idx = broker_name.rfind('/'); + if (broker_id_idx == (size_t)-1) + continue; + std::string broker_id = broker_name.substr( + broker_id_idx + 1, broker_name.size() - broker_id_idx - 1); + + int64_t broker_rxbytes = + itr->value.FindMember("rxbytes")->value.GetInt64(); + rxbytes[atoi(broker_id.c_str())] = broker_rxbytes; } - default: + + has_captured_stats = true; break; + } + default: + break; } } }; @@ -122,7 +122,9 @@ std::map TestEvent2Cb::rxbytes; static TestEvent2Cb ex_event_cb; -static void get_brokers_info (std::string &topic_str, int32_t *leader, std::vector &brokers) { +static void get_brokers_info(std::string &topic_str, + int32_t *leader, + std::vector &brokers) { std::string errstr; RdKafka::ErrorCode err; class RdKafka::Metadata *metadata; @@ -140,25 +142,27 @@ static void get_brokers_info (std::string &topic_str, int32_t *leader, std::vect test_assert(topic, tostr() << "Failed to create topic: " << errstr); err = p->metadata(0, topic, &metadata, tmout_multip(5000)); - test_assert(err == RdKafka::ERR_NO_ERROR, - tostr() << "%% Failed to acquire metadata: " - << RdKafka::err2str(err)); + test_assert( + err == RdKafka::ERR_NO_ERROR, + tostr() << "%% Failed to acquire metadata: " << RdKafka::err2str(err)); test_assert(metadata->topics()->size() == 1, - tostr() << "expecting metadata for exactly one topic. " - << "have metadata for " << metadata->topics()->size() - << "topics"); + tostr() << "expecting metadata for exactly one topic. 
" + << "have metadata for " << metadata->topics()->size() + << "topics"); - RdKafka::Metadata::TopicMetadataIterator topicMetadata = metadata->topics()->begin(); - RdKafka::TopicMetadata::PartitionMetadataIterator partitionMetadata = (*topicMetadata)->partitions()->begin(); + RdKafka::Metadata::TopicMetadataIterator topicMetadata = + metadata->topics()->begin(); + RdKafka::TopicMetadata::PartitionMetadataIterator partitionMetadata = + (*topicMetadata)->partitions()->begin(); *leader = (*partitionMetadata)->leader(); size_t idx = 0; RdKafka::PartitionMetadata::ReplicasIterator replicasIterator; for (replicasIterator = (*partitionMetadata)->replicas()->begin(); - replicasIterator != (*partitionMetadata)->replicas()->end(); - ++replicasIterator) { + replicasIterator != (*partitionMetadata)->replicas()->end(); + ++replicasIterator) { brokers.push_back(*replicasIterator); idx++; } @@ -173,32 +177,30 @@ static void get_brokers_info (std::string &topic_str, int32_t *leader, std::vect * @brief Wait for up to \p tmout for any type of admin result. 
* @returns the event */ -rd_kafka_event_t * -test_wait_admin_result (rd_kafka_queue_t *q, - rd_kafka_event_type_t evtype, - int tmout) { +rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q, + rd_kafka_event_type_t evtype, + int tmout) { rd_kafka_event_t *rkev; while (1) { rkev = rd_kafka_queue_poll(q, tmout); if (!rkev) - Test::Fail(tostr() << "Timed out waiting for admin result (" - << evtype << ")\n"); + Test::Fail(tostr() << "Timed out waiting for admin result (" << evtype + << ")\n"); if (rd_kafka_event_type(rkev) == evtype) return rkev; if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_ERROR) { - Test::Say(tostr() << "Received error event while waiting for " - << evtype << ": " - << rd_kafka_event_error_string(rkev) + Test::Say(tostr() << "Received error event while waiting for " << evtype + << ": " << rd_kafka_event_error_string(rkev) << ": ignoring"); continue; } test_assert(rd_kafka_event_type(rkev) == evtype, - tostr() << "Expected event type " << evtype - << ", got " << rd_kafka_event_type(rkev) << " (" + tostr() << "Expected event type " << evtype << ", got " + << rd_kafka_event_type(rkev) << " (" << rd_kafka_event_name(rkev) << ")"); } @@ -209,8 +211,7 @@ test_wait_admin_result (rd_kafka_queue_t *q, /** * @returns the number of broker.rack values configured across all brokers. 
*/ -static int get_broker_rack_count (std::vector &replica_ids) -{ +static int get_broker_rack_count(std::vector &replica_ids) { std::string errstr; RdKafka::Conf *pConf; Test::conf_init(&pConf, NULL, 10); @@ -220,44 +221,53 @@ static int get_broker_rack_count (std::vector &replica_ids) rd_kafka_queue_t *mainq = rd_kafka_queue_get_main(p->c_ptr()); std::set racks; - for (size_t i=0; ic_ptr(), RD_KAFKA_ADMIN_OP_ANY); - rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout(options, 10000, cerrstr, sizeof(cerrstr)); + rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( + options, 10000, cerrstr, sizeof(cerrstr)); test_assert(!err, cerrstr); rd_kafka_DescribeConfigs(p->c_ptr(), &config, 1, options, mainq); rd_kafka_AdminOptions_destroy(options); - rd_kafka_event_t *rkev = test_wait_admin_result(mainq, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 5000); + rd_kafka_event_t *rkev = test_wait_admin_result( + mainq, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 5000); - const rd_kafka_DescribeConfigs_result_t *res = rd_kafka_event_DescribeConfigs_result(rkev); + const rd_kafka_DescribeConfigs_result_t *res = + rd_kafka_event_DescribeConfigs_result(rkev); test_assert(res, "expecting describe config results to be not NULL"); - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); const char *errstr2 = rd_kafka_event_error_string(rkev); - test_assert(!err, tostr() << "Expected success, not " << rd_kafka_err2name(err) << ": " << errstr2); + test_assert(!err, tostr() << "Expected success, not " + << rd_kafka_err2name(err) << ": " << errstr2); size_t rconfig_cnt; - const rd_kafka_ConfigResource_t **rconfigs = rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt); - test_assert(rconfig_cnt == 1, tostr() << "Expecting 1 resource, got " << rconfig_cnt); + const rd_kafka_ConfigResource_t **rconfigs = + rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt); + test_assert(rconfig_cnt == 1, + tostr() << "Expecting 1 resource, got " << 
rconfig_cnt); - err = rd_kafka_ConfigResource_error(rconfigs[0]); + err = rd_kafka_ConfigResource_error(rconfigs[0]); errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[0]); size_t entry_cnt; - const rd_kafka_ConfigEntry_t **entries = rd_kafka_ConfigResource_configs(rconfigs[0], &entry_cnt); + const rd_kafka_ConfigEntry_t **entries = + rd_kafka_ConfigResource_configs(rconfigs[0], &entry_cnt); - for (size_t j = 0; j &replica_ids) } -static void do_fff_test (void) { - +static void do_fff_test(void) { /* Produce some messages to a single partition topic * with 3 replicas. */ - int msgcnt = 1000; - const int msgsize = 100; + int msgcnt = 1000; + const int msgsize = 100; std::string topic_str = Test::mk_topic_name("0101-fetch-from-follower", 1); test_create_topic(NULL, topic_str.c_str(), 1, 3); test_produce_msgs_easy_size(topic_str.c_str(), 0, 0, msgcnt, msgsize); @@ -285,23 +294,28 @@ static void do_fff_test (void) { int leader_id; std::vector replica_ids; get_brokers_info(topic_str, &leader_id, replica_ids); - test_assert(replica_ids.size() == 3, tostr() << "expecting three replicas, but " << replica_ids.size() << " were reported."); - Test::Say(tostr() << topic_str << " leader id: " << leader_id << ", all replica ids: [" << replica_ids[0] << ", " << replica_ids[1] << ", " << replica_ids[2] << "]\n"); + test_assert(replica_ids.size() == 3, + tostr() << "expecting three replicas, but " << replica_ids.size() + << " were reported."); + Test::Say(tostr() << topic_str << " leader id: " << leader_id + << ", all replica ids: [" << replica_ids[0] << ", " + << replica_ids[1] << ", " << replica_ids[2] << "]\n"); if (get_broker_rack_count(replica_ids) != 3) { Test::Skip("unexpected broker.rack configuration: skipping test.\n"); } - /* arrange for the consumer's client.rack to align with a broker that is not the leader. */ + /* arrange for the consumer's client.rack to align with a broker that is not + * the leader. 
*/ int client_rack_id = -1; size_t i; - for (i=0; iconsume(tmout_multip(1000)); - switch (msg->err()) - { - case RdKafka::ERR__TIMED_OUT: - break; - - case RdKafka::ERR_NO_ERROR: - { - test_assert(msg->len() == 100, "expecting message value size to be 100"); - char *cnt_str_start_ptr = strstr((char *)msg->payload(), "msg=") + 4; - test_assert(cnt_str_start_ptr, "expecting 'msg=' in message payload"); - char *cnt_str_end_ptr = strstr(cnt_str_start_ptr, "\n"); - test_assert(cnt_str_start_ptr, "expecting '\n' following 'msg=' in message payload"); - *cnt_str_end_ptr = '\0'; - int msg_cnt = atoi(cnt_str_start_ptr); - test_assert(msg_cnt == cnt, "message consumed out of order"); - cnt++; - } - break; + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + break; - default: - Test::Fail("Consume error: " + msg->errstr()); - break; - } + case RdKafka::ERR_NO_ERROR: { + test_assert(msg->len() == 100, "expecting message value size to be 100"); + char *cnt_str_start_ptr = strstr((char *)msg->payload(), "msg=") + 4; + test_assert(cnt_str_start_ptr, "expecting 'msg=' in message payload"); + char *cnt_str_end_ptr = strstr(cnt_str_start_ptr, "\n"); + test_assert(cnt_str_start_ptr, + "expecting '\n' following 'msg=' in message payload"); + *cnt_str_end_ptr = '\0'; + int msg_cnt = atoi(cnt_str_start_ptr); + test_assert(msg_cnt == cnt, "message consumed out of order"); + cnt++; + } break; + + default: + Test::Fail("Consume error: " + msg->errstr()); + break; + } delete msg; } @@ -369,39 +381,50 @@ static void do_fff_test (void) { delete msg; } - for (i=0; i msgcnt * msgsize, - tostr() << "expecting rxbytes of client.rack broker to be at least " << msgcnt * msgsize - << " but it was " << TestEvent2Cb::rxbytes[client_rack_id]); + test_assert( + TestEvent2Cb::rxbytes[client_rack_id] > msgcnt * msgsize, + tostr() << "expecting rxbytes of client.rack broker to be at least " + << msgcnt * msgsize << " but it was " + << TestEvent2Cb::rxbytes[client_rack_id]); Test::Say("Done\n"); // 
Manual test 1: - // - change the lease period from 5 minutes to 5 seconds (modify rdkafka_partition.c) - // - change the max lease grant period from 1 minute to 10 seconds (modify rdkafka_broker.c) + // - change the lease period from 5 minutes to 5 seconds (modify + // rdkafka_partition.c) + // - change the max lease grant period from 1 minute to 10 seconds (modify + // rdkafka_broker.c) // - add infinite consume loop to the end of this test. // - observe: // - the partition gets delegated to the preferred replica. // - the messages get consumed. // - the lease expires. // - the partition is reverted to the leader. - // - the toppar is backed off, and debug message noting the faster than expected delegation to a replica. + // - the toppar is backed off, and debug message noting the faster than + // expected delegation to a replica. // Manual test 2: // - same modifications as above. // - add Test::conf_set(conf, "topic.metadata.refresh.interval.ms", "3000"); // - observe: - // - that metadata being periodically received and not interfering with anything. + // - that metadata being periodically received and not interfering with + // anything. 
c->close(); delete c; @@ -409,12 +432,12 @@ static void do_fff_test (void) { #endif extern "C" { -int main_0101_fetch_from_follower (int argc, char **argv) { +int main_0101_fetch_from_follower(int argc, char **argv) { #if WITH_RAPIDJSON - do_fff_test(); + do_fff_test(); #else - Test::Skip("RapidJSON >=1.1.0 not available\n"); + Test::Skip("RapidJSON >=1.1.0 not available\n"); #endif - return 0; - } + return 0; +} } diff --git a/tests/0102-static_group_rebalance.c b/tests/0102-static_group_rebalance.c index 08d45d3005..1465f99efe 100644 --- a/tests/0102-static_group_rebalance.c +++ b/tests/0102-static_group_rebalance.c @@ -52,16 +52,17 @@ typedef struct _consumer_s { /** * @brief Call poll until a rebalance has been triggered */ -static int static_member_wait_rebalance0 (int line, - _consumer_t *c, int64_t start, - int64_t *target, int timeout_ms) { +static int static_member_wait_rebalance0(int line, + _consumer_t *c, + int64_t start, + int64_t *target, + int timeout_ms) { int64_t tmout = test_clock() + (timeout_ms * 1000); test_timing_t t_time; c->curr_line = line; - TEST_SAY("line %d: %s awaiting %s event\n", - line, rd_kafka_name(c->rk), + TEST_SAY("line %d: %s awaiting %s event\n", line, rd_kafka_name(c->rk), rd_kafka_err2name(c->expected_rb_event)); TIMING_START(&t_time, "wait_rebalance"); @@ -76,29 +77,29 @@ static int static_member_wait_rebalance0 (int line, c->curr_line = 0; - TEST_SAY("line %d: %s timed out awaiting %s event\n", - line, rd_kafka_name(c->rk), - rd_kafka_err2name(c->expected_rb_event)); + TEST_SAY("line %d: %s timed out awaiting %s event\n", line, + rd_kafka_name(c->rk), rd_kafka_err2name(c->expected_rb_event)); return 0; } -#define static_member_expect_rebalance(C,START,TARGET,TIMEOUT_MS) do { \ - if (!static_member_wait_rebalance0(__LINE__,C, \ - START,TARGET,TIMEOUT_MS)) \ - TEST_FAIL("%s: timed out waiting for %s event", \ - rd_kafka_name((C)->rk), \ - rd_kafka_err2name((C)->expected_rb_event)); \ +#define 
static_member_expect_rebalance(C, START, TARGET, TIMEOUT_MS) \ + do { \ + if (!static_member_wait_rebalance0(__LINE__, C, START, TARGET, \ + TIMEOUT_MS)) \ + TEST_FAIL("%s: timed out waiting for %s event", \ + rd_kafka_name((C)->rk), \ + rd_kafka_err2name((C)->expected_rb_event)); \ } while (0) -#define static_member_wait_rebalance(C,START,TARGET,TIMEOUT_MS) \ - static_member_wait_rebalance0(__LINE__,C, START,TARGET,TIMEOUT_MS) +#define static_member_wait_rebalance(C, START, TARGET, TIMEOUT_MS) \ + static_member_wait_rebalance0(__LINE__, C, START, TARGET, TIMEOUT_MS) -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { _consumer_t *c = opaque; TEST_ASSERT(c->expected_rb_event == err, @@ -107,15 +108,14 @@ static void rebalance_cb (rd_kafka_t *rk, rd_kafka_err2name(c->expected_rb_event), rd_kafka_err2name(err)); - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: TEST_SAY("line %d: %s Assignment (%d partition(s)):\n", c->curr_line, rd_kafka_name(rk), parts->cnt); test_print_partition_list(parts); c->partition_cnt = parts->cnt; - c->assigned_at = test_clock(); + c->assigned_at = test_clock(); rd_kafka_assign(rk, parts); break; @@ -123,8 +123,8 @@ static void rebalance_cb (rd_kafka_t *rk, case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: c->revoked_at = test_clock(); rd_kafka_assign(rk, NULL); - TEST_SAY("line %d: %s revoked %d partitions\n", - c->curr_line, rd_kafka_name(c->rk), parts->cnt); + TEST_SAY("line %d: %s revoked %d partitions\n", c->curr_line, + rd_kafka_name(c->rk), parts->cnt); break; @@ -141,15 +141,15 @@ static void rebalance_cb (rd_kafka_t *rk, } -static void do_test_static_group_rebalance (void) { +static void do_test_static_group_rebalance(void) { rd_kafka_conf_t *conf; test_msgver_t mv; int64_t rebalance_start; _consumer_t 
c[_CONSUMER_CNT] = RD_ZERO_INIT; - const int msgcnt = 100; - uint64_t testid = test_id_generate(); - const char *topic = test_mk_topic_name("0102_static_group_rebalance", - 1); + const int msgcnt = 100; + uint64_t testid = test_id_generate(); + const char *topic = + test_mk_topic_name("0102_static_group_rebalance", 1); char *topics = rd_strdup(tsprintf("^%s.*", topic)); test_timing_t t_close; @@ -193,7 +193,7 @@ static void do_test_static_group_rebalance (void) { * interleave calls to poll while awaiting our assignment to avoid * unexpected rebalances being triggered. */ - rebalance_start = test_clock(); + rebalance_start = test_clock(); c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; while (!static_member_wait_rebalance(&c[0], rebalance_start, @@ -213,11 +213,11 @@ static void do_test_static_group_rebalance (void) { * after rejoin/rebalance operations. */ c[0].curr_line = __LINE__; - test_consumer_poll("serve.queue", - c[0].rk, testid, c[0].partition_cnt, 0, -1, &mv); + test_consumer_poll("serve.queue", c[0].rk, testid, c[0].partition_cnt, + 0, -1, &mv); c[1].curr_line = __LINE__; - test_consumer_poll("serve.queue", - c[1].rk, testid, c[1].partition_cnt, 0, -1, &mv); + test_consumer_poll("serve.queue", c[1].rk, testid, c[1].partition_cnt, + 0, -1, &mv); test_msgver_verify("first.verify", &mv, TEST_MSGVER_ALL, 0, msgcnt); @@ -237,7 +237,7 @@ static void do_test_static_group_rebalance (void) { /* Await assignment */ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; - rebalance_start = test_clock(); + rebalance_start = test_clock(); while (!static_member_wait_rebalance(&c[1], rebalance_start, &c[1].assigned_at, 1000)) { c[0].curr_line = __LINE__; @@ -258,7 +258,7 @@ static void do_test_static_group_rebalance (void) { test_create_topic(c->rk, tsprintf("%snew", topic), 1, 1); /* Await revocation */ - rebalance_start = test_clock(); + rebalance_start = test_clock(); 
c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; while (!static_member_wait_rebalance(&c[0], rebalance_start, @@ -267,8 +267,8 @@ static void do_test_static_group_rebalance (void) { test_consumer_poll_once(c[1].rk, &mv, 0); } - static_member_expect_rebalance(&c[1], rebalance_start, - &c[1].revoked_at, -1); + static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at, + -1); /* Await assignment */ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; @@ -293,10 +293,10 @@ static void do_test_static_group_rebalance (void) { /* Await revocation */ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; - static_member_expect_rebalance(&c[1], rebalance_start, - &c[1].revoked_at, -1); - static_member_expect_rebalance(&c[0], rebalance_start, - &c[0].revoked_at, -1); + static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at, + -1); + static_member_expect_rebalance(&c[0], rebalance_start, &c[0].revoked_at, + -1); /* New cgrp generation with 1 member, c[0] */ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; @@ -309,8 +309,8 @@ static void do_test_static_group_rebalance (void) { /* End previous single member generation */ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; - static_member_expect_rebalance(&c[0], rebalance_start, - &c[0].revoked_at, -1); + static_member_expect_rebalance(&c[0], rebalance_start, &c[0].revoked_at, + -1); /* Await assignment */ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; @@ -331,10 +331,10 @@ static void do_test_static_group_rebalance (void) { * Block long enough for consumer 2 to be evicted from the group * `max.poll.interval.ms` + `session.timeout.ms` */ - rebalance_start = test_clock(); + rebalance_start = test_clock(); c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; c[0].expected_rb_event = 
RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; - c[0].curr_line = __LINE__; + c[0].curr_line = __LINE__; test_consumer_poll_no_msgs("wait.max.poll", c[0].rk, testid, 6000 + 9000); c[1].curr_line = __LINE__; @@ -348,8 +348,8 @@ static void do_test_static_group_rebalance (void) { test_consumer_poll_once(c[1].rk, &mv, 0); } - static_member_expect_rebalance(&c[1], rebalance_start, - &c[1].revoked_at, -1); + static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at, + -1); /* Await assignment */ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; @@ -365,15 +365,15 @@ static void do_test_static_group_rebalance (void) { TEST_SAY("== Testing `session.timeout.ms` member eviction ==\n"); - rebalance_start = test_clock(); + rebalance_start = test_clock(); c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; TIMING_START(&t_close, "consumer close"); test_consumer_close(c[0].rk); rd_kafka_destroy(c[0].rk); c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; - static_member_expect_rebalance(&c[1], rebalance_start, - &c[1].revoked_at, 2*7000); + static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at, + 2 * 7000); c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; static_member_expect_rebalance(&c[1], rebalance_start, @@ -384,14 +384,13 @@ static void do_test_static_group_rebalance (void) { * the last Heartbeat or SyncGroup request was sent we need to * allow some leeway on the minimum side (4s), and also some on * the maximum side (1s) for slow runtimes. 
*/ - TIMING_ASSERT(&t_close, 6000-4000, 9000+1000); + TIMING_ASSERT(&t_close, 6000 - 4000, 9000 + 1000); c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; test_consumer_close(c[1].rk); rd_kafka_destroy(c[1].rk); - test_msgver_verify("final.validation", &mv, TEST_MSGVER_ALL, 0, - msgcnt); + test_msgver_verify("final.validation", &mv, TEST_MSGVER_ALL, 0, msgcnt); test_msgver_clear(&mv); free(topics); @@ -402,7 +401,7 @@ static void do_test_static_group_rebalance (void) { /** * @brief Await a non-empty assignment for all consumers in \p c */ -static void await_assignment_multi (const char *what, rd_kafka_t **c, int cnt) { +static void await_assignment_multi(const char *what, rd_kafka_t **c, int cnt) { rd_kafka_topic_partition_list_t *parts; int assignment_cnt; @@ -414,7 +413,7 @@ static void await_assignment_multi (const char *what, rd_kafka_t **c, int cnt) { assignment_cnt = 0; - for (i = 0 ; i < cnt ; i++) { + for (i = 0; i < cnt; i++) { test_consumer_poll_no_msgs("poll", c[i], 0, timeout_ms); timeout_ms = 100; @@ -435,19 +434,19 @@ static const rd_kafka_t *valid_fatal_rk; /** * @brief Tells test harness that fatal error should not fail the current test */ -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { return rk != valid_fatal_rk; } /** * @brief Test that consumer fencing raises a fatal error */ -static void do_test_fenced_member (void) { +static void do_test_fenced_member(void) { rd_kafka_t *c[3]; /* 0: consumer2b, 1: consumer1, 2: consumer2a */ rd_kafka_conf_t *conf; - const char *topic = test_mk_topic_name("0102_static_group_rebalance", - 1); + const char *topic = + test_mk_topic_name("0102_static_group_rebalance", 1); rd_kafka_message_t *rkm; char errstr[512]; rd_kafka_resp_err_t err; @@ -491,17 +490,15 @@ static void do_test_fenced_member (void) { TEST_ASSERT(rkm != NULL, "Expected error, not timeout"); 
TEST_ASSERT(rkm->err == RD_KAFKA_RESP_ERR__FATAL, "Expected ERR__FATAL, not %s: %s", - rd_kafka_err2str(rkm->err), - rd_kafka_message_errstr(rkm)); + rd_kafka_err2str(rkm->err), rd_kafka_message_errstr(rkm)); TEST_SAY("Fenced consumer returned expected: %s: %s\n", - rd_kafka_err2name(rkm->err), - rd_kafka_message_errstr(rkm)); + rd_kafka_err2name(rkm->err), rd_kafka_message_errstr(rkm)); /* Read the actual error */ err = rd_kafka_fatal_error(c[2], errstr, sizeof(errstr)); - TEST_SAY("%s fatal error: %s: %s\n", - rd_kafka_name(c[2]), rd_kafka_err2name(err), errstr); + TEST_SAY("%s fatal error: %s: %s\n", rd_kafka_name(c[2]), + rd_kafka_err2name(err), errstr); TEST_ASSERT(err == RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, "Expected ERR_FENCED_INSTANCE_ID as fatal error, not %s", rd_kafka_err2name(err)); @@ -527,7 +524,7 @@ static void do_test_fenced_member (void) { -int main_0102_static_group_rebalance (int argc, char **argv) { +int main_0102_static_group_rebalance(int argc, char **argv) { do_test_static_group_rebalance(); diff --git a/tests/0103-transactions.c b/tests/0103-transactions.c index cffc224b46..1b6e1e1a7a 100644 --- a/tests/0103-transactions.c +++ b/tests/0103-transactions.c @@ -39,29 +39,32 @@ /** * @brief Produce messages using batch interface. 
*/ -void do_produce_batch (rd_kafka_t *rk, const char *topic, uint64_t testid, - int32_t partition, int msg_base, int cnt) { +void do_produce_batch(rd_kafka_t *rk, + const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt) { rd_kafka_message_t *messages; rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, topic, NULL); int i; int ret; int remains = cnt; - TEST_SAY("Batch-producing %d messages to partition %"PRId32"\n", - cnt, partition); + TEST_SAY("Batch-producing %d messages to partition %" PRId32 "\n", cnt, + partition); messages = rd_calloc(sizeof(*messages), cnt); - for (i = 0 ; i < cnt ; i++) { + for (i = 0; i < cnt; i++) { char key[128]; char value[128]; - test_prepare_msg(testid, partition, msg_base + i, - value, sizeof(value), - key, sizeof(key)); - messages[i].key = rd_strdup(key); - messages[i].key_len = strlen(key); - messages[i].payload = rd_strdup(value); - messages[i].len = strlen(value); + test_prepare_msg(testid, partition, msg_base + i, value, + sizeof(value), key, sizeof(key)); + messages[i].key = rd_strdup(key); + messages[i].key_len = strlen(key); + messages[i].payload = rd_strdup(value); + messages[i].len = strlen(value); messages[i]._private = &remains; } @@ -71,12 +74,11 @@ void do_produce_batch (rd_kafka_t *rk, const char *topic, uint64_t testid, rd_kafka_topic_destroy(rkt); TEST_ASSERT(ret == cnt, - "Failed to batch-produce: %d/%d messages produced", - ret, cnt); + "Failed to batch-produce: %d/%d messages produced", ret, + cnt); - for (i = 0 ; i < cnt ; i++) { - TEST_ASSERT(!messages[i].err, - "Failed to produce message: %s", + for (i = 0; i < cnt; i++) { + TEST_ASSERT(!messages[i].err, "Failed to produce message: %s", rd_kafka_err2str(messages[i].err)); rd_free(messages[i].key); rd_free(messages[i].payload); @@ -94,8 +96,8 @@ void do_produce_batch (rd_kafka_t *rk, const char *topic, uint64_t testid, * (only consumed output for verification). * e.g., no consumer offsets to commit with transaction. 
*/ -static void do_test_basic_producer_txn (rd_bool_t enable_compression) { - const char *topic = test_mk_topic_name("0103_transactions", 1); +static void do_test_basic_producer_txn(rd_bool_t enable_compression) { + const char *topic = test_mk_topic_name("0103_transactions", 1); const int partition_cnt = 4; #define _TXNCNT 6 struct { @@ -107,18 +109,15 @@ static void do_test_basic_producer_txn (rd_bool_t enable_compression) { rd_bool_t batch; rd_bool_t batch_any; } txn[_TXNCNT] = { - { "Commit transaction, sync producing", - 0, 100, rd_false, rd_true }, - { "Commit transaction, async producing", - 0, 1000, rd_false, rd_false }, - { "Commit transaction, sync batch producing to any partition", - 0, 100, rd_false, rd_true, rd_true, rd_true }, - { "Abort transaction, sync producing", - 0, 500, rd_true, rd_true }, - { "Abort transaction, async producing", - 0, 5000, rd_true, rd_false }, - { "Abort transaction, sync batch producing to one partition", - 0, 500, rd_true, rd_true, rd_true, rd_false }, + {"Commit transaction, sync producing", 0, 100, rd_false, rd_true}, + {"Commit transaction, async producing", 0, 1000, rd_false, + rd_false}, + {"Commit transaction, sync batch producing to any partition", 0, + 100, rd_false, rd_true, rd_true, rd_true}, + {"Abort transaction, sync producing", 0, 500, rd_true, rd_true}, + {"Abort transaction, async producing", 0, 5000, rd_true, rd_false}, + {"Abort transaction, sync batch producing to one partition", 0, 500, + rd_true, rd_true, rd_true, rd_false}, }; rd_kafka_t *p, *c; @@ -127,8 +126,8 @@ static void do_test_basic_producer_txn (rd_bool_t enable_compression) { /* Mark one of run modes as quick so we don't run both when * in a hurry.*/ - SUB_TEST0(enable_compression /* quick */, - "with%s compression", enable_compression ? "" : "out"); + SUB_TEST0(enable_compression /* quick */, "with%s compression", + enable_compression ? 
"" : "out"); test_conf_init(&conf, NULL, 30); @@ -149,10 +148,10 @@ static void do_test_basic_producer_txn (rd_bool_t enable_compression) { c_conf = conf; test_conf_set(conf, "auto.offset.reset", "earliest"); /* Make sure default isolation.level is transaction aware */ - TEST_ASSERT(!strcmp(test_conf_get(c_conf, "isolation.level"), - "read_committed"), - "expected isolation.level=read_committed, not %s", - test_conf_get(c_conf, "isolation.level")); + TEST_ASSERT( + !strcmp(test_conf_get(c_conf, "isolation.level"), "read_committed"), + "expected isolation.level=read_committed, not %s", + test_conf_get(c_conf, "isolation.level")); c = test_create_consumer(topic, NULL, c_conf, NULL); @@ -168,13 +167,13 @@ static void do_test_basic_producer_txn (rd_bool_t enable_compression) { test_consumer_wait_assignment(c, rd_true); /* Init transactions */ - TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30*1000)); + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000)); - for (i = 0 ; i < _TXNCNT ; i++) { + for (i = 0; i < _TXNCNT; i++) { int wait_msgcnt = 0; - TEST_SAY(_C_BLU "txn[%d]: Begin transaction: %s\n" _C_CLR, - i, txn[i].desc); + TEST_SAY(_C_BLU "txn[%d]: Begin transaction: %s\n" _C_CLR, i, + txn[i].desc); /* Begin a transaction */ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); @@ -186,11 +185,10 @@ static void do_test_basic_producer_txn (rd_bool_t enable_compression) { /* Produce messages */ txn[i].testid = test_id_generate(); - TEST_SAY("txn[%d]: Produce %d messages %ssynchronously " - "with testid %"PRIu64"\n", - i, txn[i].msgcnt, - txn[i].sync ? "" : "a", - txn[i].testid); + TEST_SAY( + "txn[%d]: Produce %d messages %ssynchronously " + "with testid %" PRIu64 "\n", + i, txn[i].msgcnt, txn[i].sync ? 
"" : "a", txn[i].testid); if (!txn[i].batch) { if (txn[i].sync) @@ -198,37 +196,33 @@ static void do_test_basic_producer_txn (rd_bool_t enable_compression) { RD_KAFKA_PARTITION_UA, 0, txn[i].msgcnt, NULL, 0); else - test_produce_msgs2_nowait(p, topic, - txn[i].testid, - RD_KAFKA_PARTITION_UA, - 0, - txn[i].msgcnt, - NULL, 0, - &wait_msgcnt); + test_produce_msgs2_nowait( + p, topic, txn[i].testid, + RD_KAFKA_PARTITION_UA, 0, txn[i].msgcnt, + NULL, 0, &wait_msgcnt); } else if (txn[i].batch_any) { /* Batch: use any partition */ do_produce_batch(p, topic, txn[i].testid, - RD_KAFKA_PARTITION_UA, - 0, txn[i].msgcnt); + RD_KAFKA_PARTITION_UA, 0, + txn[i].msgcnt); } else { /* Batch: specific partition */ do_produce_batch(p, topic, txn[i].testid, - 1 /* partition */, - 0, txn[i].msgcnt); + 1 /* partition */, 0, txn[i].msgcnt); } /* Abort or commit transaction */ - TEST_SAY("txn[%d]: %s" _C_CLR " transaction\n", - i, txn[i].abort ? _C_RED "Abort" : _C_GRN "Commit"); + TEST_SAY("txn[%d]: %s" _C_CLR " transaction\n", i, + txn[i].abort ? 
_C_RED "Abort" : _C_GRN "Commit"); if (txn[i].abort) { test_curr->ignore_dr_err = rd_true; - TEST_CALL_ERROR__(rd_kafka_abort_transaction(p, - 30*1000)); + TEST_CALL_ERROR__( + rd_kafka_abort_transaction(p, 30 * 1000)); } else { test_curr->ignore_dr_err = rd_false; - TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, - 30*1000)); + TEST_CALL_ERROR__( + rd_kafka_commit_transaction(p, 30 * 1000)); } if (!txn[i].sync) @@ -240,9 +234,9 @@ static void do_test_basic_producer_txn (rd_bool_t enable_compression) { test_consumer_poll_no_msgs(txn[i].desc, c, txn[i].testid, 3000); else - test_consumer_poll(txn[i].desc, c, - txn[i].testid, partition_cnt, 0, - txn[i].msgcnt, NULL); + test_consumer_poll(txn[i].desc, c, txn[i].testid, + partition_cnt, 0, txn[i].msgcnt, + NULL); TEST_SAY(_C_GRN "txn[%d]: Finished successfully: %s\n" _C_CLR, i, txn[i].desc); @@ -261,8 +255,8 @@ static void do_test_basic_producer_txn (rd_bool_t enable_compression) { * @brief Consumes \p cnt messages and returns them in the provided array * which must be pre-allocated. 
*/ -static void consume_messages (rd_kafka_t *c, - rd_kafka_message_t **msgs, int msgcnt) { +static void +consume_messages(rd_kafka_t *c, rd_kafka_message_t **msgs, int msgcnt) { int i = 0; while (i < msgcnt) { msgs[i] = rd_kafka_consumer_poll(c, 1000); @@ -270,16 +264,14 @@ static void consume_messages (rd_kafka_t *c, continue; if (msgs[i]->err) { - TEST_SAY("%s consumer error: %s\n", - rd_kafka_name(c), + TEST_SAY("%s consumer error: %s\n", rd_kafka_name(c), rd_kafka_message_errstr(msgs[i])); rd_kafka_message_destroy(msgs[i]); continue; } - TEST_SAYL(3, "%s: consumed message %s [%d] @ %"PRId64"\n", - rd_kafka_name(c), - rd_kafka_topic_name(msgs[i]->rkt), + TEST_SAYL(3, "%s: consumed message %s [%d] @ %" PRId64 "\n", + rd_kafka_name(c), rd_kafka_topic_name(msgs[i]->rkt), msgs[i]->partition, msgs[i]->offset); @@ -287,7 +279,7 @@ static void consume_messages (rd_kafka_t *c, } } -static void destroy_messages (rd_kafka_message_t **msgs, int msgcnt) { +static void destroy_messages(rd_kafka_message_t **msgs, int msgcnt) { while (msgcnt-- > 0) rd_kafka_message_destroy(msgs[msgcnt]); } @@ -304,11 +296,11 @@ static void destroy_messages (rd_kafka_message_t **msgs, int msgcnt) { * * Every 3rd transaction is aborted. 
*/ -void do_test_consumer_producer_txn (void) { +void do_test_consumer_producer_txn(void) { char *input_topic = - rd_strdup(test_mk_topic_name("0103-transactions-input", 1)); + rd_strdup(test_mk_topic_name("0103-transactions-input", 1)); char *output_topic = - rd_strdup(test_mk_topic_name("0103-transactions-output", 1)); + rd_strdup(test_mk_topic_name("0103-transactions-output", 1)); const char *c1_groupid = input_topic; const char *c2_groupid = output_topic; rd_kafka_t *p1, *p2, *c1, *c2; @@ -359,11 +351,11 @@ void do_test_consumer_producer_txn (void) { test_create_topic(p1, output_topic, 4, 3); /* Seed input topic with messages */ - TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30*1000)); + TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30 * 1000)); TEST_CALL_ERROR__(rd_kafka_begin_transaction(p1)); - test_produce_msgs2(p1, input_topic, testid, RD_KAFKA_PARTITION_UA, - 0, msgcnt, NULL, 0); - TEST_CALL_ERROR__(rd_kafka_commit_transaction(p1, 30*1000)); + test_produce_msgs2(p1, input_topic, testid, RD_KAFKA_PARTITION_UA, 0, + msgcnt, NULL, 0); + TEST_CALL_ERROR__(rd_kafka_commit_transaction(p1, 30 * 1000)); rd_kafka_destroy(p1); @@ -373,7 +365,7 @@ void do_test_consumer_producer_txn (void) { test_conf_set(tmpconf, "auto.offset.reset", "earliest"); test_conf_set(tmpconf, "enable.auto.commit", "false"); c1_conf = rd_kafka_conf_dup(tmpconf); - c1 = test_create_consumer(c1_groupid, NULL, tmpconf, NULL); + c1 = test_create_consumer(c1_groupid, NULL, tmpconf, NULL); test_consumer_subscribe(c1, input_topic); /* Create Producer 2 */ @@ -381,7 +373,7 @@ void do_test_consumer_producer_txn (void) { test_conf_set(tmpconf, "transactional.id", output_topic); rd_kafka_conf_set_dr_msg_cb(tmpconf, test_dr_msg_cb); p2 = test_create_handle(RD_KAFKA_PRODUCER, tmpconf); - TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30*1000)); + TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30 * 1000)); /* Create Consumer 2: reading msgs from output_topic (Producer 2) */ tmpconf = 
rd_kafka_conf_dup(conf); @@ -395,28 +387,28 @@ void do_test_consumer_producer_txn (void) { /* Keep track of what messages to expect on the output topic */ test_msgver_init(&expect_mv, testid); - for (txn = 0 ; txn < txncnt ; txn++) { + for (txn = 0; txn < txncnt; txn++) { int msgcnt2 = 10 * (1 + (txn % 3)); rd_kafka_message_t *msgs[_MSGCNT]; int i; - rd_bool_t do_abort = !(txn % 3); + rd_bool_t do_abort = !(txn % 3); rd_bool_t recreate_consumer = do_abort && txn == 3; rd_kafka_topic_partition_list_t *offsets; rd_kafka_resp_err_t err; rd_kafka_consumer_group_metadata_t *c1_cgmetadata; int remains = msgcnt2; - TEST_SAY(_C_BLU "Begin transaction #%d/%d " + TEST_SAY(_C_BLU + "Begin transaction #%d/%d " "(msgcnt=%d, do_abort=%s, recreate_consumer=%s)\n", - txn, txncnt, msgcnt2, - do_abort ? "true":"false", - recreate_consumer ? "true":"false"); + txn, txncnt, msgcnt2, do_abort ? "true" : "false", + recreate_consumer ? "true" : "false"); consume_messages(c1, msgs, msgcnt2); TEST_CALL_ERROR__(rd_kafka_begin_transaction(p2)); - for (i = 0 ; i < msgcnt2 ; i++) { + for (i = 0; i < msgcnt2; i++) { rd_kafka_message_t *msg = msgs[i]; if (!do_abort) { @@ -425,23 +417,18 @@ void do_test_consumer_producer_txn (void) { * on the output topic, so we need to * override the topic name to match * the actual msgver's output topic. 
*/ - test_msgver_add_msg0(__FUNCTION__, __LINE__, - rd_kafka_name(p2), - &expect_mv, msg, - output_topic); + test_msgver_add_msg0( + __FUNCTION__, __LINE__, rd_kafka_name(p2), + &expect_mv, msg, output_topic); committed_msgcnt++; } - err = rd_kafka_producev(p2, - RD_KAFKA_V_TOPIC(output_topic), - RD_KAFKA_V_KEY(msg->key, - msg->key_len), - RD_KAFKA_V_VALUE(msg->payload, - msg->len), - RD_KAFKA_V_MSGFLAGS( - RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(&remains), - RD_KAFKA_V_END); + err = rd_kafka_producev( + p2, RD_KAFKA_V_TOPIC(output_topic), + RD_KAFKA_V_KEY(msg->key, msg->key_len), + RD_KAFKA_V_VALUE(msg->payload, msg->len), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_OPAQUE(&remains), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); @@ -462,9 +449,8 @@ void do_test_consumer_producer_txn (void) { TEST_ASSERT(c1_cgmetadata != NULL, "failed to get consumer group metadata"); - TEST_CALL_ERROR__( - rd_kafka_send_offsets_to_transaction( - p2, offsets, c1_cgmetadata, -1)); + TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( + p2, offsets, c1_cgmetadata, -1)); rd_kafka_consumer_group_metadata_destroy(c1_cgmetadata); @@ -474,17 +460,18 @@ void do_test_consumer_producer_txn (void) { if (do_abort) { test_curr->ignore_dr_err = rd_true; - TEST_CALL_ERROR__(rd_kafka_abort_transaction( - p2, 30*1000)); + TEST_CALL_ERROR__( + rd_kafka_abort_transaction(p2, 30 * 1000)); } else { test_curr->ignore_dr_err = rd_false; - TEST_CALL_ERROR__(rd_kafka_commit_transaction( - p2, 30*1000)); + TEST_CALL_ERROR__( + rd_kafka_commit_transaction(p2, 30 * 1000)); } TEST_ASSERT(remains == 0, "expected no remaining messages " - "in-flight/in-queue, got %d", remains); + "in-flight/in-queue, got %d", + remains); if (recreate_consumer) { @@ -502,12 +489,11 @@ void do_test_consumer_producer_txn (void) { test_msgver_init(&actual_mv, testid); - test_consumer_poll("Verify output topic", c2, testid, - -1, 0, committed_msgcnt, &actual_mv); + 
test_consumer_poll("Verify output topic", c2, testid, -1, 0, + committed_msgcnt, &actual_mv); - test_msgver_verify_compare("Verify output topic", - &actual_mv, &expect_mv, - TEST_MSGVER_ALL); + test_msgver_verify_compare("Verify output topic", &actual_mv, + &expect_mv, TEST_MSGVER_ALL); test_msgver_clear(&actual_mv); test_msgver_clear(&expect_mv); @@ -528,7 +514,7 @@ void do_test_consumer_producer_txn (void) { /** * @brief Testing misuse of the transaction API. */ -static void do_test_misuse_txn (void) { +static void do_test_misuse_txn(void) { const char *topic = test_mk_topic_name("0103-test_misuse_txn", 1); rd_kafka_t *p; rd_kafka_conf_t *conf; @@ -549,10 +535,10 @@ static void do_test_misuse_txn (void) { p = test_create_handle(RD_KAFKA_PRODUCER, conf); - error = rd_kafka_init_transactions(p, 10*1000); + error = rd_kafka_init_transactions(p, 10 * 1000); TEST_ASSERT(error, "Expected init_transactions() to fail"); TEST_ASSERT(rd_kafka_error_code(error) == - RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT, + RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT, "Expected error ERR_INVALID_TRANSACTION_TIMEOUT, " "not %s: %s", rd_kafka_error_name(error), @@ -565,8 +551,7 @@ static void do_test_misuse_txn (void) { TEST_ASSERT(fatal_err == RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT, "Expected fatal error ERR_INVALID_TRANSACTION_TIMEOUT, " "not %s: %s", - rd_kafka_err2name(fatal_err), - fatal_err ? errstr : ""); + rd_kafka_err2name(fatal_err), fatal_err ? 
errstr : ""); rd_kafka_destroy(p); @@ -581,7 +566,7 @@ static void do_test_misuse_txn (void) { p = test_create_handle(RD_KAFKA_PRODUCER, conf); - TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30*1000)); + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000)); error = rd_kafka_init_transactions(p, 1); TEST_ASSERT(error, "Expected init_transactions() to fail"); @@ -592,7 +577,7 @@ static void do_test_misuse_txn (void) { TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); - error = rd_kafka_init_transactions(p, 3*1000); + error = rd_kafka_init_transactions(p, 3 * 1000); TEST_ASSERT(error, "Expected init_transactions() to fail"); TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE, "Expected ERR__STATE error, not %s", @@ -618,13 +603,12 @@ static void do_test_misuse_txn (void) { rd_kafka_error_is_retriable(error)); TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, "Expected ERR__TIMED_OUT, not %s: %s", - rd_kafka_error_name(error), - rd_kafka_error_string(error)); + rd_kafka_error_name(error), rd_kafka_error_string(error)); TEST_ASSERT(rd_kafka_error_is_retriable(error), "Expected error to be retriable"); rd_kafka_error_destroy(error); - TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30*1000)); + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000)); rd_kafka_destroy(p); @@ -640,7 +624,7 @@ static void do_test_misuse_txn (void) { p = test_create_handle(RD_KAFKA_PRODUCER, conf); /* Call until init succeeds */ - for (i = 0 ; i < 5000 ; i++) { + for (i = 0; i < 5000; i++) { if (!(error = rd_kafka_init_transactions(p, 1))) break; @@ -651,7 +635,7 @@ static void do_test_misuse_txn (void) { error = rd_kafka_begin_transaction(p); TEST_ASSERT(error, "Expected begin_transactions() to fail"); TEST_ASSERT(rd_kafka_error_code(error) == - RD_KAFKA_RESP_ERR__STATE, + RD_KAFKA_RESP_ERR__STATE, "Expected begin_transactions() to fail " "with STATE, not %s", rd_kafka_error_name(error)); @@ -659,10 +643,10 @@ static void 
do_test_misuse_txn (void) { rd_kafka_error_destroy(error); } - TEST_SAY("init_transactions() succeeded after %d call(s)\n", i+1); + TEST_SAY("init_transactions() succeeded after %d call(s)\n", i + 1); /* Make sure a sub-sequent init call fails. */ - error = rd_kafka_init_transactions(p, 5*1000); + error = rd_kafka_init_transactions(p, 5 * 1000); TEST_ASSERT(error, "Expected init_transactions() to fail"); TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE, "Expected init_transactions() to fail with STATE, not %s", @@ -681,8 +665,9 @@ static void do_test_misuse_txn (void) { /** * @brief is_fatal_cb for fenced_txn test. */ -static int fenced_txn_is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int fenced_txn_is_fatal_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason) { TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason); if (err == RD_KAFKA_RESP_ERR__FENCED) { TEST_SAY("Saw the expected fatal error\n"); @@ -695,7 +680,7 @@ static int fenced_txn_is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, /** * @brief Check that transaction fencing is handled correctly. 
*/ -static void do_test_fenced_txn (rd_bool_t produce_after_fence) { +static void do_test_fenced_txn(rd_bool_t produce_after_fence) { const char *topic = test_mk_topic_name("0103_fenced_txn", 1); rd_kafka_conf_t *conf; rd_kafka_t *p1, *p2; @@ -721,29 +706,29 @@ static void do_test_fenced_txn (rd_bool_t produce_after_fence) { p2 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf)); rd_kafka_conf_destroy(conf); - TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30*1000)); + TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30 * 1000)); /* Begin a transaction */ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p1)); /* Produce some messages */ - test_produce_msgs2(p1, topic, testid, RD_KAFKA_PARTITION_UA, - 0, 10, NULL, 0); + test_produce_msgs2(p1, topic, testid, RD_KAFKA_PARTITION_UA, 0, 10, + NULL, 0); /* Initialize transactions on producer 2, this should * fence off producer 1. */ - TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30*1000)); + TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30 * 1000)); if (produce_after_fence) { /* This will fail hard since the epoch was bumped. 
*/ TEST_SAY("Producing after producing fencing\n"); test_curr->ignore_dr_err = rd_true; - test_produce_msgs2(p1, topic, testid, RD_KAFKA_PARTITION_UA, - 0, 10, NULL, 0); + test_produce_msgs2(p1, topic, testid, RD_KAFKA_PARTITION_UA, 0, + 10, NULL, 0); } - error = rd_kafka_commit_transaction(p1, 30*1000); + error = rd_kafka_commit_transaction(p1, 30 * 1000); TEST_ASSERT(error, "Expected commit to fail"); TEST_ASSERT(rd_kafka_fatal_error(p1, NULL, 0), @@ -758,23 +743,20 @@ static void do_test_fenced_txn (rd_bool_t produce_after_fence) { TEST_ASSERT(!rd_kafka_error_is_retriable(error), "Expected commit_transaction() not to return a " "retriable error"); - TEST_ASSERT(rd_kafka_error_code(error) == - RD_KAFKA_RESP_ERR__FENCED, + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__FENCED, "Expected commit_transaction() to return %s, " "not %s: %s", rd_kafka_err2name(RD_KAFKA_RESP_ERR__FENCED), - rd_kafka_error_name(error), - rd_kafka_error_string(error)); + rd_kafka_error_name(error), rd_kafka_error_string(error)); rd_kafka_error_destroy(error); rd_kafka_destroy(p1); rd_kafka_destroy(p2); /* Make sure no messages were committed. */ - test_consume_txn_msgs_easy(topic, topic, testid, - test_get_partition_count(NULL, topic, - 10*1000), - 0, NULL); + test_consume_txn_msgs_easy( + topic, topic, testid, + test_get_partition_count(NULL, topic, 10 * 1000), 0, NULL); SUB_TEST_PASS(); } @@ -785,14 +767,14 @@ static void do_test_fenced_txn (rd_bool_t produce_after_fence) { * @brief Check that fatal idempotent producer errors are also fatal * transactional errors when KIP-360 is not supported. 
*/ -static void do_test_fatal_idempo_error_without_kip360 (void) { - const char *topic = test_mk_topic_name("0103_fatal_idempo", 1); +static void do_test_fatal_idempo_error_without_kip360(void) { + const char *topic = test_mk_topic_name("0103_fatal_idempo", 1); const int32_t partition = 0; rd_kafka_conf_t *conf, *c_conf; rd_kafka_t *p, *c; rd_kafka_error_t *error; uint64_t testid; - const int msgcnt[3] = { 6, 4, 1 }; + const int msgcnt[3] = {6, 4, 1}; rd_kafka_topic_partition_list_t *records; test_msgver_t expect_mv, actual_mv; /* This test triggers UNKNOWN_PRODUCER_ID on AK <2.4 and >2.4, but @@ -800,15 +782,14 @@ static void do_test_fatal_idempo_error_without_kip360 (void) { * On AK <2.5 (pre KIP-360) these errors are unrecoverable, * on AK >2.5 (with KIP-360) we can recover. * Since 2.4 is not behaving as the other releases we skip it here. */ - rd_bool_t expect_fail = test_broker_version < TEST_BRKVER(2,5,0,0); + rd_bool_t expect_fail = test_broker_version < TEST_BRKVER(2, 5, 0, 0); - SUB_TEST_QUICK("%s", - expect_fail ? - "expecting failure since broker is < 2.5" : - "not expecting failure since broker is >= 2.5"); + SUB_TEST_QUICK( + "%s", expect_fail ? 
"expecting failure since broker is < 2.5" + : "not expecting failure since broker is >= 2.5"); - if (test_broker_version >= TEST_BRKVER(2,4,0,0) && - test_broker_version < TEST_BRKVER(2,5,0,0)) + if (test_broker_version >= TEST_BRKVER(2, 4, 0, 0) && + test_broker_version < TEST_BRKVER(2, 5, 0, 0)) SUB_TEST_SKIP("can't trigger UNKNOWN_PRODUCER_ID on AK 2.4"); if (expect_fail) @@ -831,7 +812,7 @@ static void do_test_fatal_idempo_error_without_kip360 (void) { test_create_topic(p, topic, 1, 3); - TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30*1000)); + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000)); /* * 3 transactions: @@ -848,8 +829,7 @@ static void do_test_fatal_idempo_error_without_kip360 (void) { */ TEST_SAY(_C_BLU "Transaction 1: %d msgs\n", msgcnt[0]); TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); - test_produce_msgs2(p, topic, testid, partition, 0, - msgcnt[0], NULL, 0); + test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[0], NULL, 0); TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1)); @@ -860,16 +840,13 @@ static void do_test_fatal_idempo_error_without_kip360 (void) { TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); /* Now delete the messages from txn1 */ - TEST_SAY("Deleting records < %s [%"PRId32"] offset %d+1\n", - topic, partition, msgcnt[0]); + TEST_SAY("Deleting records < %s [%" PRId32 "] offset %d+1\n", topic, + partition, msgcnt[0]); records = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(records, topic, partition)->offset = - msgcnt[0]; /* include the control message too */ + msgcnt[0]; /* include the control message too */ - TEST_CALL_ERR__(test_DeleteRecords_simple(p, - NULL, - records, - NULL)); + TEST_CALL_ERR__(test_DeleteRecords_simple(p, NULL, records, NULL)); rd_kafka_topic_partition_list_destroy(records); /* Wait for deletes to propagate */ @@ -879,16 +856,14 @@ static void do_test_fatal_idempo_error_without_kip360 (void) { test_curr->dr_mv = &expect_mv; /* Produce more messages, 
should now fail */ - test_produce_msgs2(p, topic, testid, partition, 0, - msgcnt[1], NULL, 0); + test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[1], NULL, 0); error = rd_kafka_commit_transaction(p, -1); TEST_SAY_ERROR(error, "commit_transaction() returned: "); if (expect_fail) { - TEST_ASSERT(error != NULL, - "Expected transaction to fail"); + TEST_ASSERT(error != NULL, "Expected transaction to fail"); TEST_ASSERT(rd_kafka_error_txn_requires_abort(error), "Expected abortable error"); rd_kafka_error_destroy(error); @@ -898,8 +873,7 @@ static void do_test_fatal_idempo_error_without_kip360 (void) { */ error = rd_kafka_abort_transaction(p, -1); TEST_SAY_ERROR(error, "abort_transaction() returned: "); - TEST_ASSERT(error != NULL, - "Expected abort to fail"); + TEST_ASSERT(error != NULL, "Expected abort to fail"); TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expecting fatal error"); TEST_ASSERT(!rd_kafka_error_is_retriable(error), @@ -922,8 +896,8 @@ static void do_test_fatal_idempo_error_without_kip360 (void) { TEST_SAY(_C_BLU "Transaction 3: %d msgs\n", msgcnt[2]); test_curr->dr_mv = &expect_mv; TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); - test_produce_msgs2(p, topic, testid, partition, 0, - msgcnt[2], NULL, 0); + test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[2], + NULL, 0); TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1)); } @@ -939,19 +913,17 @@ static void do_test_fatal_idempo_error_without_kip360 (void) { test_conf_init(&c_conf, NULL, 0); test_conf_set(c_conf, "enable.partition.eof", "true"); c = test_create_consumer(topic, NULL, c_conf, NULL); - test_consumer_assign_partition("consume", - c, topic, partition, + test_consumer_assign_partition("consume", c, topic, partition, RD_KAFKA_OFFSET_BEGINNING); test_msgver_init(&actual_mv, testid); test_msgver_ignore_eof(&actual_mv); - test_consumer_poll("Verify output topic", c, testid, - 1, 0, -1, &actual_mv); + test_consumer_poll("Verify output topic", c, testid, 1, 0, -1, + &actual_mv); 
- test_msgver_verify_compare("Verify output topic", - &actual_mv, &expect_mv, - TEST_MSGVER_ALL); + test_msgver_verify_compare("Verify output topic", &actual_mv, + &expect_mv, TEST_MSGVER_ALL); test_msgver_clear(&actual_mv); test_msgver_clear(&expect_mv); @@ -966,7 +938,7 @@ static void do_test_fatal_idempo_error_without_kip360 (void) { * @brief Check that empty transactions, with no messages produced, work * as expected. */ -static void do_test_empty_txn (rd_bool_t send_offsets, rd_bool_t do_commit) { +static void do_test_empty_txn(rd_bool_t send_offsets, rd_bool_t do_commit) { const char *topic = test_mk_topic_name("0103_empty_txn", 1); rd_kafka_conf_t *conf, *c_conf; rd_kafka_t *p, *c; @@ -975,8 +947,7 @@ static void do_test_empty_txn (rd_bool_t send_offsets, rd_bool_t do_commit) { rd_kafka_topic_partition_list_t *committed; int64_t offset; - SUB_TEST_QUICK("%ssend offsets, %s", - send_offsets ? "" : "don't ", + SUB_TEST_QUICK("%ssend offsets, %s", send_offsets ? "" : "don't ", do_commit ? 
"commit" : "abort"); testid = test_id_generate(); @@ -1018,9 +989,8 @@ static void do_test_empty_txn (rd_bool_t send_offsets, rd_bool_t do_commit) { TEST_ASSERT(cgmetadata != NULL, "failed to get consumer group metadata"); - TEST_CALL_ERROR__( - rd_kafka_send_offsets_to_transaction( - p, offsets, cgmetadata, -1)); + TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( + p, offsets, cgmetadata, -1)); rd_kafka_consumer_group_metadata_destroy(cgmetadata); @@ -1035,21 +1005,20 @@ static void do_test_empty_txn (rd_bool_t send_offsets, rd_bool_t do_commit) { /* Get the committed offsets */ TEST_CALL_ERR__(rd_kafka_assignment(c, &committed)); - TEST_CALL_ERR__(rd_kafka_committed(c, committed, 10*1000)); + TEST_CALL_ERR__(rd_kafka_committed(c, committed, 10 * 1000)); TEST_ASSERT(committed->cnt == 1, - "expected one committed offset, not %d", - committed->cnt); + "expected one committed offset, not %d", committed->cnt); offset = committed->elems[0].offset; - TEST_SAY("Committed offset is %"PRId64"\n", offset); + TEST_SAY("Committed offset is %" PRId64 "\n", offset); if (do_commit && send_offsets) TEST_ASSERT(offset >= msgcnt, - "expected committed offset >= %d, got %"PRId64, + "expected committed offset >= %d, got %" PRId64, msgcnt, offset); else TEST_ASSERT(offset < 0, - "expected no committed offset, got %"PRId64, + "expected no committed offset, got %" PRId64, offset); rd_kafka_topic_partition_list_destroy(committed); @@ -1063,25 +1032,24 @@ static void do_test_empty_txn (rd_bool_t send_offsets, rd_bool_t do_commit) { /** * @returns the high watermark for the given partition. 
*/ -int64_t query_hi_wmark0 (int line, - rd_kafka_t *c, const char *topic, int32_t partition) { +int64_t +query_hi_wmark0(int line, rd_kafka_t *c, const char *topic, int32_t partition) { rd_kafka_resp_err_t err; int64_t lo = -1, hi = -1; err = rd_kafka_query_watermark_offsets(c, topic, partition, &lo, &hi, - tmout_multip(5*1000)); - TEST_ASSERT(!err, - "%d: query_watermark_offsets(%s) failed: %s", - line, topic, rd_kafka_err2str(err)); + tmout_multip(5 * 1000)); + TEST_ASSERT(!err, "%d: query_watermark_offsets(%s) failed: %s", line, + topic, rd_kafka_err2str(err)); return hi; } -#define query_hi_wmark(c,topic,part) query_hi_wmark0(__LINE__,c,topic,part) +#define query_hi_wmark(c, topic, part) query_hi_wmark0(__LINE__, c, topic, part) /** * @brief Check that isolation.level works as expected for query_watermark..(). */ -static void do_test_wmark_isolation_level (void) { +static void do_test_wmark_isolation_level(void) { const char *topic = test_mk_topic_name("0103_wmark_isol", 1); rd_kafka_conf_t *conf, *c_conf; rd_kafka_t *p, *c1, *c2; @@ -1117,18 +1085,19 @@ static void do_test_wmark_isolation_level (void) { /* Produce some txn messages */ test_produce_msgs2(p, topic, testid, 0, 0, 100, NULL, 0); - test_flush(p, 10*1000); + test_flush(p, 10 * 1000); - hw_committed = query_hi_wmark(c1, topic, 0); + hw_committed = query_hi_wmark(c1, topic, 0); hw_uncommitted = query_hi_wmark(c2, topic, 0); - TEST_SAY("Pre-commit hwmarks: committed %"PRId64 - ", uncommitted %"PRId64"\n", + TEST_SAY("Pre-commit hwmarks: committed %" PRId64 + ", uncommitted %" PRId64 "\n", hw_committed, hw_uncommitted); TEST_ASSERT(hw_committed > 0 && hw_committed < hw_uncommitted, - "Committed hwmark %"PRId64" should be lower than " - "uncommitted hwmark %"PRId64" for %s [0]", + "Committed hwmark %" PRId64 + " should be lower than " + "uncommitted hwmark %" PRId64 " for %s [0]", hw_committed, hw_uncommitted, topic); TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1)); @@ -1142,16 +1111,17 @@ static 
void do_test_wmark_isolation_level (void) { /* Now query wmarks again */ - hw_committed = query_hi_wmark(c1, topic, 0); + hw_committed = query_hi_wmark(c1, topic, 0); hw_uncommitted = query_hi_wmark(c2, topic, 0); - TEST_SAY("Post-commit hwmarks: committed %"PRId64 - ", uncommitted %"PRId64"\n", + TEST_SAY("Post-commit hwmarks: committed %" PRId64 + ", uncommitted %" PRId64 "\n", hw_committed, hw_uncommitted); TEST_ASSERT(hw_committed == hw_uncommitted, - "Committed hwmark %"PRId64" should be equal to " - "uncommitted hwmark %"PRId64" for %s [0]", + "Committed hwmark %" PRId64 + " should be equal to " + "uncommitted hwmark %" PRId64 " for %s [0]", hw_committed, hw_uncommitted, topic); rd_kafka_destroy(c1); @@ -1162,7 +1132,7 @@ static void do_test_wmark_isolation_level (void) { -int main_0103_transactions (int argc, char **argv) { +int main_0103_transactions(int argc, char **argv) { do_test_misuse_txn(); do_test_basic_producer_txn(rd_false /* without compression */); @@ -1171,10 +1141,10 @@ int main_0103_transactions (int argc, char **argv) { do_test_fenced_txn(rd_false /* no produce after fencing */); do_test_fenced_txn(rd_true /* produce after fencing */); do_test_fatal_idempo_error_without_kip360(); - do_test_empty_txn(rd_false/*don't send offsets*/, rd_true/*commit*/); - do_test_empty_txn(rd_false/*don't send offsets*/, rd_false/*abort*/); - do_test_empty_txn(rd_true/*send offsets*/, rd_true/*commit*/); - do_test_empty_txn(rd_true/*send offsets*/, rd_false/*abort*/); + do_test_empty_txn(rd_false /*don't send offsets*/, rd_true /*commit*/); + do_test_empty_txn(rd_false /*don't send offsets*/, rd_false /*abort*/); + do_test_empty_txn(rd_true /*send offsets*/, rd_true /*commit*/); + do_test_empty_txn(rd_true /*send offsets*/, rd_false /*abort*/); do_test_wmark_isolation_level(); return 0; } @@ -1184,7 +1154,7 @@ int main_0103_transactions (int argc, char **argv) { /** * @brief Transaction tests that don't require a broker. 
*/ -static void do_test_txn_local (void) { +static void do_test_txn_local(void) { rd_kafka_conf_t *conf; rd_kafka_t *p; rd_kafka_error_t *error; @@ -1203,10 +1173,9 @@ static void do_test_txn_local (void) { error = rd_kafka_init_transactions(p, 10); TEST_ASSERT(error, "Expected init_transactions() to fail"); - TEST_ASSERT(rd_kafka_error_code(error) == - RD_KAFKA_RESP_ERR__NOT_CONFIGURED, - "Expected ERR__NOT_CONFIGURED, not %s", - rd_kafka_error_name(error)); + TEST_ASSERT( + rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__NOT_CONFIGURED, + "Expected ERR__NOT_CONFIGURED, not %s", rd_kafka_error_name(error)); rd_kafka_error_destroy(error); rd_kafka_destroy(p); @@ -1221,8 +1190,7 @@ static void do_test_txn_local (void) { test_conf_set(conf, "transactional.id", "test"); p = test_create_handle(RD_KAFKA_PRODUCER, conf); - TEST_SAY("Waiting for init_transactions() timeout %d ms\n", - timeout_ms); + TEST_SAY("Waiting for init_transactions() timeout %d ms\n", timeout_ms); test_timeout_set((timeout_ms + 2000) / 1000); @@ -1233,8 +1201,7 @@ static void do_test_txn_local (void) { TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, "Expected RD_KAFKA_RESP_ERR__TIMED_OUT, " "not %s: %s", - rd_kafka_error_name(error), - rd_kafka_error_string(error)); + rd_kafka_error_name(error), rd_kafka_error_string(error)); TEST_SAY("init_transactions() failed as expected: %s\n", rd_kafka_error_string(error)); @@ -1249,7 +1216,7 @@ static void do_test_txn_local (void) { } -int main_0103_transactions_local (int argc, char **argv) { +int main_0103_transactions_local(int argc, char **argv) { do_test_txn_local(); diff --git a/tests/0104-fetch_from_follower_mock.c b/tests/0104-fetch_from_follower_mock.c index 77970b4b06..6749ab57b9 100644 --- a/tests/0104-fetch_from_follower_mock.c +++ b/tests/0104-fetch_from_follower_mock.c @@ -41,13 +41,13 @@ * a reset is performed. See do_test_offset_reset_lag() * for the case where the replica is lagging and can't be trusted. 
*/ -static void do_test_offset_reset (const char *auto_offset_reset) { +static void do_test_offset_reset(const char *auto_offset_reset) { const char *bootstraps; rd_kafka_mock_cluster_t *mcluster; rd_kafka_conf_t *conf; rd_kafka_t *c; - const char *topic = "test"; - const int msgcnt = 1000; + const char *topic = "test"; + const int msgcnt = 1000; const size_t msgsize = 1000; TEST_SAY(_C_MAG "[ Test FFF auto.offset.reset=%s ]\n", @@ -58,8 +58,7 @@ static void do_test_offset_reset (const char *auto_offset_reset) { /* Seed the topic with messages */ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize, "bootstrap.servers", bootstraps, - "batch.num.messages", "10", - NULL); + "batch.num.messages", "10", NULL); /* Set partition leader to broker 1, follower to broker 2 */ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); @@ -79,12 +78,10 @@ static void do_test_offset_reset (const char *auto_offset_reset) { * will go to the follower. We want the third fetch, second one on * the follower, to fail and trigger an offset reset. 
*/ rd_kafka_mock_push_request_errors( - mcluster, - 1/*FetchRequest*/, - 3, - RD_KAFKA_RESP_ERR_NO_ERROR /*leader*/, - RD_KAFKA_RESP_ERR_NO_ERROR /*follower*/, - RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE /*follower: fail*/); + mcluster, 1 /*FetchRequest*/, 3, + RD_KAFKA_RESP_ERR_NO_ERROR /*leader*/, + RD_KAFKA_RESP_ERR_NO_ERROR /*follower*/, + RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE /*follower: fail*/); test_consumer_assign_partition(auto_offset_reset, c, topic, 0, RD_KAFKA_OFFSET_INVALID); @@ -92,8 +89,7 @@ static void do_test_offset_reset (const char *auto_offset_reset) { if (!strcmp(auto_offset_reset, "latest")) test_consumer_poll_no_msgs(auto_offset_reset, c, 0, 5000); else - test_consumer_poll(auto_offset_reset, c, 0, 1, 0, - msgcnt, NULL); + test_consumer_poll(auto_offset_reset, c, 0, 1, 0, msgcnt, NULL); test_consumer_close(c); @@ -111,14 +107,14 @@ static void do_test_offset_reset (const char *auto_offset_reset) { * who's high-watermark is behind the leader, which means * an offset reset should not be triggered. */ -static void do_test_offset_reset_lag (void) { +static void do_test_offset_reset_lag(void) { const char *bootstraps; rd_kafka_mock_cluster_t *mcluster; rd_kafka_conf_t *conf; rd_kafka_t *c; - const char *topic = "test"; - const int msgcnt = 10; - const int lag = 3; + const char *topic = "test"; + const int msgcnt = 10; + const int lag = 3; const size_t msgsize = 1000; TEST_SAY(_C_MAG "[ Test lagging FFF offset reset ]\n"); @@ -128,8 +124,7 @@ static void do_test_offset_reset_lag (void) { /* Seed the topic with messages */ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize, "bootstrap.servers", bootstraps, - "batch.num.messages", "1", - NULL); + "batch.num.messages", "1", NULL); /* Set broker rack */ /* Set partition leader to broker 1, follower to broker 2 */ @@ -138,8 +133,8 @@ static void do_test_offset_reset_lag (void) { /* Make follower lag by some messages * ( .. 
-1 because offsets start at 0) */ - rd_kafka_mock_partition_set_follower_wmarks(mcluster, topic, 0, - -1, msgcnt - lag - 1); + rd_kafka_mock_partition_set_follower_wmarks(mcluster, topic, 0, -1, + msgcnt - lag - 1); test_conf_init(&conf, NULL, 0); test_conf_set(conf, "bootstrap.servers", bootstraps); @@ -181,13 +176,13 @@ static void do_test_offset_reset_lag (void) { * is questionable but for a later PR). Then change to a valid * replica and verify messages can be consumed. */ -static void do_test_unknown_follower (void) { +static void do_test_unknown_follower(void) { const char *bootstraps; rd_kafka_mock_cluster_t *mcluster; rd_kafka_conf_t *conf; rd_kafka_t *c; - const char *topic = "test"; - const int msgcnt = 1000; + const char *topic = "test"; + const int msgcnt = 1000; const size_t msgsize = 1000; test_msgver_t mv; @@ -198,8 +193,7 @@ static void do_test_unknown_follower (void) { /* Seed the topic with messages */ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize, "bootstrap.servers", bootstraps, - "batch.num.messages", "10", - NULL); + "batch.num.messages", "10", NULL); /* Set partition leader to broker 1, follower * to non-existent broker 19 */ @@ -225,11 +219,10 @@ static void do_test_unknown_follower (void) { test_msgver_init(&mv, 0); test_consumer_poll("proper follower", c, 0, 1, 0, msgcnt, &mv); /* Verify messages were indeed received from broker 3 */ - test_msgver_verify0(__FUNCTION__, __LINE__, "broker_id", - &mv, TEST_MSGVER_BY_BROKER_ID, - (struct test_mv_vs){ .msg_base = 0, - .exp_cnt = msgcnt, - .broker_id = 3 }); + test_msgver_verify0( + __FUNCTION__, __LINE__, "broker_id", &mv, TEST_MSGVER_BY_BROKER_ID, + (struct test_mv_vs) { + .msg_base = 0, .exp_cnt = msgcnt, .broker_id = 3}); test_msgver_clear(&mv); test_consumer_close(c); @@ -247,13 +240,13 @@ static void do_test_unknown_follower (void) { * periodic metadata timeout when leader broker is no longer * a replica. 
*/ -static void do_test_replica_not_available (void) { +static void do_test_replica_not_available(void) { const char *bootstraps; rd_kafka_mock_cluster_t *mcluster; rd_kafka_conf_t *conf; rd_kafka_t *c; const char *topic = "test"; - const int msgcnt = 1000; + const int msgcnt = 1000; TEST_SAY(_C_MAG "[ Test REPLICA_NOT_AVAIALBLE ]\n"); @@ -262,8 +255,7 @@ static void do_test_replica_not_available (void) { /* Seed the topic with messages */ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 1000, "bootstrap.servers", bootstraps, - "batch.num.messages", "10", - NULL); + "batch.num.messages", "10", NULL); /* Set partition leader to broker 1. */ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); @@ -278,20 +270,17 @@ static void do_test_replica_not_available (void) { c = test_create_consumer("mygroup", NULL, conf, NULL); rd_kafka_mock_broker_push_request_error_rtts( - mcluster, - 1/*Broker 1*/, - 1/*FetchRequest*/, - 10, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0); + mcluster, 1 /*Broker 1*/, 1 /*FetchRequest*/, 10, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0); test_consumer_assign_partition("REPLICA_NOT_AVAIALBLE", c, topic, 0, @@ -315,7 +304,7 @@ 
static void do_test_replica_not_available (void) { } -int main_0104_fetch_from_follower_mock (int argc, char **argv) { +int main_0104_fetch_from_follower_mock(int argc, char **argv) { if (test_needs_auth()) { TEST_SKIP("Mock cluster does not support SSL/SASL\n"); diff --git a/tests/0105-transactions_mock.c b/tests/0105-transactions_mock.c index 15f91dc55a..5c8cd3df7b 100644 --- a/tests/0105-transactions_mock.c +++ b/tests/0105-transactions_mock.c @@ -48,8 +48,8 @@ static int allowed_error; /** * @brief Decide what error_cb's will cause the test to fail. */ -static int error_is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +error_is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { if (err == allowed_error || /* If transport errors are allowed then it is likely * that we'll also see ALL_BROKERS_DOWN. */ @@ -63,54 +63,55 @@ static int error_is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, } -static rd_kafka_resp_err_t (*on_response_received_cb) (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - int64_t rtt, - rd_kafka_resp_err_t err, - void *ic_opaque); +static rd_kafka_resp_err_t (*on_response_received_cb)(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque); /** * @brief Simple on_response_received interceptor that simply calls the * sub-test's on_response_received_cb function, if set. 
*/ static rd_kafka_resp_err_t -on_response_received_trampoline (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - int64_t rtt, - rd_kafka_resp_err_t err, - void *ic_opaque) { +on_response_received_trampoline(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque) { TEST_ASSERT(on_response_received_cb != NULL, ""); - return on_response_received_cb(rk, sockfd, brokername, brokerid, - ApiKey, ApiVersion, - CorrId, size, rtt, err, ic_opaque); + return on_response_received_cb(rk, sockfd, brokername, brokerid, ApiKey, + ApiVersion, CorrId, size, rtt, err, + ic_opaque); } /** * @brief on_new interceptor to add an on_response_received interceptor. */ -static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; if (on_response_received_cb) err = rd_kafka_interceptor_add_on_response_received( - rk, "on_response_received", - on_response_received_trampoline, ic_opaque); + rk, "on_response_received", on_response_received_trampoline, + ic_opaque); return err; } @@ -128,9 +129,10 @@ static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, * which must be assigned prior to * calling create_tnx_producer(). */ -static rd_kafka_t *create_txn_producer (rd_kafka_mock_cluster_t **mclusterp, - const char *transactional_id, - int broker_cnt, ...) { +static rd_kafka_t *create_txn_producer(rd_kafka_mock_cluster_t **mclusterp, + const char *transactional_id, + int broker_cnt, + ...) 
{ rd_kafka_conf_t *conf; rd_kafka_t *rk; char numstr[8]; @@ -163,10 +165,8 @@ static rd_kafka_t *create_txn_producer (rd_kafka_mock_cluster_t **mclusterp, /* Add an on_.. interceptors */ if (add_interceptors) - rd_kafka_conf_interceptor_add_on_new( - conf, - "on_new_producer", - on_new_producer, NULL); + rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer", + on_new_producer, NULL); rk = test_create_handle(RD_KAFKA_PRODUCER, conf); @@ -183,18 +183,17 @@ static rd_kafka_t *create_txn_producer (rd_kafka_mock_cluster_t **mclusterp, * @brief Test recoverable errors using mock broker error injections * and code coverage checks. */ -static void do_test_txn_recoverable_errors (void) { +static void do_test_txn_recoverable_errors(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_topic_partition_list_t *offsets; rd_kafka_consumer_group_metadata_t *cgmetadata; const char *groupid = "myGroupId"; - const char *txnid = "myTxnId"; + const char *txnid = "myTxnId"; SUB_TEST_QUICK(); - rk = create_txn_producer(&mcluster, txnid, 3, - "batch.num.messages", "1", + rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1", NULL); /* Make sure transaction and group coordinators are different. 
@@ -207,12 +206,10 @@ static void do_test_txn_recoverable_errors (void) { * Inject som InitProducerId errors that causes retries */ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_InitProducerId, - 3, - RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS); + mcluster, RD_KAFKAP_InitProducerId, 3, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS); TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); @@ -226,27 +223,21 @@ static void do_test_txn_recoverable_errors (void) { /* Produce a message without error first */ - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); /* * Produce a message, let it fail with a non-idempo/non-txn * retryable error */ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_Produce, - 1, - RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS); + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); /* Make sure messages are produced */ rd_kafka_flush(rk, -1); @@ -258,29 +249,24 @@ static void do_test_txn_recoverable_errors (void) { offsets = rd_kafka_topic_partition_list_new(4); rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)->offset = 12; rd_kafka_topic_partition_list_add(offsets, "srctop2", 99)->offset = - 999999111; + 999999111; rd_kafka_topic_partition_list_add(offsets, "srctopic", 0)->offset = 999; 
rd_kafka_topic_partition_list_add(offsets, "srctop2", 3499)->offset = - 123456789; + 123456789; rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_AddPartitionsToTxn, - 1, - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART); + mcluster, RD_KAFKAP_AddPartitionsToTxn, 1, + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART); rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_TxnOffsetCommit, - 2, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); + mcluster, RD_KAFKAP_TxnOffsetCommit, 2, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); - TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( - rk, offsets, - cgmetadata, -1)); + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); rd_kafka_consumer_group_metadata_destroy(cgmetadata); rd_kafka_topic_partition_list_destroy(offsets); @@ -289,12 +275,10 @@ static void do_test_txn_recoverable_errors (void) { * Commit transaction, first with som failures, then succeed. */ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_EndTxn, - 3, - RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS); + mcluster, RD_KAFKAP_EndTxn, 3, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS); TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000)); @@ -310,7 +294,7 @@ static void do_test_txn_recoverable_errors (void) { * @brief KIP-360: Test that fatal idempotence errors triggers abortable * transaction errors and that the producer can recover. 
*/ -static void do_test_txn_fatal_idempo_errors (void) { +static void do_test_txn_fatal_idempo_errors(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_error_t *error; @@ -318,13 +302,12 @@ static void do_test_txn_fatal_idempo_errors (void) { SUB_TEST_QUICK(); - rk = create_txn_producer(&mcluster, txnid, 3, - "batch.num.messages", "1", + rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1", NULL); test_curr->ignore_dr_err = rd_true; - test_curr->is_fatal_cb = error_is_fatal_cb; - allowed_error = RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID; + test_curr->is_fatal_cb = error_is_fatal_cb; + allowed_error = RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID; TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); @@ -335,24 +318,18 @@ static void do_test_txn_fatal_idempo_errors (void) { /* Produce a message without error first */ - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); /* Produce a message, let it fail with a fatal idempo error. */ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_Produce, - 1, - RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID); + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); /* Commit the transaction, should fail */ error = rd_kafka_commit_transaction(rk, -1); @@ -374,11 +351,9 @@ static void do_test_txn_fatal_idempo_errors (void) { * producer can recover. 
*/ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); @@ -402,7 +377,7 @@ static void do_test_txn_fatal_idempo_errors (void) { * re-init the pid so that the internal state automatically * transitions. */ -static void do_test_txn_slow_reinit (rd_bool_t with_sleep) { +static void do_test_txn_slow_reinit(rd_bool_t with_sleep) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_error_t *error; @@ -410,17 +385,16 @@ static void do_test_txn_slow_reinit (rd_bool_t with_sleep) { const char *txnid = "myTxnId"; test_timing_t timing; - SUB_TEST("%s sleep", with_sleep ? "with": "without"); + SUB_TEST("%s sleep", with_sleep ? "with" : "without"); - rk = create_txn_producer(&mcluster, txnid, 3, - "batch.num.messages", "1", + rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1", NULL); rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, txn_coord); test_curr->ignore_dr_err = rd_true; - test_curr->is_fatal_cb = NULL; + test_curr->is_fatal_cb = NULL; TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); @@ -431,11 +405,9 @@ static void do_test_txn_slow_reinit (rd_bool_t with_sleep) { /* Produce a message without error first */ - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); test_flush(rk, -1); @@ -443,24 +415,17 @@ static void do_test_txn_slow_reinit (rd_bool_t with_sleep) { * the abort_transaction() call timeout so that the automatic * re-initpid takes longer 
than abort_transaction(). */ rd_kafka_mock_broker_push_request_error_rtts( - mcluster, - txn_coord, - RD_KAFKAP_InitProducerId, - 1, - RD_KAFKA_RESP_ERR_NO_ERROR, 10000/*10s*/); + mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, + RD_KAFKA_RESP_ERR_NO_ERROR, 10000 /*10s*/); /* Produce a message, let it fail with a fatal idempo error. */ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_Produce, - 1, - RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID); + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); /* Commit the transaction, should fail */ @@ -505,11 +470,9 @@ static void do_test_txn_slow_reinit (rd_bool_t with_sleep) { * producer can recover. */ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); @@ -530,7 +493,7 @@ static void do_test_txn_slow_reinit (rd_bool_t with_sleep) { * producer PID fail with a fencing error. * Should raise a fatal error. 
*/ -static void do_test_txn_fenced_reinit (void) { +static void do_test_txn_fenced_reinit(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_error_t *error; @@ -541,16 +504,15 @@ static void do_test_txn_fenced_reinit (void) { SUB_TEST_QUICK(); - rk = create_txn_producer(&mcluster, txnid, 3, - "batch.num.messages", "1", + rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1", NULL); rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, txn_coord); test_curr->ignore_dr_err = rd_true; - test_curr->is_fatal_cb = error_is_fatal_cb; - allowed_error = RD_KAFKA_RESP_ERR__FENCED; + test_curr->is_fatal_cb = error_is_fatal_cb; + allowed_error = RD_KAFKA_RESP_ERR__FENCED; TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); @@ -561,34 +523,25 @@ static void do_test_txn_fenced_reinit (void) { /* Produce a message without error first */ - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); test_flush(rk, -1); /* Fail the PID reinit */ rd_kafka_mock_broker_push_request_error_rtts( - mcluster, - txn_coord, - RD_KAFKAP_InitProducerId, - 1, - RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH, 0); + mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, + RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH, 0); /* Produce a message, let it fail with a fatal idempo error. 
*/ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_Produce, - 1, - RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID); + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); test_flush(rk, -1); @@ -598,15 +551,12 @@ static void do_test_txn_fenced_reinit (void) { TEST_SAY("abort_transaction() failed: %s\n", rd_kafka_error_string(error)); - TEST_ASSERT(rd_kafka_error_is_fatal(error), - "Expected a fatal error"); + TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected a fatal error"); rd_kafka_error_destroy(error); fatal_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr)); - TEST_ASSERT(fatal_err, - "Expected a fatal error to have been raised"); - TEST_SAY("Fatal error: %s: %s\n", - rd_kafka_err2name(fatal_err), errstr); + TEST_ASSERT(fatal_err, "Expected a fatal error to have been raised"); + TEST_SAY("Fatal error: %s: %s\n", rd_kafka_err2name(fatal_err), errstr); /* All done */ @@ -621,8 +571,8 @@ static void do_test_txn_fenced_reinit (void) { /** * @brief Test EndTxn errors. 
*/ -static void do_test_txn_endtxn_errors (void) { - rd_kafka_t *rk = NULL; +static void do_test_txn_endtxn_errors(void) { + rd_kafka_t *rk = NULL; rd_kafka_mock_cluster_t *mcluster = NULL; rd_kafka_resp_err_t err; struct { @@ -633,102 +583,113 @@ static void do_test_txn_endtxn_errors (void) { rd_bool_t exp_abortable; rd_bool_t exp_fatal; } scenario[] = { - /* This list of errors is from the EndTxnResponse handler in - * AK clients/.../TransactionManager.java */ - { /* #0 */ - 2, - { RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, - RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE }, - /* Should auto-recover */ - RD_KAFKA_RESP_ERR_NO_ERROR, - }, - { /* #1 */ - 2, - { RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR }, - /* Should auto-recover */ - RD_KAFKA_RESP_ERR_NO_ERROR, - }, - { /* #2 */ - 1, - { RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS }, - /* Should auto-recover */ - RD_KAFKA_RESP_ERR_NO_ERROR, - }, - { /* #3 */ - 3, - { RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS }, - /* Should auto-recover */ - RD_KAFKA_RESP_ERR_NO_ERROR, - }, - { /* #4 */ - 1, - { RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID }, - RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, - rd_false /* !retriable */, - rd_true /* abortable */, - rd_false /* !fatal */ - }, - { /* #5 */ - 1, - { RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING }, - RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING, - rd_false /* !retriable */, - rd_true /* abortable */, - rd_false /* !fatal */ - }, - { /* #6 */ - 1, - { RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH }, - /* This error is normalized */ - RD_KAFKA_RESP_ERR__FENCED, - rd_false /* !retriable */, - rd_false /* !abortable */, - rd_true /* fatal */ - }, - { /* #7 */ - 1, - { RD_KAFKA_RESP_ERR_PRODUCER_FENCED }, - /* This error is normalized */ - RD_KAFKA_RESP_ERR__FENCED, - rd_false /* !retriable */, - rd_false /* !abortable */, - rd_true /* fatal */ - }, - { /* #8 */ - 1, - { 
RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED }, - RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED, - rd_false /* !retriable */, - rd_false /* !abortable */, - rd_true /* fatal */ - }, - { /* #9 */ - 1, - { RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED }, - RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, - rd_false /* !retriable */, - rd_true /* abortable */, - rd_false /* !fatal */ - }, - { /* #10 */ - /* Any other error should raise a fatal error */ - 1, - { RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE }, - RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE, - rd_false /* !retriable */, - rd_true /* abortable */, - rd_false /* !fatal */, - }, - { 0 }, + /* This list of errors is from the EndTxnResponse handler in + * AK clients/.../TransactionManager.java */ + { + /* #0 */ + 2, + {RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE}, + /* Should auto-recover */ + RD_KAFKA_RESP_ERR_NO_ERROR, + }, + { + /* #1 */ + 2, + {RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR}, + /* Should auto-recover */ + RD_KAFKA_RESP_ERR_NO_ERROR, + }, + { + /* #2 */ + 1, + {RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS}, + /* Should auto-recover */ + RD_KAFKA_RESP_ERR_NO_ERROR, + }, + { + /* #3 */ + 3, + {RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS}, + /* Should auto-recover */ + RD_KAFKA_RESP_ERR_NO_ERROR, + }, + { + /* #4 */ + 1, + {RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID}, + RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, + rd_false /* !retriable */, + rd_true /* abortable */, + rd_false /* !fatal */ + }, + { + /* #5 */ + 1, + {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING}, + RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING, + rd_false /* !retriable */, + rd_true /* abortable */, + rd_false /* !fatal */ + }, + { + /* #6 */ + 1, + {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH}, + /* This error is normalized */ + RD_KAFKA_RESP_ERR__FENCED, + rd_false /* !retriable 
*/, + rd_false /* !abortable */, + rd_true /* fatal */ + }, + { + /* #7 */ + 1, + {RD_KAFKA_RESP_ERR_PRODUCER_FENCED}, + /* This error is normalized */ + RD_KAFKA_RESP_ERR__FENCED, + rd_false /* !retriable */, + rd_false /* !abortable */, + rd_true /* fatal */ + }, + { + /* #8 */ + 1, + {RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED}, + RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED, + rd_false /* !retriable */, + rd_false /* !abortable */, + rd_true /* fatal */ + }, + { + /* #9 */ + 1, + {RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED}, + RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, + rd_false /* !retriable */, + rd_true /* abortable */, + rd_false /* !fatal */ + }, + { + /* #10 */ + /* Any other error should raise a fatal error */ + 1, + {RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE}, + RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE, + rd_false /* !retriable */, + rd_true /* abortable */, + rd_false /* !fatal */, + }, + {0}, }; int i; SUB_TEST_QUICK(); - for (i = 0 ; scenario[i].error_cnt > 0 ; i++) { + for (i = 0; scenario[i].error_cnt > 0; i++) { int j; /* For each scenario, test: * commit_transaction() @@ -736,30 +697,28 @@ static void do_test_txn_endtxn_errors (void) { * abort_transaction() * flush() + abort_transaction() */ - for (j = 0 ; j < (2+2) ; j++) { - rd_bool_t commit = j < 2; + for (j = 0; j < (2 + 2); j++) { + rd_bool_t commit = j < 2; rd_bool_t with_flush = j & 1; const char *commit_str = - commit ? - (with_flush ? "commit&flush" : "commit") : - (with_flush ? "abort&flush" : "abort"); + commit ? (with_flush ? "commit&flush" : "commit") + : (with_flush ? 
"abort&flush" : "abort"); rd_kafka_topic_partition_list_t *offsets; rd_kafka_consumer_group_metadata_t *cgmetadata; rd_kafka_error_t *error; test_timing_t t_call; - TEST_SAY("Testing scenario #%d %s with %"PRIusz + TEST_SAY("Testing scenario #%d %s with %" PRIusz " injected erorrs, expecting %s\n", - i, commit_str, - scenario[i].error_cnt, + i, commit_str, scenario[i].error_cnt, rd_kafka_err2name(scenario[i].exp_err)); if (!rk) { const char *txnid = "myTxnId"; - rk = create_txn_producer(&mcluster, txnid, - 3, NULL); - TEST_CALL_ERROR__(rd_kafka_init_transactions( - rk, 5000)); + rk = create_txn_producer(&mcluster, txnid, 3, + NULL); + TEST_CALL_ERROR__( + rd_kafka_init_transactions(rk, 5000)); } /* @@ -774,8 +733,7 @@ static void do_test_txn_endtxn_errors (void) { /* * Produce a message. */ - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", @@ -789,16 +747,17 @@ static void do_test_txn_endtxn_errors (void) { */ offsets = rd_kafka_topic_partition_list_new(4); rd_kafka_topic_partition_list_add(offsets, "srctopic", - 3)->offset = 12; + 3) + ->offset = 12; rd_kafka_topic_partition_list_add(offsets, "srctop2", - 99)->offset = 99999; + 99) + ->offset = 99999; - cgmetadata = rd_kafka_consumer_group_metadata_new( - "mygroupid"); + cgmetadata = + rd_kafka_consumer_group_metadata_new("mygroupid"); TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( - rk, offsets, - cgmetadata, -1)); + rk, offsets, cgmetadata, -1)); rd_kafka_consumer_group_metadata_destroy(cgmetadata); rd_kafka_topic_partition_list_destroy(offsets); @@ -808,33 +767,34 @@ static void do_test_txn_endtxn_errors (void) { * then succeed. 
*/ rd_kafka_mock_push_request_errors_array( - mcluster, - RD_KAFKAP_EndTxn, - scenario[i].error_cnt, - scenario[i].errors); + mcluster, RD_KAFKAP_EndTxn, scenario[i].error_cnt, + scenario[i].errors); TIMING_START(&t_call, "%s", commit_str); if (commit) error = rd_kafka_commit_transaction( - rk, tmout_multip(5000)); + rk, tmout_multip(5000)); else error = rd_kafka_abort_transaction( - rk, tmout_multip(5000)); + rk, tmout_multip(5000)); TIMING_STOP(&t_call); if (error) - TEST_SAY("Scenario #%d %s failed: %s: %s " - "(retriable=%s, req_abort=%s, " - "fatal=%s)\n", - i, commit_str, - rd_kafka_error_name(error), - rd_kafka_error_string(error), - RD_STR_ToF(rd_kafka_error_is_retriable(error)), - RD_STR_ToF(rd_kafka_error_txn_requires_abort(error)), - RD_STR_ToF(rd_kafka_error_is_fatal(error))); + TEST_SAY( + "Scenario #%d %s failed: %s: %s " + "(retriable=%s, req_abort=%s, " + "fatal=%s)\n", + i, commit_str, rd_kafka_error_name(error), + rd_kafka_error_string(error), + RD_STR_ToF( + rd_kafka_error_is_retriable(error)), + RD_STR_ToF( + rd_kafka_error_txn_requires_abort( + error)), + RD_STR_ToF(rd_kafka_error_is_fatal(error))); else - TEST_SAY("Scenario #%d %s succeeded\n", - i, commit_str); + TEST_SAY("Scenario #%d %s succeeded\n", i, + commit_str); if (!scenario[i].exp_err) { TEST_ASSERT(!error, @@ -846,28 +806,26 @@ static void do_test_txn_endtxn_errors (void) { } - TEST_ASSERT(error != NULL, - "Expected #%d %s to fail", - i, commit_str); + TEST_ASSERT(error != NULL, "Expected #%d %s to fail", i, + commit_str); TEST_ASSERT(scenario[i].exp_err == - rd_kafka_error_code(error), - "Scenario #%d: expected %s, not %s", - i, + rd_kafka_error_code(error), + "Scenario #%d: expected %s, not %s", i, rd_kafka_err2name(scenario[i].exp_err), rd_kafka_error_name(error)); - TEST_ASSERT(scenario[i].exp_retriable == - (rd_bool_t) - rd_kafka_error_is_retriable(error), - "Scenario #%d: retriable mismatch", - i); - TEST_ASSERT(scenario[i].exp_abortable == - (rd_bool_t) - 
rd_kafka_error_txn_requires_abort(error), - "Scenario #%d: abortable mismatch", - i); - TEST_ASSERT(scenario[i].exp_fatal == - (rd_bool_t)rd_kafka_error_is_fatal(error), - "Scenario #%d: fatal mismatch", i); + TEST_ASSERT( + scenario[i].exp_retriable == + (rd_bool_t)rd_kafka_error_is_retriable(error), + "Scenario #%d: retriable mismatch", i); + TEST_ASSERT( + scenario[i].exp_abortable == + (rd_bool_t)rd_kafka_error_txn_requires_abort( + error), + "Scenario #%d: abortable mismatch", i); + TEST_ASSERT( + scenario[i].exp_fatal == + (rd_bool_t)rd_kafka_error_is_fatal(error), + "Scenario #%d: fatal mismatch", i); /* Handle errors according to the error flags */ if (rd_kafka_error_is_fatal(error)) { @@ -879,10 +837,11 @@ static void do_test_txn_endtxn_errors (void) { } else if (rd_kafka_error_txn_requires_abort(error)) { rd_kafka_error_destroy(error); - TEST_SAY("Abortable error, " - "aborting transaction\n"); + TEST_SAY( + "Abortable error, " + "aborting transaction\n"); TEST_CALL_ERROR__( - rd_kafka_abort_transaction(rk, -1)); + rd_kafka_abort_transaction(rk, -1)); } else if (rd_kafka_error_is_retriable(error)) { rd_kafka_error_destroy(error); @@ -890,18 +849,19 @@ static void do_test_txn_endtxn_errors (void) { commit_str); if (commit) TEST_CALL_ERROR__( - rd_kafka_commit_transaction( - rk, 5000)); + rd_kafka_commit_transaction(rk, + 5000)); else TEST_CALL_ERROR__( - rd_kafka_abort_transaction( - rk, 5000)); + rd_kafka_abort_transaction(rk, + 5000)); } else { - TEST_FAIL("Scenario #%d %s: " - "Permanent error without enough " - "hints to proceed: %s\n", - i, commit_str, - rd_kafka_error_string(error)); + TEST_FAIL( + "Scenario #%d %s: " + "Permanent error without enough " + "hints to proceed: %s\n", + i, commit_str, + rd_kafka_error_string(error)); } } } @@ -917,10 +877,10 @@ static void do_test_txn_endtxn_errors (void) { /** * @brief Test that the commit/abort works properly with infinite timeout. 
*/ -static void do_test_txn_endtxn_infinite (void) { +static void do_test_txn_endtxn_infinite(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster = NULL; - const char *txnid = "myTxnId"; + const char *txnid = "myTxnId"; int i; SUB_TEST_QUICK(); @@ -929,8 +889,8 @@ static void do_test_txn_endtxn_infinite (void) { TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); - for (i = 0 ; i < 2 ; i++) { - rd_bool_t commit = i == 0; + for (i = 0; i < 2; i++) { + rd_bool_t commit = i == 0; const char *commit_str = commit ? "commit" : "abort"; rd_kafka_error_t *error; test_timing_t t_call; @@ -941,29 +901,26 @@ static void do_test_txn_endtxn_infinite (void) { TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END)); /* * Commit/abort transaction, first with som retriable failures, * then success. 
*/ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_EndTxn, - 10, - RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR); + mcluster, RD_KAFKAP_EndTxn, 10, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR); rd_sleep(1); @@ -974,14 +931,11 @@ static void do_test_txn_endtxn_infinite (void) { error = rd_kafka_abort_transaction(rk, -1); TIMING_STOP(&t_call); - TEST_SAY("%s returned %s\n", - commit_str, + TEST_SAY("%s returned %s\n", commit_str, error ? rd_kafka_error_string(error) : "success"); - TEST_ASSERT(!error, - "Expected %s to succeed, got %s", + TEST_ASSERT(!error, "Expected %s to succeed, got %s", commit_str, rd_kafka_error_string(error)); - } /* All done */ @@ -996,10 +950,10 @@ static void do_test_txn_endtxn_infinite (void) { /** * @brief Test that the commit/abort user timeout is honoured. 
*/ -static void do_test_txn_endtxn_timeout (void) { +static void do_test_txn_endtxn_timeout(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster = NULL; - const char *txnid = "myTxnId"; + const char *txnid = "myTxnId"; int i; SUB_TEST_QUICK(); @@ -1008,8 +962,8 @@ static void do_test_txn_endtxn_timeout (void) { TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); - for (i = 0 ; i < 2 ; i++) { - rd_bool_t commit = i == 0; + for (i = 0; i < 2; i++) { + rd_bool_t commit = i == 0; const char *commit_str = commit ? "commit" : "abort"; rd_kafka_error_t *error; test_timing_t t_call; @@ -1020,29 +974,26 @@ static void do_test_txn_endtxn_timeout (void) { TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END)); /* * Commit/abort transaction, first with som retriable failures * whos retries exceed the user timeout. 
*/ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_EndTxn, - 10, - RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR); + mcluster, RD_KAFKAP_EndTxn, 10, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR); rd_sleep(1); @@ -1053,19 +1004,15 @@ static void do_test_txn_endtxn_timeout (void) { error = rd_kafka_abort_transaction(rk, 100); TIMING_STOP(&t_call); - TEST_SAY("%s returned %s\n", - commit_str, + TEST_SAY("%s returned %s\n", commit_str, error ? 
rd_kafka_error_string(error) : "success"); - TEST_ASSERT(error != NULL, - "Expected %s to fail", commit_str); + TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str); - TEST_ASSERT(rd_kafka_error_code(error) == - RD_KAFKA_RESP_ERR__TIMED_OUT, - "Expected %s to fail with timeout, not %s: %s", - commit_str, - rd_kafka_error_name(error), - rd_kafka_error_string(error)); + TEST_ASSERT( + rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, + "Expected %s to fail with timeout, not %s: %s", commit_str, + rd_kafka_error_name(error), rd_kafka_error_string(error)); if (!commit) TEST_ASSERT(!rd_kafka_error_txn_requires_abort(error), @@ -1075,8 +1022,9 @@ static void do_test_txn_endtxn_timeout (void) { TEST_ASSERT(rd_kafka_error_txn_requires_abort(error), "commit_transaction() failure should raise " "a txn_requires_abort error"); - TEST_SAY("Aborting transaction as instructed by " - "error flag\n"); + TEST_SAY( + "Aborting transaction as instructed by " + "error flag\n"); TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); } @@ -1098,7 +1046,7 @@ static void do_test_txn_endtxn_timeout (void) { * even if AddOffsetsToTxnRequest was retried. * This is a check for a txn_req_cnt bug. 
*/ -static void do_test_txn_req_cnt (void) { +static void do_test_txn_req_cnt(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_topic_partition_list_t *offsets; @@ -1123,27 +1071,22 @@ static void do_test_txn_req_cnt (void) { offsets = rd_kafka_topic_partition_list_new(2); rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)->offset = 12; rd_kafka_topic_partition_list_add(offsets, "srctop2", 99)->offset = - 999999111; + 999999111; - rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_AddOffsetsToTxn, - 2, - RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR); + rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_AddOffsetsToTxn, + 2, + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR); rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_TxnOffsetCommit, - 2, - RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART); + mcluster, RD_KAFKAP_TxnOffsetCommit, 2, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART); cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); - TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( - rk, offsets, - cgmetadata, -1)); + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); rd_kafka_consumer_group_metadata_destroy(cgmetadata); rd_kafka_topic_partition_list_destroy(offsets); @@ -1162,7 +1105,7 @@ static void do_test_txn_req_cnt (void) { * @brief Test abortable errors using mock broker error injections * and code coverage checks. */ -static void do_test_txn_requires_abort_errors (void) { +static void do_test_txn_requires_abort_errors(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_error_t *error; @@ -1187,15 +1130,11 @@ static void do_test_txn_requires_abort_errors (void) { TEST_SAY("1. 
Fail on produce\n"); rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_Produce, - 1, - RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED); + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END); + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); /* Wait for messages to fail */ @@ -1207,8 +1146,8 @@ static void do_test_txn_requires_abort_errors (void) { cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); - error = rd_kafka_send_offsets_to_transaction(rk, offsets, - cgmetadata, -1); + error = + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1); rd_kafka_consumer_group_metadata_destroy(cgmetadata); rd_kafka_topic_partition_list_destroy(offsets); @@ -1216,8 +1155,7 @@ static void do_test_txn_requires_abort_errors (void) { TEST_ASSERT(rd_kafka_error_txn_requires_abort(error), "expected abortable error, not %s", rd_kafka_error_string(error)); - TEST_SAY("Error %s: %s\n", - rd_kafka_error_name(error), + TEST_SAY("Error %s: %s\n", rd_kafka_error_name(error), rd_kafka_error_string(error)); rd_kafka_error_destroy(error); @@ -1236,54 +1174,45 @@ static void do_test_txn_requires_abort_errors (void) { TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_AddPartitionsToTxn, - 1, - RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED); + mcluster, RD_KAFKAP_AddPartitionsToTxn, 1, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END); + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); error = rd_kafka_commit_transaction(rk, 5000); 
TEST_ASSERT(error, "commit_transaction should have failed"); TEST_SAY("commit_transaction() error %s: %s\n", - rd_kafka_error_name(error), - rd_kafka_error_string(error)); + rd_kafka_error_name(error), rd_kafka_error_string(error)); rd_kafka_error_destroy(error); TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); /* - * 3. Restart transaction and fail on AddOffsetsToTxn - */ + * 3. Restart transaction and fail on AddOffsetsToTxn + */ TEST_SAY("3. Fail on AddOffsetsToTxn\n"); TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END); + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_AddOffsetsToTxn, - 1, - RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED); + mcluster, RD_KAFKAP_AddOffsetsToTxn, 1, + RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED); offsets = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)->offset = 12; cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); - error = rd_kafka_send_offsets_to_transaction(rk, offsets, - cgmetadata, -1); + error = + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1); TEST_ASSERT(error, "Expected send_offsets..() to fail"); TEST_ASSERT(rd_kafka_error_code(error) == - RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, + RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, "expected send_offsets_to_transaction() to fail with " "group auth error: not %s", rd_kafka_error_name(error)); @@ -1311,25 +1240,25 @@ static void do_test_txn_requires_abort_errors (void) { * @brief Test error handling and recover for when broker goes down during * an ongoing transaction. 
*/ -static void do_test_txn_broker_down_in_txn (rd_bool_t down_coord) { +static void do_test_txn_broker_down_in_txn(rd_bool_t down_coord) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; int32_t coord_id, leader_id, down_id; const char *down_what; rd_kafka_resp_err_t err; - const char *topic = "test"; + const char *topic = "test"; const char *transactional_id = "txnid"; - int msgcnt = 1000; - int remains = 0; + int msgcnt = 1000; + int remains = 0; /* Assign coordinator and leader to two different brokers */ - coord_id = 1; + coord_id = 1; leader_id = 2; if (down_coord) { - down_id = coord_id; + down_id = coord_id; down_what = "coordinator"; } else { - down_id = leader_id; + down_id = leader_id; down_what = "leader"; } @@ -1338,7 +1267,7 @@ static void do_test_txn_broker_down_in_txn (rd_bool_t down_coord) { rk = create_txn_producer(&mcluster, transactional_id, 3, NULL); /* Broker down is not a test-failing error */ - allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; + allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; test_curr->is_fatal_cb = error_is_fatal_cb; err = rd_kafka_mock_topic_create(mcluster, topic, 1, 3); @@ -1354,10 +1283,10 @@ static void do_test_txn_broker_down_in_txn (rd_bool_t down_coord) { TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, - 0, msgcnt / 2, NULL, 0, &remains); + test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt / 2, NULL, 0, &remains); - TEST_SAY("Bringing down %s %"PRId32"\n", down_what, down_id); + TEST_SAY("Bringing down %s %" PRId32 "\n", down_what, down_id); rd_kafka_mock_broker_set_down(mcluster, down_id); rd_kafka_flush(rk, 3000); @@ -1368,20 +1297,18 @@ static void do_test_txn_broker_down_in_txn (rd_bool_t down_coord) { rd_sleep(2); - TEST_SAY("Bringing up %s %"PRId32"\n", down_what, down_id); + TEST_SAY("Bringing up %s %" PRId32 "\n", down_what, down_id); rd_kafka_mock_broker_set_up(mcluster, down_id); 
TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); - TEST_ASSERT(remains == 0, - "%d message(s) were not produced\n", remains); + TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains); rd_kafka_destroy(rk); test_curr->is_fatal_cb = NULL; SUB_TEST_PASS(); - } @@ -1389,16 +1316,18 @@ static void do_test_txn_broker_down_in_txn (rd_bool_t down_coord) { /** * @brief Advance the coord_id to the next broker. */ -static void set_next_coord (rd_kafka_mock_cluster_t *mcluster, - const char *transactional_id, int broker_cnt, - int32_t *coord_idp) { +static void set_next_coord(rd_kafka_mock_cluster_t *mcluster, + const char *transactional_id, + int broker_cnt, + int32_t *coord_idp) { int32_t new_coord_id; new_coord_id = 1 + ((*coord_idp) % (broker_cnt)); - TEST_SAY("Changing transaction coordinator from %"PRId32 - " to %"PRId32"\n", *coord_idp, new_coord_id); - rd_kafka_mock_coordinator_set(mcluster, "transaction", - transactional_id, new_coord_id); + TEST_SAY("Changing transaction coordinator from %" PRId32 " to %" PRId32 + "\n", + *coord_idp, new_coord_id); + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + new_coord_id); *coord_idp = new_coord_id; } @@ -1407,14 +1336,14 @@ static void set_next_coord (rd_kafka_mock_cluster_t *mcluster, * @brief Switch coordinator during a transaction. 
* */ -static void do_test_txn_switch_coordinator (void) { +static void do_test_txn_switch_coordinator(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; int32_t coord_id; - const char *topic = "test"; + const char *topic = "test"; const char *transactional_id = "txnid"; - const int broker_cnt = 5; - const int iterations = 20; + const int broker_cnt = 5; + const int iterations = 20; int i; test_timeout_set(iterations * 10); @@ -1431,21 +1360,21 @@ static void do_test_txn_switch_coordinator (void) { TEST_SAY("Starting transaction\n"); TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); - for (i = 0 ; i < iterations ; i++) { + for (i = 0; i < iterations; i++) { const int msgcnt = 100; - int remains = 0; + int remains = 0; - set_next_coord(mcluster, transactional_id, - broker_cnt, &coord_id); + set_next_coord(mcluster, transactional_id, broker_cnt, + &coord_id); TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, - 0, msgcnt / 2, NULL, 0); + test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt / 2, NULL, 0); if (!(i % 3)) - set_next_coord(mcluster, transactional_id, - broker_cnt, &coord_id); + set_next_coord(mcluster, transactional_id, broker_cnt, + &coord_id); /* Produce remaining messages */ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, @@ -1453,8 +1382,8 @@ static void do_test_txn_switch_coordinator (void) { &remains); if ((i & 1) || !(i % 8)) - set_next_coord(mcluster, transactional_id, - broker_cnt, &coord_id); + set_next_coord(mcluster, transactional_id, broker_cnt, + &coord_id); if (!(i % 5)) { @@ -1478,10 +1407,10 @@ static void do_test_txn_switch_coordinator (void) { * @brief Switch coordinator during a transaction when AddOffsetsToTxn * are sent. #3571. 
*/ -static void do_test_txn_switch_coordinator_refresh (void) { +static void do_test_txn_switch_coordinator_refresh(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; - const char *topic = "test"; + const char *topic = "test"; const char *transactional_id = "txnid"; rd_kafka_topic_partition_list_t *offsets; rd_kafka_consumer_group_metadata_t *cgmetadata; @@ -1509,16 +1438,14 @@ static void do_test_txn_switch_coordinator_refresh (void) { * Send some arbitrary offsets. */ offsets = rd_kafka_topic_partition_list_new(4); - rd_kafka_topic_partition_list_add(offsets, "srctopic", - 3)->offset = 12; - rd_kafka_topic_partition_list_add(offsets, "srctop2", - 99)->offset = 99999; + rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)->offset = 12; + rd_kafka_topic_partition_list_add(offsets, "srctop2", 99)->offset = + 99999; cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( - rk, offsets, - cgmetadata, 20*1000)); + rk, offsets, cgmetadata, 20 * 1000)); rd_kafka_consumer_group_metadata_destroy(cgmetadata); rd_kafka_topic_partition_list_destroy(offsets); @@ -1540,7 +1467,7 @@ static void do_test_txn_switch_coordinator_refresh (void) { * @brief Test fatal error handling when transactions are not supported * by the broker. 
*/ -static void do_test_txns_not_supported (void) { +static void do_test_txns_not_supported(void) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_mock_cluster_t *mcluster; @@ -1561,31 +1488,28 @@ static void do_test_txns_not_supported (void) { mcluster = rd_kafka_mock_cluster_new(rk, 3); /* Disable InitProducerId */ - rd_kafka_mock_set_apiversion(mcluster, 22/*InitProducerId*/, -1, -1); + rd_kafka_mock_set_apiversion(mcluster, 22 /*InitProducerId*/, -1, -1); rd_kafka_brokers_add(rk, rd_kafka_mock_cluster_bootstraps(mcluster)); - error = rd_kafka_init_transactions(rk, 5*1000); + error = rd_kafka_init_transactions(rk, 5 * 1000); TEST_SAY("init_transactions() returned %s: %s\n", error ? rd_kafka_error_name(error) : "success", error ? rd_kafka_error_string(error) : "success"); TEST_ASSERT(error, "Expected init_transactions() to fail"); TEST_ASSERT(rd_kafka_error_code(error) == - RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, + RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, "Expected init_transactions() to fail with %s, not %s: %s", rd_kafka_err2name(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE), - rd_kafka_error_name(error), - rd_kafka_error_string(error)); + rd_kafka_error_name(error), rd_kafka_error_string(error)); rd_kafka_error_destroy(error); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("test"), - RD_KAFKA_V_KEY("test", 4), - RD_KAFKA_V_END); + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("test"), + RD_KAFKA_V_KEY("test", 4), RD_KAFKA_V_END); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__FATAL, "Expected producev() to fail with %s, not %s", rd_kafka_err2name(RD_KAFKA_RESP_ERR__FATAL), @@ -1602,7 +1526,7 @@ static void do_test_txns_not_supported (void) { /** * @brief CONCURRENT_TRANSACTION on AddOffsets.. should be retried. 
*/ -static void do_test_txns_send_offsets_concurrent_is_retried (void) { +static void do_test_txns_send_offsets_concurrent_is_retried(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_resp_err_t err; @@ -1619,10 +1543,8 @@ static void do_test_txns_send_offsets_concurrent_is_retried (void) { TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END); + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); /* Wait for messages to be delivered */ @@ -1634,23 +1556,22 @@ static void do_test_txns_send_offsets_concurrent_is_retried (void) { * infinite retries. */ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_AddOffsetsToTxn, - 1+5,/* first request + some retries */ - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); + mcluster, RD_KAFKAP_AddOffsetsToTxn, + 1 + 5, /* first request + some retries */ + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); offsets = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)->offset = 12; cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); - TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(rk, offsets, - cgmetadata, -1)); + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); rd_kafka_consumer_group_metadata_destroy(cgmetadata); 
rd_kafka_topic_partition_list_destroy(offsets); @@ -1668,7 +1589,7 @@ static void do_test_txns_send_offsets_concurrent_is_retried (void) { /** * @brief Verify that request timeouts don't cause crash (#2913). */ -static void do_test_txns_no_timeout_crash (void) { +static void do_test_txns_no_timeout_crash(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_error_t *error; @@ -1678,19 +1599,16 @@ static void do_test_txns_no_timeout_crash (void) { SUB_TEST_QUICK(); - rk = create_txn_producer(&mcluster, "txnid", 3, - "socket.timeout.ms", "1000", - "transaction.timeout.ms", "5000", - NULL); + rk = + create_txn_producer(&mcluster, "txnid", 3, "socket.timeout.ms", + "1000", "transaction.timeout.ms", "5000", NULL); TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END); + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); test_flush(rk, -1); @@ -1707,14 +1625,13 @@ static void do_test_txns_no_timeout_crash (void) { rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)->offset = 12; cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); - error = rd_kafka_send_offsets_to_transaction(rk, offsets, - cgmetadata, -1); + error = + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1); TEST_ASSERT(error, "Expected send_offsets..() to fail"); TEST_SAY("send_offsets..() failed with %serror: %s\n", rd_kafka_error_is_retriable(error) ? 
"retriable " : "", rd_kafka_error_string(error)); - TEST_ASSERT(rd_kafka_error_code(error) == - RD_KAFKA_RESP_ERR__TIMED_OUT, + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, "expected send_offsets_to_transaction() to fail with " "timeout, not %s", rd_kafka_error_name(error)); @@ -1731,8 +1648,8 @@ static void do_test_txns_no_timeout_crash (void) { rd_kafka_err2str(err)); TEST_SAY("Retrying send_offsets..()\n"); - error = rd_kafka_send_offsets_to_transaction(rk, offsets, - cgmetadata, -1); + error = + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1); TEST_ASSERT(!error, "Expected send_offsets..() to succeed, got: %s", rd_kafka_error_string(error)); @@ -1749,22 +1666,18 @@ static void do_test_txns_no_timeout_crash (void) { /** * @brief Test auth failure handling. */ -static void do_test_txn_auth_failure (int16_t ApiKey, - rd_kafka_resp_err_t ErrorCode) { +static void do_test_txn_auth_failure(int16_t ApiKey, + rd_kafka_resp_err_t ErrorCode) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_error_t *error; - SUB_TEST_QUICK("ApiKey=%s ErrorCode=%s", - rd_kafka_ApiKey2str(ApiKey), + SUB_TEST_QUICK("ApiKey=%s ErrorCode=%s", rd_kafka_ApiKey2str(ApiKey), rd_kafka_err2name(ErrorCode)); rk = create_txn_producer(&mcluster, "txnid", 3, NULL); - rd_kafka_mock_push_request_errors(mcluster, - ApiKey, - 1, - ErrorCode); + rd_kafka_mock_push_request_errors(mcluster, ApiKey, 1, ErrorCode); error = rd_kafka_init_transactions(rk, 5000); TEST_ASSERT(error, "Expected init_transactions() to fail"); @@ -1773,8 +1686,7 @@ static void do_test_txn_auth_failure (int16_t ApiKey, rd_kafka_err2name(rd_kafka_error_code(error)), rd_kafka_error_string(error)); TEST_ASSERT(rd_kafka_error_code(error) == ErrorCode, - "Expected error %s, not %s", - rd_kafka_err2name(ErrorCode), + "Expected error %s, not %s", rd_kafka_err2name(ErrorCode), rd_kafka_err2name(rd_kafka_error_code(error))); TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected error to be 
fatal"); @@ -1795,31 +1707,29 @@ static void do_test_txn_auth_failure (int16_t ApiKey, * eventually resulting in an unabortable error and failure to * re-init the transactional producer. */ -static void do_test_txn_flush_timeout (void) { +static void do_test_txn_flush_timeout(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_topic_partition_list_t *offsets; rd_kafka_consumer_group_metadata_t *cgmetadata; rd_kafka_error_t *error; - const char *txnid = "myTxnId"; - const char *topic = "myTopic"; + const char *txnid = "myTxnId"; + const char *topic = "myTopic"; const int32_t coord_id = 2; - int msgcounter = 0; - rd_bool_t is_retry = rd_false; + int msgcounter = 0; + rd_bool_t is_retry = rd_false; SUB_TEST_QUICK(); - rk = create_txn_producer(&mcluster, txnid, 3, - "message.timeout.ms", "10000", - "transaction.timeout.ms", "10000", + rk = create_txn_producer(&mcluster, txnid, 3, "message.timeout.ms", + "10000", "transaction.timeout.ms", "10000", /* Speed up coordinator reconnect */ - "reconnect.backoff.max.ms", "1000", - NULL); + "reconnect.backoff.max.ms", "1000", NULL); /* Broker down is not a test-failing error */ test_curr->is_fatal_cb = error_is_fatal_cb; - allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; + allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; rd_kafka_mock_topic_create(mcluster, topic, 2, 3); @@ -1831,12 +1741,12 @@ static void do_test_txn_flush_timeout (void) { */ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); - retry: +retry: if (!is_retry) { /* First attempt should fail. */ test_curr->ignore_dr_err = rd_true; - test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; /* Assign invalid partition leaders for some partitions so * that messages will not be delivered. */ @@ -1846,12 +1756,12 @@ static void do_test_txn_flush_timeout (void) { } else { /* The retry should succeed */ test_curr->ignore_dr_err = rd_false; - test_curr->exp_dr_err = is_retry ? 
RD_KAFKA_RESP_ERR_NO_ERROR : - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; + test_curr->exp_dr_err = is_retry + ? RD_KAFKA_RESP_ERR_NO_ERROR + : RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 1); - } @@ -1867,8 +1777,8 @@ static void do_test_txn_flush_timeout (void) { &msgcounter); test_produce_msgs2_nowait(rk, topic, 1, 0, 0, 100, NULL, 10, &msgcounter); - test_produce_msgs2_nowait(rk, topic, RD_KAFKA_PARTITION_UA, - 0, 0, 100, NULL, 10, &msgcounter); + test_produce_msgs2_nowait(rk, topic, RD_KAFKA_PARTITION_UA, 0, 0, 100, + NULL, 10, &msgcounter); /* @@ -1877,16 +1787,15 @@ static void do_test_txn_flush_timeout (void) { offsets = rd_kafka_topic_partition_list_new(4); rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)->offset = 12; rd_kafka_topic_partition_list_add(offsets, "srctop2", 99)->offset = - 999999111; + 999999111; rd_kafka_topic_partition_list_add(offsets, "srctopic", 0)->offset = 999; rd_kafka_topic_partition_list_add(offsets, "srctop2", 3499)->offset = - 123456789; + 123456789; cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); - TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( - rk, offsets, - cgmetadata, -1)); + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); rd_kafka_consumer_group_metadata_destroy(cgmetadata); rd_kafka_topic_partition_list_destroy(offsets); @@ -1895,7 +1804,7 @@ static void do_test_txn_flush_timeout (void) { if (!is_retry) { /* Now disconnect the coordinator. 
*/ - TEST_SAY("Disconnecting transaction coordinator %"PRId32"\n", + TEST_SAY("Disconnecting transaction coordinator %" PRId32 "\n", coord_id); rd_kafka_mock_broker_set_down(mcluster, coord_id); } @@ -1906,15 +1815,13 @@ static void do_test_txn_flush_timeout (void) { error = rd_kafka_commit_transaction(rk, -1); if (!is_retry) { - TEST_ASSERT(error != NULL, - "Expected commit to fail"); + TEST_ASSERT(error != NULL, "Expected commit to fail"); TEST_SAY("commit_transaction() failed (expectedly): %s\n", rd_kafka_error_string(error)); rd_kafka_error_destroy(error); } else { - TEST_ASSERT(!error, - "Expected commit to succeed, not: %s", + TEST_ASSERT(!error, "Expected commit to succeed, not: %s", rd_kafka_error_string(error)); } @@ -1950,7 +1857,7 @@ static void do_test_txn_flush_timeout (void) { * This is somewhat of a race condition so we need to perform a couple of * iterations before it hits, usually 2 or 3, so we try at least 15 times. */ -static void do_test_txn_coord_req_destroy (void) { +static void do_test_txn_coord_req_destroy(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; int i; @@ -1964,7 +1871,7 @@ static void do_test_txn_coord_req_destroy (void) { TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); - for (i = 0 ; i < 15 ; i++) { + for (i = 0; i < 15; i++) { rd_kafka_error_t *error; rd_kafka_resp_err_t err; rd_kafka_topic_partition_list_t *offsets; @@ -1978,37 +1885,31 @@ static void do_test_txn_coord_req_destroy (void) { * Inject errors to trigger retries */ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_AddPartitionsToTxn, - 2,/* first request + number of internal retries */ - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); + mcluster, RD_KAFKAP_AddPartitionsToTxn, + 2, /* first request + number of internal retries */ + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_AddOffsetsToTxn, - 
1,/* first request + number of internal retries */ - RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); + mcluster, RD_KAFKAP_AddOffsetsToTxn, + 1, /* first request + number of internal retries */ + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_Produce, - 4, - RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, - RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, - RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, - RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED); + mcluster, RD_KAFKAP_Produce, 4, + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED); /* FIXME: When KIP-360 is supported, add this error: * RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER */ - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); @@ -2019,25 +1920,26 @@ static void do_test_txn_coord_req_destroy (void) { */ offsets = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)-> - offset = 12; + rd_kafka_topic_partition_list_add(offsets, "srctopic", 3) + ->offset = 12; cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1); - TEST_SAY("send_offsets_to_transaction() #%d: %s\n", - i, rd_kafka_error_string(error)); + TEST_SAY("send_offsets_to_transaction() #%d: %s\n", i, + rd_kafka_error_string(error)); /* As we can't control the exact timing and sequence * of requests this sometimes fails and sometimes succeeds, * but we run the test enough times 
to trigger at least * one failure. */ if (error) { - TEST_SAY("send_offsets_to_transaction() #%d " - "failed (expectedly): %s\n", - i, rd_kafka_error_string(error)); + TEST_SAY( + "send_offsets_to_transaction() #%d " + "failed (expectedly): %s\n", + i, rd_kafka_error_string(error)); TEST_ASSERT(rd_kafka_error_txn_requires_abort(error), "Expected abortable error for #%d", i); rd_kafka_error_destroy(error); @@ -2066,24 +1968,24 @@ static void do_test_txn_coord_req_destroy (void) { static rd_atomic32_t multi_find_req_cnt; static rd_kafka_resp_err_t -multi_find_on_response_received_cb (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - int64_t rtt, - rd_kafka_resp_err_t err, - void *ic_opaque) { +multi_find_on_response_received_cb(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque) { rd_kafka_mock_cluster_t *mcluster = rd_kafka_handle_mock_cluster(rk); rd_bool_t done = rd_atomic32_get(&multi_find_req_cnt) > 10000; if (ApiKey != RD_KAFKAP_AddOffsetsToTxn || done) return RD_KAFKA_RESP_ERR_NO_ERROR; - TEST_SAY("on_response_received_cb: %s: %s: brokerid %"PRId32 + TEST_SAY("on_response_received_cb: %s: %s: brokerid %" PRId32 ", ApiKey %hd, CorrId %d, rtt %.2fms, %s: %s\n", rd_kafka_name(rk), brokername, brokerid, ApiKey, CorrId, rtt != -1 ? (float)rtt / 1000.0 : 0.0, @@ -2134,7 +2036,7 @@ multi_find_on_response_received_cb (rd_kafka_t *rk, * 7. FindCoordinatorResponse from 5 is received, references the destroyed rko * and crashes. 
*/ -static void do_test_txn_coord_req_multi_find (void) { +static void do_test_txn_coord_req_multi_find(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_error_t *error; @@ -2149,7 +2051,7 @@ static void do_test_txn_coord_req_multi_find (void) { rd_atomic32_init(&multi_find_req_cnt, 0); on_response_received_cb = multi_find_on_response_received_cb; - rk = create_txn_producer(&mcluster, txnid, 3, + rk = create_txn_producer(&mcluster, txnid, 3, /* Need connections to all brokers so we * can trigger coord_req_fsm events * by toggling connections. */ @@ -2172,19 +2074,17 @@ static void do_test_txn_coord_req_multi_find (void) { rd_kafka_mock_partition_set_leader(mcluster, topic, 2, 3); /* Broker down is not a test-failing error */ - allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; + allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; test_curr->is_fatal_cb = error_is_fatal_cb; TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - for (i = 0 ; i < 3 ; i++) { - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(i), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END); + for (i = 0; i < 3; i++) { + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(i), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); } @@ -2195,7 +2095,7 @@ static void do_test_txn_coord_req_multi_find (void) { * we need to make those requests slow so that multiple requests are * sent. 
*/ - for (i = 1 ; i <= 3 ; i++) + for (i = 1; i <= 3; i++) rd_kafka_mock_broker_set_rtt(mcluster, (int32_t)i, 4000); /* @@ -2203,13 +2103,12 @@ static void do_test_txn_coord_req_multi_find (void) { */ offsets = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)-> - offset = 12; + rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)->offset = 12; cgmetadata = rd_kafka_consumer_group_metadata_new(groupid); - error = rd_kafka_send_offsets_to_transaction(rk, offsets, - cgmetadata, -1); + error = + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1); TEST_SAY("send_offsets_to_transaction() %s\n", rd_kafka_error_string(error)); @@ -2220,7 +2119,7 @@ static void do_test_txn_coord_req_multi_find (void) { rd_kafka_topic_partition_list_destroy(offsets); /* Clear delay */ - for (i = 1 ; i <= 3 ; i++) + for (i = 1; i <= 3; i++) rd_kafka_mock_broker_set_rtt(mcluster, (int32_t)i, 0); rd_sleep(5); @@ -2258,25 +2157,24 @@ static void do_test_txn_coord_req_multi_find (void) { static rd_atomic32_t multi_addparts_resp_cnt; static rd_kafka_resp_err_t -multi_addparts_response_received_cb (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - int64_t rtt, - rd_kafka_resp_err_t err, - void *ic_opaque) { +multi_addparts_response_received_cb(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque) { if (ApiKey == RD_KAFKAP_AddPartitionsToTxn) { - TEST_SAY("on_response_received_cb: %s: %s: brokerid %"PRId32 - ", ApiKey %hd, CorrId %d, rtt %.2fms, count %"PRId32 + TEST_SAY("on_response_received_cb: %s: %s: brokerid %" PRId32 + ", ApiKey %hd, CorrId %d, rtt %.2fms, count %" PRId32 ": %s\n", - rd_kafka_name(rk), brokername, brokerid, - ApiKey, CorrId, - rtt != -1 ? 
(float)rtt / 1000.0 : 0.0, + rd_kafka_name(rk), brokername, brokerid, ApiKey, + CorrId, rtt != -1 ? (float)rtt / 1000.0 : 0.0, rd_atomic32_get(&multi_addparts_resp_cnt), rd_kafka_err2name(err)); @@ -2287,7 +2185,7 @@ multi_addparts_response_received_cb (rd_kafka_t *rk, } -static void do_test_txn_addparts_req_multi (void) { +static void do_test_txn_addparts_req_multi(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; const char *txnid = "txnid", *topic = "mytopic"; @@ -2298,8 +2196,7 @@ static void do_test_txn_addparts_req_multi (void) { rd_atomic32_init(&multi_addparts_resp_cnt, 0); on_response_received_cb = multi_addparts_response_received_cb; - rk = create_txn_producer(&mcluster, txnid, 3, - "linger.ms", "0", + rk = create_txn_producer(&mcluster, txnid, 3, "linger.ms", "0", "message.timeout.ms", "9000", /* Set up on_response_received interceptor */ "on_response_received", "", NULL); @@ -2327,8 +2224,7 @@ static void do_test_txn_addparts_req_multi (void) { */ TEST_SAY("Running seed transaction\n"); TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), + TEST_CALL_ERR__(rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_VALUE("seed", 4), RD_KAFKA_V_END)); TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000)); @@ -2348,41 +2244,35 @@ static void do_test_txn_addparts_req_multi (void) { rd_kafka_mock_broker_set_rtt(mcluster, txn_coord, 1000); /* Produce to partition 0 */ - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); - rd_usleep(500*1000, NULL); + rd_usleep(500 * 1000, NULL); /* Produce to partition 1 */ - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(1), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + 
TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(1), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); TEST_SAY("Waiting for two AddPartitionsToTxnResponse\n"); while (rd_atomic32_get(&multi_addparts_resp_cnt) < 2) - rd_usleep(10*1000, NULL); + rd_usleep(10 * 1000, NULL); - TEST_SAY("%"PRId32" AddPartitionsToTxnResponses seen\n", + TEST_SAY("%" PRId32 " AddPartitionsToTxnResponses seen\n", rd_atomic32_get(&multi_addparts_resp_cnt)); /* Produce to partition 2, this message will hang in * queue if the bug is not fixed. */ - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(2), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(2), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); /* Allow some extra time for things to settle before committing * transaction. */ - rd_usleep(1000*1000, NULL); + rd_usleep(1000 * 1000, NULL); - TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 10*1000)); + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 10 * 1000)); /* All done */ rd_kafka_destroy(rk); @@ -2401,13 +2291,13 @@ static void do_test_txn_addparts_req_multi (void) { * - OffsetFetch triggered by committed() (and similar code paths) * - OffsetFetch triggered by assign() */ -static void do_test_unstable_offset_commit (void) { +static void do_test_unstable_offset_commit(void) { rd_kafka_t *rk, *c; rd_kafka_conf_t *c_conf; rd_kafka_mock_cluster_t *mcluster; rd_kafka_topic_partition_list_t *offsets; - const char *topic = "mytopic"; - const int msgcnt = 100; + const char *topic = "mytopic"; + const int msgcnt = 100; const int64_t offset_to_commit = msgcnt / 2; int i; int remains = 0; @@ -2430,16 +2320,16 @@ static void do_test_unstable_offset_commit (void) { * something to read. 
*/ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt, - NULL, 0, &remains); + test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt, NULL, 0, + &remains); TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); /* Commit offset */ offsets = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset = - offset_to_commit; - TEST_CALL_ERR__(rd_kafka_commit(c, offsets, 0/*sync*/)); + offset_to_commit; + TEST_CALL_ERR__(rd_kafka_commit(c, offsets, 0 /*sync*/)); rd_kafka_topic_partition_list_destroy(offsets); /* Retrieve offsets by calling committed(). @@ -2448,52 +2338,48 @@ static void do_test_unstable_offset_commit (void) { * the API timeout is higher than the amount of time the retries will * take and thus succeed, and on the second iteration the timeout * will be lower and thus fail. */ - for (i = 0 ; i < 2 ; i++) { + for (i = 0; i < 2; i++) { rd_kafka_resp_err_t err; - rd_kafka_resp_err_t exp_err = i == 0 ? - RD_KAFKA_RESP_ERR_NO_ERROR : - RD_KAFKA_RESP_ERR__TIMED_OUT; - int timeout_ms = exp_err ? 200 : 5*1000; + rd_kafka_resp_err_t exp_err = + i == 0 ? RD_KAFKA_RESP_ERR_NO_ERROR + : RD_KAFKA_RESP_ERR__TIMED_OUT; + int timeout_ms = exp_err ? 
200 : 5 * 1000; rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_OffsetFetch, - 1+5,/* first request + some retries */ - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT); + mcluster, RD_KAFKAP_OffsetFetch, + 1 + 5, /* first request + some retries */ + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT); offsets = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(offsets, topic, 0); err = rd_kafka_committed(c, offsets, timeout_ms); - TEST_SAY("#%d: committed() returned %s (expected %s)\n", - i, - rd_kafka_err2name(err), - rd_kafka_err2name(exp_err)); + TEST_SAY("#%d: committed() returned %s (expected %s)\n", i, + rd_kafka_err2name(err), rd_kafka_err2name(exp_err)); TEST_ASSERT(err == exp_err, - "#%d: Expected committed() to return %s, not %s", - i, - rd_kafka_err2name(exp_err), - rd_kafka_err2name(err)); + "#%d: Expected committed() to return %s, not %s", i, + rd_kafka_err2name(exp_err), rd_kafka_err2name(err)); TEST_ASSERT(offsets->cnt == 1, "Expected 1 committed offset, not %d", offsets->cnt); if (!exp_err) - TEST_ASSERT(offsets->elems[0].offset == offset_to_commit, - "Expected committed offset %"PRId64", " - "not %"PRId64, - offset_to_commit, - offsets->elems[0].offset); + TEST_ASSERT(offsets->elems[0].offset == + offset_to_commit, + "Expected committed offset %" PRId64 + ", " + "not %" PRId64, + offset_to_commit, offsets->elems[0].offset); else TEST_ASSERT(offsets->elems[0].offset < 0, "Expected no committed offset, " - "not %"PRId64, + "not %" PRId64, offsets->elems[0].offset); 
rd_kafka_topic_partition_list_destroy(offsets); @@ -2502,25 +2388,23 @@ static void do_test_unstable_offset_commit (void) { TEST_SAY("Phase 2: OffsetFetch lookup through assignment\n"); offsets = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset = - RD_KAFKA_OFFSET_STORED; + RD_KAFKA_OFFSET_STORED; rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_OffsetFetch, - 1+5,/* first request + some retries */ - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, - RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT); + mcluster, RD_KAFKAP_OffsetFetch, + 1 + 5, /* first request + some retries */ + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT); test_consumer_incremental_assign("assign", c, offsets); rd_kafka_topic_partition_list_destroy(offsets); - test_consumer_poll_exact("consume", c, 0, - 1/*eof*/, 0, msgcnt/2, - rd_true/*exact counts*/, NULL); + test_consumer_poll_exact("consume", c, 0, 1 /*eof*/, 0, msgcnt / 2, + rd_true /*exact counts*/, NULL); /* All done */ rd_kafka_destroy(c); @@ -2535,34 +2419,33 @@ static void do_test_unstable_offset_commit (void) { * and commit_transaction() is called, the transaction must not succeed. 
* https://github.com/confluentinc/confluent-kafka-dotnet/issues/1568 */ -static void do_test_commit_after_msg_timeout (void) { +static void do_test_commit_after_msg_timeout(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; int32_t coord_id, leader_id; rd_kafka_resp_err_t err; rd_kafka_error_t *error; - const char *topic = "test"; + const char *topic = "test"; const char *transactional_id = "txnid"; - int remains = 0; + int remains = 0; SUB_TEST_QUICK(); /* Assign coordinator and leader to two different brokers */ - coord_id = 1; + coord_id = 1; leader_id = 2; rk = create_txn_producer(&mcluster, transactional_id, 3, "message.timeout.ms", "5000", - "transaction.timeout.ms", "10000", - NULL); + "transaction.timeout.ms", "10000", NULL); /* Broker down is not a test-failing error */ - allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; + allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; test_curr->is_fatal_cb = error_is_fatal_cb; - test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; err = rd_kafka_mock_topic_create(mcluster, topic, 1, 3); - TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str (err)); + TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err)); rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, coord_id); @@ -2574,7 +2457,7 @@ static void do_test_commit_after_msg_timeout (void) { TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - TEST_SAY("Bringing down %"PRId32"\n", leader_id); + TEST_SAY("Bringing down %" PRId32 "\n", leader_id); rd_kafka_mock_broker_set_down(mcluster, leader_id); rd_kafka_mock_broker_set_down(mcluster, coord_id); @@ -2584,7 +2467,7 @@ static void do_test_commit_after_msg_timeout (void) { TEST_ASSERT(error != NULL, "expected commit_transaciton() to fail"); TEST_SAY("commit_transaction() failed (as expected): %s\n", rd_kafka_error_string(error)); - TEST_ASSERT(rd_kafka_error_txn_requires_abort (error), + 
TEST_ASSERT(rd_kafka_error_txn_requires_abort(error), "Expected txn_requires_abort error"); rd_kafka_error_destroy(error); @@ -2595,21 +2478,19 @@ static void do_test_commit_after_msg_timeout (void) { TEST_SAY("Aborting transaction\n"); TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); - TEST_ASSERT(remains == 0, - "%d message(s) were not flushed\n", remains); + TEST_ASSERT(remains == 0, "%d message(s) were not flushed\n", remains); TEST_SAY("Attempting second transaction, which should succeed\n"); - allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; + allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; test_curr->is_fatal_cb = error_is_fatal_cb; - test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 1, NULL, 0, &remains); TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); - TEST_ASSERT(remains == 0, - "%d message(s) were not produced\n", remains); + TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains); rd_kafka_destroy(rk); @@ -2624,7 +2505,7 @@ static void do_test_commit_after_msg_timeout (void) { * during an ongoing transaction. * The transaction should instead enter the abortable state. 
*/ -static void do_test_out_of_order_seq (void) { +static void do_test_out_of_order_seq(void) { rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_error_t *error; @@ -2635,8 +2516,7 @@ static void do_test_out_of_order_seq (void) { SUB_TEST_QUICK(); - rk = create_txn_producer(&mcluster, txnid, 3, - "batch.num.messages", "1", + rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1", NULL); rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, @@ -2645,7 +2525,7 @@ static void do_test_out_of_order_seq (void) { rd_kafka_mock_partition_set_leader(mcluster, "mytopic", 0, leader); test_curr->ignore_dr_err = rd_true; - test_curr->is_fatal_cb = NULL; + test_curr->is_fatal_cb = NULL; TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); @@ -2657,45 +2537,35 @@ static void do_test_out_of_order_seq (void) { /* Produce one seeding message first to get the leader up and running */ - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); test_flush(rk, -1); /* Let partition leader have a latency of 2 seconds * so that we can have multiple messages in-flight. */ - rd_kafka_mock_broker_set_rtt(mcluster, leader, 2*1000); + rd_kafka_mock_broker_set_rtt(mcluster, leader, 2 * 1000); /* Produce a message, let it fail with with different errors, * ending with OUT_OF_ORDER which previously triggered an * Epoch bump. 
*/ rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_Produce, - 3, - RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, - RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER); + mcluster, RD_KAFKAP_Produce, 3, + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER); /* Produce three messages that will be delayed * and have errors injected.*/ - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); /* Now sleep a short while so that the messages are processed * by the broker and errors are returned. */ @@ -2706,16 +2576,13 @@ static void do_test_out_of_order_seq (void) { /* Produce a fifth message, should fail with ERR__STATE since * the transaction should have entered the abortable state. 
*/ - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END); + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__STATE, "Expected produce() to fail with ERR__STATE, not %s", rd_kafka_err2name(err)); - TEST_SAY("produce() failed as expected: %s\n", - rd_kafka_err2str(err)); + TEST_SAY("produce() failed as expected: %s\n", rd_kafka_err2str(err)); /* Commit the transaction, should fail with abortable error. */ TIMING_START(&timing, "commit_transaction(-1)"); @@ -2739,11 +2606,9 @@ static void do_test_out_of_order_seq (void) { * producer can recover. */ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - TEST_CALL_ERR__(rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END)); + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); @@ -2753,7 +2618,7 @@ static void do_test_out_of_order_seq (void) { } -int main_0105_transactions_mock (int argc, char **argv) { +int main_0105_transactions_mock(int argc, char **argv) { if (test_needs_auth()) { TEST_SKIP("Mock cluster does not support SSL/SASL\n"); return 0; @@ -2804,12 +2669,12 @@ int main_0105_transactions_mock (int argc, char **argv) { do_test_txns_no_timeout_crash(); do_test_txn_auth_failure( - RD_KAFKAP_InitProducerId, - RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED); + RD_KAFKAP_InitProducerId, + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED); do_test_txn_auth_failure( - RD_KAFKAP_FindCoordinator, - RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED); + RD_KAFKAP_FindCoordinator, + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED); do_test_txn_flush_timeout(); diff --git a/tests/0106-cgrp_sess_timeout.c b/tests/0106-cgrp_sess_timeout.c index 
be5da59c8a..0451e4a00c 100644 --- a/tests/0106-cgrp_sess_timeout.c +++ b/tests/0106-cgrp_sess_timeout.c @@ -41,19 +41,18 @@ static int rebalance_cnt; static rd_kafka_resp_err_t rebalance_exp_event; static rd_kafka_resp_err_t commit_exp_err; -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { rebalance_cnt++; - TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", - rebalance_cnt, rd_kafka_err2name(err), parts->cnt); + TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", rebalance_cnt, + rd_kafka_err2name(err), parts->cnt); - TEST_ASSERT(err == rebalance_exp_event, - "Expected rebalance event %s, not %s", - rd_kafka_err2name(rebalance_exp_event), - rd_kafka_err2name(err)); + TEST_ASSERT( + err == rebalance_exp_event, "Expected rebalance event %s, not %s", + rd_kafka_err2name(rebalance_exp_event), rd_kafka_err2name(err)); if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { test_consumer_assign("assign", rk, parts); @@ -74,7 +73,7 @@ static void rebalance_cb (rd_kafka_t *rk, rd_sleep(1); commit_err = rd_kafka_commit( - rk, parts, !strcmp(commit_type, "async")); + rk, parts, !strcmp(commit_type, "async")); if (!strcmp(commit_type, "async")) TEST_ASSERT(!commit_err, @@ -82,15 +81,16 @@ static void rebalance_cb (rd_kafka_t *rk, "but it returned %s", rd_kafka_err2name(commit_err)); else - TEST_ASSERT(commit_err == commit_exp_err || - (!commit_exp_err && - commit_err == + TEST_ASSERT( + commit_err == commit_exp_err || + (!commit_exp_err && + commit_err == RD_KAFKA_RESP_ERR__NO_OFFSET), - "Expected %s commit to return %s, " - "not %s", - commit_type, - rd_kafka_err2name(commit_exp_err), - rd_kafka_err2name(commit_err)); + "Expected %s commit to return %s, " + "not %s", + commit_type, + rd_kafka_err2name(commit_exp_err), + rd_kafka_err2name(commit_err)); } 
test_consumer_unassign("unassign", rk); @@ -106,14 +106,15 @@ static void rebalance_cb (rd_kafka_t *rk, /** * @brief Wait for an expected rebalance event, or fail. */ -static void expect_rebalance (const char *what, rd_kafka_t *c, - rd_kafka_resp_err_t exp_event, - int timeout_s) { +static void expect_rebalance(const char *what, + rd_kafka_t *c, + rd_kafka_resp_err_t exp_event, + int timeout_s) { int64_t tmout = test_clock() + (timeout_s * 1000000); int start_cnt = rebalance_cnt; - TEST_SAY("Waiting for %s (%s) for %ds\n", - what, rd_kafka_err2name(exp_event), timeout_s); + TEST_SAY("Waiting for %s (%s) for %ds\n", what, + rd_kafka_err2name(exp_event), timeout_s); rebalance_exp_event = exp_event; @@ -127,8 +128,8 @@ static void expect_rebalance (const char *what, rd_kafka_t *c, return; } - TEST_FAIL("Timed out waiting for %s (%s)\n", - what, rd_kafka_err2name(exp_event)); + TEST_FAIL("Timed out waiting for %s (%s)\n", what, + rd_kafka_err2name(exp_event)); } @@ -137,16 +138,16 @@ static void expect_rebalance (const char *what, rd_kafka_t *c, * * @param use_commit_type "auto", "sync" (manual), "async" (manual) */ -static void do_test_session_timeout (const char *use_commit_type) { +static void do_test_session_timeout(const char *use_commit_type) { const char *bootstraps; rd_kafka_mock_cluster_t *mcluster; rd_kafka_conf_t *conf; rd_kafka_t *c; const char *groupid = "mygroup"; - const char *topic = "test"; + const char *topic = "test"; rebalance_cnt = 0; - commit_type = use_commit_type; + commit_type = use_commit_type; SUB_TEST0(!strcmp(use_commit_type, "sync") /*quick*/, "Test session timeout with %s commit", use_commit_type); @@ -156,10 +157,8 @@ static void do_test_session_timeout (const char *use_commit_type) { rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); /* Seed the topic with messages */ - test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, - "bootstrap.servers", bootstraps, - "batch.num.messages", "10", - NULL); + test_produce_msgs_easy_v(topic, 
0, 0, 0, 100, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", NULL); test_conf_init(&conf, NULL, 30); test_conf_set(conf, "bootstrap.servers", bootstraps); @@ -177,20 +176,17 @@ static void do_test_session_timeout (const char *use_commit_type) { /* Let Heartbeats fail after a couple of successful ones */ rd_kafka_mock_push_request_errors( - mcluster, RD_KAFKAP_Heartbeat, - 9, - RD_KAFKA_RESP_ERR_NO_ERROR, - RD_KAFKA_RESP_ERR_NO_ERROR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR); + mcluster, RD_KAFKAP_Heartbeat, 9, RD_KAFKA_RESP_ERR_NO_ERROR, + RD_KAFKA_RESP_ERR_NO_ERROR, RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR); expect_rebalance("initial assignment", c, - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, 5+2); + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, 5 + 2); /* Consume a couple of messages so that we have something to commit */ test_consumer_poll("consume", c, 0, -1, 0, 10, NULL); @@ -200,15 +196,15 @@ static void do_test_session_timeout (const char *use_commit_type) { commit_exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID; expect_rebalance("session timeout revoke", c, - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, 2+5+2); + RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, 2 + 5 + 2); expect_rebalance("second assignment", c, - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, 5+2); + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, 5 + 2); /* Final rebalance in close(). * Its commit will work. 
*/ rebalance_exp_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; - commit_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + commit_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; test_consumer_close(c); @@ -223,13 +219,13 @@ static void do_test_session_timeout (const char *use_commit_type) { /** * @brief Attempt manual commit when assignment has been lost (#3217) */ -static void do_test_commit_on_lost (void) { +static void do_test_commit_on_lost(void) { const char *bootstraps; rd_kafka_mock_cluster_t *mcluster; rd_kafka_conf_t *conf; rd_kafka_t *c; const char *groupid = "mygroup"; - const char *topic = "test"; + const char *topic = "test"; rd_kafka_resp_err_t err; SUB_TEST(); @@ -241,10 +237,8 @@ static void do_test_commit_on_lost (void) { rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); /* Seed the topic with messages */ - test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, - "bootstrap.servers", bootstraps, - "batch.num.messages", "10", - NULL); + test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", NULL); test_conf_init(&conf, NULL, 30); test_conf_set(conf, "bootstrap.servers", bootstraps); @@ -273,7 +267,7 @@ static void do_test_commit_on_lost (void) { TEST_SAY("Assignment is lost, committing\n"); /* Perform manual commit */ - err = rd_kafka_commit(c, NULL, 0/*sync*/); + err = rd_kafka_commit(c, NULL, 0 /*sync*/); TEST_SAY("commit() returned: %s\n", rd_kafka_err2name(err)); TEST_ASSERT(err, "expected commit to fail"); @@ -289,7 +283,7 @@ static void do_test_commit_on_lost (void) { } -int main_0106_cgrp_sess_timeout (int argc, char **argv) { +int main_0106_cgrp_sess_timeout(int argc, char **argv) { if (test_needs_auth()) { TEST_SKIP("Mock cluster does not support SSL/SASL\n"); diff --git a/tests/0107-topic_recreate.c b/tests/0107-topic_recreate.c index a648ccb6a1..1f91e2a84d 100644 --- a/tests/0107-topic_recreate.c +++ b/tests/0107-topic_recreate.c @@ -46,27 +46,27 @@ static mtx_t value_mtx; static char *value; 
-static const int msg_rate = 10; /**< Messages produced per second */ +static const int msg_rate = 10; /**< Messages produced per second */ -static struct test *this_test; /**< Exposes current test struct (in TLS) to - * producer thread. */ +static struct test *this_test; /**< Exposes current test struct (in TLS) to + * producer thread. */ /** * @brief Treat all error_cb as non-test-fatal. */ -static int is_error_fatal (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_error_fatal(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { return rd_false; } /** * @brief Producing thread */ -static int run_producer (void *arg) { - const char *topic = arg; +static int run_producer(void *arg) { + const char *topic = arg; rd_kafka_t *producer = test_create_producer(); - int ret = 0; + int ret = 0; test_curr = this_test; @@ -89,11 +89,9 @@ static int run_producer (void *arg) { } err = rd_kafka_producev( - producer, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_VALUE(value, strlen(value)), - RD_KAFKA_V_END); + producer, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_VALUE(value, strlen(value)), RD_KAFKA_V_END); if (err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART || err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) @@ -114,9 +112,8 @@ static int run_producer (void *arg) { TEST_WARN("Failed to flush all message(s), %d remain\n", rd_kafka_outq_len(producer)); /* Purge the messages to see which partition they were for */ - rd_kafka_purge(producer, - RD_KAFKA_PURGE_F_QUEUE| - RD_KAFKA_PURGE_F_INFLIGHT); + rd_kafka_purge(producer, RD_KAFKA_PURGE_F_QUEUE | + RD_KAFKA_PURGE_F_INFLIGHT); rd_kafka_flush(producer, 5000); TEST_SAY("%d message(s) in queue after purge\n", rd_kafka_outq_len(producer)); @@ -134,13 +131,13 @@ static int run_producer (void *arg) { * @brief Expect at least \p cnt messages with value matching \p exp_value, * else fail the current test. 
*/ -static void expect_messages (rd_kafka_t *consumer, int cnt, - const char *exp_value) { +static void +expect_messages(rd_kafka_t *consumer, int cnt, const char *exp_value) { int match_cnt = 0, other_cnt = 0, err_cnt = 0; size_t exp_len = strlen(exp_value); - TEST_SAY("Expecting >= %d messages with value \"%s\"...\n", - cnt, exp_value); + TEST_SAY("Expecting >= %d messages with value \"%s\"...\n", cnt, + exp_value); while (match_cnt < cnt) { rd_kafka_message_t *rkmessage; @@ -157,20 +154,21 @@ static void expect_messages (rd_kafka_t *consumer, int cnt, !memcmp(rkmessage->payload, exp_value, exp_len)) { match_cnt++; } else { - TEST_SAYL(3, "Received \"%.*s\", expected \"%s\": " + TEST_SAYL(3, + "Received \"%.*s\", expected \"%s\": " "ignored\n", (int)rkmessage->len, - (const char *)rkmessage->payload, - exp_value); + (const char *)rkmessage->payload, exp_value); other_cnt++; } rd_kafka_message_destroy(rkmessage); } - TEST_SAY("Consumed %d messages matching \"%s\", " - "ignored %d others, saw %d error(s)\n", - match_cnt, exp_value, other_cnt, err_cnt); + TEST_SAY( + "Consumed %d messages matching \"%s\", " + "ignored %d others, saw %d error(s)\n", + match_cnt, exp_value, other_cnt, err_cnt); } @@ -178,11 +176,11 @@ static void expect_messages (rd_kafka_t *consumer, int cnt, * @brief Test topic create + delete + create with first topic having * \p part_cnt_1 partitions and second topic having \p part_cnt_2 . 
*/ -static void do_test_create_delete_create (int part_cnt_1, int part_cnt_2) { +static void do_test_create_delete_create(int part_cnt_1, int part_cnt_2) { rd_kafka_t *consumer; thrd_t producer_thread; const char *topic = test_mk_topic_name(__FUNCTION__, 1); - int ret = 0; + int ret = 0; TEST_SAY(_C_MAG "[ Test topic create(%d parts)+delete+create(%d parts) ]\n", @@ -202,8 +200,8 @@ static void do_test_create_delete_create (int part_cnt_1, int part_cnt_2) { mtx_unlock(&value_mtx); /* Create producer thread */ - if (thrd_create(&producer_thread, run_producer, - (void *)topic) != thrd_success) + if (thrd_create(&producer_thread, run_producer, (void *)topic) != + thrd_success) TEST_FAIL("thrd_create failed"); /* Consume messages for 5s */ @@ -244,7 +242,7 @@ static void do_test_create_delete_create (int part_cnt_1, int part_cnt_2) { } -int main_0107_topic_recreate (int argc, char **argv) { +int main_0107_topic_recreate(int argc, char **argv) { this_test = test_curr; /* Need to expose current test struct (in TLS) * to producer thread. */ diff --git a/tests/0109-auto_create_topics.cpp b/tests/0109-auto_create_topics.cpp index 6d49e0a670..cabee67041 100644 --- a/tests/0109-auto_create_topics.cpp +++ b/tests/0109-auto_create_topics.cpp @@ -42,22 +42,22 @@ */ -static void do_test_consumer (bool allow_auto_create_topics, - bool with_wildcards) { - Test::Say(tostr() << _C_MAG << "[ Test allow.auto.create.topics=" << - (allow_auto_create_topics ? "true":"false") << - " with_wildcards=" << (with_wildcards ? "true":"false") << " ]\n"); +static void do_test_consumer(bool allow_auto_create_topics, + bool with_wildcards) { + Test::Say(tostr() << _C_MAG << "[ Test allow.auto.create.topics=" + << (allow_auto_create_topics ? "true" : "false") + << " with_wildcards=" << (with_wildcards ? 
"true" : "false") + << " ]\n"); - bool has_acl_cli = - test_broker_version >= TEST_BRKVER(2,1,0,0) && - !test_needs_auth(); /* We can't bother passing Java security config to - * kafka-acls.sh */ + bool has_acl_cli = test_broker_version >= TEST_BRKVER(2, 1, 0, 0) && + !test_needs_auth(); /* We can't bother passing Java + * security config to kafka-acls.sh */ - bool supports_allow = test_broker_version >= TEST_BRKVER(0,11,0,0); + bool supports_allow = test_broker_version >= TEST_BRKVER(0, 11, 0, 0); - std::string topic_exists = Test::mk_topic_name("0109-exists", 1); + std::string topic_exists = Test::mk_topic_name("0109-exists", 1); std::string topic_notexists = Test::mk_topic_name("0109-notexists", 1); - std::string topic_unauth = Test::mk_topic_name("0109-unauthorized", 1); + std::string topic_unauth = Test::mk_topic_name("0109-unauthorized", 1); /* Create consumer */ RdKafka::Conf *conf; @@ -87,23 +87,24 @@ static void do_test_consumer (bool allow_auto_create_topics, Test::create_topic(c, topic_unauth.c_str(), 1, 1); /* Add denying ACL for unauth topic */ - test_kafka_cmd("kafka-acls.sh --bootstrap-server %s " - "--add --deny-principal 'User:*' " - "--operation All --deny-host '*' " - "--topic '%s'", - bootstraps.c_str(), topic_unauth.c_str()); + test_kafka_cmd( + "kafka-acls.sh --bootstrap-server %s " + "--add --deny-principal 'User:*' " + "--operation All --deny-host '*' " + "--topic '%s'", + bootstraps.c_str(), topic_unauth.c_str()); } /* Wait for topic to be fully created */ - test_wait_topic_exists(NULL, topic_exists.c_str(), 10*1000); + test_wait_topic_exists(NULL, topic_exists.c_str(), 10 * 1000); /* * Subscribe */ std::vector topics; - std::map exp_errors; + std::map exp_errors; topics.push_back(topic_notexists); if (has_acl_cli) @@ -118,7 +119,7 @@ static void do_test_consumer (bool allow_auto_create_topics, * not triggering topic auto creation). * We need to handle the expected error cases accordingly. 
*/ exp_errors["^" + topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART; - exp_errors[topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART; + exp_errors[topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART; if (has_acl_cli) { /* Unauthorized topics are not included in list-all-topics Metadata, @@ -145,54 +146,51 @@ static void do_test_consumer (bool allow_auto_create_topics, bool run = true; while (run) { RdKafka::Message *msg = c->consume(tmout_multip(1000)); - switch (msg->err()) - { - case RdKafka::ERR__TIMED_OUT: - case RdKafka::ERR_NO_ERROR: - break; + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + case RdKafka::ERR_NO_ERROR: + break; - case RdKafka::ERR__PARTITION_EOF: - run = false; - break; + case RdKafka::ERR__PARTITION_EOF: + run = false; + break; - default: - Test::Say("Consume error on " + msg->topic_name() + - ": " + msg->errstr() + "\n"); + default: + Test::Say("Consume error on " + msg->topic_name() + ": " + msg->errstr() + + "\n"); - std::map::iterator it = + std::map::iterator it = exp_errors.find(msg->topic_name()); - /* Temporary unknown-topic errors are okay for auto-created topics. 
*/ - bool unknown_is_ok = - allow_auto_create_topics && - !with_wildcards && - msg->err() == RdKafka::ERR_UNKNOWN_TOPIC_OR_PART && - msg->topic_name() == topic_notexists; - - if (it == exp_errors.end()) { - if (unknown_is_ok) - Test::Say("Ignoring temporary auto-create error for topic " + - msg->topic_name() + ": " + - RdKafka::err2str(msg->err()) + "\n"); - else - Test::Fail("Did not expect error for " + msg->topic_name() + - ": got: " + RdKafka::err2str(msg->err())); - } else if (msg->err() != it->second) { - if (unknown_is_ok) - Test::Say("Ignoring temporary auto-create error for topic " + - msg->topic_name() + ": " + - RdKafka::err2str(msg->err()) + "\n"); - else - Test::Fail("Expected '" + RdKafka::err2str(it->second) + "' for " + - msg->topic_name() + ", got " + - RdKafka::err2str(msg->err())); - } else { - exp_errors.erase(msg->topic_name()); - } - - break; + /* Temporary unknown-topic errors are okay for auto-created topics. */ + bool unknown_is_ok = allow_auto_create_topics && !with_wildcards && + msg->err() == RdKafka::ERR_UNKNOWN_TOPIC_OR_PART && + msg->topic_name() == topic_notexists; + + if (it == exp_errors.end()) { + if (unknown_is_ok) + Test::Say("Ignoring temporary auto-create error for topic " + + msg->topic_name() + ": " + RdKafka::err2str(msg->err()) + + "\n"); + else + Test::Fail("Did not expect error for " + msg->topic_name() + + ": got: " + RdKafka::err2str(msg->err())); + } else if (msg->err() != it->second) { + if (unknown_is_ok) + Test::Say("Ignoring temporary auto-create error for topic " + + msg->topic_name() + ": " + RdKafka::err2str(msg->err()) + + "\n"); + else + Test::Fail("Expected '" + RdKafka::err2str(it->second) + "' for " + + msg->topic_name() + ", got " + + RdKafka::err2str(msg->err())); + } else { + exp_errors.erase(msg->topic_name()); } + break; + } + delete msg; } @@ -207,14 +205,14 @@ static void do_test_consumer (bool allow_auto_create_topics, } extern "C" { - int main_0109_auto_create_topics (int argc, char **argv) { - /* 
Parameters: - * allow auto create, with wildcards */ - do_test_consumer(true, true); - do_test_consumer(true, false); - do_test_consumer(false, true); - do_test_consumer(false, false); - - return 0; - } +int main_0109_auto_create_topics(int argc, char **argv) { + /* Parameters: + * allow auto create, with wildcards */ + do_test_consumer(true, true); + do_test_consumer(true, false); + do_test_consumer(false, true); + do_test_consumer(false, false); + + return 0; +} } diff --git a/tests/0110-batch_size.cpp b/tests/0110-batch_size.cpp index f17e553d80..8dd8f56cf9 100644 --- a/tests/0110-batch_size.cpp +++ b/tests/0110-batch_size.cpp @@ -45,20 +45,20 @@ class myAvgStatsCb : public RdKafka::EventCb { public: - myAvgStatsCb(std::string topic): - avg_batchsize(0), min_batchsize(0), max_batchsize(0), topic_(topic) {} - - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_LOG: - Test::Say(event.str() + "\n"); - break; - case RdKafka::Event::EVENT_STATS: - read_batch_stats(event.str()); - break; - default: - break; + myAvgStatsCb(std::string topic) : + avg_batchsize(0), min_batchsize(0), max_batchsize(0), topic_(topic) { + } + + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_LOG: + Test::Say(event.str() + "\n"); + break; + case RdKafka::Event::EVENT_STATS: + read_batch_stats(event.str()); + break; + default: + break; } } @@ -67,14 +67,13 @@ class myAvgStatsCb : public RdKafka::EventCb { int max_batchsize; private: - - void read_val (rapidjson::Document &d, const std::string &path, int &val) { + void read_val(rapidjson::Document &d, const std::string &path, int &val) { rapidjson::Pointer jpath(path.c_str()); if (!jpath.IsValid()) - Test::Fail(tostr() << "json pointer parse " << path << " failed at " << - jpath.GetParseErrorOffset() << " with error code " << - jpath.GetParseErrorCode()); + Test::Fail(tostr() << "json pointer parse " << path << " failed at " + << 
jpath.GetParseErrorOffset() << " with error code " + << jpath.GetParseErrorCode()); rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath); if (!pp) { @@ -85,13 +84,13 @@ class myAvgStatsCb : public RdKafka::EventCb { val = pp->GetInt(); } - void read_batch_stats (const std::string &stats) { + void read_batch_stats(const std::string &stats) { rapidjson::Document d; if (d.Parse(stats.c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse stats JSON: " << - rapidjson::GetParseError_En(d.GetParseError()) << - " at " << d.GetErrorOffset()); + Test::Fail(tostr() << "Failed to parse stats JSON: " + << rapidjson::GetParseError_En(d.GetParseError()) + << " at " << d.GetErrorOffset()); read_val(d, "/topics/" + topic_ + "/batchsize/avg", avg_batchsize); read_val(d, "/topics/" + topic_ + "/batchsize/min", min_batchsize); @@ -106,7 +105,7 @@ class myAvgStatsCb : public RdKafka::EventCb { * @brief Specify batch.size and parse stats to verify it takes effect. * */ -static void do_test_batch_size () { +static void do_test_batch_size() { std::string topic = Test::mk_topic_name(__FILE__, 0); myAvgStatsCb event_cb(topic); @@ -114,10 +113,10 @@ static void do_test_batch_size () { RdKafka::Conf *conf; Test::conf_init(&conf, NULL, 0); - const int msgcnt = 1000; - const int msgsize = 1000; - int batchsize = 5000; - int exp_min_batchsize = batchsize - msgsize - 100/*~framing overhead*/; + const int msgcnt = 1000; + const int msgsize = 1000; + int batchsize = 5000; + int exp_min_batchsize = batchsize - msgsize - 100 /*~framing overhead*/; Test::conf_set(conf, "batch.size", "5000"); @@ -132,52 +131,51 @@ static void do_test_batch_size () { RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); if (!p) - Test::Fail("Failed to create Producer: " + errstr); + Test::Fail("Failed to create Producer: " + errstr); /* Produce messages */ char val[msgsize]; memset(val, 'a', msgsize); - for (int i = 0 ; i < msgcnt ; i++) { - RdKafka::ErrorCode err = p->produce(topic, 0, - 
RdKafka::Producer::RK_MSG_COPY, - val, msgsize, NULL, 0, -1, NULL); + for (int i = 0; i < msgcnt; i++) { + RdKafka::ErrorCode err = + p->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, val, msgsize, NULL, + 0, -1, NULL); if (err) Test::Fail("Produce failed: " + RdKafka::err2str(err)); } Test::Say(tostr() << "Produced " << msgcnt << " messages\n"); - p->flush(5*1000); + p->flush(5 * 1000); Test::Say("Waiting for stats\n"); while (event_cb.avg_batchsize == 0) p->poll(1000); - Test::Say(tostr() << "Batchsize: " << - "configured " << batchsize << - ", min " << event_cb.min_batchsize << - ", max " << event_cb.max_batchsize << - ", avg " << event_cb.avg_batchsize << - "\n"); + Test::Say(tostr() << "Batchsize: " + << "configured " << batchsize << ", min " + << event_cb.min_batchsize << ", max " + << event_cb.max_batchsize << ", avg " + << event_cb.avg_batchsize << "\n"); /* The average batchsize should within a message size from batch.size. */ if (event_cb.avg_batchsize < exp_min_batchsize || event_cb.avg_batchsize > batchsize) - Test::Fail(tostr() << "Expected avg batchsize to be within " << - exp_min_batchsize << ".." << batchsize << - " but got " << event_cb.avg_batchsize); + Test::Fail(tostr() << "Expected avg batchsize to be within " + << exp_min_batchsize << ".." 
<< batchsize << " but got " + << event_cb.avg_batchsize); delete p; } #endif extern "C" { - int main_0110_batch_size (int argc, char **argv) { +int main_0110_batch_size(int argc, char **argv) { #if WITH_RAPIDJSON - do_test_batch_size(); + do_test_batch_size(); #else - Test::Skip("RapidJSON >=1.1.0 not available\n"); + Test::Skip("RapidJSON >=1.1.0 not available\n"); #endif - return 0; - } + return 0; +} } diff --git a/tests/0111-delay_create_topics.cpp b/tests/0111-delay_create_topics.cpp index 1df60d9a04..4b6683add9 100644 --- a/tests/0111-delay_create_topics.cpp +++ b/tests/0111-delay_create_topics.cpp @@ -45,9 +45,10 @@ namespace { class DrCb : public RdKafka::DeliveryReportCb { public: - DrCb (RdKafka::ErrorCode exp_err): ok(false), _exp_err(exp_err) {} + DrCb(RdKafka::ErrorCode exp_err) : ok(false), _exp_err(exp_err) { + } - void dr_cb (RdKafka::Message &msg) { + void dr_cb(RdKafka::Message &msg) { Test::Say("Delivery report: " + RdKafka::err2str(msg.err()) + "\n"); if (msg.err() != _exp_err) Test::Fail("Delivery report: Expected " + RdKafka::err2str(_exp_err) + @@ -63,12 +64,11 @@ class DrCb : public RdKafka::DeliveryReportCb { private: RdKafka::ErrorCode _exp_err; }; -}; - -static void do_test_producer (bool timeout_too_short) { +}; // namespace - Test::Say(tostr() << _C_MAG << "[ Test with timeout_too_short=" << - (timeout_too_short ? "true" : "false") << " ]\n"); +static void do_test_producer(bool timeout_too_short) { + Test::Say(tostr() << _C_MAG << "[ Test with timeout_too_short=" + << (timeout_too_short ? "true" : "false") << " ]\n"); std::string topic = Test::mk_topic_name("0110-delay_create_topics", 1); @@ -83,8 +83,8 @@ static void do_test_producer (bool timeout_too_short) { Test::Fail(errstr); } - DrCb dr_cb(timeout_too_short ? - RdKafka::ERR_UNKNOWN_TOPIC_OR_PART : RdKafka::ERR_NO_ERROR); + DrCb dr_cb(timeout_too_short ? 
RdKafka::ERR_UNKNOWN_TOPIC_OR_PART + : RdKafka::ERR_NO_ERROR); conf->set("dr_cb", &dr_cb, errstr); RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); @@ -93,15 +93,13 @@ static void do_test_producer (bool timeout_too_short) { delete conf; /* Produce a message to the yet non-existent topic. */ - RdKafka::ErrorCode err = p->produce(topic, RdKafka::Topic::PARTITION_UA, - RdKafka::Producer::RK_MSG_COPY, - (void *)"hello", 5, - "hi", 2, - 0, NULL, NULL); + RdKafka::ErrorCode err = p->produce( + topic, RdKafka::Topic::PARTITION_UA, RdKafka::Producer::RK_MSG_COPY, + (void *)"hello", 5, "hi", 2, 0, NULL, NULL); if (err) Test::Fail(tostr() << "produce failed: " << RdKafka::err2str(err)); - int delay = 5; + int delay = 5; int64_t end_wait = test_clock() + (delay * 1000000); while (test_clock() < end_wait) @@ -109,21 +107,21 @@ static void do_test_producer (bool timeout_too_short) { Test::create_topic(NULL, topic.c_str(), 1, 3); - p->flush(10*1000); + p->flush(10 * 1000); if (!dr_cb.ok) Test::Fail("Did not get delivery report for message"); delete p; - Test::Say(tostr() << _C_GRN << "[ Test with timeout_too_short=" << - (timeout_too_short ? "true" : "false") << ": PASS ]\n"); + Test::Say(tostr() << _C_GRN << "[ Test with timeout_too_short=" + << (timeout_too_short ? "true" : "false") << ": PASS ]\n"); } extern "C" { - int main_0111_delay_create_topics (int argc, char **argv) { - do_test_producer(false); - do_test_producer(true); - return 0; - } +int main_0111_delay_create_topics(int argc, char **argv) { + do_test_producer(false); + do_test_producer(true); + return 0; +} } diff --git a/tests/0112-assign_unknown_part.c b/tests/0112-assign_unknown_part.c index b46f2ada08..d945a2c32c 100644 --- a/tests/0112-assign_unknown_part.c +++ b/tests/0112-assign_unknown_part.c @@ -37,10 +37,10 @@ * See #2915. 
*/ -int main_0112_assign_unknown_part (int argc, char **argv) { +int main_0112_assign_unknown_part(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1); - int64_t offset = RD_KAFKA_OFFSET_BEGINNING; - uint64_t testid = test_id_generate(); + int64_t offset = RD_KAFKA_OFFSET_BEGINNING; + uint64_t testid = test_id_generate(); rd_kafka_t *c; rd_kafka_topic_partition_list_t *tpl; int r; @@ -52,7 +52,7 @@ int main_0112_assign_unknown_part (int argc, char **argv) { TEST_SAY("Creating topic %s with 1 partition\n", topic); test_create_topic(c, topic, 1, 1); - test_wait_topic_exists(c, topic, 10*1000); + test_wait_topic_exists(c, topic, 10 * 1000); TEST_SAY("Producing message to partition 0\n"); test_produce_msgs_easy(topic, testid, 0, 1); @@ -73,8 +73,10 @@ int main_0112_assign_unknown_part (int argc, char **argv) { * which causes the produce to fail. * Loop until the partition count is correct. */ while ((r = test_get_partition_count(c, topic, 5000)) != 2) { - TEST_SAY("Waiting for %s partition count to reach 2, " - "currently %d\n", topic, r); + TEST_SAY( + "Waiting for %s partition count to reach 2, " + "currently %d\n", + topic, r); rd_sleep(1); } diff --git a/tests/0113-cooperative_rebalance.cpp b/tests/0113-cooperative_rebalance.cpp index 7fd217cdd1..1af06363ae 100644 --- a/tests/0113-cooperative_rebalance.cpp +++ b/tests/0113-cooperative_rebalance.cpp @@ -44,24 +44,26 @@ using namespace std; /** Topic+Partition helper class */ class Toppar { -public: - Toppar(const string &topic, int32_t partition): - topic(topic), partition(partition) { } + public: + Toppar(const string &topic, int32_t partition) : + topic(topic), partition(partition) { + } - Toppar(const RdKafka::TopicPartition *tp): - topic(tp->topic()), partition(tp->partition()) {} + Toppar(const RdKafka::TopicPartition *tp) : + topic(tp->topic()), partition(tp->partition()) { + } - friend bool operator== (const Toppar &a, const Toppar &b) { + friend bool operator==(const Toppar 
&a, const Toppar &b) { return a.partition == b.partition && a.topic == b.topic; } - friend bool operator< (const Toppar &a, const Toppar &b) { + friend bool operator<(const Toppar &a, const Toppar &b) { if (a.partition < b.partition) return true; return a.topic < b.topic; } - string str () const { + string str() const { return tostr() << topic << "[" << partition << "]"; } @@ -83,7 +85,7 @@ static std::string get_bootstrap_servers() { class DrCb : public RdKafka::DeliveryReportCb { public: - void dr_cb (RdKafka::Message &msg) { + void dr_cb(RdKafka::Message &msg) { if (msg.err()) Test::Fail("Delivery failed: " + RdKafka::err2str(msg.err())); } @@ -96,8 +98,7 @@ class DrCb : public RdKafka::DeliveryReportCb { * The pair is Toppar,msg_cnt_per_partition. * The Toppar is topic,partition_cnt. */ -static void produce_msgs (vector > partitions) { - +static void produce_msgs(vector > partitions) { RdKafka::Conf *conf; Test::conf_init(&conf, NULL, 0); @@ -109,18 +110,15 @@ static void produce_msgs (vector > partitions) { Test::Fail("Failed to create producer: " + errstr); delete conf; - for (vector >::iterator it = partitions.begin() ; - it != partitions.end() ; it++) { - for (int part = 0 ; part < it->first.partition ; part++) { - for (int i = 0 ; i < it->second ; i++) { - RdKafka::ErrorCode err = p->produce(it->first.topic, part, - RdKafka::Producer::RK_MSG_COPY, - (void *)"Hello there", 11, - NULL, 0, - 0, NULL); - TEST_ASSERT(!err, "produce(%s, %d) failed: %s", - it->first.topic.c_str(), part, - RdKafka::err2str(err).c_str()); + for (vector >::iterator it = partitions.begin(); + it != partitions.end(); it++) { + for (int part = 0; part < it->first.partition; part++) { + for (int i = 0; i < it->second; i++) { + RdKafka::ErrorCode err = + p->produce(it->first.topic, part, RdKafka::Producer::RK_MSG_COPY, + (void *)"Hello there", 11, NULL, 0, 0, NULL); + TEST_ASSERT(!err, "produce(%s, %d) failed: %s", it->first.topic.c_str(), + part, RdKafka::err2str(err).c_str()); 
p->poll(0); } @@ -134,14 +132,13 @@ static void produce_msgs (vector > partitions) { -static RdKafka::KafkaConsumer * -make_consumer (string client_id, - string group_id, - string assignment_strategy, - vector > *additional_conf, - RdKafka::RebalanceCb *rebalance_cb, - int timeout_s) { - +static RdKafka::KafkaConsumer *make_consumer( + string client_id, + string group_id, + string assignment_strategy, + vector > *additional_conf, + RdKafka::RebalanceCb *rebalance_cb, + int timeout_s) { std::string bootstraps; std::string errstr; std::vector >::iterator itr; @@ -154,7 +151,8 @@ make_consumer (string client_id, Test::conf_set(conf, "enable.auto.commit", "false"); Test::conf_set(conf, "partition.assignment.strategy", assignment_strategy); if (additional_conf != NULL) { - for (itr = (*additional_conf).begin(); itr != (*additional_conf).end(); itr++) + for (itr = (*additional_conf).begin(); itr != (*additional_conf).end(); + itr++) Test::conf_set(conf, itr->first, itr->second); } @@ -162,7 +160,8 @@ make_consumer (string client_id, if (conf->set("rebalance_cb", rebalance_cb, errstr)) Test::Fail("Failed to set rebalance_cb: " + errstr); } - RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr); + RdKafka::KafkaConsumer *consumer = + RdKafka::KafkaConsumer::create(conf, errstr); if (!consumer) Test::Fail("Failed to create KafkaConsumer: " + errstr); delete conf; @@ -173,30 +172,30 @@ make_consumer (string client_id, /** * @returns a CSV string of the vector */ -static string string_vec_to_str (const vector &v) { +static string string_vec_to_str(const vector &v) { ostringstream ss; - for (vector::const_iterator it = v.begin(); - it != v.end(); - it++) + for (vector::const_iterator it = v.begin(); it != v.end(); it++) ss << (it == v.begin() ? 
"" : ", ") << *it; return ss.str(); } void expect_assignment(RdKafka::KafkaConsumer *consumer, size_t count) { - std::vector partitions; + std::vector partitions; RdKafka::ErrorCode err; err = consumer->assignment(partitions); if (err) - Test::Fail(consumer->name() + " assignment() failed: " + - RdKafka::err2str(err)); + Test::Fail(consumer->name() + + " assignment() failed: " + RdKafka::err2str(err)); if (partitions.size() != count) - Test::Fail(tostr() << "Expecting consumer " << consumer->name() << " to have " << count << " assigned partition(s), not: " << partitions.size()); + Test::Fail(tostr() << "Expecting consumer " << consumer->name() + << " to have " << count + << " assigned partition(s), not: " << partitions.size()); RdKafka::TopicPartition::destroy(partitions); } -static bool TopicPartition_cmp (const RdKafka::TopicPartition *a, - const RdKafka::TopicPartition *b) { +static bool TopicPartition_cmp(const RdKafka::TopicPartition *a, + const RdKafka::TopicPartition *b) { if (a->topic() < b->topic()) return true; else if (a->topic() > b->topic()) @@ -205,34 +204,33 @@ static bool TopicPartition_cmp (const RdKafka::TopicPartition *a, } -void expect_assignment (RdKafka::KafkaConsumer *consumer, - vector &expected) { - vector partitions; +void expect_assignment(RdKafka::KafkaConsumer *consumer, + vector &expected) { + vector partitions; RdKafka::ErrorCode err; err = consumer->assignment(partitions); if (err) - Test::Fail(consumer->name() + " assignment() failed: " + - RdKafka::err2str(err)); + Test::Fail(consumer->name() + + " assignment() failed: " + RdKafka::err2str(err)); if (partitions.size() != expected.size()) - Test::Fail(tostr() << "Expecting consumer " << consumer->name() << - " to have " << expected.size() << - " assigned partition(s), not " << partitions.size()); + Test::Fail(tostr() << "Expecting consumer " << consumer->name() + << " to have " << expected.size() + << " assigned partition(s), not " << partitions.size()); sort(partitions.begin(), 
partitions.end(), TopicPartition_cmp); sort(expected.begin(), expected.end(), TopicPartition_cmp); int fails = 0; - for (int i = 0 ; i < (int)partitions.size() ; i++) { + for (int i = 0; i < (int)partitions.size(); i++) { if (!TopicPartition_cmp(partitions[i], expected[i])) continue; - Test::Say(tostr() << _C_RED << consumer->name() << - ": expected assignment #" << i << " " << - expected[i]->topic() << - " [" << expected[i]->partition() << "], not " << - partitions[i]->topic() << - " [" << partitions[i]->partition() << "]\n"); + Test::Say(tostr() << _C_RED << consumer->name() << ": expected assignment #" + << i << " " << expected[i]->topic() << " [" + << expected[i]->partition() << "], not " + << partitions[i]->topic() << " [" + << partitions[i]->partition() << "]\n"); fails++; } @@ -244,46 +242,44 @@ void expect_assignment (RdKafka::KafkaConsumer *consumer, class DefaultRebalanceCb : public RdKafka::RebalanceCb { - -private: - - static string part_list_print (const vector - &partitions) { + private: + static string part_list_print( + const vector &partitions) { ostringstream ss; - for (unsigned int i = 0 ; i < partitions.size() ; i++) - ss << (i == 0 ? "" : ", ") << - partitions[i]->topic() << " [" << partitions[i]->partition() << "]"; + for (unsigned int i = 0; i < partitions.size(); i++) + ss << (i == 0 ? "" : ", ") << partitions[i]->topic() << " [" + << partitions[i]->partition() << "]"; return ss.str(); } -public: - + public: int assign_call_cnt; int revoke_call_cnt; int nonempty_assign_call_cnt; /**< ASSIGN_PARTITIONS with partitions */ int lost_call_cnt; int partitions_assigned_net; bool wait_rebalance; - int64_t ts_last_assign; /**< Timestamp of last rebalance assignment */ - map msg_cnt; /**< Number of consumed messages per partition. */ + int64_t ts_last_assign; /**< Timestamp of last rebalance assignment */ + map msg_cnt; /**< Number of consumed messages per partition. 
*/ - ~DefaultRebalanceCb () { + ~DefaultRebalanceCb() { reset_msg_cnt(); } - DefaultRebalanceCb (): - assign_call_cnt(0), - revoke_call_cnt(0), - nonempty_assign_call_cnt(0), - lost_call_cnt(0), - partitions_assigned_net(0), - wait_rebalance(false), - ts_last_assign(0) { } + DefaultRebalanceCb() : + assign_call_cnt(0), + revoke_call_cnt(0), + nonempty_assign_call_cnt(0), + lost_call_cnt(0), + partitions_assigned_net(0), + wait_rebalance(false), + ts_last_assign(0) { + } - void rebalance_cb (RdKafka::KafkaConsumer *consumer, - RdKafka::ErrorCode err, - std::vector &partitions) { + void rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector &partitions) { wait_rebalance = false; std::string protocol = consumer->rebalance_protocol(); @@ -293,18 +289,18 @@ class DefaultRebalanceCb : public RdKafka::RebalanceCb { consumer->name().c_str(), protocol.c_str()); const char *lost_str = consumer->assignment_lost() ? " (LOST)" : ""; - Test::Say(tostr() << _C_YEL "RebalanceCb " << protocol << - ": " << consumer->name() << - " " << RdKafka::err2str(err) << lost_str << ": " << - part_list_print(partitions) << "\n"); + Test::Say(tostr() << _C_YEL "RebalanceCb " << protocol << ": " + << consumer->name() << " " << RdKafka::err2str(err) + << lost_str << ": " << part_list_print(partitions) + << "\n"); if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { if (consumer->assignment_lost()) Test::Fail("unexpected lost assignment during ASSIGN rebalance"); RdKafka::Error *error = consumer->incremental_assign(partitions); if (error) - Test::Fail(tostr() << "consumer->incremental_assign() failed: " << - error->str()); + Test::Fail(tostr() << "consumer->incremental_assign() failed: " + << error->str()); if (partitions.size() > 0) nonempty_assign_call_cnt++; assign_call_cnt += 1; @@ -316,8 +312,8 @@ class DefaultRebalanceCb : public RdKafka::RebalanceCb { lost_call_cnt += 1; RdKafka::Error *error = consumer->incremental_unassign(partitions); if (error) - 
Test::Fail(tostr() << "consumer->incremental_unassign() failed: " << - error->str()); + Test::Fail(tostr() << "consumer->incremental_unassign() failed: " + << error->str()); if (partitions.size() == 0) Test::Fail("revoked partitions size should never be 0"); revoke_call_cnt += 1; @@ -329,47 +325,44 @@ class DefaultRebalanceCb : public RdKafka::RebalanceCb { reset_msg_cnt(partitions); } - bool poll_once (RdKafka::KafkaConsumer *c, int timeout_ms) { + bool poll_once(RdKafka::KafkaConsumer *c, int timeout_ms) { RdKafka::Message *msg = c->consume(timeout_ms); - bool ret = msg->err() != RdKafka::ERR__TIMED_OUT; + bool ret = msg->err() != RdKafka::ERR__TIMED_OUT; if (!msg->err()) msg_cnt[Toppar(msg->topic_name(), msg->partition())]++; delete msg; return ret; } - void reset_msg_cnt () { + void reset_msg_cnt() { msg_cnt.clear(); } - void reset_msg_cnt (Toppar &tp) { + void reset_msg_cnt(Toppar &tp) { int msgcnt = get_msg_cnt(tp); - Test::Say(tostr() << " RESET " << tp.topic << " [" << tp.partition << "]" - << " with " << msgcnt << " messages\n"); + Test::Say(tostr() << " RESET " << tp.topic << " [" << tp.partition << "]" + << " with " << msgcnt << " messages\n"); if (!msg_cnt.erase(tp) && msgcnt) Test::Fail("erase failed!"); - } - void reset_msg_cnt (const vector &partitions) { - for (unsigned int i = 0 ; i < partitions.size() ; i++) { + void reset_msg_cnt(const vector &partitions) { + for (unsigned int i = 0; i < partitions.size(); i++) { Toppar tp(partitions[i]->topic(), partitions[i]->partition()); reset_msg_cnt(tp); } } - int get_msg_cnt (const Toppar &tp) { - map::iterator it = msg_cnt.find(tp); + int get_msg_cnt(const Toppar &tp) { + map::iterator it = msg_cnt.find(tp); if (it == msg_cnt.end()) return 0; return it->second; } - }; - /** * @brief Verify that the consumer's assignment is a subset of the * subscribed topics. 
@@ -388,53 +381,52 @@ class DefaultRebalanceCb : public RdKafka::RebalanceCb { * assignment is empty or there is an assignment for * topic that is not subscribed. */ -static int verify_consumer_assignment (RdKafka::KafkaConsumer *consumer, - DefaultRebalanceCb &rebalance_cb, - const vector &topics, - bool allow_empty, - bool allow_mismatch, - map - *all_assignments, - int exp_msg_cnt) { - vector partitions; +static int verify_consumer_assignment( + RdKafka::KafkaConsumer *consumer, + DefaultRebalanceCb &rebalance_cb, + const vector &topics, + bool allow_empty, + bool allow_mismatch, + map *all_assignments, + int exp_msg_cnt) { + vector partitions; RdKafka::ErrorCode err; int fails = 0; int count; ostringstream ss; err = consumer->assignment(partitions); - TEST_ASSERT(!err, - "Failed to get assignment for consumer %s: %s", - consumer->name().c_str(), - RdKafka::err2str(err).c_str()); + TEST_ASSERT(!err, "Failed to get assignment for consumer %s: %s", + consumer->name().c_str(), RdKafka::err2str(err).c_str()); count = (int)partitions.size(); - for (vector::iterator it = partitions.begin() ; - it != partitions.end() ; it++) { + for (vector::iterator it = partitions.begin(); + it != partitions.end(); it++) { RdKafka::TopicPartition *p = *it; if (find(topics.begin(), topics.end(), p->topic()) == topics.end()) { - Test::Say(tostr() << - (allow_mismatch ? _C_YEL "Warning (allowed)" : _C_RED "Error") - << ": " << consumer->name() << " is assigned " - << p->topic() << " [" << p->partition() << "] which is " - << "not in the list of subscribed topics: " << - string_vec_to_str(topics) << "\n"); + Test::Say(tostr() << (allow_mismatch ? 
_C_YEL "Warning (allowed)" + : _C_RED "Error") + << ": " << consumer->name() << " is assigned " + << p->topic() << " [" << p->partition() << "] which is " + << "not in the list of subscribed topics: " + << string_vec_to_str(topics) << "\n"); if (!allow_mismatch) fails++; } Toppar tp(p); - pair::iterator,bool> ret; - ret = all_assignments->insert(pair(tp, consumer)); + pair::iterator, bool> ret; + ret = all_assignments->insert( + pair(tp, consumer)); if (!ret.second) { - Test::Say(tostr() << _C_RED << "Error: " - << consumer->name() << " is assigned " - << p->topic() << " [" << p->partition() << "] which is " - "already assigned to consumer " << - ret.first->second->name() << "\n"); + Test::Say(tostr() << _C_RED << "Error: " << consumer->name() + << " is assigned " << p->topic() << " [" + << p->partition() + << "] which is " + "already assigned to consumer " + << ret.first->second->name() << "\n"); fails++; } @@ -442,51 +434,48 @@ static int verify_consumer_assignment (RdKafka::KafkaConsumer *consumer, int msg_cnt = rebalance_cb.get_msg_cnt(tp); if (exp_msg_cnt != -1 && msg_cnt != exp_msg_cnt) { - Test::Say(tostr() << _C_RED << "Error: " - << consumer->name() << " expected " << exp_msg_cnt << - " messages on " << - p->topic() << " [" << p->partition() << "], not " << - msg_cnt << "\n"); + Test::Say(tostr() << _C_RED << "Error: " << consumer->name() + << " expected " << exp_msg_cnt << " messages on " + << p->topic() << " [" << p->partition() << "], not " + << msg_cnt << "\n"); fails++; } - ss << (it == partitions.begin() ? "" : ", ") << p->topic() << - " [" << p->partition() << "] (" << msg_cnt << "msgs)"; + ss << (it == partitions.begin() ? 
"" : ", ") << p->topic() << " [" + << p->partition() << "] (" << msg_cnt << "msgs)"; } RdKafka::TopicPartition::destroy(partitions); - Test::Say(tostr() << "Consumer " << consumer->name() << - " assignment (" << count << "): " << ss.str() << "\n"); + Test::Say(tostr() << "Consumer " << consumer->name() << " assignment (" + << count << "): " << ss.str() << "\n"); if (count == 0 && !allow_empty) Test::Fail("Consumer " + consumer->name() + " has unexpected empty assignment"); if (fails) - Test::Fail(tostr() << "Consumer " + consumer->name() << - " assignment verification failed (see previous error)"); + Test::Fail( + tostr() << "Consumer " + consumer->name() + << " assignment verification failed (see previous error)"); return count; } - - - - /* -------- a_assign_tests * - * check behavior incremental assign / unassign outside the context of a rebalance. + * check behavior incremental assign / unassign outside the context of a + * rebalance. */ /** Incremental assign, then assign(NULL). */ -static void assign_test_1 (RdKafka::KafkaConsumer *consumer, - std::vector toppars1, - std::vector toppars2) { +static void assign_test_1(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2) { RdKafka::ErrorCode err; RdKafka::Error *error; @@ -504,9 +493,9 @@ static void assign_test_1 (RdKafka::KafkaConsumer *consumer, /** Assign, then incremental unassign. */ -static void assign_test_2 (RdKafka::KafkaConsumer *consumer, - std::vector toppars1, - std::vector toppars2) { +static void assign_test_2(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2) { RdKafka::ErrorCode err; RdKafka::Error *error; @@ -524,9 +513,9 @@ static void assign_test_2 (RdKafka::KafkaConsumer *consumer, /** Incremental assign, then incremental unassign. 
*/ -static void assign_test_3 (RdKafka::KafkaConsumer *consumer, - std::vector toppars1, - std::vector toppars2) { +static void assign_test_3(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2) { RdKafka::Error *error; Test::Say("Incremental assign, then incremental unassign\n"); @@ -543,12 +532,13 @@ static void assign_test_3 (RdKafka::KafkaConsumer *consumer, /** Multi-topic incremental assign and unassign + message consumption. */ -static void assign_test_4 (RdKafka::KafkaConsumer *consumer, - std::vector toppars1, - std::vector toppars2) { +static void assign_test_4(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2) { RdKafka::Error *error; - Test::Say("Multi-topic incremental assign and unassign + message consumption\n"); + Test::Say( + "Multi-topic incremental assign and unassign + message consumption\n"); if ((error = consumer->incremental_assign(toppars1))) Test::Fail("Incremental assign failed: " + error->str()); @@ -558,7 +548,8 @@ static void assign_test_4 (RdKafka::KafkaConsumer *consumer, if (m->err() != RdKafka::ERR_NO_ERROR) Test::Fail("Expecting a consumed message."); if (m->len() != 100) - Test::Fail(tostr() << "Expecting msg len to be 100, not: " << m->len()); /* implies read from topic 1. */ + Test::Fail(tostr() << "Expecting msg len to be 100, not: " + << m->len()); /* implies read from topic 1. */ delete m; if ((error = consumer->incremental_unassign(toppars1))) @@ -578,13 +569,15 @@ static void assign_test_4 (RdKafka::KafkaConsumer *consumer, if (m->err() != RdKafka::ERR_NO_ERROR) Test::Fail("Expecting a consumed message."); if (m->len() != 200) - Test::Fail(tostr() << "Expecting msg len to be 200, not: " << m->len()); /* implies read from topic 2. */ + Test::Fail(tostr() << "Expecting msg len to be 200, not: " + << m->len()); /* implies read from topic 2. 
*/ delete m; if ((error = consumer->incremental_assign(toppars1))) Test::Fail("Incremental assign failed: " + error->str()); if (Test::assignment_partition_count(consumer, NULL) != 2) - Test::Fail(tostr() << "Expecting current assignment to have size 2, not: " << Test::assignment_partition_count(consumer, NULL)); + Test::Fail(tostr() << "Expecting current assignment to have size 2, not: " + << Test::assignment_partition_count(consumer, NULL)); m = consumer->consume(5000); if (m->err() != RdKafka::ERR_NO_ERROR) @@ -601,9 +594,9 @@ static void assign_test_4 (RdKafka::KafkaConsumer *consumer, /** Incremental assign and unassign of empty collection. */ -static void assign_test_5 (RdKafka::KafkaConsumer *consumer, - std::vector toppars1, - std::vector toppars2) { +static void assign_test_5(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2) { RdKafka::Error *error; std::vector toppars3; @@ -620,53 +613,52 @@ static void assign_test_5 (RdKafka::KafkaConsumer *consumer, +static void run_test( + const std::string &t1, + const std::string &t2, + void (*test)(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2)) { + std::vector toppars1; + toppars1.push_back(RdKafka::TopicPartition::create(t1, 0)); + std::vector toppars2; + toppars2.push_back(RdKafka::TopicPartition::create(t2, 0)); -static void -run_test (const std::string &t1, const std::string &t2, - void (*test)(RdKafka::KafkaConsumer *consumer, - std::vector toppars1, - std::vector toppars2)) { - std::vector toppars1; - toppars1.push_back(RdKafka::TopicPartition::create(t1, 0)); - std::vector toppars2; - toppars2.push_back(RdKafka::TopicPartition::create(t2, 0)); + RdKafka::KafkaConsumer *consumer = + make_consumer("C_1", t1, "cooperative-sticky", NULL, NULL, 10); - RdKafka::KafkaConsumer *consumer = make_consumer("C_1", t1, - "cooperative-sticky", - NULL, NULL, 10); + test(consumer, toppars1, toppars2); - test(consumer, toppars1, toppars2); - - 
RdKafka::TopicPartition::destroy(toppars1); - RdKafka::TopicPartition::destroy(toppars2); + RdKafka::TopicPartition::destroy(toppars1); + RdKafka::TopicPartition::destroy(toppars2); - consumer->close(); - delete consumer; + consumer->close(); + delete consumer; } -static void a_assign_tests () { - SUB_TEST_QUICK(); +static void a_assign_tests() { + SUB_TEST_QUICK(); - int msgcnt = 1000; - const int msgsize1 = 100; - const int msgsize2 = 200; + int msgcnt = 1000; + const int msgsize1 = 100; + const int msgsize2 = 200; - std::string topic1_str = Test::mk_topic_name("0113-a1", 1); - test_create_topic(NULL, topic1_str.c_str(), 1, 1); - std::string topic2_str = Test::mk_topic_name("0113-a2", 1); - test_create_topic(NULL, topic2_str.c_str(), 1, 1); + std::string topic1_str = Test::mk_topic_name("0113-a1", 1); + test_create_topic(NULL, topic1_str.c_str(), 1, 1); + std::string topic2_str = Test::mk_topic_name("0113-a2", 1); + test_create_topic(NULL, topic2_str.c_str(), 1, 1); - test_produce_msgs_easy_size(topic1_str.c_str(), 0, 0, msgcnt, msgsize1); - test_produce_msgs_easy_size(topic2_str.c_str(), 0, 0, msgcnt, msgsize2); + test_produce_msgs_easy_size(topic1_str.c_str(), 0, 0, msgcnt, msgsize1); + test_produce_msgs_easy_size(topic2_str.c_str(), 0, 0, msgcnt, msgsize2); - run_test(topic1_str, topic2_str, assign_test_1); - run_test(topic1_str, topic2_str, assign_test_2); - run_test(topic1_str, topic2_str, assign_test_3); - run_test(topic1_str, topic2_str, assign_test_4); - run_test(topic1_str, topic2_str, assign_test_5); + run_test(topic1_str, topic2_str, assign_test_1); + run_test(topic1_str, topic2_str, assign_test_2); + run_test(topic1_str, topic2_str, assign_test_3); + run_test(topic1_str, topic2_str, assign_test_4); + run_test(topic1_str, topic2_str, assign_test_5); - SUB_TEST_PASS(); + SUB_TEST_PASS(); } @@ -678,7 +670,7 @@ static void a_assign_tests () { * * Makes use of the mock cluster to induce latency. 
*/ -static void a_assign_rapid () { +static void a_assign_rapid() { SUB_TEST_QUICK(); std::string group_id = __FUNCTION__; @@ -686,7 +678,7 @@ static void a_assign_rapid () { rd_kafka_mock_cluster_t *mcluster; const char *bootstraps; - mcluster = test_mock_cluster_new(3, &bootstraps); + mcluster = test_mock_cluster_new(3, &bootstraps); int32_t coord_id = 1; rd_kafka_mock_coordinator_set(mcluster, "group", group_id.c_str(), coord_id); @@ -705,14 +697,17 @@ static void a_assign_rapid () { std::string errstr; RdKafka::Producer *p = RdKafka::Producer::create(pconf, errstr); if (!p) - Test::Fail(tostr() << __FUNCTION__ << ": Failed to create producer: " << - errstr); + Test::Fail(tostr() << __FUNCTION__ + << ": Failed to create producer: " << errstr); delete pconf; - Test::produce_msgs(p, "topic1", 0, msgs_per_partition, 10, false/*no flush*/); - Test::produce_msgs(p, "topic2", 0, msgs_per_partition, 10, false/*no flush*/); - Test::produce_msgs(p, "topic3", 0, msgs_per_partition, 10, false/*no flush*/); - p->flush(10*1000); + Test::produce_msgs(p, "topic1", 0, msgs_per_partition, 10, + false /*no flush*/); + Test::produce_msgs(p, "topic2", 0, msgs_per_partition, 10, + false /*no flush*/); + Test::produce_msgs(p, "topic3", 0, msgs_per_partition, 10, + false /*no flush*/); + p->flush(10 * 1000); delete p; @@ -735,14 +730,14 @@ static void a_assign_rapid () { RdKafka::KafkaConsumer *consumer; consumer = RdKafka::KafkaConsumer::create(conf, errstr); if (!consumer) - Test::Fail(tostr() << __FUNCTION__ << ": Failed to create consumer: " << - errstr); + Test::Fail(tostr() << __FUNCTION__ + << ": Failed to create consumer: " << errstr); delete conf; vector toppars; vector expected; - map pos; /* Expected consume position per partition */ + map pos; /* Expected consume position per partition */ pos[Toppar(toppars1[0]->topic(), toppars1[0]->partition())] = 0; pos[Toppar(toppars2[0]->topic(), toppars2[0]->partition())] = 0; pos[Toppar(toppars3[0]->topic(), 
toppars3[0]->partition())] = 0; @@ -751,15 +746,13 @@ static void a_assign_rapid () { * we commit an offset that should not be used in the final consume loop. * This commit will be overwritten below with another commit. */ vector offsets; - offsets.push_back(RdKafka::TopicPartition::create(toppars1[0]->topic(), - toppars1[0]->partition(), - 11)); + offsets.push_back(RdKafka::TopicPartition::create( + toppars1[0]->topic(), toppars1[0]->partition(), 11)); /* This partition should start at this position even though * there will be a sub-sequent commit to overwrite it, that should not * be used since this partition is never unassigned. */ - offsets.push_back(RdKafka::TopicPartition::create(toppars2[0]->topic(), - toppars2[0]->partition(), - 22)); + offsets.push_back(RdKafka::TopicPartition::create( + toppars2[0]->topic(), toppars2[0]->partition(), 22)); pos[Toppar(toppars2[0]->topic(), toppars2[0]->partition())] = 22; Test::print_TopicPartitions("pre-commit", offsets); @@ -767,8 +760,8 @@ static void a_assign_rapid () { RdKafka::ErrorCode err; err = consumer->commitSync(offsets); if (err) - Test::Fail(tostr() << __FUNCTION__ << ": pre-commit failed: " << - RdKafka::err2str(err) << "\n"); + Test::Fail(tostr() << __FUNCTION__ << ": pre-commit failed: " + << RdKafka::err2str(err) << "\n"); /* Add coordinator delay so that the OffsetFetchRequest originating * from the coming incremental_assign() will not finish before @@ -789,9 +782,8 @@ static void a_assign_rapid () { /* Unassign -1 == 2 */ toppars.clear(); toppars.push_back(toppars1[0]); - vector::iterator it = find(expected.begin(), - expected.end(), - toppars1[0]); + vector::iterator it = + find(expected.begin(), expected.end(), toppars1[0]); expected.erase(it); Test::incremental_unassign(consumer, toppars); @@ -801,20 +793,18 @@ static void a_assign_rapid () { /* Commit offset for the removed partition and the partition that is * unchanged in the assignment. 
*/ RdKafka::TopicPartition::destroy(offsets); - offsets.push_back(RdKafka::TopicPartition::create(toppars1[0]->topic(), - toppars1[0]->partition(), - 55)); - offsets.push_back(RdKafka::TopicPartition::create(toppars2[0]->topic(), - toppars2[0]->partition(), - 33)); /* should not be - * used. */ + offsets.push_back(RdKafka::TopicPartition::create( + toppars1[0]->topic(), toppars1[0]->partition(), 55)); + offsets.push_back(RdKafka::TopicPartition::create( + toppars2[0]->topic(), toppars2[0]->partition(), 33)); /* should not be + * used. */ pos[Toppar(toppars1[0]->topic(), toppars1[0]->partition())] = 55; Test::print_TopicPartitions("commit", offsets); err = consumer->commitAsync(offsets); if (err) - Test::Fail(tostr() << __FUNCTION__ << ": commit failed: " << - RdKafka::err2str(err) << "\n"); + Test::Fail(tostr() << __FUNCTION__ + << ": commit failed: " << RdKafka::err2str(err) << "\n"); /* Assign +3 == 2,3 */ toppars.clear(); @@ -839,31 +829,33 @@ static void a_assign_rapid () { */ int wait_end = (int)expected.size(); while (wait_end > 0) { - RdKafka::Message *msg = consumer->consume(10*1000); + RdKafka::Message *msg = consumer->consume(10 * 1000); if (msg->err() == RdKafka::ERR__TIMED_OUT) - Test::Fail(tostr() << __FUNCTION__ << ": Consume timed out waiting " - "for " << wait_end << " more partitions"); + Test::Fail(tostr() << __FUNCTION__ + << ": Consume timed out waiting " + "for " + << wait_end << " more partitions"); - Toppar tp = Toppar(msg->topic_name(), msg->partition()); + Toppar tp = Toppar(msg->topic_name(), msg->partition()); int64_t *exp_pos = &pos[tp]; - Test::Say(3, tostr() << __FUNCTION__ << ": Received " << - tp.topic << " [" << tp.partition << "] at offset " << - msg->offset() << " (expected offset " << *exp_pos << ")\n"); + Test::Say(3, tostr() << __FUNCTION__ << ": Received " << tp.topic << " [" + << tp.partition << "] at offset " << msg->offset() + << " (expected offset " << *exp_pos << ")\n"); if (*exp_pos != msg->offset()) - Test::Fail(tostr() 
<< __FUNCTION__ << ": expected message offset " << - *exp_pos << " for " << msg->topic_name() << - " [" << msg->partition() << "], not " << msg->offset() << - "\n"); + Test::Fail(tostr() << __FUNCTION__ << ": expected message offset " + << *exp_pos << " for " << msg->topic_name() << " [" + << msg->partition() << "], not " << msg->offset() + << "\n"); (*exp_pos)++; if (*exp_pos == msgs_per_partition) { TEST_ASSERT(wait_end > 0, ""); wait_end--; } else if (msg->offset() > msgs_per_partition) - Test::Fail(tostr() << __FUNCTION__ << ": unexpected message with " << - "offset " << msg->offset() << " on " << tp.topic << - " [" << tp.partition << "]\n"); + Test::Fail(tostr() << __FUNCTION__ << ": unexpected message with " + << "offset " << msg->offset() << " on " << tp.topic + << " [" << tp.partition << "]\n"); delete msg; } @@ -888,18 +880,21 @@ static void a_assign_rapid () { * 4. close. */ -static void b_subscribe_with_cb_test (rd_bool_t close_consumer) { +static void b_subscribe_with_cb_test(rd_bool_t close_consumer) { SUB_TEST(); std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); test_create_topic(NULL, topic_name.c_str(), 2, 1); DefaultRebalanceCb rebalance_cb1; - RdKafka::KafkaConsumer *c1 = make_consumer("C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 25); + RdKafka::KafkaConsumer *c1 = make_consumer( + "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 25); DefaultRebalanceCb rebalance_cb2; - RdKafka::KafkaConsumer *c2 = make_consumer("C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 25); - test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10*1000); + RdKafka::KafkaConsumer *c2 = make_consumer( + "C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 25); + test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10 
* 1000); Test::subscribe(c1, topic_name); @@ -925,42 +920,56 @@ static void b_subscribe_with_cb_test (rd_bool_t close_consumer) { * * 1. c1 joins group. * 2. c1 gets assigned 2 partitions. - * - there isn't a follow-on rebalance because there aren't any revoked partitions. + * - there isn't a follow-on rebalance because there aren't any revoked + * partitions. * 3. c2 joins group. - * 4. This results in a rebalance with one partition being revoked from c1, and no - * partitions assigned to either c1 or c2 (however the rebalance callback will be - * called in each case with an empty set). + * 4. This results in a rebalance with one partition being revoked from c1, + * and no partitions assigned to either c1 or c2 (however the rebalance + * callback will be called in each case with an empty set). * 5. c1 then re-joins the group since it had a partition revoked. - * 6. c2 is now assigned a single partition, and c1's incremental assignment is empty. - * 7. Since there were no revoked partitions, no further rebalance is triggered. + * 6. c2 is now assigned a single partition, and c1's incremental assignment + * is empty. + * 7. Since there were no revoked partitions, no further rebalance is + * triggered. */ /* The rebalance cb is always called on assign, even if empty. 
*/ if (rebalance_cb1.assign_call_cnt != 3) - Test::Fail(tostr() << "Expecting 3 assign calls on consumer 1, not " << rebalance_cb1.assign_call_cnt); + Test::Fail(tostr() << "Expecting 3 assign calls on consumer 1, not " + << rebalance_cb1.assign_call_cnt); if (rebalance_cb2.assign_call_cnt != 2) - Test::Fail(tostr() << "Expecting 2 assign calls on consumer 2, not: " << rebalance_cb2.assign_call_cnt); + Test::Fail(tostr() << "Expecting 2 assign calls on consumer 2, not: " + << rebalance_cb2.assign_call_cnt); - /* The rebalance cb is not called on and empty revoke (unless partitions lost, which is not the case here) */ + /* The rebalance cb is not called on and empty revoke (unless partitions lost, + * which is not the case here) */ if (rebalance_cb1.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expecting 1 revoke call on consumer 1, not: " << rebalance_cb1.revoke_call_cnt); + Test::Fail(tostr() << "Expecting 1 revoke call on consumer 1, not: " + << rebalance_cb1.revoke_call_cnt); if (rebalance_cb2.revoke_call_cnt != 0) - Test::Fail(tostr() << "Expecting 0 revoke calls on consumer 2, not: " << rebalance_cb2.revoke_call_cnt); + Test::Fail(tostr() << "Expecting 0 revoke calls on consumer 2, not: " + << rebalance_cb2.revoke_call_cnt); /* Final state */ - /* Expect both consumers to have 1 assigned partition (via net calculation in rebalance_cb) */ + /* Expect both consumers to have 1 assigned partition (via net calculation in + * rebalance_cb) */ if (rebalance_cb1.partitions_assigned_net != 1) - Test::Fail(tostr() << "Expecting consumer 1 to have net 1 assigned partition, not: " << rebalance_cb1.partitions_assigned_net); + Test::Fail(tostr() + << "Expecting consumer 1 to have net 1 assigned partition, not: " + << rebalance_cb1.partitions_assigned_net); if (rebalance_cb2.partitions_assigned_net != 1) - Test::Fail(tostr() << "Expecting consumer 2 to have net 1 assigned partition, not: " << rebalance_cb2.partitions_assigned_net); + Test::Fail(tostr() + << "Expecting 
consumer 2 to have net 1 assigned partition, not: " + << rebalance_cb2.partitions_assigned_net); - /* Expect both consumers to have 1 assigned partition (via ->assignment() query) */ + /* Expect both consumers to have 1 assigned partition (via ->assignment() + * query) */ expect_assignment(c1, 1); expect_assignment(c2, 1); /* Make sure the fetchers are running */ - int msgcnt = 100; + int msgcnt = 100; const int msgsize1 = 100; test_produce_msgs_easy_size(topic_name.c_str(), 0, 0, msgcnt, msgsize1); test_produce_msgs_easy_size(topic_name.c_str(), 0, 1, msgcnt, msgsize1); @@ -995,21 +1004,33 @@ static void b_subscribe_with_cb_test (rd_bool_t close_consumer) { /* Closing the consumer should trigger rebalance_cb (revoke): */ if (rebalance_cb1.revoke_call_cnt != 2) - Test::Fail(tostr() << "Expecting 2 revoke calls on consumer 1, not: " << rebalance_cb1.revoke_call_cnt); + Test::Fail(tostr() << "Expecting 2 revoke calls on consumer 1, not: " + << rebalance_cb1.revoke_call_cnt); if (rebalance_cb2.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expecting 1 revoke call on consumer 2, not: " << rebalance_cb2.revoke_call_cnt); + Test::Fail(tostr() << "Expecting 1 revoke call on consumer 2, not: " + << rebalance_cb2.revoke_call_cnt); /* ..and net assigned partitions should drop to 0 in both cases: */ if (rebalance_cb1.partitions_assigned_net != 0) - Test::Fail(tostr() << "Expecting consumer 1 to have net 0 assigned partitions, not: " << rebalance_cb1.partitions_assigned_net); + Test::Fail( + tostr() + << "Expecting consumer 1 to have net 0 assigned partitions, not: " + << rebalance_cb1.partitions_assigned_net); if (rebalance_cb2.partitions_assigned_net != 0) - Test::Fail(tostr() << "Expecting consumer 2 to have net 0 assigned partitions, not: " << rebalance_cb2.partitions_assigned_net); + Test::Fail( + tostr() + << "Expecting consumer 2 to have net 0 assigned partitions, not: " + << rebalance_cb2.partitions_assigned_net); /* Nothing in this test should result in lost 
partitions */ if (rebalance_cb1.lost_call_cnt > 0) - Test::Fail(tostr() << "Expecting consumer 1 to have 0 lost partition events, not: " << rebalance_cb1.lost_call_cnt); + Test::Fail( + tostr() << "Expecting consumer 1 to have 0 lost partition events, not: " + << rebalance_cb1.lost_call_cnt); if (rebalance_cb2.lost_call_cnt > 0) - Test::Fail(tostr() << "Expecting consumer 2 to have 0 lost partition events, not: " << rebalance_cb2.lost_call_cnt); + Test::Fail( + tostr() << "Expecting consumer 2 to have 0 lost partition events, not: " + << rebalance_cb2.lost_call_cnt); delete c1; delete c2; @@ -1026,21 +1047,24 @@ static void b_subscribe_with_cb_test (rd_bool_t close_consumer) { * 4. Close. */ -static void c_subscribe_no_cb_test (rd_bool_t close_consumer) { +static void c_subscribe_no_cb_test(rd_bool_t close_consumer) { SUB_TEST(); std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); test_create_topic(NULL, topic_name.c_str(), 2, 1); - RdKafka::KafkaConsumer *c1 = make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 20); - RdKafka::KafkaConsumer *c2 = make_consumer("C_2", group_name, "cooperative-sticky", NULL, NULL, 20); - test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10*1000); + RdKafka::KafkaConsumer *c1 = + make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 20); + RdKafka::KafkaConsumer *c2 = + make_consumer("C_2", group_name, "cooperative-sticky", NULL, NULL, 20); + test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10 * 1000); Test::subscribe(c1, topic_name); bool c2_subscribed = false; - bool done = false; + bool done = false; while (!done) { Test::poll_once(c1, 500); Test::poll_once(c2, 500); @@ -1080,28 +1104,33 @@ static void c_subscribe_no_cb_test (rd_bool_t close_consumer) { * 3. Consumer is closed. 
*/ -static void d_change_subscription_add_topic (rd_bool_t close_consumer) { +static void d_change_subscription_add_topic(rd_bool_t close_consumer) { SUB_TEST(); - std::string topic_name_1 = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name_1.c_str(), 2, 1); - std::string topic_name_2 = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_2 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name_2.c_str(), 2, 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15); - test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10*1000); - test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10*1000); + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000); Test::subscribe(c, topic_name_1); bool subscribed_to_one_topic = false; - bool done = false; + bool done = false; while (!done) { Test::poll_once(c, 500); - if (Test::assignment_partition_count(c, NULL) == 2 && !subscribed_to_one_topic) { + if (Test::assignment_partition_count(c, NULL) == 2 && + !subscribed_to_one_topic) { subscribed_to_one_topic = true; Test::subscribe(c, topic_name_1, topic_name_2); } @@ -1131,28 +1160,33 @@ static void d_change_subscription_add_topic (rd_bool_t close_consumer) { * 3. Consumer is closed. 
*/ -static void e_change_subscription_remove_topic (rd_bool_t close_consumer) { +static void e_change_subscription_remove_topic(rd_bool_t close_consumer) { SUB_TEST(); - std::string topic_name_1 = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name_1.c_str(), 2, 1); - std::string topic_name_2 = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_2 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name_2.c_str(), 2, 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15); - test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10*1000); - test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10*1000); + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000); Test::subscribe(c, topic_name_1, topic_name_2); bool subscribed_to_two_topics = false; - bool done = false; + bool done = false; while (!done) { Test::poll_once(c, 500); - if (Test::assignment_partition_count(c, NULL) == 4 && !subscribed_to_two_topics) { + if (Test::assignment_partition_count(c, NULL) == 4 && + !subscribed_to_two_topics) { subscribed_to_two_topics = true; Test::subscribe(c, topic_name_1); } @@ -1176,62 +1210,73 @@ static void e_change_subscription_remove_topic (rd_bool_t close_consumer) { -/* Check that use of consumer->assign() and consumer->unassign() is disallowed when a - * COOPERATIVE assignor is in use. 
+/* Check that use of consumer->assign() and consumer->unassign() is disallowed + * when a COOPERATIVE assignor is in use. */ class FTestRebalanceCb : public RdKafka::RebalanceCb { -public: + public: rd_bool_t assigned; - FTestRebalanceCb () { + FTestRebalanceCb() { assigned = rd_false; } - void rebalance_cb (RdKafka::KafkaConsumer *consumer, - RdKafka::ErrorCode err, - std::vector &partitions) { - Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " " << RdKafka::err2str(err) << "\n"); + void rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector &partitions) { + Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " " + << RdKafka::err2str(err) << "\n"); if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { RdKafka::ErrorCode err_resp = consumer->assign(partitions); - Test::Say(tostr() << "consumer->assign() response code: " << err_resp << "\n"); + Test::Say(tostr() << "consumer->assign() response code: " << err_resp + << "\n"); if (err_resp != RdKafka::ERR__STATE) - Test::Fail(tostr() << "Expected assign to fail with error code: " << RdKafka::ERR__STATE << "(ERR__STATE)"); + Test::Fail(tostr() << "Expected assign to fail with error code: " + << RdKafka::ERR__STATE << "(ERR__STATE)"); RdKafka::Error *error = consumer->incremental_assign(partitions); if (error) - Test::Fail(tostr() << "consumer->incremental_unassign() failed: " << error->str()); + Test::Fail(tostr() << "consumer->incremental_unassign() failed: " + << error->str()); assigned = rd_true; } else { RdKafka::ErrorCode err_resp = consumer->unassign(); - Test::Say(tostr() << "consumer->unassign() response code: " << err_resp << "\n"); + Test::Say(tostr() << "consumer->unassign() response code: " << err_resp + << "\n"); if (err_resp != RdKafka::ERR__STATE) - Test::Fail(tostr() << "Expected assign to fail with error code: " << RdKafka::ERR__STATE << "(ERR__STATE)"); + Test::Fail(tostr() << "Expected assign to fail with error code: " + << RdKafka::ERR__STATE << 
"(ERR__STATE)"); RdKafka::Error *error = consumer->incremental_unassign(partitions); if (error) - Test::Fail(tostr() << "consumer->incremental_unassign() failed: " << error->str()); + Test::Fail(tostr() << "consumer->incremental_unassign() failed: " + << error->str()); } } }; -static void f_assign_call_cooperative () { +static void f_assign_call_cooperative() { SUB_TEST(); std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name.c_str(), 1, 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); std::vector > additional_conf; - additional_conf.push_back(std::pair(std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); FTestRebalanceCb rebalance_cb; - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, &rebalance_cb, 15); - test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10*1000); + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb, 15); + test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000); Test::subscribe(c, topic_name); @@ -1246,29 +1291,34 @@ static void f_assign_call_cooperative () { -/* Check that use of consumer->incremental_assign() and consumer->incremental_unassign() is - * disallowed when an EAGER assignor is in use. +/* Check that use of consumer->incremental_assign() and + * consumer->incremental_unassign() is disallowed when an EAGER assignor is in + * use. 
*/ class GTestRebalanceCb : public RdKafka::RebalanceCb { -public: + public: rd_bool_t assigned; - GTestRebalanceCb () { + GTestRebalanceCb() { assigned = rd_false; } - void rebalance_cb (RdKafka::KafkaConsumer *consumer, - RdKafka::ErrorCode err, - std::vector &partitions) { - Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " " << RdKafka::err2str(err) << "\n"); + void rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector &partitions) { + Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " " + << RdKafka::err2str(err) << "\n"); if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { RdKafka::Error *error = consumer->incremental_assign(partitions); - Test::Say(tostr() << "consumer->incremental_assign() response: " << (!error ? "NULL" : error->str()) << "\n"); + Test::Say(tostr() << "consumer->incremental_assign() response: " + << (!error ? "NULL" : error->str()) << "\n"); if (!error) Test::Fail("Expected consumer->incremental_assign() to fail"); if (error->code() != RdKafka::ERR__STATE) - Test::Fail(tostr() << "Expected consumer->incremental_assign() to fail with error code " << RdKafka::ERR__STATE); + Test::Fail(tostr() << "Expected consumer->incremental_assign() to fail " + "with error code " + << RdKafka::ERR__STATE); delete error; RdKafka::ErrorCode err_resp = consumer->assign(partitions); @@ -1279,11 +1329,14 @@ class GTestRebalanceCb : public RdKafka::RebalanceCb { } else { RdKafka::Error *error = consumer->incremental_unassign(partitions); - Test::Say(tostr() << "consumer->incremental_unassign() response: " << (!error ? "NULL" : error->str()) << "\n"); + Test::Say(tostr() << "consumer->incremental_unassign() response: " + << (!error ? 
"NULL" : error->str()) << "\n"); if (!error) Test::Fail("Expected consumer->incremental_unassign() to fail"); if (error->code() != RdKafka::ERR__STATE) - Test::Fail(tostr() << "Expected consumer->incremental_unassign() to fail with error code " << RdKafka::ERR__STATE); + Test::Fail(tostr() << "Expected consumer->incremental_unassign() to " + "fail with error code " + << RdKafka::ERR__STATE); delete error; RdKafka::ErrorCode err_resp = consumer->unassign(); @@ -1299,13 +1352,16 @@ static void g_incremental_assign_call_eager() { std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name.c_str(), 1, 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); std::vector > additional_conf; - additional_conf.push_back(std::pair(std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); GTestRebalanceCb rebalance_cb; - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "roundrobin", &additional_conf, &rebalance_cb, 15); - test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10*1000); + RdKafka::KafkaConsumer *c = make_consumer( + "C_1", group_name, "roundrobin", &additional_conf, &rebalance_cb, 15); + test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000); Test::subscribe(c, topic_name); @@ -1326,44 +1382,54 @@ static void g_incremental_assign_call_eager() { * 3. Consumer is closed. 
*/ -static void h_delete_topic () { +static void h_delete_topic() { SUB_TEST(); - std::string topic_name_1 = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name_1.c_str(), 1, 1); - std::string topic_name_2 = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_2 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name_2.c_str(), 1, 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); std::vector > additional_conf; - additional_conf.push_back(std::pair(std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); DefaultRebalanceCb rebalance_cb; - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, &rebalance_cb, 15); - test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10*1000); - test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10*1000); + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb, 15); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000); Test::subscribe(c, topic_name_1, topic_name_2); bool deleted = false; - bool done = false; + bool done = false; while (!done) { Test::poll_once(c, 500); - std::vector partitions; + std::vector partitions; c->assignment(partitions); if (partitions.size() == 2 && !deleted) { if (rebalance_cb.assign_call_cnt != 1) - Test::Fail(tostr() << "Expected 1 assign call, saw " << rebalance_cb.assign_call_cnt << "\n"); + Test::Fail(tostr() << "Expected 1 assign call, saw " + << 
rebalance_cb.assign_call_cnt << "\n"); Test::delete_topic(c, topic_name_2.c_str()); deleted = true; } if (partitions.size() == 1 && deleted) { if (partitions[0]->topic() != topic_name_1) - Test::Fail(tostr() << "Expecting subscribed topic to be '" << topic_name_1 << "' not '" << partitions[0]->topic() << "'"); - Test::Say(tostr() << "Assignment no longer includes deleted topic '" << topic_name_2 << "'\n"); + Test::Fail(tostr() << "Expecting subscribed topic to be '" + << topic_name_1 << "' not '" + << partitions[0]->topic() << "'"); + Test::Say(tostr() << "Assignment no longer includes deleted topic '" + << topic_name_2 << "'\n"); done = true; } @@ -1386,29 +1452,35 @@ static void h_delete_topic () { * 3. Consumer is closed. */ -static void i_delete_topic_2 () { +static void i_delete_topic_2() { SUB_TEST(); - std::string topic_name_1 = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name_1.c_str(), 1, 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); std::vector > additional_conf; - additional_conf.push_back(std::pair(std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); DefaultRebalanceCb rebalance_cb; - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, &rebalance_cb, 15); - test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10*1000); + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb, 15); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); Test::subscribe(c, topic_name_1); bool deleted = false; - bool done = false; + bool done = false; while 
(!done) { Test::poll_once(c, 500); if (Test::assignment_partition_count(c, NULL) == 1 && !deleted) { if (rebalance_cb.assign_call_cnt != 1) - Test::Fail(tostr() << "Expected one assign call, saw " << rebalance_cb.assign_call_cnt << "\n"); + Test::Fail(tostr() << "Expected one assign call, saw " + << rebalance_cb.assign_call_cnt << "\n"); Test::delete_topic(c, topic_name_1.c_str()); deleted = true; } @@ -1435,23 +1507,27 @@ static void i_delete_topic_2 () { * 3. consumer is closed. */ -static void j_delete_topic_no_rb_callback () { +static void j_delete_topic_no_rb_callback() { SUB_TEST(); - std::string topic_name_1 = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name_1.c_str(), 1, 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); std::vector > additional_conf; - additional_conf.push_back(std::pair(std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, NULL, 15); - test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10*1000); + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + RdKafka::KafkaConsumer *c = make_consumer( + "C_1", group_name, "cooperative-sticky", &additional_conf, NULL, 15); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); Test::subscribe(c, topic_name_1); bool deleted = false; - bool done = false; + bool done = false; while (!done) { Test::poll_once(c, 500); @@ -1482,41 +1558,49 @@ static void j_delete_topic_no_rb_callback () { * 3. Consumer is closed. 
*/ -static void k_add_partition () { +static void k_add_partition() { SUB_TEST(); std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); test_create_topic(NULL, topic_name.c_str(), 1, 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); std::vector > additional_conf; - additional_conf.push_back(std::pair(std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); DefaultRebalanceCb rebalance_cb; - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, &rebalance_cb, 15); - test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10*1000); + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb, 15); + test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000); Test::subscribe(c, topic_name); bool subscribed = false; - bool done = false; + bool done = false; while (!done) { Test::poll_once(c, 500); if (Test::assignment_partition_count(c, NULL) == 1 && !subscribed) { if (rebalance_cb.assign_call_cnt != 1) - Test::Fail(tostr() << "Expected 1 assign call, saw " << rebalance_cb.assign_call_cnt); + Test::Fail(tostr() << "Expected 1 assign call, saw " + << rebalance_cb.assign_call_cnt); if (rebalance_cb.revoke_call_cnt != 0) - Test::Fail(tostr() << "Expected 0 revoke calls, saw " << rebalance_cb.revoke_call_cnt); + Test::Fail(tostr() << "Expected 0 revoke calls, saw " + << rebalance_cb.revoke_call_cnt); Test::create_partitions(c, topic_name.c_str(), 2); subscribed = true; } if (Test::assignment_partition_count(c, NULL) == 2 && subscribed) { if (rebalance_cb.assign_call_cnt != 2) - Test::Fail(tostr() << "Expected 2 assign calls, saw " << rebalance_cb.assign_call_cnt); + Test::Fail(tostr() 
<< "Expected 2 assign calls, saw " + << rebalance_cb.assign_call_cnt); if (rebalance_cb.revoke_call_cnt != 0) - Test::Fail(tostr() << "Expected 0 revoke calls, saw " << rebalance_cb.revoke_call_cnt); + Test::Fail(tostr() << "Expected 0 revoke calls, saw " + << rebalance_cb.revoke_call_cnt); done = true; } } @@ -1526,9 +1610,11 @@ static void k_add_partition () { delete c; if (rebalance_cb.assign_call_cnt != 2) - Test::Fail(tostr() << "Expected 2 assign calls, saw " << rebalance_cb.assign_call_cnt); + Test::Fail(tostr() << "Expected 2 assign calls, saw " + << rebalance_cb.assign_call_cnt); if (rebalance_cb.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expected 1 revoke call, saw " << rebalance_cb.revoke_call_cnt); + Test::Fail(tostr() << "Expected 1 revoke call, saw " + << rebalance_cb.revoke_call_cnt); SUB_TEST_PASS(); } @@ -1541,51 +1627,74 @@ static void k_add_partition () { * 3. consumers closed. */ -static void l_unsubscribe () { +static void l_unsubscribe() { SUB_TEST(); - std::string topic_name_1 = Test::mk_topic_name("0113-cooperative_rebalance", 1); - std::string topic_name_2 = Test::mk_topic_name("0113-cooperative_rebalance", 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_2 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); test_create_topic(NULL, topic_name_1.c_str(), 2, 1); test_create_topic(NULL, topic_name_2.c_str(), 2, 1); DefaultRebalanceCb rebalance_cb1; - RdKafka::KafkaConsumer *c1 = make_consumer("C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 30); - test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10*1000); - test_wait_topic_exists(c1->c_ptr(), topic_name_2.c_str(), 10*1000); + RdKafka::KafkaConsumer *c1 = make_consumer( + "C_1", group_name, "cooperative-sticky", NULL, 
&rebalance_cb1, 30); + test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c1->c_ptr(), topic_name_2.c_str(), 10 * 1000); Test::subscribe(c1, topic_name_1, topic_name_2); DefaultRebalanceCb rebalance_cb2; - RdKafka::KafkaConsumer *c2 = make_consumer("C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 30); + RdKafka::KafkaConsumer *c2 = make_consumer( + "C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 30); Test::subscribe(c2, topic_name_1, topic_name_2); - bool done = false; + bool done = false; bool unsubscribed = false; while (!done) { Test::poll_once(c1, 500); Test::poll_once(c2, 500); - if (Test::assignment_partition_count(c1, NULL) == 2 && Test::assignment_partition_count(c2, NULL) == 2) { + if (Test::assignment_partition_count(c1, NULL) == 2 && + Test::assignment_partition_count(c2, NULL) == 2) { if (rebalance_cb1.assign_call_cnt != 1) - Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: " << rebalance_cb1.assign_call_cnt); + Test::Fail( + tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: " + << rebalance_cb1.assign_call_cnt); if (rebalance_cb2.assign_call_cnt != 1) - Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 1 not: " << rebalance_cb2.assign_call_cnt); + Test::Fail( + tostr() << "Expecting consumer 2's assign_call_cnt to be 1 not: " + << rebalance_cb2.assign_call_cnt); Test::Say("Unsubscribing consumer 1 from both topics\n"); c1->unsubscribe(); unsubscribed = true; } - if (unsubscribed && Test::assignment_partition_count(c1, NULL) == 0 && Test::assignment_partition_count(c2, NULL) == 4) { - if (rebalance_cb1.assign_call_cnt != 1) /* is now unsubscribed, so rebalance_cb will no longer be called. 
*/ - Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: " << rebalance_cb1.assign_call_cnt); + if (unsubscribed && Test::assignment_partition_count(c1, NULL) == 0 && + Test::assignment_partition_count(c2, NULL) == 4) { + if (rebalance_cb1.assign_call_cnt != + 1) /* is now unsubscribed, so rebalance_cb will no longer be called. + */ + Test::Fail( + tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: " + << rebalance_cb1.assign_call_cnt); if (rebalance_cb2.assign_call_cnt != 2) - Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 2 not: " << rebalance_cb2.assign_call_cnt); + Test::Fail( + tostr() << "Expecting consumer 2's assign_call_cnt to be 2 not: " + << rebalance_cb2.assign_call_cnt); if (rebalance_cb1.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be 1 not: " << rebalance_cb1.revoke_call_cnt); - if (rebalance_cb2.revoke_call_cnt != 0) /* the rebalance_cb should not be called if the revoked partition list is empty */ - Test::Fail(tostr() << "Expecting consumer 2's revoke_call_cnt to be 0 not: " << rebalance_cb2.revoke_call_cnt); + Test::Fail( + tostr() << "Expecting consumer 1's revoke_call_cnt to be 1 not: " + << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb2.revoke_call_cnt != + 0) /* the rebalance_cb should not be called if the revoked partition + list is empty */ + Test::Fail( + tostr() << "Expecting consumer 2's revoke_call_cnt to be 0 not: " + << rebalance_cb2.revoke_call_cnt); Test::Say("Unsubscribe completed"); done = true; } @@ -1598,19 +1707,26 @@ static void l_unsubscribe () { /* there should be no assign rebalance_cb calls on close */ if (rebalance_cb1.assign_call_cnt != 1) - Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: " << rebalance_cb1.assign_call_cnt); + Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: " + << rebalance_cb1.assign_call_cnt); if (rebalance_cb2.assign_call_cnt != 2) - 
Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 2 not: " << rebalance_cb2.assign_call_cnt); + Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 2 not: " + << rebalance_cb2.assign_call_cnt); - if (rebalance_cb1.revoke_call_cnt != 1) /* should not be called a second revoke rebalance_cb */ - Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be 1 not: " << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb1.revoke_call_cnt != + 1) /* should not be called a second revoke rebalance_cb */ + Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be 1 not: " + << rebalance_cb1.revoke_call_cnt); if (rebalance_cb2.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expecting consumer 2's revoke_call_cnt to be 1 not: " << rebalance_cb2.revoke_call_cnt); + Test::Fail(tostr() << "Expecting consumer 2's revoke_call_cnt to be 1 not: " + << rebalance_cb2.revoke_call_cnt); if (rebalance_cb1.lost_call_cnt != 0) - Test::Fail(tostr() << "Expecting consumer 1's lost_call_cnt to be 0, not: " << rebalance_cb1.lost_call_cnt); + Test::Fail(tostr() << "Expecting consumer 1's lost_call_cnt to be 0, not: " + << rebalance_cb1.lost_call_cnt); if (rebalance_cb2.lost_call_cnt != 0) - Test::Fail(tostr() << "Expecting consumer 2's lost_call_cnt to be 0, not: " << rebalance_cb2.lost_call_cnt); + Test::Fail(tostr() << "Expecting consumer 2's lost_call_cnt to be 0, not: " + << rebalance_cb2.lost_call_cnt); delete c1; delete c2; @@ -1626,19 +1742,21 @@ static void l_unsubscribe () { * 3. Consumers closed. 
*/ -static void m_unsubscribe_2 () { +static void m_unsubscribe_2() { SUB_TEST(); std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); test_create_topic(NULL, topic_name.c_str(), 2, 1); - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15); - test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10*1000); + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15); + test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000); Test::subscribe(c, topic_name); - bool done = false; + bool done = false; bool unsubscribed = false; while (!done) { Test::poll_once(c, 500); @@ -1665,44 +1783,53 @@ static void m_unsubscribe_2 () { /* Check behavior when: - * 1. Two consumers (with rebalance_cb) subscribe to a regex (no matching topics exist) + * 1. Two consumers (with rebalance_cb) subscribe to a regex (no matching + * topics exist) * 2. Create two topics. * 3. Remove one of the topics. * 3. Consumers closed. 
*/ -static void n_wildcard () { +static void n_wildcard() { SUB_TEST(); const string topic_base_name = Test::mk_topic_name("0113-n_wildcard", 1); - const string topic_name_1 = topic_base_name + "_1"; - const string topic_name_2 = topic_base_name + "_2"; - const string topic_regex = "^" + topic_base_name + "_."; - const string group_name = Test::mk_unique_group_name("0113-n_wildcard"); + const string topic_name_1 = topic_base_name + "_1"; + const string topic_name_2 = topic_base_name + "_2"; + const string topic_regex = "^" + topic_base_name + "_."; + const string group_name = Test::mk_unique_group_name("0113-n_wildcard"); std::vector > additional_conf; - additional_conf.push_back(std::pair(std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); DefaultRebalanceCb rebalance_cb1; - RdKafka::KafkaConsumer *c1 = make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, &rebalance_cb1, 30); + RdKafka::KafkaConsumer *c1 = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb1, 30); Test::subscribe(c1, topic_regex); DefaultRebalanceCb rebalance_cb2; - RdKafka::KafkaConsumer *c2 = make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf, &rebalance_cb2, 30); + RdKafka::KafkaConsumer *c2 = + make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb2, 30); Test::subscribe(c2, topic_regex); - /* There are no matching topics, so the consumers should not join the group initially */ + /* There are no matching topics, so the consumers should not join the group + * initially */ Test::poll_once(c1, 500); Test::poll_once(c2, 500); if (rebalance_cb1.assign_call_cnt != 0) - Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 0 not: " << rebalance_cb1.assign_call_cnt); + Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 0 
not: " + << rebalance_cb1.assign_call_cnt); if (rebalance_cb2.assign_call_cnt != 0) - Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 0 not: " << rebalance_cb2.assign_call_cnt); + Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 0 not: " + << rebalance_cb2.assign_call_cnt); - bool done = false; - bool created_topics = false; - bool deleted_topic = false; + bool done = false; + bool created_topics = false; + bool deleted_topic = false; int last_cb1_assign_call_cnt = 0; int last_cb2_assign_call_cnt = 0; while (!done) { @@ -1711,7 +1838,8 @@ static void n_wildcard () { if (Test::assignment_partition_count(c1, NULL) == 0 && Test::assignment_partition_count(c2, NULL) == 0 && !created_topics) { - Test::Say("Creating two topics with 2 partitions each that match regex\n"); + Test::Say( + "Creating two topics with 2 partitions each that match regex\n"); test_create_topic(NULL, topic_name_1.c_str(), 2, 1); test_create_topic(NULL, topic_name_2.c_str(), 2, 1); /* The consumers should autonomously discover these topics and start @@ -1728,7 +1856,6 @@ static void n_wildcard () { if (Test::assignment_partition_count(c1, NULL) == 2 && Test::assignment_partition_count(c2, NULL) == 2 && !deleted_topic) { - if (rebalance_cb1.nonempty_assign_call_cnt == 1) { /* just one rebalance was required */ TEST_ASSERT(rebalance_cb1.nonempty_assign_call_cnt == 1, @@ -1853,53 +1980,54 @@ static void o_java_interop() { std::string topic_name_1 = Test::mk_topic_name("0113_o_2", 1); std::string topic_name_2 = Test::mk_topic_name("0113_o_6", 1); - std::string group_name = Test::mk_unique_group_name("0113_o"); + std::string group_name = Test::mk_unique_group_name("0113_o"); test_create_topic(NULL, topic_name_1.c_str(), 2, 1); test_create_topic(NULL, topic_name_2.c_str(), 6, 1); DefaultRebalanceCb rebalance_cb; - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25); - test_wait_topic_exists(c->c_ptr(), 
topic_name_1.c_str(), 10*1000); - test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10*1000); + RdKafka::KafkaConsumer *c = make_consumer( + "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000); Test::subscribe(c, topic_name_1, topic_name_2); - bool done = false; - bool changed_subscription = false; + bool done = false; + bool changed_subscription = false; bool changed_subscription_done = false; - int java_pid = 0; + int java_pid = 0; while (!done) { Test::poll_once(c, 500); - if (1) // FIXME: Remove after debugging - Test::Say(tostr() << "Assignment partition count: " << - Test::assignment_partition_count(c, NULL) << - ", changed_sub " << changed_subscription << - ", changed_sub_done " << changed_subscription_done << - ", assign_call_cnt " << rebalance_cb.assign_call_cnt << - "\n"); + if (1) // FIXME: Remove after debugging + Test::Say(tostr() << "Assignment partition count: " + << Test::assignment_partition_count(c, NULL) + << ", changed_sub " << changed_subscription + << ", changed_sub_done " << changed_subscription_done + << ", assign_call_cnt " << rebalance_cb.assign_call_cnt + << "\n"); if (Test::assignment_partition_count(c, NULL) == 8 && !java_pid) { Test::Say(_C_GRN "librdkafka consumer assigned to 8 partitions\n"); string bootstrapServers = get_bootstrap_servers(); const char *argv[1 + 1 + 1 + 1 + 1 + 1]; - size_t i = 0; + size_t i = 0; argv[i++] = "test1"; argv[i++] = bootstrapServers.c_str(); argv[i++] = topic_name_1.c_str(); argv[i++] = topic_name_2.c_str(); argv[i++] = group_name.c_str(); - argv[i] = NULL; - java_pid = test_run_java("IncrementalRebalanceCli", argv); + argv[i] = NULL; + java_pid = test_run_java("IncrementalRebalanceCli", argv); if (java_pid <= 0) Test::Fail(tostr() << "Unexpected pid: " << java_pid); } - if (Test::assignment_partition_count(c, NULL) == 4 && - java_pid != 0 && 
+ if (Test::assignment_partition_count(c, NULL) == 4 && java_pid != 0 && !changed_subscription) { if (rebalance_cb.assign_call_cnt != 2) Test::Fail(tostr() << "Expecting consumer's assign_call_cnt to be 2, " - "not " << rebalance_cb.assign_call_cnt); + "not " + << rebalance_cb.assign_call_cnt); Test::Say(_C_GRN "Java consumer is now part of the group\n"); Test::subscribe(c, topic_name_1); changed_subscription = true; @@ -1911,9 +2039,9 @@ static void o_java_interop() { if (Test::assignment_partition_count(c, NULL) == 2 && changed_subscription && rebalance_cb.assign_call_cnt <= 5 && !changed_subscription_done) { - /* All topic 1 partitions will be allocated to this consumer whether or not the Java - * consumer has unsubscribed yet because the sticky algorithm attempts to ensure - * partition counts are even. */ + /* All topic 1 partitions will be allocated to this consumer whether or + * not the Java consumer has unsubscribed yet because the sticky algorithm + * attempts to ensure partition counts are even. */ Test::Say(_C_GRN "Consumer 1 has unsubscribed from topic 2\n"); changed_subscription_done = true; } @@ -1921,8 +2049,8 @@ static void o_java_interop() { if (Test::assignment_partition_count(c, NULL) == 2 && changed_subscription && rebalance_cb.assign_call_cnt >= 5 && changed_subscription_done) { - /* When the java consumer closes, this will cause an empty assign rebalance_cb event, - * allowing detection of when this has happened. */ + /* When the java consumer closes, this will cause an empty assign + * rebalance_cb event, allowing detection of when this has happened. */ Test::Say(_C_GRN "Java consumer has left the group\n"); done = true; } @@ -1931,7 +2059,8 @@ static void o_java_interop() { Test::Say("Closing consumer\n"); c->close(); - /* Expected behavior is IncrementalRebalanceCli will exit cleanly, timeout otherwise. */ + /* Expected behavior is IncrementalRebalanceCli will exit cleanly, timeout + * otherwise. 
*/ test_waitpid(java_pid); delete c; @@ -1943,25 +2072,31 @@ static void o_java_interop() { /* Check behavior when: * - Single consumer subscribes to topic. - * - Soon after (timing such that rebalance is probably in progress) it subscribes to a different topic. + * - Soon after (timing such that rebalance is probably in progress) it + * subscribes to a different topic. */ -static void s_subscribe_when_rebalancing (int variation) { +static void s_subscribe_when_rebalancing(int variation) { SUB_TEST("variation %d", variation); - std::string topic_name_1 = Test::mk_topic_name("0113-cooperative_rebalance", 1); - std::string topic_name_2 = Test::mk_topic_name("0113-cooperative_rebalance", 1); - std::string topic_name_3 = Test::mk_topic_name("0113-cooperative_rebalance", 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_2 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_3 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); test_create_topic(NULL, topic_name_1.c_str(), 1, 1); test_create_topic(NULL, topic_name_2.c_str(), 1, 1); test_create_topic(NULL, topic_name_3.c_str(), 1, 1); DefaultRebalanceCb rebalance_cb; - RdKafka::KafkaConsumer *c = make_consumer("C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25); - test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10*1000); - test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10*1000); - test_wait_topic_exists(c->c_ptr(), topic_name_3.c_str(), 10*1000); + RdKafka::KafkaConsumer *c = make_consumer( + "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000); + 
test_wait_topic_exists(c->c_ptr(), topic_name_3.c_str(), 10 * 1000); if (variation == 2 || variation == 4 || variation == 6) { /* Pre-cache metadata for all topics. */ @@ -2003,38 +2138,52 @@ static void s_subscribe_when_rebalancing (int variation) { static void t_max_poll_interval_exceeded(int variation) { SUB_TEST("variation %d", variation); - std::string topic_name_1 = Test::mk_topic_name("0113-cooperative_rebalance", 1); - std::string group_name = Test::mk_unique_group_name("0113-cooperative_rebalance"); + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); test_create_topic(NULL, topic_name_1.c_str(), 2, 1); std::vector > additional_conf; - additional_conf.push_back(std::pair(std::string("session.timeout.ms"), std::string("6000"))); - additional_conf.push_back(std::pair(std::string("max.poll.interval.ms"), std::string("7000"))); + additional_conf.push_back(std::pair( + std::string("session.timeout.ms"), std::string("6000"))); + additional_conf.push_back(std::pair( + std::string("max.poll.interval.ms"), std::string("7000"))); DefaultRebalanceCb rebalance_cb1; - RdKafka::KafkaConsumer *c1 = make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, &rebalance_cb1, 30); + RdKafka::KafkaConsumer *c1 = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb1, 30); DefaultRebalanceCb rebalance_cb2; - RdKafka::KafkaConsumer *c2 = make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf, &rebalance_cb2, 30); + RdKafka::KafkaConsumer *c2 = + make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb2, 30); - test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10*1000); - test_wait_topic_exists(c2->c_ptr(), topic_name_1.c_str(), 10*1000); + test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c2->c_ptr(), 
topic_name_1.c_str(), 10 * 1000); Test::subscribe(c1, topic_name_1); Test::subscribe(c2, topic_name_1); - bool done = false; + bool done = false; bool both_have_been_assigned = false; while (!done) { if (!both_have_been_assigned) Test::poll_once(c1, 500); Test::poll_once(c2, 500); - if (Test::assignment_partition_count(c1, NULL) == 1 && Test::assignment_partition_count(c2, NULL) == 1 && !both_have_been_assigned) { - Test::Say(tostr() << "Both consumers are assigned to topic " << topic_name_1 << ". WAITING 7 seconds for max.poll.interval.ms to be exceeded\n"); + if (Test::assignment_partition_count(c1, NULL) == 1 && + Test::assignment_partition_count(c2, NULL) == 1 && + !both_have_been_assigned) { + Test::Say( + tostr() + << "Both consumers are assigned to topic " << topic_name_1 + << ". WAITING 7 seconds for max.poll.interval.ms to be exceeded\n"); both_have_been_assigned = true; } - if (Test::assignment_partition_count(c2, NULL) == 2 && both_have_been_assigned) { + if (Test::assignment_partition_count(c2, NULL) == 2 && + both_have_been_assigned) { Test::Say("Consumer 1 is no longer assigned any partitions, done\n"); done = true; } @@ -2042,28 +2191,39 @@ static void t_max_poll_interval_exceeded(int variation) { if (variation == 1) { if (rebalance_cb1.lost_call_cnt != 0) - Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be 0, not: " << rebalance_cb1.lost_call_cnt); - Test::poll_once(c1, 500); /* Eat the max poll interval exceeded error message */ - Test::poll_once(c1, 500); /* Trigger the rebalance_cb with lost partitions */ + Test::Fail( + tostr() << "Expected consumer 1 lost revoke count to be 0, not: " + << rebalance_cb1.lost_call_cnt); + Test::poll_once(c1, + 500); /* Eat the max poll interval exceeded error message */ + Test::poll_once(c1, + 500); /* Trigger the rebalance_cb with lost partitions */ if (rebalance_cb1.lost_call_cnt != 1) - Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be 1, not: " << 
rebalance_cb1.lost_call_cnt); + Test::Fail( + tostr() << "Expected consumer 1 lost revoke count to be 1, not: " + << rebalance_cb1.lost_call_cnt); } c1->close(); c2->close(); if (rebalance_cb1.lost_call_cnt != 1) - Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be 1, not: " << rebalance_cb1.lost_call_cnt); + Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be 1, not: " + << rebalance_cb1.lost_call_cnt); if (rebalance_cb1.assign_call_cnt != 1) - Test::Fail(tostr() << "Expected consumer 1 assign count to be 1, not: " << rebalance_cb1.assign_call_cnt); + Test::Fail(tostr() << "Expected consumer 1 assign count to be 1, not: " + << rebalance_cb1.assign_call_cnt); if (rebalance_cb2.assign_call_cnt != 2) - Test::Fail(tostr() << "Expected consumer 1 assign count to be 2, not: " << rebalance_cb1.assign_call_cnt); + Test::Fail(tostr() << "Expected consumer 1 assign count to be 2, not: " + << rebalance_cb1.assign_call_cnt); if (rebalance_cb1.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expected consumer 1 revoke count to be 1, not: " << rebalance_cb1.revoke_call_cnt); + Test::Fail(tostr() << "Expected consumer 1 revoke count to be 1, not: " + << rebalance_cb1.revoke_call_cnt); if (rebalance_cb2.revoke_call_cnt != 1) - Test::Fail(tostr() << "Expected consumer 2 revoke count to be 1, not: " << rebalance_cb1.revoke_call_cnt); + Test::Fail(tostr() << "Expected consumer 2 revoke count to be 1, not: " + << rebalance_cb1.revoke_call_cnt); delete c1; delete c2; @@ -2076,9 +2236,10 @@ static void t_max_poll_interval_exceeded(int variation) { * @brief Poll all consumers until there are no more events or messages * and the timeout has expired. 
*/ -static void poll_all_consumers (RdKafka::KafkaConsumer **consumers, - DefaultRebalanceCb *rebalance_cbs, - size_t num, int timeout_ms) { +static void poll_all_consumers(RdKafka::KafkaConsumer **consumers, + DefaultRebalanceCb *rebalance_cbs, + size_t num, + int timeout_ms) { int64_t ts_end = test_clock() + (timeout_ms * 1000); /* Poll all consumers until no more events are seen, @@ -2086,9 +2247,8 @@ static void poll_all_consumers (RdKafka::KafkaConsumer **consumers, bool evented; do { evented = false; - for (size_t i = 0 ; i < num ; i++) { - int block_ms = - min(10, (int)((ts_end - test_clock()) / 1000)); + for (size_t i = 0; i < num; i++) { + int block_ms = min(10, (int)((ts_end - test_clock()) / 1000)); while (rebalance_cbs[i].poll_once(consumers[i], max(block_ms, 0))) evented = true; } @@ -2104,11 +2264,12 @@ static void poll_all_consumers (RdKafka::KafkaConsumer **consumers, * TODO: incorporate committing offsets. */ -static void u_multiple_subscription_changes (bool use_rebalance_cb, int subscription_variation) { - const int N_CONSUMERS = 8; - const int N_TOPICS = 2; - const int N_PARTS_PER_TOPIC = N_CONSUMERS * N_TOPICS; - const int N_PARTITIONS = N_PARTS_PER_TOPIC * N_TOPICS; +static void u_multiple_subscription_changes(bool use_rebalance_cb, + int subscription_variation) { + const int N_CONSUMERS = 8; + const int N_TOPICS = 2; + const int N_PARTS_PER_TOPIC = N_CONSUMERS * N_TOPICS; + const int N_PARTITIONS = N_PARTS_PER_TOPIC * N_TOPICS; const int N_MSGS_PER_PARTITION = 1000; SUB_TEST("use_rebalance_cb: %d, subscription_variation: %d", @@ -2116,7 +2277,7 @@ static void u_multiple_subscription_changes (bool use_rebalance_cb, int subscrip string topic_name_1 = Test::mk_topic_name("0113u_1", 1); string topic_name_2 = Test::mk_topic_name("0113u_2", 1); - string group_name = Test::mk_unique_group_name("0113u"); + string group_name = Test::mk_unique_group_name("0113u"); test_create_topic(NULL, topic_name_1.c_str(), N_PARTS_PER_TOPIC, 1); 
test_create_topic(NULL, topic_name_2.c_str(), N_PARTS_PER_TOPIC, 1); @@ -2125,27 +2286,28 @@ static void u_multiple_subscription_changes (bool use_rebalance_cb, int subscrip DefaultRebalanceCb rebalance_cbs[N_CONSUMERS]; RdKafka::KafkaConsumer *consumers[N_CONSUMERS]; - for (int i = 0 ; i < N_CONSUMERS ; i++) { + for (int i = 0; i < N_CONSUMERS; i++) { std::string name = tostr() << "C_" << i; - consumers[i] = make_consumer(name.c_str(), group_name, "cooperative-sticky", - NULL, - use_rebalance_cb ? &rebalance_cbs[i] : NULL, - 120); + consumers[i] = + make_consumer(name.c_str(), group_name, "cooperative-sticky", NULL, + use_rebalance_cb ? &rebalance_cbs[i] : NULL, 120); } - test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_1.c_str(), 10*1000); - test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_2.c_str(), 10*1000); + test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_1.c_str(), + 10 * 1000); + test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_2.c_str(), + 10 * 1000); /* * Seed all partitions with the same number of messages so we later can * verify that consumption is working. 
*/ - vector >ptopics; - ptopics.push_back(pair(Toppar(topic_name_1, N_PARTS_PER_TOPIC), - N_MSGS_PER_PARTITION)); - ptopics.push_back(pair(Toppar(topic_name_2, N_PARTS_PER_TOPIC), - N_MSGS_PER_PARTITION)); + vector > ptopics; + ptopics.push_back(pair(Toppar(topic_name_1, N_PARTS_PER_TOPIC), + N_MSGS_PER_PARTITION)); + ptopics.push_back(pair(Toppar(topic_name_2, N_PARTS_PER_TOPIC), + N_MSGS_PER_PARTITION)); produce_msgs(ptopics); @@ -2193,78 +2355,61 @@ static void u_multiple_subscription_changes (bool use_rebalance_cb, int subscrip int timestamp_ms; int consumer; const vector *topics; - } playbook[] = { - /* timestamp_ms, consumer_number, subscribe-to-topics */ - { 0, 0, &SUBSCRIPTION_1 }, /* Cmd 0 */ - { 4000, 1, &SUBSCRIPTION_1 }, - { 4000, 1, &SUBSCRIPTION_1 }, - { 4000, 1, &SUBSCRIPTION_1 }, - { 4000, 2, &SUBSCRIPTION_1 }, - { 6000, 3, &SUBSCRIPTION_1 }, /* Cmd 5 */ - { 6000, 4, &SUBSCRIPTION_1 }, - { 6000, 5, &SUBSCRIPTION_1 }, - { 6000, 6, &SUBSCRIPTION_1 }, - { 6000, 7, &SUBSCRIPTION_2 }, - { 6000, 1, &SUBSCRIPTION_1 }, /* Cmd 10 */ - { 6000, 1, &SUBSCRIPTION_2 }, - { 6000, 1, &SUBSCRIPTION_1 }, - { 6000, 2, &SUBSCRIPTION_2 }, - { 7000, 2, &SUBSCRIPTION_1 }, - { 7000, 1, &SUBSCRIPTION_2 }, /* Cmd 15 */ - { 8000, 0, &SUBSCRIPTION_2 }, - { 8000, 1, &SUBSCRIPTION_1 }, - { 8000, 0, &SUBSCRIPTION_1 }, - { 13000, 2, &SUBSCRIPTION_1 }, - { 13000, 1, &SUBSCRIPTION_2 }, /* Cmd 20 */ - { 13000, 5, &SUBSCRIPTION_2 }, - { 14000, 6, &SUBSCRIPTION_2 }, - { 15000, 7, &SUBSCRIPTION_1 }, - { 15000, 1, &SUBSCRIPTION_1 }, - { 15000, 5, &SUBSCRIPTION_1 }, /* Cmd 25 */ - { 15000, 6, &SUBSCRIPTION_1 }, - { INT_MAX, 0, 0 } - }; + } playbook[] = {/* timestamp_ms, consumer_number, subscribe-to-topics */ + {0, 0, &SUBSCRIPTION_1}, /* Cmd 0 */ + {4000, 1, &SUBSCRIPTION_1}, {4000, 1, &SUBSCRIPTION_1}, + {4000, 1, &SUBSCRIPTION_1}, {4000, 2, &SUBSCRIPTION_1}, + {6000, 3, &SUBSCRIPTION_1}, /* Cmd 5 */ + {6000, 4, &SUBSCRIPTION_1}, {6000, 5, &SUBSCRIPTION_1}, + {6000, 6, &SUBSCRIPTION_1}, 
{6000, 7, &SUBSCRIPTION_2}, + {6000, 1, &SUBSCRIPTION_1}, /* Cmd 10 */ + {6000, 1, &SUBSCRIPTION_2}, {6000, 1, &SUBSCRIPTION_1}, + {6000, 2, &SUBSCRIPTION_2}, {7000, 2, &SUBSCRIPTION_1}, + {7000, 1, &SUBSCRIPTION_2}, /* Cmd 15 */ + {8000, 0, &SUBSCRIPTION_2}, {8000, 1, &SUBSCRIPTION_1}, + {8000, 0, &SUBSCRIPTION_1}, {13000, 2, &SUBSCRIPTION_1}, + {13000, 1, &SUBSCRIPTION_2}, /* Cmd 20 */ + {13000, 5, &SUBSCRIPTION_2}, {14000, 6, &SUBSCRIPTION_2}, + {15000, 7, &SUBSCRIPTION_1}, {15000, 1, &SUBSCRIPTION_1}, + {15000, 5, &SUBSCRIPTION_1}, /* Cmd 25 */ + {15000, 6, &SUBSCRIPTION_1}, {INT_MAX, 0, 0}}; /* * Run the playbook */ - int cmd_number = 0; + int cmd_number = 0; uint64_t ts_start = test_clock(); while (playbook[cmd_number].timestamp_ms != INT_MAX) { - TEST_ASSERT(playbook[cmd_number].consumer < N_CONSUMERS); - Test::Say(tostr() << "Cmd #" << cmd_number << ": wait " << - playbook[cmd_number].timestamp_ms << "ms\n"); + Test::Say(tostr() << "Cmd #" << cmd_number << ": wait " + << playbook[cmd_number].timestamp_ms << "ms\n"); poll_all_consumers(consumers, rebalance_cbs, N_CONSUMERS, playbook[cmd_number].timestamp_ms - - (int)((test_clock() - ts_start) / 1000)); + (int)((test_clock() - ts_start) / 1000)); /* Verify consumer assignments match subscribed topics */ - map all_assignments; - for (int i = 0 ; i < N_CONSUMERS ; i++) - verify_consumer_assignment(consumers[i], - rebalance_cbs[i], - consumer_topics[i], - /* Allow empty assignment */ - true, - /* Allow mismatch between subscribed topics - * and actual assignment since we can't - * synchronize the last subscription - * to the current assignment due to - * an unknown number of rebalances required - * for the final assignment to settle. - * This is instead checked at the end of - * this test case. 
*/ - true, - &all_assignments, - -1/* no msgcnt check*/); - - int cid = playbook[cmd_number].consumer; + map all_assignments; + for (int i = 0; i < N_CONSUMERS; i++) + verify_consumer_assignment( + consumers[i], rebalance_cbs[i], consumer_topics[i], + /* Allow empty assignment */ + true, + /* Allow mismatch between subscribed topics + * and actual assignment since we can't + * synchronize the last subscription + * to the current assignment due to + * an unknown number of rebalances required + * for the final assignment to settle. + * This is instead checked at the end of + * this test case. */ + true, &all_assignments, -1 /* no msgcnt check*/); + + int cid = playbook[cmd_number].consumer; RdKafka::KafkaConsumer *consumer = consumers[playbook[cmd_number].consumer]; - const vector *topics = playbook[cmd_number].topics; + const vector *topics = playbook[cmd_number].topics; /* * Update our view of the consumer's subscribed topics and vice versa. @@ -2288,16 +2433,17 @@ static void u_multiple_subscription_changes (bool use_rebalance_cb, int subscrip * Change subscription */ if (!topics->empty()) { - Test::Say(tostr() << "Consumer: " << consumer->name() << - " is subscribing to topics " << string_vec_to_str(*topics) << - " after " << ((test_clock() - ts_start) / 1000) << "ms\n"); + Test::Say(tostr() << "Consumer: " << consumer->name() + << " is subscribing to topics " + << string_vec_to_str(*topics) << " after " + << ((test_clock() - ts_start) / 1000) << "ms\n"); err = consumer->subscribe(*topics); TEST_ASSERT(!err, "Expected subscribe() to succeed, got %s", RdKafka::err2str(err).c_str()); } else { - Test::Say(tostr() << "Consumer: " << consumer->name() << - " is unsubscribing after " << - ((test_clock() - ts_start) / 1000) << "ms\n"); + Test::Say(tostr() << "Consumer: " << consumer->name() + << " is unsubscribing after " + << ((test_clock() - ts_start) / 1000) << "ms\n"); Test::unsubscribe(consumer); } @@ -2317,14 +2463,14 @@ static void u_multiple_subscription_changes 
(bool use_rebalance_cb, int subscrip sort(subscription.begin(), subscription.end()); - Test::Say(tostr() << "Consumer " << consumer->name() << - " subscription is now " << string_vec_to_str(subscription) - << "\n"); + Test::Say(tostr() << "Consumer " << consumer->name() + << " subscription is now " + << string_vec_to_str(subscription) << "\n"); if (subscription != *topics) - Test::Fail(tostr() << "Expected consumer " << consumer->name() << - " subscription: " << string_vec_to_str(*topics) << - " but got: " << string_vec_to_str(subscription)); + Test::Fail(tostr() << "Expected consumer " << consumer->name() + << " subscription: " << string_vec_to_str(*topics) + << " but got: " << string_vec_to_str(subscription)); cmd_number++; } @@ -2337,7 +2483,7 @@ static void u_multiple_subscription_changes (bool use_rebalance_cb, int subscrip Test::Say(_C_YEL "Waiting for final assignment state\n"); int done_count = 0; /* Allow at least 20 seconds for group to stabilize. */ - int64_t stabilize_until = test_clock() + (20 * 1000*1000); /* 20s */ + int64_t stabilize_until = test_clock() + (20 * 1000 * 1000); /* 20s */ while (done_count < 2) { bool stabilized = test_clock() > stabilize_until; @@ -2346,50 +2492,46 @@ static void u_multiple_subscription_changes (bool use_rebalance_cb, int subscrip /* Verify consumer assignments */ int counts[N_CONSUMERS]; - map all_assignments; - Test::Say(tostr() << "Consumer assignments " << - "(subscription_variation " << subscription_variation << ")" << - (stabilized ? " (stabilized)" : "") << - (use_rebalance_cb ? - " (use_rebalance_cb)" : " (no rebalance cb)") << - ":\n"); - for (int i = 0 ; i < N_CONSUMERS ; i++) { + map all_assignments; + Test::Say(tostr() << "Consumer assignments " + << "(subscription_variation " << subscription_variation + << ")" << (stabilized ? " (stabilized)" : "") + << (use_rebalance_cb ? 
" (use_rebalance_cb)" + : " (no rebalance cb)") + << ":\n"); + for (int i = 0; i < N_CONSUMERS; i++) { bool last_rebalance_stabilized = - stabilized && - (!use_rebalance_cb || - /* session.timeout.ms * 2 + 1 */ - test_clock() > rebalance_cbs[i].ts_last_assign + (13 * 1000*1000)); - - counts[i] = verify_consumer_assignment(consumers[i], - rebalance_cbs[i], - consumer_topics[i], - /* allow empty */ - true, - /* if we're waiting for a - * rebalance it is okay for the - * current assignment to contain - * topics that this consumer - * (no longer) subscribes to. */ - !last_rebalance_stabilized || - !use_rebalance_cb || - rebalance_cbs[i].wait_rebalance, - /* do not allow assignments for - * topics that are not subscribed*/ - &all_assignments, - /* Verify received message counts - * once the assignments have - * stabilized. - * Requires the rebalance cb.*/ - done_count > 0 && - use_rebalance_cb ? - N_MSGS_PER_PARTITION : -1); + stabilized && + (!use_rebalance_cb || + /* session.timeout.ms * 2 + 1 */ + test_clock() > rebalance_cbs[i].ts_last_assign + (13 * 1000 * 1000)); + + counts[i] = verify_consumer_assignment( + consumers[i], rebalance_cbs[i], consumer_topics[i], + /* allow empty */ + true, + /* if we're waiting for a + * rebalance it is okay for the + * current assignment to contain + * topics that this consumer + * (no longer) subscribes to. */ + !last_rebalance_stabilized || !use_rebalance_cb || + rebalance_cbs[i].wait_rebalance, + /* do not allow assignments for + * topics that are not subscribed*/ + &all_assignments, + /* Verify received message counts + * once the assignments have + * stabilized. + * Requires the rebalance cb.*/ + done_count > 0 && use_rebalance_cb ? 
N_MSGS_PER_PARTITION : -1); } - Test::Say(tostr() << all_assignments.size() << "/" << N_PARTITIONS << - " partitions assigned\n"); + Test::Say(tostr() << all_assignments.size() << "/" << N_PARTITIONS + << " partitions assigned\n"); bool done = true; - for (int i = 0 ; i < N_CONSUMERS ; i++) { + for (int i = 0; i < N_CONSUMERS; i++) { /* For each topic the consumer subscribes to it should * be assigned its share of partitions. */ int exp_parts = 0; @@ -2397,12 +2539,12 @@ static void u_multiple_subscription_changes (bool use_rebalance_cb, int subscrip it != consumer_topics[i].end(); it++) exp_parts += N_PARTS_PER_TOPIC / (int)topic_consumers[*it].size(); - Test::Say(tostr() << - (counts[i] == exp_parts ? "" : _C_YEL) << - "Consumer " << consumers[i]->name() << " has " << - counts[i] << " assigned partitions (" << - consumer_topics[i].size() << " subscribed topic(s))" << - ", expecting " << exp_parts << " assigned partitions\n"); + Test::Say(tostr() << (counts[i] == exp_parts ? "" : _C_YEL) << "Consumer " + << consumers[i]->name() << " has " << counts[i] + << " assigned partitions (" << consumer_topics[i].size() + << " subscribed topic(s))" + << ", expecting " << exp_parts + << " assigned partitions\n"); if (counts[i] != exp_parts) done = false; @@ -2410,15 +2552,14 @@ static void u_multiple_subscription_changes (bool use_rebalance_cb, int subscrip if (done && stabilized) { done_count++; - Test::Say(tostr() << "All assignments verified, done count is " << - done_count << "\n"); + Test::Say(tostr() << "All assignments verified, done count is " + << done_count << "\n"); } } Test::Say("Disposing consumers\n"); - for (int i = 0 ; i < N_CONSUMERS ; i++) { - TEST_ASSERT(!use_rebalance_cb || - !rebalance_cbs[i].wait_rebalance, + for (int i = 0; i < N_CONSUMERS; i++) { + TEST_ASSERT(!use_rebalance_cb || !rebalance_cbs[i].wait_rebalance, "Consumer %d still waiting for rebalance", i); if (i & 1) consumers[i]->close(); @@ -2432,608 +2573,559 @@ static void 
u_multiple_subscription_changes (bool use_rebalance_cb, int subscrip extern "C" { - static int rebalance_cnt; - static rd_kafka_resp_err_t rebalance_exp_event; - static rd_bool_t rebalance_exp_lost; - - extern void test_print_partition_list (const rd_kafka_topic_partition_list_t - *partitions); +static int rebalance_cnt; +static rd_kafka_resp_err_t rebalance_exp_event; +static rd_bool_t rebalance_exp_lost; +extern void test_print_partition_list( + const rd_kafka_topic_partition_list_t *partitions); - static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { - rebalance_cnt++; - TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", - rebalance_cnt, rd_kafka_err2name(err), parts->cnt); - test_print_partition_list(parts); +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + rebalance_cnt++; + TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", rebalance_cnt, + rd_kafka_err2name(err), parts->cnt); - TEST_ASSERT(err == rebalance_exp_event || - rebalance_exp_event == RD_KAFKA_RESP_ERR_NO_ERROR, - "Expected rebalance event %s, not %s", - rd_kafka_err2name(rebalance_exp_event), - rd_kafka_err2name(err)); + test_print_partition_list(parts); - if (rebalance_exp_lost) { - TEST_ASSERT(rd_kafka_assignment_lost(rk), - "Expected partitions lost"); - TEST_SAY("Partitions were lost\n"); - } + TEST_ASSERT(err == rebalance_exp_event || + rebalance_exp_event == RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected rebalance event %s, not %s", + rd_kafka_err2name(rebalance_exp_event), rd_kafka_err2name(err)); - if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { - test_consumer_incremental_assign("assign", rk, parts); - } else { - test_consumer_incremental_unassign("unassign", rk, parts); - } + if (rebalance_exp_lost) { + TEST_ASSERT(rd_kafka_assignment_lost(rk), "Expected partitions lost"); + TEST_SAY("Partitions were lost\n"); } - /** - * @brief Wait for an 
expected rebalance event, or fail. - */ - static void expect_rebalance0 (const char *func, int line, - const char *what, rd_kafka_t *c, - rd_kafka_resp_err_t exp_event, - rd_bool_t exp_lost, - int timeout_s) { - int64_t tmout = test_clock() + (timeout_s * 1000000); - int start_cnt = rebalance_cnt; - - TEST_SAY("%s:%d: Waiting for %s (%s) for %ds\n", - func, line, what, rd_kafka_err2name(exp_event), timeout_s); - - rebalance_exp_lost = exp_lost; - rebalance_exp_event = exp_event; - - while (tmout > test_clock() && rebalance_cnt == start_cnt) { - test_consumer_poll_once(c, NULL, 1000); - } + if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { + test_consumer_incremental_assign("assign", rk, parts); + } else { + test_consumer_incremental_unassign("unassign", rk, parts); + } +} - if (rebalance_cnt == start_cnt + 1) { - rebalance_exp_event = RD_KAFKA_RESP_ERR_NO_ERROR; - rebalance_exp_lost = exp_lost = rd_false; - return; - } +/** + * @brief Wait for an expected rebalance event, or fail. + */ +static void expect_rebalance0(const char *func, + int line, + const char *what, + rd_kafka_t *c, + rd_kafka_resp_err_t exp_event, + rd_bool_t exp_lost, + int timeout_s) { + int64_t tmout = test_clock() + (timeout_s * 1000000); + int start_cnt = rebalance_cnt; + + TEST_SAY("%s:%d: Waiting for %s (%s) for %ds\n", func, line, what, + rd_kafka_err2name(exp_event), timeout_s); + + rebalance_exp_lost = exp_lost; + rebalance_exp_event = exp_event; + + while (tmout > test_clock() && rebalance_cnt == start_cnt) { + test_consumer_poll_once(c, NULL, 1000); + } - TEST_FAIL("%s:%d: Timed out waiting for %s (%s)", - func, line, what, rd_kafka_err2name(exp_event)); + if (rebalance_cnt == start_cnt + 1) { + rebalance_exp_event = RD_KAFKA_RESP_ERR_NO_ERROR; + rebalance_exp_lost = exp_lost = rd_false; + return; } -#define expect_rebalance(WHAT,C,EXP_EVENT,EXP_LOST,TIMEOUT_S) \ - expect_rebalance0(__FUNCTION__, __LINE__, \ - WHAT, C, EXP_EVENT, EXP_LOST, TIMEOUT_S) + TEST_FAIL("%s:%d: Timed out 
waiting for %s (%s)", func, line, what, + rd_kafka_err2name(exp_event)); +} +#define expect_rebalance(WHAT, C, EXP_EVENT, EXP_LOST, TIMEOUT_S) \ + expect_rebalance0(__FUNCTION__, __LINE__, WHAT, C, EXP_EVENT, EXP_LOST, \ + TIMEOUT_S) - /* Check lost partitions revoke occurs on ILLEGAL_GENERATION heartbeat error. - */ - static void p_lost_partitions_heartbeat_illegal_generation_test () { - const char *bootstraps; - rd_kafka_mock_cluster_t *mcluster; - const char *groupid = "mygroup"; - const char *topic = "test"; - rd_kafka_t *c; - rd_kafka_conf_t *conf; +/* Check lost partitions revoke occurs on ILLEGAL_GENERATION heartbeat error. + */ - SUB_TEST_QUICK(); +static void p_lost_partitions_heartbeat_illegal_generation_test() { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *groupid = "mygroup"; + const char *topic = "test"; + rd_kafka_t *c; + rd_kafka_conf_t *conf; - mcluster = test_mock_cluster_new(3, &bootstraps); + SUB_TEST_QUICK(); - rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); + mcluster = test_mock_cluster_new(3, &bootstraps); - /* Seed the topic with messages */ - test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, - "bootstrap.servers", bootstraps, - "batch.num.messages", "10", - NULL); + rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); - test_conf_init(&conf, NULL, 30); - test_conf_set(conf, "bootstrap.servers", bootstraps); - test_conf_set(conf, "security.protocol", "PLAINTEXT"); - test_conf_set(conf, "group.id", groupid); - test_conf_set(conf, "session.timeout.ms", "5000"); - test_conf_set(conf, "heartbeat.interval.ms", "1000"); - test_conf_set(conf, "auto.offset.reset", "earliest"); - test_conf_set(conf, "enable.auto.commit", "false"); - test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); + /* Seed the topic with messages */ + test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", NULL); - c = 
test_create_consumer(groupid, rebalance_cb, conf, NULL); + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "security.protocol", "PLAINTEXT"); + test_conf_set(conf, "group.id", groupid); + test_conf_set(conf, "session.timeout.ms", "5000"); + test_conf_set(conf, "heartbeat.interval.ms", "1000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); - test_consumer_subscribe(c, topic); + c = test_create_consumer(groupid, rebalance_cb, conf, NULL); - expect_rebalance("initial assignment", c, - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - rd_false/*don't expect lost*/, 5+2); + test_consumer_subscribe(c, topic); - /* Fail heartbeats */ - rd_kafka_mock_push_request_errors( - mcluster, RD_KAFKAP_Heartbeat, - 5, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION); + expect_rebalance("initial assignment", c, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*don't expect lost*/, 5 + 2); - expect_rebalance("lost partitions", c, - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - rd_true/*expect lost*/, 10+2); + /* Fail heartbeats */ + rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_Heartbeat, 5, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION); - rd_kafka_mock_clear_request_errors( - mcluster, RD_KAFKAP_Heartbeat); + expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + rd_true /*expect lost*/, 10 + 2); - expect_rebalance("rejoin after lost", c, - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - rd_false/*don't expect lost*/, 10+2); + rd_kafka_mock_clear_request_errors(mcluster, 
RD_KAFKAP_Heartbeat); - TEST_SAY("Closing consumer\n"); - test_consumer_close(c); + expect_rebalance("rejoin after lost", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*don't expect lost*/, 10 + 2); - TEST_SAY("Destroying consumer\n"); - rd_kafka_destroy(c); + TEST_SAY("Closing consumer\n"); + test_consumer_close(c); - TEST_SAY("Destroying mock cluster\n"); - test_mock_cluster_destroy(mcluster); + TEST_SAY("Destroying consumer\n"); + rd_kafka_destroy(c); - SUB_TEST_PASS(); - } + TEST_SAY("Destroying mock cluster\n"); + test_mock_cluster_destroy(mcluster); + SUB_TEST_PASS(); +} - /* Check lost partitions revoke occurs on ILLEGAL_GENERATION JoinGroup - * or SyncGroup error. - */ - static void q_lost_partitions_illegal_generation_test ( - rd_bool_t test_joingroup_fail) { - - const char *bootstraps; - rd_kafka_mock_cluster_t *mcluster; - const char *groupid = "mygroup"; - const char *topic1 = "test1"; - const char *topic2 = "test2"; - rd_kafka_t *c; - rd_kafka_conf_t *conf; - rd_kafka_resp_err_t err; - rd_kafka_topic_partition_list_t *topics; - - SUB_TEST0(!test_joingroup_fail/*quick*/, - "test_joingroup_fail=%d", test_joingroup_fail); - - mcluster = test_mock_cluster_new(3, &bootstraps); - - rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); - - /* Seed the topic1 with messages */ - test_produce_msgs_easy_v(topic1, 0, 0, 0, 100, 10, - "bootstrap.servers", bootstraps, - "batch.num.messages", "10", - NULL); - - /* Seed the topic2 with messages */ - test_produce_msgs_easy_v(topic2, 0, 0, 0, 100, 10, - "bootstrap.servers", bootstraps, - "batch.num.messages", "10", - NULL); - - test_conf_init(&conf, NULL, 30); - test_conf_set(conf, "bootstrap.servers", bootstraps); - test_conf_set(conf, "security.protocol", "PLAINTEXT"); - test_conf_set(conf, "group.id", groupid); - test_conf_set(conf, "session.timeout.ms", "5000"); - test_conf_set(conf, "heartbeat.interval.ms", "1000"); - test_conf_set(conf, "auto.offset.reset", "earliest"); - test_conf_set(conf, 
"enable.auto.commit", "false"); - test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); - - c = test_create_consumer(groupid, rebalance_cb, conf, NULL); - - test_consumer_subscribe(c, topic1); - - expect_rebalance("initial assignment", c, - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - rd_false/*don't expect lost*/, 5+2); - - /* Fail JoinGroups or SyncGroups */ - rd_kafka_mock_push_request_errors( - mcluster, - test_joingroup_fail ? RD_KAFKAP_JoinGroup : RD_KAFKAP_SyncGroup, - 5, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, +/* Check lost partitions revoke occurs on ILLEGAL_GENERATION JoinGroup + * or SyncGroup error. + */ + +static void q_lost_partitions_illegal_generation_test( + rd_bool_t test_joingroup_fail) { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *groupid = "mygroup"; + const char *topic1 = "test1"; + const char *topic2 = "test2"; + rd_kafka_t *c; + rd_kafka_conf_t *conf; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *topics; + + SUB_TEST0(!test_joingroup_fail /*quick*/, "test_joingroup_fail=%d", + test_joingroup_fail); + + mcluster = test_mock_cluster_new(3, &bootstraps); + + rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); + + /* Seed the topic1 with messages */ + test_produce_msgs_easy_v(topic1, 0, 0, 0, 100, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", NULL); + + /* Seed the topic2 with messages */ + test_produce_msgs_easy_v(topic2, 0, 0, 0, 100, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", NULL); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "security.protocol", "PLAINTEXT"); + test_conf_set(conf, "group.id", groupid); + test_conf_set(conf, "session.timeout.ms", "5000"); + test_conf_set(conf, "heartbeat.interval.ms", "1000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, 
"partition.assignment.strategy", "cooperative-sticky"); + + c = test_create_consumer(groupid, rebalance_cb, conf, NULL); + + test_consumer_subscribe(c, topic1); + + expect_rebalance("initial assignment", c, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*don't expect lost*/, 5 + 2); + + /* Fail JoinGroups or SyncGroups */ + rd_kafka_mock_push_request_errors( + mcluster, test_joingroup_fail ? RD_KAFKAP_JoinGroup : RD_KAFKAP_SyncGroup, + 5, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION); - topics = rd_kafka_topic_partition_list_new(2); - rd_kafka_topic_partition_list_add(topics, topic1, - RD_KAFKA_PARTITION_UA); - rd_kafka_topic_partition_list_add(topics, topic2, - RD_KAFKA_PARTITION_UA); - err = rd_kafka_subscribe(c, topics); - if (err) - TEST_FAIL("%s: Failed to subscribe to topics: %s\n", - rd_kafka_name(c), rd_kafka_err2str(err)); - rd_kafka_topic_partition_list_destroy(topics); + topics = rd_kafka_topic_partition_list_new(2); + rd_kafka_topic_partition_list_add(topics, topic1, RD_KAFKA_PARTITION_UA); + rd_kafka_topic_partition_list_add(topics, topic2, RD_KAFKA_PARTITION_UA); + err = rd_kafka_subscribe(c, topics); + if (err) + TEST_FAIL("%s: Failed to subscribe to topics: %s\n", rd_kafka_name(c), + rd_kafka_err2str(err)); + rd_kafka_topic_partition_list_destroy(topics); - expect_rebalance("lost partitions", c, - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - rd_true/*expect lost*/, 10+2); + expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + rd_true /*expect lost*/, 10 + 2); - rd_kafka_mock_clear_request_errors( - mcluster, - test_joingroup_fail ? RD_KAFKAP_JoinGroup : RD_KAFKAP_SyncGroup); + rd_kafka_mock_clear_request_errors(mcluster, test_joingroup_fail + ? 
RD_KAFKAP_JoinGroup + : RD_KAFKAP_SyncGroup); - expect_rebalance("rejoin group", c, - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - rd_false/*expect lost*/, 10+2); + expect_rebalance("rejoin group", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*expect lost*/, 10 + 2); - TEST_SAY("Closing consumer\n"); - test_consumer_close(c); + TEST_SAY("Closing consumer\n"); + test_consumer_close(c); - TEST_SAY("Destroying consumer\n"); - rd_kafka_destroy(c); + TEST_SAY("Destroying consumer\n"); + rd_kafka_destroy(c); - TEST_SAY("Destroying mock cluster\n"); - test_mock_cluster_destroy(mcluster); + TEST_SAY("Destroying mock cluster\n"); + test_mock_cluster_destroy(mcluster); - SUB_TEST_PASS(); - } + SUB_TEST_PASS(); +} - /* Check lost partitions revoke occurs on ILLEGAL_GENERATION Commit - * error. - */ +/* Check lost partitions revoke occurs on ILLEGAL_GENERATION Commit + * error. + */ - static void r_lost_partitions_commit_illegal_generation_test_local () { - const char *bootstraps; - rd_kafka_mock_cluster_t *mcluster; - const char *groupid = "mygroup"; - const char *topic = "test"; - const int msgcnt = 100; - rd_kafka_t *c; - rd_kafka_conf_t *conf; +static void r_lost_partitions_commit_illegal_generation_test_local() { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *groupid = "mygroup"; + const char *topic = "test"; + const int msgcnt = 100; + rd_kafka_t *c; + rd_kafka_conf_t *conf; - SUB_TEST(); + SUB_TEST(); - mcluster = test_mock_cluster_new(3, &bootstraps); + mcluster = test_mock_cluster_new(3, &bootstraps); - rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); + rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); - /* Seed the topic with messages */ - test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10, - "bootstrap.servers", bootstraps, - "batch.num.messages", "10", - NULL); + /* Seed the topic with messages */ + test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", 
"10", NULL); - test_conf_init(&conf, NULL, 30); - test_conf_set(conf, "bootstrap.servers", bootstraps); - test_conf_set(conf, "security.protocol", "PLAINTEXT"); - test_conf_set(conf, "group.id", groupid); - test_conf_set(conf, "auto.offset.reset", "earliest"); - test_conf_set(conf, "enable.auto.commit", "false"); - test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "security.protocol", "PLAINTEXT"); + test_conf_set(conf, "group.id", groupid); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); - c = test_create_consumer(groupid, rebalance_cb, conf, NULL); + c = test_create_consumer(groupid, rebalance_cb, conf, NULL); - test_consumer_subscribe(c, topic); + test_consumer_subscribe(c, topic); - expect_rebalance("initial assignment", c, - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - rd_false/*don't expect lost*/, 5+2); + expect_rebalance("initial assignment", c, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*don't expect lost*/, 5 + 2); - /* Consume some messages so that the commit has something to commit. */ - test_consumer_poll("consume", c, -1, -1, -1, msgcnt/2, NULL); + /* Consume some messages so that the commit has something to commit. 
*/ + test_consumer_poll("consume", c, -1, -1, -1, msgcnt / 2, NULL); - /* Fail Commit */ - rd_kafka_mock_push_request_errors( - mcluster, RD_KAFKAP_OffsetCommit, - 5, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION); + /* Fail Commit */ + rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_OffsetCommit, 5, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION); - rd_kafka_commit(c, NULL, rd_false); + rd_kafka_commit(c, NULL, rd_false); - expect_rebalance("lost partitions", c, - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - rd_true/*expect lost*/, 10+2); + expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + rd_true /*expect lost*/, 10 + 2); - expect_rebalance("rejoin group", c, - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - rd_false/*expect lost*/, 20+2); + expect_rebalance("rejoin group", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*expect lost*/, 20 + 2); - TEST_SAY("Closing consumer\n"); - test_consumer_close(c); + TEST_SAY("Closing consumer\n"); + test_consumer_close(c); - TEST_SAY("Destroying consumer\n"); - rd_kafka_destroy(c); + TEST_SAY("Destroying consumer\n"); + rd_kafka_destroy(c); - TEST_SAY("Destroying mock cluster\n"); - test_mock_cluster_destroy(mcluster); - } + TEST_SAY("Destroying mock cluster\n"); + test_mock_cluster_destroy(mcluster); +} - /** - * @brief Rebalance callback for the v_.. test below. - */ - static void v_rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { - bool *auto_commitp = (bool *)opaque; +/** + * @brief Rebalance callback for the v_.. test below. 
+ */ +static void v_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + bool *auto_commitp = (bool *)opaque; - TEST_SAY("%s: %s: %d partition(s)%s\n", - rd_kafka_name(rk), rd_kafka_err2name(err), parts->cnt, - rd_kafka_assignment_lost(rk) ? " - assignment lost" : ""); + TEST_SAY("%s: %s: %d partition(s)%s\n", rd_kafka_name(rk), + rd_kafka_err2name(err), parts->cnt, + rd_kafka_assignment_lost(rk) ? " - assignment lost" : ""); - test_print_partition_list(parts); + test_print_partition_list(parts); - if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { - test_consumer_incremental_assign("assign", rk, parts); - } else { - test_consumer_incremental_unassign("unassign", rk, parts); - - if (!*auto_commitp) { - rd_kafka_resp_err_t commit_err; - - TEST_SAY("Attempting manual commit after unassign, in 2 seconds..\n"); - /* Sleep enough to have the generation-id bumped by rejoin. */ - rd_sleep(2); - commit_err = rd_kafka_commit(rk, NULL, 0/*sync*/); - TEST_ASSERT(!commit_err || - commit_err == RD_KAFKA_RESP_ERR__NO_OFFSET || - commit_err == RD_KAFKA_RESP_ERR__DESTROY, - "%s: manual commit failed: %s", - rd_kafka_name(rk), rd_kafka_err2str(commit_err)); - } + if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { + test_consumer_incremental_assign("assign", rk, parts); + } else { + test_consumer_incremental_unassign("unassign", rk, parts); + + if (!*auto_commitp) { + rd_kafka_resp_err_t commit_err; + + TEST_SAY("Attempting manual commit after unassign, in 2 seconds..\n"); + /* Sleep enough to have the generation-id bumped by rejoin. */ + rd_sleep(2); + commit_err = rd_kafka_commit(rk, NULL, 0 /*sync*/); + TEST_ASSERT(!commit_err || commit_err == RD_KAFKA_RESP_ERR__NO_OFFSET || + commit_err == RD_KAFKA_RESP_ERR__DESTROY, + "%s: manual commit failed: %s", rd_kafka_name(rk), + rd_kafka_err2str(commit_err)); } } +} - /** - * @brief Commit callback for the v_.. test. 
- */ - static void v_commit_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { - TEST_SAY("%s offset commit for %d offsets: %s\n", - rd_kafka_name(rk), offsets ? offsets->cnt : -1, - rd_kafka_err2name(err)); - TEST_ASSERT(!err || - err == RD_KAFKA_RESP_ERR__NO_OFFSET || - err == RD_KAFKA_RESP_ERR__DESTROY /* consumer was closed */, - "%s offset commit failed: %s", - rd_kafka_name(rk), - rd_kafka_err2str(err)); - } +/** + * @brief Commit callback for the v_.. test. + */ +static void v_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { + TEST_SAY("%s offset commit for %d offsets: %s\n", rd_kafka_name(rk), + offsets ? offsets->cnt : -1, rd_kafka_err2name(err)); + TEST_ASSERT(!err || err == RD_KAFKA_RESP_ERR__NO_OFFSET || + err == RD_KAFKA_RESP_ERR__DESTROY /* consumer was closed */, + "%s offset commit failed: %s", rd_kafka_name(rk), + rd_kafka_err2str(err)); +} - static void v_commit_during_rebalance (bool with_rebalance_cb, - bool auto_commit) { - rd_kafka_t *p, *c1, *c2; - rd_kafka_conf_t *conf; - const char *topic = test_mk_topic_name("0113_v", 1); - const int partition_cnt = 6; - const int msgcnt_per_partition = 100; - const int msgcnt = partition_cnt * msgcnt_per_partition; - uint64_t testid; - int i; +static void v_commit_during_rebalance(bool with_rebalance_cb, + bool auto_commit) { + rd_kafka_t *p, *c1, *c2; + rd_kafka_conf_t *conf; + const char *topic = test_mk_topic_name("0113_v", 1); + const int partition_cnt = 6; + const int msgcnt_per_partition = 100; + const int msgcnt = partition_cnt * msgcnt_per_partition; + uint64_t testid; + int i; - SUB_TEST("With%s rebalance callback and %s-commit", - with_rebalance_cb ? "" : "out", - auto_commit ? "auto" : "manual"); + SUB_TEST("With%s rebalance callback and %s-commit", + with_rebalance_cb ? "" : "out", auto_commit ? 
"auto" : "manual"); - test_conf_init(&conf, NULL, 30); - testid = test_id_generate(); + test_conf_init(&conf, NULL, 30); + testid = test_id_generate(); - /* - * Produce messages to topic - */ - p = test_create_producer(); + /* + * Produce messages to topic + */ + p = test_create_producer(); - test_create_topic(p, topic, partition_cnt, 1); + test_create_topic(p, topic, partition_cnt, 1); - for (i = 0 ; i < partition_cnt ; i++) { - test_produce_msgs2(p, topic, testid, i, - i * msgcnt_per_partition, - msgcnt_per_partition, NULL, 0); - } + for (i = 0; i < partition_cnt; i++) { + test_produce_msgs2(p, topic, testid, i, i * msgcnt_per_partition, + msgcnt_per_partition, NULL, 0); + } - test_flush(p, -1); - - rd_kafka_destroy(p); - - - test_conf_set(conf, "auto.offset.reset", "earliest"); - test_conf_set(conf, "enable.auto.commit", auto_commit ? "true" : "false"); - test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); - rd_kafka_conf_set_offset_commit_cb(conf, v_commit_cb); - rd_kafka_conf_set_opaque(conf, (void *)&auto_commit); - - TEST_SAY("Create and subscribe first consumer\n"); - c1 = test_create_consumer(topic, - with_rebalance_cb ? v_rebalance_cb : NULL, - rd_kafka_conf_dup(conf), NULL); - TEST_ASSERT(rd_kafka_opaque(c1) == (void *)&auto_commit, - "c1 opaque mismatch"); - test_consumer_subscribe(c1, topic); - - /* Consume some messages so that we know we have an assignment - * and something to commit. */ - test_consumer_poll("C1.PRECONSUME", c1, testid, -1, 0, - msgcnt/partition_cnt/2, NULL); - - TEST_SAY("Create and subscribe second consumer\n"); - c2 = test_create_consumer(topic, - with_rebalance_cb ? 
v_rebalance_cb : NULL, - conf, NULL); - TEST_ASSERT(rd_kafka_opaque(c2) == (void *)&auto_commit, - "c2 opaque mismatch"); - test_consumer_subscribe(c2, topic); - - /* Poll both consumers */ - for (i = 0 ; i < 10 ; i++) { - test_consumer_poll_once(c1, NULL, 1000); - test_consumer_poll_once(c2, NULL, 1000); - } + test_flush(p, -1); + + rd_kafka_destroy(p); + + + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", auto_commit ? "true" : "false"); + test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); + rd_kafka_conf_set_offset_commit_cb(conf, v_commit_cb); + rd_kafka_conf_set_opaque(conf, (void *)&auto_commit); - TEST_SAY("Closing consumers\n"); - test_consumer_close(c1); - test_consumer_close(c2); + TEST_SAY("Create and subscribe first consumer\n"); + c1 = test_create_consumer(topic, with_rebalance_cb ? v_rebalance_cb : NULL, + rd_kafka_conf_dup(conf), NULL); + TEST_ASSERT(rd_kafka_opaque(c1) == (void *)&auto_commit, + "c1 opaque mismatch"); + test_consumer_subscribe(c1, topic); - rd_kafka_destroy(c1); - rd_kafka_destroy(c2); + /* Consume some messages so that we know we have an assignment + * and something to commit. */ + test_consumer_poll("C1.PRECONSUME", c1, testid, -1, 0, + msgcnt / partition_cnt / 2, NULL); - SUB_TEST_PASS(); + TEST_SAY("Create and subscribe second consumer\n"); + c2 = test_create_consumer(topic, with_rebalance_cb ? v_rebalance_cb : NULL, + conf, NULL); + TEST_ASSERT(rd_kafka_opaque(c2) == (void *)&auto_commit, + "c2 opaque mismatch"); + test_consumer_subscribe(c2, topic); + + /* Poll both consumers */ + for (i = 0; i < 10; i++) { + test_consumer_poll_once(c1, NULL, 1000); + test_consumer_poll_once(c2, NULL, 1000); } + TEST_SAY("Closing consumers\n"); + test_consumer_close(c1); + test_consumer_close(c2); - /** - * @brief Verify that incremental rebalances retain stickyness. 
- */ - static void x_incremental_rebalances (void) { + rd_kafka_destroy(c1); + rd_kafka_destroy(c2); + + SUB_TEST_PASS(); +} + + +/** + * @brief Verify that incremental rebalances retain stickyness. + */ +static void x_incremental_rebalances(void) { #define _NUM_CONS 3 - rd_kafka_t *c[_NUM_CONS]; - rd_kafka_conf_t *conf; - const char *topic = test_mk_topic_name("0113_x", 1); - int i; + rd_kafka_t *c[_NUM_CONS]; + rd_kafka_conf_t *conf; + const char *topic = test_mk_topic_name("0113_x", 1); + int i; - SUB_TEST(); - test_conf_init(&conf, NULL, 60); + SUB_TEST(); + test_conf_init(&conf, NULL, 60); - test_create_topic(NULL, topic, 6, 1); + test_create_topic(NULL, topic, 6, 1); - test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); - for (i = 0 ; i < _NUM_CONS ; i++) { - char clientid[32]; - rd_snprintf(clientid, sizeof(clientid), "consumer%d", i); - test_conf_set(conf, "client.id", clientid); + test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); + for (i = 0; i < _NUM_CONS; i++) { + char clientid[32]; + rd_snprintf(clientid, sizeof(clientid), "consumer%d", i); + test_conf_set(conf, "client.id", clientid); - c[i] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); - } - rd_kafka_conf_destroy(conf); - - /* First consumer joins group */ - TEST_SAY("%s: joining\n", rd_kafka_name(c[0])); - test_consumer_subscribe(c[0], topic); - test_consumer_wait_assignment(c[0], rd_true/*poll*/); - test_consumer_verify_assignment(c[0], rd_true/*fail immediately*/, - topic, 0, - topic, 1, - topic, 2, - topic, 3, - topic, 4, - topic, 5, - NULL); - - - /* Second consumer joins group */ - TEST_SAY("%s: joining\n", rd_kafka_name(c[1])); - test_consumer_subscribe(c[1], topic); - test_consumer_wait_assignment(c[1], rd_true/*poll*/); - rd_sleep(3); - test_consumer_verify_assignment(c[0], rd_false/*fail later*/, - topic, 3, - topic, 4, - topic, 5, - NULL); - test_consumer_verify_assignment(c[1], rd_false/*fail later*/, - topic, 0, - 
topic, 1, - topic, 2, - NULL); - - /* Third consumer joins group */ - TEST_SAY("%s: joining\n", rd_kafka_name(c[2])); - test_consumer_subscribe(c[2], topic); - test_consumer_wait_assignment(c[2], rd_true/*poll*/); - rd_sleep(3); - test_consumer_verify_assignment(c[0], rd_false/*fail later*/, - topic, 4, - topic, 5, - NULL); - test_consumer_verify_assignment(c[1], rd_false/*fail later*/, - topic, 1, - topic, 2, - NULL); - test_consumer_verify_assignment(c[2], rd_false/*fail later*/, - topic, 3, - topic, 0, - NULL); - - /* Raise any previously failed verify_assignment calls and fail the test */ - TEST_LATER_CHECK(); - - for (i = 0 ; i < _NUM_CONS ; i++) - rd_kafka_destroy(c[i]); - - SUB_TEST_PASS(); - - #undef _NUM_CONS + c[i] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); } + rd_kafka_conf_destroy(conf); + + /* First consumer joins group */ + TEST_SAY("%s: joining\n", rd_kafka_name(c[0])); + test_consumer_subscribe(c[0], topic); + test_consumer_wait_assignment(c[0], rd_true /*poll*/); + test_consumer_verify_assignment(c[0], rd_true /*fail immediately*/, topic, 0, + topic, 1, topic, 2, topic, 3, topic, 4, topic, + 5, NULL); + + + /* Second consumer joins group */ + TEST_SAY("%s: joining\n", rd_kafka_name(c[1])); + test_consumer_subscribe(c[1], topic); + test_consumer_wait_assignment(c[1], rd_true /*poll*/); + rd_sleep(3); + test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 3, + topic, 4, topic, 5, NULL); + test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 0, + topic, 1, topic, 2, NULL); + + /* Third consumer joins group */ + TEST_SAY("%s: joining\n", rd_kafka_name(c[2])); + test_consumer_subscribe(c[2], topic); + test_consumer_wait_assignment(c[2], rd_true /*poll*/); + rd_sleep(3); + test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 4, + topic, 5, NULL); + test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 1, + topic, 2, NULL); + test_consumer_verify_assignment(c[2], 
rd_false /*fail later*/, topic, 3, + topic, 0, NULL); + + /* Raise any previously failed verify_assignment calls and fail the test */ + TEST_LATER_CHECK(); + + for (i = 0; i < _NUM_CONS; i++) + rd_kafka_destroy(c[i]); - /* Local tests not needing a cluster */ - int main_0113_cooperative_rebalance_local (int argc, char **argv) { - a_assign_rapid(); - p_lost_partitions_heartbeat_illegal_generation_test(); - q_lost_partitions_illegal_generation_test(rd_false/*joingroup*/); - q_lost_partitions_illegal_generation_test(rd_true/*syncgroup*/); - r_lost_partitions_commit_illegal_generation_test_local(); - return 0; - } + SUB_TEST_PASS(); - int main_0113_cooperative_rebalance (int argc, char **argv) { - int i; +#undef _NUM_CONS +} - a_assign_tests(); - b_subscribe_with_cb_test(true/*close consumer*/); - b_subscribe_with_cb_test(false/*don't close consumer*/); - c_subscribe_no_cb_test(true/*close consumer*/); +/* Local tests not needing a cluster */ +int main_0113_cooperative_rebalance_local(int argc, char **argv) { + a_assign_rapid(); + p_lost_partitions_heartbeat_illegal_generation_test(); + q_lost_partitions_illegal_generation_test(rd_false /*joingroup*/); + q_lost_partitions_illegal_generation_test(rd_true /*syncgroup*/); + r_lost_partitions_commit_illegal_generation_test_local(); + return 0; +} - if (test_quick) { - Test::Say("Skipping tests >= c_ .. 
due to quick mode\n"); - return 0; - } +int main_0113_cooperative_rebalance(int argc, char **argv) { + int i; - c_subscribe_no_cb_test(false/*don't close consumer*/); - d_change_subscription_add_topic(true/*close consumer*/); - d_change_subscription_add_topic(false/*don't close consumer*/); - e_change_subscription_remove_topic(true/*close consumer*/); - e_change_subscription_remove_topic(false/*don't close consumer*/); - f_assign_call_cooperative(); - g_incremental_assign_call_eager(); - h_delete_topic(); - i_delete_topic_2(); - j_delete_topic_no_rb_callback(); - k_add_partition(); - l_unsubscribe(); - m_unsubscribe_2(); - n_wildcard(); - o_java_interop(); - for (i = 1 ; i <= 6 ; i++) /* iterate over 6 different test variations */ - s_subscribe_when_rebalancing(i); - for (i = 1 ; i <= 2 ; i++) - t_max_poll_interval_exceeded(i); - /* Run all 2*3 variations of the u_.. test */ - for (i = 0 ; i < 3 ; i++) { - u_multiple_subscription_changes(true/*with rebalance_cb*/, i); - u_multiple_subscription_changes(false/*without rebalance_cb*/, i); - } - v_commit_during_rebalance(true/*with rebalance callback*/, - true/*auto commit*/); - v_commit_during_rebalance(false/*without rebalance callback*/, - true/*auto commit*/); - v_commit_during_rebalance(true/*with rebalance callback*/, - false/*manual commit*/); - x_incremental_rebalances(); + a_assign_tests(); + b_subscribe_with_cb_test(true /*close consumer*/); + b_subscribe_with_cb_test(false /*don't close consumer*/); + c_subscribe_no_cb_test(true /*close consumer*/); + if (test_quick) { + Test::Say("Skipping tests >= c_ .. 
due to quick mode\n"); return 0; } + + c_subscribe_no_cb_test(false /*don't close consumer*/); + d_change_subscription_add_topic(true /*close consumer*/); + d_change_subscription_add_topic(false /*don't close consumer*/); + e_change_subscription_remove_topic(true /*close consumer*/); + e_change_subscription_remove_topic(false /*don't close consumer*/); + f_assign_call_cooperative(); + g_incremental_assign_call_eager(); + h_delete_topic(); + i_delete_topic_2(); + j_delete_topic_no_rb_callback(); + k_add_partition(); + l_unsubscribe(); + m_unsubscribe_2(); + n_wildcard(); + o_java_interop(); + for (i = 1; i <= 6; i++) /* iterate over 6 different test variations */ + s_subscribe_when_rebalancing(i); + for (i = 1; i <= 2; i++) + t_max_poll_interval_exceeded(i); + /* Run all 2*3 variations of the u_.. test */ + for (i = 0; i < 3; i++) { + u_multiple_subscription_changes(true /*with rebalance_cb*/, i); + u_multiple_subscription_changes(false /*without rebalance_cb*/, i); + } + v_commit_during_rebalance(true /*with rebalance callback*/, + true /*auto commit*/); + v_commit_during_rebalance(false /*without rebalance callback*/, + true /*auto commit*/); + v_commit_during_rebalance(true /*with rebalance callback*/, + false /*manual commit*/); + x_incremental_rebalances(); + + return 0; +} } diff --git a/tests/0114-sticky_partitioning.cpp b/tests/0114-sticky_partitioning.cpp index ace47f6c1e..8ef88e7df4 100644 --- a/tests/0114-sticky_partitioning.cpp +++ b/tests/0114-sticky_partitioning.cpp @@ -42,8 +42,7 @@ * @brief Specify sticky.partitioning.linger.ms and check consumed * messages to verify it takes effect. 
*/ -static void do_test_sticky_partitioning (int sticky_delay) { - +static void do_test_sticky_partitioning(int sticky_delay) { std::string topic = Test::mk_topic_name(__FILE__, 1); Test::create_topic(NULL, topic.c_str(), 3, 1); @@ -56,16 +55,16 @@ static void do_test_sticky_partitioning (int sticky_delay) { std::string errstr; RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); if (!p) - Test::Fail("Failed to create Producer: " + errstr); + Test::Fail("Failed to create Producer: " + errstr); RdKafka::Consumer *c = RdKafka::Consumer::create(conf, errstr); if (!c) - Test::Fail("Failed to create Consumer: " + errstr); + Test::Fail("Failed to create Consumer: " + errstr); delete conf; RdKafka::Topic *t = RdKafka::Topic::create(c, topic, NULL, errstr); if (!t) - Test::Fail("Failed to create Topic: " + errstr); + Test::Fail("Failed to create Topic: " + errstr); c->start(t, 0, RdKafka::Topic::OFFSET_BEGINNING); c->start(t, 1, RdKafka::Topic::OFFSET_BEGINNING); @@ -79,49 +78,47 @@ static void do_test_sticky_partitioning (int sticky_delay) { memset(val, 'a', msgsize); /* produce for for seconds at 100 msgs/sec */ - for (int s = 0 ; s < 4; s++){ - + for (int s = 0; s < 4; s++) { int64_t end_wait = test_clock() + (1 * 1000000); - for (int i = 0 ; i < msgrate ; i++) { - RdKafka::ErrorCode err = p->produce(topic, RdKafka::Topic::PARTITION_UA, - RdKafka::Producer::RK_MSG_COPY, - val, msgsize, NULL, 0, -1, NULL); - if (err) - Test::Fail("Produce failed: " + RdKafka::err2str(err)); + for (int i = 0; i < msgrate; i++) { + RdKafka::ErrorCode err = p->produce(topic, RdKafka::Topic::PARTITION_UA, + RdKafka::Producer::RK_MSG_COPY, val, + msgsize, NULL, 0, -1, NULL); + if (err) + Test::Fail("Produce failed: " + RdKafka::err2str(err)); } while (test_clock() < end_wait) - p->poll(100); + p->poll(100); } Test::Say(tostr() << "Produced " << 4 * msgrate << " messages\n"); - p->flush(5*1000); + p->flush(5 * 1000); /* Consume messages */ - int partition_msgcnt[3] = {0,0,0}; + int 
partition_msgcnt[3] = {0, 0, 0}; int num_partitions_active = 0; - int i = 0; + int i = 0; int64_t end_wait = test_clock() + (5 * 1000000); - while (test_clock() < end_wait){ - + while (test_clock() < end_wait) { RdKafka::Message *msg = c->consume(t, i, 5); - switch (msg->err()) - { - case RdKafka::ERR__TIMED_OUT: - i++; - if (i > 2) i = 0; - break; + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + i++; + if (i > 2) + i = 0; + break; - case RdKafka::ERR_NO_ERROR: - partition_msgcnt[msg->partition()]++; - break; + case RdKafka::ERR_NO_ERROR: + partition_msgcnt[msg->partition()]++; + break; - default: - Test::Fail("Consume error: " + msg->errstr()); - break; + default: + Test::Fail("Consume error: " + msg->errstr()); + break; } delete msg; @@ -131,8 +128,7 @@ static void do_test_sticky_partitioning (int sticky_delay) { c->stop(t, 1); c->stop(t, 2); - for(int i = 0; i < 3; i++){ - + for (int i = 0; i < 3; i++) { /* Partitions must receive 100+ messages to be deemed 'active'. This * is because while topics are being updated, it is possible for some * number of messages to be partitioned to joining partitions before @@ -145,29 +141,24 @@ static void do_test_sticky_partitioning (int sticky_delay) { } Test::Say("Partition Message Count: \n"); - for(int i = 0; i < 3; i++){ - Test::Say(tostr() << " " << i << ": " << - partition_msgcnt[i] << "\n"); + for (int i = 0; i < 3; i++) { + Test::Say(tostr() << " " << i << ": " << partition_msgcnt[i] << "\n"); } /* When sticky.partitioning.linger.ms is long (greater than expected * length of run), one partition should be sticky and receive messages. 
*/ - if (sticky_delay == 5000 && - num_partitions_active > 1) - Test::Fail(tostr() - << "Expected only 1 partition to receive msgs" - << " but " << num_partitions_active - << " partitions received msgs."); + if (sticky_delay == 5000 && num_partitions_active > 1) + Test::Fail(tostr() << "Expected only 1 partition to receive msgs" + << " but " << num_partitions_active + << " partitions received msgs."); /* When sticky.partitioning.linger.ms is short (sufficiently smaller than * length of run), it is extremely likely that all partitions are sticky * at least once and receive messages. */ - if (sticky_delay == 1000 && - num_partitions_active <= 1) - Test::Fail(tostr() - << "Expected more than one partition to receive msgs" - << " but only " << num_partitions_active - << " partition received msgs."); + if (sticky_delay == 1000 && num_partitions_active <= 1) + Test::Fail(tostr() << "Expected more than one partition to receive msgs" + << " but only " << num_partitions_active + << " partition received msgs."); delete t; delete p; @@ -175,11 +166,11 @@ static void do_test_sticky_partitioning (int sticky_delay) { } extern "C" { - int main_0114_sticky_partitioning (int argc, char **argv) { - /* long delay (5 secs) */ - do_test_sticky_partitioning(5000); - /* short delay (0.001 secs) */ - do_test_sticky_partitioning(1); - return 0; - } +int main_0114_sticky_partitioning(int argc, char **argv) { + /* long delay (5 secs) */ + do_test_sticky_partitioning(5000); + /* short delay (0.001 secs) */ + do_test_sticky_partitioning(1); + return 0; +} } diff --git a/tests/0115-producer_auth.cpp b/tests/0115-producer_auth.cpp index 17a84541c9..c4d1a96aa9 100644 --- a/tests/0115-producer_auth.cpp +++ b/tests/0115-producer_auth.cpp @@ -36,9 +36,10 @@ namespace { class DrCb : public RdKafka::DeliveryReportCb { public: - DrCb (RdKafka::ErrorCode exp_err): cnt(0), exp_err(exp_err) {} + DrCb(RdKafka::ErrorCode exp_err) : cnt(0), exp_err(exp_err) { + } - void dr_cb (RdKafka::Message &msg) { + void 
dr_cb(RdKafka::Message &msg) { Test::Say("Delivery report: " + RdKafka::err2str(msg.err()) + "\n"); if (msg.err() != exp_err) Test::Fail("Delivery report: Expected " + RdKafka::err2str(exp_err) + @@ -49,7 +50,7 @@ class DrCb : public RdKafka::DeliveryReportCb { int cnt; RdKafka::ErrorCode exp_err; }; -}; +}; // namespace /** * @brief Test producer auth failures. @@ -62,9 +63,9 @@ class DrCb : public RdKafka::DeliveryReportCb { */ -static void do_test_producer (bool topic_known) { - Test::Say(tostr() << _C_MAG << "[ Test producer auth with topic " << - (topic_known ? "" : "not ") << "known ]\n"); +static void do_test_producer(bool topic_known) { + Test::Say(tostr() << _C_MAG << "[ Test producer auth with topic " + << (topic_known ? "" : "not ") << "known ]\n"); /* Create producer */ RdKafka::Conf *conf; @@ -94,48 +95,36 @@ static void do_test_producer (bool topic_known) { if (topic_known) { /* Produce a single message to make sure metadata is known. */ Test::Say("Producing seeding message 0\n"); - err = p->produce(topic_unauth, - RdKafka::Topic::PARTITION_UA, - RdKafka::Producer::RK_MSG_COPY, - (void *)"0", 1, - NULL, 0, - 0, NULL); - TEST_ASSERT(!err, - "produce() failed: %s", RdKafka::err2str(err).c_str()); + err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA, + RdKafka::Producer::RK_MSG_COPY, (void *)"0", 1, NULL, 0, 0, + NULL); + TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str()); p->flush(-1); exp_dr_cnt++; } /* Add denying ACL for unauth topic */ - test_kafka_cmd("kafka-acls.sh --bootstrap-server %s " - "--add --deny-principal 'User:*' " - "--operation All --deny-host '*' " - "--topic '%s'", - bootstraps.c_str(), topic_unauth.c_str()); + test_kafka_cmd( + "kafka-acls.sh --bootstrap-server %s " + "--add --deny-principal 'User:*' " + "--operation All --deny-host '*' " + "--topic '%s'", + bootstraps.c_str(), topic_unauth.c_str()); /* Produce message to any partition. 
*/ Test::Say("Producing message 1 to any partition\n"); - err = p->produce(topic_unauth, - RdKafka::Topic::PARTITION_UA, - RdKafka::Producer::RK_MSG_COPY, - (void *)"1", 1, - NULL, 0, - 0, NULL); - TEST_ASSERT(!err, - "produce() failed: %s", RdKafka::err2str(err).c_str()); + err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA, + RdKafka::Producer::RK_MSG_COPY, (void *)"1", 1, NULL, 0, 0, + NULL); + TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str()); exp_dr_cnt++; /* Produce message to specific partition. */ Test::Say("Producing message 2 to partition 0\n"); - err = p->produce(topic_unauth, - 0, - RdKafka::Producer::RK_MSG_COPY, - (void *)"3", 1, - NULL, 0, - 0, NULL); - TEST_ASSERT(!err, - "produce() failed: %s", RdKafka::err2str(err).c_str()); + err = p->produce(topic_unauth, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"3", + 1, NULL, 0, 0, NULL); + TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str()); exp_dr_cnt++; /* Wait for DRs */ @@ -145,51 +134,46 @@ static void do_test_producer (bool topic_known) { /* Produce message to any and specific partition, should fail immediately. 
*/ Test::Say("Producing message 3 to any partition\n"); - err = p->produce(topic_unauth, - RdKafka::Topic::PARTITION_UA, - RdKafka::Producer::RK_MSG_COPY, - (void *)"3", 1, - NULL, 0, - 0, NULL); + err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA, + RdKafka::Producer::RK_MSG_COPY, (void *)"3", 1, NULL, 0, 0, + NULL); TEST_ASSERT(err == dr.exp_err, "Expected produce() to fail with ERR_TOPIC_AUTHORIZATION_FAILED, " - "not %s", RdKafka::err2str(err).c_str()); + "not %s", + RdKafka::err2str(err).c_str()); /* Specific partition */ Test::Say("Producing message 4 to partition 0\n"); - err = p->produce(topic_unauth, - 0, - RdKafka::Producer::RK_MSG_COPY, - (void *)"4", 1, - NULL, 0, - 0, NULL); + err = p->produce(topic_unauth, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"4", + 1, NULL, 0, 0, NULL); TEST_ASSERT(err == dr.exp_err, "Expected produce() to fail with ERR_TOPIC_AUTHORIZATION_FAILED, " - "not %s", RdKafka::err2str(err).c_str()); + "not %s", + RdKafka::err2str(err).c_str()); /* Final flush just to make sure */ p->flush(-1); - TEST_ASSERT(exp_dr_cnt == dr.cnt, - "Expected %d deliveries, not %d", exp_dr_cnt, dr.cnt); + TEST_ASSERT(exp_dr_cnt == dr.cnt, "Expected %d deliveries, not %d", + exp_dr_cnt, dr.cnt); - Test::Say(tostr() << _C_GRN << "[ Test producer auth with topic " << - (topic_known ? "" : "not ") << "known: PASS ]\n"); + Test::Say(tostr() << _C_GRN << "[ Test producer auth with topic " + << (topic_known ? 
"" : "not ") << "known: PASS ]\n"); delete p; } extern "C" { - int main_0115_producer_auth (int argc, char **argv) { - /* We can't bother passing Java security config to kafka-acls.sh */ - if (test_needs_auth()) { - Test::Skip("Cluster authentication required\n"); - return 0; - } - - do_test_producer(true); - do_test_producer(false); - +int main_0115_producer_auth(int argc, char **argv) { + /* We can't bother passing Java security config to kafka-acls.sh */ + if (test_needs_auth()) { + Test::Skip("Cluster authentication required\n"); return 0; } + + do_test_producer(true); + do_test_producer(false); + + return 0; +} } diff --git a/tests/0116-kafkaconsumer_close.cpp b/tests/0116-kafkaconsumer_close.cpp index b6bd8ace07..6645df5ee4 100644 --- a/tests/0116-kafkaconsumer_close.cpp +++ b/tests/0116-kafkaconsumer_close.cpp @@ -40,13 +40,12 @@ extern "C" { */ -static void do_test_consumer_close (bool do_subscribe, - bool do_unsubscribe, - bool do_close) { - Test::Say(tostr() << _C_MAG << "[ Test C++ KafkaConsumer close " << - "subscribe=" << do_subscribe << - ", unsubscribe=" << do_unsubscribe << - ", close=" << do_close << " ]\n"); +static void do_test_consumer_close(bool do_subscribe, + bool do_unsubscribe, + bool do_close) { + Test::Say(tostr() << _C_MAG << "[ Test C++ KafkaConsumer close " + << "subscribe=" << do_subscribe << ", unsubscribe=" + << do_unsubscribe << ", close=" << do_close << " ]\n"); rd_kafka_mock_cluster_t *mcluster; const char *bootstraps; @@ -63,10 +62,11 @@ static void do_test_consumer_close (bool do_subscribe, Test::conf_set(pconf, "bootstrap.servers", bootstraps); RdKafka::Producer *p = RdKafka::Producer::create(pconf, errstr); if (!p) - Test::Fail(tostr() << __FUNCTION__ << ": Failed to create producer: " << - errstr); + Test::Fail(tostr() << __FUNCTION__ + << ": Failed to create producer: " << errstr); delete pconf; - Test::produce_msgs(p, "some_topic", 0, msgs_per_partition, 10, true/*flush*/); + Test::produce_msgs(p, "some_topic", 0, 
msgs_per_partition, 10, + true /*flush*/); delete p; /* Create consumer */ @@ -125,18 +125,18 @@ static void do_test_consumer_close (bool do_subscribe, } extern "C" { - int main_0116_kafkaconsumer_close (int argc, char **argv) { - /* Parameters: - * subscribe, unsubscribe, close */ - do_test_consumer_close(true, true, true); - do_test_consumer_close(true, true, false); - do_test_consumer_close(true, false, true); - do_test_consumer_close(true, false, false); - do_test_consumer_close(false, true, true); - do_test_consumer_close(false, true, false); - do_test_consumer_close(false, false, true); - do_test_consumer_close(false, false, false); - - return 0; - } +int main_0116_kafkaconsumer_close(int argc, char **argv) { + /* Parameters: + * subscribe, unsubscribe, close */ + do_test_consumer_close(true, true, true); + do_test_consumer_close(true, true, false); + do_test_consumer_close(true, false, true); + do_test_consumer_close(true, false, false); + do_test_consumer_close(false, true, true); + do_test_consumer_close(false, true, false); + do_test_consumer_close(false, false, true); + do_test_consumer_close(false, false, false); + + return 0; +} } diff --git a/tests/0117-mock_errors.c b/tests/0117-mock_errors.c index 2d2ac4c56c..2c44887478 100644 --- a/tests/0117-mock_errors.c +++ b/tests/0117-mock_errors.c @@ -44,14 +44,13 @@ /** * @brief Test producer handling (retry) of ERR_KAFKA_STORAGE_ERROR. */ -static void do_test_producer_storage_error (rd_bool_t too_few_retries) { +static void do_test_producer_storage_error(rd_bool_t too_few_retries) { rd_kafka_conf_t *conf; rd_kafka_t *rk; rd_kafka_mock_cluster_t *mcluster; rd_kafka_resp_err_t err; - SUB_TEST_QUICK("%s", - too_few_retries ? "with too few retries" : ""); + SUB_TEST_QUICK("%s", too_few_retries ? 
"with too few retries" : ""); test_conf_init(&conf, NULL, 10); @@ -65,7 +64,7 @@ static void do_test_producer_storage_error (rd_bool_t too_few_retries) { test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR; test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; } else { - test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_PERSISTED; } @@ -75,17 +74,13 @@ static void do_test_producer_storage_error (rd_bool_t too_few_retries) { TEST_ASSERT(mcluster, "missing mock cluster"); rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_Produce, - 3, - RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, - RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, - RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR); - - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("mytopic"), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END); + mcluster, RD_KAFKAP_Produce, 3, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); /* Wait for delivery report. */ @@ -102,13 +97,13 @@ static void do_test_producer_storage_error (rd_bool_t too_few_retries) { * RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS and then causing fetchers * to not start. 
*/ -static void do_test_offset_commit_error_during_rebalance (void) { +static void do_test_offset_commit_error_during_rebalance(void) { rd_kafka_conf_t *conf; rd_kafka_t *c1, *c2; rd_kafka_mock_cluster_t *mcluster; const char *bootstraps; const char *topic = "test"; - const int msgcnt = 100; + const int msgcnt = 100; rd_kafka_resp_err_t err; SUB_TEST(); @@ -122,8 +117,7 @@ static void do_test_offset_commit_error_during_rebalance (void) { /* Seed the topic with messages */ test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10, "bootstrap.servers", bootstraps, - "batch.num.messages", "1", - NULL); + "batch.num.messages", "1", NULL); test_conf_set(conf, "bootstrap.servers", bootstraps); test_conf_set(conf, "auto.offset.reset", "earliest"); @@ -135,8 +129,7 @@ static void do_test_offset_commit_error_during_rebalance (void) { c1 = test_create_consumer("mygroup", test_rebalance_cb, rd_kafka_conf_dup(conf), NULL); - c2 = test_create_consumer("mygroup", test_rebalance_cb, - conf, NULL); + c2 = test_create_consumer("mygroup", test_rebalance_cb, conf, NULL); test_consumer_subscribe(c1, topic); test_consumer_subscribe(c2, topic); @@ -151,23 +144,22 @@ static void do_test_offset_commit_error_during_rebalance (void) { rd_kafka_destroy(c2); rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_OffsetCommit, - 6, - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS); + mcluster, RD_KAFKAP_OffsetCommit, 6, + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS); /* This commit should fail (async) */ TEST_SAY("Committing (should fail)\n"); - err = 
rd_kafka_commit(c1, NULL, 0/*sync*/); + err = rd_kafka_commit(c1, NULL, 0 /*sync*/); TEST_SAY("Commit returned %s\n", rd_kafka_err2name(err)); TEST_ASSERT(err == RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, "Expected commit to fail with ERR_REBALANCE_IN_PROGRESS, " - "not %s", rd_kafka_err2name(err)); + "not %s", + rd_kafka_err2name(err)); /* Wait for new assignment and able to read all messages */ test_consumer_poll("C1.PRE", c1, 0, -1, -1, msgcnt, NULL); @@ -186,16 +178,16 @@ static void do_test_offset_commit_error_during_rebalance (void) { * RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS and then causing fetchers * to not start. */ -static void do_test_offset_commit_request_timed_out (rd_bool_t auto_commit) { +static void do_test_offset_commit_request_timed_out(rd_bool_t auto_commit) { rd_kafka_conf_t *conf; rd_kafka_t *c1, *c2; rd_kafka_mock_cluster_t *mcluster; const char *bootstraps; const char *topic = "test"; - const int msgcnt = 1; + const int msgcnt = 1; rd_kafka_topic_partition_list_t *partitions; - SUB_TEST_QUICK("enable.auto.commit=%s", auto_commit ? "true": "false"); + SUB_TEST_QUICK("enable.auto.commit=%s", auto_commit ? "true" : "false"); test_conf_init(&conf, NULL, 60); @@ -206,20 +198,20 @@ static void do_test_offset_commit_request_timed_out (rd_bool_t auto_commit) { /* Seed the topic with messages */ test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10, "bootstrap.servers", bootstraps, - "batch.num.messages", "1", - NULL); + "batch.num.messages", "1", NULL); test_conf_set(conf, "bootstrap.servers", bootstraps); test_conf_set(conf, "auto.offset.reset", "earliest"); - test_conf_set(conf, "enable.auto.commit", auto_commit ? "true":"false"); + test_conf_set(conf, "enable.auto.commit", + auto_commit ? 
"true" : "false"); /* Too high to be done by interval in this test */ test_conf_set(conf, "auto.commit.interval.ms", "90000"); /* Make sure we don't consume the entire partition in one Fetch */ test_conf_set(conf, "fetch.message.max.bytes", "100"); - c1 = test_create_consumer("mygroup", NULL, - rd_kafka_conf_dup(conf), NULL); + c1 = test_create_consumer("mygroup", NULL, rd_kafka_conf_dup(conf), + NULL); test_consumer_subscribe(c1, topic); @@ -227,15 +219,12 @@ static void do_test_offset_commit_request_timed_out (rd_bool_t auto_commit) { /* Wait for assignment and one message */ test_consumer_poll("C1.PRE", c1, 0, -1, -1, 1, NULL); - rd_kafka_mock_push_request_errors( - mcluster, - RD_KAFKAP_OffsetCommit, - 2, - RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, - RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT); + rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_OffsetCommit, 2, + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT); if (!auto_commit) - TEST_CALL_ERR__(rd_kafka_commit(c1, NULL, 0/*sync*/)); + TEST_CALL_ERR__(rd_kafka_commit(c1, NULL, 0 /*sync*/)); /* Rely on consumer_close() doing final commit * when auto commit is enabled */ @@ -250,11 +239,11 @@ static void do_test_offset_commit_request_timed_out (rd_bool_t auto_commit) { partitions = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(partitions, topic, 0)->offset = - RD_KAFKA_OFFSET_INVALID; + RD_KAFKA_OFFSET_INVALID; - TEST_CALL_ERR__(rd_kafka_committed(c2, partitions, 10*1000)); + TEST_CALL_ERR__(rd_kafka_committed(c2, partitions, 10 * 1000)); TEST_ASSERT(partitions->elems[0].offset == 1, - "Expected committed offset to be 1, not %"PRId64, + "Expected committed offset to be 1, not %" PRId64, partitions->elems[0].offset); rd_kafka_topic_partition_list_destroy(partitions); diff --git a/tests/0118-commit_rebalance.c b/tests/0118-commit_rebalance.c index ce816d4b54..1cdcda4623 100644 --- a/tests/0118-commit_rebalance.c +++ b/tests/0118-commit_rebalance.c @@ -35,12 +35,13 @@ 
static rd_kafka_t *c1, *c2; -static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { - TEST_SAY("Rebalance for %s: %s: %d partition(s)\n", - rd_kafka_name(rk), rd_kafka_err2name(err), parts->cnt); + TEST_SAY("Rebalance for %s: %s: %d partition(s)\n", rd_kafka_name(rk), + rd_kafka_err2name(err), parts->cnt); if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { TEST_CALL_ERR__(rd_kafka_assign(rk, parts)); @@ -67,24 +68,22 @@ static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, * since it will have started to shut down after the assign * call. */ TEST_SAY("%s: Committing\n", rd_kafka_name(rk)); - commit_err = rd_kafka_commit(rk, parts, 0/*sync*/); - TEST_SAY("%s: Commit result: %s\n", - rd_kafka_name(rk), rd_kafka_err2name(commit_err)); + commit_err = rd_kafka_commit(rk, parts, 0 /*sync*/); + TEST_SAY("%s: Commit result: %s\n", rd_kafka_name(rk), + rd_kafka_err2name(commit_err)); TEST_ASSERT(commit_err, "Expected closing consumer %s's commit to " "fail, but got %s", - rd_kafka_name(rk), - rd_kafka_err2name(commit_err)); + rd_kafka_name(rk), rd_kafka_err2name(commit_err)); } else { TEST_FAIL("Unhandled event: %s", rd_kafka_err2name(err)); } - } -int main_0118_commit_rebalance (int argc, char **argv) { +int main_0118_commit_rebalance(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__, 1); rd_kafka_conf_t *conf; const int msgcnt = 1000; @@ -94,11 +93,11 @@ int main_0118_commit_rebalance (int argc, char **argv) { test_conf_set(conf, "auto.offset.reset", "earliest"); rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); - test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, - msgcnt, 10, NULL); + test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10, + NULL); - c1 = test_create_consumer(topic, rebalance_cb, - 
rd_kafka_conf_dup(conf), NULL); + c1 = test_create_consumer(topic, rebalance_cb, rd_kafka_conf_dup(conf), + NULL); c2 = test_create_consumer(topic, rebalance_cb, conf, NULL); test_consumer_subscribe(c1, topic); diff --git a/tests/0119-consumer_auth.cpp b/tests/0119-consumer_auth.cpp index b899dba59a..507b673024 100644 --- a/tests/0119-consumer_auth.cpp +++ b/tests/0119-consumer_auth.cpp @@ -39,7 +39,7 @@ */ -static void do_test_fetch_unauth () { +static void do_test_fetch_unauth() { Test::Say(tostr() << _C_MAG << "[ Test unauthorized Fetch ]\n"); std::string topic = Test::mk_topic_name("0119-fetch_unauth", 1); @@ -71,17 +71,19 @@ static void do_test_fetch_unauth () { * Deny Read (Fetch) */ - test_kafka_cmd("kafka-acls.sh --bootstrap-server %s " - "--add --allow-principal 'User:*' " - "--operation Describe --allow-host '*' " - "--topic '%s'", - bootstraps.c_str(), topic.c_str()); + test_kafka_cmd( + "kafka-acls.sh --bootstrap-server %s " + "--add --allow-principal 'User:*' " + "--operation Describe --allow-host '*' " + "--topic '%s'", + bootstraps.c_str(), topic.c_str()); - test_kafka_cmd("kafka-acls.sh --bootstrap-server %s " - "--add --deny-principal 'User:*' " - "--operation Read --deny-host '*' " - "--topic '%s'", - bootstraps.c_str(), topic.c_str()); + test_kafka_cmd( + "kafka-acls.sh --bootstrap-server %s " + "--add --deny-principal 'User:*' " + "--operation Read --deny-host '*' " + "--topic '%s'", + bootstraps.c_str(), topic.c_str()); Test::subscribe(c, topic); @@ -89,60 +91,58 @@ static void do_test_fetch_unauth () { /* Consume for 15s (30*0.5), counting the number of auth errors, * should only see one error per consumed partition, and no messages. 
*/ - for (int i = 0 ; i < 30 ; i++) { + for (int i = 0; i < 30; i++) { RdKafka::Message *msg; msg = c->consume(500); TEST_ASSERT(msg, "Expected msg"); - switch (msg->err()) - { - case RdKafka::ERR__TIMED_OUT: - break; - - case RdKafka::ERR_NO_ERROR: - Test::Fail("Did not expect a valid message"); - break; - - case RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED: - Test::Say(tostr() << "Consumer error on " << msg->topic_name() << - " [" << msg->partition() << "]: " << msg->errstr() << "\n"); - - if (auth_err_cnt++ > partition_cnt) - Test::Fail("Too many auth errors received, " - "expected same as number of partitions"); - break; - - default: - Test::Fail(tostr() << "Unexpected consumer error on " << - msg->topic_name() << " [" << msg->partition() << "]: " << - msg->errstr()); - break; + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + break; + + case RdKafka::ERR_NO_ERROR: + Test::Fail("Did not expect a valid message"); + break; + + case RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED: + Test::Say(tostr() << "Consumer error on " << msg->topic_name() << " [" + << msg->partition() << "]: " << msg->errstr() << "\n"); + + if (auth_err_cnt++ > partition_cnt) + Test::Fail( + "Too many auth errors received, " + "expected same as number of partitions"); + break; + + default: + Test::Fail(tostr() << "Unexpected consumer error on " << msg->topic_name() + << " [" << msg->partition() << "]: " << msg->errstr()); + break; } delete msg; } TEST_ASSERT(auth_err_cnt == partition_cnt, - "Expected exactly %d auth errors, saw %d", - partition_cnt, auth_err_cnt); + "Expected exactly %d auth errors, saw %d", partition_cnt, + auth_err_cnt); delete c; Test::Say(tostr() << _C_GRN << "[ Test unauthorized Fetch PASS ]\n"); - } extern "C" { - int main_0119_consumer_auth (int argc, char **argv) { - /* We can't bother passing Java security config to kafka-acls.sh */ - if (test_needs_auth()) { - Test::Skip("Cluster authentication required\n"); - return 0; - } - - do_test_fetch_unauth(); - +int 
main_0119_consumer_auth(int argc, char **argv) { + /* We can't bother passing Java security config to kafka-acls.sh */ + if (test_needs_auth()) { + Test::Skip("Cluster authentication required\n"); return 0; } + + do_test_fetch_unauth(); + + return 0; +} } diff --git a/tests/0120-asymmetric_subscription.c b/tests/0120-asymmetric_subscription.c index 201d160ff2..2031dcba19 100644 --- a/tests/0120-asymmetric_subscription.c +++ b/tests/0120-asymmetric_subscription.c @@ -35,35 +35,35 @@ /** * @brief Verify proper assignment for asymmetrical subscriptions. */ -static void do_test_asymmetric (const char *assignor, const char *bootstraps) { +static void do_test_asymmetric(const char *assignor, const char *bootstraps) { rd_kafka_conf_t *conf; #define _C_CNT 3 rd_kafka_t *c[_C_CNT]; -#define _S_CNT 2 /* max subscription count per consumer */ +#define _S_CNT 2 /* max subscription count per consumer */ const char *topics[_C_CNT][_S_CNT] = { - /* c0 */ { "t1", "t2" }, - /* c1 */ { "t2", "t3" }, - /* c2 */ { "t4" }, + /* c0 */ {"t1", "t2"}, + /* c1 */ {"t2", "t3"}, + /* c2 */ {"t4"}, }; struct { const char *topic; const int cnt; int seen; } expect[_C_CNT][_S_CNT] = { - /* c0 */ - { - { "t1", _PART_CNT }, - { "t2", _PART_CNT/2 }, - }, - /* c1 */ - { - { "t2", _PART_CNT/2 }, - { "t3", _PART_CNT }, - }, - /* c2 */ - { - { "t4", _PART_CNT }, - }, + /* c0 */ + { + {"t1", _PART_CNT}, + {"t2", _PART_CNT / 2}, + }, + /* c1 */ + { + {"t2", _PART_CNT / 2}, + {"t3", _PART_CNT}, + }, + /* c2 */ + { + {"t4", _PART_CNT}, + }, }; const char *groupid = assignor; int i; @@ -74,18 +74,18 @@ static void do_test_asymmetric (const char *assignor, const char *bootstraps) { test_conf_set(conf, "bootstrap.servers", bootstraps); test_conf_set(conf, "partition.assignment.strategy", assignor); - for (i = 0 ; i < _C_CNT ; i++) { + for (i = 0; i < _C_CNT; i++) { char name[16]; rd_kafka_topic_partition_list_t *tlist = - rd_kafka_topic_partition_list_new(2); + rd_kafka_topic_partition_list_new(2); int j; 
rd_snprintf(name, sizeof(name), "c%d", i); test_conf_set(conf, "client.id", name); - for (j = 0 ; j < _S_CNT && topics[i][j] ; j++) + for (j = 0; j < _S_CNT && topics[i][j]; j++) rd_kafka_topic_partition_list_add( - tlist, topics[i][j], RD_KAFKA_PARTITION_UA); + tlist, topics[i][j], RD_KAFKA_PARTITION_UA); c[i] = test_create_consumer(groupid, NULL, rd_kafka_conf_dup(conf), NULL); @@ -99,11 +99,11 @@ static void do_test_asymmetric (const char *assignor, const char *bootstraps) { /* Await assignments for all consumers */ - for (i = 0 ; i < _C_CNT ; i++) + for (i = 0; i < _C_CNT; i++) test_consumer_wait_assignment(c[i], rd_true); /* All have assignments, grab them. */ - for (i = 0 ; i < _C_CNT ; i++) { + for (i = 0; i < _C_CNT; i++) { int j; int p; rd_kafka_topic_partition_list_t *assignment; @@ -113,12 +113,12 @@ static void do_test_asymmetric (const char *assignor, const char *bootstraps) { TEST_ASSERT(assignment, "No assignment for %s", rd_kafka_name(c[i])); - for (p = 0 ; p < assignment->cnt ; p++) { + for (p = 0; p < assignment->cnt; p++) { const rd_kafka_topic_partition_t *part = - &assignment->elems[p]; + &assignment->elems[p]; rd_bool_t found = rd_false; - for (j = 0 ; j < _S_CNT && expect[i][j].topic ; j++) { + for (j = 0; j < _S_CNT && expect[i][j].topic; j++) { if (!strcmp(part->topic, expect[i][j].topic)) { expect[i][j].seen++; found = rd_true; @@ -129,24 +129,21 @@ static void do_test_asymmetric (const char *assignor, const char *bootstraps) { TEST_ASSERT(found, "%s was assigned unexpected topic %s", rd_kafka_name(c[i]), part->topic); - } - for (j = 0 ; j < _S_CNT && expect[i][j].topic ; j++) { + for (j = 0; j < _S_CNT && expect[i][j].topic; j++) { TEST_ASSERT(expect[i][j].seen == expect[i][j].cnt, "%s expected %d assigned partitions " "for %s, not %d", - rd_kafka_name(c[i]), - expect[i][j].cnt, - expect[i][j].topic, - expect[i][j].seen); + rd_kafka_name(c[i]), expect[i][j].cnt, + expect[i][j].topic, expect[i][j].seen); } 
rd_kafka_topic_partition_list_destroy(assignment); } - for (i = 0 ; i < _C_CNT ; i++) { + for (i = 0; i < _C_CNT; i++) { if (strcmp(assignor, "range") && (i & 1) == 0) test_consumer_close(c[i]); rd_kafka_destroy(c[i]); @@ -157,7 +154,7 @@ static void do_test_asymmetric (const char *assignor, const char *bootstraps) { } -int main_0120_asymmetric_subscription (int argc, char **argv) { +int main_0120_asymmetric_subscription(int argc, char **argv) { const char *bootstraps; rd_kafka_mock_cluster_t *mcluster; diff --git a/tests/0121-clusterid.c b/tests/0121-clusterid.c index 70fe28730c..35f5d529e9 100644 --- a/tests/0121-clusterid.c +++ b/tests/0121-clusterid.c @@ -41,14 +41,13 @@ * */ -static void log_cb (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { +static void +log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) { rd_atomic32_t *log_cntp = rd_kafka_opaque(rk); - rd_bool_t matched = !strcmp(fac, "CLUSTERID") && - strstr(buf, "reports different ClusterId"); + rd_bool_t matched = !strcmp(fac, "CLUSTERID") && + strstr(buf, "reports different ClusterId"); - TEST_SAY("%sLog: %s level %d fac %s: %s\n", - matched ? _C_GRN : "", + TEST_SAY("%sLog: %s level %d fac %s: %s\n", matched ? 
_C_GRN : "", rd_kafka_name(rk), level, fac, buf); if (matched) @@ -56,7 +55,7 @@ static void log_cb (const rd_kafka_t *rk, int level, } -int main_0121_clusterid (int argc, char **argv) { +int main_0121_clusterid(int argc, char **argv) { rd_kafka_mock_cluster_t *cluster_a, *cluster_b; const char *bootstraps_a, *bootstraps_b; size_t bs_size; @@ -79,7 +78,7 @@ int main_0121_clusterid (int argc, char **argv) { test_conf_init(&conf, NULL, 10); /* Combine bootstraps from both clusters */ - bs_size = strlen(bootstraps_a) + strlen(bootstraps_b) + 2; + bs_size = strlen(bootstraps_a) + strlen(bootstraps_b) + 2; bootstraps = malloc(bs_size); rd_snprintf(bootstraps, bs_size, "%s,%s", bootstraps_a, bootstraps_b); test_conf_set(conf, "bootstrap.servers", bootstraps); diff --git a/tests/0122-buffer_cleaning_after_rebalance.c b/tests/0122-buffer_cleaning_after_rebalance.c index f265247a0d..a1537ba9b8 100644 --- a/tests/0122-buffer_cleaning_after_rebalance.c +++ b/tests/0122-buffer_cleaning_after_rebalance.c @@ -29,7 +29,7 @@ #include "test.h" /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ typedef struct consumer_s { const char *what; @@ -43,44 +43,47 @@ typedef struct consumer_s { struct test *test; } consumer_t; -static int consumer_batch_queue (void *arg) { +static int consumer_batch_queue(void *arg) { consumer_t *arguments = arg; - int msg_cnt = 0; + int msg_cnt = 0; int i; test_timing_t t_cons; - rd_kafka_queue_t *rkq = arguments->rkq; - int timeout_ms = arguments->timeout_ms; + rd_kafka_queue_t *rkq = arguments->rkq; + int timeout_ms = arguments->timeout_ms; const int consume_msg_cnt = arguments->consume_msg_cnt; - rd_kafka_t *rk = arguments->rk; - uint64_t testid = arguments->testid; + rd_kafka_t *rk = arguments->rk; + uint64_t testid = arguments->testid; rd_kafka_message_t **rkmessage = - malloc(consume_msg_cnt * sizeof(*rkmessage)); + malloc(consume_msg_cnt * sizeof(*rkmessage)); if (arguments->test) test_curr = arguments->test; - TEST_SAY("%s calling consume_batch_queue(timeout=%d, msgs=%d) " - "and expecting %d messages back\n", - rd_kafka_name(rk), timeout_ms, consume_msg_cnt, - arguments->expected_msg_cnt); + TEST_SAY( + "%s calling consume_batch_queue(timeout=%d, msgs=%d) " + "and expecting %d messages back\n", + rd_kafka_name(rk), timeout_ms, consume_msg_cnt, + arguments->expected_msg_cnt); TIMING_START(&t_cons, "CONSUME"); - msg_cnt = (int)rd_kafka_consume_batch_queue( - rkq, timeout_ms, rkmessage, consume_msg_cnt); + msg_cnt = (int)rd_kafka_consume_batch_queue(rkq, timeout_ms, rkmessage, + consume_msg_cnt); TIMING_STOP(&t_cons); - TEST_SAY("%s consumed %d/%d/%d message(s)\n", - rd_kafka_name(rk), msg_cnt, arguments->consume_msg_cnt, + TEST_SAY("%s consumed %d/%d/%d message(s)\n", rd_kafka_name(rk), + msg_cnt, arguments->consume_msg_cnt, arguments->expected_msg_cnt); TEST_ASSERT(msg_cnt == arguments->expected_msg_cnt, - "consumed %d messages, expected %d", - msg_cnt, arguments->expected_msg_cnt); + "consumed %d messages, expected %d", msg_cnt, 
+ arguments->expected_msg_cnt); for (i = 0; i < msg_cnt; i++) { if (test_msgver_add_msg(rk, arguments->mv, rkmessage[i]) == 0) - TEST_FAIL("The message is not from testid " - "%"PRId64" \n", testid); + TEST_FAIL( + "The message is not from testid " + "%" PRId64 " \n", + testid); rd_kafka_message_destroy(rkmessage[i]); } @@ -119,7 +122,7 @@ static int consumer_batch_queue (void *arg) { * verify if there isn't any missed or duplicate messages * */ -static void do_test_consume_batch (const char *strategy) { +static void do_test_consume_batch(const char *strategy) { const int partition_cnt = 4; rd_kafka_queue_t *rkq1, *rkq2; const char *topic; @@ -149,15 +152,12 @@ static void do_test_consume_batch (const char *strategy) { /* Produce messages */ topic = test_mk_topic_name("0122-buffer_cleaning", 1); - for (p = 0 ; p < partition_cnt ; p++) - test_produce_msgs_easy(topic, - testid, - p, + for (p = 0; p < partition_cnt; p++) + test_produce_msgs_easy(topic, testid, p, produce_msg_cnt / partition_cnt); /* Create consumers */ - c1 = test_create_consumer(topic, NULL, - rd_kafka_conf_dup(conf), NULL); + c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); c2 = test_create_consumer(topic, NULL, conf, NULL); test_consumer_subscribe(c1, topic); @@ -166,17 +166,17 @@ static void do_test_consume_batch (const char *strategy) { /* Create generic consume queue */ rkq1 = rd_kafka_queue_get_consumer(c1); - c1_args.what = "C1.PRE"; - c1_args.rkq = rkq1; - c1_args.timeout_ms = timeout_ms; - c1_args.consume_msg_cnt = consume_msg_cnt; + c1_args.what = "C1.PRE"; + c1_args.rkq = rkq1; + c1_args.timeout_ms = timeout_ms; + c1_args.consume_msg_cnt = consume_msg_cnt; c1_args.expected_msg_cnt = produce_msg_cnt / 2; - c1_args.rk = c1; - c1_args.testid = testid; - c1_args.mv = &mv; - c1_args.test = test_curr; - if (thrd_create(&thread_id, consumer_batch_queue, &c1_args) - != thrd_success) + c1_args.rk = c1; + c1_args.testid = testid; + c1_args.mv = &mv; + c1_args.test = test_curr; 
+ if (thrd_create(&thread_id, consumer_batch_queue, &c1_args) != + thrd_success) TEST_FAIL("Failed to create thread for %s", "C1.PRE"); test_consumer_subscribe(c2, topic); @@ -188,21 +188,19 @@ static void do_test_consume_batch (const char *strategy) { rkq2 = rd_kafka_queue_get_consumer(c2); c2_args.what = "C2.PRE"; - c2_args.rkq = rkq2; + c2_args.rkq = rkq2; /* Second consumer should be able to consume all messages right away */ - c2_args.timeout_ms = 5000; - c2_args.consume_msg_cnt = consume_msg_cnt; + c2_args.timeout_ms = 5000; + c2_args.consume_msg_cnt = consume_msg_cnt; c2_args.expected_msg_cnt = produce_msg_cnt / 2; - c2_args.rk = c2; - c2_args.testid = testid; - c2_args.mv = &mv; + c2_args.rk = c2; + c2_args.testid = testid; + c2_args.mv = &mv; consumer_batch_queue(&c2_args); - test_msgver_verify("C1.PRE + C2.PRE", - &mv, - TEST_MSGVER_ORDER|TEST_MSGVER_DUP, - 0, + test_msgver_verify("C1.PRE + C2.PRE", &mv, + TEST_MSGVER_ORDER | TEST_MSGVER_DUP, 0, produce_msg_cnt); test_msgver_clear(&mv); @@ -219,7 +217,7 @@ static void do_test_consume_batch (const char *strategy) { } -int main_0122_buffer_cleaning_after_rebalance (int argc, char **argv) { +int main_0122_buffer_cleaning_after_rebalance(int argc, char **argv) { do_test_consume_batch("range"); do_test_consume_batch("cooperative-sticky"); return 0; diff --git a/tests/0123-connections_max_idle.c b/tests/0123-connections_max_idle.c index eed4d6ac14..734467017d 100644 --- a/tests/0123-connections_max_idle.c +++ b/tests/0123-connections_max_idle.c @@ -41,20 +41,20 @@ * */ -static void log_cb (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { +static void +log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) { rd_atomic32_t *log_cntp = rd_kafka_opaque(rk); if (!strstr(buf, "Connection max idle time exceeded")) return; - TEST_SAY("Log: %s level %d fac %s: %s\n", - rd_kafka_name(rk), level, fac, buf); + TEST_SAY("Log: %s level %d fac %s: %s\n", rd_kafka_name(rk), level, fac, 
+ buf); rd_atomic32_add(log_cntp, 1); } -static void do_test_idle (rd_bool_t set_idle) { +static void do_test_idle(rd_bool_t set_idle) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_atomic32_t log_cnt; @@ -89,7 +89,7 @@ static void do_test_idle (rd_bool_t set_idle) { } -int main_0123_connections_max_idle (int argc, char **argv) { +int main_0123_connections_max_idle(int argc, char **argv) { do_test_idle(rd_true); do_test_idle(rd_false); diff --git a/tests/0124-openssl_invalid_engine.c b/tests/0124-openssl_invalid_engine.c index 36af5049a1..5c61e5318a 100644 --- a/tests/0124-openssl_invalid_engine.c +++ b/tests/0124-openssl_invalid_engine.c @@ -28,7 +28,7 @@ #include "test.h" -int main_0124_openssl_invalid_engine (int argc, char **argv) { +int main_0124_openssl_invalid_engine(int argc, char **argv) { rd_kafka_conf_t *conf; rd_kafka_t *rk; char errstr[512]; @@ -47,18 +47,22 @@ int main_0124_openssl_invalid_engine (int argc, char **argv) { if (res != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); - if (rd_kafka_conf_set(conf, "security.protocol", "ssl", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) + if (rd_kafka_conf_set(conf, "security.protocol", "ssl", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); - TEST_ASSERT(!rk, "kafka_new() should not succeed with invalid engine" - " path, error: %s", errstr); + TEST_ASSERT(!rk, + "kafka_new() should not succeed with invalid engine" + " path, error: %s", + errstr); TEST_SAY("rd_kafka_new() failed (as expected): %s\n", errstr); - TEST_ASSERT(strstr(errstr, "engine initialization failed in"), "engine" - " initialization failure expected because of invalid engine" - " path, error: %s", errstr); + TEST_ASSERT(strstr(errstr, "engine initialization failed in"), + "engine" + " initialization failure expected because of invalid engine" + " path, error: %s", + errstr); rd_kafka_conf_destroy(conf); return 0; diff --git a/tests/0125-immediate_flush.c 
b/tests/0125-immediate_flush.c index 564a79c641..12f36cf191 100644 --- a/tests/0125-immediate_flush.c +++ b/tests/0125-immediate_flush.c @@ -33,12 +33,12 @@ * Verify that flush() overrides the linger.ms time. * */ -int main_0125_immediate_flush (int argc, char **argv) { +int main_0125_immediate_flush(int argc, char **argv) { rd_kafka_t *rk; rd_kafka_conf_t *conf; const char *topic = test_mk_topic_name("0125_immediate_flush", 1); - const int msgcnt = 100; - int remains = 0; + const int msgcnt = 100; + int remains = 0; test_timing_t t_time; test_conf_init(&conf, NULL, 30); @@ -50,8 +50,8 @@ int main_0125_immediate_flush (int argc, char **argv) { test_create_topic(rk, topic, 1, 1); /* Produce half set of messages without waiting for delivery. */ - test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt/2, - NULL, 50, &remains); + test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt / 2, NULL, 50, + &remains); TIMING_START(&t_time, "NO_FLUSH"); do { @@ -60,8 +60,8 @@ int main_0125_immediate_flush (int argc, char **argv) { TIMING_ASSERT(&t_time, 10000, 15000); /* Produce remaining messages without waiting for delivery. */ - test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt/2, - NULL, 50, &remains); + test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt / 2, NULL, 50, + &remains); /* The linger time should be overriden when flushing */ TIMING_START(&t_time, "FLUSH"); diff --git a/tests/0126-oauthbearer_oidc.c b/tests/0126-oauthbearer_oidc.c index 6e7540393a..56eea3f08b 100644 --- a/tests/0126-oauthbearer_oidc.c +++ b/tests/0126-oauthbearer_oidc.c @@ -29,7 +29,7 @@ #include "test.h" /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -37,7 +37,7 @@ * successfully. 
* */ -static void do_test_create_producer () { +static void do_test_create_producer() { const char *topic; uint64_t testid; rd_kafka_t *rk; @@ -49,10 +49,7 @@ static void do_test_create_producer () { test_conf_init(&conf, NULL, 60); - res = rd_kafka_conf_set(conf, - "sasl.oauthbearer.method", - "oidc", - errstr, + res = rd_kafka_conf_set(conf, "sasl.oauthbearer.method", "oidc", errstr, sizeof(errstr)); if (res == RD_KAFKA_CONF_INVALID) { @@ -64,20 +61,14 @@ static void do_test_create_producer () { if (res != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); - test_conf_set(conf, - "sasl.oauthbearer.client.id", - "randomuniqclientid"); - test_conf_set(conf, - "sasl.oauthbearer.client.secret", + test_conf_set(conf, "sasl.oauthbearer.client.id", "randomuniqclientid"); + test_conf_set(conf, "sasl.oauthbearer.client.secret", "randomuniqclientsecret"); - test_conf_set(conf, - "sasl.oauthbearer.client.secret", + test_conf_set(conf, "sasl.oauthbearer.client.secret", "randomuniqclientsecret"); - test_conf_set(conf, - "sasl.oauthbearer.extensions", + test_conf_set(conf, "sasl.oauthbearer.extensions", "supportFeatureX=true"); - test_conf_set(conf, - "sasl.oauthbearer.token.endpoint.url", + test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", "https://localhost:1/token"); testid = test_id_generate(); @@ -99,7 +90,7 @@ static void do_test_create_producer () { } -int main_0126_oauthbearer_oidc (int argc, char **argv) { +int main_0126_oauthbearer_oidc(int argc, char **argv) { do_test_create_producer(); return 0; } diff --git a/tests/0128-sasl_callback_queue.cpp b/tests/0128-sasl_callback_queue.cpp index 45ab2c8840..6f7298f20c 100644 --- a/tests/0128-sasl_callback_queue.cpp +++ b/tests/0128-sasl_callback_queue.cpp @@ -37,25 +37,26 @@ namespace { /* Provide our own token refresh callback */ class MyCb : public RdKafka::OAuthBearerTokenRefreshCb { -public: - MyCb (): called(false) {} + public: + MyCb() : called(false) { + } - void oauthbearer_token_refresh_cb (RdKafka::Handle 
*handle, - const std::string &oauthbearer_config) { - handle->oauthbearer_set_token_failure("Not implemented by this test, " - "but that's okay"); + void oauthbearer_token_refresh_cb(RdKafka::Handle *handle, + const std::string &oauthbearer_config) { + handle->oauthbearer_set_token_failure( + "Not implemented by this test, " + "but that's okay"); called = true; Test::Say("Callback called!\n"); } bool called; }; -}; +}; // namespace -static void do_test (bool use_background_queue) { - SUB_TEST("Use background queue = %s", - use_background_queue ? "yes" : "no"); +static void do_test(bool use_background_queue) { + SUB_TEST("Use background queue = %s", use_background_queue ? "yes" : "no"); bool expect_called = use_background_queue; @@ -87,7 +88,7 @@ static void do_test (bool use_background_queue) { /* This call should fail since the refresh callback fails, * and there are no brokers configured anyway. */ - const std::string clusterid = p->clusterid(5*1000); + const std::string clusterid = p->clusterid(5 * 1000); TEST_ASSERT(clusterid.empty(), "Expected clusterid() to fail since the token was not set"); @@ -105,10 +106,10 @@ static void do_test (bool use_background_queue) { } extern "C" { - int main_0128_sasl_callback_queue (int argc, char **argv) { - do_test(true); - do_test(false); +int main_0128_sasl_callback_queue(int argc, char **argv) { + do_test(true); + do_test(false); - return 0; - } + return 0; +} } diff --git a/tests/1000-unktopic.c b/tests/1000-unktopic.c index 30a94d746b..ad2b7e8709 100644 --- a/tests/1000-unktopic.c +++ b/tests/1000-unktopic.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. 
Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -42,7 +42,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgs_wait = 0; /* bitmask */ @@ -51,103 +51,114 @@ static int msgs_wait = 0; /* bitmask */ * Delivery report callback. * Called for each message once to signal its delivery status. */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - int msgid = *(int *)msg_opaque; - - free(msg_opaque); - - if (!(msgs_wait & (1 << msgid))) - TEST_FAIL("Unwanted delivery report for message #%i " - "(waiting for 0x%x)\n", msgid, msgs_wait); - - TEST_SAY("Delivery report for message #%i: %s\n", - msgid, rd_kafka_err2str(err)); - - msgs_wait &= ~(1 << msgid); - - if (err != RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) - TEST_FAIL("Message #%i failed with unexpected error %s\n", - msgid, rd_kafka_err2str(err)); +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + int msgid = *(int *)msg_opaque; + + free(msg_opaque); + + if (!(msgs_wait & (1 << msgid))) + TEST_FAIL( + "Unwanted delivery report for message #%i " + "(waiting for 0x%x)\n", + msgid, msgs_wait); + + TEST_SAY("Delivery report for message #%i: %s\n", msgid, + rd_kafka_err2str(err)); + + msgs_wait &= ~(1 << msgid); + + if (err != RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + TEST_FAIL("Message #%i failed with unexpected error %s\n", + msgid, rd_kafka_err2str(err)); } -int main (int argc, char **argv) { - char topic[64]; - int partition = 0; - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - char msg[128]; - int msgcnt = 10; - int i; - - /* Generate unique topic name */ - test_conf_init(&conf, &topic_conf, 10); - - rd_snprintf(topic, sizeof(topic), "rdkafkatest1_unk_%x%x", - rand(), rand()); - - TEST_SAY("\033[33mNOTE! 
This test requires " - "auto.create.topics.enable=false to be configured on " - "the broker!\033[0m\n"); - - /* Set delivery report callback */ - rd_kafka_conf_set_dr_cb(conf, dr_cb); - - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - strerror(errno)); - - /* Produce a message */ - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; - rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); - if (r == -1) { - if (errno == ENOENT) - TEST_SAY("Failed to produce message #%i: " - "unknown topic: good!\n", i); - else - TEST_FAIL("Failed to produce message #%i: %s\n", - i, strerror(errno)); - } else { - if (i > 5) - TEST_FAIL("Message #%i produced: " - "should've failed\n", i); - msgs_wait |= (1 << i); - } - - /* After half the messages: sleep to allow the metadata - * to be fetched from broker and update the actual partition - * count: this will make subsequent produce() calls fail - * immediately. 
*/ - if (i == 5) - sleep(2); - } - - /* Wait for messages to time out */ - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 50); - - if (msgs_wait != 0) - TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); - - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); - - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); - - return 0; +int main(int argc, char **argv) { + char topic[64]; + int partition = 0; + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + char msg[128]; + int msgcnt = 10; + int i; + + /* Generate unique topic name */ + test_conf_init(&conf, &topic_conf, 10); + + rd_snprintf(topic, sizeof(topic), "rdkafkatest1_unk_%x%x", rand(), + rand()); + + TEST_SAY( + "\033[33mNOTE! This test requires " + "auto.create.topics.enable=false to be configured on " + "the broker!\033[0m\n"); + + /* Set delivery report callback */ + rd_kafka_conf_set_dr_cb(conf, dr_cb); + + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", strerror(errno)); + + /* Produce a message */ + for (i = 0; i < msgcnt; i++) { + int *msgidp = malloc(sizeof(*msgidp)); + *msgidp = i; + rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], + i); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); + if (r == -1) { + if (errno == ENOENT) + TEST_SAY( + "Failed to produce message #%i: " + "unknown topic: good!\n", + i); + else + TEST_FAIL("Failed to produce message #%i: %s\n", + i, strerror(errno)); + } else { + if (i > 5) + TEST_FAIL( + "Message #%i produced: " + "should've failed\n", + i); + msgs_wait |= (1 << i); + } + + /* After half the messages: sleep to allow the metadata + * to be fetched from broker and update the actual partition + * count: 
this will make subsequent produce() calls fail + * immediately. */ + if (i == 5) + sleep(2); + } + + /* Wait for messages to time out */ + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 50); + + if (msgs_wait != 0) + TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); + + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); + + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); + + return 0; } diff --git a/tests/8000-idle.cpp b/tests/8000-idle.cpp index 5dcf2aa8f9..9659ade97a 100644 --- a/tests/8000-idle.cpp +++ b/tests/8000-idle.cpp @@ -35,8 +35,7 @@ */ -static void do_test_idle_producer () { - +static void do_test_idle_producer() { RdKafka::Conf *conf; Test::conf_init(&conf, NULL, 0); @@ -54,8 +53,8 @@ static void do_test_idle_producer () { extern "C" { - int main_8000_idle (int argc, char **argv) { - do_test_idle_producer(); - return 0; - } +int main_8000_idle(int argc, char **argv) { + do_test_idle_producer(); + return 0; +} } diff --git a/tests/LibrdkafkaTestApp.py b/tests/LibrdkafkaTestApp.py index a8f0263d3c..483f84dd63 100644 --- a/tests/LibrdkafkaTestApp.py +++ b/tests/LibrdkafkaTestApp.py @@ -19,7 +19,9 @@ class LibrdkafkaTestApp(App): """ Sets up and executes the librdkafka regression tests. Assumes tests are in the current directory. 
Must be instantiated after ZookeeperApp and KafkaBrokerApp """ - def __init__(self, cluster, version, conf=None, tests=None, scenario="default"): + + def __init__(self, cluster, version, conf=None, + tests=None, scenario="default"): super(LibrdkafkaTestApp, self).__init__(cluster, conf=conf) self.appid = UuidAllocator(self.cluster).next(self, trunc=8) @@ -30,7 +32,7 @@ def __init__(self, cluster, version, conf=None, tests=None, scenario="default"): # Generate test config file conf_blob = list() - self.security_protocol='PLAINTEXT' + self.security_protocol = 'PLAINTEXT' f, self.test_conf_file = self.open_file('test.conf', 'perm') f.write('broker.address.family=v4\n'.encode('ascii')) @@ -39,13 +41,15 @@ def __init__(self, cluster, version, conf=None, tests=None, scenario="default"): sparse = conf.get('sparse_connections', None) if sparse is not None: - f.write('enable.sparse.connections={}\n'.format(sparse).encode('ascii')) + f.write('enable.sparse.connections={}\n'.format( + sparse).encode('ascii')) if version.startswith('0.9') or version.startswith('0.8'): conf_blob.append('api.version.request=false') conf_blob.append('broker.version.fallback=%s' % version) else: - conf_blob.append('broker.version.fallback=0.10.0.0') # any broker version with ApiVersion support + # any broker version with ApiVersion support + conf_blob.append('broker.version.fallback=0.10.0.0') conf_blob.append('api.version.fallback.ms=0') # SASL (only one mechanism supported at a time) @@ -53,36 +57,43 @@ def __init__(self, cluster, version, conf=None, tests=None, scenario="default"): if mech != '': conf_blob.append('sasl.mechanisms=%s' % mech) if mech == 'PLAIN' or mech.find('SCRAM-') != -1: - self.security_protocol='SASL_PLAINTEXT' + self.security_protocol = 'SASL_PLAINTEXT' # Use first user as SASL user/pass for up in self.conf.get('sasl_users', '').split(','): - u,p = up.split('=') + u, p = up.split('=') conf_blob.append('sasl.username=%s' % u) conf_blob.append('sasl.password=%s' % p) break 
elif mech == 'OAUTHBEARER': - self.security_protocol='SASL_PLAINTEXT' + self.security_protocol = 'SASL_PLAINTEXT' conf_blob.append('enable.sasl.oauthbearer.unsecure.jwt=true\n') - conf_blob.append('sasl.oauthbearer.config=%s\n' % self.conf.get('sasl_oauthbearer_config')) + conf_blob.append( + 'sasl.oauthbearer.config=%s\n' % + self.conf.get('sasl_oauthbearer_config')) elif mech == 'GSSAPI': - self.security_protocol='SASL_PLAINTEXT' + self.security_protocol = 'SASL_PLAINTEXT' kdc = cluster.find_app(KerberosKdcApp) if kdc is None: - self.log('WARNING: sasl_mechanisms is GSSAPI set but no KerberosKdcApp available: client SASL config will be invalid (which might be intentional)') + self.log( + 'WARNING: sasl_mechanisms is GSSAPI set but no KerberosKdcApp available: client SASL config will be invalid (which might be intentional)') else: self.env_add('KRB5_CONFIG', kdc.conf['krb5_conf']) self.env_add('KRB5_KDC_PROFILE', kdc.conf['kdc_conf']) - principal,keytab = kdc.add_principal(self.name, - conf.get('advertised_hostname', self.node.name)) - conf_blob.append('sasl.kerberos.service.name=%s' % \ + principal, keytab = kdc.add_principal(self.name, + conf.get('advertised_hostname', self.node.name)) + conf_blob.append('sasl.kerberos.service.name=%s' % self.conf.get('sasl_servicename', 'kafka')) conf_blob.append('sasl.kerberos.keytab=%s' % keytab) - conf_blob.append('sasl.kerberos.principal=%s' % principal.split('@')[0]) + conf_blob.append( + 'sasl.kerberos.principal=%s' % + principal.split('@')[0]) else: - self.log('WARNING: FIXME: SASL %s client config not written to %s: unhandled mechanism' % (mech, self.test_conf_file)) + self.log( + 'WARNING: FIXME: SASL %s client config not written to %s: unhandled mechanism' % + (mech, self.test_conf_file)) # SSL config if getattr(cluster, 'ssl', None) is not None: @@ -102,14 +113,13 @@ def __init__(self, cluster, version, conf=None, tests=None, scenario="default"): # Set envs for all generated keys so tests can find them. 
for k, v in key.items(): - if type(v) is dict: + if isinstance(v, dict): for k2, v2 in v.items(): # E.g. "RDK_SSL_priv_der=path/to/librdkafka-priv.der" self.env_add('RDK_SSL_{}_{}'.format(k, k2), v2) else: self.env_add('RDK_SSL_{}'.format(k), v) - if 'SASL' in self.security_protocol: self.security_protocol = 'SASL_SSL' else: @@ -117,11 +127,19 @@ def __init__(self, cluster, version, conf=None, tests=None, scenario="default"): # Define bootstrap brokers based on selected security protocol self.dbg('Using client security.protocol=%s' % self.security_protocol) - all_listeners = (','.join(cluster.get_all('advertised.listeners', '', KafkaBrokerApp))).split(',') - bootstrap_servers = ','.join([x for x in all_listeners if x.startswith(self.security_protocol)]) + all_listeners = ( + ','.join( + cluster.get_all( + 'advertised.listeners', + '', + KafkaBrokerApp))).split(',') + bootstrap_servers = ','.join( + [x for x in all_listeners if x.startswith(self.security_protocol)]) if len(bootstrap_servers) == 0: bootstrap_servers = all_listeners[0] - self.log('WARNING: No eligible listeners for security.protocol=%s in %s: falling back to first listener: %s: tests will fail (which might be the intention)' % (self.security_protocol, all_listeners, bootstrap_servers)) + self.log( + 'WARNING: No eligible listeners for security.protocol=%s in %s: falling back to first listener: %s: tests will fail (which might be the intention)' % + (self.security_protocol, all_listeners, bootstrap_servers)) self.bootstrap_servers = bootstrap_servers @@ -143,19 +161,35 @@ def __init__(self, cluster, version, conf=None, tests=None, scenario="default"): if tests is not None: self.env_add('TESTS', ','.join(tests)) - def start_cmd (self): - self.env_add('KAFKA_PATH', self.cluster.get_all('destdir', '', KafkaBrokerApp)[0], False) - self.env_add('ZK_ADDRESS', self.cluster.get_all('address', '', ZookeeperApp)[0], False) + def start_cmd(self): + self.env_add( + 'KAFKA_PATH', + self.cluster.get_all( + 
'destdir', + '', + KafkaBrokerApp)[0], + False) + self.env_add( + 'ZK_ADDRESS', + self.cluster.get_all( + 'address', + '', + ZookeeperApp)[0], + False) self.env_add('BROKERS', self.cluster.bootstrap_servers(), False) # Provide a HTTPS REST endpoint for the HTTP client tests. - self.env_add('RD_UT_HTTP_URL', 'https://jsonplaceholder.typicode.com/users') + self.env_add( + 'RD_UT_HTTP_URL', + 'https://jsonplaceholder.typicode.com/users') # Per broker env vars - for b in [x for x in self.cluster.apps if isinstance(x, KafkaBrokerApp)]: + for b in [x for x in self.cluster.apps if isinstance( + x, KafkaBrokerApp)]: self.env_add('BROKER_ADDRESS_%d' % b.appid, ','.join([x for x in b.conf['listeners'].split(',') if x.startswith(self.security_protocol)])) - # Add each broker pid as an env so they can be killed indivdidually. + # Add each broker pid as an env so they can be killed + # indivdidually. self.env_add('BROKER_PID_%d' % b.appid, str(b.proc.pid)) # JMX port, if available jmx_port = b.conf.get('jmx_port', None) @@ -168,10 +202,10 @@ def start_cmd (self): if self.conf.get('args', None) is not None: extra_args.append(self.conf.get('args')) extra_args.append('-E') - return './run-test.sh -p%d -K %s %s' % (int(self.conf.get('parallel', 5)), ' '.join(extra_args), self.test_mode) - + return './run-test.sh -p%d -K %s %s' % ( + int(self.conf.get('parallel', 5)), ' '.join(extra_args), self.test_mode) - def report (self): + def report(self): if self.test_mode == 'bash': return None @@ -179,9 +213,11 @@ def report (self): with open(self.test_report_file, 'r') as f: res = json.load(f) except Exception as e: - self.log('Failed to read report %s: %s' % (self.test_report_file, str(e))) + self.log( + 'Failed to read report %s: %s' % + (self.test_report_file, str(e))) return {'root_path': self.root_path(), 'error': str(e)} return res - def deploy (self): + def deploy(self): pass diff --git a/tests/broker_version_tests.py b/tests/broker_version_tests.py index 6ca2ca6bb9..ce3cde4fb9 
100755 --- a/tests/broker_version_tests.py +++ b/tests/broker_version_tests.py @@ -22,10 +22,11 @@ import json import tempfile -def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, - interact=False, debug=False, scenario="default"): + +def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, + interact=False, debug=False, scenario="default"): """ - @brief Create, deploy and start a Kafka cluster using Kafka \p version + @brief Create, deploy and start a Kafka cluster using Kafka \\p version Then run librdkafka's regression tests. """ @@ -34,7 +35,7 @@ def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, debug=debug, scenario=scenario) # librdkafka's regression tests, as an App. - _rdkconf = conf.copy() # Base rdkconf on cluster conf + rdkconf + _rdkconf = conf.copy() # Base rdkconf on cluster conf + rdkconf _rdkconf.update(rdkconf) rdkafka = LibrdkafkaTestApp(cluster, version, _rdkconf, tests=tests, scenario=scenario) @@ -46,22 +47,33 @@ def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, cluster.start(timeout=30) if conf.get('test_mode', '') == 'bash': - cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\w$ "\')' % (cluster.name, version) - subprocess.call(cmd, env=rdkafka.env, shell=True, executable='/bin/bash') + cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % ( + cluster.name, version) + subprocess.call( + cmd, + env=rdkafka.env, + shell=True, + executable='/bin/bash') report = None else: rdkafka.start() - print('# librdkafka regression tests started, logs in %s' % rdkafka.root_path()) - rdkafka.wait_stopped(timeout=60*30) + print( + '# librdkafka regression tests started, logs in %s' % + rdkafka.root_path()) + rdkafka.wait_stopped(timeout=60 * 30) report = rdkafka.report() report['root_path'] = rdkafka.root_path() if report.get('tests_failed', 0) > 0 and interact: - print('# Connect to cluster with bootstrap.servers %s' % 
cluster.bootstrap_servers()) + print( + '# Connect to cluster with bootstrap.servers %s' % + cluster.bootstrap_servers()) print('# Exiting the shell will bring down the cluster. Good luck.') - subprocess.call('bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\w$ "\')' % (cluster.name, version), env=rdkafka.env, shell=True, executable='/bin/bash') + subprocess.call( + 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % + (cluster.name, version), env=rdkafka.env, shell=True, executable='/bin/bash') cluster.stop(force=True) @@ -69,7 +81,7 @@ def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, return report -def handle_report (report, version, suite): +def handle_report(report, version, suite): """ Parse test report and return tuple (Passed(bool), Reason(str)) """ test_cnt = report.get('tests_run', 0) @@ -78,27 +90,32 @@ def handle_report (report, version, suite): passed = report.get('tests_passed', 0) failed = report.get('tests_failed', 0) - if 'all' in suite.get('expect_fail', []) or version in suite.get('expect_fail', []): + if 'all' in suite.get('expect_fail', []) or version in suite.get( + 'expect_fail', []): expect_fail = True else: expect_fail = False if expect_fail: if failed == test_cnt: - return (True, 'All %d/%d tests failed as expected' % (failed, test_cnt)) + return (True, 'All %d/%d tests failed as expected' % + (failed, test_cnt)) else: - return (False, '%d/%d tests failed: expected all to fail' % (failed, test_cnt)) + return (False, '%d/%d tests failed: expected all to fail' % + (failed, test_cnt)) else: if failed > 0: - return (False, '%d/%d tests passed: expected all to pass' % (passed, test_cnt)) + return (False, '%d/%d tests passed: expected all to pass' % + (passed, test_cnt)) else: - return (True, 'All %d/%d tests passed as expected' % (passed, test_cnt)) - + return (True, 'All %d/%d tests passed as expected' % + (passed, test_cnt)) if __name__ == '__main__': - parser = 
argparse.ArgumentParser(description='Run librdkafka tests on a range of broker versions') + parser = argparse.ArgumentParser( + description='Run librdkafka tests on a range of broker versions') parser.add_argument('--debug', action='store_true', default=False, help='Enable trivup debugging') @@ -121,13 +138,37 @@ def handle_report (report, version, suite): parser.add_argument('--interactive', action='store_true', dest='interactive', default=False, help='Start a shell instead of running tests') - parser.add_argument('--root', type=str, default=os.environ.get('TRIVUP_ROOT', 'tmp'), help='Root working directory') - parser.add_argument('--port', default=None, help='Base TCP port to start allocating from') - parser.add_argument('--kafka-src', dest='kafka_path', type=str, default=None, help='Path to Kafka git repo checkout (used for version=trunk)') - parser.add_argument('--brokers', dest='broker_cnt', type=int, default=3, help='Number of Kafka brokers') + parser.add_argument( + '--root', + type=str, + default=os.environ.get( + 'TRIVUP_ROOT', + 'tmp'), + help='Root working directory') + parser.add_argument( + '--port', + default=None, + help='Base TCP port to start allocating from') + parser.add_argument( + '--kafka-src', + dest='kafka_path', + type=str, + default=None, + help='Path to Kafka git repo checkout (used for version=trunk)') + parser.add_argument( + '--brokers', + dest='broker_cnt', + type=int, + default=3, + help='Number of Kafka brokers') parser.add_argument('--ssl', dest='ssl', action='store_true', default=False, help='Enable SSL endpoints') - parser.add_argument('--sasl', dest='sasl', type=str, default=None, help='SASL mechanism (PLAIN, GSSAPI)') + parser.add_argument( + '--sasl', + dest='sasl', + type=str, + default=None, + help='SASL mechanism (PLAIN, GSSAPI)') args = parser.parse_args() @@ -198,7 +239,7 @@ def handle_report (report, version, suite): # Handle test report report['version'] = version - passed,reason = handle_report(report, version, suite) 
+ passed, reason = handle_report(report, version, suite) report['PASSED'] = passed report['REASON'] = reason @@ -212,7 +253,12 @@ def handle_report (report, version, suite): fail_cnt += 1 # Emit hopefully relevant parts of the log on failure - subprocess.call("grep --color=always -B100 -A10 FAIL %s" % (os.path.join(report['root_path'], 'stderr.log')), shell=True) + subprocess.call( + "grep --color=always -B100 -A10 FAIL %s" % + (os.path.join( + report['root_path'], + 'stderr.log')), + shell=True) print('#### Test output: %s/stderr.log' % (report['root_path'])) @@ -229,7 +275,7 @@ def handle_report (report, version, suite): f = os.fdopen(fd, 'w') full_report = {'suites': suites, 'pass_cnt': pass_cnt, - 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt+fail_cnt} + 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt + fail_cnt} f.write(json.dumps(full_report)) f.close() diff --git a/tests/cluster_testing.py b/tests/cluster_testing.py index 18878ca3d1..a0f28ac9c7 100755 --- a/tests/cluster_testing.py +++ b/tests/cluster_testing.py @@ -13,14 +13,19 @@ from trivup.apps.KerberosKdcApp import KerberosKdcApp from trivup.apps.SslApp import SslApp -import os, sys, json, argparse, re +import os +import sys +import json +import argparse +import re from jsoncomment import JsonComment -def version_as_list (version): +def version_as_list(version): if version == 'trunk': - return [sys.maxint] - return [int(a) for a in re.findall('\d+', version)][0:3] + return [sys.maxsize] + return [int(a) for a in re.findall('\\d+', version)][0:3] + def read_scenario_conf(scenario): """ Read scenario configuration from scenarios/.json """ @@ -28,16 +33,17 @@ def read_scenario_conf(scenario): with open(os.path.join('scenarios', scenario + '.json'), 'r') as f: return parser.load(f) + class LibrdkafkaTestCluster(Cluster): def __init__(self, version, conf={}, num_brokers=3, debug=False, scenario="default"): """ - @brief Create, deploy and start a Kafka cluster using Kafka \p version + @brief Create, deploy and 
start a Kafka cluster using Kafka \\p version - Supported \p conf keys: + Supported \\p conf keys: * security.protocol - PLAINTEXT, SASL_PLAINTEXT, SASL_SSL - \p conf dict is passed to KafkaBrokerApp classes, etc. + \\p conf dict is passed to KafkaBrokerApp classes, etc. """ super(LibrdkafkaTestCluster, self).__init__(self.__class__.__name__, @@ -70,19 +76,29 @@ def __init__(self, version, conf={}, num_brokers=3, debug=False, self.conf = defconf for n in range(0, num_brokers): - # Configure rack & replica selector if broker supports fetch-from-follower + # Configure rack & replica selector if broker supports + # fetch-from-follower if version_as_list(version) >= [2, 4, 0]: - defconf.update({'conf': ['broker.rack=RACK${appid}', 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) + defconf.update( + { + 'conf': [ + 'broker.rack=RACK${appid}', + 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) self.brokers.append(KafkaBrokerApp(self, defconf)) - - def bootstrap_servers (self): + def bootstrap_servers(self): """ @return Kafka bootstrap servers based on security.protocol """ - all_listeners = (','.join(self.get_all('advertised_listeners', '', KafkaBrokerApp))).split(',') - return ','.join([x for x in all_listeners if x.startswith(self.conf.get('security.protocol'))]) + all_listeners = ( + ','.join( + self.get_all( + 'advertised_listeners', + '', + KafkaBrokerApp))).split(',') + return ','.join([x for x in all_listeners if x.startswith( + self.conf.get('security.protocol'))]) -def result2color (res): +def result2color(res): if res == 'PASSED': return '\033[42m' elif res == 'FAILED': @@ -90,7 +106,8 @@ def result2color (res): else: return '' -def print_test_report_summary (name, report): + +def print_test_report_summary(name, report): """ Print summary for a test run. 
""" passed = report.get('PASSED', False) if passed: @@ -101,12 +118,12 @@ def print_test_report_summary (name, report): print('%6s %-50s: %s' % (resstr, name, report.get('REASON', 'n/a'))) if not passed: # Print test details - for name,test in report.get('tests', {}).items(): + for name, test in report.get('tests', {}).items(): testres = test.get('state', '') if testres == 'SKIPPED': continue - print('%s --> %-20s \033[0m' % \ - ('%s%s\033[0m' % \ + print('%s --> %-20s \033[0m' % + ('%s%s\033[0m' % (result2color(test.get('state', 'n/a')), test.get('state', 'n/a')), test.get('name', 'n/a'))) @@ -114,14 +131,14 @@ def print_test_report_summary (name, report): ('', report.get('root_path', '.'), 'stderr.log')) -def print_report_summary (fullreport): +def print_report_summary(fullreport): """ Print summary from a full report suite """ suites = fullreport.get('suites', list()) print('#### Full test suite report (%d suite(s))' % len(suites)) for suite in suites: - for version,report in suite.get('version', {}).items(): - print_test_report_summary('%s @ %s' % \ - (suite.get('name','n/a'), version), + for version, report in suite.get('version', {}).items(): + print_test_report_summary('%s @ %s' % + (suite.get('name', 'n/a'), version), report) pass_cnt = fullreport.get('pass_cnt', -1) @@ -136,11 +153,10 @@ def print_report_summary (fullreport): else: fail_clr = '\033[41m' - print('#### %d suites %sPASSED\033[0m, %d suites %sFAILED\033[0m' % \ + print('#### %d suites %sPASSED\033[0m, %d suites %sFAILED\033[0m' % (pass_cnt, pass_clr, fail_cnt, fail_clr)) - if __name__ == '__main__': parser = argparse.ArgumentParser(description='Show test suite report') diff --git a/tests/fuzzers/fuzz_regex.c b/tests/fuzzers/fuzz_regex.c index c5746a3c46..2facc19f02 100644 --- a/tests/fuzzers/fuzz_regex.c +++ b/tests/fuzzers/fuzz_regex.c @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2020, Magnus Edenhill -* All rights reserved. 
-* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ /** @@ -42,29 +42,29 @@ #include "regexp.h" int LLVMFuzzerTestOneInput(uint8_t *data, size_t size) { - /* wrap random data in a null-terminated string */ - char *null_terminated = malloc(size+1); - memcpy(null_terminated, data, size); - null_terminated[size] = '\0'; + /* wrap random data in a null-terminated string */ + char *null_terminated = malloc(size + 1); + memcpy(null_terminated, data, size); + null_terminated[size] = '\0'; - const char *error; - Reprog *p = re_regcomp(null_terminated, 0, &error); - if (p != NULL) { - re_regfree(p); - } + const char *error; + Reprog *p = re_regcomp(null_terminated, 0, &error); + if (p != NULL) { + re_regfree(p); + } - /* cleanup */ - free(null_terminated); + /* cleanup */ + free(null_terminated); - return 0; + return 0; } #if WITH_MAIN #include "helpers.h" -int main (int argc, char **argv) { +int main(int argc, char **argv) { int i; - for (i = 1 ; i < argc ; i++) { + for (i = 1; i < argc; i++) { size_t size; uint8_t *buf = read_file(argv[i], &size); LLVMFuzzerTestOneInput(buf, size); diff --git a/tests/fuzzers/helpers.h b/tests/fuzzers/helpers.h index b53bcc6e3b..cfab037779 100644 --- a/tests/fuzzers/helpers.h +++ 
b/tests/fuzzers/helpers.h @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2020, Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ #ifndef _HELPERS_H_ #define _HELPERS_H_ @@ -40,29 +40,29 @@ * Fuzz program helpers */ -static __attribute__((unused)) -uint8_t *read_file (const char *path, size_t *sizep) { +static __attribute__((unused)) uint8_t *read_file(const char *path, + size_t *sizep) { int fd; uint8_t *buf; struct stat st; if ((fd = open(path, O_RDONLY)) == -1) { - fprintf(stderr, "Failed to open %s: %s\n", - path, strerror(errno)); + fprintf(stderr, "Failed to open %s: %s\n", path, + strerror(errno)); exit(2); return NULL; /* NOTREACHED */ } if (fstat(fd, &st) == -1) { - fprintf(stderr, "Failed to stat %s: %s\n", - path, strerror(errno)); + fprintf(stderr, "Failed to stat %s: %s\n", path, + strerror(errno)); close(fd); exit(2); return NULL; /* NOTREACHED */ } - buf = malloc(st.st_size+1); + buf = malloc(st.st_size + 1); if (!buf) { fprintf(stderr, "Failed to malloc %d bytes for %s\n", (int)st.st_size, path); diff --git a/tests/interactive_broker_version.py b/tests/interactive_broker_version.py index eae8e68662..30e42977a1 100755 --- a/tests/interactive_broker_version.py +++ b/tests/interactive_broker_version.py @@ -23,16 +23,18 @@ import argparse import json -def version_as_number (version): + +def version_as_number(version): if version == 'trunk': - return sys.maxint + return sys.maxsize tokens = version.split('.') return float('%s.%s' % (tokens[0], tokens[1])) -def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt=1, - root_path='tmp', broker_cnt=3, scenario='default'): + +def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt=1, + root_path='tmp', broker_cnt=3, scenario='default'): """ - @brief Create, deploy and start a Kafka cluster using Kafka \p version + @brief Create, deploy and start a Kafka cluster using Kafka \\p version Then run librdkafka's regression tests. 
""" @@ -59,41 +61,53 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt brokers = [] for n in range(0, broker_cnt): - # Configure rack & replica selector if broker supports fetch-from-follower + # Configure rack & replica selector if broker supports + # fetch-from-follower if version_as_number(version) >= 2.4: - defconf.update({'conf': ['broker.rack=RACK${appid}', 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) + defconf.update( + { + 'conf': [ + 'broker.rack=RACK${appid}', + 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) brokers.append(KafkaBrokerApp(cluster, defconf)) cmd_env = os.environ.copy() # Generate test config file - security_protocol='PLAINTEXT' + security_protocol = 'PLAINTEXT' fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True) os.write(fd, ('test.sql.command=sqlite3 rdktests\n').encode('ascii')) os.write(fd, 'broker.address.family=v4\n'.encode('ascii')) if version.startswith('0.9') or version.startswith('0.8'): os.write(fd, 'api.version.request=false\n'.encode('ascii')) - os.write(fd, ('broker.version.fallback=%s\n' % version).encode('ascii')) + os.write( + fd, ('broker.version.fallback=%s\n' % + version).encode('ascii')) # SASL (only one mechanism supported) mech = defconf.get('sasl_mechanisms', '').split(',')[0] if mech != '': os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii')) if mech == 'PLAIN' or mech.find('SCRAM') != -1: - print('# Writing SASL %s client config to %s' % (mech, test_conf_file)) - security_protocol='SASL_PLAINTEXT' + print( + '# Writing SASL %s client config to %s' % + (mech, test_conf_file)) + security_protocol = 'SASL_PLAINTEXT' # Use first user as SASL user/pass for up in defconf.get('sasl_users', '').split(','): - u,p = up.split('=') + u, p = up.split('=') os.write(fd, ('sasl.username=%s\n' % u).encode('ascii')) os.write(fd, ('sasl.password=%s\n' % p).encode('ascii')) break elif mech == 
'OAUTHBEARER': - security_protocol='SASL_PLAINTEXT' - os.write(fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode('ascii'))) - os.write(fd, ('sasl.oauthbearer.config=%s\n' % \ + security_protocol = 'SASL_PLAINTEXT' + os.write( + fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode('ascii'))) + os.write(fd, ('sasl.oauthbearer.config=%s\n' % 'scope=requiredScope principal=admin').encode('ascii')) else: - print('# FIXME: SASL %s client config not written to %s' % (mech, test_conf_file)) + print( + '# FIXME: SASL %s client config not written to %s' % + (mech, test_conf_file)) # SSL support ssl = getattr(cluster, 'ssl', None) @@ -106,29 +120,41 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt key = ssl.create_cert('librdkafka') os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca['pem']).encode('ascii')) - os.write(fd, ('ssl.certificate.location=%s\n' % key['pub']['pem']).encode('ascii')) - os.write(fd, ('ssl.key.location=%s\n' % key['priv']['pem']).encode('ascii')) - os.write(fd, ('ssl.key.password=%s\n' % key['password']).encode('ascii')) + os.write(fd, ('ssl.certificate.location=%s\n' % + key['pub']['pem']).encode('ascii')) + os.write( + fd, ('ssl.key.location=%s\n' % + key['priv']['pem']).encode('ascii')) + os.write( + fd, ('ssl.key.password=%s\n' % + key['password']).encode('ascii')) for k, v in ssl.ca.items(): cmd_env['RDK_SSL_ca_{}'.format(k)] = v # Set envs for all generated keys so tests can find them. for k, v in key.items(): - if type(v) is dict: + if isinstance(v, dict): for k2, v2 in v.items(): # E.g. 
"RDK_SSL_priv_der=path/to/librdkafka-priv.der" cmd_env['RDK_SSL_{}_{}'.format(k, k2)] = v2 else: cmd_env['RDK_SSL_{}'.format(k)] = v - # Define bootstrap brokers based on selected security protocol print('# Using client security.protocol=%s' % security_protocol) - all_listeners = (','.join(cluster.get_all('listeners', '', KafkaBrokerApp))).split(',') - bootstrap_servers = ','.join([x for x in all_listeners if x.startswith(security_protocol)]) - os.write(fd, ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii')) - os.write(fd, ('security.protocol=%s\n' % security_protocol).encode('ascii')) + all_listeners = ( + ','.join( + cluster.get_all( + 'listeners', + '', + KafkaBrokerApp))).split(',') + bootstrap_servers = ','.join( + [x for x in all_listeners if x.startswith(security_protocol)]) + os.write(fd, ('bootstrap.servers=%s\n' % + bootstrap_servers).encode('ascii')) + os.write(fd, ('security.protocol=%s\n' % + security_protocol).encode('ascii')) os.close(fd) if deploy: @@ -144,7 +170,7 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt if not cluster.wait_operational(30): cluster.stop(force=True) - raise Exception('Cluster %s did not go operational, see logs in %s/%s' % \ + raise Exception('Cluster %s did not go operational, see logs in %s/%s' % (cluster.name, cluster.root_path, cluster.instance)) print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers) @@ -163,7 +189,8 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt # Per broker env vars for b in [x for x in cluster.apps if isinstance(x, KafkaBrokerApp)]: cmd_env['BROKER_ADDRESS_%d' % b.appid] = \ - ','.join([x for x in b.conf['listeners'].split(',') if x.startswith(security_protocol)]) + ','.join([x for x in b.conf['listeners'].split( + ',') if x.startswith(security_protocol)]) # Add each broker pid as an env so they can be killed indivdidually. 
cmd_env['BROKER_PID_%d' % b.appid] = str(b.proc.pid) # JMX port, if available @@ -172,20 +199,25 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt cmd_env['BROKER_JMX_PORT_%d' % b.appid] = str(jmx_port) if not cmd: - cmd_env['PS1'] = '[TRIVUP:%s@%s] \\u@\\h:\w$ ' % (cluster.name, version) + cmd_env['PS1'] = '[TRIVUP:%s@%s] \\u@\\h:\\w$ ' % ( + cluster.name, version) cmd = 'bash --rcfile <(cat ~/.bashrc)' ret = True for i in range(0, exec_cnt): - retcode = subprocess.call(cmd, env=cmd_env, shell=True, executable='/bin/bash') + retcode = subprocess.call( + cmd, + env=cmd_env, + shell=True, + executable='/bin/bash') if retcode != 0: print('# Command failed with returncode %d: %s' % (retcode, cmd)) ret = False try: os.remove(test_conf_file) - except: + except BaseException: pass cluster.stop(force=True) @@ -193,9 +225,11 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt cluster.cleanup(keeptypes=['log']) return ret + if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Start a Kafka cluster and provide an interactive shell') + parser = argparse.ArgumentParser( + description='Start a Kafka cluster and provide an interactive shell') parser.add_argument('versions', type=str, default=None, nargs='+', help='Kafka version(s) to deploy') @@ -211,13 +245,37 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt help='Number of times to execute -c ..') parser.add_argument('--debug', action='store_true', dest='debug', default=False, help='Enable trivup debugging') - parser.add_argument('--root', type=str, default=os.environ.get('TRIVUP_ROOT', 'tmp'), help='Root working directory') - parser.add_argument('--port', default=None, help='Base TCP port to start allocating from') - parser.add_argument('--kafka-src', dest='kafka_path', type=str, default=None, help='Path to Kafka git repo checkout (used for version=trunk)') - parser.add_argument('--brokers', 
dest='broker_cnt', type=int, default=3, help='Number of Kafka brokers') + parser.add_argument( + '--root', + type=str, + default=os.environ.get( + 'TRIVUP_ROOT', + 'tmp'), + help='Root working directory') + parser.add_argument( + '--port', + default=None, + help='Base TCP port to start allocating from') + parser.add_argument( + '--kafka-src', + dest='kafka_path', + type=str, + default=None, + help='Path to Kafka git repo checkout (used for version=trunk)') + parser.add_argument( + '--brokers', + dest='broker_cnt', + type=int, + default=3, + help='Number of Kafka brokers') parser.add_argument('--ssl', dest='ssl', action='store_true', default=False, help='Enable SSL endpoints') - parser.add_argument('--sasl', dest='sasl', type=str, default=None, help='SASL mechanism (PLAIN, SCRAM-SHA-nnn, GSSAPI, OAUTHBEARER)') + parser.add_argument( + '--sasl', + dest='sasl', + type=str, + default=None, + help='SASL mechanism (PLAIN, SCRAM-SHA-nnn, GSSAPI, OAUTHBEARER)') args = parser.parse_args() if args.conf is not None: @@ -234,7 +292,8 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt if args.ssl: args.conf['security.protocol'] = 'SSL' if args.sasl: - if (args.sasl == 'PLAIN' or args.sasl.find('SCRAM') != -1) and 'sasl_users' not in args.conf: + if (args.sasl == 'PLAIN' or args.sasl.find('SCRAM') + != -1) and 'sasl_users' not in args.conf: args.conf['sasl_users'] = 'testuser=testpass' args.conf['sasl_mechanisms'] = args.sasl @@ -249,5 +308,4 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt if not r: retcode = 2 - sys.exit(retcode) diff --git a/tests/interceptor_test/interceptor_test.c b/tests/interceptor_test/interceptor_test.c index ecbda795f5..ee8a63ba98 100644 --- a/tests/interceptor_test/interceptor_test.c +++ b/tests/interceptor_test/interceptor_test.c @@ -64,8 +64,8 @@ * or by conf_dup() which is a copying of a conf previously seen by conf_init()) */ struct ici { - rd_kafka_conf_t *conf; /**< Interceptor 
config */ - char *config1; /**< Interceptor-specific config */ + rd_kafka_conf_t *conf; /**< Interceptor config */ + char *config1; /**< Interceptor-specific config */ char *config2; int on_new_cnt; @@ -77,44 +77,43 @@ static char *my_interceptor_plug_opaque = "my_interceptor_plug_opaque"; /* Producer methods */ -rd_kafka_resp_err_t on_send (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +rd_kafka_resp_err_t +on_send(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { struct ici *ici = ic_opaque; printf("on_send: %p\n", ici); return RD_KAFKA_RESP_ERR_NO_ERROR; } -rd_kafka_resp_err_t on_acknowledgement (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +rd_kafka_resp_err_t on_acknowledgement(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque) { struct ici *ici = ic_opaque; - printf("on_acknowledgement: %p: err %d, partition %"PRId32"\n", - ici, rkmessage->err, rkmessage->partition); + printf("on_acknowledgement: %p: err %d, partition %" PRId32 "\n", ici, + rkmessage->err, rkmessage->partition); return RD_KAFKA_RESP_ERR_NO_ERROR; } /* Consumer methods */ -rd_kafka_resp_err_t on_consume (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +rd_kafka_resp_err_t +on_consume(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { struct ici *ici = ic_opaque; - printf("on_consume: %p: partition %"PRId32" @ %"PRId64"\n", - ici, rkmessage->partition, rkmessage->offset); + printf("on_consume: %p: partition %" PRId32 " @ %" PRId64 "\n", ici, + rkmessage->partition, rkmessage->offset); return RD_KAFKA_RESP_ERR_NO_ERROR; } -rd_kafka_resp_err_t on_commit (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err, void *ic_opaque) { +rd_kafka_resp_err_t on_commit(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err, + void *ic_opaque) { struct ici *ici = ic_opaque; printf("on_commit: %p: err %d\n", ici, err); 
return RD_KAFKA_RESP_ERR_NO_ERROR; } -static void ici_destroy (struct ici *ici) { +static void ici_destroy(struct ici *ici) { if (ici->conf) rd_kafka_conf_destroy(ici->conf); if (ici->config1) @@ -124,7 +123,7 @@ static void ici_destroy (struct ici *ici) { free(ici); } -rd_kafka_resp_err_t on_destroy (rd_kafka_t *rk, void *ic_opaque) { +rd_kafka_resp_err_t on_destroy(rd_kafka_t *rk, void *ic_opaque) { struct ici *ici = ic_opaque; printf("on_destroy: %p\n", ici); /* the ici is freed from on_conf_destroy() */ @@ -135,16 +134,18 @@ rd_kafka_resp_err_t on_destroy (rd_kafka_t *rk, void *ic_opaque) { /** * @brief Called from rd_kafka_new(). We use it to set up interceptors. */ -static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { struct ici *ici = ic_opaque; ictest.on_new.cnt++; ici->on_new_cnt++; - TEST_SAY("on_new(rk %p, conf %p, ici->conf %p): %p: #%d\n", - rk, conf, ici->conf, ici, ictest.on_new.cnt); + TEST_SAY("on_new(rk %p, conf %p, ici->conf %p): %p: #%d\n", rk, conf, + ici->conf, ici, ictest.on_new.cnt); ICTEST_CNT_CHECK(on_new); TEST_ASSERT(ici->on_new_cnt == 1); @@ -153,8 +154,10 @@ static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, TEST_ASSERT(!ictest.socket_timeout_ms); /* Extract some well known config properties from the interceptor's * configuration. 
*/ - ictest.session_timeout_ms = rd_strdup(test_conf_get(ici->conf, "session.timeout.ms")); - ictest.socket_timeout_ms = rd_strdup(test_conf_get(ici->conf, "socket.timeout.ms")); + ictest.session_timeout_ms = + rd_strdup(test_conf_get(ici->conf, "session.timeout.ms")); + ictest.socket_timeout_ms = + rd_strdup(test_conf_get(ici->conf, "socket.timeout.ms")); ictest.config1 = rd_strdup(ici->config1); ictest.config2 = rd_strdup(ici->config2); @@ -172,27 +175,29 @@ static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, /** * @brief Configuration set handler */ -static rd_kafka_conf_res_t on_conf_set (rd_kafka_conf_t *conf, - const char *name, const char *val, - char *errstr, size_t errstr_size, - void *ic_opaque) { +static rd_kafka_conf_res_t on_conf_set(rd_kafka_conf_t *conf, + const char *name, + const char *val, + char *errstr, + size_t errstr_size, + void *ic_opaque) { struct ici *ici = ic_opaque; - int level = 3; + int level = 3; if (!strcmp(name, "session.timeout.ms") || !strcmp(name, "socket.timeout.ms") || !strncmp(name, "interceptor_test", strlen("interceptor_test"))) level = 2; - TEST_SAYL(level, "on_conf_set(conf %p, \"%s\", \"%s\"): %p\n", - conf, name, val, ici); + TEST_SAYL(level, "on_conf_set(conf %p, \"%s\", \"%s\"): %p\n", conf, + name, val, ici); if (!strcmp(name, "interceptor_test.good")) return RD_KAFKA_CONF_OK; else if (!strcmp(name, "interceptor_test.bad")) { strncpy(errstr, "on_conf_set failed deliberately", - errstr_size-1); - errstr[errstr_size-1] = '\0'; + errstr_size - 1); + errstr[errstr_size - 1] = '\0'; return RD_KAFKA_CONF_INVALID; } else if (!strcmp(name, "interceptor_test.config1")) { if (ici->config1) { @@ -201,8 +206,8 @@ static rd_kafka_conf_res_t on_conf_set (rd_kafka_conf_t *conf, } if (val) ici->config1 = rd_strdup(val); - TEST_SAY("on_conf_set(conf %p, %s, %s): %p\n", - conf, name, val, ici); + TEST_SAY("on_conf_set(conf %p, %s, %s): %p\n", conf, name, val, + ici); return RD_KAFKA_CONF_OK; } else if 
(!strcmp(name, "interceptor_test.config2")) { if (ici->config2) { @@ -215,8 +220,7 @@ static rd_kafka_conf_res_t on_conf_set (rd_kafka_conf_t *conf, } else { /* Apply intercepted client's config properties on * interceptor config. */ - rd_kafka_conf_set(ici->conf, name, val, - errstr, errstr_size); + rd_kafka_conf_set(ici->conf, name, val, errstr, errstr_size); /* UNKNOWN makes the conf_set() call continue with * other interceptors and finally the librdkafka properties. */ return RD_KAFKA_CONF_UNKNOWN; @@ -225,18 +229,19 @@ static rd_kafka_conf_res_t on_conf_set (rd_kafka_conf_t *conf, return RD_KAFKA_CONF_UNKNOWN; } -static void conf_init0 (rd_kafka_conf_t *conf); +static void conf_init0(rd_kafka_conf_t *conf); /** * @brief Set up new configuration on copy. */ -static rd_kafka_resp_err_t on_conf_dup (rd_kafka_conf_t *new_conf, - const rd_kafka_conf_t *old_conf, - size_t filter_cnt, const char **filter, - void *ic_opaque) { +static rd_kafka_resp_err_t on_conf_dup(rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter, + void *ic_opaque) { struct ici *ici = ic_opaque; - TEST_SAY("on_conf_dup(new_conf %p, old_conf %p, filter_cnt %"PRIusz + TEST_SAY("on_conf_dup(new_conf %p, old_conf %p, filter_cnt %" PRIusz ", ici %p)\n", new_conf, old_conf, filter_cnt, ici); conf_init0(new_conf); @@ -244,11 +249,11 @@ static rd_kafka_resp_err_t on_conf_dup (rd_kafka_conf_t *new_conf, } -static rd_kafka_resp_err_t on_conf_destroy (void *ic_opaque) { +static rd_kafka_resp_err_t on_conf_destroy(void *ic_opaque) { struct ici *ici = ic_opaque; ici->on_conf_destroy_cnt++; - printf("conf_destroy called (opaque %p vs %p) ici %p\n", - ic_opaque, my_interceptor_plug_opaque, ici); + printf("conf_destroy called (opaque %p vs %p) ici %p\n", ic_opaque, + my_interceptor_plug_opaque, ici); TEST_ASSERT(ici->on_conf_destroy_cnt == 1); ici_destroy(ici); return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -261,11 +266,10 @@ static rd_kafka_resp_err_t on_conf_destroy 
(void *ic_opaque) { * as well as rd_kafka_conf_dup(). * This internal method serves both cases. */ -static void conf_init0 (rd_kafka_conf_t *conf) { +static void conf_init0(rd_kafka_conf_t *conf) { struct ici *ici; - const char *filter[] = { "plugin.library.paths", - "interceptor_test." }; - size_t filter_cnt = sizeof(filter) / sizeof(*filter); + const char *filter[] = {"plugin.library.paths", "interceptor_test."}; + size_t filter_cnt = sizeof(filter) / sizeof(*filter); /* Create new interceptor instance */ ici = calloc(1, sizeof(*ici)); @@ -276,8 +280,8 @@ static void conf_init0 (rd_kafka_conf_t *conf) { /* Create own copy of configuration, after filtering out what * brought us here (plugins and our own interceptor config). */ ici->conf = rd_kafka_conf_dup_filter(conf, filter_cnt, filter); - TEST_SAY("conf_init0(conf %p) for ici %p with ici->conf %p\n", - conf, ici, ici->conf); + TEST_SAY("conf_init0(conf %p) for ici %p with ici->conf %p\n", conf, + ici, ici->conf); /* Add interceptor methods */ @@ -295,17 +299,16 @@ static void conf_init0 (rd_kafka_conf_t *conf) { * @brief Plugin conf initializer called when plugin.library.paths is set. 
*/ DLL_EXPORT -rd_kafka_resp_err_t conf_init (rd_kafka_conf_t *conf, - void **plug_opaquep, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf, + void **plug_opaquep, + char *errstr, + size_t errstr_size) { *plug_opaquep = (void *)my_interceptor_plug_opaque; - TEST_SAY("conf_init(conf %p) called (setting opaque to %p)\n", - conf, *plug_opaquep); + TEST_SAY("conf_init(conf %p) called (setting opaque to %p)\n", conf, + *plug_opaquep); conf_init0(conf); return RD_KAFKA_RESP_ERR_NO_ERROR; } - - diff --git a/tests/interceptor_test/interceptor_test.h b/tests/interceptor_test/interceptor_test.h index e3c4aca36c..646b4b4d67 100644 --- a/tests/interceptor_test/interceptor_test.h +++ b/tests/interceptor_test/interceptor_test.h @@ -22,23 +22,30 @@ struct ictest { }; #define ictest_init(ICT) memset((ICT), 0, sizeof(ictest)) -#define ictest_cnt_init(CNT,MIN,MAX) do { \ - (CNT)->cnt = 0; \ - (CNT)->min = MIN; \ - (CNT)->max = MAX; \ +#define ictest_cnt_init(CNT, MIN, MAX) \ + do { \ + (CNT)->cnt = 0; \ + (CNT)->min = MIN; \ + (CNT)->max = MAX; \ } while (0) -#define ictest_free(ICT) do { \ - if ((ICT)->config1) free((ICT)->config1); \ - if ((ICT)->config2) free((ICT)->config2); \ - if ((ICT)->session_timeout_ms) free((ICT)->session_timeout_ms); \ - if ((ICT)->socket_timeout_ms) free((ICT)->socket_timeout_ms); \ +#define ictest_free(ICT) \ + do { \ + if ((ICT)->config1) \ + free((ICT)->config1); \ + if ((ICT)->config2) \ + free((ICT)->config2); \ + if ((ICT)->session_timeout_ms) \ + free((ICT)->session_timeout_ms); \ + if ((ICT)->socket_timeout_ms) \ + free((ICT)->socket_timeout_ms); \ } while (0) -#define ICTEST_CNT_CHECK(F) do { \ - if (ictest.F.cnt > ictest.F.max) \ - TEST_FAIL("interceptor %s count %d > max %d", \ - # F, ictest.F.cnt, ictest.F.max); \ +#define ICTEST_CNT_CHECK(F) \ + do { \ + if (ictest.F.cnt > ictest.F.max) \ + TEST_FAIL("interceptor %s count %d > max %d", #F, \ + ictest.F.cnt, ictest.F.max); \ } while (0) /* The 
ictest struct is defined and set up by the calling test. */ diff --git a/tests/performance_plot.py b/tests/performance_plot.py index 7c5fb957c9..7d540f5513 100755 --- a/tests/performance_plot.py +++ b/tests/performance_plot.py @@ -1,13 +1,15 @@ #!/usr/bin/env python3 # -import sys, json +import sys +import json import numpy as np import matplotlib.pyplot as plt from collections import defaultdict -def semver2int (semver): + +def semver2int(semver): if semver == 'trunk': semver = '0.10.0.0' vi = 0 @@ -17,7 +19,8 @@ def semver2int (semver): i += 1 return vi -def get_perf_data (perfname, stats): + +def get_perf_data(perfname, stats): """ Return [labels,x,y,errs] for perfname 'mb_per_sec' as a numpy arrays labels: broker versions x: list with identical value (to plot on same x point) @@ -31,7 +34,6 @@ def get_perf_data (perfname, stats): # * calculate average # * calculate error - # Accumulate values per version for x in stats: v = str(x[0]) @@ -54,11 +56,11 @@ def get_perf_data (perfname, stats): y1 = np.array(y0) x1 = np.array(range(0, len(labels))) errs = np.array(errs0) - return [labels,x1,y1,errs] + return [labels, x1, y1, errs] -def plot (description, name, stats, perfname, outfile=None): - labels,x,y,errs = get_perf_data(perfname, stats) +def plot(description, name, stats, perfname, outfile=None): + labels, x, y, errs = get_perf_data(perfname, stats) colors = np.random.rand(len(labels)) plt.title('%s: %s %s' % (description, name, perfname)) plt.xlabel('Kafka version') @@ -87,12 +89,18 @@ def plot (description, name, stats, perfname, outfile=None): # Extract performance test data for rep in reports: - perfs = rep.get('tests', dict()).get('0038_performance', list).get('report', None) + perfs = rep.get( + 'tests', + dict()).get( + '0038_performance', + list).get( + 'report', + None) if perfs is None: continue for perf in perfs: - for n in ['producer','consumer']: + for n in ['producer', 'consumer']: o = perf.get(n, None) if o is None: print('no %s in %s' % (n, 
perf)) @@ -100,11 +108,8 @@ def plot (description, name, stats, perfname, outfile=None): stats[n].append((rep.get('broker_version', 'unknown'), o)) - - for t in ['producer','consumer']: + for t in ['producer', 'consumer']: for perfname in ['mb_per_sec', 'records_per_sec']: - plot('librdkafka 0038_performance test: %s (%d samples)' % \ + plot('librdkafka 0038_performance test: %s (%d samples)' % (outfile, len(reports)), t, stats[t], perfname, outfile='%s_%s_%s.png' % (outfile, t, perfname)) - - diff --git a/tests/plugin_test/plugin_test.c b/tests/plugin_test/plugin_test.c index 9144289303..54639a5a83 100644 --- a/tests/plugin_test/plugin_test.c +++ b/tests/plugin_test/plugin_test.c @@ -43,16 +43,16 @@ static void *my_opaque = (void *)0x5678; /* * Common methods */ -rd_kafka_resp_err_t conf_init (rd_kafka_conf_t *conf, - void **plug_opaquep, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf, + void **plug_opaquep, + char *errstr, + size_t errstr_size) { printf("plugin conf_init called!\n"); *plug_opaquep = my_opaque; return RD_KAFKA_RESP_ERR_NO_ERROR; } -void conf_destroy (const rd_kafka_conf_t *conf, void *plug_opaque) { +void conf_destroy(const rd_kafka_conf_t *conf, void *plug_opaque) { assert(plug_opaque == plug_opaque); printf("plugin destroy called\n"); } - diff --git a/tests/rusage.c b/tests/rusage.c index c20ec11892..48e702f3f4 100644 --- a/tests/rusage.c +++ b/tests/rusage.c @@ -32,7 +32,7 @@ */ #ifdef __APPLE__ -#define _DARWIN_C_SOURCE /* required for rusage.ru_maxrss, etc. */ +#define _DARWIN_C_SOURCE /* required for rusage.ru_maxrss, etc. 
*/ #endif #include "test.h" @@ -47,7 +47,7 @@ /** * @brief Call getrusage(2) */ -static int test_getrusage (struct rusage *ru) { +static int test_getrusage(struct rusage *ru) { if (getrusage(RUSAGE_SELF, ru) == -1) { TEST_WARN("getrusage() failed: %s\n", rd_strerror(errno)); return -1; @@ -57,11 +57,11 @@ static int test_getrusage (struct rusage *ru) { } /* Convert timeval to seconds */ -#define _tv2s(TV) (double)((double)(TV).tv_sec + \ - ((double)(TV).tv_usec / 1000000.0)) +#define _tv2s(TV) \ + (double)((double)(TV).tv_sec + ((double)(TV).tv_usec / 1000000.0)) /* Convert timeval to CPU usage percentage (5 = 5%, 130.3 = 130.3%) */ -#define _tv2cpu(TV,DURATION) ((_tv2s(TV) / (DURATION)) * 100.0) +#define _tv2cpu(TV, DURATION) ((_tv2s(TV) / (DURATION)) * 100.0) /** @@ -69,9 +69,9 @@ static int test_getrusage (struct rusage *ru) { * * @returns the delta */ -static struct rusage test_rusage_calc (const struct rusage *start, - const struct rusage *end, - double duration) { +static struct rusage test_rusage_calc(const struct rusage *start, + const struct rusage *end, + double duration) { struct rusage delta = RD_ZERO_INIT; timersub(&end->ru_utime, &start->ru_utime, &delta.ru_utime); @@ -81,20 +81,18 @@ static struct rusage test_rusage_calc (const struct rusage *start, * maximum RSS, not the current one. * Read this from /proc//.. 
instead */ delta.ru_maxrss = end->ru_maxrss - start->ru_maxrss; - delta.ru_nvcsw = end->ru_nvcsw - start->ru_nvcsw; + delta.ru_nvcsw = end->ru_nvcsw - start->ru_nvcsw; /* skip fields we're not interested in */ - TEST_SAY(_C_MAG "Test resource usage summary: " + TEST_SAY(_C_MAG + "Test resource usage summary: " "%.3fs (%.1f%%) User CPU time, " "%.3fs (%.1f%%) Sys CPU time, " "%.3fMB RSS memory increase, " "%ld Voluntary context switches\n", - _tv2s(delta.ru_utime), - _tv2cpu(delta.ru_utime, duration), - _tv2s(delta.ru_stime), - _tv2cpu(delta.ru_stime, duration), - (double)delta.ru_maxrss / (1024.0*1024.0), - delta.ru_nvcsw); + _tv2s(delta.ru_utime), _tv2cpu(delta.ru_utime, duration), + _tv2s(delta.ru_stime), _tv2cpu(delta.ru_stime, duration), + (double)delta.ru_maxrss / (1024.0 * 1024.0), delta.ru_nvcsw); return delta; } @@ -103,27 +101,27 @@ static struct rusage test_rusage_calc (const struct rusage *start, /** * @brief Check that test ran within threshold levels */ -static int test_rusage_check_thresholds (struct test *test, - const struct rusage *ru, - double duration) { +static int test_rusage_check_thresholds(struct test *test, + const struct rusage *ru, + double duration) { static const struct rusage_thres defaults = { - .ucpu = 5.0, /* min value, see below */ - .scpu = 2.5, /* min value, see below */ - .rss = 10.0, /* 10 megs */ - .ctxsw = 100, /* this is the default number of context switches - * per test second. - * note: when ctxsw is specified on a test - * it should be specified as the total - * number of context switches. */ + .ucpu = 5.0, /* min value, see below */ + .scpu = 2.5, /* min value, see below */ + .rss = 10.0, /* 10 megs */ + .ctxsw = 100, /* this is the default number of context switches + * per test second. + * note: when ctxsw is specified on a test + * it should be specified as the total + * number of context switches. */ }; /* CPU usage thresholds are too blunt for very quick tests. 
* Use a forgiving default CPU threshold for any test that * runs below a certain duration. */ const double min_duration = 2.0; /* minimum test duration for * CPU thresholds to have effect. */ - const double lax_cpu = 1000.0; /* 1000% CPU usage (e.g 10 cores - * at full speed) allowed for any - * test that finishes in under 2s */ + const double lax_cpu = 1000.0; /* 1000% CPU usage (e.g 10 cores + * at full speed) allowed for any + * test that finishes in under 2s */ const struct rusage_thres *thres = &test->rusage_thres; double cpu, mb, uthres, uthres_orig, sthres, rssthres; int csthres; @@ -138,7 +136,7 @@ static int test_rusage_check_thresholds (struct test *test, uthres_orig = uthres; uthres *= test_rusage_cpu_calibration; - cpu = _tv2cpu(ru->ru_utime, duration); + cpu = _tv2cpu(ru->ru_utime, duration); if (cpu > uthres) { rd_snprintf(reasons[fails], sizeof(reasons[fails]), "User CPU time (%.3fs) exceeded: %.1f%% > %.1f%%", @@ -150,12 +148,13 @@ static int test_rusage_check_thresholds (struct test *test, /* Let the default Sys CPU be the maximum of the defaults.cpu * and 20% of the User CPU. */ if (rd_dbl_zero((sthres = thres->scpu))) - sthres = duration < min_duration ? lax_cpu : - RD_MAX(uthres_orig * 0.20, defaults.scpu); + sthres = duration < min_duration + ? lax_cpu + : RD_MAX(uthres_orig * 0.20, defaults.scpu); sthres *= test_rusage_cpu_calibration; - cpu = _tv2cpu(ru->ru_stime, duration); + cpu = _tv2cpu(ru->ru_stime, duration); if (cpu > sthres) { rd_snprintf(reasons[fails], sizeof(reasons[fails]), "Sys CPU time (%.3fs) exceeded: %.1f%% > %.1f%%", @@ -165,24 +164,26 @@ static int test_rusage_check_thresholds (struct test *test, } rssthres = thres->rss > 0.0 ? 
thres->rss : defaults.rss; - if ((mb = (double)ru->ru_maxrss / (1024.0*1024.0)) > rssthres) { + if ((mb = (double)ru->ru_maxrss / (1024.0 * 1024.0)) > rssthres) { rd_snprintf(reasons[fails], sizeof(reasons[fails]), - "RSS memory exceeded: %.2fMB > %.2fMB", - mb, rssthres); + "RSS memory exceeded: %.2fMB > %.2fMB", mb, + rssthres); TEST_WARN("%s\n", reasons[fails]); fails++; } if (!(csthres = thres->ctxsw)) - csthres = duration < min_duration ? defaults.ctxsw * 100 : - (int)(duration * (double)defaults.ctxsw); + csthres = duration < min_duration + ? defaults.ctxsw * 100 + : (int)(duration * (double)defaults.ctxsw); /* FIXME: not sure how to use this */ if (0 && ru->ru_nvcsw > csthres) { - TEST_WARN("Voluntary context switches exceeded: " - "%ld > %d\n", - ru->ru_nvcsw, csthres); + TEST_WARN( + "Voluntary context switches exceeded: " + "%ld > %d\n", + ru->ru_nvcsw, csthres); fails++; } @@ -193,11 +194,8 @@ static int test_rusage_check_thresholds (struct test *test, return 0; TEST_FAIL("Test resource usage exceeds %d threshold(s): %s%s%s%s%s", - fails, - reasons[0], - fails > 1 ? ", " : "", - fails > 1 ? reasons[1] : "", - fails > 2 ? ", " : "", + fails, reasons[0], fails > 1 ? ", " : "", + fails > 1 ? reasons[1] : "", fails > 2 ? ", " : "", fails > 2 ? reasons[2] : ""); @@ -207,7 +205,7 @@ static int test_rusage_check_thresholds (struct test *test, -void test_rusage_start (struct test *test) { +void test_rusage_start(struct test *test) { #if HAVE_GETRUSAGE /* Can't do per-test rusage checks when tests run in parallel. */ if (test_concurrent_max > 1) @@ -225,7 +223,7 @@ void test_rusage_start (struct test *test) { * * @returns -1 if thresholds were exceeded, else 0. 
*/ - int test_rusage_stop (struct test *test, double duration) { +int test_rusage_stop(struct test *test, double duration) { #if HAVE_GETRUSAGE struct rusage start, end; @@ -241,7 +239,7 @@ void test_rusage_start (struct test *test) { if (duration < 0.001) duration = 0.001; - start = test->rusage; + start = test->rusage; test->rusage = test_rusage_calc(&start, &end, duration); return test_rusage_check_thresholds(test, &test->rusage, duration); diff --git a/tests/sasl_test.py b/tests/sasl_test.py index c3f0514a49..f73fba560e 100755 --- a/tests/sasl_test.py +++ b/tests/sasl_test.py @@ -17,14 +17,16 @@ import json import tempfile -def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, debug=False, - scenario="default"): + +def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, debug=False, + scenario="default"): """ - @brief Create, deploy and start a Kafka cluster using Kafka \p version + @brief Create, deploy and start a Kafka cluster using Kafka \\p version Then run librdkafka's regression tests. """ - cluster = LibrdkafkaTestCluster(version, conf, debug=debug, scenario=scenario) + cluster = LibrdkafkaTestCluster( + version, conf, debug=debug, scenario=scenario) # librdkafka's regression tests, as an App. 
rdkafka = LibrdkafkaTestApp(cluster, version, _rdkconf, tests=tests, @@ -37,12 +39,18 @@ def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, debug=False, cluster.start(timeout=30) - print('# Connect to cluster with bootstrap.servers %s' % cluster.bootstrap_servers()) + print( + '# Connect to cluster with bootstrap.servers %s' % + cluster.bootstrap_servers()) rdkafka.start() - print('# librdkafka regression tests started, logs in %s' % rdkafka.root_path()) + print( + '# librdkafka regression tests started, logs in %s' % + rdkafka.root_path()) try: - rdkafka.wait_stopped(timeout=60*30) - rdkafka.dbg('wait stopped: %s, runtime %ds' % (rdkafka.state, rdkafka.runtime())) + rdkafka.wait_stopped(timeout=60 * 30) + rdkafka.dbg( + 'wait stopped: %s, runtime %ds' % + (rdkafka.state, rdkafka.runtime())) except KeyboardInterrupt: print('# Aborted by user') @@ -56,7 +64,7 @@ def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, debug=False, return report -def handle_report (report, version, suite): +def handle_report(report, version, suite): """ Parse test report and return tuple (Passed(bool), Reason(str)) """ test_cnt = report.get('tests_run', 0) @@ -65,27 +73,32 @@ def handle_report (report, version, suite): passed = report.get('tests_passed', 0) failed = report.get('tests_failed', 0) - if 'all' in suite.get('expect_fail', []) or version in suite.get('expect_fail', []): + if 'all' in suite.get('expect_fail', []) or version in suite.get( + 'expect_fail', []): expect_fail = True else: expect_fail = False if expect_fail: if failed == test_cnt: - return (True, 'All %d/%d tests failed as expected' % (failed, test_cnt)) + return (True, 'All %d/%d tests failed as expected' % + (failed, test_cnt)) else: - return (False, '%d/%d tests failed: expected all to fail' % (failed, test_cnt)) + return (False, '%d/%d tests failed: expected all to fail' % + (failed, test_cnt)) else: if failed > 0: - return (False, '%d/%d tests passed: expected all to pass' % 
(passed, test_cnt)) + return (False, '%d/%d tests passed: expected all to pass' % + (passed, test_cnt)) else: - return (True, 'All %d/%d tests passed as expected' % (passed, test_cnt)) - + return (True, 'All %d/%d tests passed as expected' % + (passed, test_cnt)) if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Run librdkafka test suit using SASL on a trivupped cluster') + parser = argparse.ArgumentParser( + description='Run librdkafka test suit using SASL on a trivupped cluster') parser.add_argument('--conf', type=str, dest='conf', default=None, help='trivup JSON config object (not file)') @@ -129,10 +142,11 @@ def handle_report (report, version, suite): versions = list() if len(args.versions): for v in args.versions: - versions.append((v, ['SCRAM-SHA-512','PLAIN','GSSAPI','OAUTHBEARER'])) + versions.append( + (v, ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER'])) else: - versions = [('2.1.0', ['OAUTHBEARER','GSSAPI']), - ('0.10.2.0', ['SCRAM-SHA-512','PLAIN','GSSAPI']), + versions = [('2.1.0', ['OAUTHBEARER', 'GSSAPI']), + ('0.10.2.0', ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI']), ('0.9.0.1', ['GSSAPI']), ('0.8.2.2', [])] sasl_plain_conf = {'sasl_mechanisms': 'PLAIN', @@ -190,7 +204,7 @@ def handle_report (report, version, suite): pass_cnt = 0 fail_cnt = 0 - for version,supported in versions: + for version, supported in versions: if len(args.versions) > 0 and version not in args.versions: print('### Skipping version %s' % version) continue @@ -216,7 +230,9 @@ def handle_report (report, version, suite): _conf.pop('sasl_mechanisms', None) # Run tests - print('#### Version %s, suite %s: STARTING' % (version, suite['name'])) + print( + '#### Version %s, suite %s: STARTING' % + (version, suite['name'])) if tests is None: tests_to_run = suite.get('tests', None) else: @@ -226,7 +242,7 @@ def handle_report (report, version, suite): # Handle test report report['version'] = version - passed,reason = handle_report(report, version, suite) + passed, 
reason = handle_report(report, version, suite) report['PASSED'] = passed report['REASON'] = reason @@ -237,7 +253,7 @@ def handle_report (report, version, suite): else: print('\033[41m#### Version %s, suite %s: FAILED: %s\033[0m' % (version, suite['name'], reason)) - print_test_report_summary('%s @ %s' % \ + print_test_report_summary('%s @ %s' % (suite['name'], version), report) fail_cnt += 1 print('#### Test output: %s/stderr.log' % (report['root_path'])) @@ -255,7 +271,7 @@ def handle_report (report, version, suite): f = os.fdopen(fd, 'w') full_report = {'suites': suites, 'pass_cnt': pass_cnt, - 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt+fail_cnt} + 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt + fail_cnt} f.write(json.dumps(full_report)) f.close() diff --git a/tests/sockem.c b/tests/sockem.c index 796dee5910..2de01627d8 100644 --- a/tests/sockem.c +++ b/tests/sockem.c @@ -50,72 +50,71 @@ #define socket_errno() WSAGetLastError() #else #define socket_errno() errno -#define SOCKET_ERROR -1 +#define SOCKET_ERROR -1 #endif #ifndef strdupa -#define strdupa(s) \ - ({ \ - const char *_s = (s); \ - size_t _len = strlen(_s)+1; \ - char *_d = (char *)alloca(_len); \ - (char *)memcpy(_d, _s, _len); \ +#define strdupa(s) \ + ({ \ + const char *_s = (s); \ + size_t _len = strlen(_s) + 1; \ + char *_d = (char *)alloca(_len); \ + (char *)memcpy(_d, _s, _len); \ }) #endif #include typedef pthread_mutex_t mtx_t; -#define mtx_init(M) pthread_mutex_init(M, NULL) +#define mtx_init(M) pthread_mutex_init(M, NULL) #define mtx_destroy(M) pthread_mutex_destroy(M) -#define mtx_lock(M) pthread_mutex_lock(M) -#define mtx_unlock(M) pthread_mutex_unlock(M) +#define mtx_lock(M) pthread_mutex_lock(M) +#define mtx_unlock(M) pthread_mutex_unlock(M) typedef pthread_t thrd_t; -#define thrd_create(THRD,START_ROUTINE,ARG) \ - pthread_create(THRD, NULL, START_ROUTINE, ARG) -#define thrd_join0(THRD) \ - pthread_join(THRD, NULL) +#define thrd_create(THRD, START_ROUTINE, ARG) \ + pthread_create(THRD, 
NULL, START_ROUTINE, ARG) +#define thrd_join0(THRD) pthread_join(THRD, NULL) static mtx_t sockem_lock; static LIST_HEAD(, sockem_s) sockems; static pthread_once_t sockem_once = PTHREAD_ONCE_INIT; -static char *sockem_conf_str = ""; +static char *sockem_conf_str = ""; typedef int64_t sockem_ts_t; #ifdef LIBSOCKEM_PRELOAD -static int (*sockem_orig_connect) (int, const struct sockaddr *, socklen_t); -static int (*sockem_orig_close) (int); +static int (*sockem_orig_connect)(int, const struct sockaddr *, socklen_t); +static int (*sockem_orig_close)(int); -#define sockem_close0(S) (sockem_orig_close(S)) -#define sockem_connect0(S,A,AL) (sockem_orig_connect(S,A,AL)) +#define sockem_close0(S) (sockem_orig_close(S)) +#define sockem_connect0(S, A, AL) (sockem_orig_connect(S, A, AL)) #else -#define sockem_close0(S) close(S) -#define sockem_connect0(S,A,AL) connect(S,A,AL) +#define sockem_close0(S) close(S) +#define sockem_connect0(S, A, AL) connect(S, A, AL) #endif struct sockem_conf { /* FIXME: these needs to be implemented */ - int tx_thruput; /* app->peer bytes/second */ - int rx_thruput; /* peer->app bytes/second */ - int delay; /* latency in ms */ - int jitter; /* latency variation in ms */ - int debug; /* enable sockem printf debugging */ - size_t recv_bufsz; /* recv chunk/buffer size */ - int direct; /* direct forward, no delay or rate-limiting */ + int tx_thruput; /* app->peer bytes/second */ + int rx_thruput; /* peer->app bytes/second */ + int delay; /* latency in ms */ + int jitter; /* latency variation in ms */ + int debug; /* enable sockem printf debugging */ + size_t recv_bufsz; /* recv chunk/buffer size */ + int direct; /* direct forward, no delay or rate-limiting */ }; typedef struct sockem_buf_s { TAILQ_ENTRY(sockem_buf_s) sb_link; - size_t sb_size; - size_t sb_of; - char *sb_data; - int64_t sb_at; /* Transmit at this absolute time. */ + size_t sb_size; + size_t sb_of; + char *sb_data; + int64_t sb_at; /* Transmit at this absolute time. 
*/ } sockem_buf_t; @@ -130,46 +129,47 @@ struct sockem_s { SOCKEM_TERM } run; - int as; /* application's socket. */ - int ls; /* internal application listen socket */ - int ps; /* internal peer socket connecting sockem to the peer.*/ + int as; /* application's socket. */ + int ls; /* internal application listen socket */ + int ps; /* internal peer socket connecting sockem to the peer.*/ - void *recv_buf; /* Receive buffer */ - size_t recv_bufsz; /* .. size */ + void *recv_buf; /* Receive buffer */ + size_t recv_bufsz; /* .. size */ - int linked; /* On sockems list */ + int linked; /* On sockems list */ - thrd_t thrd; /* Forwarder thread */ + thrd_t thrd; /* Forwarder thread */ - mtx_t lock; + mtx_t lock; - struct sockem_conf conf; /* application-set config. - * protected by .lock */ + struct sockem_conf conf; /* application-set config. + * protected by .lock */ - struct sockem_conf use; /* last copy of .conf - * local to skm thread */ + struct sockem_conf use; /* last copy of .conf + * local to skm thread */ - TAILQ_HEAD(, sockem_buf_s) bufs; /* Buffers in queue waiting for - * transmission (delayed) */ + TAILQ_HEAD(, sockem_buf_s) + bufs; /* Buffers in queue waiting for + * transmission (delayed) */ - size_t bufs_size; /* Total number of bytes currently enqueued - * for transmission */ + size_t bufs_size; /* Total number of bytes currently enqueued + * for transmission */ size_t bufs_size_max; /* Soft max threshold for bufs_size, * when this value is exceeded the app fd * is removed from the poll set until * bufs_size falls below the threshold again. 
*/ int poll_fd_cnt; - int64_t ts_last_fwd; /* For rate-limiter: timestamp of last forward */ + int64_t ts_last_fwd; /* For rate-limiter: timestamp of last forward */ }; -static int sockem_vset (sockem_t *skm, va_list ap); +static int sockem_vset(sockem_t *skm, va_list ap); /** * A microsecond monotonic clock */ -static __attribute__((unused)) __inline int64_t sockem_clock (void) { +static __attribute__((unused)) __inline int64_t sockem_clock(void) { #ifdef __APPLE__ /* No monotonic clock on Darwin */ struct timeval tv; @@ -181,14 +181,14 @@ static __attribute__((unused)) __inline int64_t sockem_clock (void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ((int64_t)ts.tv_sec * 1000000LLU) + - ((int64_t)ts.tv_nsec / 1000LLU); + ((int64_t)ts.tv_nsec / 1000LLU); #endif } /** * @brief Initialize libsockem once. */ -static void sockem_init (void) { +static void sockem_init(void) { mtx_init(&sockem_lock); sockem_conf_str = getenv("SOCKEM_CONF"); if (!sockem_conf_str) @@ -198,7 +198,7 @@ static void sockem_init (void) { sockem_conf_str); #ifdef LIBSOCKEM_PRELOAD sockem_orig_connect = dlsym(RTLD_NEXT, "connect"); - sockem_orig_close = dlsym(RTLD_NEXT, "close"); + sockem_orig_close = dlsym(RTLD_NEXT, "close"); #endif } @@ -207,7 +207,7 @@ static void sockem_init (void) { * @returns the maximum waittime in ms for poll(), at most 1000 ms. 
* @remark lock must be held */ -static int sockem_calc_waittime (sockem_t *skm, int64_t now) { +static int sockem_calc_waittime(sockem_t *skm, int64_t now) { const sockem_buf_t *sb; int64_t r; @@ -229,7 +229,7 @@ static int sockem_calc_waittime (sockem_t *skm, int64_t now) { /** * @brief Unlink and destroy a buffer */ -static void sockem_buf_destroy (sockem_t *skm, sockem_buf_t *sb) { +static void sockem_buf_destroy(sockem_t *skm, sockem_buf_t *sb) { skm->bufs_size -= sb->sb_size - sb->sb_of; TAILQ_REMOVE(&skm->bufs, sb, sb_link); free(sb); @@ -238,8 +238,8 @@ static void sockem_buf_destroy (sockem_t *skm, sockem_buf_t *sb) { /** * @brief Add delayed buffer to transmit. */ -static sockem_buf_t *sockem_buf_add (sockem_t *skm, - size_t size, const void *data) { +static sockem_buf_t * +sockem_buf_add(sockem_t *skm, size_t size, const void *data) { sockem_buf_t *sb; skm->bufs_size += size; @@ -253,10 +253,9 @@ static sockem_buf_t *sockem_buf_add (sockem_t *skm, sb->sb_of = 0; sb->sb_size = size; - sb->sb_data = (char *)(sb+1); + sb->sb_data = (char *)(sb + 1); sb->sb_at = sockem_clock() + - ((skm->use.delay + - (skm->use.jitter / 2)/*FIXME*/) * 1000); + ((skm->use.delay + (skm->use.jitter / 2) /*FIXME*/) * 1000); memcpy(sb->sb_data, data, size); TAILQ_INSERT_TAIL(&skm->bufs, sb, sb_link); @@ -270,7 +269,7 @@ static sockem_buf_t *sockem_buf_add (sockem_t *skm, * @remark lock must be held but will be released momentarily while * performing send syscall. 
*/ -static int sockem_fwd_bufs (sockem_t *skm, int ofd) { +static int sockem_fwd_bufs(sockem_t *skm, int ofd) { sockem_buf_t *sb; int64_t now = sockem_clock(); size_t to_write; @@ -278,7 +277,7 @@ static int sockem_fwd_bufs (sockem_t *skm, int ofd) { if (skm->use.direct) - to_write = 1024*1024*100; + to_write = 1024 * 1024 * 100; else if ((elapsed = now - skm->ts_last_fwd)) { /* Calculate how many bytes to send to adhere to rate-limit */ to_write = (size_t)((double)skm->use.tx_thruput * @@ -286,19 +285,18 @@ static int sockem_fwd_bufs (sockem_t *skm, int ofd) { } else return 0; - while (to_write > 0 && - (sb = TAILQ_FIRST(&skm->bufs)) && + while (to_write > 0 && (sb = TAILQ_FIRST(&skm->bufs)) && (skm->use.direct || sb->sb_at <= now)) { ssize_t r; size_t remain = sb->sb_size - sb->sb_of; - size_t wr = to_write < remain ? to_write : remain; + size_t wr = to_write < remain ? to_write : remain; if (wr == 0) break; mtx_unlock(&skm->lock); - r = send(ofd, sb->sb_data+sb->sb_of, wr, 0); + r = send(ofd, sb->sb_data + sb->sb_of, wr, 0); mtx_lock(&skm->lock); @@ -312,7 +310,7 @@ static int sockem_fwd_bufs (sockem_t *skm, int ofd) { skm->ts_last_fwd = now; sb->sb_of += r; - to_write -= r; + to_write -= r; if (sb->sb_of < sb->sb_size) break; @@ -335,7 +333,7 @@ static int sockem_fwd_bufs (sockem_t *skm, int ofd) { * * @returns the number of bytes forwarded, or -1 on error. */ -static int sockem_recv_fwd (sockem_t *skm, int ifd, int ofd, int direct) { +static int sockem_recv_fwd(sockem_t *skm, int ifd, int ofd, int direct) { ssize_t r, wr; r = recv(ifd, skm->recv_buf, skm->recv_bufsz, MSG_DONTWAIT); @@ -369,7 +367,7 @@ static int sockem_recv_fwd (sockem_t *skm, int ifd, int ofd, int direct) { * @remark Preserves caller's errno. * @remark lock must be held. 
*/ -static void sockem_close_all (sockem_t *skm) { +static void sockem_close_all(sockem_t *skm) { int serr = socket_errno(); if (skm->ls != -1) { @@ -392,7 +390,7 @@ static void sockem_close_all (sockem_t *skm) { * @brief Copy desired (app) config to internally use(d) configuration. * @remark lock must be held */ -static __inline void sockem_conf_use (sockem_t *skm) { +static __inline void sockem_conf_use(sockem_t *skm) { skm->use = skm->conf; /* Figure out if direct forward is to be used */ skm->use.direct = !(skm->use.delay || skm->use.jitter || @@ -402,9 +400,9 @@ static __inline void sockem_conf_use (sockem_t *skm) { /** * @brief sockem internal per-socket forwarder thread */ -static void *sockem_run (void *arg) { +static void *sockem_run(void *arg) { sockem_t *skm = arg; - int cs = -1; + int cs = -1; int ls; struct pollfd pfd[2]; @@ -416,7 +414,7 @@ static void *sockem_run (void *arg) { mtx_unlock(&skm->lock); skm->recv_bufsz = skm->use.recv_bufsz; - skm->recv_buf = malloc(skm->recv_bufsz); + skm->recv_buf = malloc(skm->recv_bufsz); /* Accept connection from sockfd in sockem_connect() */ cs = accept(ls, NULL, 0); @@ -426,15 +424,15 @@ static void *sockem_run (void *arg) { /* App socket was closed. 
*/ goto done; } - fprintf(stderr, "%% sockem: accept(%d) failed: %s\n", - ls, strerror(socket_errno())); + fprintf(stderr, "%% sockem: accept(%d) failed: %s\n", ls, + strerror(socket_errno())); mtx_unlock(&skm->lock); assert(cs != -1); } /* Set up poll (blocking IO) */ memset(pfd, 0, sizeof(pfd)); - pfd[1].fd = cs; + pfd[1].fd = cs; pfd[1].events = POLLIN; mtx_lock(&skm->lock); @@ -466,21 +464,19 @@ static void *sockem_run (void *arg) { } mtx_unlock(&skm->lock); - for (i = 0 ; r > 0 && i < 2 ; i++) { - if (pfd[i].revents & (POLLHUP|POLLERR)) { + for (i = 0; r > 0 && i < 2; i++) { + if (pfd[i].revents & (POLLHUP | POLLERR)) { skm->run = SOCKEM_TERM; } else if (pfd[i].revents & POLLIN) { if (sockem_recv_fwd( - skm, - pfd[i].fd, - pfd[i^1].fd, - /* direct mode for app socket - * without delay, and always for - * peer socket (receive channel) */ - i == 0 || - (skm->use.direct && - skm->bufs_size == 0)) == -1) { + skm, pfd[i].fd, pfd[i ^ 1].fd, + /* direct mode for app socket + * without delay, and always for + * peer socket (receive channel) */ + i == 0 || (skm->use.direct && + skm->bufs_size == 0)) == + -1) { skm->run = SOCKEM_TERM; break; } @@ -489,7 +485,7 @@ static void *sockem_run (void *arg) { mtx_lock(&skm->lock); } - done: +done: if (cs != -1) sockem_close0(cs); sockem_close_all(skm); @@ -506,8 +502,8 @@ static void *sockem_run (void *arg) { /** * @brief Connect socket \p s to \p addr */ -static int sockem_do_connect (int s, const struct sockaddr *addr, - socklen_t addrlen) { +static int +sockem_do_connect(int s, const struct sockaddr *addr, socklen_t addrlen) { int r; r = sockem_connect0(s, addr, addrlen); @@ -517,7 +513,7 @@ static int sockem_do_connect (int s, const struct sockaddr *addr, #ifdef _WIN32 && serr != WSAEWOULDBLOCK #endif - ) { + ) { #ifndef _WIN32 errno = serr; #endif @@ -529,12 +525,14 @@ static int sockem_do_connect (int s, const struct sockaddr *addr, } -sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, - socklen_t addrlen, 
...) { +sockem_t *sockem_connect(int sockfd, + const struct sockaddr *addr, + socklen_t addrlen, + ...) { sockem_t *skm; int ls, ps; - struct sockaddr_in6 sin6 = { .sin6_family = addr->sa_family }; - socklen_t addrlen2 = addrlen; + struct sockaddr_in6 sin6 = {.sin6_family = addr->sa_family}; + socklen_t addrlen2 = addrlen; va_list ap; pthread_once(&sockem_once, sockem_init); @@ -575,10 +573,10 @@ sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, } /* Create sockem handle */ - skm = calloc(1, sizeof(*skm)); - skm->as = sockfd; - skm->ls = ls; - skm->ps = ps; + skm = calloc(1, sizeof(*skm)); + skm->as = sockfd; + skm->ls = ls; + skm->ps = ps; skm->bufs_size_max = 16 * 1024 * 1024; /* 16kb of queue buffer */ TAILQ_INIT(&skm->bufs); mtx_init(&skm->lock); @@ -586,10 +584,10 @@ sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, /* Default config */ skm->conf.rx_thruput = 1 << 30; skm->conf.tx_thruput = 1 << 30; - skm->conf.delay = 0; - skm->conf.jitter = 0; - skm->conf.recv_bufsz = 1024*1024; - skm->conf.direct = 1; + skm->conf.delay = 0; + skm->conf.jitter = 0; + skm->conf.recv_bufsz = 1024 * 1024; + skm->conf.direct = 1; /* Apply passed configuration */ va_start(ap, addrlen); @@ -612,8 +610,8 @@ sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, mtx_unlock(&skm->lock); /* Connect application socket to listen socket */ - if (sockem_do_connect(sockfd, - (struct sockaddr *)&sin6, addrlen2) == -1) { + if (sockem_do_connect(sockfd, (struct sockaddr *)&sin6, addrlen2) == + -1) { sockem_close(skm); return NULL; } @@ -632,7 +630,7 @@ sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, /** * @brief Purge/drop all queued buffers */ -static void sockem_bufs_purge (sockem_t *skm) { +static void sockem_bufs_purge(sockem_t *skm) { sockem_buf_t *sb; while ((sb = TAILQ_FIRST(&skm->bufs))) @@ -640,7 +638,7 @@ static void sockem_bufs_purge (sockem_t *skm) { } -void sockem_close (sockem_t *skm) { +void sockem_close(sockem_t 
*skm) { mtx_lock(&sockem_lock); mtx_lock(&skm->lock); if (skm->linked) @@ -649,8 +647,7 @@ void sockem_close (sockem_t *skm) { /* If thread is running let it close the sockets * to avoid race condition. */ - if (skm->run == SOCKEM_START || - skm->run == SOCKEM_RUN) + if (skm->run == SOCKEM_START || skm->run == SOCKEM_RUN) skm->run = SOCKEM_TERM; else sockem_close_all(skm); @@ -673,12 +670,10 @@ void sockem_close (sockem_t *skm) { * @remark lock must be held. * @returns 0 on success or -1 if key is unknown */ -static int sockem_set0 (sockem_t *skm, const char *key, int val) { - if (!strcmp(key, "rx.thruput") || - !strcmp(key, "rx.throughput")) +static int sockem_set0(sockem_t *skm, const char *key, int val) { + if (!strcmp(key, "rx.thruput") || !strcmp(key, "rx.throughput")) skm->conf.rx_thruput = val; - else if (!strcmp(key, "tx.thruput") || - !strcmp(key, "tx.throughput")) + else if (!strcmp(key, "tx.thruput") || !strcmp(key, "tx.throughput")) skm->conf.tx_thruput = val; else if (!strcmp(key, "delay")) skm->conf.delay = val; @@ -718,7 +713,7 @@ static int sockem_set0 (sockem_t *skm, const char *key, int val) { /** * @brief Set sockem config parameters */ -static int sockem_vset (sockem_t *skm, va_list ap) { +static int sockem_vset(sockem_t *skm, va_list ap) { const char *key; int val; @@ -735,7 +730,7 @@ static int sockem_vset (sockem_t *skm, va_list ap) { return 0; } -int sockem_set (sockem_t *skm, ...) { +int sockem_set(sockem_t *skm, ...) { va_list ap; int r; @@ -747,15 +742,15 @@ int sockem_set (sockem_t *skm, ...) 
{ } -sockem_t *sockem_find (int sockfd) { +sockem_t *sockem_find(int sockfd) { sockem_t *skm; pthread_once(&sockem_once, sockem_init); mtx_lock(&sockem_lock); LIST_FOREACH(skm, &sockems, link) - if (skm->as == sockfd) - break; + if (skm->as == sockfd) + break; mtx_unlock(&sockem_lock); return skm; @@ -773,7 +768,7 @@ sockem_t *sockem_find (int sockfd) { /** * @brief connect(2) overload */ -int connect (int sockfd, const struct sockaddr *addr, socklen_t addrlen) { +int connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { sockem_t *skm; pthread_once(&sockem_once, sockem_init); @@ -788,7 +783,7 @@ int connect (int sockfd, const struct sockaddr *addr, socklen_t addrlen) { /** * @brief close(2) overload */ -int close (int fd) { +int close(int fd) { sockem_t *skm; pthread_once(&sockem_once, sockem_init); diff --git a/tests/sockem.h b/tests/sockem.h index b4e21d95c1..8a2ddcd875 100644 --- a/tests/sockem.h +++ b/tests/sockem.h @@ -3,24 +3,24 @@ * * Copyright (c) 2016, Magnus Edenhill, Andreas Smas * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -44,13 +44,13 @@ typedef struct sockem_s sockem_t; * * @returns a sockem handle on success or NULL on failure. */ -sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, - socklen_t addrlen, ...); +sockem_t * +sockem_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen, ...); /** * @brief Close the connection and destroy the sockem. */ -void sockem_close (sockem_t *skm); +void sockem_close(sockem_t *skm); @@ -72,7 +72,7 @@ void sockem_close (sockem_t *skm); * * @returns 0 on success or -1 if a key was unknown. 
*/ -int sockem_set (sockem_t *skm, ...); +int sockem_set(sockem_t *skm, ...); @@ -80,6 +80,6 @@ int sockem_set (sockem_t *skm, ...); * @brief Find sockem by (application) socket. * @remark Application is responsible for locking. */ -sockem_t *sockem_find (int sockfd); +sockem_t *sockem_find(int sockfd); #endif /* _RD_SOCKEM_H_ */ diff --git a/tests/sockem_ctrl.c b/tests/sockem_ctrl.c index 276494c611..c3e8ce92ed 100644 --- a/tests/sockem_ctrl.c +++ b/tests/sockem_ctrl.c @@ -36,7 +36,7 @@ #include "sockem.h" #include "sockem_ctrl.h" -static int sockem_ctrl_thrd_main (void *arg) { +static int sockem_ctrl_thrd_main(void *arg) { sockem_ctrl_t *ctrl = (sockem_ctrl_t *)arg; int64_t next_wakeup = 0; mtx_lock(&ctrl->lock); @@ -62,7 +62,7 @@ static int sockem_ctrl_thrd_main (void *arg) { /* Serve expired commands */ next_wakeup = 0; - now = test_clock(); + now = test_clock(); while ((cmd = TAILQ_FIRST(&ctrl->cmds))) { if (!ctrl->term) { if (cmd->ts_at > now) { @@ -70,12 +70,12 @@ static int sockem_ctrl_thrd_main (void *arg) { break; } - printf(_C_CYA "## %s: " - "sockem: setting socket delay to %d\n" - _C_CLR, + printf(_C_CYA + "## %s: " + "sockem: setting socket delay to " + "%d\n" _C_CLR, __FILE__, cmd->delay); - test_socket_sockem_set_all("delay", - cmd->delay); + test_socket_sockem_set_all("delay", cmd->delay); } TAILQ_REMOVE(&ctrl->cmds, cmd, link); free(cmd); @@ -91,14 +91,14 @@ static int sockem_ctrl_thrd_main (void *arg) { /** * @brief Set socket delay to kick in after \p after ms */ -void sockem_ctrl_set_delay (sockem_ctrl_t *ctrl, int after, int delay) { +void sockem_ctrl_set_delay(sockem_ctrl_t *ctrl, int after, int delay) { struct sockem_cmd *cmd; int wait_seq; TEST_SAY("Set delay to %dms (after %dms)\n", delay, after); - cmd = calloc(1, sizeof(*cmd)); - cmd->ts_at = test_clock() + (after*1000); + cmd = calloc(1, sizeof(*cmd)); + cmd->ts_at = test_clock() + (after * 1000); cmd->delay = delay; mtx_lock(&ctrl->lock); @@ -115,7 +115,7 @@ void sockem_ctrl_set_delay 
(sockem_ctrl_t *ctrl, int after, int delay) { } -void sockem_ctrl_init (sockem_ctrl_t *ctrl) { +void sockem_ctrl_init(sockem_ctrl_t *ctrl) { memset(ctrl, 0, sizeof(*ctrl)); mtx_init(&ctrl->lock, mtx_plain); cnd_init(&ctrl->cnd); @@ -123,13 +123,13 @@ void sockem_ctrl_init (sockem_ctrl_t *ctrl) { ctrl->test = test_curr; mtx_lock(&ctrl->lock); - if (thrd_create(&ctrl->thrd, sockem_ctrl_thrd_main, - ctrl) != thrd_success) + if (thrd_create(&ctrl->thrd, sockem_ctrl_thrd_main, ctrl) != + thrd_success) TEST_FAIL("Failed to create sockem ctrl thread"); mtx_unlock(&ctrl->lock); } -void sockem_ctrl_term (sockem_ctrl_t *ctrl) { +void sockem_ctrl_term(sockem_ctrl_t *ctrl) { int res; /* Join controller thread */ diff --git a/tests/sockem_ctrl.h b/tests/sockem_ctrl.h index 1005e149e1..d33c87fca0 100644 --- a/tests/sockem_ctrl.h +++ b/tests/sockem_ctrl.h @@ -33,29 +33,29 @@ struct sockem_cmd { TAILQ_ENTRY(sockem_cmd) link; - int64_t ts_at; /**< to ctrl thread: at this time, set delay*/ - int delay; + int64_t ts_at; /**< to ctrl thread: at this time, set delay*/ + int delay; }; typedef struct sockem_ctrl_s { - mtx_t lock; - cnd_t cnd; - thrd_t thrd; + mtx_t lock; + cnd_t cnd; + thrd_t thrd; - int cmd_seq; /**< Command sequence id */ - int cmd_ack; /**< Last acked (seen) command sequence id */ + int cmd_seq; /**< Command sequence id */ + int cmd_ack; /**< Last acked (seen) command sequence id */ TAILQ_HEAD(, sockem_cmd) cmds; /**< Queue of commands. 
*/ - int term; /**< Terminate */ + int term; /**< Terminate */ struct test *test; } sockem_ctrl_t; -void sockem_ctrl_set_delay (sockem_ctrl_t *ctrl, int after, int delay); -void sockem_ctrl_init (sockem_ctrl_t *ctrl); -void sockem_ctrl_term (sockem_ctrl_t *ctrl); +void sockem_ctrl_set_delay(sockem_ctrl_t *ctrl, int after, int delay); +void sockem_ctrl_init(sockem_ctrl_t *ctrl); +void sockem_ctrl_term(sockem_ctrl_t *ctrl); #endif /* _SOCKEM_CTRL_H_ */ diff --git a/tests/test.c b/tests/test.c index 9aad5a4a5c..20b6d06710 100644 --- a/tests/test.c +++ b/tests/test.c @@ -3,24 +3,24 @@ * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -45,43 +45,43 @@ #include "rdkafka.h" int test_level = 2; -int test_seed = 0; +int test_seed = 0; -char test_mode[64] = "bare"; -char test_scenario[64] = "default"; +char test_mode[64] = "bare"; +char test_scenario[64] = "default"; static volatile sig_atomic_t test_exit = 0; -static char test_topic_prefix[128] = "rdkafkatest"; -static int test_topic_random = 0; -int tests_running_cnt = 0; -int test_concurrent_max = 5; -int test_assert_on_fail = 0; -double test_timeout_multiplier = 1.0; -static char *test_sql_cmd = NULL; -int test_session_timeout_ms = 6000; -int test_broker_version; +static char test_topic_prefix[128] = "rdkafkatest"; +static int test_topic_random = 0; +int tests_running_cnt = 0; +int test_concurrent_max = 5; +int test_assert_on_fail = 0; +double test_timeout_multiplier = 1.0; +static char *test_sql_cmd = NULL; +int test_session_timeout_ms = 6000; +int test_broker_version; static const char *test_broker_version_str = "2.4.0.0"; -int test_flags = 0; -int test_neg_flags = TEST_F_KNOWN_ISSUE; +int test_flags = 0; +int test_neg_flags = TEST_F_KNOWN_ISSUE; /* run delete-test-topics.sh between each test (when concurrent_max = 1) */ -static int test_delete_topics_between = 0; -static const char *test_git_version = "HEAD"; -static const char *test_sockem_conf = ""; -int test_on_ci = 0; /* Tests are being run on CI, be more forgiving - * with regards to timeouts, etc. */ -int test_quick = 0; /** Run tests quickly */ -int test_idempotent_producer = 0; -int test_rusage = 0; /**< Check resource usage */ +static int test_delete_topics_between = 0; +static const char *test_git_version = "HEAD"; +static const char *test_sockem_conf = ""; +int test_on_ci = 0; /* Tests are being run on CI, be more forgiving + * with regards to timeouts, etc. */ +int test_quick = 0; /** Run tests quickly */ +int test_idempotent_producer = 0; +int test_rusage = 0; /**< Check resource usage */ /**< CPU speed calibration for rusage threshold checks. 
* >1.0: CPU is slower than base line system, * <1.0: CPU is faster than base line system. */ -double test_rusage_cpu_calibration = 1.0; -static const char *tests_to_run = NULL; /* all */ +double test_rusage_cpu_calibration = 1.0; +static const char *tests_to_run = NULL; /* all */ static const char *subtests_to_run = NULL; /* all */ -static const char *tests_to_skip = NULL; /* none */ -int test_write_report = 0; /**< Write test report file */ +static const char *tests_to_skip = NULL; /* none */ +int test_write_report = 0; /**< Write test report file */ static int show_summary = 1; -static int test_summary (int do_lock); +static int test_summary(int do_lock); /** * Protects shared state, such as tests[] @@ -90,19 +90,14 @@ mtx_t test_mtx; cnd_t test_cnd; static const char *test_states[] = { - "DNS", - "SKIPPED", - "RUNNING", - "PASSED", - "FAILED", + "DNS", "SKIPPED", "RUNNING", "PASSED", "FAILED", }; -#define _TEST_DECL(NAME) \ - extern int main_ ## NAME (int, char **) -#define _TEST(NAME,FLAGS,...) \ - { .name = # NAME, .mainfunc = main_ ## NAME, .flags = FLAGS, __VA_ARGS__ } +#define _TEST_DECL(NAME) extern int main_##NAME(int, char **) +#define _TEST(NAME, FLAGS, ...) \ + { .name = #NAME, .mainfunc = main_##NAME, .flags = FLAGS, __VA_ARGS__ } /** @@ -262,200 +257,225 @@ _TEST_DECL(8000_idle); * _TEST(00...., ..., * _THRES(.ucpu = 15.0)), <-- Max 15% User CPU usage */ -#define _THRES(...) .rusage_thres = { __VA_ARGS__ } +#define _THRES(...) .rusage_thres = {__VA_ARGS__} /** * Define all tests here */ struct test tests[] = { - /* Special MAIN test to hold over-all timings, etc. */ - { .name = "
", .flags = TEST_F_LOCAL }, - _TEST(0000_unittests, TEST_F_LOCAL, - /* The msgq insert order tests are heavy on - * user CPU (memory scan), RSS, and - * system CPU (lots of allocations -> madvise(2)). */ - _THRES(.ucpu = 100.0, .scpu = 20.0, .rss = 900.0)), - _TEST(0001_multiobj, 0), - _TEST(0002_unkpart, 0), - _TEST(0003_msgmaxsize, 0), - _TEST(0004_conf, TEST_F_LOCAL), - _TEST(0005_order, 0), - _TEST(0006_symbols, TEST_F_LOCAL), - _TEST(0007_autotopic, 0), - _TEST(0008_reqacks, 0), - _TEST(0009_mock_cluster, TEST_F_LOCAL, - /* Mock cluster requires MsgVersion 2 */ - TEST_BRKVER(0,11,0,0)), - _TEST(0011_produce_batch, 0, - /* Produces a lot of messages */ - _THRES(.ucpu = 40.0, .scpu = 8.0)), - _TEST(0012_produce_consume, 0), - _TEST(0013_null_msgs, 0), - _TEST(0014_reconsume_191, 0), - _TEST(0015_offsets_seek, 0), - _TEST(0016_client_swname, 0), - _TEST(0017_compression, 0), - _TEST(0018_cgrp_term, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0019_list_groups, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0020_destroy_hang, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0021_rkt_destroy, 0), - _TEST(0022_consume_batch, 0), - _TEST(0025_timers, TEST_F_LOCAL), - _TEST(0026_consume_pause, TEST_F_KNOWN_ISSUE, TEST_BRKVER(0,9,0,0), - .extra = "Fragile test due to #2190"), - _TEST(0028_long_topicnames, TEST_F_KNOWN_ISSUE, TEST_BRKVER(0,9,0,0), - .extra = "https://github.com/edenhill/librdkafka/issues/529"), - _TEST(0029_assign_offset, 0), - _TEST(0030_offset_commit, 0, TEST_BRKVER(0,9,0,0), - /* Loops over committed() until timeout */ - _THRES(.ucpu = 10.0, .scpu = 5.0)), - _TEST(0031_get_offsets, 0), - _TEST(0033_regex_subscribe, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0033_regex_subscribe_local, TEST_F_LOCAL), - _TEST(0034_offset_reset, 0), - _TEST(0034_offset_reset_mock, TEST_F_LOCAL), - _TEST(0035_api_version, 0), - _TEST(0036_partial_fetch, 0), - _TEST(0037_destroy_hang_local, TEST_F_LOCAL), - _TEST(0038_performance, 0, - /* Produces and consumes a lot of messages */ - _THRES(.ucpu = 150.0, .scpu = 10)), 
- _TEST(0039_event_dr, 0), - _TEST(0039_event_log, TEST_F_LOCAL), - _TEST(0039_event, TEST_F_LOCAL), - _TEST(0040_io_event, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0041_fetch_max_bytes, 0, - /* Re-fetches large messages multiple times */ - _THRES(.ucpu = 20.0, .scpu = 10.0)), - _TEST(0042_many_topics, 0), - _TEST(0043_no_connection, TEST_F_LOCAL), - _TEST(0044_partition_cnt, 0, TEST_BRKVER(1,0,0,0), - /* Produces a lot of messages */ - _THRES(.ucpu = 30.0)), - _TEST(0045_subscribe_update, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0045_subscribe_update_topic_remove, 0, - TEST_BRKVER(0,9,0,0), - .scenario = "noautocreate"), - _TEST(0045_subscribe_update_non_exist_and_partchange, 0, - TEST_BRKVER(0,9,0,0), - .scenario = "noautocreate"), - _TEST(0045_subscribe_update_mock, TEST_F_LOCAL), - _TEST(0046_rkt_cache, TEST_F_LOCAL), - _TEST(0047_partial_buf_tmout, TEST_F_KNOWN_ISSUE), - _TEST(0048_partitioner, 0, - /* Produces many small messages */ - _THRES(.ucpu = 10.0, .scpu = 5.0)), + /* Special MAIN test to hold over-all timings, etc. */ + {.name = "
", .flags = TEST_F_LOCAL}, + _TEST(0000_unittests, + TEST_F_LOCAL, + /* The msgq insert order tests are heavy on + * user CPU (memory scan), RSS, and + * system CPU (lots of allocations -> madvise(2)). */ + _THRES(.ucpu = 100.0, .scpu = 20.0, .rss = 900.0)), + _TEST(0001_multiobj, 0), + _TEST(0002_unkpart, 0), + _TEST(0003_msgmaxsize, 0), + _TEST(0004_conf, TEST_F_LOCAL), + _TEST(0005_order, 0), + _TEST(0006_symbols, TEST_F_LOCAL), + _TEST(0007_autotopic, 0), + _TEST(0008_reqacks, 0), + _TEST(0009_mock_cluster, + TEST_F_LOCAL, + /* Mock cluster requires MsgVersion 2 */ + TEST_BRKVER(0, 11, 0, 0)), + _TEST(0011_produce_batch, + 0, + /* Produces a lot of messages */ + _THRES(.ucpu = 40.0, .scpu = 8.0)), + _TEST(0012_produce_consume, 0), + _TEST(0013_null_msgs, 0), + _TEST(0014_reconsume_191, 0), + _TEST(0015_offsets_seek, 0), + _TEST(0016_client_swname, 0), + _TEST(0017_compression, 0), + _TEST(0018_cgrp_term, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0019_list_groups, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0020_destroy_hang, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0021_rkt_destroy, 0), + _TEST(0022_consume_batch, 0), + _TEST(0025_timers, TEST_F_LOCAL), + _TEST(0026_consume_pause, + TEST_F_KNOWN_ISSUE, + TEST_BRKVER(0, 9, 0, 0), + .extra = "Fragile test due to #2190"), + _TEST(0028_long_topicnames, + TEST_F_KNOWN_ISSUE, + TEST_BRKVER(0, 9, 0, 0), + .extra = "https://github.com/edenhill/librdkafka/issues/529"), + _TEST(0029_assign_offset, 0), + _TEST(0030_offset_commit, + 0, + TEST_BRKVER(0, 9, 0, 0), + /* Loops over committed() until timeout */ + _THRES(.ucpu = 10.0, .scpu = 5.0)), + _TEST(0031_get_offsets, 0), + _TEST(0033_regex_subscribe, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0033_regex_subscribe_local, TEST_F_LOCAL), + _TEST(0034_offset_reset, 0), + _TEST(0034_offset_reset_mock, TEST_F_LOCAL), + _TEST(0035_api_version, 0), + _TEST(0036_partial_fetch, 0), + _TEST(0037_destroy_hang_local, TEST_F_LOCAL), + _TEST(0038_performance, + 0, + /* Produces and consumes a lot of 
messages */ + _THRES(.ucpu = 150.0, .scpu = 10)), + _TEST(0039_event_dr, 0), + _TEST(0039_event_log, TEST_F_LOCAL), + _TEST(0039_event, TEST_F_LOCAL), + _TEST(0040_io_event, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0041_fetch_max_bytes, + 0, + /* Re-fetches large messages multiple times */ + _THRES(.ucpu = 20.0, .scpu = 10.0)), + _TEST(0042_many_topics, 0), + _TEST(0043_no_connection, TEST_F_LOCAL), + _TEST(0044_partition_cnt, + 0, + TEST_BRKVER(1, 0, 0, 0), + /* Produces a lot of messages */ + _THRES(.ucpu = 30.0)), + _TEST(0045_subscribe_update, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0045_subscribe_update_topic_remove, + 0, + TEST_BRKVER(0, 9, 0, 0), + .scenario = "noautocreate"), + _TEST(0045_subscribe_update_non_exist_and_partchange, + 0, + TEST_BRKVER(0, 9, 0, 0), + .scenario = "noautocreate"), + _TEST(0045_subscribe_update_mock, TEST_F_LOCAL), + _TEST(0046_rkt_cache, TEST_F_LOCAL), + _TEST(0047_partial_buf_tmout, TEST_F_KNOWN_ISSUE), + _TEST(0048_partitioner, + 0, + /* Produces many small messages */ + _THRES(.ucpu = 10.0, .scpu = 5.0)), #if WITH_SOCKEM - _TEST(0049_consume_conn_close, TEST_F_SOCKEM, TEST_BRKVER(0,9,0,0)), + _TEST(0049_consume_conn_close, TEST_F_SOCKEM, TEST_BRKVER(0, 9, 0, 0)), #endif - _TEST(0050_subscribe_adds, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0051_assign_adds, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0052_msg_timestamps, 0, TEST_BRKVER(0,10,0,0)), - _TEST(0053_stats_timing, TEST_F_LOCAL), - _TEST(0053_stats, 0), - _TEST(0054_offset_time, 0, TEST_BRKVER(0,10,1,0)), - _TEST(0055_producer_latency, TEST_F_KNOWN_ISSUE_WIN32), - _TEST(0056_balanced_group_mt, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0057_invalid_topic, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0058_log, TEST_F_LOCAL), - _TEST(0059_bsearch, 0, TEST_BRKVER(0,10,0,0)), - _TEST(0060_op_prio, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0061_consumer_lag, 0), - _TEST(0062_stats_event, TEST_F_LOCAL), - _TEST(0063_clusterid, 0, TEST_BRKVER(0,10,1,0)), - _TEST(0064_interceptors, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0065_yield, 
0), - _TEST(0066_plugins, - TEST_F_LOCAL|TEST_F_KNOWN_ISSUE_WIN32|TEST_F_KNOWN_ISSUE_OSX, - .extra = "dynamic loading of tests might not be fixed for this platform"), - _TEST(0067_empty_topic, 0), + _TEST(0050_subscribe_adds, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0051_assign_adds, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0052_msg_timestamps, 0, TEST_BRKVER(0, 10, 0, 0)), + _TEST(0053_stats_timing, TEST_F_LOCAL), + _TEST(0053_stats, 0), + _TEST(0054_offset_time, 0, TEST_BRKVER(0, 10, 1, 0)), + _TEST(0055_producer_latency, TEST_F_KNOWN_ISSUE_WIN32), + _TEST(0056_balanced_group_mt, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0057_invalid_topic, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0058_log, TEST_F_LOCAL), + _TEST(0059_bsearch, 0, TEST_BRKVER(0, 10, 0, 0)), + _TEST(0060_op_prio, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0061_consumer_lag, 0), + _TEST(0062_stats_event, TEST_F_LOCAL), + _TEST(0063_clusterid, 0, TEST_BRKVER(0, 10, 1, 0)), + _TEST(0064_interceptors, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0065_yield, 0), + _TEST(0066_plugins, + TEST_F_LOCAL | TEST_F_KNOWN_ISSUE_WIN32 | TEST_F_KNOWN_ISSUE_OSX, + .extra = + "dynamic loading of tests might not be fixed for this platform"), + _TEST(0067_empty_topic, 0), #if WITH_SOCKEM - _TEST(0068_produce_timeout, TEST_F_SOCKEM), + _TEST(0068_produce_timeout, TEST_F_SOCKEM), #endif - _TEST(0069_consumer_add_parts, TEST_F_KNOWN_ISSUE_WIN32, - TEST_BRKVER(1,0,0,0)), - _TEST(0070_null_empty, 0), - _TEST(0072_headers_ut, TEST_F_LOCAL), - _TEST(0073_headers, 0, TEST_BRKVER(0,11,0,0)), - _TEST(0074_producev, TEST_F_LOCAL), + _TEST(0069_consumer_add_parts, + TEST_F_KNOWN_ISSUE_WIN32, + TEST_BRKVER(1, 0, 0, 0)), + _TEST(0070_null_empty, 0), + _TEST(0072_headers_ut, TEST_F_LOCAL), + _TEST(0073_headers, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0074_producev, TEST_F_LOCAL), #if WITH_SOCKEM - _TEST(0075_retry, TEST_F_SOCKEM), + _TEST(0075_retry, TEST_F_SOCKEM), #endif - _TEST(0076_produce_retry, TEST_F_SOCKEM), - _TEST(0077_compaction, 0, - /* The test itself 
requires message headers */ - TEST_BRKVER(0,11,0,0)), - _TEST(0078_c_from_cpp, TEST_F_LOCAL), - _TEST(0079_fork, TEST_F_LOCAL|TEST_F_KNOWN_ISSUE, - .extra = "using a fork():ed rd_kafka_t is not supported and will " - "most likely hang"), - _TEST(0080_admin_ut, TEST_F_LOCAL), - _TEST(0081_admin, 0, TEST_BRKVER(0,10,2,0)), - _TEST(0082_fetch_max_bytes, 0, TEST_BRKVER(0,10,1,0)), - _TEST(0083_cb_event, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0084_destroy_flags_local, TEST_F_LOCAL), - _TEST(0084_destroy_flags, 0), - _TEST(0085_headers, 0, TEST_BRKVER(0,11,0,0)), - _TEST(0086_purge_local, TEST_F_LOCAL), - _TEST(0086_purge_remote, 0), + _TEST(0076_produce_retry, TEST_F_SOCKEM), + _TEST(0077_compaction, + 0, + /* The test itself requires message headers */ + TEST_BRKVER(0, 11, 0, 0)), + _TEST(0078_c_from_cpp, TEST_F_LOCAL), + _TEST(0079_fork, + TEST_F_LOCAL | TEST_F_KNOWN_ISSUE, + .extra = "using a fork():ed rd_kafka_t is not supported and will " + "most likely hang"), + _TEST(0080_admin_ut, TEST_F_LOCAL), + _TEST(0081_admin, 0, TEST_BRKVER(0, 10, 2, 0)), + _TEST(0082_fetch_max_bytes, 0, TEST_BRKVER(0, 10, 1, 0)), + _TEST(0083_cb_event, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0084_destroy_flags_local, TEST_F_LOCAL), + _TEST(0084_destroy_flags, 0), + _TEST(0085_headers, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0086_purge_local, TEST_F_LOCAL), + _TEST(0086_purge_remote, 0), #if WITH_SOCKEM - _TEST(0088_produce_metadata_timeout, TEST_F_SOCKEM), + _TEST(0088_produce_metadata_timeout, TEST_F_SOCKEM), #endif - _TEST(0089_max_poll_interval, 0, TEST_BRKVER(0,10,1,0)), - _TEST(0090_idempotence, 0, TEST_BRKVER(0,11,0,0)), - _TEST(0091_max_poll_interval_timeout, 0, TEST_BRKVER(0,10,1,0)), - _TEST(0092_mixed_msgver, 0, TEST_BRKVER(0,11,0,0)), - _TEST(0093_holb_consumer, 0, TEST_BRKVER(0,10,1,0)), + _TEST(0089_max_poll_interval, 0, TEST_BRKVER(0, 10, 1, 0)), + _TEST(0090_idempotence, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0091_max_poll_interval_timeout, 0, TEST_BRKVER(0, 10, 1, 0)), + 
_TEST(0092_mixed_msgver, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0093_holb_consumer, 0, TEST_BRKVER(0, 10, 1, 0)), #if WITH_SOCKEM - _TEST(0094_idempotence_msg_timeout, TEST_F_SOCKEM, - TEST_BRKVER(0,11,0,0)), + _TEST(0094_idempotence_msg_timeout, + TEST_F_SOCKEM, + TEST_BRKVER(0, 11, 0, 0)), #endif - _TEST(0095_all_brokers_down, TEST_F_LOCAL), - _TEST(0097_ssl_verify, 0), - _TEST(0097_ssl_verify_local, TEST_F_LOCAL), - _TEST(0098_consumer_txn, 0, TEST_BRKVER(0,11,0,0)), - _TEST(0099_commit_metadata, 0), - _TEST(0100_thread_interceptors, TEST_F_LOCAL), - _TEST(0101_fetch_from_follower, 0, TEST_BRKVER(2,4,0,0)), - _TEST(0102_static_group_rebalance, 0, - TEST_BRKVER(2,3,0,0)), - _TEST(0103_transactions_local, TEST_F_LOCAL), - _TEST(0103_transactions, 0, TEST_BRKVER(0, 11, 0, 0), - .scenario = "default,ak23"), - _TEST(0104_fetch_from_follower_mock, TEST_F_LOCAL, - TEST_BRKVER(2,4,0,0)), - _TEST(0105_transactions_mock, TEST_F_LOCAL, TEST_BRKVER(0,11,0,0)), - _TEST(0106_cgrp_sess_timeout, TEST_F_LOCAL, TEST_BRKVER(0,11,0,0)), - _TEST(0107_topic_recreate, 0, TEST_BRKVER_TOPIC_ADMINAPI, - .scenario = "noautocreate"), - _TEST(0109_auto_create_topics, 0), - _TEST(0110_batch_size, 0), - _TEST(0111_delay_create_topics, 0, TEST_BRKVER_TOPIC_ADMINAPI, - .scenario = "noautocreate"), - _TEST(0112_assign_unknown_part, 0), - _TEST(0113_cooperative_rebalance_local, TEST_F_LOCAL, - TEST_BRKVER(2,4,0,0)), - _TEST(0113_cooperative_rebalance, 0, TEST_BRKVER(2,4,0,0)), - _TEST(0114_sticky_partitioning, 0), - _TEST(0115_producer_auth, 0, TEST_BRKVER(2,1,0,0)), - _TEST(0116_kafkaconsumer_close, TEST_F_LOCAL), - _TEST(0117_mock_errors, TEST_F_LOCAL), - _TEST(0118_commit_rebalance, 0), - _TEST(0119_consumer_auth, 0, TEST_BRKVER(2,1,0,0)), - _TEST(0120_asymmetric_subscription, TEST_F_LOCAL), - _TEST(0121_clusterid, TEST_F_LOCAL), - _TEST(0122_buffer_cleaning_after_rebalance, 0, TEST_BRKVER(2,4,0,0)), - _TEST(0123_connections_max_idle, 0), - _TEST(0124_openssl_invalid_engine, TEST_F_LOCAL), - 
_TEST(0125_immediate_flush, 0), - _TEST(0126_oauthbearer_oidc, 0, TEST_BRKVER(3,0,0,0)), - _TEST(0128_sasl_callback_queue, TEST_F_LOCAL, TEST_BRKVER(2,0,0,0)), - - /* Manual tests */ - _TEST(8000_idle, TEST_F_MANUAL), - - { NULL } -}; + _TEST(0095_all_brokers_down, TEST_F_LOCAL), + _TEST(0097_ssl_verify, 0), + _TEST(0097_ssl_verify_local, TEST_F_LOCAL), + _TEST(0098_consumer_txn, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0099_commit_metadata, 0), + _TEST(0100_thread_interceptors, TEST_F_LOCAL), + _TEST(0101_fetch_from_follower, 0, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0102_static_group_rebalance, 0, TEST_BRKVER(2, 3, 0, 0)), + _TEST(0103_transactions_local, TEST_F_LOCAL), + _TEST(0103_transactions, + 0, + TEST_BRKVER(0, 11, 0, 0), + .scenario = "default,ak23"), + _TEST(0104_fetch_from_follower_mock, TEST_F_LOCAL, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0105_transactions_mock, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0106_cgrp_sess_timeout, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0107_topic_recreate, + 0, + TEST_BRKVER_TOPIC_ADMINAPI, + .scenario = "noautocreate"), + _TEST(0109_auto_create_topics, 0), + _TEST(0110_batch_size, 0), + _TEST(0111_delay_create_topics, + 0, + TEST_BRKVER_TOPIC_ADMINAPI, + .scenario = "noautocreate"), + _TEST(0112_assign_unknown_part, 0), + _TEST(0113_cooperative_rebalance_local, + TEST_F_LOCAL, + TEST_BRKVER(2, 4, 0, 0)), + _TEST(0113_cooperative_rebalance, 0, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0114_sticky_partitioning, 0), + _TEST(0115_producer_auth, 0, TEST_BRKVER(2, 1, 0, 0)), + _TEST(0116_kafkaconsumer_close, TEST_F_LOCAL), + _TEST(0117_mock_errors, TEST_F_LOCAL), + _TEST(0118_commit_rebalance, 0), + _TEST(0119_consumer_auth, 0, TEST_BRKVER(2, 1, 0, 0)), + _TEST(0120_asymmetric_subscription, TEST_F_LOCAL), + _TEST(0121_clusterid, TEST_F_LOCAL), + _TEST(0122_buffer_cleaning_after_rebalance, 0, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0123_connections_max_idle, 0), + _TEST(0124_openssl_invalid_engine, TEST_F_LOCAL), + 
_TEST(0125_immediate_flush, 0), + _TEST(0126_oauthbearer_oidc, 0, TEST_BRKVER(3, 0, 0, 0)), + _TEST(0128_sasl_callback_queue, TEST_F_LOCAL, TEST_BRKVER(2, 0, 0, 0)), + + /* Manual tests */ + _TEST(8000_idle, TEST_F_MANUAL), + + {NULL}}; RD_TLS struct test *test_curr = &tests[0]; @@ -466,14 +486,14 @@ RD_TLS struct test *test_curr = &tests[0]; /** * Socket network emulation with sockem */ - -static void test_socket_add (struct test *test, sockem_t *skm) { + +static void test_socket_add(struct test *test, sockem_t *skm) { TEST_LOCK(); rd_list_add(&test->sockets, skm); TEST_UNLOCK(); } -static void test_socket_del (struct test *test, sockem_t *skm, int do_lock) { +static void test_socket_del(struct test *test, sockem_t *skm, int do_lock) { if (do_lock) TEST_LOCK(); /* Best effort, skm might not have been added if connect_cb failed */ @@ -482,7 +502,7 @@ static void test_socket_del (struct test *test, sockem_t *skm, int do_lock) { TEST_UNLOCK(); } -int test_socket_sockem_set_all (const char *key, int val) { +int test_socket_sockem_set_all(const char *key, int val) { int i; sockem_t *skm; int cnt = 0; @@ -503,7 +523,7 @@ int test_socket_sockem_set_all (const char *key, int val) { return cnt; } -void test_socket_sockem_set (int s, const char *key, int value) { +void test_socket_sockem_set(int s, const char *key, int value) { sockem_t *skm; TEST_LOCK(); @@ -513,7 +533,7 @@ void test_socket_sockem_set (int s, const char *key, int value) { TEST_UNLOCK(); } -void test_socket_close_all (struct test *test, int reinit) { +void test_socket_close_all(struct test *test, int reinit) { TEST_LOCK(); rd_list_destroy(&test->sockets); if (reinit) @@ -522,8 +542,11 @@ void test_socket_close_all (struct test *test, int reinit) { } -static int test_connect_cb (int s, const struct sockaddr *addr, - int addrlen, const char *id, void *opaque) { +static int test_connect_cb(int s, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque) { struct test *test = opaque; 
sockem_t *skm; int r; @@ -543,7 +566,7 @@ static int test_connect_cb (int s, const struct sockaddr *addr, return 0; } -static int test_closesocket_cb (int s, void *opaque) { +static int test_closesocket_cb(int s, void *opaque) { struct test *test = opaque; sockem_t *skm; @@ -552,7 +575,7 @@ static int test_closesocket_cb (int s, void *opaque) { if (skm) { /* Close sockem's sockets */ sockem_close(skm); - test_socket_del(test, skm, 0/*nolock*/); + test_socket_del(test, skm, 0 /*nolock*/); } TEST_UNLOCK(); @@ -567,24 +590,26 @@ static int test_closesocket_cb (int s, void *opaque) { } -void test_socket_enable (rd_kafka_conf_t *conf) { +void test_socket_enable(rd_kafka_conf_t *conf) { rd_kafka_conf_set_connect_cb(conf, test_connect_cb); rd_kafka_conf_set_closesocket_cb(conf, test_closesocket_cb); - rd_kafka_conf_set_opaque(conf, test_curr); + rd_kafka_conf_set_opaque(conf, test_curr); } #endif /* WITH_SOCKEM */ /** * @brief For use as the is_fatal_cb(), treating no errors as test-fatal. */ -int test_error_is_not_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +int test_error_is_not_fatal_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason) { return 0; } -static void test_error_cb (rd_kafka_t *rk, int err, - const char *reason, void *opaque) { - if (test_curr->is_fatal_cb && !test_curr->is_fatal_cb(rk, err, reason)) { +static void +test_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) { + if (test_curr->is_fatal_cb && + !test_curr->is_fatal_cb(rk, err, reason)) { TEST_SAY(_C_YEL "%s rdkafka error (non-testfatal): %s: %s\n", rd_kafka_name(rk), rd_kafka_err2str(err), reason); } else { @@ -596,7 +621,7 @@ static void test_error_cb (rd_kafka_t *rk, int err, err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr)); if (test_curr->is_fatal_cb && - !test_curr->is_fatal_cb(rk, err, reason)) + !test_curr->is_fatal_cb(rk, err, reason)) TEST_SAY(_C_YEL "%s rdkafka ignored FATAL error: " "%s: %s\n", @@ -608,15 +633,14 @@ 
static void test_error_cb (rd_kafka_t *rk, int err, rd_kafka_err2str(err), errstr); } else { - TEST_FAIL("%s rdkafka error: %s: %s", - rd_kafka_name(rk), + TEST_FAIL("%s rdkafka error: %s: %s", rd_kafka_name(rk), rd_kafka_err2str(err), reason); } } } -static int test_stats_cb (rd_kafka_t *rk, char *json, size_t json_len, - void *opaque) { +static int +test_stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) { struct test *test = test_curr; if (test->stats_fp) fprintf(test->stats_fp, @@ -630,16 +654,16 @@ static int test_stats_cb (rd_kafka_t *rk, char *json, size_t json_len, /** * @brief Limit the test run time (in seconds) */ -void test_timeout_set (int timeout) { - TEST_LOCK(); - TEST_SAY("Setting test timeout to %ds * %.1f\n", - timeout, test_timeout_multiplier); - timeout = (int)((double)timeout * test_timeout_multiplier); - test_curr->timeout = test_clock() + (timeout * 1000000); - TEST_UNLOCK(); +void test_timeout_set(int timeout) { + TEST_LOCK(); + TEST_SAY("Setting test timeout to %ds * %.1f\n", timeout, + test_timeout_multiplier); + timeout = (int)((double)timeout * test_timeout_multiplier); + test_curr->timeout = test_clock() + (timeout * 1000000); + TEST_UNLOCK(); } -int tmout_multip (int msecs) { +int tmout_multip(int msecs) { int r; TEST_LOCK(); r = (int)(((double)(msecs)) * test_timeout_multiplier); @@ -650,13 +674,12 @@ int tmout_multip (int msecs) { #ifdef _WIN32 -static void test_init_win32 (void) { +static void test_init_win32(void) { /* Enable VT emulation to support colored output. 
*/ - HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE); + HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE); DWORD dwMode = 0; - if (hOut == INVALID_HANDLE_VALUE || - !GetConsoleMode(hOut, &dwMode)) + if (hOut == INVALID_HANDLE_VALUE || !GetConsoleMode(hOut, &dwMode)) return; #ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING @@ -668,7 +691,7 @@ static void test_init_win32 (void) { #endif -static void test_init (void) { +static void test_init(void) { int seed; const char *tmp; @@ -679,9 +702,9 @@ static void test_init (void) { if ((tmp = test_getenv("TEST_LEVEL", NULL))) test_level = atoi(tmp); if ((tmp = test_getenv("TEST_MODE", NULL))) - strncpy(test_mode, tmp, sizeof(test_mode)-1); + strncpy(test_mode, tmp, sizeof(test_mode) - 1); if ((tmp = test_getenv("TEST_SCENARIO", NULL))) - strncpy(test_scenario, tmp, sizeof(test_scenario)-1); + strncpy(test_scenario, tmp, sizeof(test_scenario) - 1); if ((tmp = test_getenv("TEST_SOCKEM", NULL))) test_sockem_conf = tmp; if ((tmp = test_getenv("TEST_SEED", NULL))) @@ -701,18 +724,18 @@ static void test_init (void) { #ifdef _WIN32 test_init_win32(); - { - LARGE_INTEGER cycl; - QueryPerformanceCounter(&cycl); - seed = (int)cycl.QuadPart; - } + { + LARGE_INTEGER cycl; + QueryPerformanceCounter(&cycl); + seed = (int)cycl.QuadPart; + } #endif - srand(seed); - test_seed = seed; + srand(seed); + test_seed = seed; } -const char *test_mk_topic_name (const char *suffix, int randomized) { +const char *test_mk_topic_name(const char *suffix, int randomized) { static RD_TLS char ret[512]; /* Strip main_ prefix (caller is using __FUNCTION__) */ @@ -720,10 +743,11 @@ const char *test_mk_topic_name (const char *suffix, int randomized) { suffix += 5; if (test_topic_random || randomized) - rd_snprintf(ret, sizeof(ret), "%s_rnd%"PRIx64"_%s", - test_topic_prefix, test_id_generate(), suffix); + rd_snprintf(ret, sizeof(ret), "%s_rnd%" PRIx64 "_%s", + test_topic_prefix, test_id_generate(), suffix); else - rd_snprintf(ret, sizeof(ret), "%s_%s", test_topic_prefix, 
suffix); + rd_snprintf(ret, sizeof(ret), "%s_%s", test_topic_prefix, + suffix); TEST_SAY("Using topic \"%s\"\n", ret); @@ -735,18 +759,17 @@ const char *test_mk_topic_name (const char *suffix, int randomized) { * @brief Set special test config property * @returns 1 if property was known, else 0. */ -int test_set_special_conf (const char *name, const char *val, int *timeoutp) { +int test_set_special_conf(const char *name, const char *val, int *timeoutp) { if (!strcmp(name, "test.timeout.multiplier")) { TEST_LOCK(); test_timeout_multiplier = strtod(val, NULL); TEST_UNLOCK(); - *timeoutp = tmout_multip((*timeoutp)*1000) / 1000; + *timeoutp = tmout_multip((*timeoutp) * 1000) / 1000; } else if (!strcmp(name, "test.topic.prefix")) { - rd_snprintf(test_topic_prefix, sizeof(test_topic_prefix), - "%s", val); + rd_snprintf(test_topic_prefix, sizeof(test_topic_prefix), "%s", + val); } else if (!strcmp(name, "test.topic.random")) { - if (!strcmp(val, "true") || - !strcmp(val, "1")) + if (!strcmp(val, "true") || !strcmp(val, "1")) test_topic_random = 1; else test_topic_random = 0; @@ -766,60 +789,59 @@ int test_set_special_conf (const char *name, const char *val, int *timeoutp) { return 1; } -static void test_read_conf_file (const char *conf_path, - rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *topic_conf, - int *timeoutp) { +static void test_read_conf_file(const char *conf_path, + rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *topic_conf, + int *timeoutp) { FILE *fp; - char buf[1024]; - int line = 0; + char buf[1024]; + int line = 0; #ifndef _WIN32 - fp = fopen(conf_path, "r"); + fp = fopen(conf_path, "r"); #else - fp = NULL; - errno = fopen_s(&fp, conf_path, "r"); + fp = NULL; + errno = fopen_s(&fp, conf_path, "r"); #endif - if (!fp) { - if (errno == ENOENT) { - TEST_SAY("Test config file %s not found\n", conf_path); + if (!fp) { + if (errno == ENOENT) { + TEST_SAY("Test config file %s not found\n", conf_path); return; - } else - TEST_FAIL("Failed to read %s: %s", - 
conf_path, strerror(errno)); - } - - while (fgets(buf, sizeof(buf)-1, fp)) { - char *t; - char *b = buf; - rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN; - char *name, *val; + } else + TEST_FAIL("Failed to read %s: %s", conf_path, + strerror(errno)); + } + + while (fgets(buf, sizeof(buf) - 1, fp)) { + char *t; + char *b = buf; + rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN; + char *name, *val; char errstr[512]; - line++; - if ((t = strchr(b, '\n'))) - *t = '\0'; + line++; + if ((t = strchr(b, '\n'))) + *t = '\0'; - if (*b == '#' || !*b) - continue; + if (*b == '#' || !*b) + continue; - if (!(t = strchr(b, '='))) - TEST_FAIL("%s:%i: expected name=value format\n", - conf_path, line); + if (!(t = strchr(b, '='))) + TEST_FAIL("%s:%i: expected name=value format\n", + conf_path, line); - name = b; - *t = '\0'; - val = t+1; + name = b; + *t = '\0'; + val = t + 1; if (test_set_special_conf(name, val, timeoutp)) continue; if (!strncmp(name, "topic.", strlen("topic."))) { - name += strlen("topic."); + name += strlen("topic."); if (topic_conf) - res = rd_kafka_topic_conf_set(topic_conf, - name, val, - errstr, + res = rd_kafka_topic_conf_set(topic_conf, name, + val, errstr, sizeof(errstr)); else res = RD_KAFKA_CONF_OK; @@ -828,33 +850,31 @@ static void test_read_conf_file (const char *conf_path, if (res == RD_KAFKA_CONF_UNKNOWN) { if (conf) - res = rd_kafka_conf_set(conf, - name, val, - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, name, val, errstr, + sizeof(errstr)); else res = RD_KAFKA_CONF_OK; } - if (res != RD_KAFKA_CONF_OK) - TEST_FAIL("%s:%i: %s\n", - conf_path, line, errstr); - } + if (res != RD_KAFKA_CONF_OK) + TEST_FAIL("%s:%i: %s\n", conf_path, line, errstr); + } - fclose(fp); + fclose(fp); } /** * @brief Get path to test config file */ -const char *test_conf_get_path (void) { +const char *test_conf_get_path(void) { return test_getenv("RDKAFKA_TEST_CONF", "test.conf"); } -const char *test_getenv (const char *env, const char *def) { +const char 
*test_getenv(const char *env, const char *def) { return rd_getenv(env, def); } -void test_conf_common_init (rd_kafka_conf_t *conf, int timeout) { +void test_conf_common_init(rd_kafka_conf_t *conf, int timeout) { if (conf) { const char *tmp = test_getenv("TEST_DEBUG", NULL); if (tmp) @@ -870,8 +890,9 @@ void test_conf_common_init (rd_kafka_conf_t *conf, int timeout) { * Creates and sets up kafka configuration objects. * Will read "test.conf" file if it exists. */ -void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf, - int timeout) { +void test_conf_init(rd_kafka_conf_t **conf, + rd_kafka_topic_conf_t **topic_conf, + int timeout) { const char *test_conf = test_conf_get_path(); if (conf) { @@ -887,15 +908,15 @@ void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf, test_conf_set(*conf, "request.timeout.ms", "10000"); #ifdef SIGIO - { - char buf[64]; - - /* Quick termination */ - rd_snprintf(buf, sizeof(buf), "%i", SIGIO); - rd_kafka_conf_set(*conf, "internal.termination.signal", - buf, NULL, 0); - signal(SIGIO, SIG_IGN); - } + { + char buf[64]; + + /* Quick termination */ + rd_snprintf(buf, sizeof(buf), "%i", SIGIO); + rd_kafka_conf_set(*conf, "internal.termination.signal", + buf, NULL, 0); + signal(SIGIO, SIG_IGN); + } #endif } @@ -904,12 +925,11 @@ void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf, test_socket_enable(*conf); #endif - if (topic_conf) - *topic_conf = rd_kafka_topic_conf_new(); + if (topic_conf) + *topic_conf = rd_kafka_topic_conf_new(); - /* Open and read optional local test configuration file, if any. */ - test_read_conf_file(test_conf, - conf ? *conf : NULL, + /* Open and read optional local test configuration file, if any. */ + test_read_conf_file(test_conf, conf ? *conf : NULL, topic_conf ? *topic_conf : NULL, &timeout); test_conf_common_init(conf ? 
*conf : NULL, timeout); @@ -917,69 +937,76 @@ void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf, static RD_INLINE unsigned int test_rand(void) { - unsigned int r; + unsigned int r; #ifdef _WIN32 - rand_s(&r); + rand_s(&r); #else - r = rand(); + r = rand(); #endif - return r; + return r; } /** * Generate a "unique" test id. */ -uint64_t test_id_generate (void) { - return (((uint64_t)test_rand()) << 32) | (uint64_t)test_rand(); +uint64_t test_id_generate(void) { + return (((uint64_t)test_rand()) << 32) | (uint64_t)test_rand(); } /** * Generate a "unique" string id */ -char *test_str_id_generate (char *dest, size_t dest_size) { - rd_snprintf(dest, dest_size, "%"PRId64, test_id_generate()); - return dest; +char *test_str_id_generate(char *dest, size_t dest_size) { + rd_snprintf(dest, dest_size, "%" PRId64, test_id_generate()); + return dest; } /** * Same as test_str_id_generate but returns a temporary string. */ -const char *test_str_id_generate_tmp (void) { - static RD_TLS char ret[64]; - return test_str_id_generate(ret, sizeof(ret)); +const char *test_str_id_generate_tmp(void) { + static RD_TLS char ret[64]; + return test_str_id_generate(ret, sizeof(ret)); } /** * Format a message token. * Pad's to dest_size. */ -void test_msg_fmt (char *dest, size_t dest_size, - uint64_t testid, int32_t partition, int msgid) { +void test_msg_fmt(char *dest, + size_t dest_size, + uint64_t testid, + int32_t partition, + int msgid) { size_t of; of = rd_snprintf(dest, dest_size, - "testid=%"PRIu64", partition=%"PRId32", msg=%i\n", + "testid=%" PRIu64 ", partition=%" PRId32 ", msg=%i\n", testid, partition, msgid); if (of < dest_size - 1) { - memset(dest+of, '!', dest_size-of); - dest[dest_size-1] = '\0'; + memset(dest + of, '!', dest_size - of); + dest[dest_size - 1] = '\0'; } } /** * @brief Prepare message value and key for test produce. 
*/ -void test_prepare_msg (uint64_t testid, int32_t partition, int msg_id, - char *val, size_t val_size, - char *key, size_t key_size) { +void test_prepare_msg(uint64_t testid, + int32_t partition, + int msg_id, + char *val, + size_t val_size, + char *key, + size_t key_size) { size_t of = 0; test_msg_fmt(key, key_size, testid, partition, msg_id); while (of < val_size) { /* Copy-repeat key into val until val_size */ - size_t len = RD_MIN(val_size-of, key_size); - memcpy(val+of, key, len); + size_t len = RD_MIN(val_size - of, key_size); + memcpy(val + of, key, len); of += len; } } @@ -989,36 +1016,47 @@ void test_prepare_msg (uint64_t testid, int32_t partition, int msg_id, /** * Parse a message token */ -void test_msg_parse00 (const char *func, int line, - uint64_t testid, int32_t exp_partition, int *msgidp, - const char *topic, int32_t partition, int64_t offset, - const char *key, size_t key_size) { +void test_msg_parse00(const char *func, + int line, + uint64_t testid, + int32_t exp_partition, + int *msgidp, + const char *topic, + int32_t partition, + int64_t offset, + const char *key, + size_t key_size) { char buf[128]; uint64_t in_testid; int in_part; if (!key) - TEST_FAIL("%s:%i: Message (%s [%"PRId32"] @ %"PRId64") " + TEST_FAIL("%s:%i: Message (%s [%" PRId32 "] @ %" PRId64 + ") " "has empty key\n", func, line, topic, partition, offset); rd_snprintf(buf, sizeof(buf), "%.*s", (int)key_size, key); - if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i\n", + if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i\n", &in_testid, &in_part, msgidp) != 3) TEST_FAIL("%s:%i: Incorrect key format: %s", func, line, buf); if (testid != in_testid || (exp_partition != -1 && exp_partition != in_part)) - TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i did " + TEST_FAIL("%s:%i: Our testid %" PRIu64 + ", part %i did " "not match message: \"%s\"\n", - func, line, testid, (int)exp_partition, buf); + func, line, testid, (int)exp_partition, buf); } -void test_msg_parse0 (const 
char *func, int line, - uint64_t testid, rd_kafka_message_t *rkmessage, - int32_t exp_partition, int *msgidp) { +void test_msg_parse0(const char *func, + int line, + uint64_t testid, + rd_kafka_message_t *rkmessage, + int32_t exp_partition, + int *msgidp) { test_msg_parse00(func, line, testid, exp_partition, msgidp, rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, @@ -1032,19 +1070,19 @@ struct run_args { char **argv; }; -static int run_test0 (struct run_args *run_args) { +static int run_test0(struct run_args *run_args) { struct test *test = run_args->test; - test_timing_t t_run; - int r; + test_timing_t t_run; + int r; char stats_file[256]; - rd_snprintf(stats_file, sizeof(stats_file), "stats_%s_%"PRIu64".json", + rd_snprintf(stats_file, sizeof(stats_file), "stats_%s_%" PRIu64 ".json", test->name, test_id_generate()); if (!(test->stats_fp = fopen(stats_file, "w+"))) TEST_SAY("=== Failed to create stats file %s: %s ===\n", stats_file, strerror(errno)); - test_curr = test; + test_curr = test; #if WITH_SOCKEM rd_list_init(&test->sockets, 16, (void *)sockem_close); @@ -1052,17 +1090,17 @@ static int run_test0 (struct run_args *run_args) { /* Don't check message status by default */ test->exp_dr_status = (rd_kafka_msg_status_t)-1; - TEST_SAY("================= Running test %s =================\n", - test->name); + TEST_SAY("================= Running test %s =================\n", + test->name); if (test->stats_fp) TEST_SAY("==== Stats written to file %s ====\n", stats_file); test_rusage_start(test_curr); - TIMING_START(&t_run, "%s", test->name); + TIMING_START(&t_run, "%s", test->name); test->start = t_run.ts_start; /* Run test main function */ - r = test->mainfunc(run_args->argc, run_args->argv); + r = test->mainfunc(run_args->argc, run_args->argv); TIMING_STOP(&t_run); test_rusage_stop(test_curr, @@ -1071,22 +1109,25 @@ static int run_test0 (struct run_args *run_args) { TEST_LOCK(); test->duration = TIMING_DURATION(&t_run); - if 
(test->state == TEST_SKIPPED) { - TEST_SAY("================= Test %s SKIPPED " - "=================\n", - run_args->test->name); - } else if (r) { + if (test->state == TEST_SKIPPED) { + TEST_SAY( + "================= Test %s SKIPPED " + "=================\n", + run_args->test->name); + } else if (r) { test->state = TEST_FAILED; - TEST_SAY("\033[31m" - "================= Test %s FAILED =================" - "\033[0m\n", - run_args->test->name); + TEST_SAY( + "\033[31m" + "================= Test %s FAILED =================" + "\033[0m\n", + run_args->test->name); } else { test->state = TEST_PASSED; - TEST_SAY("\033[32m" - "================= Test %s PASSED =================" - "\033[0m\n", - run_args->test->name); + TEST_SAY( + "\033[32m" + "================= Test %s PASSED =================" + "\033[0m\n", + run_args->test->name); } TEST_UNLOCK(); @@ -1111,20 +1152,19 @@ static int run_test0 (struct run_args *run_args) { } if (test_delete_topics_between && test_concurrent_max == 1) - test_delete_all_test_topics(60*1000); + test_delete_all_test_topics(60 * 1000); - return r; + return r; } - -static int run_test_from_thread (void *arg) { +static int run_test_from_thread(void *arg) { struct run_args *run_args = arg; - thrd_detach(thrd_current()); + thrd_detach(thrd_current()); - run_test0(run_args); + run_test0(run_args); TEST_LOCK(); tests_running_cnt--; @@ -1140,31 +1180,27 @@ static int run_test_from_thread (void *arg) { * @brief Check running tests for timeouts. 
* @locks TEST_LOCK MUST be held */ -static void check_test_timeouts (void) { +static void check_test_timeouts(void) { int64_t now = test_clock(); struct test *test; - for (test = tests ; test->name ; test++) { + for (test = tests; test->name; test++) { if (test->state != TEST_RUNNING) continue; /* Timeout check */ if (now > test->timeout) { struct test *save_test = test_curr; - test_curr = test; - test->state = TEST_FAILED; - test_summary(0/*no-locks*/); - TEST_FAIL0(__FILE__,__LINE__,0/*nolock*/, - 0/*fail-later*/, - "Test %s%s%s%s timed out " - "(timeout set to %d seconds)\n", - test->name, - *test->subtest ? " (" : "", - test->subtest, - *test->subtest ? ")" : "", - (int)(test->timeout- - test->start)/ - 1000000); + test_curr = test; + test->state = TEST_FAILED; + test_summary(0 /*no-locks*/); + TEST_FAIL0( + __FILE__, __LINE__, 0 /*nolock*/, 0 /*fail-later*/, + "Test %s%s%s%s timed out " + "(timeout set to %d seconds)\n", + test->name, *test->subtest ? " (" : "", + test->subtest, *test->subtest ? 
")" : "", + (int)(test->timeout - test->start) / 1000000); test_curr = save_test; tests_running_cnt--; /* fail-later misses this*/ #ifdef _WIN32 @@ -1177,9 +1213,9 @@ static void check_test_timeouts (void) { } -static int run_test (struct test *test, int argc, char **argv) { +static int run_test(struct test *test, int argc, char **argv) { struct run_args *run_args = calloc(1, sizeof(*run_args)); - int wait_cnt = 0; + int wait_cnt = 0; run_args->test = test; run_args->argc = argc; @@ -1188,17 +1224,17 @@ static int run_test (struct test *test, int argc, char **argv) { TEST_LOCK(); while (tests_running_cnt >= test_concurrent_max) { if (!(wait_cnt++ % 100)) - TEST_SAY("Too many tests running (%d >= %d): " - "postponing %s start...\n", - tests_running_cnt, test_concurrent_max, - test->name); + TEST_SAY( + "Too many tests running (%d >= %d): " + "postponing %s start...\n", + tests_running_cnt, test_concurrent_max, test->name); cnd_timedwait_ms(&test_cnd, &test_mtx, 100); check_test_timeouts(); } tests_running_cnt++; - test->timeout = test_clock() + (int64_t)(30.0 * 1000000.0 * - test_timeout_multiplier); + test->timeout = test_clock() + + (int64_t)(30.0 * 1000000.0 * test_timeout_multiplier); test->state = TEST_RUNNING; TEST_UNLOCK(); @@ -1209,51 +1245,51 @@ static int run_test (struct test *test, int argc, char **argv) { test->state = TEST_FAILED; TEST_UNLOCK(); - TEST_FAIL("Failed to start thread for test %s\n", - test->name); + TEST_FAIL("Failed to start thread for test %s\n", test->name); } return 0; } -static void run_tests (int argc, char **argv) { +static void run_tests(int argc, char **argv) { struct test *test; - for (test = tests ; test->name ; test++) { + for (test = tests; test->name; test++) { char testnum[128]; char *t; const char *skip_reason = NULL; - rd_bool_t skip_silent = rd_false; - char tmp[128]; + rd_bool_t skip_silent = rd_false; + char tmp[128]; const char *scenario = - test->scenario ? test->scenario : "default"; + test->scenario ? 
test->scenario : "default"; if (!test->mainfunc) continue; /* Extract test number, as string */ - strncpy(testnum, test->name, sizeof(testnum)-1); - testnum[sizeof(testnum)-1] = '\0'; + strncpy(testnum, test->name, sizeof(testnum) - 1); + testnum[sizeof(testnum) - 1] = '\0'; if ((t = strchr(testnum, '_'))) *t = '\0'; if ((test_flags && (test_flags & test->flags) != test_flags)) { skip_reason = "filtered due to test flags"; skip_silent = rd_true; - } if ((test_neg_flags & ~test_flags) & test->flags) - skip_reason = "Filtered due to negative test flags"; - if (test_broker_version && - (test->minver > test_broker_version || - (test->maxver && test->maxver < test_broker_version))) { - rd_snprintf(tmp, sizeof(tmp), - "not applicable for broker " - "version %d.%d.%d.%d", - TEST_BRKVER_X(test_broker_version, 0), - TEST_BRKVER_X(test_broker_version, 1), - TEST_BRKVER_X(test_broker_version, 2), - TEST_BRKVER_X(test_broker_version, 3)); - skip_reason = tmp; - } + } + if ((test_neg_flags & ~test_flags) & test->flags) + skip_reason = "Filtered due to negative test flags"; + if (test_broker_version && + (test->minver > test_broker_version || + (test->maxver && test->maxver < test_broker_version))) { + rd_snprintf(tmp, sizeof(tmp), + "not applicable for broker " + "version %d.%d.%d.%d", + TEST_BRKVER_X(test_broker_version, 0), + TEST_BRKVER_X(test_broker_version, 1), + TEST_BRKVER_X(test_broker_version, 2), + TEST_BRKVER_X(test_broker_version, 3)); + skip_reason = tmp; + } if (!strstr(scenario, test_scenario)) { rd_snprintf(tmp, sizeof(tmp), @@ -1287,11 +1323,8 @@ static void run_tests (int argc, char **argv) { TEST_SKIP("%s\n", skip_reason); test_curr = &tests[0]; } - } } - - } /** @@ -1299,7 +1332,7 @@ static void run_tests (int argc, char **argv) { * * @returns the number of failed tests. 
*/ -static int test_summary (int do_lock) { +static int test_summary(int do_lock) { struct test *test; FILE *report_fp = NULL; char report_path[128]; @@ -1307,14 +1340,14 @@ static int test_summary (int do_lock) { struct tm *tm; char datestr[64]; int64_t total_duration = 0; - int tests_run = 0; - int tests_failed = 0; - int tests_failed_known = 0; - int tests_passed = 0; - FILE *sql_fp = NULL; + int tests_run = 0; + int tests_failed = 0; + int tests_failed_known = 0; + int tests_passed = 0; + FILE *sql_fp = NULL; const char *tmp; - t = time(NULL); + t = time(NULL); tm = localtime(&t); strftime(datestr, sizeof(datestr), "%Y%m%d%H%M%S", tm); @@ -1339,84 +1372,85 @@ static int test_summary (int do_lock) { "\"git_version\": \"%s\", " "\"broker_version\": \"%s\", " "\"tests\": {", - datestr, test_mode, test_mode, - test_scenario, datestr, - test_git_version, + datestr, test_mode, test_mode, test_scenario, + datestr, test_git_version, test_broker_version_str); } if (do_lock) TEST_LOCK(); - if (test_sql_cmd) { + if (test_sql_cmd) { #ifdef _WIN32 - sql_fp = _popen(test_sql_cmd, "w"); + sql_fp = _popen(test_sql_cmd, "w"); #else - sql_fp = popen(test_sql_cmd, "w"); + sql_fp = popen(test_sql_cmd, "w"); #endif - fprintf(sql_fp, - "CREATE TABLE IF NOT EXISTS " - "runs(runid text PRIMARY KEY, mode text, " - "date datetime, cnt int, passed int, failed int, " - "duration numeric);\n" - "CREATE TABLE IF NOT EXISTS " - "tests(runid text, mode text, name text, state text, " - "extra text, duration numeric);\n"); - } - - if (show_summary) - printf("TEST %s (%s, scenario %s) SUMMARY\n" - "#==================================================================#\n", - datestr, test_mode, test_scenario); - - for (test = tests ; test->name ; test++) { + fprintf(sql_fp, + "CREATE TABLE IF NOT EXISTS " + "runs(runid text PRIMARY KEY, mode text, " + "date datetime, cnt int, passed int, failed int, " + "duration numeric);\n" + "CREATE TABLE IF NOT EXISTS " + "tests(runid text, mode text, name 
text, state text, " + "extra text, duration numeric);\n"); + } + + if (show_summary) + printf( + "TEST %s (%s, scenario %s) SUMMARY\n" + "#=========================================================" + "=========#\n", + datestr, test_mode, test_scenario); + + for (test = tests; test->name; test++) { const char *color; int64_t duration; - char extra[128] = ""; - int do_count = 1; + char extra[128] = ""; + int do_count = 1; if (!(duration = test->duration) && test->start > 0) duration = test_clock() - test->start; if (test == tests) { - /*
test: - * test accounts for total runtime. - * dont include in passed/run/failed counts. */ + /*
test: + * test accounts for total runtime. + * dont include in passed/run/failed counts. */ total_duration = duration; - do_count = 0; - } + do_count = 0; + } - switch (test->state) - { + switch (test->state) { case TEST_PASSED: color = _C_GRN; - if (do_count) { - tests_passed++; - tests_run++; - } + if (do_count) { + tests_passed++; + tests_run++; + } break; case TEST_FAILED: - if (test->flags & TEST_F_KNOWN_ISSUE) { - rd_snprintf(extra, sizeof(extra), - " <-- known issue%s%s", - test->extra ? ": " : "", - test->extra ? test->extra : ""); - if (do_count) - tests_failed_known++; - } + if (test->flags & TEST_F_KNOWN_ISSUE) { + rd_snprintf(extra, sizeof(extra), + " <-- known issue%s%s", + test->extra ? ": " : "", + test->extra ? test->extra : ""); + if (do_count) + tests_failed_known++; + } color = _C_RED; - if (do_count) { - tests_failed++; - tests_run++; - } + if (do_count) { + tests_failed++; + tests_run++; + } break; case TEST_RUNNING: color = _C_MAG; - if (do_count) { - tests_failed++; /* All tests should be finished */ - tests_run++; - } + if (do_count) { + tests_failed++; /* All tests should be finished + */ + tests_run++; + } break; case TEST_NOT_STARTED: color = _C_YEL; @@ -1431,13 +1465,11 @@ static int test_summary (int do_lock) { if (show_summary && (test->state != TEST_SKIPPED || *test->failstr || - (tests_to_run && - !strncmp(tests_to_run, test->name, - strlen(tests_to_run))))) { - printf("|%s %-40s | %10s | %7.3fs %s|", - color, + (tests_to_run && !strncmp(tests_to_run, test->name, + strlen(tests_to_run))))) { + printf("|%s %-40s | %10s | %7.3fs %s|", color, test->name, test_states[test->state], - (double)duration/1000000.0, _C_CLR); + (double)duration / 1000000.0, _C_CLR); if (test->state == TEST_FAILED) printf(_C_RED " %s" _C_CLR, test->failstr); else if (test->state == TEST_SKIPPED) @@ -1446,45 +1478,46 @@ static int test_summary (int do_lock) { } if (report_fp) { - int i; + int i; fprintf(report_fp, "%s\"%s\": {" "\"name\": \"%s\", " "\"state\": 
\"%s\", " - "\"known_issue\": %s, " - "\"extra\": \"%s\", " + "\"known_issue\": %s, " + "\"extra\": \"%s\", " "\"duration\": %.3f, " - "\"report\": [ ", - test == tests ? "": ", ", - test->name, - test->name, test_states[test->state], - test->flags & TEST_F_KNOWN_ISSUE ? "true":"false", - test->extra ? test->extra : "", - (double)duration/1000000.0); - - for (i = 0 ; i < test->report_cnt ; i++) { - fprintf(report_fp, "%s%s ", - i == 0 ? "":",", - test->report_arr[i]); - } - - fprintf(report_fp, "] }"); - } - - if (sql_fp) - fprintf(sql_fp, - "INSERT INTO tests VALUES(" - "'%s_%s', '%s', '%s', '%s', '%s', %f);\n", - datestr, test_mode, test_mode, + "\"report\": [ ", + test == tests ? "" : ", ", test->name, test->name, test_states[test->state], - test->extra ? test->extra : "", - (double)duration/1000000.0); + test->flags & TEST_F_KNOWN_ISSUE ? "true" + : "false", + test->extra ? test->extra : "", + (double)duration / 1000000.0); + + for (i = 0; i < test->report_cnt; i++) { + fprintf(report_fp, "%s%s ", i == 0 ? "" : ",", + test->report_arr[i]); + } + + fprintf(report_fp, "] }"); + } + + if (sql_fp) + fprintf(sql_fp, + "INSERT INTO tests VALUES(" + "'%s_%s', '%s', '%s', '%s', '%s', %f);\n", + datestr, test_mode, test_mode, test->name, + test_states[test->state], + test->extra ? 
test->extra : "", + (double)duration / 1000000.0); } if (do_lock) TEST_UNLOCK(); - if (show_summary) - printf("#==================================================================#\n"); + if (show_summary) + printf( + "#=========================================================" + "=========#\n"); if (report_fp) { fprintf(report_fp, @@ -1495,93 +1528,95 @@ static int test_summary (int do_lock) { "\"duration\": %.3f" "}\n", tests_run, tests_passed, tests_failed, - (double)total_duration/1000000.0); + (double)total_duration / 1000000.0); fclose(report_fp); TEST_SAY("# Test report written to %s\n", report_path); } - if (sql_fp) { - fprintf(sql_fp, - "INSERT INTO runs VALUES('%s_%s', '%s', datetime(), " - "%d, %d, %d, %f);\n", - datestr, test_mode, test_mode, - tests_run, tests_passed, tests_failed, - (double)total_duration/1000000.0); - fclose(sql_fp); - } + if (sql_fp) { + fprintf(sql_fp, + "INSERT INTO runs VALUES('%s_%s', '%s', datetime(), " + "%d, %d, %d, %f);\n", + datestr, test_mode, test_mode, tests_run, tests_passed, + tests_failed, (double)total_duration / 1000000.0); + fclose(sql_fp); + } return tests_failed - tests_failed_known; } #ifndef _WIN32 -static void test_sig_term (int sig) { - if (test_exit) - exit(1); - fprintf(stderr, "Exiting tests, waiting for running tests to finish.\n"); - test_exit = 1; +static void test_sig_term(int sig) { + if (test_exit) + exit(1); + fprintf(stderr, + "Exiting tests, waiting for running tests to finish.\n"); + test_exit = 1; } #endif /** * Wait 'timeout' seconds for rdkafka to kill all its threads and clean up. 
*/ -static void test_wait_exit (int timeout) { - int r; +static void test_wait_exit(int timeout) { + int r; time_t start = time(NULL); - while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) { - TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r); - rd_sleep(1); - } + while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) { + TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r); + rd_sleep(1); + } - TEST_SAY("%i thread(s) in use by librdkafka\n", r); + TEST_SAY("%i thread(s) in use by librdkafka\n", r); if (r > 0) TEST_FAIL("%i thread(s) still active in librdkafka", r); timeout -= (int)(time(NULL) - start); if (timeout > 0) { - TEST_SAY("Waiting %d seconds for all librdkafka memory " - "to be released\n", timeout); + TEST_SAY( + "Waiting %d seconds for all librdkafka memory " + "to be released\n", + timeout); if (rd_kafka_wait_destroyed(timeout * 1000) == -1) - TEST_FAIL("Not all internal librdkafka " - "objects destroyed\n"); - } + TEST_FAIL( + "Not all internal librdkafka " + "objects destroyed\n"); + } } - /** * @brief Test framework cleanup before termination. 
*/ -static void test_cleanup (void) { - struct test *test; +static void test_cleanup(void) { + struct test *test; - /* Free report arrays */ - for (test = tests ; test->name ; test++) { - int i; - if (!test->report_arr) - continue; - for (i = 0 ; i < test->report_cnt ; i++) - rd_free(test->report_arr[i]); - rd_free(test->report_arr); - test->report_arr = NULL; - } + /* Free report arrays */ + for (test = tests; test->name; test++) { + int i; + if (!test->report_arr) + continue; + for (i = 0; i < test->report_cnt; i++) + rd_free(test->report_arr[i]); + rd_free(test->report_arr); + test->report_arr = NULL; + } - if (test_sql_cmd) - rd_free(test_sql_cmd); + if (test_sql_cmd) + rd_free(test_sql_cmd); } int main(int argc, char **argv) { int i, r; - test_timing_t t_all; - int a,b,c,d; + test_timing_t t_all; + int a, b, c, d; const char *tmpver; - mtx_init(&test_mtx, mtx_plain); + mtx_init(&test_mtx, mtx_plain); cnd_init(&test_cnd); test_init(); @@ -1589,10 +1624,10 @@ int main(int argc, char **argv) { #ifndef _WIN32 signal(SIGINT, test_sig_term); #endif - tests_to_run = test_getenv("TESTS", NULL); + tests_to_run = test_getenv("TESTS", NULL); subtests_to_run = test_getenv("SUBTESTS", NULL); - tests_to_skip = test_getenv("TESTS_SKIP", NULL); - tmpver = test_getenv("TEST_KAFKA_VERSION", NULL); + tests_to_skip = test_getenv("TESTS_SKIP", NULL); + tmpver = test_getenv("TEST_KAFKA_VERSION", NULL); if (!tmpver) tmpver = test_getenv("KAFKA_VERSION", test_broker_version_str); test_broker_version_str = tmpver; @@ -1601,13 +1636,13 @@ int main(int argc, char **argv) { /* Are we running on CI? 
*/ if (test_getenv("CI", NULL)) { - test_on_ci = 1; + test_on_ci = 1; test_concurrent_max = 3; } - test_conf_init(NULL, NULL, 10); + test_conf_init(NULL, NULL, 10); - for (i = 1 ; i < argc ; i++) { + for (i = 1; i < argc; i++) { if (!strncmp(argv[i], "-p", 2) && strlen(argv[i]) > 2) { if (test_rusage) { fprintf(stderr, @@ -1615,26 +1650,26 @@ int main(int argc, char **argv) { argv[i]); continue; } - test_concurrent_max = (int)strtod(argv[i]+2, NULL); + test_concurrent_max = (int)strtod(argv[i] + 2, NULL); } else if (!strcmp(argv[i], "-l")) test_flags |= TEST_F_LOCAL; - else if (!strcmp(argv[i], "-L")) + else if (!strcmp(argv[i], "-L")) test_neg_flags |= TEST_F_LOCAL; else if (!strcmp(argv[i], "-a")) test_assert_on_fail = 1; - else if (!strcmp(argv[i], "-k")) - test_flags |= TEST_F_KNOWN_ISSUE; - else if (!strcmp(argv[i], "-K")) - test_neg_flags |= TEST_F_KNOWN_ISSUE; + else if (!strcmp(argv[i], "-k")) + test_flags |= TEST_F_KNOWN_ISSUE; + else if (!strcmp(argv[i], "-K")) + test_neg_flags |= TEST_F_KNOWN_ISSUE; else if (!strcmp(argv[i], "-E")) test_neg_flags |= TEST_F_SOCKEM; - else if (!strcmp(argv[i], "-V") && i+1 < argc) - test_broker_version_str = argv[++i]; - else if (!strcmp(argv[i], "-s") && i+1 < argc) + else if (!strcmp(argv[i], "-V") && i + 1 < argc) + test_broker_version_str = argv[++i]; + else if (!strcmp(argv[i], "-s") && i + 1 < argc) strncpy(test_scenario, argv[++i], - sizeof(test_scenario)-1); - else if (!strcmp(argv[i], "-S")) - show_summary = 0; + sizeof(test_scenario) - 1); + else if (!strcmp(argv[i], "-S")) + show_summary = 0; else if (!strcmp(argv[i], "-D")) test_delete_topics_between = 1; else if (!strcmp(argv[i], "-P")) @@ -1644,86 +1679,99 @@ int main(int argc, char **argv) { else if (!strcmp(argv[i], "-r")) test_write_report = 1; else if (!strncmp(argv[i], "-R", 2)) { - test_rusage = 1; + test_rusage = 1; test_concurrent_max = 1; if (strlen(argv[i]) > strlen("-R")) { test_rusage_cpu_calibration = - strtod(argv[i]+2, NULL); + strtod(argv[i] + 
2, NULL); if (test_rusage_cpu_calibration < 0.00001) { fprintf(stderr, "%% Invalid CPU calibration " - "value: %s\n", argv[i]+2); + "value: %s\n", + argv[i] + 2); exit(1); } } } else if (*argv[i] != '-') tests_to_run = argv[i]; else { - printf("Unknown option: %s\n" - "\n" - "Usage: %s [options] []\n" - "Options:\n" - " -p Run N tests in parallel\n" - " -l/-L Only/dont run local tests (no broker needed)\n" - " -k/-K Only/dont run tests with known issues\n" - " -E Don't run sockem tests\n" - " -a Assert on failures\n" - " -r Write test_report_...json file.\n" - " -S Dont show test summary\n" - " -s Test scenario.\n" - " -V Broker version.\n" - " -D Delete all test topics between each test (-p1) or after all tests\n" - " -P Run all tests with `enable.idempotency=true`\n" - " -Q Run tests in quick mode: faster tests, fewer iterations, less data.\n" - " -R Check resource usage thresholds.\n" - " -R Check resource usage thresholds but adjust CPU thresholds by C (float):\n" - " C < 1.0: CPU is faster than base line system.\n" - " C > 1.0: CPU is slower than base line system.\n" - " E.g. 
-R2.5 = CPU is 2.5x slower than base line system.\n" - "\n" - "Environment variables:\n" - " TESTS - substring matched test to run (e.g., 0033)\n" - " SUBTESTS - substring matched subtest to run " - "(e.g., n_wildcard)\n" - " TEST_KAFKA_VERSION - broker version (e.g., 0.9.0.1)\n" - " TEST_SCENARIO - Test scenario\n" - " TEST_LEVEL - Test verbosity level\n" - " TEST_MODE - bare, helgrind, valgrind\n" - " TEST_SEED - random seed\n" - " RDKAFKA_TEST_CONF - test config file (test.conf)\n" - " KAFKA_PATH - Path to kafka source dir\n" - " ZK_ADDRESS - Zookeeper address\n" - "\n", - argv[i], argv[0]); + printf( + "Unknown option: %s\n" + "\n" + "Usage: %s [options] []\n" + "Options:\n" + " -p Run N tests in parallel\n" + " -l/-L Only/dont run local tests (no broker " + "needed)\n" + " -k/-K Only/dont run tests with known issues\n" + " -E Don't run sockem tests\n" + " -a Assert on failures\n" + " -r Write test_report_...json file.\n" + " -S Dont show test summary\n" + " -s Test scenario.\n" + " -V Broker version.\n" + " -D Delete all test topics between each test " + "(-p1) or after all tests\n" + " -P Run all tests with " + "`enable.idempotency=true`\n" + " -Q Run tests in quick mode: faster tests, " + "fewer iterations, less data.\n" + " -R Check resource usage thresholds.\n" + " -R Check resource usage thresholds but " + "adjust CPU thresholds by C (float):\n" + " C < 1.0: CPU is faster than base line " + "system.\n" + " C > 1.0: CPU is slower than base line " + "system.\n" + " E.g. 
-R2.5 = CPU is 2.5x slower than " + "base line system.\n" + "\n" + "Environment variables:\n" + " TESTS - substring matched test to run (e.g., " + "0033)\n" + " SUBTESTS - substring matched subtest to run " + "(e.g., n_wildcard)\n" + " TEST_KAFKA_VERSION - broker version (e.g., " + "0.9.0.1)\n" + " TEST_SCENARIO - Test scenario\n" + " TEST_LEVEL - Test verbosity level\n" + " TEST_MODE - bare, helgrind, valgrind\n" + " TEST_SEED - random seed\n" + " RDKAFKA_TEST_CONF - test config file " + "(test.conf)\n" + " KAFKA_PATH - Path to kafka source dir\n" + " ZK_ADDRESS - Zookeeper address\n" + "\n", + argv[i], argv[0]); exit(1); } } - TEST_SAY("Git version: %s\n", test_git_version); + TEST_SAY("Git version: %s\n", test_git_version); if (!strcmp(test_broker_version_str, "trunk")) test_broker_version_str = "9.9.9.9"; /* for now */ d = 0; - if (sscanf(test_broker_version_str, "%d.%d.%d.%d", - &a, &b, &c, &d) < 3) { - printf("%% Expected broker version to be in format " - "N.N.N (N=int), not %s\n", - test_broker_version_str); - exit(1); - } - test_broker_version = TEST_BRKVER(a, b, c, d); - TEST_SAY("Broker version: %s (%d.%d.%d.%d)\n", - test_broker_version_str, - TEST_BRKVER_X(test_broker_version, 0), - TEST_BRKVER_X(test_broker_version, 1), - TEST_BRKVER_X(test_broker_version, 2), - TEST_BRKVER_X(test_broker_version, 3)); - - /* Set up fake "
" test for all operations performed in - * the main thread rather than the per-test threads. - * Nice side effect is that we get timing and status for main as well.*/ - test_curr = &tests[0]; + if (sscanf(test_broker_version_str, "%d.%d.%d.%d", &a, &b, &c, &d) < + 3) { + printf( + "%% Expected broker version to be in format " + "N.N.N (N=int), not %s\n", + test_broker_version_str); + exit(1); + } + test_broker_version = TEST_BRKVER(a, b, c, d); + TEST_SAY("Broker version: %s (%d.%d.%d.%d)\n", test_broker_version_str, + TEST_BRKVER_X(test_broker_version, 0), + TEST_BRKVER_X(test_broker_version, 1), + TEST_BRKVER_X(test_broker_version, 2), + TEST_BRKVER_X(test_broker_version, 3)); + + /* Set up fake "
" test for all operations performed in + * the main thread rather than the per-test threads. + * Nice side effect is that we get timing and status for main as well.*/ + test_curr = &tests[0]; test_curr->state = TEST_PASSED; test_curr->start = test_clock(); @@ -1733,16 +1781,15 @@ int main(int argc, char **argv) { TEST_UNLOCK(); } - if (!strcmp(test_mode, "helgrind") || - !strcmp(test_mode, "drd")) { - TEST_LOCK(); - test_timeout_multiplier += 5; - TEST_UNLOCK(); - } else if (!strcmp(test_mode, "valgrind")) { - TEST_LOCK(); - test_timeout_multiplier += 3; - TEST_UNLOCK(); - } + if (!strcmp(test_mode, "helgrind") || !strcmp(test_mode, "drd")) { + TEST_LOCK(); + test_timeout_multiplier += 5; + TEST_UNLOCK(); + } else if (!strcmp(test_mode, "valgrind")) { + TEST_LOCK(); + test_timeout_multiplier += 3; + TEST_UNLOCK(); + } /* Broker version 0.9 and api.version.request=true (which is default) * will cause a 10s stall per connection. Instead of fixing @@ -1755,18 +1802,17 @@ int main(int argc, char **argv) { if (test_concurrent_max > 1) test_timeout_multiplier += (double)test_concurrent_max / 3; - TEST_SAY("Tests to run : %s\n", tests_to_run ? tests_to_run : "all"); + TEST_SAY("Tests to run : %s\n", tests_to_run ? tests_to_run : "all"); if (subtests_to_run) TEST_SAY("Sub tests : %s\n", subtests_to_run); if (tests_to_skip) TEST_SAY("Skip tests : %s\n", tests_to_skip); - TEST_SAY("Test mode : %s%s%s\n", - test_quick ? "quick, ":"", - test_mode, - test_on_ci ? ", CI":""); + TEST_SAY("Test mode : %s%s%s\n", test_quick ? "quick, " : "", + test_mode, test_on_ci ? ", CI" : ""); TEST_SAY("Test scenario: %s\n", test_scenario); - TEST_SAY("Test filter : %s\n", - (test_flags & TEST_F_LOCAL) ? "local tests only" : "no filter"); + TEST_SAY("Test filter : %s\n", (test_flags & TEST_F_LOCAL) + ? "local tests only" + : "no filter"); TEST_SAY("Test timeout multiplier: %.1f\n", test_timeout_multiplier); TEST_SAY("Action on test failure: %s\n", test_assert_on_fail ? 
"assert crash" : "continue other tests"); @@ -1781,7 +1827,7 @@ int main(int argc, char **argv) { #ifdef _WIN32 pcwd = _getcwd(cwd, sizeof(cwd) - 1); #else - pcwd = getcwd(cwd, sizeof(cwd) - 1); + pcwd = getcwd(cwd, sizeof(cwd) - 1); #endif if (pcwd) TEST_SAY("Current directory: %s\n", cwd); @@ -1801,7 +1847,7 @@ int main(int argc, char **argv) { if (!test_quick && test_level >= 2) { TEST_SAY("%d test(s) running:", tests_running_cnt); - for (test = tests ; test->name ; test++) { + for (test = tests; test->name; test++) { if (test->state != TEST_RUNNING) continue; @@ -1816,73 +1862,73 @@ int main(int argc, char **argv) { TEST_UNLOCK(); if (test_quick) - rd_usleep(200*1000, NULL); + rd_usleep(200 * 1000, NULL); else rd_sleep(1); TEST_LOCK(); } - TIMING_STOP(&t_all); + TIMING_STOP(&t_all); - test_curr = &tests[0]; + test_curr = &tests[0]; test_curr->duration = test_clock() - test_curr->start; TEST_UNLOCK(); if (test_delete_topics_between) - test_delete_all_test_topics(60*1000); + test_delete_all_test_topics(60 * 1000); - r = test_summary(1/*lock*/) ? 1 : 0; + r = test_summary(1 /*lock*/) ? 1 : 0; /* Wait for everything to be cleaned up since broker destroys are - * handled in its own thread. */ - test_wait_exit(0); + * handled in its own thread. 
*/ + test_wait_exit(0); - /* If we havent failed at this point then - * there were no threads leaked */ + /* If we havent failed at this point then + * there were no threads leaked */ if (r == 0) TEST_SAY("\n============== ALL TESTS PASSED ==============\n"); - test_cleanup(); + test_cleanup(); - if (r > 0) - TEST_FAIL("%d test(s) failed, see previous errors", r); + if (r > 0) + TEST_FAIL("%d test(s) failed, see previous errors", r); - return r; + return r; } - - /****************************************************************************** * * Helpers * ******************************************************************************/ -void test_dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { - int *remainsp = rkmessage->_private; +void test_dr_msg_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { + int *remainsp = rkmessage->_private; static const char *status_names[] = { - [RD_KAFKA_MSG_STATUS_NOT_PERSISTED] = "NotPersisted", - [RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED] = "PossiblyPersisted", - [RD_KAFKA_MSG_STATUS_PERSISTED] = "Persisted" - }; - - TEST_SAYL(4, "Delivery report: %s (%s) to %s [%"PRId32"] " - "at offset %"PRId64" latency %.2fms\n", + [RD_KAFKA_MSG_STATUS_NOT_PERSISTED] = "NotPersisted", + [RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED] = "PossiblyPersisted", + [RD_KAFKA_MSG_STATUS_PERSISTED] = "Persisted"}; + + TEST_SAYL(4, + "Delivery report: %s (%s) to %s [%" PRId32 + "] " + "at offset %" PRId64 " latency %.2fms\n", rd_kafka_err2str(rkmessage->err), status_names[rd_kafka_message_status(rkmessage)], - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, + rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, (float)rd_kafka_message_latency(rkmessage) / 1000.0); if (!test_curr->produce_sync) { if (!test_curr->ignore_dr_err && rkmessage->err != test_curr->exp_dr_err) - TEST_FAIL("Message delivery (to %s [%"PRId32"]) " + TEST_FAIL("Message delivery (to %s [%" PRId32 + 
"]) " "failed: expected %s, got %s", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, @@ -1891,7 +1937,7 @@ void test_dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, if ((int)test_curr->exp_dr_status != -1) { rd_kafka_msg_status_t status = - rd_kafka_message_status(rkmessage); + rd_kafka_message_status(rkmessage); TEST_ASSERT(status == test_curr->exp_dr_status, "Expected message status %s, not %s", @@ -1904,7 +1950,7 @@ void test_dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, test_msgver_add_msg(rk, test_curr->dr_mv, rkmessage); } - if (remainsp) { + if (remainsp) { TEST_ASSERT(*remainsp > 0, "Too many messages delivered (remains %i)", *remainsp); @@ -1917,9 +1963,9 @@ void test_dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, } -rd_kafka_t *test_create_handle (int mode, rd_kafka_conf_t *conf) { - rd_kafka_t *rk; - char errstr[512]; +rd_kafka_t *test_create_handle(int mode, rd_kafka_conf_t *conf) { + rd_kafka_t *rk; + char errstr[512]; if (!conf) { test_conf_init(&conf, NULL, 0); @@ -1934,24 +1980,24 @@ rd_kafka_t *test_create_handle (int mode, rd_kafka_conf_t *conf) { - /* Creat kafka instance */ - rk = rd_kafka_new(mode, conf, errstr, sizeof(errstr)); - if (!rk) - TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr); + /* Creat kafka instance */ + rk = rd_kafka_new(mode, conf, errstr, sizeof(errstr)); + if (!rk) + TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr); - TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk)); + TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk)); - return rk; + return rk; } -rd_kafka_t *test_create_producer (void) { - rd_kafka_conf_t *conf; +rd_kafka_t *test_create_producer(void) { + rd_kafka_conf_t *conf; - test_conf_init(&conf, NULL, 0); + test_conf_init(&conf, NULL, 0); rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - return test_create_handle(RD_KAFKA_PRODUCER, conf); + return test_create_handle(RD_KAFKA_PRODUCER, conf); } @@ -1959,64 
+2005,62 @@ rd_kafka_t *test_create_producer (void) { * Create topic_t object with va-arg list as key-value config pairs * terminated by NULL. */ -rd_kafka_topic_t *test_create_topic_object (rd_kafka_t *rk, - const char *topic, ...) { - rd_kafka_topic_t *rkt; - rd_kafka_topic_conf_t *topic_conf; - va_list ap; - const char *name, *val; - - test_conf_init(NULL, &topic_conf, 0); - - va_start(ap, topic); - while ((name = va_arg(ap, const char *)) && - (val = va_arg(ap, const char *))) { +rd_kafka_topic_t * +test_create_topic_object(rd_kafka_t *rk, const char *topic, ...) { + rd_kafka_topic_t *rkt; + rd_kafka_topic_conf_t *topic_conf; + va_list ap; + const char *name, *val; + + test_conf_init(NULL, &topic_conf, 0); + + va_start(ap, topic); + while ((name = va_arg(ap, const char *)) && + (val = va_arg(ap, const char *))) { test_topic_conf_set(topic_conf, name, val); - } - va_end(ap); + } + va_end(ap); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); - return rkt; - + return rkt; } -rd_kafka_topic_t *test_create_producer_topic (rd_kafka_t *rk, - const char *topic, ...) { - rd_kafka_topic_t *rkt; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - va_list ap; - const char *name, *val; +rd_kafka_topic_t * +test_create_producer_topic(rd_kafka_t *rk, const char *topic, ...) 
{ + rd_kafka_topic_t *rkt; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + va_list ap; + const char *name, *val; - test_conf_init(NULL, &topic_conf, 0); + test_conf_init(NULL, &topic_conf, 0); - va_start(ap, topic); - while ((name = va_arg(ap, const char *)) && - (val = va_arg(ap, const char *))) { - if (rd_kafka_topic_conf_set(topic_conf, name, val, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) - TEST_FAIL("Conf failed: %s\n", errstr); - } - va_end(ap); + va_start(ap, topic); + while ((name = va_arg(ap, const char *)) && + (val = va_arg(ap, const char *))) { + if (rd_kafka_topic_conf_set(topic_conf, name, val, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) + TEST_FAIL("Conf failed: %s\n", errstr); + } + va_end(ap); - /* Make sure all replicas are in-sync after producing - * so that consume test wont fail. */ + /* Make sure all replicas are in-sync after producing + * so that consume test wont fail. */ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1", errstr, sizeof(errstr)); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); - return rkt; - + return rkt; } @@ -2035,65 +2079,69 @@ rd_kafka_topic_t *test_create_producer_topic (rd_kafka_t *rk, * Default message size is 128 bytes, if \p size is non-zero and \p payload * is NULL the message size of \p size will be used. 
*/ -void test_produce_msgs_nowait (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size, int msgrate, - int *msgcounterp) { - int msg_id; - test_timing_t t_all, t_poll; - char key[128]; - void *buf; - int64_t tot_bytes = 0; +void test_produce_msgs_nowait(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int msgrate, + int *msgcounterp) { + int msg_id; + test_timing_t t_all, t_poll; + char key[128]; + void *buf; + int64_t tot_bytes = 0; int64_t tot_time_poll = 0; - int64_t per_msg_wait = 0; + int64_t per_msg_wait = 0; if (msgrate > 0) per_msg_wait = 1000000 / (int64_t)msgrate; - if (payload) - buf = (void *)payload; - else { - if (size == 0) - size = 128; - buf = calloc(1, size); - } + if (payload) + buf = (void *)payload; + else { + if (size == 0) + size = 128; + buf = calloc(1, size); + } - TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n", - rd_kafka_topic_name(rkt), partition, msg_base, msg_base+cnt); + TEST_SAY("Produce to %s [%" PRId32 "]: messages #%d..%d\n", + rd_kafka_topic_name(rkt), partition, msg_base, msg_base + cnt); - TIMING_START(&t_all, "PRODUCE"); + TIMING_START(&t_all, "PRODUCE"); TIMING_START(&t_poll, "SUM(POLL)"); - for (msg_id = msg_base ; msg_id < msg_base + cnt ; msg_id++) { + for (msg_id = msg_base; msg_id < msg_base + cnt; msg_id++) { int wait_time = 0; if (!payload) - test_prepare_msg(testid, partition, msg_id, - buf, size, key, sizeof(key)); + test_prepare_msg(testid, partition, msg_id, buf, size, + key, sizeof(key)); - if (rd_kafka_produce(rkt, partition, - RD_KAFKA_MSG_F_COPY, - buf, size, - !payload ? key : NULL, - !payload ? 
strlen(key) : 0, - msgcounterp) == -1) - TEST_FAIL("Failed to produce message %i " - "to partition %i: %s", - msg_id, (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); + if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, buf, + size, !payload ? key : NULL, + !payload ? strlen(key) : 0, + msgcounterp) == -1) + TEST_FAIL( + "Failed to produce message %i " + "to partition %i: %s", + msg_id, (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); (*msgcounterp)++; - tot_bytes += size; + tot_bytes += size; TIMING_RESTART(&t_poll); do { if (per_msg_wait) { wait_time = (int)(per_msg_wait - TIMING_DURATION(&t_poll)) / - 1000; + 1000; if (wait_time < 0) wait_time = 0; } @@ -2102,48 +2150,50 @@ void test_produce_msgs_nowait (rd_kafka_t *rk, rd_kafka_topic_t *rkt, tot_time_poll = TIMING_DURATION(&t_poll); - if (TIMING_EVERY(&t_all, 3*1000000)) - TEST_SAY("produced %3d%%: %d/%d messages " - "(%d msgs/s, %d bytes/s)\n", - ((msg_id - msg_base) * 100) / cnt, - msg_id - msg_base, cnt, - (int)((msg_id - msg_base) / - (TIMING_DURATION(&t_all) / 1000000)), - (int)((tot_bytes) / - (TIMING_DURATION(&t_all) / 1000000))); + if (TIMING_EVERY(&t_all, 3 * 1000000)) + TEST_SAY( + "produced %3d%%: %d/%d messages " + "(%d msgs/s, %d bytes/s)\n", + ((msg_id - msg_base) * 100) / cnt, + msg_id - msg_base, cnt, + (int)((msg_id - msg_base) / + (TIMING_DURATION(&t_all) / 1000000)), + (int)((tot_bytes) / + (TIMING_DURATION(&t_all) / 1000000))); } - if (!payload) - free(buf); + if (!payload) + free(buf); t_poll.duration = tot_time_poll; TIMING_STOP(&t_poll); - TIMING_STOP(&t_all); + TIMING_STOP(&t_all); } /** * Waits for the messages tracked by counter \p msgcounterp to be delivered. 
*/ -void test_wait_delivery (rd_kafka_t *rk, int *msgcounterp) { - test_timing_t t_all; +void test_wait_delivery(rd_kafka_t *rk, int *msgcounterp) { + test_timing_t t_all; int start_cnt = *msgcounterp; TIMING_START(&t_all, "PRODUCE.DELIVERY.WAIT"); - /* Wait for messages to be delivered */ - while (*msgcounterp > 0 && rd_kafka_outq_len(rk) > 0) { - rd_kafka_poll(rk, 10); - if (TIMING_EVERY(&t_all, 3*1000000)) { + /* Wait for messages to be delivered */ + while (*msgcounterp > 0 && rd_kafka_outq_len(rk) > 0) { + rd_kafka_poll(rk, 10); + if (TIMING_EVERY(&t_all, 3 * 1000000)) { int delivered = start_cnt - *msgcounterp; - TEST_SAY("wait_delivery: " - "%d/%d messages delivered: %d msgs/s\n", - delivered, start_cnt, - (int)(delivered / - (TIMING_DURATION(&t_all) / 1000000))); + TEST_SAY( + "wait_delivery: " + "%d/%d messages delivered: %d msgs/s\n", + delivered, start_cnt, + (int)(delivered / + (TIMING_DURATION(&t_all) / 1000000))); } } - TIMING_STOP(&t_all); + TIMING_STOP(&t_all); TEST_ASSERT(*msgcounterp == 0, "Not all messages delivered: msgcounter still at %d, " @@ -2154,11 +2204,15 @@ void test_wait_delivery (rd_kafka_t *rk, int *msgcounterp) { /** * Produces \p cnt messages and waits for succesful delivery */ -void test_produce_msgs (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size) { - int remains = 0; +void test_produce_msgs(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size) { + int remains = 0; test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt, payload, size, 0, &remains); @@ -2170,11 +2224,15 @@ void test_produce_msgs (rd_kafka_t *rk, rd_kafka_topic_t *rkt, /** * @brief Produces \p cnt messages and waits for succesful delivery */ -void test_produce_msgs2 (rd_kafka_t *rk, const char *topic, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char 
*payload, size_t size) { - int remains = 0; +void test_produce_msgs2(rd_kafka_t *rk, + const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size) { + int remains = 0; rd_kafka_topic_t *rkt = test_create_topic_object(rk, topic, NULL); test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt, @@ -2188,11 +2246,15 @@ void test_produce_msgs2 (rd_kafka_t *rk, const char *topic, /** * @brief Produces \p cnt messages without waiting for delivery. */ -void test_produce_msgs2_nowait (rd_kafka_t *rk, const char *topic, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size, - int *remainsp) { +void test_produce_msgs2_nowait(rd_kafka_t *rk, + const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int *remainsp) { rd_kafka_topic_t *rkt = test_create_topic_object(rk, topic, NULL); test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt, @@ -2205,11 +2267,16 @@ void test_produce_msgs2_nowait (rd_kafka_t *rk, const char *topic, /** * Produces \p cnt messages at \p msgs/s, and waits for succesful delivery */ -void test_produce_msgs_rate (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size, int msgrate) { - int remains = 0; +void test_produce_msgs_rate(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int msgrate) { + int remains = 0; test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt, payload, size, msgrate, &remains); @@ -2223,16 +2290,18 @@ void test_produce_msgs_rate (rd_kafka_t *rk, rd_kafka_topic_t *rkt, * Create producer, produce \p msgcnt messages to \p topic \p partition, * destroy consumer, and returns the used testid. 
*/ -uint64_t -test_produce_msgs_easy_size (const char *topic, uint64_t testid, - int32_t partition, int msgcnt, size_t size) { +uint64_t test_produce_msgs_easy_size(const char *topic, + uint64_t testid, + int32_t partition, + int msgcnt, + size_t size) { rd_kafka_t *rk; rd_kafka_topic_t *rkt; test_timing_t t_produce; if (!testid) testid = test_id_generate(); - rk = test_create_producer(); + rk = test_create_producer(); rkt = test_create_producer_topic(rk, topic, NULL); TIMING_START(&t_produce, "PRODUCE"); @@ -2244,8 +2313,10 @@ test_produce_msgs_easy_size (const char *topic, uint64_t testid, return testid; } -rd_kafka_resp_err_t test_produce_sync (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition) { +rd_kafka_resp_err_t test_produce_sync(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition) { test_curr->produce_sync = 1; test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, 0); test_curr->produce_sync = 0; @@ -2258,9 +2329,13 @@ rd_kafka_resp_err_t test_produce_sync (rd_kafka_t *rk, rd_kafka_topic_t *rkt, * * @param ... is a NULL-terminated list of key, value config property pairs. */ -void test_produce_msgs_easy_v (const char *topic, uint64_t testid, - int32_t partition, - int msg_base, int cnt, size_t size, ...) { +void test_produce_msgs_easy_v(const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + size_t size, + ...) { rd_kafka_conf_t *conf; rd_kafka_t *p; rd_kafka_topic_t *rkt; @@ -2299,7 +2374,7 @@ void test_produce_msgs_easy_v (const char *topic, uint64_t testid, * * End with a NULL topic */ -void test_produce_msgs_easy_multi (uint64_t testid, ...) { +void test_produce_msgs_easy_multi(uint64_t testid, ...) { rd_kafka_conf_t *conf; rd_kafka_t *p; va_list ap; @@ -2315,21 +2390,20 @@ void test_produce_msgs_easy_multi (uint64_t testid, ...) 
{ va_start(ap, testid); while ((topic = va_arg(ap, const char *))) { int32_t partition = va_arg(ap, int32_t); - int msg_base = va_arg(ap, int); - int msg_cnt = va_arg(ap, int); + int msg_base = va_arg(ap, int); + int msg_cnt = va_arg(ap, int); rd_kafka_topic_t *rkt; rkt = test_create_producer_topic(p, topic, NULL); - test_produce_msgs_nowait(p, rkt, testid, partition, - msg_base, msg_cnt, - NULL, 0, 0, &msgcounter); + test_produce_msgs_nowait(p, rkt, testid, partition, msg_base, + msg_cnt, NULL, 0, 0, &msgcounter); rd_kafka_topic_destroy(rkt); } va_end(ap); - test_flush(p, tmout_multip(10*1000)); + test_flush(p, tmout_multip(10 * 1000)); rd_kafka_destroy(p); } @@ -2339,16 +2413,15 @@ void test_produce_msgs_easy_multi (uint64_t testid, ...) { /** * @brief A standard incremental rebalance callback. */ -void test_incremental_rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +void test_incremental_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { TEST_SAY("%s: incremental rebalance: %s: %d partition(s)%s\n", rd_kafka_name(rk), rd_kafka_err2name(err), parts->cnt, - rd_kafka_assignment_lost(rk) ? ", assignment lost": ""); + rd_kafka_assignment_lost(rk) ? ", assignment lost" : ""); - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: test_consumer_incremental_assign("rebalance_cb", rk, parts); break; @@ -2365,21 +2438,20 @@ void test_incremental_rebalance_cb (rd_kafka_t *rk, /** * @brief A standard rebalance callback. 
*/ -void test_rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +void test_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) { test_incremental_rebalance_cb(rk, err, parts, opaque); return; } - TEST_SAY("%s: Rebalance: %s: %d partition(s)\n", - rd_kafka_name(rk), rd_kafka_err2name(err), parts->cnt); + TEST_SAY("%s: Rebalance: %s: %d partition(s)\n", rd_kafka_name(rk), + rd_kafka_err2name(err), parts->cnt); - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: test_consumer_assign("assign", rk, parts); break; @@ -2395,97 +2467,100 @@ void test_rebalance_cb (rd_kafka_t *rk, -rd_kafka_t *test_create_consumer (const char *group_id, - void (*rebalance_cb) ( - rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t - *partitions, - void *opaque), - rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *default_topic_conf) { - rd_kafka_t *rk; - char tmp[64]; +rd_kafka_t *test_create_consumer( + const char *group_id, + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque), + rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *default_topic_conf) { + rd_kafka_t *rk; + char tmp[64]; - if (!conf) - test_conf_init(&conf, NULL, 0); + if (!conf) + test_conf_init(&conf, NULL, 0); if (group_id) { - test_conf_set(conf, "group.id", group_id); + test_conf_set(conf, "group.id", group_id); - rd_snprintf(tmp, sizeof(tmp), "%d", test_session_timeout_ms); - test_conf_set(conf, "session.timeout.ms", tmp); + rd_snprintf(tmp, sizeof(tmp), "%d", test_session_timeout_ms); + test_conf_set(conf, "session.timeout.ms", tmp); - if (rebalance_cb) - rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); - } else { - TEST_ASSERT(!rebalance_cb); - } + if (rebalance_cb) + rd_kafka_conf_set_rebalance_cb(conf, 
rebalance_cb); + } else { + TEST_ASSERT(!rebalance_cb); + } if (default_topic_conf) rd_kafka_conf_set_default_topic_conf(conf, default_topic_conf); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - if (group_id) - rd_kafka_poll_set_consumer(rk); + if (group_id) + rd_kafka_poll_set_consumer(rk); - return rk; + return rk; } -rd_kafka_topic_t *test_create_consumer_topic (rd_kafka_t *rk, - const char *topic) { - rd_kafka_topic_t *rkt; - rd_kafka_topic_conf_t *topic_conf; +rd_kafka_topic_t *test_create_consumer_topic(rd_kafka_t *rk, + const char *topic) { + rd_kafka_topic_t *rkt; + rd_kafka_topic_conf_t *topic_conf; - test_conf_init(NULL, &topic_conf, 0); + test_conf_init(NULL, &topic_conf, 0); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); - return rkt; + return rkt; } -void test_consumer_start (const char *what, - rd_kafka_topic_t *rkt, int32_t partition, - int64_t start_offset) { +void test_consumer_start(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition, + int64_t start_offset) { - TEST_SAY("%s: consumer_start: %s [%"PRId32"] at offset %"PRId64"\n", - what, rd_kafka_topic_name(rkt), partition, start_offset); + TEST_SAY("%s: consumer_start: %s [%" PRId32 "] at offset %" PRId64 "\n", + what, rd_kafka_topic_name(rkt), partition, start_offset); - if (rd_kafka_consume_start(rkt, partition, start_offset) == -1) - TEST_FAIL("%s: consume_start failed: %s\n", - what, rd_kafka_err2str(rd_kafka_last_error())); + if (rd_kafka_consume_start(rkt, partition, start_offset) == -1) + TEST_FAIL("%s: consume_start failed: %s\n", what, + rd_kafka_err2str(rd_kafka_last_error())); } -void test_consumer_stop (const char *what, - rd_kafka_topic_t *rkt, 
int32_t partition) { +void test_consumer_stop(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition) { - TEST_SAY("%s: consumer_stop: %s [%"PRId32"]\n", - what, rd_kafka_topic_name(rkt), partition); + TEST_SAY("%s: consumer_stop: %s [%" PRId32 "]\n", what, + rd_kafka_topic_name(rkt), partition); - if (rd_kafka_consume_stop(rkt, partition) == -1) - TEST_FAIL("%s: consume_stop failed: %s\n", - what, rd_kafka_err2str(rd_kafka_last_error())); + if (rd_kafka_consume_stop(rkt, partition) == -1) + TEST_FAIL("%s: consume_stop failed: %s\n", what, + rd_kafka_err2str(rd_kafka_last_error())); } -void test_consumer_seek (const char *what, rd_kafka_topic_t *rkt, - int32_t partition, int64_t offset) { - int err; +void test_consumer_seek(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset) { + int err; - TEST_SAY("%s: consumer_seek: %s [%"PRId32"] to offset %"PRId64"\n", - what, rd_kafka_topic_name(rkt), partition, offset); + TEST_SAY("%s: consumer_seek: %s [%" PRId32 "] to offset %" PRId64 "\n", + what, rd_kafka_topic_name(rkt), partition, offset); - if ((err = rd_kafka_seek(rkt, partition, offset, 2000))) - TEST_FAIL("%s: consume_seek(%s, %"PRId32", %"PRId64") " - "failed: %s\n", - what, - rd_kafka_topic_name(rkt), partition, offset, - rd_kafka_err2str(err)); + if ((err = rd_kafka_seek(rkt, partition, offset, 2000))) + TEST_FAIL("%s: consume_seek(%s, %" PRId32 ", %" PRId64 + ") " + "failed: %s\n", + what, rd_kafka_topic_name(rkt), partition, offset, + rd_kafka_err2str(err)); } @@ -2493,112 +2568,122 @@ void test_consumer_seek (const char *what, rd_kafka_topic_t *rkt, /** * Returns offset of the last message consumed */ -int64_t test_consume_msgs (const char *what, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, int64_t offset, - int exp_msg_base, int exp_cnt, int parse_fmt) { - int cnt = 0; - int msg_next = exp_msg_base; - int fails = 0; - int64_t offset_last = -1; - int64_t tot_bytes = 0; - test_timing_t t_first, t_all; - - 
TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: expect msg #%d..%d " - "at offset %"PRId64"\n", - what, rd_kafka_topic_name(rkt), partition, - exp_msg_base, exp_msg_base+exp_cnt, offset); - - if (offset != TEST_NO_SEEK) { - rd_kafka_resp_err_t err; - test_timing_t t_seek; - - TIMING_START(&t_seek, "SEEK"); - if ((err = rd_kafka_seek(rkt, partition, offset, 5000))) - TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: " - "seek to %"PRId64" failed: %s\n", - what, rd_kafka_topic_name(rkt), partition, - offset, rd_kafka_err2str(err)); - TIMING_STOP(&t_seek); - TEST_SAY("%s: seeked to offset %"PRId64"\n", what, offset); - } - - TIMING_START(&t_first, "FIRST MSG"); - TIMING_START(&t_all, "ALL MSGS"); - - while (cnt < exp_cnt) { - rd_kafka_message_t *rkmessage; - int msg_id; - - rkmessage = rd_kafka_consume(rkt, partition, - tmout_multip(5000)); - - if (TIMING_EVERY(&t_all, 3*1000000)) - TEST_SAY("%s: " - "consumed %3d%%: %d/%d messages " - "(%d msgs/s, %d bytes/s)\n", - what, cnt * 100 / exp_cnt, cnt, exp_cnt, - (int)(cnt / - (TIMING_DURATION(&t_all) / 1000000)), - (int)(tot_bytes / - (TIMING_DURATION(&t_all) / 1000000))); - - if (!rkmessage) - TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: " - "expected msg #%d (%d/%d): timed out\n", - what, rd_kafka_topic_name(rkt), partition, - msg_next, cnt, exp_cnt); - - if (rkmessage->err) - TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: " - "expected msg #%d (%d/%d): got error: %s\n", - what, rd_kafka_topic_name(rkt), partition, - msg_next, cnt, exp_cnt, - rd_kafka_err2str(rkmessage->err)); - - if (cnt == 0) - TIMING_STOP(&t_first); - - if (parse_fmt) - test_msg_parse(testid, rkmessage, partition, &msg_id); - else - msg_id = 0; - - if (test_level >= 3) - TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: " - "got msg #%d at offset %"PRId64 - " (expect #%d at offset %"PRId64")\n", - what, rd_kafka_topic_name(rkt), partition, - msg_id, rkmessage->offset, - msg_next, - offset >= 0 ? 
offset + cnt : -1); - - if (parse_fmt && msg_id != msg_next) { - TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: " - "expected msg #%d (%d/%d): got msg #%d\n", - what, rd_kafka_topic_name(rkt), partition, - msg_next, cnt, exp_cnt, msg_id); - fails++; - } - - cnt++; - tot_bytes += rkmessage->len; - msg_next++; - offset_last = rkmessage->offset; - - rd_kafka_message_destroy(rkmessage); - } - - TIMING_STOP(&t_all); - - if (fails) - TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: %d failures\n", - what, rd_kafka_topic_name(rkt), partition, fails); - - TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: " - "%d/%d messages consumed succesfully\n", - what, rd_kafka_topic_name(rkt), partition, - cnt, exp_cnt); - return offset_last; +int64_t test_consume_msgs(const char *what, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int64_t offset, + int exp_msg_base, + int exp_cnt, + int parse_fmt) { + int cnt = 0; + int msg_next = exp_msg_base; + int fails = 0; + int64_t offset_last = -1; + int64_t tot_bytes = 0; + test_timing_t t_first, t_all; + + TEST_SAY("%s: consume_msgs: %s [%" PRId32 + "]: expect msg #%d..%d " + "at offset %" PRId64 "\n", + what, rd_kafka_topic_name(rkt), partition, exp_msg_base, + exp_msg_base + exp_cnt, offset); + + if (offset != TEST_NO_SEEK) { + rd_kafka_resp_err_t err; + test_timing_t t_seek; + + TIMING_START(&t_seek, "SEEK"); + if ((err = rd_kafka_seek(rkt, partition, offset, 5000))) + TEST_FAIL("%s: consume_msgs: %s [%" PRId32 + "]: " + "seek to %" PRId64 " failed: %s\n", + what, rd_kafka_topic_name(rkt), partition, + offset, rd_kafka_err2str(err)); + TIMING_STOP(&t_seek); + TEST_SAY("%s: seeked to offset %" PRId64 "\n", what, offset); + } + + TIMING_START(&t_first, "FIRST MSG"); + TIMING_START(&t_all, "ALL MSGS"); + + while (cnt < exp_cnt) { + rd_kafka_message_t *rkmessage; + int msg_id; + + rkmessage = + rd_kafka_consume(rkt, partition, tmout_multip(5000)); + + if (TIMING_EVERY(&t_all, 3 * 1000000)) + TEST_SAY( + "%s: " + "consumed %3d%%: %d/%d 
messages " + "(%d msgs/s, %d bytes/s)\n", + what, cnt * 100 / exp_cnt, cnt, exp_cnt, + (int)(cnt / (TIMING_DURATION(&t_all) / 1000000)), + (int)(tot_bytes / + (TIMING_DURATION(&t_all) / 1000000))); + + if (!rkmessage) + TEST_FAIL("%s: consume_msgs: %s [%" PRId32 + "]: " + "expected msg #%d (%d/%d): timed out\n", + what, rd_kafka_topic_name(rkt), partition, + msg_next, cnt, exp_cnt); + + if (rkmessage->err) + TEST_FAIL("%s: consume_msgs: %s [%" PRId32 + "]: " + "expected msg #%d (%d/%d): got error: %s\n", + what, rd_kafka_topic_name(rkt), partition, + msg_next, cnt, exp_cnt, + rd_kafka_err2str(rkmessage->err)); + + if (cnt == 0) + TIMING_STOP(&t_first); + + if (parse_fmt) + test_msg_parse(testid, rkmessage, partition, &msg_id); + else + msg_id = 0; + + if (test_level >= 3) + TEST_SAY("%s: consume_msgs: %s [%" PRId32 + "]: " + "got msg #%d at offset %" PRId64 + " (expect #%d at offset %" PRId64 ")\n", + what, rd_kafka_topic_name(rkt), partition, + msg_id, rkmessage->offset, msg_next, + offset >= 0 ? 
offset + cnt : -1); + + if (parse_fmt && msg_id != msg_next) { + TEST_SAY("%s: consume_msgs: %s [%" PRId32 + "]: " + "expected msg #%d (%d/%d): got msg #%d\n", + what, rd_kafka_topic_name(rkt), partition, + msg_next, cnt, exp_cnt, msg_id); + fails++; + } + + cnt++; + tot_bytes += rkmessage->len; + msg_next++; + offset_last = rkmessage->offset; + + rd_kafka_message_destroy(rkmessage); + } + + TIMING_STOP(&t_all); + + if (fails) + TEST_FAIL("%s: consume_msgs: %s [%" PRId32 "]: %d failures\n", + what, rd_kafka_topic_name(rkt), partition, fails); + + TEST_SAY("%s: consume_msgs: %s [%" PRId32 + "]: " + "%d/%d messages consumed succesfully\n", + what, rd_kafka_topic_name(rkt), partition, cnt, exp_cnt); + return offset_last; } @@ -2613,13 +2698,15 @@ int64_t test_consume_msgs (const char *what, rd_kafka_topic_t *rkt, * * If \p group_id is NULL a new unique group is generated */ -void -test_consume_msgs_easy_mv0 (const char *group_id, const char *topic, - rd_bool_t txn, - int32_t partition, - uint64_t testid, int exp_eofcnt, int exp_msgcnt, - rd_kafka_topic_conf_t *tconf, - test_msgver_t *mv) { +void test_consume_msgs_easy_mv0(const char *group_id, + const char *topic, + rd_bool_t txn, + int32_t partition, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + rd_kafka_topic_conf_t *tconf, + test_msgver_t *mv) { rd_kafka_t *rk; char grpid0[64]; rd_kafka_conf_t *conf; @@ -2640,16 +2727,18 @@ test_consume_msgs_easy_mv0 (const char *group_id, const char *topic, rd_kafka_poll_set_consumer(rk); if (partition == -1) { - TEST_SAY("Subscribing to topic %s in group %s " - "(expecting %d msgs with testid %"PRIu64")\n", - topic, group_id, exp_msgcnt, testid); + TEST_SAY( + "Subscribing to topic %s in group %s " + "(expecting %d msgs with testid %" PRIu64 ")\n", + topic, group_id, exp_msgcnt, testid); test_consumer_subscribe(rk, topic); } else { rd_kafka_topic_partition_list_t *plist; - TEST_SAY("Assign topic %s [%"PRId32"] in group %s " - "(expecting %d msgs with testid 
%"PRIu64")\n", + TEST_SAY("Assign topic %s [%" PRId32 + "] in group %s " + "(expecting %d msgs with testid %" PRIu64 ")\n", topic, partition, group_id, exp_msgcnt, testid); plist = rd_kafka_topic_partition_list_new(1); @@ -2659,18 +2748,20 @@ test_consume_msgs_easy_mv0 (const char *group_id, const char *topic, } /* Consume messages */ - test_consumer_poll("consume.easy", rk, testid, exp_eofcnt, - -1, exp_msgcnt, mv); + test_consumer_poll("consume.easy", rk, testid, exp_eofcnt, -1, + exp_msgcnt, mv); test_consumer_close(rk); rd_kafka_destroy(rk); } -void -test_consume_msgs_easy (const char *group_id, const char *topic, - uint64_t testid, int exp_eofcnt, int exp_msgcnt, - rd_kafka_topic_conf_t *tconf) { +void test_consume_msgs_easy(const char *group_id, + const char *topic, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + rd_kafka_topic_conf_t *tconf) { test_msgver_t mv; test_msgver_init(&mv, testid); @@ -2682,17 +2773,18 @@ test_consume_msgs_easy (const char *group_id, const char *topic, } -void -test_consume_txn_msgs_easy (const char *group_id, const char *topic, - uint64_t testid, int exp_eofcnt, int exp_msgcnt, - rd_kafka_topic_conf_t *tconf) { +void test_consume_txn_msgs_easy(const char *group_id, + const char *topic, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + rd_kafka_topic_conf_t *tconf) { test_msgver_t mv; test_msgver_init(&mv, testid); - test_consume_msgs_easy_mv0(group_id, topic, rd_true/*txn*/, - -1, testid, exp_eofcnt, - exp_msgcnt, tconf, &mv); + test_consume_msgs_easy_mv0(group_id, topic, rd_true /*txn*/, -1, testid, + exp_eofcnt, exp_msgcnt, tconf, &mv); test_msgver_clear(&mv); } @@ -2705,7 +2797,7 @@ test_consume_txn_msgs_easy (const char *group_id, const char *topic, * @warning This method will poll the consumer and might thus read messages. * Set \p do_poll to false to use a sleep rather than poll. 
*/ -void test_consumer_wait_assignment (rd_kafka_t *rk, rd_bool_t do_poll) { +void test_consumer_wait_assignment(rd_kafka_t *rk, rd_bool_t do_poll) { rd_kafka_topic_partition_list_t *assignment = NULL; int i; @@ -2724,14 +2816,13 @@ void test_consumer_wait_assignment (rd_kafka_t *rk, rd_bool_t do_poll) { if (do_poll) test_consumer_poll_once(rk, NULL, 1000); else - rd_usleep(1000*1000, NULL); + rd_usleep(1000 * 1000, NULL); } - TEST_SAY("%s: Assignment (%d partition(s)): ", - rd_kafka_name(rk), assignment->cnt); - for (i = 0 ; i < assignment->cnt ; i++) - TEST_SAY0("%s%s[%"PRId32"]", - i == 0 ? "" : ", ", + TEST_SAY("%s: Assignment (%d partition(s)): ", rd_kafka_name(rk), + assignment->cnt); + for (i = 0; i < assignment->cnt; i++) + TEST_SAY0("%s%s[%" PRId32 "]", i == 0 ? "" : ", ", assignment->elems[i].topic, assignment->elems[i].partition); TEST_SAY0("\n"); @@ -2748,9 +2839,11 @@ void test_consumer_wait_assignment (rd_kafka_t *rk, rd_bool_t do_poll) { * * Fails the test on mismatch, unless \p fail_immediately is false. */ -void test_consumer_verify_assignment0 (const char *func, int line, - rd_kafka_t *rk, - int fail_immediately, ...) { +void test_consumer_verify_assignment0(const char *func, + int line, + rd_kafka_t *rk, + int fail_immediately, + ...) 
{ va_list ap; int cnt = 0; const char *topic; @@ -2759,14 +2852,13 @@ void test_consumer_verify_assignment0 (const char *func, int line, int i; if ((err = rd_kafka_assignment(rk, &assignment))) - TEST_FAIL("%s:%d: Failed to get assignment for %s: %s", - func, line, rd_kafka_name(rk), rd_kafka_err2str(err)); + TEST_FAIL("%s:%d: Failed to get assignment for %s: %s", func, + line, rd_kafka_name(rk), rd_kafka_err2str(err)); TEST_SAY("%s assignment (%d partition(s)):\n", rd_kafka_name(rk), assignment->cnt); - for (i = 0 ; i < assignment->cnt ; i++) - TEST_SAY(" %s [%"PRId32"]\n", - assignment->elems[i].topic, + for (i = 0; i < assignment->cnt; i++) + TEST_SAY(" %s [%" PRId32 "]\n", assignment->elems[i].topic, assignment->elems[i].partition); va_start(ap, fail_immediately); @@ -2774,22 +2866,21 @@ void test_consumer_verify_assignment0 (const char *func, int line, int partition = va_arg(ap, int); cnt++; - if (!rd_kafka_topic_partition_list_find(assignment, - topic, partition)) + if (!rd_kafka_topic_partition_list_find(assignment, topic, + partition)) TEST_FAIL_LATER( - "%s:%d: Expected %s [%d] not found in %s's " - "assignment (%d partition(s))", - func, line, - topic, partition, rd_kafka_name(rk), - assignment->cnt); + "%s:%d: Expected %s [%d] not found in %s's " + "assignment (%d partition(s))", + func, line, topic, partition, rd_kafka_name(rk), + assignment->cnt); } va_end(ap); if (cnt != assignment->cnt) TEST_FAIL_LATER( - "%s:%d: " - "Expected %d assigned partition(s) for %s, not %d", - func, line, cnt, rd_kafka_name(rk), assignment->cnt); + "%s:%d: " + "Expected %d assigned partition(s) for %s, not %d", + func, line, cnt, rd_kafka_name(rk), assignment->cnt); if (fail_immediately) TEST_LATER_CHECK(); @@ -2799,18 +2890,15 @@ void test_consumer_verify_assignment0 (const char *func, int line, - - /** * @brief Start subscribing for 'topic' */ -void test_consumer_subscribe (rd_kafka_t *rk, const char *topic) { +void test_consumer_subscribe(rd_kafka_t *rk, const char 
*topic) { rd_kafka_topic_partition_list_t *topics; - rd_kafka_resp_err_t err; + rd_kafka_resp_err_t err; - topics = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(topics, topic, - RD_KAFKA_PARTITION_UA); + topics = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA); err = rd_kafka_subscribe(rk, topics); if (err) @@ -2821,8 +2909,9 @@ void test_consumer_subscribe (rd_kafka_t *rk, const char *topic) { } -void test_consumer_assign (const char *what, rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions) { +void test_consumer_assign(const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { rd_kafka_resp_err_t err; test_timing_t timing; @@ -2830,17 +2919,18 @@ void test_consumer_assign (const char *what, rd_kafka_t *rk, err = rd_kafka_assign(rk, partitions); TIMING_STOP(&timing); if (err) - TEST_FAIL("%s: failed to assign %d partition(s): %s\n", - what, partitions->cnt, rd_kafka_err2str(err)); + TEST_FAIL("%s: failed to assign %d partition(s): %s\n", what, + partitions->cnt, rd_kafka_err2str(err)); else - TEST_SAY("%s: assigned %d partition(s)\n", - what, partitions->cnt); + TEST_SAY("%s: assigned %d partition(s)\n", what, + partitions->cnt); } -void test_consumer_incremental_assign (const char *what, rd_kafka_t *rk, - rd_kafka_topic_partition_list_t - *partitions) { +void test_consumer_incremental_assign( + const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { rd_kafka_error_t *error; test_timing_t timing; @@ -2848,9 +2938,10 @@ void test_consumer_incremental_assign (const char *what, rd_kafka_t *rk, error = rd_kafka_incremental_assign(rk, partitions); TIMING_STOP(&timing); if (error) { - TEST_FAIL("%s: incremental assign of %d partition(s) failed: " - "%s", what, partitions->cnt, - rd_kafka_error_string(error)); + TEST_FAIL( + "%s: incremental assign of %d partition(s) failed: " + "%s", + what, partitions->cnt, 
rd_kafka_error_string(error)); rd_kafka_error_destroy(error); } else TEST_SAY("%s: incremental assign of %d partition(s) done\n", @@ -2858,7 +2949,7 @@ void test_consumer_incremental_assign (const char *what, rd_kafka_t *rk, } -void test_consumer_unassign (const char *what, rd_kafka_t *rk) { +void test_consumer_unassign(const char *what, rd_kafka_t *rk) { rd_kafka_resp_err_t err; test_timing_t timing; @@ -2873,9 +2964,10 @@ void test_consumer_unassign (const char *what, rd_kafka_t *rk) { } -void test_consumer_incremental_unassign (const char *what, rd_kafka_t *rk, - rd_kafka_topic_partition_list_t - *partitions) { +void test_consumer_incremental_unassign( + const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { rd_kafka_error_t *error; test_timing_t timing; @@ -2883,9 +2975,10 @@ void test_consumer_incremental_unassign (const char *what, rd_kafka_t *rk, error = rd_kafka_incremental_unassign(rk, partitions); TIMING_STOP(&timing); if (error) { - TEST_FAIL("%s: incremental unassign of %d partition(s) " - "failed: %s", what, partitions->cnt, - rd_kafka_error_string(error)); + TEST_FAIL( + "%s: incremental unassign of %d partition(s) " + "failed: %s", + what, partitions->cnt, rd_kafka_error_string(error)); rd_kafka_error_destroy(error); } else TEST_SAY("%s: incremental unassign of %d partition(s) done\n", @@ -2896,14 +2989,16 @@ void test_consumer_incremental_unassign (const char *what, rd_kafka_t *rk, /** * @brief Assign a single partition with an optional starting offset */ -void test_consumer_assign_partition (const char *what, rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t offset) { +void test_consumer_assign_partition(const char *what, + rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t offset) { rd_kafka_topic_partition_list_t *part; part = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(part, topic, partition)->offset = - offset; + offset; test_consumer_assign(what, rk, 
part); @@ -2911,9 +3006,10 @@ void test_consumer_assign_partition (const char *what, rd_kafka_t *rk, } -void test_consumer_pause_resume_partition (rd_kafka_t *rk, - const char *topic, int32_t partition, - rd_bool_t pause) { +void test_consumer_pause_resume_partition(rd_kafka_t *rk, + const char *topic, + int32_t partition, + rd_bool_t pause) { rd_kafka_topic_partition_list_t *part; rd_kafka_resp_err_t err; @@ -2925,9 +3021,8 @@ void test_consumer_pause_resume_partition (rd_kafka_t *rk, else err = rd_kafka_resume_partitions(rk, part); - TEST_ASSERT(!err, "Failed to %s %s [%"PRId32"]: %s", - pause ? "pause":"resume", - topic, partition, + TEST_ASSERT(!err, "Failed to %s %s [%" PRId32 "]: %s", + pause ? "pause" : "resume", topic, partition, rd_kafka_err2str(err)); rd_kafka_topic_partition_list_destroy(part); @@ -2939,95 +3034,99 @@ void test_consumer_pause_resume_partition (rd_kafka_t *rk, * */ -void test_msgver_init (test_msgver_t *mv, uint64_t testid) { - memset(mv, 0, sizeof(*mv)); - mv->testid = testid; - /* Max warning logs before suppressing. */ - mv->log_max = (test_level + 1) * 100; +void test_msgver_init(test_msgver_t *mv, uint64_t testid) { + memset(mv, 0, sizeof(*mv)); + mv->testid = testid; + /* Max warning logs before suppressing. */ + mv->log_max = (test_level + 1) * 100; } -void test_msgver_ignore_eof (test_msgver_t *mv) { +void test_msgver_ignore_eof(test_msgver_t *mv) { mv->ignore_eof = rd_true; } -#define TEST_MV_WARN(mv,...) do { \ - if ((mv)->log_cnt++ > (mv)->log_max) \ - (mv)->log_suppr_cnt++; \ - else \ - TEST_WARN(__VA_ARGS__); \ - } while (0) - +#define TEST_MV_WARN(mv, ...) 
\ + do { \ + if ((mv)->log_cnt++ > (mv)->log_max) \ + (mv)->log_suppr_cnt++; \ + else \ + TEST_WARN(__VA_ARGS__); \ + } while (0) + -static void test_mv_mvec_grow (struct test_mv_mvec *mvec, int tot_size) { - if (tot_size <= mvec->size) - return; - mvec->size = tot_size; - mvec->m = realloc(mvec->m, sizeof(*mvec->m) * mvec->size); +static void test_mv_mvec_grow(struct test_mv_mvec *mvec, int tot_size) { + if (tot_size <= mvec->size) + return; + mvec->size = tot_size; + mvec->m = realloc(mvec->m, sizeof(*mvec->m) * mvec->size); } /** * Make sure there is room for at least \p cnt messages, else grow mvec. */ -static void test_mv_mvec_reserve (struct test_mv_mvec *mvec, int cnt) { - test_mv_mvec_grow(mvec, mvec->cnt + cnt); +static void test_mv_mvec_reserve(struct test_mv_mvec *mvec, int cnt) { + test_mv_mvec_grow(mvec, mvec->cnt + cnt); } -void test_mv_mvec_init (struct test_mv_mvec *mvec, int exp_cnt) { - TEST_ASSERT(mvec->m == NULL, "mvec not cleared"); +void test_mv_mvec_init(struct test_mv_mvec *mvec, int exp_cnt) { + TEST_ASSERT(mvec->m == NULL, "mvec not cleared"); - if (!exp_cnt) - return; + if (!exp_cnt) + return; - test_mv_mvec_grow(mvec, exp_cnt); + test_mv_mvec_grow(mvec, exp_cnt); } -void test_mv_mvec_clear (struct test_mv_mvec *mvec) { - if (mvec->m) - free(mvec->m); +void test_mv_mvec_clear(struct test_mv_mvec *mvec) { + if (mvec->m) + free(mvec->m); } -void test_msgver_clear (test_msgver_t *mv) { - int i; - for (i = 0 ; i < mv->p_cnt ; i++) { - struct test_mv_p *p = mv->p[i]; - free(p->topic); - test_mv_mvec_clear(&p->mvec); - free(p); - } +void test_msgver_clear(test_msgver_t *mv) { + int i; + for (i = 0; i < mv->p_cnt; i++) { + struct test_mv_p *p = mv->p[i]; + free(p->topic); + test_mv_mvec_clear(&p->mvec); + free(p); + } - free(mv->p); + free(mv->p); - test_msgver_init(mv, mv->testid); + test_msgver_init(mv, mv->testid); } -struct test_mv_p *test_msgver_p_get (test_msgver_t *mv, const char *topic, - int32_t partition, int do_create) { - int i; - 
struct test_mv_p *p; +struct test_mv_p *test_msgver_p_get(test_msgver_t *mv, + const char *topic, + int32_t partition, + int do_create) { + int i; + struct test_mv_p *p; - for (i = 0 ; i < mv->p_cnt ; i++) { - p = mv->p[i]; - if (p->partition == partition && !strcmp(p->topic, topic)) - return p; - } + for (i = 0; i < mv->p_cnt; i++) { + p = mv->p[i]; + if (p->partition == partition && !strcmp(p->topic, topic)) + return p; + } - if (!do_create) - TEST_FAIL("Topic %s [%d] not found in msgver", topic, partition); + if (!do_create) + TEST_FAIL("Topic %s [%d] not found in msgver", topic, + partition); - if (mv->p_cnt == mv->p_size) { - mv->p_size = (mv->p_size + 4) * 2; - mv->p = realloc(mv->p, sizeof(*mv->p) * mv->p_size); - } + if (mv->p_cnt == mv->p_size) { + mv->p_size = (mv->p_size + 4) * 2; + mv->p = realloc(mv->p, sizeof(*mv->p) * mv->p_size); + } - mv->p[mv->p_cnt++] = p = calloc(1, sizeof(*p)); + mv->p[mv->p_cnt++] = p = calloc(1, sizeof(*p)); - p->topic = rd_strdup(topic); - p->partition = partition; - p->eof_offset = RD_KAFKA_OFFSET_INVALID; + p->topic = rd_strdup(topic); + p->partition = partition; + p->eof_offset = RD_KAFKA_OFFSET_INVALID; - return p; + return p; } @@ -3035,34 +3134,34 @@ struct test_mv_p *test_msgver_p_get (test_msgver_t *mv, const char *topic, * Add (room for) message to message vector. * Resizes the vector as needed. */ -static struct test_mv_m *test_mv_mvec_add (struct test_mv_mvec *mvec) { - if (mvec->cnt == mvec->size) { - test_mv_mvec_grow(mvec, (mvec->size ? mvec->size * 2 : 10000)); - } +static struct test_mv_m *test_mv_mvec_add(struct test_mv_mvec *mvec) { + if (mvec->cnt == mvec->size) { + test_mv_mvec_grow(mvec, (mvec->size ? 
mvec->size * 2 : 10000)); + } - mvec->cnt++; + mvec->cnt++; - return &mvec->m[mvec->cnt-1]; + return &mvec->m[mvec->cnt - 1]; } /** * Returns message at index \p mi */ -static RD_INLINE struct test_mv_m *test_mv_mvec_get (struct test_mv_mvec *mvec, - int mi) { +static RD_INLINE struct test_mv_m *test_mv_mvec_get(struct test_mv_mvec *mvec, + int mi) { if (mi >= mvec->cnt) return NULL; - return &mvec->m[mi]; + return &mvec->m[mi]; } /** * @returns the message with msgid \p msgid, or NULL. */ -static struct test_mv_m *test_mv_mvec_find_by_msgid (struct test_mv_mvec *mvec, - int msgid) { +static struct test_mv_m *test_mv_mvec_find_by_msgid(struct test_mv_mvec *mvec, + int msgid) { int mi; - for (mi = 0 ; mi < mvec->cnt ; mi++) + for (mi = 0; mi < mvec->cnt; mi++) if (mvec->m[mi].msgid == msgid) return &mvec->m[mi]; @@ -3073,22 +3172,21 @@ static struct test_mv_m *test_mv_mvec_find_by_msgid (struct test_mv_mvec *mvec, /** * Print message list to \p fp */ -static RD_UNUSED -void test_mv_mvec_dump (FILE *fp, const struct test_mv_mvec *mvec) { - int mi; - - fprintf(fp, "*** Dump mvec with %d messages (capacity %d): ***\n", - mvec->cnt, mvec->size); - for (mi = 0 ; mi < mvec->cnt ; mi++) - fprintf(fp, " msgid %d, offset %"PRId64"\n", - mvec->m[mi].msgid, mvec->m[mi].offset); - fprintf(fp, "*** Done ***\n"); +static RD_UNUSED void test_mv_mvec_dump(FILE *fp, + const struct test_mv_mvec *mvec) { + int mi; + fprintf(fp, "*** Dump mvec with %d messages (capacity %d): ***\n", + mvec->cnt, mvec->size); + for (mi = 0; mi < mvec->cnt; mi++) + fprintf(fp, " msgid %d, offset %" PRId64 "\n", + mvec->m[mi].msgid, mvec->m[mi].offset); + fprintf(fp, "*** Done ***\n"); } -static void test_mv_mvec_sort (struct test_mv_mvec *mvec, - int (*cmp) (const void *, const void *)) { - qsort(mvec->m, mvec->cnt, sizeof(*mvec->m), cmp); +static void test_mv_mvec_sort(struct test_mv_mvec *mvec, + int (*cmp)(const void *, const void *)) { + qsort(mvec->m, mvec->cnt, sizeof(*mvec->m), cmp); } @@ -3097,24 
+3195,31 @@ static void test_mv_mvec_sort (struct test_mv_mvec *mvec, * * @returns 1 if message is from the expected testid, else 0 (not added) */ -int test_msgver_add_msg00 (const char *func, int line, const char *clientname, - test_msgver_t *mv, - uint64_t testid, - const char *topic, int32_t partition, - int64_t offset, int64_t timestamp, int32_t broker_id, - rd_kafka_resp_err_t err, int msgnum) { +int test_msgver_add_msg00(const char *func, + int line, + const char *clientname, + test_msgver_t *mv, + uint64_t testid, + const char *topic, + int32_t partition, + int64_t offset, + int64_t timestamp, + int32_t broker_id, + rd_kafka_resp_err_t err, + int msgnum) { struct test_mv_p *p; struct test_mv_m *m; if (testid != mv->testid) { - TEST_SAYL(3, "%s:%d: %s: mismatching testid %"PRIu64 - " != %"PRIu64"\n", + TEST_SAYL(3, + "%s:%d: %s: mismatching testid %" PRIu64 + " != %" PRIu64 "\n", func, line, clientname, testid, mv->testid); return 0; /* Ignore message */ } if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF && mv->ignore_eof) { - TEST_SAYL(3, "%s:%d: %s: ignoring EOF for %s [%"PRId32"]\n", + TEST_SAYL(3, "%s:%d: %s: ignoring EOF for %s [%" PRId32 "]\n", func, line, clientname, topic, partition); return 0; /* Ignore message */ } @@ -3128,18 +3233,19 @@ int test_msgver_add_msg00 (const char *func, int line, const char *clientname, m = test_mv_mvec_add(&p->mvec); - m->offset = offset; - m->msgid = msgnum; + m->offset = offset; + m->msgid = msgnum; m->timestamp = timestamp; m->broker_id = broker_id; if (test_level > 2) { - TEST_SAY("%s:%d: %s: " - "Recv msg %s [%"PRId32"] offset %"PRId64" msgid %d " - "timestamp %"PRId64" broker %"PRId32"\n", - func, line, clientname, - p->topic, p->partition, m->offset, m->msgid, - m->timestamp, m->broker_id); + TEST_SAY( + "%s:%d: %s: " + "Recv msg %s [%" PRId32 "] offset %" PRId64 + " msgid %d " + "timestamp %" PRId64 " broker %" PRId32 "\n", + func, line, clientname, p->topic, p->partition, m->offset, + m->msgid, m->timestamp, 
m->broker_id); } mv->msgcnt++; @@ -3157,29 +3263,32 @@ int test_msgver_add_msg00 (const char *func, int line, const char *clientname, * * @returns 1 if message is from the expected testid, else 0 (not added). */ -int test_msgver_add_msg0 (const char *func, int line, const char *clientname, - test_msgver_t *mv, - const rd_kafka_message_t *rkmessage, - const char *override_topic) { - uint64_t in_testid; - int in_part; - int in_msgnum = -1; - char buf[128]; +int test_msgver_add_msg0(const char *func, + int line, + const char *clientname, + test_msgver_t *mv, + const rd_kafka_message_t *rkmessage, + const char *override_topic) { + uint64_t in_testid; + int in_part; + int in_msgnum = -1; + char buf[128]; const void *val; size_t valsize; if (mv->fwd) - test_msgver_add_msg0(func, line, clientname, - mv->fwd, rkmessage, override_topic); + test_msgver_add_msg0(func, line, clientname, mv->fwd, rkmessage, + override_topic); if (rd_kafka_message_status(rkmessage) == - RD_KAFKA_MSG_STATUS_NOT_PERSISTED && rkmessage->err) { - if (rkmessage->err != RD_KAFKA_RESP_ERR__PARTITION_EOF) - return 0; /* Ignore error */ + RD_KAFKA_MSG_STATUS_NOT_PERSISTED && + rkmessage->err) { + if (rkmessage->err != RD_KAFKA_RESP_ERR__PARTITION_EOF) + return 0; /* Ignore error */ - in_testid = mv->testid; + in_testid = mv->testid; - } else { + } else { if (!mv->msgid_hdr) { rd_snprintf(buf, sizeof(buf), "%.*s", @@ -3191,39 +3300,36 @@ int test_msgver_add_msg0 (const char *func, int line, const char *clientname, rd_kafka_headers_t *hdrs; if (rd_kafka_message_headers(rkmessage, &hdrs) || - rd_kafka_header_get_last(hdrs, mv->msgid_hdr, - &val, &valsize)) { + rd_kafka_header_get_last(hdrs, mv->msgid_hdr, &val, + &valsize)) { TEST_SAYL(3, "%s:%d: msgid expected in header %s " "but %s exists for " - "message at offset %"PRId64 + "message at offset %" PRId64 " has no headers\n", func, line, mv->msgid_hdr, - hdrs ? "no such header" : "no headers", + hdrs ? 
"no such header" + : "no headers", rkmessage->offset); return 0; } } - if (sscanf(val, "testid=%"SCNu64", partition=%i, msg=%i\n", + if (sscanf(val, "testid=%" SCNu64 ", partition=%i, msg=%i\n", &in_testid, &in_part, &in_msgnum) != 3) - TEST_FAIL("%s:%d: Incorrect format at offset %"PRId64 - ": %s", - func, line, rkmessage->offset, - (const char *)val); + TEST_FAIL( + "%s:%d: Incorrect format at offset %" PRId64 ": %s", + func, line, rkmessage->offset, (const char *)val); } - return test_msgver_add_msg00(func, line, clientname, mv, in_testid, - override_topic ? - override_topic : - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_timestamp(rkmessage, NULL), - rd_kafka_message_broker_id(rkmessage), - rkmessage->err, - in_msgnum); + return test_msgver_add_msg00( + func, line, clientname, mv, in_testid, + override_topic ? override_topic + : rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset, + rd_kafka_message_timestamp(rkmessage, NULL), + rd_kafka_message_broker_id(rkmessage), rkmessage->err, in_msgnum); return 1; } @@ -3235,61 +3341,61 @@ int test_msgver_add_msg0 (const char *func, int line, const char *clientname, * - Offsets need to occur without gaps * - msgids need to be increasing: but may have gaps, e.g., using partitioner) */ -static int test_mv_mvec_verify_order (test_msgver_t *mv, int flags, - struct test_mv_p *p, - struct test_mv_mvec *mvec, - struct test_mv_vs *vs) { - int mi; - int fails = 0; - - for (mi = 1/*skip first*/ ; mi < mvec->cnt ; mi++) { - struct test_mv_m *prev = test_mv_mvec_get(mvec, mi-1); - struct test_mv_m *this = test_mv_mvec_get(mvec, mi); - - if (((flags & TEST_MSGVER_BY_OFFSET) && - prev->offset + 1 != this->offset) || - ((flags & TEST_MSGVER_BY_MSGID) && - prev->msgid > this->msgid)) { - TEST_MV_WARN( - mv, - " %s [%"PRId32"] msg rcvidx #%d/%d: " - "out of order (prev vs this): " - "offset %"PRId64" vs %"PRId64", " - "msgid %d vs %d\n", - p ? 
p->topic : "*", - p ? p->partition : -1, - mi, mvec->cnt, - prev->offset, this->offset, - prev->msgid, this->msgid); - fails++; +static int test_mv_mvec_verify_order(test_msgver_t *mv, + int flags, + struct test_mv_p *p, + struct test_mv_mvec *mvec, + struct test_mv_vs *vs) { + int mi; + int fails = 0; + + for (mi = 1 /*skip first*/; mi < mvec->cnt; mi++) { + struct test_mv_m *prev = test_mv_mvec_get(mvec, mi - 1); + struct test_mv_m *this = test_mv_mvec_get(mvec, mi); + + if (((flags & TEST_MSGVER_BY_OFFSET) && + prev->offset + 1 != this->offset) || + ((flags & TEST_MSGVER_BY_MSGID) && + prev->msgid > this->msgid)) { + TEST_MV_WARN(mv, + " %s [%" PRId32 + "] msg rcvidx #%d/%d: " + "out of order (prev vs this): " + "offset %" PRId64 " vs %" PRId64 + ", " + "msgid %d vs %d\n", + p ? p->topic : "*", p ? p->partition : -1, + mi, mvec->cnt, prev->offset, this->offset, + prev->msgid, this->msgid); + fails++; } else if ((flags & TEST_MSGVER_BY_BROKER_ID) && this->broker_id != vs->broker_id) { - TEST_MV_WARN( - mv, - " %s [%"PRId32"] msg rcvidx #%d/%d: " - "broker id mismatch: expected %"PRId32 - ", not %"PRId32"\n", - p ? p->topic : "*", - p ? p->partition : -1, - mi, mvec->cnt, - vs->broker_id, this->broker_id); + TEST_MV_WARN(mv, + " %s [%" PRId32 + "] msg rcvidx #%d/%d: " + "broker id mismatch: expected %" PRId32 + ", not %" PRId32 "\n", + p ? p->topic : "*", p ? p->partition : -1, + mi, mvec->cnt, vs->broker_id, + this->broker_id); fails++; } } - return fails; + return fails; } /** * @brief Verify that messages correspond to 'correct' msgver. 
*/ -static int test_mv_mvec_verify_corr (test_msgver_t *mv, int flags, - struct test_mv_p *p, - struct test_mv_mvec *mvec, - struct test_mv_vs *vs) { +static int test_mv_mvec_verify_corr(test_msgver_t *mv, + int flags, + struct test_mv_p *p, + struct test_mv_mvec *mvec, + struct test_mv_vs *vs) { int mi; - int fails = 0; + int fails = 0; struct test_mv_p *corr_p = NULL; struct test_mv_mvec *corr_mvec; int verifycnt = 0; @@ -3301,42 +3407,42 @@ static int test_mv_mvec_verify_corr (test_msgver_t *mv, int flags, corr_p = test_msgver_p_get(vs->corr, p->topic, p->partition, 0); if (!corr_p) { TEST_MV_WARN(mv, - " %s [%"PRId32"]: " + " %s [%" PRId32 + "]: " "no corresponding correct partition found\n", - p ? p->topic : "*", - p ? p->partition : -1); + p ? p->topic : "*", p ? p->partition : -1); return 1; } corr_mvec = &corr_p->mvec; - for (mi = 0 ; mi < mvec->cnt ; mi++) { + for (mi = 0; mi < mvec->cnt; mi++) { struct test_mv_m *this = test_mv_mvec_get(mvec, mi); const struct test_mv_m *corr; if (flags & TEST_MSGVER_SUBSET) - corr = test_mv_mvec_find_by_msgid(corr_mvec, - this->msgid); + corr = + test_mv_mvec_find_by_msgid(corr_mvec, this->msgid); else corr = test_mv_mvec_get(corr_mvec, mi); if (0) TEST_MV_WARN(mv, - "msg #%d: msgid %d, offset %"PRId64"\n", + "msg #%d: msgid %d, offset %" PRId64 "\n", mi, this->msgid, this->offset); if (!corr) { if (!(flags & TEST_MSGVER_SUBSET)) { TEST_MV_WARN( - mv, - " %s [%"PRId32"] msg rcvidx #%d/%d: " - "out of range: correct mvec has " - "%d messages: " - "message offset %"PRId64", msgid %d\n", - p ? p->topic : "*", - p ? p->partition : -1, - mi, mvec->cnt, corr_mvec->cnt, - this->offset, this->msgid); + mv, + " %s [%" PRId32 + "] msg rcvidx #%d/%d: " + "out of range: correct mvec has " + "%d messages: " + "message offset %" PRId64 ", msgid %d\n", + p ? p->topic : "*", p ? 
p->partition : -1, + mi, mvec->cnt, corr_mvec->cnt, this->offset, + this->msgid); fails++; } continue; @@ -3351,36 +3457,33 @@ static int test_mv_mvec_verify_corr (test_msgver_t *mv, int flags, ((flags & TEST_MSGVER_BY_BROKER_ID) && this->broker_id != corr->broker_id)) { TEST_MV_WARN( - mv, - " %s [%"PRId32"] msg rcvidx #%d/%d: " - "did not match correct msg: " - "offset %"PRId64" vs %"PRId64", " - "msgid %d vs %d, " - "timestamp %"PRId64" vs %"PRId64", " - "broker %"PRId32" vs %"PRId32" (fl 0x%x)\n", - p ? p->topic : "*", - p ? p->partition : -1, - mi, mvec->cnt, - this->offset, corr->offset, - this->msgid, corr->msgid, - this->timestamp, corr->timestamp, - this->broker_id, corr->broker_id, - flags); + mv, + " %s [%" PRId32 + "] msg rcvidx #%d/%d: " + "did not match correct msg: " + "offset %" PRId64 " vs %" PRId64 + ", " + "msgid %d vs %d, " + "timestamp %" PRId64 " vs %" PRId64 + ", " + "broker %" PRId32 " vs %" PRId32 " (fl 0x%x)\n", + p ? p->topic : "*", p ? p->partition : -1, mi, + mvec->cnt, this->offset, corr->offset, this->msgid, + corr->msgid, this->timestamp, corr->timestamp, + this->broker_id, corr->broker_id, flags); fails++; } else { verifycnt++; } } - if (verifycnt != corr_mvec->cnt && - !(flags & TEST_MSGVER_SUBSET)) { - TEST_MV_WARN( - mv, - " %s [%"PRId32"]: of %d input messages, " - "only %d/%d matched correct messages\n", - p ? p->topic : "*", - p ? p->partition : -1, - mvec->cnt, verifycnt, corr_mvec->cnt); + if (verifycnt != corr_mvec->cnt && !(flags & TEST_MSGVER_SUBSET)) { + TEST_MV_WARN(mv, + " %s [%" PRId32 + "]: of %d input messages, " + "only %d/%d matched correct messages\n", + p ? p->topic : "*", p ? 
p->partition : -1, + mvec->cnt, verifycnt, corr_mvec->cnt); fails++; } @@ -3389,14 +3492,14 @@ static int test_mv_mvec_verify_corr (test_msgver_t *mv, int flags, -static int test_mv_m_cmp_offset (const void *_a, const void *_b) { - const struct test_mv_m *a = _a, *b = _b; +static int test_mv_m_cmp_offset(const void *_a, const void *_b) { + const struct test_mv_m *a = _a, *b = _b; return RD_CMP(a->offset, b->offset); } -static int test_mv_m_cmp_msgid (const void *_a, const void *_b) { - const struct test_mv_m *a = _a, *b = _b; +static int test_mv_m_cmp_msgid(const void *_a, const void *_b) { + const struct test_mv_m *a = _a, *b = _b; return RD_CMP(a->msgid, b->msgid); } @@ -3411,56 +3514,55 @@ static int test_mv_m_cmp_msgid (const void *_a, const void *_b) { * * NOTE: This sorts the message (.m) array, first by offset, then by msgid * and leaves the message array sorted (by msgid) */ -static int test_mv_mvec_verify_dup (test_msgver_t *mv, int flags, - struct test_mv_p *p, - struct test_mv_mvec *mvec, - struct test_mv_vs *vs) { - int mi; - int fails = 0; - enum { - _P_OFFSET, - _P_MSGID - } pass; - - for (pass = _P_OFFSET ; pass <= _P_MSGID ; pass++) { - - if (pass == _P_OFFSET) { - if (!(flags & TEST_MSGVER_BY_OFFSET)) - continue; - test_mv_mvec_sort(mvec, test_mv_m_cmp_offset); - } else if (pass == _P_MSGID) { - if (!(flags & TEST_MSGVER_BY_MSGID)) - continue; - test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid); - } - - for (mi = 1/*skip first*/ ; mi < mvec->cnt ; mi++) { - struct test_mv_m *prev = test_mv_mvec_get(mvec, mi-1); - struct test_mv_m *this = test_mv_mvec_get(mvec, mi); - int is_dup = 0; - - if (pass == _P_OFFSET) - is_dup = prev->offset == this->offset; - else if (pass == _P_MSGID) - is_dup = prev->msgid == this->msgid; - - if (!is_dup) - continue; - - TEST_MV_WARN(mv, - " %s [%"PRId32"] " - "duplicate msg (prev vs this): " - "offset %"PRId64" vs %"PRId64", " - "msgid %d vs %d\n", - p ? p->topic : "*", - p ? 
p->partition : -1, - prev->offset, this->offset, - prev->msgid, this->msgid); - fails++; - } - } - - return fails; +static int test_mv_mvec_verify_dup(test_msgver_t *mv, + int flags, + struct test_mv_p *p, + struct test_mv_mvec *mvec, + struct test_mv_vs *vs) { + int mi; + int fails = 0; + enum { _P_OFFSET, _P_MSGID } pass; + + for (pass = _P_OFFSET; pass <= _P_MSGID; pass++) { + + if (pass == _P_OFFSET) { + if (!(flags & TEST_MSGVER_BY_OFFSET)) + continue; + test_mv_mvec_sort(mvec, test_mv_m_cmp_offset); + } else if (pass == _P_MSGID) { + if (!(flags & TEST_MSGVER_BY_MSGID)) + continue; + test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid); + } + + for (mi = 1 /*skip first*/; mi < mvec->cnt; mi++) { + struct test_mv_m *prev = test_mv_mvec_get(mvec, mi - 1); + struct test_mv_m *this = test_mv_mvec_get(mvec, mi); + int is_dup = 0; + + if (pass == _P_OFFSET) + is_dup = prev->offset == this->offset; + else if (pass == _P_MSGID) + is_dup = prev->msgid == this->msgid; + + if (!is_dup) + continue; + + TEST_MV_WARN(mv, + " %s [%" PRId32 + "] " + "duplicate msg (prev vs this): " + "offset %" PRId64 " vs %" PRId64 + ", " + "msgid %d vs %d\n", + p ? p->topic : "*", p ? 
p->partition : -1, + prev->offset, this->offset, prev->msgid, + this->msgid); + fails++; + } + } + + return fails; } @@ -3475,14 +3577,15 @@ static int test_mv_mvec_verify_dup (test_msgver_t *mv, int flags, * * NOTE: This sorts the message (.m) array by msgid * and leaves the message array sorted (by msgid) */ -static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, - struct test_mv_p *p, - struct test_mv_mvec *mvec, - struct test_mv_vs *vs) { +static int test_mv_mvec_verify_range(test_msgver_t *mv, + int flags, + struct test_mv_p *p, + struct test_mv_mvec *mvec, + struct test_mv_vs *vs) { int mi; - int fails = 0; - int cnt = 0; - int exp_cnt = vs->msgid_max - vs->msgid_min + 1; + int fails = 0; + int cnt = 0; + int exp_cnt = vs->msgid_max - vs->msgid_min + 1; int skip_cnt = 0; if (!(flags & TEST_MSGVER_BY_MSGID)) @@ -3490,10 +3593,11 @@ static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid); - //test_mv_mvec_dump(stdout, mvec); + // test_mv_mvec_dump(stdout, mvec); - for (mi = 0 ; mi < mvec->cnt ; mi++) { - struct test_mv_m *prev = mi ? test_mv_mvec_get(mvec, mi-1):NULL; + for (mi = 0; mi < mvec->cnt; mi++) { + struct test_mv_m *prev = + mi ? test_mv_mvec_get(mvec, mi - 1) : NULL; struct test_mv_m *this = test_mv_mvec_get(mvec, mi); if (this->msgid < vs->msgid_min) { @@ -3506,16 +3610,16 @@ static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, if (this->timestamp < vs->timestamp_min || this->timestamp > vs->timestamp_max) { TEST_MV_WARN( - mv, - " %s [%"PRId32"] range check: " - "msgid #%d (at mi %d): " - "timestamp %"PRId64" outside " - "expected range %"PRId64"..%"PRId64"\n", - p ? p->topic : "*", - p ? p->partition : -1, - this->msgid, mi, - this->timestamp, - vs->timestamp_min, vs->timestamp_max); + mv, + " %s [%" PRId32 + "] range check: " + "msgid #%d (at mi %d): " + "timestamp %" PRId64 + " outside " + "expected range %" PRId64 "..%" PRId64 "\n", + p ? p->topic : "*", p ? 
p->partition : -1, + this->msgid, mi, this->timestamp, + vs->timestamp_min, vs->timestamp_max); fails++; } } @@ -3523,39 +3627,38 @@ static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, if ((flags & TEST_MSGVER_BY_BROKER_ID) && this->broker_id != vs->broker_id) { TEST_MV_WARN( - mv, - " %s [%"PRId32"] range check: " - "msgid #%d (at mi %d): " - "expected broker id %"PRId32", not %"PRId32"\n", - p ? p->topic : "*", - p ? p->partition : -1, - this->msgid, mi, - vs->broker_id, this->broker_id); - fails++; + mv, + " %s [%" PRId32 + "] range check: " + "msgid #%d (at mi %d): " + "expected broker id %" PRId32 ", not %" PRId32 "\n", + p ? p->topic : "*", p ? p->partition : -1, + this->msgid, mi, vs->broker_id, this->broker_id); + fails++; } if (cnt++ == 0) { if (this->msgid != vs->msgid_min) { TEST_MV_WARN(mv, - " %s [%"PRId32"] range check: " + " %s [%" PRId32 + "] range check: " "first message #%d (at mi %d) " "is not first in " "expected range %d..%d\n", p ? p->topic : "*", - p ? p->partition : -1, - this->msgid, mi, - vs->msgid_min, vs->msgid_max); + p ? p->partition : -1, this->msgid, + mi, vs->msgid_min, vs->msgid_max); fails++; } } else if (cnt > exp_cnt) { TEST_MV_WARN(mv, - " %s [%"PRId32"] range check: " + " %s [%" PRId32 + "] range check: " "too many messages received (%d/%d) at " "msgid %d for expected range %d..%d\n", - p ? p->topic : "*", - p ? p->partition : -1, - cnt, exp_cnt, this->msgid, - vs->msgid_min, vs->msgid_max); + p ? p->topic : "*", p ? p->partition : -1, + cnt, exp_cnt, this->msgid, vs->msgid_min, + vs->msgid_max); fails++; } @@ -3565,13 +3668,14 @@ static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, } if (prev->msgid + 1 != this->msgid) { - TEST_MV_WARN(mv, " %s [%"PRId32"] range check: " + TEST_MV_WARN(mv, + " %s [%" PRId32 + "] range check: " " %d message(s) missing between " "msgid %d..%d in expected range %d..%d\n", - p ? p->topic : "*", - p ? p->partition : -1, + p ? p->topic : "*", p ? 
p->partition : -1, this->msgid - prev->msgid - 1, - prev->msgid+1, this->msgid-1, + prev->msgid + 1, this->msgid - 1, vs->msgid_min, vs->msgid_max); fails++; } @@ -3579,13 +3683,12 @@ static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, if (cnt != exp_cnt) { TEST_MV_WARN(mv, - " %s [%"PRId32"] range check: " + " %s [%" PRId32 + "] range check: " " wrong number of messages seen, wanted %d got %d " "in expected range %d..%d (%d messages skipped)\n", - p ? p->topic : "*", - p ? p->partition : -1, - exp_cnt, cnt, vs->msgid_min, vs->msgid_max, - skip_cnt); + p ? p->topic : "*", p ? p->partition : -1, exp_cnt, + cnt, vs->msgid_min, vs->msgid_max, skip_cnt); fails++; } @@ -3597,48 +3700,48 @@ static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, /** * Run verifier \p f for all partitions. */ -#define test_mv_p_verify_f(mv,flags,f,vs) \ - test_mv_p_verify_f0(mv,flags,f, # f, vs) -static int test_mv_p_verify_f0 (test_msgver_t *mv, int flags, - int (*f) (test_msgver_t *mv, - int flags, - struct test_mv_p *p, - struct test_mv_mvec *mvec, - struct test_mv_vs *vs), - const char *f_name, - struct test_mv_vs *vs) { - int i; - int fails = 0; - - for (i = 0 ; i < mv->p_cnt ; i++) { - TEST_SAY("Verifying %s [%"PRId32"] %d msgs with %s\n", - mv->p[i]->topic, mv->p[i]->partition, - mv->p[i]->mvec.cnt, f_name); - fails += f(mv, flags, mv->p[i], &mv->p[i]->mvec, vs); - } - - return fails; +#define test_mv_p_verify_f(mv, flags, f, vs) \ + test_mv_p_verify_f0(mv, flags, f, #f, vs) +static int test_mv_p_verify_f0(test_msgver_t *mv, + int flags, + int (*f)(test_msgver_t *mv, + int flags, + struct test_mv_p *p, + struct test_mv_mvec *mvec, + struct test_mv_vs *vs), + const char *f_name, + struct test_mv_vs *vs) { + int i; + int fails = 0; + + for (i = 0; i < mv->p_cnt; i++) { + TEST_SAY("Verifying %s [%" PRId32 "] %d msgs with %s\n", + mv->p[i]->topic, mv->p[i]->partition, + mv->p[i]->mvec.cnt, f_name); + fails += f(mv, flags, mv->p[i], &mv->p[i]->mvec, vs); + } 
+ + return fails; } /** * Collect all messages from all topics and partitions into vs->mvec */ -static void test_mv_collect_all_msgs (test_msgver_t *mv, - struct test_mv_vs *vs) { - int i; +static void test_mv_collect_all_msgs(test_msgver_t *mv, struct test_mv_vs *vs) { + int i; - for (i = 0 ; i < mv->p_cnt ; i++) { - struct test_mv_p *p = mv->p[i]; - int mi; + for (i = 0; i < mv->p_cnt; i++) { + struct test_mv_p *p = mv->p[i]; + int mi; - test_mv_mvec_reserve(&vs->mvec, p->mvec.cnt); - for (mi = 0 ; mi < p->mvec.cnt ; mi++) { - struct test_mv_m *m = test_mv_mvec_get(&p->mvec, mi); - struct test_mv_m *m_new = test_mv_mvec_add(&vs->mvec); - *m_new = *m; - } - } + test_mv_mvec_reserve(&vs->mvec, p->mvec.cnt); + for (mi = 0; mi < p->mvec.cnt; mi++) { + struct test_mv_m *m = test_mv_mvec_get(&p->mvec, mi); + struct test_mv_m *m_new = test_mv_mvec_add(&vs->mvec); + *m_new = *m; + } + } } @@ -3647,29 +3750,29 @@ static void test_mv_collect_all_msgs (test_msgver_t *mv, * and received only once. * This works across all partitions. */ -static int test_msgver_verify_range (test_msgver_t *mv, int flags, - struct test_mv_vs *vs) { - int fails = 0; +static int +test_msgver_verify_range(test_msgver_t *mv, int flags, struct test_mv_vs *vs) { + int fails = 0; + + /** + * Create temporary array to hold expected message set, + * then traverse all topics and partitions and move matching messages + * to that set. Then verify the message set. + */ - /** - * Create temporary array to hold expected message set, - * then traverse all topics and partitions and move matching messages - * to that set. Then verify the message set. 
- */ + test_mv_mvec_init(&vs->mvec, vs->exp_cnt); - test_mv_mvec_init(&vs->mvec, vs->exp_cnt); + /* Collect all msgs into vs mvec */ + test_mv_collect_all_msgs(mv, vs); - /* Collect all msgs into vs mvec */ - test_mv_collect_all_msgs(mv, vs); - - fails += test_mv_mvec_verify_range(mv, TEST_MSGVER_BY_MSGID|flags, - NULL, &vs->mvec, vs); - fails += test_mv_mvec_verify_dup(mv, TEST_MSGVER_BY_MSGID|flags, - NULL, &vs->mvec, vs); + fails += test_mv_mvec_verify_range(mv, TEST_MSGVER_BY_MSGID | flags, + NULL, &vs->mvec, vs); + fails += test_mv_mvec_verify_dup(mv, TEST_MSGVER_BY_MSGID | flags, NULL, + &vs->mvec, vs); - test_mv_mvec_clear(&vs->mvec); + test_mv_mvec_clear(&vs->mvec); - return fails; + return fails; } @@ -3677,189 +3780,202 @@ static int test_msgver_verify_range (test_msgver_t *mv, int flags, * Verify that \p exp_cnt messages were received for \p topic and \p partition * starting at msgid base \p msg_base. */ -int test_msgver_verify_part0 (const char *func, int line, const char *what, - test_msgver_t *mv, int flags, - const char *topic, int partition, - int msg_base, int exp_cnt) { - int fails = 0; - struct test_mv_vs vs = { .msg_base = msg_base, .exp_cnt = exp_cnt }; - struct test_mv_p *p; - - TEST_SAY("%s:%d: %s: Verifying %d received messages (flags 0x%x) " - "in %s [%d]: expecting msgids %d..%d (%d)\n", - func, line, what, mv->msgcnt, flags, topic, partition, - msg_base, msg_base+exp_cnt, exp_cnt); - - p = test_msgver_p_get(mv, topic, partition, 0); - - /* Per-partition checks */ - if (flags & TEST_MSGVER_ORDER) - fails += test_mv_mvec_verify_order(mv, flags, p, &p->mvec, &vs); - if (flags & TEST_MSGVER_DUP) - fails += test_mv_mvec_verify_dup(mv, flags, p, &p->mvec, &vs); - - if (mv->msgcnt < vs.exp_cnt) { - TEST_MV_WARN(mv, - "%s:%d: " - "%s [%"PRId32"] expected %d messages but only " - "%d received\n", - func, line, - p ? p->topic : "*", - p ? 
p->partition : -1, - vs.exp_cnt, mv->msgcnt); - fails++; - } - - - if (mv->log_suppr_cnt > 0) - TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n", - func, line, what, mv->log_suppr_cnt); - - if (fails) - TEST_FAIL("%s:%d: %s: Verification of %d received messages " - "failed: " - "expected msgids %d..%d (%d): see previous errors\n", - func, line, what, - mv->msgcnt, msg_base, msg_base+exp_cnt, exp_cnt); - else - TEST_SAY("%s:%d: %s: Verification of %d received messages " - "succeeded: " - "expected msgids %d..%d (%d)\n", - func, line, what, - mv->msgcnt, msg_base, msg_base+exp_cnt, exp_cnt); - - return fails; +int test_msgver_verify_part0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + int flags, + const char *topic, + int partition, + int msg_base, + int exp_cnt) { + int fails = 0; + struct test_mv_vs vs = {.msg_base = msg_base, .exp_cnt = exp_cnt}; + struct test_mv_p *p; + + TEST_SAY( + "%s:%d: %s: Verifying %d received messages (flags 0x%x) " + "in %s [%d]: expecting msgids %d..%d (%d)\n", + func, line, what, mv->msgcnt, flags, topic, partition, msg_base, + msg_base + exp_cnt, exp_cnt); + p = test_msgver_p_get(mv, topic, partition, 0); + + /* Per-partition checks */ + if (flags & TEST_MSGVER_ORDER) + fails += test_mv_mvec_verify_order(mv, flags, p, &p->mvec, &vs); + if (flags & TEST_MSGVER_DUP) + fails += test_mv_mvec_verify_dup(mv, flags, p, &p->mvec, &vs); + + if (mv->msgcnt < vs.exp_cnt) { + TEST_MV_WARN(mv, + "%s:%d: " + "%s [%" PRId32 + "] expected %d messages but only " + "%d received\n", + func, line, p ? p->topic : "*", + p ? 
p->partition : -1, vs.exp_cnt, mv->msgcnt); + fails++; + } + + + if (mv->log_suppr_cnt > 0) + TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n", + func, line, what, mv->log_suppr_cnt); + + if (fails) + TEST_FAIL( + "%s:%d: %s: Verification of %d received messages " + "failed: " + "expected msgids %d..%d (%d): see previous errors\n", + func, line, what, mv->msgcnt, msg_base, msg_base + exp_cnt, + exp_cnt); + else + TEST_SAY( + "%s:%d: %s: Verification of %d received messages " + "succeeded: " + "expected msgids %d..%d (%d)\n", + func, line, what, mv->msgcnt, msg_base, msg_base + exp_cnt, + exp_cnt); + + return fails; } /** * Verify that \p exp_cnt messages were received starting at * msgid base \p msg_base. */ -int test_msgver_verify0 (const char *func, int line, const char *what, - test_msgver_t *mv, - int flags, struct test_mv_vs vs) { - int fails = 0; - - TEST_SAY("%s:%d: %s: Verifying %d received messages (flags 0x%x): " - "expecting msgids %d..%d (%d)\n", - func, line, what, mv->msgcnt, flags, - vs.msg_base, vs.msg_base+vs.exp_cnt, vs.exp_cnt); +int test_msgver_verify0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + int flags, + struct test_mv_vs vs) { + int fails = 0; + + TEST_SAY( + "%s:%d: %s: Verifying %d received messages (flags 0x%x): " + "expecting msgids %d..%d (%d)\n", + func, line, what, mv->msgcnt, flags, vs.msg_base, + vs.msg_base + vs.exp_cnt, vs.exp_cnt); if (flags & TEST_MSGVER_BY_TIMESTAMP) { assert((flags & TEST_MSGVER_BY_MSGID)); /* Required */ - TEST_SAY("%s:%d: %s: " - " and expecting timestamps %"PRId64"..%"PRId64"\n", - func, line, what, - vs.timestamp_min, vs.timestamp_max); + TEST_SAY( + "%s:%d: %s: " + " and expecting timestamps %" PRId64 "..%" PRId64 "\n", + func, line, what, vs.timestamp_min, vs.timestamp_max); } - /* Per-partition checks */ - if (flags & TEST_MSGVER_ORDER) - fails += test_mv_p_verify_f(mv, flags, - test_mv_mvec_verify_order, &vs); - if (flags & TEST_MSGVER_DUP) - fails += 
test_mv_p_verify_f(mv, flags, - test_mv_mvec_verify_dup, &vs); - - /* Checks across all partitions */ - if ((flags & TEST_MSGVER_RANGE) && vs.exp_cnt > 0) { - vs.msgid_min = vs.msg_base; - vs.msgid_max = vs.msgid_min + vs.exp_cnt - 1; - fails += test_msgver_verify_range(mv, flags, &vs); - } - - if (mv->log_suppr_cnt > 0) - TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n", - func, line, what, mv->log_suppr_cnt); - - if (vs.exp_cnt != mv->msgcnt) { + /* Per-partition checks */ + if (flags & TEST_MSGVER_ORDER) + fails += test_mv_p_verify_f(mv, flags, + test_mv_mvec_verify_order, &vs); + if (flags & TEST_MSGVER_DUP) + fails += + test_mv_p_verify_f(mv, flags, test_mv_mvec_verify_dup, &vs); + + /* Checks across all partitions */ + if ((flags & TEST_MSGVER_RANGE) && vs.exp_cnt > 0) { + vs.msgid_min = vs.msg_base; + vs.msgid_max = vs.msgid_min + vs.exp_cnt - 1; + fails += test_msgver_verify_range(mv, flags, &vs); + } + + if (mv->log_suppr_cnt > 0) + TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n", + func, line, what, mv->log_suppr_cnt); + + if (vs.exp_cnt != mv->msgcnt) { if (!(flags & TEST_MSGVER_SUBSET)) { TEST_WARN("%s:%d: %s: expected %d messages, got %d\n", func, line, what, vs.exp_cnt, mv->msgcnt); fails++; } - } + } - if (fails) - TEST_FAIL("%s:%d: %s: Verification of %d received messages " - "failed: " - "expected msgids %d..%d (%d): see previous errors\n", - func, line, what, - mv->msgcnt, vs.msg_base, vs.msg_base+vs.exp_cnt, - vs.exp_cnt); - else - TEST_SAY("%s:%d: %s: Verification of %d received messages " - "succeeded: " - "expected msgids %d..%d (%d)\n", - func, line, what, - mv->msgcnt, vs.msg_base, vs.msg_base+vs.exp_cnt, - vs.exp_cnt); + if (fails) + TEST_FAIL( + "%s:%d: %s: Verification of %d received messages " + "failed: " + "expected msgids %d..%d (%d): see previous errors\n", + func, line, what, mv->msgcnt, vs.msg_base, + vs.msg_base + vs.exp_cnt, vs.exp_cnt); + else + TEST_SAY( + "%s:%d: %s: Verification of %d received messages 
" + "succeeded: " + "expected msgids %d..%d (%d)\n", + func, line, what, mv->msgcnt, vs.msg_base, + vs.msg_base + vs.exp_cnt, vs.exp_cnt); - return fails; + return fails; } +void test_verify_rkmessage0(const char *func, + int line, + rd_kafka_message_t *rkmessage, + uint64_t testid, + int32_t partition, + int msgnum) { + uint64_t in_testid; + int in_part; + int in_msgnum; + char buf[128]; -void test_verify_rkmessage0 (const char *func, int line, - rd_kafka_message_t *rkmessage, uint64_t testid, - int32_t partition, int msgnum) { - uint64_t in_testid; - int in_part; - int in_msgnum; - char buf[128]; - - rd_snprintf(buf, sizeof(buf), "%.*s", - (int)rkmessage->len, (char *)rkmessage->payload); + rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->len, + (char *)rkmessage->payload); - if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i\n", - &in_testid, &in_part, &in_msgnum) != 3) - TEST_FAIL("Incorrect format: %s", buf); + if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i\n", + &in_testid, &in_part, &in_msgnum) != 3) + TEST_FAIL("Incorrect format: %s", buf); - if (testid != in_testid || - (partition != -1 && partition != in_part) || - (msgnum != -1 && msgnum != in_msgnum) || - in_msgnum < 0) - goto fail_match; + if (testid != in_testid || (partition != -1 && partition != in_part) || + (msgnum != -1 && msgnum != in_msgnum) || in_msgnum < 0) + goto fail_match; - if (test_level > 2) { - TEST_SAY("%s:%i: Our testid %"PRIu64", part %i (%i), msg %i\n", - func, line, - testid, (int)partition, (int)rkmessage->partition, - msgnum); - } + if (test_level > 2) { + TEST_SAY("%s:%i: Our testid %" PRIu64 + ", part %i (%i), msg %i\n", + func, line, testid, (int)partition, + (int)rkmessage->partition, msgnum); + } return; fail_match: - TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i did " - "not match message: \"%s\"\n", - func, line, - testid, (int)partition, msgnum, buf); + TEST_FAIL("%s:%i: Our testid %" PRIu64 + ", part %i, msg %i did " + "not match message: 
\"%s\"\n", + func, line, testid, (int)partition, msgnum, buf); } /** * @brief Verify that \p mv is identical to \p corr according to flags. */ -void test_msgver_verify_compare0 (const char *func, int line, - const char *what, test_msgver_t *mv, - test_msgver_t *corr, int flags) { +void test_msgver_verify_compare0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + test_msgver_t *corr, + int flags) { struct test_mv_vs vs; int fails = 0; memset(&vs, 0, sizeof(vs)); - TEST_SAY("%s:%d: %s: Verifying %d received messages (flags 0x%x) by " - "comparison to correct msgver (%d messages)\n", - func, line, what, mv->msgcnt, flags, corr->msgcnt); + TEST_SAY( + "%s:%d: %s: Verifying %d received messages (flags 0x%x) by " + "comparison to correct msgver (%d messages)\n", + func, line, what, mv->msgcnt, flags, corr->msgcnt); vs.corr = corr; /* Per-partition checks */ - fails += test_mv_p_verify_f(mv, flags, - test_mv_mvec_verify_corr, &vs); + fails += test_mv_p_verify_f(mv, flags, test_mv_mvec_verify_corr, &vs); if (mv->log_suppr_cnt > 0) TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n", @@ -3874,73 +3990,73 @@ void test_msgver_verify_compare0 (const char *func, int line, } if (fails) - TEST_FAIL("%s:%d: %s: Verification of %d received messages " - "failed: expected %d messages: see previous errors\n", - func, line, what, - mv->msgcnt, corr->msgcnt); + TEST_FAIL( + "%s:%d: %s: Verification of %d received messages " + "failed: expected %d messages: see previous errors\n", + func, line, what, mv->msgcnt, corr->msgcnt); else - TEST_SAY("%s:%d: %s: Verification of %d received messages " - "succeeded: matching %d messages from correct msgver\n", - func, line, what, - mv->msgcnt, corr->msgcnt); - + TEST_SAY( + "%s:%d: %s: Verification of %d received messages " + "succeeded: matching %d messages from correct msgver\n", + func, line, what, mv->msgcnt, corr->msgcnt); } /** * Consumer poll but dont expect any proper messages for \p timeout_ms. 
*/ -void test_consumer_poll_no_msgs (const char *what, rd_kafka_t *rk, - uint64_t testid, int timeout_ms) { - int64_t tmout = test_clock() + timeout_ms * 1000; - int cnt = 0; +void test_consumer_poll_no_msgs(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int timeout_ms) { + int64_t tmout = test_clock() + timeout_ms * 1000; + int cnt = 0; test_timing_t t_cons; - test_msgver_t mv; + test_msgver_t mv; - test_msgver_init(&mv, testid); + test_msgver_init(&mv, testid); if (what) - TEST_SAY("%s: not expecting any messages for %dms\n", - what, timeout_ms); + TEST_SAY("%s: not expecting any messages for %dms\n", what, + timeout_ms); TIMING_START(&t_cons, "CONSUME"); - do { + do { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consumer_poll(rk, timeout_ms); if (!rkmessage) - continue; + continue; if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("%s [%"PRId32"] reached EOF at " - "offset %"PRId64"\n", + TEST_SAY("%s [%" PRId32 + "] reached EOF at " + "offset %" PRId64 "\n", rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset); + rkmessage->partition, rkmessage->offset); test_msgver_add_msg(rk, &mv, rkmessage); } else if (rkmessage->err) { - TEST_FAIL("%s [%"PRId32"] error (offset %"PRId64"): %s", - rkmessage->rkt ? - rd_kafka_topic_name(rkmessage->rkt) : - "(no-topic)", - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); + TEST_FAIL( + "%s [%" PRId32 "] error (offset %" PRId64 "): %s", + rkmessage->rkt ? 
rd_kafka_topic_name(rkmessage->rkt) + : "(no-topic)", + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); } else { if (test_msgver_add_msg(rk, &mv, rkmessage)) { - TEST_MV_WARN(&mv, - "Received unexpected message on " - "%s [%"PRId32"] at offset " - "%"PRId64"\n", - rd_kafka_topic_name(rkmessage-> - rkt), - rkmessage->partition, - rkmessage->offset); - cnt++; - } + TEST_MV_WARN( + &mv, + "Received unexpected message on " + "%s [%" PRId32 + "] at offset " + "%" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); + cnt++; + } } rd_kafka_message_destroy(rkmessage); @@ -3949,22 +4065,24 @@ void test_consumer_poll_no_msgs (const char *what, rd_kafka_t *rk, if (what) TIMING_STOP(&t_cons); - test_msgver_verify(what, &mv, TEST_MSGVER_ALL, 0, 0); - test_msgver_clear(&mv); + test_msgver_verify(what, &mv, TEST_MSGVER_ALL, 0, 0); + test_msgver_clear(&mv); - TEST_ASSERT(cnt == 0, "Expected 0 messages, got %d", cnt); + TEST_ASSERT(cnt == 0, "Expected 0 messages, got %d", cnt); } /** * @brief Consumer poll with expectation that a \p err will be reached * within \p timeout_ms. 
*/ -void test_consumer_poll_expect_err (rd_kafka_t *rk, uint64_t testid, - int timeout_ms, rd_kafka_resp_err_t err) { +void test_consumer_poll_expect_err(rd_kafka_t *rk, + uint64_t testid, + int timeout_ms, + rd_kafka_resp_err_t err) { int64_t tmout = test_clock() + timeout_ms * 1000; - TEST_SAY("%s: expecting error %s within %dms\n", - rd_kafka_name(rk), rd_kafka_err2name(err), timeout_ms); + TEST_SAY("%s: expecting error %s within %dms\n", rd_kafka_name(rk), + rd_kafka_err2name(err), timeout_ms); do { rd_kafka_message_t *rkmessage; @@ -3973,27 +4091,27 @@ void test_consumer_poll_expect_err (rd_kafka_t *rk, uint64_t testid, continue; if (rkmessage->err == err) { - TEST_SAY("Got expected error: %s: %s\n", + TEST_SAY("Got expected error: %s: %s\n", rd_kafka_err2name(rkmessage->err), rd_kafka_message_errstr(rkmessage)); rd_kafka_message_destroy(rkmessage); return; } else if (rkmessage->err) { - TEST_FAIL("%s [%"PRId32"] unexpected error " - "(offset %"PRId64"): %s", - rkmessage->rkt ? - rd_kafka_topic_name(rkmessage->rkt) : - "(no-topic)", - rkmessage->partition, - rkmessage->offset, - rd_kafka_err2name(rkmessage->err)); + TEST_FAIL("%s [%" PRId32 + "] unexpected error " + "(offset %" PRId64 "): %s", + rkmessage->rkt + ? rd_kafka_topic_name(rkmessage->rkt) + : "(no-topic)", + rkmessage->partition, rkmessage->offset, + rd_kafka_err2name(rkmessage->err)); } rd_kafka_message_destroy(rkmessage); } while (test_clock() <= tmout); - TEST_FAIL("Expected error %s not seen in %dms", - rd_kafka_err2name(err), timeout_ms); + TEST_FAIL("Expected error %s not seen in %dms", rd_kafka_err2name(err), + timeout_ms); } /** @@ -4006,40 +4124,38 @@ void test_consumer_poll_expect_err (rd_kafka_t *rk, uint64_t testid, * if EOF was reached. * TEST_FAIL()s on all errors. 
*/ -int test_consumer_poll_once (rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms){ - rd_kafka_message_t *rkmessage; - - rkmessage = rd_kafka_consumer_poll(rk, timeout_ms); - if (!rkmessage) - return 0; - - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("%s [%"PRId32"] reached EOF at " - "offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset); - if (mv) - test_msgver_add_msg(rk, mv, rkmessage); - rd_kafka_message_destroy(rkmessage); - return RD_KAFKA_RESP_ERR__PARTITION_EOF; - - } else if (rkmessage->err) { - TEST_FAIL("%s [%"PRId32"] error (offset %"PRId64"): %s", - rkmessage->rkt ? - rd_kafka_topic_name(rkmessage->rkt) : - "(no-topic)", - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); - - } else { - if (mv) - test_msgver_add_msg(rk, mv, rkmessage); - } - - rd_kafka_message_destroy(rkmessage); - return 1; +int test_consumer_poll_once(rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms) { + rd_kafka_message_t *rkmessage; + + rkmessage = rd_kafka_consumer_poll(rk, timeout_ms); + if (!rkmessage) + return 0; + + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + TEST_SAY("%s [%" PRId32 + "] reached EOF at " + "offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); + if (mv) + test_msgver_add_msg(rk, mv, rkmessage); + rd_kafka_message_destroy(rkmessage); + return RD_KAFKA_RESP_ERR__PARTITION_EOF; + + } else if (rkmessage->err) { + TEST_FAIL("%s [%" PRId32 "] error (offset %" PRId64 "): %s", + rkmessage->rkt ? 
rd_kafka_topic_name(rkmessage->rkt) + : "(no-topic)", + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); + + } else { + if (mv) + test_msgver_add_msg(rk, mv, rkmessage); + } + + rd_kafka_message_destroy(rkmessage); + return 1; } @@ -4047,64 +4163,65 @@ int test_consumer_poll_once (rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms){ * @param exact Require exact exp_eof_cnt (unless -1) and exp_cnt (unless -1). * If false: poll until either one is reached. */ -int test_consumer_poll_exact (const char *what, rd_kafka_t *rk, uint64_t testid, - int exp_eof_cnt, int exp_msg_base, int exp_cnt, - rd_bool_t exact, test_msgver_t *mv) { +int test_consumer_poll_exact(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + rd_bool_t exact, + test_msgver_t *mv) { int eof_cnt = 0; - int cnt = 0; + int cnt = 0; test_timing_t t_cons; - TEST_SAY("%s: consume %s%d messages\n", what, - exact ? "exactly ": "", exp_cnt); + TEST_SAY("%s: consume %s%d messages\n", what, exact ? 
"exactly " : "", + exp_cnt); TIMING_START(&t_cons, "CONSUME"); - while ((!exact && - ((exp_eof_cnt <= 0 || eof_cnt < exp_eof_cnt) && - (exp_cnt <= 0 || cnt < exp_cnt))) || - (exact && - (eof_cnt < exp_eof_cnt || - cnt < exp_cnt))) { + while ((!exact && ((exp_eof_cnt <= 0 || eof_cnt < exp_eof_cnt) && + (exp_cnt <= 0 || cnt < exp_cnt))) || + (exact && (eof_cnt < exp_eof_cnt || cnt < exp_cnt))) { rd_kafka_message_t *rkmessage; - rkmessage = rd_kafka_consumer_poll(rk, tmout_multip(10*1000)); + rkmessage = rd_kafka_consumer_poll(rk, tmout_multip(10 * 1000)); if (!rkmessage) /* Shouldn't take this long to get a msg */ - TEST_FAIL("%s: consumer_poll() timeout " - "(%d/%d eof, %d/%d msgs)\n", what, - eof_cnt, exp_eof_cnt, cnt, exp_cnt); + TEST_FAIL( + "%s: consumer_poll() timeout " + "(%d/%d eof, %d/%d msgs)\n", + what, eof_cnt, exp_eof_cnt, cnt, exp_cnt); if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("%s [%"PRId32"] reached EOF at " - "offset %"PRId64"\n", + TEST_SAY("%s [%" PRId32 + "] reached EOF at " + "offset %" PRId64 "\n", rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset); + rkmessage->partition, rkmessage->offset); TEST_ASSERT(exp_eof_cnt != 0, "expected no EOFs"); - if (mv) - test_msgver_add_msg(rk, mv, rkmessage); + if (mv) + test_msgver_add_msg(rk, mv, rkmessage); eof_cnt++; } else if (rkmessage->err) { - TEST_FAIL("%s [%"PRId32"] error (offset %"PRId64 - "): %s", - rkmessage->rkt ? - rd_kafka_topic_name(rkmessage->rkt) : - "(no-topic)", - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); + TEST_FAIL( + "%s [%" PRId32 "] error (offset %" PRId64 "): %s", + rkmessage->rkt ? 
rd_kafka_topic_name(rkmessage->rkt) + : "(no-topic)", + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); } else { - TEST_SAYL(4, "%s: consumed message on %s [%"PRId32"] " - "at offset %"PRId64"\n", - what, - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset); - - if (!mv || test_msgver_add_msg(rk, mv, rkmessage)) - cnt++; + TEST_SAYL(4, + "%s: consumed message on %s [%" PRId32 + "] " + "at offset %" PRId64 "\n", + what, rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); + + if (!mv || test_msgver_add_msg(rk, mv, rkmessage)) + cnt++; } rd_kafka_message_destroy(rkmessage); @@ -4112,12 +4229,11 @@ int test_consumer_poll_exact (const char *what, rd_kafka_t *rk, uint64_t testid, TIMING_STOP(&t_cons); - TEST_SAY("%s: consumed %d/%d messages (%d/%d EOFs)\n", - what, cnt, exp_cnt, eof_cnt, exp_eof_cnt); + TEST_SAY("%s: consumed %d/%d messages (%d/%d EOFs)\n", what, cnt, + exp_cnt, eof_cnt, exp_eof_cnt); - TEST_ASSERT(!exact || - ((exp_cnt == -1 || exp_cnt == cnt) && - (exp_eof_cnt == -1 || exp_eof_cnt == eof_cnt)), + TEST_ASSERT(!exact || ((exp_cnt == -1 || exp_cnt == cnt) && + (exp_eof_cnt == -1 || exp_eof_cnt == eof_cnt)), "%s: mismatch between exact expected counts and actual: " "%d/%d EOFs, %d/%d msgs", what, eof_cnt, exp_eof_cnt, cnt, exp_cnt); @@ -4131,15 +4247,19 @@ int test_consumer_poll_exact (const char *what, rd_kafka_t *rk, uint64_t testid, } -int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid, - int exp_eof_cnt, int exp_msg_base, int exp_cnt, - test_msgver_t *mv) { - return test_consumer_poll_exact(what, rk, testid, - exp_eof_cnt, exp_msg_base, exp_cnt, - rd_false/*not exact */, mv); +int test_consumer_poll(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + test_msgver_t *mv) { + return test_consumer_poll_exact(what, rk, testid, exp_eof_cnt, + exp_msg_base, exp_cnt, + rd_false /*not exact 
*/, mv); } -void test_consumer_close (rd_kafka_t *rk) { +void test_consumer_close(rd_kafka_t *rk) { rd_kafka_resp_err_t err; test_timing_t timing; @@ -4154,29 +4274,28 @@ void test_consumer_close (rd_kafka_t *rk) { } -void test_flush (rd_kafka_t *rk, int timeout_ms) { - test_timing_t timing; - rd_kafka_resp_err_t err; +void test_flush(rd_kafka_t *rk, int timeout_ms) { + test_timing_t timing; + rd_kafka_resp_err_t err; - TEST_SAY("%s: Flushing %d messages\n", - rd_kafka_name(rk), rd_kafka_outq_len(rk)); - TIMING_START(&timing, "FLUSH"); - err = rd_kafka_flush(rk, timeout_ms); - TIMING_STOP(&timing); - if (err) - TEST_FAIL("Failed to flush(%s, %d): %s: len() = %d\n", - rd_kafka_name(rk), timeout_ms, - rd_kafka_err2str(err), + TEST_SAY("%s: Flushing %d messages\n", rd_kafka_name(rk), + rd_kafka_outq_len(rk)); + TIMING_START(&timing, "FLUSH"); + err = rd_kafka_flush(rk, timeout_ms); + TIMING_STOP(&timing); + if (err) + TEST_FAIL("Failed to flush(%s, %d): %s: len() = %d\n", + rd_kafka_name(rk), timeout_ms, rd_kafka_err2str(err), rd_kafka_outq_len(rk)); } -void test_conf_set (rd_kafka_conf_t *conf, const char *name, const char *val) { +void test_conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { char errstr[512]; if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) - TEST_FAIL("Failed to set config \"%s\"=\"%s\": %s\n", - name, val, errstr); + TEST_FAIL("Failed to set config \"%s\"=\"%s\": %s\n", name, val, + errstr); } /** @@ -4185,16 +4304,16 @@ void test_conf_set (rd_kafka_conf_t *conf, const char *name, const char *val) { * @param conf Configuration to get value from. If NULL the test.conf (if any) * configuration will be used. 
*/ -char *test_conf_get (const rd_kafka_conf_t *conf, const char *name) { +char *test_conf_get(const rd_kafka_conf_t *conf, const char *name) { static RD_TLS char ret[256]; - size_t ret_sz = sizeof(ret); + size_t ret_sz = sizeof(ret); rd_kafka_conf_t *def_conf = NULL; if (!conf) /* Use the current test.conf */ test_conf_init(&def_conf, NULL, 0); - if (rd_kafka_conf_get(conf ? conf : def_conf, - name, ret, &ret_sz) != RD_KAFKA_CONF_OK) + if (rd_kafka_conf_get(conf ? conf : def_conf, name, ret, &ret_sz) != + RD_KAFKA_CONF_OK) TEST_FAIL("Failed to get config \"%s\": %s\n", name, "unknown property"); @@ -4205,8 +4324,8 @@ char *test_conf_get (const rd_kafka_conf_t *conf, const char *name) { } -char *test_topic_conf_get (const rd_kafka_topic_conf_t *tconf, - const char *name) { +char *test_topic_conf_get(const rd_kafka_topic_conf_t *tconf, + const char *name) { static RD_TLS char ret[256]; size_t ret_sz = sizeof(ret); if (rd_kafka_topic_conf_get(tconf, name, ret, &ret_sz) != @@ -4220,7 +4339,7 @@ char *test_topic_conf_get (const rd_kafka_topic_conf_t *tconf, /** * @brief Check if property \name matches \p val in \p conf. * If \p conf is NULL the test config will be used. */ -int test_conf_match (rd_kafka_conf_t *conf, const char *name, const char *val) { +int test_conf_match(rd_kafka_conf_t *conf, const char *name, const char *val) { char *real; int free_conf = 0; @@ -4238,8 +4357,9 @@ int test_conf_match (rd_kafka_conf_t *conf, const char *name, const char *val) { } -void test_topic_conf_set (rd_kafka_topic_conf_t *tconf, - const char *name, const char *val) { +void test_topic_conf_set(rd_kafka_topic_conf_t *tconf, + const char *name, + const char *val) { char errstr[512]; if (rd_kafka_topic_conf_set(tconf, name, val, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) @@ -4250,22 +4370,23 @@ void test_topic_conf_set (rd_kafka_topic_conf_t *tconf, /** * @brief First attempt to set topic level property, then global. 
*/ -void test_any_conf_set (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf, - const char *name, const char *val) { +void test_any_conf_set(rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf, + const char *name, + const char *val) { rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN; - char errstr[512] = {"Missing conf_t"}; + char errstr[512] = {"Missing conf_t"}; if (tconf) - res = rd_kafka_topic_conf_set(tconf, name, val, - errstr, sizeof(errstr)); + res = rd_kafka_topic_conf_set(tconf, name, val, errstr, + sizeof(errstr)); if (res == RD_KAFKA_CONF_UNKNOWN && conf) - res = rd_kafka_conf_set(conf, name, val, - errstr, sizeof(errstr)); + res = + rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)); if (res != RD_KAFKA_CONF_OK) - TEST_FAIL("Failed to set any config \"%s\"=\"%s\": %s\n", - name, val, errstr); + TEST_FAIL("Failed to set any config \"%s\"=\"%s\": %s\n", name, + val, errstr); } @@ -4273,7 +4394,7 @@ void test_any_conf_set (rd_kafka_conf_t *conf, * @returns true if test clients need to be configured for authentication * or other security measures (SSL), else false for unauthed plaintext. */ -int test_needs_auth (void) { +int test_needs_auth(void) { rd_kafka_conf_t *conf; const char *sec; @@ -4287,17 +4408,18 @@ int test_needs_auth (void) { } -void test_print_partition_list (const rd_kafka_topic_partition_list_t - *partitions) { +void test_print_partition_list( + const rd_kafka_topic_partition_list_t *partitions) { int i; - for (i = 0 ; i < partitions->cnt ; i++) { - TEST_SAY(" %s [%"PRId32"] offset %"PRId64"%s%s\n", - partitions->elems[i].topic, - partitions->elems[i].partition, - partitions->elems[i].offset, - partitions->elems[i].err ? ": " : "", - partitions->elems[i].err ? 
- rd_kafka_err2str(partitions->elems[i].err) : ""); + for (i = 0; i < partitions->cnt; i++) { + TEST_SAY(" %s [%" PRId32 "] offset %" PRId64 "%s%s\n", + partitions->elems[i].topic, + partitions->elems[i].partition, + partitions->elems[i].offset, + partitions->elems[i].err ? ": " : "", + partitions->elems[i].err + ? rd_kafka_err2str(partitions->elems[i].err) + : ""); } } @@ -4306,8 +4428,8 @@ void test_print_partition_list (const rd_kafka_topic_partition_list_t * * @remark The lists may be sorted by this function. */ -int test_partition_list_cmp (rd_kafka_topic_partition_list_t *al, - rd_kafka_topic_partition_list_t *bl) { +int test_partition_list_cmp(rd_kafka_topic_partition_list_t *al, + rd_kafka_topic_partition_list_t *bl) { int i; if (al->cnt < bl->cnt) @@ -4320,11 +4442,10 @@ int test_partition_list_cmp (rd_kafka_topic_partition_list_t *al, rd_kafka_topic_partition_list_sort(al, NULL, NULL); rd_kafka_topic_partition_list_sort(bl, NULL, NULL); - for (i = 0 ; i < al->cnt ; i++) { + for (i = 0; i < al->cnt; i++) { const rd_kafka_topic_partition_t *a = &al->elems[i]; const rd_kafka_topic_partition_t *b = &bl->elems[i]; - if (a->partition != b->partition || - strcmp(a->topic, b->topic)) + if (a->partition != b->partition || strcmp(a->topic, b->topic)) return -1; } @@ -4335,87 +4456,85 @@ int test_partition_list_cmp (rd_kafka_topic_partition_list_t *al, /** * @brief Execute script from the Kafka distribution bin/ path. */ -void test_kafka_cmd (const char *fmt, ...) { +void test_kafka_cmd(const char *fmt, ...) 
{ #ifdef _WIN32 - TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__); + TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__); #else - char cmd[1024]; - int r; - va_list ap; - test_timing_t t_cmd; - const char *kpath; - - kpath = test_getenv("KAFKA_PATH", NULL); - - if (!kpath) - TEST_FAIL("%s: KAFKA_PATH must be set", - __FUNCTION__); - - r = rd_snprintf(cmd, sizeof(cmd), - "%s/bin/", kpath); - TEST_ASSERT(r < (int)sizeof(cmd)); - - va_start(ap, fmt); - rd_vsnprintf(cmd+r, sizeof(cmd)-r, fmt, ap); - va_end(ap); - - TEST_SAY("Executing: %s\n", cmd); - TIMING_START(&t_cmd, "exec"); - r = system(cmd); - TIMING_STOP(&t_cmd); - - if (r == -1) - TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno)); - else if (WIFSIGNALED(r)) - TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd, - WTERMSIG(r)); - else if (WEXITSTATUS(r)) - TEST_FAIL("system(\"%s\") failed with exit status %d\n", - cmd, WEXITSTATUS(r)); + char cmd[1024]; + int r; + va_list ap; + test_timing_t t_cmd; + const char *kpath; + + kpath = test_getenv("KAFKA_PATH", NULL); + + if (!kpath) + TEST_FAIL("%s: KAFKA_PATH must be set", __FUNCTION__); + + r = rd_snprintf(cmd, sizeof(cmd), "%s/bin/", kpath); + TEST_ASSERT(r < (int)sizeof(cmd)); + + va_start(ap, fmt); + rd_vsnprintf(cmd + r, sizeof(cmd) - r, fmt, ap); + va_end(ap); + + TEST_SAY("Executing: %s\n", cmd); + TIMING_START(&t_cmd, "exec"); + r = system(cmd); + TIMING_STOP(&t_cmd); + + if (r == -1) + TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno)); + else if (WIFSIGNALED(r)) + TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd, + WTERMSIG(r)); + else if (WEXITSTATUS(r)) + TEST_FAIL("system(\"%s\") failed with exit status %d\n", cmd, + WEXITSTATUS(r)); #endif } /** * @brief Execute kafka-topics.sh from the Kafka distribution. */ -void test_kafka_topics (const char *fmt, ...) { +void test_kafka_topics(const char *fmt, ...) 
{ #ifdef _WIN32 - TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__); + TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__); #else - char cmd[512]; - int r; - va_list ap; - test_timing_t t_cmd; - const char *kpath, *zk; - - kpath = test_getenv("KAFKA_PATH", NULL); - zk = test_getenv("ZK_ADDRESS", NULL); - - if (!kpath || !zk) - TEST_FAIL("%s: KAFKA_PATH and ZK_ADDRESS must be set", - __FUNCTION__); - - r = rd_snprintf(cmd, sizeof(cmd), - "%s/bin/kafka-topics.sh --zookeeper %s ", kpath, zk); - TEST_ASSERT(r < (int)sizeof(cmd)); - - va_start(ap, fmt); - rd_vsnprintf(cmd+r, sizeof(cmd)-r, fmt, ap); - va_end(ap); - - TEST_SAY("Executing: %s\n", cmd); - TIMING_START(&t_cmd, "exec"); - r = system(cmd); - TIMING_STOP(&t_cmd); - - if (r == -1) - TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno)); - else if (WIFSIGNALED(r)) - TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd, - WTERMSIG(r)); - else if (WEXITSTATUS(r)) - TEST_FAIL("system(\"%s\") failed with exit status %d\n", - cmd, WEXITSTATUS(r)); + char cmd[512]; + int r; + va_list ap; + test_timing_t t_cmd; + const char *kpath, *zk; + + kpath = test_getenv("KAFKA_PATH", NULL); + zk = test_getenv("ZK_ADDRESS", NULL); + + if (!kpath || !zk) + TEST_FAIL("%s: KAFKA_PATH and ZK_ADDRESS must be set", + __FUNCTION__); + + r = rd_snprintf(cmd, sizeof(cmd), + "%s/bin/kafka-topics.sh --zookeeper %s ", kpath, zk); + TEST_ASSERT(r < (int)sizeof(cmd)); + + va_start(ap, fmt); + rd_vsnprintf(cmd + r, sizeof(cmd) - r, fmt, ap); + va_end(ap); + + TEST_SAY("Executing: %s\n", cmd); + TIMING_START(&t_cmd, "exec"); + r = system(cmd); + TIMING_STOP(&t_cmd); + + if (r == -1) + TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno)); + else if (WIFSIGNALED(r)) + TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd, + WTERMSIG(r)); + else if (WEXITSTATUS(r)) + TEST_FAIL("system(\"%s\") failed with exit status %d\n", cmd, + WEXITSTATUS(r)); #endif } @@ -4424,9 +4543,10 @@ void test_kafka_topics (const 
char *fmt, ...) { /** * @brief Create topic using Topic Admin API */ -static void test_admin_create_topic (rd_kafka_t *use_rk, - const char *topicname, int partition_cnt, - int replication_factor) { +static void test_admin_create_topic(rd_kafka_t *use_rk, + const char *topicname, + int partition_cnt, + int replication_factor) { rd_kafka_t *rk; rd_kafka_NewTopic_t *newt[1]; const size_t newt_cnt = 1; @@ -4446,20 +4566,20 @@ static void test_admin_create_topic (rd_kafka_t *use_rk, rkqu = rd_kafka_queue_new(rk); - newt[0] = rd_kafka_NewTopic_new(topicname, partition_cnt, - replication_factor, - errstr, sizeof(errstr)); + newt[0] = + rd_kafka_NewTopic_new(topicname, partition_cnt, replication_factor, + errstr, sizeof(errstr)); TEST_ASSERT(newt[0] != NULL, "%s", errstr); options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS); - err = rd_kafka_AdminOptions_set_operation_timeout(options, timeout_ms, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, timeout_ms, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", errstr); - TEST_SAY("Creating topic \"%s\" " - "(partitions=%d, replication_factor=%d, timeout=%d)\n", - topicname, partition_cnt, replication_factor, timeout_ms); + TEST_SAY( + "Creating topic \"%s\" " + "(partitions=%d, replication_factor=%d, timeout=%d)\n", + topicname, partition_cnt, replication_factor, timeout_ms); TIMING_START(&t_create, "CreateTopics"); rd_kafka_CreateTopics(rk, newt, newt_cnt, options, rkqu); @@ -4470,8 +4590,7 @@ static void test_admin_create_topic (rd_kafka_t *use_rk, TIMING_STOP(&t_create); - TEST_ASSERT(!rd_kafka_event_error(rkev), - "CreateTopics failed: %s", + TEST_ASSERT(!rd_kafka_event_error(rkev), "CreateTopics failed: %s", rd_kafka_event_error_string(rkev)); res = rd_kafka_event_CreateTopics_result(rkev); @@ -4481,13 +4600,14 @@ static void test_admin_create_topic (rd_kafka_t *use_rk, terr = rd_kafka_CreateTopics_result_topics(res, &res_cnt); TEST_ASSERT(terr, 
"CreateTopics_result_topics returned NULL"); TEST_ASSERT(res_cnt == newt_cnt, - "CreateTopics_result_topics returned %"PRIusz" topics, " - "not the expected %"PRIusz, + "CreateTopics_result_topics returned %" PRIusz + " topics, " + "not the expected %" PRIusz, res_cnt, newt_cnt); TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]) || - rd_kafka_topic_result_error(terr[0]) == - RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS, + rd_kafka_topic_result_error(terr[0]) == + RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS, "Topic %s result error: %s", rd_kafka_topic_result_name(terr[0]), rd_kafka_topic_result_error_string(terr[0])); @@ -4506,25 +4626,27 @@ static void test_admin_create_topic (rd_kafka_t *use_rk, - /** * @brief Create topic using kafka-topics.sh --create */ -static void test_create_topic_sh (const char *topicname, int partition_cnt, - int replication_factor) { - test_kafka_topics("--create --topic \"%s\" " - "--replication-factor %d --partitions %d", - topicname, replication_factor, partition_cnt); +static void test_create_topic_sh(const char *topicname, + int partition_cnt, + int replication_factor) { + test_kafka_topics( + "--create --topic \"%s\" " + "--replication-factor %d --partitions %d", + topicname, replication_factor, partition_cnt); } /** * @brief Create topic */ -void test_create_topic (rd_kafka_t *use_rk, - const char *topicname, int partition_cnt, - int replication_factor) { - if (test_broker_version < TEST_BRKVER(0,10,2,0)) +void test_create_topic(rd_kafka_t *use_rk, + const char *topicname, + int partition_cnt, + int replication_factor) { + if (test_broker_version < TEST_BRKVER(0, 10, 2, 0)) test_create_topic_sh(topicname, partition_cnt, replication_factor); else @@ -4536,16 +4658,15 @@ void test_create_topic (rd_kafka_t *use_rk, /** * @brief Create topic using kafka-topics.sh --delete */ -static void test_delete_topic_sh (const char *topicname) { - test_kafka_topics("--delete --topic \"%s\" ", topicname); +static void test_delete_topic_sh(const char 
*topicname) { + test_kafka_topics("--delete --topic \"%s\" ", topicname); } /** * @brief Delete topic using Topic Admin API */ -static void test_admin_delete_topic (rd_kafka_t *use_rk, - const char *topicname) { +static void test_admin_delete_topic(rd_kafka_t *use_rk, const char *topicname) { rd_kafka_t *rk; rd_kafka_DeleteTopic_t *delt[1]; const size_t delt_cnt = 1; @@ -4568,14 +4689,14 @@ static void test_admin_delete_topic (rd_kafka_t *use_rk, delt[0] = rd_kafka_DeleteTopic_new(topicname); options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS); - err = rd_kafka_AdminOptions_set_operation_timeout(options, timeout_ms, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, timeout_ms, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", errstr); - TEST_SAY("Deleting topic \"%s\" " - "(timeout=%d)\n", - topicname, timeout_ms); + TEST_SAY( + "Deleting topic \"%s\" " + "(timeout=%d)\n", + topicname, timeout_ms); TIMING_START(&t_create, "DeleteTopics"); rd_kafka_DeleteTopics(rk, delt, delt_cnt, options, rkqu); @@ -4593,8 +4714,9 @@ static void test_admin_delete_topic (rd_kafka_t *use_rk, terr = rd_kafka_DeleteTopics_result_topics(res, &res_cnt); TEST_ASSERT(terr, "DeleteTopics_result_topics returned NULL"); TEST_ASSERT(res_cnt == delt_cnt, - "DeleteTopics_result_topics returned %"PRIusz" topics, " - "not the expected %"PRIusz, + "DeleteTopics_result_topics returned %" PRIusz + " topics, " + "not the expected %" PRIusz, res_cnt, delt_cnt); TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]), @@ -4618,8 +4740,8 @@ static void test_admin_delete_topic (rd_kafka_t *use_rk, /** * @brief Delete a topic */ -void test_delete_topic (rd_kafka_t *use_rk, const char *topicname) { - if (test_broker_version < TEST_BRKVER(0,10,2,0)) +void test_delete_topic(rd_kafka_t *use_rk, const char *topicname) { + if (test_broker_version < TEST_BRKVER(0, 10, 2, 0)) test_delete_topic_sh(topicname); else test_admin_delete_topic(use_rk, topicname); @@ 
-4629,9 +4751,9 @@ void test_delete_topic (rd_kafka_t *use_rk, const char *topicname) { /** * @brief Create additional partitions for a topic using Admin API */ -static void test_admin_create_partitions (rd_kafka_t *use_rk, - const char *topicname, - int new_partition_cnt) { +static void test_admin_create_partitions(rd_kafka_t *use_rk, + const char *topicname, + int new_partition_cnt) { rd_kafka_t *rk; rd_kafka_NewPartitions_t *newp[1]; const size_t newp_cnt = 1; @@ -4655,11 +4777,10 @@ static void test_admin_create_partitions (rd_kafka_t *use_rk, errstr, sizeof(errstr)); TEST_ASSERT(newp[0] != NULL, "%s", errstr); - options = rd_kafka_AdminOptions_new(rk, - RD_KAFKA_ADMIN_OP_CREATEPARTITIONS); - err = rd_kafka_AdminOptions_set_operation_timeout(options, timeout_ms, - errstr, - sizeof(errstr)); + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEPARTITIONS); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, timeout_ms, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", errstr); TEST_SAY("Creating %d (total) partitions for topic \"%s\"\n", @@ -4681,8 +4802,8 @@ static void test_admin_create_partitions (rd_kafka_t *use_rk, terr = rd_kafka_CreatePartitions_result_topics(res, &res_cnt); TEST_ASSERT(terr, "CreatePartitions_result_topics returned NULL"); TEST_ASSERT(res_cnt == newp_cnt, - "CreatePartitions_result_topics returned %"PRIusz - " topics, not the expected %"PRIusz, + "CreatePartitions_result_topics returned %" PRIusz + " topics, not the expected %" PRIusz, res_cnt, newp_cnt); TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]), @@ -4706,9 +4827,10 @@ static void test_admin_create_partitions (rd_kafka_t *use_rk, /** * @brief Create partitions for topic */ -void test_create_partitions (rd_kafka_t *use_rk, - const char *topicname, int new_partition_cnt) { - if (test_broker_version < TEST_BRKVER(0,10,2,0)) +void test_create_partitions(rd_kafka_t *use_rk, + const char *topicname, + int new_partition_cnt) { + if (test_broker_version < 
TEST_BRKVER(0, 10, 2, 0)) test_kafka_topics("--alter --topic %s --partitions %d", topicname, new_partition_cnt); else @@ -4717,13 +4839,14 @@ void test_create_partitions (rd_kafka_t *use_rk, } -int test_get_partition_count (rd_kafka_t *rk, const char *topicname, - int timeout_ms) { +int test_get_partition_count(rd_kafka_t *rk, + const char *topicname, + int timeout_ms) { rd_kafka_t *use_rk; rd_kafka_resp_err_t err; rd_kafka_topic_t *rkt; int64_t abs_timeout = test_clock() + (timeout_ms * 1000); - int ret = -1; + int ret = -1; if (!rk) use_rk = test_create_producer(); @@ -4739,8 +4862,8 @@ int test_get_partition_count (rd_kafka_t *rk, const char *topicname, tmout_multip(15000)); if (err) TEST_WARN("metadata() for %s failed: %s\n", - rkt ? rd_kafka_topic_name(rkt) : - "(all-local)", + rkt ? rd_kafka_topic_name(rkt) + : "(all-local)", rd_kafka_err2str(err)); else { if (metadata->topic_cnt == 1) { @@ -4752,10 +4875,10 @@ int test_get_partition_count (rd_kafka_t *rk, const char *topicname, ret = (int)cnt; break; } - TEST_SAY("metadata(%s) returned %s: retrying\n", - rd_kafka_topic_name(rkt), - rd_kafka_err2str(metadata-> - topics[0].err)); + TEST_SAY( + "metadata(%s) returned %s: retrying\n", + rd_kafka_topic_name(rkt), + rd_kafka_err2str(metadata->topics[0].err)); } rd_kafka_metadata_destroy(metadata); rd_sleep(1); @@ -4773,12 +4896,12 @@ int test_get_partition_count (rd_kafka_t *rk, const char *topicname, /** * @brief Let the broker auto-create the topic for us. 
*/ -rd_kafka_resp_err_t test_auto_create_topic_rkt (rd_kafka_t *rk, - rd_kafka_topic_t *rkt, - int timeout_ms) { - const struct rd_kafka_metadata *metadata; - rd_kafka_resp_err_t err; - test_timing_t t; +rd_kafka_resp_err_t test_auto_create_topic_rkt(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + int timeout_ms) { + const struct rd_kafka_metadata *metadata; + rd_kafka_resp_err_t err; + test_timing_t t; int64_t abs_timeout = test_clock() + (timeout_ms * 1000); do { @@ -4788,8 +4911,8 @@ rd_kafka_resp_err_t test_auto_create_topic_rkt (rd_kafka_t *rk, TIMING_STOP(&t); if (err) TEST_WARN("metadata() for %s failed: %s\n", - rkt ? rd_kafka_topic_name(rkt) : - "(all-local)", + rkt ? rd_kafka_topic_name(rkt) + : "(all-local)", rd_kafka_err2str(err)); else { if (metadata->topic_cnt == 1) { @@ -4798,10 +4921,10 @@ rd_kafka_resp_err_t test_auto_create_topic_rkt (rd_kafka_t *rk, rd_kafka_metadata_destroy(metadata); return 0; } - TEST_SAY("metadata(%s) returned %s: retrying\n", - rd_kafka_topic_name(rkt), - rd_kafka_err2str(metadata-> - topics[0].err)); + TEST_SAY( + "metadata(%s) returned %s: retrying\n", + rd_kafka_topic_name(rkt), + rd_kafka_err2str(metadata->topics[0].err)); } rd_kafka_metadata_destroy(metadata); rd_sleep(1); @@ -4811,8 +4934,8 @@ rd_kafka_resp_err_t test_auto_create_topic_rkt (rd_kafka_t *rk, return err; } -rd_kafka_resp_err_t test_auto_create_topic (rd_kafka_t *rk, const char *name, - int timeout_ms) { +rd_kafka_resp_err_t +test_auto_create_topic(rd_kafka_t *rk, const char *name, int timeout_ms) { rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, name, NULL); rd_kafka_resp_err_t err; if (!rkt) @@ -4827,18 +4950,18 @@ rd_kafka_resp_err_t test_auto_create_topic (rd_kafka_t *rk, const char *name, * @brief Check if topic auto creation works. * @returns 1 if it does, else 0. 
*/ -int test_check_auto_create_topic (void) { +int test_check_auto_create_topic(void) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_resp_err_t err; const char *topic = test_mk_topic_name("autocreatetest", 1); test_conf_init(&conf, NULL, 0); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); err = test_auto_create_topic(rk, topic, tmout_multip(5000)); if (err) - TEST_SAY("Auto topic creation of \"%s\" failed: %s\n", - topic, rd_kafka_err2str(err)); + TEST_SAY("Auto topic creation of \"%s\" failed: %s\n", topic, + rd_kafka_err2str(err)); rd_kafka_destroy(rk); return err ? 0 : 1; @@ -4855,10 +4978,9 @@ int test_check_auto_create_topic (void) { * * @returns -1 if the application could not be started, else the pid. */ -int test_run_java (const char *cls, const char **argv) { +int test_run_java(const char *cls, const char **argv) { #ifdef _WIN32 - TEST_WARN("%s(%s) not supported Windows, yet", - __FUNCTION__, cls); + TEST_WARN("%s(%s) not supported Windows, yet", __FUNCTION__, cls); return -1; #else int r; @@ -4871,8 +4993,8 @@ int test_run_java (const char *cls, const char **argv) { kpath = test_getenv("KAFKA_PATH", NULL); if (!kpath) { - TEST_WARN("%s(%s): KAFKA_PATH must be set\n", - __FUNCTION__, cls); + TEST_WARN("%s(%s): KAFKA_PATH must be set\n", __FUNCTION__, + cls); return -1; } @@ -4888,8 +5010,8 @@ int test_run_java (const char *cls, const char **argv) { /* For child process and run cls */ pid = fork(); if (pid == -1) { - TEST_WARN("%s(%s): failed to fork: %s\n", - __FUNCTION__, cls, strerror(errno)); + TEST_WARN("%s(%s): failed to fork: %s\n", __FUNCTION__, cls, + strerror(errno)); return -1; } @@ -4899,24 +5021,24 @@ int test_run_java (const char *cls, const char **argv) { /* In child process */ /* Reconstruct argv to contain run-class.sh and the cls */ - for (cnt = 0 ; argv[cnt] ; cnt++) + for (cnt = 0; argv[cnt]; cnt++) ; cnt += 3; /* run-class.sh, cls, .., NULL */ - full_argv = 
malloc(sizeof(*full_argv) * cnt); + full_argv = malloc(sizeof(*full_argv) * cnt); full_argv[0] = "java/run-class.sh"; full_argv[1] = (const char *)cls; /* Copy arguments */ - for (p = &full_argv[2] ; *argv ; p++, argv++) + for (p = &full_argv[2]; *argv; p++, argv++) *p = *argv; *p = NULL; /* Run */ - r = execve(full_argv[0], (char *const*)full_argv, environ); + r = execve(full_argv[0], (char *const *)full_argv, environ); - TEST_WARN("%s(%s): failed to execute run-class.sh: %s\n", - __FUNCTION__, cls, strerror(errno)); + TEST_WARN("%s(%s): failed to execute run-class.sh: %s\n", __FUNCTION__, + cls, strerror(errno)); exit(2); return -1; /* NOTREACHED */ @@ -4929,10 +5051,9 @@ int test_run_java (const char *cls, const char **argv) { * * @returns -1 if the child process exited successfully, else -1. */ -int test_waitpid (int pid) { +int test_waitpid(int pid) { #ifdef _WIN32 - TEST_WARN("%s() not supported Windows, yet", - __FUNCTION__); + TEST_WARN("%s() not supported Windows, yet", __FUNCTION__); return -1; #else pid_t r; @@ -4941,8 +5062,7 @@ int test_waitpid (int pid) { r = waitpid((pid_t)pid, &status, 0); if (r == -1) { - TEST_WARN("waitpid(%d) failed: %s\n", - pid, strerror(errno)); + TEST_WARN("waitpid(%d) failed: %s\n", pid, strerror(errno)); return -1; } @@ -4951,8 +5071,8 @@ int test_waitpid (int pid) { WTERMSIG(status)); return -1; } else if (WEXITSTATUS(status)) { - TEST_WARN("Process %d exited with status %d\n", - pid, WEXITSTATUS(status)); + TEST_WARN("Process %d exited with status %d\n", pid, + WEXITSTATUS(status)); return -1; } @@ -4965,40 +5085,39 @@ int test_waitpid (int pid) { * @brief Check if \p feature is builtin to librdkafka. * @returns returns 1 if feature is built in, else 0. 
*/ -int test_check_builtin (const char *feature) { - rd_kafka_conf_t *conf; - char errstr[128]; - int r; +int test_check_builtin(const char *feature) { + rd_kafka_conf_t *conf; + char errstr[128]; + int r; - conf = rd_kafka_conf_new(); - if (rd_kafka_conf_set(conf, "builtin.features", feature, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { - TEST_SAY("Feature \"%s\" not built-in: %s\n", - feature, errstr); - r = 0; - } else { - TEST_SAY("Feature \"%s\" is built-in\n", feature); - r = 1; - } + conf = rd_kafka_conf_new(); + if (rd_kafka_conf_set(conf, "builtin.features", feature, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + TEST_SAY("Feature \"%s\" not built-in: %s\n", feature, errstr); + r = 0; + } else { + TEST_SAY("Feature \"%s\" is built-in\n", feature); + r = 1; + } - rd_kafka_conf_destroy(conf); - return r; + rd_kafka_conf_destroy(conf); + return r; } -char *tsprintf (const char *fmt, ...) { - static RD_TLS char ret[8][512]; - static RD_TLS int i; - va_list ap; +char *tsprintf(const char *fmt, ...) { + static RD_TLS char ret[8][512]; + static RD_TLS int i; + va_list ap; - i = (i + 1) % 8; + i = (i + 1) % 8; - va_start(ap, fmt); - rd_vsnprintf(ret[i], sizeof(ret[i]), fmt, ap); - va_end(ap); + va_start(ap, fmt); + rd_vsnprintf(ret[i], sizeof(ret[i]), fmt, ap); + va_end(ap); - return ret[i]; + return ret[i]; } @@ -5006,28 +5125,28 @@ char *tsprintf (const char *fmt, ...) { * @brief Add a test report JSON object. * These will be written as a JSON array to the test report file. */ -void test_report_add (struct test *test, const char *fmt, ...) { - va_list ap; - char buf[512]; +void test_report_add(struct test *test, const char *fmt, ...) 
{ + va_list ap; + char buf[512]; - va_start(ap, fmt); - vsnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); - if (test->report_cnt == test->report_size) { - if (test->report_size == 0) - test->report_size = 8; - else - test->report_size *= 2; + if (test->report_cnt == test->report_size) { + if (test->report_size == 0) + test->report_size = 8; + else + test->report_size *= 2; - test->report_arr = realloc(test->report_arr, - sizeof(*test->report_arr) * - test->report_size); - } + test->report_arr = + realloc(test->report_arr, + sizeof(*test->report_arr) * test->report_size); + } - test->report_arr[test->report_cnt++] = rd_strdup(buf); + test->report_arr[test->report_cnt++] = rd_strdup(buf); - TEST_SAYL(1, "Report #%d: %s\n", test->report_cnt-1, buf); + TEST_SAYL(1, "Report #%d: %s\n", test->report_cnt - 1, buf); } /** @@ -5036,27 +5155,28 @@ void test_report_add (struct test *test, const char *fmt, ...) { * * If \p skip is set TEST_SKIP() will be called with a helpful message. */ -int test_can_create_topics (int skip) { +int test_can_create_topics(int skip) { /* Has AdminAPI */ - if (test_broker_version >= TEST_BRKVER(0,10,2,0)) + if (test_broker_version >= TEST_BRKVER(0, 10, 2, 0)) return 1; #ifdef _WIN32 - if (skip) - TEST_SKIP("Cannot create topics on Win32\n"); - return 0; + if (skip) + TEST_SKIP("Cannot create topics on Win32\n"); + return 0; #else - if (!test_getenv("KAFKA_PATH", NULL) || - !test_getenv("ZK_ADDRESS", NULL)) { - if (skip) - TEST_SKIP("Cannot create topics " - "(set KAFKA_PATH and ZK_ADDRESS)\n"); - return 0; - } + if (!test_getenv("KAFKA_PATH", NULL) || + !test_getenv("ZK_ADDRESS", NULL)) { + if (skip) + TEST_SKIP( + "Cannot create topics " + "(set KAFKA_PATH and ZK_ADDRESS)\n"); + return 0; + } - return 1; + return 1; #endif } @@ -5064,61 +5184,59 @@ int test_can_create_topics (int skip) { /** * Wait for \p event_type, discarding all other events prior to it. 
*/ -rd_kafka_event_t *test_wait_event (rd_kafka_queue_t *eventq, - rd_kafka_event_type_t event_type, - int timeout_ms) { - test_timing_t t_w; - int64_t abs_timeout = test_clock() + (timeout_ms * 1000); - - TIMING_START(&t_w, "wait_event"); - while (test_clock() < abs_timeout) { - rd_kafka_event_t *rkev; +rd_kafka_event_t *test_wait_event(rd_kafka_queue_t *eventq, + rd_kafka_event_type_t event_type, + int timeout_ms) { + test_timing_t t_w; + int64_t abs_timeout = test_clock() + (timeout_ms * 1000); - rkev = rd_kafka_queue_poll(eventq, - (int)(abs_timeout - test_clock())/ - 1000); + TIMING_START(&t_w, "wait_event"); + while (test_clock() < abs_timeout) { + rd_kafka_event_t *rkev; - if (rd_kafka_event_type(rkev) == event_type) { - TIMING_STOP(&t_w); - return rkev; - } + rkev = rd_kafka_queue_poll( + eventq, (int)(abs_timeout - test_clock()) / 1000); - if (!rkev) - continue; + if (rd_kafka_event_type(rkev) == event_type) { + TIMING_STOP(&t_w); + return rkev; + } - if (rd_kafka_event_error(rkev)) - TEST_SAY("discarding ignored event %s: %s\n", - rd_kafka_event_name(rkev), - rd_kafka_event_error_string(rkev)); - else - TEST_SAY("discarding ignored event %s\n", - rd_kafka_event_name(rkev)); - rd_kafka_event_destroy(rkev); + if (!rkev) + continue; - } - TIMING_STOP(&t_w); + if (rd_kafka_event_error(rkev)) + TEST_SAY("discarding ignored event %s: %s\n", + rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + else + TEST_SAY("discarding ignored event %s\n", + rd_kafka_event_name(rkev)); + rd_kafka_event_destroy(rkev); + } + TIMING_STOP(&t_w); - return NULL; + return NULL; } -void test_SAY (const char *file, int line, int level, const char *str) { +void test_SAY(const char *file, int line, int level, const char *str) { TEST_SAYL(level, "%s", str); } -void test_SKIP (const char *file, int line, const char *str) { +void test_SKIP(const char *file, int line, const char *str) { TEST_WARN("SKIPPING TEST: %s", str); TEST_LOCK(); test_curr->state = TEST_SKIPPED; if 
(!*test_curr->failstr) { - rd_snprintf(test_curr->failstr, - sizeof(test_curr->failstr), "%s", str); + rd_snprintf(test_curr->failstr, sizeof(test_curr->failstr), + "%s", str); rtrim(test_curr->failstr); } TEST_UNLOCK(); } -const char *test_curr_name (void) { +const char *test_curr_name(void) { return test_curr->name; } @@ -5126,17 +5244,17 @@ const char *test_curr_name (void) { /** * @brief Dump/print message haders */ -void test_headers_dump (const char *what, int lvl, - const rd_kafka_headers_t *hdrs) { +void test_headers_dump(const char *what, + int lvl, + const rd_kafka_headers_t *hdrs) { size_t idx = 0; const char *name, *value; size_t size; while (!rd_kafka_header_get_all(hdrs, idx++, &name, (const void **)&value, &size)) - TEST_SAYL(lvl, "%s: Header #%"PRIusz": %s='%s'\n", - what, idx-1, name, - value ? value : "(NULL)"); + TEST_SAYL(lvl, "%s: Header #%" PRIusz ": %s='%s'\n", what, + idx - 1, name, value ? value : "(NULL)"); } @@ -5148,7 +5266,7 @@ void test_headers_dump (const char *what, int lvl, * * @returns a malloc:ed list of int32_t broker ids. */ -int32_t *test_get_broker_ids (rd_kafka_t *use_rk, size_t *cntp) { +int32_t *test_get_broker_ids(rd_kafka_t *use_rk, size_t *cntp) { int32_t *ids; rd_kafka_t *rk; const rd_kafka_metadata_t *md; @@ -5160,12 +5278,12 @@ int32_t *test_get_broker_ids (rd_kafka_t *use_rk, size_t *cntp) { err = rd_kafka_metadata(rk, 0, NULL, &md, tmout_multip(5000)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); - TEST_ASSERT(md->broker_cnt > 0, - "%d brokers, expected > 0", md->broker_cnt); + TEST_ASSERT(md->broker_cnt > 0, "%d brokers, expected > 0", + md->broker_cnt); ids = malloc(sizeof(*ids) * md->broker_cnt); - for (i = 0 ; i < (size_t)md->broker_cnt ; i++) + for (i = 0; i < (size_t)md->broker_cnt; i++) ids[i] = md->brokers[i].id; *cntp = md->broker_cnt; @@ -5186,11 +5304,11 @@ int32_t *test_get_broker_ids (rd_kafka_t *use_rk, size_t *cntp) { * * @returns the number of failures (but does not FAIL). 
*/ -static int verify_topics_in_metadata (rd_kafka_t *rk, - rd_kafka_metadata_topic_t *topics, - size_t topic_cnt, - rd_kafka_metadata_topic_t *not_topics, - size_t not_topic_cnt) { +static int verify_topics_in_metadata(rd_kafka_t *rk, + rd_kafka_metadata_topic_t *topics, + size_t topic_cnt, + rd_kafka_metadata_topic_t *not_topics, + size_t not_topic_cnt) { const rd_kafka_metadata_t *md; rd_kafka_resp_err_t err; int ti; @@ -5200,17 +5318,17 @@ static int verify_topics_in_metadata (rd_kafka_t *rk, /* Mark topics with dummy error which is overwritten * when topic is found in metadata, allowing us to check * for missed topics. */ - for (i = 0 ; i < topic_cnt ; i++) + for (i = 0; i < topic_cnt; i++) topics[i].err = 12345; - err = rd_kafka_metadata(rk, 1/*all_topics*/, NULL, &md, + err = rd_kafka_metadata(rk, 1 /*all_topics*/, NULL, &md, tmout_multip(5000)); TEST_ASSERT(!err, "metadata failed: %s", rd_kafka_err2str(err)); - for (ti = 0 ; ti < md->topic_cnt ; ti++) { + for (ti = 0; ti < md->topic_cnt; ti++) { const rd_kafka_metadata_topic_t *mdt = &md->topics[ti]; - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { int pi; rd_kafka_metadata_topic_t *exp_mdt; @@ -5221,41 +5339,43 @@ static int verify_topics_in_metadata (rd_kafka_t *rk, exp_mdt->err = mdt->err; /* indicate found */ if (mdt->err) { - TEST_SAY("metadata: " - "Topic %s has error %s\n", - mdt->topic, - rd_kafka_err2str(mdt->err)); + TEST_SAY( + "metadata: " + "Topic %s has error %s\n", + mdt->topic, rd_kafka_err2str(mdt->err)); fails++; } if (exp_mdt->partition_cnt > 0 && mdt->partition_cnt != exp_mdt->partition_cnt) { - TEST_SAY("metadata: " - "Topic %s, expected %d partitions" - ", not %d\n", - mdt->topic, - exp_mdt->partition_cnt, - mdt->partition_cnt); + TEST_SAY( + "metadata: " + "Topic %s, expected %d partitions" + ", not %d\n", + mdt->topic, exp_mdt->partition_cnt, + mdt->partition_cnt); fails++; continue; } /* Verify per-partition values */ - for (pi = 0 ; exp_mdt->partitions && - 
pi < exp_mdt->partition_cnt ; pi++) { + for (pi = 0; + exp_mdt->partitions && pi < exp_mdt->partition_cnt; + pi++) { const rd_kafka_metadata_partition_t *mdp = - &mdt->partitions[pi]; + &mdt->partitions[pi]; const rd_kafka_metadata_partition_t *exp_mdp = - &exp_mdt->partitions[pi]; + &exp_mdt->partitions[pi]; if (mdp->id != exp_mdp->id) { - TEST_SAY("metadata: " - "Topic %s, " - "partition %d, " - "partition list out of order," - " expected %d, not %d\n", - mdt->topic, pi, - exp_mdp->id, mdp->id); + TEST_SAY( + "metadata: " + "Topic %s, " + "partition %d, " + "partition list out of order," + " expected %d, not %d\n", + mdt->topic, pi, exp_mdp->id, + mdp->id); fails++; continue; } @@ -5263,78 +5383,85 @@ static int verify_topics_in_metadata (rd_kafka_t *rk, if (exp_mdp->replicas) { if (mdp->replica_cnt != exp_mdp->replica_cnt) { - TEST_SAY("metadata: " - "Topic %s, " - "partition %d, " - "expected %d replicas," - " not %d\n", - mdt->topic, pi, - exp_mdp->replica_cnt, - mdp->replica_cnt); + TEST_SAY( + "metadata: " + "Topic %s, " + "partition %d, " + "expected %d replicas," + " not %d\n", + mdt->topic, pi, + exp_mdp->replica_cnt, + mdp->replica_cnt); fails++; - } else if (memcmp(mdp->replicas, - exp_mdp->replicas, - mdp->replica_cnt * - sizeof(*mdp->replicas))) { + } else if ( + memcmp( + mdp->replicas, + exp_mdp->replicas, + mdp->replica_cnt * + sizeof(*mdp->replicas))) { int ri; - TEST_SAY("metadata: " - "Topic %s, " - "partition %d, " - "replica mismatch:\n", - mdt->topic, pi); + TEST_SAY( + "metadata: " + "Topic %s, " + "partition %d, " + "replica mismatch:\n", + mdt->topic, pi); - for (ri = 0 ; - ri < mdp->replica_cnt ; + for (ri = 0; + ri < mdp->replica_cnt; ri++) { - TEST_SAY(" #%d: " - "expected " - "replica %d, " - "not %d\n", - ri, - exp_mdp-> - replicas[ri], - mdp-> - replicas[ri]); + TEST_SAY( + " #%d: " + "expected " + "replica %d, " + "not %d\n", + ri, + exp_mdp + ->replicas[ri], + mdp->replicas[ri]); } fails++; } - } } } - for (i = 0 ; i < 
not_topic_cnt ; i++) { + for (i = 0; i < not_topic_cnt; i++) { if (strcmp(not_topics[i].topic, mdt->topic)) continue; - TEST_SAY("metadata: " - "Topic %s found in metadata, unexpected\n", - mdt->topic); + TEST_SAY( + "metadata: " + "Topic %s found in metadata, unexpected\n", + mdt->topic); fails++; } - } - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { if ((int)topics[i].err == 12345) { - TEST_SAY("metadata: " - "Topic %s not seen in metadata\n", - topics[i].topic); + TEST_SAY( + "metadata: " + "Topic %s not seen in metadata\n", + topics[i].topic); fails++; } } if (fails > 0) - TEST_SAY("Metadata verification for %"PRIusz" topics failed " + TEST_SAY("Metadata verification for %" PRIusz + " topics failed " "with %d errors (see above)\n", topic_cnt, fails); else - TEST_SAY("Metadata verification succeeded: " - "%"PRIusz" desired topics seen, " - "%"PRIusz" undesired topics not seen\n", - topic_cnt, not_topic_cnt); + TEST_SAY( + "Metadata verification succeeded: " + "%" PRIusz + " desired topics seen, " + "%" PRIusz " undesired topics not seen\n", + topic_cnt, not_topic_cnt); rd_kafka_metadata_destroy(md); @@ -5346,12 +5473,12 @@ static int verify_topics_in_metadata (rd_kafka_t *rk, /** * @brief Wait for metadata to reflect expected and not expected topics */ -void test_wait_metadata_update (rd_kafka_t *rk, - rd_kafka_metadata_topic_t *topics, - size_t topic_cnt, - rd_kafka_metadata_topic_t *not_topics, - size_t not_topic_cnt, - int tmout) { +void test_wait_metadata_update(rd_kafka_t *rk, + rd_kafka_metadata_topic_t *topics, + size_t topic_cnt, + rd_kafka_metadata_topic_t *not_topics, + size_t not_topic_cnt, + int tmout) { int64_t abs_timeout; test_timing_t t_md; rd_kafka_t *our_rk = NULL; @@ -5367,14 +5494,13 @@ void test_wait_metadata_update (rd_kafka_t *rk, do { int md_fails; - md_fails = verify_topics_in_metadata( - rk, - topics, topic_cnt, - not_topics, not_topic_cnt); + md_fails = verify_topics_in_metadata(rk, topics, topic_cnt, + 
not_topics, not_topic_cnt); if (!md_fails) { - TEST_SAY("All expected topics (not?) " - "seen in metadata\n"); + TEST_SAY( + "All expected topics (not?) " + "seen in metadata\n"); abs_timeout = 0; break; } @@ -5393,8 +5519,8 @@ void test_wait_metadata_update (rd_kafka_t *rk, /** * @brief Wait for topic to be available in metadata */ -void test_wait_topic_exists (rd_kafka_t *rk, const char *topic, int tmout) { - rd_kafka_metadata_topic_t topics = { .topic = (char *)topic }; +void test_wait_topic_exists(rd_kafka_t *rk, const char *topic, int tmout) { + rd_kafka_metadata_topic_t topics = {.topic = (char *)topic}; test_wait_metadata_update(rk, &topics, 1, NULL, 0, tmout); @@ -5410,10 +5536,9 @@ void test_wait_topic_exists (rd_kafka_t *rk, const char *topic, int tmout) { * @brief Wait for up to \p tmout for any type of admin result. * @returns the event */ -rd_kafka_event_t * -test_wait_admin_result (rd_kafka_queue_t *q, - rd_kafka_event_type_t evtype, - int tmout) { +rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q, + rd_kafka_event_type_t evtype, + int tmout) { rd_kafka_event_t *rkev; while (1) { @@ -5427,16 +5552,16 @@ test_wait_admin_result (rd_kafka_queue_t *q, if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_ERROR) { - TEST_WARN("Received error event while waiting for %d: " - "%s: ignoring", - evtype, rd_kafka_event_error_string(rkev)); + TEST_WARN( + "Received error event while waiting for %d: " + "%s: ignoring", + evtype, rd_kafka_event_error_string(rkev)); continue; } TEST_ASSERT(rd_kafka_event_type(rkev) == evtype, - "Expected event type %d, got %d (%s)", - evtype, + "Expected event type %d, got %d (%s)", evtype, rd_kafka_event_type(rkev), rd_kafka_event_name(rkev)); } @@ -5460,28 +5585,26 @@ test_wait_admin_result (rd_kafka_queue_t *q, * * DeleteConsumerGroupOffsets * - DescribeConfigs */ -rd_kafka_resp_err_t -test_wait_topic_admin_result (rd_kafka_queue_t *q, - rd_kafka_event_type_t evtype, - rd_kafka_event_t **retevent, - int tmout) { 
+rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q, + rd_kafka_event_type_t evtype, + rd_kafka_event_t **retevent, + int tmout) { rd_kafka_event_t *rkev; size_t i; - const rd_kafka_topic_result_t **terr = NULL; - size_t terr_cnt = 0; + const rd_kafka_topic_result_t **terr = NULL; + size_t terr_cnt = 0; const rd_kafka_ConfigResource_t **cres = NULL; - size_t cres_cnt = 0; - int errcnt = 0; + size_t cres_cnt = 0; + int errcnt = 0; rd_kafka_resp_err_t err; - const rd_kafka_group_result_t **gres = NULL; - size_t gres_cnt = 0; + const rd_kafka_group_result_t **gres = NULL; + size_t gres_cnt = 0; const rd_kafka_topic_partition_list_t *offsets = NULL; rkev = test_wait_admin_result(q, evtype, tmout); if ((err = rd_kafka_event_error(rkev))) { - TEST_WARN("%s failed: %s\n", - rd_kafka_event_name(rkev), + TEST_WARN("%s failed: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); rd_kafka_event_destroy(rkev); return err; @@ -5518,8 +5641,8 @@ test_wait_topic_admin_result (rd_kafka_queue_t *q, TEST_FAIL("Expected a DescribeConfigs result, not %s", rd_kafka_event_name(rkev)); - cres = rd_kafka_DescribeConfigs_result_resources(res, - &cres_cnt); + cres = + rd_kafka_DescribeConfigs_result_resources(res, &cres_cnt); } else if (evtype == RD_KAFKA_EVENT_ALTERCONFIGS_RESULT) { const rd_kafka_AlterConfigs_result_t *res; @@ -5548,14 +5671,15 @@ test_wait_topic_admin_result (rd_kafka_queue_t *q, } else if (evtype == RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT) { const rd_kafka_DeleteConsumerGroupOffsets_result_t *res; - if (!(res = - rd_kafka_event_DeleteConsumerGroupOffsets_result(rkev))) - TEST_FAIL("Expected a DeleteConsumerGroupOffsets " - "result, not %s", - rd_kafka_event_name(rkev)); + if (!(res = rd_kafka_event_DeleteConsumerGroupOffsets_result( + rkev))) + TEST_FAIL( + "Expected a DeleteConsumerGroupOffsets " + "result, not %s", + rd_kafka_event_name(rkev)); gres = rd_kafka_DeleteConsumerGroupOffsets_result_groups( - rkev, &gres_cnt); + 
rkev, &gres_cnt); } else { TEST_FAIL("Bad evtype: %d", evtype); @@ -5563,7 +5687,7 @@ test_wait_topic_admin_result (rd_kafka_queue_t *q, } /* Check topic errors */ - for (i = 0 ; i < terr_cnt ; i++) { + for (i = 0; i < terr_cnt; i++) { if (rd_kafka_topic_result_error(terr[i])) { TEST_WARN("..Topics result: %s: error: %s\n", rd_kafka_topic_result_name(terr[i]), @@ -5574,19 +5698,20 @@ test_wait_topic_admin_result (rd_kafka_queue_t *q, } /* Check resource errors */ - for (i = 0 ; i < cres_cnt ; i++) { + for (i = 0; i < cres_cnt; i++) { if (rd_kafka_ConfigResource_error(cres[i])) { - TEST_WARN("ConfigResource result: %d,%s: error: %s\n", - rd_kafka_ConfigResource_type(cres[i]), - rd_kafka_ConfigResource_name(cres[i]), - rd_kafka_ConfigResource_error_string(cres[i])); + TEST_WARN( + "ConfigResource result: %d,%s: error: %s\n", + rd_kafka_ConfigResource_type(cres[i]), + rd_kafka_ConfigResource_name(cres[i]), + rd_kafka_ConfigResource_error_string(cres[i])); if (!(errcnt++)) err = rd_kafka_ConfigResource_error(cres[i]); } } /* Check group errors */ - for (i = 0 ; i < gres_cnt ; i++) { + for (i = 0; i < gres_cnt; i++) { const rd_kafka_topic_partition_list_t *parts; if (rd_kafka_group_result_error(gres[i])) { @@ -5594,36 +5719,39 @@ test_wait_topic_admin_result (rd_kafka_queue_t *q, TEST_WARN("%s result: %s: error: %s\n", rd_kafka_event_name(rkev), rd_kafka_group_result_name(gres[i]), - rd_kafka_error_string(rd_kafka_group_result_error(gres[i]))); + rd_kafka_error_string( + rd_kafka_group_result_error(gres[i]))); if (!(errcnt++)) - err = rd_kafka_error_code(rd_kafka_group_result_error(gres[i])); + err = rd_kafka_error_code( + rd_kafka_group_result_error(gres[i])); } parts = rd_kafka_group_result_partitions(gres[i]); if (parts) { int j; - for (j = 0 ; j < parts->cnt ; i++) { + for (j = 0; j < parts->cnt; i++) { if (!parts->elems[j].err) continue; - TEST_WARN("%s result: %s: " - "%s [%"PRId32"] error: %s\n", - rd_kafka_event_name(rkev), - rd_kafka_group_result_name(gres[i]), 
- parts->elems[j].topic, - parts->elems[j].partition, - rd_kafka_err2str( - parts->elems[j].err)); + TEST_WARN( + "%s result: %s: " + "%s [%" PRId32 "] error: %s\n", + rd_kafka_event_name(rkev), + rd_kafka_group_result_name(gres[i]), + parts->elems[j].topic, + parts->elems[j].partition, + rd_kafka_err2str(parts->elems[j].err)); errcnt++; } } } /* Check offset errors */ - for (i = 0 ; (offsets && i < (size_t)offsets->cnt) ; i++) { + for (i = 0; (offsets && i < (size_t)offsets->cnt); i++) { if (offsets->elems[i].err) { TEST_WARN("DeleteRecords result: %s [%d]: error: %s\n", - offsets->elems[i].topic, offsets->elems[i].partition, + offsets->elems[i].topic, + offsets->elems[i].partition, rd_kafka_err2str(offsets->elems[i].err)); if (!(errcnt++)) err = offsets->elems[i].err; @@ -5650,12 +5778,12 @@ test_wait_topic_admin_result (rd_kafka_queue_t *q, * @remark Fails the current test on failure. */ -rd_kafka_resp_err_t -test_CreateTopics_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - char **topics, size_t topic_cnt, - int num_partitions, - void *opaque) { +rd_kafka_resp_err_t test_CreateTopics_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **topics, + size_t topic_cnt, + int num_partitions, + void *opaque) { rd_kafka_NewTopic_t **new_topics; rd_kafka_AdminOptions_t *options; rd_kafka_queue_t *q; @@ -5665,13 +5793,12 @@ test_CreateTopics_simple (rd_kafka_t *rk, new_topics = malloc(sizeof(*new_topics) * topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { char errstr[512]; - new_topics[i] = rd_kafka_NewTopic_new(topics[i], - num_partitions, 1, - errstr, sizeof(errstr)); + new_topics[i] = rd_kafka_NewTopic_new( + topics[i], num_partitions, 1, errstr, sizeof(errstr)); TEST_ASSERT(new_topics[i], - "Failed to NewTopic(\"%s\", %d) #%"PRIusz": %s", + "Failed to NewTopic(\"%s\", %d) #%" PRIusz ": %s", topics[i], num_partitions, i, errstr); } @@ -5681,15 +5808,11 @@ test_CreateTopics_simple (rd_kafka_t *rk, if (!useq) { char 
errstr[512]; - err = rd_kafka_AdminOptions_set_request_timeout(options, - tmout, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_request_timeout: %s", errstr); - err = rd_kafka_AdminOptions_set_operation_timeout(options, - tmout-5000, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, tmout - 5000, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_operation_timeout: %s", errstr); q = rd_kafka_queue_new(rk); @@ -5697,7 +5820,7 @@ test_CreateTopics_simple (rd_kafka_t *rk, q = useq; } - TEST_SAY("Creating %"PRIusz" topics\n", topic_cnt); + TEST_SAY("Creating %" PRIusz " topics\n", topic_cnt); rd_kafka_CreateTopics(rk, new_topics, topic_cnt, options, q); @@ -5710,26 +5833,24 @@ test_CreateTopics_simple (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR_NO_ERROR; - err = test_wait_topic_admin_result(q, - RD_KAFKA_EVENT_CREATETOPICS_RESULT, - NULL, tmout+5000); + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_CREATETOPICS_RESULT, NULL, tmout + 5000); rd_kafka_queue_destroy(q); if (err) - TEST_FAIL("Failed to create %d topic(s): %s", - (int)topic_cnt, rd_kafka_err2str(err)); + TEST_FAIL("Failed to create %d topic(s): %s", (int)topic_cnt, + rd_kafka_err2str(err)); return err; } -rd_kafka_resp_err_t -test_CreatePartitions_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - const char *topic, - size_t total_part_cnt, - void *opaque) { +rd_kafka_resp_err_t test_CreatePartitions_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + const char *topic, + size_t total_part_cnt, + void *opaque) { rd_kafka_NewPartitions_t *newp[1]; rd_kafka_AdminOptions_t *options; rd_kafka_queue_t *q; @@ -5739,24 +5860,19 @@ test_CreatePartitions_simple (rd_kafka_t *rk, newp[0] = rd_kafka_NewPartitions_new(topic, total_part_cnt, errstr, sizeof(errstr)); - TEST_ASSERT(newp[0], - "Failed to NewPartitions(\"%s\", %"PRIusz"): %s", + TEST_ASSERT(newp[0], "Failed to 
NewPartitions(\"%s\", %" PRIusz "): %s", topic, total_part_cnt, errstr); - options = rd_kafka_AdminOptions_new(rk, - RD_KAFKA_ADMIN_OP_CREATEPARTITIONS); + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEPARTITIONS); rd_kafka_AdminOptions_set_opaque(options, opaque); if (!useq) { - err = rd_kafka_AdminOptions_set_request_timeout(options, - tmout, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_request_timeout: %s", errstr); - err = rd_kafka_AdminOptions_set_operation_timeout(options, - tmout-5000, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, tmout - 5000, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_operation_timeout: %s", errstr); q = rd_kafka_queue_new(rk); @@ -5764,7 +5880,7 @@ test_CreatePartitions_simple (rd_kafka_t *rk, q = useq; } - TEST_SAY("Creating (up to) %"PRIusz" partitions for topic \"%s\"\n", + TEST_SAY("Creating (up to) %" PRIusz " partitions for topic \"%s\"\n", total_part_cnt, topic); rd_kafka_CreatePartitions(rk, newp, 1, options, q); @@ -5778,7 +5894,7 @@ test_CreatePartitions_simple (rd_kafka_t *rk, err = test_wait_topic_admin_result( - q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, tmout+5000); + q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, tmout + 5000); rd_kafka_queue_destroy(q); @@ -5790,21 +5906,21 @@ test_CreatePartitions_simple (rd_kafka_t *rk, } -rd_kafka_resp_err_t -test_DeleteTopics_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - char **topics, size_t topic_cnt, - void *opaque) { +rd_kafka_resp_err_t test_DeleteTopics_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **topics, + size_t topic_cnt, + void *opaque) { rd_kafka_queue_t *q; rd_kafka_DeleteTopic_t **del_topics; rd_kafka_AdminOptions_t *options; size_t i; rd_kafka_resp_err_t err; - const int tmout = 30*1000; + const int tmout = 30 * 1000; del_topics = malloc(sizeof(*del_topics) * topic_cnt); - 
for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { del_topics[i] = rd_kafka_DeleteTopic_new(topics[i]); TEST_ASSERT(del_topics[i]); } @@ -5815,15 +5931,11 @@ test_DeleteTopics_simple (rd_kafka_t *rk, if (!useq) { char errstr[512]; - err = rd_kafka_AdminOptions_set_request_timeout(options, - tmout, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_request_timeout: %s", errstr); - err = rd_kafka_AdminOptions_set_operation_timeout(options, - tmout-5000, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, tmout - 5000, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_operation_timeout: %s", errstr); q = rd_kafka_queue_new(rk); @@ -5831,7 +5943,7 @@ test_DeleteTopics_simple (rd_kafka_t *rk, q = useq; } - TEST_SAY("Deleting %"PRIusz" topics\n", topic_cnt); + TEST_SAY("Deleting %" PRIusz " topics\n", topic_cnt); rd_kafka_DeleteTopics(rk, del_topics, topic_cnt, options, useq); @@ -5844,34 +5956,32 @@ test_DeleteTopics_simple (rd_kafka_t *rk, if (useq) return RD_KAFKA_RESP_ERR_NO_ERROR; - err = test_wait_topic_admin_result(q, - RD_KAFKA_EVENT_DELETETOPICS_RESULT, - NULL, tmout+5000); + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_DELETETOPICS_RESULT, NULL, tmout + 5000); rd_kafka_queue_destroy(q); if (err) - TEST_FAIL("Failed to delete topics: %s", - rd_kafka_err2str(err)); + TEST_FAIL("Failed to delete topics: %s", rd_kafka_err2str(err)); return err; } -rd_kafka_resp_err_t -test_DeleteGroups_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - char **groups, size_t group_cnt, - void *opaque) { +rd_kafka_resp_err_t test_DeleteGroups_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **groups, + size_t group_cnt, + void *opaque) { rd_kafka_queue_t *q; rd_kafka_DeleteGroup_t **del_groups; rd_kafka_AdminOptions_t *options; size_t i; rd_kafka_resp_err_t err; - const int tmout = 30*1000; + const int tmout = 30 
* 1000; del_groups = malloc(sizeof(*del_groups) * group_cnt); - for (i = 0 ; i < group_cnt ; i++) { + for (i = 0; i < group_cnt; i++) { del_groups[i] = rd_kafka_DeleteGroup_new(groups[i]); TEST_ASSERT(del_groups[i]); } @@ -5882,10 +5992,8 @@ test_DeleteGroups_simple (rd_kafka_t *rk, if (!useq) { char errstr[512]; - err = rd_kafka_AdminOptions_set_request_timeout(options, - tmout, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_request_timeout: %s", errstr); q = rd_kafka_queue_new(rk); @@ -5893,7 +6001,7 @@ test_DeleteGroups_simple (rd_kafka_t *rk, q = useq; } - TEST_SAY("Deleting %"PRIusz" groups\n", group_cnt); + TEST_SAY("Deleting %" PRIusz " groups\n", group_cnt); rd_kafka_DeleteGroups(rk, del_groups, group_cnt, options, useq); @@ -5905,50 +6013,43 @@ test_DeleteGroups_simple (rd_kafka_t *rk, if (useq) return RD_KAFKA_RESP_ERR_NO_ERROR; - err = test_wait_topic_admin_result(q, - RD_KAFKA_EVENT_DELETEGROUPS_RESULT, - NULL, tmout+5000); + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, NULL, tmout + 5000); rd_kafka_queue_destroy(q); rd_kafka_DeleteGroup_destroy_array(del_groups, group_cnt); if (err) - TEST_FAIL("Failed to delete groups: %s", - rd_kafka_err2str(err)); + TEST_FAIL("Failed to delete groups: %s", rd_kafka_err2str(err)); return err; } rd_kafka_resp_err_t -test_DeleteRecords_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - const rd_kafka_topic_partition_list_t *offsets, - void *opaque) { +test_DeleteRecords_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + const rd_kafka_topic_partition_list_t *offsets, + void *opaque) { rd_kafka_queue_t *q; rd_kafka_AdminOptions_t *options; rd_kafka_resp_err_t err; rd_kafka_DeleteRecords_t *del_records = - rd_kafka_DeleteRecords_new(offsets); - const int tmout = 30*1000; + rd_kafka_DeleteRecords_new(offsets); + const int tmout = 30 * 1000; - options = rd_kafka_AdminOptions_new(rk, - 
RD_KAFKA_ADMIN_OP_DELETERECORDS); + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETERECORDS); rd_kafka_AdminOptions_set_opaque(options, opaque); if (!useq) { char errstr[512]; - err = rd_kafka_AdminOptions_set_request_timeout(options, - tmout, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_request_timeout: %s", errstr); err = rd_kafka_AdminOptions_set_operation_timeout( - options, - tmout-5000, - errstr, - sizeof(errstr)); + options, tmout - 5000, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_operation_timeout: %s", errstr); q = rd_kafka_queue_new(rk); @@ -5967,9 +6068,8 @@ test_DeleteRecords_simple (rd_kafka_t *rk, if (useq) return RD_KAFKA_RESP_ERR_NO_ERROR; - err = test_wait_topic_admin_result(q, - RD_KAFKA_EVENT_DELETERECORDS_RESULT, - NULL, tmout+5000); + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_DELETERECORDS_RESULT, NULL, tmout + 5000); rd_kafka_queue_destroy(q); @@ -5980,36 +6080,30 @@ test_DeleteRecords_simple (rd_kafka_t *rk, return err; } -rd_kafka_resp_err_t -test_DeleteConsumerGroupOffsets_simple ( - rd_kafka_t *rk, - rd_kafka_queue_t *useq, - const char *group_id, - const rd_kafka_topic_partition_list_t *offsets, - void *opaque) { +rd_kafka_resp_err_t test_DeleteConsumerGroupOffsets_simple( + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + const char *group_id, + const rd_kafka_topic_partition_list_t *offsets, + void *opaque) { rd_kafka_queue_t *q; rd_kafka_AdminOptions_t *options; rd_kafka_resp_err_t err; - const int tmout = 30*1000; + const int tmout = 30 * 1000; rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets; options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS); + rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS); rd_kafka_AdminOptions_set_opaque(options, opaque); if (!useq) { char errstr[512]; - err = rd_kafka_AdminOptions_set_request_timeout(options, - tmout, - errstr, - 
sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_request_timeout: %s", errstr); err = rd_kafka_AdminOptions_set_operation_timeout( - options, - tmout-5000, - errstr, - sizeof(errstr)); + options, tmout - 5000, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_operation_timeout: %s", errstr); q = rd_kafka_queue_new(rk); @@ -6018,19 +6112,19 @@ test_DeleteConsumerGroupOffsets_simple ( } if (offsets) { - TEST_SAY("Deleting committed offsets for group %s and " - "%d partitions\n", - group_id, offsets->cnt); + TEST_SAY( + "Deleting committed offsets for group %s and " + "%d partitions\n", + group_id, offsets->cnt); - cgoffsets = rd_kafka_DeleteConsumerGroupOffsets_new(group_id, - offsets); + cgoffsets = + rd_kafka_DeleteConsumerGroupOffsets_new(group_id, offsets); } else { TEST_SAY("Provoking invalid DeleteConsumerGroupOffsets call\n"); cgoffsets = NULL; } - rd_kafka_DeleteConsumerGroupOffsets(rk, &cgoffsets, - cgoffsets ? 1 : 0, + rd_kafka_DeleteConsumerGroupOffsets(rk, &cgoffsets, cgoffsets ? 
1 : 0, options, useq); if (cgoffsets) @@ -6042,9 +6136,8 @@ test_DeleteConsumerGroupOffsets_simple ( return RD_KAFKA_RESP_ERR_NO_ERROR; err = test_wait_topic_admin_result( - q, - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, - NULL, tmout+5000); + q, RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, NULL, + tmout + 5000); rd_kafka_queue_destroy(q); @@ -6063,11 +6156,11 @@ test_DeleteConsumerGroupOffsets_simple ( * @param configs 'const char *name, const char *value' tuples * @param config_cnt is the number of tuples in \p configs */ -rd_kafka_resp_err_t -test_AlterConfigs_simple (rd_kafka_t *rk, - rd_kafka_ResourceType_t restype, - const char *resname, - const char **configs, size_t config_cnt) { +rd_kafka_resp_err_t test_AlterConfigs_simple(rd_kafka_t *rk, + rd_kafka_ResourceType_t restype, + const char *resname, + const char **configs, + size_t config_cnt) { rd_kafka_queue_t *q; rd_kafka_ConfigResource_t *confres; rd_kafka_event_t *rkev; @@ -6087,7 +6180,7 @@ test_AlterConfigs_simple (rd_kafka_t *rk, rd_kafka_DescribeConfigs(rk, &confres, 1, NULL, q); err = test_wait_topic_admin_result( - q, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, &rkev, 15*1000); + q, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, &rkev, 15 * 1000); if (err) { rd_kafka_queue_destroy(q); rd_kafka_ConfigResource_destroy(confres); @@ -6095,26 +6188,26 @@ test_AlterConfigs_simple (rd_kafka_t *rk, } results = rd_kafka_DescribeConfigs_result_resources( - rd_kafka_event_DescribeConfigs_result(rkev), &result_cnt); + rd_kafka_event_DescribeConfigs_result(rkev), &result_cnt); TEST_ASSERT(result_cnt == 1, - "expected 1 DescribeConfigs result, not %"PRIusz, + "expected 1 DescribeConfigs result, not %" PRIusz, result_cnt); - configents = rd_kafka_ConfigResource_configs(results[0], - &configent_cnt); + configents = + rd_kafka_ConfigResource_configs(results[0], &configent_cnt); TEST_ASSERT(configent_cnt > 0, - "expected > 0 ConfigEntry:s, not %"PRIusz, configent_cnt); + "expected > 0 ConfigEntry:s, not %" PRIusz, 
configent_cnt); TEST_SAY("Altering configuration for %d %s\n", restype, resname); /* Apply all existing configuration entries to resource object that * will later be passed to AlterConfigs. */ - for (i = 0 ; i < configent_cnt ; i++) { + for (i = 0; i < configent_cnt; i++) { err = rd_kafka_ConfigResource_set_config( - confres, - rd_kafka_ConfigEntry_name(configents[i]), - rd_kafka_ConfigEntry_value(configents[i])); - TEST_ASSERT(!err, "Failed to set read-back config %s=%s " + confres, rd_kafka_ConfigEntry_name(configents[i]), + rd_kafka_ConfigEntry_value(configents[i])); + TEST_ASSERT(!err, + "Failed to set read-back config %s=%s " "on local resource object", rd_kafka_ConfigEntry_name(configents[i]), rd_kafka_ConfigEntry_value(configents[i])); @@ -6123,13 +6216,13 @@ test_AlterConfigs_simple (rd_kafka_t *rk, rd_kafka_event_destroy(rkev); /* Then apply the configuration to change. */ - for (i = 0 ; i < config_cnt ; i += 2) { - err = rd_kafka_ConfigResource_set_config(confres, - configs[i], - configs[i+1]); - TEST_ASSERT(!err, "Failed to set config %s=%s on " + for (i = 0; i < config_cnt; i += 2) { + err = rd_kafka_ConfigResource_set_config(confres, configs[i], + configs[i + 1]); + TEST_ASSERT(!err, + "Failed to set config %s=%s on " "local resource object", - configs[i], configs[i+1]); + configs[i], configs[i + 1]); } rd_kafka_AlterConfigs(rk, &confres, 1, NULL, q); @@ -6137,7 +6230,7 @@ test_AlterConfigs_simple (rd_kafka_t *rk, rd_kafka_ConfigResource_destroy(confres); err = test_wait_topic_admin_result( - q, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, NULL, 15*1000); + q, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, NULL, 15 * 1000); rd_kafka_queue_destroy(q); @@ -6146,9 +6239,9 @@ test_AlterConfigs_simple (rd_kafka_t *rk, -static void test_free_string_array (char **strs, size_t cnt) { +static void test_free_string_array(char **strs, size_t cnt) { size_t i; - for (i = 0 ; i < cnt ; i++) + for (i = 0; i < cnt; i++) free(strs[i]); free(strs); } @@ -6159,10 +6252,10 @@ static void 
test_free_string_array (char **strs, size_t cnt) { * rdkafka test prefix. */ static rd_kafka_resp_err_t -test_get_all_test_topics (rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) { +test_get_all_test_topics(rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) { size_t test_topic_prefix_len = strlen(test_topic_prefix); const rd_kafka_metadata_t *md; - char **topics = NULL; + char **topics = NULL; size_t topic_cnt = 0; int i; rd_kafka_resp_err_t err; @@ -6172,12 +6265,13 @@ test_get_all_test_topics (rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) { *topicsp = NULL; /* Retrieve list of topics */ - err = rd_kafka_metadata(rk, 1/*all topics*/, NULL, &md, + err = rd_kafka_metadata(rk, 1 /*all topics*/, NULL, &md, tmout_multip(10000)); if (err) { - TEST_WARN("%s: Failed to acquire metadata: %s: " - "not deleting any topics\n", - __FUNCTION__, rd_kafka_err2str(err)); + TEST_WARN( + "%s: Failed to acquire metadata: %s: " + "not deleting any topics\n", + __FUNCTION__, rd_kafka_err2str(err)); return err; } @@ -6190,22 +6284,23 @@ test_get_all_test_topics (rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) { if (topicsp) topics = malloc(sizeof(*topics) * md->topic_cnt); - for (i = 0 ; i < md->topic_cnt ; i++) { + for (i = 0; i < md->topic_cnt; i++) { if (strlen(md->topics[i].topic) >= test_topic_prefix_len && - !strncmp(md->topics[i].topic, - test_topic_prefix, test_topic_prefix_len)) { + !strncmp(md->topics[i].topic, test_topic_prefix, + test_topic_prefix_len)) { if (topicsp) topics[topic_cnt++] = - rd_strdup(md->topics[i].topic); + rd_strdup(md->topics[i].topic); else topic_cnt++; } } if (topic_cnt == 0) { - TEST_SAY("%s: No topics (out of %d) matching our " - "test prefix (%s)\n", - __FUNCTION__, md->topic_cnt, test_topic_prefix); + TEST_SAY( + "%s: No topics (out of %d) matching our " + "test prefix (%s)\n", + __FUNCTION__, md->topic_cnt, test_topic_prefix); rd_kafka_metadata_destroy(md); if (topics) test_free_string_array(topics, topic_cnt); @@ -6224,7 +6319,7 
@@ test_get_all_test_topics (rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) { /** * @brief Delete all test topics using the Kafka Admin API. */ -rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { +rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms) { rd_kafka_t *rk; char **topics; size_t topic_cnt = 0; @@ -6252,14 +6347,15 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { q = rd_kafka_queue_get_main(rk); options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS); - if (rd_kafka_AdminOptions_set_operation_timeout(options, 2*60*1000, - errstr, - sizeof(errstr))) - TEST_SAY(_C_YEL "Failed to set DeleteTopics timeout: %s: " + if (rd_kafka_AdminOptions_set_operation_timeout(options, 2 * 60 * 1000, + errstr, sizeof(errstr))) + TEST_SAY(_C_YEL + "Failed to set DeleteTopics timeout: %s: " "ignoring\n", errstr); - TEST_SAY(_C_MAG "====> Deleting all test topics with <====" + TEST_SAY(_C_MAG + "====> Deleting all test topics with <====" "a timeout of 2 minutes\n"); test_DeleteTopics_simple(rk, q, topics, topic_cnt, options); @@ -6274,15 +6370,16 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { res = rd_kafka_event_DeleteTopics_result(rkev); if (!res) { - TEST_SAY("%s: Ignoring event: %s: %s\n", - __FUNCTION__, rd_kafka_event_name(rkev), + TEST_SAY("%s: Ignoring event: %s: %s\n", __FUNCTION__, + rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); rd_kafka_event_destroy(rkev); continue; } if (rd_kafka_event_error(rkev)) { - TEST_WARN("%s: DeleteTopics for %"PRIusz" topics " + TEST_WARN("%s: DeleteTopics for %" PRIusz + " topics " "failed: %s\n", __FUNCTION__, topic_cnt, rd_kafka_event_error_string(rkev)); @@ -6294,7 +6391,7 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { terr = rd_kafka_DeleteTopics_result_topics(res, &tcnt); - for(i = 0 ; i < (int)tcnt ; i++) { + for (i = 0; i < (int)tcnt; i++) { if (!rd_kafka_topic_result_error(terr[i])) { okcnt++; 
continue; @@ -6304,12 +6401,13 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { __FUNCTION__, rd_kafka_topic_result_name(terr[i]), rd_kafka_topic_result_error_string( - terr[i])); + terr[i])); } - TEST_SAY("%s: DeleteTopics " - "succeeded for %d/%"PRIusz" topics\n", - __FUNCTION__, okcnt, topic_cnt); + TEST_SAY( + "%s: DeleteTopics " + "succeeded for %d/%" PRIusz " topics\n", + __FUNCTION__, okcnt, topic_cnt); err = RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -6329,15 +6427,19 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { break; if (abs_timeout < test_clock()) { - TEST_WARN("%s: Timed out waiting for " - "remaining %"PRIusz" deleted topics " - "to disappear from cluster metadata\n", - __FUNCTION__, topic_cnt); + TEST_WARN( + "%s: Timed out waiting for " + "remaining %" PRIusz + " deleted topics " + "to disappear from cluster metadata\n", + __FUNCTION__, topic_cnt); break; } - TEST_SAY("Waiting for remaining %"PRIusz" delete topics " - "to disappear from cluster metadata\n", topic_cnt); + TEST_SAY("Waiting for remaining %" PRIusz + " delete topics " + "to disappear from cluster metadata\n", + topic_cnt); rd_sleep(1); } @@ -6349,8 +6451,13 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { -void test_fail0 (const char *file, int line, const char *function, - int do_lock, int fail_now, const char *fmt, ...) { +void test_fail0(const char *file, + int line, + const char *function, + int do_lock, + int fail_now, + const char *fmt, + ...) 
{ char buf[512]; int is_thrd = 0; size_t of; @@ -6360,7 +6467,8 @@ void test_fail0 (const char *file, int line, const char *function, time_t tnow = time(NULL); #ifdef __MINGW32__ - strftime(timestr, sizeof(timestr), "%a %b %d %H:%M:%S %Y", localtime(&tnow)); + strftime(timestr, sizeof(timestr), "%a %b %d %H:%M:%S %Y", + localtime(&tnow)); #elif defined(_WIN32) ctime_s(timestr, sizeof(timestr), &tnow); #else @@ -6370,17 +6478,16 @@ void test_fail0 (const char *file, int line, const char *function, if (t) *t = '\0'; - of = rd_snprintf(buf, sizeof(buf), "%s%s%s():%i: ", - test_curr->subtest, *test_curr->subtest ? ": " : "", - function, line); + of = rd_snprintf(buf, sizeof(buf), "%s%s%s():%i: ", test_curr->subtest, + *test_curr->subtest ? ": " : "", function, line); rd_assert(of < sizeof(buf)); va_start(ap, fmt); - rd_vsnprintf(buf+of, sizeof(buf)-of, fmt, ap); + rd_vsnprintf(buf + of, sizeof(buf) - of, fmt, ap); va_end(ap); /* Remove trailing newline */ - if ((t = strchr(buf, '\n')) && !*(t+1)) + if ((t = strchr(buf, '\n')) && !*(t + 1)) *t = '\0'; TEST_SAYL(0, "TEST FAILURE\n"); @@ -6388,11 +6495,9 @@ void test_fail0 (const char *file, int line, const char *function, "\033[31m### Test \"%s%s%s%s\" failed at %s:%i:%s() at %s: " "###\n" "%s\n", - test_curr->name, - *test_curr->subtest ? " (" : "", - test_curr->subtest, - *test_curr->subtest ? ")" : "", - file, line, function, timestr, buf+of); + test_curr->name, *test_curr->subtest ? " (" : "", + test_curr->subtest, *test_curr->subtest ? 
")" : "", file, line, + function, timestr, buf + of); if (do_lock) TEST_LOCK(); test_curr->state = TEST_FAILED; @@ -6401,7 +6506,7 @@ void test_fail0 (const char *file, int line, const char *function, if (!*test_curr->failstr) { strncpy(test_curr->failstr, buf, sizeof(test_curr->failstr)); - test_curr->failstr[sizeof(test_curr->failstr)-1] = '\0'; + test_curr->failstr[sizeof(test_curr->failstr) - 1] = '\0'; } if (fail_now && test_curr->mainfunc) { tests_running_cnt--; @@ -6421,7 +6526,7 @@ void test_fail0 (const char *file, int line, const char *function, /** * @brief Destroy a mock cluster and its underlying rd_kafka_t handle */ -void test_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster) { +void test_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster) { rd_kafka_t *rk = rd_kafka_mock_cluster_handle(mcluster); rd_kafka_mock_cluster_destroy(mcluster); rd_kafka_destroy(rk); @@ -6433,8 +6538,8 @@ void test_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster) { * @brief Create a standalone mock cluster that can be used by multiple * rd_kafka_t instances. */ -rd_kafka_mock_cluster_t *test_mock_cluster_new (int broker_cnt, - const char **bootstraps) { +rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt, + const char **bootstraps) { rd_kafka_t *rk; rd_kafka_conf_t *conf = rd_kafka_conf_new(); rd_kafka_mock_cluster_t *mcluster; @@ -6469,8 +6574,11 @@ rd_kafka_mock_cluster_t *test_mock_cluster_new (int broker_cnt, * * @returns 0 if sub-test should not be run, else 1. */ -int test_sub_start (const char *func, int line, int is_quick, - const char *fmt, ...) { +int test_sub_start(const char *func, + int line, + int is_quick, + const char *fmt, + ...) { if (!is_quick && test_quick) return 0; @@ -6517,7 +6625,7 @@ static void test_sub_reset(void) { /** * @brief Sub-test has passed. 
*/ -void test_sub_pass (void) { +void test_sub_pass(void) { TEST_ASSERT(*test_curr->subtest); @@ -6532,7 +6640,7 @@ void test_sub_pass (void) { /** * @brief Skip sub-test (must have been started with SUB_TEST*()). */ -void test_sub_skip (const char *fmt, ...) { +void test_sub_skip(const char *fmt, ...) { va_list ap; char buf[256]; diff --git a/tests/test.h b/tests/test.h index 48c46b4015..ca33f713b4 100644 --- a/tests/test.h +++ b/tests/test.h @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015, Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2015, Magnus Edenhill + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ #ifndef _TEST_H_ #define _TEST_H_ @@ -74,10 +74,10 @@ extern int test_concurrent_max; extern int test_rusage; extern double test_rusage_cpu_calibration; extern double test_timeout_multiplier; -extern int test_session_timeout_ms; /* Group session timeout */ -extern int test_flags; -extern int test_neg_flags; -extern int test_idempotent_producer; +extern int test_session_timeout_ms; /* Group session timeout */ +extern int test_flags; +extern int test_neg_flags; +extern int test_idempotent_producer; extern mtx_t test_mtx; @@ -91,11 +91,11 @@ typedef struct test_msgver_s test_msgver_t; /** @struct Resource usage thresholds */ struct rusage_thres { - double ucpu; /**< Max User CPU in percentage */ - double scpu; /**< Max Sys CPU in percentage */ - double rss; /**< Max RSS (memory) increase in MB */ - int ctxsw; /**< Max number of voluntary context switches, i.e. - * syscalls. */ + double ucpu; /**< Max User CPU in percentage */ + double scpu; /**< Max Sys CPU in percentage */ + double rss; /**< Max RSS (memory) increase in MB */ + int ctxsw; /**< Max number of voluntary context switches, i.e. + * syscalls. */ }; typedef enum { @@ -110,32 +110,35 @@ struct test { /** * Setup */ - const char *name; /**< e.g. Same as filename minus extension */ - int (*mainfunc) (int argc, char **argv); /**< test's main func */ - const int flags; /**< Test flags */ -#define TEST_F_LOCAL 0x1 /**< Test is local, no broker requirement */ -#define TEST_F_KNOWN_ISSUE 0x2 /**< Known issue, can fail without affecting - * total test run status. */ -#define TEST_F_MANUAL 0x4 /**< Manual test, only started when specifically - * stated */ -#define TEST_F_SOCKEM 0x8 /**< Test requires socket emulation. */ - int minver; /**< Limit tests to broker version range. */ - int maxver; - - const char *extra; /**< Extra information to print in test_summary. */ + const char *name; /**< e.g. 
Same as filename minus extension */ + int (*mainfunc)(int argc, char **argv); /**< test's main func */ + const int flags; /**< Test flags */ +#define TEST_F_LOCAL 0x1 /**< Test is local, no broker requirement */ +#define TEST_F_KNOWN_ISSUE \ + 0x2 /**< Known issue, can fail without affecting \ + * total test run status. */ +#define TEST_F_MANUAL \ + 0x4 /**< Manual test, only started when specifically \ + * stated */ +#define TEST_F_SOCKEM 0x8 /**< Test requires socket emulation. */ + int minver; /**< Limit tests to broker version range. */ + int maxver; + + const char *extra; /**< Extra information to print in test_summary. */ const char *scenario; /**< Test scenario */ - char **report_arr; /**< Test-specific reporting, JSON array of objects. */ - int report_cnt; - int report_size; + char * + *report_arr; /**< Test-specific reporting, JSON array of objects. */ + int report_cnt; + int report_size; rd_bool_t ignore_dr_err; /**< Ignore delivery report errors */ rd_kafka_resp_err_t exp_dr_err; /* Expected error in test_dr_cb */ rd_kafka_msg_status_t exp_dr_status; /**< Expected delivery status, * or -1 for not checking. */ - int produce_sync; /**< test_produce_sync() call in action */ - rd_kafka_resp_err_t produce_sync_err; /**< DR error */ + int produce_sync; /**< test_produce_sync() call in action */ + rd_kafka_resp_err_t produce_sync_err; /**< DR error */ test_msgver_t *dr_mv; /**< MsgVer that delivered messages will be * added to (if not NULL). * Must be set and freed by test. 
*/ @@ -143,26 +146,27 @@ struct test { /** * Runtime */ - thrd_t thrd; + thrd_t thrd; int64_t start; int64_t duration; - FILE *stats_fp; - int64_t timeout; + FILE *stats_fp; + int64_t timeout; test_state_t state; - int failcnt; /**< Number of failures, useful with FAIL_LATER */ - char failstr[512+1];/**< First test failure reason */ - char subtest[400];/**< Current subtest, if any */ + int failcnt; /**< Number of failures, useful with FAIL_LATER */ + char failstr[512 + 1]; /**< First test failure reason */ + char subtest[400]; /**< Current subtest, if any */ test_timing_t subtest_duration; /**< Subtest duration timing */ #if WITH_SOCKEM rd_list_t sockets; - int (*connect_cb) (struct test *test, sockem_t *skm, const char *id); + int (*connect_cb)(struct test *test, sockem_t *skm, const char *id); #endif - int (*is_fatal_cb) (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason); + int (*is_fatal_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason); /**< Resource usage thresholds */ - struct rusage_thres rusage_thres; /**< Usage thresholds */ + struct rusage_thres rusage_thres; /**< Usage thresholds */ #if HAVE_GETRUSAGE struct rusage rusage; /**< Monitored process CPU/mem usage */ #endif @@ -170,30 +174,33 @@ struct test { #ifdef _WIN32 -#define TEST_F_KNOWN_ISSUE_WIN32 TEST_F_KNOWN_ISSUE +#define TEST_F_KNOWN_ISSUE_WIN32 TEST_F_KNOWN_ISSUE #else #define TEST_F_KNOWN_ISSUE_WIN32 0 #endif #ifdef __APPLE__ -#define TEST_F_KNOWN_ISSUE_OSX TEST_F_KNOWN_ISSUE +#define TEST_F_KNOWN_ISSUE_OSX TEST_F_KNOWN_ISSUE #else -#define TEST_F_KNOWN_ISSUE_OSX 0 +#define TEST_F_KNOWN_ISSUE_OSX 0 #endif -#define TEST_SAY0(...) fprintf(stderr, __VA_ARGS__) -#define TEST_SAYL(LVL,...) do { \ - if (test_level >= LVL) { \ - fprintf(stderr, "\033[36m[%-28s/%7.3fs] ", \ - test_curr->name, \ - test_curr->start ? 
\ - ((float)(test_clock() - \ - test_curr->start)/1000000.0f) : 0); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); \ - } \ - } while (0) +#define TEST_SAY0(...) fprintf(stderr, __VA_ARGS__) +#define TEST_SAYL(LVL, ...) \ + do { \ + if (test_level >= LVL) { \ + fprintf( \ + stderr, "\033[36m[%-28s/%7.3fs] ", \ + test_curr->name, \ + test_curr->start \ + ? ((float)(test_clock() - test_curr->start) / \ + 1000000.0f) \ + : 0); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m"); \ + } \ + } while (0) #define TEST_SAY(...) TEST_SAYL(2, __VA_ARGS__) /** @@ -203,7 +210,7 @@ struct test { -static RD_INLINE RD_UNUSED void rtrim (char *str) { +static RD_INLINE RD_UNUSED void rtrim(char *str) { size_t len = strlen(str); char *s; @@ -218,41 +225,45 @@ static RD_INLINE RD_UNUSED void rtrim (char *str) { } /* Skip the current test. Argument is textual reason (printf format) */ -#define TEST_SKIP(...) do { \ - TEST_WARN("SKIPPING TEST: " __VA_ARGS__); \ - TEST_LOCK(); \ - test_curr->state = TEST_SKIPPED; \ - if (!*test_curr->failstr) { \ - rd_snprintf(test_curr->failstr, \ - sizeof(test_curr->failstr), __VA_ARGS__); \ - rtrim(test_curr->failstr); \ - } \ - TEST_UNLOCK(); \ +#define TEST_SKIP(...) 
\ + do { \ + TEST_WARN("SKIPPING TEST: " __VA_ARGS__); \ + TEST_LOCK(); \ + test_curr->state = TEST_SKIPPED; \ + if (!*test_curr->failstr) { \ + rd_snprintf(test_curr->failstr, \ + sizeof(test_curr->failstr), __VA_ARGS__); \ + rtrim(test_curr->failstr); \ + } \ + TEST_UNLOCK(); \ } while (0) -void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf, - int timeout); - - - +void test_conf_init(rd_kafka_conf_t **conf, + rd_kafka_topic_conf_t **topic_conf, + int timeout); +void test_msg_fmt(char *dest, + size_t dest_size, + uint64_t testid, + int32_t partition, + int msgid); +void test_msg_parse0(const char *func, + int line, + uint64_t testid, + rd_kafka_message_t *rkmessage, + int32_t exp_partition, + int *msgidp); +#define test_msg_parse(testid, rkmessage, exp_partition, msgidp) \ + test_msg_parse0(__FUNCTION__, __LINE__, testid, rkmessage, \ + exp_partition, msgidp) -void test_msg_fmt (char *dest, size_t dest_size, - uint64_t testid, int32_t partition, int msgid); -void test_msg_parse0 (const char *func, int line, - uint64_t testid, rd_kafka_message_t *rkmessage, - int32_t exp_partition, int *msgidp); -#define test_msg_parse(testid,rkmessage,exp_partition,msgidp) \ - test_msg_parse0(__FUNCTION__,__LINE__,\ - testid,rkmessage,exp_partition,msgidp) - -static RD_INLINE int jitter (int low, int high) RD_UNUSED; -static RD_INLINE int jitter (int low, int high) { - return (low + (rand() % ((high-low)+1))); +static RD_INLINE int jitter(int low, int high) RD_UNUSED; +static RD_INLINE int jitter(int low, int high) { + return (low + (rand() % ((high - low) + 1))); } @@ -266,10 +277,10 @@ static RD_INLINE int jitter (int low, int high) { /**************************************************************** - * Message verification services * - * * - * * - * * + * Message verification services * + * * + * * + * * ****************************************************************/ @@ -281,27 +292,27 @@ static RD_INLINE int jitter (int low, int high) { * - EOF 
*/ struct test_msgver_s { - struct test_mv_p **p; /* Partitions array */ - int p_cnt; /* Partition count */ - int p_size; /* p size */ - int msgcnt; /* Total message count */ - uint64_t testid; /* Only accept messages for this testid */ - rd_bool_t ignore_eof; /* Don't end PARTITION_EOF messages */ + struct test_mv_p **p; /* Partitions array */ + int p_cnt; /* Partition count */ + int p_size; /* p size */ + int msgcnt; /* Total message count */ + uint64_t testid; /* Only accept messages for this testid */ + rd_bool_t ignore_eof; /* Don't end PARTITION_EOF messages */ - struct test_msgver_s *fwd; /* Also forward add_msg() to this mv */ + struct test_msgver_s *fwd; /* Also forward add_msg() to this mv */ - int log_cnt; /* Current number of warning logs */ - int log_max; /* Max warning logs before suppressing. */ - int log_suppr_cnt; /* Number of suppressed log messages. */ + int log_cnt; /* Current number of warning logs */ + int log_max; /* Max warning logs before suppressing. */ + int log_suppr_cnt; /* Number of suppressed log messages. */ const char *msgid_hdr; /**< msgid string is in header by this name, * rather than in the payload (default). 
*/ -}; /* test_msgver_t; */ +}; /* test_msgver_t; */ /* Message */ struct test_mv_m { int64_t offset; /* Message offset */ - int msgid; /* Message id */ + int msgid; /* Message id */ int64_t timestamp; /* Message timestamp */ int32_t broker_id; /* Message broker id */ }; @@ -309,81 +320,92 @@ struct test_mv_m { /* Message vector */ struct test_mv_mvec { - struct test_mv_m *m; - int cnt; - int size; /* m[] size */ + struct test_mv_m *m; + int cnt; + int size; /* m[] size */ }; /* Partition */ struct test_mv_p { - char *topic; - int32_t partition; - struct test_mv_mvec mvec; - int64_t eof_offset; + char *topic; + int32_t partition; + struct test_mv_mvec mvec; + int64_t eof_offset; }; /* Verification state */ struct test_mv_vs { - int msg_base; - int exp_cnt; + int msg_base; + int exp_cnt; - /* used by verify_range */ - int msgid_min; - int msgid_max; + /* used by verify_range */ + int msgid_min; + int msgid_max; int64_t timestamp_min; int64_t timestamp_max; /* used by verify_broker_id */ int32_t broker_id; - struct test_mv_mvec mvec; + struct test_mv_mvec mvec; /* Correct msgver for comparison */ test_msgver_t *corr; }; -void test_msgver_init (test_msgver_t *mv, uint64_t testid); -void test_msgver_clear (test_msgver_t *mv); -void test_msgver_ignore_eof (test_msgver_t *mv); -int test_msgver_add_msg00 (const char *func, int line, const char *clientname, - test_msgver_t *mv, - uint64_t testid, - const char *topic, int32_t partition, - int64_t offset, int64_t timestamp, int32_t broker_id, - rd_kafka_resp_err_t err, int msgnum); -int test_msgver_add_msg0 (const char *func, int line, const char *clientname, +void test_msgver_init(test_msgver_t *mv, uint64_t testid); +void test_msgver_clear(test_msgver_t *mv); +void test_msgver_ignore_eof(test_msgver_t *mv); +int test_msgver_add_msg00(const char *func, + int line, + const char *clientname, test_msgver_t *mv, - const rd_kafka_message_t *rkmessage, - const char *override_topic); -#define test_msgver_add_msg(rk,mv,rkm) \ - 
test_msgver_add_msg0(__FUNCTION__,__LINE__, \ - rd_kafka_name(rk),mv,rkm,NULL) + uint64_t testid, + const char *topic, + int32_t partition, + int64_t offset, + int64_t timestamp, + int32_t broker_id, + rd_kafka_resp_err_t err, + int msgnum); +int test_msgver_add_msg0(const char *func, + int line, + const char *clientname, + test_msgver_t *mv, + const rd_kafka_message_t *rkmessage, + const char *override_topic); +#define test_msgver_add_msg(rk, mv, rkm) \ + test_msgver_add_msg0(__FUNCTION__, __LINE__, rd_kafka_name(rk), mv, \ + rkm, NULL) /** * Flags to indicate what to verify. */ -#define TEST_MSGVER_ORDER 0x1 /* Order */ -#define TEST_MSGVER_DUP 0x2 /* Duplicates */ -#define TEST_MSGVER_RANGE 0x4 /* Range of messages */ +#define TEST_MSGVER_ORDER 0x1 /* Order */ +#define TEST_MSGVER_DUP 0x2 /* Duplicates */ +#define TEST_MSGVER_RANGE 0x4 /* Range of messages */ -#define TEST_MSGVER_ALL 0xf /* All verifiers */ +#define TEST_MSGVER_ALL 0xf /* All verifiers */ -#define TEST_MSGVER_BY_MSGID 0x10000 /* Verify by msgid (unique in testid) */ -#define TEST_MSGVER_BY_OFFSET 0x20000 /* Verify by offset (unique in partition)*/ +#define TEST_MSGVER_BY_MSGID 0x10000 /* Verify by msgid (unique in testid) */ +#define TEST_MSGVER_BY_OFFSET \ + 0x20000 /* Verify by offset (unique in partition)*/ #define TEST_MSGVER_BY_TIMESTAMP 0x40000 /* Verify by timestamp range */ #define TEST_MSGVER_BY_BROKER_ID 0x80000 /* Verify by broker id */ -#define TEST_MSGVER_SUBSET 0x100000 /* verify_compare: allow correct mv to be - * a subset of mv. */ +#define TEST_MSGVER_SUBSET \ + 0x100000 /* verify_compare: allow correct mv to be \ + * a subset of mv. */ /* Only test per partition, not across all messages received on all partitions. * This is useful when doing incremental verifications with multiple partitions * and the total number of messages has not been received yet. 
* Can't do range check here since messages may be spread out on multiple * partitions and we might just have read a few partitions. */ -#define TEST_MSGVER_PER_PART ((TEST_MSGVER_ALL & ~TEST_MSGVER_RANGE) | \ - TEST_MSGVER_BY_MSGID | TEST_MSGVER_BY_OFFSET) +#define TEST_MSGVER_PER_PART \ + ((TEST_MSGVER_ALL & ~TEST_MSGVER_RANGE) | TEST_MSGVER_BY_MSGID | \ + TEST_MSGVER_BY_OFFSET) /* Test on all messages across all partitions. * This can only be used to check with msgid, not offset since that @@ -391,310 +413,395 @@ int test_msgver_add_msg0 (const char *func, int line, const char *clientname, #define TEST_MSGVER_ALL_PART (TEST_MSGVER_ALL | TEST_MSGVER_BY_MSGID) -int test_msgver_verify_part0 (const char *func, int line, const char *what, - test_msgver_t *mv, int flags, - const char *topic, int partition, - int msg_base, int exp_cnt); -#define test_msgver_verify_part(what,mv,flags,topic,partition,msg_base,exp_cnt) \ - test_msgver_verify_part0(__FUNCTION__,__LINE__, \ - what,mv,flags,topic,partition,msg_base,exp_cnt) - -int test_msgver_verify0 (const char *func, int line, const char *what, - test_msgver_t *mv, int flags, struct test_mv_vs vs); -#define test_msgver_verify(what,mv,flags,msgbase,expcnt) \ - test_msgver_verify0(__FUNCTION__,__LINE__, \ - what,mv,flags, \ - (struct test_mv_vs){.msg_base = msgbase, \ - .exp_cnt = expcnt}) - - -void test_msgver_verify_compare0 (const char *func, int line, - const char *what, test_msgver_t *mv, - test_msgver_t *corr, int flags); -#define test_msgver_verify_compare(what,mv,corr,flags) \ - test_msgver_verify_compare0(__FUNCTION__,__LINE__, what, mv, corr, flags) - -rd_kafka_t *test_create_handle (int mode, rd_kafka_conf_t *conf); +int test_msgver_verify_part0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + int flags, + const char *topic, + int partition, + int msg_base, + int exp_cnt); +#define test_msgver_verify_part(what, mv, flags, topic, partition, msg_base, \ + exp_cnt) \ + 
test_msgver_verify_part0(__FUNCTION__, __LINE__, what, mv, flags, \ + topic, partition, msg_base, exp_cnt) + +int test_msgver_verify0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + int flags, + struct test_mv_vs vs); +#define test_msgver_verify(what, mv, flags, msgbase, expcnt) \ + test_msgver_verify0( \ + __FUNCTION__, __LINE__, what, mv, flags, \ + (struct test_mv_vs) {.msg_base = msgbase, .exp_cnt = expcnt}) + + +void test_msgver_verify_compare0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + test_msgver_t *corr, + int flags); +#define test_msgver_verify_compare(what, mv, corr, flags) \ + test_msgver_verify_compare0(__FUNCTION__, __LINE__, what, mv, corr, \ + flags) + +rd_kafka_t *test_create_handle(int mode, rd_kafka_conf_t *conf); /** * Delivery reported callback. * Called for each message once to signal its delivery status. */ -void test_dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque); - -rd_kafka_t *test_create_producer (void); -rd_kafka_topic_t *test_create_producer_topic(rd_kafka_t *rk, - const char *topic, ...); -void test_wait_delivery (rd_kafka_t *rk, int *msgcounterp); -void test_produce_msgs_nowait (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size, int msgrate, - int *msgcounterp); -void test_produce_msgs (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size); -void test_produce_msgs2 (rd_kafka_t *rk, const char *topic, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size); -void test_produce_msgs2_nowait (rd_kafka_t *rk, const char *topic, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size, - int *remainsp); -void test_produce_msgs_rate (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t 
partition, - int msg_base, int cnt, - const char *payload, size_t size, int msgrate); -rd_kafka_resp_err_t test_produce_sync (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition); - -void test_produce_msgs_easy_v (const char *topic, uint64_t testid, +void test_dr_msg_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque); + +rd_kafka_t *test_create_producer(void); +rd_kafka_topic_t * +test_create_producer_topic(rd_kafka_t *rk, const char *topic, ...); +void test_wait_delivery(rd_kafka_t *rk, int *msgcounterp); +void test_produce_msgs_nowait(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int msgrate, + int *msgcounterp); +void test_produce_msgs(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size); +void test_produce_msgs2(rd_kafka_t *rk, + const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size); +void test_produce_msgs2_nowait(rd_kafka_t *rk, + const char *topic, + uint64_t testid, int32_t partition, - int msg_base, int cnt, size_t size, ...); -void test_produce_msgs_easy_multi (uint64_t testid, ...); - -void test_incremental_rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque); -void test_rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque); - -rd_kafka_t *test_create_consumer (const char *group_id, - void (*rebalance_cb) ( - rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t - *partitions, - void *opaque), - rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *default_topic_conf); -rd_kafka_topic_t *test_create_consumer_topic (rd_kafka_t *rk, - const char *topic); -rd_kafka_topic_t *test_create_topic_object (rd_kafka_t 
*rk, - const char *topic, ...); -void test_consumer_start (const char *what, - rd_kafka_topic_t *rkt, int32_t partition, - int64_t start_offset); -void test_consumer_stop (const char *what, - rd_kafka_topic_t *rkt, int32_t partition); -void test_consumer_seek (const char *what, rd_kafka_topic_t *rkt, - int32_t partition, int64_t offset); - -#define TEST_NO_SEEK -1 -int64_t test_consume_msgs (const char *what, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, int64_t offset, - int exp_msg_base, int exp_cnt, int parse_fmt); - - -void test_verify_rkmessage0 (const char *func, int line, - rd_kafka_message_t *rkmessage, uint64_t testid, - int32_t partition, int msgnum); -#define test_verify_rkmessage(rkmessage,testid,partition,msgnum) \ - test_verify_rkmessage0(__FUNCTION__,__LINE__,\ - rkmessage,testid,partition,msgnum) - -void test_consumer_subscribe (rd_kafka_t *rk, const char *topic); - -void -test_consume_msgs_easy_mv0 (const char *group_id, const char *topic, - rd_bool_t txn, + int msg_base, + int cnt, + const char *payload, + size_t size, + int *remainsp); +void test_produce_msgs_rate(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int msgrate); +rd_kafka_resp_err_t test_produce_sync(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition); + +void test_produce_msgs_easy_v(const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + size_t size, + ...); +void test_produce_msgs_easy_multi(uint64_t testid, ...); + +void test_incremental_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque); +void test_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque); + +rd_kafka_t *test_create_consumer( + const char *group_id, + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + 
rd_kafka_topic_partition_list_t *partitions, + void *opaque), + rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *default_topic_conf); +rd_kafka_topic_t *test_create_consumer_topic(rd_kafka_t *rk, const char *topic); +rd_kafka_topic_t * +test_create_topic_object(rd_kafka_t *rk, const char *topic, ...); +void test_consumer_start(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition, + int64_t start_offset); +void test_consumer_stop(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition); +void test_consumer_seek(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset); + +#define TEST_NO_SEEK -1 +int64_t test_consume_msgs(const char *what, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int64_t offset, + int exp_msg_base, + int exp_cnt, + int parse_fmt); + + +void test_verify_rkmessage0(const char *func, + int line, + rd_kafka_message_t *rkmessage, + uint64_t testid, int32_t partition, - uint64_t testid, int exp_eofcnt, int exp_msgcnt, - rd_kafka_topic_conf_t *tconf, - test_msgver_t *mv); - -#define test_consume_msgs_easy_mv(group_id,topic,partition,testid,exp_eofcnt,exp_msgcnt,tconf,mv) \ - test_consume_msgs_easy_mv0(group_id,topic,rd_false/*not-txn*/, \ - partition,testid,exp_eofcnt,exp_msgcnt, \ - tconf,mv) - -void -test_consume_msgs_easy (const char *group_id, const char *topic, - uint64_t testid, int exp_eofcnt, int exp_msgcnt, - rd_kafka_topic_conf_t *tconf); - -void -test_consume_txn_msgs_easy (const char *group_id, const char *topic, - uint64_t testid, int exp_eofcnt, int exp_msgcnt, + int msgnum); +#define test_verify_rkmessage(rkmessage, testid, partition, msgnum) \ + test_verify_rkmessage0(__FUNCTION__, __LINE__, rkmessage, testid, \ + partition, msgnum) + +void test_consumer_subscribe(rd_kafka_t *rk, const char *topic); + +void test_consume_msgs_easy_mv0(const char *group_id, + const char *topic, + rd_bool_t txn, + int32_t partition, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + 
rd_kafka_topic_conf_t *tconf, + test_msgver_t *mv); + +#define test_consume_msgs_easy_mv(group_id, topic, partition, testid, \ + exp_eofcnt, exp_msgcnt, tconf, mv) \ + test_consume_msgs_easy_mv0(group_id, topic, rd_false /*not-txn*/, \ + partition, testid, exp_eofcnt, exp_msgcnt, \ + tconf, mv) + +void test_consume_msgs_easy(const char *group_id, + const char *topic, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, rd_kafka_topic_conf_t *tconf); -void test_consumer_poll_no_msgs (const char *what, rd_kafka_t *rk, - uint64_t testid, int timeout_ms); -void test_consumer_poll_expect_err (rd_kafka_t *rk, uint64_t testid, - int timeout_ms, rd_kafka_resp_err_t err); -int test_consumer_poll_once (rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms); -int test_consumer_poll_exact (const char *what, rd_kafka_t *rk, uint64_t testid, - int exp_eof_cnt, int exp_msg_base, int exp_cnt, - rd_bool_t exact, test_msgver_t *mv); -int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid, - int exp_eof_cnt, int exp_msg_base, int exp_cnt, - test_msgver_t *mv); - -void test_consumer_wait_assignment (rd_kafka_t *rk, rd_bool_t do_poll); -void test_consumer_verify_assignment0 (const char *func, int line, - rd_kafka_t *rk, - int fail_immediately, ...); -#define test_consumer_verify_assignment(rk,fail_immediately,...) 
\ - test_consumer_verify_assignment0(__FUNCTION__,__LINE__,rk, \ - fail_immediately,__VA_ARGS__) - -void test_consumer_assign (const char *what, rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *parts); -void test_consumer_incremental_assign (const char *what, rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *parts); -void test_consumer_unassign (const char *what, rd_kafka_t *rk); -void test_consumer_incremental_unassign (const char *what, rd_kafka_t *rk, - rd_kafka_topic_partition_list_t - *parts); -void test_consumer_assign_partition (const char *what, rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t offset); -void test_consumer_pause_resume_partition (rd_kafka_t *rk, - const char *topic, int32_t partition, - rd_bool_t pause); - -void test_consumer_close (rd_kafka_t *rk); - -void test_flush (rd_kafka_t *rk, int timeout_ms); - -void test_conf_set (rd_kafka_conf_t *conf, const char *name, const char *val); -char *test_topic_conf_get (const rd_kafka_topic_conf_t *tconf, - const char *name); -int test_conf_match (rd_kafka_conf_t *conf, const char *name, const char *val); -void test_topic_conf_set (rd_kafka_topic_conf_t *tconf, - const char *name, const char *val); -void test_any_conf_set (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf, - const char *name, const char *val); - -void test_print_partition_list (const rd_kafka_topic_partition_list_t - *partitions); -int test_partition_list_cmp (rd_kafka_topic_partition_list_t *al, - rd_kafka_topic_partition_list_t *bl); - -void test_kafka_topics (const char *fmt, ...); -void test_create_topic (rd_kafka_t *use_rk, - const char *topicname, int partition_cnt, - int replication_factor); -rd_kafka_resp_err_t test_auto_create_topic_rkt (rd_kafka_t *rk, - rd_kafka_topic_t *rkt, - int timeout_ms); -rd_kafka_resp_err_t test_auto_create_topic (rd_kafka_t *rk, const char *name, - int timeout_ms); -int test_check_auto_create_topic (void); - -void test_create_partitions (rd_kafka_t *use_rk, - const char 
*topicname, int new_partition_cnt); - -int test_get_partition_count (rd_kafka_t *rk, const char *topicname, - int timeout_ms); - -char *tsprintf (const char *fmt, ...) RD_FORMAT(printf, 1, 2); - -void test_report_add (struct test *test, const char *fmt, ...); -int test_can_create_topics (int skip); - -rd_kafka_event_t *test_wait_event (rd_kafka_queue_t *eventq, - rd_kafka_event_type_t event_type, - int timeout_ms); - -void test_prepare_msg (uint64_t testid, int32_t partition, int msg_id, - char *val, size_t val_size, - char *key, size_t key_size); - -#if WITH_SOCKEM -void test_socket_enable (rd_kafka_conf_t *conf); -void test_socket_close_all (struct test *test, int reinit); -int test_socket_sockem_set_all (const char *key, int val); -void test_socket_sockem_set (int s, const char *key, int value); -#endif +void test_consume_txn_msgs_easy(const char *group_id, + const char *topic, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + rd_kafka_topic_conf_t *tconf); + +void test_consumer_poll_no_msgs(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int timeout_ms); +void test_consumer_poll_expect_err(rd_kafka_t *rk, + uint64_t testid, + int timeout_ms, + rd_kafka_resp_err_t err); +int test_consumer_poll_once(rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms); +int test_consumer_poll_exact(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + rd_bool_t exact, + test_msgver_t *mv); +int test_consumer_poll(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + test_msgver_t *mv); + +void test_consumer_wait_assignment(rd_kafka_t *rk, rd_bool_t do_poll); +void test_consumer_verify_assignment0(const char *func, + int line, + rd_kafka_t *rk, + int fail_immediately, + ...); +#define test_consumer_verify_assignment(rk, fail_immediately, ...) 
\ + test_consumer_verify_assignment0(__FUNCTION__, __LINE__, rk, \ + fail_immediately, __VA_ARGS__) + +void test_consumer_assign(const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *parts); +void test_consumer_incremental_assign(const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *parts); +void test_consumer_unassign(const char *what, rd_kafka_t *rk); +void test_consumer_incremental_unassign(const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *parts); +void test_consumer_assign_partition(const char *what, + rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t offset); +void test_consumer_pause_resume_partition(rd_kafka_t *rk, + const char *topic, + int32_t partition, + rd_bool_t pause); + +void test_consumer_close(rd_kafka_t *rk); + +void test_flush(rd_kafka_t *rk, int timeout_ms); + +void test_conf_set(rd_kafka_conf_t *conf, const char *name, const char *val); +char *test_topic_conf_get(const rd_kafka_topic_conf_t *tconf, const char *name); +int test_conf_match(rd_kafka_conf_t *conf, const char *name, const char *val); +void test_topic_conf_set(rd_kafka_topic_conf_t *tconf, + const char *name, + const char *val); +void test_any_conf_set(rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf, + const char *name, + const char *val); + +void test_print_partition_list( + const rd_kafka_topic_partition_list_t *partitions); +int test_partition_list_cmp(rd_kafka_topic_partition_list_t *al, + rd_kafka_topic_partition_list_t *bl); + +void test_kafka_topics(const char *fmt, ...); +void test_create_topic(rd_kafka_t *use_rk, + const char *topicname, + int partition_cnt, + int replication_factor); +rd_kafka_resp_err_t test_auto_create_topic_rkt(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + int timeout_ms); +rd_kafka_resp_err_t +test_auto_create_topic(rd_kafka_t *rk, const char *name, int timeout_ms); +int test_check_auto_create_topic(void); -void test_headers_dump (const char *what, int lvl, - const 
rd_kafka_headers_t *hdrs); +void test_create_partitions(rd_kafka_t *use_rk, + const char *topicname, + int new_partition_cnt); -int32_t *test_get_broker_ids (rd_kafka_t *use_rk, size_t *cntp); +int test_get_partition_count(rd_kafka_t *rk, + const char *topicname, + int timeout_ms); -void test_wait_metadata_update (rd_kafka_t *rk, - rd_kafka_metadata_topic_t *topics, - size_t topic_cnt, - rd_kafka_metadata_topic_t *not_topics, - size_t not_topic_cnt, - int tmout); +char *tsprintf(const char *fmt, ...) RD_FORMAT(printf, 1, 2); -rd_kafka_event_t * -test_wait_admin_result (rd_kafka_queue_t *q, - rd_kafka_event_type_t evtype, - int tmout); +void test_report_add(struct test *test, const char *fmt, ...); +int test_can_create_topics(int skip); -rd_kafka_resp_err_t -test_wait_topic_admin_result (rd_kafka_queue_t *q, - rd_kafka_event_type_t evtype, - rd_kafka_event_t **retevent, - int tmout); +rd_kafka_event_t *test_wait_event(rd_kafka_queue_t *eventq, + rd_kafka_event_type_t event_type, + int timeout_ms); -rd_kafka_resp_err_t -test_CreateTopics_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - char **topics, size_t topic_cnt, - int num_partitions, - void *opaque); -rd_kafka_resp_err_t -test_CreatePartitions_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - const char *topic, - size_t total_part_cnt, - void *opaque); +void test_prepare_msg(uint64_t testid, + int32_t partition, + int msg_id, + char *val, + size_t val_size, + char *key, + size_t key_size); -rd_kafka_resp_err_t -test_DeleteTopics_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - char **topics, size_t topic_cnt, - void *opaque); +#if WITH_SOCKEM +void test_socket_enable(rd_kafka_conf_t *conf); +void test_socket_close_all(struct test *test, int reinit); +int test_socket_sockem_set_all(const char *key, int val); +void test_socket_sockem_set(int s, const char *key, int value); +#endif -rd_kafka_resp_err_t -test_AlterConfigs_simple (rd_kafka_t *rk, - rd_kafka_ResourceType_t restype, - const char *resname, - 
const char **configs, size_t config_cnt); +void test_headers_dump(const char *what, + int lvl, + const rd_kafka_headers_t *hdrs); + +int32_t *test_get_broker_ids(rd_kafka_t *use_rk, size_t *cntp); + +void test_wait_metadata_update(rd_kafka_t *rk, + rd_kafka_metadata_topic_t *topics, + size_t topic_cnt, + rd_kafka_metadata_topic_t *not_topics, + size_t not_topic_cnt, + int tmout); + +rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q, + rd_kafka_event_type_t evtype, + int tmout); + +rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q, + rd_kafka_event_type_t evtype, + rd_kafka_event_t **retevent, + int tmout); + +rd_kafka_resp_err_t test_CreateTopics_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **topics, + size_t topic_cnt, + int num_partitions, + void *opaque); +rd_kafka_resp_err_t test_CreatePartitions_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + const char *topic, + size_t total_part_cnt, + void *opaque); + +rd_kafka_resp_err_t test_DeleteTopics_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **topics, + size_t topic_cnt, + void *opaque); + +rd_kafka_resp_err_t test_AlterConfigs_simple(rd_kafka_t *rk, + rd_kafka_ResourceType_t restype, + const char *resname, + const char **configs, + size_t config_cnt); + +rd_kafka_resp_err_t test_DeleteGroups_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **groups, + size_t group_cnt, + void *opaque); rd_kafka_resp_err_t -test_DeleteGroups_simple (rd_kafka_t *rk, +test_DeleteRecords_simple(rd_kafka_t *rk, rd_kafka_queue_t *useq, - char **groups, size_t group_cnt, + const rd_kafka_topic_partition_list_t *offsets, void *opaque); -rd_kafka_resp_err_t -test_DeleteRecords_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - const rd_kafka_topic_partition_list_t *offsets, - void *opaque); - -rd_kafka_resp_err_t -test_DeleteConsumerGroupOffsets_simple ( - rd_kafka_t *rk, - rd_kafka_queue_t *useq, - const char *group_id, - const rd_kafka_topic_partition_list_t *offsets, - void 
*opaque); +rd_kafka_resp_err_t test_DeleteConsumerGroupOffsets_simple( + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + const char *group_id, + const rd_kafka_topic_partition_list_t *offsets, + void *opaque); -rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms); +rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms); -void test_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster); -rd_kafka_mock_cluster_t *test_mock_cluster_new (int broker_cnt, - const char **bootstraps); +void test_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster); +rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt, + const char **bootstraps); -int test_error_is_not_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason); +int test_error_is_not_fatal_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason); /** @@ -705,58 +812,60 @@ int test_error_is_not_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, * * @remark The trailing __ makes calling code easier to read. 
*/ -#define TEST_CALL__(FUNC_W_ARGS) do { \ - test_timing_t _timing; \ - const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \ - rd_kafka_resp_err_t _err; \ - TIMING_START(&_timing, "%s", _desc); \ - TEST_SAYL(3, "Begin call %s\n", _desc); \ - _err = FUNC_W_ARGS; \ - TIMING_STOP(&_timing); \ - if (!_err) \ - break; \ - if (strstr(_desc, "errstr")) \ - TEST_FAIL("%s failed: %s: %s\n", \ - _desc, rd_kafka_err2name(_err), errstr); \ - else \ - TEST_FAIL("%s failed: %s\n", \ - _desc, rd_kafka_err2str(_err)); \ +#define TEST_CALL__(FUNC_W_ARGS) \ + do { \ + test_timing_t _timing; \ + const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \ + rd_kafka_resp_err_t _err; \ + TIMING_START(&_timing, "%s", _desc); \ + TEST_SAYL(3, "Begin call %s\n", _desc); \ + _err = FUNC_W_ARGS; \ + TIMING_STOP(&_timing); \ + if (!_err) \ + break; \ + if (strstr(_desc, "errstr")) \ + TEST_FAIL("%s failed: %s: %s\n", _desc, \ + rd_kafka_err2name(_err), errstr); \ + else \ + TEST_FAIL("%s failed: %s\n", _desc, \ + rd_kafka_err2str(_err)); \ } while (0) /** * @brief Same as TEST_CALL__() but expects an rd_kafka_error_t * return type. 
*/ -#define TEST_CALL_ERROR__(FUNC_W_ARGS) do { \ - test_timing_t _timing; \ - const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \ - rd_kafka_error_t *_error; \ - TIMING_START(&_timing, "%s", _desc); \ - TEST_SAYL(3, "Begin call %s\n", _desc); \ - _error = FUNC_W_ARGS; \ - TIMING_STOP(&_timing); \ - if (!_error) \ - break; \ - TEST_FAIL("%s failed: %s\n", \ - _desc, rd_kafka_error_string(_error)); \ +#define TEST_CALL_ERROR__(FUNC_W_ARGS) \ + do { \ + test_timing_t _timing; \ + const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \ + rd_kafka_error_t *_error; \ + TIMING_START(&_timing, "%s", _desc); \ + TEST_SAYL(3, "Begin call %s\n", _desc); \ + _error = FUNC_W_ARGS; \ + TIMING_STOP(&_timing); \ + if (!_error) \ + break; \ + TEST_FAIL("%s failed: %s\n", _desc, \ + rd_kafka_error_string(_error)); \ } while (0) /** * @brief Same as TEST_CALL__() but expects an rd_kafka_resp_err_t return type * without errstr. */ -#define TEST_CALL_ERR__(FUNC_W_ARGS) do { \ - test_timing_t _timing; \ - const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \ - rd_kafka_resp_err_t _err; \ - TIMING_START(&_timing, "%s", _desc); \ - TEST_SAYL(3, "Begin call %s\n", _desc); \ - _err = FUNC_W_ARGS; \ - TIMING_STOP(&_timing); \ - if (!_err) \ - break; \ - TEST_FAIL("%s failed: %s\n", \ - _desc, rd_kafka_err2str(_err)); \ +#define TEST_CALL_ERR__(FUNC_W_ARGS) \ + do { \ + test_timing_t _timing; \ + const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \ + rd_kafka_resp_err_t _err; \ + TIMING_START(&_timing, "%s", _desc); \ + TEST_SAYL(3, "Begin call %s\n", _desc); \ + _err = FUNC_W_ARGS; \ + TIMING_STOP(&_timing); \ + if (!_err) \ + break; \ + TEST_FAIL("%s failed: %s\n", _desc, rd_kafka_err2str(_err)); \ } while (0) @@ -767,30 +876,30 @@ int test_error_is_not_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, * prior to the error details. E.g., "commit() returned: ". * A newline is automatically appended. */ -#define TEST_SAY_ERROR(ERROR,...) 
do { \ - rd_kafka_error_t *_e = (ERROR); \ - TEST_SAY(__VA_ARGS__); \ - if (!_e) { \ - TEST_SAY0("No error" _C_CLR "\n"); \ - break; \ - } \ - if (rd_kafka_error_is_fatal(_e)) \ - TEST_SAY0(_C_RED "FATAL "); \ - if (rd_kafka_error_is_retriable(_e)) \ - TEST_SAY0("Retriable "); \ - if (rd_kafka_error_txn_requires_abort(_e)) \ - TEST_SAY0("TxnRequiresAbort "); \ - TEST_SAY0("Error: %s: %s" _C_CLR "\n", \ - rd_kafka_error_name(_e), \ - rd_kafka_error_string(_e)); \ +#define TEST_SAY_ERROR(ERROR, ...) \ + do { \ + rd_kafka_error_t *_e = (ERROR); \ + TEST_SAY(__VA_ARGS__); \ + if (!_e) { \ + TEST_SAY0("No error" _C_CLR "\n"); \ + break; \ + } \ + if (rd_kafka_error_is_fatal(_e)) \ + TEST_SAY0(_C_RED "FATAL "); \ + if (rd_kafka_error_is_retriable(_e)) \ + TEST_SAY0("Retriable "); \ + if (rd_kafka_error_txn_requires_abort(_e)) \ + TEST_SAY0("TxnRequiresAbort "); \ + TEST_SAY0("Error: %s: %s" _C_CLR "\n", \ + rd_kafka_error_name(_e), rd_kafka_error_string(_e)); \ } while (0) /** * @name rusage.c * @{ */ -void test_rusage_start (struct test *test); -int test_rusage_stop (struct test *test, double duration); +void test_rusage_start(struct test *test); +int test_rusage_stop(struct test *test, double duration); /**@}*/ diff --git a/tests/testcpp.cpp b/tests/testcpp.cpp index 908bbf7b2d..e965e249f1 100644 --- a/tests/testcpp.cpp +++ b/tests/testcpp.cpp @@ -39,10 +39,10 @@ namespace Test { * @brief Read config file and populate config objects. 
* @returns 0 on success or -1 on error */ -static int read_config_file (std::string path, - RdKafka::Conf *conf, - RdKafka::Conf *topic_conf, - int *timeoutp) { +static int read_config_file(std::string path, + RdKafka::Conf *conf, + RdKafka::Conf *topic_conf, + int *timeoutp) { std::ifstream input(path.c_str(), std::ifstream::in); if (!input) @@ -54,8 +54,7 @@ static int read_config_file (std::string path, line.erase(0, line.find_first_not_of("\t ")); line.erase(line.find_last_not_of("\t ") + 1); - if (line.length() == 0 || - line.substr(0, 1) == "#") + if (line.length() == 0 || line.substr(0, 1) == "#") continue; size_t f = line.find("="); @@ -65,7 +64,7 @@ static int read_config_file (std::string path, } std::string n = line.substr(0, f); - std::string v = line.substr(f+1); + std::string v = line.substr(f + 1); std::string errstr; if (test_set_special_conf(n.c_str(), v.c_str(), timeoutp)) @@ -87,9 +86,7 @@ static int read_config_file (std::string path, return 0; } -void conf_init (RdKafka::Conf **conf, - RdKafka::Conf **topic_conf, - int timeout) { +void conf_init(RdKafka::Conf **conf, RdKafka::Conf **topic_conf, int timeout) { const char *tmp; if (conf) @@ -97,8 +94,7 @@ void conf_init (RdKafka::Conf **conf, if (topic_conf) *topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); - read_config_file(test_conf_get_path(), - conf ? *conf : NULL, + read_config_file(test_conf_get_path(), conf ? *conf : NULL, topic_conf ? 
*topic_conf : NULL, &timeout); std::string errstr; @@ -117,17 +113,14 @@ void conf_init (RdKafka::Conf **conf, } - void DeliveryReportCb::dr_cb (RdKafka::Message &msg) { - if (msg.err() != RdKafka::ERR_NO_ERROR) - Test::Fail(tostr() << "Delivery failed to " << - msg.topic_name() << " [" << msg.partition() << "]: " << - msg.errstr()); - else - Test::Say(3, tostr() << "Delivered to " << - msg.topic_name() << " [" << msg.partition() << "] @ " << - msg.offset() << " (timestamp " << msg.timestamp().timestamp << - ")\n"); - - - } -}; +void DeliveryReportCb::dr_cb(RdKafka::Message &msg) { + if (msg.err() != RdKafka::ERR_NO_ERROR) + Test::Fail(tostr() << "Delivery failed to " << msg.topic_name() << " [" + << msg.partition() << "]: " << msg.errstr()); + else + Test::Say(3, tostr() << "Delivered to " << msg.topic_name() << " [" + << msg.partition() << "] @ " << msg.offset() + << " (timestamp " << msg.timestamp().timestamp + << ")\n"); +} +}; // namespace Test diff --git a/tests/testcpp.h b/tests/testcpp.h index 8c77c6f05b..2ecaed3948 100644 --- a/tests/testcpp.h +++ b/tests/testcpp.h @@ -45,315 +45,316 @@ extern "C" { #include "testshared.h" } -// courtesy of http://stackoverview.blogspot.se/2011/04/create-string-on-fly-just-in-one-line.html +// courtesy of +// http://stackoverview.blogspot.se/2011/04/create-string-on-fly-just-in-one-line.html struct tostr { std::stringstream ss; - template - tostr & operator << (const T &data) - { + template + tostr &operator<<(const T &data) { ss << data; return *this; } - operator std::string() { return ss.str(); } + operator std::string() { + return ss.str(); + } }; -#define TestMessageVerify(testid,exp_partition,msgidp,msg) \ - test_msg_parse00(__FUNCTION__, __LINE__, testid, exp_partition, \ - msgidp, (msg)->topic_name().c_str(), \ - (msg)->partition(), (msg)->offset(), \ - (const char *)(msg)->key_pointer(), (msg)->key_len()) +#define TestMessageVerify(testid, exp_partition, msgidp, msg) \ + test_msg_parse00(__FUNCTION__, __LINE__, 
testid, exp_partition, msgidp, \ + (msg)->topic_name().c_str(), (msg)->partition(), \ + (msg)->offset(), (const char *)(msg)->key_pointer(), \ + (msg)->key_len()) namespace Test { - /** - * @brief Get test config object - */ - - static RD_UNUSED void Fail (std::string str) { - test_fail0(__FILE__, __LINE__, "", 1/*do-lock*/, 1/*now*/, - "%s", str.c_str()); - } - static RD_UNUSED void FailLater (std::string str) { - test_fail0(__FILE__, __LINE__, "", 1/*do-lock*/, 0/*later*/, - "%s", str.c_str()); - } - static RD_UNUSED void Skip (std::string str) { - test_SKIP(__FILE__, __LINE__, str.c_str()); - } - static RD_UNUSED void Say (int level, std::string str) { - test_SAY(__FILE__, __LINE__, level, str.c_str()); - } - static RD_UNUSED void Say (std::string str) { - Test::Say(2, str); - } - - /** - * @brief Generate test topic name - */ - static RD_UNUSED std::string mk_topic_name (std::string suffix, - bool randomized) { - return test_mk_topic_name(suffix.c_str(), - (int)randomized); - } - - /** - * @brief Generate random test group name - */ - static RD_UNUSED std::string mk_unique_group_name (std::string suffix) { - return test_mk_topic_name(suffix.c_str(), 1); - } +/** + * @brief Get test config object + */ - /** - * @brief Create partitions - */ - static RD_UNUSED void create_partitions (RdKafka::Handle *use_handle, const char *topicname, - int new_partition_cnt) { - rd_kafka_t *use_rk = NULL; - if (use_handle != NULL) - use_rk = use_handle->c_ptr(); - test_create_partitions(use_rk, topicname, new_partition_cnt); - } +static RD_UNUSED void Fail(std::string str) { + test_fail0(__FILE__, __LINE__, "", 1 /*do-lock*/, 1 /*now*/, "%s", + str.c_str()); +} +static RD_UNUSED void FailLater(std::string str) { + test_fail0(__FILE__, __LINE__, "", 1 /*do-lock*/, 0 /*later*/, "%s", + str.c_str()); +} +static RD_UNUSED void Skip(std::string str) { + test_SKIP(__FILE__, __LINE__, str.c_str()); +} +static RD_UNUSED void Say(int level, std::string str) { + test_SAY(__FILE__, 
__LINE__, level, str.c_str()); +} +static RD_UNUSED void Say(std::string str) { + Test::Say(2, str); +} - /** - * @brief Create a topic - */ - static RD_UNUSED void create_topic (RdKafka::Handle *use_handle, const char *topicname, - int partition_cnt, int replication_factor) { - rd_kafka_t *use_rk = NULL; - if (use_handle != NULL) - use_rk = use_handle->c_ptr(); - test_create_topic(use_rk, topicname, partition_cnt, replication_factor); - } +/** + * @brief Generate test topic name + */ +static RD_UNUSED std::string mk_topic_name(std::string suffix, + bool randomized) { + return test_mk_topic_name(suffix.c_str(), (int)randomized); +} - /** - * @brief Delete a topic - */ - static RD_UNUSED void delete_topic (RdKafka::Handle *use_handle, const char *topicname) { - rd_kafka_t *use_rk = NULL; - if (use_handle != NULL) - use_rk = use_handle->c_ptr(); - test_delete_topic(use_rk, topicname); - } +/** + * @brief Generate random test group name + */ +static RD_UNUSED std::string mk_unique_group_name(std::string suffix) { + return test_mk_topic_name(suffix.c_str(), 1); +} - /** - * @brief Get new configuration objects - */ - void conf_init (RdKafka::Conf **conf, - RdKafka::Conf **topic_conf, - int timeout); +/** + * @brief Create partitions + */ +static RD_UNUSED void create_partitions(RdKafka::Handle *use_handle, + const char *topicname, + int new_partition_cnt) { + rd_kafka_t *use_rk = NULL; + if (use_handle != NULL) + use_rk = use_handle->c_ptr(); + test_create_partitions(use_rk, topicname, new_partition_cnt); +} +/** + * @brief Create a topic + */ +static RD_UNUSED void create_topic(RdKafka::Handle *use_handle, + const char *topicname, + int partition_cnt, + int replication_factor) { + rd_kafka_t *use_rk = NULL; + if (use_handle != NULL) + use_rk = use_handle->c_ptr(); + test_create_topic(use_rk, topicname, partition_cnt, replication_factor); +} - static RD_UNUSED - void conf_set (RdKafka::Conf *conf, std::string name, std::string val) { - std::string errstr; - if 
(conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail("Conf failed: " + errstr); - } +/** + * @brief Delete a topic + */ +static RD_UNUSED void delete_topic(RdKafka::Handle *use_handle, + const char *topicname) { + rd_kafka_t *use_rk = NULL; + if (use_handle != NULL) + use_rk = use_handle->c_ptr(); + test_delete_topic(use_rk, topicname); +} - static RD_UNUSED - void print_TopicPartitions (std::string header, - const std::vector&partitions) { - Test::Say(tostr() << header << ": " << partitions.size() << - " TopicPartition(s):\n"); - for (unsigned int i = 0 ; i < partitions.size() ; i++) - Test::Say(tostr() << " " << partitions[i]->topic() << - "[" << partitions[i]->partition() << "] " << - "offset " << partitions[i]->offset() << - ": " << RdKafka::err2str(partitions[i]->err()) - << "\n"); - } +/** + * @brief Get new configuration objects + */ +void conf_init(RdKafka::Conf **conf, RdKafka::Conf **topic_conf, int timeout); - /* Convenience subscribe() */ - static RD_UNUSED void subscribe (RdKafka::KafkaConsumer *c, - const std::string &topic) { - Test::Say(c->name() + ": Subscribing to " + topic + "\n"); - std::vector topics; - topics.push_back(topic); - RdKafka::ErrorCode err; - if ((err = c->subscribe(topics))) - Test::Fail("Subscribe failed: " + RdKafka::err2str(err)); - } +static RD_UNUSED void conf_set(RdKafka::Conf *conf, + std::string name, + std::string val) { + std::string errstr; + if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail("Conf failed: " + errstr); +} +static RD_UNUSED void print_TopicPartitions( + std::string header, + const std::vector &partitions) { + Test::Say(tostr() << header << ": " << partitions.size() + << " TopicPartition(s):\n"); + for (unsigned int i = 0; i < partitions.size(); i++) + Test::Say(tostr() << " " << partitions[i]->topic() << "[" + << partitions[i]->partition() << "] " + << "offset " << partitions[i]->offset() << ": " + << RdKafka::err2str(partitions[i]->err()) << "\n"); +} - /* 
Convenience subscribe() to two topics */ - static RD_UNUSED void subscribe (RdKafka::KafkaConsumer *c, - const std::string &topic1, - const std::string &topic2) { - Test::Say(c->name() + ": Subscribing to " + topic1 + " and " - + topic2 + "\n"); - std::vector topics; - topics.push_back(topic1); - topics.push_back(topic2); - RdKafka::ErrorCode err; - if ((err = c->subscribe(topics))) - Test::Fail("Subscribe failed: " + RdKafka::err2str(err)); - } - /* Convenience unsubscribe() */ - static RD_UNUSED void unsubscribe (RdKafka::KafkaConsumer *c) { - Test::Say(c->name() + ": Unsubscribing\n"); - RdKafka::ErrorCode err; - if ((err = c->unsubscribe())) - Test::Fail("Unsubscribe failed: " + RdKafka::err2str(err)); - } +/* Convenience subscribe() */ +static RD_UNUSED void subscribe(RdKafka::KafkaConsumer *c, + const std::string &topic) { + Test::Say(c->name() + ": Subscribing to " + topic + "\n"); + std::vector topics; + topics.push_back(topic); + RdKafka::ErrorCode err; + if ((err = c->subscribe(topics))) + Test::Fail("Subscribe failed: " + RdKafka::err2str(err)); +} - static RD_UNUSED void - incremental_assign (RdKafka::KafkaConsumer *c, - const std::vector &parts) { - Test::Say(tostr() << c->name() << - ": incremental assign of " << parts.size() << - " partition(s)\n"); - if (test_level >= 2) - print_TopicPartitions("incremental_assign()", parts); - RdKafka::Error *error; - if ((error = c->incremental_assign(parts))) - Test::Fail(c->name() + ": Incremental assign failed: " + error->str()); - } +/* Convenience subscribe() to two topics */ +static RD_UNUSED void subscribe(RdKafka::KafkaConsumer *c, + const std::string &topic1, + const std::string &topic2) { + Test::Say(c->name() + ": Subscribing to " + topic1 + " and " + topic2 + "\n"); + std::vector topics; + topics.push_back(topic1); + topics.push_back(topic2); + RdKafka::ErrorCode err; + if ((err = c->subscribe(topics))) + Test::Fail("Subscribe failed: " + RdKafka::err2str(err)); +} - static RD_UNUSED void - 
incremental_unassign (RdKafka::KafkaConsumer *c, - const std::vector &parts) { - Test::Say(tostr() << c->name() << - ": incremental unassign of " << parts.size() << - " partition(s)\n"); - if (test_level >= 2) - print_TopicPartitions("incremental_unassign()", parts); - RdKafka::Error *error; - if ((error = c->incremental_unassign(parts))) - Test::Fail(c->name() + ": Incremental unassign failed: " + error->str()); - } +/* Convenience unsubscribe() */ +static RD_UNUSED void unsubscribe(RdKafka::KafkaConsumer *c) { + Test::Say(c->name() + ": Unsubscribing\n"); + RdKafka::ErrorCode err; + if ((err = c->unsubscribe())) + Test::Fail("Unsubscribe failed: " + RdKafka::err2str(err)); +} - /** - * @brief Wait until the current assignment size is \p partition_count. - * If \p topic is not NULL, then additionally, each partition in - * the assignment must have topic \p topic. - */ - static RD_UNUSED void wait_for_assignment (RdKafka::KafkaConsumer *c, - size_t partition_count, - const std::string *topic) { - bool done = false; - while (!done) { - RdKafka::Message *msg1 = c->consume(500); - delete msg1; - - std::vector partitions; - c->assignment(partitions); - - if (partitions.size() == partition_count) { - done = true; - if (topic) { - for (size_t i = 0 ; i < partitions.size() ; i++) { - if (partitions[i]->topic() != *topic) { - done = false; - break; - } - } - } - } - RdKafka::TopicPartition::destroy(partitions); - } - } +static RD_UNUSED void incremental_assign( + RdKafka::KafkaConsumer *c, + const std::vector &parts) { + Test::Say(tostr() << c->name() << ": incremental assign of " << parts.size() + << " partition(s)\n"); + if (test_level >= 2) + print_TopicPartitions("incremental_assign()", parts); + RdKafka::Error *error; + if ((error = c->incremental_assign(parts))) + Test::Fail(c->name() + ": Incremental assign failed: " + error->str()); +} +static RD_UNUSED void incremental_unassign( + RdKafka::KafkaConsumer *c, + const std::vector &parts) { + Test::Say(tostr() << 
c->name() << ": incremental unassign of " << parts.size() + << " partition(s)\n"); + if (test_level >= 2) + print_TopicPartitions("incremental_unassign()", parts); + RdKafka::Error *error; + if ((error = c->incremental_unassign(parts))) + Test::Fail(c->name() + ": Incremental unassign failed: " + error->str()); +} - /** - * @brief Check current assignment has size \p partition_count - * If \p topic is not NULL, then additionally check that - * each partition in the assignment has topic \p topic. - */ - static RD_UNUSED void check_assignment (RdKafka::KafkaConsumer *c, +/** + * @brief Wait until the current assignment size is \p partition_count. + * If \p topic is not NULL, then additionally, each partition in + * the assignment must have topic \p topic. + */ +static RD_UNUSED void wait_for_assignment(RdKafka::KafkaConsumer *c, size_t partition_count, const std::string *topic) { - std::vector partitions; + bool done = false; + while (!done) { + RdKafka::Message *msg1 = c->consume(500); + delete msg1; + + std::vector partitions; c->assignment(partitions); - if (partition_count != partitions.size()) - Test::Fail(tostr() << "Expecting current assignment to have size " << partition_count << ", not: " << partitions.size()); - for (size_t i = 0 ; i < partitions.size() ; i++) { - if (topic != NULL) { - if (partitions[i]->topic() != *topic) - Test::Fail(tostr() << "Expecting assignment to be " << *topic << ", not " << partitions[i]->topic()); + + if (partitions.size() == partition_count) { + done = true; + if (topic) { + for (size_t i = 0; i < partitions.size(); i++) { + if (partitions[i]->topic() != *topic) { + done = false; + break; + } + } } - delete partitions[i]; } + + RdKafka::TopicPartition::destroy(partitions); } +} - /** - * @brief Current assignment partition count. If \p topic is - * NULL, then the total partition count, else the number - * of assigned partitions from \p topic. 
- */ - static RD_UNUSED size_t assignment_partition_count (RdKafka::KafkaConsumer *c, std::string *topic) { - std::vector partitions; - c->assignment(partitions); - int cnt = 0; - for (size_t i = 0 ; i < partitions.size() ; i++) { - if (topic == NULL || *topic == partitions[i]->topic()) - cnt++; - delete partitions[i]; +/** + * @brief Check current assignment has size \p partition_count + * If \p topic is not NULL, then additionally check that + * each partition in the assignment has topic \p topic. + */ +static RD_UNUSED void check_assignment(RdKafka::KafkaConsumer *c, + size_t partition_count, + const std::string *topic) { + std::vector partitions; + c->assignment(partitions); + if (partition_count != partitions.size()) + Test::Fail(tostr() << "Expecting current assignment to have size " + << partition_count << ", not: " << partitions.size()); + for (size_t i = 0; i < partitions.size(); i++) { + if (topic != NULL) { + if (partitions[i]->topic() != *topic) + Test::Fail(tostr() << "Expecting assignment to be " << *topic + << ", not " << partitions[i]->topic()); } - return cnt; + delete partitions[i]; } +} - /** - * @brief Poll the consumer once, discarding the returned message - * or error event. - * @returns true if a proper event/message was seen, or false on timeout. - */ - static RD_UNUSED bool poll_once (RdKafka::KafkaConsumer *c, - int timeout_ms) { - RdKafka::Message *msg = c->consume(timeout_ms); - bool ret = msg->err() != RdKafka::ERR__TIMED_OUT; - delete msg; - return ret; +/** + * @brief Current assignment partition count. If \p topic is + * NULL, then the total partition count, else the number + * of assigned partitions from \p topic. 
+ */ +static RD_UNUSED size_t assignment_partition_count(RdKafka::KafkaConsumer *c, + std::string *topic) { + std::vector partitions; + c->assignment(partitions); + int cnt = 0; + for (size_t i = 0; i < partitions.size(); i++) { + if (topic == NULL || *topic == partitions[i]->topic()) + cnt++; + delete partitions[i]; } + return cnt; +} - /** - * @brief Produce \p msgcnt messages to \p topic \p partition. - */ - static RD_UNUSED void produce_msgs (RdKafka::Producer *p, - const std::string &topic, - int32_t partition, - int msgcnt, int msgsize, - bool flush) { - char *buf = (char *)malloc(msgsize); - - for (int i = 0 ; i < msgsize ; i++) - buf[i] = (char)((int)'a' + (i % 26)); - - for (int i = 0 ; i < msgcnt ; i++) { - RdKafka::ErrorCode err; - err = p->produce(topic, partition, - RdKafka::Producer::RK_MSG_COPY, - (void *)buf, (size_t)msgsize, - NULL, 0, 0, NULL); - TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str()); - p->poll(0); - } +/** + * @brief Poll the consumer once, discarding the returned message + * or error event. + * @returns true if a proper event/message was seen, or false on timeout. + */ +static RD_UNUSED bool poll_once(RdKafka::KafkaConsumer *c, int timeout_ms) { + RdKafka::Message *msg = c->consume(timeout_ms); + bool ret = msg->err() != RdKafka::ERR__TIMED_OUT; + delete msg; + return ret; +} - free(buf); - if (flush) - p->flush(10*1000); +/** + * @brief Produce \p msgcnt messages to \p topic \p partition. 
+ */ +static RD_UNUSED void produce_msgs(RdKafka::Producer *p, + const std::string &topic, + int32_t partition, + int msgcnt, + int msgsize, + bool flush) { + char *buf = (char *)malloc(msgsize); + + for (int i = 0; i < msgsize; i++) + buf[i] = (char)((int)'a' + (i % 26)); + + for (int i = 0; i < msgcnt; i++) { + RdKafka::ErrorCode err; + err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, + (void *)buf, (size_t)msgsize, NULL, 0, 0, NULL); + TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str()); + p->poll(0); } + free(buf); + if (flush) + p->flush(10 * 1000); +} - /** - * @brief Delivery report class - */ - class DeliveryReportCb : public RdKafka::DeliveryReportCb { - public: - void dr_cb (RdKafka::Message &msg); - }; - static DeliveryReportCb DrCb; + +/** + * @brief Delivery report class + */ +class DeliveryReportCb : public RdKafka::DeliveryReportCb { + public: + void dr_cb(RdKafka::Message &msg); }; +static DeliveryReportCb DrCb; +}; // namespace Test + #endif /* _TESTCPP_H_ */ diff --git a/tests/testshared.h b/tests/testshared.h index 505df5fa65..b54af26c1c 100644 --- a/tests/testshared.h +++ b/tests/testshared.h @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015, Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. 
-* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2015, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ #ifndef _TESTSHARED_H_ #define _TESTSHARED_H_ @@ -54,142 +54,170 @@ extern int test_level; extern char test_scenario[64]; /** @returns the \p msecs timeout multiplied by the test timeout multiplier */ -extern int tmout_multip (int msecs); +extern int tmout_multip(int msecs); /** @brief true if tests should run in quick-mode (faster, less data) */ extern int test_quick; /** @brief Broker version to int */ -#define TEST_BRKVER(A,B,C,D) \ - (((A) << 24) | ((B) << 16) | ((C) << 8) | (D)) +#define TEST_BRKVER(A, B, C, D) (((A) << 24) | ((B) << 16) | ((C) << 8) | (D)) /** @brief return single version component from int */ -#define TEST_BRKVER_X(V,I) \ - (((V) >> (24-((I)*8))) & 0xff) +#define TEST_BRKVER_X(V, I) (((V) >> (24 - ((I)*8))) & 0xff) /** @brief Topic Admin API supported by this broker version and later */ -#define TEST_BRKVER_TOPIC_ADMINAPI TEST_BRKVER(0,10,2,0) +#define TEST_BRKVER_TOPIC_ADMINAPI TEST_BRKVER(0, 10, 2, 0) extern int test_broker_version; extern int test_on_ci; -const char *test_mk_topic_name (const char *suffix, int randomized); +const char *test_mk_topic_name(const char *suffix, int randomized); -void test_delete_topic (rd_kafka_t *use_rk, const char *topicname); +void test_delete_topic(rd_kafka_t *use_rk, const char *topicname); -void test_create_topic (rd_kafka_t *use_rk, const char *topicname, - int partition_cnt, int replication_factor); +void test_create_topic(rd_kafka_t *use_rk, + const char 
*topicname, + int partition_cnt, + int replication_factor); -void test_create_partitions (rd_kafka_t *use_rk, const char *topicname, - int new_partition_cnt); +void test_create_partitions(rd_kafka_t *use_rk, + const char *topicname, + int new_partition_cnt); -void test_wait_topic_exists (rd_kafka_t *rk, const char *topic, int tmout); +void test_wait_topic_exists(rd_kafka_t *rk, const char *topic, int tmout); -void test_kafka_cmd (const char *fmt, ...); +void test_kafka_cmd(const char *fmt, ...); -uint64_t -test_produce_msgs_easy_size (const char *topic, uint64_t testid, - int32_t partition, int msgcnt, size_t size); -#define test_produce_msgs_easy(topic,testid,partition,msgcnt) \ - test_produce_msgs_easy_size(topic,testid,partition,msgcnt,0) +uint64_t test_produce_msgs_easy_size(const char *topic, + uint64_t testid, + int32_t partition, + int msgcnt, + size_t size); +#define test_produce_msgs_easy(topic, testid, partition, msgcnt) \ + test_produce_msgs_easy_size(topic, testid, partition, msgcnt, 0) -void test_fail0 (const char *file, int line, const char *function, - int do_lock, int fail_now, const char *fmt, ...) - RD_FORMAT(printf, 6, 7); +void test_fail0(const char *file, + int line, + const char *function, + int do_lock, + int fail_now, + const char *fmt, + ...) RD_FORMAT(printf, 6, 7); -void test_fail0 (const char *file, int line, const char *function, - int do_lock, int fail_now, const char *fmt, ...) - RD_FORMAT(printf, 6, 7); +void test_fail0(const char *file, + int line, + const char *function, + int do_lock, + int fail_now, + const char *fmt, + ...) RD_FORMAT(printf, 6, 7); -#define TEST_FAIL0(file,line,do_lock,fail_now,...) \ - test_fail0(__FILE__, __LINE__, __FUNCTION__, \ - do_lock, fail_now, __VA_ARGS__) +#define TEST_FAIL0(file, line, do_lock, fail_now, ...) \ + test_fail0(__FILE__, __LINE__, __FUNCTION__, do_lock, fail_now, \ + __VA_ARGS__) /* Whine and abort test */ -#define TEST_FAIL(...) 
TEST_FAIL0(__FILE__,__LINE__,1,1,__VA_ARGS__) +#define TEST_FAIL(...) TEST_FAIL0(__FILE__, __LINE__, 1, 1, __VA_ARGS__) /* Whine right away, mark the test as failed, but continue the test. */ -#define TEST_FAIL_LATER(...) TEST_FAIL0(__FILE__,__LINE__,1,0,__VA_ARGS__) +#define TEST_FAIL_LATER(...) TEST_FAIL0(__FILE__, __LINE__, 1, 0, __VA_ARGS__) /* Whine right away, maybe mark the test as failed, but continue the test. */ -#define TEST_FAIL_LATER0(LATER,...) TEST_FAIL0(__FILE__,__LINE__,1,!(LATER),__VA_ARGS__) +#define TEST_FAIL_LATER0(LATER, ...) \ + TEST_FAIL0(__FILE__, __LINE__, 1, !(LATER), __VA_ARGS__) + +#define TEST_FAILCNT() (test_curr->failcnt) -#define TEST_FAILCNT() (test_curr->failcnt) +#define TEST_LATER_CHECK(...) \ + do { \ + if (test_curr->state == TEST_FAILED) \ + TEST_FAIL("See previous errors. " __VA_ARGS__); \ + } while (0) -#define TEST_LATER_CHECK(...) do { \ - if (test_curr->state == TEST_FAILED) \ - TEST_FAIL("See previous errors. " __VA_ARGS__); \ +#define TEST_PERROR(call) \ + do { \ + if (!(call)) \ + TEST_FAIL(#call " failed: %s", rd_strerror(errno)); \ } while (0) -#define TEST_PERROR(call) do { \ - if (!(call)) \ - TEST_FAIL(#call " failed: %s", rd_strerror(errno)); \ - } while (0) - -#define TEST_WARN(...) do { \ - fprintf(stderr, "\033[33m[%-28s/%7.3fs] WARN: ", \ - test_curr->name, \ - test_curr->start ? \ - ((float)(test_clock() - \ - test_curr->start)/1000000.0f) : 0); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); \ - } while (0) +#define TEST_WARN(...) \ + do { \ + fprintf(stderr, \ + "\033[33m[%-28s/%7.3fs] WARN: ", test_curr->name, \ + test_curr->start \ + ? ((float)(test_clock() - test_curr->start) / \ + 1000000.0f) \ + : 0); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m"); \ + } while (0) /* "..." is a failure reason in printf format, include as much info as needed */ -#define TEST_ASSERT(expr,...) 
do { \ - if (!(expr)) { \ - TEST_FAIL("Test assertion failed: \"" # expr "\": " \ - __VA_ARGS__); \ - } \ +#define TEST_ASSERT(expr, ...) \ + do { \ + if (!(expr)) { \ + TEST_FAIL("Test assertion failed: \"" #expr \ + "\": " __VA_ARGS__); \ + } \ } while (0) /* "..." is a failure reason in printf format, include as much info as needed */ -#define TEST_ASSERT_LATER(expr,...) do { \ - if (!(expr)) { \ - TEST_FAIL0(__FILE__, __LINE__, 1, 0, \ - "Test assertion failed: \"" # expr "\": " \ - __VA_ARGS__); \ - } \ +#define TEST_ASSERT_LATER(expr, ...) \ + do { \ + if (!(expr)) { \ + TEST_FAIL0(__FILE__, __LINE__, 1, 0, \ + "Test assertion failed: \"" #expr \ + "\": " __VA_ARGS__); \ + } \ } while (0) -void test_SAY (const char *file, int line, int level, const char *str); -void test_SKIP (const char *file, int line, const char *str); +void test_SAY(const char *file, int line, int level, const char *str); +void test_SKIP(const char *file, int line, const char *str); -void test_timeout_set (int timeout); -int test_set_special_conf (const char *name, const char *val, int *timeoutp); -char *test_conf_get (const rd_kafka_conf_t *conf, const char *name); -const char *test_conf_get_path (void); -const char *test_getenv (const char *env, const char *def); +void test_timeout_set(int timeout); +int test_set_special_conf(const char *name, const char *val, int *timeoutp); +char *test_conf_get(const rd_kafka_conf_t *conf, const char *name); +const char *test_conf_get_path(void); +const char *test_getenv(const char *env, const char *def); -int test_needs_auth (void); +int test_needs_auth(void); -uint64_t test_id_generate (void); -char *test_str_id_generate (char *dest, size_t dest_size); -const char *test_str_id_generate_tmp (void); +uint64_t test_id_generate(void); +char *test_str_id_generate(char *dest, size_t dest_size); +const char *test_str_id_generate_tmp(void); -void test_prepare_msg (uint64_t testid, int32_t partition, int msg_id, - char *val, size_t val_size, - char *key, 
size_t key_size); +void test_prepare_msg(uint64_t testid, + int32_t partition, + int msg_id, + char *val, + size_t val_size, + char *key, + size_t key_size); /** * Parse a message token */ -void test_msg_parse00 (const char *func, int line, - uint64_t testid, int32_t exp_partition, int *msgidp, - const char *topic, int32_t partition, int64_t offset, - const char *key, size_t key_size); +void test_msg_parse00(const char *func, + int line, + uint64_t testid, + int32_t exp_partition, + int *msgidp, + const char *topic, + int32_t partition, + int64_t offset, + const char *key, + size_t key_size); -int test_check_builtin (const char *feature); +int test_check_builtin(const char *feature); /** * @returns the current test's name (thread-local) */ -extern const char *test_curr_name (void); +extern const char *test_curr_name(void); #ifndef _WIN32 #include @@ -209,14 +237,14 @@ extern const char *test_curr_name (void); /** -* A microsecond monotonic clock -*/ -static RD_INLINE int64_t test_clock (void) + * A microsecond monotonic clock + */ +static RD_INLINE int64_t test_clock(void) #ifndef _MSC_VER -__attribute__((unused)) + __attribute__((unused)) #endif -; -static RD_INLINE int64_t test_clock (void) { + ; +static RD_INLINE int64_t test_clock(void) { #ifdef __APPLE__ /* No monotonic clock on Darwin */ struct timeval tv; @@ -233,7 +261,7 @@ static RD_INLINE int64_t test_clock (void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ((int64_t)ts.tv_sec * 1000000LLU) + - ((int64_t)ts.tv_nsec / 1000LLU); + ((int64_t)ts.tv_nsec / 1000LLU); #endif } @@ -248,66 +276,76 @@ typedef struct test_timing_s { /** * @brief Start timing, Va-Argument is textual name (printf format) */ -#define TIMING_RESTART(TIMING) do { \ - (TIMING)->ts_start = test_clock(); \ - (TIMING)->duration = 0; \ +#define TIMING_RESTART(TIMING) \ + do { \ + (TIMING)->ts_start = test_clock(); \ + (TIMING)->duration = 0; \ } while (0) -#define TIMING_START(TIMING,...) 
do { \ - rd_snprintf((TIMING)->name, sizeof((TIMING)->name), __VA_ARGS__); \ - TIMING_RESTART(TIMING); \ - (TIMING)->ts_every = (TIMING)->ts_start; \ +#define TIMING_START(TIMING, ...) \ + do { \ + rd_snprintf((TIMING)->name, sizeof((TIMING)->name), \ + __VA_ARGS__); \ + TIMING_RESTART(TIMING); \ + (TIMING)->ts_every = (TIMING)->ts_start; \ } while (0) #define TIMING_STOPPED(TIMING) ((TIMING)->duration != 0) #ifndef __cplusplus -#define TIMING_STOP(TIMING) do { \ - (TIMING)->duration = test_clock() - (TIMING)->ts_start; \ - TEST_SAY("%s: duration %.3fms\n", \ - (TIMING)->name, (float)(TIMING)->duration / 1000.0f); \ +#define TIMING_STOP(TIMING) \ + do { \ + (TIMING)->duration = test_clock() - (TIMING)->ts_start; \ + TEST_SAY("%s: duration %.3fms\n", (TIMING)->name, \ + (float)(TIMING)->duration / 1000.0f); \ } while (0) -#define TIMING_REPORT(TIMING) \ - TEST_SAY("%s: duration %.3fms\n", \ - (TIMING)->name, (float)(TIMING)->duration / 1000.0f); \ +#define TIMING_REPORT(TIMING) \ + TEST_SAY("%s: duration %.3fms\n", (TIMING)->name, \ + (float)(TIMING)->duration / 1000.0f); #else -#define TIMING_STOP(TIMING) do { \ - char _str[512]; \ - (TIMING)->duration = test_clock() - (TIMING)->ts_start; \ - rd_snprintf(_str, sizeof(_str), "%s: duration %.3fms\n", \ - (TIMING)->name, (float)(TIMING)->duration / 1000.0f); \ - Test::Say(_str); \ +#define TIMING_STOP(TIMING) \ + do { \ + char _str[512]; \ + (TIMING)->duration = test_clock() - (TIMING)->ts_start; \ + rd_snprintf(_str, sizeof(_str), "%s: duration %.3fms\n", \ + (TIMING)->name, \ + (float)(TIMING)->duration / 1000.0f); \ + Test::Say(_str); \ } while (0) #endif -#define TIMING_DURATION(TIMING) ((TIMING)->duration ? 
(TIMING)->duration : \ - (test_clock() - (TIMING)->ts_start)) - -#define TIMING_ASSERT0(TIMING,DO_FAIL_LATER,TMIN_MS,TMAX_MS) do { \ - if (!TIMING_STOPPED(TIMING)) \ - TIMING_STOP(TIMING); \ - int _dur_ms = (int)TIMING_DURATION(TIMING) / 1000; \ - if (TMIN_MS <= _dur_ms && _dur_ms <= TMAX_MS) \ - break; \ - if (test_on_ci || strcmp(test_mode, "bare")) \ - TEST_WARN("%s: expected duration %d <= %d <= %d ms%s\n", \ - (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS, \ - ": not FAILING test on CI"); \ - else \ - TEST_FAIL_LATER0(DO_FAIL_LATER, \ - "%s: expected duration %d <= %d <= %d ms", \ - (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS); \ +#define TIMING_DURATION(TIMING) \ + ((TIMING)->duration ? (TIMING)->duration \ + : (test_clock() - (TIMING)->ts_start)) + +#define TIMING_ASSERT0(TIMING, DO_FAIL_LATER, TMIN_MS, TMAX_MS) \ + do { \ + if (!TIMING_STOPPED(TIMING)) \ + TIMING_STOP(TIMING); \ + int _dur_ms = (int)TIMING_DURATION(TIMING) / 1000; \ + if (TMIN_MS <= _dur_ms && _dur_ms <= TMAX_MS) \ + break; \ + if (test_on_ci || strcmp(test_mode, "bare")) \ + TEST_WARN( \ + "%s: expected duration %d <= %d <= %d ms%s\n", \ + (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS, \ + ": not FAILING test on CI"); \ + else \ + TEST_FAIL_LATER0( \ + DO_FAIL_LATER, \ + "%s: expected duration %d <= %d <= %d ms", \ + (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS); \ } while (0) -#define TIMING_ASSERT(TIMING,TMIN_MS,TMAX_MS) \ - TIMING_ASSERT0(TIMING,0,TMIN_MS,TMAX_MS) -#define TIMING_ASSERT_LATER(TIMING,TMIN_MS,TMAX_MS) \ - TIMING_ASSERT0(TIMING,1,TMIN_MS,TMAX_MS) +#define TIMING_ASSERT(TIMING, TMIN_MS, TMAX_MS) \ + TIMING_ASSERT0(TIMING, 0, TMIN_MS, TMAX_MS) +#define TIMING_ASSERT_LATER(TIMING, TMIN_MS, TMAX_MS) \ + TIMING_ASSERT0(TIMING, 1, TMIN_MS, TMAX_MS) /* Trigger something every US microseconds. 
*/ -static RD_UNUSED int TIMING_EVERY (test_timing_t *timing, int us) { +static RD_UNUSED int TIMING_EVERY(test_timing_t *timing, int us) { int64_t now = test_clock(); if (timing->ts_every + us <= now) { timing->ts_every = now; @@ -320,23 +358,28 @@ static RD_UNUSED int TIMING_EVERY (test_timing_t *timing, int us) { /** * Sub-tests */ -int test_sub_start (const char *func, int line, int is_quick, - const char *fmt, ...); -void test_sub_pass (void); -void test_sub_skip (const char *fmt, ...); - -#define SUB_TEST0(IS_QUICK,...) do { \ - if (!test_sub_start(__FUNCTION__, __LINE__, \ - IS_QUICK, __VA_ARGS__)) \ - return; \ +int test_sub_start(const char *func, + int line, + int is_quick, + const char *fmt, + ...); +void test_sub_pass(void); +void test_sub_skip(const char *fmt, ...); + +#define SUB_TEST0(IS_QUICK, ...) \ + do { \ + if (!test_sub_start(__FUNCTION__, __LINE__, IS_QUICK, \ + __VA_ARGS__)) \ + return; \ } while (0) -#define SUB_TEST(...) SUB_TEST0(0, "" __VA_ARGS__) +#define SUB_TEST(...) SUB_TEST0(0, "" __VA_ARGS__) #define SUB_TEST_QUICK(...) SUB_TEST0(1, "" __VA_ARGS__) -#define SUB_TEST_PASS() test_sub_pass() -#define SUB_TEST_SKIP(...) do { \ - test_sub_skip(__VA_ARGS__); \ - return; \ +#define SUB_TEST_PASS() test_sub_pass() +#define SUB_TEST_SKIP(...) 
\ + do { \ + test_sub_skip(__VA_ARGS__); \ + return; \ } while (0) @@ -348,12 +391,12 @@ void test_sub_skip (const char *fmt, ...); /* Make sure __SANITIZE_ADDRESS__ (gcc) is defined if compiled with asan */ #if !defined(__SANITIZE_ADDRESS__) && defined(__has_feature) - #if __has_feature(address_sanitizer) - #define __SANITIZE_ADDRESS__ 1 - #endif +#if __has_feature(address_sanitizer) +#define __SANITIZE_ADDRESS__ 1 +#endif #endif -int test_run_java (const char *cls, const char **argv); -int test_waitpid (int pid); +int test_run_java(const char *cls, const char **argv); +int test_waitpid(int pid); #endif /* _TESTSHARED_H_ */ diff --git a/tests/tools/stats/graph.py b/tests/tools/stats/graph.py index a4f454305c..3eeaa1541a 100755 --- a/tests/tools/stats/graph.py +++ b/tests/tools/stats/graph.py @@ -145,6 +145,6 @@ grid = [] for i in range(0, len(plots), args.chart_cols): - grid.append(plots[i:i+args.chart_cols]) + grid.append(plots[i:i + args.chart_cols]) pandas_bokeh.plot_grid(grid) diff --git a/tests/xxxx-assign_partition.c b/tests/xxxx-assign_partition.c index 451fa1ee10..18431ba723 100644 --- a/tests/xxxx-assign_partition.c +++ b/tests/xxxx-assign_partition.c @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -38,60 +38,60 @@ */ -int main_0016_assign_partition (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - rd_kafka_t *rk_p, *rk_c; +int main_0016_assign_partition(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_t *rk_p, *rk_c; rd_kafka_topic_t *rkt_p; - int msg_cnt = 1000; - int msg_base = 0; + int msg_cnt = 1000; + int msg_base = 0; int partition_cnt = 2; int partition; - uint64_t testid; + uint64_t testid; rd_kafka_topic_conf_t *default_topic_conf; - rd_kafka_topic_partition_list_t *partitions; - char errstr[512]; + rd_kafka_topic_partition_list_t *partitions; + char errstr[512]; - testid = test_id_generate(); + testid = test_id_generate(); - /* Produce messages */ - rk_p = test_create_producer(); - rkt_p = test_create_producer_topic(rk_p, topic, NULL); + /* Produce messages */ + rk_p = test_create_producer(); + rkt_p = test_create_producer_topic(rk_p, topic, NULL); - for (partition = 0 ; partition < partition_cnt ; partition++) { + for (partition = 0; partition < partition_cnt; partition++) { test_produce_msgs(rk_p, rkt_p, testid, partition, - msg_base+(partition*msg_cnt), msg_cnt, - NULL, 0); + msg_base + (partition * msg_cnt), msg_cnt, + NULL, 0); } - rd_kafka_topic_destroy(rkt_p); - rd_kafka_destroy(rk_p); + rd_kafka_topic_destroy(rkt_p); + rd_kafka_destroy(rk_p); test_conf_init(NULL, &default_topic_conf, 0); if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset", - "smallest", errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) - TEST_FAIL("%s\n", errstr); + "smallest", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) + TEST_FAIL("%s\n", errstr); - rk_c = test_create_consumer(topic/*group_id*/, NULL, - default_topic_conf); + rk_c = + test_create_consumer(topic /*group_id*/, NULL, default_topic_conf); - /* Fill in partition set */ - partitions = 
rd_kafka_topic_partition_list_new(partition_cnt); + /* Fill in partition set */ + partitions = rd_kafka_topic_partition_list_new(partition_cnt); - for (partition = 0 ; partition < partition_cnt ; partition++) - rd_kafka_topic_partition_list_add(partitions, topic, partition); + for (partition = 0; partition < partition_cnt; partition++) + rd_kafka_topic_partition_list_add(partitions, topic, partition); - test_consumer_assign("assign.partition", rk_c, partitions); + test_consumer_assign("assign.partition", rk_c, partitions); - /* Make sure all messages are available */ - test_consumer_poll("verify.all", rk_c, testid, partition_cnt, - msg_base, partition_cnt * msg_cnt); + /* Make sure all messages are available */ + test_consumer_poll("verify.all", rk_c, testid, partition_cnt, msg_base, + partition_cnt * msg_cnt); /* Stop assignments */ - test_consumer_unassign("unassign.partitions", rk_c); + test_consumer_unassign("unassign.partitions", rk_c); -#if 0 // FIXME when get_offset() is functional +#if 0 // FIXME when get_offset() is functional /* Acquire stored offsets */ for (partition = 0 ; partition < partition_cnt ; partition++) { rd_kafka_resp_err_t err; @@ -116,7 +116,7 @@ int main_0016_assign_partition (int argc, char **argv) { #endif test_consumer_close(rk_c); - rd_kafka_destroy(rk_c); + rd_kafka_destroy(rk_c); - return 0; + return 0; } diff --git a/tests/xxxx-metadata.cpp b/tests/xxxx-metadata.cpp index a751f46546..00c31bc824 100644 --- a/tests/xxxx-metadata.cpp +++ b/tests/xxxx-metadata.cpp @@ -3,35 +3,37 @@ * * Copyright (c) 2012-2014, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. 
+ * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /** - * - Generate unique topic name (there is a C function for that in test.h wihch you should use) + * - Generate unique topic name (there is a C function for that in test.h wihch + * you should use) * - Query metadata for that topic * - Wait one second * - Query again, it should now have isrs and everything - * Note: The test require auto.create.topics.enable = true in kafka server properties. + * Note: The test require auto.create.topics.enable = true in kafka server + * properties. */ @@ -49,108 +51,109 @@ extern "C" { /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafkacpp.h" /* for Kafka driver */ +#include "rdkafkacpp.h" /* for Kafka driver */ /** - * Generate unique topic name (there is a C function for that in test.h wihch you should use) - * Query metadata for that topic - * Wait one second - * Query again, it should now have isrs and everything + * Generate unique topic name (there is a C function for that in test.h wihch + * you should use) Query metadata for that topic Wait one second Query again, it + * should now have isrs and everything */ -static void test_metadata_cpp (void) { - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); /* @TODO: Do we need to merge with C test_conf_init()? */ - RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); /* @TODO: Same of prev */ - - RdKafka::Metadata *metadata; - RdKafka::ErrorCode err; - int msgcnt = test_on_ci ? 
1000 : 10000; - int partition_cnt = 2; - int i; - uint64_t testid; - int msg_base = 0; - std::string errstr; - const char *topic_str = test_mk_topic_name("0013", 1); -/* if(!topic){ - TEST_FAIL() - }*/ - - //const RdKafka::Conf::ConfResult confResult = conf->set("debug","all",errstr); - //if(confResult != RdKafka::Conf::CONF_OK){ - // std::stringstream errstring; - // errstring << "Can't set config" << errstr; - // TEST_FAIL(errstring.str().c_str()); - //} - - TEST_SAY("Topic %s.\n", topic_str); - - const RdKafka::Conf::ConfResult confBrokerResult = conf->set("metadata.broker.list", "localhost:9092", errstr); - if(confBrokerResult != RdKafka::Conf::CONF_OK){ - std::stringstream errstring; - errstring << "Can't set broker" << errstr; - TEST_FAIL(errstring.str().c_str()); - } - - /* Create a producer to fetch metadata */ - RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); - if (!producer) { - std::stringstream errstring; - errstring << "Can't create producer" << errstr; - TEST_FAIL(errstring.str().c_str()); - } - - /* - * Create topic handle. 
- */ - RdKafka::Topic *topic = NULL; - topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr); - if (!topic) { - std::stringstream errstring; - errstring << "Can't create topic" << errstr; - exit(1); - } - - /* First request of metadata: It have to fail */ - err = producer->metadata(topic!=NULL, topic, - &metadata, 5000); - if (err != RdKafka::ERR_NO_ERROR) { - std::stringstream errstring; - errstring << "Can't request first metadata: " << errstr; - TEST_FAIL(errstring.str().c_str()); - } - - /* It's a new topic, it should have no partitions */ - if(metadata->topics()->at(0)->partitions()->size() != 0){ - TEST_FAIL("ISRS != 0"); - } - - sleep(1); - - /* Second request of metadata: It have to success */ - err = producer->metadata(topic!=NULL, topic, - &metadata, 5000); - - /* It should have now partitions */ - if(metadata->topics()->at(0)->partitions()->size() == 0){ - TEST_FAIL("ISRS == 0"); - } - - - delete topic; - delete producer; - delete tconf; - delete conf; - - /* Wait for everything to be cleaned up since broker destroys are - * handled in its own thread. */ - test_wait_exit(10); - - /* If we havent failed at this point then - * there were no threads leaked */ - return; +static void test_metadata_cpp(void) { + RdKafka::Conf *conf = RdKafka::Conf::create( + RdKafka::Conf::CONF_GLOBAL); /* @TODO: Do we need to merge with C + test_conf_init()? */ + RdKafka::Conf *tconf = RdKafka::Conf::create( + RdKafka::Conf::CONF_TOPIC); /* @TODO: Same of prev */ + + RdKafka::Metadata *metadata; + RdKafka::ErrorCode err; + int msgcnt = test_on_ci ? 
1000 : 10000; + int partition_cnt = 2; + int i; + uint64_t testid; + int msg_base = 0; + std::string errstr; + const char *topic_str = test_mk_topic_name("0013", 1); + /* if(!topic){ + TEST_FAIL() + }*/ + + // const RdKafka::Conf::ConfResult confResult = + // conf->set("debug","all",errstr); if(confResult != RdKafka::Conf::CONF_OK){ + // std::stringstream errstring; + // errstring << "Can't set config" << errstr; + // TEST_FAIL(errstring.str().c_str()); + //} + + TEST_SAY("Topic %s.\n", topic_str); + + const RdKafka::Conf::ConfResult confBrokerResult = + conf->set("metadata.broker.list", "localhost:9092", errstr); + if (confBrokerResult != RdKafka::Conf::CONF_OK) { + std::stringstream errstring; + errstring << "Can't set broker" << errstr; + TEST_FAIL(errstring.str().c_str()); + } + + /* Create a producer to fetch metadata */ + RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); + if (!producer) { + std::stringstream errstring; + errstring << "Can't create producer" << errstr; + TEST_FAIL(errstring.str().c_str()); + } + + /* + * Create topic handle. 
+ */ + RdKafka::Topic *topic = NULL; + topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr); + if (!topic) { + std::stringstream errstring; + errstring << "Can't create topic" << errstr; + exit(1); + } + + /* First request of metadata: It have to fail */ + err = producer->metadata(topic != NULL, topic, &metadata, 5000); + if (err != RdKafka::ERR_NO_ERROR) { + std::stringstream errstring; + errstring << "Can't request first metadata: " << errstr; + TEST_FAIL(errstring.str().c_str()); + } + + /* It's a new topic, it should have no partitions */ + if (metadata->topics()->at(0)->partitions()->size() != 0) { + TEST_FAIL("ISRS != 0"); + } + + sleep(1); + + /* Second request of metadata: It have to success */ + err = producer->metadata(topic != NULL, topic, &metadata, 5000); + + /* It should have now partitions */ + if (metadata->topics()->at(0)->partitions()->size() == 0) { + TEST_FAIL("ISRS == 0"); + } + + + delete topic; + delete producer; + delete tconf; + delete conf; + + /* Wait for everything to be cleaned up since broker destroys are + * handled in its own thread. */ + test_wait_exit(10); + + /* If we havent failed at this point then + * there were no threads leaked */ + return; } -int main (int argc, char **argv) { - test_conf_init (NULL, NULL, 20); - test_metadata_cpp(); - return 0; +int main(int argc, char **argv) { + test_conf_init(NULL, NULL, 20); + test_metadata_cpp(); + return 0; } diff --git a/win32/wingetopt.c b/win32/wingetopt.c index 4391c2927c..b202529325 100644 --- a/win32/wingetopt.c +++ b/win32/wingetopt.c @@ -1,5 +1,5 @@ -/* $OpenBSD: getopt_long.c,v 1.23 2007/10/31 12:34:57 chl Exp $ */ -/* $NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $ */ +/* $OpenBSD: getopt_long.c,v 1.23 2007/10/31 12:34:57 chl Exp $ */ +/* $NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $ */ /* * Copyright (c) 2002 Todd C. 
Miller @@ -57,96 +57,97 @@ #include #include -#define REPLACE_GETOPT /* use this getopt as the system getopt(3) */ +#define REPLACE_GETOPT /* use this getopt as the system getopt(3) */ #ifdef REPLACE_GETOPT -int opterr = 1; /* if error message should be printed */ -int optind = 1; /* index into parent argv vector */ -int optopt = '?'; /* character checked for validity */ -#undef optreset /* see getopt.h */ -#define optreset __mingw_optreset -int optreset; /* reset getopt */ -char *optarg; /* argument associated with option */ +int opterr = 1; /* if error message should be printed */ +int optind = 1; /* index into parent argv vector */ +int optopt = '?'; /* character checked for validity */ +#undef optreset /* see getopt.h */ +#define optreset __mingw_optreset +int optreset; /* reset getopt */ +char *optarg; /* argument associated with option */ #endif -#define PRINT_ERROR ((opterr) && (*options != ':')) +#define PRINT_ERROR ((opterr) && (*options != ':')) -#define FLAG_PERMUTE 0x01 /* permute non-options to the end of argv */ -#define FLAG_ALLARGS 0x02 /* treat non-options as args to option "-1" */ -#define FLAG_LONGONLY 0x04 /* operate as getopt_long_only */ +#define FLAG_PERMUTE 0x01 /* permute non-options to the end of argv */ +#define FLAG_ALLARGS 0x02 /* treat non-options as args to option "-1" */ +#define FLAG_LONGONLY 0x04 /* operate as getopt_long_only */ /* return values */ -#define BADCH (int)'?' -#define BADARG ((*options == ':') ? (int)':' : (int)'?') -#define INORDER (int)1 +#define BADCH (int)'?' +#define BADARG ((*options == ':') ? 
(int)':' : (int)'?') +#define INORDER (int)1 #ifndef __CYGWIN__ #define __progname __argv[0] #else -extern char __declspec(dllimport) *__progname; +extern char __declspec(dllimport) * __progname; #endif #ifdef __CYGWIN__ static char EMSG[] = ""; #else -#define EMSG "" +#define EMSG "" #endif -static int getopt_internal(int, char * const *, const char *, - const struct option *, int *, int); -static int parse_long_options(char * const *, const char *, - const struct option *, int *, int); +static int getopt_internal(int, + char *const *, + const char *, + const struct option *, + int *, + int); +static int parse_long_options(char *const *, + const char *, + const struct option *, + int *, + int); static int gcd(int, int); -static void permute_args(int, int, int, char * const *); +static void permute_args(int, int, int, char *const *); static char *place = EMSG; /* option letter processing */ /* XXX: set optreset to 1 rather than these two */ static int nonopt_start = -1; /* first non option argument (for permute) */ -static int nonopt_end = -1; /* first option after non options (for permute) */ +static int nonopt_end = -1; /* first option after non options (for permute) */ /* Error messages */ -static const char recargchar[] = "option requires an argument -- %c"; +static const char recargchar[] = "option requires an argument -- %c"; static const char recargstring[] = "option requires an argument -- %s"; -static const char ambig[] = "ambiguous option -- %.*s"; -static const char noarg[] = "option doesn't take an argument -- %.*s"; -static const char illoptchar[] = "unknown option -- %c"; +static const char ambig[] = "ambiguous option -- %.*s"; +static const char noarg[] = "option doesn't take an argument -- %.*s"; +static const char illoptchar[] = "unknown option -- %c"; static const char illoptstring[] = "unknown option -- %s"; -static void -_vwarnx(const char *fmt,va_list ap) -{ - (void)fprintf(stderr,"%s: ",__progname); - if (fmt != NULL) - 
(void)vfprintf(stderr,fmt,ap); - (void)fprintf(stderr,"\n"); +static void _vwarnx(const char *fmt, va_list ap) { + (void)fprintf(stderr, "%s: ", __progname); + if (fmt != NULL) + (void)vfprintf(stderr, fmt, ap); + (void)fprintf(stderr, "\n"); } -static void -warnx(const char *fmt,...) -{ - va_list ap; - va_start(ap,fmt); - _vwarnx(fmt,ap); - va_end(ap); +static void warnx(const char *fmt, ...) { + va_list ap; + va_start(ap, fmt); + _vwarnx(fmt, ap); + va_end(ap); } /* * Compute the greatest common divisor of a and b. */ -static int -gcd(int a, int b) -{ - int c; - - c = a % b; - while (c != 0) { - a = b; - b = c; - c = a % b; - } - - return (b); +static int gcd(int a, int b) { + int c; + + c = a % b; + while (c != 0) { + a = b; + b = c; + c = a % b; + } + + return (b); } /* @@ -154,411 +155,410 @@ gcd(int a, int b) * from nonopt_end to opt_end (keeping the same order of arguments * in each block). */ -static void -permute_args(int panonopt_start, int panonopt_end, int opt_end, - char * const *nargv) -{ - int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos; - char *swap; - - /* - * compute lengths of blocks and number and size of cycles - */ - nnonopts = panonopt_end - panonopt_start; - nopts = opt_end - panonopt_end; - ncycle = gcd(nnonopts, nopts); - cyclelen = (opt_end - panonopt_start) / ncycle; - - for (i = 0; i < ncycle; i++) { - cstart = panonopt_end+i; - pos = cstart; - for (j = 0; j < cyclelen; j++) { - if (pos >= panonopt_end) - pos -= nnonopts; - else - pos += nopts; - swap = nargv[pos]; - /* LINTED const cast */ - ((char **) nargv)[pos] = nargv[cstart]; - /* LINTED const cast */ - ((char **)nargv)[cstart] = swap; - } - } +static void permute_args(int panonopt_start, + int panonopt_end, + int opt_end, + char *const *nargv) { + int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos; + char *swap; + + /* + * compute lengths of blocks and number and size of cycles + */ + nnonopts = panonopt_end - panonopt_start; + nopts = opt_end - panonopt_end; + 
ncycle = gcd(nnonopts, nopts); + cyclelen = (opt_end - panonopt_start) / ncycle; + + for (i = 0; i < ncycle; i++) { + cstart = panonopt_end + i; + pos = cstart; + for (j = 0; j < cyclelen; j++) { + if (pos >= panonopt_end) + pos -= nnonopts; + else + pos += nopts; + swap = nargv[pos]; + /* LINTED const cast */ + ((char **)nargv)[pos] = nargv[cstart]; + /* LINTED const cast */ + ((char **)nargv)[cstart] = swap; + } + } } /* * parse_long_options -- - * Parse long options in argc/argv argument vector. + * Parse long options in argc/argv argument vector. * Returns -1 if short_too is set and the option does not match long_options. */ -static int -parse_long_options(char * const *nargv, const char *options, - const struct option *long_options, int *idx, int short_too) -{ - char *current_argv, *has_equal; - size_t current_argv_len; - int i, ambiguous, match; - -#define IDENTICAL_INTERPRETATION(_x, _y) \ - (long_options[(_x)].has_arg == long_options[(_y)].has_arg && \ - long_options[(_x)].flag == long_options[(_y)].flag && \ - long_options[(_x)].val == long_options[(_y)].val) - - current_argv = place; - match = -1; - ambiguous = 0; - - optind++; - - if ((has_equal = strchr(current_argv, '=')) != NULL) { - /* argument found (--option=arg) */ - current_argv_len = has_equal - current_argv; - has_equal++; - } else - current_argv_len = strlen(current_argv); - - for (i = 0; long_options[i].name; i++) { - /* find matching long option */ - if (strncmp(current_argv, long_options[i].name, - current_argv_len)) - continue; - - if (strlen(long_options[i].name) == current_argv_len) { - /* exact match */ - match = i; - ambiguous = 0; - break; - } - /* - * If this is a known short option, don't allow - * a partial match of a single character. 
- */ - if (short_too && current_argv_len == 1) - continue; - - if (match == -1) /* partial match */ - match = i; - else if (!IDENTICAL_INTERPRETATION(i, match)) - ambiguous = 1; - } - if (ambiguous) { - /* ambiguous abbreviation */ - if (PRINT_ERROR) - warnx(ambig, (int)current_argv_len, - current_argv); - optopt = 0; - return (BADCH); - } - if (match != -1) { /* option found */ - if (long_options[match].has_arg == no_argument - && has_equal) { - if (PRINT_ERROR) - warnx(noarg, (int)current_argv_len, - current_argv); - /* - * XXX: GNU sets optopt to val regardless of flag - */ - if (long_options[match].flag == NULL) - optopt = long_options[match].val; - else - optopt = 0; - return (BADARG); - } - if (long_options[match].has_arg == required_argument || - long_options[match].has_arg == optional_argument) { - if (has_equal) - optarg = has_equal; - else if (long_options[match].has_arg == - required_argument) { - /* - * optional argument doesn't use next nargv - */ - optarg = nargv[optind++]; - } - } - if ((long_options[match].has_arg == required_argument) - && (optarg == NULL)) { - /* - * Missing argument; leading ':' indicates no error - * should be generated. 
- */ - if (PRINT_ERROR) - warnx(recargstring, - current_argv); - /* - * XXX: GNU sets optopt to val regardless of flag - */ - if (long_options[match].flag == NULL) - optopt = long_options[match].val; - else - optopt = 0; - --optind; - return (BADARG); - } - } else { /* unknown option */ - if (short_too) { - --optind; - return (-1); - } - if (PRINT_ERROR) - warnx(illoptstring, current_argv); - optopt = 0; - return (BADCH); - } - if (idx) - *idx = match; - if (long_options[match].flag) { - *long_options[match].flag = long_options[match].val; - return (0); - } else - return (long_options[match].val); +static int parse_long_options(char *const *nargv, + const char *options, + const struct option *long_options, + int *idx, + int short_too) { + char *current_argv, *has_equal; + size_t current_argv_len; + int i, ambiguous, match; + +#define IDENTICAL_INTERPRETATION(_x, _y) \ + (long_options[(_x)].has_arg == long_options[(_y)].has_arg && \ + long_options[(_x)].flag == long_options[(_y)].flag && \ + long_options[(_x)].val == long_options[(_y)].val) + + current_argv = place; + match = -1; + ambiguous = 0; + + optind++; + + if ((has_equal = strchr(current_argv, '=')) != NULL) { + /* argument found (--option=arg) */ + current_argv_len = has_equal - current_argv; + has_equal++; + } else + current_argv_len = strlen(current_argv); + + for (i = 0; long_options[i].name; i++) { + /* find matching long option */ + if (strncmp(current_argv, long_options[i].name, + current_argv_len)) + continue; + + if (strlen(long_options[i].name) == current_argv_len) { + /* exact match */ + match = i; + ambiguous = 0; + break; + } + /* + * If this is a known short option, don't allow + * a partial match of a single character. 
+ */ + if (short_too && current_argv_len == 1) + continue; + + if (match == -1) /* partial match */ + match = i; + else if (!IDENTICAL_INTERPRETATION(i, match)) + ambiguous = 1; + } + if (ambiguous) { + /* ambiguous abbreviation */ + if (PRINT_ERROR) + warnx(ambig, (int)current_argv_len, current_argv); + optopt = 0; + return (BADCH); + } + if (match != -1) { /* option found */ + if (long_options[match].has_arg == no_argument && has_equal) { + if (PRINT_ERROR) + warnx(noarg, (int)current_argv_len, + current_argv); + /* + * XXX: GNU sets optopt to val regardless of flag + */ + if (long_options[match].flag == NULL) + optopt = long_options[match].val; + else + optopt = 0; + return (BADARG); + } + if (long_options[match].has_arg == required_argument || + long_options[match].has_arg == optional_argument) { + if (has_equal) + optarg = has_equal; + else if (long_options[match].has_arg == + required_argument) { + /* + * optional argument doesn't use next nargv + */ + optarg = nargv[optind++]; + } + } + if ((long_options[match].has_arg == required_argument) && + (optarg == NULL)) { + /* + * Missing argument; leading ':' indicates no error + * should be generated. + */ + if (PRINT_ERROR) + warnx(recargstring, current_argv); + /* + * XXX: GNU sets optopt to val regardless of flag + */ + if (long_options[match].flag == NULL) + optopt = long_options[match].val; + else + optopt = 0; + --optind; + return (BADARG); + } + } else { /* unknown option */ + if (short_too) { + --optind; + return (-1); + } + if (PRINT_ERROR) + warnx(illoptstring, current_argv); + optopt = 0; + return (BADCH); + } + if (idx) + *idx = match; + if (long_options[match].flag) { + *long_options[match].flag = long_options[match].val; + return (0); + } else + return (long_options[match].val); #undef IDENTICAL_INTERPRETATION } /* * getopt_internal -- - * Parse argc/argv argument vector. Called by user level routines. + * Parse argc/argv argument vector. Called by user level routines. 
*/ -static int -getopt_internal(int nargc, char * const *nargv, const char *options, - const struct option *long_options, int *idx, int flags) -{ - char *oli; /* option letter list index */ - int optchar, short_too; - static int posixly_correct = -1; - - if (options == NULL) - return (-1); - - /* - * XXX Some GNU programs (like cvs) set optind to 0 instead of - * XXX using optreset. Work around this braindamage. - */ - if (optind == 0) - optind = optreset = 1; - - /* - * Disable GNU extensions if POSIXLY_CORRECT is set or options - * string begins with a '+'. - * - * CV, 2009-12-14: Check POSIXLY_CORRECT anew if optind == 0 or - * optreset != 0 for GNU compatibility. - */ +static int getopt_internal(int nargc, + char *const *nargv, + const char *options, + const struct option *long_options, + int *idx, + int flags) { + char *oli; /* option letter list index */ + int optchar, short_too; + static int posixly_correct = -1; + + if (options == NULL) + return (-1); + + /* + * XXX Some GNU programs (like cvs) set optind to 0 instead of + * XXX using optreset. Work around this braindamage. + */ + if (optind == 0) + optind = optreset = 1; + + /* + * Disable GNU extensions if POSIXLY_CORRECT is set or options + * string begins with a '+'. + * + * CV, 2009-12-14: Check POSIXLY_CORRECT anew if optind == 0 or + * optreset != 0 for GNU compatibility. 
+ */ #ifndef _WIN32 - if (posixly_correct == -1 || optreset != 0) - posixly_correct = (getenv("POSIXLY_CORRECT") != NULL); + if (posixly_correct == -1 || optreset != 0) + posixly_correct = (getenv("POSIXLY_CORRECT") != NULL); #endif - if (*options == '-') - flags |= FLAG_ALLARGS; - else if (posixly_correct || *options == '+') - flags &= ~FLAG_PERMUTE; - if (*options == '+' || *options == '-') - options++; - - optarg = NULL; - if (optreset) - nonopt_start = nonopt_end = -1; + if (*options == '-') + flags |= FLAG_ALLARGS; + else if (posixly_correct || *options == '+') + flags &= ~FLAG_PERMUTE; + if (*options == '+' || *options == '-') + options++; + + optarg = NULL; + if (optreset) + nonopt_start = nonopt_end = -1; start: - if (optreset || !*place) { /* update scanning pointer */ - optreset = 0; - if (optind >= nargc) { /* end of argument vector */ - place = EMSG; - if (nonopt_end != -1) { - /* do permutation, if we have to */ - permute_args(nonopt_start, nonopt_end, - optind, nargv); - optind -= nonopt_end - nonopt_start; - } - else if (nonopt_start != -1) { - /* - * If we skipped non-options, set optind - * to the first of them. - */ - optind = nonopt_start; - } - nonopt_start = nonopt_end = -1; - return (-1); - } - if (*(place = nargv[optind]) != '-' || - (place[1] == '\0' && strchr(options, '-') == NULL)) { - place = EMSG; /* found non-option */ - if (flags & FLAG_ALLARGS) { - /* - * GNU extension: - * return non-option as argument to option 1 - */ - optarg = nargv[optind++]; - return (INORDER); - } - if (!(flags & FLAG_PERMUTE)) { - /* - * If no permutation wanted, stop parsing - * at first non-option. 
- */ - return (-1); - } - /* do permutation */ - if (nonopt_start == -1) - nonopt_start = optind; - else if (nonopt_end != -1) { - permute_args(nonopt_start, nonopt_end, - optind, nargv); - nonopt_start = optind - - (nonopt_end - nonopt_start); - nonopt_end = -1; - } - optind++; - /* process next argument */ - goto start; - } - if (nonopt_start != -1 && nonopt_end == -1) - nonopt_end = optind; - - /* - * If we have "-" do nothing, if "--" we are done. - */ - if (place[1] != '\0' && *++place == '-' && place[1] == '\0') { - optind++; - place = EMSG; - /* - * We found an option (--), so if we skipped - * non-options, we have to permute. - */ - if (nonopt_end != -1) { - permute_args(nonopt_start, nonopt_end, - optind, nargv); - optind -= nonopt_end - nonopt_start; - } - nonopt_start = nonopt_end = -1; - return (-1); - } - } - - /* - * Check long options if: - * 1) we were passed some - * 2) the arg is not just "-" - * 3) either the arg starts with -- we are getopt_long_only() - */ - if (long_options != NULL && place != nargv[optind] && - (*place == '-' || (flags & FLAG_LONGONLY))) { - short_too = 0; - if (*place == '-') - place++; /* --foo long option */ - else if (*place != ':' && strchr(options, *place) != NULL) - short_too = 1; /* could be short option too */ - - optchar = parse_long_options(nargv, options, long_options, - idx, short_too); - if (optchar != -1) { - place = EMSG; - return (optchar); - } - } - - if ((optchar = (int)*place++) == (int)':' || - (optchar == (int)'-' && *place != '\0') || - (oli = strchr(options, optchar)) == NULL) { - /* - * If the user specified "-" and '-' isn't listed in - * options, return -1 (non-option) as per POSIX. - * Otherwise, it is an unknown option character (or ':'). 
- */ - if (optchar == (int)'-' && *place == '\0') - return (-1); - if (!*place) - ++optind; - if (PRINT_ERROR) - warnx(illoptchar, optchar); - optopt = optchar; - return (BADCH); - } - if (long_options != NULL && optchar == 'W' && oli[1] == ';') { - /* -W long-option */ - if (*place) /* no space */ - /* NOTHING */; - else if (++optind >= nargc) { /* no arg */ - place = EMSG; - if (PRINT_ERROR) - warnx(recargchar, optchar); - optopt = optchar; - return (BADARG); - } else /* white space */ - place = nargv[optind]; - optchar = parse_long_options(nargv, options, long_options, - idx, 0); - place = EMSG; - return (optchar); - } - if (*++oli != ':') { /* doesn't take argument */ - if (!*place) - ++optind; - } else { /* takes (optional) argument */ - optarg = NULL; - if (*place) /* no white space */ - optarg = place; - else if (oli[1] != ':') { /* arg not optional */ - if (++optind >= nargc) { /* no arg */ - place = EMSG; - if (PRINT_ERROR) - warnx(recargchar, optchar); - optopt = optchar; - return (BADARG); - } else - optarg = nargv[optind]; - } - place = EMSG; - ++optind; - } - /* dump back option letter */ - return (optchar); + if (optreset || !*place) { /* update scanning pointer */ + optreset = 0; + if (optind >= nargc) { /* end of argument vector */ + place = EMSG; + if (nonopt_end != -1) { + /* do permutation, if we have to */ + permute_args(nonopt_start, nonopt_end, optind, + nargv); + optind -= nonopt_end - nonopt_start; + } else if (nonopt_start != -1) { + /* + * If we skipped non-options, set optind + * to the first of them. 
+ */ + optind = nonopt_start; + } + nonopt_start = nonopt_end = -1; + return (-1); + } + if (*(place = nargv[optind]) != '-' || + (place[1] == '\0' && strchr(options, '-') == NULL)) { + place = EMSG; /* found non-option */ + if (flags & FLAG_ALLARGS) { + /* + * GNU extension: + * return non-option as argument to option 1 + */ + optarg = nargv[optind++]; + return (INORDER); + } + if (!(flags & FLAG_PERMUTE)) { + /* + * If no permutation wanted, stop parsing + * at first non-option. + */ + return (-1); + } + /* do permutation */ + if (nonopt_start == -1) + nonopt_start = optind; + else if (nonopt_end != -1) { + permute_args(nonopt_start, nonopt_end, optind, + nargv); + nonopt_start = + optind - (nonopt_end - nonopt_start); + nonopt_end = -1; + } + optind++; + /* process next argument */ + goto start; + } + if (nonopt_start != -1 && nonopt_end == -1) + nonopt_end = optind; + + /* + * If we have "-" do nothing, if "--" we are done. + */ + if (place[1] != '\0' && *++place == '-' && place[1] == '\0') { + optind++; + place = EMSG; + /* + * We found an option (--), so if we skipped + * non-options, we have to permute. 
+ */ + if (nonopt_end != -1) { + permute_args(nonopt_start, nonopt_end, optind, + nargv); + optind -= nonopt_end - nonopt_start; + } + nonopt_start = nonopt_end = -1; + return (-1); + } + } + + /* + * Check long options if: + * 1) we were passed some + * 2) the arg is not just "-" + * 3) either the arg starts with -- we are getopt_long_only() + */ + if (long_options != NULL && place != nargv[optind] && + (*place == '-' || (flags & FLAG_LONGONLY))) { + short_too = 0; + if (*place == '-') + place++; /* --foo long option */ + else if (*place != ':' && strchr(options, *place) != NULL) + short_too = 1; /* could be short option too */ + + optchar = parse_long_options(nargv, options, long_options, idx, + short_too); + if (optchar != -1) { + place = EMSG; + return (optchar); + } + } + + if ((optchar = (int)*place++) == (int)':' || + (optchar == (int)'-' && *place != '\0') || + (oli = strchr(options, optchar)) == NULL) { + /* + * If the user specified "-" and '-' isn't listed in + * options, return -1 (non-option) as per POSIX. + * Otherwise, it is an unknown option character (or ':'). 
+ */ + if (optchar == (int)'-' && *place == '\0') + return (-1); + if (!*place) + ++optind; + if (PRINT_ERROR) + warnx(illoptchar, optchar); + optopt = optchar; + return (BADCH); + } + if (long_options != NULL && optchar == 'W' && oli[1] == ';') { + /* -W long-option */ + if (*place) /* no space */ + /* NOTHING */; + else if (++optind >= nargc) { /* no arg */ + place = EMSG; + if (PRINT_ERROR) + warnx(recargchar, optchar); + optopt = optchar; + return (BADARG); + } else /* white space */ + place = nargv[optind]; + optchar = + parse_long_options(nargv, options, long_options, idx, 0); + place = EMSG; + return (optchar); + } + if (*++oli != ':') { /* doesn't take argument */ + if (!*place) + ++optind; + } else { /* takes (optional) argument */ + optarg = NULL; + if (*place) /* no white space */ + optarg = place; + else if (oli[1] != ':') { /* arg not optional */ + if (++optind >= nargc) { /* no arg */ + place = EMSG; + if (PRINT_ERROR) + warnx(recargchar, optchar); + optopt = optchar; + return (BADARG); + } else + optarg = nargv[optind]; + } + place = EMSG; + ++optind; + } + /* dump back option letter */ + return (optchar); } #ifdef REPLACE_GETOPT /* * getopt -- - * Parse argc/argv argument vector. + * Parse argc/argv argument vector. * * [eventually this will replace the BSD getopt] */ -int -getopt(int nargc, char * const *nargv, const char *options) -{ - - /* - * We don't pass FLAG_PERMUTE to getopt_internal() since - * the BSD getopt(3) (unlike GNU) has never done this. - * - * Furthermore, since many privileged programs call getopt() - * before dropping privileges it makes sense to keep things - * as simple (and bug-free) as possible. - */ - return (getopt_internal(nargc, nargv, options, NULL, NULL, 0)); +int getopt(int nargc, char *const *nargv, const char *options) { + + /* + * We don't pass FLAG_PERMUTE to getopt_internal() since + * the BSD getopt(3) (unlike GNU) has never done this. 
+ * + * Furthermore, since many privileged programs call getopt() + * before dropping privileges it makes sense to keep things + * as simple (and bug-free) as possible. + */ + return (getopt_internal(nargc, nargv, options, NULL, NULL, 0)); } #endif /* REPLACE_GETOPT */ /* * getopt_long -- - * Parse argc/argv argument vector. + * Parse argc/argv argument vector. */ -int -getopt_long(int nargc, char * const *nargv, const char *options, - const struct option *long_options, int *idx) -{ - - return (getopt_internal(nargc, nargv, options, long_options, idx, - FLAG_PERMUTE)); +int getopt_long(int nargc, + char *const *nargv, + const char *options, + const struct option *long_options, + int *idx) { + + return (getopt_internal(nargc, nargv, options, long_options, idx, + FLAG_PERMUTE)); } /* * getopt_long_only -- - * Parse argc/argv argument vector. + * Parse argc/argv argument vector. */ -int -getopt_long_only(int nargc, char * const *nargv, const char *options, - const struct option *long_options, int *idx) -{ - - return (getopt_internal(nargc, nargv, options, long_options, idx, - FLAG_PERMUTE|FLAG_LONGONLY)); +int getopt_long_only(int nargc, + char *const *nargv, + const char *options, + const struct option *long_options, + int *idx) { + + return (getopt_internal(nargc, nargv, options, long_options, idx, + FLAG_PERMUTE | FLAG_LONGONLY)); } diff --git a/win32/wingetopt.h b/win32/wingetopt.h index 260915b7f2..aaaa523783 100644 --- a/win32/wingetopt.h +++ b/win32/wingetopt.h @@ -4,9 +4,9 @@ * This file has no copyright assigned and is placed in the Public Domain. * This file is a part of the w64 mingw-runtime package. * - * The w64 mingw-runtime package and its code is distributed in the hope that it - * will be useful but WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESSED OR - * IMPLIED ARE HEREBY DISCLAIMED. This includes but is not limited to + * The w64 mingw-runtime package and its code is distributed in the hope that it + * will be useful but WITHOUT ANY WARRANTY. 
ALL WARRANTIES, EXPRESSED OR + * IMPLIED ARE HEREBY DISCLAIMED. This includes but is not limited to * warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. */ @@ -19,14 +19,14 @@ extern "C" { #endif -extern int optind; /* index of first non-option in argv */ -extern int optopt; /* single option character, as parsed */ -extern int opterr; /* flag to enable built-in diagnostics... */ - /* (user may set to zero, to suppress) */ +extern int optind; /* index of first non-option in argv */ +extern int optopt; /* single option character, as parsed */ +extern int opterr; /* flag to enable built-in diagnostics... */ + /* (user may set to zero, to suppress) */ -extern char *optarg; /* pointer to argument of current option */ +extern char *optarg; /* pointer to argument of current option */ -extern int getopt(int nargc, char * const *nargv, const char *options); +extern int getopt(int nargc, char *const *nargv, const char *options); #ifdef _BSD_SOURCE /* @@ -35,7 +35,7 @@ extern int getopt(int nargc, char * const *nargv, const char *options); * proclaim their BSD heritage, before including this header; however, * to maintain portability, developers are advised to avoid it. */ -# define optreset __mingw_optreset +#define optreset __mingw_optreset extern int optreset; #endif #ifdef __cplusplus @@ -59,25 +59,30 @@ extern int optreset; extern "C" { #endif -struct option /* specification for a long form option... */ +struct option /* specification for a long form option... */ { - const char *name; /* option name, without leading hyphens */ - int has_arg; /* does it take an argument? */ - int *flag; /* where to save its status, or NULL */ - int val; /* its associated status value */ + const char *name; /* option name, without leading hyphens */ + int has_arg; /* does it take an argument? */ + int *flag; /* where to save its status, or NULL */ + int val; /* its associated status value */ }; -enum /* permitted values for its `has_arg' field... 
*/ -{ - no_argument = 0, /* option never takes an argument */ - required_argument, /* option always requires an argument */ - optional_argument /* option may take an argument */ +enum /* permitted values for its `has_arg' field... */ +{ no_argument = 0, /* option never takes an argument */ + required_argument, /* option always requires an argument */ + optional_argument /* option may take an argument */ }; -extern int getopt_long(int nargc, char * const *nargv, const char *options, - const struct option *long_options, int *idx); -extern int getopt_long_only(int nargc, char * const *nargv, const char *options, - const struct option *long_options, int *idx); +extern int getopt_long(int nargc, + char *const *nargv, + const char *options, + const struct option *long_options, + int *idx); +extern int getopt_long_only(int nargc, + char *const *nargv, + const char *options, + const struct option *long_options, + int *idx); /* * Previous MinGW implementation had... */ @@ -85,7 +90,7 @@ extern int getopt_long_only(int nargc, char * const *nargv, const char *options, /* * ...for the long form API only; keep this for compatibility. 
*/ -# define HAVE_DECL_GETOPT 1 +#define HAVE_DECL_GETOPT 1 #endif #ifdef __cplusplus diff --git a/win32/wintime.h b/win32/wintime.h index fb6e5347e5..07f55b8b17 100644 --- a/win32/wintime.h +++ b/win32/wintime.h @@ -4,29 +4,30 @@ #pragma once /** - * gettimeofday() for Win32 from http://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows + * gettimeofday() for Win32 from + * http://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows */ #define WIN32_LEAN_AND_MEAN #include -#include // portable: uint64_t MSVC: __int64 +#include // portable: uint64_t MSVC: __int64 -static int gettimeofday(struct timeval * tp, struct timezone * tzp) -{ - // Note: some broken versions only have 8 trailing zero's, the correct epoch has 9 trailing zero's - // This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC) - // until 00:00:00 January 1, 1970 +static int gettimeofday(struct timeval *tp, struct timezone *tzp) { + // Note: some broken versions only have 8 trailing zero's, the correct + // epoch has 9 trailing zero's This magic number is the number of 100 + // nanosecond intervals since January 1, 1601 (UTC) until 00:00:00 + // January 1, 1970 static const uint64_t EPOCH = ((uint64_t)116444736000000000ULL); - SYSTEMTIME system_time; - FILETIME file_time; - uint64_t time; + SYSTEMTIME system_time; + FILETIME file_time; + uint64_t time; GetSystemTime(&system_time); SystemTimeToFileTime(&system_time, &file_time); time = ((uint64_t)file_time.dwLowDateTime); time += ((uint64_t)file_time.dwHighDateTime) << 32; - tp->tv_sec = (long)((time - EPOCH) / 10000000L); + tp->tv_sec = (long)((time - EPOCH) / 10000000L); tp->tv_usec = (long)(system_time.wMilliseconds * 1000); return 0; } From f092c290995ca81b3afb4015fcc3350ba02caa96 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Thu, 28 Oct 2021 18:53:14 +0200 Subject: [PATCH 55/56] Manual style fixes of Python code --- packaging/nuget/artifact.py | 6 +- 
packaging/nuget/cleanup-s3.py | 6 +- packaging/nuget/packaging.py | 96 +++++++++++++++++------------ packaging/nuget/release.py | 12 ++-- tests/LibrdkafkaTestApp.py | 26 +++++--- tests/broker_version_tests.py | 37 ++++++----- tests/cluster_testing.py | 9 +-- tests/interactive_broker_version.py | 32 ++++++---- tests/performance_plot.py | 4 +- tests/sasl_test.py | 30 ++++++--- 10 files changed, 159 insertions(+), 99 deletions(-) diff --git a/packaging/nuget/artifact.py b/packaging/nuget/artifact.py index 88c6f64f5c..c58e0c9c7b 100755 --- a/packaging/nuget/artifact.py +++ b/packaging/nuget/artifact.py @@ -24,9 +24,10 @@ import re import os -import argparse import boto3 +import packaging + s3_bucket = 'librdkafka-ci-packages' dry_run = False @@ -151,7 +152,8 @@ def collect_single(self, path, req_tag=True): return Artifact(self, path, info) def collect_s3(self): - """ Collect and download build-artifacts from S3 based on git reference """ + """ Collect and download build-artifacts from S3 based on + git reference """ print( 'Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket)) diff --git a/packaging/nuget/cleanup-s3.py b/packaging/nuget/cleanup-s3.py index 6cc8803330..2093af0c1d 100755 --- a/packaging/nuget/cleanup-s3.py +++ b/packaging/nuget/cleanup-s3.py @@ -4,7 +4,6 @@ # This also covers python builds. import re -import os from datetime import datetime, timezone import boto3 import argparse @@ -82,7 +81,7 @@ def collect_s3(s3, min_age_days=60): else: res = s3.list_objects_v2(Bucket=s3_bucket) - if res.get('IsTruncated') == True: + if res.get('IsTruncated') is True: cont_token = res.get('NextContinuationToken') else: more = False @@ -107,7 +106,8 @@ def chunk_list(lst, cnt): parser = argparse.ArgumentParser() parser.add_argument("--delete", - help="WARNING! Don't just check, actually delete S3 objects.", + help="WARNING! 
Don't just check, actually delete " + "S3 objects.", action="store_true") parser.add_argument("--age", help="Minimum object age in days.", type=int, default=360) diff --git a/packaging/nuget/packaging.py b/packaging/nuget/packaging.py index 11c7020872..7ae461b3a4 100755 --- a/packaging/nuget/packaging.py +++ b/packaging/nuget/packaging.py @@ -11,10 +11,8 @@ import tempfile import shutil import subprocess -import urllib from fnmatch import fnmatch from string import Template -from collections import defaultdict import boto3 from zfile import zfile import magic @@ -37,7 +35,8 @@ # This is used to verify that an artifact has the expected file type. magic_patterns = { ('win', 'x64', '.dll'): re.compile('PE32.*DLL.* x86-64, for MS Windows'), - ('win', 'x86', '.dll'): re.compile('PE32.*DLL.* Intel 80386, for MS Windows'), + ('win', 'x86', '.dll'): + re.compile('PE32.*DLL.* Intel 80386, for MS Windows'), ('win', 'x64', '.lib'): re.compile('current ar archive'), ('win', 'x86', '.lib'): re.compile('current ar archive'), ('linux', 'x64', '.so'): re.compile('ELF 64.* x86-64'), @@ -60,7 +59,8 @@ def magic_mismatch(path, a): minfo = magic.id_filename(path) if not pattern.match(minfo): print( - f"Warning: {path} magic \"{minfo}\" does not match expected {pattern} for key {k}") + f"Warning: {path} magic \"{minfo}\" " + f"does not match expected {pattern} for key {k}") return True return False @@ -170,8 +170,6 @@ def collect_single(self, path, req_tag=True): :param: req_tag bool: Require tag to match. """ - #print('? %s' % path) - # For local files, strip download path. # Also ignore any parent directories. if path.startswith(self.dlpath): @@ -209,13 +207,13 @@ def collect_single(self, path, req_tag=True): # Make sure all matches were satisfied, unless this is a # common artifact. 
if info.get('p', '') != 'common' and len(unmatched) > 0: - # print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched)) return None return Artifact(self, path, info) def collect_s3(self): - """ Collect and download build-artifacts from S3 based on git reference """ + """ Collect and download build-artifacts from S3 based on + git reference """ print( 'Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket)) @@ -229,14 +227,15 @@ def collect_s3(self): more = True while more: if cont_token is not None: - res = self.s3_client.list_objects_v2(Bucket=s3_bucket, - Prefix='librdkafka/', - ContinuationToken=cont_token) + res = self.s3_client.list_objects_v2( + Bucket=s3_bucket, + Prefix='librdkafka/', + ContinuationToken=cont_token) else: res = self.s3_client.list_objects_v2(Bucket=s3_bucket, Prefix='librdkafka/') - if res.get('IsTruncated') == True: + if res.get('IsTruncated') is True: cont_token = res.get('NextContinuationToken') else: more = False @@ -278,7 +277,8 @@ def add_file(self, file): self.files[file] = True def build(self): - """ Build package output(s), return a list of paths to built packages """ + """ Build package output(s), return a list of paths " + to built packages """ raise NotImplementedError def cleanup(self): @@ -404,8 +404,10 @@ def build(self, buildtype): 'LICENSES.txt'], # Travis OSX build - [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, - './lib/librdkafka.dylib', 'runtimes/osx-x64/native/librdkafka.dylib'], + [{'arch': 'x64', 'plat': 'osx', + 'fname_glob': 'librdkafka-clang.tar.gz'}, + './lib/librdkafka.dylib', + 'runtimes/osx-x64/native/librdkafka.dylib'], # Travis Manylinux build [{'arch': 'x64', 'plat': 'linux', @@ -426,15 +428,20 @@ def build(self, buildtype): './usr/lib64/librdkafka.so.1', 'runtimes/linux-x64/native/centos7-librdkafka.so'], # Travis Alpine build - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'alpine-librdkafka.tgz'}, - 'librdkafka.so.1', 
'runtimes/linux-x64/native/alpine-librdkafka.so'], + [{'arch': 'x64', 'plat': 'linux', + 'fname_glob': 'alpine-librdkafka.tgz'}, + 'librdkafka.so.1', + 'runtimes/linux-x64/native/alpine-librdkafka.so'], # Travis arm64 Linux build - [{'arch': 'arm64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './lib/librdkafka.so.1', 'runtimes/linux-arm64/native/librdkafka.so'], + [{'arch': 'arm64', 'plat': 'linux', + 'fname_glob': 'librdkafka-gcc.tar.gz'}, + './lib/librdkafka.so.1', + 'runtimes/linux-arm64/native/librdkafka.so'], # Common Win runtime [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, - 'vcruntime140.dll', 'runtimes/win-x64/native/vcruntime140.dll'], + 'vcruntime140.dll', + 'runtimes/win-x64/native/vcruntime140.dll'], [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, 'msvcp140.dll', 'runtimes/win-x64/native/msvcp140.dll'], # matches librdkafka.redist.{VER}.nupkg @@ -469,13 +476,18 @@ def build(self, buildtype): 'build/native/bin/v140/x64/Release/zstd.dll', 'runtimes/win-x64/native/zstd.dll'], # matches librdkafka.{VER}.nupkg - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v140/x64/Release/librdkafka.lib', 'build/native/lib/win/x64/win-x64-Release/v140/librdkafka.lib'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v140/x64/Release/librdkafkacpp.lib', 'build/native/lib/win/x64/win-x64-Release/v140/librdkafkacpp.lib'], + [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', + 'fname_excludes': ['redist', 'symbols']}, + 'build/native/lib/v140/x64/Release/librdkafka.lib', + 'build/native/lib/win/x64/win-x64-Release/v140/librdkafka.lib'], + [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', + 'fname_excludes': ['redist', 'symbols']}, + 'build/native/lib/v140/x64/Release/librdkafkacpp.lib', + 
'build/native/lib/win/x64/win-x64-Release/v140/librdkafkacpp.lib'], # noqa: E501 [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, - 'vcruntime140.dll', 'runtimes/win-x86/native/vcruntime140.dll'], + 'vcruntime140.dll', + 'runtimes/win-x86/native/vcruntime140.dll'], [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, 'msvcp140.dll', 'runtimes/win-x86/native/msvcp140.dll'], # matches librdkafka.redist.{VER}.nupkg @@ -512,10 +524,14 @@ def build(self, buildtype): 'runtimes/win-x86/native/zstd.dll'], # matches librdkafka.{VER}.nupkg - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v140/Win32/Release/librdkafka.lib', 'build/native/lib/win/x86/win-x86-Release/v140/librdkafka.lib'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v140/Win32/Release/librdkafkacpp.lib', 'build/native/lib/win/x86/win-x86-Release/v140/librdkafkacpp.lib'] + [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', + 'fname_excludes': ['redist', 'symbols']}, + 'build/native/lib/v140/Win32/Release/librdkafka.lib', + 'build/native/lib/win/x86/win-x86-Release/v140/librdkafka.lib'], + [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', + 'fname_excludes': ['redist', 'symbols']}, + 'build/native/lib/v140/Win32/Release/librdkafkacpp.lib', + 'build/native/lib/win/x86/win-x86-Release/v140/librdkafkacpp.lib'] ] for m in mappings: @@ -551,11 +567,11 @@ def build(self, buildtype): try: zfile.ZFile.extract(a.lpath, member, outf) - except KeyError as e: + except KeyError: continue except Exception as e: raise Exception( - 'file not found in archive %s: %s. Files in archive are: %s' % + 'file not found in archive %s: %s. 
Files in archive are: %s' % # noqa: E501 (a.lpath, e, zfile.ZFile( a.lpath).getnames())) @@ -569,7 +585,7 @@ def build(self, buildtype): if not found: raise MissingArtifactError( - 'unable to find artifact with tags %s matching "%s" for file "%s"' % + 'unable to find artifact with tags %s matching "%s" for file "%s"' % # noqa: E501 (str(attributes), fname_glob, member)) print('Tree extracted to %s' % self.stpath) @@ -577,8 +593,9 @@ def build(self, buildtype): # After creating a bare-bone nupkg layout containing the artifacts # and some spec and props files, call the 'nuget' utility to # make a proper nupkg of it (with all the metadata files). - subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % - (os.path.join(self.stpath, 'librdkafka.redist.nuspec'), + subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % # noqa: E501 + (os.path.join(self.stpath, + 'librdkafka.redist.nuspec'), self.stpath), shell=True) return 'librdkafka.redist.%s.nupkg' % vless_version @@ -705,13 +722,16 @@ def build(self, buildtype): 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/librdkafka-static.a', 'librdkafka_darwin.a'], - [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, + [{'arch': 'x64', 'plat': 'osx', + 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_darwin.pc'], # win static lib and pkg-config file (mingw) - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka-gcc.tar.gz'}, + [{'arch': 'x64', 'plat': 'win', + 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/librdkafka-static.a', 'librdkafka_windows.a'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka-gcc.tar.gz'}, + [{'arch': 'x64', 'plat': 'win', + 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_windows.pc'], ] @@ -757,7 +777,7 @@ def build(self, buildtype): zfile.ZFile.extract(artifact.lpath, member, outf) except KeyError as e: raise Exception( - 'file not found in 
archive %s: %s. Files in archive are: %s' % + 'file not found in archive %s: %s. Files in archive are: %s' % # noqa: E501 (artifact.lpath, e, zfile.ZFile( artifact.lpath).getnames())) diff --git a/packaging/nuget/release.py b/packaging/nuget/release.py index 0b1f64c29a..1078c73ef0 100755 --- a/packaging/nuget/release.py +++ b/packaging/nuget/release.py @@ -24,7 +24,8 @@ help="Don't collect from S3", action="store_true") parser.add_argument("--dry-run", - help="Locate artifacts but don't actually download or do anything", + help="Locate artifacts but don't actually " + "download or do anything", action="store_true") parser.add_argument( "--directory", @@ -42,7 +43,10 @@ "--nuget-version", help="The nuget package version (defaults to same as tag)", default=None) - parser.add_argument("--upload", help="Upload package to after building, using provided NuGet API key (either file or the key itself)", default=None, + parser.add_argument("--upload", help="Upload package to after building, " + "using provided NuGet API key " + "(either file or the key itself)", + default=None, type=str) parser.add_argument( "--class", @@ -141,6 +145,6 @@ print('Uploading %s to NuGet' % pkgfile) r = os.system("./push-to-nuget.sh '%s' %s" % (nuget_key, pkgfile)) - assert int( - r) == 0, "NuGet upload failed with exit code {}, see previous errors".format(r) + assert int(r) == 0, \ + f"NuGet upload failed with exit code {r}, see previous errors" print('%s successfully uploaded to NuGet' % pkgfile) diff --git a/tests/LibrdkafkaTestApp.py b/tests/LibrdkafkaTestApp.py index 483f84dd63..d1e0df1919 100644 --- a/tests/LibrdkafkaTestApp.py +++ b/tests/LibrdkafkaTestApp.py @@ -6,13 +6,12 @@ # trivup python module # gradle in your PATH -from trivup.trivup import Cluster, App, UuidAllocator +from trivup.trivup import App, UuidAllocator from trivup.apps.ZookeeperApp import ZookeeperApp from trivup.apps.KafkaBrokerApp import KafkaBrokerApp from trivup.apps.KerberosKdcApp import KerberosKdcApp import json 
-import subprocess class LibrdkafkaTestApp(App): @@ -77,14 +76,18 @@ def __init__(self, cluster, version, conf=None, kdc = cluster.find_app(KerberosKdcApp) if kdc is None: self.log( - 'WARNING: sasl_mechanisms is GSSAPI set but no KerberosKdcApp available: client SASL config will be invalid (which might be intentional)') + 'WARNING: sasl_mechanisms is GSSAPI set but no ' + 'KerberosKdcApp available: client SASL config will ' + 'be invalid (which might be intentional)') else: self.env_add('KRB5_CONFIG', kdc.conf['krb5_conf']) self.env_add('KRB5_KDC_PROFILE', kdc.conf['kdc_conf']) - principal, keytab = kdc.add_principal(self.name, - conf.get('advertised_hostname', self.node.name)) + principal, keytab = kdc.add_principal( + self.name, + conf.get('advertised_hostname', self.node.name)) conf_blob.append('sasl.kerberos.service.name=%s' % - self.conf.get('sasl_servicename', 'kafka')) + self.conf.get('sasl_servicename', + 'kafka')) conf_blob.append('sasl.kerberos.keytab=%s' % keytab) conf_blob.append( 'sasl.kerberos.principal=%s' % @@ -92,7 +95,7 @@ def __init__(self, cluster, version, conf=None, else: self.log( - 'WARNING: FIXME: SASL %s client config not written to %s: unhandled mechanism' % + 'WARNING: FIXME: SASL %s client config not written to %s: unhandled mechanism' % # noqa: E501 (mech, self.test_conf_file)) # SSL config @@ -138,7 +141,7 @@ def __init__(self, cluster, version, conf=None, if len(bootstrap_servers) == 0: bootstrap_servers = all_listeners[0] self.log( - 'WARNING: No eligible listeners for security.protocol=%s in %s: falling back to first listener: %s: tests will fail (which might be the intention)' % + 'WARNING: No eligible listeners for security.protocol=%s in %s: falling back to first listener: %s: tests will fail (which might be the intention)' % # noqa: E501 (self.security_protocol, all_listeners, bootstrap_servers)) self.bootstrap_servers = bootstrap_servers @@ -187,7 +190,9 @@ def start_cmd(self): for b in [x for x in self.cluster.apps if 
isinstance( x, KafkaBrokerApp)]: self.env_add('BROKER_ADDRESS_%d' % b.appid, - ','.join([x for x in b.conf['listeners'].split(',') if x.startswith(self.security_protocol)])) + ','.join([x for x in + b.conf['listeners'].split(',') + if x.startswith(self.security_protocol)])) # Add each broker pid as an env so they can be killed # indivdidually. self.env_add('BROKER_PID_%d' % b.appid, str(b.proc.pid)) @@ -203,7 +208,8 @@ def start_cmd(self): extra_args.append(self.conf.get('args')) extra_args.append('-E') return './run-test.sh -p%d -K %s %s' % ( - int(self.conf.get('parallel', 5)), ' '.join(extra_args), self.test_mode) + int(self.conf.get('parallel', 5)), ' '.join(extra_args), + self.test_mode) def report(self): if self.test_mode == 'bash': diff --git a/tests/broker_version_tests.py b/tests/broker_version_tests.py index ce3cde4fb9..717da28d54 100755 --- a/tests/broker_version_tests.py +++ b/tests/broker_version_tests.py @@ -8,19 +8,18 @@ # trivup python module # gradle in your PATH -from cluster_testing import LibrdkafkaTestCluster, print_report_summary, read_scenario_conf +from cluster_testing import ( + LibrdkafkaTestCluster, + print_report_summary, + read_scenario_conf) from LibrdkafkaTestApp import LibrdkafkaTestApp -from trivup.apps.ZookeeperApp import ZookeeperApp -from trivup.apps.KafkaBrokerApp import KafkaBrokerApp import subprocess -import time import tempfile import os import sys import argparse import json -import tempfile def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, @@ -47,7 +46,7 @@ def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, cluster.start(timeout=30) if conf.get('test_mode', '') == 'bash': - cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % ( + cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % ( # noqa: E501 cluster.name, version) subprocess.call( cmd, @@ -70,10 +69,12 @@ def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, 
print( '# Connect to cluster with bootstrap.servers %s' % cluster.bootstrap_servers()) - print('# Exiting the shell will bring down the cluster. Good luck.') + print('# Exiting the shell will bring down the cluster. ' + 'Good luck.') subprocess.call( - 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % - (cluster.name, version), env=rdkafka.env, shell=True, executable='/bin/bash') + 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % # noqa: E501 + (cluster.name, version), env=rdkafka.env, shell=True, + executable='/bin/bash') cluster.stop(force=True) @@ -122,8 +123,10 @@ def handle_report(report, version, suite): parser.add_argument('--conf', type=str, dest='conf', default=None, help='trivup JSON config object (not file)') parser.add_argument('--rdkconf', type=str, dest='rdkconf', default=None, - help='trivup JSON config object (not file) for LibrdkafkaTestApp') - parser.add_argument('--scenario', type=str, dest='scenario', default='default', + help='trivup JSON config object (not file) ' + 'for LibrdkafkaTestApp') + parser.add_argument('--scenario', type=str, dest='scenario', + default='default', help='Test scenario (see scenarios/ directory)') parser.add_argument('--tests', type=str, dest='tests', default=None, help='Test to run (e.g., "0002")') @@ -131,11 +134,13 @@ def handle_report(report, version, suite): help='Write test suites report to this filename') parser.add_argument('--interact', action='store_true', dest='interact', default=False, - help='On test failure start a shell before bringing the cluster down.') + help='On test failure start a shell before bringing ' + 'the cluster down.') parser.add_argument('versions', type=str, nargs='*', default=['0.8.1.1', '0.8.2.2', '0.9.0.1', '2.3.0'], help='Broker versions to test') - parser.add_argument('--interactive', action='store_true', dest='interactive', + parser.add_argument('--interactive', action='store_true', + dest='interactive', default=False, 
help='Start a shell instead of running tests') parser.add_argument( @@ -161,7 +166,8 @@ def handle_report(report, version, suite): type=int, default=3, help='Number of Kafka brokers') - parser.add_argument('--ssl', dest='ssl', action='store_true', default=False, + parser.add_argument('--ssl', dest='ssl', action='store_true', + default=False, help='Enable SSL endpoints') parser.add_argument( '--sasl', @@ -230,7 +236,8 @@ def handle_report(report, version, suite): # Run tests print('#### Version %s, suite %s, scenario %s: STARTING' % (version, suite['name'], args.scenario)) - report = test_it(version, tests=tests, conf=_conf, rdkconf=_rdkconf, + report = test_it(version, tests=tests, conf=_conf, + rdkconf=_rdkconf, interact=args.interact, debug=args.debug, scenario=args.scenario) diff --git a/tests/cluster_testing.py b/tests/cluster_testing.py index a0f28ac9c7..3136f33307 100755 --- a/tests/cluster_testing.py +++ b/tests/cluster_testing.py @@ -7,7 +7,7 @@ # trivup python module # gradle in your PATH -from trivup.trivup import Cluster, UuidAllocator +from trivup.trivup import Cluster from trivup.apps.ZookeeperApp import ZookeeperApp from trivup.apps.KafkaBrokerApp import KafkaBrokerApp from trivup.apps.KerberosKdcApp import KerberosKdcApp @@ -46,8 +46,9 @@ def __init__(self, version, conf={}, num_brokers=3, debug=False, \\p conf dict is passed to KafkaBrokerApp classes, etc. """ - super(LibrdkafkaTestCluster, self).__init__(self.__class__.__name__, - os.environ.get('TRIVUP_ROOT', 'tmp'), debug=debug) + super(LibrdkafkaTestCluster, self).__init__( + self.__class__.__name__, + os.environ.get('TRIVUP_ROOT', 'tmp'), debug=debug) # Read trivup config from scenario definition. 
defconf = read_scenario_conf(scenario) @@ -83,7 +84,7 @@ def __init__(self, version, conf={}, num_brokers=3, debug=False, { 'conf': [ 'broker.rack=RACK${appid}', - 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) + 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) # noqa: E501 self.brokers.append(KafkaBrokerApp(self, defconf)) def bootstrap_servers(self): diff --git a/tests/interactive_broker_version.py b/tests/interactive_broker_version.py index 30e42977a1..2283f88ca1 100755 --- a/tests/interactive_broker_version.py +++ b/tests/interactive_broker_version.py @@ -16,7 +16,6 @@ from cluster_testing import read_scenario_conf import subprocess -import time import tempfile import os import sys @@ -31,7 +30,8 @@ def version_as_number(version): return float('%s.%s' % (tokens[0], tokens[1])) -def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt=1, +def test_version(version, cmd=None, deploy=True, conf={}, debug=False, + exec_cnt=1, root_path='tmp', broker_cnt=3, scenario='default'): """ @brief Create, deploy and start a Kafka cluster using Kafka \\p version @@ -68,7 +68,7 @@ def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt= { 'conf': [ 'broker.rack=RACK${appid}', - 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) + 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) # noqa: E501 brokers.append(KafkaBrokerApp(cluster, defconf)) cmd_env = os.environ.copy() @@ -101,9 +101,11 @@ def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt= elif mech == 'OAUTHBEARER': security_protocol = 'SASL_PLAINTEXT' os.write( - fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode('ascii'))) + fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode( + 'ascii'))) os.write(fd, ('sasl.oauthbearer.config=%s\n' % - 'scope=requiredScope principal=admin').encode('ascii')) + 
'scope=requiredScope principal=admin').encode( + 'ascii')) else: print( '# FIXME: SASL %s client config not written to %s' % @@ -170,7 +172,7 @@ def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt= if not cluster.wait_operational(30): cluster.stop(force=True) - raise Exception('Cluster %s did not go operational, see logs in %s/%s' % + raise Exception('Cluster %s did not go operational, see logs in %s/%s' % # noqa: E501 (cluster.name, cluster.root_path, cluster.instance)) print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers) @@ -233,17 +235,21 @@ def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt= parser.add_argument('versions', type=str, default=None, nargs='+', help='Kafka version(s) to deploy') - parser.add_argument('--no-deploy', action='store_false', dest='deploy', default=True, - help='Dont deploy applications, assume already deployed.') + parser.add_argument('--no-deploy', action='store_false', dest='deploy', + default=True, + help='Dont deploy applications, ' + 'assume already deployed.') parser.add_argument('--conf', type=str, dest='conf', default=None, help='JSON config object (not file)') - parser.add_argument('--scenario', type=str, dest='scenario', default='default', + parser.add_argument('--scenario', type=str, dest='scenario', + default='default', help='Test scenario (see scenarios/ directory)') parser.add_argument('-c', type=str, dest='cmd', default=None, help='Command to execute instead of shell') parser.add_argument('-n', type=int, dest='exec_cnt', default=1, help='Number of times to execute -c ..') - parser.add_argument('--debug', action='store_true', dest='debug', default=False, + parser.add_argument('--debug', action='store_true', dest='debug', + default=False, help='Enable trivup debugging') parser.add_argument( '--root', @@ -268,7 +274,8 @@ def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt= type=int, default=3, help='Number of Kafka 
brokers') - parser.add_argument('--ssl', dest='ssl', action='store_true', default=False, + parser.add_argument('--ssl', dest='ssl', action='store_true', + default=False, help='Enable SSL endpoints') parser.add_argument( '--sasl', @@ -302,7 +309,8 @@ def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt= retcode = 0 for version in args.versions: r = test_version(version, cmd=args.cmd, deploy=args.deploy, - conf=args.conf, debug=args.debug, exec_cnt=args.exec_cnt, + conf=args.conf, debug=args.debug, + exec_cnt=args.exec_cnt, root_path=args.root, broker_cnt=args.broker_cnt, scenario=args.scenario) if not r: diff --git a/tests/performance_plot.py b/tests/performance_plot.py index 7d540f5513..b699377f1c 100755 --- a/tests/performance_plot.py +++ b/tests/performance_plot.py @@ -61,7 +61,6 @@ def get_perf_data(perfname, stats): def plot(description, name, stats, perfname, outfile=None): labels, x, y, errs = get_perf_data(perfname, stats) - colors = np.random.rand(len(labels)) plt.title('%s: %s %s' % (description, name, perfname)) plt.xlabel('Kafka version') plt.ylabel(perfname) @@ -112,4 +111,5 @@ def plot(description, name, stats, perfname, outfile=None): for perfname in ['mb_per_sec', 'records_per_sec']: plot('librdkafka 0038_performance test: %s (%d samples)' % (outfile, len(reports)), - t, stats[t], perfname, outfile='%s_%s_%s.png' % (outfile, t, perfname)) + t, stats[t], perfname, outfile='%s_%s_%s.png' % ( + outfile, t, perfname)) diff --git a/tests/sasl_test.py b/tests/sasl_test.py index f73fba560e..fef02e0509 100755 --- a/tests/sasl_test.py +++ b/tests/sasl_test.py @@ -8,7 +8,11 @@ # trivup python module # gradle in your PATH -from cluster_testing import LibrdkafkaTestCluster, print_report_summary, print_test_report_summary, read_scenario_conf +from cluster_testing import ( + LibrdkafkaTestCluster, + print_report_summary, + print_test_report_summary, + read_scenario_conf) from LibrdkafkaTestApp import LibrdkafkaTestApp import os @@ -98,27 
+102,33 @@ def handle_report(report, version, suite): if __name__ == '__main__': parser = argparse.ArgumentParser( - description='Run librdkafka test suit using SASL on a trivupped cluster') + description='Run librdkafka test suit using SASL on a ' + 'trivupped cluster') parser.add_argument('--conf', type=str, dest='conf', default=None, help='trivup JSON config object (not file)') parser.add_argument('--rdkconf', type=str, dest='rdkconf', default=None, - help='trivup JSON config object (not file) for LibrdkafkaTestApp') + help='trivup JSON config object (not file) ' + 'for LibrdkafkaTestApp') parser.add_argument('--scenario', type=str, dest='scenario', default='default', help='Test scenario (see scenarios/ directory)') parser.add_argument('--tests', type=str, dest='tests', default=None, help='Test to run (e.g., "0002")') - parser.add_argument('--no-ssl', action='store_false', dest='ssl', default=True, + parser.add_argument('--no-ssl', action='store_false', dest='ssl', + default=True, help='Don\'t run SSL tests') - parser.add_argument('--no-sasl', action='store_false', dest='sasl', default=True, + parser.add_argument('--no-sasl', action='store_false', dest='sasl', + default=True, help='Don\'t run SASL tests') - parser.add_argument('--no-plaintext', action='store_false', dest='plaintext', default=True, + parser.add_argument('--no-plaintext', action='store_false', + dest='plaintext', default=True, help='Don\'t run PLAINTEXT tests') parser.add_argument('--report', type=str, dest='report', default=None, help='Write test suites report to this filename') - parser.add_argument('--debug', action='store_true', dest='debug', default=False, + parser.add_argument('--debug', action='store_true', dest='debug', + default=False, help='Enable trivup debugging') parser.add_argument('versions', type=str, default=None, nargs='*', help='Limit broker versions to these') @@ -157,7 +167,8 @@ def handle_report(report, version, suite): 'sasl_users': 'myuser=mypassword', 'security.protocol': 
'SSL'} sasl_oauthbearer_conf = {'sasl_mechanisms': 'OAUTHBEARER', - 'sasl_oauthbearer_config': 'scope=requiredScope principal=admin'} + 'sasl_oauthbearer_config': + 'scope=requiredScope principal=admin'} sasl_kerberos_conf = {'sasl_mechanisms': 'GSSAPI', 'sasl_servicename': 'kafka'} suites = [{'name': 'SASL PLAIN', @@ -237,7 +248,8 @@ def handle_report(report, version, suite): tests_to_run = suite.get('tests', None) else: tests_to_run = tests - report = test_it(version, tests=tests_to_run, conf=_conf, rdkconf=_rdkconf, + report = test_it(version, tests=tests_to_run, conf=_conf, + rdkconf=_rdkconf, debug=args.debug, scenario=args.scenario) # Handle test report From 2a8bb418e0eb4655dc88ce9aec3eccb107551ff4 Mon Sep 17 00:00:00 2001 From: Magnus Edenhill Date: Tue, 2 Nov 2021 11:07:56 +0100 Subject: [PATCH 56/56] Avoid use of FILE* BIOs to circumvent OpenSSL_Applink requirement on Windows (#3554) --- CHANGELOG.md | 11 +++++++++++ src/rdkafka_ssl.c | 23 ++++++++++++++--------- tests/0004-conf.c | 46 +++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 70 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d672692706..eda7221b85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,17 @@ librdkafka v1.9.0 is a feature release: if not already created. +## Fixes + +### General fixes + + * Windows: some applications would crash with an error message like + `no OPENSSL_Applink()` written to the console if `ssl.keystore.location` + was configured. + This regression was introduced in v1.8.0 due to use of vcpkgs and how + keystore file was read. #3554. 
+ + # librdkafka v1.8.2 diff --git a/src/rdkafka_ssl.c b/src/rdkafka_ssl.c index 2d5e138aa2..9c20696657 100644 --- a/src/rdkafka_ssl.c +++ b/src/rdkafka_ssl.c @@ -1236,30 +1236,32 @@ static int rd_kafka_ssl_set_certs(rd_kafka_t *rk, * ssl.keystore.location */ if (rk->rk_conf.ssl.keystore_location) { - FILE *fp; EVP_PKEY *pkey; X509 *cert; STACK_OF(X509) *ca = NULL; + BIO *bio; PKCS12 *p12; rd_kafka_dbg(rk, SECURITY, "SSL", "Loading client's keystore file from %s", rk->rk_conf.ssl.keystore_location); - if (!(fp = fopen(rk->rk_conf.ssl.keystore_location, "rb"))) { + bio = BIO_new_file(rk->rk_conf.ssl.keystore_location, "r"); + if (!bio) { rd_snprintf(errstr, errstr_size, "Failed to open ssl.keystore.location: " - "%s: %s", - rk->rk_conf.ssl.keystore_location, - rd_strerror(errno)); + "%s: ", + rk->rk_conf.ssl.keystore_location); return -1; } - p12 = d2i_PKCS12_fp(fp, NULL); - fclose(fp); + p12 = d2i_PKCS12_bio(bio, NULL); if (!p12) { + BIO_free(bio); rd_snprintf(errstr, errstr_size, - "Error reading PKCS#12 file: "); + "Error reading ssl.keystore.location " + "PKCS#12 file: %s: ", + rk->rk_conf.ssl.keystore_location); return -1; } @@ -1270,10 +1272,12 @@ static int rd_kafka_ssl_set_certs(rd_kafka_t *rk, EVP_PKEY_free(pkey); X509_free(cert); PKCS12_free(p12); + BIO_free(bio); if (ca != NULL) sk_X509_pop_free(ca, X509_free); rd_snprintf(errstr, errstr_size, - "Failed to parse PKCS#12 file: %s: ", + "Failed to parse ssl.keystore.location " + "PKCS#12 file: %s: ", rk->rk_conf.ssl.keystore_location); return -1; } @@ -1282,6 +1286,7 @@ static int rd_kafka_ssl_set_certs(rd_kafka_t *rk, sk_X509_pop_free(ca, X509_free); PKCS12_free(p12); + BIO_free(bio); r = SSL_CTX_use_certificate(ctx, cert); X509_free(cert); diff --git a/tests/0004-conf.c b/tests/0004-conf.c index 52f6a0204d..4b2980a243 100644 --- a/tests/0004-conf.c +++ b/tests/0004-conf.c @@ -643,7 +643,51 @@ int main_0004_conf(int argc, char **argv) { "invalid ssl.ca.location"); TEST_SAY("rd_kafka_new() failed as 
expected: %s\n", errstr); } -#endif + +#ifdef _WIN32 + { + FILE *fp; + TEST_SAY( + "Verifying that OpenSSL_AppLink " + "is not needed (#3554)\n"); + + /* Create dummy file so the file open works, + * but parsing fails. */ + fp = fopen("_tmp_0004", "w"); + TEST_ASSERT(fp != NULL, "Failed to create dummy file: %s", + rd_strerror(errno)); + if (fwrite("?", 1, 1, fp) != 1) + TEST_FAIL("Failed to write to dummy file _tmp_0004: %s", + rd_strerror(errno)); + fclose(fp); + + conf = rd_kafka_conf_new(); + + test_conf_set(conf, "security.protocol", "SSL"); + test_conf_set(conf, "ssl.keystore.location", "_tmp_0004"); + test_conf_set(conf, "ssl.keystore.password", "x"); + + /* Prior to the fix OpenSSL will assert with a message like + * this: "OPENSSL_Uplink(00007FF9C0229D30,08): no + * OPENSSL_Applink" + * and the program will exit with error code 1. */ + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)); + _unlink("tmp_0004"); + + TEST_ASSERT(!rk, + "Expected rd_kafka_new() to fail due to " + "dummy ssl.keystore.location"); + TEST_ASSERT(strstr(errstr, "ssl.keystore.location") != NULL, + "Expected rd_kafka_new() to fail with " + "dummy ssl.keystore.location, not: %s", + errstr); + + TEST_SAY("rd_kafka_new() failed as expected: %s\n", errstr); + } +#endif /* _WIN32 */ + +#endif /* WITH_SSL */ /* Canonical int values, aliases, s2i-verified strings, doubles */ {