diff --git a/X11/XF86keysym.h b/X11/XF86keysym.h index 7b248fa463..57a10569dd 100644 --- a/X11/XF86keysym.h +++ b/X11/XF86keysym.h @@ -385,6 +385,7 @@ #define XF86XK_LightsToggle _EVDEVK(0x21e) /* v3.10 KEY_LIGHTS_TOGGLE */ #define XF86XK_ALSToggle _EVDEVK(0x230) /* v3.13 KEY_ALS_TOGGLE */ /* Use: XF86XK_RotationLockToggle _EVDEVK(0x231) v4.16 KEY_ROTATE_LOCK_TOGGLE */ +#define XF86XK_RefreshRateToggle _EVDEVK(0x232) /* v6.9 KEY_REFRESH_RATE_TOGGLE */ #define XF86XK_Buttonconfig _EVDEVK(0x240) /* v3.16 KEY_BUTTONCONFIG */ #define XF86XK_Taskmanager _EVDEVK(0x241) /* v3.16 KEY_TASKMANAGER */ #define XF86XK_Journal _EVDEVK(0x242) /* v3.16 KEY_JOURNAL */ @@ -399,6 +400,8 @@ #define XF86XK_CameraAccessEnable _EVDEVK(0x24b) /* v6.2 KEY_CAMERA_ACCESS_ENABLE */ #define XF86XK_CameraAccessDisable _EVDEVK(0x24c) /* v6.2 KEY_CAMERA_ACCESS_DISABLE */ #define XF86XK_CameraAccessToggle _EVDEVK(0x24d) /* v6.2 KEY_CAMERA_ACCESS_TOGGLE */ +#define XF86XK_Accessibility _EVDEVK(0x24e) /* v6.10 KEY_ACCESSIBILITY */ +#define XF86XK_DoNotDisturb _EVDEVK(0x24f) /* v6.10 KEY_DO_NOT_DISTURB */ #define XF86XK_BrightnessMin _EVDEVK(0x250) /* v3.16 KEY_BRIGHTNESS_MIN */ #define XF86XK_BrightnessMax _EVDEVK(0x251) /* v3.16 KEY_BRIGHTNESS_MAX */ #define XF86XK_KbdInputAssistPrev _EVDEVK(0x260) /* v3.18 KEY_KBDINPUTASSIST_PREV */ diff --git a/X11/Xdmcp.h b/X11/Xdmcp.h index a6f6f88b4c..e24cc43f75 100644 --- a/X11/Xdmcp.h +++ b/X11/Xdmcp.h @@ -128,6 +128,12 @@ typedef char *XdmcpNetaddr; # define XDM_ACCESS_ATTRIBUTE(X) #endif +#if __has_attribute(pure) +# define XDM_PURE_ATTRIBUTE __attribute__((pure)) +#else +# define XDM_PURE_ATTRIBUTE +#endif + XDM_ACCESS_ATTRIBUTE((read_write, 1)) XDM_ACCESS_ATTRIBUTE((read_only, 2)) extern int XdmcpWriteARRAY16(XdmcpBufferPtr buffer, const ARRAY16Ptr array); XDM_ACCESS_ATTRIBUTE((read_write, 1)) XDM_ACCESS_ATTRIBUTE((read_only, 2)) @@ -169,6 +175,7 @@ XDM_ACCESS_ATTRIBUTE((read_write, 2)) extern int XdmcpFill(int fd, XdmcpBufferPtr buffer, XdmcpNetaddr from, int *fromlen); XDM_ACCESS_ATTRIBUTE((read_only, 1)) +XDM_PURE_ATTRIBUTE extern int XdmcpReadRemaining(const XdmcpBufferPtr buffer); XDM_ACCESS_ATTRIBUTE((read_write, 1)) @@ -184,6 +191,7 @@ XDM_ACCESS_ATTRIBUTE((read_only, 1)) XDM_ACCESS_ATTRIBUTE((write_only, 2)) extern int XdmcpCopyARRAY8(const ARRAY8Ptr src, ARRAY8Ptr dst); XDM_ACCESS_ATTRIBUTE((read_only, 1)) XDM_ACCESS_ATTRIBUTE((read_only, 2)) +XDM_PURE_ATTRIBUTE extern int XdmcpARRAY8Equal(const ARRAY8Ptr array1, const ARRAY8Ptr array2); XDM_ACCESS_ATTRIBUTE((write_only, 1)) @@ -201,6 +209,7 @@ extern void XdmcpUnwrap(unsigned char *input, unsigned char *wrapper, unsigned c #endif XDM_ACCESS_ATTRIBUTE((read_only, 1)) XDM_ACCESS_ATTRIBUTE((read_only, 2)) +XDM_PURE_ATTRIBUTE extern int XdmcpCompareKeys (const XdmAuthKeyPtr a, const XdmAuthKeyPtr b); XDM_ACCESS_ATTRIBUTE((write_only, 1))
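A note on the XDM_PURE_ATTRIBUTE additions above: GCC and Clang define a "pure" function as one whose return value depends only on its arguments and on readable global memory, with no observable side effects, which allows the optimizer to merge or hoist repeated calls. A minimal sketch of the effect, illustrative only -- check_sum and doubled are hypothetical names, not part of the patch:

    /* Pure: reads only its arguments, writes nothing. */
    __attribute__((pure))
    static int check_sum(const unsigned char *buf, int len)
    {
        int s = 0;
        for (int i = 0; i < len; i++)
            s += buf[i];
        return s;
    }

    int doubled(const unsigned char *buf, int len)
    {
        /* Because check_sum is pure, the compiler may evaluate the two
         * identical calls once and reuse the result. */
        return check_sum(buf, len) + check_sum(buf, len);
    }

That is why the attribute fits read-only predicates such as XdmcpReadRemaining, XdmcpARRAY8Equal, and XdmcpCompareKeys.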
diff --git a/X11/Xlibint.h b/X11/Xlibint.h index 6b0799d83a..1c6fa0c7f3 100644 --- a/X11/Xlibint.h +++ b/X11/Xlibint.h @@ -435,8 +435,8 @@ X11_EXTERN LockInfoPtr _Xglobal_lock; #define _XLockMutex(lock) if (_XLockMutex_fn) (*_XLockMutex_fn)(lock) #define _XUnlockMutex(lock) if (_XUnlockMutex_fn) (*_XUnlockMutex_fn)(lock) #endif -#define _XCreateMutex(lock) if (_XCreateMutex_fn) (*_XCreateMutex_fn)(lock); -#define _XFreeMutex(lock) if (_XFreeMutex_fn) (*_XFreeMutex_fn)(lock); +#define _XCreateMutex(lock) if (_XCreateMutex_fn) (*_XCreateMutex_fn)(lock) +#define _XFreeMutex(lock) if (_XFreeMutex_fn) (*_XFreeMutex_fn)(lock) #else /* XTHREADS */ #define LockDisplay(dis) @@ -647,13 +647,13 @@ extern void _XFlushGCCache(Display *dpy, GC gc); * "len" is the length of the data buffer. */ #ifndef DataRoutineIsProcedure -#define Data(dpy, data, len) {\ +#define Data(dpy, data, len) do {\ if (dpy->bufptr + (len) <= dpy->bufmax) {\ memcpy(dpy->bufptr, data, (size_t)(len));\ dpy->bufptr += ((size_t)((len) + 3) & (size_t)~3);\ } else\ _XSend(dpy, (_Xconst char*)(data), (long)(len));\ -} +} while (0) #endif /* DataRoutineIsProcedure */ @@ -671,12 +671,13 @@ extern void _XFlushGCCache(Display *dpy, GC gc); * BufAlloc (xTextElt *, elt, nbytes) */ -#define BufAlloc(type, ptr, n) \ +#define BufAlloc(type, ptr, n) do { \ if (dpy->bufptr + (n) > dpy->bufmax) \ _XFlush (dpy); \ ptr = (type) dpy->bufptr; \ memset(ptr, '\0', (size_t)(n)); \ - dpy->bufptr += (n); + dpy->bufptr += (n); \ +} while (0) #define Data16(dpy, data, len) Data((dpy), (_Xconst char *)(data), (len)) #define _XRead16Pad(dpy, data, len) _XReadPad((dpy), (char *)(data), (len)) @@ -719,7 +720,7 @@ extern void _XRead32( * char. */ #define CI_GET_CHAR_INFO_1D(fs,col,def,cs) \ -{ \ +do { \ cs = def; \ if (col >= fs->min_char_or_byte2 && col <= fs->max_char_or_byte2) { \ if (fs->per_char == NULL) { \ @@ -729,7 +730,7 @@ if (CI_NONEXISTCHAR(cs)) cs = def; \ } \ } \ -} +} while (0) #define CI_GET_DEFAULT_INFO_1D(fs,cs) \ CI_GET_CHAR_INFO_1D (fs, fs->default_char, NULL, cs) @@ -741,7 +742,7 @@ extern void _XRead32( * column. This is used for fonts that have more than row zero. */ #define CI_GET_CHAR_INFO_2D(fs,row,col,def,cs) \ -{ \ +do { \ cs = def; \ if (row >= fs->min_byte1 && row <= fs->max_byte1 && \ col >= fs->min_char_or_byte2 && col <= fs->max_char_or_byte2) { \ @@ -755,19 +756,19 @@ extern void _XRead32( if (CI_NONEXISTCHAR(cs)) cs = def; \ } \ } \ -} +} while (0) #define CI_GET_DEFAULT_INFO_2D(fs,cs) \ -{ \ +do { \ unsigned int r = (fs->default_char >> 8); \ unsigned int c = (fs->default_char & 0xff); \ CI_GET_CHAR_INFO_2D (fs, r, c, NULL, cs); \ -} +} while (0) /* srcvar must be a variable for large architecture version */ #define OneDataCard32(dpy,dstaddr,srcvar) \ - { *(CARD32 *)(dstaddr) = (srcvar); } + do { *(CARD32 *)(dstaddr) = (srcvar); } while (0) typedef struct _XInternalAsync { @@ -807,12 +808,12 @@ typedef struct _XAsyncEState { } _XAsyncErrorState; extern void _XDeqAsyncHandler(Display *dpy, _XAsyncHandler *handler); -#define DeqAsyncHandler(dpy,handler) { \ +#define DeqAsyncHandler(dpy,handler) do { \ if (dpy->async_handlers == (handler)) \ dpy->async_handlers = (handler)->next; \ else \ _XDeqAsyncHandler(dpy, handler); \ - } + } while (0) typedef void (*FreeFuncType) ( Display* /* display */
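The do { ... } while (0) conversions in Xlibint.h above address the classic multi-statement-macro pitfall: a bare { ... } body followed by the caller's semicolon terminates an enclosing if, so a following else no longer parses. A sketch of the failure mode, illustrative only -- f, g, and the macro names are hypothetical:

    void f(int x);
    void g(int x);

    #define EMIT_BAD(x)  { f(x); g(x); }
    #define EMIT_OK(x)   do { f(x); g(x); } while (0)

    void demo(int cond, int x)
    {
        /* EMIT_BAD(x); here would expand to "{ ... } ;" -- the stray
         * semicolon ends the if statement and the else below becomes a
         * syntax error. EMIT_OK(x); expands to a single statement, so
         * the else binds as expected. */
        if (cond)
            EMIT_OK(x);
        else
            g(x);
    }

The same reasoning applies to the Data, BufAlloc, CI_GET_*, OneDataCard32, and DeqAsyncHandler macros changed above; it also explains dropping the trailing semicolons from _XCreateMutex and _XFreeMutex.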
diff --git a/X11/Xpoll.h.in b/X11/Xpoll.h.in index 9f127e1069..f7c42e6ead 100644 --- a/X11/Xpoll.h.in +++ b/X11/Xpoll.h.in @@ -163,7 +163,7 @@ typedef struct fd_set { } #else /* USE_POLL */ -#include <sys/poll.h> +#include <poll.h> #endif /* USE_POLL */ #else /* WIN32 */ diff --git a/X11/extensions/recordproto.h b/X11/extensions/recordproto.h index d2016eb08c..22a6cd91a1 100644 --- a/X11/extensions/recordproto.h +++ b/X11/extensions/recordproto.h @@ -21,6 +21,7 @@ #ifndef _RECORDPROTO_H_ #define _RECORDPROTO_H_ +#include <X11/Xmd.h> #include <X11/extensions/recordconst.h> /* only difference between 1.12 and 1.13 is byte order of device events, diff --git a/X11/extensions/securproto.h b/X11/extensions/securproto.h index d9e120c906..7c6831395f 100644 --- a/X11/extensions/securproto.h +++ b/X11/extensions/securproto.h @@ -27,6 +27,7 @@ from The Open Group. #ifndef _SECURPROTO_H #define _SECURPROTO_H +#include <X11/Xmd.h> #include <X11/extensions/secur.h> #define X_SecurityQueryVersion 0 diff --git a/X11/xtrans/Xtrans.c b/X11/xtrans/Xtrans.c index d827325fe1..24f847c401 100644 --- a/X11/xtrans/Xtrans.c +++ b/X11/xtrans/Xtrans.c @@ -136,7 +136,7 @@ void TRANS(FreeConnInfo) (XtransConnInfo ciptr) { - prmsg (3,"FreeConnInfo(%p)\n", ciptr); + prmsg (3,"FreeConnInfo(%p)\n", (void *) ciptr); if (ciptr->addr) free (ciptr->addr); @@ -160,7 +160,6 @@ TRANS(SelectTransport) (const char *protocol) #ifndef HAVE_STRCASECMP char protobuf[PROTOBUFSIZE]; #endif - int i; prmsg (3,"SelectTransport(%s)\n", protocol); @@ -173,14 +172,14 @@ TRANS(SelectTransport) (const char *protocol) strncpy (protobuf, protocol, PROTOBUFSIZE - 1); protobuf[PROTOBUFSIZE-1] = '\0'; - for (i = 0; i < PROTOBUFSIZE && protobuf[i] != '\0'; i++) + for (unsigned int i = 0; i < PROTOBUFSIZE && protobuf[i] != '\0'; i++) if (isupper ((unsigned char)protobuf[i])) protobuf[i] = tolower ((unsigned char)protobuf[i]); #endif /* Look at all of the configured protocols */ - for (i = 0; i < NUMTRANS; i++) + for (unsigned int i = 0; i < NUMTRANS; i++) { #ifndef HAVE_STRCASECMP if (!strcmp (protobuf, Xtransports[i].transport->TransName)) @@ -254,9 +253,10 @@ TRANS(ParseAddress) (const char *address, _protocol = mybuf; - if ( ((mybuf = strchr (mybuf,'/')) == NULL) && - ((mybuf = strrchr (tmpptr,':')) == NULL) ) - { + if ((mybuf == NULL) || + ( ((mybuf = strchr (mybuf, '/')) == NULL) && + ((mybuf = strrchr (tmpptr, ':')) == NULL) ) ) + { /* address is in a bad format */ *protocol = NULL; *host = NULL; @@ -510,18 +510,19 @@ TRANS(Reopen) (int type, int trans_id, int fd, const char *port) XtransConnInfo ciptr = NULL; Xtransport *thistrans = NULL; char *save_port; - int i; prmsg (2,"Reopen(%d,%d,%s)\n", trans_id, fd, port); /* Determine the transport type */ - for (i = 0; i < NUMTRANS; i++) + for (unsigned int i = 0; i < NUMTRANS; i++) + { if (Xtransports[i].transport_id == trans_id) { thistrans = Xtransports[i].transport; break; } + } if (thistrans == NULL) { @@ -613,9 +614,8 @@ TRANS(GetReopenInfo) (XtransConnInfo ciptr, int *trans_id, int *fd, char **port) { - int i; - - for (i = 0; i < NUMTRANS; i++) + for (unsigned int i = 0; i < NUMTRANS; i++) + { if (Xtransports[i].transport == ciptr->transptr) { *trans_id = Xtransports[i].transport_id; @@ -626,6 +626,7 @@ TRANS(GetReopenInfo) (XtransConnInfo ciptr, else return 1; } + } return 0; } @@ -924,7 +925,7 @@ TRANS(Read) (XtransConnInfo ciptr, char *buf, int size) } int -TRANS(Write) (XtransConnInfo ciptr, char *buf, int size) +TRANS(Write) (XtransConnInfo ciptr, const char *buf, int size) { return ciptr->transptr->Write (ciptr, buf, size); @@ -1047,13 +1048,12 @@ complete_network_count (void) { int count = 0; int found_local = 0; - int i; /* * For a complete network, we only need one LOCALCONN transport to work */ - for (i = 0; i < NUMTRANS; i++) + for (unsigned int i = 0; i < NUMTRANS; i++) { if (Xtransports[i].transport->flags & TRANS_ALIAS || Xtransports[i].transport->flags & TRANS_NOLISTEN) @@ -1086,7 +1086,7 @@ receive_listening_fds(const char* port, XtransConnInfo* temp_ciptrs, return -1; } - for (i = 0; i < systemd_listen_fds && *count_ret < NUMTRANS; i++) + for (i = 0; i < systemd_listen_fds && *count_ret < (int)NUMTRANS; i++) { struct sockaddr_storage a; int ti; @@ -1153,14 +1153,14 @@ TRANS(MakeAllCOTSServerListeners) (const char *port, int *partial, { char buffer[256]; /* ??? What size ?? 
*/ - XtransConnInfo ciptr, temp_ciptrs[NUMTRANS]; - int status, i, j; + XtransConnInfo ciptr, temp_ciptrs[NUMTRANS] = { NULL }; + int status, j; #if defined(IPv6) && defined(AF_INET6) int ipv6_succ = 0; #endif prmsg (2,"MakeAllCOTSServerListeners(%s,%p)\n", - port ? port : "NULL", ciptrs_ret); + port ? port : "NULL", (void *) ciptrs_ret); *count_ret = 0; @@ -1178,7 +1178,7 @@ TRANS(MakeAllCOTSServerListeners) (const char *port, int *partial, if (receive_listening_fds(port, temp_ciptrs, count_ret) < 0) return -1; - for (i = 0; i < NUMTRANS; i++) + for (unsigned int i = 0; i < NUMTRANS; i++) { Xtransport *trans = Xtransports[i].transport; unsigned int flags = 0; @@ -1226,7 +1226,8 @@ TRANS(MakeAllCOTSServerListeners) (const char *port, int *partial, "MakeAllCOTSServerListeners: server already running\n"); for (j = 0; j < *count_ret; j++) - TRANS(Close) (temp_ciptrs[j]); + if (temp_ciptrs[j] != NULL) + TRANS(Close) (temp_ciptrs[j]); *count_ret = 0; *ciptrs_ret = NULL; @@ -1270,7 +1271,7 @@ TRANS(MakeAllCOTSServerListeners) (const char *port, int *partial, return -1; } - for (i = 0; i < *count_ret; i++) + for (int i = 0; i < *count_ret; i++) { (*ciptrs_ret)[i] = temp_ciptrs[i]; } diff --git a/X11/xtrans/Xtrans.h b/X11/xtrans/Xtrans.h index fbf385e014..54eb970cb2 100644 --- a/X11/xtrans/Xtrans.h +++ b/X11/xtrans/Xtrans.h @@ -328,7 +328,7 @@ int TRANS(Read)( int TRANS(Write)( XtransConnInfo, /* ciptr */ - char *, /* buf */ + const char *, /* buf */ int /* size */ ); diff --git a/X11/xtrans/Xtransint.h b/X11/xtrans/Xtransint.h index a3384a9965..4d09593f43 100644 --- a/X11/xtrans/Xtransint.h +++ b/X11/xtrans/Xtransint.h @@ -239,7 +239,7 @@ typedef struct _Xtransport { int (*Write)( XtransConnInfo, /* connection */ - char *, /* buf */ + const char *, /* buf */ int /* size */ ); diff --git a/X11/xtrans/Xtranslcl.c b/X11/xtrans/Xtranslcl.c index 24dd9a1222..33dd4eb0fc 100644 --- a/X11/xtrans/Xtranslcl.c +++ b/X11/xtrans/Xtranslcl.c @@ -437,7 +437,7 @@ TRANS(NAMEDResetListener) (XtransConnInfo ciptr) struct sockaddr_un *sockname=(struct sockaddr_un *) ciptr->addr; struct stat statb; - prmsg(2,"NAMEDResetListener(%p, %d)\n", ciptr, ciptr->fd); + prmsg(2,"NAMEDResetListener(%p, %d)\n", (void *) ciptr, ciptr->fd); if (ciptr->fd != -1) { /* @@ -464,7 +464,7 @@ TRANS(NAMEDAccept)(XtransConnInfo ciptr, XtransConnInfo newciptr, int *status) { struct strrecvfd str; - prmsg(2,"NAMEDAccept(%p->%d)\n", ciptr, ciptr->fd); + prmsg(2,"NAMEDAccept(%p->%d)\n", (void *) ciptr, ciptr->fd); if( ioctl(ciptr->fd, I_RECVFD, &str ) < 0 ) { prmsg(1, "NAMEDAccept: ioctl(I_RECVFD) failed, errno=%d\n", errno); @@ -753,7 +753,7 @@ TRANS(LocalInitTransports)(const char *protocol) workingXLOCAL = freeXLOCAL = strdup (protocol); } else { - XLOCAL=(char *)getenv("XLOCAL"); + XLOCAL = getenv("XLOCAL"); if(XLOCAL==NULL) XLOCAL=DEF_XLOCAL; workingXLOCAL = freeXLOCAL = strdup (XLOCAL); @@ -777,7 +777,6 @@ static LOCALtrans2dev * TRANS(LocalGetNextTransport)(void) { - int i; char *typetocheck; prmsg(3,"LocalGetNextTransport()\n"); @@ -791,7 +790,7 @@ TRANS(LocalGetNextTransport)(void) if(workingXLOCAL && *workingXLOCAL) *workingXLOCAL++='\0'; - for(i=0;i%d,%s)\n",ciptr,ciptr->fd,port); + prmsg(2,"LocalCreateListener(%p->%d,%s)\n", (void *) ciptr, ciptr->fd, port); return 0; } @@ -1178,7 +1176,7 @@ TRANS(LocalResetListener)(XtransConnInfo ciptr) { LOCALtrans2dev *transptr; - prmsg(2,"LocalResetListener(%p)\n",ciptr); + prmsg(2,"LocalResetListener(%p)\n", (void *) ciptr); transptr=(LOCALtrans2dev *)ciptr->priv; if (transptr->devreset != 
NULL) { @@ -1195,7 +1193,7 @@ TRANS(LocalAccept)(XtransConnInfo ciptr, int *status) XtransConnInfo newciptr; LOCALtrans2dev *transptr; - prmsg(2,"LocalAccept(%p->%d)\n", ciptr, ciptr->fd); + prmsg(2,"LocalAccept(%p->%d)\n", (void *) ciptr, ciptr->fd); transptr=(LOCALtrans2dev *)ciptr->priv; @@ -1233,7 +1231,7 @@ TRANS(LocalConnect)(XtransConnInfo ciptr, const char *host _X_UNUSED, const char *port) { - prmsg(2,"LocalConnect(%p->%d,%s)\n", ciptr, ciptr->fd, port); + prmsg(2,"LocalConnect(%p->%d,%s)\n", (void *) ciptr, ciptr->fd, port); return 0; } @@ -1245,7 +1243,8 @@ static int TRANS(LocalBytesReadable)(XtransConnInfo ciptr, BytesReadable_t *pend ) { - prmsg(2,"LocalBytesReadable(%p->%d,%p)\n", ciptr, ciptr->fd, pend); + prmsg(2,"LocalBytesReadable(%p->%d,%p)\n", + (void *) ciptr, ciptr->fd, (void *) pend); return ioctl(ciptr->fd, FIONREAD, (char *)pend); } @@ -1254,16 +1253,16 @@ static int TRANS(LocalRead)(XtransConnInfo ciptr, char *buf, int size) { - prmsg(2,"LocalRead(%d,%p,%d)\n", ciptr->fd, buf, size ); + prmsg(2,"LocalRead(%d,%p,%d)\n", ciptr->fd, (void *) buf, size ); return read(ciptr->fd,buf,size); } static int -TRANS(LocalWrite)(XtransConnInfo ciptr, char *buf, int size) +TRANS(LocalWrite)(XtransConnInfo ciptr, const char *buf, int size) { - prmsg(2,"LocalWrite(%d,%p,%d)\n", ciptr->fd, buf, size ); + prmsg(2,"LocalWrite(%d,%p,%d)\n", ciptr->fd, (const void *) buf, size ); return write(ciptr->fd,buf,size); } @@ -1272,7 +1271,7 @@ static int TRANS(LocalReadv)(XtransConnInfo ciptr, struct iovec *buf, int size) { - prmsg(2,"LocalReadv(%d,%p,%d)\n", ciptr->fd, buf, size ); + prmsg(2,"LocalReadv(%d,%p,%d)\n", ciptr->fd, (void *) buf, size ); return READV(ciptr,buf,size); } @@ -1281,7 +1280,7 @@ static int TRANS(LocalWritev)(XtransConnInfo ciptr, struct iovec *buf, int size) { - prmsg(2,"LocalWritev(%d,%p,%d)\n", ciptr->fd, buf, size ); + prmsg(2,"LocalWritev(%d,%p,%d)\n", ciptr->fd, (const void *) buf, size ); return WRITEV(ciptr,buf,size); } @@ -1290,7 +1289,7 @@ static int TRANS(LocalDisconnect)(XtransConnInfo ciptr) { - prmsg(2,"LocalDisconnect(%p->%d)\n", ciptr, ciptr->fd); + prmsg(2,"LocalDisconnect(%p->%d)\n", (void *) ciptr, ciptr->fd); return 0; } @@ -1302,7 +1301,7 @@ TRANS(LocalClose)(XtransConnInfo ciptr) struct sockaddr_un *sockname=(struct sockaddr_un *) ciptr->addr; int ret; - prmsg(2,"LocalClose(%p->%d)\n", ciptr, ciptr->fd ); + prmsg(2,"LocalClose(%p->%d)\n", (void *) ciptr, ciptr->fd ); ret=close(ciptr->fd); @@ -1324,7 +1323,7 @@ TRANS(LocalCloseForCloning)(XtransConnInfo ciptr) { int ret; - prmsg(2,"LocalCloseForCloning(%p->%d)\n", ciptr, ciptr->fd ); + prmsg(2,"LocalCloseForCloning(%p->%d)\n", (void *) ciptr, ciptr->fd ); /* Don't unlink path */ @@ -1351,7 +1350,7 @@ static const char * local_aliases[] = { NULL }; #endif -Xtransport TRANS(LocalFuncs) = { +static Xtransport TRANS(LocalFuncs) = { /* Local Interface */ "local", TRANS_ALIAS | TRANS_LOCAL, @@ -1391,7 +1390,7 @@ Xtransport TRANS(LocalFuncs) = { #ifdef LOCAL_TRANS_NAMED -Xtransport TRANS(NAMEDFuncs) = { +static Xtransport TRANS(NAMEDFuncs) = { /* Local Interface */ "named", TRANS_LOCAL, @@ -1428,7 +1427,7 @@ Xtransport TRANS(NAMEDFuncs) = { TRANS(LocalCloseForCloning), }; -Xtransport TRANS(PIPEFuncs) = { +static Xtransport TRANS(PIPEFuncs) = { /* Local Interface */ "pipe", TRANS_ALIAS | TRANS_LOCAL, diff --git a/X11/xtrans/Xtranssock.c b/X11/xtrans/Xtranssock.c index 1a2f92fa56..4757086643 100644 --- a/X11/xtrans/Xtranssock.c +++ b/X11/xtrans/Xtranssock.c @@ -224,7 +224,8 @@ static Sockettrans2dev 
Sockettrans2devtab[] = { static int TRANS(SocketINETClose) (XtransConnInfo ciptr); #endif -#if defined(TCPCONN) || defined(TRANS_REOPEN) +#if (defined(TCPCONN) && defined(TRANS_SERVER)) || defined(TRANS_REOPEN) || \ + !defined(IPv6) static int is_numeric (const char *str) { @@ -294,7 +295,7 @@ TRANS(SocketSelectFamily) (int first, const char *family) prmsg (3,"SocketSelectFamily(%s)\n", family); - for (i = first + 1; i < NUMSOCKETFAMILIES;i++) + for (i = first + 1; i < (int)NUMSOCKETFAMILIES; i++) { if (!strcmp (family, Sockettrans2devtab[i].transname)) return i; @@ -321,7 +322,7 @@ TRANS(SocketINETGetAddr) (XtransConnInfo ciptr) void *socknamePtr; SOCKLEN_T namelen; - prmsg (3,"SocketINETGetAddr(%p)\n", ciptr); + prmsg (3,"SocketINETGetAddr(%p)\n", (void *) ciptr); #if defined(IPv6) && defined(AF_INET6) namelen = sizeof(socknamev6); @@ -791,7 +792,7 @@ TRANS(SocketINETGetPeerAddr) (XtransConnInfo ciptr) bzero(socknamePtr, namelen); - prmsg (3,"SocketINETGetPeerAddr(%p)\n", ciptr); + prmsg (3,"SocketINETGetPeerAddr(%p)\n", (void *) ciptr); if (getpeername (ciptr->fd, (struct sockaddr *) socknamePtr, (void *)&namelen) < 0) @@ -836,14 +837,22 @@ TRANS(SocketOpen) (int i, int type) return NULL; } - if ((ciptr->fd = socket(Sockettrans2devtab[i].family, type, - Sockettrans2devtab[i].protocol)) < 0 + ciptr->fd = socket(Sockettrans2devtab[i].family, type, + Sockettrans2devtab[i].protocol); + #ifndef WIN32 #if (defined(X11_t) && !defined(USE_POLL)) || defined(FS_t) || defined(FONT_t) - || ciptr->fd >= sysconf(_SC_OPEN_MAX) + if (ciptr->fd >= sysconf(_SC_OPEN_MAX)) + { + prmsg (2, "SocketOpen: socket() returned out of range fd %d\n", + ciptr->fd); + close (ciptr->fd); + ciptr->fd = -1; + } #endif #endif - ) { + + if (ciptr->fd < 0) { #ifdef WIN32 errno = WSAGetLastError(); #endif @@ -1002,7 +1011,7 @@ static XtransConnInfo TRANS(SocketOpenCOTSClientBase) (const char *transname, const char *protocol, const char *host, const char *port, int previndex) { - XtransConnInfo ciptr; + XtransConnInfo ciptr = NULL; int i = previndex; prmsg (2, "SocketOpenCOTSClient(%s,%s,%s)\n", @@ -1051,7 +1060,7 @@ TRANS(SocketOpenCOTSServer) (Xtransport *thistrans, const char *protocol, const char *host, const char *port) { - XtransConnInfo ciptr; + XtransConnInfo ciptr = NULL; int i = -1; prmsg (2,"SocketOpenCOTSServer(%s,%s,%s)\n", protocol, host, port); @@ -1170,7 +1179,7 @@ static int set_sun_path(const char *port, const char *upath, char *path, int abstract) { struct sockaddr_un s; - int maxlen = sizeof(s.sun_path) - 1; + ssize_t maxlen = sizeof(s.sun_path) - 1; const char *at = ""; if (!port || !*port || !path) @@ -1186,7 +1195,7 @@ set_sun_path(const char *port, const char *upath, char *path, int abstract) if (*port == '/') /* a full pathname */ upath = ""; - if (strlen(port) + strlen(upath) > maxlen) + if ((ssize_t)(strlen(at) + strlen(upath) + strlen(port)) > maxlen) return -1; snprintf(path, sizeof(s.sun_path), "%s%s%s", at, upath, port); return 0; @@ -1209,7 +1218,7 @@ TRANS(SocketCreateListener) (XtransConnInfo ciptr, int fd = ciptr->fd; int retry; - prmsg (3, "SocketCreateListener(%p,%d)\n", ciptr, fd); + prmsg (3, "SocketCreateListener(%p,%d)\n", (void *) ciptr, fd); if (Sockettrans2devtab[ciptr->index].family == AF_INET #if defined(IPv6) && defined(AF_INET6) @@ -1222,7 +1231,7 @@ TRANS(SocketCreateListener) (XtransConnInfo ciptr, else retry = 0; - while (bind (fd, (struct sockaddr *) sockname, namelen) < 0) + while (bind (fd, sockname, namelen) < 0) { if (errno == EADDRINUSE) { if (flags & 
ADDR_IN_USE_ALLOWED) @@ -1532,7 +1541,7 @@ TRANS(SocketUNIXResetListener) (XtransConnInfo ciptr) abstract = ciptr->transptr->flags & TRANS_ABSTRACT; #endif - prmsg (3, "SocketUNIXResetListener(%p,%d)\n", ciptr, ciptr->fd); + prmsg (3, "SocketUNIXResetListener(%p,%d)\n", (void *) ciptr, ciptr->fd); if (!abstract && ( stat (unsock->sun_path, &statb) == -1 || @@ -1610,7 +1619,7 @@ TRANS(SocketINETAccept) (XtransConnInfo ciptr, int *status) #endif SOCKLEN_T namelen = sizeof(sockname); - prmsg (2, "SocketINETAccept(%p,%d)\n", ciptr, ciptr->fd); + prmsg (2, "SocketINETAccept(%p,%d)\n", (void *) ciptr, ciptr->fd); if ((newciptr = calloc (1, sizeof(struct _XtransConnInfo))) == NULL) { @@ -1685,7 +1694,7 @@ TRANS(SocketUNIXAccept) (XtransConnInfo ciptr, int *status) struct sockaddr_un sockname; SOCKLEN_T namelen = sizeof sockname; - prmsg (2, "SocketUNIXAccept(%p,%d)\n", ciptr, ciptr->fd); + prmsg (2, "SocketUNIXAccept(%p,%d)\n", (void *) ciptr, ciptr->fd); if ((newciptr = calloc (1, sizeof(struct _XtransConnInfo))) == NULL) { @@ -1833,6 +1842,11 @@ TRANS(SocketINETConnect) (XtransConnInfo ciptr, } } else { addrlist = malloc(sizeof(struct addrlist)); + if (addrlist == NULL) { + prmsg (1, "SocketINETConnect() can't allocate memory " + "for addrlist: %s\n", strerror(errno)); + return TRANS_CONNECT_FAILED; + } addrlist->firstaddr = NULL; } @@ -2415,7 +2429,7 @@ TRANS(SocketBytesReadable) (XtransConnInfo ciptr, BytesReadable_t *pend) { prmsg (2,"SocketBytesReadable(%p,%d,%p)\n", - ciptr, ciptr->fd, pend); + (void *) ciptr, ciptr->fd, (void *) pend); #ifdef WIN32 { int ret = ioctlsocket ((SOCKET) ciptr->fd, FIONREAD, (u_long *) pend); @@ -2538,7 +2552,7 @@ static int TRANS(SocketRead) (XtransConnInfo ciptr, char *buf, int size) { - prmsg (2,"SocketRead(%d,%p,%d)\n", ciptr->fd, buf, size); + prmsg (2,"SocketRead(%d,%p,%d)\n", ciptr->fd, (void *) buf, size); #if defined(WIN32) { @@ -2592,7 +2606,7 @@ static int TRANS(SocketReadv) (XtransConnInfo ciptr, struct iovec *buf, int size) { - prmsg (2,"SocketReadv(%d,%p,%d)\n", ciptr->fd, buf, size); + prmsg (2,"SocketReadv(%d,%p,%d)\n", ciptr->fd, (void *) buf, size); #if XTRANS_SEND_FDS { @@ -2633,7 +2647,7 @@ static int TRANS(SocketWritev) (XtransConnInfo ciptr, struct iovec *buf, int size) { - prmsg (2,"SocketWritev(%d,%p,%d)\n", ciptr->fd, buf, size); + prmsg (2,"SocketWritev(%d,%p,%d)\n", ciptr->fd, (void *) buf, size); #if XTRANS_SEND_FDS if (ciptr->send_fds) @@ -2675,10 +2689,10 @@ TRANS(SocketWritev) (XtransConnInfo ciptr, struct iovec *buf, int size) static int -TRANS(SocketWrite) (XtransConnInfo ciptr, char *buf, int size) +TRANS(SocketWrite) (XtransConnInfo ciptr, const char *buf, int size) { - prmsg (2,"SocketWrite(%d,%p,%d)\n", ciptr->fd, buf, size); + prmsg (2,"SocketWrite(%d,%p,%d)\n", ciptr->fd, (const void *) buf, size); #if defined(WIN32) { @@ -2694,7 +2708,7 @@ TRANS(SocketWrite) (XtransConnInfo ciptr, char *buf, int size) { struct iovec iov; - iov.iov_base = buf; + iov.iov_base = (void *) buf; iov.iov_len = size; return TRANS(SocketWritev)(ciptr, &iov, 1); } @@ -2707,7 +2721,7 @@ static int TRANS(SocketDisconnect) (XtransConnInfo ciptr) { - prmsg (2,"SocketDisconnect(%p,%d)\n", ciptr, ciptr->fd); + prmsg (2,"SocketDisconnect(%p,%d)\n", (void *) ciptr, ciptr->fd); #ifdef WIN32 { @@ -2726,7 +2740,7 @@ static int TRANS(SocketINETClose) (XtransConnInfo ciptr) { - prmsg (2,"SocketINETClose(%p,%d)\n", ciptr, ciptr->fd); + prmsg (2,"SocketINETClose(%p,%d)\n", (void *) ciptr, ciptr->fd); #ifdef WIN32 { @@ -2753,7 +2767,7 @@ TRANS(SocketUNIXClose) 
(XtransConnInfo ciptr) struct sockaddr_un *sockname = (struct sockaddr_un *) ciptr->addr; int ret; - prmsg (2,"SocketUNIXClose(%p,%d)\n", ciptr, ciptr->fd); + prmsg (2,"SocketUNIXClose(%p,%d)\n", (void *) ciptr, ciptr->fd); #if XTRANS_SEND_FDS cleanupFds(ciptr); @@ -2784,7 +2798,7 @@ TRANS(SocketUNIXCloseForCloning) (XtransConnInfo ciptr) int ret; prmsg (2,"SocketUNIXCloseForCloning(%p,%d)\n", - ciptr, ciptr->fd); + (void *) ciptr, ciptr->fd); #if XTRANS_SEND_FDS cleanupFds(ciptr); @@ -2808,7 +2822,7 @@ static const char* tcp_nolisten[] = { }; # endif -Xtransport TRANS(SocketTCPFuncs) = { +static Xtransport TRANS(SocketTCPFuncs) = { /* Socket Interface */ "tcp", TRANS_ALIAS, @@ -2843,7 +2857,7 @@ Xtransport TRANS(SocketTCPFuncs) = { TRANS(SocketINETClose), }; -Xtransport TRANS(SocketINETFuncs) = { +static Xtransport TRANS(SocketINETFuncs) = { /* Socket Interface */ "inet", 0, @@ -2879,7 +2893,7 @@ Xtransport TRANS(SocketINETFuncs) = { }; #if defined(IPv6) && defined(AF_INET6) -Xtransport TRANS(SocketINET6Funcs) = { +static Xtransport TRANS(SocketINET6Funcs) = { /* Socket Interface */ "inet6", 0, @@ -2955,7 +2969,7 @@ Xtransport TRANS(SocketHyperVFuncs) = { #ifdef UNIXCONN #if !defined(LOCALCONN) -Xtransport TRANS(SocketLocalFuncs) = { +static Xtransport TRANS(SocketLocalFuncs) = { /* Socket Interface */ "local", #ifdef HAVE_ABSTRACT_SOCKETS @@ -3002,7 +3016,7 @@ static const char* unix_nolisten[] = { "local" , NULL }; # endif # endif -Xtransport TRANS(SocketUNIXFuncs) = { +static Xtransport TRANS(SocketUNIXFuncs) = { /* Socket Interface */ "unix", #if !defined(LOCALCONN) && !defined(HAVE_ABSTRACT_SOCKETS) diff --git a/X11/xtrans/Xtransutil.c b/X11/xtrans/Xtransutil.c index 884cfdfea2..50262057e8 100644 --- a/X11/xtrans/Xtransutil.c +++ b/X11/xtrans/Xtransutil.c @@ -250,8 +250,9 @@ TRANS(GetMyNetworkId) (XtransConnInfo ciptr) struct sockaddr_un *saddr = (struct sockaddr_un *) addr; networkId = malloc (3 + strlen (transName) + strlen (hostnamebuf) + strlen (saddr->sun_path)); - sprintf (networkId, "%s/%s:%s", transName, - hostnamebuf, saddr->sun_path); + if (networkId != NULL) + sprintf (networkId, "%s/%s:%s", transName, + hostnamebuf, saddr->sun_path); break; } #endif /* defined(UNIXCONN) || defined(LOCALCONN) */ @@ -280,7 +281,8 @@ TRANS(GetMyNetworkId) (XtransConnInfo ciptr) snprintf (portnumbuf, sizeof(portnumbuf), "%d", portnum); networkId = malloc (3 + strlen (transName) + strlen (hostnamebuf) + strlen (portnumbuf)); - sprintf (networkId, "%s/%s:%s", transName, hostnamebuf, portnumbuf); + if (networkId != NULL) + sprintf (networkId, "%s/%s:%s", transName, hostnamebuf, portnumbuf); break; } #endif /* defined(TCPCONN) */ @@ -399,12 +401,15 @@ TRANS(GetPeerNetworkId) (XtransConnInfo ciptr) } - hostname = malloc (strlen (ciptr->transptr->TransName) + strlen (addr) + 2); - strcpy (hostname, ciptr->transptr->TransName); - strcat (hostname, "/"); - if (addr) - strcat (hostname, addr); - + hostname = malloc (strlen (ciptr->transptr->TransName) + + (addr ? 
strlen (addr) : 0) + 2); + if (hostname) + { + strcpy (hostname, ciptr->transptr->TransName); + strcat (hostname, "/"); + if (addr) + strcat (hostname, addr); + } return (hostname); } diff --git a/X11/xtrans/configure.ac b/X11/xtrans/configure.ac index f9b0e0b180..72a65e110a 100644 --- a/X11/xtrans/configure.ac +++ b/X11/xtrans/configure.ac @@ -21,8 +21,8 @@ # Initialize Autoconf AC_PREREQ([2.60]) -AC_INIT([xtrans], [1.5.0], - [https://gitlab.freedesktop.org/xorg/lib/libxtrans/issues], [xtrans]) +AC_INIT([xtrans], [1.5.2], + [https://gitlab.freedesktop.org/xorg/lib/libxtrans/-/issues], [xtrans]) AC_CONFIG_SRCDIR([Makefile.am]) # Initialize Automake diff --git a/X11/xtrans/doc/xtrans.xml b/X11/xtrans/doc/xtrans.xml index dd0030b86f..b8618cb348 100644 --- a/X11/xtrans/doc/xtrans.xml +++ b/X11/xtrans/doc/xtrans.xml @@ -243,7 +243,7 @@ typedef struct _Xtransport { int (*Write)( XtransConnInfo, /* connection */ - char *, /* buf */ + const char *, /* buf */ int /* size */ ); @@ -541,7 +541,7 @@ connection, and will return the minimum of the number bytes requested. int TRANS(Write) XtransConnInfo connection - char *buf + const char *buf int size diff --git a/fontconfig/.gitlab-ci.yml b/fontconfig/.gitlab-ci.yml index aab89ac226..7bd5bc6252 100644 --- a/fontconfig/.gitlab-ci.yml +++ b/fontconfig/.gitlab-ci.yml @@ -10,7 +10,7 @@ # and run ci-fairy generate-template. For details, see # https://freedesktop.pages.freedesktop.org/ci-templates/ci-fairy.html#templating-gitlab-ci-yml -.templates_sha: &template_sha 98b1218f146a1ec96d65e3ce0041f9a6ec5cb5e6 +.templates_sha: &template_sha e195d80f35b45cc73668be3767b923fd76c70ed5 include: # Fedora container builder template @@ -41,8 +41,8 @@ variables: # changing these will force rebuilding the associated image # Note: these tags have no meaning and are not tied to a particular # fontconfig version - FEDORA_TAG: '2024-04-22.1-3f4457bbcc1d' - FREEBSD_TAG: '2024-04-22.1-eae64220fd2b' + FEDORA_TAG: '2025-01-17.1-ad5c9d5d2ff5' + FREEBSD_TAG: '2025-01-17.1-285a32ac747a' FEDORA_EXEC: 'bash .gitlab-ci/fedora-install.sh' FREEBSD_EXEC: 'bash .gitlab-ci/freebsd-install.sh' @@ -96,51 +96,39 @@ fedora:rawhide@container-prep: variables: GIT_STRATEGY: none FDO_DISTRIBUTION_VERSION: 'rawhide' - FDO_DISTRIBUTION_PACKAGES: '@buildsys-build autoconf automake libtool gettext gettext-devel gperf expat-devel libxml2-devel freetype-devel json-c-devel git docbook-utils docbook-utils-pdf bubblewrap ninja-build wget python3-pip mingw64-expat mingw64-gcc mingw64-gettext mingw64-freetype mingw64-libxml2 wine' + FDO_DISTRIBUTION_PACKAGES: '@buildsys-build autoconf automake clang-devel libtool gettext gettext-devel gperf expat-devel libxml2-devel freetype-devel json-c-devel git docbook-utils docbook-utils-pdf bubblewrap ninja-build wget python3-pip mingw64-expat mingw64-gcc mingw64-gettext mingw64-freetype mingw64-libxml2 wine rust cargo bindgen-cli' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FDO_DISTRIBUTION_EXEC: $FEDORA_EXEC -fedora:40@container-prep: +fedora:41@container-prep: extends: .fdo.container-build@fedora stage: prep variables: GIT_STRATEGY: none - FDO_DISTRIBUTION_VERSION: '40' - FDO_DISTRIBUTION_PACKAGES: '@buildsys-build autoconf automake libtool gettext gettext-devel gperf expat-devel libxml2-devel freetype-devel json-c-devel git docbook-utils docbook-utils-pdf bubblewrap ninja-build wget python3-pip mingw64-expat mingw64-gcc mingw64-gettext mingw64-freetype mingw64-libxml2 wine' + FDO_DISTRIBUTION_VERSION: '41' + FDO_DISTRIBUTION_PACKAGES: '@buildsys-build autoconf automake 
clang-devel libtool gettext gettext-devel gperf expat-devel libxml2-devel freetype-devel json-c-devel git docbook-utils docbook-utils-pdf bubblewrap ninja-build wget python3-pip mingw64-expat mingw64-gcc mingw64-gettext mingw64-freetype mingw64-libxml2 wine rust cargo bindgen-cli' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FDO_DISTRIBUTION_EXEC: $FEDORA_EXEC -fedora:39@container-prep: +fedora:40@container-prep: extends: .fdo.container-build@fedora stage: prep variables: GIT_STRATEGY: none - FDO_DISTRIBUTION_VERSION: '39' - FDO_DISTRIBUTION_PACKAGES: '@buildsys-build autoconf automake libtool gettext gettext-devel gperf expat-devel libxml2-devel freetype-devel json-c-devel git docbook-utils docbook-utils-pdf bubblewrap ninja-build wget python3-pip mingw64-expat mingw64-gcc mingw64-gettext mingw64-freetype mingw64-libxml2 wine' + FDO_DISTRIBUTION_VERSION: '40' + FDO_DISTRIBUTION_PACKAGES: '@buildsys-build autoconf automake clang-devel libtool gettext gettext-devel gperf expat-devel libxml2-devel freetype-devel json-c-devel git docbook-utils docbook-utils-pdf bubblewrap ninja-build wget python3-pip mingw64-expat mingw64-gcc mingw64-gettext mingw64-freetype mingw64-libxml2 wine rust cargo bindgen-cli' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FDO_DISTRIBUTION_EXEC: $FEDORA_EXEC -freebsd:14.0@container-prep: +freebsd:14.1@container-prep: extends: .fdo.qemu-build@freebsd tags: - kvm stage: prep variables: GIT_STRATEGY: none - FDO_DISTRIBUTION_VERSION: '14.0' - FDO_DISTRIBUTION_PACKAGES: 'gcc autoconf automake libtool gettext gperf expat libxml2 freetype2 json-c git ninja wget python3 py39-pip pkgconf gmake gettext-runtime' - FDO_DISTRIBUTION_TAG: $FREEBSD_TAG - FDO_DISTRIBUTION_EXEC: $FREEBSD_EXEC - -freebsd:13.2@container-prep: - extends: .fdo.qemu-build@freebsd - tags: - - kvm - stage: prep - variables: - GIT_STRATEGY: none - FDO_DISTRIBUTION_VERSION: '13.2' - FDO_DISTRIBUTION_PACKAGES: 'gcc autoconf automake libtool gettext gperf expat libxml2 freetype2 json-c git ninja wget python3 py39-pip pkgconf gmake gettext-runtime' + FDO_DISTRIBUTION_VERSION: '14.1' + FDO_DISTRIBUTION_PACKAGES: 'gcc autoconf automake libtool gettext gperf expat libxml2 freetype2 json-c git ninja wget python3 py311-pip pkgconf gmake gettext-runtime' FDO_DISTRIBUTION_TAG: $FREEBSD_TAG FDO_DISTRIBUTION_EXEC: $FREEBSD_EXEC @@ -186,40 +174,31 @@ fedora:rawhide@container-clean: FDO_DISTRIBUTION_VERSION: 'rawhide' FDO_DISTRIBUTION_TAG: $FEDORA_TAG -fedora:40@container-clean: +fedora:41@container-clean: extends: - .container-clean variables: GIT_STRATEGY: none CURRENT_CONTAINER_IMAGE: $CI_REGISTRY_IMAGE/fedora/$FDO_DISTRIBUTION_VERSION:$FDO_DISTRIBUTION_TAG - FDO_DISTRIBUTION_VERSION: '40' + FDO_DISTRIBUTION_VERSION: '41' FDO_DISTRIBUTION_TAG: $FEDORA_TAG -fedora:39@container-clean: +fedora:40@container-clean: extends: - .container-clean variables: GIT_STRATEGY: none CURRENT_CONTAINER_IMAGE: $CI_REGISTRY_IMAGE/fedora/$FDO_DISTRIBUTION_VERSION:$FDO_DISTRIBUTION_TAG - FDO_DISTRIBUTION_VERSION: '39' + FDO_DISTRIBUTION_VERSION: '40' FDO_DISTRIBUTION_TAG: $FEDORA_TAG -freebsd:14.0@container-clean: +freebsd:14.1@container-clean: extends: - .container-clean variables: GIT_STRATEGY: none CURRENT_CONTAINER_IMAGE: $CI_REGISTRY_IMAGE/freebsd/$FDO_DISTRIBUTION_VERSION:$FDO_DISTRIBUTION_TAG - FDO_DISTRIBUTION_VERSION: '14.0' - FDO_DISTRIBUTION_TAG: $FREEBSD_TAG - -freebsd:13.2@container-clean: - extends: - - .container-clean - variables: - GIT_STRATEGY: none - CURRENT_CONTAINER_IMAGE: 
$CI_REGISTRY_IMAGE/freebsd/$FDO_DISTRIBUTION_VERSION:$FDO_DISTRIBUTION_TAG - FDO_DISTRIBUTION_VERSION: '13.2' + FDO_DISTRIBUTION_VERSION: '14.1' FDO_DISTRIBUTION_TAG: $FREEBSD_TAG @@ -327,7 +306,7 @@ freebsd:13.2@container-clean: expire_in: 5 days paths: - build*/doc/fontconfig-user.html - - build*/doc/fontconfig-devel/* + - build*/doc/fontconfig-devel.html - build*/fc-build.log - build*/config.log - build*/fontconfig-*.tar.* @@ -403,6 +382,7 @@ t_fedora:rawhide:meson shared libxml2: FC_BUILDSYS: meson FC_BUILD_TYPE: shared FC_XML_BACKEND: libxml2 + FC_BUILD_DISTCHECK: 1 needs: - 'fedora:rawhide@container-prep' @@ -471,7 +451,9 @@ t_fedora:rawhide:meson static libxml2: - 'fedora:rawhide@container-prep' -t_fedora:rawhide:mingw autotools static libxml2: + + +t_fedora:rawhide:mingw meson static expat: extends: - .build@template - .fdo.distribution-image@fedora @@ -480,16 +462,17 @@ t_fedora:rawhide:mingw autotools static libxml2: FC_DISTRO_NAME: fedora FDO_DISTRIBUTION_VERSION: 'rawhide' FDO_DISTRIBUTION_TAG: $FEDORA_TAG - FC_BUILDSYS: autotools + FC_BUILDSYS: meson FC_BUILD_TYPE: static - FC_XML_BACKEND: libxml2 + FC_XML_BACKEND: expat FC_BUILD_PLATFORM: mingw - FC_BUILD_ARCH: x86_64-mingw32 + FC_BUILD_ARCH: linux-mingw-w64-64bit + FC_BUILD_NO_INSTALL: 1 needs: - 'fedora:rawhide@container-prep' -t_fedora:rawhide:mingw meson static expat: +t_fedora:rawhide:meson static libxml2 fontations: extends: - .build@template - .fdo.distribution-image@fedora @@ -500,77 +483,76 @@ t_fedora:rawhide:mingw meson static expat: FDO_DISTRIBUTION_TAG: $FEDORA_TAG FC_BUILDSYS: meson FC_BUILD_TYPE: static - FC_XML_BACKEND: expat - FC_BUILD_PLATFORM: mingw - FC_BUILD_ARCH: linux-mingw-w64-64bit - FC_BUILD_NO_INSTALL: 1 + FC_XML_BACKEND: libxml2 + FC_BUILD_ENABLED: fontations needs: - 'fedora:rawhide@container-prep' -t_fedora:40:autotools shared expat: +t_fedora:41:autotools shared expat: extends: - .build@template - .fdo.distribution-image@fedora - .fc_artifacts variables: FC_DISTRO_NAME: fedora - FDO_DISTRIBUTION_VERSION: '40' + FDO_DISTRIBUTION_VERSION: '41' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FC_BUILDSYS: autotools FC_BUILD_TYPE: shared FC_XML_BACKEND: expat needs: - - 'fedora:40@container-prep' + - 'fedora:41@container-prep' -t_fedora:40:autotools shared libxml2: +t_fedora:41:autotools shared libxml2: extends: - .build@template - .fdo.distribution-image@fedora - .fc_artifacts variables: FC_DISTRO_NAME: fedora - FDO_DISTRIBUTION_VERSION: '40' + FDO_DISTRIBUTION_VERSION: '41' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FC_BUILDSYS: autotools FC_BUILD_TYPE: shared FC_XML_BACKEND: libxml2 FC_BUILD_DISTCHECK: 1 needs: - - 'fedora:40@container-prep' + - 'fedora:41@container-prep' -t_fedora:40:meson shared expat: +t_fedora:41:meson shared expat: extends: - .build@template - .fdo.distribution-image@fedora - .fc_artifacts variables: FC_DISTRO_NAME: fedora - FDO_DISTRIBUTION_VERSION: '40' + FDO_DISTRIBUTION_VERSION: '41' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FC_BUILDSYS: meson FC_BUILD_TYPE: shared FC_XML_BACKEND: expat needs: - - 'fedora:40@container-prep' + - 'fedora:41@container-prep' -t_fedora:40:meson shared libxml2: +t_fedora:41:meson shared libxml2: extends: - .build@template - .fdo.distribution-image@fedora - .fc_artifacts variables: FC_DISTRO_NAME: fedora - FDO_DISTRIBUTION_VERSION: '40' + FDO_DISTRIBUTION_VERSION: '41' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FC_BUILDSYS: meson FC_BUILD_TYPE: shared FC_XML_BACKEND: libxml2 + FC_BUILD_DISTCHECK: 1 needs: - - 'fedora:40@container-prep' + - 'fedora:41@container-prep' @@ -585,73 +567,72 
@@ t_fedora:40:meson shared libxml2: -t_fedora:39:autotools shared expat: + + +t_fedora:40:autotools shared expat: extends: - .build@template - .fdo.distribution-image@fedora - .fc_artifacts variables: FC_DISTRO_NAME: fedora - FDO_DISTRIBUTION_VERSION: '39' + FDO_DISTRIBUTION_VERSION: '40' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FC_BUILDSYS: autotools FC_BUILD_TYPE: shared FC_XML_BACKEND: expat needs: - - 'fedora:39@container-prep' + - 'fedora:40@container-prep' -t_fedora:39:autotools shared libxml2: +t_fedora:40:autotools shared libxml2: extends: - .build@template - .fdo.distribution-image@fedora - .fc_artifacts variables: FC_DISTRO_NAME: fedora - FDO_DISTRIBUTION_VERSION: '39' + FDO_DISTRIBUTION_VERSION: '40' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FC_BUILDSYS: autotools FC_BUILD_TYPE: shared FC_XML_BACKEND: libxml2 FC_BUILD_DISTCHECK: 1 needs: - - 'fedora:39@container-prep' + - 'fedora:40@container-prep' -t_fedora:39:meson shared expat: +t_fedora:40:meson shared expat: extends: - .build@template - .fdo.distribution-image@fedora - .fc_artifacts variables: FC_DISTRO_NAME: fedora - FDO_DISTRIBUTION_VERSION: '39' + FDO_DISTRIBUTION_VERSION: '40' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FC_BUILDSYS: meson FC_BUILD_TYPE: shared FC_XML_BACKEND: expat needs: - - 'fedora:39@container-prep' + - 'fedora:40@container-prep' -t_fedora:39:meson shared libxml2: +t_fedora:40:meson shared libxml2: extends: - .build@template - .fdo.distribution-image@fedora - .fc_artifacts variables: FC_DISTRO_NAME: fedora - FDO_DISTRIBUTION_VERSION: '39' + FDO_DISTRIBUTION_VERSION: '40' FDO_DISTRIBUTION_TAG: $FEDORA_TAG FC_BUILDSYS: meson FC_BUILD_TYPE: shared FC_XML_BACKEND: libxml2 + FC_BUILD_DISTCHECK: 1 needs: - - 'fedora:39@container-prep' - - - - + - 'fedora:40@container-prep' @@ -662,84 +643,36 @@ t_fedora:39:meson shared libxml2: -t_freebsd:14.0:autotools shared expat: +t_fedora:40:mingw autotools static libxml2: extends: - - .build-in-qemu@template - - .fdo.distribution-image@freebsd - - .fc_artifacts - variables: - FC_DISTRO_NAME: freebsd - FDO_DISTRIBUTION_VERSION: '14.0' - FDO_DISTRIBUTION_TAG: $FREEBSD_TAG - FC_BUILDSYS: autotools - FC_BUILD_TYPE: shared - FC_XML_BACKEND: expat - MAKE: gmake - FC_BUILD_NO_CHECK: 1 - needs: - - 'freebsd:14.0@container-prep' - - -t_freebsd:14.0:autotools shared libxml2: - extends: - - .build-in-qemu@template - - .fdo.distribution-image@freebsd + - .build@template + - .fdo.distribution-image@fedora - .fc_artifacts variables: - FC_DISTRO_NAME: freebsd - FDO_DISTRIBUTION_VERSION: '14.0' - FDO_DISTRIBUTION_TAG: $FREEBSD_TAG + FC_DISTRO_NAME: fedora + FDO_DISTRIBUTION_VERSION: '40' + FDO_DISTRIBUTION_TAG: $FEDORA_TAG FC_BUILDSYS: autotools - FC_BUILD_TYPE: shared + FC_BUILD_TYPE: static FC_XML_BACKEND: libxml2 - MAKE: gmake - FC_BUILD_NO_CHECK: 1 + FC_BUILD_PLATFORM: mingw + FC_BUILD_ARCH: x86_64-mingw32 needs: - - 'freebsd:14.0@container-prep' + - 'fedora:40@container-prep' -t_freebsd:14.0:meson shared expat: - extends: - - .build-in-qemu@template - - .fdo.distribution-image@freebsd - - .fc_artifacts - variables: - FC_DISTRO_NAME: freebsd - FDO_DISTRIBUTION_VERSION: '14.0' - FDO_DISTRIBUTION_TAG: $FREEBSD_TAG - FC_BUILDSYS: meson - FC_BUILD_TYPE: shared - FC_XML_BACKEND: expat - FC_BUILD_NO_CHECK: 1 - needs: - - 'freebsd:14.0@container-prep' -t_freebsd:14.0:meson shared libxml2: - extends: - - .build-in-qemu@template - - .fdo.distribution-image@freebsd - - .fc_artifacts - variables: - FC_DISTRO_NAME: freebsd - FDO_DISTRIBUTION_VERSION: '14.0' - FDO_DISTRIBUTION_TAG: $FREEBSD_TAG - FC_BUILDSYS: meson 
- FC_BUILD_TYPE: shared - FC_XML_BACKEND: libxml2 - FC_BUILD_NO_CHECK: 1 - needs: - - 'freebsd:14.0@container-prep' -t_freebsd:13.2:autotools shared expat: +t_freebsd:14.1:autotools shared expat: extends: - .build-in-qemu@template - .fdo.distribution-image@freebsd - .fc_artifacts variables: FC_DISTRO_NAME: freebsd - FDO_DISTRIBUTION_VERSION: '13.2' + FDO_DISTRIBUTION_VERSION: '14.1' FDO_DISTRIBUTION_TAG: $FREEBSD_TAG FC_BUILDSYS: autotools FC_BUILD_TYPE: shared @@ -747,17 +680,17 @@ t_freebsd:13.2:autotools shared expat: MAKE: gmake FC_BUILD_NO_CHECK: 1 needs: - - 'freebsd:13.2@container-prep' + - 'freebsd:14.1@container-prep' -t_freebsd:13.2:autotools shared libxml2: +t_freebsd:14.1:autotools shared libxml2: extends: - .build-in-qemu@template - .fdo.distribution-image@freebsd - .fc_artifacts variables: FC_DISTRO_NAME: freebsd - FDO_DISTRIBUTION_VERSION: '13.2' + FDO_DISTRIBUTION_VERSION: '14.1' FDO_DISTRIBUTION_TAG: $FREEBSD_TAG FC_BUILDSYS: autotools FC_BUILD_TYPE: shared @@ -765,41 +698,41 @@ t_freebsd:13.2:autotools shared libxml2: MAKE: gmake FC_BUILD_NO_CHECK: 1 needs: - - 'freebsd:13.2@container-prep' + - 'freebsd:14.1@container-prep' -t_freebsd:13.2:meson shared expat: +t_freebsd:14.1:meson shared expat: extends: - .build-in-qemu@template - .fdo.distribution-image@freebsd - .fc_artifacts variables: FC_DISTRO_NAME: freebsd - FDO_DISTRIBUTION_VERSION: '13.2' + FDO_DISTRIBUTION_VERSION: '14.1' FDO_DISTRIBUTION_TAG: $FREEBSD_TAG FC_BUILDSYS: meson FC_BUILD_TYPE: shared FC_XML_BACKEND: expat FC_BUILD_NO_CHECK: 1 needs: - - 'freebsd:13.2@container-prep' + - 'freebsd:14.1@container-prep' -t_freebsd:13.2:meson shared libxml2: +t_freebsd:14.1:meson shared libxml2: extends: - .build-in-qemu@template - .fdo.distribution-image@freebsd - .fc_artifacts variables: FC_DISTRO_NAME: freebsd - FDO_DISTRIBUTION_VERSION: '13.2' + FDO_DISTRIBUTION_VERSION: '14.1' FDO_DISTRIBUTION_TAG: $FREEBSD_TAG FC_BUILDSYS: meson FC_BUILD_TYPE: shared FC_XML_BACKEND: libxml2 FC_BUILD_NO_CHECK: 1 needs: - - 'freebsd:13.2@container-prep' + - 'freebsd:14.1@container-prep' ####################################### @@ -819,13 +752,15 @@ check-merge-request: reports: junit: results.xml allow_failure: true + rules: + - if: $CI_PIPELINE_SOURCE == "merge_request_event" pages: stage: deploy script: - | mkdir public || : - cp -a build*/doc/fontconfig-user.html build*/doc/fontconfig-devel public/ + cp -a build*/doc/fontconfig-user.html build*/doc/fontconfig-devel.html public/ artifacts: paths: - public @@ -835,3 +770,10 @@ pages: - t_fedora:rawhide:autotools shared libxml2 rules: - if: $CI_COMMIT_BRANCH == "main" && $CI_PIPELINE_SOURCE != "merge_request_event" + +workflow: + rules: + - if: $CI_PIPELINE_SOURCE == "merge_request_event" + - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS + when: never + - if: $CI_COMMIT_BRANCH diff --git a/fontconfig/.gitlab-ci/build.sh b/fontconfig/.gitlab-ci/build.sh index d56864463a..932b042dbf 100644 --- a/fontconfig/.gitlab-ci/build.sh +++ b/fontconfig/.gitlab-ci/build.sh @@ -106,11 +106,13 @@ if [ x"$buildsys" == "xautotools" ]; then fi elif [ x"$buildsys" == "xmeson" ]; then pip install meson +# tomli not required for Python >= 3.11 + pip install tomli for i in "${enable[@]}"; do - buildopt+=(-D$i=true) + buildopt+=(-D$i=enabled) done for i in "${disable[@]}"; do - buildopt+=(-D$i=false) + buildopt+=(-D$i=disabled) done case x"$backend" in 'xexpat') diff --git a/fontconfig/.gitlab-ci/ci.template b/fontconfig/.gitlab-ci/ci.template index 1ddfdbe501..7bcc84b829 100644 --- 
a/fontconfig/.gitlab-ci/ci.template +++ b/fontconfig/.gitlab-ci/ci.template @@ -12,7 +12,7 @@ # and run ci-fairy generate-template. For details, see # https://freedesktop.pages.freedesktop.org/ci-templates/ci-fairy.html#templating-gitlab-ci-yml -.templates_sha: &template_sha 98b1218f146a1ec96d65e3ce0041f9a6ec5cb5e6 +.templates_sha: &template_sha e195d80f35b45cc73668be3767b923fd76c70ed5 include: {% for distro in distributions|sort(attribute="name") %} @@ -268,7 +268,7 @@ check-ci-script: expire_in: 5 days paths: - build*/doc/fontconfig-user.html - - build*/doc/fontconfig-devel/* + - build*/doc/fontconfig-devel.html - build*/fc-build.log - build*/config.log - build*/fontconfig-*.tar.* @@ -331,13 +331,15 @@ check-merge-request: reports: junit: results.xml allow_failure: true + rules: + - if: $CI_PIPELINE_SOURCE == "merge_request_event" pages: stage: deploy script: - | mkdir public || : - cp -a build*/doc/fontconfig-user.html build*/doc/fontconfig-devel public/ + cp -a build*/doc/fontconfig-user.html build*/doc/fontconfig-devel.html public/ artifacts: paths: - public @@ -347,3 +349,10 @@ pages: - t_fedora:rawhide:autotools shared libxml2 rules: - if: $CI_COMMIT_BRANCH == "main" && $CI_PIPELINE_SOURCE != "merge_request_event" + +workflow: + rules: + - if: $CI_PIPELINE_SOURCE == "merge_request_event" + - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS + when: never + - if: $CI_COMMIT_BRANCH diff --git a/fontconfig/.gitlab-ci/config.yml b/fontconfig/.gitlab-ci/config.yml index ca20c1cdc9..a1d31527fa 100644 --- a/fontconfig/.gitlab-ci/config.yml +++ b/fontconfig/.gitlab-ci/config.yml @@ -1,4 +1,4 @@ -.default_tag: &default_tag "2024-04-22.1" +.default_tag: &default_tag "2025-01-17.1" distributions: - name: fedora @@ -7,8 +7,8 @@ distributions: qemu_based: false versions: - "rawhide" + - "41" - "40" - - "39" builds: - name: "autotools shared expat" variables: @@ -31,6 +31,7 @@ distributions: FC_BUILDSYS: meson FC_BUILD_TYPE: shared FC_XML_BACKEND: libxml2 + FC_BUILD_DISTCHECK: 1 - name: "autotools static expat" build_only: "rawhide" variables: @@ -56,7 +57,7 @@ distributions: FC_BUILD_TYPE: static FC_XML_BACKEND: libxml2 - name: "mingw autotools static libxml2" - build_only: "rawhide" + build_only: "40" variables: FC_BUILDSYS: autotools FC_BUILD_TYPE: static @@ -72,13 +73,19 @@ distributions: FC_BUILD_PLATFORM: mingw FC_BUILD_ARCH: linux-mingw-w64-64bit FC_BUILD_NO_INSTALL: 1 + - name: "meson static libxml2 fontations" + build_only: "rawhide" + variables: + FC_BUILDSYS: meson + FC_BUILD_TYPE: static + FC_XML_BACKEND: libxml2 + FC_BUILD_ENABLED: "fontations" - name: freebsd tag: *default_tag base_type: freebsd qemu_based: true versions: - - "14.0" - - "13.2" + - "14.1" builds: - name: "autotools shared expat" variables: @@ -114,6 +121,7 @@ packages: "@buildsys-build", "autoconf", "automake", + "clang-devel", "libtool", "gettext", "gettext-devel", @@ -135,6 +143,9 @@ packages: "mingw64-freetype", "mingw64-libxml2", "wine", + "rust", + "cargo", + "bindgen-cli" ] freebsd: needed: @@ -153,7 +164,7 @@ packages: "ninja", "wget", "python3", - "py39-pip", + "py311-pip", "pkgconf", "gmake", "gettext-runtime", diff --git a/fontconfig/.gitlab-ci/other.yml b/fontconfig/.gitlab-ci/other.yml index dc53011854..b2aad54a5a 100644 --- a/fontconfig/.gitlab-ci/other.yml +++ b/fontconfig/.gitlab-ci/other.yml @@ -1,8 +1,8 @@ # FIXME: fontconfig should probably get its own image # In the meantime, the latest GStreamer image tag can be found here: -# 
https://gitlab.freedesktop.org/gstreamer/gstreamer/-/blob/main/.gitlab-image-tags.yml#L10 +# https://gitlab.freedesktop.org/gstreamer/gstreamer/-/blob/main/.gitlab-image-tags.yml#L18 .build meson windows: - image: "registry.freedesktop.org/gstreamer/gstreamer/amd64/windows:2022-09-23.0-main" + image: "registry.freedesktop.org/gstreamer/gstreamer/amd64/windows:2024-11-12.0-main" stage: "test" tags: - "docker" @@ -19,7 +19,11 @@ # -Dbar=disabled before_script: # Make sure meson is up to date, so we don't need to rebuild the image with each release - - pip3 install -U meson certifi + - pip3 install -U meson certifi tomli requests + # Test Rust availability. As of 11/25/2024, according to + # https://gitlab.freedesktop.org/gstreamer/gstreamer/-/blob/main/ci/docker/windows/Dockerfile + # Rust 1.82 is already installed. + - rustc --version script: # For some reason, options are separated by newline instead of space, so we # have to replace them first. @@ -41,20 +45,24 @@ - build-*/meson-logs/*txt - prefix-* -meson vs2019 amd64: - extends: ".build meson windows" - variables: - ARCH: "amd64" meson vs2019 x86: extends: ".build meson windows" variables: ARCH: "x86" + MESON_ARGS: "-Dfontations=disabled" -meson macos: +# A Windows Fontations build would currently require +# libclang and llvm installation which is a several hundred megabyte download. +# Skip that configuration for now. + +.meson macos test: stage: "test" + # See https://gitlab.freedesktop.org/gstreamer/gstreamer/-/blob/main/.gitlab-ci.yml + # As of 2024-11-21, this is a Mac OS 15 Sequia image on Apple silicon. + image: "registry.freedesktop.org/gstreamer/gstreamer/macos-arm64/15-sequoia:2024-10-28.0" tags: - - gst-macos-11.1 + - gst-mac-arm artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_SHA}" expire_in: "5 days" @@ -65,19 +73,35 @@ meson macos: - pip3 install --upgrade pip # Make sure meson is up to date - pip3 install -U meson + # For Python < 3.11 we need tomli for Rust build support + - pip3 install -U tomli # Need to install certificates for python - pip3 install --upgrade certifi - # Anther way t install certificates - - open /Applications/Python\ 3.8/Install\ Certificates.command # Get ninja - - curl -L -o ninja-mac.zip https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-mac.zip - - unzip ninja-mac.zip - - sudo cp ninja /usr/local/bin + - pip3 install --upgrade ninja + # Install rust + - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + - . "$HOME/.cargo/env" + # Test Rust availability. 
+ - rustup update + - cargo install bindgen-cli + - rustc --version + - bindgen --version script: - - CERT_PATH=$(python3 -m certifi) && export SSL_CERT_FILE=${CERT_PATH} && export REQUESTS_CA_BUNDLE=${CERT_PATH} && meson setup -Diconv=enabled build + - CERT_PATH=$(python3 -m certifi) && export SSL_CERT_FILE=${CERT_PATH} && export REQUESTS_CA_BUNDLE=${CERT_PATH} && meson setup -Diconv=enabled $MESON_FONTATIONS_ARG build - meson compile --verbose -C build - meson test -C build +meson macos test: + extends: ".meson macos test" + variables: + MESON_FONTATIONS_ARG: "-Dfontations=disabled" + +meson macos test fontations: + extends: ".meson macos test" + variables: + MESON_FONTATIONS_ARG: "-Dfontations=enabled" + # msys infrastructure is a bit broken, disable for now meson msys2: extends: ".build meson windows" diff --git a/fontconfig/Cargo.lock b/fontconfig/Cargo.lock new file mode 100644 index 0000000000..156bffc89a --- /dev/null +++ b/fontconfig/Cargo.lock @@ -0,0 +1,24 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "fc-fontations" +version = "0.1.0" +dependencies = [ + "fc-fontations-bindgen", + "libc", +] + +[[package]] +name = "fc-fontations-bindgen" +version = "0.1.0" +dependencies = [ + "libc", +] + +[[package]] +name = "libc" +version = "0.2.165" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb4d3d38eab6c5239a362fa8bae48c03baf980a6e7079f063942d563ef3533e" diff --git a/fontconfig/Cargo.toml b/fontconfig/Cargo.toml new file mode 100644 index 0000000000..6feaf0cb6e --- /dev/null +++ b/fontconfig/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "fc-fontations" +version = "0.1.0" + +[dependencies] +fc-fontations-bindgen = { path = "./fc-fontations-bindgen" } +libc = "0.2" + +[lib] +path = "fc-fontations/mod.rs" diff --git a/fontconfig/NEWS b/fontconfig/NEWS index 3fd50d1f7d..e6cfbac024 100644 --- a/fontconfig/NEWS +++ b/fontconfig/NEWS @@ -1,12 +1,124 @@ Fontconfig Font configuration and customization library - Version 2.15 - 2023-12-22 + Version 2.16 + 2025-01-17 Check INSTALL for compilation and installation instructions. Report bugs to https://gitlab.freedesktop.org/fontconfig/fontconfig/issues/new. 
+2.16 + +Akira TAGOH (69): + Publish docs to GitLab pages + doc: default index for fontconfig-devel to index.html + Update URLs for docs + doc: Fix a typo of the summary for FcFontSetSort + Clean up .uuid files with fc-cache -f too + Fix undesired unref of FcConfig on Win32 + meson.build: Fix a typo in POT names + meson.build: Add missing --msgid-bugs-address + Sort out bitmap related config files + Add test cases for 70-no-bitmap-and-emoji.conf and 70-no-bitmap-except-emoji.conf + meson: Add missing checkup + Add a missing dependency for CI on FreeBSD + meson: try to figure out libintl dependency + ci: Fix a typo in build script + ci: Add config.log for artifacts + ci: Add missing dependencies + ci: Disable cache update + meson: Fix build fail with NLS enabled on BSD + meson: Add libxml2 support + ci: Add libxml2 build for meson + meson: Workaround an exception + ci: Workaround an error with libxml2 on Android + meson: Add iconv checkup for all platforms + Fix incompatible pointer type on MinGW + meson: Use c_shared_args to take care of --default-library=both on Win32 + ci: Fix a typo + ci: disable iconv for MSVC + ci: build with expat on MSVC + doc: Use sans-serif instead of sans + Do not add merge commits into NEWS file + doc: Fix a typo + meson: Enable run-test.sh for non-Win32 + test/wrapper-script.sh: don't add a path when executable already has a path name. + meson: Add missing the unit testing with json-c + test-conf: Fix compiler warnings + Fix test case for reproducible builds + ci: Use md5 if md5sum isn't available. + ci: normalize path to avoid miscalculation of cache name + ci: Add Fedora 40 and remove Fedora 38 + More information when no writable cache directories + Fix a memory leak in _get_real_paths_from_prefix + Set FcTypeVoid if no valid types to convert + Add FcConfigSetFontSetFilter + Improve hinting detection for fonthashint object + Accept integer for pixelsize + Fix a memory leak in fc-list/fc-query/fc-scan + Add got.orth for Gothic language + Add cop.orth for Coptic language + Add foreign automake option to avoid an error on autotools bootstrap + ci: rebase ci-templates + ci: Add Fedora 41 and drop 39 + ci: run check-merge-request on merge request pipelines only + ci: Add FreeBSD 14.1 and drop 13.2 + ci: build mingw on f40 only + meson: Add install_tag for install targets + meson: Add docs into dist + meson: Add autotools files into dist + doc: generate fontconfig-devel.html as one big file + ci: Fix a fail on pages deployment + ci: Fix pages deployment again + fc-case: Correct the license header of fccase.h + Use proper postscriptname for named instance if any + Replace hardcoded path in man pages to url link + Allow comma as a delimiter in postscriptname and ignore it on matching + Deal with glob string properly + Another fix of glob string for Win32 + ci: Enable meson dist + Fix misleading-indentation warning + Bump the libtool version + +Andreas Falkenhahn (1): + Do not prefix cache_base with a "/". Doing so will lead to FcStrBuildFilename() composing paths that contain double slashes, e.g. in FcDirCacheProcess(). If FcDirCacheBasenameMD5() returns a cache_base that is prefixed with a "/", the call to FcStrBuildFilename() in FcDirCacheProcess() will compose a path that contains double slashes and this double-slashed path will then be passed to FcDirCacheOpenFile(). 
This won't cause any harm on Linux, which simply ignores multiple slashes in paths, but other operating systems do not allow them, so FcDirCacheOpenFile() will fail on those platforms because of the double slash. + +Andrey Prokopyuk (2): + Fix qsort nullpointer issue + Fix FcSerialize null pointer usage + +Christoph Reiter (1): + meson: fix config relocation on Windows + +Dan Yeaw (1): + Fix invalid escape character \s + +Dominik Röttsches (6): + Remove redundant leaf assignment in fcfreetype.c + Move Mac OS image to an up-to-date Mac OS 15 Sequoia image on ARM + Update Windows image to gstreamer image from stable + Allow building Rust targets in CI + [Fontations] Build bindgen targets, basic Rust test + Refactor exclusive language logic into separate file + +Florian "sp1rit" (1): + meson: added default font dirs for android + +Kenny Levinsen (2): + Unlock on allocation failure in FcCacheInsert + Ensure config is locked during retry in FcConfigReference + +Ryan Schmidt (1): + Fix wording in README.md + +Sam James (2): + build: detect-and-use `-lm` for `fabs` in fcmatch + fontconfig: mark _FcPatternIter as may_alias + +Xavier Claessens (2): + Meson: Fix build with clang-cl by using cc.preprocess() + meson: Add missing dep on generated header + 2.15 Akira TAGOH (39): diff --git a/fontconfig/build-aux/meson-dist-autotools.py b/fontconfig/build-aux/meson-dist-autotools.py new file mode 100644 index 0000000000..ec0f4f1159 --- /dev/null +++ b/fontconfig/build-aux/meson-dist-autotools.py @@ -0,0 +1,27 @@ +#! /usr/bin/env python3 +# Copyright (C) 2024 fontconfig Authors +# SPDX-License-Identifier: HPND + +import os +import shutil +import subprocess +import sys +from pathlib import Path + +sourcedir = os.environ.get('MESON_SOURCE_ROOT') +builddir = os.environ.get('MESON_BUILD_ROOT') +distdir = os.environ.get('MESON_DIST_ROOT') + +if not shutil.which('autoreconf'): + print('no autoreconf installed', file=sys.stderr) + sys.exit(1) + +subprocess.run(['autoreconf', '-i'], cwd=distdir) + +# Copy files for compatibility +for f in (Path(builddir) / 'doc').glob('*.1'): + print(f'Copying {f.name}') + shutil.copy2(f, Path(distdir) / f.stem) + +# Remove autom4te.cache +shutil.rmtree(Path(distdir) / 'autom4te.cache') diff --git a/fontconfig/build-aux/meson-dist-docs.py b/fontconfig/build-aux/meson-dist-docs.py new file mode 100644 index 0000000000..b229d23529 --- /dev/null +++ b/fontconfig/build-aux/meson-dist-docs.py @@ -0,0 +1,29 @@ +#!
/usr/bin/env python3 +# Copyright (C) 2024 fontconfig Authors +# SPDX-License-Identifier: HPND + +import os +import shutil +from pathlib import Path + +sourcedir = os.environ.get('MESON_SOURCE_ROOT') +builddir = os.environ.get('MESON_BUILD_ROOT') +distdir = os.environ.get('MESON_DIST_ROOT') + +# Copy manpages +docdir = Path(distdir) / 'doc' +for f in (Path(builddir) / 'doc').glob('*.[135]'): + print(f'Copying {f.name}') + shutil.copy2(f, docdir) + +# Copy config file +confdir = Path(distdir) / 'conf.d' +shutil.copy2(Path(builddir) / 'conf.d' / '35-lang-normalize.conf', confdir) + +# Documentation +shutil.copy2(Path(builddir) / 'doc' / 'fontconfig-devel.html', docdir) +shutil.copy2(Path(builddir) / 'doc' / 'fontconfig-devel.pdf', docdir) +shutil.copy2(Path(builddir) / 'doc' / 'fontconfig-devel.txt', docdir) +shutil.copy2(Path(builddir) / 'doc' / 'fontconfig-user.html', docdir) +shutil.copy2(Path(builddir) / 'doc' / 'fontconfig-user.pdf', docdir) +shutil.copy2(Path(builddir) / 'doc' / 'fontconfig-user.txt', docdir) diff --git a/fontconfig/conf.d/meson.build b/fontconfig/conf.d/meson.build index b6d07ee0c8..ef9b0cc677 100644 --- a/fontconfig/conf.d/meson.build +++ b/fontconfig/conf.d/meson.build @@ -70,12 +70,14 @@ conf_links = [ '90-synthetic.conf', ] -install_data(conf_files, install_dir: fc_templatedir) +install_data(conf_files, + install_dir: fc_templatedir, + install_tag: 'runtime') meson.add_install_script('link_confs.py', fc_templatedir, - fc_configdir, - conf_links, -) + fc_configdir, + conf_links, + install_tag: 'runtime') # 35-lang-normalize.conf orths = [] @@ -90,7 +92,8 @@ custom_target('35-lang-normalize.conf', output: '35-lang-normalize.conf', command: [find_program('write-35-lang-normalize-conf.py'), ','.join(orths), '@OUTPUT@'], install_dir: fc_templatedir, - install: true) + install: true, + install_tag: 'runtime') # README readme_cdata = configuration_data() @@ -99,4 +102,5 @@ configure_file(output: 'README', input: 'README.in', configuration: readme_cdata, install_dir: fc_configdir, - install: true) + install: true, + install_tag: 'runtime') diff --git a/fontconfig/configure.ac b/fontconfig/configure.ac index 56c7fa9710..21aeb08809 100644 --- a/fontconfig/configure.ac +++ b/fontconfig/configure.ac @@ -33,8 +33,8 @@ dnl This is the package version number, not the shared library dnl version. This same version number must appear in fontconfig/fontconfig.h dnl Yes, it is a pain to synchronize version numbers. 
Unfortunately, it's dnl not possible to extract the version number here from fontconfig.h -AC_INIT([fontconfig],[2.15.0],[https://gitlab.freedesktop.org/fontconfig/fontconfig/issues/new]) -AM_INIT_AUTOMAKE([1.11 parallel-tests dist-xz]) +AC_INIT([fontconfig],[2.16.0],[https://gitlab.freedesktop.org/fontconfig/fontconfig/issues/new]) +AM_INIT_AUTOMAKE([1.11 parallel-tests dist-xz foreign]) m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])]) dnl ========================================================================== @@ -73,11 +73,11 @@ dnl libtool versioning dnl bump revision when fixing bugs dnl bump current and age, reset revision to zero when adding APIs dnl bump current, leave age, reset revision to zero when changing/removing APIS -LIBT_CURRENT=13 -LIBT_REVISION=1 +LIBT_CURRENT=16 +LIBT_REVISION=0 AC_SUBST(LIBT_CURRENT) AC_SUBST(LIBT_REVISION) -LIBT_AGE=12 +LIBT_AGE=15 LIBT_VERSION_INFO="$LIBT_CURRENT:$LIBT_REVISION:$LIBT_AGE" AC_SUBST(LIBT_VERSION_INFO) diff --git a/fontconfig/doc/Makefile.am b/fontconfig/doc/Makefile.am index e6ec73abbd..335f9840b4 100644 --- a/fontconfig/doc/Makefile.am +++ b/fontconfig/doc/Makefile.am @@ -26,7 +26,6 @@ NULL = EXTRA_DIST = \ $(BUILT_DOCS) \ $(DOC_FUNCS_FNCS) \ - $(HTML_DIR)/* \ $(SGML_FILES) \ $(check_SCRIPTS) \ confdir.sgml.in \ @@ -130,8 +129,8 @@ TXT_FILES = $(SGML_FILES:.sgml=.txt) PDF_FILES = $(SGML_FILES:.sgml=.pdf) HTML_FILES = \ fontconfig-user.html \ + fontconfig-devel.html \ $(NULL) -HTML_DIR = fontconfig-devel # noinst_PROGRAMS = \ $(NULL) @@ -158,15 +157,10 @@ doc_DATA = \ $(HTML_FILES) \ $(NULL) # -htmldocdir = $(docdir)/$(HTML_DIR) -htmldoc_DATA = \ - $(NULL) - if USEDOCBOOK BUILT_SOURCES += \ $(LOCAL_SGML_FILES) \ $(NULL) -htmldoc_DATA += $(HTML_DIR)/* ## .fncs.sgml: @@ -202,10 +196,6 @@ $(DOC_FUNCS_SGML): $(DOC_FUNCS_FNCS) $(srcdir)/edit-sgml.py $(srcdir)/func.sgml $(TXT_FILES): $(DOCS_DEPS) $(PDF_FILES): $(DOCS_DEPS) $(HTML_FILES): $(DOCS_DEPS) -$(HTML_DIR)/*: $(HTML_DIR) -$(HTML_DIR): local-fontconfig-devel.sgml $(DOCS_DEPS) - $(AM_V_GEN) $(RM) -r $@; \ - $(DOC2HTML) -V '%use-id-as-filename%' -o $@ local-fontconfig-devel.sgml local-fontconfig-user.sgml: $(srcdir)/fontconfig-user.sgml $(AM_V_GEN) $(LN_S) $(srcdir)/fontconfig-user.sgml $@; \ [ ! -f $(builddir)/fontconfig-user.sgml ] && cp -a $(srcdir)/fontconfig-user.sgml $(builddir)/fontconfig-user.sgml || : @@ -213,14 +203,13 @@ local-fontconfig-devel.sgml: $(srcdir)/fontconfig-devel.sgml $(AM_V_GEN) $(LN_S) $(srcdir)/fontconfig-devel.sgml $@; \ [ ! -f $(builddir)/fontconfig-devel.sgml ] && cp -a $(srcdir)/fontconfig-devel.sgml $(builddir)/fontconfig-devel.sgml || : # -all-local: $(BUILT_DOCS) $(HTML_DIR)/* +all-local: $(BUILT_DOCS) clean-local: - $(RM) -r $(HTML_DIR) devel-man + $(RM) -r devel-man [ "x$(builddir)" != "x$(srcdir)" ] && $(RM) $(builddir)/*.sgml || : dist-local-check-docs-enabled: @true else -htmldoc_DATA += $(srcdir)/$(HTML_DIR)/* .fncs.sgml: $(AM_V_GEN) $(RM) $@; \ touch -r $< $@ diff --git a/fontconfig/doc/fcconfig.fncs b/fontconfig/doc/fcconfig.fncs index 49e691eb4c..0a072af74d 100644 --- a/fontconfig/doc/fcconfig.fncs +++ b/fontconfig/doc/fcconfig.fncs @@ -71,6 +71,36 @@ in config since 2.12.0, returning FcFalse if that call fa Returns the current default configuration. 
@@ +@RET@ FcConfig * +@FUNC@ FcConfigSetFontSetFilter +@TYPE1@ FcConfig * @ARG1@ config +@TYPE2@ FcFilterFontSetFunc% @ARG2@ filter_func +@TYPE3@ FcDestroyFunc% @ARG3@ destroy_data_func +@TYPE4@ void * @ARG4@ user_data +@PURPOSE@ Set a predicate function to filter fontsets +@DESC@ +Sets filter_func as a predicate function to filter out +fontsets in config as desired. +filter_func will be called with a font pattern and +user_data only when loading caches. +When config is going to be destroyed, +user_data will be destroyed through +destroy_data_func if it is set. +@SINCE@ 2.16.0 +@@ + +@RET@ FcBool +@FUNC@ FcConfigAcceptFilter +@TYPE1@ FcConfig * @ARG1@ config +@TYPE2@ const FcPattern * @ARG2@ font +@PURPOSE@ Test whether the given pattern matches filter +@DESC@ +This calls the predicate function set by FcConfigSetFontSetFilter +and returns FcTrue if font matches what the filter expects, +otherwise FcFalse. +@SINCE@ 2.16.0 +@@ + @RET@ FcBool @FUNC@ FcConfigUptoDate @TYPE1@ FcConfig * @ARG1@ config @@ -514,4 +544,3 @@ in configuration file. This function tries to match 'pat' with them and return FcFalse if 'pat' is rejected, otherwise FcTrue. @SINCE@ 2.15.1 @@ - diff --git a/fontconfig/doc/meson.build b/fontconfig/doc/meson.build index 290549c7f6..548df129ba 100644 --- a/fontconfig/doc/meson.build +++ b/fontconfig/doc/meson.build @@ -4,10 +4,12 @@ tests = [] if host_machine.system() != 'windows' tests += [ 'check-missing-doc', - 'check-whitespace-in-args' + # 'check-whitespace-in-args' ] + meson.add_dist_script(find_program('check-whitespace-in-args.py'), '@SOURCE_ROOT@/doc') endif + docbook2man = find_program('docbook2man', required: get_option('doc-man')) docbook2txt = find_program('docbook2txt', required: get_option('doc-txt')) docbook2pdf = find_program('docbook2pdf', required: get_option('doc-pdf')) @@ -96,7 +98,8 @@ if docbook2man.found() command: [run_quiet, docbook2man, '@INPUT0@', '--output', '@OUTDIR@'], build_by_default: true, install_dir: get_option('mandir') / 'man3', - install: true) + install: true, + install_tag: 'docs') # fonts.conf(5) custom_target('fonts-conf-5-man-page', @@ -105,7 +108,8 @@ if docbook2man.found() command: [run_quiet, docbook2man, '@INPUT0@', '--output', '@OUTDIR@'], install_dir: get_option('mandir') / 'man5', build_by_default: true, - install: true) + install: true, + install_tag: 'docs') # Generate man pages for tools foreach t : tools_man_pages @@ -115,7 +119,8 @@ if docbook2man.found() output: '@0@.1'.format(t), command: [run_quiet, docbook2man, '@INPUT@', '--output', '@OUTDIR@'], install_dir: get_option('mandir') / 'man1', - install: true) + install: true, + install_tag: 'docs') endforeach endif @@ -128,15 +133,17 @@ if docbook2pdf.found() command: [run_quiet, docbook2pdf, '@INPUT0@', '--output', '@OUTDIR@'], build_by_default: true, install_dir: get_option('datadir') / 'doc' / 'fontconfig', - install: true) + install: true, + install_tag: 'docs') custom_target('user-pdf', - input: [fontconfig_user_sgml, funcs_sgml], + input: [fontconfig_user_sgml, funcs_sgml], output: 'fontconfig-user.pdf', command: [run_quiet, docbook2pdf, '@INPUT0@', '--output', '@OUTDIR@'], build_by_default: true, install_dir: get_option('datadir') / 'doc' / 'fontconfig', - install: true) + install: true, + install_tag: 'docs') endif if docbook2txt.found() @@ -148,7 +155,8 @@ if docbook2txt.found() command: [run_quiet, docbook2txt, '@INPUT0@', '--output', '@OUTDIR@'], build_by_default: true, install_dir: get_option('datadir') / 'doc' / 'fontconfig', - install: true) + install: true, 
+ install_tag: 'docs') custom_target('user-txt', input: [fontconfig_user_sgml, funcs_sgml], @@ -156,7 +164,8 @@ if docbook2txt.found() command: [run_quiet, docbook2txt, '@INPUT0@', '--output', '@OUTDIR@'], build_by_default: true, install_dir: get_option('datadir') / 'doc' / 'fontconfig', - install: true) + install: true, + install_tag: 'docs') endif if docbook2html.found() @@ -168,7 +177,8 @@ if docbook2html.found() command: [run_quiet, docbook2html, '--nochunks', '@INPUT0@', '--output', '@OUTDIR@'], build_by_default: true, install_dir: get_option('datadir') / 'doc' / 'fontconfig', - install: true) + install: true, + install_tag: 'docs') custom_target('user-html', input: [fontconfig_user_sgml, funcs_sgml], @@ -176,7 +186,8 @@ if docbook2html.found() command: [run_quiet, docbook2html, '--nochunks', '@INPUT0@', '--output', '@OUTDIR@'], build_by_default: true, install_dir: get_option('datadir') / 'doc' / 'fontconfig', - install: true) + install: true, + install_tag: 'docs') endif foreach script : tests diff --git a/fontconfig/fc-cache/fc-cache.sgml b/fontconfig/fc-cache/fc-cache.sgml index 6313020c97..87b5e41fab 100644 --- a/fontconfig/fc-cache/fc-cache.sgml +++ b/fontconfig/fc-cache/fc-cache.sgml @@ -218,8 +218,7 @@ manpage.1: manpage.sgml fc-scan(1) - The fontconfig user's guide, in HTML format: - /usr/share/doc/fontconfig/fontconfig-user.html. + The fontconfig user's guide diff --git a/fontconfig/fc-cache/meson.build b/fontconfig/fc-cache/meson.build index d1ee97685a..1d66ccf34c 100644 --- a/fontconfig/fc-cache/meson.build +++ b/fontconfig/fc-cache/meson.build @@ -4,12 +4,13 @@ fccache = executable('fc-cache', ['fc-cache.c', fcstdint_h, alias_headers, ft_al link_with: [libfontconfig], c_args: c_args, install: true, -) + install_tag: 'tools') tools_man_pages += ['fc-cache'] # Do not try to execute target's fc-cache on host when cross compiling if get_option('cache-build').enabled() and not meson.is_cross_build() meson.add_install_script(fccache, '-s', '-f', '-v', - skip_if_destdir: true) + skip_if_destdir: true, + install_tag: 'tools') endif diff --git a/fontconfig/fc-case/fc-case.py b/fontconfig/fc-case/fc-case.py index 360bd32983..be808b3064 100644 --- a/fontconfig/fc-case/fc-case.py +++ b/fontconfig/fc-case/fc-case.py @@ -28,29 +28,32 @@ import string import sys + class CaseFoldClass(Enum): COMMON = 1 FULL = 2 SIMPLE = 3 TURKIC = 4 + class CaseFoldMethod(Enum): RANGE = 0 EVEN_ODD = 1 FULL = 2 + caseFoldClassMap = { - 'C' : CaseFoldClass.COMMON, - 'F' : CaseFoldClass.FULL, - 'S' : CaseFoldClass.SIMPLE, - 'T' : CaseFoldClass.TURKIC + 'C': CaseFoldClass.COMMON, + 'F': CaseFoldClass.FULL, + 'S': CaseFoldClass.SIMPLE, + 'T': CaseFoldClass.TURKIC } folds = [] def ucs4_to_utf8(ucs4): utf8_rep = [] - + if ucs4 < 0x80: utf8_rep.append(ucs4) bits = -6 @@ -70,17 +73,19 @@ def ucs4_to_utf8(ucs4): utf8_rep.append(((ucs4 >> 30) & 0x01) | 0xFC) bits = 24 else: - return []; + return [] while bits >= 0: utf8_rep.append(((ucs4 >> bits) & 0x3F) | 0x80) - bits-= 6 + bits -= 6 return utf8_rep + def utf8_size(ucs4): return len(ucs4_to_utf8(ucs4)) + case_fold_method_name_map = { CaseFoldMethod.RANGE: 'FC_CASE_FOLD_RANGE,', CaseFoldMethod.EVEN_ODD: 'FC_CASE_FOLD_EVEN_ODD,', @@ -115,7 +120,8 @@ def utf8_size(ucs4): tokens = line.split('; ') if len(tokens) < 3: - print('Not enough tokens in line {}'.format(cnt), file=sys.stderr) + print('Not enough tokens in line {}'.format(cnt), + file=sys.stderr) sys.exit(1) # Get upper case value @@ -125,14 +131,14 @@ def utf8_size(ucs4): cfclass = 
caseFoldClassMap[tokens.pop(0)] # Get list of result characters - lower = list(map(lambda s: int(s,16), tokens.pop(0).split())) + lower = list(map(lambda s: int(s, 16), tokens.pop(0).split())) # print('\t----> {:04X} {} {}'.format(upper, cfclass, lower)) if not minFoldChar: minFoldChar = upper - maxFoldChar = upper; + maxFoldChar = upper if cfclass in [CaseFoldClass.COMMON, CaseFoldClass.FULL]: if len(lower) == 1: @@ -146,18 +152,18 @@ def utf8_size(ucs4): if foldExtends: # This modifies the last fold item in the array too - fold['count'] = upper - fold['upper'] + 1; + fold['count'] = upper - fold['upper'] + 1 else: fold = {} fold['upper'] = upper - fold['offset'] = lower[0] - upper; + fold['offset'] = lower[0] - upper if fold['offset'] == 1: fold['method'] = CaseFoldMethod.EVEN_ODD else: fold['method'] = CaseFoldMethod.RANGE fold['count'] = 1 folds.append(fold) - expand = utf8_size (lower[0]) - utf8_size(upper) + expand = utf8_size(lower[0]) - utf8_size(upper) else: fold = {} fold['upper'] = upper @@ -185,20 +191,14 @@ def utf8_size(ucs4): if args.output_file: sys.stdout = open(args.output_file, 'w', encoding='utf-8') - # Read the template file - if args.template_file: - tmpl_file = open(args.template_file, 'r', encoding='utf-8') - else: - tmpl_file = sys.stdin - - # Scan the input until the marker is found - # FIXME: this is a bit silly really, might just as well harcode - # the license header in the script and drop the template - for line in tmpl_file: - if line.strip() == '@@@': - break - print(line, end='') - + print('/*') + print(' * This file was generated against CaseFolding.txt from' + ' Unicode.org.') + print(' * All the data in the arrays is part of that file and licensed' + ' under its terms of use:') + print(' * https://www.unicode.org/terms_of_use.html') + print(' */') + print('') # Dump these tables print('#define FC_NUM_CASE_FOLD\t{}'.format(len(folds))) print('#define FC_NUM_CASE_FOLD_CHARS\t{}'.format(len(foldChars))) @@ -211,14 +211,14 @@ def utf8_size(ucs4): # Dump out ranges print('static const FcCaseFold fcCaseFold[FC_NUM_CASE_FOLD] = {') for f in folds: - short_offset = f['offset'] - if short_offset < -32367: - short_offset += 65536 - if short_offset > 32368: - short_offset -= 65536 - print(' {} 0x{:08x}, {:22s} 0x{:04x}, {:6d} {},'.format('{', - f['upper'], case_fold_method_name_map[f['method']], - f['count'], short_offset, '}')) + short_offset = f['offset'] + if short_offset < -32367: + short_offset += 65536 + if short_offset > 32368: + short_offset -= 65536 + print(f' {{ 0x{f["upper"]:08x}, ' + f'{case_fold_method_name_map[f["method"]]:22s} ' + f'0x{f["count"]:04x}, {short_offset:6d} }},') print('};\n') # Dump out "other" values @@ -233,8 +233,4 @@ def utf8_size(ucs4): print('0x{:02x}'.format(c), end=end) print('\n};') - # And flush out the rest of the input file - for line in tmpl_file: - print(line, end='') - sys.stdout.flush() diff --git a/fontconfig/fc-cat/fc-cat.sgml b/fontconfig/fc-cat/fc-cat.sgml index d4817e3ab7..de7ff03ec2 100644 --- a/fontconfig/fc-cat/fc-cat.sgml +++ b/fontconfig/fc-cat/fc-cat.sgml @@ -161,8 +161,7 @@ manpage.1: manpage.sgml fc-scan(1) - The fontconfig user's guide, in HTML format: - /usr/share/doc/fontconfig/fontconfig-user.html. 
+ The fontconfig user's guide diff --git a/fontconfig/fc-cat/meson.build b/fontconfig/fc-cat/meson.build index 7e87309990..dd7ee29e48 100644 --- a/fontconfig/fc-cat/meson.build +++ b/fontconfig/fc-cat/meson.build @@ -4,6 +4,6 @@ fccat = executable('fc-cat', ['fc-cat.c', fcstdint_h, alias_headers, ft_alias_he link_with: [libfontconfig], c_args: c_args, install: true, -) + install_tag: 'tools') tools_man_pages += ['fc-cat'] diff --git a/fontconfig/fc-conflist/meson.build b/fontconfig/fc-conflist/meson.build index 87ff4acd82..88208ff8c2 100644 --- a/fontconfig/fc-conflist/meson.build +++ b/fontconfig/fc-conflist/meson.build @@ -4,6 +4,6 @@ fcconflist = executable('fc-conflist', ['fc-conflist.c', fcstdint_h, alias_heade link_with: [libfontconfig], c_args: c_args, install: true, -) + install_tag: 'tools') tools_man_pages += ['fc-conflist'] diff --git a/fontconfig/fc-fontations-bindgen/Cargo.lock b/fontconfig/fc-fontations-bindgen/Cargo.lock new file mode 100644 index 0000000000..98cf25e725 --- /dev/null +++ b/fontconfig/fc-fontations-bindgen/Cargo.lock @@ -0,0 +1,7 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "fc-fontations-bindgen" +version = "0.1.0" diff --git a/fontconfig/fc-fontations-bindgen/Cargo.toml b/fontconfig/fc-fontations-bindgen/Cargo.toml new file mode 100644 index 0000000000..fafeefcfd2 --- /dev/null +++ b/fontconfig/fc-fontations-bindgen/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "fc-fontations-bindgen" +version = "0.1.0" +links = "fontconfig" + +[lib] +path = "../build/fc-fontations/fontconfig.rs" + +[dependencies] +libc = "0.2" diff --git a/fontconfig/fc-fontations-bindgen/build.rs b/fontconfig/fc-fontations-bindgen/build.rs new file mode 100644 index 0000000000..50b703d1e7 --- /dev/null +++ b/fontconfig/fc-fontations-bindgen/build.rs @@ -0,0 +1,34 @@ +use std::path::PathBuf; +use std::process::Command; + +fn main() { + // Directory for the fontconfig build + let build_dir = PathBuf::from("build"); + + // Configure and build fontconfig using meson + let mut meson = Command::new("meson"); + meson.current_dir("../"); + meson.arg("setup") + .arg(build_dir.to_str().unwrap()) + .arg("-Dfontations=enabled"); + + let status = meson.status().expect("Failed to execute meson"); + if !status.success() { + panic!("Meson setup failed"); + } + + let mut ninja = Command::new("ninja"); + ninja.current_dir("../"); + ninja.arg("-C").arg(build_dir.to_str().unwrap()); + let status = ninja.status().expect("Failed to execute ninja"); + if !status.success() { + panic!("Ninja build failed"); + } + + // Tell cargo to look for fontconfig in the build directory + println!("cargo:rustc-link-search=native={}", build_dir.join("lib").display()); + println!("cargo:rustc-link-lib=dylib=fontconfig"); + + // Rerun this build script if the fontconfig source code changes + println!("cargo:rerun-if-changed=src"); +} diff --git a/fontconfig/fc-fontations/meson.build b/fontconfig/fc-fontations/meson.build new file mode 100644 index 0000000000..fd060814e2 --- /dev/null +++ b/fontconfig/fc-fontations/meson.build @@ -0,0 +1,53 @@ +fontations = get_option('fontations') + +if (fontations.enabled()) + rust = import('rust') + + generated_fontconfig = rust.bindgen( + input : '../fontconfig/fontconfig.h', + output : 'fontconfig.rs', + include_directories : [ '../' ], + args : [ + '--merge-extern-blocks', + 
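+ # bindgen's --allowlist-item below limits the generated bindings to the + # matching fontconfig items (plus anything they transitively require).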
'--allowlist-item=(FcCharSet.*|FC_(SLANT|WEIGHT|WIDTH)_.*|FcFontSet(Add|Create|Destroy).*|FcLangSet(Destroy|Copy)|FcWeightFromOpenType.*)', + '--raw-line=#![allow(nonstandard_style,unused)]', + '--raw-line= ', + '--raw-line=pub mod fcint;', + ], + c_args : ['-DBINDGEN_IGNORE_VISIBILITY=1'], + ) + + generated_fcint = rust.bindgen( + input : '../src/fcint.h', + output : 'fcint.rs', + include_directories : [ '../' ], + args : [ + '--merge-extern-blocks', + '--allowlist-item=(FcPattern.*|FcRange.*|FC_.*_OBJECT|FcCharSet.*|FcFreeTypeLangSet)', + '--blocklist-type=(FcCharSet|FcLangSet)', + '--raw-line=#![allow(nonstandard_style,unused)]', + '--raw-line= ', + '--raw-line=pub use FcCharSet; pub use FcLangSet;', + ], + c_args : ['-DBINDGEN_IGNORE_VISIBILITY=1'], + ) + + bindgen_lib = static_library( + 'fc_fontations_bindgen', + sources: [generated_fontconfig, generated_fcint], + rust_abi : 'rust', + ) + + fc_fontations = static_library( + 'fc_fontations', + sources: ['mod.rs'], + link_with: [bindgen_lib, libfontconfig], + rust_abi: 'c', + dependencies: [ + dependency('libc-0.2-rs'), + + ], + + ) + +endif \ No newline at end of file diff --git a/fontconfig/fc-fontations/mod.rs b/fontconfig/fc-fontations/mod.rs new file mode 100644 index 0000000000..a39c814cfb --- /dev/null +++ b/fontconfig/fc-fontations/mod.rs @@ -0,0 +1,44 @@ +extern crate fc_fontations_bindgen; + +use fc_fontations_bindgen::{fcint::FcPatternCreate, FcFontSet, FcFontSetAdd}; + +#[no_mangle] +/// Externally called in fcfontations.c as the file scanner function +/// similar to the job that FreeType performs. +/// +/// # Safety +/// * At this point, the font file path is not dereferenced. +/// * In this initial sanity-check mock call, only one empty pattern +/// is added to the FontSet; the FontSet pointer is null-checked first, +/// so this is sound. 
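+/// * Returns the FcBool result of FcFontSetAdd(): 1 when the pattern was +/// added, 0 when font_set is null or the add fails (the test below +/// asserts on this value).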
+pub unsafe extern "C" fn add_patterns_to_fontset( + _: *const libc::c_char, + font_set: *mut FcFontSet, +) -> libc::c_int { + let empty_pattern = FcPatternCreate(); + if !font_set.is_null() { + FcFontSetAdd( + font_set, + empty_pattern as *mut fc_fontations_bindgen::FcPattern, + ) + } else { + 0 + } +} + +#[cfg(test)] +mod test { + use crate::add_patterns_to_fontset; + use fc_fontations_bindgen::{FcFontSetCreate, FcFontSetDestroy}; + + #[test] + fn basic_pattern_construction() { + unsafe { + let font_set = FcFontSetCreate(); + assert!(add_patterns_to_fontset(std::ptr::null(), font_set) == 1); + FcFontSetDestroy(font_set); + } + } +} diff --git a/fontconfig/fc-lang/Makefile.am b/fontconfig/fc-lang/Makefile.am index bd099b7979..4227fcee53 100644 --- a/fontconfig/fc-lang/Makefile.am +++ b/fontconfig/fc-lang/Makefile.am @@ -315,7 +315,9 @@ ORTH = \ unm.orth \ wae.orth \ yue.orth \ - yuw.orth + yuw.orth \ + got.orth \ + cop.orth # ^-------------- Add new orth files here BUILT_SOURCES += $(top_builddir)/conf.d/35-lang-normalize.conf diff --git a/fontconfig/fc-lang/cop.orth b/fontconfig/fc-lang/cop.orth new file mode 100644 index 0000000000..239402a682 --- /dev/null +++ b/fontconfig/fc-lang/cop.orth @@ -0,0 +1,9 @@ +# Copyright (C) 2024 fontconfig Authors +# SPDX-License-Identifier: HPND +# +# Coptic (cop) +# +# https://en.wikipedia.org/wiki/Coptic_script +03E2-03EF # Greek and Coptic +2C80-2CF3 # Coptic +2CF9-2CFF # Coptic diff --git a/fontconfig/fc-lang/got.orth b/fontconfig/fc-lang/got.orth new file mode 100644 index 0000000000..d3b3abe313 --- /dev/null +++ b/fontconfig/fc-lang/got.orth @@ -0,0 +1,29 @@ +# +# fontconfig/fc-lang/got.orth +# +# Copyright © 2024 Akira TAGOH +# +# Permission to use, copy, modify, distribute, and sell this software and its +# documentation for any purpose is hereby granted without fee, provided that +# the above copyright notice appear in all copies and that both that +# copyright notice and this permission notice appear in supporting +# documentation, and that the name of the author(s) not be used in +# advertising or publicity pertaining to distribution of the software without +# specific, written prior permission. The authors make no +# representations about the suitability of this software for any purpose. It +# is provided "as is" without express or implied warranty. +# +# THE AUTHOR(S) DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +# EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY SPECIAL, INDIRECT OR +# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. 
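+# +# The ranges in this file list the Unicode codepoints that fc-lang treats +# as required coverage for the language.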
+# +# Gothic (got) +# +# See: +# https://en.wikipedia.org/wiki/Gothic_alphabet +# +10330-1034A diff --git a/fontconfig/fc-lang/meson.build b/fontconfig/fc-lang/meson.build index 42e9969e77..265ebdab5d 100644 --- a/fontconfig/fc-lang/meson.build +++ b/fontconfig/fc-lang/meson.build @@ -279,6 +279,8 @@ orth_files = [ 'wae.orth', 'yue.orth', 'yuw.orth', + 'got.orth', + 'cop.orth', ] fclang_h = custom_target('fclang.h', diff --git a/fontconfig/fc-list/fc-list.c b/fontconfig/fc-list/fc-list.c index 54796c857e..5cacd51d3b 100644 --- a/fontconfig/fc-list/fc-list.c +++ b/fontconfig/fc-list/fc-list.c @@ -113,6 +113,7 @@ main (int argc, char **argv) int brief = 0; int quiet = 0; const FcChar8 *format = NULL; + const FcChar8 *format_optarg = NULL; int nfont = 0; int i; FcObjectSet *os = 0; @@ -136,7 +137,7 @@ main (int argc, char **argv) brief = 1; break; case 'f': - format = (FcChar8 *) strdup (optarg); + format_optarg = format = (FcChar8 *) strdup (optarg); break; case 'q': quiet = 1; @@ -218,6 +219,10 @@ main (int argc, char **argv) nfont = fs->nfont; FcFontSetDestroy (fs); } + if (format_optarg) + { + free ((void *)format_optarg); + } FcFini (); diff --git a/fontconfig/fc-list/fc-list.sgml b/fontconfig/fc-list/fc-list.sgml index ceeae904ee..593f0acb52 100644 --- a/fontconfig/fc-list/fc-list.sgml +++ b/fontconfig/fc-list/fc-list.sgml @@ -196,8 +196,7 @@ manpage.1: manpage.sgml fc-scan(1) - The fontconfig user's guide, in HTML format: - /usr/share/doc/fontconfig/fontconfig-user.html. + The fontconfig user's guide diff --git a/fontconfig/fc-list/meson.build b/fontconfig/fc-list/meson.build index e76099212b..1be9df5d64 100644 --- a/fontconfig/fc-list/meson.build +++ b/fontconfig/fc-list/meson.build @@ -4,6 +4,6 @@ fclist = executable('fc-list', ['fc-list.c', fcstdint_h, alias_headers, ft_alias link_with: [libfontconfig], c_args: c_args, install: true, -) + install_tag: 'tools') tools_man_pages += ['fc-list'] diff --git a/fontconfig/fc-match/fc-match.sgml b/fontconfig/fc-match/fc-match.sgml index d64642ba06..faaf26d2b5 100644 --- a/fontconfig/fc-match/fc-match.sgml +++ b/fontconfig/fc-match/fc-match.sgml @@ -235,8 +235,7 @@ output is requested. fc-scan(1) - The fontconfig user's guide, in HTML format: - /usr/share/doc/fontconfig/fontconfig-user.html. + The fontconfig user's guide diff --git a/fontconfig/fc-match/meson.build b/fontconfig/fc-match/meson.build index 41367c4af4..1aa1b1b5da 100644 --- a/fontconfig/fc-match/meson.build +++ b/fontconfig/fc-match/meson.build @@ -4,6 +4,6 @@ fcmatch = executable('fc-match', ['fc-match.c', fcstdint_h, alias_headers, ft_al link_with: [libfontconfig], c_args: c_args, install: true, -) + install_tag: 'tools') tools_man_pages += ['fc-match'] diff --git a/fontconfig/fc-pattern/fc-pattern.sgml b/fontconfig/fc-pattern/fc-pattern.sgml index d616ddaf93..81d9928f09 100644 --- a/fontconfig/fc-pattern/fc-pattern.sgml +++ b/fontconfig/fc-pattern/fc-pattern.sgml @@ -174,8 +174,7 @@ pattern before being displayed. fc-scan(1) - The fontconfig user's guide, in HTML format: - /usr/share/doc/fontconfig/fontconfig-user.html. 
+ The fontconfig user's guide diff --git a/fontconfig/fc-pattern/meson.build b/fontconfig/fc-pattern/meson.build index a2dac1639e..d9b048cbe0 100644 --- a/fontconfig/fc-pattern/meson.build +++ b/fontconfig/fc-pattern/meson.build @@ -4,6 +4,6 @@ fcpattern = executable('fc-pattern', ['fc-pattern.c', fcstdint_h, alias_headers, link_with: [libfontconfig], c_args: c_args, install: true, -) + install_tag: 'tools') tools_man_pages += ['fc-pattern'] diff --git a/fontconfig/fc-query/fc-query.c b/fontconfig/fc-query/fc-query.c index 74841b0e40..6e4774a190 100644 --- a/fontconfig/fc-query/fc-query.c +++ b/fontconfig/fc-query/fc-query.c @@ -193,6 +193,8 @@ main (int argc, char **argv) } FcFontSetDestroy (fs); + if (format) + free (format); FcFini (); return err; diff --git a/fontconfig/fc-query/fc-query.sgml b/fontconfig/fc-query/fc-query.sgml index 40fb821ca2..034c4aa639 100644 --- a/fontconfig/fc-query/fc-query.sgml +++ b/fontconfig/fc-query/fc-query.sgml @@ -174,8 +174,7 @@ manpage.1: manpage.sgml fc-pattern(1) - The fontconfig user's guide, in HTML format: - /usr/share/doc/fontconfig/fontconfig-user.html. + The fontconfig user's guide diff --git a/fontconfig/fc-query/meson.build b/fontconfig/fc-query/meson.build index 2e6e53854b..629bc71ec4 100644 --- a/fontconfig/fc-query/meson.build +++ b/fontconfig/fc-query/meson.build @@ -4,6 +4,6 @@ fcquery = executable('fc-query', ['fc-query.c', fcstdint_h, alias_headers, ft_al link_with: [libfontconfig], c_args: c_args, install: true, -) + install_tag: 'tools') tools_man_pages += ['fc-query'] diff --git a/fontconfig/fc-scan/fc-scan.c b/fontconfig/fc-scan/fc-scan.c index dca1cd5409..df9638bb2d 100644 --- a/fontconfig/fc-scan/fc-scan.c +++ b/fontconfig/fc-scan/fc-scan.c @@ -207,6 +207,8 @@ main (int argc, char **argv) } FcFontSetDestroy (fs); + if (format) + free (format); FcFini (); return i > 0 ? 0 : 1; diff --git a/fontconfig/fc-scan/fc-scan.sgml b/fontconfig/fc-scan/fc-scan.sgml index 97bd31c3d5..d893e75cea 100644 --- a/fontconfig/fc-scan/fc-scan.sgml +++ b/fontconfig/fc-scan/fc-scan.sgml @@ -147,8 +147,7 @@ manpage.1: manpage.sgml fc-pattern(1) - The fontconfig user's guide, in HTML format: - /usr/share/doc/fontconfig/fontconfig-user.html. 
+ The fontconfig user's guide diff --git a/fontconfig/fc-scan/meson.build b/fontconfig/fc-scan/meson.build index 1f6a9ac994..9049578fe5 100644 --- a/fontconfig/fc-scan/meson.build +++ b/fontconfig/fc-scan/meson.build @@ -4,6 +4,6 @@ fcscan = executable('fc-scan', ['fc-scan.c', fcstdint_h, alias_headers, ft_alias link_with: [libfontconfig], c_args: c_args, install: true, -) + install_tag: 'tools') tools_man_pages += ['fc-scan'] diff --git a/fontconfig/fc-validate/meson.build b/fontconfig/fc-validate/meson.build index bcb8d147d4..fcabf29488 100644 --- a/fontconfig/fc-validate/meson.build +++ b/fontconfig/fc-validate/meson.build @@ -4,6 +4,6 @@ fcvalidate = executable('fc-validate', ['fc-validate.c', fcstdint_h, alias_heade link_with: [libfontconfig], c_args: c_args, install: true, -) + install_tag: 'tools') tools_man_pages += ['fc-validate'] diff --git a/fontconfig/fontconfig/fontconfig.h b/fontconfig/fontconfig/fontconfig.h index 47d7b830a3..a3a7c5f65d 100644 --- a/fontconfig/fontconfig/fontconfig.h +++ b/fontconfig/fontconfig/fontconfig.h @@ -32,8 +32,10 @@ #if defined(__GNUC__) && (__GNUC__ >= 4) #define FC_ATTRIBUTE_SENTINEL(x) __attribute__((__sentinel__(0))) +#define FC_ATTRIBUTE_MAY_ALIAS __attribute__((may_alias)) #else #define FC_ATTRIBUTE_SENTINEL(x) +#define FC_ATTRIBUTE_MAY_ALIAS #endif #ifndef FcPublic @@ -52,7 +54,7 @@ typedef int FcBool; */ #define FC_MAJOR 2 -#define FC_MINOR 15 +#define FC_MINOR 16 #define FC_REVISION 0 #define FC_VERSION ((FC_MAJOR * 10000) + (FC_MINOR * 100) + (FC_REVISION)) @@ -253,7 +255,7 @@ typedef enum _FcValueBinding { typedef struct _FcPattern FcPattern; -typedef struct _FcPatternIter { +typedef struct FC_ATTRIBUTE_MAY_ALIAS _FcPatternIter { void *dummy1; void *dummy2; } FcPatternIter; @@ -337,6 +339,9 @@ typedef struct _FcStrSet FcStrSet; typedef struct _FcCache FcCache; +typedef void (* FcDestroyFunc) (void *data); +typedef FcBool (* FcFilterFontSetFunc) (const FcPattern *font, void *user_data); + _FCFUNCPROTOBEGIN /* fcblanks.c */ @@ -457,6 +462,10 @@ FcPublic FcBool FcConfigAcceptFont (FcConfig *config, const FcPattern *font); +FcPublic FcBool +FcConfigAcceptFilter (FcConfig *config, + const FcPattern *font); + FcPublic FcBool FcConfigAppFontAddFile (FcConfig *config, const FcChar8 *file); @@ -486,6 +495,12 @@ FcPublic void FcConfigSetSysRoot (FcConfig *config, const FcChar8 *sysroot); +FcPublic FcConfig * +FcConfigSetFontSetFilter (FcConfig *config, + FcFilterFontSetFunc filter_func, + FcDestroyFunc destroy_data_func, + void *user_data); + FcPublic void FcConfigFileInfoIterInit (FcConfig *config, FcConfigFileInfoIter *iter); @@ -1147,6 +1162,7 @@ FcConfigParseAndLoadFromMemory (FcConfig *config, _FCFUNCPROTOEND #undef FC_ATTRIBUTE_SENTINEL +#undef FC_ATTRIBUTE_MAY_ALIAS #ifndef _FCINT_H_ diff --git a/fontconfig/its/meson.build b/fontconfig/its/meson.build index 4153b1a569..cb9f68ef84 100644 --- a/fontconfig/its/meson.build +++ b/fontconfig/its/meson.build @@ -3,4 +3,6 @@ gettext_files = [ 'fontconfig.loc', ] -install_data(gettext_files, install_dir: join_paths(get_option('datadir'), 'gettext/its')) +install_data(gettext_files, + install_dir: join_paths(get_option('datadir'), 'gettext/its'), + install_tag: 'devel') diff --git a/fontconfig/meson.build b/fontconfig/meson.build index 44ed8cb830..4439758d09 100644 --- a/fontconfig/meson.build +++ b/fontconfig/meson.build @@ -1,7 +1,7 @@ project('fontconfig', 'c', - version: '2.15.0', - meson_version : '>= 1.3.0', - default_options: [ 'buildtype=debugoptimized'], + version: '2.16.0', + meson_version 
: '>= 1.6.0', + default_options: [ 'buildtype=debugoptimized', ] ) fs = import('fs') @@ -73,8 +73,15 @@ if xmltype == '' xmltype = xml_dep.name() endif +fontations = get_option('fontations') +if (fontations.enabled()) + conf.set('ENABLE_FONTATIONS', 1) + add_languages(['rust'], native: false, required : true) +endif + pkgmod = import('pkgconfig') python3 = import('python').find_installation() +pytest = find_program('pytest', required: false) check_headers = [ ['dirent.h'], @@ -316,6 +323,8 @@ if default_fonts_dirs == ['yes'] fc_fonts_paths = ['WINDOWSFONTDIR', 'WINDOWSUSERFONTDIR'] elif host_machine.system() == 'darwin' fc_fonts_paths = ['/System/Library/Fonts', '/Library/Fonts', '~/Library/Fonts', '/System/Library/Assets/com_apple_MobileAsset_Font3', '/System/Library/Assets/com_apple_MobileAsset_Font4'] + elif host_machine.system() == 'android' + fc_fonts_paths = ['/system/fonts/', '/product/fonts/'] else fc_fonts_paths = ['/usr/share/fonts', '/usr/local/share/fonts'] endif @@ -482,6 +491,10 @@ subdir('fc-case') subdir('fc-lang') subdir('src') +if get_option('fontations').enabled() + subdir('fc-fontations') +endif + if not get_option('tools').disabled() subdir('fc-cache') subdir('fc-cat') @@ -517,11 +530,12 @@ configure_file(output: 'fonts.conf', input: 'fonts.conf.in', configuration: fonts_conf, install_dir: fc_baseconfigdir, - install: true) + install: true, + install_tag: 'runtime') install_data('fonts.dtd', - install_dir: join_paths(get_option('prefix'), get_option('datadir'), 'xml/fontconfig') - ) + install_dir: join_paths(get_option('prefix'), get_option('datadir'), 'xml/fontconfig'), + install_tag: 'runtime') fc_headers = [ 'fontconfig/fontconfig.h', @@ -531,6 +545,11 @@ fc_headers = [ install_headers(fc_headers, subdir: meson.project_name()) +if not meson.is_subproject() + meson.add_dist_script('build-aux/meson-dist-docs.py') + meson.add_dist_script('build-aux/meson-dist-autotools.py') +endif + # Summary doc_targets = get_variable('doc_targets', []) @@ -541,6 +560,7 @@ summary({ 'Tools': not get_option('tools').disabled(), 'iconv': found_iconv == 1, 'XML backend': xmltype, + 'Fontations support' : fontations }, section: 'General', bool_yn: true, list_sep: ', ') summary({ 'Hinting': preferred_hinting, diff --git a/fontconfig/meson_options.txt b/fontconfig/meson_options.txt index 49c6dad44c..d148621cf9 100644 --- a/fontconfig/meson_options.txt +++ b/fontconfig/meson_options.txt @@ -16,6 +16,8 @@ option('cache-build', type : 'feature', value : 'enabled', option('iconv', type: 'feature', value: 'disabled') option('xml-backend', type: 'combo', choices: ['auto', 'expat', 'libxml2'], value: 'auto', description: 'Select xml backend to read config') +option('fontations', type: 'feature', value: 'disabled', + description: 'Use Fontations (https://github.com/googlefonts/fontations) for indexing.') # Defaults option('default-hinting', type: 'combo', choices: ['none', 'slight', 'medium', 'full'], value: 'slight', diff --git a/fontconfig/src/fccache.c b/fontconfig/src/fccache.c index c0fed03543..3cc1a8dde7 100644 --- a/fontconfig/src/fccache.c +++ b/fontconfig/src/fccache.c @@ -259,8 +259,7 @@ FcDirCacheBasenameMD5 (FcConfig *config, const FcChar8 *dir, FcChar8 cache_base[ if (key) FcStrFree (key); - cache_base[0] = '/'; - hex_hash = cache_base + 1; + hex_hash = cache_base; for (cnt = 0; cnt < 16; ++cnt) { hex_hash[2*cnt ] = bin2hex[hash[cnt] >> 4]; @@ -652,7 +651,10 @@ FcCacheInsert (FcCache *cache, struct stat *cache_stat) s = malloc (sizeof (FcCacheSkip) + (level - 1) * sizeof (FcCacheSkip 
*)); if (!s) + { + unlock_cache (); return FcFalse; + } s->cache = cache; s->size = cache->size; diff --git a/fontconfig/src/fccfg.c b/fontconfig/src/fccfg.c index 0b6da8433f..9a0182a23d 100644 --- a/fontconfig/src/fccfg.c +++ b/fontconfig/src/fccfg.c @@ -25,6 +25,7 @@ /* Objects MT-safe for readonly access. */ #include "fcint.h" +#include "fontconfig/fontconfig.h" #ifdef HAVE_DIRENT_H #include <dirent.h> #endif @@ -202,6 +203,10 @@ FcConfigCreate (void) if (!config->availConfigFiles) goto bail10; + config->filter_func = NULL; + config->filter_data = NULL; + config->destroy_data_func = NULL; + FcRefInit (&config->ref, 1); return config; @@ -331,9 +336,9 @@ FcConfigReference (FcConfig *config) unlock_config (); config = FcInitLoadConfigAndFonts (); + lock_config (); if (!config) goto retry; - lock_config (); if (!fc_atomic_ptr_cmpexch (&_fcConfig, NULL, config)) { FcConfigDestroy (config); @@ -390,6 +395,9 @@ FcConfigDestroy (FcConfig *config) if (config->sysRoot) FcStrFree (config->sysRoot); + if (config->filter_data && config->destroy_data_func) + config->destroy_data_func (config->filter_data); + free (config); } } @@ -453,10 +461,18 @@ FcConfigAddCache (FcConfig *config, FcCache *cache, continue; } + /* + * Check to see if font is banned by client + */ + if (!FcConfigAcceptFilter (config, font)) + { + free (relocated_font_file); + continue; + } if (relocated_font_file) { - font = FcPatternCacheRewriteFile (font, cache, relocated_font_file); - free (relocated_font_file); + font = FcPatternCacheRewriteFile (font, cache, relocated_font_file); + free (relocated_font_file); } if (FcFontSetAdd (config->fonts[set], font)) @@ -811,6 +827,63 @@ FcConfigSetFonts (FcConfig *config, config->fonts[set] = fonts; } +FcConfig * +FcConfigSetFontSetFilter (FcConfig *config, + FcFilterFontSetFunc filter_func, + FcDestroyFunc destroy_data_func, + void *user_data) +{ + FcBool rebuild = FcFalse; + + if (!config) + { + /* Do not use FcConfigEnsure() here for optimization */ + retry: + config = fc_atomic_ptr_get (&_fcConfig); + if (!config) + config = FcConfigCreate (); + else + rebuild = FcTrue; + } + else + rebuild = FcTrue; + if (config->filter_data == user_data && + config->filter_func == filter_func) + { + /* No need to update */ + rebuild = FcFalse; + } + else + { + if (config->filter_data && config->destroy_data_func) + { + config->destroy_data_func (config->filter_data); + } + config->filter_func = filter_func; + config->destroy_data_func = destroy_data_func; + config->filter_data = user_data; + } + + if (rebuild) + { + /* Rebuild FontSet */ + FcConfigBuildFonts (config); + } + else + { + /* Initialize FcConfig with regular procedure */ + config = FcInitLoadOwnConfigAndFonts (config); + + if (!config || !fc_atomic_ptr_cmpexch (&_fcConfig, NULL, config)) + { + if (config) + FcConfigDestroy (config); + goto retry; + } + } + + return config; +} FcBlanks * FcBlanksCreate (void) @@ -2918,12 +2991,38 @@ FcConfigGlobAdd (FcConfig *config, FcBool accept) { FcStrSet *set = accept ? config->acceptGlobs : config->rejectGlobs; - FcChar8 *realglob = FcStrCopyFilename(glob); - if (!realglob) - return FcFalse; + FcChar8 *realglob = FcStrCopyFilename(glob); + FcChar8 *cwd = FcStrCopyFilename((const FcChar8 *) "."); + const FcChar8 *s; + FcBool ret; + size_t len = 0; - FcBool ret = FcStrSetAdd (set, realglob); + /* + * FcStrCopyFilename canonicalizes a path string and prepends + * the current directory name when the string contains no path. + * That isn't the desired behavior here, + * so drop the extra path prefix when present. 
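+ * For example, with a current working directory of "/home/user", a glob + * "*.ttf" would come back from FcStrCopyFilename as "/home/user/*.ttf" + * and is restored to "*.ttf" below.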
Otherwise use it as it is. + */ + if (cwd == NULL) + s = glob; + else + { + len = strlen((const char *) cwd); + /* No need to use FC_DIR_SEPARATOR because '\\' will be + * replaced with / by FcConvertDosPath in FcStrCanonFilename + */ + if (strncmp((const char *) cwd, (const char *) realglob, len) == 0 && + realglob[len] == '/') + s = &realglob[len + 1]; + else + s = realglob; + } + if (!s) + return FcFalse; + + ret = FcStrSetAdd (set, s); FcStrFree(realglob); + FcStrFree(cwd); return ret; } @@ -2987,6 +3086,16 @@ FcConfigAcceptFont (FcConfig *config, return FcTrue; } +FcBool +FcConfigAcceptFilter (FcConfig *config, + const FcPattern *font) +{ + if (config && config->filter_func) + { + return config->filter_func (font, config->filter_data); + } + return FcTrue; +} const FcChar8 * FcConfigGetSysRoot (const FcConfig *config) { diff --git a/fontconfig/src/fcdefault.c b/fontconfig/src/fcdefault.c index d00c494e8e..87249f029b 100644 --- a/fontconfig/src/fcdefault.c +++ b/fontconfig/src/fcdefault.c @@ -338,7 +338,10 @@ FcDefaultSubstitute (FcPattern *pattern) else { FcPatternIterGetValue(pattern, &iter, 0, &v, NULL); - size = v.u.d; + if (v.type == FcTypeInteger) + size = (double)v.u.i; + else + size = v.u.d; size = size / dpi * 72.0 / scale; } (void) FcPatternObjectDel (pattern, FC_SIZE_OBJECT); diff --git a/fontconfig/src/fcdir.c b/fontconfig/src/fcdir.c index 2e4fdc69d4..cb2ff42f67 100644 --- a/fontconfig/src/fcdir.c +++ b/fontconfig/src/fcdir.c @@ -265,7 +265,8 @@ FcDirScanConfig (FcFontSet *set, /* * Sort files to make things prettier */ - qsort(files->strs, files->num, sizeof(FcChar8 *), cmpstringp); + if (files->num) + qsort(files->strs, files->num, sizeof(FcChar8 *), cmpstringp); /* * Scan file files to build font patterns diff --git a/fontconfig/src/fcfreetype.c b/fontconfig/src/fcfreetype.c index af014d10ec..3c3cab787b 100644 --- a/fontconfig/src/fcfreetype.c +++ b/fontconfig/src/fcfreetype.c @@ -32,6 +32,7 @@ #include FT_FREETYPE_H #include FT_ADVANCES_H #include FT_TRUETYPE_TABLES_H +#include FT_TRUETYPE_TAGS_H #include FT_SFNT_NAMES_H #include FT_TRUETYPE_IDS_H #include FT_TYPE1_TABLES_H @@ -47,36 +48,6 @@ #include "fcfoundry.h" #include "ftglue.h" -/* - * Keep Han languages separated by eliminating languages - * that the codePageRange bits says aren't supported - */ - -static const struct { - char bit; - const FcChar8 lang[6]; -} FcCodePageRange[] = { - { 17, "ja" }, - { 18, "zh-cn" }, - { 19, "ko" }, - { 20, "zh-tw" }, -}; - -#define NUM_CODE_PAGE_RANGE (int) (sizeof FcCodePageRange / sizeof FcCodePageRange[0]) - -FcBool -FcFreeTypeIsExclusiveLang (const FcChar8 *lang) -{ - int i; - - for (i = 0; i < NUM_CODE_PAGE_RANGE; i++) - { - if (FcLangCompare (lang, FcCodePageRange[i].lang) == FcLangEqual) - return FcTrue; - } - return FcFalse; -} - typedef struct { const FT_UShort platform_id; const FT_UShort encoding_id; @@ -1771,6 +1742,9 @@ FcFreeTypeQueryFaceInternal (const FT_Face face, { char psname[256]; const char *tmp; + + if (instance) + FT_Set_Named_Instance (face, id >> 16); tmp = FT_Get_Postscript_Name (face); if (!tmp) { @@ -1855,36 +1829,7 @@ FcFreeTypeQueryFaceInternal (const FT_Face face, if (os2 && os2->version >= 0x0001 && os2->version != 0xffff) { - unsigned int i; - for (i = 0; i < NUM_CODE_PAGE_RANGE; i++) - { - FT_ULong bits; - int bit; - if (FcCodePageRange[i].bit < 32) - { - bits = os2->ulCodePageRange1; - bit = FcCodePageRange[i].bit; - } - else - { - bits = os2->ulCodePageRange2; - bit = FcCodePageRange[i].bit - 32; - } - if (bits & (1U << bit)) - { - /* - * If 
the font advertises support for multiple - * "exclusive" languages, then include support - * for any language found to have coverage - */ - if (exclusiveLang) - { - exclusiveLang = 0; - break; - } - exclusiveLang = FcCodePageRange[i].lang; - } - } + exclusiveLang = FcLangIsExclusiveFromOs2(os2->ulCodePageRange1, os2->ulCodePageRange2); } if (os2 && os2->version != 0xffff) @@ -2618,15 +2563,12 @@ FcFreeTypeCharSet (FT_Face face, FcBlanks *blanks FC_UNUSED) #endif for (o = 0; o < NUM_DECODE; o++) { - FcChar32 page, off, ucs4; - FcCharLeaf *leaf; + FcChar32 ucs4; FT_UInt glyph; if (FT_Select_Charmap (face, fcFontEncodings[o]) != 0) continue; - page = ~0; - leaf = NULL; ucs4 = FT_Get_First_Char (face, &glyph); while (glyph != 0) { @@ -2643,18 +2585,7 @@ FcFreeTypeCharSet (FT_Face face, FcBlanks *blanks FC_UNUSED) } if (good) - { FcCharSetAddChar (fcs, ucs4); - if ((ucs4 >> 8) != page) - { - page = (ucs4 >> 8); - leaf = FcCharSetFindLeafCreate (fcs, ucs4); - if (!leaf) - goto bail; - } - off = ucs4 & 0xff; - leaf->map[off >> 5] |= (1U << (off & 0x1f)); - } ucs4 = FT_Get_Next_Char (face, ucs4, &glyph); } @@ -2708,10 +2639,8 @@ FcFreeTypeCharSetAndSpacing (FT_Face face, FcBlanks *blanks FC_UNUSED, int *spac } -#define TTAG_GPOS FT_MAKE_TAG( 'G', 'P', 'O', 'S' ) -#define TTAG_GSUB FT_MAKE_TAG( 'G', 'S', 'U', 'B' ) +/* Graphite Rules Table */ #define TTAG_SILF FT_MAKE_TAG( 'S', 'i', 'l', 'f') -#define TTAG_prep FT_MAKE_TAG( 'p', 'r', 'e', 'p' ) #define OTLAYOUT_HEAD "otlayout:" #define OTLAYOUT_HEAD_LEN 9 @@ -2762,7 +2691,7 @@ compareulong (const void *a, const void *b) } static FcBool -FindTable (FT_Face face, FT_ULong tabletag) +FindTable (FT_Face face, FT_ULong tabletag, FT_ULong *tablesize) { FT_Stream stream = face->stream; FT_Error error; @@ -2770,7 +2699,7 @@ FindTable (FT_Face face, FT_ULong tabletag) if (!stream) return FcFalse; - if (( error = ftglue_face_goto_table( face, tabletag, stream ) )) + if (( error = ftglue_face_goto_table( face, tabletag, stream, tablesize ) )) return FcFalse; return FcTrue; @@ -2788,7 +2717,7 @@ GetScriptTags(FT_Face face, FT_ULong tabletag, FT_ULong **stags) if (!stream) return 0; - if (( error = ftglue_face_goto_table( face, tabletag, stream, NULL ) )) + if (( error = ftglue_face_goto_table( face, tabletag, stream, NULL ) )) return 0; base_offset = ftglue_stream_pos ( stream ); @@ -2911,9 +2840,29 @@ FcFontCapabilities(FT_Face face) } static FcBool -FcFontHasHint (FT_Face face) +FcFontHasHint(FT_Face face) { - return FindTable (face, TTAG_prep); + FT_ULong size; + + /* Workaround for gftools fix-nonhinting. + * See https://gitlab.freedesktop.org/fontconfig/fontconfig/-/issues/426 + */ + if (FcDebug() & FC_DBG_SCANV) + { + FT_ULong ret = 0; /* prep table size; stays 0 when the table is absent */ + + fprintf(stderr, "*** Has hint:\n"); + fprintf(stderr, " fpgm table: %s\n", + FindTable(face, TTAG_fpgm, NULL) ? "True" : "False"); + fprintf(stderr, " cvt table: %s\n", + FindTable(face, TTAG_cvt, NULL) ? "True" : "False"); + fprintf(stderr, " prep table: %s\n", + FindTable(face, TTAG_prep, &ret) ? "True" : "False"); + fprintf(stderr, " prep size: %lu\n", ret); + } + return FindTable(face, TTAG_fpgm, NULL) || + FindTable(face, TTAG_cvt, NULL) || + (FindTable (face, TTAG_prep, &size) && size > 7); } diff --git a/fontconfig/src/fcftint.h b/fontconfig/src/fcftint.h index a317370e1f..7396a1f2cf 100644 --- a/fontconfig/src/fcftint.h +++ b/fontconfig/src/fcftint.h @@ -36,9 +36,6 @@ #endif /* fcfreetype.c */ -FcPrivate FcBool -FcFreeTypeIsExclusiveLang (const FcChar8 *lang); - FcPrivate FcBool FcFreeTypeHasLang (FcPattern *pattern, const FcChar8 *lang); diff --git a/fontconfig/src/fcinit.c b/fontconfig/src/fcinit.c index 00ae205f45..1b099c6d39 100644 --- a/fontconfig/src/fcinit.c +++ b/fontconfig/src/fcinit.c @@ -66,6 +66,26 @@ FcInitFallbackConfig (const FcChar8 *sysroot) return 0; } +static FcConfig * +FcInitFallbackConfigWithFilter (FcConfig *config, const FcChar8 *sysroot) +{ + FcConfig *fallback = FcInitFallbackConfig (sysroot); + + /* Copy filter data */ + fallback->filter_func = config->filter_func; + fallback->filter_data = config->filter_data; + fallback->destroy_data_func = config->destroy_data_func; + config->filter_func = NULL; + config->filter_data = NULL; + config->destroy_data_func = NULL; + /* Rebuild fontset */ + FcConfigBuildFonts (fallback); + + FcConfigDestroy (config); + + return fallback; +} + int FcGetVersion (void) { @@ -90,9 +110,7 @@ FcInitLoadOwnConfig (FcConfig *config) if (!FcConfigParseAndLoad (config, 0, FcTrue)) { const FcChar8 *sysroot = FcConfigGetSysRoot (config); - FcConfig *fallback = FcInitFallbackConfig (sysroot); - - FcConfigDestroy (config); + FcConfig *fallback = FcInitFallbackConfigWithFilter (config, sysroot); return fallback; } @@ -145,8 +163,7 @@ FcInitLoadOwnConfig (FcConfig *config) "Fontconfig error: out of memory"); if (prefix) FcStrFree (prefix); - fallback = FcInitFallbackConfig (sysroot); - FcConfigDestroy (config); + fallback = FcInitFallbackConfigWithFilter (config, sysroot); return fallback; } diff --git a/fontconfig/src/fcint.h b/fontconfig/src/fcint.h index 86676b3e57..db3fc3e2f2 100644 --- a/fontconfig/src/fcint.h +++ b/fontconfig/src/fcint.h @@ -328,8 +328,6 @@ typedef struct _FcEdit { FcValueBinding binding; } FcEdit; -typedef void (* FcDestroyFunc) (void *data); - typedef struct _FcPtrList FcPtrList; /* need to sync with FcConfigFileInfoIter at fontconfig.h */ typedef struct _FcPtrListIter { @@ -580,6 +578,10 @@ struct _FcConfig { FcChar8 *sysRoot; /* override the system root directory */ FcStrSet *availConfigFiles; /* config files available */ FcPtrList *rulesetList; /* List of rulesets being installed */ + + FcFilterFontSetFunc filter_func; /* A predicate function to filter out config->fonts */ + FcDestroyFunc destroy_data_func; /* A callback function to destroy config->filter_data */ + void *filter_data; /* User data passed to filter_func */ }; typedef struct _FcFileTime { @@ -797,6 +799,12 @@ FcCharSetPromote (FcValuePromotionBuffer *vbuf); FcPrivate void FcLangCharSetPopulate (void); +FcPrivate FcBool +FcLangIsExclusive (const FcChar8 *lang); + +FcPrivate const FcChar8* +FcLangIsExclusiveFromOs2 (unsigned long os2ulUnicodeRange1, unsigned long os2ulUnicodeRange2); + FcPrivate FcCharSetFreezer * FcCharSetFreezerCreate (void); diff --git a/fontconfig/src/fclang.c b/fontconfig/src/fclang.c index bca343db38..f01d037be1 100644 --- a/fontconfig/src/fclang.c +++ b/fontconfig/src/fclang.c @@ -39,6 +39,23 @@ typedef struct { #include "fclang.h" +/* + * Keep Han languages separated by eliminating languages + * that the 
codePageRange bits says aren't supported + */ + +static const struct { + char bit; + const FcChar8 lang[6]; +} FcCodePageRange[] = { + { 17, "ja" }, + { 18, "zh-cn" }, + { 19, "ko" }, + { 20, "zh-tw" }, +}; + +#define NUM_CODE_PAGE_RANGE (int) (sizeof FcCodePageRange / sizeof FcCodePageRange[0]) + struct _FcLangSet { FcStrSet *extra; FcChar32 map_size; @@ -125,7 +142,7 @@ FcFreeTypeLangSet (const FcCharSet *charset, * not support other Han languages */ if (exclusiveCharset && - FcFreeTypeIsExclusiveLang (fcLangCharSets[i].lang)) + FcLangIsExclusive (fcLangCharSets[i].lang)) { if (fcLangCharSets[i].charset.num != exclusiveCharset->num) continue; @@ -1104,6 +1121,58 @@ FcLangSetSubtract (const FcLangSet *a, const FcLangSet *b) return FcLangSetOperate(a, b, FcLangSetDel); } +FcBool +FcLangIsExclusive (const FcChar8 *lang) +{ + int i; + + for (i = 0; i < NUM_CODE_PAGE_RANGE; i++) + { + if (FcLangCompare (lang, FcCodePageRange[i].lang) == FcLangEqual) + return FcTrue; + } + return FcFalse; +} + +const FcChar8 * +FcLangIsExclusiveFromOs2(unsigned long os2ulUnicodeRange1, unsigned long os2ulUnicodeRange2) +{ + unsigned int i; + const FcChar8* exclusiveLang = 0; + + for (i = 0; i < NUM_CODE_PAGE_RANGE; i++) + { + unsigned long bits; + int bit; + if (FcCodePageRange[i].bit < 32) + { + bits = os2ulUnicodeRange1; + bit = FcCodePageRange[i].bit; + } + else + { + bits = os2ulUnicodeRange2; + bit = FcCodePageRange[i].bit - 32; + } + if (bits & (1U << bit)) + { + /* + * If the font advertises support for multiple + * "exclusive" languages, then include support + * for any language found to have coverage + */ + if (exclusiveLang) + { + exclusiveLang = 0; + break; + } + exclusiveLang = FcCodePageRange[i].lang; + } + } + return exclusiveLang; +} + + #define __fclang__ #include "fcaliastail.h" #include "fcftaliastail.h" diff --git a/fontconfig/src/fcmatch.c b/fontconfig/src/fcmatch.c index 27074d4f7d..521a8f2257 100644 --- a/fontconfig/src/fcmatch.c +++ b/fontconfig/src/fcmatch.c @@ -94,7 +94,7 @@ FcComparePostScript (const FcValue *v1, const FcValue *v2, FcValue *bestValue) *v1_string != ' ' && *v2_string != ' ') return 1.0; - n = FcStrMatchIgnoreCaseAndDelims (v1_string, v2_string, (const FcChar8 *)" -"); + n = FcStrMatchIgnoreCaseAndDelims (v1_string, v2_string, (const FcChar8 *)" -,"); len1 = strlen ((const char *)v1_string); len2 = strlen ((const char *)v2_string); mlen = FC_MAX (len1, len2); diff --git a/fontconfig/src/fcname.c b/fontconfig/src/fcname.c index 566f0efb8a..b152bd9e47 100644 --- a/fontconfig/src/fcname.c +++ b/fontconfig/src/fcname.c @@ -406,6 +406,8 @@ FcNameConvert (FcType type, const char *object, FcChar8 *string) v.u.r = FcRangeCreateDouble (b, e); break; default: + /* No valid type to convert */ + v.type = FcTypeVoid; break; } return v; diff --git a/fontconfig/src/fcserialize.c b/fontconfig/src/fcserialize.c index 2388dcd8a6..18c969305b 100644 --- a/fontconfig/src/fcserialize.c +++ b/fontconfig/src/fcserialize.c @@ -163,7 +163,7 @@ FcSerializeResize (FcSerialize *serialize, size_t new_count) size_t old_used = serialize->buckets_used; size_t old_count = serialize->buckets_count; FcSerializeBucket *old_buckets = serialize->buckets; - FcSerializeBucket *old_buckets_end = old_buckets + old_count; + FcSerializeBucket *old_buckets_end = old_buckets ? 
old_buckets + old_count : NULL; FcSerializeBucket *new_buckets = malloc (new_count * sizeof (*old_buckets)); if (!new_buckets) diff --git a/fontconfig/src/fcxml.c b/fontconfig/src/fcxml.c index 9fe06748db..083fc1f26b 100644 --- a/fontconfig/src/fcxml.c +++ b/fontconfig/src/fcxml.c @@ -1327,11 +1327,9 @@ _get_real_paths_from_prefix(FcConfigParse *parse, const FcChar8 *path, const FcC if (!p) return NULL; parent = FcStrDirname (p); + FcStrFree (p); if (!parent) - { - free (p); return NULL; - } } } #ifndef _WIN32 diff --git a/fontconfig/src/ftglue.c b/fontconfig/src/ftglue.c index 7490a8c0b1..f6ad79cea1 100644 --- a/fontconfig/src/ftglue.c +++ b/fontconfig/src/ftglue.c @@ -166,7 +166,8 @@ ftglue_stream_frame_exit( FT_Stream stream ) FTGLUE_APIDEF( FT_Error ) ftglue_face_goto_table( FT_Face face, FT_ULong the_tag, - FT_Stream stream ) + FT_Stream stream, + FT_ULong *table_size ) { FT_Error error; @@ -238,6 +239,8 @@ ftglue_face_goto_table( FT_Face face, { LOG(( "TrueType table (start: %ld) (size: %ld)\n", start, size )); error = ftglue_stream_seek( stream, start ); + if (table_size) + *table_size = size; goto FoundIt; } } diff --git a/fontconfig/src/ftglue.h b/fontconfig/src/ftglue.h index 650ee283ac..53992498f3 100644 --- a/fontconfig/src/ftglue.h +++ b/fontconfig/src/ftglue.h @@ -104,7 +104,8 @@ ftglue_stream_frame_exit( FT_Stream stream ); FTGLUE_API( FT_Error ) ftglue_face_goto_table( FT_Face face, FT_ULong tag, - FT_Stream stream ); + FT_Stream stream, + FT_ULong *table_size ); FT_END_HEADER diff --git a/fontconfig/src/meson.build b/fontconfig/src/meson.build index 6538ce4715..3c2950fd0a 100644 --- a/fontconfig/src/meson.build +++ b/fontconfig/src/meson.build @@ -52,7 +52,7 @@ if host_machine.system() == 'windows' and get_option('default_library') in ['bot endif libfontconfig = library('fontconfig', - fc_sources, alias_headers, ft_alias_headers, fclang_h, fccase_h, fcobjshash_h, + fc_sources, alias_headers, ft_alias_headers, fclang_h, fccase_h, fcobjshash_h, fcstdint_h, c_shared_args: fc_c_shared_args, include_directories: incbase, dependencies: [deps, math_dep], diff --git a/fontconfig/test/Makefile.am b/fontconfig/test/Makefile.am index 3b79c783b8..3b4e3e8403 100644 --- a/fontconfig/test/Makefile.am +++ b/fontconfig/test/Makefile.am @@ -49,6 +49,7 @@ TESTDATA = \ test-70-no-bitmaps-and-emoji.json \ test-70-no-bitmaps-except-emoji.json \ test-90-synthetic.json \ + test-filter.json \ test-issue-286.json \ test-style-match.json \ $(NULL) @@ -176,6 +177,9 @@ check_PROGRAMS += test-family-matching test_family_matching_LDADD = $(top_builddir)/src/libfontconfig.la TESTS += test-family-matching +check_PROGRAMS += test-filter +test_filter_LDADD = $(top_builddir)/src/libfontconfig.la + EXTRA_DIST=run-test.sh run-test-conf.sh wrapper-script.sh $(TESTDATA) out.expected-long-family-names out.expected-no-long-family-names CLEANFILES = \ diff --git a/fontconfig/test/meson.build b/fontconfig/test/meson.build index d547591539..32d77146ad 100644 --- a/fontconfig/test/meson.build +++ b/fontconfig/test/meson.build @@ -26,6 +26,7 @@ if host_machine.system() != 'windows' endif endif + foreach test_data : tests fname = test_data[0] opts = test_data.length() > 1 ? 
test_data[1] : {} @@ -43,6 +44,11 @@ foreach test_data : tests test(test_name, exe, timeout: 600) endforeach +if get_option('fontations').enabled() + rust = import('rust') + rust.test('fc_fontations_tests', fc_fontations) +endif + fs = import('fs') if host_machine.system() != 'windows' @@ -55,6 +61,13 @@ if host_machine.system() != 'windows' endif test('run_test_sh', find_program('run-test.sh'), timeout: 600, env: ['srcdir=@0@'.format(meson.current_source_dir()), 'builddir=@0@'.format(meson.current_build_dir()), 'EXEEXT=@0@'.format(conf.get('EXEEXT')), 'VERBOSE=1']) + + if pytest.found() + test('pytest', pytest, args: ['--tap'], + workdir: meson.current_source_dir(), + env: ['builddir=@0@'.format(meson.current_build_dir())], + protocol: 'tap') + endif endif if jsonc_dep.found() diff --git a/fontconfig/test/run-test-conf.sh b/fontconfig/test/run-test-conf.sh index cc41185844..88407cdaf6 100644 --- a/fontconfig/test/run-test-conf.sh +++ b/fontconfig/test/run-test-conf.sh @@ -53,6 +53,7 @@ done for i in \ test-issue-286.json \ test-style-match.json \ + test-filter.json \ ; do echo $RUNNER $TESTDIR/$i ... $RUNNER $TESTDIR/../conf.d/10-autohint.conf $TESTDIR/$i diff --git a/fontconfig/test/test-conf.c b/fontconfig/test/test-conf.c index 8b298ef629..b5f702bcdd 100644 --- a/fontconfig/test/test-conf.c +++ b/fontconfig/test/test-conf.c @@ -278,7 +278,7 @@ build_pattern (json_object *obj) } static FcFontSet * -build_fs (FcConfig *config, json_object *obj) +build_fs (FcConfig *config, json_object *obj, FcBool filter) { FcFontSet *fs = FcFontSetCreate (); int i, n; @@ -292,7 +292,8 @@ build_fs (FcConfig *config, json_object *obj) if (json_object_get_type (o) != json_type_object) continue; pat = build_pattern (o); - if (FcConfigAcceptFont (config, pat)) + if (FcConfigAcceptFont (config, pat) && + (!filter || FcConfigAcceptFilter (config, pat))) FcFontSetAdd (fs, pat); else FcPatternDestroy(pat); @@ -301,19 +302,71 @@ build_fs (FcConfig *config, json_object *obj) return fs; } +static FcBool +filter_func (const FcPattern *f, void *user_data) +{ + FcPattern *filter = (FcPattern *)user_data; + FcPatternIter iter; + FcBool ret = FcTrue; + + FcPatternIterStart (filter, &iter); + if (!(ret = FcPatternIterIsValid (filter, &iter))) + goto bail; + do + { + const char *obj = FcPatternIterGetObject (filter, &iter); + int i, n = FcPatternIterValueCount(filter, &iter); + + for (i = 0; i < n; i++) + { + FcValue v, v2; + FcValueBinding b; + + if (FcPatternIterGetValue (filter, &iter, i, &v, &b) != FcResultMatch) + { + ret = FcFalse; + goto bail; + } + if (FcPatternGet (f, obj, 0, &v2) != FcResultMatch) + { + ret = FcFalse; + goto bail; + } + if (!FcValueEqual (v, v2)) + { + ret = FcFalse; + goto bail; + } + } + } while (FcPatternIterNext (filter, &iter)); +bail: + return ret; +} + static FcBool build_fonts (FcConfig *config, json_object *root) { - json_object *fonts; + json_object *fonts, *filter; FcFontSet *fs; + FcPattern *filterpat; + if (json_object_object_get_ex (root, "filter", &filter)) + { + if (json_object_get_type (filter) != json_type_object) + { + fprintf (stderr, "W: Invalid filter defined\n"); + return FcFalse; + } + filterpat = build_pattern (filter); + FcConfigSetFontSetFilter(config, filter_func, (FcDestroyFunc)FcPatternDestroy, filterpat); + } if (!json_object_object_get_ex (root, "fonts", &fonts) || json_object_get_type (fonts) != json_type_array) { fprintf (stderr, "W: No fonts defined\n"); return FcFalse; } - fs = build_fs (config, fonts); + fs = build_fs (config, fonts, FcTrue); /* 
FcConfigSetFonts (config, fs, FcSetSystem); */ if (config->fonts[FcSetSystem]) FcFontSetDestroy (config->fonts[FcSetSystem]); @@ -341,6 +394,7 @@ run_test (FcConfig *config, json_object *root) json_object_iter iter; FcPattern *query = NULL; FcPattern *result = NULL; + FcPattern *filterpat = NULL; FcFontSet *result_fs = NULL; const char *method = NULL; @@ -388,7 +442,7 @@ run_test (FcConfig *config, json_object *root) } if (result_fs) FcFontSetDestroy (result_fs); - result_fs = build_fs (config, iter.val); + result_fs = build_fs (config, iter.val, FcFalse); } else if (strcmp (iter.key, "$comment") == 0) { diff --git a/fontconfig/test/test-filter.c b/fontconfig/test/test-filter.c new file mode 100644 index 0000000000..9eebdf040c --- /dev/null +++ b/fontconfig/test/test-filter.c @@ -0,0 +1,59 @@ +#include +#include +#include + +static FcBool +filter (const FcPattern *f, void *user_data) +{ + FcChar8 *s = NULL; + + if (FcPatternGetString (f, FC_FONT_WRAPPER, 0, &s) == FcResultMatch) + { + /* accept "SFNT" only */ + if (FcStrCmp (s, (FcChar8 *)"SFNT") == 0) + return FcTrue; + } + return FcFalse; +} + +int +main (void) +{ + FcPattern *p; + FcObjectSet *os; + FcFontSet *fs; + int i, ret = 0; + FcChar8 *s = NULL, *f; + + FcConfigSetFontSetFilter(NULL, filter, NULL, NULL); + p = FcPatternCreate (); + os = FcObjectSetBuild (FC_FAMILY, FC_STYLE, FC_FILE, FC_FONT_WRAPPER, NULL); + fs = FcFontList (NULL, p, os); + FcObjectSetDestroy (os); + FcPatternDestroy (p); + + printf ("%d matched\n", fs->nfont); + for (i = 0; i < fs->nfont; i++) + { + if (FcPatternGetString (fs->fonts[i], FC_FONT_WRAPPER, 0, &s) == FcResultMatch) + { + f = FcPatternFormat (fs->fonts[i], (FcChar8 *)"%{=fclist}\n"); + printf ("%s", f); + FcStrFree (f); + if (FcStrCmp (s, (FcChar8 *)"SFNT") != 0) + { + printf ("failed:\n"); + fail: + ret = 1; + } + } + else + { + printf ("no font wrapper\n"); + goto fail; + } + } + FcFontSetDestroy (fs); + + return ret; +} diff --git a/fontconfig/test/test-filter.json b/fontconfig/test/test-filter.json new file mode 100644 index 0000000000..25bce57c0b --- /dev/null +++ b/fontconfig/test/test-filter.json @@ -0,0 +1,65 @@ +{ + "fonts": [ + { + "family": [ + "Foo" + ], + "style": [ + "Regular" + ], + "file": "/path/to/Foo.ttf", + "fontwrapper": "SFNT" + }, + { + "family": [ + "Bar" + ], + "style": [ + "Regular" + ], + "file": "/path/to/Bar.otf", + "fontwrapper": "CFF" + }, + { + "family": [ + "Baz" + ], + "style": [ + "Regular" + ], + "file": "/path/to/Baz.woff", + "fontwrapper": "WOFF" + }, + { + "family": [ + "Blah" + ], + "style": [ + "Regular" + ], + "file": "/path/to/Baz.bdf" + } + ], + "filter": { + "fontwrapper": "SFNT" + }, + "tests": [ + { + "method": "list", + "query": { + }, + "result_fs": [ + { + "family": [ + "Foo" + ], + "style": [ + "Regular" + ], + "file": "/path/to/Foo.ttf", + "fontwrapper": "SFNT" + } + ] + } + ] +} diff --git a/fontconfig/test/test_issue431.py b/fontconfig/test/test_issue431.py new file mode 100644 index 0000000000..55ca2b4f6c --- /dev/null +++ b/fontconfig/test/test_issue431.py @@ -0,0 +1,27 @@ +#! 
/usr/bin/env python3 +# Copyright (C) 2024 fontconfig Authors +# SPDX-License-Identifier: HPND + +import os +import pytest +import re +import requests +import shutil +import subprocess +from pathlib import Path + + +def test_issue431(tmp_path): + req = requests.get('https://github.com/googlefonts/roboto-flex/releases/download/3.100/roboto-flex-fonts.zip', + allow_redirects=True) + with open(tmp_path / 'roboto-flex-fonts.zip', 'wb') as f: + f.write(req.content) + shutil.unpack_archive(tmp_path / 'roboto-flex-fonts.zip', tmp_path) + builddir = Path(os.environ.get('builddir', Path(__file__).parent)).parent + result = subprocess.run([builddir / 'fc-query' / 'fc-query', '-f', '%{family[0]}:%{index}:%{style[0]}:%{postscriptname}\n', tmp_path / 'roboto-flex-fonts/fonts/variable/RobotoFlex[GRAD,XOPQ,XTRA,YOPQ,YTAS,YTDE,YTFI,YTLC,YTUC,opsz,slnt,wdth,wght].ttf'], stdout=subprocess.PIPE) + + for line in result.stdout.decode('utf-8').splitlines(): + family, index, style, psname = line.split(':') + normstyle = re.sub('[\x04\\(\\)/<>\\[\\]{}\t\f\r\n ]', '', style) + assert psname.split('-')[-1] == normstyle, \ + f'postscriptname `{psname}\' does not contain style name `{normstyle}\': index {index}' diff --git a/gl/internal/dri_interface.h b/gl/internal/dri_interface.h index f171d5084a..a4d576ecda 100644 --- a/gl/internal/dri_interface.h +++ b/gl/internal/dri_interface.h @@ -442,7 +442,6 @@ struct mesa_glinterop_device_info; struct mesa_glinterop_export_in; struct mesa_glinterop_export_out; struct mesa_glinterop_flush_out; -typedef struct __GLsync *GLsync; struct __DRI2interopExtensionRec { __DRIextension base; @@ -968,7 +967,7 @@ struct __DRIswrastExtensionRec { int (*queryBufferAge)(__DRIdrawable *drawable); /** - * createNewScreen() with the driver extensions passed in and implicit load flag. + * createNewScreen() with the driver extensions passed in and driver_name_is_inferred load flag. * * \since version 6 */ @@ -976,7 +975,7 @@ struct __DRIswrastExtensionRec { const __DRIextension **loader_extensions, const __DRIextension **driver_extensions, const __DRIconfig ***driver_configs, - bool implicit, + bool driver_name_is_inferred, void *loaderPrivate); }; @@ -995,7 +994,7 @@ typedef __DRIscreen * const __DRIextension **extensions, const __DRIextension **driver_extensions, const __DRIconfig ***driver_configs, - bool implicit, + bool driver_name_is_inferred, void *loaderPrivate); typedef __DRIdrawable * @@ -1253,7 +1252,7 @@ struct __DRIdri2ExtensionRec { __DRIcreateNewScreen2Func createNewScreen2; /** - * createNewScreen with the driver's extension list passed in and implicit load flag. + * createNewScreen with the driver's extension list passed in and driver_name_is_inferred load flag. * * \since version 5 */ @@ -1266,7 +1265,7 @@ struct __DRIdri2ExtensionRec { * extensions. 
*/ #define __DRI_IMAGE "DRI_IMAGE" -#define __DRI_IMAGE_VERSION 20 +#define __DRI_IMAGE_VERSION 22 /* __DRI_IMAGE_FORMAT_* tokens are no longer exported */ @@ -1346,6 +1345,7 @@ struct __DRIdri2ExtensionRec { #define __DRI_IMAGE_ATTRIB_OFFSET 0x200A /* available in versions 13 */ #define __DRI_IMAGE_ATTRIB_MODIFIER_LOWER 0x200B /* available in versions 14 */ #define __DRI_IMAGE_ATTRIB_MODIFIER_UPPER 0x200C /* available in versions 14 */ +#define __DRI_IMAGE_ATTRIB_COMPRESSION_RATE 0x200D /* available in versions 22 */ enum __DRIYUVColorSpace { __DRI_YUV_COLOR_SPACE_UNDEFINED = 0, @@ -1366,6 +1366,24 @@ enum __DRIChromaSiting { __DRI_YUV_CHROMA_SITING_0_5 = 0x3285 }; +enum __DRIFixedRateCompression { + __DRI_FIXED_RATE_COMPRESSION_NONE = 0x34B1, + __DRI_FIXED_RATE_COMPRESSION_DEFAULT = 0x34B2, + + __DRI_FIXED_RATE_COMPRESSION_1BPC = 0x34B4, + __DRI_FIXED_RATE_COMPRESSION_2BPC = 0x34B5, + __DRI_FIXED_RATE_COMPRESSION_3BPC = 0x34B6, + __DRI_FIXED_RATE_COMPRESSION_4BPC = 0x34B7, + __DRI_FIXED_RATE_COMPRESSION_5BPC = 0x34B8, + __DRI_FIXED_RATE_COMPRESSION_6BPC = 0x34B9, + __DRI_FIXED_RATE_COMPRESSION_7BPC = 0x34BA, + __DRI_FIXED_RATE_COMPRESSION_8BPC = 0x34BB, + __DRI_FIXED_RATE_COMPRESSION_9BPC = 0x34BC, + __DRI_FIXED_RATE_COMPRESSION_10BPC = 0x34BD, + __DRI_FIXED_RATE_COMPRESSION_11BPC = 0x34BE, + __DRI_FIXED_RATE_COMPRESSION_12BPC = 0x34BF, +}; + /** * \name Reasons that __DRIimageExtensionRec::createImageFromTexture or * __DRIimageExtensionRec::createImageFromDmaBufs might fail */ @@ -1755,6 +1773,51 @@ struct __DRIimageExtensionRec { * \since 21 */ void (*setInFenceFd)(__DRIimage *image, int fd); + + /* + * Query supported compression rates for a given format for + * EGL_EXT_surface_compression. + * + * \param config Config for which to query the supported compression + * rates. + * \param max Maximum number of rates that can be accommodated into + * \param rates. If zero, no rates are returned - + * instead, the driver returns the total number of + * supported compression rates in \param count. + * \param rates Buffer to fill rates into. + * \param count Count of rates returned, or, total number of + * supported rates in case \param max is zero. + * + * Returns true on success. + * + * \since 22 + */ + bool (*queryCompressionRates)(__DRIscreen *screen, const __DRIconfig *config, + int max, enum __DRIFixedRateCompression *rates, + int *count); + + /* + * Query list of modifiers that are associated with given fixed-rate + * compression bitrate. + * + * \param format The format to query + * \param rate Compression rate to query for + * \param max Maximum number of modifiers that can be accommodated in + * \param modifiers. If zero, no modifiers are returned - + * instead, the driver returns the total number of + * modifiers for \param format in \param count. + * \param modifiers Buffer to fill modifiers into. + * \param count Count of the modifiers returned, or, total number of + * supported modifiers for \param format in case + * \param max is zero. + + * + * Returns true on success.
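The queryCompressionRates hook above follows the usual two-call enumeration contract: probe with max set to zero to learn the count, allocate, then call again to fill the buffer. A minimal loader-side sketch of that contract, assuming the dri_interface.h declarations above are in scope; `img`, `screen` and `config` are hypothetical names obtained elsewhere:

    #include <stdlib.h>

    static enum __DRIFixedRateCompression *
    get_rates(const struct __DRIimageExtensionRec *img, __DRIscreen *screen,
              const __DRIconfig *config, int *count)
    {
        enum __DRIFixedRateCompression *rates;

        /* Probe call: max == 0, so only the total count comes back. */
        if (!img->queryCompressionRates(screen, config, 0, NULL, count))
            return NULL;
        if (*count <= 0)
            return NULL;

        rates = malloc((size_t)*count * sizeof(*rates));
        if (!rates)
            return NULL;

        /* Fill call: up to *count rates are written into the buffer. */
        if (!img->queryCompressionRates(screen, config, *count, rates, count)) {
            free(rates);
            return NULL;
        }
        return rates;
    }

queryCompressionModifiers follows the same max/count convention for its modifier list.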
+ * + * \since 22 + */ + bool (*queryCompressionModifiers)(__DRIscreen *screen, uint32_t format, + enum __DRIFixedRateCompression rate, + int max, uint64_t *modifiers, int *count); }; diff --git a/include/xcb/xcb.h b/include/xcb/xcb.h index d5a1697fa2..bf2ba2f8f5 100644 --- a/include/xcb/xcb.h +++ b/include/xcb/xcb.h @@ -29,11 +29,7 @@ #define __XCB_H__ #include -#if defined(__solaris__) -#include -#else #include -#endif #ifndef _WIN32 #include @@ -52,12 +48,36 @@ extern "C" { * @file xcb.h */ -#ifdef __GNUC__ +#ifndef __has_attribute +# define __has_attribute(x) 0 /* Compatibility with older compilers. */ +#endif + +/* + * For the below checks, we currently assume that __GNUC__ indicates + * gcc 3.0 (released 2001) or later, as we require support for C99. + */ + +/* Supported in gcc 2.5 and later */ +#if defined(__GNUC__) || __has_attribute(__const__) +#define XCB_CONST_FUNCTION __attribute__((__const__)) +#else +#define XCB_CONST_FUNCTION XCB_PURE_FUNCTION +#endif + +/* Supported in gcc 2.7 and later */ +#if defined(__GNUC__) || __has_attribute(__packed__) #define XCB_PACKED __attribute__((__packed__)) #else #define XCB_PACKED #endif +/* Supported in gcc 2.96 and later */ +#if defined(__GNUC__) || __has_attribute(__pure__) +#define XCB_PURE_FUNCTION __attribute__((__pure__)) +#else +#define XCB_PURE_FUNCTION +#endif + /** * @defgroup XCB_Core_API XCB Core API * @brief Core API of the XCB library. @@ -470,6 +490,7 @@ void xcb_prefetch_extension_data(xcb_connection_t *c, xcb_extension_t *ext); * * The result must not be freed. */ +XCB_PURE_FUNCTION const struct xcb_setup_t *xcb_get_setup(xcb_connection_t *c); /** @@ -480,6 +501,7 @@ const struct xcb_setup_t *xcb_get_setup(xcb_connection_t *c); * Accessor for the file descriptor that was passed to the * xcb_connect_to_fd call that returned @p c. */ +XCB_PURE_FUNCTION int xcb_get_file_descriptor(xcb_connection_t *c); /** @@ -500,6 +522,7 @@ int xcb_get_file_descriptor(xcb_connection_t *c); * @return XCB_CONN_CLOSED_PARSE_ERR, error during parsing display string. * @return XCB_CONN_CLOSED_INVALID_SCREEN, because the server does not have a screen matching the display. */ +XCB_PURE_FUNCTION int xcb_connection_has_error(xcb_connection_t *c); /** diff --git a/include/xcb/xcbext.h b/include/xcb/xcbext.h index 90f9d58b88..1bb992eccd 100644 --- a/include/xcb/xcbext.h +++ b/include/xcb/xcbext.h @@ -297,6 +297,7 @@ int xcb_poll_for_reply64(xcb_connection_t *c, uint64_t request, void **reply, xc * @param replylen The size of the reply. * @return Pointer to the location where received file descriptors are stored. */ +XCB_CONST_FUNCTION int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t replylen); @@ -306,6 +307,7 @@ int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t replylen); * @param mask The mask to check * @return The number of set bits in the mask */ +XCB_CONST_FUNCTION int xcb_popcount(uint32_t mask); /** @@ -313,6 +315,7 @@ int xcb_popcount(uint32_t mask); * @param len The length of the array * @return The sum of all entries in the array. 
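These annotations are more than documentation: a `const` function's result depends only on its argument values, and a `pure` function may additionally read (but not write) memory, so the compiler is allowed to merge or hoist repeated calls. A small illustration of the payoff, assuming a GCC-compatible compiler; the loop is invented for the example, not taken from libxcb:

    #include <stdint.h>
    #include <xcb/xcb.h>
    #include <xcb/xcbext.h>

    uint32_t weight_all(const uint32_t *masks, int n, uint32_t fixed_mask)
    {
        uint32_t total = 0;
        /* xcb_popcount() is declared XCB_CONST_FUNCTION above, so the
         * compiler may evaluate this call once and reuse the result
         * across every loop iteration. */
        for (int i = 0; i < n; i++)
            total += (uint32_t)xcb_popcount(fixed_mask) + masks[i];
        return total;
    }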
*/ +XCB_PURE_FUNCTION int xcb_sumof(uint8_t *list, int len); #ifdef __cplusplus diff --git a/libX11/README.md b/libX11/README.md index be1bddbb43..abacb3934b 100644 --- a/libX11/README.md +++ b/libX11/README.md @@ -31,6 +31,17 @@ For patch submission instructions, see: https://www.x.org/wiki/Development/Documentation/SubmittingPatches +## Release 1.8.10 + + * Re-fix XIM input sometimes jumbled (#205, #206, #207, #208, !246) + * Fix various static analysis errors (!250) + * Add compose sequences for Arabic hamza (!218), Ezh (!221), and + hryvnia currency (!259) + * Make colormap private interfaces thread safe (#215, !254) + * Fix deadlock in XRebindKeysym() (!256) + * Assorted memory handling cleanups (!251, !258) + * Restore VAX support still in use by NetBSD (!257) + ## Release 1.8.9 * Fix regressions introduced in 1.8.8 (!245, !248) - this includes reverting diff --git a/libX11/configure.ac b/libX11/configure.ac index 15ccdd5376..49f05e9f27 100644 --- a/libX11/configure.ac +++ b/libX11/configure.ac @@ -1,7 +1,7 @@ # Initialize Autoconf AC_PREREQ([2.70]) -AC_INIT([libX11], [1.8.9], +AC_INIT([libX11], [1.8.10], [https://gitlab.freedesktop.org/xorg/lib/libx11/-/issues], [libX11]) AC_CONFIG_SRCDIR([Makefile.am]) AC_CONFIG_HEADERS([src/config.h include/X11/XlibConf.h]) diff --git a/libX11/include/X11/Xlibint.h b/libX11/include/X11/Xlibint.h index 6b0799d83a..1c6fa0c7f3 100644 --- a/libX11/include/X11/Xlibint.h +++ b/libX11/include/X11/Xlibint.h @@ -435,8 +435,8 @@ X11_EXTERN LockInfoPtr _Xglobal_lock; #define _XLockMutex(lock) if (_XLockMutex_fn) (*_XLockMutex_fn)(lock) #define _XUnlockMutex(lock) if (_XUnlockMutex_fn) (*_XUnlockMutex_fn)(lock) #endif -#define _XCreateMutex(lock) if (_XCreateMutex_fn) (*_XCreateMutex_fn)(lock); -#define _XFreeMutex(lock) if (_XFreeMutex_fn) (*_XFreeMutex_fn)(lock); +#define _XCreateMutex(lock) if (_XCreateMutex_fn) (*_XCreateMutex_fn)(lock) +#define _XFreeMutex(lock) if (_XFreeMutex_fn) (*_XFreeMutex_fn)(lock) #else /* XTHREADS */ #define LockDisplay(dis) @@ -647,13 +647,13 @@ extern void _XFlushGCCache(Display *dpy, GC gc); * "len" is the length of the data buffer. */ #ifndef DataRoutineIsProcedure -#define Data(dpy, data, len) {\ +#define Data(dpy, data, len) do {\ if (dpy->bufptr + (len) <= dpy->bufmax) {\ memcpy(dpy->bufptr, data, (size_t)(len));\ dpy->bufptr += ((size_t)((len) + 3) & (size_t)~3);\ } else\ _XSend(dpy, (_Xconst char*)(data), (long)(len));\ -} +} while (0) #endif /* DataRoutineIsProcedure */ @@ -671,12 +671,13 @@ extern void _XFlushGCCache(Display *dpy, GC gc); * BufAlloc (xTextElt *, elt, nbytes) */ -#define BufAlloc(type, ptr, n) \ +#define BufAlloc(type, ptr, n) do { \ if (dpy->bufptr + (n) > dpy->bufmax) \ _XFlush (dpy); \ ptr = (type) dpy->bufptr; \ memset(ptr, '\0', (size_t)(n)); \ - dpy->bufptr += (n); + dpy->bufptr += (n); \ +} while (0) #define Data16(dpy, data, len) Data((dpy), (_Xconst char *)(data), (len)) #define _XRead16Pad(dpy, data, len) _XReadPad((dpy), (char *)(data), (len)) @@ -719,7 +720,7 @@ extern void _XRead32( * char. */ #define CI_GET_CHAR_INFO_1D(fs,col,def,cs) \ -{ \ +do { \ cs = def; \ if (col >= fs->min_char_or_byte2 && col <= fs->max_char_or_byte2) { \ if (fs->per_char == NULL) { \ @@ -729,7 +730,7 @@ extern void _XRead32( if (CI_NONEXISTCHAR(cs)) cs = def; \ } \ } \ -} +} while (0) #define CI_GET_DEFAULT_INFO_1D(fs,cs) \ CI_GET_CHAR_INFO_1D (fs, fs->default_char, NULL, cs) @@ -741,7 +742,7 @@ extern void _XRead32( * column. This is used for fonts that have more than row zero. 
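All of the `{ ... }` to `do { ... } while (0)` conversions in this header fix the same latent hazard: a brace-only statement macro plus the caller's trailing semicolon leaves an empty statement behind, which detaches an `else` that follows. A minimal reproduction with a hypothetical macro; BUMP is not from Xlib:

    void log_bump(int v);

    /* Old style: braces only. */
    #define BUMP_BAD(x)  { (x)++; log_bump(x); }
    /* The style this diff converts to: expands to exactly one statement. */
    #define BUMP_GOOD(x) do { (x)++; log_bump(x); } while (0)

    void demo(int cond, int v)
    {
        if (cond)
            BUMP_GOOD(v);   /* the caller's `;` terminates the do/while */
        else
            v--;
        /* With BUMP_BAD in the same position, the `;` after the closing
         * brace ends the `if`, and the now-unattached `else` is a
         * compile error. */
    }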
*/ #define CI_GET_CHAR_INFO_2D(fs,row,col,def,cs) \ -{ \ +do { \ cs = def; \ if (row >= fs->min_byte1 && row <= fs->max_byte1 && \ col >= fs->min_char_or_byte2 && col <= fs->max_char_or_byte2) { \ @@ -755,19 +756,19 @@ extern void _XRead32( if (CI_NONEXISTCHAR(cs)) cs = def; \ } \ } \ -} +} while (0) #define CI_GET_DEFAULT_INFO_2D(fs,cs) \ -{ \ +do { \ unsigned int r = (fs->default_char >> 8); \ unsigned int c = (fs->default_char & 0xff); \ CI_GET_CHAR_INFO_2D (fs, r, c, NULL, cs); \ -} +} while (0) /* srcvar must be a variable for large architecture version */ #define OneDataCard32(dpy,dstaddr,srcvar) \ - { *(CARD32 *)(dstaddr) = (srcvar); } + do { *(CARD32 *)(dstaddr) = (srcvar); } while (0) typedef struct _XInternalAsync { @@ -807,12 +808,12 @@ typedef struct _XAsyncEState { } _XAsyncErrorState; extern void _XDeqAsyncHandler(Display *dpy, _XAsyncHandler *handler); -#define DeqAsyncHandler(dpy,handler) { \ +#define DeqAsyncHandler(dpy,handler) do { \ if (dpy->async_handlers == (handler)) \ dpy->async_handlers = (handler)->next; \ else \ _XDeqAsyncHandler(dpy, handler); \ - } + } while (0) typedef void (*FreeFuncType) ( Display* /* display */ diff --git a/libX11/modules/im/ximcp/imCallbk.c b/libX11/modules/im/ximcp/imCallbk.c index 359d9b8cab..bc7a93c1bb 100644 --- a/libX11/modules/im/ximcp/imCallbk.c +++ b/libX11/modules/im/ximcp/imCallbk.c @@ -327,15 +327,17 @@ _XimStrConversionCallback(Xim im, * `cbrec.text->length' means the string length in characters */ { + size_t blen; length_in_bytes = (cbrec.text->encoding_is_wchar)? sizeof(wchar_t) * cbrec.text->length: /* wchar */ strlen(cbrec.text->string.mbs); /* mb */ - buf_len = XIM_HEADER_SIZE + + blen = XIM_HEADER_SIZE + sz_CARD16 + 2 + length_in_bytes + XIM_PAD(2 + length_in_bytes) + 2 + 2 + sz_CARD32 * cbrec.text->length; - buf = Xmalloc(buf_len); + buf = Xmalloc(blen); + buf_len = (INT16) blen; } _XimSetHeader((XPointer)buf, XIM_STR_CONVERSION_REPLY, 0, &buf_len); buf_len -= XIM_HEADER_SIZE; /* added by _XimSetHeader (HACK) */ diff --git a/libX11/modules/im/ximcp/imDefIm.c b/libX11/modules/im/ximcp/imDefIm.c index e307539895..b27f80c58b 100644 --- a/libX11/modules/im/ximcp/imDefIm.c +++ b/libX11/modules/im/ximcp/imDefIm.c @@ -303,7 +303,7 @@ _XimPreConnectionIM( /* server name check */ if( !(str = XGetAtomName( display, selection )) ) - return False; + goto Error; if(!_XimCheckServerName(im, str)) { XFree( (XPointer)str ); goto Error; @@ -1728,7 +1728,7 @@ _XimEncodingNegotiation( + sizeof(CARD16) + detail_len; - if (!(buf = Xcalloc(XIM_HEADER_SIZE + len, 1))) + if (!(buf = Xcalloc(1, XIM_HEADER_SIZE + len))) goto free_detail_ptr; buf_s = (CARD16 *)&buf[XIM_HEADER_SIZE]; diff --git a/libX11/modules/im/ximcp/imRm.c b/libX11/modules/im/ximcp/imRm.c index 4575d122de..254b113103 100644 --- a/libX11/modules/im/ximcp/imRm.c +++ b/libX11/modules/im/ximcp/imRm.c @@ -1943,7 +1943,7 @@ static const XimICMode ic_mode[] = { (XIM_MODE_STS_DEFAULT | XIM_MODE_STS_SET | XIM_MODE_STS_GET), 0}, {OFFSET_XNAREA, - 0, + (XIM_MODE_PRE_SET | XIM_MODE_PRE_GET), (XIM_MODE_PRE_DEFAULT | XIM_MODE_PRE_SET | XIM_MODE_PRE_GET), (XIM_MODE_PRE_DEFAULT | XIM_MODE_PRE_SET | XIM_MODE_PRE_GET), 0, diff --git a/libX11/modules/om/generic/omDefault.c b/libX11/modules/om/generic/omDefault.c index 322690fa0c..1bed9332db 100644 --- a/libX11/modules/om/generic/omDefault.c +++ b/libX11/modules/om/generic/omDefault.c @@ -231,10 +231,10 @@ _XmbDefaultTextPerCharExtents(XOC oc, _Xconst char *text, int length, bzero((char *) &overall, sizeof(XCharStruct)); *num_chars = 0; - 
CI_GET_DEFAULT_INFO_1D(font, def) + CI_GET_DEFAULT_INFO_1D(font, def); while (length-- > 0) { - CI_GET_CHAR_INFO_1D(font, *text, def, cs) + CI_GET_CHAR_INFO_1D(font, *text, def, cs); text++; if (cs == NULL) continue; diff --git a/libX11/modules/om/generic/omTextPer.c b/libX11/modules/om/generic/omTextPer.c index ae08fe6a45..2d770324b6 100644 --- a/libX11/modules/om/generic/omTextPer.c +++ b/libX11/modules/om/generic/omTextPer.c @@ -92,20 +92,20 @@ _XomGenericTextPerCharExtents( } if (is_xchar2b) { - CI_GET_DEFAULT_INFO_2D(font, def) + CI_GET_DEFAULT_INFO_2D(font, def); xchar2b_ptr = xchar2b_buf; } else { - CI_GET_DEFAULT_INFO_1D(font, def) + CI_GET_DEFAULT_INFO_1D(font, def); xchar_ptr = (char *) xchar2b_buf; } while (buf_len-- > 0) { if (is_xchar2b) { CI_GET_CHAR_INFO_2D(font, xchar2b_ptr->byte1, - xchar2b_ptr->byte2, def, cs) + xchar2b_ptr->byte2, def, cs); xchar2b_ptr++; } else { - CI_GET_CHAR_INFO_1D(font, *xchar_ptr, def, cs) + CI_GET_CHAR_INFO_1D(font, *xchar_ptr, def, cs); xchar_ptr++; } if (cs == NULL) diff --git a/libX11/nls/en_US.UTF-8/Compose.pre b/libX11/nls/en_US.UTF-8/Compose.pre index 680f4fabaa..d4ff812634 100644 --- a/libX11/nls/en_US.UTF-8/Compose.pre +++ b/libX11/nls/en_US.UTF-8/Compose.pre @@ -204,7 +204,14 @@ XCOMM "₪" U20aa NEW SHEQEL SIGN XCOMM "₱" U20b1 PESO SIGN : "₲" U20b2 # GUARANI SIGN : "₲" U20b2 # GUARANI SIGN -XCOMM "₴" U20b4 HRYVNIA SIGN + : "₴" U20b4 # HRYVNIA SIGN + : "₴" U20b4 # HRYVNIA SIGN + : "₴" U20b4 # HRYVNIA SIGN + : "₴" U20b4 # HRYVNIA SIGN + : "₴" U20b4 # HRYVNIA SIGN + : "₴" U20b4 # HRYVNIA SIGN + : "₴" U20b4 # HRYVNIA SIGN + : "₴" U20b4 # HRYVNIA SIGN : "₵" U20b5 # CEDI SIGN : "₵" U20b5 # CEDI SIGN : "₹" U20b9 # INDIAN RUPEE SIGN @@ -912,7 +919,6 @@ XCOMM Latin Extended-A : "ş" U015F # LATIN SMALL LETTER S WITH CEDILLA : "ş" U015F # LATIN SMALL LETTER S WITH CEDILLA : "ş" U015F # LATIN SMALL LETTER S WITH CEDILLA - : "ş" U015F # LATIN SMALL LETTER S WITH CEDILLA : "Š" U0160 # LATIN CAPITAL LETTER S WITH CARON : "Š" U0160 # LATIN CAPITAL LETTER S WITH CARON : "Š" U0160 # LATIN CAPITAL LETTER S WITH CARON @@ -1065,6 +1071,7 @@ XCOMM Latin Extended-B : "ƶ" U01B6 # LATIN SMALL LETTER Z WITH STROKE : "ƶ" U01B6 # LATIN SMALL LETTER Z WITH STROKE : "ƶ" U01B6 # LATIN SMALL LETTER Z WITH STROKE + : "Ʒ" U01B7 # LATIN CAPITAL LETTER EZH : "Ǎ" U01CD # LATIN CAPITAL LETTER A WITH CARON : "Ǎ" U01CD # LATIN CAPITAL LETTER A WITH CARON : "Ǎ" U01CD # LATIN CAPITAL LETTER A WITH CARON @@ -1234,9 +1241,15 @@ XCOMM Latin Extended-B : "Ǯ" U01EE # LATIN CAPITAL LETTER EZH WITH CARON : "Ǯ" U01EE # LATIN CAPITAL LETTER EZH WITH CARON : "Ǯ" U01EE # LATIN CAPITAL LETTER EZH WITH CARON + : "Ǯ" U01EE # LATIN CAPITAL LETTER EZH WITH CARON + : "Ǯ" U01EE # LATIN CAPITAL LETTER EZH WITH CARON + : "Ǯ" U01EE # LATIN CAPITAL LETTER EZH WITH CARON : "ǯ" U01EF # LATIN SMALL LETTER EZH WITH CARON : "ǯ" U01EF # LATIN SMALL LETTER EZH WITH CARON : "ǯ" U01EF # LATIN SMALL LETTER EZH WITH CARON + : "ǯ" U01EF # LATIN SMALL LETTER EZH WITH CARON + : "ǯ" U01EF # LATIN SMALL LETTER EZH WITH CARON + : "ǯ" U01EF # LATIN SMALL LETTER EZH WITH CARON : "ǰ" U01F0 # LATIN SMALL LETTER J WITH CARON : "ǰ" U01F0 # LATIN SMALL LETTER J WITH CARON : "ǰ" U01F0 # LATIN SMALL LETTER J WITH CARON @@ -1416,6 +1429,7 @@ XCOMM IPA Extensions : "ɨ" U0268 # LATIN SMALL LETTER I WITH STROKE : "ɨ" U0268 # LATIN SMALL LETTER I WITH STROKE : "ɨ" U0268 # LATIN SMALL LETTER I WITH STROKE + : "ʒ" U0292 # LATIN SMALL LETTER EZH XCOMM Spacing Modifier Letters : "ʡ" U02A1 # LATIN LETTER GLOTTAL STOP WITH STROKE @@ 
-1678,7 +1692,12 @@ XCOMM Cyrillic : "ӹ" U04F9 # CYRILLIC SMALL LETTER YERU WITH DIAERESIS : "ӹ" U04F9 # CYRILLIC SMALL LETTER YERU WITH DIAERESIS -XCOMM Several other scripts +XCOMM Arabic + : "ء" Arabic_hamza # ARABIC LETTER HAMZA + : "ء" Arabic_hamza # ARABIC LETTER HAMZA + : "أ" Arabic_hamzaonalef # ARABIC LETTER ALEF WITH HAMZA ABOVE + : "ؤ" Arabic_hamzaonwaw # ARABIC LETTER WAW WITH HAMZA ABOVE + : "ئ" Arabic_hamzaonyeh # ARABIC LETTER YEH WITH HAMZA ABOVE : "آ" U0622 # ARABIC LETTER ALEF WITH MADDA ABOVE : "أ" U0623 # ARABIC LETTER ALEF WITH HAMZA ABOVE : "ؤ" U0624 # ARABIC LETTER WAW WITH HAMZA ABOVE @@ -1687,6 +1706,8 @@ XCOMM Several other scripts : "ۀ" U06C0 # ARABIC LETTER HEH WITH YEH ABOVE : "ۂ" U06C2 # ARABIC LETTER HEH GOAL WITH HAMZA ABOVE : "ۓ" U06D3 # ARABIC LETTER YEH BARREE WITH HAMZA ABOVE + +XCOMM Several other scripts : "ऩ" U0929 # DEVANAGARI LETTER NNNA : "ऱ" U0931 # DEVANAGARI LETTER RRA : "ऴ" U0934 # DEVANAGARI LETTER LLLA diff --git a/libX11/src/ClDisplay.c b/libX11/src/ClDisplay.c index aa904e5168..31d3a84155 100644 --- a/libX11/src/ClDisplay.c +++ b/libX11/src/ClDisplay.c @@ -47,6 +47,7 @@ XCloseDisplay ( { register _XExtension *ext; register int i; + xcb_connection_t *connection; if (!(dpy->flags & XlibDisplayClosing)) { @@ -68,7 +69,8 @@ XCloseDisplay ( if (X_DPY_GET_REQUEST(dpy) != X_DPY_GET_LAST_REQUEST_READ(dpy)) XSync(dpy, 1); } - xcb_disconnect(dpy->xcb->connection); + connection = dpy->xcb->connection; _XFreeDisplayStructure (dpy); + xcb_disconnect(connection); return 0; } diff --git a/libX11/src/CopyCmap.c b/libX11/src/CopyCmap.c index b37aba733e..5444550cd2 100644 --- a/libX11/src/CopyCmap.c +++ b/libX11/src/CopyCmap.c @@ -53,17 +53,12 @@ Colormap XCopyColormapAndFree( mid = req->mid = XAllocID(dpy); req->srcCmap = src_cmap; - /* re-lock the display to keep XID handling in sync */ UnlockDisplay(dpy); SyncHandle(); - LockDisplay(dpy); #if XCMS _XcmsCopyCmapRecAndFree(dpy, src_cmap, mid); #endif - UnlockDisplay(dpy); - SyncHandle(); - return(mid); } diff --git a/libX11/src/CrCmap.c b/libX11/src/CrCmap.c index 1b18a15bb5..9904c7dda9 100644 --- a/libX11/src/CrCmap.c +++ b/libX11/src/CrCmap.c @@ -48,12 +48,12 @@ Colormap XCreateColormap( if (visual == CopyFromParent) req->visual = CopyFromParent; else req->visual = visual->visualid; + UnlockDisplay(dpy); + SyncHandle(); + #ifdef XCMS _XcmsAddCmapRec(dpy, mid, w, visual); #endif - UnlockDisplay(dpy); - SyncHandle(); - return(mid); } diff --git a/libX11/src/CrGlCur.c b/libX11/src/CrGlCur.c index 0d4da87a0d..2214597d57 100644 --- a/libX11/src/CrGlCur.c +++ b/libX11/src/CrGlCur.c @@ -116,7 +116,7 @@ typedef Cursor (*TryShapeCursorFunc) (Display *dpy, static XModuleType _XcursorModule; static Bool _XcursorModuleTried; -#define GetFunc(type,name,ret) {\ +#define GetFunc(type,name,ret) do { \ static Bool been_here; \ static type staticFunc; \ \ @@ -134,7 +134,7 @@ static Bool _XcursorModuleTried; } \ ret = staticFunc; \ _XUnlockMutex (_Xglobal_lock); \ -} +} while (0) static Cursor _XTryShapeCursor (Display *dpy, diff --git a/libX11/src/FreeCmap.c b/libX11/src/FreeCmap.c index 68496dd885..e2b76fa6f7 100644 --- a/libX11/src/FreeCmap.c +++ b/libX11/src/FreeCmap.c @@ -41,12 +41,12 @@ XFreeColormap( LockDisplay(dpy); GetResReq(FreeColormap, cmap, req); + UnlockDisplay(dpy); + SyncHandle(); + #ifdef XCMS _XcmsDeleteCmapRec(dpy, cmap); #endif - UnlockDisplay(dpy); - SyncHandle(); - return 1; } diff --git a/libX11/src/KeyBind.c b/libX11/src/KeyBind.c index a8181b91bd..a5dc5f230d 100644 --- a/libX11/src/KeyBind.c +++ 
b/libX11/src/KeyBind.c @@ -462,6 +462,9 @@ UCSConvertCase( register unsigned code, else if ( (code >= 0x00e0 && code <= 0x00f6) || (code >= 0x00f8 && code <= 0x00fe) ) *upper -= 0x20; + /* The following code points do not map within Latin-1 and + * require special handling in XConvertCase + */ else if (code == 0x00ff) /* y with diaeresis */ *upper = 0x0178; else if (code == 0x00b5) /* micro sign */ @@ -655,15 +658,34 @@ XConvertCase( { /* Latin 1 keysym */ if (sym < 0x100) { - UCSConvertCase(sym, lower, upper); - return; + /* Special cases that do not map within Latin-1 */ + switch (sym) { + case XK_ydiaeresis: + *lower = sym; + *upper = XK_Ydiaeresis; + return; + case XK_mu: + *lower = sym; + *upper = XK_Greek_MU; + return; + case XK_ssharp: + *lower = sym; + *upper = 0x1001e9e; + return; + default: + UCSConvertCase(sym, lower, upper); + return; + } } /* Unicode keysym */ if ((sym & 0xff000000) == 0x01000000) { UCSConvertCase((sym & 0x00ffffff), lower, upper); - *upper |= 0x01000000; - *lower |= 0x01000000; + /* Use the Unicode keysym mask only for non Latin-1 */ + if (*upper >= 0x100) + *upper |= 0x01000000; + if (*lower >= 0x100) + *lower |= 0x01000000; return; } @@ -958,8 +980,9 @@ XRebindKeysym ( memcpy ((char *) p->modifiers, (char *) mlist, (size_t) nb); p->key = keysym; p->mlen = nm; - ComputeMaskFromKeytrans(dpy, p); UnlockDisplay(dpy); + ComputeMaskFromKeytrans(dpy, p); + return 0; } diff --git a/libX11/src/OpenDis.c b/libX11/src/OpenDis.c index 24e58348cf..c21c189707 100644 --- a/libX11/src/OpenDis.c +++ b/libX11/src/OpenDis.c @@ -710,7 +710,10 @@ void _XFreeDisplayStructure(Display *dpy) static void OutOfMemory(Display *dpy) { - if(dpy->xcb->connection) - xcb_disconnect(dpy->xcb->connection); - _XFreeDisplayStructure (dpy); + xcb_connection_t *connection = dpy->xcb->connection; + + _XFreeDisplayStructure (dpy); + + if (connection) + xcb_disconnect(connection); } diff --git a/libX11/src/ParseCmd.c b/libX11/src/ParseCmd.c index 09f4f57b73..121e6ba738 100644 --- a/libX11/src/ParseCmd.c +++ b/libX11/src/ParseCmd.c @@ -92,11 +92,11 @@ XrmParseCommand( char **argend; #define PutCommandResource(value_str) \ - { \ + do { \ XrmStringToBindingQuarkList( \ options[i].specifier, start_bindings, start_quarks); \ XrmQPutStringResource(pdb, bindings, quarks, value_str); \ - } /* PutCommandResource */ + } while (0) /* PutCommandResource */ myargc = (*argc); argend = argv + myargc; diff --git a/libX11/src/RdBitF.c b/libX11/src/RdBitF.c index 3cbef81e4c..6645125034 100644 --- a/libX11/src/RdBitF.c +++ b/libX11/src/RdBitF.c @@ -135,7 +135,7 @@ XReadBitmapFileData ( /* error cleanup and return macro */ #define RETURN(code) \ -{ Xfree (bits); fclose (fstream); return code; } + do { Xfree (bits); fclose (fstream); return code; } while (0) while (fgets(line, MAX_SIZE, fstream)) { if (strlen(line) == MAX_SIZE-1) diff --git a/libX11/src/SetFPath.c b/libX11/src/SetFPath.c index 6ac546f7b0..e1b54b6cfd 100644 --- a/libX11/src/SetFPath.c +++ b/libX11/src/SetFPath.c @@ -38,43 +38,49 @@ XSetFontPath ( char **directories, int ndirs) { - register size_t n = 0; - register int i; - register int nbytes; - char *p; register xSetFontPathReq *req; int retCode; LockDisplay(dpy); GetReq (SetFontPath, req); req->nFonts = ndirs; - for (i = 0; i < ndirs; i++) { - n = n + (safestrlen (directories[i]) + 1); - if (n >= USHRT_MAX) { - UnlockDisplay(dpy); - SyncHandle(); - return 0; + if (ndirs > 0) { + size_t n = 0; + int nbytes; + char *p; + + for (int i = 0; i < ndirs; i++) { + n = n + (safestrlen (directories[i]) + 1); + 
if (n >= USHRT_MAX) { + UnlockDisplay(dpy); + SyncHandle(); + return 0; + } } - } - nbytes = (n + 3) & ~3; - req->length += nbytes >> 2; - if ((p = Xmalloc (nbytes))) { - /* - * pack into counted strings. - */ - char *tmp = p; + nbytes = (n + 3) & ~3; + req->length += nbytes >> 2; + if ((p = Xmalloc (nbytes))) { + /* + * pack into counted strings. + */ + char *tmp = p; - for (i = 0; i < ndirs; i++) { - size_t length = safestrlen (directories[i]); - *p = length; - memcpy (p + 1, directories[i], length); - p += length + 1; + for (int i = 0; i < ndirs; i++) { + size_t length = safestrlen (directories[i]); + *p = length; + memcpy (p + 1, directories[i], length); + p += length + 1; + } + Data (dpy, tmp, nbytes); + Xfree (tmp); + retCode = 1; } - Data (dpy, tmp, nbytes); - Xfree (tmp); - retCode = 1; + else + retCode = 0; } - else + else if (ndirs == 0) + retCode = 1; + else /* ndirs < 0 */ retCode = 0; UnlockDisplay(dpy); diff --git a/libX11/src/TextExt.c b/libX11/src/TextExt.c index b883b01b44..0f98f9b46d 100644 --- a/libX11/src/TextExt.c +++ b/libX11/src/TextExt.c @@ -50,7 +50,7 @@ from The Open Group. */ #define CI_GET_ROWZERO_CHAR_INFO_2D(fs,col,def,cs) \ -{ \ +do { \ cs = def; \ if (fs->min_byte1 == 0 && \ col >= fs->min_byte2 && col <= fs->max_byte2) { \ @@ -61,7 +61,7 @@ from The Open Group. if (CI_NONEXISTCHAR(cs)) cs = def; \ } \ } \ -} +} while (0) /* diff --git a/libX11/src/XlibInt.c b/libX11/src/XlibInt.c index f4732d7ebf..66b5ae05ab 100644 --- a/libX11/src/XlibInt.c +++ b/libX11/src/XlibInt.c @@ -1813,8 +1813,11 @@ void *_XGetRequest(Display *dpy, CARD8 type, size_t len) dpy->last_req = dpy->bufptr; req = (xReq*)dpy->bufptr; - req->reqType = type; - req->length = len / 4; + *req = (xReq) { + .reqType = type, + .data = 0, + .length = len / 4 + }; dpy->bufptr += len; X_DPY_REQUEST_INCREMENT(dpy); return req; diff --git a/libX11/src/Xrm.c b/libX11/src/Xrm.c index ae098009ac..c9587c4f06 100644 --- a/libX11/src/Xrm.c +++ b/libX11/src/Xrm.c @@ -1183,12 +1183,12 @@ static void GetDatabase( if (c == '#') { /* Directive */ /* remove extra whitespace */ only_pcs = True; - while (is_space(bits = next_char(c, str))) {}; + while (is_space(bits = next_char(c, str))) {} /* only "include" directive is currently defined */ if (!strncmp(str, "include", 7)) { str += (7-1); /* remove extra whitespace */ - while (is_space(bits = next_char(c, str))) {}; + while (is_space(bits = next_char(c, str))) {} /* must have a starting " */ if (c == '"') { _Xconst char *fname = str+1; diff --git a/libX11/src/imConv.c b/libX11/src/imConv.c index 48bc79005a..5a5b83dc89 100644 --- a/libX11/src/imConv.c +++ b/libX11/src/imConv.c @@ -177,6 +177,8 @@ _XimLookupMBText( if ((nbytes == 0) || (symbol == NoSymbol)) return count; if (count > 1) { + if ((unsigned)count >= sizeof(look)) + return 0; memcpy(look, (char *)buffer,count); look[count] = '\0'; if ((count = im->methods->ctstombs(ic->core.im, @@ -320,6 +322,8 @@ _XimLookupUTF8Text( if ((nbytes == 0) || (symbol == NoSymbol)) return count; if (count > 1) { + if ((unsigned)count >= sizeof(look)) + return 0; memcpy(look, (char *)buffer,count); look[count] = '\0'; if ((count = im->methods->ctstoutf8(ic->core.im, diff --git a/libX11/src/poly.h b/libX11/src/poly.h index e064783d71..e43636c9bf 100644 --- a/libX11/src/poly.h +++ b/libX11/src/poly.h @@ -80,7 +80,7 @@ SOFTWARE. * If it is moving to the left, then we don't want it to flip until * we traverse an entire pixel. 
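The _XGetRequest hunk above replaces two member stores with a single compound-literal assignment; in C99, members not named in the initializer are zero-initialized, so the request's `data` byte can no longer go out on the wire uninitialized. The idiom in isolation, using a stand-in struct rather than the real xReq layout:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uint8_t  reqType;
        uint8_t  data;     /* the field the old code left unset */
        uint16_t length;
    } req_t;

    void fill_req(req_t *req, uint8_t type, size_t len)
    {
        /* Whole-struct assignment: named members get the given values,
         * and any member omitted from the initializer would be
         * zero-initialized. */
        *req = (req_t) {
            .reqType = type,
            .data    = 0,
            .length  = (uint16_t)(len / 4),
        };
    }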
*/ -#define BRESINITPGON(dy, x1, x2, xStart, d, m, m1, incr1, incr2) { \ +#define BRESINITPGON(dy, x1, x2, xStart, d, m, m1, incr1, incr2) do { \ int dx; /* local storage */ \ \ /* \ @@ -104,9 +104,9 @@ SOFTWARE. d = -2 * m * (dy) + 2 * dx; \ } \ } \ -} +} while (0) -#define BRESINCRPGON(d, minval, m, m1, incr1, incr2) { \ +#define BRESINCRPGON(d, minval, m, m1, incr1, incr2) do { \ if (m1 > 0) { \ if (d > 0) { \ minval += m1; \ @@ -126,7 +126,7 @@ SOFTWARE. d += incr2; \ } \ } \ -} +} while (0) /* @@ -256,7 +256,7 @@ typedef struct _ScanLineListBlock { * the caller when the edge has been removed so he * can reorder the Winding Active Edge Table. */ -#define EVALUATEEDGEWINDING(pAET, pPrevAET, y, fixWAET) { \ +#define EVALUATEEDGEWINDING(pAET, pPrevAET, y, fixWAET) do { \ if (pAET->ymax == y) { /* leaving this edge */ \ pPrevAET->next = pAET->next; \ pAET = pPrevAET->next; \ @@ -269,7 +269,7 @@ typedef struct _ScanLineListBlock { pPrevAET = pAET; \ pAET = pAET->next; \ } \ -} +} while (0) /* @@ -279,7 +279,7 @@ typedef struct _ScanLineListBlock { * x value to be ready for the next scanline. * The even-odd rule is in effect. */ -#define EVALUATEEDGEEVENODD(pAET, pPrevAET, y) { \ +#define EVALUATEEDGEEVENODD(pAET, pPrevAET, y) do { \ if (pAET->ymax == y) { /* leaving this edge */ \ pPrevAET->next = pAET->next; \ pAET = pPrevAET->next; \ @@ -291,4 +291,4 @@ typedef struct _ScanLineListBlock { pPrevAET = pAET; \ pAET = pAET->next; \ } \ -} +} while (0) diff --git a/libX11/src/utlist.h b/libX11/src/utlist.h index 215c2c62e9..e28dcdb397 100644 --- a/libX11/src/utlist.h +++ b/libX11/src/utlist.h @@ -86,7 +86,7 @@ do { (head)->prev = (head); \ (head)->next = NULL; \ } \ -} while (0); +} while (0) #define DL_DELETE(head,del) \ do { \ @@ -103,7 +103,7 @@ do { (head)->prev = (del)->prev; \ } \ } \ -} while (0); +} while (0) #define DL_FOREACH(head,el) \ diff --git a/libX11/src/xcb_io.c b/libX11/src/xcb_io.c index d4e57ac8a5..e9e38b9a8a 100644 --- a/libX11/src/xcb_io.c +++ b/libX11/src/xcb_io.c @@ -27,13 +27,13 @@ #include -#define xcb_fail_assert(_message, _var) { \ +#define xcb_fail_assert(_message, _var) do { \ unsigned int _var = 1; \ fprintf(stderr, "[xcb] Aborting, sorry about that.\n"); \ assert(!_var); \ -} +} while (0) -#define throw_thread_fail_assert(_message, _var) { \ +#define throw_thread_fail_assert(_message, _var) do { \ fprintf(stderr, "[xcb] " _message "\n"); \ if (_Xglobal_lock) { \ fprintf(stderr, "[xcb] You called XInitThreads, this is not your fault\n"); \ @@ -42,16 +42,16 @@ "and XInitThreads has not been called\n"); \ } \ xcb_fail_assert(_message, _var); \ -} +} while (0) /* XXX: It would probably be most useful if we stored the last-processed * request, so we could find the offender from the message. 
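The utlist.h hunk fixes the mirror image of the brace problem shown earlier: writing `} while (0);` inside the macro definition bakes a semicolon into every expansion, so an invocation such as `DL_DELETE(head, el);` becomes two statements and again detaches a following `else`. Reduced to a toy macro; STEP is invented for the example:

    /* Semicolon inside the definition: every use expands to a do/while
     * followed by an empty statement. */
    #define STEP_BAD(x)  do { (*(x))++; } while (0);
    /* Corrected form, matching this diff: the caller supplies the `;`. */
    #define STEP_GOOD(x) do { (*(x))++; } while (0)

    void step_demo(int cond, int *n)
    {
        if (cond)
            STEP_GOOD(n);  /* one statement; STEP_BAD would strand the else */
        else
            (*n)--;
    }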
*/ -#define throw_extlib_fail_assert(_message, _var) { \ +#define throw_extlib_fail_assert(_message, _var) do { \ fprintf(stderr, "[xcb] " _message "\n"); \ fprintf(stderr, "[xcb] This is most likely caused by a broken X " \ "extension library\n"); \ xcb_fail_assert(_message, _var); \ -} +} while (0) static void return_socket(void *closure) { diff --git a/libX11/src/xcms/cmsCmap.c b/libX11/src/xcms/cmsCmap.c index 8720dda702..2da3a4284d 100644 --- a/libX11/src/xcms/cmsCmap.c +++ b/libX11/src/xcms/cmsCmap.c @@ -91,12 +91,17 @@ CmapRecForColormap( _XAsyncHandler async; _XAsyncErrorState async_state; + LockDisplay(dpy); for (pRec = (XcmsCmapRec *)dpy->cms.clientCmaps; pRec != NULL; pRec = pRec->pNext) { if (pRec->cmapID == cmap) { + UnlockDisplay(dpy); + SyncHandle(); return(pRec); } } + UnlockDisplay(dpy); + SyncHandle(); /* * Can't find an XcmsCmapRec associated with cmap in our records. @@ -262,9 +267,12 @@ _XcmsAddCmapRec( pNew->dpy = dpy; pNew->windowID = windowID; pNew->visual = visual; + LockDisplay(dpy); pNew->pNext = (XcmsCmapRec *)dpy->cms.clientCmaps; dpy->cms.clientCmaps = (XPointer)pNew; dpy->free_funcs->clientCmaps = _XcmsFreeClientCmaps; + UnlockDisplay(dpy); + SyncHandle(); /* * Note, we don't create the XcmsCCC for pNew->ccc here because @@ -346,6 +354,7 @@ _XcmsDeleteCmapRec( } /* search for it in the list */ + LockDisplay(dpy); pPrevPtr = (XcmsCmapRec **)&dpy->cms.clientCmaps; while ((pRec = *pPrevPtr) && (pRec->cmapID != cmap)) { pPrevPtr = &pRec->pNext; @@ -358,6 +367,8 @@ _XcmsDeleteCmapRec( *pPrevPtr = pRec->pNext; Xfree(pRec); } + UnlockDisplay(dpy); + SyncHandle(); } @@ -382,6 +393,7 @@ _XcmsFreeClientCmaps( { XcmsCmapRec *pRecNext, *pRecFree; + LockDisplay(dpy); pRecNext = (XcmsCmapRec *)dpy->cms.clientCmaps; while (pRecNext != NULL) { pRecFree = pRecNext; @@ -394,6 +406,8 @@ _XcmsFreeClientCmaps( Xfree(pRecFree); } dpy->cms.clientCmaps = (XPointer)NULL; + UnlockDisplay(dpy); + SyncHandle(); } diff --git a/libX11/src/xcms/cmsColNm.c b/libX11/src/xcms/cmsColNm.c index 8058218115..231d1b5265 100644 --- a/libX11/src/xcms/cmsColNm.c +++ b/libX11/src/xcms/cmsColNm.c @@ -248,7 +248,6 @@ FirstCmp(const void *p1, const void *p2) /* * DESCRIPTION * Compares the color names of XcmsColorTuples. - * This routine is public to allow access from qsort???. 
* * RETURNS * 0 if equal; diff --git a/libX11/src/xcms/cmsTrig.c b/libX11/src/xcms/cmsTrig.c index b23033aa8a..ebb92be5b5 100644 --- a/libX11/src/xcms/cmsTrig.c +++ b/libX11/src/xcms/cmsTrig.c @@ -71,7 +71,11 @@ _XcmsModuloF( #define XCMS_SIXTHPI 0.523598775598298820 #define XCMS_RADIANS(d) ((d) * XCMS_PI / 180.0) #define XCMS_DEGREES(r) ((r) * 180.0 / XCMS_PI) +#ifdef __vax__ +#define XCMS_X6_UNDERFLOWS (3.784659e-07) /* X**6 almost underflows*/ +#else #define XCMS_X6_UNDERFLOWS (4.209340e-52) /* X**6 almost underflows */ +#endif #define XCMS_X16_UNDERFLOWS (5.421010e-20) /* X**16 almost underflows*/ #define XCMS_CHAR_BIT 8 #define XCMS_LONG_MAX 0x7FFFFFFF diff --git a/libX11/src/xkb/XKBleds.c b/libX11/src/xkb/XKBleds.c index 5c51f276a7..8e75ea540b 100644 --- a/libX11/src/xkb/XKBleds.c +++ b/libX11/src/xkb/XKBleds.c @@ -72,16 +72,14 @@ _XkbReadGetIndicatorMapReply(Display *dpy, leds->phys_indicators = rep->realIndicators; if (rep->length > 0) { - register int left; - if (!_XkbInitReadBuffer(dpy, &buf, (int) rep->length * 4)) return BadAlloc; if (nread_rtrn) *nread_rtrn = (int) rep->length * 4; if (rep->which) { - register int i, bit; + unsigned int i, bit, left; - left = (int) rep->which; + left = rep->which; for (i = 0, bit = 1; (i < XkbNumIndicators) && (left); i++, bit <<= 1) { if (left & bit) { @@ -106,7 +104,7 @@ _XkbReadGetIndicatorMapReply(Display *dpy, } } } - left = _XkbFreeReadBuffer(&buf); + (void) _XkbFreeReadBuffer(&buf); } return Success; } diff --git a/libX11/src/xkb/XKBlibint.h b/libX11/src/xkb/XKBlibint.h index c2f7351c74..ebf72ced59 100644 --- a/libX11/src/xkb/XKBlibint.h +++ b/libX11/src/xkb/XKBlibint.h @@ -85,7 +85,7 @@ typedef struct _XkbInfoRec { (((d)->flags&XlibDisplayNoXkb) || \ ((!(d)->xkb_info || (!(d)->xkb_info->desc)) && !_XkbLoadDpy(d))) -#define _XkbCheckPendingRefresh(d,xi) { \ +#define _XkbCheckPendingRefresh(d,xi) do { \ if ((xi)->flags&XkbXlibNewKeyboard) \ _XkbReloadDpy((d)); \ else if ((xi)->flags&XkbMapPending) { \ @@ -95,7 +95,7 @@ typedef struct _XkbInfoRec { UnlockDisplay((d)); \ } \ } \ -} +} while (0) #define _XkbNeedModmap(i) ((!(i)->desc->map)||(!(i)->desc->map->modmap)) diff --git a/libX11/src/xlibi18n/XDefaultOMIF.c b/libX11/src/xlibi18n/XDefaultOMIF.c index cdd90113a4..de26167589 100644 --- a/libX11/src/xlibi18n/XDefaultOMIF.c +++ b/libX11/src/xlibi18n/XDefaultOMIF.c @@ -695,10 +695,10 @@ _XmbDefaultTextPerCharExtents(XOC oc, _Xconst char *text, int length, bzero((char *) &overall, sizeof(XCharStruct)); *num_chars = 0; - CI_GET_DEFAULT_INFO_1D(font, def) + CI_GET_DEFAULT_INFO_1D(font, def); while (length-- > 0) { - CI_GET_CHAR_INFO_1D(font, *text, def, cs) + CI_GET_CHAR_INFO_1D(font, *text, def, cs); text++; if (cs == NULL) continue; diff --git a/libX11/src/xlibi18n/XimProto.h b/libX11/src/xlibi18n/XimProto.h index eab1d29331..c62bff2730 100644 --- a/libX11/src/xlibi18n/XimProto.h +++ b/libX11/src/xlibi18n/XimProto.h @@ -220,7 +220,7 @@ typedef CARD16 XICID; /* Input Context ID */ #define XIM_PAD(length) ((4 - ((length) % 4)) % 4) #define XIM_SET_PAD(ptr, length) \ - { \ + do { \ register int Counter = XIM_PAD((int)length); \ if (Counter) { \ register char *Ptr = (char *)(ptr) + (length); \ @@ -228,6 +228,6 @@ typedef CARD16 XICID; /* Input Context ID */ for (; Counter; --Counter, ++Ptr) \ *Ptr = '\0'; \ } \ - } + } while (0) #endif /* _XIMPROTO_H */ diff --git a/libX11/src/xlibi18n/XimintP.h b/libX11/src/xlibi18n/XimintP.h index 1afa3c75f2..525e1ddb53 100644 --- a/libX11/src/xlibi18n/XimintP.h +++ b/libX11/src/xlibi18n/XimintP.h @@ -311,18 
+311,18 @@ typedef struct _XicProtoPrivateRec { #define XIM_MAXIMNAMELEN 64 #define XIM_MAXLCNAMELEN 64 -Bool +_X_HIDDEN Bool _XimFabricateSerial( Xim im, XKeyEvent *event); -Bool +_X_HIDDEN Bool _XimUnfabricateSerial( Xim im, Xic ic, XKeyEvent *event); -Bool +_X_HIDDEN Bool _XimIsFabricatedSerial( Xim im, XKeyEvent *event); diff --git a/libX11/src/xlibi18n/lcFile.c b/libX11/src/xlibi18n/lcFile.c index 4a7d05947f..f116ff1d21 100644 --- a/libX11/src/xlibi18n/lcFile.c +++ b/libX11/src/xlibi18n/lcFile.c @@ -626,9 +626,9 @@ _XlcLocaleDirName(char *dir_name, size_t dir_len, const char *lc_name) Xfree (last_dir_name); Xfree (last_lc_name); - last_dir_len = strlen (dir_name) + 1; - last_dir_name = Xmalloc (last_dir_len); - strcpy (last_dir_name, dir_name); + last_dir_name = strdup (dir_name); + last_dir_len = (last_dir_name != NULL) ? strlen (last_dir_name) + 1 : 0; + last_lc_name = strdup (lc_name); return dir_name; @@ -703,9 +703,9 @@ _XlcLocaleLibDirName(char *dir_name, size_t dir_len, const char *lc_name) Xfree (last_dir_name); Xfree (last_lc_name); - last_dir_len = strlen (dir_name) + 1; - last_dir_name = Xmalloc (last_dir_len); - strcpy (last_dir_name, dir_name); + last_dir_name = strdup (dir_name); + last_dir_len = (last_dir_name != NULL) ? strlen (last_dir_name) + 1 : 0; + last_lc_name = strdup (lc_name); return dir_name; diff --git a/libX11/src/xlibi18n/lcUtil.c b/libX11/src/xlibi18n/lcUtil.c index 2ff86abaab..cd4cc77442 100644 --- a/libX11/src/xlibi18n/lcUtil.c +++ b/libX11/src/xlibi18n/lcUtil.c @@ -32,9 +32,10 @@ /* Don't use here because it is locale dependent. */ -#define set_toupper(ch) \ +#define set_toupper(ch) do { \ if (ch >= 'a' && ch <= 'z') \ - ch = (unsigned char) (ch - 'a' + 'A'); + ch = (unsigned char) (ch - 'a' + 'A'); \ +} while (0) /* Compares two ISO 8859-1 strings, ignoring case of ASCII letters. Like strcasecmp in an ASCII locale. 
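The lcFile.c hunks replace a strlen/Xmalloc/strcpy triple with strdup and, importantly, derive the cached length from the duplicate rather than from the source string, so an allocation failure leaves the cache in a consistent empty state instead of pairing a stale length with a NULL pointer. The same pattern in isolation; the cache variables here are hypothetical:

    #include <stdlib.h>
    #include <string.h>

    static char  *cached_name;  /* NULL when the cache is empty */
    static size_t cached_len;   /* 0 whenever cached_name is NULL */

    static void cache_set(const char *name)
    {
        free(cached_name);
        cached_name = strdup(name);
        /* The length follows what was actually stored, so both fields
         * always describe the same state, even when strdup fails. */
        cached_len = (cached_name != NULL) ? strlen(cached_name) + 1 : 0;
    }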
*/ diff --git a/libX11/src/xlibi18n/lcWrap.c b/libX11/src/xlibi18n/lcWrap.c index 7d1877b551..a5d6e18147 100644 --- a/libX11/src/xlibi18n/lcWrap.c +++ b/libX11/src/xlibi18n/lcWrap.c @@ -134,16 +134,16 @@ _XlcDefaultMapModifiers( const char *user_mods, const char *prog_mods) { - int i; + size_t i; char *mods; if (!_XlcValidModSyntax(prog_mods, im_valid)) return (char *)NULL; if (!_XlcValidModSyntax(user_mods, im_valid)) return (char *)NULL; - i = (int) strlen(prog_mods) + 1; + i = strlen(prog_mods) + 1; if (user_mods) - i = (int) ((size_t) i + strlen(user_mods)); + i += strlen(user_mods); mods = Xmalloc(i); if (mods) { strcpy(mods, prog_mods); diff --git a/libXau/AuDispose.c b/libXau/AuDispose.c index a449390cec..e1e609f769 100644 --- a/libXau/AuDispose.c +++ b/libXau/AuDispose.c @@ -39,13 +39,13 @@ XauDisposeAuth (Xauth *auth) free (auth->name); if (auth->data) { #ifdef HAVE_EXPLICIT_BZERO - (void) explicit_bzero (auth->data, auth->data_length); + explicit_bzero (auth->data, auth->data_length); #elif HAVE_EXPLICIT_MEMSET - (void) explicit_memset (auth->data, 0, auth->data_length); + explicit_memset (auth->data, 0, auth->data_length); #else - (void) bzero (auth->data, auth->data_length); + bzero (auth->data, auth->data_length); #endif - (void) free (auth->data); + free (auth->data); } free (auth); } diff --git a/libXau/configure.ac b/libXau/configure.ac index f062dbbad3..e1182b6372 100644 --- a/libXau/configure.ac +++ b/libXau/configure.ac @@ -22,7 +22,7 @@ # Initialize Autoconf AC_PREREQ([2.60]) -AC_INIT([libXau], [1.0.11], +AC_INIT([libXau], [1.0.12], [https://gitlab.freedesktop.org/xorg/lib/libxau/-/issues], [libXau]) AC_CONFIG_SRCDIR([Makefile.am]) AC_CONFIG_HEADERS([config.h]) diff --git a/libXau/meson.build b/libXau/meson.build index 22dd88c692..dc0c34bb66 100644 --- a/libXau/meson.build +++ b/libXau/meson.build @@ -4,7 +4,7 @@ project( 'libXau', 'c', - version : '1.0.11', + version : '1.0.12', license : 'MIT', meson_version : '>= 0.60.0', ) diff --git a/libXdmcp/Unwrap.c b/libXdmcp/Unwrap.c index 82a10ee03f..b5e6cc788f 100644 --- a/libXdmcp/Unwrap.c +++ b/libXdmcp/Unwrap.c @@ -74,7 +74,7 @@ XdmcpUnwrap ( return; /* bad input length */ for (i = 0; i < 8; i++) blocks[k][i] = input[j + i]; - _XdmcpAuthDoIt ((unsigned char *) (input + j), (unsigned char *) tmp, schedule, 0); + _XdmcpAuthDoIt ((input + j), tmp, schedule, 0); /* block chaining */ k = (k == 0) ? 
1 : 0; for (i = 0; i < 8; i++) diff --git a/libXdmcp/Write.c b/libXdmcp/Write.c index 3bec7a2b37..c7f86f6f3e 100644 --- a/libXdmcp/Write.c +++ b/libXdmcp/Write.c @@ -40,7 +40,7 @@ XdmcpWriteHeader ( { BYTE *newData; - if ((int)buffer->size < 6 + (int)header->length) + if (buffer->size < 6 + (int)header->length) { newData = calloc(XDM_MAX_MSGLEN, sizeof (BYTE)); if (!newData) diff --git a/libXdmcp/include/X11/Xdmcp.h b/libXdmcp/include/X11/Xdmcp.h index a6f6f88b4c..e24cc43f75 100644 --- a/libXdmcp/include/X11/Xdmcp.h +++ b/libXdmcp/include/X11/Xdmcp.h @@ -128,6 +128,12 @@ typedef char *XdmcpNetaddr; # define XDM_ACCESS_ATTRIBUTE(X) #endif +#if __has_attribute(pure) +# define XDM_PURE_ATTRIBUTE __attribute__((pure)) +#else +# define XDM_PURE_ATTRIBUTE +#endif + XDM_ACCESS_ATTRIBUTE((read_write, 1)) XDM_ACCESS_ATTRIBUTE((read_only, 2)) extern int XdmcpWriteARRAY16(XdmcpBufferPtr buffer, const ARRAY16Ptr array); XDM_ACCESS_ATTRIBUTE((read_write, 1)) XDM_ACCESS_ATTRIBUTE((read_only, 2)) @@ -169,6 +175,7 @@ XDM_ACCESS_ATTRIBUTE((read_write, 2)) extern int XdmcpFill(int fd, XdmcpBufferPtr buffer, XdmcpNetaddr from, int *fromlen); XDM_ACCESS_ATTRIBUTE((read_only, 1)) +XDM_PURE_ATTRIBUTE extern int XdmcpReadRemaining(const XdmcpBufferPtr buffer); XDM_ACCESS_ATTRIBUTE((read_write, 1)) @@ -184,6 +191,7 @@ XDM_ACCESS_ATTRIBUTE((read_only, 1)) XDM_ACCESS_ATTRIBUTE((write_only, 2)) extern int XdmcpCopyARRAY8(const ARRAY8Ptr src, ARRAY8Ptr dst); XDM_ACCESS_ATTRIBUTE((read_only, 1)) XDM_ACCESS_ATTRIBUTE((read_only, 2)) +XDM_PURE_ATTRIBUTE extern int XdmcpARRAY8Equal(const ARRAY8Ptr array1, const ARRAY8Ptr array2); XDM_ACCESS_ATTRIBUTE((write_only, 1)) @@ -201,6 +209,7 @@ extern void XdmcpUnwrap(unsigned char *input, unsigned char *wrapper, unsigned c #endif XDM_ACCESS_ATTRIBUTE((read_only, 1)) XDM_ACCESS_ATTRIBUTE((read_only, 2)) +XDM_PURE_ATTRIBUTE extern int XdmcpCompareKeys (const XdmAuthKeyPtr a, const XdmAuthKeyPtr b); XDM_ACCESS_ATTRIBUTE((write_only, 1)) diff --git a/libXft/configure.ac b/libXft/configure.ac index 3673d127e9..c45c9957f4 100644 --- a/libXft/configure.ac +++ b/libXft/configure.ac @@ -64,7 +64,7 @@ AC_SUBST([XFT_LT_VERSION]) # # Check for Xrender # -PKG_CHECK_MODULES(XRENDER, xrender >= 0.8.2 x11) +PKG_CHECK_MODULES(XRENDER, xrender >= 0.8.2 x11 xproto >= 7.0.22) # Check freetype configuration PKG_CHECK_MODULES(FREETYPE, freetype2 >= 2.1.6) diff --git a/libXft/include/X11/Xft/Xft.h.in b/libXft/include/X11/Xft/Xft.h.in index df0c2c0555..420ec28e04 100644 --- a/libXft/include/X11/Xft/Xft.h.in +++ b/libXft/include/X11/Xft/Xft.h.in @@ -42,10 +42,6 @@ #include #include -/* #include */ -#ifndef _X_SENTINEL -# define _X_SENTINEL(x) -#endif #ifndef _XFT_NO_COMPAT_ #include diff --git a/libXft/src/xftcore.c b/libXft/src/xftcore.c index b93dc58158..9b525b8be9 100644 --- a/libXft/src/xftcore.c +++ b/libXft/src/xftcore.c @@ -923,7 +923,7 @@ _XftGlyphDefault (Display *dpy, XftFont *public) { XftFontInt *font = (XftFontInt *) public; FT_UInt missing[XFT_NMISSING]; - int nmissing; + int nmissing = 0; FcBool glyphs_loaded = FcFalse; if (XftFontCheckGlyph (dpy, public, FcTrue, 0, missing, &nmissing)) diff --git a/libXft/src/xftint.h b/libXft/src/xftint.h index c59985ab16..2f2d7d32ce 100644 --- a/libXft/src/xftint.h +++ b/libXft/src/xftint.h @@ -34,22 +34,11 @@ #ifdef HAVE_CONFIG_H #include "config.h" -#else /* X monolithic tree */ -#define HAVE_STDLIB_H 1 /* assumed since all ANSI C platforms require it */ -#include /* get string.h or strings.h as appropriate */ #endif #include -#if HAVE_STDLIB_H 
#include -#endif -#if HAVE_STRING_H #include -#else -#if HAVE_STRINGS_H -#include -#endif -#endif #include #include @@ -62,14 +51,6 @@ #include #include -/* Added to in X11R6.9 and later */ -#ifndef _X_HIDDEN -# define _X_HIDDEN /**/ -#endif -#ifndef _X_EXPORT -# define _X_EXPORT /**/ -#endif - typedef struct _XftMatcher { char *object; double (*compare) (char *object, FcValue value1, FcValue value2); diff --git a/libXrender/src/Glyph.c b/libXrender/src/Glyph.c index bc9593317f..aad96117d8 100644 --- a/libXrender/src/Glyph.c +++ b/libXrender/src/Glyph.c @@ -480,7 +480,7 @@ XRenderCompositeText8 (Display *dpy, { int this_chars = nchars > MAX_8 ? MAX_8 : nchars; - BufAlloc (xGlyphElt *, elt, SIZEOF(xGlyphElt)) + BufAlloc (xGlyphElt *, elt, SIZEOF(xGlyphElt)); elt->len = (CARD8) this_chars; elt->deltax = (INT16) xDst; elt->deltay = (INT16) yDst; @@ -594,7 +594,7 @@ XRenderCompositeText16 (Display *dpy, int this_chars = nchars > MAX_16 ? MAX_16 : nchars; int this_bytes = this_chars * 2; - BufAlloc (xGlyphElt *, elt, SIZEOF(xGlyphElt)) + BufAlloc (xGlyphElt *, elt, SIZEOF(xGlyphElt)); elt->len = (CARD8) this_chars; elt->deltax = (INT16) xDst; elt->deltay = (INT16) yDst; @@ -703,7 +703,7 @@ XRenderCompositeText32 (Display *dpy, { int this_chars = nchars > MAX_32 ? MAX_32 : nchars; int this_bytes = this_chars * 4; - BufAlloc (xGlyphElt *, elt, SIZEOF(xGlyphElt)) + BufAlloc (xGlyphElt *, elt, SIZEOF(xGlyphElt)); elt->len = (CARD8) this_chars; elt->deltax = (INT16) xDst; elt->deltay = (INT16) yDst; diff --git a/libxcb/src/c_client.py b/libxcb/src/c_client.py index 3a267d2f73..6e1b7157cf 100755 --- a/libxcb/src/c_client.py +++ b/libxcb/src/c_client.py @@ -2661,14 +2661,14 @@ def create_link(linkname): name = 'man/%s.%s' % (linkname, section) if manpaths: sys.stdout.write(name) - f = open(name, 'w') + f = open(name, 'w', encoding='UTF-8') f.write('.so man%s/%s.%s' % (section, func_name, section)) f.close() if manpaths: sys.stdout.write('man/%s.%s ' % (func_name, section)) # Our CWD is src/, so this will end up in src/man/ - f = open('man/%s.%s' % (func_name, section), 'w') + f = open('man/%s.%s' % (func_name, section), 'w', encoding='UTF-8') f.write('.TH %s %s "%s" "%s" "XCB Requests"\n' % (func_name, section, center_footer, left_footer)) # Left-adjust instead of adjusting to both sides f.write('.ad l\n') @@ -3036,7 +3036,7 @@ def _man_event(self, name): if manpaths: sys.stdout.write('man/%s.%s ' % (self.c_type, section)) # Our CWD is src/, so this will end up in src/man/ - f = open('man/%s.%s' % (self.c_type, section), 'w') + f = open('man/%s.%s' % (self.c_type, section), 'w', encoding='UTF-8') f.write('.TH %s %s "%s" "%s" "XCB Events"\n' % (self.c_type, section, center_footer, left_footer)) # Left-adjust instead of adjusting to both sides f.write('.ad l\n') diff --git a/libxcb/src/xcb.h b/libxcb/src/xcb.h index d5a1697fa2..bf2ba2f8f5 100644 --- a/libxcb/src/xcb.h +++ b/libxcb/src/xcb.h @@ -29,11 +29,7 @@ #define __XCB_H__ #include -#if defined(__solaris__) -#include -#else #include -#endif #ifndef _WIN32 #include @@ -52,12 +48,36 @@ extern "C" { * @file xcb.h */ -#ifdef __GNUC__ +#ifndef __has_attribute +# define __has_attribute(x) 0 /* Compatibility with older compilers. */ +#endif + +/* + * For the below checks, we currently assume that __GNUC__ indicates + * gcc 3.0 (released 2001) or later, as we require support for C99. 
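Defining a zero-returning `__has_attribute` fallback before the checks is what keeps each later test a single `#if defined(__GNUC__) || __has_attribute(...)` expression on every compiler. The same probe-or-fallback shape applied outside xcb, with a hypothetical macro name; MY_NORETURN is not part of this header:

    /* The probe must be defined before any #if that uses it. */
    #ifndef __has_attribute
    # define __has_attribute(x) 0  /* compilers without the operator */
    #endif

    #if defined(__GNUC__) || __has_attribute(__noreturn__)
    # define MY_NORETURN __attribute__((__noreturn__))
    #else
    # define MY_NORETURN
    #endif

    MY_NORETURN void fatal(const char *msg);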
+ */ + +/* Supported in gcc 2.5 and later */ +#if defined(__GNUC__) || __has_attribute(__const__) +#define XCB_CONST_FUNCTION __attribute__((__const__)) +#else +#define XCB_CONST_FUNCTION XCB_PURE_FUNCTION +#endif + +/* Supported in gcc 2.7 and later */ +#if defined(__GNUC__) || __has_attribute(__packed__) #define XCB_PACKED __attribute__((__packed__)) #else #define XCB_PACKED #endif +/* Supported in gcc 2.96 and later */ +#if defined(__GNUC__) || __has_attribute(__pure__) +#define XCB_PURE_FUNCTION __attribute__((__pure__)) +#else +#define XCB_PURE_FUNCTION +#endif + /** * @defgroup XCB_Core_API XCB Core API * @brief Core API of the XCB library. @@ -470,6 +490,7 @@ void xcb_prefetch_extension_data(xcb_connection_t *c, xcb_extension_t *ext); * * The result must not be freed. */ +XCB_PURE_FUNCTION const struct xcb_setup_t *xcb_get_setup(xcb_connection_t *c); /** @@ -480,6 +501,7 @@ const struct xcb_setup_t *xcb_get_setup(xcb_connection_t *c); * Accessor for the file descriptor that was passed to the * xcb_connect_to_fd call that returned @p c. */ +XCB_PURE_FUNCTION int xcb_get_file_descriptor(xcb_connection_t *c); /** @@ -500,6 +522,7 @@ int xcb_get_file_descriptor(xcb_connection_t *c); * @return XCB_CONN_CLOSED_PARSE_ERR, error during parsing display string. * @return XCB_CONN_CLOSED_INVALID_SCREEN, because the server does not have a screen matching the display. */ +XCB_PURE_FUNCTION int xcb_connection_has_error(xcb_connection_t *c); /** diff --git a/libxcb/src/xcbext.h b/libxcb/src/xcbext.h index 90f9d58b88..1bb992eccd 100644 --- a/libxcb/src/xcbext.h +++ b/libxcb/src/xcbext.h @@ -297,6 +297,7 @@ int xcb_poll_for_reply64(xcb_connection_t *c, uint64_t request, void **reply, xc * @param replylen The size of the reply. * @return Pointer to the location where received file descriptors are stored. */ +XCB_CONST_FUNCTION int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t replylen); @@ -306,6 +307,7 @@ int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t replylen); * @param mask The mask to check * @return The number of set bits in the mask */ +XCB_CONST_FUNCTION int xcb_popcount(uint32_t mask); /** @@ -313,6 +315,7 @@ int xcb_popcount(uint32_t mask); * @param len The length of the array * @return The sum of all entries in the array. */ +XCB_PURE_FUNCTION int xcb_sumof(uint8_t *list, int len); #ifdef __cplusplus diff --git a/libxcb/src/xcbint.h b/libxcb/src/xcbint.h index 235c8481a3..9836def50a 100644 --- a/libxcb/src/xcbint.h +++ b/libxcb/src/xcbint.h @@ -225,6 +225,7 @@ struct xcb_connection_t { void _xcb_conn_shutdown(xcb_connection_t *c, int err); +XCB_CONST_FUNCTION xcb_connection_t *_xcb_conn_ret_error(int err); int _xcb_conn_wait(xcb_connection_t *c, pthread_cond_t *cond, struct iovec **vector, int *count); diff --git a/libxcb/xcb-proto/doc/xml-xcb.txt b/libxcb/xcb-proto/doc/xml-xcb.txt index baef734168..156275d791 100644 --- a/libxcb/xcb-proto/doc/xml-xcb.txt +++ b/libxcb/xcb-proto/doc/xml-xcb.txt @@ -19,7 +19,7 @@ xcb.xsd An XML Schema defining the data format for describing the X Generating C bindings ===================== -See libxcb . +See libxcb . Protocol Description Format diff --git a/libxcb/xcb-proto/src/glx.xml b/libxcb/xcb-proto/src/glx.xml index 9abfc5b76c..0b0e968ace 100644 --- a/libxcb/xcb-proto/src/glx.xml +++ b/libxcb/xcb-proto/src/glx.xml @@ -40,8 +40,8 @@ but on broken servers the "* 2" was missing. A workaround that is correct for all implementations is to rewrite the length field on receipt on the client side, using the expression above. 
-The patch that fixed this server bug in X.org CVS is here: - http://cvs.freedesktop.org/xorg/xserver/xorg/GL/glx/glxcmds.c?r1=1.6&r2=1.7 +The patch that fixed this server bug in X.org is here: + https://gitlab.freedesktop.org/xorg/xserver/-/commit/baa99be190c51b533 --> 7 --> @@ -2700,15 +2700,15 @@ authorization from the authors. diff --git a/mesalib/.ci-farms/austriancoder b/mesalib/.ci-farms-disabled/austriancoder similarity index 100% rename from mesalib/.ci-farms/austriancoder rename to mesalib/.ci-farms-disabled/austriancoder diff --git a/mesalib/.ci-farms/lima b/mesalib/.ci-farms-disabled/lima similarity index 100% rename from mesalib/.ci-farms/lima rename to mesalib/.ci-farms-disabled/lima diff --git a/mesalib/.ci-farms-disabled/vmware b/mesalib/.ci-farms/google-freedreno similarity index 100% rename from mesalib/.ci-farms-disabled/vmware rename to mesalib/.ci-farms/google-freedreno diff --git a/mesalib/.ci-farms/freedreno b/mesalib/.ci-farms/vmware similarity index 100% rename from mesalib/.ci-farms/freedreno rename to mesalib/.ci-farms/vmware diff --git a/mesalib/.clang-format-include b/mesalib/.clang-format-include index 9d49d8aba2..797627b13d 100644 --- a/mesalib/.clang-format-include +++ b/mesalib/.clang-format-include @@ -2,6 +2,7 @@ # enforcement in the CI. src/gallium/drivers/i915 +src/gallium/drivers/r300/compiler/* src/gallium/targets/teflon/**/* src/amd/vulkan/**/* src/amd/compiler/**/* diff --git a/mesalib/.git-blame-ignore-revs b/mesalib/.git-blame-ignore-revs index b2036d7e61..08e28ae058 100644 --- a/mesalib/.git-blame-ignore-revs +++ b/mesalib/.git-blame-ignore-revs @@ -65,3 +65,6 @@ c7bf3b69ebc8f2252dbf724a4de638e6bb2ac402 # ir3: Reformat source with clang-format 177138d8cb0b4f6a42ef0a1f8593e14d79f17c54 + +# ir3: reformat after refactoring in previous commit +8ae5b27ee0331a739d14b42e67586784d6840388 diff --git a/mesalib/.gitlab-ci.yml b/mesalib/.gitlab-ci.yml index 22415218f3..b5f9c88335 100644 --- a/mesalib/.gitlab-ci.yml +++ b/mesalib/.gitlab-ci.yml @@ -33,39 +33,34 @@ workflow: # merge pipeline - if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event" variables: - KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG} MESA_CI_PERFORMANCE_ENABLED: 1 VALVE_INFRA_VANGOGH_JOB_PRIORITY: "" # Empty tags are ignored by gitlab + JOB_PRIORITY: 75 + # fast-fail in merge pipelines: stop early if we get this many unexpected fails/crashes + DEQP_RUNNER_MAX_FAILS: 40 # post-merge pipeline - if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push" + # Pre-merge pipeline + - if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event" + # Push to a branch on a fork + - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push" # nightly pipeline - if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule" variables: - KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG} - JOB_PRIORITY: 50 - VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low + JOB_PRIORITY: 45 + # (some) nightly builds perform LTO, so they take much longer than the + # short timeout allowed in other pipelines. 
+ # Note: 0 = infinity = gitlab's job `timeout:` applies, which is 1h + BUILD_JOB_TIMEOUT_OVERRIDE: 0 # pipeline for direct pushes that bypassed the CI - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot" variables: - KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG} - JOB_PRIORITY: 40 - VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low - # pre-merge or fork pipeline - - if: $FORCE_KERNEL_TAG != null - variables: - KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${FORCE_KERNEL_TAG} - JOB_PRIORITY: 50 - VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low - - if: $FORCE_KERNEL_TAG == null - variables: - KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG} - JOB_PRIORITY: 50 - VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low + JOB_PRIORITY: 70 variables: FDO_UPSTREAM_REPO: mesa/mesa - MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb + MESA_TEMPLATES_COMMIT: &ci-templates-commit e195d80f35b45cc73668be3767b923fd76c70ed5 CI_PRE_CLONE_SCRIPT: |- set -o xtrace wget -q -O download-git-cache.sh ${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh @@ -73,7 +68,12 @@ variables: rm download-git-cache.sh set +o xtrace S3_JWT_FILE: /s3_jwt + S3_JWT_FILE_SCRIPT: |- + echo -n '${S3_JWT}' > '${S3_JWT_FILE}' && + unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables S3_HOST: s3.freedesktop.org + # This bucket is used to fetch ANDROID prebuilts and images + S3_ANDROID_BUCKET: mesa-rootfs # This bucket is used to fetch the kernel image S3_KERNEL_BUCKET: mesa-rootfs # Bucket for git cache @@ -97,34 +97,33 @@ variables: ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts # Python scripts for structured logger PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install" - # Drop once deqp-runner is upgraded to > 0.18.0 + # No point in continuing once the device is lost MESA_VK_ABORT_ON_DEVICE_LOSS: 1 # Avoid the wall of "Unsupported SPIR-V capability" warnings in CI job log, hiding away useful output MESA_SPIRV_LOG_LEVEL: error + # Default priority for non-merge pipelines + VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low + JOB_PRIORITY: 50 default: id_tokens: S3_JWT: aud: https://s3.freedesktop.org before_script: + - | + if [ -z "${KERNEL_IMAGE_BASE:-}" ]; then + export KERNEL_IMAGE_BASE="https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${EXTERNAL_KERNEL_TAG:-$KERNEL_TAG}" + fi - > export SCRIPTS_DIR=$(mktemp -d) && curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh" && - . ${SCRIPTS_DIR}/setup-test-env.sh && - echo -n "${S3_JWT}" > "${S3_JWT_FILE}" && - unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables + . ${SCRIPTS_DIR}/setup-test-env.sh + - eval "$S3_JWT_FILE_SCRIPT" after_script: # Work around https://gitlab.com/gitlab-org/gitlab/-/issues/20338 - find -name '*.log' -exec mv {} {}.txt \; - - > - set +x - - test -e "${S3_JWT_FILE}" && - export S3_JWT="$(<${S3_JWT_FILE})" && - rm "${S3_JWT_FILE}" - # Retry when job fails. 
Failed jobs can be found in the Mesa CI Daily Reports: # https://gitlab.freedesktop.org/mesa/mesa/-/issues/?sort=created_date&state=opened&label_name%5B%5D=CI%20daily retry: @@ -144,18 +143,28 @@ stages: - sanity - container - git-archive - - build-x86_64 - - build-misc + - build-for-tests + - build-only - code-validation - amd + - amd-postmerge - intel + - intel-postmerge - nouveau + - nouveau-postmerge - arm + - arm-postmerge - broadcom + - broadcom-postmerge - freedreno + - freedreno-postmerge - etnaviv + - etnaviv-postmerge - software-renderer + - software-renderer-postmerge - layered-backends + - layered-backends-postmerge + - performance - deploy include: @@ -180,12 +189,11 @@ include: - local: 'src/**/ci/gitlab-ci.yml' -# YAML anchors for rule conditions -# -------------------------------- -.rules-anchors: - # Pre-merge pipeline - - &is-pre-merge '$CI_PIPELINE_SOURCE == "merge_request_event"' - +# Rules applied to every job in the pipeline +.common-rules: + rules: + - if: *is-fork-push + when: manual .never-post-merge-rules: rules: @@ -195,6 +203,7 @@ include: .container+build-rules: rules: + - !reference [.common-rules, rules] # Run when re-enabling a disabled farm, but not when disabling it - !reference [.disable-farm-mr-rules, rules] # Never run immediately after merging, as we just ran everything @@ -207,6 +216,7 @@ include: - bin/git_sha1_gen.py - bin/install_megadrivers.py - bin/symbols-check.py + - bin/ci/**/* # GitLab CI - .gitlab-ci.yml - .gitlab-ci/**/* @@ -295,19 +305,20 @@ sanity: - | set -eu image_tags=( - DEBIAN_BASE_TAG - DEBIAN_BUILD_TAG - DEBIAN_X86_64_TEST_ANDROID_TAG - DEBIAN_X86_64_TEST_GL_TAG - DEBIAN_X86_64_TEST_VK_TAG ALPINE_X86_64_BUILD_TAG ALPINE_X86_64_LAVA_SSH_TAG + DEBIAN_BASE_TAG + DEBIAN_BUILD_TAG + DEBIAN_PYUTILS_TAG + DEBIAN_TEST_ANDROID_TAG + DEBIAN_TEST_GL_TAG + DEBIAN_TEST_VK_TAG FEDORA_X86_64_BUILD_TAG KERNEL_ROOTFS_TAG KERNEL_TAG PKG_REPO_REV - WINDOWS_X64_MSVC_TAG WINDOWS_X64_BUILD_TAG + WINDOWS_X64_MSVC_TAG WINDOWS_X64_TEST_TAG ) for var in "${image_tags[@]}" @@ -349,3 +360,5 @@ mr-label-maker-test: optional: true - job: rustfmt optional: true + - job: toml-lint + optional: true diff --git a/mesalib/.gitlab-ci/.flake8 b/mesalib/.gitlab-ci/.flake8 new file mode 100644 index 0000000000..1be72fc784 --- /dev/null +++ b/mesalib/.gitlab-ci/.flake8 @@ -0,0 +1,33 @@ +[flake8] +exclude = .venv*, + +# PEP 8 Style Guide limits line length to 79 characters +max-line-length = 159 + +ignore = + # continuation line under-indented for hanging indent + E121 + # continuation line over-indented for hanging indent + E126, + # continuation line under-indented for visual indent + E128, + # whitespace before ':' + E203, + # missing whitespace around arithmetic operator + E226, + # missing whitespace after ',' + E231, + # expected 2 blank lines, found 1 + E302, + # too many blank lines + E303, + # imported but unused + F401, + # f-string is missing placeholders + F541, + # local variable assigned to but never used + F841, + # line break before binary operator + W503, + # line break after binary operator + W504, diff --git a/mesalib/.gitlab-ci/all-skips.txt b/mesalib/.gitlab-ci/all-skips.txt index 6291ca0d17..88f287fd78 100644 --- a/mesalib/.gitlab-ci/all-skips.txt +++ b/mesalib/.gitlab-ci/all-skips.txt @@ -80,3 +80,8 @@ wayland-dEQP-EGL.functional.render.multi_context.gles2_gles3.other wayland-dEQP-EGL.functional.render.multi_thread.gles2.other wayland-dEQP-EGL.functional.render.multi_thread.gles3.other wayland-dEQP-EGL.functional.render.multi_thread.gles2_gles3.other 
+ +# These test the loader more than the implementation and are broken because the +# Vulkan loader in Debian is too old +dEQP-VK.api.get_device_proc_addr.non_enabled +dEQP-VK.api.version_check.unavailable_entry_points diff --git a/mesalib/.gitlab-ci/angle-skips.txt b/mesalib/.gitlab-ci/angle-skips.txt new file mode 100644 index 0000000000..4ccedad2fd --- /dev/null +++ b/mesalib/.gitlab-ci/angle-skips.txt @@ -0,0 +1,7 @@ +# Unlike zink which does support it, ANGLE relies on a waiver to not implement +# capturing individual array elements (see waivers.xml and gles3-waivers.txt in the CTS) +dEQP-GLES3.functional.transform_feedback.array_element.* +dEQP-GLES3.functional.transform_feedback.random.* +dEQP-GLES31.functional.program_interface_query.transform_feedback_varying.*_array_element +dEQP-GLES31.functional.program_interface_query.transform_feedback_varying.type.*.array.* +KHR-GLES31.core.program_interface_query.transform-feedback-types diff --git a/mesalib/.gitlab-ci/b2c/b2c.yml.jinja2.jinja2 b/mesalib/.gitlab-ci/b2c/b2c.yml.jinja2.jinja2 index dd872e9c89..8e80e8099f 100644 --- a/mesalib/.gitlab-ci/b2c/b2c.yml.jinja2.jinja2 +++ b/mesalib/.gitlab-ci/b2c/b2c.yml.jinja2.jinja2 @@ -39,37 +39,51 @@ console_patterns: job_success: regex: >- {{ job_success_regex }} +{% if job_warn_regex %} job_warn: regex: >- {{ job_warn_regex }} +{% endif %} # Environment to deploy deployment: # Initial boot start: + storage: + http: + - path: "/b2c-extra-args" + data: > + b2c.pipefail b2c.poweroff_delay={{ poweroff_delay }} + b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}" + b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve" + {% for volume in volumes %} + b2c.volume={{ volume }} + {% endfor %} + b2c.run_service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/telegraf:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }} + b2c.run="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/machine-registration:latest check" + b2c.run="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd | replace('"', '\\\"') }}" kernel: +{% if kernel_url %} url: '{{ kernel_url }}' +{% endif %} + + # NOTE: b2c.cache_device should not be here, but this works around + # a limitation of b2c which will be removed in the next release cmdline: > SALAD.machine_id={{ '{{' }} machine_id }} - console={{ '{{' }} local_tty_device }},115200 earlyprintk=vga,keep - loglevel={{ log_level }} no_hash_pointers - b2c.service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/telegraf:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }} - b2c.container="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/machine-registration:latest check" - b2c.ntp_peer=10.42.0.1 b2c.pipefail b2c.cache_device=auto b2c.poweroff_delay={{ poweroff_delay }} - b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}" - b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket 
}},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve" -{% for volume in volumes %} - b2c.volume={{ volume }} -{% endfor %} - b2c.container="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd }}" + console={{ '{{' }} local_tty_device }},115200 + b2c.cache_device=auto b2c.ntp_peer=10.42.0.1 + b2c.extra_args_url={{ '{{' }} job.http.url }}/b2c-extra-args {% if kernel_cmdline_extras is defined %} {{ kernel_cmdline_extras }} {% endif %} +{% if initramfs_url %} initramfs: url: '{{ initramfs_url }}' +{% endif %} -{% if dtb_url is defined %} +{% if dtb_url %} dtb: url: '{{ dtb_url }}' {% endif %} diff --git a/mesalib/.gitlab-ci/bare-metal/bm-init.sh b/mesalib/.gitlab-ci/bare-metal/bm-init.sh index 6935957b2c..d55f803251 100644 --- a/mesalib/.gitlab-ci/bare-metal/bm-init.sh +++ b/mesalib/.gitlab-ci/bare-metal/bm-init.sh @@ -5,6 +5,8 @@ # First stage: very basic setup to bring up network and /dev etc /init-stage1.sh +export CURRENT_SECTION=dut_boot + # Second stage: run jobs test $? -eq 0 && /init-stage2.sh diff --git a/mesalib/.gitlab-ci/bare-metal/cros-servo.sh b/mesalib/.gitlab-ci/bare-metal/cros-servo.sh index a69d69b4ef..6106bf0d0e 100644 --- a/mesalib/.gitlab-ci/bare-metal/cros-servo.sh +++ b/mesalib/.gitlab-ci/bare-metal/cros-servo.sh @@ -50,6 +50,10 @@ if [ -z "$BM_CMDLINE" ]; then exit 1 fi +. "${SCRIPTS_DIR}/setup-test-env.sh" + +section_start prepare_rootfs "Preparing rootfs components" + set -ex # Clear out any previous run's artifacts. @@ -86,7 +90,7 @@ rm -rf /tftp/* if echo "$BM_KERNEL" | grep -q http; then curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ $BM_KERNEL -o /tftp/vmlinuz -elif [ -n "${FORCE_KERNEL_TAG}" ]; then +elif [ -n "${EXTERNAL_KERNEL_TAG}" ]; then curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o /tftp/vmlinuz curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ @@ -99,16 +103,20 @@ fi echo "$BM_CMDLINE" > /tftp/cmdline set +e -STRUCTURED_LOG_FILE=job_detail.json +STRUCTURED_LOG_FILE=results/job_detail.json python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}" python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}" python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}" python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit "${CI_JOB_STARTED_AT}" +section_end prepare_rootfs + python3 $BM/cros_servo_run.py \ --cpu $BM_SERIAL \ --ec $BM_SERIAL_EC \ - --test-timeout ${TEST_PHASE_TIMEOUT:-20} + --test-timeout ${TEST_PHASE_TIMEOUT_MINUTES:-20} ret=$? + +section_start dut_cleanup "Cleaning up after job" python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close set -e @@ -116,9 +124,6 @@ set -e # Bring artifacts back from the NFS dir to the build dir where gitlab-runner # will look for them. cp -Rp /nfs/results/. 
results/
-if [ -f "${STRUCTURED_LOG_FILE}" ]; then
-  cp -p ${STRUCTURED_LOG_FILE} results/
-  echo "Structured log file is available at https://${CI_PROJECT_ROOT_NAMESPACE}.pages.freedesktop.org/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts/results/${STRUCTURED_LOG_FILE}"
-fi
+section_end dut_cleanup

 exit $ret
diff --git a/mesalib/.gitlab-ci/bare-metal/cros_servo_run.py b/mesalib/.gitlab-ci/bare-metal/cros_servo_run.py
index 02a91edc38..95fed60c47 100644
--- a/mesalib/.gitlab-ci/bare-metal/cros_servo_run.py
+++ b/mesalib/.gitlab-ci/bare-metal/cros_servo_run.py
@@ -4,21 +4,29 @@
 # SPDX-License-Identifier: MIT

 import argparse
+import datetime
+import math
+import os
 import re
 import sys

 from custom_logger import CustomLogger
 from serial_buffer import SerialBuffer

+ANSI_ESCAPE="\x1b[0K"
+ANSI_COLOUR="\x1b[0;36m"
+ANSI_RESET="\x1b[0m"
+SECTION_START="start"
+SECTION_END="end"

 class CrosServoRun:
     def __init__(self, cpu, ec, test_timeout, logger):
         self.cpu_ser = SerialBuffer(
-            cpu, "results/serial.txt", "R SERIAL-CPU> ")
+            cpu, "results/serial.txt", ": ")
         # Merge the EC serial into the cpu_ser's line stream so that we can
         # effectively poll on both at the same time and not have to worry about
         # threading.
         self.ec_ser = SerialBuffer(
-            ec, "results/serial-ec.txt", "R SERIAL-EC> ", line_queue=self.cpu_ser.line_queue)
+            ec, "results/serial-ec.txt", " EC: ", line_queue=self.cpu_ser.line_queue)
         self.test_timeout = test_timeout
         self.logger = logger
@@ -27,11 +35,11 @@ def close(self):
         self.cpu_ser.close()

     def ec_write(self, s):
-        print("W SERIAL-EC> %s" % s)
+        print("EC> %s" % s)
         self.ec_ser.serial.write(s.encode())

     def cpu_write(self, s):
-        print("W SERIAL-CPU> %s" % s)
+        print("> %s" % s)
         self.cpu_ser.serial.write(s.encode())

     def print_error(self, message):
@@ -40,6 +48,31 @@
         print(RED + message + NO_COLOR)
         self.logger.update_status_fail(message)

+    def get_rel_timestamp(self):
+        now = datetime.datetime.now(tz=datetime.UTC)
+        then_env = os.getenv("CI_JOB_STARTED_AT")
+        if not then_env:
+            return ""
+        delta = now - datetime.datetime.fromisoformat(then_env)
+        return f"[{math.floor(delta.seconds / 60):02}:{(delta.seconds % 60):02}]"
+
+    def get_cur_timestamp(self):
+        return str(int(datetime.datetime.timestamp(datetime.datetime.now())))
+
+    def print_gitlab_section(self, action, name, description, collapse=True):
+        assert action in [SECTION_START, SECTION_END]
+        out = ANSI_ESCAPE + "section_" + action + ":"
+        out += self.get_cur_timestamp() + ":"
+        out += name
+        if action == "start" and collapse:
+            out += "[collapsed=true]"
+        out += "\r" + ANSI_ESCAPE + ANSI_COLOUR
+        out += self.get_rel_timestamp() + " " + description + ANSI_RESET
+        print(out)
+
+    def boot_section(self, action):
+        self.print_gitlab_section(action, "dut_boot", "Booting hardware device", True)
+
     def run(self):
         # Flush any partial commands in the EC's prompt, then ask for a reboot.
         self.ec_write("\n")
@@ -47,6 +80,7 @@ def run(self):
         bootloader_done = False

         self.logger.create_job_phase("boot")
+        self.boot_section(SECTION_START)
         tftp_failures = 0
         # This is emitted right when the bootloader pauses to check for input.
# Emit a ^N character to request network boot, because we don't have a @@ -127,14 +161,18 @@ def run(self): self.print_error("Detected cheza MMU fail, abandoning run.") return 1 - result = re.search("hwci: mesa: (\S*)", line) + result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line) if result: - if result.group(1) == "pass": + status = result.group(1) + exit_code = int(result.group(2)) + + if status == "pass": self.logger.update_dut_job("status", "pass") - return 0 else: self.logger.update_status_fail("test fail") - return 1 + + self.logger.update_dut_job("exit_code", exit_code) + return exit_code self.print_error( "Reached the end of the CPU serial log without finding a result") @@ -151,7 +189,7 @@ def main(): '--test-timeout', type=int, help='Test phase timeout (minutes)', required=True) args = parser.parse_args() - logger = CustomLogger("job_detail.json") + logger = CustomLogger("results/job_detail.json") logger.update_dut_time("start", None) servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60, logger) retval = servo.run() diff --git a/mesalib/.gitlab-ci/bare-metal/fastboot.sh b/mesalib/.gitlab-ci/bare-metal/fastboot.sh index 6d19aa4f9e..930d06b5f9 100644 --- a/mesalib/.gitlab-ci/bare-metal/fastboot.sh +++ b/mesalib/.gitlab-ci/bare-metal/fastboot.sh @@ -55,6 +55,8 @@ if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then BM_FASTBOOT_NFSROOT=1 fi +section_start prepare_rootfs "Preparing rootfs components" + set -ex # Clear out any previous run's artifacts. @@ -105,7 +107,7 @@ if echo "$BM_KERNEL $BM_DTB" | grep -q http; then cat kernel dtb > Image.gz-dtb -elif [ -n "${FORCE_KERNEL_TAG}" ]; then +elif [ -n "${EXTERNAL_KERNEL_TAG}" ]; then curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o kernel curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ @@ -148,10 +150,12 @@ if [ -n "$BM_SERIAL_SCRIPT" ]; then done fi +section_end prepare_rootfs + set +e $BM/fastboot_run.py \ --dev="$BM_SERIAL" \ - --test-timeout ${TEST_PHASE_TIMEOUT:-20} \ + --test-timeout ${TEST_PHASE_TIMEOUT_MINUTES:-20} \ --fbserial="$BM_FASTBOOT_SERIAL" \ --powerup="$BM_POWERUP" \ --powerdown="$BM_POWERDOWN" diff --git a/mesalib/.gitlab-ci/bare-metal/fastboot_run.py b/mesalib/.gitlab-ci/bare-metal/fastboot_run.py index ca3229f6d2..f063704002 100644 --- a/mesalib/.gitlab-ci/bare-metal/fastboot_run.py +++ b/mesalib/.gitlab-ci/bare-metal/fastboot_run.py @@ -119,12 +119,12 @@ def run(self): if print_more_lines == -1: print_more_lines = 30 - result = re.search("hwci: mesa: (\S*)", line) + result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line) if result: - if result.group(1) == "pass": - return 0 - else: - return 1 + status = result.group(1) + exit_code = int(result.group(2)) + + return exit_code self.print_error( "Reached the end of the CPU serial log without finding a result, abandoning run.") diff --git a/mesalib/.gitlab-ci/bare-metal/poe-off b/mesalib/.gitlab-ci/bare-metal/poe-off index 3332a7b0f3..64517204f4 100644 --- a/mesalib/.gitlab-ci/bare-metal/poe-off +++ b/mesalib/.gitlab-ci/bare-metal/poe-off @@ -10,7 +10,7 @@ if [ -z "$BM_POE_ADDRESS" ]; then exit 1 fi -SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((48 + BM_POE_INTERFACE))" +SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((${BM_POE_BASE:-0} + BM_POE_INTERFACE))" SNMP_OFF="i 2" flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF" diff --git a/mesalib/.gitlab-ci/bare-metal/poe-on 
b/mesalib/.gitlab-ci/bare-metal/poe-on index de41fc9b81..c5fde69791 100644 --- a/mesalib/.gitlab-ci/bare-metal/poe-on +++ b/mesalib/.gitlab-ci/bare-metal/poe-on @@ -10,7 +10,7 @@ if [ -z "$BM_POE_ADDRESS" ]; then exit 1 fi -SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((48 + BM_POE_INTERFACE))" +SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((${BM_POE_BASE:-0} + BM_POE_INTERFACE))" SNMP_ON="i 1" SNMP_OFF="i 2" diff --git a/mesalib/.gitlab-ci/bare-metal/poe-powered.sh b/mesalib/.gitlab-ci/bare-metal/poe-powered.sh index 4d172b1e05..3be933ef85 100644 --- a/mesalib/.gitlab-ci/bare-metal/poe-powered.sh +++ b/mesalib/.gitlab-ci/bare-metal/poe-powered.sh @@ -71,6 +71,8 @@ if [ -z "$BM_CMDLINE" ]; then exit 1 fi +section_start prepare_rootfs "Preparing rootfs components" + set -ex date +'%F %T' @@ -102,7 +104,7 @@ if [ -f "${BM_BOOTFS}" ]; then fi # If BM_KERNEL and BM_DTS is present -if [ -n "${FORCE_KERNEL_TAG}" ]; then +if [ -n "${EXTERNAL_KERNEL_TAG}" ]; then if [ -z "${BM_KERNEL}" ] || [ -z "${BM_DTB}" ]; then echo "This machine cannot be tested with external kernel since BM_KERNEL or BM_DTB missing!" exit 1 @@ -120,7 +122,7 @@ date +'%F %T' # Install kernel modules (it could be either in /lib/modules or # /usr/lib/modules, but we want to install in the latter) -if [ -n "${FORCE_KERNEL_TAG}" ]; then +if [ -n "${EXTERNAL_KERNEL_TAG}" ]; then tar --keep-directory-symlink --zstd -xf modules.tar.zst -C /nfs/ rm modules.tar.zst & elif [ -n "${BM_BOOTFS}" ]; then @@ -134,7 +136,7 @@ fi date +'%F %T' # Install kernel image + bootloader files -if [ -n "${FORCE_KERNEL_TAG}" ] || [ -z "$BM_BOOTFS" ]; then +if [ -n "${EXTERNAL_KERNEL_TAG}" ] || [ -z "$BM_BOOTFS" ]; then mv "${BM_KERNEL}" "${BM_DTB}.dtb" /tftp/ else # BM_BOOTFS rsync -aL --delete $BM_BOOTFS/boot/ /tftp/ @@ -181,13 +183,16 @@ if [ -n "$BM_BOOTCONFIG" ]; then printf "$BM_BOOTCONFIG" >> /tftp/config.txt fi +section_end prepare_rootfs + set +e -STRUCTURED_LOG_FILE=job_detail.json +STRUCTURED_LOG_FILE=results/job_detail.json python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}" python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}" ATTEMPTS=3 first_attempt=True while [ $((ATTEMPTS--)) -gt 0 ]; do + section_start dut_boot "Booting hardware device ..." python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}" # Update subtime time to CI_JOB_STARTED_AT only for the first run if [ "$first_attempt" = "True" ]; then @@ -199,17 +204,22 @@ while [ $((ATTEMPTS--)) -gt 0 ]; do --dev="$BM_SERIAL" \ --powerup="$BM_POWERUP" \ --powerdown="$BM_POWERDOWN" \ - --test-timeout ${TEST_PHASE_TIMEOUT:-20} + --boot-timeout-seconds ${BOOT_PHASE_TIMEOUT_SECONDS:-300} \ + --test-timeout-minutes ${TEST_PHASE_TIMEOUT_MINUTES:-$((CI_JOB_TIMEOUT/60 - ${TEST_SETUP_AND_UPLOAD_MARGIN_MINUTES:-5}))} ret=$? if [ $ret -eq 2 ]; then - echo "Did not detect boot sequence, retrying..." python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job first_attempt=False + error "Device failed to boot; will retry" else + # We're no longer in dut_boot by this point + unset CURRENT_SECTION ATTEMPTS=0 fi done + +section_start dut_cleanup "Cleaning up after job" python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close set -e @@ -219,11 +229,8 @@ date +'%F %T' # Bring artifacts back from the NFS dir to the build dir where gitlab-runner # will look for them. 
cp -Rp /nfs/results/. results/ -if [ -f "${STRUCTURED_LOG_FILE}" ]; then - cp -p ${STRUCTURED_LOG_FILE} results/ - echo "Structured log file is available at ${ARTIFACTS_BASE_URL}/results/${STRUCTURED_LOG_FILE}" -fi date +'%F %T' +section_end dut_cleanup exit $ret diff --git a/mesalib/.gitlab-ci/bare-metal/poe_run.py b/mesalib/.gitlab-ci/bare-metal/poe_run.py index 0220acad54..c987c39d96 100644 --- a/mesalib/.gitlab-ci/bare-metal/poe_run.py +++ b/mesalib/.gitlab-ci/bare-metal/poe_run.py @@ -31,11 +31,12 @@ from serial_buffer import SerialBuffer class PoERun: - def __init__(self, args, test_timeout, logger): + def __init__(self, args, boot_timeout, test_timeout, logger): self.powerup = args.powerup self.powerdown = args.powerdown self.ser = SerialBuffer( - args.dev, "results/serial-output.txt", "") + args.dev, "results/serial-output.txt", ": ") + self.boot_timeout = boot_timeout self.test_timeout = test_timeout self.logger = logger @@ -56,7 +57,7 @@ def run(self): boot_detected = False self.logger.create_job_phase("boot") - for line in self.ser.lines(timeout=5 * 60, phase="bootloader"): + for line in self.ser.lines(timeout=self.boot_timeout, phase="bootloader"): if re.search("Booting Linux", line): boot_detected = True break @@ -64,7 +65,7 @@ def run(self): if not boot_detected: self.print_error( "Something wrong; couldn't detect the boot start up sequence") - return 1 + return 2 self.logger.create_job_phase("test") for line in self.ser.lines(timeout=self.test_timeout, phase="test"): @@ -86,14 +87,18 @@ def run(self): self.print_error("nouveau jetson tk1 network fail, abandoning run.") return 1 - result = re.search("hwci: mesa: (\S*)", line) + result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line) if result: - if result.group(1) == "pass": + status = result.group(1) + exit_code = int(result.group(2)) + + if status == "pass": self.logger.update_dut_job("status", "pass") - return 0 else: self.logger.update_status_fail("test fail") - return 1 + + self.logger.update_dut_job("exit_code", exit_code) + return exit_code self.print_error( "Reached the end of the CPU serial log without finding a result") @@ -109,12 +114,14 @@ def main(): parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True) parser.add_argument( - '--test-timeout', type=int, help='Test phase timeout (minutes)', required=True) + '--boot-timeout-seconds', type=int, help='Boot phase timeout (seconds)', required=True) + parser.add_argument( + '--test-timeout-minutes', type=int, help='Test phase timeout (minutes)', required=True) args = parser.parse_args() - logger = CustomLogger("job_detail.json") + logger = CustomLogger("results/job_detail.json") logger.update_dut_time("start", None) - poe = PoERun(args, args.test_timeout * 60, logger) + poe = PoERun(args, args.boot_timeout_seconds, args.test_timeout_minutes * 60, logger) retval = poe.run() poe.logged_system(args.powerdown) diff --git a/mesalib/.gitlab-ci/bare-metal/rootfs-setup.sh b/mesalib/.gitlab-ci/bare-metal/rootfs-setup.sh index 882ddb964c..0aaeea2b2a 100644 --- a/mesalib/.gitlab-ci/bare-metal/rootfs-setup.sh +++ b/mesalib/.gitlab-ci/bare-metal/rootfs-setup.sh @@ -17,9 +17,6 @@ cp "${S3_JWT_FILE}" "${rootfs_dst}${S3_JWT_FILE}" date +'%F %T' -cp $CI_COMMON/capture-devcoredump.sh $rootfs_dst/ -cp $CI_COMMON/intel-gpu-freq.sh $rootfs_dst/ -cp $CI_COMMON/kdl.sh $rootfs_dst/ cp "$SCRIPTS_DIR/setup-test-env.sh" "$rootfs_dst/" set +x diff --git a/mesalib/.gitlab-ci/bare-metal/serial_buffer.py 
b/mesalib/.gitlab-ci/bare-metal/serial_buffer.py index b21ce6e6ef..f57060e186 100644 --- a/mesalib/.gitlab-ci/bare-metal/serial_buffer.py +++ b/mesalib/.gitlab-ci/bare-metal/serial_buffer.py @@ -22,7 +22,7 @@ # IN THE SOFTWARE. import argparse -from datetime import datetime, timezone +from datetime import datetime, UTC import queue import serial import threading @@ -130,9 +130,10 @@ def serial_lines_thread_loop(self): if b == b'\n'[0]: line = line.decode(errors="replace") - time = datetime.now().strftime('%y-%m-%d %H:%M:%S') - print("{endc}{time} {prefix}{line}".format( - time=time, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='') + ts = datetime.now(tz=UTC) + ts_str = f"{ts.hour:02}:{ts.minute:02}:{ts.second:02}.{int(ts.microsecond / 1000):03}" + print("{endc}{time}{prefix}{line}".format( + time=ts_str, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='') self.line_queue.put(line) line = bytearray() diff --git a/mesalib/.gitlab-ci/bin/ci_gantt_chart.py b/mesalib/.gitlab-ci/bin/ci_gantt_chart.py index 8b9472217a..44f39865f6 100644 --- a/mesalib/.gitlab-ci/bin/ci_gantt_chart.py +++ b/mesalib/.gitlab-ci/bin/ci_gantt_chart.py @@ -8,24 +8,24 @@ import argparse -import gitlab +from datetime import datetime, timedelta, timezone +from typing import Dict, List + import plotly.express as px -from gitlab_common import pretty_duration -from datetime import datetime, timedelta -from gitlab_common import read_token, GITLAB_URL, get_gitlab_pipeline_from_url +import plotly.graph_objs as go +from gitlab import Gitlab, base +from gitlab.v4.objects import ProjectPipeline +from gitlab_common import (GITLAB_URL, get_gitlab_pipeline_from_url, + get_token_from_default_dir, pretty_duration, + read_token) -def calculate_queued_at(job): - # we can have queued_duration without started_at when a job is canceled - if not job.queued_duration or not job.started_at: - return None +def calculate_queued_at(job) -> datetime: started_at = job.started_at.replace("Z", "+00:00") return datetime.fromisoformat(started_at) - timedelta(seconds=job.queued_duration) -def calculate_time_difference(time1, time2): - if not time1 or not time2: - return None +def calculate_time_difference(time1, time2) -> str: if type(time1) is str: time1 = datetime.fromisoformat(time1.replace("Z", "+00:00")) if type(time2) is str: @@ -35,12 +35,14 @@ def calculate_time_difference(time1, time2): return pretty_duration(diff.seconds) -def create_task_name(job): +def create_task_name(job) -> str: status_color = {"success": "green", "failed": "red"}.get(job.status, "grey") return f"{job.name}\t({job.status},{job.id})" -def add_gantt_bar(job, tasks): +def add_gantt_bar( + job: base.RESTObject, tasks: List[Dict[str, str | datetime | timedelta]] +) -> None: queued_at = calculate_queued_at(job) task_name = create_task_name(job) @@ -62,25 +64,43 @@ def add_gantt_bar(job, tasks): "Phase": "Queued", } ) - tasks.append( - { - "Job": task_name, - "Start": job.started_at, - "Finish": job.finished_at, - "Duration": calculate_time_difference(job.started_at, job.finished_at), - "Phase": "Running", - } - ) - -def generate_gantt_chart(pipeline): + if job.finished_at: + tasks.append( + { + "Job": task_name, + "Start": job.started_at, + "Finish": job.finished_at, + "Duration": calculate_time_difference(job.started_at, job.finished_at), + "Phase": "Time spent running", + } + ) + else: + current_time = datetime.now(timezone.utc).isoformat() + tasks.append( + { + "Job": task_name, + "Start": job.started_at, + "Finish": current_time, + 
"Duration": calculate_time_difference(job.started_at, current_time), + "Phase": "In-Progress", + } + ) + + +def generate_gantt_chart( + pipeline: ProjectPipeline, ci_timeout: float = 60 +) -> go.Figure: if pipeline.yaml_errors: raise ValueError("Pipeline YAML errors detected") # Convert the data into a list of dictionaries for plotly - tasks = [] + tasks: List[Dict[str, str | datetime | timedelta]] = [] for job in pipeline.jobs.list(all=True, include_retried=True): + # we can have queued_duration without started_at when a job is canceled + if not job.queued_duration or not job.started_at: + continue add_gantt_bar(job, tasks) # Make it easier to see retried jobs @@ -94,7 +114,8 @@ def generate_gantt_chart(pipeline): ) # Create a Gantt chart - fig = px.timeline( + default_colors = px.colors.qualitative.Plotly + fig: go.Figure = px.timeline( tasks, x_start="Start", x_end="Finish", @@ -102,6 +123,12 @@ def generate_gantt_chart(pipeline): color="Phase", title=title, hover_data=["Duration"], + color_discrete_map={ + "In-Progress": default_colors[3], # purple + "Waiting dependencies": default_colors[0], # blue + "Queued": default_colors[1], # red + "Time spent running": default_colors[2], # green + }, ) # Calculate the height dynamically @@ -109,11 +136,11 @@ def generate_gantt_chart(pipeline): # Add a deadline line to the chart created_at = datetime.fromisoformat(pipeline.created_at.replace("Z", "+00:00")) - timeout_at = created_at + timedelta(hours=1) + timeout_at = created_at + timedelta(minutes=ci_timeout) fig.add_vrect( x0=timeout_at, x1=timeout_at, - annotation_text="1h Timeout", + annotation_text=f"{int(ci_timeout)} min Timeout", fillcolor="gray", line_width=2, line_color="gray", @@ -125,7 +152,30 @@ def generate_gantt_chart(pipeline): return fig -def parse_args() -> None: +def main( + token: str | None, + pipeline_url: str, + output: str | None, + ci_timeout: float = 60, +): + if token is None: + token = get_token_from_default_dir() + + token = read_token(token) + gl = Gitlab(url=GITLAB_URL, private_token=token, retry_transient_errors=True) + + pipeline, _ = get_gitlab_pipeline_from_url(gl, pipeline_url) + fig: go.Figure = generate_gantt_chart(pipeline, ci_timeout) + if output and "htm" in output: + fig.write_html(output) + elif output: + fig.update_layout(width=1000) + fig.write_image(output) + else: + fig.show() + + +if __name__ == "__main__": parser = argparse.ArgumentParser( description="Generate the Gantt chart from a given pipeline." ) @@ -134,29 +184,19 @@ def parse_args() -> None: "-o", "--output", type=str, - help="Output file name. Use html ou image suffixes to choose the format.", + help="Output file name. Use html or image suffixes to choose the format.", ) parser.add_argument( "--token", metavar="token", help="force GitLab token, otherwise it's read from ~/.config/gitlab-token", ) - return parser.parse_args() - - -if __name__ == "__main__": - args = parse_args() - - token = read_token(args.token) - - gl = gitlab.Gitlab(url=GITLAB_URL, private_token=token, retry_transient_errors=True) - - pipeline, _ = get_gitlab_pipeline_from_url(gl, args.pipeline_url) - fig = generate_gantt_chart(pipeline) - if args.output and "htm" in args.output: - fig.write_html(args.output) - elif args.output: - fig.update_layout(width=1000) - fig.write_image(args.output) - else: - fig.show() + parser.add_argument( + "--ci-timeout", + metavar="ci_timeout", + type=float, + default=60, + help="Time that marge-bot will wait for ci to finish. 
Defaults to one hour.", + ) + args = parser.parse_args() + main(args.token, args.pipeline_url, args.output, args.ci_timeout) diff --git a/mesalib/.gitlab-ci/bin/ci_post_gantt.py b/mesalib/.gitlab-ci/bin/ci_post_gantt.py index 131f27e937..3749d68392 100644 --- a/mesalib/.gitlab-ci/bin/ci_post_gantt.py +++ b/mesalib/.gitlab-ci/bin/ci_post_gantt.py @@ -8,25 +8,31 @@ import argparse -import gitlab -import re +import logging as log import os -import pytz +import re import traceback from datetime import datetime, timedelta -from gitlab_common import ( - read_token, - GITLAB_URL, - get_gitlab_pipeline_from_url, -) +from typing import Any, Dict + +import gitlab +import pytz from ci_gantt_chart import generate_gantt_chart +from gitlab import Gitlab +from gitlab.base import RESTObject +from gitlab.v4.objects import Project, ProjectPipeline +from gitlab_common import (GITLAB_URL, get_gitlab_pipeline_from_url, + get_token_from_default_dir, read_token) + + +class MockGanttExit(Exception): + pass -MARGE_USER_ID = 9716 # Marge LAST_MARGE_EVENT_FILE = os.path.expanduser("~/.config/last_marge_event") -def read_last_event_date_from_file(): +def read_last_event_date_from_file() -> str: try: with open(LAST_MARGE_EVENT_FILE, "r") as f: last_event_date = f.read().strip() @@ -36,7 +42,7 @@ def read_last_event_date_from_file(): return last_event_date -def pretty_time(time_str): +def pretty_time(time_str: str) -> str: """Pretty print time""" local_timezone = datetime.now().astimezone().tzinfo @@ -46,10 +52,8 @@ def pretty_time(time_str): return f'{time_str} ({time_d.strftime("%d %b %Y %Hh%Mm%Ss")} {local_timezone})' -def compose_message(file_name, attachment_url): +def compose_message(file_name: str, attachment_url: str) -> str: return f""" -Here is the Gantt chart for the referred pipeline, I hope it helps 😄 (tip: click on the "Pan" button on the top right bar): - [{file_name}]({attachment_url})
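The px.timeline call in generate_gantt_chart above is the core of these charts: every task dict becomes one horizontal bar, coloured by its Phase column. A minimal sketch of that pattern with made-up job data, assuming only plotly and pandas are installed:

    import pandas as pd
    import plotly.express as px

    # Two hypothetical jobs, each with a queued phase and a running phase.
    tasks = pd.DataFrame([
        {"Job": "build", "Start": "2024-01-01 00:00", "Finish": "2024-01-01 00:05", "Phase": "Queued"},
        {"Job": "build", "Start": "2024-01-01 00:05", "Finish": "2024-01-01 00:25", "Phase": "Time spent running"},
        {"Job": "test",  "Start": "2024-01-01 00:25", "Finish": "2024-01-01 00:30", "Phase": "Queued"},
        {"Job": "test",  "Start": "2024-01-01 00:30", "Finish": "2024-01-01 00:50", "Phase": "Time spent running"},
    ])
    fig = px.timeline(tasks, x_start="Start", x_end="Finish", y="Job", color="Phase")
    fig.write_html("gantt.html")  # or fig.show() for an interactive window
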
@@ -60,13 +64,13 @@ def compose_message(file_name, attachment_url): """ -def gitlab_upload_file_get_url(gl, project_id, filepath): - project = gl.projects.get(project_id) - uploaded_file = project.upload(filepath, filepath=filepath) +def gitlab_upload_file_get_url(gl: Gitlab, project_id: str, filepath: str) -> str: + project: Project = gl.projects.get(project_id) + uploaded_file: Dict[str, Any] = project.upload(filepath, filepath=filepath) return uploaded_file["url"] -def gitlab_post_reply_to_note(gl, event, reply_message): +def gitlab_post_reply_to_note(gl: Gitlab, event: RESTObject, reply_message: str): """ Post a reply to a note in thread based on a GitLab event. @@ -82,7 +86,7 @@ def gitlab_post_reply_to_note(gl, event, reply_message): merge_request = project.mergerequests.get(merge_request_iid) # Find the discussion to which the note belongs - discussions = merge_request.discussions.list(as_list=False) + discussions = merge_request.discussions.list(iterator=True) target_discussion = next( ( d @@ -100,36 +104,28 @@ def gitlab_post_reply_to_note(gl, event, reply_message): return reply except gitlab.exceptions.GitlabError as e: - print(f"Failed to post a reply to '{event.note['body']}': {e}") + log.error(f"Failed to post a reply to '{event.note['body']}': {e}") return None -def parse_args() -> None: - parser = argparse.ArgumentParser(description="Monitor rejected pipelines by Marge.") - parser.add_argument( - "--token", - metavar="token", - help="force GitLab token, otherwise it's read from ~/.config/gitlab-token", - ) - parser.add_argument( - "--since", - metavar="since", - help="consider only events after this date (ISO format), otherwise it's read from ~/.config/last_marge_event", - ) - return parser.parse_args() - - -if __name__ == "__main__": - args = parse_args() +def main( + token: str | None, + since: str | None, + marge_user_id: int = 9716, + project_ids: list[int] = [176], + ci_timeout: float = 60, +): + log.basicConfig(level=log.INFO) + if token is None: + token = get_token_from_default_dir() - token = read_token(args.token) + token = read_token(token) + gl = Gitlab(url=GITLAB_URL, private_token=token, retry_transient_errors=True) - gl = gitlab.Gitlab(url=GITLAB_URL, private_token=token, retry_transient_errors=True) + user = gl.users.get(marge_user_id) + last_event_at = since if since else read_last_event_date_from_file() - user = gl.users.get(MARGE_USER_ID) - last_event_at = args.since if args.since else read_last_event_date_from_file() - - print(f"Retrieving Marge messages since {pretty_time(last_event_at)}\n") + log.info(f"Retrieving Marge messages since {pretty_time(last_event_at)}\n") # the "after" only considers the "2023-10-24" part, it doesn't consider the time events = user.events.list( @@ -144,6 +140,8 @@ def parse_args() -> None: ).replace(tzinfo=pytz.UTC) for event in events: + if event.project_id not in project_ids: + continue created_at_date = datetime.fromisoformat( event.created_at.replace("Z", "+00:00") ).replace(tzinfo=pytz.UTC) @@ -151,28 +149,75 @@ def parse_args() -> None: continue last_event_at = event.created_at - match = re.search(r"https://[^ ]+", event.note["body"]) + escaped_gitlab_url = re.escape(GITLAB_URL) + match = re.search(rf"{escaped_gitlab_url}/[^\s<]+", event.note["body"]) + if match: try: - print("Found message:", event.note["body"]) + log.info(f"Found message: {event.note['body']}") pipeline_url = match.group(0)[:-1] + pipeline: ProjectPipeline pipeline, _ = get_gitlab_pipeline_from_url(gl, pipeline_url) - print("Generating gantt 
chart...") - fig = generate_gantt_chart(pipeline) - file_name = "Gantt.html" + log.info("Generating gantt chart...") + fig = generate_gantt_chart(pipeline, ci_timeout) + file_name = f"{str(pipeline.id)}-Gantt.html" fig.write_html(file_name) - print("Uploading gantt file...") + log.info("Uploading gantt file...") file_url = gitlab_upload_file_get_url(gl, event.project_id, file_name) - print("Posting reply ...\n") + log.info("Posting reply ...") message = compose_message(file_name, file_url) gitlab_post_reply_to_note(gl, event, message) + except MockGanttExit: + pass # Allow tests to exit early without printing a traceback except Exception as e: - print(f"Failed to generate gantt chart, not posting reply.{e}") + log.info(f"Failed to generate gantt chart, not posting reply.{e}") traceback.print_exc() - if not args.since: - print( + if not since: + log.info( f"Updating last event date to {pretty_time(last_event_at)} on {LAST_MARGE_EVENT_FILE}\n" ) with open(LAST_MARGE_EVENT_FILE, "w") as f: f.write(last_event_at) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Monitor rejected pipelines by Marge.") + parser.add_argument( + "--token", + metavar="token", + type=str, + default=None, + help="force GitLab token, otherwise it's read from ~/.config/gitlab-token", + ) + parser.add_argument( + "--since", + metavar="since", + type=str, + default=None, + help="consider only events after this date (ISO format), otherwise it's read from ~/.config/last_marge_event", + ) + parser.add_argument( + "--marge-user-id", + metavar="marge_user_id", + type=int, + default=9716, # default https://gitlab.freedesktop.org/users/marge-bot/activity + help="GitLab user ID for marge-bot, defaults to 9716", + ) + parser.add_argument( + "--project-id", + metavar="project_id", + type=int, + nargs="+", + default=[176], # default is the mesa/mesa project id + help="GitLab project id(s) to analyze. Defaults to 176 i.e. mesa/mesa.", + ) + parser.add_argument( + "--ci-timeout", + metavar="ci_timeout", + type=float, + default=60, + help="Time that marge-bot will wait for ci to finish. 
Defaults to one hour.", + ) + args = parser.parse_args() + main(args.token, args.since, args.marge_user_id, args.project_id, args.ci_timeout) diff --git a/mesalib/.gitlab-ci/bin/ci_run_n_monitor.py b/mesalib/.gitlab-ci/bin/ci_run_n_monitor.py index 6ead6c6ec9..5cd732be4f 100644 --- a/mesalib/.gitlab-ci/bin/ci_run_n_monitor.py +++ b/mesalib/.gitlab-ci/bin/ci_run_n_monitor.py @@ -21,7 +21,7 @@ from functools import partial from itertools import chain from subprocess import check_output, CalledProcessError -from typing import TYPE_CHECKING, Iterable, Literal, Optional +from typing import Dict, TYPE_CHECKING, Iterable, Literal, Optional, Tuple import gitlab import gitlab.v4.objects @@ -33,6 +33,7 @@ get_gitlab_project, get_token_from_default_dir, pretty_duration, + print_once, read_token, wait_for_pipeline, ) @@ -53,90 +54,138 @@ "success": Fore.GREEN, "failed": Fore.RED, "canceled": Fore.MAGENTA, + "canceling": Fore.MAGENTA, "manual": "", "pending": "", "skipped": "", } -COMPLETED_STATUSES = ["success", "failed"] +COMPLETED_STATUSES = frozenset({"success", "failed"}) +RUNNING_STATUSES = frozenset({"created", "pending", "running"}) -def print_job_status(job, new_status=False) -> None: +def print_job_status( + job: gitlab.v4.objects.ProjectPipelineJob, + new_status: bool = False, + job_name_field_pad: int = 0, +) -> None: """It prints a nice, colored job status with a link to the job.""" - if job.status == "canceled": + if job.status in {"canceled", "canceling"}: return if new_status and job.status == "created": return - if job.duration: - duration = job.duration - elif job.started_at: - duration = time.perf_counter() - time.mktime(job.started_at.timetuple()) + job_name_field_pad = len(job.name) if job_name_field_pad < 1 else job_name_field_pad - print( + duration = job_duration(job) + + print_once( STATUS_COLORS[job.status] - + "🞋 job " - + URL_START - + f"{job.web_url}\a{job.name}" - + URL_END - + (f" has new status: {job.status}" if new_status else f" :: {job.status}") + + "🞋 job " # U+1F78B Round target + + link2print(job.web_url, job.name, job_name_field_pad) + + (f"has new status: {job.status}" if new_status else f"{job.status}") + (f" ({pretty_duration(duration)})" if job.started_at else "") + Style.RESET_ALL ) +def job_duration(job: gitlab.v4.objects.ProjectPipelineJob) -> float: + """ + Given a job, report the time lapsed in execution. 
+ :param job: Pipeline job + :return: Current time in execution + """ + if job.duration: + return job.duration + elif job.started_at: + return time.perf_counter() - time.mktime(job.started_at.timetuple()) + return 0.0 + + def pretty_wait(sec: int) -> None: """shows progressbar in dots""" for val in range(sec, 0, -1): - print(f"⏲ {val} seconds", end="\r") + print(f"⏲ {val} seconds", end="\r") # U+23F2 Timer clock time.sleep(1) def monitor_pipeline( - project, - pipeline, + project: gitlab.v4.objects.Project, + pipeline: gitlab.v4.objects.ProjectPipeline, target_jobs_regex: re.Pattern, - dependencies, - force_manual: bool, + include_stage_regex: re.Pattern, + exclude_stage_regex: re.Pattern, + dependencies: set[str], stress: int, -) -> tuple[Optional[int], Optional[int]]: +) -> tuple[Optional[int], Optional[int], Dict[str, Dict[int, Tuple[float, str, str]]]]: """Monitors pipeline and delegate canceling jobs""" statuses: dict[str, str] = defaultdict(str) target_statuses: dict[str, str] = defaultdict(str) - stress_status_counter = defaultdict(lambda: defaultdict(int)) - target_id = None - + stress_status_counter: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) + execution_times = defaultdict(lambda: defaultdict(tuple)) + target_id: int = -1 + name_field_pad: int = len(max(dependencies, key=len))+2 + # In a running pipeline, we can skip following job traces that are in these statuses. + skip_follow_statuses: frozenset[str] = (COMPLETED_STATUSES) + + # Pre-populate the stress status counter for already completed target jobs. + if stress: + # When stress test, it is necessary to collect this information before start. + for job in pipeline.jobs.list(all=True, include_retried=True): + if target_jobs_regex.fullmatch(job.name) and \ + include_stage_regex.fullmatch(job.stage) and \ + not exclude_stage_regex.fullmatch(job.stage) and \ + job.status in COMPLETED_STATUSES: + stress_status_counter[job.name][job.status] += 1 + execution_times[job.name][job.id] = (job_duration(job), job.status, job.web_url) + + # jobs_waiting is a list of job names that are waiting for status update. + # It occurs when a job that we want to run depends on another job that is not yet finished. + jobs_waiting = [] + # FIXME: This function has too many parameters, consider refactoring. 
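+    # A hedged sketch of the pre-binding idiom used just below, with simplified
+    # made-up arguments: functools.partial freezes the loop-invariant keyword
+    # arguments once, so the monitor loop only supplies what varies per job:
+    #
+    #   from functools import partial
+    #   def enable_job(*, project, pipeline, job, action_type): ...
+    #   enable_job_fn = partial(enable_job, project=project, pipeline=pipeline)
+    #   enable_job_fn(job=job, action_type="target")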
+ enable_job_fn = partial( + enable_job, + project=project, + pipeline=pipeline, + job_name_field_pad=name_field_pad, + jobs_waiting=jobs_waiting, + ) while True: deps_failed = [] to_cancel = [] - for job in pipeline.jobs.list(all=True, sort="desc"): - # target jobs - if target_jobs_regex.fullmatch(job.name): + jobs_waiting.clear() + for job in sorted(pipeline.jobs.list(all=True), key=lambda j: j.name): + if target_jobs_regex.fullmatch(job.name) and \ + include_stage_regex.fullmatch(job.stage) and \ + not exclude_stage_regex.fullmatch(job.stage): target_id = job.id + target_status = job.status - if stress and job.status in ["success", "failed"]: + if stress and target_status in COMPLETED_STATUSES: if ( stress < 0 or sum(stress_status_counter[job.name].values()) < stress ): - job = enable_job(project, pipeline, job, "retry", force_manual) - stress_status_counter[job.name][job.status] += 1 + stress_status_counter[job.name][target_status] += 1 + execution_times[job.name][job.id] = (job_duration(job), target_status, job.web_url) + job = enable_job_fn(job=job, action_type="retry") else: - job = enable_job(project, pipeline, job, "target", force_manual) + execution_times[job.name][job.id] = (job_duration(job), target_status, job.web_url) + job = enable_job_fn(job=job, action_type="target") - print_job_status(job, job.status not in target_statuses[job.name]) - target_statuses[job.name] = job.status + print_job_status(job, target_status not in target_statuses[job.name], name_field_pad) + target_statuses[job.name] = target_status continue - # all jobs + # all other non-target jobs if job.status != statuses[job.name]: - print_job_status(job, True) + print_job_status(job, True, name_field_pad) statuses[job.name] = job.status # run dependencies and cancel the rest if job.name in dependencies: - job = enable_job(project, pipeline, job, "dep", True) + job = enable_job_fn(job=job, action_type="dep") if job.status == "failed": deps_failed.append(job.name) else: @@ -146,9 +195,9 @@ def monitor_pipeline( if stress: enough = True - for job_name, status in stress_status_counter.items(): + for job_name, status in sorted(stress_status_counter.items()): print( - f"{job_name}\tsucc: {status['success']}; " + f"* {job_name:{name_field_pad}}succ: {status['success']}; " f"fail: {status['failed']}; " f"total: {sum(status.values())} of {stress}", flush=False, @@ -160,22 +209,29 @@ def monitor_pipeline( pretty_wait(REFRESH_WAIT_JOBS) continue - print("---------------------------------", flush=False) + if jobs_waiting: + print_once( + f"{Fore.YELLOW}Waiting for jobs to update status:", + ", ".join(jobs_waiting), + Fore.RESET, + ) + pretty_wait(REFRESH_WAIT_JOBS) + continue - if len(target_statuses) == 1 and {"running"}.intersection( + if len(target_statuses) == 1 and RUNNING_STATUSES.intersection( target_statuses.values() ): - return target_id, None + return target_id, None, execution_times if ( {"failed"}.intersection(target_statuses.values()) - and not set(["running", "pending"]).intersection(target_statuses.values()) + and not RUNNING_STATUSES.intersection(target_statuses.values()) ): - return None, 1 + return None, 1, execution_times if ( {"skipped"}.intersection(target_statuses.values()) - and not {"running", "pending"}.intersection(target_statuses.values()) + and not RUNNING_STATUSES.intersection(target_statuses.values()) ): print( Fore.RED, @@ -183,20 +239,20 @@ def monitor_pipeline( deps_failed, Fore.RESET, ) - return None, 1 + return None, 1, execution_times - if {"success", 
"manual"}.issuperset(target_statuses.values()): - return None, 0 + if skip_follow_statuses.issuperset(target_statuses.values()): + return None, 0, execution_times pretty_wait(REFRESH_WAIT_JOBS) def get_pipeline_job( pipeline: gitlab.v4.objects.ProjectPipeline, - id: int, + job_id: int, ) -> gitlab.v4.objects.ProjectPipelineJob: pipeline_jobs = pipeline.jobs.list(all=True) - return [j for j in pipeline_jobs if j.id == id][0] + return [j for j in pipeline_jobs if j.id == job_id][0] def enable_job( @@ -204,19 +260,24 @@ def enable_job( pipeline: gitlab.v4.objects.ProjectPipeline, job: gitlab.v4.objects.ProjectPipelineJob, action_type: Literal["target", "dep", "retry"], - force_manual: bool, + job_name_field_pad: int = 0, + jobs_waiting: list[str] = [], ) -> gitlab.v4.objects.ProjectPipelineJob: - """enable job""" + # We want to run this job, but it is not ready to run yet, so let's try again in the next + # iteration. + if job.status == "created": + jobs_waiting.append(job.name) + return job + if ( - (job.status in ["success", "failed"] and action_type != "retry") - or (job.status == "manual" and not force_manual) - or job.status in ["skipped", "running", "created", "pending"] + (job.status in COMPLETED_STATUSES and action_type != "retry") + or job.status in {"skipped"} | RUNNING_STATUSES ): return job pjob = project.jobs.get(job.id, lazy=True) - if job.status in ["success", "failed", "canceled"]: + if job.status in {"success", "failed", "canceled", "canceling"}: new_job = pjob.retry() job = get_pipeline_job(pipeline, new_job["id"]) else: @@ -224,32 +285,34 @@ def enable_job( job = get_pipeline_job(pipeline, pjob.id) if action_type == "target": - jtype = "🞋 " + jtype = "🞋 target" # U+1F78B Round target elif action_type == "retry": - jtype = "↻" + jtype = "↻ retrying" # U+21BB Clockwise open circle arrow else: - jtype = "(dependency)" + jtype = "↪ dependency" # U+21AA Left Arrow Curving Right - print(Fore.MAGENTA + f"{jtype} job {job.name} manually enabled" + Style.RESET_ALL) + job_name_field_pad = len(job.name) if job_name_field_pad < 1 else job_name_field_pad + print(Fore.MAGENTA + f"{jtype} job {job.name:{job_name_field_pad}}manually enabled" + Style.RESET_ALL) return job -def cancel_job(project, job) -> None: +def cancel_job( + project: gitlab.v4.objects.Project, + job: gitlab.v4.objects.ProjectPipelineJob +) -> None: """Cancel GitLab job""" - if job.status in [ - "canceled", - "success", - "failed", - "skipped", - ]: + if job.status not in RUNNING_STATUSES: return pjob = project.jobs.get(job.id, lazy=True) pjob.cancel() - print(f"♲ {job.name}", end=" ") + print(f"🗙 {job.name}", end=" ") # U+1F5D9 Cancellation X -def cancel_jobs(project, to_cancel) -> None: +def cancel_jobs( + project: gitlab.v4.objects.Project, + to_cancel: list +) -> None: """Cancel unwanted GitLab jobs""" if not to_cancel: return @@ -257,10 +320,15 @@ def cancel_jobs(project, to_cancel) -> None: with ThreadPoolExecutor(max_workers=6) as exe: part = partial(cancel_job, project) exe.map(part, to_cancel) - print() + # The cancelled jobs are printed without a newline + print_once() -def print_log(project, job_id) -> None: + +def print_log( + project: gitlab.v4.objects.Project, + job_id: int +) -> None: """Print job log into output""" printed_lines = 0 while True: @@ -278,7 +346,7 @@ def print_log(project, job_id) -> None: pretty_wait(REFRESH_WAIT_LOG) -def parse_args() -> None: +def parse_args() -> argparse.Namespace: """Parse args""" parser = argparse.ArgumentParser( description="Tool to trigger a subset of container jobs 
" @@ -290,10 +358,31 @@ def parse_args() -> None: "--target", metavar="target-job", help="Target job regex. For multiple targets, pass multiple values, " - "eg. `--target foo bar`.", + "eg. `--target foo bar`. Only jobs in the target stage(s) " + "supplied, and their dependencies, will be considered.", required=True, nargs=argparse.ONE_OR_MORE, ) + parser.add_argument( + "--include-stage", + metavar="include-stage", + help="Job stages to include when searching for target jobs. " + "For multiple targets, pass multiple values, eg. " + "`--include-stage foo bar`.", + default=[".*"], + nargs=argparse.ONE_OR_MORE, + ) + parser.add_argument( + "--exclude-stage", + metavar="exclude-stage", + help="Job stages to exclude when searching for target jobs. " + "For multiple targets, pass multiple values, eg. " + "`--exclude-stage foo bar`. By default, performance and " + "post-merge jobs are excluded; pass --exclude-stage '' to " + "include them for consideration.", + default=["performance", ".*-postmerge"], + nargs=argparse.ONE_OR_MORE, + ) parser.add_argument( "--token", metavar="token", @@ -303,19 +392,27 @@ def parse_args() -> None: f"otherwise it's read from {TOKEN_DIR / 'gitlab-token'}", ) parser.add_argument( - "--force-manual", action="store_true", help="Force jobs marked as manual" + "--force-manual", action="store_true", + help="Deprecated argument; manual jobs are always force-enabled" ) parser.add_argument( "--stress", default=0, type=int, - help="Stresstest job(s). Number or repetitions or -1 for infinite.", + help="Stresstest job(s). Specify the number of times to rerun the selected jobs, " + "or use -1 for indefinite. Defaults to 0. If jobs have already been executed, " + "this will ensure the total run count respects the specified number.", ) parser.add_argument( "--project", default="mesa", help="GitLab project in the format / or just ", ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Exit after printing target jobs and dependencies", + ) mutex_group1 = parser.add_mutually_exclusive_group() mutex_group1.add_argument( @@ -344,12 +441,14 @@ def parse_args() -> None: def print_detected_jobs( - target_dep_dag: "Dag", dependency_jobs: Iterable[str], target_jobs: Iterable[str] + target_dep_dag: "Dag", + dependency_jobs: Iterable[str], + target_jobs: Iterable[str], ) -> None: def print_job_set(color: str, kind: str, job_set: Iterable[str]): print( color + f"Running {len(job_set)} {kind} jobs: ", - "\n", + "\n\t", ", ".join(sorted(job_set)), Fore.RESET, "\n", @@ -361,10 +460,14 @@ def print_job_set(color: str, kind: str, job_set: Iterable[str]): print_job_set(Fore.BLUE, "target", target_jobs) -def find_dependencies(token: str | None, - target_jobs_regex: re.Pattern, - project_path: str, - iid: int) -> set[str]: +def find_dependencies( + token: str | None, + target_jobs_regex: re.Pattern, + include_stage_regex: re.Pattern, + exclude_stage_regex: re.Pattern, + project_path: str, + iid: int +) -> set[str]: """ Find the dependencies of the target jobs in a GitLab pipeline. @@ -390,7 +493,7 @@ def find_dependencies(token: str | None, gql_instance, {"projectPath": project_path.path_with_namespace, "iid": iid} ) - target_dep_dag = filter_dag(dag, target_jobs_regex) + target_dep_dag = filter_dag(dag, target_jobs_regex, include_stage_regex, exclude_stage_regex) if not target_dep_dag: print(Fore.RED + "The job(s) were not found in the pipeline." 
+ Fore.RESET) sys.exit(1) @@ -401,7 +504,45 @@ return target_jobs.union(dependency_jobs) -if __name__ == "__main__": +def print_monitor_summary( + execution_collection: Dict[str, Dict[int, Tuple[float, str, str]]], + t_start: float, +) -> None: + """Print a summary of the script duration and each job's execution times""" + t_end = time.perf_counter() + spent_minutes = (t_end - t_start) / 60 + print(f"⏲ Duration of script execution: {spent_minutes:0.1f} minutes") # U+23F2 Timer clock + if len(execution_collection) == 0: + return + print("⏲ Job execution times:") # U+23F2 Timer clock + job_names = list(execution_collection.keys()) + job_names.sort() + name_field_pad = len(max(job_names, key=len)) + 2 + for name in job_names: + job_executions = execution_collection[name] + job_times = ', '.join([__job_duration_record(job_execution) + for job_execution in sorted(job_executions.items())]) + print(f"* {name:{name_field_pad}}: ({len(job_executions)}) {job_times}") + + +def __job_duration_record(dict_item: tuple) -> str: + """ + Format one (job_id, (duration, status, url)) pair of a job and its duration. + :param dict_item: item of execution_collection[name]: Dict[int, Tuple[float, str, str]] + """ + job_id = f"{dict_item[0]}" # dictionary key + job_duration, job_status, job_url = dict_item[1] # dictionary value, the tuple + return (f"{STATUS_COLORS[job_status]}" + f"{link2print(job_url, job_id)}: {pretty_duration(job_duration):>8}" + f"{Style.RESET_ALL}") + + +def link2print(url: str, text: str, text_pad: int = 0) -> str: + text_pad = len(text) if text_pad < 1 else text_pad + return f"{URL_START}{url}\a{text:{text_pad}}{URL_END}" + + +def main() -> None: + try: + t_start = time.perf_counter() @@ -463,31 +604,58 @@ target = '|'.join(args.target) target = target.strip() - deps = set() - print("🞋 job: " + Fore.BLUE + target + Style.RESET_ALL) + print("🞋 target job: " + Fore.BLUE + target + Style.RESET_ALL) # U+1F78B Round target # Implicitly include `parallel:` jobs target = f'({target})' + r'( \d+/\d+)?'
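# A minimal sketch of what the implicit `parallel:` suffix buys us, assuming
# a --target of "lavapipe": GitLab names the shards of a `parallel: 4` job
# "lavapipe 1/4" ... "lavapipe 4/4", and the optional suffix keeps them matching:
#
#   import re
#   pattern = re.compile(r'(lavapipe)( \d+/\d+)?')
#   assert pattern.fullmatch('lavapipe')             # plain job name
#   assert pattern.fullmatch('lavapipe 3/4')         # parallel shard
#   assert not pattern.fullmatch('lavapipe-piglit')  # fullmatch, not search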
target_jobs_regex = re.compile(target) + include_stage = '|'.join(args.include_stage) + include_stage = include_stage.strip() + + print("🞋 target from stages: " + Fore.BLUE + include_stage + Style.RESET_ALL) # U+1F78B Round target + + include_stage_regex = re.compile(include_stage) + + exclude_stage = '|'.join(args.exclude_stage) + exclude_stage = exclude_stage.strip() + + print("🞋 target excluding stages: " + Fore.BLUE + exclude_stage + Style.RESET_ALL) # U+1F78B Round target + + exclude_stage_regex = re.compile(exclude_stage) + deps = find_dependencies( token=token, target_jobs_regex=target_jobs_regex, + include_stage_regex=include_stage_regex, + exclude_stage_regex=exclude_stage_regex, iid=pipe.iid, project_path=cur_project ) - target_job_id, ret = monitor_pipeline( - cur_project, pipe, target_jobs_regex, deps, args.force_manual, args.stress + + if args.dry_run: + sys.exit(0) + + target_job_id, ret, exec_t = monitor_pipeline( + cur_project, + pipe, + target_jobs_regex, + include_stage_regex, + exclude_stage_regex, + deps, + args.stress ) if target_job_id: print_log(cur_project, target_job_id) - t_end = time.perf_counter() - spend_minutes = (t_end - t_start) / 60 - print(f"⏲ Duration of script execution: {spend_minutes:0.1f} minutes") + print_monitor_summary(exec_t, t_start) sys.exit(ret) except KeyboardInterrupt: sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/mesalib/.gitlab-ci/bin/gitlab_common.py b/mesalib/.gitlab-ci/bin/gitlab_common.py index 54e0cc7920..46b43a0a70 100644 --- a/mesalib/.gitlab-ci/bin/gitlab_common.py +++ b/mesalib/.gitlab-ci/bin/gitlab_common.py @@ -12,6 +12,7 @@ import os import re import time +from functools import cache from pathlib import Path GITLAB_URL = "https://gitlab.freedesktop.org" @@ -28,30 +29,40 @@ "Feed token": "glft-", "Incoming mail token": "glimt-", "GitLab Agent for Kubernetes token": "glagent-", - "SCIM Tokens": "glsoat-" + "SCIM Tokens": "glsoat-", } +@cache +def print_once(*args, **kwargs): + """Print without spamming the output""" + print(*args, **kwargs) + + def pretty_duration(seconds): """Pretty print duration""" hours, rem = divmod(seconds, 3600) minutes, seconds = divmod(rem, 60) if hours: - return f"{hours:0.0f}h{minutes:0.0f}m{seconds:0.0f}s" + return f"{hours:0.0f}h{minutes:02.0f}m{seconds:02.0f}s" if minutes: - return f"{minutes:0.0f}m{seconds:0.0f}s" + return f"{minutes:0.0f}m{seconds:02.0f}s" return f"{seconds:0.0f}s" -def get_gitlab_pipeline_from_url(gl, pipeline_url): - assert pipeline_url.startswith(GITLAB_URL) - url_path = pipeline_url[len(GITLAB_URL) :] - url_path_components = url_path.split("/") - project_name = "/".join(url_path_components[1:3]) - assert url_path_components[3] == "-" - assert url_path_components[4] == "pipelines" - pipeline_id = int(url_path_components[5]) - cur_project = gl.projects.get(project_name) +def get_gitlab_pipeline_from_url(gl, pipeline_url) -> tuple: + """ + Extract the project and pipeline object from the url string + :param gl: Gitlab object + :param pipeline_url: string with a url to a pipeline + :return: ProjectPipeline, Project objects + """ + pattern = rf"^{re.escape(GITLAB_URL)}/(.*)/-/pipelines/([0-9]+)$" + match = re.match(pattern, pipeline_url) + if not match: + raise AssertionError(f"url {pipeline_url} doesn't follow the pattern {pattern}") + namespace_with_project, pipeline_id = match.groups() + cur_project = gl.projects.get(namespace_with_project) pipe = cur_project.pipelines.get(pipeline_id) return pipe, cur_project @@ -88,19 +99,23 @@ def 
get_token_from_default_dir() -> str: def validate_gitlab_token(token: str) -> bool: - token_suffix = token.split("-")[-1] + # Match against recognised token prefixes + token_suffix = None + for token_type, token_prefix in TOKEN_PREFIXES.items(): + if token.startswith(token_prefix): + logging.info(f"Found probable token type: {token_type}") + token_suffix = token[len(token_prefix):] + break + + if not token_suffix: + return False + # Basic validation of the token suffix based on: # https://gitlab.com/gitlab-org/gitlab/-/blob/master/gems/gitlab-secret_detection/lib/gitleaks.toml if not re.match(r"(\w+-)?[0-9a-zA-Z_\-]{20,64}", token_suffix): return False - for token_type, token_prefix in TOKEN_PREFIXES.items(): - if token.startswith(token_prefix): - logging.info(f"Found probable token type: {token_type}") - return True - - # If the token type is not recognized, return False - return False + return True def get_token_from_arg(token_arg: str | Path | None) -> str | None: diff --git a/mesalib/.gitlab-ci/bin/gitlab_gql.py b/mesalib/.gitlab-ci/bin/gitlab_gql.py index eefdf214dd..144e3c31d7 100644 --- a/mesalib/.gitlab-ci/bin/gitlab_gql.py +++ b/mesalib/.gitlab-ci/bin/gitlab_gql.py @@ -325,16 +325,24 @@ def create_job_needs_dag(gl_gql: GitlabGQL, params, disable_cache: bool = True) return final_dag -def filter_dag(dag: Dag, regex: Pattern) -> Dag: - jobs_with_regex: set[str] = {job for job in dag if regex.fullmatch(job)} - return Dag({job: data for job, data in dag.items() if job in sorted(jobs_with_regex)}) +def filter_dag( + dag: Dag, job_name_regex: Pattern, include_stage_regex: Pattern, exclude_stage_regex: Pattern +) -> Dag: + filtered_jobs: Dag = Dag({}) + for (job, data) in dag.items(): + if not job_name_regex.fullmatch(job): + continue + if not include_stage_regex.fullmatch(data["stage"]): + continue + if exclude_stage_regex.fullmatch(data["stage"]): + continue + filtered_jobs[job] = data + return filtered_jobs def print_dag(dag: Dag) -> None: - for job, data in dag.items(): - print(f"{job}:") - print(f"\t{' '.join(data['needs'])}") - print() + for job, data in sorted(dag.items()): + print(f"{job}:\n\t{' '.join(data['needs'])}\n") def fetch_merged_yaml(gl_gql: GitlabGQL, params) -> dict[str, Any]: @@ -474,8 +482,23 @@ def parse_args() -> Namespace: "--regex", type=str, required=False, + default=".*", help="Regex pattern for the job name to be considered", ) + parser.add_argument( + "--include-stage", + type=str, + required=False, + default=".*", + help="Regex pattern for the stage name to be considered", + ) + parser.add_argument( + "--exclude-stage", + type=str, + required=False, + default="^$", + help="Regex pattern for the stage name to be excluded", + ) mutex_group_print = parser.add_mutually_exclusive_group() mutex_group_print.add_argument( "--print-dag", @@ -517,8 +540,7 @@ def main(): gl_gql, {"projectPath": args.project_path, "iid": iid}, disable_cache=True ) - if args.regex: - dag = filter_dag(dag, re.compile(args.regex)) + dag = filter_dag(dag, re.compile(args.regex), re.compile(args.include_stage), re.compile(args.exclude_stage)) print_dag(dag) diff --git a/mesalib/.gitlab-ci/bin/nightly_compare.py b/mesalib/.gitlab-ci/bin/nightly_compare.py new file mode 100644 index 0000000000..76c49b9456 --- /dev/null +++ b/mesalib/.gitlab-ci/bin/nightly_compare.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python3 +# Copyright © 2020 - 2024 Collabora Ltd. 
+# Authors: +# David Heidelberg +# Sergi Blanch Torne +# SPDX-License-Identifier: MIT + +""" +Compare the two latest scheduled pipelines and provide information +about the jobs you're interested in. +""" + +import argparse +import csv +import io +import re +import requests +from tabulate import tabulate + +import gitlab +from colorama import Fore, Style +from gitlab_common import read_token + + +MARGE_BOT_USER_ID = 9716 + +def print_failures_csv(job_id): + url = 'https://gitlab.freedesktop.org/mesa/mesa/-/jobs/' + str(job_id) + '/artifacts/raw/results/failures.csv' + missing: int = 0 + MAX_MISS: int = 20 + try: + response = requests.get(url) + response.raise_for_status() + csv_content = io.StringIO(response.text) + csv_reader = csv.reader(csv_content) + data = list(csv_reader) + + for line in data[:]: + if line[1] == "UnexpectedImprovement(Pass)": + line[1] = Fore.GREEN + line[1] + Style.RESET_ALL + elif line[1] == "UnexpectedImprovement(Fail)": + line[1] = Fore.YELLOW + line[1] + Style.RESET_ALL + elif line[1] == "Crash" or line[1] == "Fail": + line[1] = Fore.RED + line[1] + Style.RESET_ALL + elif line[1] == "Missing": + if missing > MAX_MISS: + data.remove(line) + continue + missing += 1 + line[1] = Fore.YELLOW + line[1] + Style.RESET_ALL + else: + line[1] = Fore.WHITE + line[1] + Style.RESET_ALL + + if missing > MAX_MISS: + data.append([Fore.RED + f"... more than {MAX_MISS} missing tests, something crashed?", "Missing" + Style.RESET_ALL]) + headers = ["Test ", "Result"] + print(tabulate(data, headers, tablefmt="plain")) + except Exception: + # the failures.csv artifact may be missing; nothing to print then + pass + + +def job_failed_before(old_jobs, job): + for old_job in old_jobs: + if job.name == old_job.name: + return old_job + + +def parse_args() -> argparse.Namespace: + """Parse args""" + parser = argparse.ArgumentParser( + description="Tool to compare the two latest scheduled pipelines", + ) + parser.add_argument( + "--target", + metavar="target-job", + help="Target job regex. For multiple targets, pass multiple values, " + "eg. `--target foo bar`.", + required=False, + nargs=argparse.ONE_OR_MORE, + ) + parser.add_argument( + "--token", + metavar="token", + help="force GitLab token, otherwise it's read from ~/.config/gitlab-token", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + token = read_token(args.token) + gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org", private_token=token) + + project = gl.projects.get("mesa/mesa") + + print( + "\u001b]8;;https://gitlab.freedesktop.org/mesa/mesa/-/pipelines?page=1&scope=all&source=schedule\u001b\\Scheduled pipelines overview\u001b]8;;\u001b\\" + ) + pipelines = project.pipelines.list( + source="schedule", ordered_by="created_at", sort="desc", page=1, per_page=2 + ) + print( + f"Old pipeline: {pipelines[1].created_at}\t\u001b]8;;{pipelines[1].web_url}\u001b\\{pipelines[1].status}\u001b]8;;\u001b\\\t{pipelines[1].sha}" + ) + print( + f"New pipeline: {pipelines[0].created_at}\t\u001b]8;;{pipelines[0].web_url}\u001b\\{pipelines[0].status}\u001b]8;;\u001b\\\t{pipelines[0].sha}" + ) + print( + f"\nWebUI visual compare: https://gitlab.freedesktop.org/mesa/mesa/-/compare/{pipelines[1].sha}...{pipelines[0].sha}\n" + ) + + # regex part + if args.target: + target = "|".join(args.target) + target = target.strip() + print("🞋 jobs: " + Fore.BLUE + target + Style.RESET_ALL) + + target = f"({target})" + r"( \d+/\d+)?"
+ else: + target = ".*" + + target_jobs_regex: re.Pattern = re.compile(target) + + old_failed_jobs = [] + for job in pipelines[1].jobs.list(all=True): + if ( + job.status != "failed" + or target_jobs_regex + and not target_jobs_regex.fullmatch(job.name) + ): + continue + old_failed_jobs.append(job) + + job_failed = False + for job in pipelines[0].jobs.list(all=True): + if ( + job.status != "failed" + or target_jobs_regex + and not target_jobs_regex.fullmatch(job.name) + ): + continue + + job_failed = True + + previously_failed_job = job_failed_before(old_failed_jobs, job) + if previously_failed_job: + print( + Fore.YELLOW + + f":: \u001b]8;;{job.web_url}\u001b\\{job.name}\u001b]8;;\u001b\\" + + Fore.MAGENTA + + f" \u001b]8;;{previously_failed_job.web_url}\u001b\\(previous run)\u001b]8;;\u001b\\" + + Style.RESET_ALL + ) + else: + print( + Fore.RED + + f":: \u001b]8;;{job.web_url}\u001b\\{job.name}\u001b]8;;\u001b\\" + + Style.RESET_ALL + ) + print_failures_csv(job.id) + + if not job_failed: + exit(0) + + print("Commits between nightly pipelines:") + commit = project.commits.get(pipelines[0].sha) + while True: + print( + f"{commit.id} \u001b]8;;{commit.web_url}\u001b\\{commit.title}\u001b]8;;\u001b\\" + ) + if commit.id == pipelines[1].sha: + break + commit = project.commits.get(commit.parent_ids[0]) diff --git a/mesalib/.gitlab-ci/bin/nightly_compare.sh b/mesalib/.gitlab-ci/bin/nightly_compare.sh new file mode 100644 index 0000000000..d72cfdc50e --- /dev/null +++ b/mesalib/.gitlab-ci/bin/nightly_compare.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +set -eu + +this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")") +readonly this_dir + +exec \ + "$this_dir/../python-venv.sh" \ + "$this_dir/requirements.txt" \ + "$this_dir/nightly_compare.py" "$@" + diff --git a/mesalib/.gitlab-ci/bin/pipeline_message.py b/mesalib/.gitlab-ci/bin/pipeline_message.py new file mode 100644 index 0000000000..13527409f1 --- /dev/null +++ b/mesalib/.gitlab-ci/bin/pipeline_message.py @@ -0,0 +1,377 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: MIT + +# Provide a markdown-formatted message summarizing the reasons why a pipeline failed. +# Marge bot can use this script to provide more helpful comments when CI fails. 
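# A minimal usage sketch from Python rather than the shell wrapper, assuming
# this module is importable as `pipeline_message` (it mirrors the __main__
# block at the bottom of this file):
#
#   import asyncio
#   from pipeline_message import main
#   summary = asyncio.run(main("1310098", project_id="176"))
#   print(summary or "pipeline did not fail")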
+# Example for running locally: +# ./bin/ci/pipeline_message.sh --project-id 176 --pipeline-id 1310098 + + +import argparse +import asyncio +import logging +from typing import Any + +import aiohttp + +PER_PAGE: int = 6000 + + +async def get_pipeline_status( + session: aiohttp.ClientSession, project_id: str, pipeline_id: str +): + url = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}/pipelines/{pipeline_id}" + logging.info(f"Fetching pipeline status from {url}") + async with session.get(url) as response: + response.raise_for_status() + pipeline_details = await response.json() + return pipeline_details.get("status") + + +async def get_jobs_for_pipeline( + session: aiohttp.ClientSession, project_id: str, pipeline_id: str +): + url = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}/pipelines/{pipeline_id}/jobs" + logging.info(url) + jobs = [] + params = {"per_page": PER_PAGE} + async with session.get(url, params=params) as response: + response.raise_for_status() + jobs = await response.json() + return jobs + + +def get_problem_jobs(jobs: list[dict[str, Any]]): + ignore_stage_list = [ + "postmerge", + "performance", + ] + problem_jobs = [] + for job in jobs: + if any(ignore.lower() in job["stage"] for ignore in ignore_stage_list): + continue + if job["status"] in {"failed", "canceled"}: + problem_jobs.append(job) + return problem_jobs + + +def unexpected_improvements(failed_test_array): + if failed_test_array["unexpected_improvements"]: + unexpected_improvements_count = len( + failed_test_array["unexpected_improvements"] + ) + return f" {unexpected_improvements_count} improved test{'s' if unexpected_improvements_count != 1 else ''}" + return "" + + +def fails(failed_test_array): + if failed_test_array["fails"]: + fails_count = len(failed_test_array["fails"]) + return f" {fails_count} failed test{'s' if fails_count != 1 else ''}" + return "" + + +def crashes(failed_test_array): + if failed_test_array["crashes"]: + crash_count = len(failed_test_array["crashes"]) + return f" {crash_count} crashed test{'s' if crash_count != 1 else ''}" + return "" + + +def get_failed_test_details(failed_test_array): + message = "" + max_tests_to_display = 5 + + if failed_test_array["unexpected_improvements"]: + for i, test in enumerate(failed_test_array["unexpected_improvements"]): + if i > max_tests_to_display: + message += " \nand more...
" + break + message += f"{test}
" + + if failed_test_array["fails"]: + for i, test in enumerate(failed_test_array["fails"]): + if i > max_tests_to_display: + message += " \nand more...
" + break + message += f"{test}
" + + if failed_test_array["crashes"]: + for i, test in enumerate(failed_test_array["crashes"]): + if i > max_tests_to_display: + message += " \nand more...
" + break + message += f"{test}
" + + return message + + +def get_failed_test_summary_message(failed_test_array): + summary_msg = "" + summary_msg += unexpected_improvements(failed_test_array) + summary_msg += fails(failed_test_array) + summary_msg += crashes(failed_test_array) + summary_msg += "" + return summary_msg + + +def sort_failed_tests_by_status(failures_csv): + failed_test_array = { + "unexpected_improvements": [], + "fails": [], + "crashes": [], + "timeouts": [], + } + + for test in failures_csv.splitlines(): + if "UnexpectedImprovement" in test: + failed_test_array["unexpected_improvements"].append(test) + elif "Fail" in test: + failed_test_array["fails"].append(test) + elif "Crash" in test: + failed_test_array["crashes"].append(test) + elif "Timeout" in test: + failed_test_array["timeouts"].append(test) + + return failed_test_array + + +async def get_failures_csv(session, project_id, job): + job_id = job["id"] + url = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}/jobs/{job_id}/artifacts/results/failures.csv" + async with session.get(url) as response: + if response.status == 200: + text = await response.text() + return text + else: + logging.debug(f"No response from: {url}") + return "" + + +async def get_test_failures(session, project_id, job): + failures_csv = await get_failures_csv(session, project_id, job) + if not failures_csv: + return "" + + # If just one test failed, don't bother with more complicated sorting + lines = failures_csv.splitlines() + if len(lines) == 1: + return ": " + lines[0] + "
" + + failed_test_array = sort_failed_tests_by_status(failures_csv) + failures_msg = "
" + failures_msg += get_failed_test_summary_message(failed_test_array) + failures_msg += get_failed_test_details(failed_test_array) + failures_msg += "
" + + return failures_msg + + +async def get_trace_failures(session, project_id, job): + project_json = await get_project_json(session, project_id) + path = project_json.get("path", "") + if not path: + return "" + + job_id = job["id"] + url = f"https://mesa.pages.freedesktop.org/-/{path}/-/jobs/{job_id}/artifacts/results/summary/problems.html" + async with session.get(url) as response: + if response.status == 200: + return url + else: + logging.debug(f"No response from: {url}") + return "" + + +async def get_project_json(session, project_id): + url_project_id = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}" + async with session.get(url_project_id) as response: + if response.status == 200: + return await response.json() + else: + logging.debug(f"No response from: {url_project_id}") + return "" + + +async def get_job_log(session: aiohttp.ClientSession, project_id: str, job_id: int): + project_json = await get_project_json(session, project_id) + path_with_namespace = project_json.get("path_with_namespace", "") + if not path_with_namespace: + return "" + + url_job_log = ( + f"https://gitlab.freedesktop.org/{path_with_namespace}/-/jobs/{job_id}/raw" + ) + async with session.get(url_job_log) as response: + if response.status == 200: + return await response.text() + else: + logging.debug(f"No response from job log: {url_job_log}") + return "" + + +async def search_job_log_for_errors(session, project_id, job): + log_error_message = "" + + # Bypass these generic error messages in hopes of finding a more specific error. + # The entries are case insensitive. Keep them in alphabetical order and don't + # forget to add a comma after each entry + ignore_list = [ + "403: b", + "aborting", + "building c", + "continuing", + "error_msg : None", + "error_type", + "error generated", + "errors generated", + "exit code", + "exit status", + "exiting now", + "job failed", + "no_error", + "no files to upload", + "performing test", + "ret code", + "retry", + "retry-all-errors", + "strerror_", + "success", + "unknown-section", + ] + job_log = await get_job_log(session, project_id, job["id"]) + + for line in reversed(job_log.splitlines()): + if "fatal" in line.lower(): + # remove date and formatting before fatal message + log_error_message = line[line.lower().find("fatal") :] + break + + if "error" in line.lower(): + if any(ignore.lower() in line.lower() for ignore in ignore_list): + continue + + # remove date and formatting before error message + log_error_message = line[line.lower().find("error") :].strip() + + # if there is no further info after the word error then it's not helpful + # so reset the message and try again. + if log_error_message.lower() in {"error", "errors", "error:", "errors:"}: + log_error_message = "" + continue + break + + # timeout msg from .gitlab-ci/lava/lava_job_submitter.py + if "expected to take at least" in line.lower(): + log_error_message = line + break + + return log_error_message + + +async def process_single_job(session, project_id, job): + job_url = job.get("web_url", "") + if not job_url: + logging.info(f"Job {job['name']} is missing a web_url") + + job_name = job.get("name", "Unnamed Job") + message = f"[{job_name}]({job_url})" + + # if a job times out it's cancelled, so worth mentioning here + if job["status"] == "canceled": + return f"{message}: canceled
" + + # if it's not a script failure then all we can do is give the gitlab assigned reason + if job["failure_reason"] != "script_failure": + return f"{message}: {job['failure_reason']}
" + + test_failures = await get_test_failures(session, project_id, job) + if test_failures: + return f"{message}{test_failures}" + + trace_failures = await get_trace_failures(session, project_id, job) + if trace_failures: + return f"{message}: has a [trace failure]({trace_failures})
" + + log_error_message = await search_job_log_for_errors(session, project_id, job) + if log_error_message: + return f"{message}: {log_error_message}
" + + return f"{message}
" + + +async def process_job_with_limit(session, project_id, job): + # Use at most 10 concurrent tasks + semaphore = asyncio.Semaphore(10) + async with semaphore: + return await process_single_job(session, project_id, job) + + +async def process_problem_jobs(session, project_id, problem_jobs): + + problem_jobs_count = len(problem_jobs) + + if problem_jobs_count == 1: + message = f"
There were problems with job: " + message += await process_single_job(session, project_id, problem_jobs[0]) + return message + + message = "<details>
" + message += f"" + message += f"There were problems with {problem_jobs_count} jobs: " + message += "" + + tasks = [process_job_with_limit(session, project_id, job) for job in problem_jobs] + + results = await asyncio.gather(*tasks) + + for result in results: + message += result + + message += f"
" + + return message + + +async def main(pipeline_id: str, project_id: str = "176") -> str: + + message = "" + + try: + timeout = aiohttp.ClientTimeout(total=120) + logging.basicConfig(level=logging.INFO) + + async with aiohttp.ClientSession(timeout=timeout) as session: + pipeline_status = await get_pipeline_status( + session, project_id, pipeline_id + ) + logging.debug(f"Pipeline status: {pipeline_status}") + if pipeline_status != "failed": + return message + + jobs = await get_jobs_for_pipeline(session, project_id, pipeline_id) + problem_jobs = get_problem_jobs(jobs) + + if len(problem_jobs) == 0: + return message + + message = await process_problem_jobs(session, project_id, problem_jobs) + except Exception as e: + logging.error(f"An error occurred: {e}") + return "" + + return message + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Fetch GitLab pipeline details") + parser.add_argument( + "--project-id", default="176", help="Project ID (default: 176 i.e. mesa/mesa)" + ) + parser.add_argument("--pipeline-id", required=True, help="Pipeline ID") + + args = parser.parse_args() + + message = asyncio.run(main(args.pipeline_id, args.project_id)) + + print(message) diff --git a/mesalib/.gitlab-ci/bin/pipeline_message.sh b/mesalib/.gitlab-ci/bin/pipeline_message.sh new file mode 100644 index 0000000000..84c2825488 --- /dev/null +++ b/mesalib/.gitlab-ci/bin/pipeline_message.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -eu + +this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")") +readonly this_dir + +exec \ + "$this_dir/../python-venv.sh" \ + "$this_dir/requirements.txt" \ + "$this_dir/pipeline_message.py" "$@" diff --git a/mesalib/.gitlab-ci/bin/requirements.txt b/mesalib/.gitlab-ci/bin/requirements.txt index a1448999b1..40f4c5dd8a 100644 --- a/mesalib/.gitlab-ci/bin/requirements.txt +++ b/mesalib/.gitlab-ci/bin/requirements.txt @@ -1,11 +1,19 @@ -colorama==0.4.5 +# If you change these requirements, and you need these packages +# to be available in the debian/x86_64_pyutils container +# then bump the DEBIAN_PYUTILS_TAG +PyYAML==6.* +colorama==0.4.* filecache==0.81 -gql==3.4.0 -kaleido==0.2.1 -python-dateutil==2.8.2 -pandas==2.1.1 -plotly==5.17.0 -python-gitlab==3.5.0 -PyYAML==6.0.1 -ruamel.yaml.clib==0.2.8 -ruamel.yaml==0.17.21 +filelock==3.* +fire==0.5.0 +flake8==7.* +gql==3.* +kaleido==0.2.* +lavacli==1.5.2 +pandas==2.* +plotly==5.* +python-dateutil==2.* +python-gitlab==4.* +ruamel.yaml.clib==0.2.* +ruamel.yaml==0.17.* +tabulate==0.9.* diff --git a/mesalib/.gitlab-ci/bin/test/requirements.txt b/mesalib/.gitlab-ci/bin/test/requirements.txt index f80621af28..5060531afe 100644 --- a/mesalib/.gitlab-ci/bin/test/requirements.txt +++ b/mesalib/.gitlab-ci/bin/test/requirements.txt @@ -1,5 +1,10 @@ +-r ../requirements.txt filelock==3.12.4 fire==0.5.0 +freezegun==1.5.1 +hypothesis==6.67.1 mock==5.1.0 polars==0.19.3 pytest==7.4.2 +pytest-asyncio==0.21.0 +pytest-cov==3.0.0 diff --git a/mesalib/.gitlab-ci/bin/test/test_gantt_chart.py b/mesalib/.gitlab-ci/bin/test/test_gantt_chart.py new file mode 100644 index 0000000000..84d6da0f26 --- /dev/null +++ b/mesalib/.gitlab-ci/bin/test/test_gantt_chart.py @@ -0,0 +1,201 @@ +from contextlib import suppress +from datetime import datetime, timedelta +from unittest import mock +from unittest.mock import MagicMock, patch + +import ci_post_gantt +import pytest +from ci_gantt_chart import generate_gantt_chart +from ci_post_gantt import Gitlab, MockGanttExit + + +def create_mock_job( + name, id, status, created_at, 
queued_duration, started_at, finished_at=None +): + mock_job = MagicMock() + mock_job.name = name + mock_job.status = status + mock_job.id = id + mock_job.created_at = created_at + mock_job.queued_duration = queued_duration + mock_job.started_at = started_at + mock_job.finished_at = finished_at + return mock_job + + +@pytest.fixture +def fake_pipeline(): + current_time = datetime.fromisoformat("2024-12-17 23:54:13.940091+00:00") + created_at = current_time - timedelta(minutes=10) + + job1 = create_mock_job( + name="job1", + id="1", + status="success", + created_at=created_at.isoformat(), + queued_duration=1, # seconds + started_at=(created_at + timedelta(seconds=2)).isoformat(), + finished_at=(created_at + timedelta(minutes=1)).isoformat(), + ) + + mock_pipeline = MagicMock() + mock_pipeline.web_url = "https://gitlab.freedesktop.org/mesa/mesa/-/pipelines/9999" + mock_pipeline.duration = 600 # Total pipeline duration in seconds + mock_pipeline.created_at = created_at.isoformat() + mock_pipeline.yaml_errors = False + mock_pipeline.jobs.list.return_value = [job1] + return mock_pipeline + + +def test_generate_gantt_chart(fake_pipeline): + fig = generate_gantt_chart(fake_pipeline) + + fig_dict = fig.to_dict() + assert "data" in fig_dict + + # Extract all job names from the "y" axis in the Gantt chart data + all_job_names = set() + for trace in fig_dict["data"]: + if "y" in trace: + all_job_names.update(trace["y"]) + + assert any( + "job1" in job for job in all_job_names + ), "job1 should be present in the Gantt chart" + + +def test_ci_timeout(fake_pipeline): + fig = generate_gantt_chart(fake_pipeline, ci_timeout=1) + + fig_dict = fig.to_dict() + + timeout_line = None + for shape in fig_dict.get("layout", {}).get("shapes", []): + if shape.get("line", {}).get("dash") == "dash": + timeout_line = shape + break + + assert timeout_line is not None, "Timeout line should exist in the Gantt chart" + timeout_x = timeout_line["x0"] + + # Check that the timeout line is 1 minute after the pipeline creation time + pipeline_created_at = datetime.fromisoformat(fake_pipeline.created_at) + expected_timeout = pipeline_created_at + timedelta(minutes=1) + assert ( + timeout_x == expected_timeout + ), f"Timeout should be at {expected_timeout}, got {timeout_x}" + + +def test_marge_bot_user_id(): + with patch("ci_post_gantt.Gitlab") as MockGitlab: + mock_gitlab_instance = MagicMock(spec=Gitlab) + mock_gitlab_instance.users = MagicMock() + MockGitlab.return_value = mock_gitlab_instance + + marge_bot_user_id = 12345 + ci_post_gantt.main("fake_token", None, marge_bot_user_id) + mock_gitlab_instance.users.get.assert_called_once_with(marge_bot_user_id) + + +def test_project_ids(): + current_time = datetime.now() + project_id_1 = 176 + event_1 = MagicMock() + event_1.project_id = project_id_1 + event_1.created_at = (current_time - timedelta(days=1)).isoformat() + event_1.note = {"body": f"Event for project {project_id_1}"} + + project_id_2 = 166 + event_2 = MagicMock() + event_2.project_id = project_id_2 + event_2.created_at = (current_time - timedelta(days=2)).isoformat() + event_2.note = {"body": f"Event for project {project_id_2}"} + + with patch("ci_post_gantt.Gitlab") as MockGitlab: + mock_user = MagicMock() + mock_user.events = MagicMock() + mock_user.events.list.return_value = [event_1, event_2] + + mock_gitlab_instance = MagicMock(spec=Gitlab) + mock_gitlab_instance.users = MagicMock() + mock_gitlab_instance.users.get.return_value = mock_user + MockGitlab.return_value = mock_gitlab_instance + + last_event_date = 
(current_time - timedelta(days=3)).isoformat() + + # Test a single project id + ci_post_gantt.main("fake_token", last_event_date) + marge_bot_single_project_scope = [ + event.note["body"] + for event in mock_user.events.list.return_value + if event.project_id == project_id_1 + ] + assert f"Event for project {project_id_1}" in marge_bot_single_project_scope + assert f"Event for project {project_id_2}" not in marge_bot_single_project_scope + + # Test multiple project ids + ci_post_gantt.main( + "fake_token", last_event_date, 9716, [project_id_1, project_id_2] + ) + + marge_bot_multiple_project_scope = [ + event.note["body"] for event in mock_user.events.list.return_value + ] + assert f"Event for project {project_id_1}" in marge_bot_multiple_project_scope + assert f"Event for project {project_id_2}" in marge_bot_multiple_project_scope + + +def test_add_gantt_after_pipeline_message(): + current_time = datetime.now() + + plain_url = "https://gitlab.freedesktop.org/mesa/mesa/-/pipelines/12345" + plain_message = ( + f"I couldn't merge this branch: CI failed! See pipeline {plain_url}." + ) + event_plain = MagicMock() + event_plain.project_id = 176 + event_plain.created_at = (current_time - timedelta(days=1)).isoformat() + event_plain.note = {"body": plain_message} + + summary_url = "https://gitlab.freedesktop.org/mesa/mesa/-/pipelines/99999" + summary_message = ( + "I couldn't merge this branch: " + f"CI failed! See pipeline {summary_url}.
<br>There were problems with job:" + "[lavapipe](https://gitlab.freedesktop.org/mesa/mesa/-/jobs/68141218)<br>
" + "3 crashed testsdEQP-VK.ray_query.builtin.instancecustomindex.frag.aabbs,Crash
dEQP" + "-VK.ray_query.builtin.objecttoworld.frag.aabbs,Crash
dEQP-VK.sparse_resources.shader_intrinsics." + "2d_array_sparse_fetch.g16_b16r16_2plane_444_unorm.11_37_3_nontemporal,Crash<br>
" + ) + event_with_summary = MagicMock() + event_with_summary.project_id = 176 + event_with_summary.created_at = (current_time - timedelta(days=1)).isoformat() + event_with_summary.note = {"body": summary_message} + + with patch("ci_post_gantt.Gitlab") as MockGitlab, patch( + "ci_post_gantt.get_gitlab_pipeline_from_url", return_value=None + ) as mock_get_gitlab_pipeline_from_url: + + def safe_mock(*args, **kwargs): + with suppress(TypeError): + raise MockGanttExit("Exiting for test purposes") + + mock_get_gitlab_pipeline_from_url.side_effect = safe_mock + + mock_user = MagicMock() + mock_user.events = MagicMock() + mock_user.events.list.return_value = [event_plain, event_with_summary] + + mock_gitlab_instance = MagicMock(spec=Gitlab) + mock_gitlab_instance.users = MagicMock() + mock_gitlab_instance.users.get.return_value = mock_user + MockGitlab.return_value = mock_gitlab_instance + + last_event_date = (current_time - timedelta(days=3)).isoformat() + ci_post_gantt.main("fake_token", last_event_date, 12345) + mock_get_gitlab_pipeline_from_url.assert_has_calls( + [ + mock.call(mock_gitlab_instance, plain_url), + mock.call(mock_gitlab_instance, summary_url), + ], + any_order=True, + ) diff --git a/mesalib/.gitlab-ci/bin/test/test_pipeline_message.py b/mesalib/.gitlab-ci/bin/test/test_pipeline_message.py new file mode 100644 index 0000000000..688497dcf1 --- /dev/null +++ b/mesalib/.gitlab-ci/bin/test/test_pipeline_message.py @@ -0,0 +1,309 @@ +import logging +from unittest.mock import AsyncMock, patch + +import pytest + +from pipeline_message import ( + get_failed_test_summary_message, + get_problem_jobs, + get_trace_failures, + main, + process_problem_jobs, + search_job_log_for_errors, + sort_failed_tests_by_status, + unexpected_improvements, +) + + +def test_get_problem_jobs(): + jobs = [ + {"stage": "build", "status": "failed"}, + {"stage": "test", "status": "canceled"}, + {"stage": "postmerge", "status": "failed"}, + {"stage": "performance", "status": "failed"}, + {"stage": "deploy", "status": "failed"}, + ] + + problem_jobs = get_problem_jobs(jobs) + + assert len(problem_jobs) == 3 + assert problem_jobs[0]["stage"] == "build" + assert problem_jobs[1]["stage"] == "test" + assert problem_jobs[2]["stage"] == "deploy" + + +def test_sort_failed_tests_by_status(): + failures_csv = """\ +Test1,UnexpectedImprovement +Test2,Fail +Test3,Crash +Test4,Timeout +Test5,Fail +Test6,UnexpectedImprovement +""" + sorted_tests = sort_failed_tests_by_status(failures_csv) + + assert len(sorted_tests["unexpected_improvements"]) == 2 + assert len(sorted_tests["fails"]) == 2 + assert len(sorted_tests["crashes"]) == 1 + assert len(sorted_tests["timeouts"]) == 1 + + assert sorted_tests["unexpected_improvements"] == [ + "Test1,UnexpectedImprovement", + "Test6,UnexpectedImprovement", + ] + assert sorted_tests["fails"] == ["Test2,Fail", "Test5,Fail"] + assert sorted_tests["crashes"] == ["Test3,Crash"] + assert sorted_tests["timeouts"] == ["Test4,Timeout"] + + +def test_get_failed_test_summary_message(): + failed_test_array = { + "unexpected_improvements": [ + "test1 UnexpectedImprovement", + "test2 UnexpectedImprovement", + ], + "fails": ["test3 Fail", "test4 Fail", "test5 Fail"], + "crashes": ["test6 Crash"], + "timeouts": [], + } + + summary_message = get_failed_test_summary_message(failed_test_array) + + assert "" in summary_message + assert "2 improved tests" in summary_message + assert "3 failed tests" in summary_message + assert "1 crashed test" in summary_message + assert "" in summary_message + + +def 
test_unexpected_improvements(): + message = "" + failed_test_array = { + "unexpected_improvements": ["test_improvement_1", "test_improvement_2"], + "fails": [], + "crashes": [], + "timeouts": [], + } + result = unexpected_improvements(failed_test_array) + assert result == " 2 improved tests", f"Unexpected result: {result}" + + +@pytest.mark.asyncio +@patch("pipeline_message.get_pipeline_status", new_callable=AsyncMock) +async def test_gitlab_api_failure(mock_get_pipeline_status): + mock_get_pipeline_status.side_effect = Exception("GitLab API not responding") + message = await main("1234567") + assert message == "" + + +@pytest.mark.asyncio +async def test_no_message_when_pipeline_not_failed(): + project_id = "176" + pipeline_id = "12345" + + with patch( + "pipeline_message.get_pipeline_status", new_callable=AsyncMock + ) as mock_get_pipeline_status: + mock_get_pipeline_status.return_value = "success" + + message = await main(pipeline_id, project_id) + assert ( + message == "" + ), f"Expected no message for successful pipeline, but got: {message}" + + +@pytest.mark.asyncio +async def test_single_problem_job_not_summarized(): + session = AsyncMock() + project_id = "176" + problem_jobs = [ + { + "id": 1234, + "name": "test-job", + "web_url": "http://example.com/job/1234", + "status": "canceled", + } + ] + + mock_response = AsyncMock() + mock_response.status = 200 + mock_response.text.return_value = "" # Empty CSV response for test + session.get.return_value = mock_response + + message = await process_problem_jobs(session, project_id, problem_jobs) + + assert "summary" not in message + assert "[test-job](http://example.com/job/1234)" in message + + +@pytest.mark.asyncio +@patch("pipeline_message.get_project_json", new_callable=AsyncMock) +@patch("pipeline_message.aiohttp.ClientSession", autospec=True) +async def test_get_trace_failures_no_response( + mock_client_session_cls, mock_get_project_json, caplog +): + caplog.set_level(logging.DEBUG) + namespace = "mesa" + mock_get_project_json.return_value = {"path": namespace} + + mock_get = AsyncMock() + mock_get.status = 404 + + mock_session_instance = mock_client_session_cls.return_value + mock_session_instance.get.return_value = mock_get + + job_id = 12345678 + job = {"id": job_id} + url = await get_trace_failures(mock_session_instance, "176", job) + + assert url == "" + + expected_log_message = f"No response from: https://mesa.pages.freedesktop.org/-/{namespace}/-/jobs/{job_id}/artifacts/results/summary/problems.html" + assert any(expected_log_message in record.message for record in caplog.records) + + +@pytest.mark.asyncio +@patch("pipeline_message.get_job_log", new_callable=AsyncMock) +async def test_search_job_log_for_errors(mock_get_job_log): + session = AsyncMock() + project_id = "176" + job = {"id": 12345} + + job_log = r""" +error_msg: something useful +[0m15:41:36.102: GL_KHR_no_error GL_KHR_texture_compression_astc_sliced_3d +1 error generated +3 errors generated. 
+-- Looking for strerror_r - found +-- Looking for strerror_s - not found +[49/176] Building CXX object lib/Support/CMakeFiles/LLVMSupport.dir/ErrorHandling.cpp.o +[127/2034] Building C object lib/Support/CMakeFiles/LLVMSupport.dir/regerror.c.o +-- Performing Test HAS_WERROR_GLOBAL_CTORS +-- Performing Test C_SUPPORTS_WERROR_UNGUARDED_AVAILABILITY_NEW - Success +-- Performing Test LLVM_LIBSTDCXX_SOFT_ERROR +error aborting +error_msg : None +error_type : Job +[0Ksection_end:1734694783:job_data +[0K +[0m11:39:43.438: [1mFinished executing LAVA job in the attempt #3 [0m +[0Ksection_end:1734694783:lava_submit +[0K +[0;31m[01:54] ERROR: lava_submit: ret code: 1 [0m + +[0;31m[01:54] ERROR: unknown-section: ret code: 1 [0m +section_end:1734694783:step_script +[0Ksection_start:1734694783:after_script +[0K[0K[36;1mRunning after_script[0;m[0;m +[32;1mRunning after script...[0;m +[32;1m$ curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://" | tar --warning=no-timestamp --zstd -x[0;m +zstd: /*stdin*\: unexpected end of file # noqa: W605 +tar: Child returned status 1 +tar: Error is not recoverable: exiting now +section_end:1734695025:after_script +[0K[0;33mWARNING: after_script failed, but job will continue unaffected: exit code 1[0;m +section_start:1734695025:upload_artifacts_on_failure +[0K[0K[36;1mUploading artifacts for failed job[0;m[0;m +[32;1mUploading artifacts...[0;m +results/: found 11 matching artifact files and directories[0;m +Uploading artifacts as "archive" to coordinator... 201 Created[0;m id[0;m=68509685 responseStatus[0;m=201 Created token[0;m=glcbt-64 +[32;1mUploading artifacts...[0;m +[0;33mWARNING: results/junit.xml: no matching files. Ensure that the artifact path is relative to the working directory (/builds/mesa/mesa)[0;m +[31;1mERROR: No files to upload [0;m +section_end:1734695027:upload_artifacts_on_failure +[0Ksection_start:1734695027:cleanup_file_variables +[0K[0K[36;1mCleaning up project directory and file based variables[0;m[0;m +section_end:1734695027:cleanup_file_variables +[0K[31;1mERROR: Job failed: exit code 1 +[0;m +[0;m + """ + + mock_get_job_log.return_value = job_log + + error_message = await search_job_log_for_errors(session, project_id, job) + assert "something useful" in error_message + + +@pytest.mark.asyncio +@patch("pipeline_message.get_job_log", new_callable=AsyncMock) +async def test_search_job_log_for_fatal_errors(mock_get_job_log): + session = AsyncMock() + project_id = "176" + job = {"id": 12345} + + job_log = r""" +[0m15:41:36.105: [15:41:31.951] fatal: something fatal +Uploading artifacts as "archive" to coordinator... 201 Created[0;m id[0;m=68509685 responseStatus[0;m=201 Created token[0;m=glcbt-64 +[32;1mUploading artifacts...[0;m +[0;33mWARNING: results/junit.xml: no matching files. 
Ensure that the artifact path is relative to the working directory (/builds/mesa/mesa)[0;m +[31;1mERROR: No files to upload [0;m +section_end:1734695027:upload_artifacts_on_failure +[0Ksection_start:1734695027:cleanup_file_variables +[0K[0K[36;1mCleaning up project directory and file based variables[0;m[0;m +section_end:1734695027:cleanup_file_variables +[0K[31;1mERROR: Job failed: exit code 1 +[0;m +[0;m + """ + + mock_get_job_log.return_value = job_log + + error_message = await search_job_log_for_errors(session, project_id, job) + assert "something fatal" in error_message + + +@pytest.mark.asyncio +@patch("pipeline_message.get_job_log", new_callable=AsyncMock) +async def test_search_job_log_for_errors_but_find_none(mock_get_job_log): + session = AsyncMock() + project_id = "176" + job = {"id": 12345} + + job_log = r""" +[0KRunning with gitlab-runner 17.4.0 (b92ee590)[0;m +[0K on fdo-equinix-m3l-30-placeholder_63 XmDXAt7xd, system ID: s_785ae19292ea[0;m +section_start:1734736110:prepare_executor +[0K[0K[36;1mPreparing the "docker" executor[0;m[0;m +[0KUsing Docker executor with image registry.freedesktop.org/mesa/mesa/debian +[0KAuthenticating with credentials from job payload (GitLab Registry)[0;m +[0KPulling docker image registry.freedesktop.org/mesa/mesa/debian/x86_64_pyuti +[0KUsing docker image sha256:ebc7b3fe89be4d390775303adddb33539c235a2663165d78d +[0Ksection_start:1734736124:prepare_script +[0K[0K[36;1mPreparing environment[0;m[0;m +Running on runner-xmdxat7xd-project-23076-concurrent-1 via fdo-equinix-m3l-30... +section_end:1734736125:prepare_script +[0Ksection_start:1734736125:get_sources +[0K[0K[36;1mGetting source from Git repository[0;m[0;m +[32;1m$ /host/bin/curl -s -L --cacert /host/ca-certificates.crt --retry 4 -f --retry-delay 60 https://gitlab. +Checking if the user of the pipeline is allowed... +Checking if the job's project is part of a well-known group... +Checking if the job is part of an official MR pipeline... +Thank you for contributing to freedesktop.org +Running pre-clone script: 'set -o xtrace +wget -q -O download-git-cache.sh https://gitlab.freedesktop.org/mesa/mesa/-/raw/0d43b4cba639b809ad0e08a065ce01846e262249/.gitlab-ci/download-git-cache.sh +bash download-git-cache.sh +rm download-git-cache.sh +[31;1m errors +[0K[31;1mERROR: +[31;1m error +[31;1m Here is a blank error: +/builds/mesa/mesa/bin/ci/test/test_pipeline_message.py:162: AssertionError +Uploading artifacts as "archive" to coordinator... 201 Created[0;m id[0;m=68509685 responseStatus[0;m=201 Created token[0;m=glcbt-64 +[32;1mUploading artifacts...[0;m +[0;33mWARNING: results/junit.xml: no matching files. 
Ensure that the artifact path is relative to the working directory (/builds/mesa/mesa)[0;m +[31;1mERROR: No files to upload [0;m +section_end:1734695027:upload_artifacts_on_failure +[0Ksection_start:1734695027:cleanup_file_variables +[0K[0K[36;1mCleaning up project directory and file based variables[0;m[0;m +section_end:1734695027:cleanup_file_variables +[0K[31;1mERROR: Job failed: exit code 1 +[0;m +[0;m + """ + + mock_get_job_log.return_value = job_log + + error_message = await search_job_log_for_errors(session, project_id, job) + assert error_message == "", f"Unexpected error message: {error_message}" diff --git a/mesalib/.gitlab-ci/bin/update_traces_checksum.py b/mesalib/.gitlab-ci/bin/update_traces_checksum.py index 064573d556..f050c8f6e5 100644 --- a/mesalib/.gitlab-ci/bin/update_traces_checksum.py +++ b/mesalib/.gitlab-ci/bin/update_traces_checksum.py @@ -20,10 +20,11 @@ import gitlab from colorama import Fore, Style -from gitlab_common import get_gitlab_project, read_token, wait_for_pipeline +from gitlab_common import (get_gitlab_project, read_token, wait_for_pipeline, + get_gitlab_pipeline_from_url, TOKEN_DIR, get_token_from_default_dir) -DESCRIPTION_FILE = "export PIGLIT_REPLAY_DESCRIPTION_FILE='.*/install/(.*)'$" +DESCRIPTION_FILE = "export PIGLIT_REPLAY_DESCRIPTION_FILE=.*/install/(.*)$" DEVICE_NAME = "export PIGLIT_REPLAY_DEVICE_NAME='(.*)'$" @@ -40,7 +41,7 @@ def gather_results( cur_job = project.jobs.get(job.id) # get variables print(f"👁 {job.name}...") - log: list[str] = cur_job.trace().decode("unicode_escape").splitlines() + log: list[str] = cur_job.trace().decode("unicode_escape", "ignore").splitlines() filename: str = '' dev_name: str = '' for logline in log: @@ -69,7 +70,7 @@ def gather_results( target = yaml.load(target_file) # parse artifact - results_json_bz2 = cur_job.artifact(path="results/results.json.bz2", streamed=False) + results_json_bz2 = cur_job.artifact("results/results.json.bz2") results_json = bz2.decompress(results_json_bz2).decode("utf-8", errors="replace") results = json.loads(results_json) @@ -96,7 +97,11 @@ def gather_results( continue if "label" in target['traces'][trace][dev_name]: - print(f'{dev_name}: {trace}: please verify that label {Fore.BLUE}{target["traces"][trace][dev_name]["label"]}{Style.RESET_ALL} is still valid') + print( + f"{dev_name}: {trace}: please verify that label " + f"{Fore.BLUE}{target['traces'][trace][dev_name]['label']}{Style.RESET_ALL} " + "is still valid" + ) print(Fore.GREEN + f'{dev_name}: {trace}: checksum updated' + Style.RESET_ALL) target['traces'][trace][dev_name]['checksum'] = checksum @@ -113,12 +118,20 @@ def parse_args() -> None: epilog="Example: update_traces_checksum.py --rev $(git rev-parse HEAD) " ) parser.add_argument( - "--rev", metavar="revision", help="repository git revision", required=True + "--rev", metavar="revision", help="repository git revision", ) parser.add_argument( "--token", metavar="token", - help="force GitLab token, otherwise it's read from ~/.config/gitlab-token", + type=str, + default=get_token_from_default_dir(), + help="Use the provided GitLab token or token file, " + f"otherwise it's read from {TOKEN_DIR / 'gitlab-token'}", + ) + parser.add_argument( + "--pipeline-url", + metavar="pipeline_url", + help="specify a pipeline url", ) return parser.parse_args() @@ -133,8 +146,15 @@ def parse_args() -> None: cur_project = get_gitlab_project(gl, "mesa") - print(f"Revision: {args.rev}") - (pipe, cur_project) = wait_for_pipeline([cur_project], args.rev) + if args.pipeline_url: + pipe, 
cur_project = get_gitlab_pipeline_from_url(gl, args.pipeline_url) + REV = pipe.sha + else: + if not args.rev: + print('error: the following arguments are required: --rev') + sys.exit(1) + print(f"Revision: {args.rev}") + (pipe, cur_project) = wait_for_pipeline([cur_project], args.rev) print(f"Pipeline: {pipe.web_url}") gather_results(cur_project, pipe) diff --git a/mesalib/.gitlab-ci/build/gitlab-ci.yml b/mesalib/.gitlab-ci/build/gitlab-ci.yml index 113daa1d26..63ff66dfdd 100644 --- a/mesalib/.gitlab-ci/build/gitlab-ci.yml +++ b/mesalib/.gitlab-ci/build/gitlab-ci.yml @@ -3,14 +3,17 @@ extends: .container+build-rules # Cancel job if a newer commit is pushed to the same branch interruptible: true - # Build jobs don't take more than 1-3 minutes. 5-8 min max on a fresh runner - # without a populated ccache. - # These jobs are never slow, either they finish within reasonable time or - # something has gone wrong and the job will never terminate, so we should - # instead timeout so that the retry mechanism can kick in. - # A few exception are made, see `timeout:` overrides in the rest of this - # file. - timeout: 30m + variables: + # Build jobs don't take more than 1-3 minutes. 5-8 min max on a fresh runner + # without a populated ccache. + # These jobs are never slow, either they finish within reasonable time or + # something has gone wrong and the job will never terminate, so we should + # instead timeout so that the retry mechanism can kick in. + # A few exception are made, see overrides in the rest of this file. + BUILD_JOB_TIMEOUT: 15m + timeout: 1h + # We don't want to download any previous job's artifacts + dependencies: [] artifacts: name: "mesa_${CI_JOB_NAME}" when: always @@ -54,17 +57,46 @@ extends: - .build-linux - .use-debian/x86_64_build - stage: build-x86_64 + stage: build-only variables: LLVM_VERSION: 15 script: - - .gitlab-ci/meson/build.sh + - &meson-build timeout --verbose ${BUILD_JOB_TIMEOUT_OVERRIDE:-$BUILD_JOB_TIMEOUT} .gitlab-ci/meson/build.sh + + +# Make sure this list stays the same as all the jobs with +# `stage: build-for-tests`, except for the windows job as +# explained below. +.build-for-tests-jobs: + - job: debian-testing + optional: true + - job: debian-testing-asan + optional: true + - job: debian-build-testing + optional: true + - job: debian-arm32 + optional: true + - job: debian-arm32-asan + optional: true + - job: debian-arm64 + optional: true + - job: debian-arm64-asan + optional: true + # Windows runners don't have more than one build right now, so there is + # no need to wait on the "first one" to be done. 
+ # - job: windows-msvc + # optional: true + - job: python-artifacts + optional: true + debian-testing: extends: - .meson-build - .ci-deqp-artifacts + stage: build-for-tests variables: + BUILD_JOB_TIMEOUT: 30m UNWIND: "enabled" DRI_LOADERS: > -D glx=dri @@ -73,11 +105,10 @@ debian-testing: -D glvnd=disabled -D platforms=x11,wayland GALLIUM_ST: > - -D dri3=enabled -D gallium-nine=true -D gallium-va=enabled -D gallium-rusticl=true - GALLIUM_DRIVERS: "swrast,virgl,radeonsi,zink,crocus,iris,i915,r300,svga" + GALLIUM_DRIVERS: "llvmpipe,softpipe,virgl,radeonsi,zink,crocus,iris,i915,r300,svga" VULKAN_DRIVERS: "swrast,amd,intel,intel_hasvk,virtio,nouveau" BUILDTYPE: "debugoptimized" EXTRA_OPTION: > @@ -88,7 +119,7 @@ debian-testing: S3_ARTIFACT_NAME: mesa-x86_64-default-${BUILDTYPE} LLVM_VERSION: 15 script: - - .gitlab-ci/meson/build.sh + - *meson-build - .gitlab-ci/prepare-artifacts.sh artifacts: reports: @@ -97,7 +128,9 @@ debian-testing: debian-testing-asan: extends: - debian-testing + stage: build-for-tests variables: + BUILD_JOB_TIMEOUT: 30m C_ARGS: > -Wno-error=stringop-truncation EXTRA_OPTION: > @@ -126,7 +159,9 @@ debian-testing-msan: # msan cannot fully work until it's used together with msan libc extends: - debian-clang + # `needs:` inherited from debian-clang variables: + BUILD_JOB_TIMEOUT: 30m # l_undef is incompatible with msan EXTRA_OPTION: -D b_sanitize=memory @@ -138,7 +173,7 @@ debian-testing-msan: # GLSL has some issues in sexpression reading. # gtest has issues in its test initialization. MESON_TEST_ARGS: "--suite glcpp --suite format" - GALLIUM_DRIVERS: "freedreno,iris,nouveau,kmsro,r300,r600,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus" + GALLIUM_DRIVERS: "freedreno,iris,nouveau,r300,r600,llvmpipe,softpipe,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus" VULKAN_DRIVERS: intel,amd,broadcom,virtio # Do a host build for intel-clc (msan complains about # uninitialized values in the LLVM libs) @@ -154,8 +189,37 @@ debian-testing-msan: -D intel-clc=enabled -D install-intel-clc=true +debian-testing-ubsan: + extends: + - debian-testing + stage: build-for-tests + timeout: 40m + variables: + C_ARGS: > + -Wno-error=stringop-overflow + -Wno-error=stringop-truncation + CPP_ARGS: > + -Wno-error=array-bounds + EXTRA_OPTION: > + -D b_sanitize=undefined + -D intel-clc=system + S3_ARTIFACT_NAME: "" + ARTIFACTS_DEBUG_SYMBOLS: 1 + HOST_BUILD_OPTIONS: > + -D build-tests=false + -D enable-glcpp-tests=false + -D gallium-opencl=disabled + -D gallium-drivers= + -D vulkan-drivers= + -D video-codecs= + -D glx=disabled + -D platforms= + -D intel-clc=enabled + -D install-intel-clc=true + debian-build-testing: extends: .meson-build + stage: build-for-tests variables: BUILDTYPE: debug UNWIND: "enabled" @@ -166,40 +230,43 @@ debian-build-testing: -D glvnd=disabled -D platforms=x11,wayland GALLIUM_ST: > - -D dri3=enabled -D gallium-extra-hud=true -D gallium-vdpau=enabled - -D gallium-omx=bellagio -D gallium-va=enabled -D gallium-xa=enabled - -D gallium-nine=true + -D gallium-nine=false -D gallium-rusticl=false - GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus" + GALLIUM_DRIVERS: "iris,nouveau,r300,r600,freedreno,llvmpipe,softpipe,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus" VULKAN_DRIVERS: swrast EXTRA_OPTION: > -D spirv-to-dxil=true -D osmesa=true -D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi - -D 
b_lto=true LLVM_VERSION: 15 S3_ARTIFACT_NAME: debian-build-testing - script: | - section_start lava-pytest "lava-pytest" - .gitlab-ci/lava/lava-pytest.sh - section_switch shellcheck "shellcheck" - .gitlab-ci/run-shellcheck.sh - section_switch yamllint "yamllint" - .gitlab-ci/run-yamllint.sh - section_end yamllint - .gitlab-ci/meson/build.sh - .gitlab-ci/prepare-artifacts.sh - timeout: 15m + script: + - *meson-build + - .gitlab-ci/prepare-artifacts.sh shader-db: stage: code-validation extends: - .use-debian/x86_64_build - - .container+build-rules + rules: + - !reference [.never-post-merge-rules, rules] + - !reference [.core-rules, rules] + # Keep this list in sync with the drivers tested in run-shader-db.sh + - !reference [.freedreno-common-rules, rules] + - !reference [.intel-common-rules, rules] + - !reference [.lima-rules, rules] + - !reference [.v3d-rules, rules] + - !reference [.vc4-rules, rules] + - !reference [.nouveau-rules, rules] + - !reference [.r300-rules, rules] + # Also run if this job's own config or script changes + - changes: + - .gitlab-ci/build/gitlab-ci.yml + - .gitlab-ci/run-shader-db.sh needs: - debian-build-testing variables: @@ -216,6 +283,9 @@ shader-db: # Test a release build with -Werror so new warnings don't sneak in. debian-release: extends: .meson-build + needs: + - !reference [.meson-build, needs] + - !reference [.build-for-tests-jobs] variables: LLVM_VERSION: 15 UNWIND: "enabled" @@ -228,16 +298,14 @@ debian-release: -D glvnd=disabled -D platforms=x11,wayland GALLIUM_ST: > - -D dri3=enabled -D gallium-extra-hud=true -D gallium-vdpau=enabled - -D gallium-omx=disabled -D gallium-va=enabled -D gallium-xa=enabled -D gallium-nine=false -D gallium-rusticl=false -D llvm=enabled - GALLIUM_DRIVERS: "i915,iris,nouveau,kmsro,freedreno,r300,svga,swrast,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,crocus" + GALLIUM_DRIVERS: "i915,iris,nouveau,freedreno,r300,svga,llvmpipe,softpipe,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,crocus" VULKAN_DRIVERS: "amd,imagination-experimental,microsoft-experimental" EXTRA_OPTION: > -D spirv-to-dxil=true @@ -249,19 +317,23 @@ debian-release: BUILDTYPE: "release" S3_ARTIFACT_NAME: "mesa-x86_64-default-${BUILDTYPE}" script: - - .gitlab-ci/meson/build.sh + - *meson-build - 'if [ -n "$MESA_CI_PERFORMANCE_ENABLED" ]; then .gitlab-ci/prepare-artifacts.sh; fi' alpine-build-testing: extends: - .meson-build - .use-alpine/x86_64_build - stage: build-x86_64 + needs: + - !reference [.use-alpine/x86_64_build, needs] + - !reference [.build-for-tests-jobs] variables: + BUILD_JOB_TIMEOUT: 30m BUILDTYPE: "release" C_ARGS: > -Wno-error=cpp -Wno-error=array-bounds + -Wno-error=stringop-overflow -Wno-error=stringop-overread DRI_LOADERS: > -D glx=disabled @@ -269,13 +341,10 @@ alpine-build-testing: -D egl=enabled -D glvnd=disabled -D platforms=wayland - LLVM_VERSION: "16" - GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink" + GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,lima,nouveau,panfrost,r300,r600,radeonsi,svga,llvmpipe,softpipe,tegra,v3d,vc4,virgl,zink" GALLIUM_ST: > - -D dri3=enabled -D gallium-extra-hud=true -D gallium-vdpau=disabled - -D gallium-omx=disabled -D gallium-va=enabled -D gallium-xa=disabled -D gallium-nine=true @@ -283,26 +352,32 @@ alpine-build-testing: -D gles1=disabled -D gles2=enabled -D llvm=enabled + -D llvm-orcjit=true -D microsoft-clc=disabled -D shared-llvm=enabled UNWIND: "disabled" - VULKAN_DRIVERS: 
"amd,broadcom,freedreno,intel,imagination-experimental" + VULKAN_DRIVERS: "amd,asahi,broadcom,freedreno,intel,imagination-experimental" fedora-release: extends: - .meson-build - .use-fedora/x86_64_build + needs: + - !reference [.use-fedora/x86_64_build, needs] + - !reference [.build-for-tests-jobs] variables: BUILDTYPE: "release" - C_LINK_ARGS: > + # array-bounds are pure non-LTO gcc buggy warning + # maybe-uninitialized is misfiring in nir_lower_gs_intrinsics.c, and + # a "maybe" warning should never be an error anyway. + C_ARGS: > -Wno-error=stringop-overflow -Wno-error=stringop-overread + -Wno-error=array-bounds + -Wno-error=maybe-uninitialized CPP_ARGS: > -Wno-error=dangling-reference -Wno-error=overloaded-virtual - CPP_LINK_ARGS: > - -Wno-error=stringop-overflow - -Wno-error=stringop-overread DRI_LOADERS: > -D glx=dri -D gbm=enabled @@ -310,20 +385,16 @@ fedora-release: -D glvnd=enabled -D platforms=x11,wayland EXTRA_OPTION: > - -D b_lto=true -D osmesa=true - -D selinux=true -D tools=drm-shim,etnaviv,freedreno,glsl,intel,nir,nouveau,lima,panfrost,imagination -D vulkan-layers=device-select,overlay -D intel-rt=enabled -D imagination-srv=true -D teflon=true - GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,i915,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink" + GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,i915,iris,lima,nouveau,panfrost,r300,r600,radeonsi,svga,llvmpipe,softpipe,tegra,v3d,vc4,virgl,zink" GALLIUM_ST: > - -D dri3=enabled -D gallium-extra-hud=true -D gallium-vdpau=enabled - -D gallium-omx=disabled -D gallium-va=enabled -D gallium-xa=enabled -D gallium-nine=false @@ -335,13 +406,16 @@ fedora-release: -D shared-llvm=enabled LLVM_VERSION: "" UNWIND: "disabled" - VULKAN_DRIVERS: "amd,broadcom,freedreno,imagination-experimental,intel,intel_hasvk" + VULKAN_DRIVERS: "amd,asahi,broadcom,freedreno,imagination-experimental,intel,intel_hasvk" debian-android: extends: - .meson-cross - .use-debian/android_build - .ci-deqp-artifacts + needs: + - !reference [.use-debian/android_build, needs] + - !reference [.build-for-tests-jobs] variables: BUILDTYPE: debug UNWIND: "disabled" @@ -362,17 +436,16 @@ debian-android: -D egl=enabled -D glvnd=disabled -D platforms=android + FORCE_FALLBACK_FOR: llvm EXTRA_OPTION: > -D android-stub=true - -D llvm=disabled -D platform-sdk-version=33 + -D cpp_rtti=false -D valgrind=disabled -D android-libbacktrace=disabled -D intel-clc=system GALLIUM_ST: > - -D dri3=disabled -D gallium-vdpau=disabled - -D gallium-omx=disabled -D gallium-va=disabled -D gallium-xa=disabled -D gallium-nine=false @@ -393,19 +466,25 @@ debian-android: ARTIFACTS_DEBUG_SYMBOLS: 1 S3_ARTIFACT_NAME: mesa-x86_64-android-${BUILDTYPE} script: - - CROSS=aarch64-linux-android GALLIUM_DRIVERS=etnaviv,freedreno,lima,panfrost,vc4,v3d VULKAN_DRIVERS=freedreno,broadcom,virtio .gitlab-ci/meson/build.sh + - export CROSS=aarch64-linux-android + - export GALLIUM_DRIVERS=etnaviv,freedreno,lima,panfrost,vc4,v3d + - export VULKAN_DRIVERS=freedreno,broadcom,virtio + - *meson-build # x86_64 build: # Can't do Intel because gen_decoder.c currently requires libexpat, which # is not a dependency that AOSP wants to accept. Can't do Radeon Gallium # drivers because they requires LLVM, which we don't have an Android build # of. 
- - CROSS=x86_64-linux-android GALLIUM_DRIVERS=iris,virgl VULKAN_DRIVERS=amd,intel .gitlab-ci/meson/build.sh + - export CROSS=x86_64-linux-android + - export GALLIUM_DRIVERS=iris,virgl,zink,softpipe,llvmpipe,swrast + - export VULKAN_DRIVERS=virtio,swrast + - .gitlab-ci/create-llvm-meson-wrap-file.sh + - *meson-build - .gitlab-ci/prepare-artifacts.sh .meson-cross: extends: - .meson-build - stage: build-misc variables: UNWIND: "disabled" DRI_LOADERS: > @@ -415,9 +494,7 @@ debian-android: -D platforms=x11,wayland -D osmesa=false GALLIUM_ST: > - -D dri3=enabled -D gallium-vdpau=disabled - -D gallium-omx=disabled -D gallium-va=disabled -D gallium-xa=disabled -D gallium-nine=false @@ -429,8 +506,8 @@ debian-android: needs: - debian/arm64_build variables: - VULKAN_DRIVERS: freedreno,broadcom - GALLIUM_DRIVERS: "etnaviv,freedreno,kmsro,lima,nouveau,panfrost,swrast,tegra,v3d,vc4,zink" + VULKAN_DRIVERS: asahi,freedreno,broadcom + GALLIUM_DRIVERS: "etnaviv,freedreno,lima,nouveau,panfrost,llvmpipe,softpipe,tegra,v3d,vc4,zink" BUILDTYPE: "debugoptimized" tags: - aarch64 @@ -439,10 +516,14 @@ debian-arm32: extends: - .meson-arm - .ci-deqp-artifacts + stage: build-for-tests variables: CROSS: armhf DRI_LOADERS: -D glvnd=disabled + # remove asahi & llvmpipe from the .meson-arm list because here we have llvm=disabled + VULKAN_DRIVERS: freedreno,broadcom + GALLIUM_DRIVERS: "etnaviv,freedreno,lima,nouveau,panfrost,softpipe,tegra,v3d,vc4,zink" EXTRA_OPTION: > -D llvm=disabled -D valgrind=disabled @@ -451,12 +532,13 @@ debian-arm32: # tempfiles in our artifacts. ARTIFACTS_DEBUG_SYMBOLS: 1 script: - - .gitlab-ci/meson/build.sh + - *meson-build - .gitlab-ci/prepare-artifacts.sh debian-arm32-asan: extends: - debian-arm32 + stage: build-for-tests variables: DRI_LOADERS: -D glvnd=disabled @@ -473,33 +555,35 @@ debian-arm64: extends: - .meson-arm - .ci-deqp-artifacts + stage: build-for-tests variables: C_ARGS: > -Wno-error=array-bounds -Wno-error=stringop-truncation - VULKAN_DRIVERS: "freedreno,broadcom,panfrost,imagination-experimental" + VULKAN_DRIVERS: "asahi,freedreno,broadcom,panfrost,imagination-experimental" DRI_LOADERS: -D glvnd=disabled EXTRA_OPTION: > - -D llvm=disabled -D valgrind=disabled -D imagination-srv=true -D perfetto=true -D freedreno-kmds=msm,virtio -D teflon=true + GALLIUM_ST: + -D gallium-rusticl=true S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE} script: - - .gitlab-ci/meson/build.sh + - *meson-build - .gitlab-ci/prepare-artifacts.sh debian-arm64-asan: extends: - debian-arm64 + stage: build-for-tests variables: DRI_LOADERS: -D glvnd=disabled EXTRA_OPTION: > - -D llvm=disabled -D b_sanitize=address -D valgrind=disabled -D tools=dlclose-skip @@ -507,12 +591,35 @@ debian-arm64-asan: S3_ARTIFACT_NAME: mesa-arm64-asan-${BUILDTYPE} MESON_TEST_ARGS: "--no-suite mesa:compiler" +debian-arm64-ubsan: + extends: + - debian-arm64 + stage: build-for-tests + variables: + C_ARGS: > + -Wno-error=array-bounds + -Wno-error=stringop-overflow + -Wno-error=stringop-truncation + CPP_ARGS: > + -Wno-error=array-bounds + -fno-var-tracking-assignments + DRI_LOADERS: + -D glvnd=disabled + EXTRA_OPTION: > + -D b_sanitize=undefined + ARTIFACTS_DEBUG_SYMBOLS: 1 + S3_ARTIFACT_NAME: mesa-arm64-ubsan-${BUILDTYPE} + MESON_TEST_ARGS: "--no-suite mesa:compiler" + debian-arm64-build-test: extends: - .meson-arm - .ci-deqp-artifacts + needs: + - !reference [.meson-arm, needs] + - !reference [.build-for-tests-jobs] variables: - VULKAN_DRIVERS: "amd" + VULKAN_DRIVERS: "amd,nouveau" DRI_LOADERS: -D glvnd=disabled EXTRA_OPTION: > @@ 
-521,6 +628,10 @@ debian-arm64-build-test: debian-arm64-release: extends: - debian-arm64 + stage: build-only + needs: + - !reference [debian-arm64, needs] + - !reference [.build-for-tests-jobs] variables: BUILDTYPE: release S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE} @@ -529,11 +640,35 @@ debian-arm64-release: -Wno-error=stringop-truncation -Wno-error=stringop-overread script: - - .gitlab-ci/meson/build.sh + - *meson-build - 'if [ -n "$MESA_CI_PERFORMANCE_ENABLED" ]; then .gitlab-ci/prepare-artifacts.sh; fi' +debian-no-libdrm: + extends: + - .meson-arm + stage: build-only + needs: + - !reference [.meson-arm, needs] + - !reference [.build-for-tests-jobs] + variables: + VULKAN_DRIVERS: freedreno + GALLIUM_DRIVERS: "zink,llvmpipe" + BUILDTYPE: release + C_ARGS: > + -Wno-error=array-bounds + -Wno-error=stringop-truncation + -Wno-error=stringop-overread + EXTRA_OPTION: > + -D freedreno-kmds=kgsl + -D glx=disabled + -D gbm=disabled + -D egl=disabled + debian-clang: extends: .meson-build + needs: + - !reference [.meson-build, needs] + - !reference [.build-for-tests-jobs] variables: BUILDTYPE: debug LLVM_VERSION: 15 @@ -556,10 +691,8 @@ debian-clang: -D glvnd=enabled -D platforms=x11,wayland GALLIUM_ST: > - -D dri3=enabled -D gallium-extra-hud=true -D gallium-vdpau=enabled - -D gallium-omx=bellagio -D gallium-va=enabled -D gallium-xa=enabled -D gallium-nine=true @@ -568,9 +701,8 @@ debian-clang: -D llvm=enabled -D microsoft-clc=disabled -D shared-llvm=enabled - -D opencl-spirv=true -D shared-glapi=enabled - GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi" + GALLIUM_DRIVERS: "iris,nouveau,r300,r600,freedreno,llvmpipe,softpipe,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi" VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio,swrast,panfrost,imagination-experimental,microsoft-experimental,nouveau EXTRA_OPTION: -D spirv-to-dxil=true @@ -588,16 +720,16 @@ debian-clang: debian-clang-release: extends: debian-clang + # `needs:` inherited from debian-clang variables: + BUILD_JOB_TIMEOUT: 30m BUILDTYPE: "release" DRI_LOADERS: > -D glx=xlib -D platforms=x11,wayland GALLIUM_ST: > - -D dri3=enabled -D gallium-extra-hud=true -D gallium-vdpau=enabled - -D gallium-omx=bellagio -D gallium-va=enabled -D gallium-xa=enabled -D gallium-nine=true @@ -606,7 +738,6 @@ debian-clang-release: -D llvm=enabled -D microsoft-clc=disabled -D shared-llvm=enabled - -D opencl-spirv=true -D shared-glapi=disabled windows-msvc: @@ -614,7 +745,7 @@ windows-msvc: - .build-windows - .use-windows_build_msvc - .windows-build-rules - stage: build-misc + stage: build-for-tests script: - pwsh -ExecutionPolicy RemoteSigned .\.gitlab-ci\windows\mesa_build.ps1 artifacts: @@ -624,7 +755,11 @@ windows-msvc: debian-vulkan: extends: .meson-build + needs: + - !reference [.meson-build, needs] + - !reference [.build-for-tests-jobs] variables: + BUILD_JOB_TIMEOUT: 30m BUILDTYPE: debug LLVM_VERSION: 15 UNWIND: "disabled" @@ -639,9 +774,7 @@ debian-vulkan: -D platforms=x11,wayland -D osmesa=false GALLIUM_ST: > - -D dri3=enabled -D gallium-vdpau=disabled - -D gallium-omx=disabled -D gallium-va=disabled -D gallium-xa=disabled -D gallium-nine=false @@ -650,7 +783,7 @@ debian-vulkan: -D c_args=-fno-sanitize-recover=all -D cpp_args=-fno-sanitize-recover=all UBSAN_OPTIONS: "print_stacktrace=1" - VULKAN_DRIVERS: 
amd,broadcom,freedreno,intel,intel_hasvk,panfrost,virtio,imagination-experimental,microsoft-experimental,nouveau + VULKAN_DRIVERS: amd,asahi,broadcom,freedreno,intel,intel_hasvk,panfrost,virtio,imagination-experimental,microsoft-experimental,nouveau EXTRA_OPTION: > -D vulkan-layers=device-select,overlay -D build-aco-tests=true @@ -661,11 +794,14 @@ debian-x86_32: extends: - .meson-cross - .use-debian/x86_32_build + needs: + - !reference [.use-debian/x86_32_build, needs] + - !reference [.build-for-tests-jobs] variables: BUILDTYPE: debug CROSS: i386 - VULKAN_DRIVERS: intel,amd,swrast,virtio - GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,swrast,virgl,zink,crocus,d3d12" + VULKAN_DRIVERS: intel,amd,swrast,virtio,panfrost + GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,llvmpipe,softpipe,virgl,zink,crocus,d3d12,panfrost" LLVM_VERSION: 15 DRI_LOADERS: -D glvnd=disabled @@ -684,18 +820,24 @@ debian-x86_32: -D intel-clc=enabled -D install-intel-clc=true +# While s390 is dead, s390x is very much alive, and one of the last major +# big-endian platforms, so it provides useful coverage. +# In case of issues with this job, contact @ajax debian-s390x: extends: - - debian-ppc64el + - .meson-cross - .use-debian/s390x_build - - .s390x-rules + needs: + - !reference [.use-debian/s390x_build, needs] + - !reference [.build-for-tests-jobs] tags: - kvm variables: + BUILDTYPE: debug CROSS: s390x - GALLIUM_DRIVERS: "swrast,zink" + GALLIUM_DRIVERS: "llvmpipe,virgl,zink" LLVM_VERSION: 15 - VULKAN_DRIVERS: "swrast" + VULKAN_DRIVERS: "swrast,virtio" DRI_LOADERS: -D glvnd=disabled @@ -703,11 +845,29 @@ debian-ppc64el: extends: - .meson-cross - .use-debian/ppc64el_build - - .ppc64el-rules + needs: + - !reference [.use-debian/ppc64el_build, needs] + - !reference [.build-for-tests-jobs] variables: BUILDTYPE: debug CROSS: ppc64el - GALLIUM_DRIVERS: "nouveau,radeonsi,swrast,virgl,zink" + GALLIUM_DRIVERS: "nouveau,radeonsi,llvmpipe,softpipe,virgl,zink" VULKAN_DRIVERS: "amd,swrast" DRI_LOADERS: -D glvnd=disabled + +# This job emits our scripts into artifacts so they can be reused for +# job submission to hardware devices. +python-artifacts: + stage: build-for-tests + extends: + - .use-debian/x86_64_pyutils + - .build-common + variables: + GIT_STRATEGY: fetch + S3_ARTIFACT_NAME: mesa-python-ci-artifacts + timeout: 10m + script: + - .gitlab-ci/prepare-artifacts-python.sh + tags: + - placeholder-job diff --git a/mesalib/.gitlab-ci/common/capture-devcoredump.sh b/mesalib/.gitlab-ci/common/capture-devcoredump.sh index 302b9208ba..f5f615d69e 100644 --- a/mesalib/.gitlab-ci/common/capture-devcoredump.sh +++ b/mesalib/.gitlab-ci/common/capture-devcoredump.sh @@ -7,7 +7,7 @@ while true; do devcds=$(find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null) for i in $devcds; do echo "Found a devcoredump at $i." - if cp $i /results/first.devcore; then + if cp $i $RESULTS_DIR/first.devcore; then echo 1 > $i echo "Saved to the job artifacts at /first.devcore" exit 0 @@ -23,7 +23,7 @@ while true; do rm "$tmpfile" else echo "Found an i915 error state at $i size=$filesize." 
- if cp "$tmpfile" /results/first.i915_error_state; then + if cp "$tmpfile" $RESULTS_DIR/first.i915_error_state; then rm "$tmpfile" echo 1 > "$i" echo "Saved to the job artifacts at /first.i915_error_state" diff --git a/mesalib/.gitlab-ci/common/generate-env.sh b/mesalib/.gitlab-ci/common/generate-env.sh index e461379f78..428e7fa6f8 100644 --- a/mesalib/.gitlab-ci/common/generate-env.sh +++ b/mesalib/.gitlab-ci/common/generate-env.sh @@ -31,16 +31,11 @@ VARS=( CROSVM_GPU_ARGS CURRENT_SECTION DEQP_BIN_DIR - DEQP_CONFIG - DEQP_EXPECTED_RENDERER + DEQP_FORCE_ASAN DEQP_FRACTION - DEQP_HEIGHT - DEQP_RESULTS_DIR - DEQP_RUNNER_OPTIONS + DEQP_RUNNER_MAX_FAILS DEQP_SUITE DEQP_TEMP_DIR - DEQP_VER - DEQP_WIDTH DEVICE_NAME DRIVER_NAME EGL_PLATFORM @@ -56,7 +51,6 @@ VARS=( GTEST GTEST_FAILS GTEST_FRACTION - GTEST_RESULTS_DIR GTEST_RUNNER_OPTIONS GTEST_SKIPS HWCI_FREQ_MAX @@ -64,6 +58,7 @@ VARS=( HWCI_KVM HWCI_START_WESTON HWCI_START_XORG + HWCI_TEST_ARGS HWCI_TEST_SCRIPT IR3_SHADER_DEBUG JOB_ARTIFACTS_BASE @@ -84,6 +79,7 @@ VARS=( MESA_IMAGE_PATH MESA_IMAGE_TAG MESA_LOADER_DRIVER_OVERRIDE + MESA_SPIRV_LOG_LEVEL MESA_TEMPLATES_COMMIT MESA_VK_ABORT_ON_DEVICE_LOSS MESA_VK_IGNORE_CONFORMANCE_WARNING @@ -106,6 +102,7 @@ VARS=( PIGLIT_REPLAY_REFERENCE_IMAGES_BASE PIGLIT_REPLAY_SUBCOMMAND PIGLIT_RESULTS + PIGLIT_RUNNER_OPTIONS PIGLIT_TESTS PIGLIT_TRACES_FILE PIPELINE_ARTIFACTS_BASE @@ -120,9 +117,6 @@ VARS=( VIRGL_RENDER_SERVER WAFFLE_PLATFORM VK_DRIVER - VKD3D_PROTON_RESULTS - VKD3D_CONFIG - VKD3D_TEST_EXCLUDE ZINK_DESCRIPTORS ZINK_DEBUG LVP_POISON_MEMORY diff --git a/mesalib/.gitlab-ci/common/init-stage2.sh b/mesalib/.gitlab-ci/common/init-stage2.sh index 1c280a13fc..7043fa3166 100644 --- a/mesalib/.gitlab-ci/common/init-stage2.sh +++ b/mesalib/.gitlab-ci/common/init-stage2.sh @@ -47,6 +47,13 @@ for path in '/dut-env-vars.sh' '/set-job-env-vars.sh' './set-job-env-vars.sh'; d done . "$SCRIPTS_DIR"/setup-test-env.sh +# Flush out anything which might be stuck in a serial buffer +echo +echo +echo + +section_switch init_stage2 "Pre-testing hardware setup" + set -ex # Set up any devices required by the jobs @@ -133,13 +140,14 @@ if [ "$HWCI_FREQ_MAX" = "true" ]; then # and enable throttling detection & reporting. # Additionally, set the upper limit for CPU scaling frequency to 65% of the # maximum permitted, as an additional measure to mitigate thermal throttling. - /intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d + /install/common/intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d fi # Start a little daemon to capture sysfs records and produce a JSON file -if [ -x /kdl.sh ]; then +KDL_PATH=/install/common/kdl.sh +if [ -x "$KDL_PATH" ]; then echo "launch kdl.sh!" - /kdl.sh & + $KDL_PATH & BACKGROUND_PIDS="$! $BACKGROUND_PIDS" else echo "kdl.sh not found!" @@ -153,8 +161,9 @@ fi # Start a little daemon to capture the first devcoredump we encounter. (They # expire after 5 minutes, so we poll for them). -if [ -x /capture-devcoredump.sh ]; then - /capture-devcoredump.sh & +CAPTURE_DEVCOREDUMP=/install/common/capture-devcoredump.sh +if [ -x "$CAPTURE_DEVCOREDUMP" ]; then + $CAPTURE_DEVCOREDUMP & BACKGROUND_PIDS="$! 
$BACKGROUND_PIDS" fi @@ -168,7 +177,7 @@ export VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json" if [ -n "$HWCI_START_XORG" ]; then echo "touch /xorg-started; sleep 100000" > /xorg-script env \ - xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log & + xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile "$RESULTS_DIR/Xorg.0.log" & BACKGROUND_PIDS="$! $BACKGROUND_PIDS" # Wait for xorg to be ready for connections. @@ -200,15 +209,18 @@ if [ -n "$HWCI_START_WESTON" ]; then while [ ! -S "$WESTON_X11_SOCK" ]; do sleep 1; done fi +set +x + +section_end init_stage2 + +echo "Running ${HWCI_TEST_SCRIPT} ${HWCI_TEST_ARGS} ..." + set +e -bash -c ". $SCRIPTS_DIR/setup-test-env.sh && $HWCI_TEST_SCRIPT" -EXIT_CODE=$? +$HWCI_TEST_SCRIPT ${HWCI_TEST_ARGS:-}; EXIT_CODE=$? set -e -# Let's make sure the results are always stored in current working directory -mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true - -[ ${EXIT_CODE} -ne 0 ] || rm -rf results/trace/"$PIGLIT_REPLAY_DEVICE_NAME" +section_start post_test_cleanup "Cleaning up after testing, uploading results" +set -x # Make sure that capture-devcoredump is done before we start trying to tar up # artifacts -- if it's writing while tar is reading, tar will throw an error and @@ -226,11 +238,12 @@ fi [ ${EXIT_CODE} -eq 0 ] && RESULT=pass || RESULT=fail set +x +section_end post_test_cleanup # Print the final result; both bare-metal and LAVA look for this string to get # the result of our run, so try really hard to get it out rather than losing # the run. The device gets shut down right at this point, and a630 seems to # enjoy corrupting the last line of serial output before shutdown. -for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT"; sleep 1; echo; done +for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT, exit_code: $EXIT_CODE"; sleep 1; echo; done exit $EXIT_CODE diff --git a/mesalib/.gitlab-ci/common/intel-gpu-freq.sh b/mesalib/.gitlab-ci/common/intel-gpu-freq.sh index 8d0166eac4..78d572c37c 100644 --- a/mesalib/.gitlab-ci/common/intel-gpu-freq.sh +++ b/mesalib/.gitlab-ci/common/intel-gpu-freq.sh @@ -560,7 +560,8 @@ set_cpu_freq_max() { read_cpu_freq_info ${cpu_index} n ${CAP_CPU_FREQ_INFO} || { res=$?; continue; } target_freq=$(compute_cpu_freq_set "${CPU_SET_MAX_FREQ}") - [ -z "${target_freq}" ] && { res=$?; continue; } + tf_res=$? + [ -z "${target_freq}" ] && { res=$tf_res; continue; } log INFO "Setting CPU%s max scaling freq to %s Hz" ${cpu_index} "${target_freq}" [ -n "${DRY_RUN}" ] && continue diff --git a/mesalib/.gitlab-ci/common/kdl.sh b/mesalib/.gitlab-ci/common/kdl.sh index 4e8a8d5d3f..0459d8e76d 100644 --- a/mesalib/.gitlab-ci/common/kdl.sh +++ b/mesalib/.gitlab-ci/common/kdl.sh @@ -1,24 +1,18 @@ #!/usr/bin/env bash # shellcheck disable=SC1091 # the path is created in build-kdl and # here we check if it exists +# shellcheck disable=SC2086 # we want the arguments to be expanded -terminate() { - echo "ci-kdl.sh caught SIGTERM signal! propagating to child processes" - for job in $(jobs -p) - do - kill -15 "$job" - done -} - -trap terminate SIGTERM - -if [ -f /ci-kdl.venv/bin/activate ]; then - source /ci-kdl.venv/bin/activate - /ci-kdl.venv/bin/python /ci-kdl.venv/bin/ci-kdl | tee -a /results/kdl.log & - child=$! - wait $child - mv kdl_*.json /results/kdl.json -else - echo -e "Not possible to activate ci-kdl virtual environment" +if !
[ -f /ci-kdl/bin/activate ]; then + echo -e "ci-kdl not installed; not monitoring temperature" + exit 0 fi +KDL_ARGS=" + --output-file=${RESULTS_DIR}/kdl.json + --log-level=WARNING + --num-samples=-1 +" + +source /ci-kdl/bin/activate +exec /ci-kdl/bin/ci-kdl ${KDL_ARGS} diff --git a/mesalib/.gitlab-ci/common/start-x.sh b/mesalib/.gitlab-ci/common/start-x.sh deleted file mode 100644 index ccd132358c..0000000000 --- a/mesalib/.gitlab-ci/common/start-x.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -set -ex - -_XORG_SCRIPT="/xorg-script" -_FLAG_FILE="/xorg-started" - -echo "touch ${_FLAG_FILE}; sleep 100000" > "${_XORG_SCRIPT}" -if [ "x$1" != "x" ]; then - export LD_LIBRARY_PATH="${1}/lib" - export LIBGL_DRIVERS_PATH="${1}/lib/dri" -fi -xinit /bin/sh "${_XORG_SCRIPT}" -- /usr/bin/Xorg vt45 -noreset -s 0 -dpms -logfile /Xorg.0.log & - -# Wait for xorg to be ready for connections. -for _ in 1 2 3 4 5; do - if [ -e "${_FLAG_FILE}" ]; then - break - fi - sleep 5 -done diff --git a/mesalib/.gitlab-ci/container/alpine/x86_64_build.sh b/mesalib/.gitlab-ci/container/alpine/x86_64_build.sh index ea3c5aeea1..f8e5b2c24b 100644 --- a/mesalib/.gitlab-ci/container/alpine/x86_64_build.sh +++ b/mesalib/.gitlab-ci/container/alpine/x86_64_build.sh @@ -6,9 +6,10 @@ # ALPINE_X86_64_BUILD_TAG set -e -set -o xtrace -export LLVM_VERSION="${LLVM_VERSION:=16}" +. .gitlab-ci/setup-test-env.sh + +set -o xtrace EPHEMERAL=( ) @@ -18,7 +19,7 @@ DEPS=( bash bison ccache - clang16-dev + "clang${LLVM_VERSION}-dev" cmake clang-dev coreutils @@ -29,25 +30,31 @@ git gettext glslang + graphviz linux-headers - llvm16-static - llvm16-dev + "llvm${LLVM_VERSION}-static" + "llvm${LLVM_VERSION}-dev" meson mold + musl-dev expat-dev elfutils-dev + libclc-dev libdrm-dev - libselinux-dev libva-dev libpciaccess-dev zlib-dev python3-dev + py3-clang py3-cparser py3-mako py3-packaging + py3-pip py3-ply + py3-yaml vulkan-headers spirv-tools-dev + spirv-llvm-translator-dev util-macros wayland-dev wayland-protocols @@ -55,15 +62,20 @@ apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}" -. .gitlab-ci/container/build-llvm-spirv.sh - -. .gitlab-ci/container/build-libclc.sh +pip3 install --break-system-packages sphinx===5.1.1 hawkmoth===0.16.0 . .gitlab-ci/container/container_pre_build.sh ############### Uninstall the build software +# too many vendor binaries, just keep the ones we need +find /usr/share/clc \ + \( -type f -o -type l \) \ + ! -name 'spirv-mesa3d-.spv' \ + ! -name 'spirv64-mesa3d-.spv' \ + -delete + apk del "${EPHEMERAL[@]}" . .gitlab-ci/container/container_post_build.sh diff --git a/mesalib/.gitlab-ci/container/alpine/x86_64_lava_ssh_client.sh b/mesalib/.gitlab-ci/container/alpine/x86_64_lava_ssh_client.sh index 585e30323c..379fcf6550 100644 --- a/mesalib/.gitlab-ci/container/alpine/x86_64_lava_ssh_client.sh +++ b/mesalib/.gitlab-ci/container/alpine/x86_64_lava_ssh_client.sh @@ -4,6 +4,9 @@ # shellcheck disable=SC1091 set -e + +.
.gitlab-ci/setup-test-env.sh + set -o xtrace EPHEMERAL=( diff --git a/mesalib/.gitlab-ci/container/baremetal_build.sh b/mesalib/.gitlab-ci/container/baremetal_build.sh index fcd13de3e5..a10a86668f 100644 --- a/mesalib/.gitlab-ci/container/baremetal_build.sh +++ b/mesalib/.gitlab-ci/container/baremetal_build.sh @@ -31,7 +31,7 @@ if [[ $arch == "arm64" ]]; then -O "${KERNEL_IMAGE_BASE}"/arm64/cheza-kernel DEVICE_TREES="" - DEVICE_TREES="$DEVICE_TREES apq8016-sbc.dtb" + DEVICE_TREES="$DEVICE_TREES apq8016-sbc-usb-host.dtb" DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb" DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb" DEVICE_TREES="$DEVICE_TREES imx8mq-nitrogen.dtb" diff --git a/mesalib/.gitlab-ci/container/build-android-x86_64-llvm.sh b/mesalib/.gitlab-ci/container/build-android-x86_64-llvm.sh new file mode 100644 index 0000000000..a7e015e3ac --- /dev/null +++ b/mesalib/.gitlab-ci/container/build-android-x86_64-llvm.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +set -exu + +# If CI vars are not set, assign an empty value; this prevents -u from failing +: "${CI:=}" +: "${CI_PROJECT_PATH:=}" + +# Early check for required env variables, relies on `set -u` +: "$ANDROID_SDK_VERSION" +: "$ANDROID_NDK" +: "$ANDROID_LLVM_VERSION" +: "$ANDROID_LLVM_ARTIFACT_NAME" +: "$S3_JWT_FILE" +: "$S3_HOST" +: "$S3_ANDROID_BUCKET" + +# In CI, check that the auth file used later on is non-empty +if [ -n "$CI" ] && [ ! -s "${S3_JWT_FILE}" ]; then + echo "Error: ${S3_JWT_FILE} is empty." 1>&2 + exit 1 +fi + +if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"; then + echo "Artifact ${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst already exists, skip re-building." + + # Download prebuilt LLVM libraries for Android when they have not changed, + # to save some time + curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ + -o "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" + tar -C / --zstd -xf "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" + rm "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" + + exit +fi + +# Install some dependencies needed to build LLVM +EPHEMERAL=( + ninja-build + unzip +) + +apt-get update +apt-get install -y --no-install-recommends --no-remove "${EPHEMERAL[@]}" + +ANDROID_NDK_ROOT="/${ANDROID_NDK}" +if [ ! -d "$ANDROID_NDK_ROOT" ]; +then + curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ + -o "${ANDROID_NDK}.zip" \ + "https://dl.google.com/android/repository/${ANDROID_NDK}-linux.zip" + unzip -d / "${ANDROID_NDK}.zip" "$ANDROID_NDK/source.properties" "$ANDROID_NDK/build/cmake/*" "$ANDROID_NDK/toolchains/llvm/*" + rm "${ANDROID_NDK}.zip" +fi + +if [ !
-d "/llvm-project" ]; +then + mkdir "/llvm-project" + pushd "/llvm-project" + git init + git remote add origin https://github.com/llvm/llvm-project.git + git fetch --depth 1 origin "$ANDROID_LLVM_VERSION" + git checkout FETCH_HEAD + popd +fi + +pushd "/llvm-project" + +# Checkout again the intended version, just in case of a pre-existing full clone +git checkout "$ANDROID_LLVM_VERSION" || true + +LLVM_INSTALL_PREFIX="/${ANDROID_LLVM_ARTIFACT_NAME}" + +rm -rf build/ +cmake -GNinja -S llvm -B build/ \ + -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake" \ + -DANDROID_ABI=x86_64 \ + -DANDROID_PLATFORM="android-${ANDROID_SDK_VERSION}" \ + -DANDROID_NDK="${ANDROID_NDK_ROOT}" \ + -DCMAKE_ANDROID_ARCH_ABI=x86_64 \ + -DCMAKE_ANDROID_NDK="${ANDROID_NDK_ROOT}" \ + -DCMAKE_BUILD_TYPE=MinSizeRel \ + -DCMAKE_SYSTEM_NAME=Android \ + -DCMAKE_SYSTEM_VERSION="${ANDROID_SDK_VERSION}" \ + -DCMAKE_INSTALL_PREFIX="${LLVM_INSTALL_PREFIX}" \ + -DCMAKE_CXX_FLAGS="-march=x86-64 --target=x86_64-linux-android${ANDROID_SDK_VERSION} -fno-rtti" \ + -DLLVM_HOST_TRIPLE="x86_64-linux-android${ANDROID_SDK_VERSION}" \ + -DLLVM_TARGETS_TO_BUILD=X86 \ + -DLLVM_BUILD_LLVM_DYLIB=OFF \ + -DLLVM_BUILD_TESTS=OFF \ + -DLLVM_BUILD_EXAMPLES=OFF \ + -DLLVM_BUILD_DOCS=OFF \ + -DLLVM_BUILD_TOOLS=OFF \ + -DLLVM_ENABLE_RTTI=OFF \ + -DLLVM_BUILD_INSTRUMENTED_COVERAGE=OFF \ + -DLLVM_NATIVE_TOOL_DIR="${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin" \ + -DLLVM_ENABLE_PIC=False \ + -DLLVM_OPTIMIZED_TABLEGEN=ON + +ninja "-j${FDO_CI_CONCURRENT:-4}" -C build/ install + +popd + +rm -rf /llvm-project + +tar --zstd -cf "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "$LLVM_INSTALL_PREFIX" + +# If run in CI upload the tar.zst archive to S3 to avoid rebuilding it if the +# version does not change, and delete it. +# The file is not deleted for non-CI because it can be useful in local runs. 
+if [ -n "$CI" ]; then + ci-fairy s3cp --token-file "${S3_JWT_FILE}" "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" + rm "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" +fi + +rm -rf "$LLVM_INSTALL_PREFIX" + +apt-get purge -y "${EPHEMERAL[@]}" diff --git a/mesalib/.gitlab-ci/container/build-angle.sh b/mesalib/.gitlab-ci/container/build-angle.sh index 8ab65006f5..b47637c6c9 100644 --- a/mesalib/.gitlab-ci/container/build-angle.sh +++ b/mesalib/.gitlab-ci/container/build-angle.sh @@ -4,42 +4,80 @@ # .gitlab-ci/image-tags.yml tags: # KERNEL_ROOTFS_TAG -set -ex +set -uex -ANGLE_REV="1409a05a81e3ccb279142433a2b987bc330f555b" +uncollapsed_section_start angle "Building angle" + +ANGLE_REV="76025caa1a059f464a2b0e8f879dbd4746f092b9" +SCRIPTS_DIR="$(pwd)/.gitlab-ci" +ANGLE_PATCH_DIR="${SCRIPTS_DIR}/container/patches" # DEPOT tools -git clone --depth 1 https://chromium.googlesource.com/chromium/tools/depot_tools.git -PWD=$(pwd) -export PATH=$PWD/depot_tools:$PATH +git clone --depth 1 https://chromium.googlesource.com/chromium/tools/depot_tools.git /depot-tools +export PATH=/depot-tools:$PATH export DEPOT_TOOLS_UPDATE=0 mkdir /angle-build +mkdir /angle pushd /angle-build git init git remote add origin https://chromium.googlesource.com/angle/angle.git git fetch --depth 1 origin "$ANGLE_REV" git checkout FETCH_HEAD -# source preparation -python3 scripts/bootstrap.py -mkdir -p build/config -gclient sync +angle_patch_files=( + build-angle_deps_Make-more-sources-conditional.patch +) +for patch in "${angle_patch_files[@]}"; do + echo "Apply patch to ANGLE from ${patch}" + GIT_COMMITTER_DATE="$(LC_TIME=C date -d@0)" git am < "${ANGLE_PATCH_DIR}/${patch}" +done -sed -i "/catapult/d" testing/BUILD.gn +{ + echo "ANGLE base version $ANGLE_REV" + echo "The following local patches are applied on top:" + git log --reverse --oneline $ANGLE_REV.. 
--format='- %s' +} > /angle/version + +# source preparation +gclient config --name REPLACE-WITH-A-DOT --unmanaged \ + --custom-var='angle_enable_cl=False' \ + --custom-var='angle_enable_cl_testing=False' \ + --custom-var='angle_enable_vulkan_validation_layers=False' \ + --custom-var='angle_enable_wgpu=False' \ + --custom-var='build_allow_regenerate=False' \ + --custom-var='build_angle_deqp_tests=False' \ + --custom-var='build_angle_perftests=False' \ + --custom-var='build_with_catapult=False' \ + --custom-var='build_with_swiftshader=False' \ + https://chromium.googlesource.com/angle/angle.git +sed -e 's/REPLACE-WITH-A-DOT/./;' -i .gclient +gclient sync -j"${FDO_CI_CONCURRENT:-4}" mkdir -p out/Release echo ' -is_debug = false -angle_enable_swiftshader = false -angle_enable_null = false -angle_enable_gl = false -angle_enable_vulkan = true -angle_has_histograms = false -build_angle_trace_perf_tests = false -build_angle_deqp_tests = false -angle_use_custom_libvulkan = false +angle_build_all=false +angle_build_tests=false +angle_enable_cl=false +angle_enable_cl_testing=false +angle_enable_gl=false +angle_enable_gl_desktop_backend=false +angle_enable_null=false +angle_enable_swiftshader=false +angle_enable_trace=false +angle_enable_wgpu=false +angle_enable_vulkan=true +angle_enable_vulkan_api_dump_layer=false +angle_enable_vulkan_validation_layers=false +angle_has_frame_capture=false +angle_has_histograms=false +angle_use_custom_libvulkan=false +angle_egl_extension="so.1" +angle_glesv2_extension="so.2" +build_angle_deqp_tests=false dcheck_always_on=true +enable_expensive_dchecks=false +is_debug=false ' > out/Release/args.gn if [[ "$DEBIAN_ARCH" = "arm64" ]]; then @@ -49,14 +87,17 @@ fi gn gen out/Release # depot_tools overrides ninja with a version that doesn't work. We want # ninja with FDO_CI_CONCURRENT anyway. -/usr/local/bin/ninja -C out/Release/ +/usr/local/bin/ninja -C out/Release/ libEGL libGLESv2 -mkdir /angle -cp out/Release/lib*GL*.so /angle/ -ln -s libEGL.so /angle/libEGL.so.1 -ln -s libGLESv2.so /angle/libGLESv2.so.2 +rm -f out/Release/libvulkan.so* out/Release/*.so.TOC +cp out/Release/lib*.so* /angle/ +ln -s libEGL.so.1 /angle/libEGL.so +ln -s libGLESv2.so.2 /angle/libGLESv2.so rm -rf out popd -rm -rf ./depot_tools +rm -rf /depot-tools +rm -rf /angle-build + +section_end angle diff --git a/mesalib/.gitlab-ci/container/build-apitrace.sh b/mesalib/.gitlab-ci/container/build-apitrace.sh index 0697c3cb7f..903fb73472 100644 --- a/mesalib/.gitlab-ci/container/build-apitrace.sh +++ b/mesalib/.gitlab-ci/container/build-apitrace.sh @@ -3,11 +3,13 @@ # When changing this file, you need to bump the following # .gitlab-ci/image-tags.yml tags: -# DEBIAN_X86_64_TEST_GL_TAG -# DEBIAN_X86_64_TEST_VK_TAG +# DEBIAN_TEST_GL_TAG +# DEBIAN_TEST_VK_TAG # KERNEL_ROOTFS_TAG -set -ex +set -uex + +uncollapsed_section_start apitrace "Building apitrace" APITRACE_VERSION="0a6506433e1f9f7b69757b4e5730326970c4321a" @@ -15,7 +17,7 @@ git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout pushd /apitrace git checkout "$APITRACE_VERSION" git submodule update --init --depth 1 --recursive -cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on $EXTRA_CMAKE_ARGS +cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on ${EXTRA_CMAKE_ARGS:-} cmake --build _build --parallel --target apitrace eglretrace mkdir build cp _build/apitrace build @@ -23,3 +25,5 @@ cp _build/eglretrace build ${STRIP_CMD:-strip} build/* find . 
-not -path './build' -not -path './build/*' -delete popd + +section_end apitrace diff --git a/mesalib/.gitlab-ci/container/build-bindgen.sh b/mesalib/.gitlab-ci/container/build-bindgen.sh new file mode 100644 index 0000000000..9ac30eeaf0 --- /dev/null +++ b/mesalib/.gitlab-ci/container/build-bindgen.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2086 # we want word splitting + +uncollapsed_section_start bindgen "Building bindgen" + +BINDGEN_VER=0.65.1 +CBINDGEN_VER=0.26.0 + +# bindgen +RUSTFLAGS='-L native=/usr/local/lib' cargo install \ + bindgen-cli --version ${BINDGEN_VER} \ + --locked \ + -j ${FDO_CI_CONCURRENT:-4} \ + --root /usr/local + +# cbindgen +RUSTFLAGS='-L native=/usr/local/lib' cargo install \ + cbindgen --version ${CBINDGEN_VER} \ + --locked \ + -j ${FDO_CI_CONCURRENT:-4} \ + --root /usr/local + +section_end bindgen diff --git a/mesalib/.gitlab-ci/container/build-crosvm.sh b/mesalib/.gitlab-ci/container/build-crosvm.sh index f199529845..9adfcdaaa3 100644 --- a/mesalib/.gitlab-ci/container/build-crosvm.sh +++ b/mesalib/.gitlab-ci/container/build-crosvm.sh @@ -1,23 +1,32 @@ #!/usr/bin/env bash # shellcheck disable=SC2086 # we want word splitting -set -ex +# When changing this file, you need to bump the following +# .gitlab-ci/image-tags.yml tags: +# DEBIAN_BASE_TAG +# DEBIAN_TEST_GL_TAG +# DEBIAN_TEST_VK_TAG +# KERNEL_ROOTFS_TAG + +set -uex + +uncollapsed_section_start crosvm "Building crosvm" git config --global user.email "mesa@example.com" git config --global user.name "Mesa CI" -CROSVM_VERSION=1641c55bcc922588e24de73e9cca7b5e4005bd6d +CROSVM_VERSION=2118fbb57ca26b495a9aa407845c7729d697a24b git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/crosvm/crosvm /platform/crosvm pushd /platform/crosvm git checkout "$CROSVM_VERSION" git submodule update --init -VIRGLRENDERER_VERSION=d9c002fac153b834a2c17731f2b85c36e333e102 +VIRGLRENDERER_VERSION=57a2b82e0958f08d02ade8400786e1ca0935c9b1 rm -rf third_party/virglrenderer git clone --single-branch -b main --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer pushd third_party/virglrenderer git checkout "$VIRGLRENDERER_VERSION" -meson setup build/ -D libdir=lib -D render-server-worker=process -D venus=true $EXTRA_MESON_ARGS +meson setup build/ -D libdir=lib -D render-server-worker=process -D venus=true ${EXTRA_MESON_ARGS:-} meson install -C build popd @@ -29,7 +38,7 @@ RUSTFLAGS='-L native=/usr/local/lib' cargo install \ -j ${FDO_CI_CONCURRENT:-4} \ --root /usr/local \ --version 0.65.1 \ - $EXTRA_CARGO_ARGS + ${EXTRA_CARGO_ARGS:-} CROSVM_USE_SYSTEM_MINIGBM=1 CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L native=/usr/local/lib' cargo install \ -j ${FDO_CI_CONCURRENT:-4} \ @@ -37,8 +46,10 @@ CROSVM_USE_SYSTEM_MINIGBM=1 CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L nati --features 'default-no-sandbox gpu x virgl_renderer' \ --path . 
\ --root /usr/local \ - $EXTRA_CARGO_ARGS + ${EXTRA_CARGO_ARGS:-} popd rm -rf /platform/crosvm + +section_end crosvm diff --git a/mesalib/.gitlab-ci/container/build-deqp-runner.sh b/mesalib/.gitlab-ci/container/build-deqp-runner.sh index b35454c477..e6f764949a 100644 --- a/mesalib/.gitlab-ci/container/build-deqp-runner.sh +++ b/mesalib/.gitlab-ci/container/build-deqp-runner.sh @@ -3,47 +3,75 @@ # When changing this file, you need to bump the following # .gitlab-ci/image-tags.yml tags: -# DEBIAN_X86_64_TEST_ANDROID_TAG -# DEBIAN_X86_64_TEST_GL_TAG -# DEBIAN_X86_64_TEST_VK_TAG +# DEBIAN_TEST_ANDROID_TAG +# DEBIAN_BASE_TAG # KERNEL_ROOTFS_TAG -set -ex +set -uex -DEQP_RUNNER_VERSION=0.18.0 +uncollapsed_section_start deqp-runner "Building deqp-runner" -DEQP_RUNNER_GIT_URL="${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/mesa/deqp-runner.git}" +DEQP_RUNNER_VERSION=0.20.2 -if [ -n "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then - # Build and install from source - DEQP_RUNNER_CARGO_ARGS="--git $DEQP_RUNNER_GIT_URL" +commits_to_backport=( +) - if [ -n "${DEQP_RUNNER_GIT_TAG}" ]; then - DEQP_RUNNER_CARGO_ARGS="--tag ${DEQP_RUNNER_GIT_TAG} ${DEQP_RUNNER_CARGO_ARGS}" - DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_TAG" - else - DEQP_RUNNER_CARGO_ARGS="--rev ${DEQP_RUNNER_GIT_REV} ${DEQP_RUNNER_CARGO_ARGS}" - DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_REV" - fi +patch_files=( +) + +DEQP_RUNNER_GIT_URL="${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/mesa/deqp-runner.git}" - DEQP_RUNNER_CARGO_ARGS="${DEQP_RUNNER_CARGO_ARGS} ${EXTRA_CARGO_ARGS}" +if [ -n "${DEQP_RUNNER_GIT_TAG:-}" ]; then + DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_TAG" +elif [ -n "${DEQP_RUNNER_GIT_REV:-}" ]; then + DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_REV" else - # Install from package registry - DEQP_RUNNER_CARGO_ARGS="--version ${DEQP_RUNNER_VERSION} ${EXTRA_CARGO_ARGS} -- deqp-runner" DEQP_RUNNER_GIT_CHECKOUT="v$DEQP_RUNNER_VERSION" fi +BASE_PWD=$PWD + +mkdir -p /deqp-runner +pushd /deqp-runner +mkdir deqp-runner-git +pushd deqp-runner-git +git init +git remote add origin "$DEQP_RUNNER_GIT_URL" +git fetch --depth 1 origin "$DEQP_RUNNER_GIT_CHECKOUT" +git checkout FETCH_HEAD + +for commit in "${commits_to_backport[@]}" +do + PATCH_URL="https://gitlab.freedesktop.org/mesa/deqp-runner/-/commit/$commit.patch" + echo "Backport deqp-runner commit $commit from $PATCH_URL" + curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | git am +done + +for patch in "${patch_files[@]}" +do + echo "Apply patch to deqp-runner from $patch" + git am "$BASE_PWD/.gitlab-ci/container/patches/$patch" +done + +if [ -z "${RUST_TARGET:-}" ]; then + RUST_TARGET="" +fi + if [[ "$RUST_TARGET" != *-android ]]; then + # When the CC variable (/usr/lib/ccache/gcc) is set, the rust compiler uses + # this variable when cross-compiling for arm32, and the build fails for zsys-sys. + # So unset the CC variable when cross-compiling for arm32. + SAVEDCC=${CC:-} + if [ "$RUST_TARGET" = "armv7-unknown-linux-gnueabihf" ]; then + unset CC + fi cargo install --locked \ -j ${FDO_CI_CONCURRENT:-4} \ --root /usr/local \ - ${DEQP_RUNNER_CARGO_ARGS} + ${EXTRA_CARGO_ARGS:-} \ + --path .
+ CC=$SAVEDCC else - mkdir -p /deqp-runner - pushd /deqp-runner - git clone --branch "$DEQP_RUNNER_GIT_CHECKOUT" --depth 1 "$DEQP_RUNNER_GIT_URL" deqp-runner-git - pushd deqp-runner-git - cargo install --locked \ -j ${FDO_CI_CONCURRENT:-4} \ --root /usr/local --version 2.10.0 \ @@ -57,14 +85,16 @@ else cargo uninstall --locked \ --root /usr/local \ cargo-ndk - - popd - rm -rf deqp-runner-git - popd fi +popd +rm -rf deqp-runner-git +popd + # remove unused test runners to shrink images for the Mesa CI build (not kernel, # which chooses its own deqp branch) -if [ -z "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then +if [ -z "${DEQP_RUNNER_GIT_TAG:-}${DEQP_RUNNER_GIT_REV:-}" ]; then rm -f /usr/local/bin/igt-runner fi + +section_end deqp-runner diff --git a/mesalib/.gitlab-ci/container/build-deqp.sh b/mesalib/.gitlab-ci/container/build-deqp.sh index 79e7e5f702..1ff4fedabf 100644 --- a/mesalib/.gitlab-ci/container/build-deqp.sh +++ b/mesalib/.gitlab-ci/container/build-deqp.sh @@ -3,22 +3,29 @@ # When changing this file, you need to bump the following # .gitlab-ci/image-tags.yml tags: -# DEBIAN_X86_64_TEST_ANDROID_TAG -# DEBIAN_X86_64_TEST_GL_TAG -# DEBIAN_X86_64_TEST_VK_TAG +# DEBIAN_TEST_ANDROID_TAG +# DEBIAN_TEST_GL_TAG +# DEBIAN_TEST_VK_TAG # KERNEL_ROOTFS_TAG -set -ex -o pipefail +set -uex -o pipefail + +# shellcheck disable=SC2153 +deqp_api=${DEQP_API,,} + +uncollapsed_section_start deqp-$deqp_api "Building dEQP $DEQP_API" # See `deqp_build_targets` below for which release is used to produce which # binary. Unless this comment has bitrotten: +# - the commit from the main branch produces the deqp tools and `deqp-vk`, # - the VK release produces `deqp-vk`, # - the GL release produces `glcts`, and # - the GLES release produces `deqp-gles*` and `deqp-egl` -DEQP_VK_VERSION=1.3.8.2 -DEQP_GL_VERSION=4.6.4.0 -DEQP_GLES_VERSION=3.2.10.0 +DEQP_MAIN_COMMIT=a9f7069b9a5ba94715a175cb1818ed504add0107 +DEQP_VK_VERSION=1.3.10.0 +DEQP_GL_VERSION=4.6.5.0 +DEQP_GLES_VERSION=3.2.11.0 # Patches to VulkanCTS may come from commits in their repo (listed in # cts_commits_to_backport) or patch files stored in our repo (in the patch @@ -27,30 +34,35 @@ DEQP_GLES_VERSION=3.2.10.0 # patches. 
# shellcheck disable=SC2034 -vk_cts_commits_to_backport=( - # Fix more ASAN errors due to missing virtual destructors - dd40bcfef1b4035ea55480b6fd4d884447120768 +main_cts_commits_to_backport=( + # If you find yourself wanting to add something in here, consider whether + # bumping DEQP_MAIN_COMMIT is not a better solution :) - # Remove "unused shader stages" tests - 7dac86c6bbd15dec91d7d9a98cd6dd57c11092a7 + # Build testlog-* and other tools also on Android + 0fcd87248f83a2174e5c938cb105dc2da03f3683 +) - # Emit point size from "many indirect draws" test - 771e56d1c4d03e073ddb7f1200ad6d57e0a0c979 +# shellcheck disable=SC2034 +main_cts_patch_files=( ) # shellcheck disable=SC2034 -vk_cts_patch_files=( +vk_cts_commits_to_backport=( + # Remove multi-line test results in DRM format modifier tests + 8c95af68a2a85cbdc7e1d9267ab029f73e9427d2 ) -if [ "${DEQP_TARGET}" = 'android' ]; then - vk_cts_patch_files+=( - build-deqp-vk_Allow-running-on-Android-from-the-command-line.patch - build-deqp-vk_Android-prints-to-stdout-instead-of-logcat.patch - ) -fi +# shellcheck disable=SC2034 +vk_cts_patch_files=( +) # shellcheck disable=SC2034 gl_cts_commits_to_backport=( + # Add #include in deMath.h when being compiled by C++ + 71808fe7d0a640dfd703e845d93ba1c5ab751055 + # Revert "Add #include in deMath.h when being compiled by C++ compiler" + # This also adds an alternative fix along with the revert. + 6164879a0acce258637d261592a9c395e564b361 ) # shellcheck disable=SC2034 @@ -67,14 +79,15 @@ fi # shellcheck disable=SC2034 # GLES builds also EGL gles_cts_commits_to_backport=( - # Implement support for the EGL_EXT_config_select_group extension - 88ba9ac270db5be600b1ecacbc6d9db0c55d5be4 + # Add #include in deMath.h when being compiled by C++ + 71808fe7d0a640dfd703e845d93ba1c5ab751055 + # Revert "Add #include in deMath.h when being compiled by C++ compiler" + # This also adds an alternative fix along with the revert. + 6164879a0acce258637d261592a9c395e564b361 ) # shellcheck disable=SC2034 gles_cts_patch_files=( - # Correct detection mechanism for EGL_EXT_config_select_group extension - build-deqp-egl_Correct-EGL_EXT_config_select_group-extension-query.patch ) if [ "${DEQP_TARGET}" = 'android' ]; then @@ -93,87 +106,114 @@ git config --global user.name "Mesa CI" # shellcheck disable=SC2153 case "${DEQP_API}" in + tools) DEQP_VERSION="$DEQP_MAIN_COMMIT";; + *-main) DEQP_VERSION="$DEQP_MAIN_COMMIT";; VK) DEQP_VERSION="vulkan-cts-$DEQP_VK_VERSION";; GL) DEQP_VERSION="opengl-cts-$DEQP_GL_VERSION";; GLES) DEQP_VERSION="opengl-es-cts-$DEQP_GLES_VERSION";; + *) echo "Unexpected DEQP_API value: $DEQP_API"; exit 1;; esac -git clone \ - https://github.com/KhronosGroup/VK-GL-CTS.git \ - -b $DEQP_VERSION \ - --depth 1 \ - /VK-GL-CTS +mkdir -p /VK-GL-CTS pushd /VK-GL-CTS +[ -e .git ] || { + git init + git remote add origin https://github.com/KhronosGroup/VK-GL-CTS.git +} +git fetch --depth 1 origin "$DEQP_VERSION" +git checkout FETCH_HEAD +DEQP_COMMIT=$(git rev-parse FETCH_HEAD) + +if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then + git fetch origin main + if ! git merge-base --is-ancestor "$DEQP_MAIN_COMMIT" origin/main; then + echo "VK-GL-CTS commit $DEQP_MAIN_COMMIT is not a commit from the main branch." 
+ exit 1 + fi +fi -mkdir -p /deqp +mkdir -p /deqp-$deqp_api -# shellcheck disable=SC2153 -deqp_api=${DEQP_API,,} +if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then + prefix="main" +else + prefix="$deqp_api" +fi -cts_commits_to_backport="${deqp_api}_cts_commits_to_backport[@]" +cts_commits_to_backport="${prefix}_cts_commits_to_backport[@]" for commit in "${!cts_commits_to_backport}" do PATCH_URL="https://github.com/KhronosGroup/VK-GL-CTS/commit/$commit.patch" echo "Apply patch to ${DEQP_API} CTS from $PATCH_URL" curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | \ - git am - + GIT_COMMITTER_DATE=$(LC_TIME=C date -d@0) git am - done -cts_patch_files="${deqp_api}_cts_patch_files[@]" +cts_patch_files="${prefix}_cts_patch_files[@]" for patch in "${!cts_patch_files}" do echo "Apply patch to ${DEQP_API} CTS from $patch" - git am < $OLDPWD/.gitlab-ci/container/patches/$patch + GIT_COMMITTER_DATE=$(LC_TIME=C date -d@0) git am < $OLDPWD/.gitlab-ci/container/patches/$patch done { - echo "dEQP base version $DEQP_VERSION" - echo "The following local patches are applied on top:" - git log --reverse --oneline $DEQP_VERSION.. --format=%s | sed 's/^/- /' -} > /deqp/version-$deqp_api + if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then + commit_desc=$(git show --no-patch --format='commit %h on %ci' --abbrev=10 "$DEQP_COMMIT") + echo "dEQP $DEQP_API at $commit_desc" + else + echo "dEQP $DEQP_API version $DEQP_VERSION" + fi + if [ "$(git rev-parse HEAD)" != "$DEQP_COMMIT" ]; then + echo "The following local patches are applied on top:" + git log --reverse --oneline "$DEQP_COMMIT".. --format='- %s' + fi +} > /deqp-$deqp_api/deqp-$deqp_api-version # --insecure is due to SSL cert failures hitting sourceforge for zlib and # libpng (sigh). The archives get their checksums checked anyway, and git # always goes through ssh or https. python3 external/fetch_sources.py --insecure -# Save the testlog stylesheets: -cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp +if [[ "$DEQP_API" = tools ]]; then + # Save the testlog stylesheets: + cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp-$deqp_api +fi + popd -pushd /deqp +pushd /deqp-$deqp_api if [ "${DEQP_API}" = 'GLES' ]; then if [ "${DEQP_TARGET}" = 'android' ]; then cmake -S /VK-GL-CTS -B . -G Ninja \ -DDEQP_TARGET=android \ -DCMAKE_BUILD_TYPE=Release \ - $EXTRA_CMAKE_ARGS - mold --run ninja modules/egl/deqp-egl - mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-android + ${EXTRA_CMAKE_ARGS:-} + ninja modules/egl/deqp-egl + mv modules/egl/deqp-egl{,-android} else # When including EGL/X11 testing, do that build first and save off its # deqp-egl binary. cmake -S /VK-GL-CTS -B . -G Ninja \ -DDEQP_TARGET=x11_egl_glx \ -DCMAKE_BUILD_TYPE=Release \ - $EXTRA_CMAKE_ARGS - mold --run ninja modules/egl/deqp-egl - mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-x11 + ${EXTRA_CMAKE_ARGS:-} + ninja modules/egl/deqp-egl + mv modules/egl/deqp-egl{,-x11} cmake -S /VK-GL-CTS -B . -G Ninja \ -DDEQP_TARGET=wayland \ -DCMAKE_BUILD_TYPE=Release \ - $EXTRA_CMAKE_ARGS - mold --run ninja modules/egl/deqp-egl - mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-wayland + ${EXTRA_CMAKE_ARGS:-} + ninja modules/egl/deqp-egl + mv modules/egl/deqp-egl{,-wayland} fi fi cmake -S /VK-GL-CTS -B . 
-G Ninja \ -DDEQP_TARGET=${DEQP_TARGET} \ -DCMAKE_BUILD_TYPE=Release \ - $EXTRA_CMAKE_ARGS + ${EXTRA_CMAKE_ARGS:-} # Make sure `default` doesn't silently stop detecting one of the platforms we care about if [ "${DEQP_TARGET}" = 'default' ]; then @@ -184,7 +224,7 @@ fi deqp_build_targets=() case "${DEQP_API}" in - VK) + VK|VK-main) deqp_build_targets+=(deqp-vk) ;; GL) @@ -192,76 +232,82 @@ ;; GLES) deqp_build_targets+=(deqp-gles{2,3,31}) + deqp_build_targets+=(glcts) # needed for gles*-khr tests # deqp-egl also comes from this build, but it is handled separately above. ;; + tools) + deqp_build_targets+=(testlog-to-xml) + deqp_build_targets+=(testlog-to-csv) + deqp_build_targets+=(testlog-to-junit) + ;; esac -if [ "${DEQP_TARGET}" != 'android' ]; then - deqp_build_targets+=(testlog-to-xml) - deqp_build_targets+=(testlog-to-csv) - deqp_build_targets+=(testlog-to-junit) -fi -mold --run ninja "${deqp_build_targets[@]}" +ninja "${deqp_build_targets[@]}" -if [ "${DEQP_TARGET}" != 'android' ]; then +if [ "$DEQP_API" != tools ]; then # Copy out the mustpass lists we want. - mkdir -p /deqp/mustpass + mkdir -p mustpass - if [ "${DEQP_API}" = 'VK' ]; then + if [ "${DEQP_API}" = 'VK' ] || [ "${DEQP_API}" = 'VK-main' ]; then for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do cat /VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass \ - >> /deqp/mustpass/vk-main.txt + >> mustpass/vk-main.txt done fi if [ "${DEQP_API}" = 'GL' ]; then cp \ - /VK-GL-CTS/external/openglcts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-main.txt \ - /deqp/mustpass/ + /VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass/main/*-main.txt \ + mustpass/ cp \ - /VK-GL-CTS/external/openglcts/data/mustpass/gl/khronos_mustpass_single/4.6.1.x/*-single.txt \ - /deqp/mustpass/ + /VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass_single/main/*-single.txt \ + mustpass/ fi if [ "${DEQP_API}" = 'GLES' ]; then cp \ - /VK-GL-CTS/external/openglcts/data/mustpass/gles/aosp_mustpass/3.2.6.x/*.txt \ - /deqp/mustpass/ + /VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/aosp_mustpass/main/*.txt \ + mustpass/ cp \ - /VK-GL-CTS/external/openglcts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-main.txt \ - /deqp/mustpass/ + /VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/egl/aosp_mustpass/main/egl-main.txt \ + mustpass/ cp \ - /VK-GL-CTS/external/openglcts/data/mustpass/gles/khronos_mustpass/3.2.6.x/*-main.txt \ - /deqp/mustpass/ + /VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/khronos_mustpass/main/*-main.txt \ + mustpass/ fi + # Compress the caselists, since Vulkan's in particular are gigantic; higher + # compression levels provide no real measurable benefit. + zstd -1 --rm mustpass/*.txt +fi + +if [ "$DEQP_API" = tools ]; then # Save *some* executor utils, but otherwise strip things down # to reduce deqp build size: - mkdir /deqp/executor.save - cp /deqp/executor/testlog-to-* /deqp/executor.save - rm -rf /deqp/executor - mv /deqp/executor.save /deqp/executor + mv executor/testlog-to-* . + rm -rf executor fi # Remove other mustpass files, since we saved off the ones we wanted to convenient locations above.
-rm -rf /deqp/external/**/mustpass/ -rm -rf /deqp/external/vulkancts/modules/vulkan/vk-main* -rm -rf /deqp/external/vulkancts/modules/vulkan/vk-default - -rm -rf /deqp/external/openglcts/modules/cts-runner -rm -rf /deqp/modules/internal -rm -rf /deqp/execserver -rm -rf /deqp/framework +rm -rf external/**/mustpass/ +rm -rf external/vulkancts/modules/vulkan/vk-main* +rm -rf external/vulkancts/modules/vulkan/vk-default + +rm -rf external/openglcts/modules/cts-runner +rm -rf modules/internal +rm -rf execserver +rm -rf framework find . -depth \( -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' \) -exec rm -rf {} \; -if [ "${DEQP_API}" = 'VK' ]; then +if [ "${DEQP_API}" = 'VK' ] || [ "${DEQP_API}" = 'VK-main' ]; then ${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk fi -if [ "${DEQP_API}" = 'GL' ]; then +if [ "${DEQP_API}" = 'GL' ] || [ "${DEQP_API}" = 'GLES' ]; then ${STRIP_CMD:-strip} external/openglcts/modules/glcts fi if [ "${DEQP_API}" = 'GLES' ]; then ${STRIP_CMD:-strip} modules/*/deqp-* fi du -sh ./* -rm -rf /VK-GL-CTS popd + +section_end deqp-$deqp_api diff --git a/mesalib/.gitlab-ci/container/build-directx-headers.sh b/mesalib/.gitlab-ci/container/build-directx-headers.sh index a930520ee8..fd19e548bc 100644 --- a/mesalib/.gitlab-ci/container/build-directx-headers.sh +++ b/mesalib/.gitlab-ci/container/build-directx-headers.sh @@ -5,11 +5,15 @@ # .gitlab-ci/image-tags.yml tags: # DEBIAN_BUILD_TAG -set -ex +set -uex -git clone https://github.com/microsoft/DirectX-Headers -b v1.613.1 --depth 1 +uncollapsed_section_start directx-headers "Building directx-headers" + +git clone https://github.com/microsoft/DirectX-Headers -b v1.614.1 --depth 1 pushd DirectX-Headers -meson setup build --backend=ninja --buildtype=release -Dbuild-test=false $EXTRA_MESON_ARGS +meson setup build --backend=ninja --buildtype=release -Dbuild-test=false ${EXTRA_MESON_ARGS:-} meson install -C build popd rm -rf DirectX-Headers + +section_end directx-headers diff --git a/mesalib/.gitlab-ci/container/build-fossilize.sh b/mesalib/.gitlab-ci/container/build-fossilize.sh index ca1204451b..b726fe92a4 100644 --- a/mesalib/.gitlab-ci/container/build-fossilize.sh +++ b/mesalib/.gitlab-ci/container/build-fossilize.sh @@ -2,11 +2,13 @@ # When changing this file, you need to bump the following # .gitlab-ci/image-tags.yml tags: -# DEBIAN_X86_64_TEST_VK_TAG +# DEBIAN_TEST_VK_TAG # KERNEL_ROOTFS_TAG set -ex +uncollapsed_section_start fossilize "Building fossilize" + git clone https://github.com/ValveSoftware/Fossilize.git cd Fossilize git checkout b43ee42bbd5631ea21fe9a2dee4190d5d875c327 @@ -17,3 +19,5 @@ cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release ninja -C . install cd ../.. rm -rf Fossilize + +section_end fossilize diff --git a/mesalib/.gitlab-ci/container/build-gfxreconstruct.sh b/mesalib/.gitlab-ci/container/build-gfxreconstruct.sh index c7600fc5f2..b52206bba6 100644 --- a/mesalib/.gitlab-ci/container/build-gfxreconstruct.sh +++ b/mesalib/.gitlab-ci/container/build-gfxreconstruct.sh @@ -2,6 +2,8 @@ set -ex +uncollapsed_section_start gfxreconstruct "Building gfxreconstruct" + GFXRECONSTRUCT_VERSION=761837794a1e57f918a85af7000b12e531b178ae git clone https://github.com/LunarG/gfxreconstruct.git \ @@ -17,3 +19,5 @@ cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX: cmake --build _build --parallel --target tools/{replay,info}/install/strip find . 
-not -path './build' -not -path './build/*' -delete popd + +section_end gfxreconstruct diff --git a/mesalib/.gitlab-ci/container/build-hang-detection.sh b/mesalib/.gitlab-ci/container/build-hang-detection.sh deleted file mode 100644 index b5af1af890..0000000000 --- a/mesalib/.gitlab-ci/container/build-hang-detection.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -ex - -PARALLEL_DEQP_RUNNER_VERSION=fe557794b5dadd8dbf0eae403296625e03bda18a - -git clone https://gitlab.freedesktop.org/mesa/parallel-deqp-runner --single-branch -b master --no-checkout /parallel-deqp-runner -pushd /parallel-deqp-runner -git checkout "$PARALLEL_DEQP_RUNNER_VERSION" -meson . _build -ninja -C _build hang-detection -mkdir -p build/bin -install _build/hang-detection build/bin -strip build/bin/* -find . -not -path './build' -not -path './build/*' -delete -popd diff --git a/mesalib/.gitlab-ci/container/build-kdl.sh b/mesalib/.gitlab-ci/container/build-kdl.sh index e45127be54..3a03489690 100644 --- a/mesalib/.gitlab-ci/container/build-kdl.sh +++ b/mesalib/.gitlab-ci/container/build-kdl.sh @@ -3,21 +3,30 @@ set -ex -KDL_REVISION="5056f71b100a68b72b285c6fc845a66a2ed25985" +uncollapsed_section_start kdl "Building kdl" -mkdir ci-kdl.git -pushd ci-kdl.git +KDL_REVISION="cbbe5fd54505fd03ee34f35bfd16794f0c30074f" +KDL_CHECKOUT_DIR="/tmp/ci-kdl.git" + +mkdir -p ${KDL_CHECKOUT_DIR} +pushd ${KDL_CHECKOUT_DIR} git init git remote add origin https://gitlab.freedesktop.org/gfx-ci/ci-kdl.git git fetch --depth 1 origin ${KDL_REVISION} git checkout FETCH_HEAD popd -python3 -m venv ci-kdl.venv -source ci-kdl.venv/bin/activate -pushd ci-kdl.git -pip install -r requirements.txt -pip install . -popd +# Run venv in a subshell, so we don't accidentally leak the venv state into +# calling scripts +( + python3 -m venv /ci-kdl + source /ci-kdl/bin/activate && + pushd ${KDL_CHECKOUT_DIR} && + pip install -r requirements.txt && + pip install . 
&& + popd +) + +rm -rf ${KDL_CHECKOUT_DIR} -rm -rf ci-kdl.git +section_end kdl diff --git a/mesalib/.gitlab-ci/container/build-kernel.sh b/mesalib/.gitlab-ci/container/build-kernel.sh deleted file mode 100644 index 7f56c2989a..0000000000 --- a/mesalib/.gitlab-ci/container/build-kernel.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2086 # we want word splitting -# shellcheck disable=SC2153 - -set -ex - -mkdir -p kernel -pushd kernel - -if [[ ${DEBIAN_ARCH} = "arm64" ]]; then - KERNEL_IMAGE_NAME+=" cheza-kernel" -fi - -for image in ${KERNEL_IMAGE_NAME}; do - curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ - -o "/lava-files/${image}" "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${image}" -done - -for dtb in ${DEVICE_TREES}; do - curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ - -o "/lava-files/${dtb}" "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${dtb}" - done - -mkdir -p "/lava-files/rootfs-${DEBIAN_ARCH}" -curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ - -O "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "/lava-files/rootfs-${DEBIAN_ARCH}/" - -popd -rm -rf kernel - diff --git a/mesalib/.gitlab-ci/container/build-libclc.sh b/mesalib/.gitlab-ci/container/build-libclc.sh index 9ec3e3c55b..4d399305ac 100644 --- a/mesalib/.gitlab-ci/container/build-libclc.sh +++ b/mesalib/.gitlab-ci/container/build-libclc.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash -set -ex +set -uex + +uncollapsed_section_start libclc "Building libclc" export LLVM_CONFIG="llvm-config-${LLVM_VERSION:?"llvm unset!"}" LLVM_TAG="llvmorg-15.0.7" @@ -29,3 +31,5 @@ ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/ du -sh ./* rm -rf /libclc /llvm-project + +section_end libclc diff --git a/mesalib/.gitlab-ci/container/build-libdrm.sh b/mesalib/.gitlab-ci/container/build-libdrm.sh index 0293b598ab..396ba659cf 100644 --- a/mesalib/.gitlab-ci/container/build-libdrm.sh +++ b/mesalib/.gitlab-ci/container/build-libdrm.sh @@ -1,16 +1,21 @@ #!/usr/bin/env bash -# Script used for Android and Fedora builds +# Script used for Android and Fedora builds (Debian builds get their libdrm version +# from https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo - see PKG_REPO_REV) # shellcheck disable=SC2086 # we want word splitting -set -ex +set -uex -export LIBDRM_VERSION=libdrm-2.4.119 +uncollapsed_section_start libdrm "Building libdrm" + +export LIBDRM_VERSION=libdrm-2.4.122 curl -L -O --retry 4 -f --retry-all-errors --retry-delay 60 \ https://dri.freedesktop.org/libdrm/"$LIBDRM_VERSION".tar.xz tar -xvf "$LIBDRM_VERSION".tar.xz && rm "$LIBDRM_VERSION".tar.xz cd "$LIBDRM_VERSION" -meson setup build -D vc4=disabled -D freedreno=disabled -D etnaviv=disabled $EXTRA_MESON_ARGS +meson setup build -D vc4=disabled -D freedreno=disabled -D etnaviv=disabled ${EXTRA_MESON_ARGS:-} meson install -C build cd .. 
rm -rf "$LIBDRM_VERSION" + +section_end libdrm diff --git a/mesalib/.gitlab-ci/container/build-llvm-spirv.sh b/mesalib/.gitlab-ci/container/build-llvm-spirv.sh index 2742298b12..125420c973 100644 --- a/mesalib/.gitlab-ci/container/build-llvm-spirv.sh +++ b/mesalib/.gitlab-ci/container/build-llvm-spirv.sh @@ -2,6 +2,8 @@ set -ex +uncollapsed_section_start llvm-spirv "Building LLVM-SPIRV-Translator" + VER="${LLVM_VERSION:?llvm not set}.0.0" curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ @@ -20,3 +22,5 @@ popd du -sh "SPIRV-LLVM-Translator-${VER}" rm -rf "SPIRV-LLVM-Translator-${VER}" + +section_end llvm-spirv diff --git a/mesalib/.gitlab-ci/container/build-mold.sh b/mesalib/.gitlab-ci/container/build-mold.sh index b97b5c2764..6ca90eb831 100644 --- a/mesalib/.gitlab-ci/container/build-mold.sh +++ b/mesalib/.gitlab-ci/container/build-mold.sh @@ -4,19 +4,29 @@ set -ex # When changing this file, you need to bump the following # .gitlab-ci/image-tags.yml tags: +# ALPINE_X86_64_BUILD_TAG # DEBIAN_BASE_TAG # DEBIAN_BUILD_TAG # FEDORA_X86_64_BUILD_TAG # KERNEL_ROOTFS_TAG -MOLD_VERSION="2.31.0" +uncollapsed_section_start mold "Building mold" + +MOLD_VERSION="2.32.0" git clone -b v"$MOLD_VERSION" --single-branch --depth 1 https://github.com/rui314/mold.git pushd mold cmake -DCMAKE_BUILD_TYPE=Release -D BUILD_TESTING=OFF -D MOLD_LTO=ON -cmake --build . --parallel -cmake --install . +cmake --build . --parallel "${FDO_CI_CONCURRENT:-4}" +cmake --install . --strip + +# Always use mold from now on +find /usr/bin \( -name '*-ld' -o -name 'ld' \) \ + -exec ln -sf /usr/local/bin/ld.mold {} \; \ + -exec ls -l {} + popd rm -rf mold + +section_end mold diff --git a/mesalib/.gitlab-ci/container/build-ninetests.sh b/mesalib/.gitlab-ci/container/build-ninetests.sh index d60b5fe826..9e9b9f03f3 100644 --- a/mesalib/.gitlab-ci/container/build-ninetests.sh +++ b/mesalib/.gitlab-ci/container/build-ninetests.sh @@ -2,10 +2,12 @@ # When changing this file, you need to bump the following # .gitlab-ci/image-tags.yml tags: -# DEBIAN_X86_64_TEST_GL_TAG +# DEBIAN_TEST_GL_TAG set -ex -o pipefail +uncollapsed_section_start ninetests "Building Nine tests" + ### Careful editing anything below this line git config --global user.email "mesa@example.com" @@ -23,3 +25,5 @@ mv NineTests/NineTests /NineTests/ popd rm -rf /Xnine + +section_end ninetests diff --git a/mesalib/.gitlab-ci/container/build-piglit.sh b/mesalib/.gitlab-ci/container/build-piglit.sh index c252c0a8c7..45a6f2f3e0 100644 --- a/mesalib/.gitlab-ci/container/build-piglit.sh +++ b/mesalib/.gitlab-ci/container/build-piglit.sh @@ -1,24 +1,27 @@ #!/bin/bash # shellcheck disable=SC2086 # we want word splitting -set -ex +set -uex + +uncollapsed_section_start piglit "Building piglit" # When changing this file, you need to bump the following # .gitlab-ci/image-tags.yml tags: -# DEBIAN_X86_64_TEST_GL_TAG -# DEBIAN_X86_64_TEST_VK_TAG +# DEBIAN_TEST_GL_TAG +# DEBIAN_TEST_VK_TAG # KERNEL_ROOTFS_TAG -REV="8a6ce9c6fc5c8039665655bca4904d5601c6dba0" +REV="631b72944f56e688f56a08d26c8a9f3988801a08" git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit pushd /piglit git checkout "$REV" patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff -cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS -ninja $PIGLIT_BUILD_TARGETS -find . -depth \( -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' \) -exec rm -rf {} \; +cmake -S . -B . 
-G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS ${EXTRA_CMAKE_ARGS:-} +ninja ${PIGLIT_BUILD_TARGETS:-} +find . -depth \( -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' \) \ + ! -name 'include_test.h' -exec rm -rf {} \; rm -rf target_api -if [ "$PIGLIT_BUILD_TARGETS" = "piglit_replayer" ]; then +if [ "${PIGLIT_BUILD_TARGETS:-}" = "piglit_replayer" ]; then find . -depth \ ! -regex "^\.$" \ ! -regex "^\.\/piglit.*" \ @@ -31,3 +34,5 @@ if [ "$PIGLIT_BUILD_TARGETS" = "piglit_replayer" ]; then -exec rm -rf {} \; 2>/dev/null fi popd + +section_end piglit diff --git a/mesalib/.gitlab-ci/container/build-rust.sh b/mesalib/.gitlab-ci/container/build-rust.sh index 50b0138248..344327adfe 100644 --- a/mesalib/.gitlab-ci/container/build-rust.sh +++ b/mesalib/.gitlab-ci/container/build-rust.sh @@ -5,17 +5,16 @@ set -ex +uncollapsed_section_start rust "Building Rust toolchain" + # cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in # $HOME/.cargo/bin. Make bin a link to a public bin directory so the commands # are just available to all build jobs. mkdir -p "$HOME"/.cargo ln -s /usr/local/bin "$HOME"/.cargo/bin -# Rusticl requires at least Rust 1.66.0 and NAK requires 1.73.0 -# -# Also, pick a specific snapshot from rustup so the compiler doesn't drift on -# us. -RUST_VERSION=1.73.0-2023-10-05 +# Pick a specific snapshot from rustup so the compiler doesn't drift on us. +RUST_VERSION=1.78.0-2024-05-02 # For rust in Mesa, we use rustup to install. This lets us pick an arbitrary # version of the compiler, rather than whatever the container's Debian comes @@ -37,3 +36,5 @@ linker = "arm-linux-gnueabihf-gcc" [target.aarch64-unknown-linux-gnu] linker = "aarch64-linux-gnu-gcc" EOF + +section_end rust diff --git a/mesalib/.gitlab-ci/container/build-shader-db.sh b/mesalib/.gitlab-ci/container/build-shader-db.sh index 7cebcd8f2a..0a6af9b63d 100644 --- a/mesalib/.gitlab-ci/container/build-shader-db.sh +++ b/mesalib/.gitlab-ci/container/build-shader-db.sh @@ -6,9 +6,13 @@ set -ex +uncollapsed_section_start shader-db "Building shader-db" + pushd /usr/local git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1 rm -rf shader-db/.git cd shader-db make popd + +section_end shader-db diff --git a/mesalib/.gitlab-ci/container/build-skqp.sh b/mesalib/.gitlab-ci/container/build-skqp.sh index f5e435c114..e17f66bea8 100644 --- a/mesalib/.gitlab-ci/container/build-skqp.sh +++ b/mesalib/.gitlab-ci/container/build-skqp.sh @@ -8,13 +8,33 @@ # .gitlab-ci/image-tags.yml tags: # KERNEL_ROOTFS_TAG +set -uex + +uncollapsed_section_start skqp "Building skqp" + SKQP_BRANCH=android-cts-12.1_r5 -# hack for skqp see the clang -pushd /usr/bin/ -ln -s ../lib/llvm-15/bin/clang clang -ln -s ../lib/llvm-15/bin/clang++ clang++ -popd +SCRIPT_DIR="$(pwd)/.gitlab-ci/container" +SKQP_PATCH_DIR="${SCRIPT_DIR}/patches" +BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn" + +case "$DEBIAN_ARCH" in + amd64) + SKQP_ARCH=x64 + ;; + armhf) + SKQP_ARCH=arm + ;; + arm64) + SKQP_ARCH=arm64 + ;; +esac + +SKIA_DIR=${SKIA_DIR:-$(mktemp -d)} +SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH} +SKQP_INSTALL_DIR=${SKQP_INSTALL_DIR:-/skqp} +SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets" +SKQP_BINARIES=(skqp list_gpu_unit_tests list_gms) create_gn_args() { # gn can be configured to cross-compile skia and its tools @@ -38,19 +58,6 @@ download_skia_source() { git clone --branch "${SKQP_BRANCH}" --depth 1 "${SKQP_REPO}" "${SKIA_DIR}" } -set -ex - -SCRIPT_DIR=$(realpath "$(dirname "$0")") 
-SKQP_PATCH_DIR="${SCRIPT_DIR}/patches" -BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn" - -SKQP_ARCH=${SKQP_ARCH:-x64} -SKIA_DIR=${SKIA_DIR:-$(mktemp -d)} -SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH} -SKQP_INSTALL_DIR=${SKQP_INSTALL_DIR:-/skqp} -SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets" -SKQP_BINARIES=(skqp list_gpu_unit_tests list_gms) - download_skia_source pushd "${SKIA_DIR}" @@ -59,6 +66,12 @@ pushd "${SKIA_DIR}" cat "${SKQP_PATCH_DIR}"/build-skqp_*.patch | patch -p1 +# hack for skqp see the clang +pushd /usr/bin/ +ln -s "../lib/llvm-${LLVM_VERSION:-15}/bin/clang" clang +ln -s "../lib/llvm-${LLVM_VERSION:-15}/bin/clang++" clang++ +popd + # Fetch some needed build tools needed to build skia/skqp. # Basically, it clones repositories with commits SHAs from ${SKIA_DIR}/DEPS # directory. @@ -87,3 +100,5 @@ popd rm -Rf "${SKIA_DIR}" set +ex + +section_end skqp diff --git a/mesalib/.gitlab-ci/container/build-va-tools.sh b/mesalib/.gitlab-ci/container/build-va-tools.sh index 5d28b47f98..0795c3f958 100644 --- a/mesalib/.gitlab-ci/container/build-va-tools.sh +++ b/mesalib/.gitlab-ci/container/build-va-tools.sh @@ -4,7 +4,9 @@ # .gitlab-ci/image-tags.yml tags: # KERNEL_ROOTFS_TAG -set -ex +set -uex + +uncollapsed_section_start va-tools "Building va-tools" git config --global user.email "mesa@example.com" git config --global user.name "Mesa CI" @@ -19,7 +21,9 @@ pushd /va-utils # Too old libva in Debian 11. TODO: when this PR gets in, refer to the patch. curl -L https://github.com/intel/libva-utils/pull/329.patch | git am -meson setup build -D tests=true -Dprefix=/va $EXTRA_MESON_ARGS +meson setup build -D tests=true -Dprefix=/va ${EXTRA_MESON_ARGS:-} meson install -C build popd rm -rf /va-utils + +section_end va-tools diff --git a/mesalib/.gitlab-ci/container/build-vkd3d-proton.sh b/mesalib/.gitlab-ci/container/build-vkd3d-proton.sh index b4b1788304..c332e7235f 100644 --- a/mesalib/.gitlab-ci/container/build-vkd3d-proton.sh +++ b/mesalib/.gitlab-ci/container/build-vkd3d-proton.sh @@ -2,21 +2,22 @@ # When changing this file, you need to bump the following # .gitlab-ci/image-tags.yml tags: -# DEBIAN_X86_64_TEST_VK_TAG +# DEBIAN_TEST_VK_TAG # KERNEL_ROOTFS_TAG set -ex -VKD3D_PROTON_COMMIT="c3b385606a93baed42482d822805e0d9c2f3f603" +uncollapsed_section_start vkd3d-proton "Building vkd3d-proton" + +VKD3D_PROTON_COMMIT="b121e6d746341e0aaba7663e3d85f3194e8e20e1" VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests" VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src" -VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-$VKD3D_PROTON_VERSION" +VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-build" function build_arch { local arch="$1" - shift - meson "$@" \ + meson setup \ -Denable_tests=true \ --buildtype release \ --prefix "$VKD3D_PROTON_DST_DIR" \ @@ -37,7 +38,14 @@ git submodule update --init --recursive git submodule update --recursive build_arch 64 build_arch 86 +mkdir "$VKD3D_PROTON_DST_DIR/tests" +cp \ + "tests/test-runner.sh" \ + "tests/d3d12_tests.h" \ + "$VKD3D_PROTON_DST_DIR/tests/" popd rm -rf "$VKD3D_PROTON_BUILD_DIR" rm -rf "$VKD3D_PROTON_SRC_DIR" + +section_end vkd3d-proton diff --git a/mesalib/.gitlab-ci/container/build-vulkan-validation.sh b/mesalib/.gitlab-ci/container/build-vulkan-validation.sh index bdadf2ae19..35c3bd30d0 100644 --- a/mesalib/.gitlab-ci/container/build-vulkan-validation.sh +++ b/mesalib/.gitlab-ci/container/build-vulkan-validation.sh @@ -2,17 +2,22 @@ # When changing this file, you need to bump the following # .gitlab-ci/image-tags.yml tags: -# DEBIAN_X86_64_TEST_GL_TAG -# KERNEL_ROOTFS_TAG: +# 
DEBIAN_TEST_GL_TAG +# KERNEL_ROOTFS_TAG -set -ex +set -uex -VALIDATION_TAG="v1.3.285" +uncollapsed_section_start vulkan-validation "Building Vulkan validation layers" + +VALIDATION_TAG="snapshot-2024wk39" git clone -b "$VALIDATION_TAG" --single-branch --depth 1 https://github.com/KhronosGroup/Vulkan-ValidationLayers.git pushd Vulkan-ValidationLayers -python3 scripts/update_deps.py --dir external --config debug +python3 scripts/update_deps.py --dir external --config release --generator Ninja cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_TESTS=OFF -DBUILD_WERROR=OFF -C external/helper.cmake -S . -B build -ninja -C build install +ninja -C build +cmake --install build --strip popd rm -rf Vulkan-ValidationLayers + +section_end vulkan-validation diff --git a/mesalib/.gitlab-ci/container/build-wayland.sh b/mesalib/.gitlab-ci/container/build-wayland.sh index da9089a435..d6117254e8 100644 --- a/mesalib/.gitlab-ci/container/build-wayland.sh +++ b/mesalib/.gitlab-ci/container/build-wayland.sh @@ -1,24 +1,26 @@ #!/usr/bin/env bash # shellcheck disable=SC2086 # we want word splitting -set -ex +set -uex + +uncollapsed_section_start wayland "Building Wayland" # When changing this file, you need to bump the following # .gitlab-ci/image-tags.yml tags: # DEBIAN_BUILD_TAG -# DEBIAN_X86_64_TEST_ANDROID_TAG -# DEBIAN_X86_64_TEST_GL_TAG -# DEBIAN_X86_64_TEST_VK_TAG +# DEBIAN_TEST_ANDROID_TAG +# DEBIAN_TEST_GL_TAG +# DEBIAN_TEST_VK_TAG # FEDORA_X86_64_BUILD_TAG # KERNEL_ROOTFS_TAG export LIBWAYLAND_VERSION="1.21.0" -export WAYLAND_PROTOCOLS_VERSION="1.34" +export WAYLAND_PROTOCOLS_VERSION="1.38" git clone https://gitlab.freedesktop.org/wayland/wayland cd wayland git checkout "$LIBWAYLAND_VERSION" -meson setup -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build $EXTRA_MESON_ARGS +meson setup -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build ${EXTRA_MESON_ARGS:-} meson install -C _build cd .. rm -rf wayland @@ -26,7 +28,9 @@ rm -rf wayland git clone https://gitlab.freedesktop.org/wayland/wayland-protocols cd wayland-protocols git checkout "$WAYLAND_PROTOCOLS_VERSION" -meson setup _build $EXTRA_MESON_ARGS +meson setup -Dtests=false _build ${EXTRA_MESON_ARGS:-} meson install -C _build cd .. rm -rf wayland-protocols + +section_end wayland diff --git a/mesalib/.gitlab-ci/container/container_job_trampoline.sh b/mesalib/.gitlab-ci/container/container_job_trampoline.sh new file mode 100644 index 0000000000..3edbffab4d --- /dev/null +++ b/mesalib/.gitlab-ci/container/container_job_trampoline.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# When changing this file, check if the *_BUILD_TAG tags in +# .gitlab-ci/image-tags.yml need updating.
+ + +set -eu + +# Early check for required env variables, relies on `set -u` +: "$S3_JWT_FILE_SCRIPT" + +if [ -z "$1" ]; then + echo "usage: $(basename "$0") <container_ci_job_name>" 1>&2 + exit 1 +fi + +CONTAINER_CI_JOB_NAME="$1" + +# Tasks to perform before executing the script of a container job +eval "$S3_JWT_FILE_SCRIPT" +unset S3_JWT_FILE_SCRIPT + +trap 'rm -f ${S3_JWT_FILE}' EXIT INT TERM + +bash ".gitlab-ci/container/${CONTAINER_CI_JOB_NAME}.sh" diff --git a/mesalib/.gitlab-ci/container/container_pre_build.sh b/mesalib/.gitlab-ci/container/container_pre_build.sh index a036ef9f39..b4238a5b45 100644 --- a/mesalib/.gitlab-ci/container/container_pre_build.sh +++ b/mesalib/.gitlab-ci/container/container_pre_build.sh @@ -13,8 +13,8 @@ if test -x /usr/bin/ccache; then export CCACHE_COMPILERCHECK=content export CCACHE_COMPRESS=true - export CCACHE_DIR=/cache/$CI_PROJECT_NAME/ccache - export PATH=$CCACHE_PATH:$PATH + export CCACHE_DIR="/cache/$CI_PROJECT_NAME/ccache" + export PATH="$CCACHE_PATH:$PATH" # CMake ignores $PATH, so we have to force CC/GCC to the ccache versions. export CC="${CCACHE_PATH}/gcc" @@ -23,14 +23,6 @@ if test -x /usr/bin/ccache; then ccache --show-stats fi -# When not using the mold linker (e.g. unsupported architecture), force -# linkers to gold, since it's so much faster for building. We can't use -# lld because we're on old debian and it's buggy. mingw fails meson builds -# with it with "meson.build:21:0: ERROR: Unable to determine dynamic linker" -find /usr/bin -name \*-ld -o -name ld | \ - grep -v mingw | \ - xargs -n 1 -I '{}' ln -sf '{}.gold' '{}' - # Make a wrapper script for ninja to always include the -j flags { echo '#!/bin/sh -x' diff --git a/mesalib/.gitlab-ci/container/create-android-cross-file.sh b/mesalib/.gitlab-ci/container/create-android-cross-file.sh index 3064a487c0..d815a7e1cf 100644 --- a/mesalib/.gitlab-ci/container/create-android-cross-file.sh +++ b/mesalib/.gitlab-ci/container/create-android-cross-file.sh @@ -18,11 +18,11 @@ cat > "$cross_file" < +Date: Tue, 15 Oct 2024 16:02:26 +0100 +Subject: [PATCH] deps: Make more sources conditional + +Fetching all the dependent sources - including at least one copy of LLVM +- can take a surprising amount of time. Mesa needs to build ANGLE as +part of CI, and the cost of downloading all the sources all of the time +is not OK for the number of dependencies we don't need during the build.
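As background for reviewers: the vars this patch introduces are ordinary gclient variables, so a consumer can override them through the custom_vars of its .gclient file to prune the heavyweight checkouts. A minimal sketch, assuming a standalone ANGLE checkout; the solution URL and the particular variables switched off below are illustrative, not taken from Mesa's CI:

# Hypothetical .gclient that skips the checkouts gated by this patch:
# together these three overrides drop SwiftShader, LLVM, VK-GL-CTS and clspv.
cat > .gclient <<'EOF'
solutions = [{
    "name": ".",
    "url": "https://chromium.googlesource.com/angle/angle",
    "custom_vars": {
        "build_with_swiftshader": False,
        "build_angle_deqp_tests": False,
        "angle_enable_cl": False,
    },
}]
EOF
gclient sync --no-history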
+--- + DEPS | 33 +++++++++++++++++++++++---------- + 1 file changed, 23 insertions(+), 10 deletions(-) + +Submitted upstream at: +https://chromium-review.googlesource.com/c/angle/angle/+/5937820 + +diff --git a/DEPS b/DEPS +index 61263fb7af..0cff8c3126 100644 +--- a/DEPS ++++ b/DEPS +@@ -17,6 +17,17 @@ gclient_gn_args = [ + ] + + vars = { ++ 'angle_enable_cl': True, ++ 'angle_enable_cl_testing': False, ++ 'angle_enable_vulkan': True, ++ 'angle_enable_vulkan_validation_layers': True, ++ 'angle_enable_wgpu': True, ++ 'build_angle_deqp_tests': True, ++ 'build_angle_perftests': True, ++ 'build_with_swiftshader': True, ++ 'use_custom_libcxx': True, ++ 'export_libcxxapi_from_executables': True, ++ + 'android_git': 'https://android.googlesource.com', + 'chromium_git': 'https://chromium.googlesource.com', + 'chrome_internal_git': 'https://chrome-internal.googlesource.com', +@@ -673,7 +684,7 @@ deps = { + + 'third_party/catapult': { + 'url': Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'), +- 'condition': 'not build_with_chromium', ++ 'condition': 'build_with_catapult and not build_with_chromium', + }, + + # Cherry is a dEQP/VK-GL-CTS management GUI written in Go. We use it for viewing test results. +@@ -689,7 +700,7 @@ deps = { + + 'third_party/clspv/src': { + 'url': Var('chromium_git') + '/external/github.com/google/clspv@a173c052455434a422bcfe5c12ffe44d574fd6e1', +- 'condition': 'not build_with_chromium', ++ 'condition': 'angle_enable_cl and angle_enable_vulkan and not build_with_chromium', + }, + + 'third_party/cpu_features/src': { +@@ -700,7 +711,7 @@ deps = { + + 'third_party/dawn': { + 'url': Var('dawn_git') + '/dawn.git' + '@' + Var('dawn_revision'), +- 'condition': 'not build_with_chromium' ++ 'condition': 'angle_enable_wgpu and not build_with_chromium' + }, + + 'third_party/depot_tools': { +@@ -745,6 +756,7 @@ deps = { + # glmark2 is a GPL3-licensed OpenGL ES 2.0 benchmark. We use it for testing. + 'third_party/glmark2/src': { + 'url': Var('chromium_git') + '/external/github.com/glmark2/glmark2@ca8de51fedb70bace5351c6b002eb952c747e889', ++ 'condition': 'build_angle_perftests', + }, + + 'third_party/googletest': { +@@ -777,7 +789,7 @@ deps = { + # libjpeg_turbo is used by glmark2. 
+ 'third_party/libjpeg_turbo': { + 'url': Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git@927aabfcd26897abb9776ecf2a6c38ea5bb52ab6', +- 'condition': 'not build_with_chromium', ++ 'condition': 'build_angle_perftests and not build_with_chromium', + }, + + 'third_party/libpng/src': { +@@ -787,7 +799,7 @@ deps = { + + 'third_party/llvm/src': { + 'url': Var('chromium_git') + '/external/github.com/llvm/llvm-project@d222fa4521531cc4ac14b8e157d231c108c003be', +- 'condition': 'not build_with_chromium', ++ 'condition': '(build_with_swiftshader or (angle_enable_cl and angle_enable_vulkan)) and not build_with_chromium', + }, + + 'third_party/jdk': { +@@ -824,12 +836,12 @@ deps = { + + 'third_party/libc++/src': { + 'url': Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxx.git@6a68fd412b9aecd515a20a7cf84d11b598bfaf96', +- 'condition': 'not build_with_chromium', ++ 'condition': 'use_custom_libcxx and not build_with_chromium', + }, + + 'third_party/libc++abi/src': { + 'url': Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxxabi.git@9a1d90c3b412d5ebeb97a6e33d98e1d0dd923221', +- 'condition': 'not build_with_chromium', ++ 'condition': 'export_libcxxapi_from_executables and not build_with_chromium', + }, + + 'third_party/libunwind/src': { +@@ -872,7 +884,7 @@ deps = { + + 'third_party/OpenCL-CTS/src': { + 'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenCL-CTS@e0a31a03fc8f816d59fd8b3051ac6a61d3fa50c6', +- 'condition': 'not build_with_chromium', ++ 'condition': 'angle_enable_cl_testing and not build_with_chromium', + }, + + 'third_party/OpenCL-Docs/src': { +@@ -968,7 +980,7 @@ deps = { + + 'third_party/SwiftShader': { + 'url': Var('swiftshader_git') + '/SwiftShader@7a9a492a38b7c701f7c96a15a76046aed8f8c0c3', +- 'condition': 'not build_with_chromium', ++ 'condition': 'build_with_swiftshader and not build_with_chromium', + }, + + 'third_party/turbine/cipd': { +@@ -984,6 +996,7 @@ deps = { + + 'third_party/VK-GL-CTS/src': { + 'url': Var('chromium_git') + '/external/github.com/KhronosGroup/VK-GL-CTS' + '@' + Var('vk_gl_cts_revision'), ++ 'condition': 'build_angle_deqp_tests', + }, + + 'third_party/vulkan-deps': { +@@ -1038,7 +1051,7 @@ deps = { + + 'third_party/vulkan-validation-layers/src': { + 'url': '{chromium_git}/external/github.com/KhronosGroup/Vulkan-ValidationLayers@b63e9bd51fbd7bf8fea161a4f7c06994abc24b75', +- 'condition': 'not build_with_chromium', ++ 'condition': 'angle_enable_vulkan_validation_layers and not build_with_chromium', + }, + + 'third_party/vulkan_memory_allocator': { +-- +2.46.2 + diff --git a/mesalib/.gitlab-ci/container/patches/build-deqp-egl_Correct-EGL_EXT_config_select_group-extension-query.patch b/mesalib/.gitlab-ci/container/patches/build-deqp-egl_Correct-EGL_EXT_config_select_group-extension-query.patch deleted file mode 100644 index 7839b0e558..0000000000 --- a/mesalib/.gitlab-ci/container/patches/build-deqp-egl_Correct-EGL_EXT_config_select_group-extension-query.patch +++ /dev/null @@ -1,45 +0,0 @@ -From cab41ed387c66a5e7f3454c547fc9ea53587ec1e Mon Sep 17 00:00:00 2001 -From: David Heidelberg -Date: Thu, 9 May 2024 14:08:59 -0700 -Subject: [PATCH] Correct EGL_EXT_config_select_group extension query - -EGL_EXT_config_select_group is a display extension, -not a client extension. 
- -Affects: -dEQP-EGL.functional.choose_config.simple.selection_and_sort.* - -Ref: https://github.com/KhronosGroup/EGL-Registry/pull/199 - -Fixes: 88ba9ac270db ("Implement support for the EGL_EXT_config_select_group extension") - -Change-Id: I38956511bdcb8e99d585ea9b99aeab53da0457e2 -Signed-off-by: David Heidelberg ---- - framework/egl/egluConfigInfo.cpp | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/framework/egl/egluConfigInfo.cpp b/framework/egl/egluConfigInfo.cpp -index 88c30fd65..10936055a 100644 ---- a/framework/egl/egluConfigInfo.cpp -+++ b/framework/egl/egluConfigInfo.cpp -@@ -129,7 +129,6 @@ void queryCoreConfigInfo (const Library& egl, EGLDisplay display, EGLConfig conf - void queryExtConfigInfo (const eglw::Library& egl, eglw::EGLDisplay display, eglw::EGLConfig config, ConfigInfo* dst) - { - const std::vector extensions = getDisplayExtensions(egl, display); -- const std::vector clientExtensions = getClientExtensions(egl); - - if (de::contains(extensions.begin(), extensions.end(), "EGL_EXT_yuv_surface")) - { -@@ -159,7 +158,7 @@ void queryExtConfigInfo (const eglw::Library& egl, eglw::EGLDisplay display, egl - else - dst->colorComponentType = EGL_COLOR_COMPONENT_TYPE_FIXED_EXT; - -- if (de::contains(clientExtensions.begin(), clientExtensions.end(), "EGL_EXT_config_select_group")) -+ if (hasExtension(egl, display, "EGL_EXT_config_select_group")) - { - egl.getConfigAttrib(display, config, EGL_CONFIG_SELECT_GROUP_EXT, (EGLint*)&dst->groupId); - --- -2.43.0 - diff --git a/mesalib/.gitlab-ci/container/patches/build-deqp-gl_Allow-running-on-Android-from-the-command-line.patch b/mesalib/.gitlab-ci/container/patches/build-deqp-gl_Allow-running-on-Android-from-the-command-line.patch index 9431941b73..5077c87b5f 100644 --- a/mesalib/.gitlab-ci/container/patches/build-deqp-gl_Allow-running-on-Android-from-the-command-line.patch +++ b/mesalib/.gitlab-ci/container/patches/build-deqp-gl_Allow-running-on-Android-from-the-command-line.patch @@ -1,4 +1,4 @@ -From dc97ee83a813f6b170079ddf2a04bbb06221a5a7 Mon Sep 17 00:00:00 2001 +From 6250d347d15502e3b45769edba57ae244e20fb92 Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Fri, 26 Aug 2022 18:24:27 +0200 Subject: [PATCH 1/2] Allow running on Android from the command line @@ -20,10 +20,10 @@ Signed-off-by: Tomeu Vizoso 3 files changed, 34 insertions(+), 52 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt -index eb58cc7ba..98b8fc6cc 100644 +index 309bdda18..7c833751f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -272,7 +272,7 @@ include_directories( +@@ -275,7 +275,7 @@ include_directories( external/vulkancts/framework/vulkan ) @@ -32,7 +32,7 @@ index eb58cc7ba..98b8fc6cc 100644 # On Android deqp modules are compiled as libraries and linked into final .so set(DEQP_MODULE_LIBRARIES ) set(DEQP_MODULE_ENTRY_POINTS ) -@@ -316,7 +316,7 @@ macro (add_deqp_module MODULE_NAME SRCS LIBS EXECLIBS ENTRY) +@@ -319,7 +319,7 @@ macro (add_deqp_module MODULE_NAME SRCS LIBS EXECLIBS ENTRY) set(DEQP_MODULE_LIBRARIES ${DEQP_MODULE_LIBRARIES} PARENT_SCOPE) set(DEQP_MODULE_ENTRY_POINTS ${DEQP_MODULE_ENTRY_POINTS} PARENT_SCOPE) @@ -41,7 +41,7 @@ index eb58cc7ba..98b8fc6cc 100644 # Executable target add_executable(${MODULE_NAME} ${PROJECT_SOURCE_DIR}/framework/platform/tcuMain.cpp ${ENTRY}) target_link_libraries(${MODULE_NAME} PUBLIC "${EXECLIBS}" "${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}") -@@ -338,7 +338,7 @@ macro (add_deqp_module_skip_android MODULE_NAME SRCS LIBS EXECLIBS ENTRY) +@@ -341,7 +341,7 @@ macro 
(add_deqp_module_skip_android MODULE_NAME SRCS LIBS EXECLIBS ENTRY) add_library("${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}" STATIC ${SRCS}) target_link_libraries("${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}" ${LIBS}) @@ -50,7 +50,7 @@ index eb58cc7ba..98b8fc6cc 100644 # Executable target add_executable(${MODULE_NAME} ${PROJECT_SOURCE_DIR}/framework/platform/tcuMain.cpp ${ENTRY}) target_link_libraries(${MODULE_NAME} PUBLIC "${EXECLIBS}" "${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}") -@@ -390,37 +390,7 @@ add_subdirectory(external/vulkancts/vkscpc ${MAYBE_EXCLUDE_FROM_ALL}) +@@ -393,37 +393,7 @@ add_subdirectory(external/vulkancts/vkscpc ${MAYBE_EXCLUDE_FROM_ALL}) add_subdirectory(external/openglcts ${MAYBE_EXCLUDE_FROM_ALL}) # Single-binary targets @@ -90,54 +90,54 @@ index eb58cc7ba..98b8fc6cc 100644 set(DEQP_IOS_CODE_SIGN_IDENTITY "drawElements" CACHE STRING "Code sign identity for iOS build") diff --git a/framework/platform/android/tcuAndroidNativeActivity.cpp b/framework/platform/android/tcuAndroidNativeActivity.cpp -index 6f8cd8fc5..b83e30f41 100644 +index 82a9ab699..4eab14a39 100644 --- a/framework/platform/android/tcuAndroidNativeActivity.cpp +++ b/framework/platform/android/tcuAndroidNativeActivity.cpp -@@ -116,23 +116,25 @@ namespace Android - NativeActivity::NativeActivity (ANativeActivity* activity) - : m_activity(activity) +@@ -115,23 +115,25 @@ namespace Android + + NativeActivity::NativeActivity(ANativeActivity *activity) : m_activity(activity) { -- activity->instance = (void*)this; -- activity->callbacks->onStart = onStartCallback; -- activity->callbacks->onResume = onResumeCallback; -- activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback; -- activity->callbacks->onPause = onPauseCallback; -- activity->callbacks->onStop = onStopCallback; -- activity->callbacks->onDestroy = onDestroyCallback; -- activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback; -- activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback; -- activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback; -- activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback; -- activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback; -- activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback; -- activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback; -- activity->callbacks->onContentRectChanged = onContentRectChangedCallback; -- activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback; -- activity->callbacks->onLowMemory = onLowMemoryCallback; -+ if (activity) { -+ activity->instance = (void*)this; -+ activity->callbacks->onStart = onStartCallback; -+ activity->callbacks->onResume = onResumeCallback; -+ activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback; -+ activity->callbacks->onPause = onPauseCallback; -+ activity->callbacks->onStop = onStopCallback; -+ activity->callbacks->onDestroy = onDestroyCallback; -+ activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback; -+ activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback; -+ activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback; -+ activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback; -+ activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback; -+ activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback; -+ activity->callbacks->onInputQueueDestroyed = 
onInputQueueDestroyedCallback; -+ activity->callbacks->onContentRectChanged = onContentRectChangedCallback; -+ activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback; -+ activity->callbacks->onLowMemory = onLowMemoryCallback; -+ } +- activity->instance = (void *)this; +- activity->callbacks->onStart = onStartCallback; +- activity->callbacks->onResume = onResumeCallback; +- activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback; +- activity->callbacks->onPause = onPauseCallback; +- activity->callbacks->onStop = onStopCallback; +- activity->callbacks->onDestroy = onDestroyCallback; +- activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback; +- activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback; +- activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback; +- activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback; +- activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback; +- activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback; +- activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback; +- activity->callbacks->onContentRectChanged = onContentRectChangedCallback; +- activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback; +- activity->callbacks->onLowMemory = onLowMemoryCallback; ++ if (activity) { ++ activity->instance = (void *)this; ++ activity->callbacks->onStart = onStartCallback; ++ activity->callbacks->onResume = onResumeCallback; ++ activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback; ++ activity->callbacks->onPause = onPauseCallback; ++ activity->callbacks->onStop = onStopCallback; ++ activity->callbacks->onDestroy = onDestroyCallback; ++ activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback; ++ activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback; ++ activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback; ++ activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback; ++ activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback; ++ activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback; ++ activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback; ++ activity->callbacks->onContentRectChanged = onContentRectChangedCallback; ++ activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback; ++ activity->callbacks->onLowMemory = onLowMemoryCallback; ++ } } - NativeActivity::~NativeActivity (void) + NativeActivity::~NativeActivity(void) diff --git a/framework/platform/android/tcuAndroidPlatform.cpp b/framework/platform/android/tcuAndroidPlatform.cpp -index b8a35898c..cf02e6b70 100644 +index 0472fa615..ff8929409 100644 --- a/framework/platform/android/tcuAndroidPlatform.cpp +++ b/framework/platform/android/tcuAndroidPlatform.cpp @@ -22,6 +22,7 @@ @@ -148,35 +148,35 @@ index b8a35898c..cf02e6b70 100644 #include "tcuAndroidUtil.hpp" #include "gluRenderContext.hpp" #include "egluNativeDisplay.hpp" -@@ -170,7 +171,7 @@ eglu::NativeWindow* NativeWindowFactory::createWindow (const eglu::WindowParams& - Window* window = m_windowRegistry.tryAcquireWindow(); +@@ -198,7 +199,7 @@ eglu::NativeWindow *NativeWindowFactory::createWindow(const eglu::WindowParams & + Window *window = m_windowRegistry.tryAcquireWindow(); - if (!window) -- throw ResourceError("Native window is not available", DE_NULL, __FILE__, __LINE__); -+ throw 
NotSupportedError("Native window is not available", DE_NULL, __FILE__, __LINE__); + if (!window) +- throw ResourceError("Native window is not available", DE_NULL, __FILE__, __LINE__); ++ throw NotSupportedError("Native window is not available", DE_NULL, __FILE__, __LINE__); - return new NativeWindow(window, params.width, params.height, format); + return new NativeWindow(window, params.width, params.height, format); } -@@ -292,6 +293,9 @@ static size_t getTotalSystemMemory (ANativeActivity* activity) +@@ -319,6 +320,9 @@ static size_t getTotalSystemMemory(ANativeActivity *activity) - try - { -+ if (!activity) -+ throw tcu::InternalError("No activity (running from command line?"); + try + { ++ if (!activity) ++ throw tcu::InternalError("No activity (running from command line?"); + - const size_t totalMemory = getTotalAndroidSystemMemory(activity); - print("Device has %.2f MiB of system memory\n", static_cast(totalMemory) / static_cast(MiB)); - return totalMemory; -@@ -388,3 +392,9 @@ bool Platform::hasDisplay (vk::wsi::Type wsiType) const + const size_t totalMemory = getTotalAndroidSystemMemory(activity); + print("Device has %.2f MiB of system memory\n", static_cast(totalMemory) / static_cast(MiB)); + return totalMemory; +@@ -416,3 +420,9 @@ bool Platform::hasDisplay(vk::wsi::Type wsiType) const - } // Android - } // tcu + } // namespace Android + } // namespace tcu + +tcu::Platform* createPlatform (void) +{ -+ tcu::Android::NativeActivity activity(NULL); -+ return new tcu::Android::Platform(activity); ++ tcu::Android::NativeActivity activity(NULL); ++ return new tcu::Android::Platform(activity); +} -- -2.42.0 +2.45.2 diff --git a/mesalib/.gitlab-ci/container/patches/build-deqp-gles_Allow-running-on-Android-from-the-command-line.patch b/mesalib/.gitlab-ci/container/patches/build-deqp-gles_Allow-running-on-Android-from-the-command-line.patch index 9431941b73..09b9921364 100644 --- a/mesalib/.gitlab-ci/container/patches/build-deqp-gles_Allow-running-on-Android-from-the-command-line.patch +++ b/mesalib/.gitlab-ci/container/patches/build-deqp-gles_Allow-running-on-Android-from-the-command-line.patch @@ -1,4 +1,4 @@ -From dc97ee83a813f6b170079ddf2a04bbb06221a5a7 Mon Sep 17 00:00:00 2001 +From 2503bf1be98eaa810672f6a6eb8f735a33511a4f Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Fri, 26 Aug 2022 18:24:27 +0200 Subject: [PATCH 1/2] Allow running on Android from the command line @@ -20,10 +20,10 @@ Signed-off-by: Tomeu Vizoso 3 files changed, 34 insertions(+), 52 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt -index eb58cc7ba..98b8fc6cc 100644 +index 309bdda18..7c833751f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -272,7 +272,7 @@ include_directories( +@@ -275,7 +275,7 @@ include_directories( external/vulkancts/framework/vulkan ) @@ -32,7 +32,7 @@ index eb58cc7ba..98b8fc6cc 100644 # On Android deqp modules are compiled as libraries and linked into final .so set(DEQP_MODULE_LIBRARIES ) set(DEQP_MODULE_ENTRY_POINTS ) -@@ -316,7 +316,7 @@ macro (add_deqp_module MODULE_NAME SRCS LIBS EXECLIBS ENTRY) +@@ -319,7 +319,7 @@ macro (add_deqp_module MODULE_NAME SRCS LIBS EXECLIBS ENTRY) set(DEQP_MODULE_LIBRARIES ${DEQP_MODULE_LIBRARIES} PARENT_SCOPE) set(DEQP_MODULE_ENTRY_POINTS ${DEQP_MODULE_ENTRY_POINTS} PARENT_SCOPE) @@ -41,7 +41,7 @@ index eb58cc7ba..98b8fc6cc 100644 # Executable target add_executable(${MODULE_NAME} ${PROJECT_SOURCE_DIR}/framework/platform/tcuMain.cpp ${ENTRY}) target_link_libraries(${MODULE_NAME} PUBLIC "${EXECLIBS}" "${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}") 
-@@ -338,7 +338,7 @@ macro (add_deqp_module_skip_android MODULE_NAME SRCS LIBS EXECLIBS ENTRY) +@@ -341,7 +341,7 @@ macro (add_deqp_module_skip_android MODULE_NAME SRCS LIBS EXECLIBS ENTRY) add_library("${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}" STATIC ${SRCS}) target_link_libraries("${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}" ${LIBS}) @@ -50,7 +50,7 @@ index eb58cc7ba..98b8fc6cc 100644 # Executable target add_executable(${MODULE_NAME} ${PROJECT_SOURCE_DIR}/framework/platform/tcuMain.cpp ${ENTRY}) target_link_libraries(${MODULE_NAME} PUBLIC "${EXECLIBS}" "${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}") -@@ -390,37 +390,7 @@ add_subdirectory(external/vulkancts/vkscpc ${MAYBE_EXCLUDE_FROM_ALL}) +@@ -393,37 +393,7 @@ add_subdirectory(external/vulkancts/vkscpc ${MAYBE_EXCLUDE_FROM_ALL}) add_subdirectory(external/openglcts ${MAYBE_EXCLUDE_FROM_ALL}) # Single-binary targets @@ -90,54 +90,54 @@ index eb58cc7ba..98b8fc6cc 100644 set(DEQP_IOS_CODE_SIGN_IDENTITY "drawElements" CACHE STRING "Code sign identity for iOS build") diff --git a/framework/platform/android/tcuAndroidNativeActivity.cpp b/framework/platform/android/tcuAndroidNativeActivity.cpp -index 6f8cd8fc5..b83e30f41 100644 +index 82a9ab699..4eab14a39 100644 --- a/framework/platform/android/tcuAndroidNativeActivity.cpp +++ b/framework/platform/android/tcuAndroidNativeActivity.cpp -@@ -116,23 +116,25 @@ namespace Android - NativeActivity::NativeActivity (ANativeActivity* activity) - : m_activity(activity) +@@ -115,23 +115,25 @@ namespace Android + + NativeActivity::NativeActivity(ANativeActivity *activity) : m_activity(activity) { -- activity->instance = (void*)this; -- activity->callbacks->onStart = onStartCallback; -- activity->callbacks->onResume = onResumeCallback; -- activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback; -- activity->callbacks->onPause = onPauseCallback; -- activity->callbacks->onStop = onStopCallback; -- activity->callbacks->onDestroy = onDestroyCallback; -- activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback; -- activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback; -- activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback; -- activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback; -- activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback; -- activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback; -- activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback; -- activity->callbacks->onContentRectChanged = onContentRectChangedCallback; -- activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback; -- activity->callbacks->onLowMemory = onLowMemoryCallback; -+ if (activity) { -+ activity->instance = (void*)this; -+ activity->callbacks->onStart = onStartCallback; -+ activity->callbacks->onResume = onResumeCallback; -+ activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback; -+ activity->callbacks->onPause = onPauseCallback; -+ activity->callbacks->onStop = onStopCallback; -+ activity->callbacks->onDestroy = onDestroyCallback; -+ activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback; -+ activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback; -+ activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback; -+ activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback; -+ activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback; -+ 
activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback; -+ activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback; -+ activity->callbacks->onContentRectChanged = onContentRectChangedCallback; -+ activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback; -+ activity->callbacks->onLowMemory = onLowMemoryCallback; -+ } +- activity->instance = (void *)this; +- activity->callbacks->onStart = onStartCallback; +- activity->callbacks->onResume = onResumeCallback; +- activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback; +- activity->callbacks->onPause = onPauseCallback; +- activity->callbacks->onStop = onStopCallback; +- activity->callbacks->onDestroy = onDestroyCallback; +- activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback; +- activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback; +- activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback; +- activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback; +- activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback; +- activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback; +- activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback; +- activity->callbacks->onContentRectChanged = onContentRectChangedCallback; +- activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback; +- activity->callbacks->onLowMemory = onLowMemoryCallback; ++ if (activity) { ++ activity->instance = (void *)this; ++ activity->callbacks->onStart = onStartCallback; ++ activity->callbacks->onResume = onResumeCallback; ++ activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback; ++ activity->callbacks->onPause = onPauseCallback; ++ activity->callbacks->onStop = onStopCallback; ++ activity->callbacks->onDestroy = onDestroyCallback; ++ activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback; ++ activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback; ++ activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback; ++ activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback; ++ activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback; ++ activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback; ++ activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback; ++ activity->callbacks->onContentRectChanged = onContentRectChangedCallback; ++ activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback; ++ activity->callbacks->onLowMemory = onLowMemoryCallback; ++ } } - NativeActivity::~NativeActivity (void) + NativeActivity::~NativeActivity(void) diff --git a/framework/platform/android/tcuAndroidPlatform.cpp b/framework/platform/android/tcuAndroidPlatform.cpp -index b8a35898c..cf02e6b70 100644 +index 0472fa615..ff8929409 100644 --- a/framework/platform/android/tcuAndroidPlatform.cpp +++ b/framework/platform/android/tcuAndroidPlatform.cpp @@ -22,6 +22,7 @@ @@ -148,35 +148,35 @@ index b8a35898c..cf02e6b70 100644 #include "tcuAndroidUtil.hpp" #include "gluRenderContext.hpp" #include "egluNativeDisplay.hpp" -@@ -170,7 +171,7 @@ eglu::NativeWindow* NativeWindowFactory::createWindow (const eglu::WindowParams& - Window* window = m_windowRegistry.tryAcquireWindow(); +@@ -198,7 +199,7 @@ eglu::NativeWindow *NativeWindowFactory::createWindow(const eglu::WindowParams & + Window *window = 
m_windowRegistry.tryAcquireWindow(); - if (!window) -- throw ResourceError("Native window is not available", DE_NULL, __FILE__, __LINE__); -+ throw NotSupportedError("Native window is not available", DE_NULL, __FILE__, __LINE__); + if (!window) +- throw ResourceError("Native window is not available", DE_NULL, __FILE__, __LINE__); ++ throw NotSupportedError("Native window is not available", DE_NULL, __FILE__, __LINE__); - return new NativeWindow(window, params.width, params.height, format); + return new NativeWindow(window, params.width, params.height, format); } -@@ -292,6 +293,9 @@ static size_t getTotalSystemMemory (ANativeActivity* activity) +@@ -319,6 +320,9 @@ static size_t getTotalSystemMemory(ANativeActivity *activity) - try - { -+ if (!activity) -+ throw tcu::InternalError("No activity (running from command line?"); + try + { ++ if (!activity) ++ throw tcu::InternalError("No activity (running from command line?"); + - const size_t totalMemory = getTotalAndroidSystemMemory(activity); - print("Device has %.2f MiB of system memory\n", static_cast(totalMemory) / static_cast(MiB)); - return totalMemory; -@@ -388,3 +392,9 @@ bool Platform::hasDisplay (vk::wsi::Type wsiType) const + const size_t totalMemory = getTotalAndroidSystemMemory(activity); + print("Device has %.2f MiB of system memory\n", static_cast(totalMemory) / static_cast(MiB)); + return totalMemory; +@@ -416,3 +420,9 @@ bool Platform::hasDisplay(vk::wsi::Type wsiType) const - } // Android - } // tcu + } // namespace Android + } // namespace tcu + +tcu::Platform* createPlatform (void) +{ -+ tcu::Android::NativeActivity activity(NULL); -+ return new tcu::Android::Platform(activity); ++ tcu::Android::NativeActivity activity(NULL); ++ return new tcu::Android::Platform(activity); +} -- -2.42.0 +2.45.2 diff --git a/mesalib/.gitlab-ci/container/patches/build-deqp-vk_Allow-running-on-Android-from-the-command-line.patch b/mesalib/.gitlab-ci/container/patches/build-deqp-vk_Allow-running-on-Android-from-the-command-line.patch deleted file mode 100644 index dda871c00f..0000000000 --- a/mesalib/.gitlab-ci/container/patches/build-deqp-vk_Allow-running-on-Android-from-the-command-line.patch +++ /dev/null @@ -1,173 +0,0 @@ -From dc97ee83a813f6b170079ddf2a04bbb06221a5a7 Mon Sep 17 00:00:00 2001 -From: Tomeu Vizoso -Date: Fri, 26 Aug 2022 18:24:27 +0200 -Subject: [PATCH 1/2] Allow running on Android from the command line - -For testing the Android EGL platform without having to go via the -Android activity manager, build deqp-egl. - -Tests that render to native windows are unsupported, as command line -programs cannot create windows on Android. - -$ cmake -S . 
-B build/ -DDEQP_TARGET=android -DDEQP_TARGET_TOOLCHAIN=ndk-modern -DCMAKE_C_FLAGS=-Werror -DCMAKE_CXX_FLAGS=-Werror -DANDROID_NDK_PATH=./android-ndk-r21d -DANDROID_ABI=x86_64 -DDE_ANDROID_API=28 -DGLCTS_GTF_TARGET=gles32 -G Ninja -$ ninja -C build modules/egl/deqp-egl - -Signed-off-by: Tomeu Vizoso ---- - CMakeLists.txt | 36 ++----------------- - .../android/tcuAndroidNativeActivity.cpp | 36 ++++++++++--------- - .../platform/android/tcuAndroidPlatform.cpp | 12 ++++++- - 3 files changed, 33 insertions(+), 51 deletions(-) - -diff --git a/CMakeLists.txt b/CMakeLists.txt -index f9c61d0db..d6ad2990b 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -272,7 +272,7 @@ include_directories( - external/vulkancts/framework/vulkan - ) - --if (DE_OS_IS_ANDROID OR DE_OS_IS_IOS) -+if (DE_OS_IS_IOS) - # On Android deqp modules are compiled as libraries and linked into final .so - set(DEQP_MODULE_LIBRARIES ) - set(DEQP_MODULE_ENTRY_POINTS ) -@@ -316,7 +316,7 @@ macro (add_deqp_module MODULE_NAME SRCS LIBS EXECLIBS ENTRY) - set(DEQP_MODULE_LIBRARIES ${DEQP_MODULE_LIBRARIES} PARENT_SCOPE) - set(DEQP_MODULE_ENTRY_POINTS ${DEQP_MODULE_ENTRY_POINTS} PARENT_SCOPE) - -- if (NOT DE_OS_IS_ANDROID AND NOT DE_OS_IS_IOS) -+ if (NOT DE_OS_IS_IOS) - # Executable target - add_executable(${MODULE_NAME} ${PROJECT_SOURCE_DIR}/framework/platform/tcuMain.cpp ${ENTRY}) - target_link_libraries(${MODULE_NAME} PUBLIC "${EXECLIBS}" "${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}") -@@ -390,37 +390,7 @@ add_subdirectory(external/vulkancts/vkscpc ${MAYBE_EXCLUDE_FROM_ALL}) - add_subdirectory(external/openglcts ${MAYBE_EXCLUDE_FROM_ALL}) - - # Single-binary targets --if (DE_OS_IS_ANDROID) -- include_directories(executor) -- include_directories(${PROJECT_BINARY_DIR}/external/vulkancts/framework/vulkan) -- -- set(DEQP_SRCS -- framework/platform/android/tcuAndroidMain.cpp -- framework/platform/android/tcuAndroidJNI.cpp -- framework/platform/android/tcuAndroidPlatformCapabilityQueryJNI.cpp -- framework/platform/android/tcuTestLogParserJNI.cpp -- ${DEQP_MODULE_ENTRY_POINTS} -- ) -- -- set(DEQP_LIBS -- tcutil-platform -- xecore -- ${DEQP_MODULE_LIBRARIES} -- ) -- -- add_library(deqp SHARED ${DEQP_SRCS}) -- target_link_libraries(deqp ${DEQP_LIBS}) -- -- # Separate out the debug information because it's enormous -- add_custom_command(TARGET deqp POST_BUILD -- COMMAND ${CMAKE_STRIP} --only-keep-debug -o $.debug $ -- COMMAND ${CMAKE_STRIP} -g $) -- -- # Needed by OpenGL CTS that defines its own activity but depends on -- # common Android support code. 
-- target_include_directories(deqp PRIVATE framework/platform/android) -- --elseif (DE_OS_IS_IOS) -+if (DE_OS_IS_IOS) - # Code sign identity - set(DEQP_IOS_CODE_SIGN_IDENTITY "drawElements" CACHE STRING "Code sign identity for iOS build") - -diff --git a/framework/platform/android/tcuAndroidNativeActivity.cpp b/framework/platform/android/tcuAndroidNativeActivity.cpp -index 6f8cd8fc5..b83e30f41 100644 ---- a/framework/platform/android/tcuAndroidNativeActivity.cpp -+++ b/framework/platform/android/tcuAndroidNativeActivity.cpp -@@ -116,23 +116,25 @@ namespace Android - NativeActivity::NativeActivity (ANativeActivity* activity) - : m_activity(activity) - { -- activity->instance = (void*)this; -- activity->callbacks->onStart = onStartCallback; -- activity->callbacks->onResume = onResumeCallback; -- activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback; -- activity->callbacks->onPause = onPauseCallback; -- activity->callbacks->onStop = onStopCallback; -- activity->callbacks->onDestroy = onDestroyCallback; -- activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback; -- activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback; -- activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback; -- activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback; -- activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback; -- activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback; -- activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback; -- activity->callbacks->onContentRectChanged = onContentRectChangedCallback; -- activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback; -- activity->callbacks->onLowMemory = onLowMemoryCallback; -+ if (activity) { -+ activity->instance = (void*)this; -+ activity->callbacks->onStart = onStartCallback; -+ activity->callbacks->onResume = onResumeCallback; -+ activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback; -+ activity->callbacks->onPause = onPauseCallback; -+ activity->callbacks->onStop = onStopCallback; -+ activity->callbacks->onDestroy = onDestroyCallback; -+ activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback; -+ activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback; -+ activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback; -+ activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback; -+ activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback; -+ activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback; -+ activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback; -+ activity->callbacks->onContentRectChanged = onContentRectChangedCallback; -+ activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback; -+ activity->callbacks->onLowMemory = onLowMemoryCallback; -+ } - } - - NativeActivity::~NativeActivity (void) -diff --git a/framework/platform/android/tcuAndroidPlatform.cpp b/framework/platform/android/tcuAndroidPlatform.cpp -index b8a35898c..cf02e6b70 100644 ---- a/framework/platform/android/tcuAndroidPlatform.cpp -+++ b/framework/platform/android/tcuAndroidPlatform.cpp -@@ -22,6 +22,7 @@ - *//*--------------------------------------------------------------------*/ - - #include "tcuAndroidPlatform.hpp" -+#include "tcuAndroidNativeActivity.hpp" - #include "tcuAndroidUtil.hpp" - #include "gluRenderContext.hpp" - #include 
"egluNativeDisplay.hpp" -@@ -170,7 +171,7 @@ eglu::NativeWindow* NativeWindowFactory::createWindow (const eglu::WindowParams& - Window* window = m_windowRegistry.tryAcquireWindow(); - - if (!window) -- throw ResourceError("Native window is not available", DE_NULL, __FILE__, __LINE__); -+ throw NotSupportedError("Native window is not available", DE_NULL, __FILE__, __LINE__); - - return new NativeWindow(window, params.width, params.height, format); - } -@@ -292,6 +293,9 @@ static size_t getTotalSystemMemory (ANativeActivity* activity) - - try - { -+ if (!activity) -+ throw tcu::InternalError("No activity (running from command line?"); -+ - const size_t totalMemory = getTotalAndroidSystemMemory(activity); - print("Device has %.2f MiB of system memory\n", static_cast(totalMemory) / static_cast(MiB)); - return totalMemory; -@@ -388,3 +392,9 @@ bool Platform::hasDisplay (vk::wsi::Type wsiType) const - - } // Android - } // tcu -+ -+tcu::Platform* createPlatform (void) -+{ -+ tcu::Android::NativeActivity activity(NULL); -+ return new tcu::Android::Platform(activity); -+} --- -2.42.0 - diff --git a/mesalib/.gitlab-ci/container/patches/build-deqp-vk_Android-prints-to-stdout-instead-of-logcat.patch b/mesalib/.gitlab-ci/container/patches/build-deqp-vk_Android-prints-to-stdout-instead-of-logcat.patch deleted file mode 100644 index 3c0b72c430..0000000000 --- a/mesalib/.gitlab-ci/container/patches/build-deqp-vk_Android-prints-to-stdout-instead-of-logcat.patch +++ /dev/null @@ -1,26 +0,0 @@ -From a602822c53e22e985f942f843ccadbfb64613212 Mon Sep 17 00:00:00 2001 -From: Helen Koike -Date: Tue, 27 Sep 2022 12:35:22 -0300 -Subject: [PATCH 2/2] Android prints to stdout instead of logcat - -Signed-off-by: Helen Koike ---- - framework/qphelper/qpDebugOut.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/framework/qphelper/qpDebugOut.c b/framework/qphelper/qpDebugOut.c -index 6579e9f48..c200c6f6b 100644 ---- a/framework/qphelper/qpDebugOut.c -+++ b/framework/qphelper/qpDebugOut.c -@@ -98,7 +98,7 @@ void qpDiev (const char* format, va_list args) - } - - /* print() implementation. */ --#if (DE_OS == DE_OS_ANDROID) -+#if (0) - - #include - --- -2.42.0 - diff --git a/mesalib/.gitlab-ci/container/setup-rootfs.sh b/mesalib/.gitlab-ci/container/setup-rootfs.sh index 596f50c467..9dedee05f7 100644 --- a/mesalib/.gitlab-ci/container/setup-rootfs.sh +++ b/mesalib/.gitlab-ci/container/setup-rootfs.sh @@ -5,6 +5,8 @@ # KERNEL_ROOTFS_TAG set -ex +. setup-test-env.sh + export DEBIAN_FRONTEND=noninteractive # Needed for ci-fairy, this revision is able to upload files to diff --git a/mesalib/.gitlab-ci/container/setup-wine.sh b/mesalib/.gitlab-ci/container/setup-wine.sh index 1c8158ad8d..a943fb07b8 100644 --- a/mesalib/.gitlab-ci/container/setup-wine.sh +++ b/mesalib/.gitlab-ci/container/setup-wine.sh @@ -1,5 +1,9 @@ #!/usr/bin/env bash +set -u + +uncollapsed_section_start wine "Setting up Wine" + export WINEPREFIX="$1" export WINEDEBUG="-all" @@ -22,3 +26,5 @@ rm crashdialog.reg # it a bit more of time for it to be created solves the problem # ... while ! 
test -f "${WINEPREFIX}/system.reg"; do sleep 1; done + +section_end wine diff --git a/mesalib/.gitlab-ci/container/strip-rootfs.sh b/mesalib/.gitlab-ci/container/strip-rootfs.sh index cb6bec9d85..47bc918185 100644 --- a/mesalib/.gitlab-ci/container/strip-rootfs.sh +++ b/mesalib/.gitlab-ci/container/strip-rootfs.sh @@ -5,6 +5,8 @@ # KERNEL_ROOTFS_TAG set -ex +section_start strip-rootfs "Stripping rootfs" + export DEBIAN_FRONTEND=noninteractive UNNEEDED_PACKAGES=( @@ -26,7 +28,7 @@ apt-get autoremove --yes || true UNNEEDED_PACKAGES=( apt libapt-pkg6.0 ncurses-bin ncurses-base libncursesw6 libncurses6 - perl-base + perl-base libperl5.36 perl-modules-5.36 debconf libdebconfclient0 e2fsprogs e2fslibs libfdisk1 insserv @@ -40,14 +42,8 @@ UNNEEDED_PACKAGES=( hostname adduser debian-archive-keyring - libegl1-mesa-dev # mesa group - libegl-mesa0 - libgl1-mesa-dev - libgl1-mesa-dri - libglapi-mesa - libgles2-mesa-dev - libglx-mesa0 - mesa-common-dev + libgl1-mesa-dri mesa-vulkan-drivers mesa-va-drivers mesa-vdpau-drivers i965-va-driver + intel-media-va-driver gnupg2 software-properties-common ) @@ -91,6 +87,7 @@ directories=( /var/lib/usbutils/usb.ids /root/.pip # pip cache /root/.cache + /root/.cargo /etc/apt # configuration archives of apt and dpkg /etc/dpkg /var/* # drop non-ostree directories @@ -114,6 +111,14 @@ directories=( /usr/lib/*/libdb-5.3.so # libdb-5.3.so that is only used by this pam module ^ /usr/lib/*/libnss_hesiod* # remove NSS support for nis, nisplus and hesiod /usr/lib/*/libnss_nis* + /usr/lib/*/wine # don't need Wine's implementation, using Proton instead + /usr/local/bin/mold + /usr/local/bin/bindgen + /usr/local/bin/cargo* + /usr/local/bin/clippy* + /usr/local/bin/rust* + /usr/local/bin/rls + /usr/lib/*/dri ) for directory in "${directories[@]}"; do @@ -131,3 +136,26 @@ files=( for files in "${files[@]}"; do find /usr /etc -name "$files" -prune -exec rm -r {} \; done + +# We purge apt and dpkg to save on space, which is great for runtime and +# bandwidth use etc, but less great for cbuild which wants to run apt-get clean +# when we're done. Install a stub which works for that and is apologetic for +# anyone else. 
+cat >/usr/bin/apt-get <<EOF
+#!/bin/sh
+
+if [ "\$1" != "clean" ]; then
+    echo "Sorry, apt-get \$* is not available in this stripped-down image." >&2
+    exit 1
+fi
+EOF
+chmod +x /usr/bin/apt-get

+mkdir -p subprojects/llvm
+
+cat << EOF > subprojects/llvm/meson.build
+project('llvm', ['cpp'])
+
+cpp = meson.get_compiler('cpp')
+
+_deps = []
+_search = join_paths('$LLVM_INSTALL_PREFIX', 'lib')
+
+foreach d: ['libLLVMAggressiveInstCombine', 'libLLVMAnalysis', 'libLLVMAsmParser', 'libLLVMAsmPrinter', 'libLLVMBinaryFormat', 'libLLVMBitReader', 'libLLVMBitstreamReader', 'libLLVMBitWriter', 'libLLVMCFGuard', 'libLLVMCFIVerify', 'libLLVMCodeGen', 'libLLVMCodeGenTypes', 'libLLVMCore', 'libLLVMCoroutines', 'libLLVMCoverage', 'libLLVMDebugInfoBTF', 'libLLVMDebugInfoCodeView', 'libLLVMDebuginfod', 'libLLVMDebugInfoDWARF', 'libLLVMDebugInfoGSYM', 'libLLVMDebugInfoLogicalView', 'libLLVMDebugInfoMSF', 'libLLVMDebugInfoPDB', 'libLLVMDemangle', 'libLLVMDiff', 'libLLVMDlltoolDriver', 'libLLVMDWARFLinker', 'libLLVMDWARFLinkerClassic', 'libLLVMDWARFLinkerParallel', 'libLLVMDWP', 'libLLVMExecutionEngine', 'libLLVMExegesis', 'libLLVMExegesisX86', 'libLLVMExtensions', 'libLLVMFileCheck', 'libLLVMFrontendDriver', 'libLLVMFrontendHLSL', 'libLLVMFrontendOffloading', 'libLLVMFrontendOpenACC', 'libLLVMFrontendOpenMP', 'libLLVMFuzzerCLI', 'libLLVMFuzzMutate', 'libLLVMGlobalISel', 'libLLVMHipStdPar', 'libLLVMInstCombine', 'libLLVMInstrumentation', 'libLLVMInterfaceStub', 'libLLVMInterpreter', 'libLLVMipo', 'libLLVMIRPrinter', 'libLLVMIRReader', 'libLLVMJITLink', 'libLLVMLibDriver', 'libLLVMLineEditor', 'libLLVMLinker', 'libLLVMLTO', 'libLLVMMC', 'libLLVMMCA', 'libLLVMMCDisassembler', 'libLLVMMCJIT', 'libLLVMMCParser', 'libLLVMMIRParser', 'libLLVMObjCARCOpts', 'libLLVMObjCopy', 'libLLVMObject', 'libLLVMObjectYAML', 'libLLVMOption', 'libLLVMOrcDebugging', 'libLLVMOrcJIT', 'libLLVMOrcShared', 'libLLVMOrcTargetProcess', 'libLLVMPasses', 'libLLVMProfileData', 'libLLVMRemarks', 'libLLVMRuntimeDyld', 'libLLVMScalarOpts', 'libLLVMSelectionDAG', 'libLLVMSupport', 'libLLVMSymbolize', 'libLLVMTableGen', 'libLLVMTableGenCommon', 'libLLVMTarget', 'libLLVMTargetParser', 'libLLVMTextAPI', 'libLLVMTextAPIBinaryReader', 'libLLVMTransformUtils', 'libLLVMVectorize', 'libLLVMWindowsDriver', 'libLLVMWindowsManifest', 'libLLVMX86AsmParser', 'libLLVMX86CodeGen', 'libLLVMX86Desc', 'libLLVMX86Disassembler', 'libLLVMX86Info', 'libLLVMX86TargetMCA', 'libLLVMXRay']
+  _deps += cpp.find_library(d, dirs : _search)
+endforeach
+
+dep_llvm = declare_dependency(
+  include_directories : include_directories('$LLVM_INSTALL_PREFIX/include'),
+  dependencies : _deps,
+  version : '$(sed -n -e 's/^#define LLVM_VERSION_STRING "\([^"]*\)".*/\1/p' "${LLVM_INSTALL_PREFIX}/include/llvm/Config/llvm-config.h" )',
+)
+
+has_rtti = false
+irbuilder_h = files('$LLVM_INSTALL_PREFIX/include/llvm/IR/IRBuilder.h')
+EOF
diff --git a/mesalib/.gitlab-ci/crosvm-runner.sh b/mesalib/.gitlab-ci/crosvm-runner.sh
index e345b3b99c..cb6a0ecd68 100644
--- a/mesalib/.gitlab-ci/crosvm-runner.sh
+++ b/mesalib/.gitlab-ci/crosvm-runner.sh
@@ -1,6 +1,17 @@
 #!/usr/bin/env bash
 # shellcheck disable=SC2086 # we want word splitting
-set -e
+
+set -ue
+
+# Instead of starting one dEQP instance per available CPU core, pour our
+# concurrency at llvmpipe threads instead. This is mostly useful for VirGL and
+# Venus, which serialise quite a bit at the host level. So instead of smashing
+# it with a pile of concurrent jobs which don't actually parallelise very well,
+# we use that concurrency for llvmpipe/lavapipe's render pipeline. 
+if [ -n "${PARALLELISE_VIA_LP_THREADS:-}" ]; then + export LP_NUM_THREADS="${FDO_CI_CONCURRENT:-4}" + export FDO_CI_CONCURRENT=1 +fi # If run outside of a deqp-runner invoction (e.g. piglit trace replay), then act # the same as the first thread in its threadpool. @@ -25,7 +36,7 @@ THREAD=${DEQP_RUNNER_THREAD:-0} # context data towards the guest # set_vsock_context() { - [ -n "${CI_JOB_ID}" ] || { + [ -n "${CI_JOB_ID:-}" ] || { echo "Missing or unset CI_JOB_ID env variable" >&2 exit 1 } @@ -64,13 +75,17 @@ set_vsock_context || { echo "Could not generate crosvm vsock CID" >&2; exit 1; } # Securely pass the current variables to the crosvm environment echo "Variables passed through:" -SCRIPT_DIR=$(readlink -en "${0%/*}") -${SCRIPT_DIR}/common/generate-env.sh | tee ${VM_TEMP_DIR}/crosvm-env.sh -cp ${SCRIPT_DIR}/setup-test-env.sh ${VM_TEMP_DIR}/setup-test-env.sh +SCRIPTS_DIR=$(readlink -en "${0%/*}") +${SCRIPTS_DIR}/common/generate-env.sh | tee ${VM_TEMP_DIR}/crosvm-env.sh +cp ${SCRIPTS_DIR}/setup-test-env.sh ${VM_TEMP_DIR}/setup-test-env.sh # Set the crosvm-script as the arguments of the current script -echo ". ${VM_TEMP_DIR}/setup-test-env.sh" > ${VM_TEMP_DIR}/crosvm-script.sh -echo "$@" >> ${VM_TEMP_DIR}/crosvm-script.sh +{ + echo "export SCRIPTS_DIR=${SCRIPTS_DIR}" + echo "export RESULTS_DIR=${RESULTS_DIR}" + echo ". ${VM_TEMP_DIR}/setup-test-env.sh" + echo "$@" +} > ${VM_TEMP_DIR}/crosvm-script.sh # Setup networking /usr/sbin/iptables-legacy -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE @@ -85,20 +100,25 @@ unset DISPLAY unset XDG_RUNTIME_DIR CROSVM_KERN_ARGS="quiet console=null root=my_root rw rootfstype=virtiofs ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0" -CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPT_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VM_TEMP_DIR}" +CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPTS_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VM_TEMP_DIR}" -[ "${CROSVM_GALLIUM_DRIVER}" = "llvmpipe" ] && \ +[ "${CROSVM_GALLIUM_DRIVER:-}" = "llvmpipe" ] && \ CROSVM_LIBGL_ALWAYS_SOFTWARE=true || CROSVM_LIBGL_ALWAYS_SOFTWARE=false -set +e -x +set +e + +if [ "${INSIDE_DEQP_RUNNER:-}" != "true" ] +then + set -x +fi # We aren't testing the host driver here, so we don't need to validate NIR on the host NIR_DEBUG="novalidate" \ -LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE} \ -GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER} \ -VK_DRIVER_FILES=$CI_PROJECT_DIR/install/share/vulkan/icd.d/${CROSVM_VK_DRIVER}_icd.x86_64.json \ +LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE:-} \ +GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER:-} \ +VK_DRIVER_FILES=$CI_PROJECT_DIR/install/share/vulkan/icd.d/${CROSVM_VK_DRIVER:-}_icd.x86_64.json \ crosvm --no-syslog run \ - --gpu "${CROSVM_GPU_ARGS}" --gpu-render-server "path=${VIRGL_RENDER_SERVER:-/usr/local/libexec/virgl_render_server}" \ + --gpu "${CROSVM_GPU_ARGS:-}" --gpu-render-server "path=${VIRGL_RENDER_SERVER:-/usr/local/libexec/virgl_render_server}" \ -m "${CROSVM_MEMORY:-4096}" -c "${CROSVM_CPU:-2}" --disable-sandbox \ --shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \ --net "host-ip=192.168.30.1,netmask=255.255.255.0,mac=AA:BB:CC:00:00:12" \ @@ -117,7 +137,7 @@ CROSVM_RET=$? # Show crosvm output on error to help with debugging [ ${CROSVM_RET} -eq 0 ] || { - set +x + { set +x; } 2>/dev/null echo "Dumping crosvm output.." 
>&2 cat ${VM_TEMP_DIR}/crosvm >&2 set -x diff --git a/mesalib/.gitlab-ci/cuttlefish-runner.sh b/mesalib/.gitlab-ci/cuttlefish-runner.sh index 562de581cf..3501f3ee40 100644 --- a/mesalib/.gitlab-ci/cuttlefish-runner.sh +++ b/mesalib/.gitlab-ci/cuttlefish-runner.sh @@ -1,31 +1,77 @@ #!/usr/bin/env bash # shellcheck disable=SC2086 # we want word splitting +# shellcheck disable=SC1091 # paths only become valid at runtime + +. "${SCRIPTS_DIR}/setup-test-env.sh" section_start cuttlefish_setup "cuttlefish: setup" set -xe export HOME=/cuttlefish -export PATH=$PATH:/cuttlefish/bin -export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${CI_PROJECT_DIR}/install/lib/:/cuttlefish/lib64 +export PATH=/cuttlefish/bin:$PATH +export LD_LIBRARY_PATH=/cuttlefish/lib64:${CI_PROJECT_DIR}/install/lib:$LD_LIBRARY_PATH export EGL_PLATFORM=surfaceless -syslogd - -chown root.kvm /dev/kvm +# Pick up a vulkan driver +# +# TODO: the vulkan driver should probably be controlled by a variable in the +# .test-android job or in derived jobs +export VK_DRIVER_FILES=${CI_PROJECT_DIR}/install/share/vulkan/icd.d/ -/etc/init.d/cuttlefish-host-resources start +syslogd -cd /cuttlefish +chown root:kvm /dev/kvm + +pushd /cuttlefish + +# Add a function to perform some tasks when exiting the script +function my_atexit() +{ + # shellcheck disable=SC2317 + cp /cuttlefish/cuttlefish/instances/cvd-1/logs/logcat $RESULTS_DIR || true + # shellcheck disable=SC2317 + cp /cuttlefish/cuttlefish/instances/cvd-1/kernel.log $RESULTS_DIR || true + + # shellcheck disable=SC2317 + cp /cuttlefish/cuttlefish/instances/cvd-1/logs/launcher.log $RESULTS_DIR || true + + # shellcheck disable=SC2317 + /cuttlefish/bin/stop_cvd -wait_for_launcher=10 +} + +# stop cuttlefish if the script ends prematurely or is interrupted +trap 'my_atexit' EXIT +trap 'exit 2' HUP INT PIPE TERM + +ulimit -S -n 32768 + +# Clean up state of previous run +rm -rf /cuttlefish/cuttlefish +rm -rf /cuttlefish/.cache +rm -rf /cuttlefish/.cuttlefish_config.json + +launch_cvd \ + -daemon \ + -verbosity=VERBOSE \ + -file_verbosity=VERBOSE \ + -use_overlay=false \ + -enable_bootanimation=false \ + -enable_minimal_mode=true \ + -guest_enforce_security=false \ + -report_anonymous_usage_stats=no \ + -gpu_mode="$ANDROID_GPU_MODE" \ + -cpus=${FDO_CI_CONCURRENT:-4} \ + -memory_mb 8192 \ + -kernel_path="$HOME/bzImage" \ + -initramfs_path="$HOME/initramfs.img" -launch_cvd --verbosity=DEBUG --report_anonymous_usage_stats=n --cpus=8 --memory_mb=8192 --gpu_mode="$ANDROID_GPU_MODE" --daemon --enable_minimal_mode=true --guest_enforce_security=false --use_overlay=false sleep 1 -cd - +popd -adb connect vsock:3:5555 -ADB="adb -s vsock:3:5555" +ADB=adb -$ADB root +$ADB wait-for-device root sleep 1 $ADB shell echo Hi from Android # shellcheck disable=SC2035 @@ -47,72 +93,94 @@ $ADB shell setenforce 0 # deqp -$ADB push /deqp/modules/egl/deqp-egl-android /data/. -$ADB push /deqp/assets/gl_cts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-main.txt /data/ -$ADB push /deqp-runner/deqp-runner /data/. 
+$ADB shell mkdir -p /data/deqp +$ADB push /deqp-gles/modules/egl/deqp-egl-android /data/deqp +$ADB push /deqp-gles/assets/gl_cts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-main.txt /data/deqp +$ADB push /deqp-vk/external/vulkancts/modules/vulkan/* /data/deqp +$ADB push /deqp-vk/mustpass/vk-main.txt.zst /data/deqp +$ADB push /deqp-tools/* /data/deqp +$ADB push /deqp-runner/deqp-runner /data/deqp # download Android Mesa from S3 -MESA_ANDROID_ARTIFACT_URL=https://${PIPELINE_ARTIFACTS_BASE}/${S3_ARTIFACT_NAME}.tar.zst -curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -o ${S3_ARTIFACT_NAME}.tar.zst ${MESA_ANDROID_ARTIFACT_URL} -tar -xvf ${S3_ARTIFACT_NAME}.tar.zst -rm "${S3_ARTIFACT_NAME}.tar.zst" & +MESA_ANDROID_ARTIFACT_URL=https://${PIPELINE_ARTIFACTS_BASE}/${S3_ANDROID_ARTIFACT_NAME}.tar.zst +curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -o ${S3_ANDROID_ARTIFACT_NAME}.tar.zst ${MESA_ANDROID_ARTIFACT_URL} +mkdir /mesa-android +tar -C /mesa-android -xvf ${S3_ANDROID_ARTIFACT_NAME}.tar.zst +rm "${S3_ANDROID_ARTIFACT_NAME}.tar.zst" & -$ADB push install/all-skips.txt /data/. -$ADB push install/$GPU_VERSION-flakes.txt /data/. -$ADB push install/deqp-$DEQP_SUITE.toml /data/. +$ADB push /mesa-android/install/all-skips.txt /data/deqp +$ADB push "/mesa-android/install/$GPU_VERSION-flakes.txt" /data/deqp +$ADB push "/mesa-android/install/deqp-$DEQP_SUITE.toml" /data/deqp # remove 32 bits libs from /vendor/lib -$ADB shell rm /vendor/lib/dri/${ANDROID_DRIVER}_dri.so -$ADB shell rm /vendor/lib/libglapi.so -$ADB shell rm /vendor/lib/egl/libGLES_mesa.so +$ADB shell rm -f /vendor/lib/libglapi.so +$ADB shell rm -f /vendor/lib/egl/libGLES_mesa.so -$ADB shell rm /vendor/lib/egl/libEGL_angle.so -$ADB shell rm /vendor/lib/egl/libEGL_emulation.so -$ADB shell rm /vendor/lib/egl/libGLESv1_CM_angle.so -$ADB shell rm /vendor/lib/egl/libGLESv1_CM_emulation.so -$ADB shell rm /vendor/lib/egl/libGLESv2_angle.so -$ADB shell rm /vendor/lib/egl/libGLESv2_emulation.so +$ADB shell rm -f /vendor/lib/egl/libEGL_angle.so +$ADB shell rm -f /vendor/lib/egl/libEGL_emulation.so +$ADB shell rm -f /vendor/lib/egl/libGLESv1_CM_angle.so +$ADB shell rm -f /vendor/lib/egl/libGLESv1_CM_emulation.so +$ADB shell rm -f /vendor/lib/egl/libGLESv2_angle.so +$ADB shell rm -f /vendor/lib/egl/libGLESv2_emulation.so -# replace on /vendor/lib64 - -$ADB push install/lib/dri/${ANDROID_DRIVER}_dri.so /vendor/lib64/dri/${ANDROID_DRIVER}_dri.so -$ADB push install/lib/libglapi.so /vendor/lib64/libglapi.so -$ADB push install/lib/libEGL.so /vendor/lib64/egl/libEGL_mesa.so - -$ADB shell rm /vendor/lib64/egl/libEGL_angle.so -$ADB shell rm /vendor/lib64/egl/libEGL_emulation.so -$ADB shell rm /vendor/lib64/egl/libGLESv1_CM_angle.so -$ADB shell rm /vendor/lib64/egl/libGLESv1_CM_emulation.so -$ADB shell rm /vendor/lib64/egl/libGLESv2_angle.so -$ADB shell rm /vendor/lib64/egl/libGLESv2_emulation.so +$ADB shell rm -f /vendor/lib/hw/vulkan.* +# replace on /vendor/lib64 -RESULTS=/data/results +$ADB push /mesa-android/install/lib/libgallium_dri.so /vendor/lib64/libgallium_dri.so +$ADB push /mesa-android/install/lib/libglapi.so /vendor/lib64/libglapi.so +$ADB push /mesa-android/install/lib/libEGL.so /vendor/lib64/egl/libEGL_mesa.so +$ADB push /mesa-android/install/lib/libGLESv1_CM.so /vendor/lib64/egl/libGLESv1_CM_mesa.so +$ADB push /mesa-android/install/lib/libGLESv2.so /vendor/lib64/egl/libGLESv2_mesa.so + +$ADB push /mesa-android/install/lib/libvulkan_lvp.so /vendor/lib64/hw/vulkan.lvp.so +$ADB push /mesa-android/install/lib/libvulkan_virtio.so 
/vendor/lib64/hw/vulkan.virtio.so
+
+$ADB shell rm -f /vendor/lib64/egl/libEGL_emulation.so
+$ADB shell rm -f /vendor/lib64/egl/libGLESv1_CM_emulation.so
+$ADB shell rm -f /vendor/lib64/egl/libGLESv2_emulation.so
+
+# Check which GLES implementation SurfaceFlinger is using before the new Mesa libraries take effect
+while [ "$($ADB shell dumpsys SurfaceFlinger | grep GLES:)" = "" ] ; do sleep 1; done
+$ADB shell dumpsys SurfaceFlinger | grep GLES
+
+# restart the Android shell, so that SurfaceFlinger picks up the new libraries
+$ADB shell stop
+$ADB shell start
+
+# Check which GLES implementation SurfaceFlinger is using after the restart
+while [ "$($ADB shell dumpsys SurfaceFlinger | grep GLES:)" = "" ] ; do sleep 1; done
+MESA_RUNTIME_VERSION="$($ADB shell dumpsys SurfaceFlinger | grep GLES:)"
+MESA_BUILD_VERSION=$(cat /mesa-android/install/VERSION)
+if ! printf "%s" "$MESA_RUNTIME_VERSION" | grep "${MESA_BUILD_VERSION}$"; then
+  echo "Fatal: Android is loading the wrong version of the Mesa3D libs: ${MESA_RUNTIME_VERSION}" 1>&2
+  exit 1
+fi
+
+AOSP_RESULTS=/data/deqp/results
 
 uncollapsed_section_switch cuttlefish_test "cuttlefish: testing"
 
 set +e
-$ADB shell "mkdir /data/results; cd /data; ./deqp-runner \
+$ADB shell "mkdir ${AOSP_RESULTS}; cd ${AOSP_RESULTS}/..; \
+    XDG_CACHE_HOME=/data/local/tmp \
+    ./deqp-runner \
     suite \
-    --suite /data/deqp-$DEQP_SUITE.toml \
-    --output $RESULTS \
-    --skips /data/all-skips.txt $DEQP_SKIPS \
-    --flakes /data/$GPU_VERSION-flakes.txt \
-    --testlog-to-xml /deqp/executor/testlog-to-xml \
-    --fraction-start $CI_NODE_INDEX \
+    --suite /data/deqp/deqp-$DEQP_SUITE.toml \
+    --output $AOSP_RESULTS \
+    --skips /data/deqp/all-skips.txt $DEQP_SKIPS \
+    --flakes /data/deqp/$GPU_VERSION-flakes.txt \
+    --testlog-to-xml /data/deqp/testlog-to-xml \
+    --shader-cache-dir /data/local/tmp \
+    --fraction-start ${CI_NODE_INDEX:-1} \
     --fraction $(( CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \
-    --jobs ${FDO_CI_CONCURRENT:-4} \
-    $DEQP_RUNNER_OPTIONS"
+    --jobs ${FDO_CI_CONCURRENT:-4}"
 EXIT_CODE=$?
 set -e
 section_switch cuttlefish_results "cuttlefish: gathering the results"
 
-$ADB pull $RESULTS results
-
-cp /cuttlefish/cuttlefish/instances/cvd-1/logs/logcat results
-cp /cuttlefish/cuttlefish/instances/cvd-1/kernel.log results
-cp /cuttlefish/cuttlefish/instances/cvd-1/logs/launcher.log results
+$ADB pull $AOSP_RESULTS $RESULTS_DIR
 
 section_end cuttlefish_results
 exit $EXIT_CODE
diff --git a/mesalib/.gitlab-ci/deqp-runner.sh b/mesalib/.gitlab-ci/deqp-runner.sh
index 7f680f5cb3..5f4dc53fd1 100644
--- a/mesalib/.gitlab-ci/deqp-runner.sh
+++ b/mesalib/.gitlab-ci/deqp-runner.sh
@@ -1,5 +1,8 @@
 #!/usr/bin/env bash
 # shellcheck disable=SC2086 # we want word splitting
+# shellcheck disable=SC1091 # paths only become valid at runtime
+
+. "${SCRIPTS_DIR}/setup-test-env.sh"
 
 section_start test_setup "deqp: preparing test setup"
 
@@ -26,9 +29,6 @@ if [ -n "$USE_ANGLE" ]; then
     export LD_LIBRARY_PATH=/angle:$LD_LIBRARY_PATH
 fi
 
-RESULTS="$PWD/${DEQP_RESULTS_DIR:-results}"
-mkdir -p "$RESULTS"
-
 # Ensure Mesa Shader Cache resides on tmpfs. 
SHADER_CACHE_HOME=${XDG_CACHE_HOME:-${HOME}/.cache} SHADER_CACHE_DIR=${MESA_SHADER_CACHE_DIR:-${SHADER_CACHE_HOME}/mesa_shader_cache} @@ -38,64 +38,9 @@ findmnt -n tmpfs ${SHADER_CACHE_HOME} || findmnt -n tmpfs ${SHADER_CACHE_DIR} || mount -t tmpfs -o nosuid,nodev,size=2G,mode=1755 tmpfs ${SHADER_CACHE_DIR} } -if [ -z "$DEQP_SUITE" ]; then - if [ -z "$DEQP_VER" ]; then - echo 'DEQP_SUITE must be set to the name of your deqp-gpu_version.toml, or DEQP_VER must be set to something like "gles2", "gles31-khr" or "vk" for the test run' - exit 1 - fi - - DEQP_WIDTH=${DEQP_WIDTH:-256} - DEQP_HEIGHT=${DEQP_HEIGHT:-256} - DEQP_CONFIG=${DEQP_CONFIG:-rgba8888d24s8ms0} - - DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-width=$DEQP_WIDTH --deqp-surface-height=$DEQP_HEIGHT" - DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-type=${DEQP_SURFACE_TYPE:-pbuffer}" - DEQP_OPTIONS="$DEQP_OPTIONS --deqp-gl-config-name=$DEQP_CONFIG" - DEQP_OPTIONS="$DEQP_OPTIONS --deqp-visibility=hidden" - - if [ "$DEQP_VER" = "vk" ] && [ -z "$VK_DRIVER" ]; then - echo 'VK_DRIVER must be to something like "radeon" or "intel" for the test run' - exit 1 - fi - - # Generate test case list file. - if [ "$DEQP_VER" = "vk" ]; then - MUSTPASS=/deqp/mustpass/vk-main.txt - DEQP=/deqp/external/vulkancts/modules/vulkan/deqp-vk - elif [ "$DEQP_VER" = "gles2" ] || [ "$DEQP_VER" = "gles3" ] || [ "$DEQP_VER" = "gles31" ] || [ "$DEQP_VER" = "egl" ]; then - MUSTPASS=/deqp/mustpass/$DEQP_VER-main.txt - DEQP=/deqp/modules/$DEQP_VER/deqp-$DEQP_VER - elif [ "$DEQP_VER" = "gles2-khr" ] || [ "$DEQP_VER" = "gles3-khr" ] || [ "$DEQP_VER" = "gles31-khr" ] || [ "$DEQP_VER" = "gles32-khr" ]; then - MUSTPASS=/deqp/mustpass/$DEQP_VER-main.txt - DEQP=/deqp/external/openglcts/modules/glcts - else - MUSTPASS=/deqp/mustpass/$DEQP_VER-main.txt - DEQP=/deqp/external/openglcts/modules/glcts - fi - - cp $MUSTPASS /tmp/case-list.txt - - # If the caselist is too long to run in a reasonable amount of time, let the job - # specify what fraction (1/n) of the caselist we should run. Note: N~M is a gnu - # sed extension to match every nth line (first line is #1). - if [ -n "$DEQP_FRACTION" ]; then - sed -ni 1~$DEQP_FRACTION"p" /tmp/case-list.txt - fi - - # If the job is parallel at the gitab job level, take the corresponding fraction - # of the caselist. - if [ -n "$CI_NODE_INDEX" ]; then - sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt - fi - - if [ ! -s /tmp/case-list.txt ]; then - echo "Caselist generation failed" - exit 1 - fi -fi - +BASELINE="" if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then - DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --baseline $INSTALL/$GPU_VERSION-fails.txt" + BASELINE="--baseline $INSTALL/$GPU_VERSION-fails.txt" fi # Default to an empty known flakes file if it doesn't exist. @@ -118,6 +63,10 @@ if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GPU_VERSION-skips.txt" fi +if [ -e "$INSTALL/$GPU_VERSION-slow-skips.txt" ] && [[ $CI_JOB_NAME != *full* ]]; then + DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GPU_VERSION-slow-skips.txt" +fi + if [ "$PIGLIT_PLATFORM" != "gbm" ] ; then DEQP_SKIPS="$DEQP_SKIPS $INSTALL/x11-skips.txt" fi @@ -126,9 +75,8 @@ if [ "$PIGLIT_PLATFORM" = "gbm" ]; then DEQP_SKIPS="$DEQP_SKIPS $INSTALL/gbm-skips.txt" fi -if [ -n "$VK_DRIVER" ] && [ -z "$DEQP_SUITE" ]; then - # Bump the number of tests per group to reduce the startup time of VKCTS. 
- DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --tests-per-group ${DEQP_RUNNER_TESTS_PER_GROUP:-5000}" +if [ -n "$USE_ANGLE" ]; then + DEQP_SKIPS="$DEQP_SKIPS $INSTALL/angle-skips.txt" fi # Set the path to VK validation layer settings (in case it ends up getting loaded) @@ -152,89 +100,68 @@ if [ "$GALLIUM_DRIVER" = "virpipe" ]; then fi GALLIUM_DRIVER=llvmpipe \ - virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 & + virgl_test_server $VTEST_ARGS >$RESULTS_DIR/vtest-log.txt 2>&1 & sleep 1 fi -if [ -z "$DEQP_SUITE" ]; then - if [ -n "$DEQP_EXPECTED_RENDERER" ]; then - export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --renderer-check $DEQP_EXPECTED_RENDERER" - fi - if [ $DEQP_VER != vk ] && [ $DEQP_VER != egl ]; then - VER=$(sed 's/[() ]/./g' "$INSTALL/VERSION") - export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --version-check $VER" - fi -fi - uncollapsed_section_switch deqp "deqp: deqp-runner" # Print the detailed version with the list of backports and local patches -for api in vk gl gles; do - deqp_version_log=/deqp/version-$api +{ set +x; } 2>/dev/null +for api in vk-main vk gl gles; do + deqp_version_log=/deqp-$api/deqp-$api-version if [ -r "$deqp_version_log" ]; then cat "$deqp_version_log" fi done +set -x +# If you change the format of the suite toml filenames or the +# $GPU_VERSION-{fails,flakes,skips}.txt filenames, look through the rest +# of the tree for other places that need to be kept in sync (e.g. +# src/**/ci/gitlab-ci*.yml) set +e -if [ -z "$DEQP_SUITE" ]; then - deqp-runner \ - run \ - --deqp $DEQP \ - --output $RESULTS \ - --caselist /tmp/case-list.txt \ - --skips $INSTALL/all-skips.txt $DEQP_SKIPS \ - --flakes $INSTALL/$GPU_VERSION-flakes.txt \ - --testlog-to-xml /deqp/executor/testlog-to-xml \ - --jobs ${FDO_CI_CONCURRENT:-4} \ - $DEQP_RUNNER_OPTIONS \ - -- \ - $DEQP_OPTIONS -else - # If you change the format of the suite toml filenames or the - # $GPU_VERSION-{fails,flakes,skips}.txt filenames, look through the rest - # of the tree for other places that need to be kept in sync (e.g. - # src/**/ci/gitlab-ci*.yml) - deqp-runner \ - suite \ - --suite $INSTALL/deqp-$DEQP_SUITE.toml \ - --output $RESULTS \ - --skips $INSTALL/all-skips.txt $DEQP_SKIPS \ - --flakes $INSTALL/$GPU_VERSION-flakes.txt \ - --testlog-to-xml /deqp/executor/testlog-to-xml \ - --fraction-start $CI_NODE_INDEX \ - --fraction $((CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \ - --jobs ${FDO_CI_CONCURRENT:-4} \ - $DEQP_RUNNER_OPTIONS -fi +deqp-runner -V +deqp-runner \ + suite \ + --suite $INSTALL/deqp-$DEQP_SUITE.toml \ + --output $RESULTS_DIR \ + --skips $INSTALL/all-skips.txt $DEQP_SKIPS \ + --flakes $INSTALL/$GPU_VERSION-flakes.txt \ + --testlog-to-xml /deqp-tools/testlog-to-xml \ + --fraction-start ${CI_NODE_INDEX:-1} \ + --fraction $((CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \ + --jobs ${FDO_CI_CONCURRENT:-4} \ + $BASELINE \ + ${DEQP_RUNNER_MAX_FAILS:+--max-fails "$DEQP_RUNNER_MAX_FAILS"} \ + ${DEQP_FORCE_ASAN:+--env LD_PRELOAD=libasan.so.8:/install/lib/libdlclose-skip.so}; DEQP_EXITCODE=$? + +{ set +x; } 2>/dev/null -DEQP_EXITCODE=$? set -e -set +x - -report_load - section_switch test_post_process "deqp: post-processing test results" set -x +report_load + # Remove all but the first 50 individual XML files uploaded as artifacts, to # save fd.o space when you break everything. -find $RESULTS -name \*.xml | \ +find $RESULTS_DIR -name \*.xml | \ sort -n | sed -n '1,+49!p' | \ xargs rm -f # If any QPA XMLs are there, then include the XSL/CSS in our artifacts. 
-find $RESULTS -name \*.xml \
-    -exec cp /deqp/testlog.css /deqp/testlog.xsl "$RESULTS/" ";" \
+find $RESULTS_DIR -name \*.xml \
+    -exec cp /deqp-tools/testlog.css /deqp-tools/testlog.xsl "$RESULTS_DIR/" ";" \
     -quit
 
 deqp-runner junit \
    --testsuite dEQP \
-   --results $RESULTS/failures.csv \
-   --output $RESULTS/junit.xml \
+   --results $RESULTS_DIR/failures.csv \
+   --output $RESULTS_DIR/junit.xml \
    --limit 50 \
    --template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
@@ -243,7 +170,7 @@ if [ -n "$FLAKES_CHANNEL" ]; then
   python3 $INSTALL/report-flakes.py \
      --host irc.oftc.net \
      --port 6667 \
-     --results $RESULTS/results.csv \
+     --results $RESULTS_DIR/results.csv \
      --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \
      --channel "$FLAKES_CHANNEL" \
      --runner "$CI_RUNNER_DESCRIPTION" \
@@ -256,8 +183,9 @@ fi
 # Compress results.csv to save on bandwidth during the upload of artifacts to
 # GitLab. This reduces the size in a VKCTS run from 135 to 7.6MB, and takes
 # 0.17s on a Ryzen 5950X (16 threads, 0.95s when limited to 1 thread).
-zstd --rm -T0 -8q "$RESULTS/results.csv" -o "$RESULTS/results.csv.zst"
+zstd --quiet --rm --threads ${FDO_CI_CONCURRENT:-0} -8 "$RESULTS_DIR/results.csv" -o "$RESULTS_DIR/results.csv.zst"
 
+set +x
 section_end test_post_process
 
 exit $DEQP_EXITCODE
diff --git a/mesalib/.gitlab-ci/docs/bare-metal.rst b/mesalib/.gitlab-ci/docs/bare-metal.rst
index b9d5f654f7..772f5f4c98 100644
--- a/mesalib/.gitlab-ci/docs/bare-metal.rst
+++ b/mesalib/.gitlab-ci/docs/bare-metal.rst
@@ -53,7 +53,7 @@ of needing more storage on the runner.
 
 Telling the board about where its TFTP and NFS should come from is
 done using dnsmasq on the runner host.  For example, this snippet in
-the dnsmasq.conf.d in the google farm, with the gitlab-runner host we
+the dnsmasq.conf.d in the Google farm, with the gitlab-runner host we
 call "servo"::
 
   dhcp-host=1c:69:7a:0d:a3:d3,10.42.0.10,set:servo
@@ -123,7 +123,7 @@ With that set up, you should be able to power on/off a port with something like:
 
 Note that the "1.3.6..." SNMP OID changes between switches.  The last digit
 above is the interface id (port number).  You can probably find the right OID by
-google, that was easier than figuring it out from finding the switch's MIB
+Google, which was easier than figuring it out from the switch's MIB
 database.  You can query the POE status from the switch serial using the
 ``show power inline`` command.
diff --git a/mesalib/.gitlab-ci/docs/index.rst b/mesalib/.gitlab-ci/docs/index.rst
index d173d74a3c..484672b00d 100644
--- a/mesalib/.gitlab-ci/docs/index.rst
+++ b/mesalib/.gitlab-ci/docs/index.rst
@@ -302,8 +302,8 @@ and cancel the rest to avoid wasting resources. See
 ``bin/ci/ci_run_n_monitor.py --help`` for all the options.
 
 The ``--target`` argument takes a regex that you can use to select the
-jobs names you want to run, eg. ``--target 'zink.*'`` will run all the
-zink jobs, leaving the other drivers' jobs free for others to use.
+job names you want to run, e.g. ``--target 'zink.*'`` will run all the
+Zink jobs, leaving the other drivers' jobs free for others to use.
 
 Note that in fork pipelines, GitLab only adds the jobs for the files
 that have changed **since the last push**, so you might not get the
 jobs you expect. 
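As a concrete illustration of the ``--target`` regex described in the hunk above, a run that exercises only the Zink jobs of a pipeline can be started from a Mesa checkout roughly like this (a sketch: only --target is taken from the documentation above, every other option is left at its default):

    bin/ci/ci_run_n_monitor.py --target 'zink.*'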
diff --git a/mesalib/.gitlab-ci/docs/local-traces.rst b/mesalib/.gitlab-ci/docs/local-traces.rst
index a834c8ac4f..28af27d982 100644
--- a/mesalib/.gitlab-ci/docs/local-traces.rst
+++ b/mesalib/.gitlab-ci/docs/local-traces.rst
@@ -32,7 +32,7 @@ Simulating CI trace job
 Sometimes it's useful to be able to test traces on your local machine instead of the Mesa CI runner. To simulate the CI environment as closely as possible.
 
-Download the YAML file from your driver's ``ci/`` directory and then change the path in the YAML file from local proxy or MinIO to the local directory (url-like format ``file://``)
+Download the YAML file from your driver's ``ci/`` directory and then change the path in the YAML file from the local proxy or MinIO to the local directory (URL-like format ``file://``).
 
 .. code-block:: sh
diff --git a/mesalib/.gitlab-ci/farm-rules.yml b/mesalib/.gitlab-ci/farm-rules.yml
index eaddaa9fbd..7b4d453ea8 100644
--- a/mesalib/.gitlab-ci/farm-rules.yml
+++ b/mesalib/.gitlab-ci/farm-rules.yml
@@ -217,25 +217,25 @@
     - !reference [.austriancoder-farm-rules, rules]
 
 
-.freedreno-farm-rules:
+.google-freedreno-farm-rules:
   rules:
-    - exists: [ .ci-farms-disabled/freedreno ]
+    - exists: [ .ci-farms-disabled/google-freedreno ]
       when: never
-    - changes: [ .ci-farms-disabled/freedreno ]
+    - changes: [ .ci-farms-disabled/google-freedreno ]
      if: '$CI_PIPELINE_SOURCE != "schedule"'
      when: on_success
     - changes: [ .ci-farms-disabled/* ]
      if: '$CI_PIPELINE_SOURCE != "schedule"'
      when: never
 
-.freedreno-farm-manual-rules:
+.google-freedreno-farm-manual-rules:
   rules:
-    - exists: [ .ci-farms-disabled/freedreno ]
+    - exists: [ .ci-farms-disabled/google-freedreno ]
      when: never
-    - changes: [ .ci-farms-disabled/freedreno ]
+    - changes: [ .ci-farms-disabled/google-freedreno ]
      if: '$CI_PIPELINE_SOURCE != "schedule"'
      when: never
-    - !reference [.freedreno-farm-rules, rules]
+    - !reference [.google-freedreno-farm-rules, rules]
 
 .vmware-farm-rules:
   rules:
@@ -323,8 +323,8 @@
       exists: [ .ci-farms-disabled/austriancoder ]
       when: never
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      changes: [ .ci-farms-disabled/freedreno ]
-      exists: [ .ci-farms-disabled/freedreno ]
+      changes: [ .ci-farms-disabled/google-freedreno ]
+      exists: [ .ci-farms-disabled/google-freedreno ]
       when: never
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
       changes: [ .ci-farms-disabled/ondracka ]
diff --git a/mesalib/.gitlab-ci/firmware/arm/mali/arch10.8/mali-fw.json b/mesalib/.gitlab-ci/firmware/arm/mali/arch10.8/mali-fw.json
new file mode 100644
index 0000000000..cad011c8b5
--- /dev/null
+++ b/mesalib/.gitlab-ci/firmware/arm/mali/arch10.8/mali-fw.json
@@ -0,0 +1,8 @@
+{
+    "src": "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/arm/mali/arch10.8/",
+    "git_hash": "ad8d5f76c429e5485764a9ecb7a2ce3fbc1386ae",
+    "files": [
+        "mali_csffw.bin"
+    ],
+    "dst": "/lib/firmware/arm/mali/arch10.8/"
+}
diff --git a/mesalib/.gitlab-ci/gtest-runner.sh b/mesalib/.gitlab-ci/gtest-runner.sh
index c4ae1e741d..b55e581a34 100644
--- a/mesalib/.gitlab-ci/gtest-runner.sh
+++ b/mesalib/.gitlab-ci/gtest-runner.sh
@@ -1,5 +1,8 @@
 #!/usr/bin/env bash
 # shellcheck disable=SC2086 # we want word splitting
+# shellcheck disable=SC1091 # paths only become valid at runtime
+
+. "${SCRIPTS_DIR}/setup-test-env.sh"
 
 set -ex
 
@@ -8,9 +11,6 @@ INSTALL=$PWD/install
 
 # Set up the driver environment. 
export LD_LIBRARY_PATH=$INSTALL/lib/ -RESULTS="$PWD/${GTEST_RESULTS_DIR:-results}" -mkdir -p "$RESULTS" - export LIBVA_DRIVERS_PATH=$INSTALL/lib/dri/ # libva spams driver open info by default, and that happens per testcase. export LIBVA_MESSAGING_LEVEL=1 @@ -39,7 +39,7 @@ set +e gtest-runner \ run \ --gtest $GTEST \ - --output ${RESULTS} \ + --output ${RESULTS_DIR} \ --jobs ${FDO_CI_CONCURRENT:-4} \ $GTEST_SKIPS \ --flakes $INSTALL/$GPU_VERSION-flakes.txt \ @@ -52,8 +52,8 @@ GTEST_EXITCODE=$? deqp-runner junit \ --testsuite gtest \ - --results $RESULTS/failures.csv \ - --output $RESULTS/junit.xml \ + --results $RESULTS_DIR/failures.csv \ + --output $RESULTS_DIR/junit.xml \ --limit 50 \ --template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml" @@ -62,7 +62,7 @@ if [ -n "$FLAKES_CHANNEL" ]; then python3 $INSTALL/report-flakes.py \ --host irc.oftc.net \ --port 6667 \ - --results $RESULTS/results.csv \ + --results $RESULTS_DIR/results.csv \ --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \ --channel "$FLAKES_CHANNEL" \ --runner "$CI_RUNNER_DESCRIPTION" \ diff --git a/mesalib/.gitlab-ci/image-tags.yml b/mesalib/.gitlab-ci/image-tags.yml index 018c01cdff..46a88daed8 100644 --- a/mesalib/.gitlab-ci/image-tags.yml +++ b/mesalib/.gitlab-ci/image-tags.yml @@ -13,34 +13,41 @@ variables: DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base" - DEBIAN_BASE_TAG: "20240509-meson" + DEBIAN_BASE_TAG: "20250109-libubsan1" DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build" - DEBIAN_BUILD_TAG: "20240509-bindgen" + DEBIAN_BUILD_TAG: "20241220-android" DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base" + DEBIAN_ARM64_TEST_BASE_IMAGE: "debian/arm64_test-base" DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl" + DEBIAN_ARM64_TEST_IMAGE_GL_PATH: "debian/arm64_test-gl" DEBIAN_X86_64_TEST_IMAGE_VK_PATH: "debian/x86_64_test-vk" + DEBIAN_ARM64_TEST_IMAGE_VK_PATH: "debian/arm64_test-vk" DEBIAN_X86_64_TEST_ANDROID_IMAGE_PATH: "debian/x86_64_test-android" - DEBIAN_X86_64_TEST_ANDROID_TAG: "20240423-deqp" - DEBIAN_X86_64_TEST_GL_TAG: "20240515-piglit-8a" - DEBIAN_X86_64_TEST_VK_TAG: "20240515-piglit-8a" - KERNEL_ROOTFS_TAG: "20240516-angle" + DEBIAN_TEST_ANDROID_TAG: "20241220-venus" + DEBIAN_TEST_GL_TAG: "20250114-piglit-63" + DEBIAN_TEST_VK_TAG: "20250114-piglit-63" + KERNEL_ROOTFS_TAG: "20250116-kernel" - ALPINE_X86_64_BUILD_TAG: "20240517-m0ld" - ALPINE_X86_64_LAVA_SSH_TAG: "20240401-wlproto" - FEDORA_X86_64_BUILD_TAG: "20240509-meson" - KERNEL_TAG: "v6.6.21-mesa-f8ea" + DEBIAN_PYUTILS_IMAGE: "debian/x86_64_pyutils" + DEBIAN_PYUTILS_TAG: "20241223-pyutils" + + ALPINE_X86_64_BUILD_TAG: "20241122-sections" + ALPINE_X86_64_LAVA_SSH_TAG: "20241122-sections" + FEDORA_X86_64_BUILD_TAG: "20241122-sections" + + KERNEL_TAG: "v6.13-rc4-mesa-5e77" KERNEL_REPO: "gfx-ci/linux" - PKG_REPO_REV: "3cc12a2a" + PKG_REPO_REV: "bca9635d" WINDOWS_X64_MSVC_PATH: "windows/x86_64_msvc" - WINDOWS_X64_MSVC_TAG: "20231222-msvc" + WINDOWS_X64_MSVC_TAG: "20240827-v143" WINDOWS_X64_BUILD_PATH: "windows/x86_64_build" - WINDOWS_X64_BUILD_TAG: "20240405-vainfo-ci-1" + WINDOWS_X64_BUILD_TAG: "20241107-setup" WINDOWS_X64_TEST_PATH: "windows/x86_64_test" - WINDOWS_X64_TEST_TAG: "20240405-vainfo-ci-1" + WINDOWS_X64_TEST_TAG: "20241107-setup" diff --git a/mesalib/.gitlab-ci/lava/lava-gitlab-ci.yml b/mesalib/.gitlab-ci/lava/lava-gitlab-ci.yml index 2a91a66e62..5022f251d7 100644 --- a/mesalib/.gitlab-ci/lava/lava-gitlab-ci.yml +++ b/mesalib/.gitlab-ci/lava/lava-gitlab-ci.yml @@ -8,8 +8,8 @@ variables: variables: GIT_STRATEGY: none # 
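Each image path/tag pair in the image-tags.yml hunk above pins one container image; in the Mesa CI setup, bumping a tag is what forces the corresponding container to be rebuilt and every job using it to switch over. Conceptually, path and tag are composed into a full registry reference along these lines (an illustrative sketch, not the literal template logic; CI_REGISTRY_IMAGE is the standard GitLab-provided variable):

    # Hypothetical composition of a test image reference from the variables above
    echo "${CI_REGISTRY_IMAGE}/${DEBIAN_X86_64_TEST_IMAGE_GL_PATH}:${DEBIAN_TEST_GL_TAG}"
    # -> e.g. .../debian/x86_64_test-gl:20250114-piglit-63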
testing doesn't build anything from source FDO_CI_CONCURRENT: 6 # should be replaced by per-machine definitions - # proxy used to cache data locally - FDO_HTTP_CACHE_URI: "http://caching-proxy/cache/?uri=" + # the dispatchers use this to cache data locally + LAVA_HTTP_CACHE_URI: "http://caching-proxy/cache/?uri=" # base system generated by the container build job, shared between many pipelines BASE_SYSTEM_HOST_PREFIX: "${S3_HOST}/${S3_KERNEL_BUCKET}" BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}" @@ -17,11 +17,15 @@ variables: # per-job build artifacts JOB_ROOTFS_OVERLAY_PATH: "${JOB_ARTIFACTS_BASE}/job-rootfs-overlay.tar.gz" JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.zst" - S3_ARTIFACT_NAME: "mesa-${ARCH}-default-debugoptimized" + LAVA_S3_ARTIFACT_NAME: "mesa-${ARCH}-default-debugoptimized" + S3_ARTIFACT_NAME: "mesa-python-ci-artifacts" S3_RESULTS_UPLOAD: "${JOB_ARTIFACTS_BASE}" PIGLIT_NO_WINDOW: 1 VISIBILITY_GROUP: "Collabora+fdo" + before_script: + - !reference [.download_s3, before_script] script: + - . artifacts/setup-test-env.sh - ./artifacts/lava/lava-submit.sh artifacts: name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}" @@ -35,10 +39,15 @@ variables: tags: - $RUNNER_TAG after_script: - - curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${JOB_RESULTS_PATH}" | tar --zstd -x + - curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${JOB_RESULTS_PATH}" | tar --warning=no-timestamp --zstd -x needs: - - alpine/x86_64_lava_ssh_client - !reference [.required-for-hardware-jobs, needs] + - job: alpine/x86_64_lava_ssh_client + artifacts: false + - job: debian/x86_64_pyutils + artifacts: false + - job: python-artifacts + artifacts: false .lava-test:arm32: variables: @@ -49,14 +58,15 @@ variables: BOOT_METHOD: u-boot extends: - .use-debian/arm64_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_arm32 - - .use-debian/x86_64_build + - .use-debian/x86_64_pyutils - .lava-test - .use-kernel+rootfs-arm needs: - !reference [.lava-test, needs] - - kernel+rootfs_arm32 - - debian/x86_64_build - - debian-arm32 + - job: kernel+rootfs_arm32 + artifacts: false + - job: debian-arm32 + artifacts: false .lava-test-deqp:arm32: extends: @@ -73,16 +83,15 @@ variables: BOOT_METHOD: u-boot extends: - .use-debian/arm64_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_arm64 - - .use-debian/x86_64_build + - .use-debian/x86_64_pyutils - .lava-test - .use-kernel+rootfs-arm - dependencies: - - debian-arm64 needs: - !reference [.lava-test, needs] - - kernel+rootfs_arm64 - - debian/x86_64_build - - debian-arm64 + - job: kernel+rootfs_arm64 + artifacts: false + - job: debian-arm64 + artifacts: false .lava-test-deqp:arm64: variables: @@ -99,13 +108,15 @@ variables: BOOT_METHOD: u-boot extends: - .use-debian/x86_64_build-base # for same $MESA_ARTIFACTS_BASE_TAG as in kernel+rootfs_x86_64 - - .use-debian/x86_64_build + - .use-debian/x86_64_pyutils - .lava-test - .use-kernel+rootfs-x86_64 needs: - !reference [.lava-test, needs] - - kernel+rootfs_x86_64 - - debian-testing + - job: kernel+rootfs_x86_64 + artifacts: false + - job: debian-testing + artifacts: false .lava-test-deqp:x86_64: variables: @@ -145,13 +156,3 @@ variables: - .lava-test:arm64 - .lava-piglit - .lava-traces-base - -.lava-piglit:x86_64: - extends: - - .lava-test:x86_64 - - .lava-piglit - -.lava-piglit:arm64: - extends: - - .lava-test:arm64 - - .lava-piglit diff --git a/mesalib/.gitlab-ci/lava/lava-pytest.sh 
b/mesalib/.gitlab-ci/lava/lava-pytest.sh
deleted file mode 100644
index 786a669b91..0000000000
--- a/mesalib/.gitlab-ci/lava/lava-pytest.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: MIT
-# © Collabora Limited
-# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
-
-# This script runs unit/integration tests related with LAVA CI tools
-# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
-
-set -ex
-
-# Use this script in a python virtualenv for isolation
-python3 -m venv .venv
-. .venv/bin/activate
-python3 -m pip install --break-system-packages -r "${CI_PROJECT_DIR}/.gitlab-ci/lava/requirements-test.txt"
-
-TEST_DIR=${CI_PROJECT_DIR}/.gitlab-ci/tests
-
-PYTHONPATH="${TEST_DIR}:${PYTHONPATH}" python3 -m \
-    pytest "${TEST_DIR}" \
-        -W ignore::DeprecationWarning \
-        --junitxml=artifacts/ci_scripts_report.xml \
-        -m 'not slow'
diff --git a/mesalib/.gitlab-ci/lava/lava-submit.sh b/mesalib/.gitlab-ci/lava/lava-submit.sh
index 3531437f7d..b3a78d15a9 100644
--- a/mesalib/.gitlab-ci/lava/lava-submit.sh
+++ b/mesalib/.gitlab-ci/lava/lava-submit.sh
@@ -1,63 +1,101 @@
 #!/usr/bin/env bash
 # shellcheck disable=SC2086 # we want word splitting
+# shellcheck disable=SC1091 # paths only become valid at runtime
+
+# If we run in the fork (not from mesa or Marge-bot), reuse the mainline kernel and rootfs, if they exist.
+_check_artifact_path() {
+	_url="https://${1}/${2}"
+	if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "${_url}"; then
+		echo -n "${_url}"
+	fi
+}
+
+get_path_to_artifact() {
+	_mainline_artifact="$(_check_artifact_path ${BASE_SYSTEM_MAINLINE_HOST_PATH} ${1})"
+	if [ -n "${_mainline_artifact}" ]; then
+		echo -n "${_mainline_artifact}"
+		return
+	fi
+	_fork_artifact="$(_check_artifact_path ${BASE_SYSTEM_FORK_HOST_PATH} ${1})"
+	if [ -n "${_fork_artifact}" ]; then
+		echo -n "${_fork_artifact}"
+		return
+	fi
+	set +x
+	error "Sorry, I couldn't find a viable built path for ${1} in either mainline or a fork." >&2
+	echo "" >&2
+	echo "If you're working on CI, this probably means that you're missing a dependency:" >&2
+	echo "this job ran ahead of the job which was supposed to upload that artifact." >&2
+	echo "" >&2
+	echo "If you aren't working on CI, please ping @mesa/ci-helpers to see if we can help." >&2
+	echo "" >&2
+	echo "This job is going to fail, because I can't find the resources I need. Sorry." >&2
+	set -x
+	exit 1
+}
+
+. "${SCRIPTS_DIR}/setup-test-env.sh"
+
+section_start prepare_rootfs "Preparing root filesystem"
 
 set -ex
 
-# If we run in the fork (not from mesa or Marge-bot), reuse mainline kernel and rootfs, if exist.
-BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}"
-if [ "$CI_PROJECT_PATH" != "$FDO_UPSTREAM_REPO" ]; then
-    if ! curl -s -X HEAD -L --retry 4 -f --retry-delay 60 \
-        "https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then
-        echo "Using kernel and rootfs from the fork, cached from mainline is unavailable."
-        BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}"
-    else
-        echo "Using the cached mainline kernel and rootfs."
-    fi
-fi
+section_switch rootfs "Assembling root filesystem"
+ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
+[ $? 
!= 1 ] || exit 1 rm -rf results mkdir -p results/job-rootfs-overlay/ -cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/ +artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/ -cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/ -cp artifacts/ci-common/kdl.sh results/job-rootfs-overlay/ cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/ -# Prepare env vars for upload. -section_start variables "Variables passed through:" -artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh -section_end variables - tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ . ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}" -ARTIFACT_URL="${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${S3_ARTIFACT_NAME:?}.tar.zst" +# Prepare env vars for upload. +section_switch variables "Environment variables passed through to device:" +cat results/job-rootfs-overlay/set-job-env-vars.sh + +section_switch lava_submit "Submitting job for scheduling" touch results/lava.log tail -f results/lava.log & PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \ - submit \ + --farm "${FARM}" \ + --device-type "${DEVICE_TYPE}" \ + --boot-method "${BOOT_METHOD}" \ + --job-timeout-min $((CI_JOB_TIMEOUT/60 - 5)) \ --dump-yaml \ --pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \ - --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \ + --rootfs-url "${ROOTFS_URL}" \ --kernel-url-prefix "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}" \ - --kernel-external "${FORCE_KERNEL_TAG}" \ - --build-url "${ARTIFACT_URL}" \ - --job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \ - --job-timeout-min ${JOB_TIMEOUT:-30} \ + --kernel-external "${EXTERNAL_KERNEL_TAG}" \ --first-stage-init artifacts/ci-common/init-stage1.sh \ - --ci-project-dir "${CI_PROJECT_DIR}" \ - --device-type "${DEVICE_TYPE}" \ --dtb-filename "${DTB}" \ --jwt-file "${S3_JWT_FILE}" \ --kernel-image-name "${KERNEL_IMAGE_NAME}" \ --kernel-image-type "${KERNEL_IMAGE_TYPE}" \ - --boot-method "${BOOT_METHOD}" \ --visibility-group "${VISIBILITY_GROUP}" \ --lava-tags "${LAVA_TAGS}" \ --mesa-job-name "$CI_JOB_NAME" \ --structured-log-file "results/lava_job_detail.json" \ --ssh-client-image "${LAVA_SSH_CLIENT_IMAGE}" \ --project-name "${CI_PROJECT_NAME}" \ + --starting-section "${CURRENT_SECTION}" \ + --job-submitted-at "${CI_JOB_STARTED_AT}" \ + - append-overlay \ + --name=mesa-build \ + --url="https://${PIPELINE_ARTIFACTS_BASE}/${LAVA_S3_ARTIFACT_NAME:?}.tar.zst" \ + --compression=zstd \ + --path="${CI_PROJECT_DIR}" \ + --format=tar \ + - append-overlay \ + --name=job-overlay \ + --url="https://${JOB_ROOTFS_OVERLAY_PATH}" \ + --compression=gz \ + --path="/" \ + --format=tar \ + - submit \ >> results/lava.log diff --git a/mesalib/.gitlab-ci/lava/lava_job_submitter.py b/mesalib/.gitlab-ci/lava/lava_job_submitter.py index 41dbec6053..fe9988f29f 100644 --- a/mesalib/.gitlab-ci/lava/lava_job_submitter.py +++ b/mesalib/.gitlab-ci/lava/lava_job_submitter.py @@ -15,10 +15,10 @@ import sys import time from collections import defaultdict -from dataclasses import dataclass, fields -from datetime import datetime, timedelta, timezone -from os import environ, getenv, path -from typing import Any, Optional +from dataclasses import dataclass, field, fields +from datetime 
import datetime, timedelta, UTC
-from os import environ, getenv, path
-from typing import Any, Optional
+from os import environ, getenv
+from typing import Any, Optional, Self
 
 import fire
 from lavacli.utils import flow_yaml as lava_yaml
@@ -51,7 +51,7 @@ STRUCTURAL_LOG = defaultdict(list)
 
 try:
-    from ci.structured_logger import StructuredLogger
+    from structured_logger import StructuredLogger
 except ImportError as e:
     print_log(
         f"Could not import StructuredLogger library: {e}. "
@@ -91,7 +91,7 @@
 CI_JOB_STARTED_AT: datetime = (
     datetime.fromisoformat(CI_JOB_STARTED_AT_RAW)
     if CI_JOB_STARTED_AT_RAW
-    else datetime.now(timezone.utc)
+    else datetime.now(tz=UTC)
 )
 
@@ -103,20 +103,22 @@ def raise_exception_from_metadata(metadata: dict, job_id: int) -> None:
     if "result" not in metadata or metadata["result"] != "fail":
         return
     if "error_type" in metadata:
-        error_type = metadata["error_type"]
-        if error_type == "Infrastructure":
-            raise MesaCIRetriableException(
-                f"LAVA job {job_id} failed with Infrastructure Error. Retry."
-            )
+        error_type: str = metadata["error_type"]
+        error_msg: str = metadata.get("error_msg", "")
+        full_err_msg: str = error_type if not error_msg else f"{error_type}: {error_msg}"
         if error_type == "Job":
             # This happens when LAVA assumes that the job cannot terminate or
             # with mal-formed job definitions. As we are always validating the
             # jobs, only the former is probable to happen. E.g.: When some LAVA
             # action timed out more times than expected in job definition.
             raise MesaCIRetriableException(
-                f"LAVA job {job_id} failed with JobError "
+                f"LAVA job {job_id} failed with {full_err_msg} "
                 "(possible LAVA timeout misconfiguration/bug). Retry."
             )
+        if error_type:
+            raise MesaCIRetriableException(
+                f"LAVA job {job_id} failed with error type: {full_err_msg}. Retry."
+            )
     if "case" in metadata and metadata["case"] == "validate":
         raise MesaCIRetriableException(
             f"LAVA job {job_id} failed validation (possible download error). Retry."
@@ -136,36 +138,6 @@ def raise_lava_error(job) -> None:
         job.status = "fail"
 
 
-def show_final_job_data(job, colour=f"{CONSOLE_LOG['BOLD']}{CONSOLE_LOG['FG_GREEN']}"):
-    with GitlabSection(
-        "job_data",
-        "LAVA job info",
-        type=LogSectionType.LAVA_POST_PROCESSING,
-        start_collapsed=True,
-        colour=colour,
-    ):
-        wait_post_processing_retries: int = WAIT_FOR_LAVA_POST_PROCESSING_RETRIES
-        while not job.is_post_processed() and wait_post_processing_retries > 0:
-            # Wait a little until LAVA finishes processing metadata
-            time.sleep(WAIT_FOR_LAVA_POST_PROCESSING_SEC)
-            wait_post_processing_retries -= 1
-
-        if not job.is_post_processed():
-            waited_for_sec: int = (
-                WAIT_FOR_LAVA_POST_PROCESSING_RETRIES
-                * WAIT_FOR_LAVA_POST_PROCESSING_SEC
-            )
-            print_log(
-                f"Waited for {waited_for_sec} seconds "
-                "for LAVA to post-process the job, it haven't finished yet. 
" - "Dumping it's info anyway" - ) - - details: dict[str, str] = job.show() - for field, value in details.items(): - print(f"{field:<15}: {value}") - job.refresh_log() - def fetch_logs(job, max_idle_time, log_follower) -> None: is_job_hanging(job, max_idle_time) @@ -181,14 +153,13 @@ def fetch_logs(job, max_idle_time, log_follower) -> None: def is_job_hanging(job, max_idle_time): # Poll to check for new logs, assuming that a prolonged period of # silence means that the device has died and we should try it again - if datetime.now() - job.last_log_time > max_idle_time: + if datetime.now(tz=UTC) - job.last_log_time > max_idle_time: max_idle_time_min = max_idle_time.total_seconds() / 60 raise MesaCITimeoutError( - f"{CONSOLE_LOG['BOLD']}" - f"{CONSOLE_LOG['FG_YELLOW']}" - f"LAVA job {job.job_id} does not respond for {max_idle_time_min} " - "minutes. Retry." + f"{CONSOLE_LOG['FG_BOLD_YELLOW']}" + f"LAVA job {job.job_id} unresponsive for {max_idle_time_min} " + "minutes; retrying the job." f"{CONSOLE_LOG['RESET']}", timeout_duration=max_idle_time, ) @@ -236,14 +207,13 @@ def wait_for_job_get_started(job, attempt_no): print_log(f"Waiting for job {job.job_id} to start.") while not job.is_started(): current_job_duration_sec: int = int( - (datetime.now(timezone.utc) - CI_JOB_STARTED_AT).total_seconds() + (datetime.now(tz=UTC) - CI_JOB_STARTED_AT).total_seconds() ) remaining_time_sec: int = max(0, CI_JOB_TIMEOUT_SEC - current_job_duration_sec) if remaining_time_sec < EXPECTED_JOB_DURATION_SEC: job.cancel() raise MesaCIFatalException( - f"{CONSOLE_LOG['BOLD']}" - f"{CONSOLE_LOG['FG_YELLOW']}" + f"{CONSOLE_LOG['FG_BOLD_YELLOW']}" f"Job {job.job_id} only has {remaining_time_sec} seconds " "remaining to run, but it is expected to take at least " f"{EXPECTED_JOB_DURATION_SEC} seconds." 
@@ -254,15 +224,21 @@ def wait_for_job_get_started(job, attempt_no): print_log(f"Job {job.job_id} started.") -def bootstrap_log_follower() -> LogFollower: - gl = GitlabSection( - id="lava_boot", - header="LAVA boot", +def bootstrap_log_follower(main_test_case, timestamp_relative_to) -> LogFollower: + start_section = GitlabSection( + id="dut_boot", + header="Booting hardware device", type=LogSectionType.LAVA_BOOT, start_collapsed=True, + suppress_end=True, # init-stage2 prints the end for us + timestamp_relative_to=timestamp_relative_to, + ) + print(start_section.start()) + return LogFollower( + starting_section=start_section, + main_test_case=main_test_case, + timestamp_relative_to=timestamp_relative_to ) - print(gl.start()) - return LogFollower(starting_section=gl) def follow_job_execution(job, log_follower): @@ -295,23 +271,46 @@ def structural_log_phases(job, log_follower): job.log["dut_job_phases"] = phases -def print_job_final_status(job): +def print_job_final_status(job, timestamp_relative_to): + job.refresh_log() if job.status == "running": job.status = "hung" - color = LAVAJob.COLOR_STATUS_MAP.get(job.status, CONSOLE_LOG["FG_RED"]) - print_log( - f"{color}" - f"LAVA Job finished with status: {job.status}" - f"{CONSOLE_LOG['RESET']}" - ) + colour = LAVAJob.COLOR_STATUS_MAP.get(job.status, CONSOLE_LOG["FG_RED"]) + with GitlabSection( + "job_data", + f"Hardware job info for {job.status} job", + type=LogSectionType.LAVA_POST_PROCESSING, + start_collapsed=True, + colour=colour, + timestamp_relative_to=timestamp_relative_to, + ): + wait_post_processing_retries: int = WAIT_FOR_LAVA_POST_PROCESSING_RETRIES + while not job.is_post_processed() and wait_post_processing_retries > 0: + # Wait a little until LAVA finishes processing metadata + time.sleep(WAIT_FOR_LAVA_POST_PROCESSING_SEC) + wait_post_processing_retries -= 1 - job.refresh_log() - show_final_job_data(job, colour=f"{CONSOLE_LOG['BOLD']}{color}") + if not job.is_post_processed(): + waited_for_sec: int = ( + WAIT_FOR_LAVA_POST_PROCESSING_RETRIES + * WAIT_FOR_LAVA_POST_PROCESSING_SEC + ) + print_log( + "Timed out waiting for LAVA post-processing after " + f"{waited_for_sec} seconds. Printing incomplete information " + "anyway." 
+ ) + + details: dict[str, str] = job.show() + for field, value in details.items(): + print(f"{field:<15}: {value}") + job.refresh_log() def execute_job_with_retries( - proxy, job_definition, retry_count, jobs_log + proxy, job_definition, retry_count, jobs_log, main_test_case, + timestamp_relative_to ) -> Optional[LAVAJob]: last_failed_job = None for attempt_no in range(1, retry_count + 2): @@ -322,10 +321,20 @@ def execute_job_with_retries( job = LAVAJob(proxy, job_definition, job_log) STRUCTURAL_LOG["dut_attempt_counter"] = attempt_no try: - job_log["submitter_start_time"] = datetime.now().isoformat() + job_log["submitter_start_time"] = datetime.now(tz=UTC).isoformat() submit_job(job) - wait_for_job_get_started(job, attempt_no) - log_follower: LogFollower = bootstrap_log_follower() + queue_section = GitlabSection( + id="dut_queue", + header="Waiting for hardware device to become available", + type=LogSectionType.LAVA_QUEUE, + start_collapsed=False, + timestamp_relative_to=timestamp_relative_to + ) + with queue_section as section: + wait_for_job_get_started(job, attempt_no) + log_follower: LogFollower = bootstrap_log_follower( + main_test_case, timestamp_relative_to + ) follow_job_execution(job, log_follower) return job @@ -333,10 +342,10 @@ def execute_job_with_retries( job.handle_exception(exception) finally: - print_job_final_status(job) + print_job_final_status(job, timestamp_relative_to) # If LAVA takes too long to post process the job, the submitter # gives up and proceeds. - job_log["submitter_end_time"] = datetime.now().isoformat() + job_log["submitter_end_time"] = datetime.now(tz=UTC).isoformat() last_failed_job = job print_log( f"{CONSOLE_LOG['BOLD']}" @@ -349,11 +358,14 @@ def execute_job_with_retries( return last_failed_job -def retriable_follow_job(proxy, job_definition) -> LAVAJob: +def retriable_follow_job( + proxy, job_definition, main_test_case, timestamp_relative_to +) -> LAVAJob: number_of_retries = NUMBER_OF_RETRIES_TIMEOUT_DETECTION last_attempted_job = execute_job_with_retries( - proxy, job_definition, number_of_retries, STRUCTURAL_LOG["dut_jobs"] + proxy, job_definition, number_of_retries, STRUCTURAL_LOG["dut_jobs"], + main_test_case, timestamp_relative_to ) if last_attempted_job.exception is not None: @@ -386,10 +398,9 @@ def __post_init__(self): @dataclass class LAVAJobSubmitter(PathResolver): boot_method: str - ci_project_dir: str device_type: str + farm: str job_timeout_min: int # The job timeout in minutes - build_url: str = None dtb_filename: str = None dump_yaml: bool = False # Whether to dump the YAML payload to stdout first_stage_init: str = None @@ -398,29 +409,68 @@ class LAVAJobSubmitter(PathResolver): kernel_image_type: str = "" kernel_url_prefix: str = None kernel_external: str = None - lava_tags: str = "" # Comma-separated LAVA tags for the job + lava_tags: str | tuple[str, ...] 
= ()  # Comma-separated LAVA tags for the job
     mesa_job_name: str = "mesa_ci_job"
     pipeline_info: str = ""
-    rootfs_url_prefix: str = None
+    rootfs_url: str = None
     validate_only: bool = False  # Whether to only validate the job, not execute it
     visibility_group: str = None  # Only affects LAVA farm maintainers
-    job_rootfs_overlay_url: str = None
     structured_log_file: pathlib.Path = None  # Log file path with structured LAVA log
     ssh_client_image: str = None  # x86_64 SSH client image to follow the job's output
     project_name: str = None  # Project name to be used in the job name
+    starting_section: str = None  # GitLab section used to start
+    job_submitted_at: str | datetime | None = None
     __structured_log_context = contextlib.nullcontext()  # Structured Logger context
+    _overlays: dict = field(default_factory=dict, init=False)
 
-    def __post_init__(self) -> None:
+    def __post_init__(self) -> Self:
         super().__post_init__()
         # Remove mesa job names with spaces, which breaks the lava-test-case command
         self.mesa_job_name = self.mesa_job_name.split(" ")[0]
 
-        if not self.structured_log_file:
-            return
+        if self.structured_log_file:
+            self.__structured_log_context = StructuredLoggerWrapper(self).logger_context()
 
-        self.__structured_log_context = StructuredLoggerWrapper(self).logger_context()
+        if self.job_submitted_at:
+            self.job_submitted_at = datetime.fromisoformat(self.job_submitted_at)
 
         self.proxy = setup_lava_proxy()
 
+        return self
+
+    def append_overlay(
+        self, compression: str, name: str, path: str, url: str, format: str = "tar"
+    ) -> Self:
+        """
+        Append an overlay to the LAVA job definition.
+
+        Args:
+            compression (str): The compression type of the overlay (e.g., "gz", "xz").
+            name (str): The name of the overlay.
+            path (str): The path where the overlay should be applied.
+            url (str): The URL from where the overlay can be downloaded.
+            format (str, optional): The format of the overlay (default is "tar").
+
+        Returns:
+            Self: The instance of LAVAJobSubmitter with the overlay appended.
+        """
+        self._overlays[name] = {
+            "compression": compression,
+            "format": format,
+            "path": path,
+            "url": url,
+        }
+        return self
+
+    def print(self) -> Self:
+        """
+        Prints the dictionary representation of the instance and returns the instance itself.
+
+        Returns:
+            Self: The instance of the class.
+        """
+        print(self.__dict__)
+        return self
+
     def __prepare_submission(self) -> str:
         # Overwrite the timeout for the testcases with the value offered by the
         # user. 
The testcase running time should be at least 4 times greater than @@ -439,7 +489,6 @@ def __prepare_submission(self) -> str: validation_job = LAVAJob(self.proxy, job_definition) if errors := validation_job.validate(): fatal_err(f"Error in LAVA job definition: {errors}") - print_log("LAVA job definition validated successfully") return job_definition @@ -469,10 +518,24 @@ def submit(self) -> None: if self.validate_only: return + if self.starting_section: + gl = GitlabSection( + id=self.starting_section, + header="Preparing to submit job for scheduling", + type=LogSectionType.LAVA_SUBMIT, + start_collapsed=True, + timestamp_relative_to=self.job_submitted_at, + ) + gl.start() + print(gl.end()) + with self.__structured_log_context: last_attempt_job = None try: - last_attempt_job = retriable_follow_job(self.proxy, job_definition) + last_attempt_job = retriable_follow_job( + self.proxy, job_definition, + f'{self.project_name}_{self.mesa_job_name}', + self.job_submitted_at) except MesaCIRetryError as retry_exception: last_attempt_job = retry_exception.last_job @@ -484,17 +547,7 @@ def submit(self) -> None: finally: self.finish_script(last_attempt_job) - def print_log_artifact_url(self): - relative_log_path = self.structured_log_file.relative_to(pathlib.Path.cwd()) - full_path = f"$ARTIFACTS_BASE_URL/{relative_log_path}" - artifact_url = path.expandvars(full_path) - - print_log(f"Structural Logging data available at: {artifact_url}") - def finish_script(self, last_attempt_job): - if self.is_under_ci() and self.structured_log_file: - self.print_log_artifact_url() - if not last_attempt_job: # No job was run, something bad happened STRUCTURAL_LOG["job_combined_status"] = "script_crash" @@ -504,9 +557,10 @@ def finish_script(self, last_attempt_job): raise SystemExit(1) STRUCTURAL_LOG["job_combined_status"] = last_attempt_job.status + STRUCTURAL_LOG["job_exit_code"] = last_attempt_job.exit_code if last_attempt_job.status != "pass": - raise SystemExit(1) + raise SystemExit(last_attempt_job.exit_code) class StructuredLoggerWrapper: @@ -516,8 +570,10 @@ def __init__(self, submitter: LAVAJobSubmitter) -> None: def _init_logger(self): STRUCTURAL_LOG["fixed_tags"] = self.__submitter.lava_tags STRUCTURAL_LOG["dut_job_type"] = self.__submitter.device_type + STRUCTURAL_LOG["farm"] = self.__submitter.farm STRUCTURAL_LOG["job_combined_fail_reason"] = None STRUCTURAL_LOG["job_combined_status"] = "not_submitted" + STRUCTURAL_LOG["job_exit_code"] = None STRUCTURAL_LOG["dut_attempt_counter"] = 0 # Initialize dut_jobs list to enable appends @@ -554,11 +610,5 @@ def logger_context(self): # more buffering sys.stdout.reconfigure(line_buffering=True) sys.stderr.reconfigure(line_buffering=True) - # LAVA farm is giving datetime in UTC timezone, let's set it locally for the - # script run. - # Setting environ here will not affect the system time, as the os.environ - # lifetime follows the script one. 
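The TZ=UTC / tzset() hack removed below becomes unnecessary once every datetime.now() call passes tz=UTC explicitly. A minimal sketch of the difference, standard library only, assuming Python 3.11+ for datetime.UTC:

    from datetime import datetime, UTC

    # Naive timestamps inherit whatever timezone the host uses; the old code
    # forced TZ=UTC process-wide so they would line up with LAVA's UTC logs.
    naive = datetime.now()

    # Aware timestamps carry UTC explicitly; no environment mutation needed.
    aware = datetime.now(tz=UTC)
    print(aware.isoformat())  # the +00:00 suffix makes the offset explicit

    # Mixing the two styles is an error, which is why the patch converts every
    # call site rather than only some of them:
    try:
        aware - naive
    except TypeError as err:
        print(err)  # can't subtract offset-naive and offset-aware datetimes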
- environ["TZ"] = "UTC" - time.tzset() fire.Fire(LAVAJobSubmitter) diff --git a/mesalib/.gitlab-ci/lava/requirements-test.txt b/mesalib/.gitlab-ci/lava/requirements-test.txt deleted file mode 100644 index 0ff561db90..0000000000 --- a/mesalib/.gitlab-ci/lava/requirements-test.txt +++ /dev/null @@ -1,6 +0,0 @@ --r requirements.txt -freezegun==1.1.0 -hypothesis==6.67.1 -pytest==7.2.1 -pytest-cov==3.0.0 -PyYAML==5.3.1 diff --git a/mesalib/.gitlab-ci/lava/requirements.txt b/mesalib/.gitlab-ci/lava/requirements.txt deleted file mode 100644 index e89021f3fd..0000000000 --- a/mesalib/.gitlab-ci/lava/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -lavacli==1.5.2 -fire==0.5.0 diff --git a/mesalib/.gitlab-ci/lava/utils/console_format.py b/mesalib/.gitlab-ci/lava/utils/console_format.py index 3ad7600591..cb52625cb5 100644 --- a/mesalib/.gitlab-ci/lava/utils/console_format.py +++ b/mesalib/.gitlab-ci/lava/utils/console_format.py @@ -1,8 +1,13 @@ CONSOLE_LOG = { - "FG_GREEN": "\x1b[1;32;5;197m", - "FG_RED": "\x1b[1;38;5;197m", - "FG_YELLOW": "\x1b[1;33;5;197m", - "FG_MAGENTA": "\x1b[1;35;5;197m", + "FG_GREEN": "\x1b[0;32m", + "FG_BOLD_GREEN": "\x1b[0;1;32m", + "FG_RED": "\x1b[0;38;5;197m", + "FG_BOLD_RED": "\x1b[0;1;38;5;197m", + "FG_YELLOW": "\x1b[0;33m", + "FG_BOLD_YELLOW": "\x1b[0;1;33m", + "FG_MAGENTA": "\x1b[0;35m", + "FG_BOLD_MAGENTA": "\x1b[0;1;35m", + "FG_CYAN": "\x1b[0;36m", "RESET": "\x1b[0m", "UNDERLINED": "\x1b[3m", "BOLD": "\x1b[1m", diff --git a/mesalib/.gitlab-ci/lava/utils/constants.py b/mesalib/.gitlab-ci/lava/utils/constants.py index 8a688fb04d..82f0b66fc7 100644 --- a/mesalib/.gitlab-ci/lava/utils/constants.py +++ b/mesalib/.gitlab-ci/lava/utils/constants.py @@ -23,3 +23,10 @@ # This is considered noise, since LAVA produces this log after receiving a package of feedback # messages. 
LOG_DEBUG_FEEDBACK_NOISE = "Listened to connection for namespace 'dut' done" + +A6XX_GPU_RECOVERY_WATCH_PERIOD_MIN = 3 +A6XX_GPU_RECOVERY_FAILURE_MAX_COUNT = 30 +A6XX_GPU_RECOVERY_FAILURE_MESSAGE = ( + "cx gdsc didn't collapse", + "Timeout waiting for GMU OOB", +) diff --git a/mesalib/.gitlab-ci/lava/utils/gitlab_section.py b/mesalib/.gitlab-ci/lava/utils/gitlab_section.py index 034afb4eb3..1e9894fc85 100644 --- a/mesalib/.gitlab-ci/lava/utils/gitlab_section.py +++ b/mesalib/.gitlab-ci/lava/utils/gitlab_section.py @@ -2,7 +2,8 @@ import re from dataclasses import dataclass, field -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC +from math import floor from typing import TYPE_CHECKING, Optional from lava.utils.console_format import CONSOLE_LOG @@ -18,8 +19,11 @@ class GitlabSection: header: str type: LogSectionType start_collapsed: bool = False + suppress_end: bool = False + suppress_start: bool = False + timestamp_relative_to: Optional[datetime] = None escape: str = "\x1b[0K" - colour: str = f"{CONSOLE_LOG['BOLD']}{CONSOLE_LOG['FG_GREEN']}" + colour: str = f"{CONSOLE_LOG['FG_CYAN']}" __start_time: Optional[datetime] = field(default=None, init=False) __end_time: Optional[datetime] = field(default=None, init=False) @@ -58,7 +62,12 @@ def section(self, marker: str, header: str, time: datetime) -> str: timestamp = self.get_timestamp(time) before_header = ":".join([preamble, timestamp, section_id]) - colored_header = f"{self.colour}{header}\x1b[0m" if header else "" + if self.timestamp_relative_to: + delta = self.start_time - self.timestamp_relative_to + reltime = f"[{floor(delta.seconds / 60):02}:{(delta.seconds % 60):02}] " + else: + reltime = "" + colored_header = f"{self.colour}{reltime}{header}\x1b[0m" if header else "" header_wrapper = "\r" + f"{self.escape}{colored_header}" return f"{before_header}{header_wrapper}" @@ -82,15 +91,25 @@ def __exit__(self, exc_type, exc_val, exc_tb): def start(self) -> str: assert not self.has_finished, "Starting an already finished section" - self.__start_time = datetime.now() + self.__start_time = datetime.now(tz=UTC) + return self.print_start_section() + + def print_start_section(self) -> str: + if self.suppress_start: + return "" return self.section(marker="start", header=self.header, time=self.__start_time) def end(self) -> str: assert self.has_started, "Ending an uninitialized section" - self.__end_time = datetime.now() + self.__end_time = datetime.now(tz=UTC) assert ( self.__end_time >= self.__start_time ), "Section execution time will be negative" + return self.print_end_section() + + def print_end_section(self) -> str: + if self.suppress_end: + return "" return self.section(marker="end", header="", time=self.__end_time) def delta_time(self) -> Optional[timedelta]: @@ -98,6 +117,6 @@ def delta_time(self) -> Optional[timedelta]: return self.__end_time - self.__start_time if self.has_started: - return datetime.now() - self.__start_time + return datetime.now(tz=UTC) - self.__start_time return None diff --git a/mesalib/.gitlab-ci/lava/utils/lava_job.py b/mesalib/.gitlab-ci/lava/utils/lava_job.py index f05168dac2..2326c399b5 100644 --- a/mesalib/.gitlab-ci/lava/utils/lava_job.py +++ b/mesalib/.gitlab-ci/lava/utils/lava_job.py @@ -1,7 +1,7 @@ import re import xmlrpc from collections import defaultdict -from datetime import datetime +from datetime import datetime, UTC from typing import Any, Optional from lava.exceptions import ( @@ -21,9 +21,9 @@ class LAVAJob: COLOR_STATUS_MAP: dict[str, str] = { "pass": 
CONSOLE_LOG["FG_GREEN"], - "hung": CONSOLE_LOG["FG_YELLOW"], - "fail": CONSOLE_LOG["FG_RED"], - "canceled": CONSOLE_LOG["FG_MAGENTA"], + "hung": CONSOLE_LOG["FG_BOLD_YELLOW"], + "fail": CONSOLE_LOG["FG_BOLD_RED"], + "canceled": CONSOLE_LOG["FG_BOLD_MAGENTA"], } def __init__(self, proxy, definition, log=defaultdict(str)) -> None: @@ -35,10 +35,14 @@ def __init__(self, proxy, definition, log=defaultdict(str)) -> None: self._is_finished = False self.log: dict[str, Any] = log self.status = "not_submitted" + # Set the default exit code to 1 because we should set it to 0 only if the job has passed. + # If it fails or if it is interrupted, the exit code should be set to a non-zero value to + # make the GitLab job fail. + self._exit_code: int = 1 self.__exception: Optional[Exception] = None def heartbeat(self) -> None: - self.last_log_time: datetime = datetime.now() + self.last_log_time: datetime = datetime.now(tz=UTC) self.status = "running" @property @@ -50,6 +54,15 @@ def status(self, new_status: str) -> None: self._status = new_status self.log["status"] = self._status + @property + def exit_code(self) -> int: + return self._exit_code + + @exit_code.setter + def exit_code(self, code: int) -> None: + self._exit_code = code + self.log["exit_code"] = self._exit_code + @property def job_id(self) -> int: return self._job_id @@ -158,11 +171,12 @@ def parse_job_result_from_log( last_line = None # Print all lines. lines[:None] == lines[:] for idx, line in enumerate(lava_lines): - if result := re.search(r"hwci: mesa: (pass|fail)", line): + if result := re.search(r"hwci: mesa: (pass|fail), exit_code: (\d+)", line): self._is_finished = True - self.status = result[1] + self.status = result.group(1) + self.exit_code = int(result.group(2)) - last_line = idx + 1 + last_line = idx # We reached the log end here. hwci script has finished. break return lava_lines[:last_line] @@ -172,6 +186,9 @@ def handle_exception(self, exception: Exception): self.cancel() self.exception = exception + # Set the exit code to nonzero value + self.exit_code = 1 + # Give more accurate status depending on exception if isinstance(exception, MesaCIKnownIssueException): self.status = "canceled" diff --git a/mesalib/.gitlab-ci/lava/utils/lava_job_definition.py b/mesalib/.gitlab-ci/lava/utils/lava_job_definition.py index 1227297d06..55cd30d4b7 100644 --- a/mesalib/.gitlab-ci/lava/utils/lava_job_definition.py +++ b/mesalib/.gitlab-ci/lava/utils/lava_job_definition.py @@ -34,6 +34,10 @@ class LAVAJobDefinition: def __init__(self, job_submitter: "LAVAJobSubmitter") -> None: self.job_submitter: "LAVAJobSubmitter" = job_submitter + # NFS args provided by LAVA + self.lava_nfs_args: str = "root=/dev/nfs rw nfsroot=$NFS_SERVER_IP:$NFS_ROOTFS,tcp,hard,v3 ip=dhcp" + # extra_nfsroot_args appends to cmdline + self.extra_nfsroot_args: str = " init=/init rootwait usbcore.quirks=0bda:8153:k" def has_ssh_support(self) -> bool: if FORCE_UART: @@ -57,18 +61,20 @@ def generate_lava_yaml_payload(self) -> dict[str, Any]: actions for the LAVA job submission. 
""" args = self.job_submitter - values = self.generate_metadata() nfsrootfs = { - "url": f"{args.rootfs_url_prefix}/lava-rootfs.tar.zst", + "url": f"{args.rootfs_url}", "compression": "zstd", + "format": "tar", + "overlays": args._overlays, } + values = self.generate_metadata() init_stage1_steps = self.init_stage1_steps() - artifact_download_steps = self.artifact_download_steps() + jwt_steps = self.jwt_steps() deploy_actions = [] boot_action = [] - test_actions = uart_test_actions(args, init_stage1_steps, artifact_download_steps) + test_actions = uart_test_actions(args, init_stage1_steps, jwt_steps) if args.boot_method == "fastboot": deploy_actions = fastboot_deploy_actions(self, nfsrootfs) @@ -90,7 +96,7 @@ def generate_lava_yaml_payload(self) -> dict[str, Any]: wrap_boot_action(boot_action) test_actions = ( generate_dut_test(args, init_stage1_steps), - generate_docker_test(args, artifact_download_steps), + generate_docker_test(args, jwt_steps), ) values["actions"] = [ @@ -115,6 +121,22 @@ def generate_lava_job_definition(self) -> str: yaml.dump(self.generate_lava_yaml_payload(), job_stream) return job_stream.getvalue() + def consume_lava_tags_args(self, values: dict[str, Any]): + # python-fire parses --lava-tags without arguments as True + if isinstance(self.job_submitter.lava_tags, tuple): + values["tags"] = self.job_submitter.lava_tags + # python-fire parses "tag-1,tag2" as str and "tag1,tag2" as tuple + # even if the -- --separator is something other than '-' + elif isinstance(self.job_submitter.lava_tags, str): + # Split string tags by comma, removing any trailing commas + values["tags"] = self.job_submitter.lava_tags.rstrip(",").split(",") + # Ensure tags are always a list of non-empty strings + if "tags" in values: + values["tags"] = [tag for tag in values["tags"] if tag] + # Remove empty tags + if "tags" in values and not values["tags"]: + del values["tags"] + def generate_metadata(self) -> dict[str, Any]: # General metadata and permissions values = { @@ -122,7 +144,7 @@ def generate_metadata(self) -> dict[str, Any]: "device_type": self.job_submitter.device_type, "visibility": {"group": [self.job_submitter.visibility_group]}, "priority": JOB_PRIORITY, - "context": {"extra_nfsroot_args": " init=/init rootwait usbcore.quirks=0bda:8153:k"}, + "context": {"extra_nfsroot_args": self.extra_nfsroot_args}, "timeouts": { "job": {"minutes": self.job_submitter.job_timeout_min}, "actions": { @@ -144,8 +166,7 @@ def generate_metadata(self) -> dict[str, Any]: }, } - if self.job_submitter.lava_tags: - values["tags"] = self.job_submitter.lava_tags.split(",") + self.consume_lava_tags_args(values) # QEMU lava jobs mandate proper arch value in the context if self.job_submitter.boot_method == "qemu-nfs": @@ -169,39 +190,33 @@ def attach_external_modules(self, deploy_field): "compression": "zstd" } - def artifact_download_steps(self): + def jwt_steps(self): """ This function is responsible for setting up the SSH server in the DUT and to export the first boot environment to a file. """ - # Putting JWT pre-processing and mesa download, within init-stage1.sh file, - # as we do with non-SSH version. 
- download_steps = [ - "set -ex", - "curl -L --retry 4 -f --retry-all-errors --retry-delay 60 " - f"{self.job_submitter.job_rootfs_overlay_url} | tar -xz -C /", - f"mkdir -p {self.job_submitter.ci_project_dir}", - f"curl -L --retry 4 -f --retry-all-errors --retry-delay 60 {self.job_submitter.build_url} | " - f"tar --zstd -x -C {self.job_submitter.ci_project_dir}", + # Pre-process the JWT + jwt_steps = [ + "set -e", ] # If the JWT file is provided, we will use it to authenticate with the cloud # storage provider and will hide it from the job output in Gitlab. if self.job_submitter.jwt_file: with open(self.job_submitter.jwt_file) as jwt_file: - download_steps += [ + jwt_steps += [ "set +x # HIDE_START", f'echo -n "{jwt_file.read()}" > "{self.job_submitter.jwt_file}"', "set -x # HIDE_END", f'echo "export S3_JWT_FILE={self.job_submitter.jwt_file}" >> /set-job-env-vars.sh', ] else: - download_steps += [ + jwt_steps += [ "echo Could not find jwt file, disabling S3 requests...", "sed -i '/S3_RESULTS_UPLOAD/d' /set-job-env-vars.sh", ] - return download_steps + return jwt_steps def init_stage1_steps(self) -> list[str]: run_steps = [] @@ -215,7 +230,7 @@ def init_stage1_steps(self) -> list[str]: # For vmware farm, patch nameserver as 8.8.8.8 is off limit. # This is temporary and will be reverted once the farm is moved. if self.job_submitter.mesa_job_name.startswith("vmware-"): - run_steps += [x.rstrip().replace("nameserver 8.8.8.8", "nameserver 10.25.198.110") for x in init_sh if not x.startswith("#") and x.rstrip()] + run_steps += [x.rstrip().replace("nameserver 8.8.8.8", "nameserver 192.19.189.10") for x in init_sh if not x.startswith("#") and x.rstrip()] else: run_steps += [x.rstrip() for x in init_sh if not x.startswith("#") and x.rstrip()] @@ -228,4 +243,6 @@ def init_stage1_steps(self) -> list[str]: + '-o "/lib/firmware/qcom/sm8350/a660_zap.mbn"' ) + run_steps.append("export CURRENT_SECTION=dut_boot") + return run_steps diff --git a/mesalib/.gitlab-ci/lava/utils/lava_log_hints.py b/mesalib/.gitlab-ci/lava/utils/lava_log_hints.py index 04c158eb34..3a35ae97c3 100644 --- a/mesalib/.gitlab-ci/lava/utils/lava_log_hints.py +++ b/mesalib/.gitlab-ci/lava/utils/lava_log_hints.py @@ -1,8 +1,9 @@ from __future__ import annotations import re +from datetime import datetime, timedelta from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Sequence +from typing import TYPE_CHECKING, Any, Optional, Sequence if TYPE_CHECKING: from lava.utils import LogFollower @@ -13,6 +14,9 @@ KNOWN_ISSUE_R8152_MAX_CONSECUTIVE_COUNTER, LOG_DEBUG_FEEDBACK_NOISE, KNOWN_ISSUE_R8152_PATTERNS, + A6XX_GPU_RECOVERY_WATCH_PERIOD_MIN, + A6XX_GPU_RECOVERY_FAILURE_MESSAGE, + A6XX_GPU_RECOVERY_FAILURE_MAX_COUNT, ) from lava.utils.log_section import LogSectionType @@ -29,6 +33,8 @@ class LAVALogHints: log_follower: LogFollower r8152_issue_consecutive_counter: int = field(default=0, init=False) reboot_counter: int = field(default=0, init=False) + a6xx_gpu_recovery_fail_counter: int = field(default=0, init=False) + a6xx_gpu_first_fail_time: Optional[datetime] = field(default=None, init=False) def raise_known_issue(self, message) -> None: raise MesaCIKnownIssueException( @@ -44,6 +50,7 @@ def detect_failure(self, new_lines: list[dict[str, Any]]): continue self.detect_r8152_issue(line) self.detect_forced_reboot(line) + self.detect_a6xx_gpu_recovery_failure(line) def detect_r8152_issue(self, line): if self.log_follower.phase in ( @@ -77,3 +84,23 @@ def detect_forced_reboot(self, line: dict[str, Any]) -> None: 
self.raise_known_issue( "Forced reboot detected during test phase, failing the job..." ) + + # If the a6xx gpu repeatedly fails to recover over a short period of time, + # then successful recovery is unlikely so cancel the job preemptively. + def detect_a6xx_gpu_recovery_failure(self, line: dict[str, Any]) -> None: + if search_known_issue_patterns(A6XX_GPU_RECOVERY_FAILURE_MESSAGE, line["msg"]): + time_of_failure = datetime.fromisoformat(line["dt"]) + self.a6xx_gpu_recovery_fail_counter += 1 + + if self.a6xx_gpu_first_fail_time is None: + self.a6xx_gpu_first_fail_time = time_of_failure + + if self.a6xx_gpu_recovery_fail_counter == A6XX_GPU_RECOVERY_FAILURE_MAX_COUNT: + time_since_first_fail = time_of_failure - self.a6xx_gpu_first_fail_time + if time_since_first_fail <= timedelta(minutes=A6XX_GPU_RECOVERY_WATCH_PERIOD_MIN): + self.raise_known_issue( + "Repeated GPU recovery failure detected: cancelling the job" + ) + else: + self.a6xx_gpu_first_fail_time = None + self.a6xx_gpu_recovery_fail_counter = 0 diff --git a/mesalib/.gitlab-ci/lava/utils/lava_proxy.py b/mesalib/.gitlab-ci/lava/utils/lava_proxy.py index 581ec46038..ae127acbff 100644 --- a/mesalib/.gitlab-ci/lava/utils/lava_proxy.py +++ b/mesalib/.gitlab-ci/lava/utils/lava_proxy.py @@ -23,8 +23,6 @@ def setup_lava_proxy(): ) proxy = xmlrpc.client.ServerProxy(uri_str, allow_none=True, transport=transport) - print_log(f'Proxy for {config["uri"]} created.') - return proxy diff --git a/mesalib/.gitlab-ci/lava/utils/log_follower.py b/mesalib/.gitlab-ci/lava/utils/log_follower.py index 7d54f88446..d76b70203e 100644 --- a/mesalib/.gitlab-ci/lava/utils/log_follower.py +++ b/mesalib/.gitlab-ci/lava/utils/log_follower.py @@ -14,7 +14,7 @@ import re import sys from dataclasses import dataclass, field -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC from typing import Optional, Union from lava.exceptions import MesaCITimeoutError @@ -33,6 +33,8 @@ @dataclass class LogFollower: starting_section: Optional[GitlabSection] = None + main_test_case: Optional[str] = None + timestamp_relative_to: Optional[datetime] = None _current_section: Optional[GitlabSection] = None section_history: list[GitlabSection] = field(default_factory=list, init=False) timeout_durations: dict[LogSectionType, timedelta] = field( @@ -122,7 +124,9 @@ def manage_gl_sections(self, line): return for log_section in LOG_SECTIONS: - if new_section := log_section.from_log_line_to_section(line): + if new_section := log_section.from_log_line_to_section( + line, self.main_test_case, self.timestamp_relative_to + ): self.update_section(new_section) break @@ -252,7 +256,7 @@ def parse_lava_line(self, line) -> Optional[str]: if line["lvl"] in ["results", "feedback", "debug"]: return elif line["lvl"] in ["warning", "error"]: - prefix = CONSOLE_LOG["FG_RED"] + prefix = CONSOLE_LOG["FG_BOLD_RED"] suffix = CONSOLE_LOG["RESET"] elif line["lvl"] == "input": prefix = "$ " @@ -304,11 +308,13 @@ def fix_lava_gitlab_section_log(): def print_log(msg: str, *args) -> None: # Reset color from timestamp, since `msg` can tint the terminal color - print(f"{CONSOLE_LOG['RESET']}{datetime.now()}: {msg}", *args) + ts = datetime.now(tz=UTC) + ts_str = f"{ts.hour:02}:{ts.minute:02}:{ts.second:02}.{int(ts.microsecond / 1000):03}" + print(f"{CONSOLE_LOG['RESET']}{ts_str}: {msg}", *args) def fatal_err(msg, exception=None): - colored_msg = f"{CONSOLE_LOG['FG_RED']}" + colored_msg = f"{CONSOLE_LOG['FG_BOLD_RED']}" print_log(colored_msg, f"{msg}", f"{CONSOLE_LOG['RESET']}") if 
exception: raise exception diff --git a/mesalib/.gitlab-ci/lava/utils/log_section.py b/mesalib/.gitlab-ci/lava/utils/log_section.py index 25620a6155..6ce607ee47 100644 --- a/mesalib/.gitlab-ci/lava/utils/log_section.py +++ b/mesalib/.gitlab-ci/lava/utils/log_section.py @@ -1,6 +1,6 @@ import re from dataclasses import dataclass -from datetime import timedelta +from datetime import datetime, timedelta from enum import Enum, auto from os import getenv from typing import Optional, Pattern, Union @@ -10,12 +10,22 @@ class LogSectionType(Enum): UNKNOWN = auto() + LAVA_SUBMIT = auto() + LAVA_QUEUE = auto() LAVA_BOOT = auto() TEST_DUT_SUITE = auto() TEST_SUITE = auto() TEST_CASE = auto() LAVA_POST_PROCESSING = auto() +# How long to wait whilst we try to submit a job; make it fairly short, +# since the job will be retried. +LAVA_SUBMIT_TIMEOUT = int(getenv("LAVA_SUBMIT_TIMEOUT", 5)) + +# How long should we wait for a device to become available? +# For post-merge jobs, this should be ~infinite, but we can fail more +# aggressively for pre-merge. +LAVA_QUEUE_TIMEOUT = int(getenv("LAVA_QUEUE_TIMEOUT", 60)) # Empirically, successful device boot in LAVA time takes less than 3 # minutes. @@ -25,9 +35,13 @@ class LogSectionType(Enum): # the enqueue delay. LAVA_BOOT_TIMEOUT = int(getenv("LAVA_BOOT_TIMEOUT", 9)) +# Estimated overhead in minutes for a job from GitLab to reach the test phase, +# including LAVA scheduling and boot duration +LAVA_TEST_OVERHEAD_MIN = 5 + # Test DUT suite phase is where the initialization happens in DUT, not on docker. # The device will be listening to SSH session until the end of the job. -LAVA_TEST_DUT_SUITE_TIMEOUT = int(getenv("JOB_TIMEOUT", 60)) +LAVA_TEST_DUT_SUITE_TIMEOUT = int(getenv("CI_JOB_TIMEOUT")) // 60 - LAVA_TEST_OVERHEAD_MIN # Test suite phase is where the initialization happens on docker. LAVA_TEST_SUITE_TIMEOUT = int(getenv("LAVA_TEST_SUITE_TIMEOUT", 5)) @@ -35,7 +49,7 @@ class LogSectionType(Enum): # Test cases may take a long time, this script has no right to interrupt # them. But if the test case takes almost 1h, it will never succeed due to # Gitlab job timeout. 
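The CI_JOB_TIMEOUT-based derivation here (DUT suite above, test case below) stops trusting a hand-maintained JOB_TIMEOUT and budgets each phase from GitLab's own job timeout (which is in seconds), minus a fixed scheduling-and-boot allowance. The arithmetic, as a sketch:

    from os import getenv

    LAVA_TEST_OVERHEAD_MIN = 5  # scheduling + boot allowance (minutes)

    # Note: the patch calls int(getenv("CI_JOB_TIMEOUT")) with no default, so a
    # missing variable raises TypeError; run-pytest.sh exports 3600 for exactly
    # that reason. A default is used here only to keep the sketch runnable.
    ci_job_timeout_s = int(getenv("CI_JOB_TIMEOUT", "3600"))

    test_case_timeout_min = ci_job_timeout_s // 60 - LAVA_TEST_OVERHEAD_MIN
    print(test_case_timeout_min)  # 55 for a one-hour GitLab job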
-LAVA_TEST_CASE_TIMEOUT = int(getenv("JOB_TIMEOUT", 60)) +LAVA_TEST_CASE_TIMEOUT = int(getenv("CI_JOB_TIMEOUT")) // 60 - LAVA_TEST_OVERHEAD_MIN # LAVA post processing may refer to a test suite teardown, or the # adjustments to start the next test_case @@ -43,6 +57,8 @@ class LogSectionType(Enum): FALLBACK_GITLAB_SECTION_TIMEOUT = timedelta(minutes=10) DEFAULT_GITLAB_SECTION_TIMEOUTS = { + LogSectionType.LAVA_SUBMIT: timedelta(minutes=LAVA_SUBMIT_TIMEOUT), + LogSectionType.LAVA_QUEUE: timedelta(minutes=LAVA_QUEUE_TIMEOUT), LogSectionType.LAVA_BOOT: timedelta(minutes=LAVA_BOOT_TIMEOUT), LogSectionType.TEST_DUT_SUITE: timedelta(minutes=LAVA_TEST_DUT_SUITE_TIMEOUT), LogSectionType.TEST_SUITE: timedelta(minutes=LAVA_TEST_SUITE_TIMEOUT), @@ -63,7 +79,8 @@ class LogSection: collapsed: bool = False def from_log_line_to_section( - self, lava_log_line: dict[str, str] + self, lava_log_line: dict[str, str], main_test_case: Optional[str], + timestamp_relative_to: Optional[datetime] ) -> Optional[GitlabSection]: if lava_log_line["lvl"] not in self.levels: return @@ -71,12 +88,16 @@ def from_log_line_to_section( if match := re.search(self.regex, lava_log_line["msg"]): section_id = self.section_id.format(*match.groups()) section_header = self.section_header.format(*match.groups()) + is_main_test_case = section_id == main_test_case timeout = DEFAULT_GITLAB_SECTION_TIMEOUTS[self.section_type] return GitlabSection( id=section_id, header=f"{section_header} - Timeout: {timeout}", type=self.section_type, start_collapsed=self.collapsed, + suppress_start=is_main_test_case, + suppress_end=is_main_test_case, + timestamp_relative_to=timestamp_relative_to, ) diff --git a/mesalib/.gitlab-ci/lava/utils/uart_job_definition.py b/mesalib/.gitlab-ci/lava/utils/uart_job_definition.py index d375898ad7..be06f61f45 100644 --- a/mesalib/.gitlab-ci/lava/utils/uart_job_definition.py +++ b/mesalib/.gitlab-ci/lava/utils/uart_job_definition.py @@ -18,6 +18,7 @@ def fastboot_deploy_actions( job_definition: "LAVAJobDefinition", nfsrootfs ) -> tuple[dict[str, Any], ...]: args = job_definition.job_submitter + cmdline = f"{job_definition.lava_nfs_args}{job_definition.extra_nfsroot_args}" fastboot_deploy_nfs = { "timeout": {"minutes": 10}, "to": "nfs", @@ -39,7 +40,7 @@ def fastboot_deploy_actions( "steps": [ f"cat Image.gz {args.dtb_filename}.dtb > Image.gz+dtb", "mkbootimg --kernel Image.gz+dtb" - + ' --cmdline "root=/dev/nfs rw nfsroot=$NFS_SERVER_IP:$NFS_ROOTFS,tcp,hard rootwait ip=dhcp init=/init"' + + f' --cmdline "{cmdline}"' + " --pagesize 4096 --base 0x80000000 -o boot.img", ], } @@ -101,7 +102,7 @@ def qemu_deploy_actions(job_definition: "LAVAJobDefinition", nfsrootfs) -> tuple def uart_test_actions( - args: "LAVAJobSubmitter", init_stage1_steps: list[str], artifact_download_steps: list[str] + args: "LAVAJobSubmitter", init_stage1_steps: list[str], jwt_steps: list[str] ) -> tuple[dict[str, Any]]: # skeleton test definition: only declaring each job as a single 'test' # since LAVA's test parsing is not useful to us @@ -130,11 +131,9 @@ def uart_test_actions( } run_steps += init_stage1_steps - run_steps += artifact_download_steps + run_steps += jwt_steps run_steps += [ - f"mkdir -p {args.ci_project_dir}", - f"curl {args.build_url} | tar --zstd -x -C {args.ci_project_dir}", # Sleep a bit to give time for bash to dump shell xtrace messages into # console which may cause interleaving with LAVA_SIGNAL_STARTTC in some # devices like a618. 
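The boot-image cmdline in uart_job_definition.py above is now assembled from the two strings introduced in LAVAJobDefinition.__init__(), so the fastboot deploy action and the job-level extra_nfsroot_args context can no longer drift apart. Spelled out:

    lava_nfs_args = (
        "root=/dev/nfs rw nfsroot=$NFS_SERVER_IP:$NFS_ROOTFS,tcp,hard,v3 ip=dhcp"
    )
    extra_nfsroot_args = " init=/init rootwait usbcore.quirks=0bda:8153:k"

    # Exactly what fastboot_deploy_actions() passes to mkbootimg --cmdline:
    cmdline = f"{lava_nfs_args}{extra_nfsroot_args}"
    print(cmdline)
    # -> root=/dev/nfs rw nfsroot=$NFS_SERVER_IP:$NFS_ROOTFS,tcp,hard,v3 ip=dhcp
    #    init=/init rootwait usbcore.quirks=0bda:8153:k

Relative to the old hard-coded string, this pins NFSv3 and adds the usbcore quirk for the RTL8153 USB NIC (0bda:8153).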
diff --git a/mesalib/.gitlab-ci/meson/build.sh b/mesalib/.gitlab-ci/meson/build.sh index 82d590d5f6..5279e180e7 100644 --- a/mesalib/.gitlab-ci/meson/build.sh +++ b/mesalib/.gitlab-ci/meson/build.sh @@ -1,12 +1,20 @@ #!/usr/bin/env bash # shellcheck disable=SC1003 # works for us now... # shellcheck disable=SC2086 # we want word splitting +# shellcheck disable=SC1091 # paths only become valid at runtime + +. "${SCRIPTS_DIR}/setup-test-env.sh" section_switch meson-cross-file "meson: cross file generate" set -e set -o xtrace +comma_separated() { + local IFS=, + echo "$*" +} + CROSS_FILE=/cross_file-"$CROSS".txt export PATH=$PATH:$PWD/.gitlab-ci/build @@ -98,13 +106,54 @@ case $CI_JOB_NAME in ;; esac +# LTO handling +case $CI_PIPELINE_SOURCE in + schedule) + # run builds with LTO only for nightly + if [ "$CI_JOB_NAME" == "debian-ppc64el" ]; then + # /tmp/ccWlDCPV.s: Assembler messages: + # /tmp/ccWlDCPV.s:15250880: Error: operand out of range (0xfffffffffdd4e688 is not between 0xfffffffffe000000 and 0x1fffffc) + LTO=false + # enable one by one for now + elif [ "$CI_JOB_NAME" == "fedora-release" ] || [ "$CI_JOB_NAME" == "debian-build-testing" ]; then + LTO=true + else + LTO=false + fi + ;; + *) + LTO=false + ;; +esac + +if [ "$LTO" == "true" ]; then + MAX_LD=2 +else + MAX_LD=${FDO_CI_CONCURRENT:-4} +fi + +# shellcheck disable=2206 +force_fallback_for=( + # FIXME: explain what these are needed for + perfetto + syn + paste + pest + pest_derive + pest_generator + pest_meta + roxmltree + indexmap + ${FORCE_FALLBACK_FOR:-} +) + section_switch meson-configure "meson: configure" rm -rf _build meson setup _build \ --native-file=native.file \ --wrap-mode=nofallback \ - --force-fallback-for perfetto,syn,paste \ + --force-fallback-for "$(comma_separated "${force_fallback_for[@]}")" \ ${CROSS+--cross "$CROSS_FILE"} \ -D prefix=$PWD/install \ -D libdir=lib \ @@ -123,26 +172,20 @@ meson setup _build \ -D vulkan-drivers=${VULKAN_DRIVERS:-[]} \ -D video-codecs=all \ -D werror=true \ + -D b_lto=${LTO} \ + -D backend_max_links=${MAX_LD} \ ${EXTRA_OPTION} cd _build meson configure uncollapsed_section_switch meson-build "meson: build" -if command -V mold &> /dev/null ; then - mold --run ninja -else - ninja -fi +ninja uncollapsed_section_switch meson-test "meson: test" LC_ALL=C.UTF-8 meson test --num-processes "${FDO_CI_CONCURRENT:-4}" --print-errorlogs ${MESON_TEST_ARGS} section_switch meson-install "meson: install" -if command -V mold &> /dev/null ; then - mold --run ninja install -else - ninja install -fi +ninja install cd .. section_end meson-install diff --git a/mesalib/.gitlab-ci/piglit/piglit-runner.sh b/mesalib/.gitlab-ci/piglit/piglit-runner.sh index 878d30307b..7c7f565f1e 100644 --- a/mesalib/.gitlab-ci/piglit/piglit-runner.sh +++ b/mesalib/.gitlab-ci/piglit/piglit-runner.sh @@ -1,5 +1,8 @@ #!/usr/bin/env bash # shellcheck disable=SC2086 # we want word splitting +# shellcheck disable=SC1091 # paths only become valid at runtime + +. "${SCRIPTS_DIR}/setup-test-env.sh" set -ex @@ -16,9 +19,6 @@ export EGL_PLATFORM=surfaceless ARCH=$(uname -m) export VK_DRIVER_FILES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json" -RESULTS=$PWD/${PIGLIT_RESULTS_DIR:-results} -mkdir -p $RESULTS - # Ensure Mesa Shader Cache resides on tmpfs. 
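On the LTO gating in build.sh above: LTO defers code generation to the link step, so each linker process becomes long-running and memory-hungry, hence the backend_max_links cap. The selection logic restated as a sketch (job and variable names as in the script):

    import os

    def lto_and_max_links(pipeline_source: str, job_name: str) -> tuple[bool, int]:
        lto = (
            pipeline_source == "schedule"  # nightly pipelines only
            and job_name in ("fedora-release", "debian-build-testing")
            # debian-ppc64el stays off the list: its assembler hits an
            # operand-out-of-range error under LTO (see the comment above).
        )
        # Cap LTO links at 2 to avoid OOM; otherwise follow FDO_CI_CONCURRENT.
        max_ld = 2 if lto else int(os.getenv("FDO_CI_CONCURRENT", "4"))
        return lto, max_ld

    # With FDO_CI_CONCURRENT unset:
    print(lto_and_max_links("schedule", "fedora-release"))  # (True, 2)
    print(lto_and_max_links("push", "fedora-release"))      # (False, 4)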
SHADER_CACHE_HOME=${XDG_CACHE_HOME:-${HOME}/.cache} SHADER_CACHE_DIR=${MESA_SHADER_CACHE_DIR:-${SHADER_CACHE_HOME}/mesa_shader_cache} @@ -39,7 +39,7 @@ if [ "$GALLIUM_DRIVER" = "virpipe" ]; then GALLIUM_DRIVER=llvmpipe \ GALLIVM_PERF="nopt" \ - virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 & + virgl_test_server $VTEST_ARGS >$RESULTS_DIR/vtest-log.txt 2>&1 & sleep 1 fi @@ -78,6 +78,10 @@ if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/$GPU_VERSION-skips.txt" fi +if [ -e "$INSTALL/$GPU_VERSION-slow-skips.txt" ] && [[ $CI_JOB_NAME != *full* ]]; then + PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/$GPU_VERSION-slow-skips.txt" +fi + if [ "$PIGLIT_PLATFORM" != "gbm" ] ; then PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/x11-skips.txt" fi @@ -91,7 +95,7 @@ set +e piglit-runner \ run \ --piglit-folder /piglit \ - --output $RESULTS \ + --output $RESULTS_DIR \ --jobs ${FDO_CI_CONCURRENT:-4} \ --skips $INSTALL/all-skips.txt $PIGLIT_SKIPS \ --flakes $INSTALL/$GPU_VERSION-flakes.txt \ @@ -104,8 +108,8 @@ PIGLIT_EXITCODE=$? deqp-runner junit \ --testsuite $PIGLIT_PROFILES \ - --results $RESULTS/failures.csv \ - --output $RESULTS/junit.xml \ + --results $RESULTS_DIR/failures.csv \ + --output $RESULTS_DIR/junit.xml \ --limit 50 \ --template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml" @@ -114,7 +118,7 @@ if [ -n "$FLAKES_CHANNEL" ]; then python3 $INSTALL/report-flakes.py \ --host irc.oftc.net \ --port 6667 \ - --results $RESULTS/results.csv \ + --results $RESULTS_DIR/results.csv \ --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \ --channel "$FLAKES_CHANNEL" \ --runner "$CI_RUNNER_DESCRIPTION" \ @@ -127,6 +131,6 @@ fi # Compress results.csv to save on bandwidth during the upload of artifacts to # GitLab. This reduces a full piglit run to 550 KB, down from 6 MB, and takes # 55ms on my Ryzen 5950X (with or without parallelism). -zstd --rm -T0 -8qc $RESULTS/results.csv -o $RESULTS/results.csv.zst +zstd --quiet --rm --threads ${FDO_CI_CONCURRENT:-0} -8 $RESULTS_DIR/results.csv -o $RESULTS_DIR/results.csv.zst exit $PIGLIT_EXITCODE diff --git a/mesalib/.gitlab-ci/piglit/piglit-traces.sh b/mesalib/.gitlab-ci/piglit/piglit-traces.sh index 38c204df68..b689294d06 100644 --- a/mesalib/.gitlab-ci/piglit/piglit-traces.sh +++ b/mesalib/.gitlab-ci/piglit/piglit-traces.sh @@ -1,6 +1,11 @@ #!/usr/bin/env bash # shellcheck disable=SC2035 # FIXME glob # shellcheck disable=SC2086 # we want word splitting +# shellcheck disable=SC1091 # paths only become valid at runtime + +. "${SCRIPTS_DIR}/setup-test-env.sh" + +section_start traces_prepare "traces: preparing test setup" set -ex @@ -10,9 +15,6 @@ export PAGER=cat # FIXME: export everywhere INSTALL=$(realpath -s "$PWD"/install) S3_ARGS="--token-file ${S3_JWT_FILE}" -RESULTS=$(realpath -s "$PWD"/results) -mkdir -p "$RESULTS" - export PIGLIT_REPLAY_DESCRIPTION_FILE="$INSTALL/$PIGLIT_TRACES_FILE" # FIXME: guess why /usr/local/bin is not included in all runners PATH. @@ -27,33 +29,12 @@ else export PIGLIT_REPLAY_EXTRA_ARGS="--keep-image ${PIGLIT_REPLAY_EXTRA_ARGS}" fi -# WINE -case "$PIGLIT_REPLAY_DEVICE_NAME" in - vk-*) - export WINEPREFIX="/dxvk-wine64" - ;; - *) - export WINEPREFIX="/generic-wine64" - ;; -esac - -#PATH="/opt/wine-stable/bin/:$PATH" # WineHQ path - -# Avoid asking about Gecko or Mono instalation -export WINEDLLOVERRIDES="mscoree=d;mshtml=d" # FIXME: drop, not needed anymore? (wine dir is already created) - - # Set up the environment. 
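In replay_s3_upload_images() below, the expansion ${line%-*-*} trims the last two dash-separated fields from each generated image name to recover the trace path. The equivalent, with a hypothetical file name:

    # ${line%-*-*} removes the shortest trailing match of "-*-*", i.e. the
    # final two dash-separated components (label and checksum.png).
    line = "example/app.trace-vk-device-0123abcd.png"  # made-up name
    trace = line.rsplit("-", 2)[0]
    print(trace)  # example/app.trace-vk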
# Modifiying here directly LD_LIBRARY_PATH may cause problems when # using a command wrapper. Hence, we will just set it when running the # command. export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/" if [ -n "${VK_DRIVER}" ]; then - # Set environment for DXVK. - export DXVK_LOG_LEVEL="info" - export DXVK_LOG="$RESULTS/dxvk" - [ -d "$DXVK_LOG" ] || mkdir -pv "$DXVK_LOG" - export DXVK_STATE_CACHE=0 ARCH=$(uname -m) export VK_DRIVER_FILES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json" fi @@ -71,9 +52,6 @@ quiet() { # Set environment for apitrace executable. export PATH="/apitrace/build:$PATH" -export PIGLIT_REPLAY_WINE_BINARY=wine -export PIGLIT_REPLAY_WINE_APITRACE_BINARY="/apitrace-msvc-win64/bin/apitrace.exe" -export PIGLIT_REPLAY_WINE_D3DRETRACE_BINARY="/apitrace-msvc-win64/bin/d3dretrace.exe" echo "Version:" apitrace version 2>/dev/null || echo "apitrace not found (Linux)" @@ -98,7 +76,7 @@ if [ "$EGL_PLATFORM" = "surfaceless" ]; then GALLIUM_DRIVER=llvmpipe \ VTEST_USE_EGL_SURFACELESS=1 \ VTEST_USE_GLES=1 \ - virgl_test_server >"$RESULTS"/vtest-log.txt 2>&1 & + virgl_test_server >"$RESULTS_DIR"/vtest-log.txt 2>&1 & sleep 1 fi @@ -132,7 +110,7 @@ fi # shellcheck disable=SC2317 replay_s3_upload_images() { - find "$RESULTS/$__PREFIX" -type f -name "*.png" -printf "%P\n" \ + find "$RESULTS_DIR/$__PREFIX" -type f -name "*.png" -printf "%P\n" \ | while read -r line; do __TRACE="${line%-*-*}" @@ -150,16 +128,14 @@ replay_s3_upload_images() { __DESTINATION_FILE_PATH="$__S3_TRACES_PREFIX/${line##*-}" fi - ci-fairy s3cp $S3_ARGS "$RESULTS/$__PREFIX/$line" \ + ci-fairy s3cp $S3_ARGS "$RESULTS_DIR/$__PREFIX/$line" \ "https://${__S3_PATH}/${__DESTINATION_FILE_PATH}" done } SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD | tee /tmp/version.txt | grep \"Mesa $MESA_VERSION\(\s\|$\)\"" -if [ -d results ]; then - cd results && rm -rf ..?* .[!.]* * -fi +cd $RESULTS_DIR && rm -rf ..?* .[!.]* * cd /piglit if [ -n "$USE_CASELIST" ]; then @@ -178,7 +154,7 @@ PIGLIT_OPTIONS=$(printf "%s" "$PIGLIT_OPTIONS") PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS") -PIGLIT_CMD="./piglit run -l verbose --timeout 300 -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS replay "$(/usr/bin/printf "%q" "$RESULTS") +PIGLIT_CMD="./piglit run -l verbose --timeout 300 -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS replay "$(/usr/bin/printf "%q" "$RESULTS_DIR") RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD && $HANG_DETECTION_CMD $PIGLIT_CMD" @@ -198,44 +174,65 @@ if [ -n "$PIGLIT_REPLAY_ANGLE_TAG" ]; then tar --zstd -xf ${FILE} -C replayer-db/angle/ fi +PIGLIT_RESULTS="${PIGLIT_RESULTS:-replay}" +RESULTSFILE="$RESULTS_DIR/$PIGLIT_RESULTS.txt" +mkdir -p .gitlab-ci/piglit + +uncollapsed_section_switch traces "traces: run traces" + if ! 
eval $RUN_CMD; then - printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION" + error "Found $(cat /tmp/version.txt), expected $MESA_VERSION" fi -./piglit summary aggregate "$RESULTS" -o junit.xml -PIGLIT_RESULTS="${PIGLIT_RESULTS:-replay}" -RESULTSFILE="$RESULTS/$PIGLIT_RESULTS.txt" -mkdir -p .gitlab-ci/piglit -./piglit summary console "$RESULTS"/results.json.bz2 \ +./piglit summary aggregate "$RESULTS_DIR" -o junit.xml + +{ set +x; } 2>/dev/null +./piglit summary console "$RESULTS_DIR"/results.json.bz2 \ | tee ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" \ | head -n -1 | grep -v ": pass" \ | sed '/^summary:/Q' \ > $RESULTSFILE +if [ -s $RESULTSFILE ]; then + error "Failures in traces:" + cat $RESULTSFILE + echo "Review the image changes and get the new checksums at: ${ARTIFACTS_BASE_URL}/results/summary/problems.html" + echo "If the new traces look correct to you, you can update the checksums" + echo "locally by running:" + echo " ./bin/ci/update_traces_checksum.sh" + echo "and resubmit this merge request." +fi + +section_switch test_post_process "traces: post-processing test results" + __PREFIX="trace/$PIGLIT_REPLAY_DEVICE_NAME" __S3_PATH="$PIGLIT_REPLAY_ARTIFACTS_BASE_URL" __S3_TRACES_PREFIX="traces" +set -x + if [ "$PIGLIT_REPLAY_SUBCOMMAND" != "profile" ]; then quiet replay_s3_upload_images fi if [ ! -s $RESULTSFILE ]; then + rm -rf "${RESULTS_DIR:?}/${__PREFIX}" + { set +x; } 2>/dev/null + section_end test_post_process exit 0 fi ./piglit summary html --exclude-details=pass \ -"$RESULTS"/summary "$RESULTS"/results.json.bz2 +"$RESULTS_DIR"/summary "$RESULTS_DIR"/results.json.bz2 -find "$RESULTS"/summary -type f -name "*.html" -print0 \ +find "$RESULTS_DIR"/summary -type f -name "*.html" -print0 \ | xargs -0 sed -i 's% artifacts/VERSION +cp -Rp .gitlab-ci/report-flakes.py artifacts/ +cp -Rp .gitlab-ci/setup-test-env.sh artifacts/ +cp -Rp .gitlab-ci/common artifacts/ci-common +cp -Rp .gitlab-ci/b2c artifacts/ +cp -Rp .gitlab-ci/bare-metal artifacts/ +cp -Rp .gitlab-ci/lava artifacts/ +cp -Rp .gitlab-ci/bin/*_logger.py artifacts/ + +mapfile -t duplicate_files < <( + find src/ -path '*/ci/*' \ + \( \ + -name '*.txt' \ + -o -name '*.toml' \ + -o -name '*traces*.yml' \ + \) \ + -exec basename -a {} + | sort | uniq -d +) +if [ ${#duplicate_files[@]} -gt 0 ]; then + echo 'Several files with the same name in various ci/ folders:' + printf -- ' %s\n' "${duplicate_files[@]}" + exit 1 +fi + +if [ -d "src/" ]; then + find src/ -path '*/ci/*' \ + \( \ + -name '*.txt' \ + -o -name '*.toml' \ + -o -name '*traces*.yml' \ + \) \ + -exec cp -p {} artifacts/ \; +fi +cp -Rp .gitlab-ci/*.txt artifacts/ + +if [ -n "$S3_ARTIFACT_NAME" ]; then + # Pass needed files to the test stage + S3_ARTIFACT_TAR="$S3_ARTIFACT_NAME.tar.zst" + tar cv artifacts/ | zstd -o "${S3_ARTIFACT_TAR}" + ci-fairy s3cp --token-file "${S3_JWT_FILE}" "${S3_ARTIFACT_TAR}" "https://${PIPELINE_ARTIFACTS_BASE}/${S3_ARTIFACT_TAR}" + rm "${S3_ARTIFACT_TAR}" +fi + +section_end prepare-artifacts diff --git a/mesalib/.gitlab-ci/prepare-artifacts.sh b/mesalib/.gitlab-ci/prepare-artifacts.sh index 2709c03f98..3fdadf5b1d 100644 --- a/mesalib/.gitlab-ci/prepare-artifacts.sh +++ b/mesalib/.gitlab-ci/prepare-artifacts.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash # shellcheck disable=SC2038 # TODO: rewrite the find # shellcheck disable=SC2086 # we want word splitting +# shellcheck disable=SC1091 # paths only become valid at runtime + +. 
"${SCRIPTS_DIR}/setup-test-env.sh" section_switch prepare-artifacts "artifacts: prepare" @@ -11,6 +14,7 @@ CROSS_FILE=/cross_file-"$CROSS".txt # Delete unused bin and includes from artifacts to save space. rm -rf install/bin install/include +rm -f install/lib/*.a # Strip the drivers in the artifacts to cut 80% of the artifacts size. if [ -n "$CROSS" ]; then @@ -38,15 +42,33 @@ cp -Rp .gitlab-ci/fossilize-runner.sh install/ cp -Rp .gitlab-ci/crosvm-init.sh install/ cp -Rp .gitlab-ci/*.txt install/ cp -Rp .gitlab-ci/report-flakes.py install/ -cp -Rp .gitlab-ci/vkd3d-proton install/ cp -Rp .gitlab-ci/setup-test-env.sh install/ cp -Rp .gitlab-ci/*-runner.sh install/ cp -Rp .gitlab-ci/bin/structured_logger.py install/ cp -Rp .gitlab-ci/bin/custom_logger.py install/ -find . -path \*/ci/\*.txt \ - -o -path \*/ci/\*.toml \ - -o -path \*/ci/\*traces\*.yml \ - | xargs -I '{}' cp -p '{}' install/ + +mapfile -t duplicate_files < <( + find src/ -path '*/ci/*' \ + \( \ + -name '*.txt' \ + -o -name '*.toml' \ + -o -name '*traces*.yml' \ + \) \ + -exec basename -a {} + | sort | uniq -d +) +if [ ${#duplicate_files[@]} -gt 0 ]; then + echo 'Several files with the same name in various ci/ folders:' + printf -- ' %s\n' "${duplicate_files[@]}" + exit 1 +fi + +find src/ -path '*/ci/*' \ + \( \ + -name '*.txt' \ + -o -name '*.toml' \ + -o -name '*traces*.yml' \ + \) \ + -exec cp -p {} install/ \; # Tar up the install dir so that symlinks and hardlinks aren't each # packed separately in the zip file. @@ -55,11 +77,12 @@ tar -cf artifacts/install.tar install cp -Rp .gitlab-ci/common artifacts/ci-common cp -Rp .gitlab-ci/lava artifacts/ cp -Rp .gitlab-ci/b2c artifacts/ +cp bin/ci/structured_logger.py artifacts/ if [ -n "$S3_ARTIFACT_NAME" ]; then # Pass needed files to the test stage S3_ARTIFACT_NAME="$S3_ARTIFACT_NAME.tar.zst" - zstd artifacts/install.tar -o ${S3_ARTIFACT_NAME} + zstd --quiet --threads ${FDO_CI_CONCURRENT:-0} artifacts/install.tar -o ${S3_ARTIFACT_NAME} ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${S3_ARTIFACT_NAME} fi diff --git a/mesalib/.gitlab-ci/run-pytest.sh b/mesalib/.gitlab-ci/run-pytest.sh new file mode 100644 index 0000000000..b5bbff8746 --- /dev/null +++ b/mesalib/.gitlab-ci/run-pytest.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: MIT +# © Collabora Limited +# Author: Guilherme Gallo + +# This script runs unit/integration tests related with LAVA CI tools +# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime. +# shellcheck disable=SC2086 # quoting PYTEST_VERBOSE makes us pass an empty path + +set -eu + +PYTHON_BIN="python3.11" + +if [ -z "${SCRIPTS_DIR:-}" ]; then + SCRIPTS_DIR="$(dirname "${0}")" +fi + +if [ -z "${CI_JOB_STARTED_AT:-}" ]; then + CI_JOB_STARTED_AT=$(date -u +%Y-%m-%dT%H:%M:%SZ) # isoformat +fi + +source "${SCRIPTS_DIR}/setup-test-env.sh" + +if [ -z "${CI_PROJECT_DIR:-}" ]; then + CI_PROJECT_DIR="$(dirname "${0}")/../" +fi + +if [ -z "${CI_JOB_TIMEOUT:-}" ]; then + # Export this default value, 1 hour in seconds, to test the lava job submitter + export CI_JOB_TIMEOUT=3600 +fi + +# If running outside of the debian/x86_64_pyutils container, +# run in a virtual environment for isolation +# e.g. USE_VENV=true ./.gitlab-ci/run-pytest.sh +if [ "${USE_VENV:-}" == true ]; then + echo "Setting up virtual environment for local testing." 
+ MESA_PYTEST_VENV="${CI_PROJECT_DIR}/.venv-pytest" + ${PYTHON_BIN} -m venv "${MESA_PYTEST_VENV}" + source "${MESA_PYTEST_VENV}"/bin/activate + ${PYTHON_BIN} -m pip install --break-system-packages -r "${CI_PROJECT_DIR}/bin/ci/test/requirements.txt" +fi + +LIB_TEST_DIR=${CI_PROJECT_DIR}/.gitlab-ci/tests +SCRIPT_TEST_DIR=${CI_PROJECT_DIR}/bin/ci + +uncollapsed_section_start pytest "Running pytest" + +PYTHONPATH="${LIB_TEST_DIR}:${SCRIPT_TEST_DIR}:${PYTHONPATH:-}" ${PYTHON_BIN} -m \ + pytest "${LIB_TEST_DIR}" "${SCRIPT_TEST_DIR}" \ + -W ignore::DeprecationWarning \ + --junitxml=artifacts/ci_scripts_report.xml \ + -m 'not slow' \ + ${PYTEST_VERBOSE:-} + +section_end pytest + +section_start flake8 "flake8" +${PYTHON_BIN} -m flake8 \ +--config "${CI_PROJECT_DIR}/.gitlab-ci/.flake8" \ +"${LIB_TEST_DIR}" "${SCRIPT_TEST_DIR}" +section_end flake8 diff --git a/mesalib/.gitlab-ci/run-shader-db.sh b/mesalib/.gitlab-ci/run-shader-db.sh index 0db5af13ae..8a53c74b6b 100644 --- a/mesalib/.gitlab-ci/run-shader-db.sh +++ b/mesalib/.gitlab-ci/run-shader-db.sh @@ -1,6 +1,10 @@ #!/usr/bin/env bash +# shellcheck disable=SC1091 # paths only become valid at runtime + set -e +. "${SCRIPTS_DIR}/setup-test-env.sh" + ARTIFACTSDIR=$(pwd)/shader-db mkdir -p "$ARTIFACTSDIR" export DRM_SHIM_DEBUG=true @@ -19,14 +23,14 @@ for driver in freedreno intel lima v3d vc4; do done # Run shader-db over a number of supported chipsets for nouveau -#for chipset in 40 a3 c0 e4 f0 134 162; do -# section_start shader-db-nouveau-${chipset} "Running shader-db for nouveau - ${chipset}" -# env LD_PRELOAD="$LIBDIR/libnouveau_noop_drm_shim.so" \ -# NOUVEAU_CHIPSET=${chipset} \ -# ./run -j"${FDO_CI_CONCURRENT:-4}" ./shaders \ -# > "$ARTIFACTSDIR/nouveau-${chipset}-shader-db.txt" -# section_end shader-db-nouveau-${chipset} -#done +for chipset in 40 a3 c0 e4 f0 134 162; do + section_start shader-db-nouveau-${chipset} "Running shader-db for nouveau - ${chipset}" + env LD_PRELOAD="$LIBDIR/libnouveau_noop_drm_shim.so" \ + NOUVEAU_CHIPSET=${chipset} \ + ./run -j"${FDO_CI_CONCURRENT:-4}" ./shaders \ + > "$ARTIFACTSDIR/nouveau-${chipset}-shader-db.txt" + section_end shader-db-nouveau-${chipset} +done # Run shader-db for r300 (RV370 and RV515) for chipset in 0x5460 0x7140; do diff --git a/mesalib/.gitlab-ci/run-shellcheck.sh b/mesalib/.gitlab-ci/run-shellcheck.sh index 9691ccd38f..76dea93e4a 100644 --- a/mesalib/.gitlab-ci/run-shellcheck.sh +++ b/mesalib/.gitlab-ci/run-shellcheck.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -CHECKPATH=".gitlab-ci" +SCRIPTS_DIR="$(realpath "$(dirname "$0")")" is_bash() { [[ $1 == *.sh ]] && return 0 @@ -9,15 +9,14 @@ is_bash() { return 1 } +anyfailed=0 + while IFS= read -r -d $'' file; do if is_bash "$file" ; then - shellcheck -x -W0 -s bash "$file" - rc=$? - if [ "${rc}" -eq 0 ] - then - continue - else - exit 1 + if ! shellcheck "$file"; then + anyfailed=1 fi fi -done < <(find $CHECKPATH -type f \! -path "./.git/*" -print0) +done < <(find "$SCRIPTS_DIR" -type f \! 
-path "./.git/*" -print0) + +exit "$anyfailed" diff --git a/mesalib/.gitlab-ci/setup-test-env.sh b/mesalib/.gitlab-ci/setup-test-env.sh index 0cdd96ecff..d8af3c70cf 100644 --- a/mesalib/.gitlab-ci/setup-test-env.sh +++ b/mesalib/.gitlab-ci/setup-test-env.sh @@ -3,16 +3,28 @@ # shellcheck disable=SC2086 # we want word splitting # shellcheck disable=SC2155 # mktemp usually not failing -function x_off { +shopt -s expand_aliases + +function _x_store_state { if [[ "$-" == *"x"* ]]; then - state_x=1 - set +x + previous_state_x=1 else - state_x=0 + previous_state_x=0 fi } +_x_store_state +alias x_store_state='{ _x_store_state; } >/dev/null 2>/dev/null' -# TODO: implement x_on ! +function _x_off { + x_store_state + set +x +} +alias x_off='{ _x_off; } >/dev/null 2>/dev/null' + +function _x_restore { + [ $previous_state_x -eq 0 ] || set -x +} +alias x_restore='{ _x_restore; } >/dev/null 2>/dev/null' export JOB_START_S=$(date -u +"%s" -d "${CI_JOB_STARTED_AT:?}") @@ -22,23 +34,7 @@ function get_current_minsec { printf "%02d:%02d" $((CURR_TIME/60)) $((CURR_TIME%60)) } -function error { - x_off 2>/dev/null - RED="\e[0;31m" - ENDCOLOR="\e[0m" - # we force the following to be not in a section - section_end $CURRENT_SECTION - - CURR_MINSEC=$(get_current_minsec) - echo -e "\n${RED}[${CURR_MINSEC}] ERROR: $*${ENDCOLOR}\n" - [ "$state_x" -eq 0 ] || set -x -} - -function trap_err { - error ${CURRENT_SECTION:-'unknown-section'}: ret code: $* -} - -function build_section_start { +function _build_section_start { local section_params=$1 shift local section_name=$1 @@ -49,58 +45,100 @@ function build_section_start { CURR_MINSEC=$(get_current_minsec) echo -e "\n\e[0Ksection_start:$(date +%s):$section_name$section_params\r\e[0K${CYAN}[${CURR_MINSEC}] $*${ENDCOLOR}\n" + x_restore } +alias build_section_start="x_off; _build_section_start" -function section_start { - x_off 2>/dev/null +function _section_start { build_section_start "[collapsed=true]" $* - [ "$state_x" -eq 0 ] || set -x + x_restore +} +alias section_start="x_off; _section_start" + +function _uncollapsed_section_start { + build_section_start "" $* + x_restore } +alias uncollapsed_section_start="x_off; _uncollapsed_section_start" -function build_section_end { +function _build_section_end { echo -e "\e[0Ksection_end:$(date +%s):$1\r\e[0K" CURRENT_SECTION="" + x_restore } +alias build_section_end="x_off; _build_section_end" -function section_end { - x_off >/dev/null +function _section_end { build_section_end $* - [ "$state_x" -eq 0 ] || set -x + x_restore } +alias section_end="x_off; _section_end" -function section_switch { - x_off 2>/dev/null +function _section_switch { if [ -n "$CURRENT_SECTION" ] then - build_section_end $CURRENT_SECTION + build_section_end $CURRENT_SECTION + x_off fi build_section_start "[collapsed=true]" $* - [ "$state_x" -eq 0 ] || set -x + x_restore } +alias section_switch="x_off; _section_switch" -function uncollapsed_section_switch { - x_off 2>/dev/null +function _uncollapsed_section_switch { if [ -n "$CURRENT_SECTION" ] then - build_section_end $CURRENT_SECTION + build_section_end $CURRENT_SECTION + x_off fi build_section_start "" $* - [ "$state_x" -eq 0 ] || set -x + x_restore } +alias uncollapsed_section_switch="x_off; _uncollapsed_section_switch" -export -f x_off +export -f _x_store_state +export -f _x_off +export -f _x_restore export -f get_current_minsec -export -f error -export -f trap_err -export -f build_section_start -export -f section_start -export -f build_section_end -export -f section_end -export -f section_switch 
-export -f uncollapsed_section_switch
+export -f _build_section_start
+export -f _section_start
+export -f _build_section_end
+export -f _section_end
+export -f _section_switch
+export -f _uncollapsed_section_switch
 
 # Freedesktop requirement (needed for Wayland)
-[ -n "${XDG_RUNTIME_DIR}" ] || export XDG_RUNTIME_DIR="$(mktemp -p "$PWD" -d xdg-runtime-XXXXXX)"
+[ -n "${XDG_RUNTIME_DIR:-}" ] || export XDG_RUNTIME_DIR="$(mktemp -p "$PWD" -d xdg-runtime-XXXXXX)"
+
+if [ -z "${RESULTS_DIR:-}" ]; then
+  export RESULTS_DIR="${PWD%/}/results"
+  if [ -e "${RESULTS_DIR}" ]; then
+    rm -rf "${RESULTS_DIR}"
+  fi
+  mkdir -p "${RESULTS_DIR}"
+fi
+
+function error {
+  RED="\e[0;31m"
+  ENDCOLOR="\e[0m"
+  # we force the following to be not in a section
+  if [ -n "${CURRENT_SECTION:-}" ]; then
+    section_end $CURRENT_SECTION
+    x_off
+  fi
+
+  CURR_MINSEC=$(get_current_minsec)
+  echo -e "\n${RED}[${CURR_MINSEC}] ERROR: $*${ENDCOLOR}\n"
+  x_restore
+}
+
+function trap_err {
+  x_off
+  error ${CURRENT_SECTION:-'unknown-section'}: ret code: $*
+}
+
+export -f error
+export -f trap_err
 
 set -E
 trap 'trap_err $?' ERR
diff --git a/mesalib/.gitlab-ci/test-source-dep.yml b/mesalib/.gitlab-ci/test-source-dep.yml
index fbcccebb9a..e1c897110a 100644
--- a/mesalib/.gitlab-ci/test-source-dep.yml
+++ b/mesalib/.gitlab-ci/test-source-dep.yml
@@ -6,6 +6,11 @@
   rules:
     - if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
       when: on_success
+
+# Override of the `default: retry:` settings, which automatically retries jobs
+# if one of the test results didn't match its expectation; this override
+# disables that, but keeps the auto-retry for infrastructure failures.
+.no-auto-retry:
   retry:
     max: 1
     # Don't retry on script_failure, job_execution_timeout, runner_unsupported,
@@ -35,7 +40,7 @@
 .restricted-rules:
   rules:
     # If the triggerer has access to the restricted traces and if it is pre-merge
-    - if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu|okias|gallo|kwg|llanderwelin|zmike|vigneshraman)$/") &&
+    - if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu|okias|gallo|kwg|llanderwelin|zmike|vigneshraman|Valentine)$/") &&
            ($GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH)'
       when: never
 
@@ -43,6 +48,7 @@
 # ---------------------------------------------------------------
 .core-rules:
   rules:
+    - !reference [.common-rules, rules]
     - !reference [.no_scheduled_pipelines-rules, rules]
     - changes: &core_file_list
       - .gitlab-ci.yml
@@ -56,7 +62,6 @@
       - src/*
       - src/compiler/**/*
       - src/drm-shim/**/*
-      - src/gbm/**/*
       - src/gtest/**/*
       # Some src/util and src/compiler files use headers from mesa/ (e.g.
       # mtypes.h). We should clean that up.
@@ -67,8 +72,9 @@
 
 # Same core dependencies for doing manual runs.
 .core-manual-rules:
-  retry: !reference [.scheduled_pipeline-rules, retry]
+  extends: .no-auto-retry
   rules:
+    - !reference [.common-rules, rules]
     # We only want manual jobs to show up when it's not marge's pre-merge CI
     # run, otherwise she'll wait until her timeout. The exception is
     # performance jobs, see below.
@@ -87,6 +93,7 @@
 # Collabora triggers the manual job after merge to main. These "never" filters
 # need to come before any paths with "manual".
.performance-rules: + stage: performance rules: - !reference [.no_scheduled_pipelines-rules, rules] # Run only on pre-merge pipelines from Marge @@ -114,7 +121,8 @@ # Always use the same device LAVA_TAGS: "cbg-0" # Ensure that we are using the release build artifact - S3_ARTIFACT_NAME: mesa-${ARCH}-default-release + LAVA_S3_ARTIFACT_NAME: mesa-${ARCH}-default-release + S3_ARTIFACT_NAME: mesa-python-ci-artifacts # Reset dependencies in performance jobs to enforce the release build artifact dependencies: null # Don't run in parallel. It is okay to performance jobs to take a little @@ -125,7 +133,7 @@ extends: - .piglit-performance-base needs: - - debian/arm64_test + - debian/baremetal_arm64_test - debian-arm64-release .piglit-performance:x86_64: @@ -156,6 +164,7 @@ - !reference [.core-rules, rules] - changes: &mesa_core_file_list - src/egl/**/* + - src/gbm/**/* - src/glx/**/* - src/loader/**/* - src/mapi/**/* @@ -174,7 +183,7 @@ - !reference [.gallium-core-rules, rules] .gl-manual-rules: - retry: !reference [.scheduled_pipeline-rules, retry] + extends: .no-auto-retry rules: - !reference [.core-manual-rules, rules] - changes: @@ -193,36 +202,13 @@ when: on_success .vulkan-manual-rules: - retry: !reference [.scheduled_pipeline-rules, retry] + extends: .no-auto-retry rules: - !reference [.core-manual-rules, rules] - changes: *vulkan_file_list when: manual -# Rules for unusual architectures that only build a subset of drivers -.ppc64el-rules: - rules: - - !reference [.never-post-merge-rules, rules] - - !reference [.no_scheduled_pipelines-rules, rules] - - !reference [.zink-common-rules, rules] - - !reference [.softpipe-rules, rules] - - !reference [.llvmpipe-rules, rules] - - !reference [.lavapipe-rules, rules] - - !reference [.radv-rules, rules] - - !reference [.radeonsi-rules, rules] - - !reference [.virgl-rules, rules] - - !reference [.nouveau-rules, rules] - -.s390x-rules: - rules: - - !reference [.never-post-merge-rules, rules] - - !reference [.no_scheduled_pipelines-rules, rules] - - !reference [.zink-common-rules, rules] - - !reference [.softpipe-rules, rules] - - !reference [.llvmpipe-rules, rules] - - !reference [.lavapipe-rules, rules] - # Rules for linters .lint-rustfmt-rules: rules: diff --git a/mesalib/.gitlab-ci/test/gitlab-ci.yml b/mesalib/.gitlab-ci/test/gitlab-ci.yml index 832bd3e713..902a795a58 100644 --- a/mesalib/.gitlab-ci/test/gitlab-ci.yml +++ b/mesalib/.gitlab-ci/test/gitlab-ci.yml @@ -17,6 +17,7 @@ paths: - results/ rules: + - !reference [.common-rules, rules] - !reference [.never-post-merge-rules, rules] .formatting-check: @@ -41,36 +42,42 @@ rustfmt: - shopt -s globstar - rustfmt --version - rustfmt --verbose src/**/lib.rs - - rustfmt --verbose src/**/main.rs -python-test: - # Cancel job if a newer commit is pushed to the same branch - interruptible: true - stage: code-validation +yaml-toml-shell-py-test: extends: - - .use-debian/x86_64_build - variables: - GIT_STRATEGY: fetch - timeout: 10m + - .use-debian/x86_64_pyutils + - .no-auto-retry # this job can't be flaky + stage: code-validation script: - - cd bin/ci - - pip install --break-system-packages -r test/requirements.txt - - PYTHONPATH=. pytest -v + - uncollapsed_section_start tomllint "tomllint" + - echo "If your change looks right but this script rejects it, contact @eric (GitLab) / eric_engestrom (IRC)." 
+ - python3 bin/toml_lint.py + - section_end tomllint + - section_start yamllint "yamllint" + - .gitlab-ci/run-yamllint.sh + - section_end yamllint + - section_start shellcheck "shellcheck" + - .gitlab-ci/run-shellcheck.sh + - section_end shellcheck + - .gitlab-ci/run-pytest.sh rules: - !reference [.disable-farm-mr-rules, rules] - !reference [.never-post-merge-rules, rules] - - if: $CI_PIPELINE_SOURCE == "schedule" - when: on_success - - if: $CI_PIPELINE_SOURCE == "push" && $CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" - when: on_success + - !reference [.no_scheduled_pipelines-rules, rules] - if: $GITLAB_USER_LOGIN == "marge-bot" - changes: &bin_ci_files - - .gitlab-ci.yml - - .gitlab-ci/**/* + changes: &lint_files + - .gitlab-ci/test/gitlab-ci.yml + - .gitlab-ci/**/*.sh + - .shellcheckrc + - bin/toml_lint.py + - src/**/ci/*.toml + - .gitlab-ci/tests/**/* - bin/ci/**/* when: on_success - - changes: *bin_ci_files + - changes: *lint_files when: manual + tags: + - placeholder-job .test-gl: extends: @@ -107,7 +114,9 @@ python-test: - .test - .use-debian/x86_64_test-android variables: - S3_ARTIFACT_NAME: mesa-x86_64-android-debug + # This is for the guest artifacts from debian-android which will be + # downloaded explicitly by cuttlefish-runner.sh + S3_ANDROID_ARTIFACT_NAME: mesa-x86_64-android-debug needs: - job: debian-testing artifacts: true # On the host we want the Linux build @@ -124,14 +133,8 @@ python-test: - results/ .b2c-vkd3d-proton-test: - artifacts: - when: on_failure - name: "mesa_${CI_JOB_NAME}" - paths: - - results/vkd3d-proton.log variables: - HWCI_TEST_SCRIPT: ./install/vkd3d-proton/run.sh - B2C_JOB_SUCCESS_REGEX: 'vkd3d-proton execution: SUCCESS\r$' + HWCI_TEST_SCRIPT: install/vkd3d-runner.sh .piglit-traces-test: artifacts: @@ -211,12 +214,12 @@ python-test: .baremetal-test-arm32: extends: - .baremetal-test - - .use-debian/arm32_test + - .use-debian/baremetal_arm32_test variables: DEBIAN_ARCH: armhf S3_ARTIFACT_NAME: mesa-arm32-default-debugoptimized needs: - - debian/arm32_test + - debian/baremetal_arm32_test - job: debian-arm32 artifacts: false - !reference [.required-for-hardware-jobs, needs] @@ -225,43 +228,49 @@ python-test: .baremetal-test-arm64: extends: - .baremetal-test - - .use-debian/arm64_test + - .use-debian/baremetal_arm64_test variables: DEBIAN_ARCH: arm64 S3_ARTIFACT_NAME: mesa-arm64-default-debugoptimized needs: - - debian/arm64_test + - debian/baremetal_arm64_test - job: debian-arm64 artifacts: false - !reference [.required-for-hardware-jobs, needs] # ARM32/64 testing of bare-metal boards attached to an x86 gitlab-runner system, using an asan mesa build .baremetal-arm32-asan-test: - extends: - - .baremetal-test - - .use-debian/arm32_test variables: - DEQP_RUNNER_OPTIONS: "--env LD_PRELOAD=libasan.so.8:/install/lib/libdlclose-skip.so" S3_ARTIFACT_NAME: mesa-arm32-asan-debugoptimized + DEQP_FORCE_ASAN: 1 needs: - - debian/arm32_test + - debian/baremetal_arm32_test - job: debian-arm32-asan artifacts: false - !reference [.required-for-hardware-jobs, needs] .baremetal-arm64-asan-test: - extends: - - .baremetal-test - - .use-debian/arm64_test variables: - DEQP_RUNNER_OPTIONS: "--env LD_PRELOAD=libasan.so.8:/install/lib/libdlclose-skip.so" S3_ARTIFACT_NAME: mesa-arm64-asan-debugoptimized + DEQP_FORCE_ASAN: 1 needs: - - debian/arm64_test + - debian/baremetal_arm64_test - job: debian-arm64-asan artifacts: false - !reference [.required-for-hardware-jobs, needs] +.baremetal-arm64-ubsan-test: + extends: + - .baremetal-test + - 
.use-debian/baremetal_arm64_test + variables: + S3_ARTIFACT_NAME: mesa-arm64-ubsan-debugoptimized + needs: + - debian/baremetal_arm64_test + - job: debian-arm64-ubsan + artifacts: false + - !reference [.required-for-hardware-jobs, needs] + .baremetal-deqp-test: variables: HWCI_TEST_SCRIPT: "/install/deqp-runner.sh" @@ -285,10 +294,8 @@ python-test: # No need by default to pull the whole repo GIT_STRATEGY: none # boot2container initrd configuration parameters. - B2C_KERNEL_URL: 'https://gitlab.freedesktop.org/gfx-ci/ci-tron/-/package_files/519/download' # Linux 6.1 - B2C_INITRAMFS_URL: 'https://gitlab.freedesktop.org/mupuf/boot2container/-/releases/v0.9.10/downloads/initramfs.linux_amd64.cpio.xz' - B2C_JOB_SUCCESS_REGEX: '\[.*\]: Execution is over, pipeline status: 0\r$' - B2C_JOB_WARN_REGEX: '\*ERROR\* ring .* timeout' + B2C_VERSION: v0.9.14 + B2C_JOB_SUCCESS_REGEX: 'hwci: mesa: pass, exit_code: 0\r$' B2C_LOG_LEVEL: 6 B2C_POWEROFF_DELAY: 15 B2C_SESSION_END_REGEX: '^.*It''s now safe to turn off your computer\r$' @@ -322,6 +329,8 @@ python-test: - | set -eux + section_start b2c_kernel_boot "Booting hardware device" + # Useful as a hook point for runner admins. You may edit the # config.toml for the Gitlab runner and use a bind-mount to # populate the hook script with some executable commands. This @@ -381,7 +390,7 @@ python-test: after_script: # Keep the results path the same as baremetal and LAVA - mkdir -p "${JOB_FOLDER}"/results - - mv "${JOB_FOLDER}"/results results/ + - mv "${JOB_FOLDER}"/results ./ - !reference [default, after_script] artifacts: @@ -392,10 +401,17 @@ python-test: reports: junit: results/**/junit.xml +.b2c-x86_64-test: + extends: + - .b2c-test + variables: + B2C_KERNEL_URL: 'https://gitlab.freedesktop.org/gfx-ci/ci-tron/-/package_files/519/download' # Linux 6.1 + B2C_INITRAMFS_URL: 'https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/${B2C_VERSION}/downloads/initramfs.linux_amd64.cpio.xz' + .b2c-x86_64-test-vk: extends: - .use-debian/x86_64_test-vk - - .b2c-test + - .b2c-x86_64-test needs: - debian/x86_64_test-vk - debian-testing @@ -404,8 +420,33 @@ python-test: .b2c-x86_64-test-gl: extends: - .use-debian/x86_64_test-gl - - .b2c-test + - .b2c-x86_64-test needs: - debian/x86_64_test-gl - debian-testing - !reference [.required-for-hardware-jobs, needs] + +.b2c-arm64-test: + extends: + - .b2c-test + variables: + B2C_INITRAMFS_URL: 'https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/${B2C_VERSION}/downloads/initramfs.linux_arm64.cpio.xz' + B2C_KERNEL_URL: 'https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/${B2C_VERSION}/downloads/linux-arm64' + +.b2c-arm64-test-vk: + extends: + - .use-debian/arm64_test-vk + - .b2c-arm64-test + needs: + - debian/arm64_test-vk + - debian-arm64 + - !reference [.required-for-hardware-jobs, needs] + +.b2c-arm64-test-gl: + extends: + - .use-debian/arm64_test-gl + - .b2c-arm64-test + needs: + - debian/arm64_test-gl + - debian-arm64 + - !reference [.required-for-hardware-jobs, needs] diff --git a/mesalib/.gitlab-ci/vkd3d-proton/run.sh b/mesalib/.gitlab-ci/vkd3d-proton/run.sh deleted file mode 100644 index 174c0cce80..0000000000 --- a/mesalib/.gitlab-ci/vkd3d-proton/run.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2035 # FIXME glob - -set -ex - -if [[ -z "$VK_DRIVER" ]]; then - exit 1 -fi - -INSTALL=$(realpath -s "$PWD"/install) - -RESULTS=$(realpath -s "$PWD"/results) - -# Set up the driver environment. 
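(An aside on the boot2container settings above: B2C decides that a job passed by scanning the serial console for a line matching B2C_JOB_SUCCESS_REGEX, which the HWCI test script is expected to print on success. A minimal, standalone Python sketch of that matching; the console log here is invented for illustration only:)

    import re

    # Pattern taken from B2C_JOB_SUCCESS_REGEX above; the \r is there because
    # serial console logs use CRLF line endings.
    success_re = re.compile(r'hwci: mesa: pass, exit_code: 0\r$')

    # Invented sample console output, for illustration only.
    console_log = "booting kernel...\r\nrunning deqp...\r\nhwci: mesa: pass, exit_code: 0\r\n"

    passed = any(success_re.search(line) for line in console_log.split("\n"))
    print("job passed" if passed else "job failed")
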
-# Modifiying here directly LD_LIBRARY_PATH may cause problems when -# using a command wrapper. Hence, we will just set it when running the -# command. -export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/:/vkd3d-proton-tests/x64/" - - -# Sanity check to ensure that our environment is sufficient to make our tests -# run against the Mesa built by CI, rather than any installed distro version. -MESA_VERSION=$(cat "$INSTALL/VERSION") - -# Set the Vulkan driver to use. -ARCH=$(uname -m) -export VK_DRIVER_FILES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json" - -# Set environment for Wine. -export WINEDEBUG="-all" -export WINEPREFIX="/vkd3d-proton-wine64" -export WINEESYNC=1 - -# wrapper to supress +x to avoid spamming the log -quiet() { - set +x - "$@" - set -x -} - -set +e -if ! vulkaninfo | tee /tmp/version.txt | grep -F "Mesa $MESA_VERSION"; -then - printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION" - exit 1 -fi -set -e - -if [ -d "$RESULTS" ]; then - cd "$RESULTS" && rm -rf ..?* .[!.]* * && cd - -else - mkdir "$RESULTS" -fi - -quiet printf "%s\n" "Running vkd3d-proton testsuite..." - -set +e -if ! /vkd3d-proton-tests/x64/bin/d3d12 > "$RESULTS/vkd3d-proton.log"; -then - # Check if the executable finished (ie. no segfault). - if ! grep "tests executed" "$RESULTS/vkd3d-proton.log" > /dev/null; then - error printf "%s\n" "Failed, see vkd3d-proton.log!" - exit 1 - fi - - # Collect all the failures - RESULTSFILE="$RESULTS/$VKD3D_PROTON_RESULTS.txt" - mkdir -p .gitlab-ci/vkd3d-proton - grep "Test failed" "$RESULTS"/vkd3d-proton.log > "$RESULTSFILE" - - # Gather the list expected failures - if [ -f "$INSTALL/$VKD3D_PROTON_RESULTS-vkd3d.txt" ]; then - cp "$INSTALL/$VKD3D_PROTON_RESULTS-vkd3d.txt" \ - ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" - else - touch ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" - fi - - # Make sure that the failures found in this run match the current expectation - if ! diff -q ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" "$RESULTSFILE"; then - error printf "%s\n" "Changes found, see vkd3d-proton.log!" - quiet diff --color=always -u ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" "$RESULTSFILE" - exit 1 - fi -fi - -printf "%s\n" "vkd3d-proton execution: SUCCESS" - -exit 0 diff --git a/mesalib/.gitlab-ci/vkd3d-runner.sh b/mesalib/.gitlab-ci/vkd3d-runner.sh new file mode 100644 index 0000000000..36737e18f6 --- /dev/null +++ b/mesalib/.gitlab-ci/vkd3d-runner.sh @@ -0,0 +1,165 @@ +#!/usr/bin/env bash +# shellcheck disable=SC1091 # paths only become valid at runtime + +. "${SCRIPTS_DIR}/setup-test-env.sh" + +set -e + +comma_separated() { + local IFS=, + echo "$*" +} + +if [[ -z "$VK_DRIVER" ]]; then + printf "VK_DRIVER is not defined\n" + exit 1 +fi + +INSTALL=$(realpath -s "$PWD"/install) + +# Set up the driver environment. +# Modifiying here directly LD_LIBRARY_PATH may cause problems when +# using a command wrapper. Hence, we will just set it when running the +# command. +export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/:/vkd3d-proton-tests/x64/" + + +# Set the Vulkan driver to use. +ARCH=$(uname -m) +export VK_DRIVER_FILES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json" + +# Set environment for Wine. 
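(The comma_separated helper defined at the top of this script joins its arguments via bash's IFS expansion; the skip-list handling just below uses it to build VKD3D_TEST_EXCLUDE. A rough Python equivalent of that parse-and-join step, with hypothetical file contents:)

    # Hypothetical contents of "$INSTALL/$GPU_VERSION-vkd3d-skips.txt":
    skips_txt = "# comment lines and blank lines are dropped\n\ntest_a\ntest_b\n"

    # Equivalent of: grep -vE '^#|^$' on the file, then comma_separated "${skips[@]}"
    skips = [line for line in skips_txt.splitlines()
             if line and not line.startswith("#")]
    vkd3d_test_exclude = ",".join(skips)
    print(vkd3d_test_exclude)  # -> test_a,test_b
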
+export WINEDEBUG="-all" +export WINEPREFIX="/vkd3d-proton-wine64" +export WINEESYNC=1 + +if [ -f "$INSTALL/$GPU_VERSION-vkd3d-skips.txt" ]; then + mapfile -t skips < <(grep -vE '^#|^$' "$INSTALL/$GPU_VERSION-vkd3d-skips.txt") + VKD3D_TEST_EXCLUDE=$(comma_separated "${skips[@]}") + printf 'VKD3D_TEST_EXCLUDE=%s\n' "$VKD3D_TEST_EXCLUDE" + export VKD3D_TEST_EXCLUDE +fi + +# Sanity check to ensure that our environment is sufficient to make our tests +# run against the Mesa built by CI, rather than any installed distro version. +MESA_VERSION=$(cat "$INSTALL/VERSION") +if ! vulkaninfo | grep driverInfo | tee /tmp/version.txt | grep -F "Mesa $MESA_VERSION"; then + printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION" + exit 1 +fi + +# Gather the list expected failures +EXPECTATIONFILE="$RESULTS_DIR/$GPU_VERSION-vkd3d-fails.txt" +if [ -f "$INSTALL/$GPU_VERSION-vkd3d-fails.txt" ]; then + grep -vE '^(#|$)' "$INSTALL/$GPU_VERSION-vkd3d-fails.txt" | sort > "$EXPECTATIONFILE" +else + printf "%s\n" "$GPU_VERSION-vkd3d-fails.txt not found, assuming a \"no failures\" baseline." + touch "$EXPECTATIONFILE" +fi + +if [ -f "$INSTALL/$GPU_VERSION-vkd3d-flakes.txt" ]; then + mapfile -t flakes < <(grep -vE '^#|^$' "$INSTALL/$GPU_VERSION-vkd3d-flakes.txt") +else + flakes=() +fi + +# Some sanity checks before we start +mapfile -t flakes_dups < <( + [ ${#flakes[@]} -eq 0 ] || + printf '%s\n' "${flakes[@]}" | sort | uniq -d +) +if [ ${#flakes_dups[@]} -gt 0 ]; then + printf >&2 'Duplicate flakes lines:\n' + printf >&2 ' %s\n' "${flakes_dups[@]}" + exit 1 +fi + +flakes_in_baseline=() +for flake in "${flakes[@]}"; do + if grep -qF "$flake" "$EXPECTATIONFILE"; then + flakes_in_baseline+=("$flake") + fi +done +if [ ${#flakes_in_baseline[@]} -gt 0 ]; then + printf >&2 "Flakes found in %s:\n" "$EXPECTATIONFILE" + printf >&2 ' %s\n' "${flakes_in_baseline[@]}" + exit 1 +fi + +printf "%s\n" "Running vkd3d-proton testsuite..." + +LOGFILE="$RESULTS_DIR/vkd3d-proton-log.txt" +TEST_LOGS="$RESULTS_DIR/test-logs" +(cd /vkd3d-proton-tests && tests/test-runner.sh x64/bin/d3d12 --jobs "${FDO_CI_CONCURRENT:-4}" --output-dir "$TEST_LOGS" | tee "$LOGFILE") + +printf '\n\n' + +# Check if the executable finished (ie. no segfault). +if ! grep -E "^Finished" "$LOGFILE" > /dev/null; then + error "Failed, see ${ARTIFACTS_BASE_URL}/results/vkd3d-proton-log.txt" + exit 1 +fi + +# Print list of flakes seen this time +flakes_seen=() +for flake in "${flakes[@]}"; do + if grep -qF "FAILED $flake" "$LOGFILE"; then + flakes_seen+=("$flake") + fi +done +if [ ${#flakes_seen[@]} -gt 0 ]; then + # Keep this string and output format in line with the corresponding + # deqp-runner message + printf >&2 '\nSome known flakes found:\n' + printf >&2 ' %s\n' "${flakes_seen[@]}" +fi + +# Collect all the failures +mapfile -t fails < <(grep -oE "^FAILED .+$" "$LOGFILE" | cut -d' ' -f2 | sort) + +# Save test output for failed tests (before excluding flakes) +for failed_test in "${fails[@]}"; do + cp "$TEST_LOGS/$failed_test.log" "$RESULTS/$failed_test.log" +done + +# Ignore flakes when comparing +for flake in "${flakes[@]}"; do + for idx in "${!fails[@]}"; do + grep -qF "$flake" <<< "${fails[$idx]}" && unset -v 'fails[$idx]' + done +done + +RESULTSFILE="$RESULTS/$GPU_VERSION.txt" +for failed_test in "${fails[@]}"; do + if ! 
grep -qE "$failed_test end" "$RESULTS/$failed_test.log"; then + test_status=Crash + elif grep -qE "Test failed:" "$RESULTS/$failed_test.log"; then + test_status=Fail + else + test_status=Unknown + fi + printf '%s,%s\n' "$failed_test" "$test_status" +done > "$RESULTSFILE" + +# Catch tests listed but not executed or not failing +mapfile -t expected_fail_lines < "$EXPECTATIONFILE" +for expected_fail_line in "${expected_fail_lines[@]}"; do + test_name=$(cut -d, -f1 <<< "$expected_fail_line") + if [ ! -f "$TEST_LOGS/$test_name.log" ]; then + test_status='UnexpectedImprovement(Skip)' + elif [ ! -f "$RESULTS/$test_name.log" ]; then + test_status='UnexpectedImprovement(Pass)' + else + continue + fi + printf '%s,%s\n' "$test_name" "$test_status" +done >> "$RESULTSFILE" + +mapfile -t unexpected_results < <(comm -23 "$RESULTSFILE" "$EXPECTATIONFILE") +if [ ${#unexpected_results[@]} -gt 0 ]; then + printf >&2 '\nUnexpected results:\n' + printf >&2 ' %s\n' "${unexpected_results[@]}" + exit 1 +fi + +exit 0 diff --git a/mesalib/.gitlab-ci/windows/deqp_runner_run.ps1 b/mesalib/.gitlab-ci/windows/deqp_runner_run.ps1 index 136cf8e377..53d708ecdf 100644 --- a/mesalib/.gitlab-ci/windows/deqp_runner_run.ps1 +++ b/mesalib/.gitlab-ci/windows/deqp_runner_run.ps1 @@ -31,6 +31,7 @@ $template = "See $($env:ARTIFACTS_BASE_URL)/results/{{testcase}}.xml" deqp-runner junit --testsuite dEQP --results "$($results)/failures.csv" --output "$($results)/junit.xml" --limit 50 --template $template Copy-Item -Path "C:\deqp\testlog.css" -Destination $($results) Copy-Item -Path "C:\deqp\testlog.xsl" -Destination $($results) +Remove-Item -Path "$($results)/*.shader_cache" if (!$deqpstatus) { Exit 1 diff --git a/mesalib/.gitlab-ci/windows/mesa_build.ps1 b/mesalib/.gitlab-ci/windows/mesa_build.ps1 index ee93ff74f0..8711e3f005 100644 --- a/mesalib/.gitlab-ci/windows/mesa_build.ps1 +++ b/mesalib/.gitlab-ci/windows/mesa_build.ps1 @@ -56,7 +56,6 @@ meson setup ` -Dgles2=enabled ` -Dgallium-opencl=icd ` -Dgallium-rusticl=false ` --Dopencl-spirv=true ` -Dmicrosoft-clc=enabled ` -Dstatic-libclc=all ` -Dspirv-to-dxil=true ` diff --git a/mesalib/.gitlab-ci/windows/mesa_deps_build.ps1 b/mesalib/.gitlab-ci/windows/mesa_deps_build.ps1 index 3c83cc8611..c50299f9a9 100644 --- a/mesalib/.gitlab-ci/windows/mesa_deps_build.ps1 +++ b/mesalib/.gitlab-ci/windows/mesa_deps_build.ps1 @@ -12,7 +12,7 @@ $depsInstallPath="C:\mesa-deps" Get-Date Write-Host "Cloning DirectX-Headers" -git clone -b v1.613.1 --depth=1 https://github.com/microsoft/DirectX-Headers deps/DirectX-Headers +git clone -b v1.614.1 --depth=1 https://github.com/microsoft/DirectX-Headers deps/DirectX-Headers if (!$?) { Write-Host "Failed to clone DirectX-Headers repository" Exit 1 @@ -20,7 +20,7 @@ if (!$?) { Write-Host "Building DirectX-Headers" $dxheaders_build = New-Item -ItemType Directory -Path ".\deps\DirectX-Headers" -Name "build" Push-Location -Path $dxheaders_build.FullName -meson .. --backend=ninja -Dprefix="$depsInstallPath" --buildtype=release -Db_vscrt=mt && ` +meson setup .. --backend=ninja -Dprefix="$depsInstallPath" --buildtype=release -Db_vscrt=mt && ` ninja -j32 install $buildstatus = $? Pop-Location @@ -45,7 +45,7 @@ robocopy deps/zlib/zlib-1.3.1 deps/zlib /E Remove-Item -Recurse -Force -ErrorAction SilentlyContinue -Path deps/zlib/zlib-1.3.1 $zlib_build = New-Item -ItemType Directory -Path ".\deps\zlib" -Name "build" Push-Location -Path $zlib_build.FullName -meson .. 
--backend=ninja -Dprefix="$depsInstallPath" --default-library=static --buildtype=release -Db_vscrt=mt && ` +meson setup .. --backend=ninja -Dprefix="$depsInstallPath" --default-library=static --buildtype=release -Db_vscrt=mt && ` ninja -j32 install $buildstatus = $? Pop-Location @@ -96,6 +96,7 @@ cmake ../llvm ` -DLLVM_ENABLE_DIA_SDK=OFF ` -DCLANG_BUILD_TOOLS=ON ` -DLLVM_SPIRV_INCLUDE_TESTS=OFF ` +-DLLVM_ENABLE_ZLIB=OFF ` -Wno-dev && ` ninja -j32 install $buildstatus = $? diff --git a/mesalib/.gitlab-ci/windows/mesa_deps_choco.ps1 b/mesalib/.gitlab-ci/windows/mesa_deps_choco.ps1 index d2cbeb6960..b3481de67d 100644 --- a/mesalib/.gitlab-ci/windows/mesa_deps_choco.ps1 +++ b/mesalib/.gitlab-ci/windows/mesa_deps_choco.ps1 @@ -68,7 +68,7 @@ Get-Date python -m pip install --upgrade pip --progress-bar off Write-Host "Installing python packages at:" Get-Date -pip3 install packaging meson mako numpy --progress-bar off +pip3 install packaging meson mako "numpy < 2.0" pyyaml --progress-bar off if (!$?) { Write-Host "Failed to install dependencies from pip" Exit 1 diff --git a/mesalib/.gitlab-ci/windows/mesa_deps_d3d.ps1 b/mesalib/.gitlab-ci/windows/mesa_deps_d3d.ps1 index d25b0ab48c..f76641967a 100644 --- a/mesalib/.gitlab-ci/windows/mesa_deps_d3d.ps1 +++ b/mesalib/.gitlab-ci/windows/mesa_deps_d3d.ps1 @@ -8,7 +8,7 @@ $depsInstallPath="C:\mesa-deps" Write-Host "Downloading DirectX 12 Agility SDK at:" Get-Date -Invoke-WebRequest -Uri https://www.nuget.org/api/v2/package/Microsoft.Direct3D.D3D12/1.613.2 -OutFile 'agility.zip' +Invoke-WebRequest -Uri https://www.nuget.org/api/v2/package/Microsoft.Direct3D.D3D12/1.614.1 -OutFile 'agility.zip' Expand-Archive -Path 'agility.zip' -DestinationPath 'C:\agility' # Copy Agility SDK into mesa-deps\bin\D3D12 New-Item -ErrorAction SilentlyContinue -ItemType Directory -Path $depsInstallPath\bin -Name 'D3D12' diff --git a/mesalib/.gitlab-ci/windows/mesa_deps_libva.ps1 b/mesalib/.gitlab-ci/windows/mesa_deps_libva.ps1 index e6b9fb696a..8c4628897e 100644 --- a/mesalib/.gitlab-ci/windows/mesa_deps_libva.ps1 +++ b/mesalib/.gitlab-ci/windows/mesa_deps_libva.ps1 @@ -29,7 +29,7 @@ Pop-Location # libva already has a build dir in their repo, use builddir instead $libva_build = New-Item -ItemType Directory -Path ".\deps\libva" -Name "builddir" Push-Location -Path $libva_build.FullName -meson .. -Dprefix="$depsInstallPath" +meson setup .. -Dprefix="$depsInstallPath" ninja -j32 install $buildstatus = $? Pop-Location @@ -65,7 +65,7 @@ Write-Host "Building libva-utils" # libva-utils already has a build dir in their repo, use builddir instead $libva_utils_build = New-Item -ItemType Directory -Path ".\deps\libva-utils" -Name "builddir" Push-Location -Path $libva_utils_build.FullName -meson .. -Dprefix="$depsInstallPath" --pkg-config-path="$depsInstallPath\lib\pkgconfig;$depsInstallPath\share\pkgconfig" +meson setup .. -Dprefix="$depsInstallPath" --pkg-config-path="$depsInstallPath\lib\pkgconfig;$depsInstallPath\share\pkgconfig" ninja -j32 install $buildstatus = $? 
Pop-Location diff --git a/mesalib/.gitlab-ci/windows/mesa_deps_test_deqp.ps1 b/mesalib/.gitlab-ci/windows/mesa_deps_test_deqp.ps1 index 503803c285..06edbf2ad8 100644 --- a/mesalib/.gitlab-ci/windows/mesa_deps_test_deqp.ps1 +++ b/mesalib/.gitlab-ci/windows/mesa_deps_test_deqp.ps1 @@ -14,7 +14,7 @@ New-Item -ItemType Directory -Path "$deqp_source" | Out-Null Push-Location -Path $deqp_source git init git remote add origin https://github.com/KhronosGroup/VK-GL-CTS.git -git fetch --depth 1 origin 56114106d860c121cd6ff0c3b926ddc50c4c11fd # of branch vulkan-cts-1.3.4 +git fetch --depth 1 origin d48899f85b486a70d090af59a1453763458611d9 # of branch vulkan-cts-1.3.8 if (!$?) { Write-Host "Failed to fetch deqp repository" Pop-Location diff --git a/mesalib/.gitlab-ci/windows/mesa_init_msvc.ps1 b/mesalib/.gitlab-ci/windows/mesa_init_msvc.ps1 index 01aadb648d..a8975b52fa 100644 --- a/mesalib/.gitlab-ci/windows/mesa_init_msvc.ps1 +++ b/mesalib/.gitlab-ci/windows/mesa_init_msvc.ps1 @@ -11,7 +11,7 @@ Import-Module (Join-Path $vsInstallPath "Common7\Tools\Microsoft.VisualStudio.De $vcvars_ver_arg=$args if ($null -eq $vcvars_ver_arg[0]) { - $vcvars_ver_arg="-vcvars_ver=14.29" + $vcvars_ver_arg="-vcvars_ver=14" } Enter-VsDevShell -VsInstallPath $vsInstallPath -SkipAutomaticLocation -DevCmdArguments "$vcvars_ver_arg -arch=x64 -no_logo -host_arch=amd64" diff --git a/mesalib/.mailmap b/mesalib/.mailmap index 9ddcd147e9..da782338b4 100644 --- a/mesalib/.mailmap +++ b/mesalib/.mailmap @@ -84,6 +84,8 @@ Ben Widawsky Ben Widawsky Blair Sadewitz Blair Sadewitz +Bob Beckett + Boris Brezillon Boris Peterbarg reist @@ -157,6 +159,8 @@ Christopher James Halse Rogers Christop Christopher Li Chris Li Christopher Li Qicheng Christopher Li +Christopher Michael + Claudio Ciccani Claudio Ciccani @@ -168,7 +172,7 @@ Colin McDonald Connor Abbott Connor Abbott -Konstantin Kharlamov +Constantine Shablia Corbin Simpson Corbin Simpson @@ -192,6 +196,7 @@ David Miller davem69 David Heidelberg David Heidelberger David Heidelberg +David Heidelberg David Reveman @@ -231,7 +236,7 @@ Eric Engestrom Eric Engestrom Eric Engestrom -Erik Faye-Lund +Erik Faye-Lund Eugeni Dodonov @@ -251,6 +256,7 @@ Freya Gentz George Sapountzis George Sapountzis Gert Wollny +Gert Wollny Gurchetan Singh @@ -372,6 +378,9 @@ Karl Schultz Karl Schultz Karl Schultz +Karmjit Mahil +Karmjit Mahil + Karol Herbst Karol Herbst Karol Herbst @@ -386,6 +395,8 @@ Keith Whitwell keithw Kevin Rogovin +Konstantin Kharlamov + Kristian Høgsberg Kristian Høgsberg Kristian Høgsberg @@ -575,6 +586,9 @@ Robert Hooker Rodrigo Vivi +Rohan Garg +Rohan Garg + Roland Scheidegger Roland Scheidegger @@ -658,7 +672,7 @@ Tom Stellard Tomasz Figa -Tomeu Vizoso +Tomeu Vizoso Topi Pohjolainen diff --git a/mesalib/.marge/hooks/pipeline_message.py b/mesalib/.marge/hooks/pipeline_message.py new file mode 100644 index 0000000000..13527409f1 --- /dev/null +++ b/mesalib/.marge/hooks/pipeline_message.py @@ -0,0 +1,377 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: MIT + +# Provide a markdown-formatted message summarizing the reasons why a pipeline failed. +# Marge bot can use this script to provide more helpful comments when CI fails. 
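(The script is plain asyncio plus aiohttp against the GitLab REST API; this condensed, standalone sketch shows the request pattern it repeats below for pipelines, jobs, and artifacts. The ids are placeholders:)

    import asyncio
    import aiohttp

    async def fetch_pipeline_status(project_id: str = "176", pipeline_id: str = "1310098") -> str:
        # Placeholder ids; same endpoint shape as get_pipeline_status() below.
        url = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}/pipelines/{pipeline_id}"
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                response.raise_for_status()
                return (await response.json()).get("status", "unknown")

    # print(asyncio.run(fetch_pipeline_status()))  # requires network access
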
+# Example for running locally: +# ./bin/ci/pipeline_message.sh --project-id 176 --pipeline-id 1310098 + + +import argparse +import asyncio +import logging +from typing import Any + +import aiohttp + +PER_PAGE: int = 6000 + + +async def get_pipeline_status( + session: aiohttp.ClientSession, project_id: str, pipeline_id: str +): + url = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}/pipelines/{pipeline_id}" + logging.info(f"Fetching pipeline status from {url}") + async with session.get(url) as response: + response.raise_for_status() + pipeline_details = await response.json() + return pipeline_details.get("status") + + +async def get_jobs_for_pipeline( + session: aiohttp.ClientSession, project_id: str, pipeline_id: str +): + url = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}/pipelines/{pipeline_id}/jobs" + logging.info(url) + jobs = [] + params = {"per_page": PER_PAGE} + async with session.get(url, params=params) as response: + response.raise_for_status() + jobs = await response.json() + return jobs + + +def get_problem_jobs(jobs: list[dict[str, Any]]): + ignore_stage_list = [ + "postmerge", + "performance", + ] + problem_jobs = [] + for job in jobs: + if any(ignore.lower() in job["stage"] for ignore in ignore_stage_list): + continue + if job["status"] in {"failed", "canceled"}: + problem_jobs.append(job) + return problem_jobs + + +def unexpected_improvements(failed_test_array): + if failed_test_array["unexpected_improvements"]: + unexpected_improvements_count = len( + failed_test_array["unexpected_improvements"] + ) + return f" {unexpected_improvements_count} improved test{'s' if unexpected_improvements_count != 1 else ''}" + return "" + + +def fails(failed_test_array): + if failed_test_array["fails"]: + fails_count = len(failed_test_array["fails"]) + return f" {fails_count} failed test{'s' if fails_count != 1 else ''}" + return "" + + +def crashes(failed_test_array): + if failed_test_array["crashes"]: + crash_count = len(failed_test_array["crashes"]) + return f" {crash_count} crashed test{'s' if crash_count != 1 else ''}" + return "" + + +def get_failed_test_details(failed_test_array): + message = "" + max_tests_to_display = 5 + + if failed_test_array["unexpected_improvements"]: + for i, test in enumerate(failed_test_array["unexpected_improvements"]): + if i > max_tests_to_display: + message += " \nand more...
" + break + message += f"{test}
" + + if failed_test_array["fails"]: + for i, test in enumerate(failed_test_array["fails"]): + if i > max_tests_to_display: + message += " \nand more...
" + break + message += f"{test}
" + + if failed_test_array["crashes"]: + for i, test in enumerate(failed_test_array["crashes"]): + if i > max_tests_to_display: + message += " \nand more...
" + break + message += f"{test}
" + + return message + + +def get_failed_test_summary_message(failed_test_array): + summary_msg = "" + summary_msg += unexpected_improvements(failed_test_array) + summary_msg += fails(failed_test_array) + summary_msg += crashes(failed_test_array) + summary_msg += "" + return summary_msg + + +def sort_failed_tests_by_status(failures_csv): + failed_test_array = { + "unexpected_improvements": [], + "fails": [], + "crashes": [], + "timeouts": [], + } + + for test in failures_csv.splitlines(): + if "UnexpectedImprovement" in test: + failed_test_array["unexpected_improvements"].append(test) + elif "Fail" in test: + failed_test_array["fails"].append(test) + elif "Crash" in test: + failed_test_array["crashes"].append(test) + elif "Timeout" in test: + failed_test_array["timeouts"].append(test) + + return failed_test_array + + +async def get_failures_csv(session, project_id, job): + job_id = job["id"] + url = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}/jobs/{job_id}/artifacts/results/failures.csv" + async with session.get(url) as response: + if response.status == 200: + text = await response.text() + return text + else: + logging.debug(f"No response from: {url}") + return "" + + +async def get_test_failures(session, project_id, job): + failures_csv = await get_failures_csv(session, project_id, job) + if not failures_csv: + return "" + + # If just one test failed, don't bother with more complicated sorting + lines = failures_csv.splitlines() + if len(lines) == 1: + return ": " + lines[0] + "
" + + failed_test_array = sort_failed_tests_by_status(failures_csv) + failures_msg = "
" + failures_msg += get_failed_test_summary_message(failed_test_array) + failures_msg += get_failed_test_details(failed_test_array) + failures_msg += "
" + + return failures_msg + + +async def get_trace_failures(session, project_id, job): + project_json = await get_project_json(session, project_id) + path = project_json.get("path", "") + if not path: + return "" + + job_id = job["id"] + url = f"https://mesa.pages.freedesktop.org/-/{path}/-/jobs/{job_id}/artifacts/results/summary/problems.html" + async with session.get(url) as response: + if response.status == 200: + return url + else: + logging.debug(f"No response from: {url}") + return "" + + +async def get_project_json(session, project_id): + url_project_id = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}" + async with session.get(url_project_id) as response: + if response.status == 200: + return await response.json() + else: + logging.debug(f"No response from: {url_project_id}") + return "" + + +async def get_job_log(session: aiohttp.ClientSession, project_id: str, job_id: int): + project_json = await get_project_json(session, project_id) + path_with_namespace = project_json.get("path_with_namespace", "") + if not path_with_namespace: + return "" + + url_job_log = ( + f"https://gitlab.freedesktop.org/{path_with_namespace}/-/jobs/{job_id}/raw" + ) + async with session.get(url_job_log) as response: + if response.status == 200: + return await response.text() + else: + logging.debug(f"No response from job log: {url_job_log}") + return "" + + +async def search_job_log_for_errors(session, project_id, job): + log_error_message = "" + + # Bypass these generic error messages in hopes of finding a more specific error. + # The entries are case insensitive. Keep them in alphabetical order and don't + # forget to add a comma after each entry + ignore_list = [ + "403: b", + "aborting", + "building c", + "continuing", + "error_msg : None", + "error_type", + "error generated", + "errors generated", + "exit code", + "exit status", + "exiting now", + "job failed", + "no_error", + "no files to upload", + "performing test", + "ret code", + "retry", + "retry-all-errors", + "strerror_", + "success", + "unknown-section", + ] + job_log = await get_job_log(session, project_id, job["id"]) + + for line in reversed(job_log.splitlines()): + if "fatal" in line.lower(): + # remove date and formatting before fatal message + log_error_message = line[line.lower().find("fatal") :] + break + + if "error" in line.lower(): + if any(ignore.lower() in line.lower() for ignore in ignore_list): + continue + + # remove date and formatting before error message + log_error_message = line[line.lower().find("error") :].strip() + + # if there is no further info after the word error then it's not helpful + # so reset the message and try again. + if log_error_message.lower() in {"error", "errors", "error:", "errors:"}: + log_error_message = "" + continue + break + + # timeout msg from .gitlab-ci/lava/lava_job_submitter.py + if "expected to take at least" in line.lower(): + log_error_message = line + break + + return log_error_message + + +async def process_single_job(session, project_id, job): + job_url = job.get("web_url", "") + if not job_url: + logging.info(f"Job {job['name']} is missing a web_url") + + job_name = job.get("name", "Unnamed Job") + message = f"[{job_name}]({job_url})" + + # if a job times out it's cancelled, so worth mentioning here + if job["status"] == "canceled": + return f"{message}: canceled
" + + # if it's not a script failure then all we can do is give the gitlab assigned reason + if job["failure_reason"] != "script_failure": + return f"{message}: {job['failure_reason']}
" + + test_failures = await get_test_failures(session, project_id, job) + if test_failures: + return f"{message}{test_failures}" + + trace_failures = await get_trace_failures(session, project_id, job) + if trace_failures: + return f"{message}: has a [trace failure]({trace_failures})
" + + log_error_message = await search_job_log_for_errors(session, project_id, job) + if log_error_message: + return f"{message}: {log_error_message}
" + + return f"{message}
" + + +async def process_job_with_limit(session, project_id, job): + # Use at most 10 concurrent tasks + semaphore = asyncio.Semaphore(10) + async with semaphore: + return await process_single_job(session, project_id, job) + + +async def process_problem_jobs(session, project_id, problem_jobs): + + problem_jobs_count = len(problem_jobs) + + if problem_jobs_count == 1: + message = f"
There were problems with job: " + message += await process_single_job(session, project_id, problem_jobs[0]) + return message + + message = f"<details>
" + message += f"" + message += f"There were problems with {problem_jobs_count} jobs: " + message += "" + + tasks = [process_job_with_limit(session, project_id, job) for job in problem_jobs] + + results = await asyncio.gather(*tasks) + + for result in results: + message += result + + message += f"
" + + return message + + +async def main(pipeline_id: str, project_id: str = "176") -> str: + + message = "" + + try: + timeout = aiohttp.ClientTimeout(total=120) + logging.basicConfig(level=logging.INFO) + + async with aiohttp.ClientSession(timeout=timeout) as session: + pipeline_status = await get_pipeline_status( + session, project_id, pipeline_id + ) + logging.debug(f"Pipeline status: {pipeline_status}") + if pipeline_status != "failed": + return message + + jobs = await get_jobs_for_pipeline(session, project_id, pipeline_id) + problem_jobs = get_problem_jobs(jobs) + + if len(problem_jobs) == 0: + return message + + message = await process_problem_jobs(session, project_id, problem_jobs) + except Exception as e: + logging.error(f"An error occurred: {e}") + return "" + + return message + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Fetch GitLab pipeline details") + parser.add_argument( + "--project-id", default="176", help="Project ID (default: 176 i.e. mesa/mesa)" + ) + parser.add_argument("--pipeline-id", required=True, help="Pipeline ID") + + args = parser.parse_args() + + message = asyncio.run(main(args.pipeline_id, args.project_id)) + + print(message) diff --git a/mesalib/.mr-label-maker.yml b/mesalib/.mr-label-maker.yml index 049eed9936..305652587f 100644 --- a/mesalib/.mr-label-maker.yml +++ b/mesalib/.mr-label-maker.yml @@ -15,6 +15,7 @@ issues: 'clc': 'OpenCL' 'clover': 'clover' 'crocus': 'crocus' + 'd3d10umd': 'd3d10umd' 'd3d12': 'd3d12' 'docs': 'docs' 'dozen': 'dozen' @@ -27,6 +28,7 @@ issues: 'freedreno/ir3': ['freedreno', 'ir3'] 'gallium': 'gallium' 'gbm': 'gbm' + 'gfxstream': 'gfxstream' 'gitlab-ci': 'CI' 'glsl': 'GLSL' 'glvnd': 'GLVND' @@ -38,6 +40,7 @@ issues: 'intel/brw': 'intel-brw' 'intel/elk': 'intel-elk' 'intel/tools': 'intel-tools' + 'intel/executor': 'intel-executor' 'iris': 'iris' 'isl': 'ISL' 'lima': 'lima' @@ -57,6 +60,7 @@ issues: 'nvc0': ['nouveau', 'nvc0'] 'nvk': 'NVK' 'panfrost': 'panfrost' + 'panvk': 'panvk' 'pan/midgard': 'panfrost' 'pvr': 'powervr' 'r100': 'r100' @@ -154,8 +158,20 @@ merge_requests: '^src/egl/': ['EGL'] '^src/egl/drivers/wgl/': ['wgl'] '^src/etnaviv/': ['etnaviv'] - '^src/freedreno/': ['freedreno'] + '^src/freedreno/afuc/': ['freedreno'] + '^src/freedreno/common/': ['freedreno'] + '^src/freedreno/computerator/': ['freedreno'] + '^src/freedreno/decode/': ['freedreno'] + '^src/freedreno/drm-shim/': ['freedreno'] + '^src/freedreno/drm/': ['freedreno'] + '^src/freedreno/ds/': ['freedreno'] + '^src/freedreno/fdl/': ['freedreno'] + '^src/freedreno/ir2/': ['freedreno'] '^src/freedreno/ir3/': ['ir3'] + '^src/freedreno/isa/': ['freedreno'] + '^src/freedreno/perfcntrs/': ['freedreno'] + '^src/freedreno/registers/': ['freedreno'] + '^src/freedreno/rnn/': ['freedreno'] '^src/freedreno/vulkan/': ['turnip'] '^src/gallium/auxiliary/': ['gallium'] '^src/gallium/auxiliary/nir/': ['NIR'] @@ -190,6 +206,7 @@ merge_requests: '^src/gallium/drivers/virgl/': ['virgl'] '^src/gallium/drivers/zink/': ['zink'] '^src/gallium/frontends/clover/': ['clover'] + '^src/gallium/frontends/d3d10umd/': ['d3d10umd'] '^src/gallium/frontends/dri/': ['gallium'] '^src/gallium/frontends/glx/': ['GLX'] '^src/gallium/frontends/hgl/': ['haiku'] @@ -205,6 +222,7 @@ merge_requests: # '^src/gallium/frontends/xa/': [''] '^src/gallium/include/': ['gallium'] '^src/gallium/targets/': ['gallium'] + '^src/gallium/targets/d3d10umd': ['d3d10umd'] '^src/gallium/targets/opencl/': ['clover'] '^src/gallium/targets/osmesa/': ['osmesa'] '^src/gallium/targets/rusticl/': 
['Rusticl'] @@ -231,6 +249,7 @@ merge_requests: '^src/gallium/winsys/vc4/': ['vc4'] '^src/gallium/winsys/virgl/': ['virgl'] '^src/gbm/': ['gbm'] + '^src/gfxstream/': ['gfxstream'] '^src/glx/': ['GLX'] '^src/imagination/': ['powervr'] '^src/intel/blorp/': ['blorp'] @@ -240,6 +259,7 @@ merge_requests: '^src/intel/compiler/elk': ['intel-elk'] '^src/intel/dev/': ['intel'] '^src/intel/ds/': ['intel'] + '^src/intel/executor/': ['intel-executor'] '^src/intel/genxml/': ['intel'] '^src/intel/isl/': ['ISL'] '^src/intel/nullhw-layer/': ['intel'] @@ -267,18 +287,21 @@ merge_requests: '^src/nouveau/codegen/': ['nouveau'] '^src/nouveau/compiler/': ['NAK'] '^src/nouveau/drm-shim/': ['nouveau'] + '^src/nouveau/drm/': ['nouveau'] + '^src/nouveau/headers/': ['NVK'] '^src/nouveau/mme/': ['NVK'] '^src/nouveau/nil/': ['NVK'] - '^src/nouveau/nvidia-headers/': ['NVK'] '^src/nouveau/vulkan/': ['NVK'] '^src/nouveau/winsys/': ['NVK'] '^src/panfrost/': ['panfrost'] + '^src/panfrost/vulkan/': ['panvk'] '^src/virtio/vulkan/': ['venus'] '^src/virtio/venus-protocol/': ['venus'] '^src/virtio/ci/': ['venus'] '^src/util/': ['util'] '^src/util/00-mesa-defaults.conf': ['drirc'] '^src/vulkan/': ['vulkan'] + '^src/vulkan/wsi/': ['wsi'] '^VERSION$': ['maintainer-scripts'] 'Android': ['android'] diff --git a/mesalib/.shellcheckrc b/mesalib/.shellcheckrc new file mode 100644 index 0000000000..8534815fdc --- /dev/null +++ b/mesalib/.shellcheckrc @@ -0,0 +1,10 @@ +# Allow using `source` to execute the contents of other files than the one +# being checked. +external-sources=true + +# Do not print links to wiki pages explaining why this is an issue. +wiki-link-count=0 + +# All shell scripts are being run using bash, even if they are lacking +# a shebang. +shell=bash diff --git a/mesalib/CODEOWNERS b/mesalib/CODEOWNERS index fb8026bdb6..2d3ac0f3eb 100644 --- a/mesalib/CODEOWNERS +++ b/mesalib/CODEOWNERS @@ -127,6 +127,7 @@ gitlab-ci*.yml @eric /include/drm-uapi/pvr_drm.h @aashishc @frankbinns @luigi.santivetti /src/imagination/ @aashishc @frankbinns @luigi.santivetti /src/imagination/rogue/ @simon-perretta-img +/src/imagination/pco/ @simon-perretta-img # Intel /include/drm-uapi/i915_drm.h @kwg @llandwerlin @gfxstrand @idr diff --git a/mesalib/README.rst b/mesalib/README.rst index 3ff6e39c8d..d9f2f06e9b 100644 --- a/mesalib/README.rst +++ b/mesalib/README.rst @@ -18,11 +18,9 @@ Meson (`docs/meson.rst `_): .. code-block:: sh - $ mkdir build - $ cd build - $ meson .. 
- $ sudo ninja install - + $ meson setup build + $ ninja -C build/ + $ sudo ninja -C build/ install Support ------- diff --git a/mesalib/VERSION b/mesalib/VERSION index 60001e16dd..e00d6fada8 100644 --- a/mesalib/VERSION +++ b/mesalib/VERSION @@ -1 +1 @@ -24.2.0-devel +25.0.0-devel diff --git a/mesalib/android/Android.mk b/mesalib/android/Android.mk index ced12e3056..d5b4c10aba 100644 --- a/mesalib/android/Android.mk +++ b/mesalib/android/Android.mk @@ -27,6 +27,12 @@ LOCAL_PATH := $(call my-dir) MESA3D_TOP := $(dir $(LOCAL_PATH)) LIBDRM_VERSION = $(shell cat external/libdrm/meson.build | grep -o "\\s*:\s*'\w*\.\w*\.\w*'" | grep -o "\w*\.\w*\.\w*" | head -1) +LLVM_VERSION_MAJOR = $(shell \ + if [ -f external/llvm-project/cmake/Modules/LLVMVersion.cmake ]; then \ + grep 'set.LLVM_VERSION_MAJOR ' external/llvm-project/cmake/Modules/LLVMVersion.cmake | grep -o '[0-9]\+'; \ + else \ + grep 'set.LLVM_VERSION_MAJOR ' external/llvm-project/llvm/CMakeLists.txt | grep -o '[0-9]\+'; \ + fi) MESA_VK_LIB_SUFFIX_amd := radeon MESA_VK_LIB_SUFFIX_intel := intel @@ -42,7 +48,7 @@ include $(CLEAR_VARS) LOCAL_SHARED_LIBRARIES := libc libdl libdrm libm liblog libcutils libz libc++ libnativewindow libsync libhardware LOCAL_STATIC_LIBRARIES := libexpat libarect libelf LOCAL_HEADER_LIBRARIES := libnativebase_headers hwvulkan_headers -MESON_GEN_PKGCONFIGS := cutils expat hardware libdrm:$(LIBDRM_VERSION) nativewindow sync zlib:1.2.11 libelf +MESON_GEN_PKGCONFIGS := log cutils expat hardware libdrm:$(LIBDRM_VERSION) nativewindow sync zlib:1.2.11 libelf LOCAL_CFLAGS += $(BOARD_MESA3D_CFLAGS) ifneq ($(filter swrast,$(BOARD_MESA3D_GALLIUM_DRIVERS) $(BOARD_MESA3D_VULKAN_DRIVERS)),) @@ -86,8 +92,8 @@ MESON_GEN_PKGCONFIGS += DirectX-Headers endif ifneq ($(MESON_GEN_LLVM_STUB),) -MESON_LLVM_VERSION := 12.0.0 -LOCAL_SHARED_LIBRARIES += libLLVM12 +MESON_LLVM_VERSION := $(LLVM_VERSION_MAJOR).0.0 +LOCAL_SHARED_LIBRARIES += libLLVM$(LLVM_VERSION_MAJOR) endif ifeq ($(shell test $(PLATFORM_SDK_VERSION) -ge 30; echo $$?), 0) @@ -96,7 +102,10 @@ LOCAL_SHARED_LIBRARIES += \ libgralloctypes \ libhidlbase \ libutils - +ifeq ($(shell test $(PLATFORM_SDK_VERSION) -ge 35; echo $$?), 0) +LOCAL_SHARED_LIBRARIES += libui +MESON_GEN_PKGCONFIGS += ui +endif MESON_GEN_PKGCONFIGS += android.hardware.graphics.mapper:4.0 endif @@ -157,9 +166,9 @@ endif endef ifneq ($(strip $(BOARD_MESA3D_GALLIUM_DRIVERS)),) -# Module 'libgallium_dri', produces '/vendor/lib{64}/dri/libgallium_dri.so' +# Module 'libgallium_dri', produces '/vendor/lib{64}/libgallium_dri.so' # This module also trigger DRI symlinks creation process -$(eval $(call mesa3d-lib,libgallium_dri,dri,MESA3D_GALLIUM_DRI_BIN)) +$(eval $(call mesa3d-lib,libgallium_dri,,MESA3D_GALLIUM_BIN)) # Module 'libglapi', produces '/vendor/lib{64}/libglapi.so' $(eval $(call mesa3d-lib,libglapi,,MESA3D_LIBGLAPI_BIN)) @@ -178,6 +187,7 @@ $(foreach driver,$(BOARD_MESA3D_VULKAN_DRIVERS), \ ifneq ($(filter true, $(BOARD_MESA3D_BUILD_LIBGBM)),) # Modules 'libgbm', produces '/vendor/lib{64}/libgbm.so' $(eval $(call mesa3d-lib,$(MESA_LIBGBM_NAME),,MESA3D_LIBGBM_BIN,$(MESA3D_TOP)/src/gbm/main)) +$(eval $(call mesa3d-lib,dri_gbm,,MESA3D_DRI_GBM_BIN)) endif #------------------------------------------------------------------------------- diff --git a/mesalib/android/mesa3d_cross.mk b/mesalib/android/mesa3d_cross.mk index 6807887098..b83427927f 100644 --- a/mesalib/android/mesa3d_cross.mk +++ b/mesalib/android/mesa3d_cross.mk @@ -63,16 +63,22 @@ MESON_OUT_DIR := $($(M_TARGET_PREFIX)TARGET_OUT_INTER MESON_GEN_DIR := 
$(MESON_OUT_DIR)_GEN MESON_GEN_FILES_TARGET := $(MESON_GEN_DIR)/.timestamp -MESA3D_GALLIUM_DRI_DIR := $(MESON_OUT_DIR)/install/usr/local/lib/dri -$(M_TARGET_PREFIX)MESA3D_GALLIUM_DRI_BIN := $(MESON_OUT_DIR)/install/usr/local/lib/libgallium_dri.so +MESA3D_GALLIUM_DIR := $(MESON_OUT_DIR)/install/usr/local/lib +$(M_TARGET_PREFIX)MESA3D_GALLIUM_BIN := $(MESON_OUT_DIR)/install/usr/local/lib/libgallium_dri.so $(M_TARGET_PREFIX)MESA3D_LIBEGL_BIN := $(MESON_OUT_DIR)/install/usr/local/lib/libEGL.so $(M_TARGET_PREFIX)MESA3D_LIBGLESV1_BIN := $(MESON_OUT_DIR)/install/usr/local/lib/libGLESv1_CM.so $(M_TARGET_PREFIX)MESA3D_LIBGLESV2_BIN := $(MESON_OUT_DIR)/install/usr/local/lib/libGLESv2.so $(M_TARGET_PREFIX)MESA3D_LIBGLAPI_BIN := $(MESON_OUT_DIR)/install/usr/local/lib/libglapi.so $(M_TARGET_PREFIX)MESA3D_LIBGBM_BIN := $(MESON_OUT_DIR)/install/usr/local/lib/$(MESA_LIBGBM_NAME).so +$(M_TARGET_PREFIX)MESA3D_DRI_GBM_BIN := $(MESON_OUT_DIR)/install/usr/local/lib/gbm/dri_gbm.so +MESA3D_GBM_BINS := \ + $($(M_TARGET_PREFIX)MESA3D_LIBGBM_BIN) \ + $($(M_TARGET_PREFIX)MESA3D_DRI_GBM_BIN) \ + MESA3D_GLES_BINS := \ + $($(M_TARGET_PREFIX)MESA3D_GALLIUM_BIN) \ $($(M_TARGET_PREFIX)MESA3D_LIBEGL_BIN) \ $($(M_TARGET_PREFIX)MESA3D_LIBGLESV1_BIN) \ $($(M_TARGET_PREFIX)MESA3D_LIBGLESV2_BIN) \ @@ -82,12 +88,12 @@ MESON_GEN_NINJA := \ cd $(MESON_OUT_DIR) && PATH=/usr/bin:/usr/local/bin:$$PATH meson ./build \ --cross-file $(call relative-to-absolute,$(MESON_GEN_DIR))/aosp_cross \ --buildtype=release \ - -Ddri-search-path=/vendor/$(MESA3D_LIB_DIR)/dri \ -Dplatforms=android \ -Dplatform-sdk-version=$(PLATFORM_SDK_VERSION) \ -Dgallium-drivers=$(subst $(space),$(comma),$(BOARD_MESA3D_GALLIUM_DRIVERS)) \ -Dvulkan-drivers=$(subst $(space),$(comma),$(subst radeon,amd,$(BOARD_MESA3D_VULKAN_DRIVERS))) \ -Dgbm=enabled \ + -Dgbm-backends-path=/vendor/$(MESA3D_LIB_DIR) \ -Degl=$(if $(BOARD_MESA3D_GALLIUM_DRIVERS),enabled,disabled) \ -Dllvm=$(if $(MESON_GEN_LLVM_STUB),enabled,disabled) \ -Dcpp_rtti=false \ @@ -284,19 +290,14 @@ endif $(MESON_BUILD) touch $@ -MESON_COPY_LIBGALLIUM := \ - cp `ls -1 $(MESA3D_GALLIUM_DRI_DIR)/* | head -1` $($(M_TARGET_PREFIX)MESA3D_GALLIUM_DRI_BIN) - -$(MESON_OUT_DIR)/install/.install.timestamp: MESON_COPY_LIBGALLIUM:=$(MESON_COPY_LIBGALLIUM) $(MESON_OUT_DIR)/install/.install.timestamp: MESON_BUILD:=$(MESON_BUILD) $(MESON_OUT_DIR)/install/.install.timestamp: $(MESON_OUT_DIR)/.build.timestamp rm -rf $(dir $@) mkdir -p $(dir $@) DESTDIR=$(call relative-to-absolute,$(dir $@)) $(MESON_BUILD) install - $(if $(BOARD_MESA3D_GALLIUM_DRIVERS),$(MESON_COPY_LIBGALLIUM)) touch $@ -$($(M_TARGET_PREFIX)MESA3D_LIBGBM_BIN) $(MESA3D_GLES_BINS): $(MESON_OUT_DIR)/install/.install.timestamp +$(MESA3D_GBM_BINS) $(MESA3D_GLES_BINS): $(MESON_OUT_DIR)/install/.install.timestamp echo "Build $@" touch $@ @@ -308,14 +309,3 @@ $(MESON_OUT_DIR)/install/usr/local/lib/libvulkan_$(MESA_VK_LIB_SUFFIX_$1).so: $( endef $(foreach driver,$(BOARD_MESA3D_VULKAN_DRIVERS), $(eval $(call vulkan_target,$(driver)))) - -$($(M_TARGET_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES)/dri/.symlinks.timestamp: MESA3D_GALLIUM_DRI_DIR:=$(MESA3D_GALLIUM_DRI_DIR) -$($(M_TARGET_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES)/dri/.symlinks.timestamp: $(MESON_OUT_DIR)/install/.install.timestamp - # Create Symlinks - mkdir -p $(dir $@) - ls -1 $(MESA3D_GALLIUM_DRI_DIR)/ | PATH=/usr/bin:$$PATH xargs -I{} ln -s -f libgallium_dri.so $(dir $@)/{} - touch $@ - -$($(M_TARGET_PREFIX)MESA3D_GALLIUM_DRI_BIN): $(TARGET_OUT_VENDOR)/$(MESA3D_LIB_DIR)/dri/.symlinks.timestamp - echo "Build 
$@" - touch $@ diff --git a/mesalib/bin/ci/ci_gantt_chart.py b/mesalib/bin/ci/ci_gantt_chart.py index 8b9472217a..44f39865f6 100644 --- a/mesalib/bin/ci/ci_gantt_chart.py +++ b/mesalib/bin/ci/ci_gantt_chart.py @@ -8,24 +8,24 @@ import argparse -import gitlab +from datetime import datetime, timedelta, timezone +from typing import Dict, List + import plotly.express as px -from gitlab_common import pretty_duration -from datetime import datetime, timedelta -from gitlab_common import read_token, GITLAB_URL, get_gitlab_pipeline_from_url +import plotly.graph_objs as go +from gitlab import Gitlab, base +from gitlab.v4.objects import ProjectPipeline +from gitlab_common import (GITLAB_URL, get_gitlab_pipeline_from_url, + get_token_from_default_dir, pretty_duration, + read_token) -def calculate_queued_at(job): - # we can have queued_duration without started_at when a job is canceled - if not job.queued_duration or not job.started_at: - return None +def calculate_queued_at(job) -> datetime: started_at = job.started_at.replace("Z", "+00:00") return datetime.fromisoformat(started_at) - timedelta(seconds=job.queued_duration) -def calculate_time_difference(time1, time2): - if not time1 or not time2: - return None +def calculate_time_difference(time1, time2) -> str: if type(time1) is str: time1 = datetime.fromisoformat(time1.replace("Z", "+00:00")) if type(time2) is str: @@ -35,12 +35,14 @@ def calculate_time_difference(time1, time2): return pretty_duration(diff.seconds) -def create_task_name(job): +def create_task_name(job) -> str: status_color = {"success": "green", "failed": "red"}.get(job.status, "grey") return f"{job.name}\t({job.status},{job.id})" -def add_gantt_bar(job, tasks): +def add_gantt_bar( + job: base.RESTObject, tasks: List[Dict[str, str | datetime | timedelta]] +) -> None: queued_at = calculate_queued_at(job) task_name = create_task_name(job) @@ -62,25 +64,43 @@ def add_gantt_bar(job, tasks): "Phase": "Queued", } ) - tasks.append( - { - "Job": task_name, - "Start": job.started_at, - "Finish": job.finished_at, - "Duration": calculate_time_difference(job.started_at, job.finished_at), - "Phase": "Running", - } - ) - -def generate_gantt_chart(pipeline): + if job.finished_at: + tasks.append( + { + "Job": task_name, + "Start": job.started_at, + "Finish": job.finished_at, + "Duration": calculate_time_difference(job.started_at, job.finished_at), + "Phase": "Time spent running", + } + ) + else: + current_time = datetime.now(timezone.utc).isoformat() + tasks.append( + { + "Job": task_name, + "Start": job.started_at, + "Finish": current_time, + "Duration": calculate_time_difference(job.started_at, current_time), + "Phase": "In-Progress", + } + ) + + +def generate_gantt_chart( + pipeline: ProjectPipeline, ci_timeout: float = 60 +) -> go.Figure: if pipeline.yaml_errors: raise ValueError("Pipeline YAML errors detected") # Convert the data into a list of dictionaries for plotly - tasks = [] + tasks: List[Dict[str, str | datetime | timedelta]] = [] for job in pipeline.jobs.list(all=True, include_retried=True): + # we can have queued_duration without started_at when a job is canceled + if not job.queued_duration or not job.started_at: + continue add_gantt_bar(job, tasks) # Make it easier to see retried jobs @@ -94,7 +114,8 @@ def generate_gantt_chart(pipeline): ) # Create a Gantt chart - fig = px.timeline( + default_colors = px.colors.qualitative.Plotly + fig: go.Figure = px.timeline( tasks, x_start="Start", x_end="Finish", @@ -102,6 +123,12 @@ def generate_gantt_chart(pipeline): color="Phase", 
title=title, hover_data=["Duration"], + color_discrete_map={ + "In-Progress": default_colors[3], # purple + "Waiting dependencies": default_colors[0], # blue + "Queued": default_colors[1], # red + "Time spent running": default_colors[2], # green + }, ) # Calculate the height dynamically @@ -109,11 +136,11 @@ def generate_gantt_chart(pipeline): # Add a deadline line to the chart created_at = datetime.fromisoformat(pipeline.created_at.replace("Z", "+00:00")) - timeout_at = created_at + timedelta(hours=1) + timeout_at = created_at + timedelta(minutes=ci_timeout) fig.add_vrect( x0=timeout_at, x1=timeout_at, - annotation_text="1h Timeout", + annotation_text=f"{int(ci_timeout)} min Timeout", fillcolor="gray", line_width=2, line_color="gray", @@ -125,7 +152,30 @@ def generate_gantt_chart(pipeline): return fig -def parse_args() -> None: +def main( + token: str | None, + pipeline_url: str, + output: str | None, + ci_timeout: float = 60, +): + if token is None: + token = get_token_from_default_dir() + + token = read_token(token) + gl = Gitlab(url=GITLAB_URL, private_token=token, retry_transient_errors=True) + + pipeline, _ = get_gitlab_pipeline_from_url(gl, pipeline_url) + fig: go.Figure = generate_gantt_chart(pipeline, ci_timeout) + if output and "htm" in output: + fig.write_html(output) + elif output: + fig.update_layout(width=1000) + fig.write_image(output) + else: + fig.show() + + +if __name__ == "__main__": parser = argparse.ArgumentParser( description="Generate the Gantt chart from a given pipeline." ) @@ -134,29 +184,19 @@ def parse_args() -> None: "-o", "--output", type=str, - help="Output file name. Use html ou image suffixes to choose the format.", + help="Output file name. Use html or image suffixes to choose the format.", ) parser.add_argument( "--token", metavar="token", help="force GitLab token, otherwise it's read from ~/.config/gitlab-token", ) - return parser.parse_args() - - -if __name__ == "__main__": - args = parse_args() - - token = read_token(args.token) - - gl = gitlab.Gitlab(url=GITLAB_URL, private_token=token, retry_transient_errors=True) - - pipeline, _ = get_gitlab_pipeline_from_url(gl, args.pipeline_url) - fig = generate_gantt_chart(pipeline) - if args.output and "htm" in args.output: - fig.write_html(args.output) - elif args.output: - fig.update_layout(width=1000) - fig.write_image(args.output) - else: - fig.show() + parser.add_argument( + "--ci-timeout", + metavar="ci_timeout", + type=float, + default=60, + help="Time that marge-bot will wait for ci to finish. 
Defaults to one hour.", + ) + args = parser.parse_args() + main(args.token, args.pipeline_url, args.output, args.ci_timeout) diff --git a/mesalib/bin/ci/ci_post_gantt.py b/mesalib/bin/ci/ci_post_gantt.py index 131f27e937..3749d68392 100644 --- a/mesalib/bin/ci/ci_post_gantt.py +++ b/mesalib/bin/ci/ci_post_gantt.py @@ -8,25 +8,31 @@ import argparse -import gitlab -import re +import logging as log import os -import pytz +import re import traceback from datetime import datetime, timedelta -from gitlab_common import ( - read_token, - GITLAB_URL, - get_gitlab_pipeline_from_url, -) +from typing import Any, Dict + +import gitlab +import pytz from ci_gantt_chart import generate_gantt_chart +from gitlab import Gitlab +from gitlab.base import RESTObject +from gitlab.v4.objects import Project, ProjectPipeline +from gitlab_common import (GITLAB_URL, get_gitlab_pipeline_from_url, + get_token_from_default_dir, read_token) + + +class MockGanttExit(Exception): + pass -MARGE_USER_ID = 9716 # Marge LAST_MARGE_EVENT_FILE = os.path.expanduser("~/.config/last_marge_event") -def read_last_event_date_from_file(): +def read_last_event_date_from_file() -> str: try: with open(LAST_MARGE_EVENT_FILE, "r") as f: last_event_date = f.read().strip() @@ -36,7 +42,7 @@ def read_last_event_date_from_file(): return last_event_date -def pretty_time(time_str): +def pretty_time(time_str: str) -> str: """Pretty print time""" local_timezone = datetime.now().astimezone().tzinfo @@ -46,10 +52,8 @@ def pretty_time(time_str): return f'{time_str} ({time_d.strftime("%d %b %Y %Hh%Mm%Ss")} {local_timezone})' -def compose_message(file_name, attachment_url): +def compose_message(file_name: str, attachment_url: str) -> str: return f""" -Here is the Gantt chart for the referred pipeline, I hope it helps 😄 (tip: click on the "Pan" button on the top right bar): - [{file_name}]({attachment_url})
@@ -60,13 +64,13 @@ def compose_message(file_name, attachment_url): """ -def gitlab_upload_file_get_url(gl, project_id, filepath): - project = gl.projects.get(project_id) - uploaded_file = project.upload(filepath, filepath=filepath) +def gitlab_upload_file_get_url(gl: Gitlab, project_id: str, filepath: str) -> str: + project: Project = gl.projects.get(project_id) + uploaded_file: Dict[str, Any] = project.upload(filepath, filepath=filepath) return uploaded_file["url"] -def gitlab_post_reply_to_note(gl, event, reply_message): +def gitlab_post_reply_to_note(gl: Gitlab, event: RESTObject, reply_message: str): """ Post a reply to a note in thread based on a GitLab event. @@ -82,7 +86,7 @@ def gitlab_post_reply_to_note(gl, event, reply_message): merge_request = project.mergerequests.get(merge_request_iid) # Find the discussion to which the note belongs - discussions = merge_request.discussions.list(as_list=False) + discussions = merge_request.discussions.list(iterator=True) target_discussion = next( ( d @@ -100,36 +104,28 @@ def gitlab_post_reply_to_note(gl, event, reply_message): return reply except gitlab.exceptions.GitlabError as e: - print(f"Failed to post a reply to '{event.note['body']}': {e}") + log.error(f"Failed to post a reply to '{event.note['body']}': {e}") return None -def parse_args() -> None: - parser = argparse.ArgumentParser(description="Monitor rejected pipelines by Marge.") - parser.add_argument( - "--token", - metavar="token", - help="force GitLab token, otherwise it's read from ~/.config/gitlab-token", - ) - parser.add_argument( - "--since", - metavar="since", - help="consider only events after this date (ISO format), otherwise it's read from ~/.config/last_marge_event", - ) - return parser.parse_args() - - -if __name__ == "__main__": - args = parse_args() +def main( + token: str | None, + since: str | None, + marge_user_id: int = 9716, + project_ids: list[int] = [176], + ci_timeout: float = 60, +): + log.basicConfig(level=log.INFO) + if token is None: + token = get_token_from_default_dir() - token = read_token(args.token) + token = read_token(token) + gl = Gitlab(url=GITLAB_URL, private_token=token, retry_transient_errors=True) - gl = gitlab.Gitlab(url=GITLAB_URL, private_token=token, retry_transient_errors=True) + user = gl.users.get(marge_user_id) + last_event_at = since if since else read_last_event_date_from_file() - user = gl.users.get(MARGE_USER_ID) - last_event_at = args.since if args.since else read_last_event_date_from_file() - - print(f"Retrieving Marge messages since {pretty_time(last_event_at)}\n") + log.info(f"Retrieving Marge messages since {pretty_time(last_event_at)}\n") # the "after" only considers the "2023-10-24" part, it doesn't consider the time events = user.events.list( @@ -144,6 +140,8 @@ def parse_args() -> None: ).replace(tzinfo=pytz.UTC) for event in events: + if event.project_id not in project_ids: + continue created_at_date = datetime.fromisoformat( event.created_at.replace("Z", "+00:00") ).replace(tzinfo=pytz.UTC) @@ -151,28 +149,75 @@ def parse_args() -> None: continue last_event_at = event.created_at - match = re.search(r"https://[^ ]+", event.note["body"]) + escaped_gitlab_url = re.escape(GITLAB_URL) + match = re.search(rf"{escaped_gitlab_url}/[^\s<]+", event.note["body"]) + if match: try: - print("Found message:", event.note["body"]) + log.info(f"Found message: {event.note['body']}") pipeline_url = match.group(0)[:-1] + pipeline: ProjectPipeline pipeline, _ = get_gitlab_pipeline_from_url(gl, pipeline_url) - print("Generating gantt 
chart...") - fig = generate_gantt_chart(pipeline) - file_name = "Gantt.html" + log.info("Generating gantt chart...") + fig = generate_gantt_chart(pipeline, ci_timeout) + file_name = f"{str(pipeline.id)}-Gantt.html" fig.write_html(file_name) - print("Uploading gantt file...") + log.info("Uploading gantt file...") file_url = gitlab_upload_file_get_url(gl, event.project_id, file_name) - print("Posting reply ...\n") + log.info("Posting reply ...") message = compose_message(file_name, file_url) gitlab_post_reply_to_note(gl, event, message) + except MockGanttExit: + pass # Allow tests to exit early without printing a traceback except Exception as e: - print(f"Failed to generate gantt chart, not posting reply.{e}") + log.info(f"Failed to generate gantt chart, not posting reply.{e}") traceback.print_exc() - if not args.since: - print( + if not since: + log.info( f"Updating last event date to {pretty_time(last_event_at)} on {LAST_MARGE_EVENT_FILE}\n" ) with open(LAST_MARGE_EVENT_FILE, "w") as f: f.write(last_event_at) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Monitor rejected pipelines by Marge.") + parser.add_argument( + "--token", + metavar="token", + type=str, + default=None, + help="force GitLab token, otherwise it's read from ~/.config/gitlab-token", + ) + parser.add_argument( + "--since", + metavar="since", + type=str, + default=None, + help="consider only events after this date (ISO format), otherwise it's read from ~/.config/last_marge_event", + ) + parser.add_argument( + "--marge-user-id", + metavar="marge_user_id", + type=int, + default=9716, # default https://gitlab.freedesktop.org/users/marge-bot/activity + help="GitLab user ID for marge-bot, defaults to 9716", + ) + parser.add_argument( + "--project-id", + metavar="project_id", + type=int, + nargs="+", + default=[176], # default is the mesa/mesa project id + help="GitLab project id(s) to analyze. Defaults to 176 i.e. mesa/mesa.", + ) + parser.add_argument( + "--ci-timeout", + metavar="ci_timeout", + type=float, + default=60, + help="Time that marge-bot will wait for ci to finish. 
Defaults to one hour.", + ) + args = parser.parse_args() + main(args.token, args.since, args.marge_user_id, args.project_id, args.ci_timeout) diff --git a/mesalib/bin/ci/ci_run_n_monitor.py b/mesalib/bin/ci/ci_run_n_monitor.py index 6ead6c6ec9..5cd732be4f 100644 --- a/mesalib/bin/ci/ci_run_n_monitor.py +++ b/mesalib/bin/ci/ci_run_n_monitor.py @@ -21,7 +21,7 @@ from functools import partial from itertools import chain from subprocess import check_output, CalledProcessError -from typing import TYPE_CHECKING, Iterable, Literal, Optional +from typing import Dict, TYPE_CHECKING, Iterable, Literal, Optional, Tuple import gitlab import gitlab.v4.objects @@ -33,6 +33,7 @@ get_gitlab_project, get_token_from_default_dir, pretty_duration, + print_once, read_token, wait_for_pipeline, ) @@ -53,90 +54,138 @@ "success": Fore.GREEN, "failed": Fore.RED, "canceled": Fore.MAGENTA, + "canceling": Fore.MAGENTA, "manual": "", "pending": "", "skipped": "", } -COMPLETED_STATUSES = ["success", "failed"] +COMPLETED_STATUSES = frozenset({"success", "failed"}) +RUNNING_STATUSES = frozenset({"created", "pending", "running"}) -def print_job_status(job, new_status=False) -> None: +def print_job_status( + job: gitlab.v4.objects.ProjectPipelineJob, + new_status: bool = False, + job_name_field_pad: int = 0, +) -> None: """It prints a nice, colored job status with a link to the job.""" - if job.status == "canceled": + if job.status in {"canceled", "canceling"}: return if new_status and job.status == "created": return - if job.duration: - duration = job.duration - elif job.started_at: - duration = time.perf_counter() - time.mktime(job.started_at.timetuple()) + job_name_field_pad = len(job.name) if job_name_field_pad < 1 else job_name_field_pad - print( + duration = job_duration(job) + + print_once( STATUS_COLORS[job.status] - + "🞋 job " - + URL_START - + f"{job.web_url}\a{job.name}" - + URL_END - + (f" has new status: {job.status}" if new_status else f" :: {job.status}") + + "🞋 job " # U+1F78B Round target + + link2print(job.web_url, job.name, job_name_field_pad) + + (f"has new status: {job.status}" if new_status else f"{job.status}") + (f" ({pretty_duration(duration)})" if job.started_at else "") + Style.RESET_ALL ) +def job_duration(job: gitlab.v4.objects.ProjectPipelineJob) -> float: + """ + Given a job, report the time lapsed in execution. 
+ :param job: Pipeline job + :return: Current time in execution + """ + if job.duration: + return job.duration + elif job.started_at: + return time.perf_counter() - time.mktime(job.started_at.timetuple()) + return 0.0 + + def pretty_wait(sec: int) -> None: """shows progressbar in dots""" for val in range(sec, 0, -1): - print(f"⏲ {val} seconds", end="\r") + print(f"⏲ {val} seconds", end="\r") # U+23F2 Timer clock time.sleep(1) def monitor_pipeline( - project, - pipeline, + project: gitlab.v4.objects.Project, + pipeline: gitlab.v4.objects.ProjectPipeline, target_jobs_regex: re.Pattern, - dependencies, - force_manual: bool, + include_stage_regex: re.Pattern, + exclude_stage_regex: re.Pattern, + dependencies: set[str], stress: int, -) -> tuple[Optional[int], Optional[int]]: +) -> tuple[Optional[int], Optional[int], Dict[str, Dict[int, Tuple[float, str, str]]]]: """Monitors pipeline and delegate canceling jobs""" statuses: dict[str, str] = defaultdict(str) target_statuses: dict[str, str] = defaultdict(str) - stress_status_counter = defaultdict(lambda: defaultdict(int)) - target_id = None - + stress_status_counter: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) + execution_times = defaultdict(lambda: defaultdict(tuple)) + target_id: int = -1 + name_field_pad: int = len(max(dependencies, key=len))+2 + # In a running pipeline, we can skip following job traces that are in these statuses. + skip_follow_statuses: frozenset[str] = (COMPLETED_STATUSES) + + # Pre-populate the stress status counter for already completed target jobs. + if stress: + # When stress test, it is necessary to collect this information before start. + for job in pipeline.jobs.list(all=True, include_retried=True): + if target_jobs_regex.fullmatch(job.name) and \ + include_stage_regex.fullmatch(job.stage) and \ + not exclude_stage_regex.fullmatch(job.stage) and \ + job.status in COMPLETED_STATUSES: + stress_status_counter[job.name][job.status] += 1 + execution_times[job.name][job.id] = (job_duration(job), job.status, job.web_url) + + # jobs_waiting is a list of job names that are waiting for status update. + # It occurs when a job that we want to run depends on another job that is not yet finished. + jobs_waiting = [] + # FIXME: This function has too many parameters, consider refactoring. 
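For context on the partial() call that follows: the shared arguments are bound once into enable_job_fn, so the monitor loop only passes what varies per job. A minimal sketch of the pattern, with illustrative names (not the patch's code):

    from functools import partial

    def enable_job_sketch(job, action_type, project, pipeline):
        return f"{action_type} {job} on {project} pipeline {pipeline}"

    # Bind the invariants once...
    enable_fn = partial(enable_job_sketch, project="mesa/mesa", pipeline=12345)

    # ...then each call supplies only the per-job arguments.
    print(enable_fn(job="debian-build-x86_64", action_type="target"))
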
+ enable_job_fn = partial( + enable_job, + project=project, + pipeline=pipeline, + job_name_field_pad=name_field_pad, + jobs_waiting=jobs_waiting, + ) while True: deps_failed = [] to_cancel = [] - for job in pipeline.jobs.list(all=True, sort="desc"): - # target jobs - if target_jobs_regex.fullmatch(job.name): + jobs_waiting.clear() + for job in sorted(pipeline.jobs.list(all=True), key=lambda j: j.name): + if target_jobs_regex.fullmatch(job.name) and \ + include_stage_regex.fullmatch(job.stage) and \ + not exclude_stage_regex.fullmatch(job.stage): target_id = job.id + target_status = job.status - if stress and job.status in ["success", "failed"]: + if stress and target_status in COMPLETED_STATUSES: if ( stress < 0 or sum(stress_status_counter[job.name].values()) < stress ): - job = enable_job(project, pipeline, job, "retry", force_manual) - stress_status_counter[job.name][job.status] += 1 + stress_status_counter[job.name][target_status] += 1 + execution_times[job.name][job.id] = (job_duration(job), target_status, job.web_url) + job = enable_job_fn(job=job, action_type="retry") else: - job = enable_job(project, pipeline, job, "target", force_manual) + execution_times[job.name][job.id] = (job_duration(job), target_status, job.web_url) + job = enable_job_fn(job=job, action_type="target") - print_job_status(job, job.status not in target_statuses[job.name]) - target_statuses[job.name] = job.status + print_job_status(job, target_status not in target_statuses[job.name], name_field_pad) + target_statuses[job.name] = target_status continue - # all jobs + # all other non-target jobs if job.status != statuses[job.name]: - print_job_status(job, True) + print_job_status(job, True, name_field_pad) statuses[job.name] = job.status # run dependencies and cancel the rest if job.name in dependencies: - job = enable_job(project, pipeline, job, "dep", True) + job = enable_job_fn(job=job, action_type="dep") if job.status == "failed": deps_failed.append(job.name) else: @@ -146,9 +195,9 @@ def monitor_pipeline( if stress: enough = True - for job_name, status in stress_status_counter.items(): + for job_name, status in sorted(stress_status_counter.items()): print( - f"{job_name}\tsucc: {status['success']}; " + f"* {job_name:{name_field_pad}}succ: {status['success']}; " f"fail: {status['failed']}; " f"total: {sum(status.values())} of {stress}", flush=False, @@ -160,22 +209,29 @@ def monitor_pipeline( pretty_wait(REFRESH_WAIT_JOBS) continue - print("---------------------------------", flush=False) + if jobs_waiting: + print_once( + f"{Fore.YELLOW}Waiting for jobs to update status:", + ", ".join(jobs_waiting), + Fore.RESET, + ) + pretty_wait(REFRESH_WAIT_JOBS) + continue - if len(target_statuses) == 1 and {"running"}.intersection( + if len(target_statuses) == 1 and RUNNING_STATUSES.intersection( target_statuses.values() ): - return target_id, None + return target_id, None, execution_times if ( {"failed"}.intersection(target_statuses.values()) - and not set(["running", "pending"]).intersection(target_statuses.values()) + and not RUNNING_STATUSES.intersection(target_statuses.values()) ): - return None, 1 + return None, 1, execution_times if ( {"skipped"}.intersection(target_statuses.values()) - and not {"running", "pending"}.intersection(target_statuses.values()) + and not RUNNING_STATUSES.intersection(target_statuses.values()) ): print( Fore.RED, @@ -183,20 +239,20 @@ def monitor_pipeline( deps_failed, Fore.RESET, ) - return None, 1 + return None, 1, execution_times - if {"success", 
"manual"}.issuperset(target_statuses.values()): - return None, 0 + if skip_follow_statuses.issuperset(target_statuses.values()): + return None, 0, execution_times pretty_wait(REFRESH_WAIT_JOBS) def get_pipeline_job( pipeline: gitlab.v4.objects.ProjectPipeline, - id: int, + job_id: int, ) -> gitlab.v4.objects.ProjectPipelineJob: pipeline_jobs = pipeline.jobs.list(all=True) - return [j for j in pipeline_jobs if j.id == id][0] + return [j for j in pipeline_jobs if j.id == job_id][0] def enable_job( @@ -204,19 +260,24 @@ def enable_job( pipeline: gitlab.v4.objects.ProjectPipeline, job: gitlab.v4.objects.ProjectPipelineJob, action_type: Literal["target", "dep", "retry"], - force_manual: bool, + job_name_field_pad: int = 0, + jobs_waiting: list[str] = [], ) -> gitlab.v4.objects.ProjectPipelineJob: - """enable job""" + # We want to run this job, but it is not ready to run yet, so let's try again in the next + # iteration. + if job.status == "created": + jobs_waiting.append(job.name) + return job + if ( - (job.status in ["success", "failed"] and action_type != "retry") - or (job.status == "manual" and not force_manual) - or job.status in ["skipped", "running", "created", "pending"] + (job.status in COMPLETED_STATUSES and action_type != "retry") + or job.status in {"skipped"} | RUNNING_STATUSES ): return job pjob = project.jobs.get(job.id, lazy=True) - if job.status in ["success", "failed", "canceled"]: + if job.status in {"success", "failed", "canceled", "canceling"}: new_job = pjob.retry() job = get_pipeline_job(pipeline, new_job["id"]) else: @@ -224,32 +285,34 @@ def enable_job( job = get_pipeline_job(pipeline, pjob.id) if action_type == "target": - jtype = "🞋 " + jtype = "🞋 target" # U+1F78B Round target elif action_type == "retry": - jtype = "↻" + jtype = "↻ retrying" # U+21BB Clockwise open circle arrow else: - jtype = "(dependency)" + jtype = "↪ dependency" # U+21AA Left Arrow Curving Right - print(Fore.MAGENTA + f"{jtype} job {job.name} manually enabled" + Style.RESET_ALL) + job_name_field_pad = len(job.name) if job_name_field_pad < 1 else job_name_field_pad + print(Fore.MAGENTA + f"{jtype} job {job.name:{job_name_field_pad}}manually enabled" + Style.RESET_ALL) return job -def cancel_job(project, job) -> None: +def cancel_job( + project: gitlab.v4.objects.Project, + job: gitlab.v4.objects.ProjectPipelineJob +) -> None: """Cancel GitLab job""" - if job.status in [ - "canceled", - "success", - "failed", - "skipped", - ]: + if job.status not in RUNNING_STATUSES: return pjob = project.jobs.get(job.id, lazy=True) pjob.cancel() - print(f"♲ {job.name}", end=" ") + print(f"🗙 {job.name}", end=" ") # U+1F5D9 Cancellation X -def cancel_jobs(project, to_cancel) -> None: +def cancel_jobs( + project: gitlab.v4.objects.Project, + to_cancel: list +) -> None: """Cancel unwanted GitLab jobs""" if not to_cancel: return @@ -257,10 +320,15 @@ def cancel_jobs(project, to_cancel) -> None: with ThreadPoolExecutor(max_workers=6) as exe: part = partial(cancel_job, project) exe.map(part, to_cancel) - print() + # The cancelled jobs are printed without a newline + print_once() -def print_log(project, job_id) -> None: + +def print_log( + project: gitlab.v4.objects.Project, + job_id: int +) -> None: """Print job log into output""" printed_lines = 0 while True: @@ -278,7 +346,7 @@ def print_log(project, job_id) -> None: pretty_wait(REFRESH_WAIT_LOG) -def parse_args() -> None: +def parse_args() -> argparse.Namespace: """Parse args""" parser = argparse.ArgumentParser( description="Tool to trigger a subset of container jobs 
" @@ -290,10 +358,31 @@ def parse_args() -> None: "--target", metavar="target-job", help="Target job regex. For multiple targets, pass multiple values, " - "eg. `--target foo bar`.", + "eg. `--target foo bar`. Only jobs in the target stage(s) " + "supplied, and their dependencies, will be considered.", required=True, nargs=argparse.ONE_OR_MORE, ) + parser.add_argument( + "--include-stage", + metavar="include-stage", + help="Job stages to include when searching for target jobs. " + "For multiple targets, pass multiple values, eg. " + "`--include-stage foo bar`.", + default=[".*"], + nargs=argparse.ONE_OR_MORE, + ) + parser.add_argument( + "--exclude-stage", + metavar="exclude-stage", + help="Job stages to exclude when searching for target jobs. " + "For multiple targets, pass multiple values, eg. " + "`--exclude-stage foo bar`. By default, performance and " + "post-merge jobs are excluded; pass --exclude-stage '' to " + "include them for consideration.", + default=["performance", ".*-postmerge"], + nargs=argparse.ONE_OR_MORE, + ) parser.add_argument( "--token", metavar="token", @@ -303,19 +392,27 @@ def parse_args() -> None: f"otherwise it's read from {TOKEN_DIR / 'gitlab-token'}", ) parser.add_argument( - "--force-manual", action="store_true", help="Force jobs marked as manual" + "--force-manual", action="store_true", + help="Deprecated argument; manual jobs are always force-enabled" ) parser.add_argument( "--stress", default=0, type=int, - help="Stresstest job(s). Number or repetitions or -1 for infinite.", + help="Stresstest job(s). Specify the number of times to rerun the selected jobs, " + "or use -1 for indefinite. Defaults to 0. If jobs have already been executed, " + "this will ensure the total run count respects the specified number.", ) parser.add_argument( "--project", default="mesa", help="GitLab project in the format / or just ", ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Exit after printing target jobs and dependencies", + ) mutex_group1 = parser.add_mutually_exclusive_group() mutex_group1.add_argument( @@ -344,12 +441,14 @@ def parse_args() -> None: def print_detected_jobs( - target_dep_dag: "Dag", dependency_jobs: Iterable[str], target_jobs: Iterable[str] + target_dep_dag: "Dag", + dependency_jobs: Iterable[str], + target_jobs: Iterable[str], ) -> None: def print_job_set(color: str, kind: str, job_set: Iterable[str]): print( color + f"Running {len(job_set)} {kind} jobs: ", - "\n", + "\n\t", ", ".join(sorted(job_set)), Fore.RESET, "\n", @@ -361,10 +460,14 @@ def print_job_set(color: str, kind: str, job_set: Iterable[str]): print_job_set(Fore.BLUE, "target", target_jobs) -def find_dependencies(token: str | None, - target_jobs_regex: re.Pattern, - project_path: str, - iid: int) -> set[str]: +def find_dependencies( + token: str | None, + target_jobs_regex: re.Pattern, + include_stage_regex: re.Pattern, + exclude_stage_regex: re.Pattern, + project_path: str, + iid: int +) -> set[str]: """ Find the dependencies of the target jobs in a GitLab pipeline. @@ -390,7 +493,7 @@ def find_dependencies(token: str | None, gql_instance, {"projectPath": project_path.path_with_namespace, "iid": iid} ) - target_dep_dag = filter_dag(dag, target_jobs_regex) + target_dep_dag = filter_dag(dag, target_jobs_regex, include_stage_regex, exclude_stage_regex) if not target_dep_dag: print(Fore.RED + "The job(s) were not found in the pipeline." 
+ Fore.RESET) sys.exit(1) @@ -401,7 +504,45 @@ def find_dependencies(token: str | None, return target_jobs.union(dependency_jobs) -if __name__ == "__main__": +def print_monitor_summary( + execution_collection: Dict[str, Dict[int, Tuple[float, str, str]]], + t_start: float, +) -> None: + """Summary of the test execution""" + t_end = time.perf_counter() + spend_minutes = (t_end - t_start) / 60 + print(f"⏲ Duration of script execution: {spend_minutes:0.1f} minutes") # U+23F2 Timer clock + if len(execution_collection) == 0: + return + print(f"⏲ Jobs execution times:") # U+23F2 Timer clock + job_names = list(execution_collection.keys()) + job_names.sort() + name_field_pad = len(max(job_names, key=len)) + 2 + for name in job_names: + job_executions = execution_collection[name] + job_times = ', '.join([__job_duration_record(job_execution) + for job_execution in sorted(job_executions.items())]) + print(f"* {name:{name_field_pad}}: ({len(job_executions)}) {job_times}") + + +def __job_duration_record(dict_item: tuple) -> str: + """ + Format each pair of job and its duration. + :param job_execution: item of execution_collection[name][idn]: Dict[int, Tuple[float, str, str]] + """ + job_id = f"{dict_item[0]}" # dictionary key + job_duration, job_status, job_url = dict_item[1] # dictionary value, the tuple + return (f"{STATUS_COLORS[job_status]}" + f"{link2print(job_url, job_id)}: {pretty_duration(job_duration):>8}" + f"{Style.RESET_ALL}") + + +def link2print(url: str, text: str, text_pad: int = 0) -> str: + text_pad = len(text) if text_pad < 1 else text_pad + return f"{URL_START}{url}\a{text:{text_pad}}{URL_END}" + + +def main() -> None: try: t_start = time.perf_counter() @@ -463,31 +604,58 @@ def find_dependencies(token: str | None, target = '|'.join(args.target) target = target.strip() - deps = set() - print("🞋 job: " + Fore.BLUE + target + Style.RESET_ALL) + print("🞋 target job: " + Fore.BLUE + target + Style.RESET_ALL) # U+1F78B Round target # Implicitly include `parallel:` jobs target = f'({target})' + r'( \d+/\d+)?' 
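The optional `( \d+/\d+)?` suffix assembled above (and compiled just below) is what makes a plain job name also match its `parallel:` instances. A quick sketch of the behaviour, assuming the user passed `--target glcts` (the job name is made up):

    import re

    target_jobs_regex = re.compile(r"(glcts)( \d+/\d+)?")

    assert target_jobs_regex.fullmatch("glcts")          # plain job
    assert target_jobs_regex.fullmatch("glcts 3/8")      # parallel: instance
    assert not target_jobs_regex.fullmatch("glcts-full") # fullmatch: no prefix hits
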
target_jobs_regex = re.compile(target) + include_stage = '|'.join(args.include_stage) + include_stage = include_stage.strip() + + print("🞋 target from stages: " + Fore.BLUE + include_stage + Style.RESET_ALL) # U+1F78B Round target + + include_stage_regex = re.compile(include_stage) + + exclude_stage = '|'.join(args.exclude_stage) + exclude_stage = exclude_stage.strip() + + print("🞋 target excluding stages: " + Fore.BLUE + exclude_stage + Style.RESET_ALL) # U+1F78B Round target + + exclude_stage_regex = re.compile(exclude_stage) + deps = find_dependencies( token=token, target_jobs_regex=target_jobs_regex, + include_stage_regex=include_stage_regex, + exclude_stage_regex=exclude_stage_regex, iid=pipe.iid, project_path=cur_project ) - target_job_id, ret = monitor_pipeline( - cur_project, pipe, target_jobs_regex, deps, args.force_manual, args.stress + + if args.dry_run: + sys.exit(0) + + target_job_id, ret, exec_t = monitor_pipeline( + cur_project, + pipe, + target_jobs_regex, + include_stage_regex, + exclude_stage_regex, + deps, + args.stress ) if target_job_id: print_log(cur_project, target_job_id) - t_end = time.perf_counter() - spend_minutes = (t_end - t_start) / 60 - print(f"⏲ Duration of script execution: {spend_minutes:0.1f} minutes") + print_monitor_summary(exec_t, t_start) sys.exit(ret) except KeyboardInterrupt: sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/mesalib/bin/ci/gitlab_common.py b/mesalib/bin/ci/gitlab_common.py index 54e0cc7920..46b43a0a70 100644 --- a/mesalib/bin/ci/gitlab_common.py +++ b/mesalib/bin/ci/gitlab_common.py @@ -12,6 +12,7 @@ import os import re import time +from functools import cache from pathlib import Path GITLAB_URL = "https://gitlab.freedesktop.org" @@ -28,30 +29,40 @@ "Feed token": "glft-", "Incoming mail token": "glimt-", "GitLab Agent for Kubernetes token": "glagent-", - "SCIM Tokens": "glsoat-" + "SCIM Tokens": "glsoat-", } +@cache +def print_once(*args, **kwargs): + """Print without spamming the output""" + print(*args, **kwargs) + + def pretty_duration(seconds): """Pretty print duration""" hours, rem = divmod(seconds, 3600) minutes, seconds = divmod(rem, 60) if hours: - return f"{hours:0.0f}h{minutes:0.0f}m{seconds:0.0f}s" + return f"{hours:0.0f}h{minutes:02.0f}m{seconds:02.0f}s" if minutes: - return f"{minutes:0.0f}m{seconds:0.0f}s" + return f"{minutes:0.0f}m{seconds:02.0f}s" return f"{seconds:0.0f}s" -def get_gitlab_pipeline_from_url(gl, pipeline_url): - assert pipeline_url.startswith(GITLAB_URL) - url_path = pipeline_url[len(GITLAB_URL) :] - url_path_components = url_path.split("/") - project_name = "/".join(url_path_components[1:3]) - assert url_path_components[3] == "-" - assert url_path_components[4] == "pipelines" - pipeline_id = int(url_path_components[5]) - cur_project = gl.projects.get(project_name) +def get_gitlab_pipeline_from_url(gl, pipeline_url) -> tuple: + """ + Extract the project and pipeline object from the url string + :param gl: Gitlab object + :param pipeline_url: string with a url to a pipeline + :return: ProjectPipeline, Project objects + """ + pattern = rf"^{re.escape(GITLAB_URL)}/(.*)/-/pipelines/([0-9]+)$" + match = re.match(pattern, pipeline_url) + if not match: + raise AssertionError(f"url {pipeline_url} doesn't follow the pattern {pattern}") + namespace_with_project, pipeline_id = match.groups() + cur_project = gl.projects.get(namespace_with_project) pipe = cur_project.pipelines.get(pipeline_id) return pipe, cur_project @@ -88,19 +99,23 @@ def get_token_from_default_dir() -> str: def 
validate_gitlab_token(token: str) -> bool: - token_suffix = token.split("-")[-1] + # Match against recognised token prefixes + token_suffix = None + for token_type, token_prefix in TOKEN_PREFIXES.items(): + if token.startswith(token_prefix): + logging.info(f"Found probable token type: {token_type}") + token_suffix = token[len(token_prefix):] + break + + if not token_suffix: + return False + # Basic validation of the token suffix based on: # https://gitlab.com/gitlab-org/gitlab/-/blob/master/gems/gitlab-secret_detection/lib/gitleaks.toml if not re.match(r"(\w+-)?[0-9a-zA-Z_\-]{20,64}", token_suffix): return False - for token_type, token_prefix in TOKEN_PREFIXES.items(): - if token.startswith(token_prefix): - logging.info(f"Found probable token type: {token_type}") - return True - - # If the token type is not recognized, return False - return False + return True def get_token_from_arg(token_arg: str | Path | None) -> str | None: diff --git a/mesalib/bin/ci/gitlab_gql.py b/mesalib/bin/ci/gitlab_gql.py index eefdf214dd..144e3c31d7 100644 --- a/mesalib/bin/ci/gitlab_gql.py +++ b/mesalib/bin/ci/gitlab_gql.py @@ -325,16 +325,24 @@ def create_job_needs_dag(gl_gql: GitlabGQL, params, disable_cache: bool = True) return final_dag -def filter_dag(dag: Dag, regex: Pattern) -> Dag: - jobs_with_regex: set[str] = {job for job in dag if regex.fullmatch(job)} - return Dag({job: data for job, data in dag.items() if job in sorted(jobs_with_regex)}) +def filter_dag( + dag: Dag, job_name_regex: Pattern, include_stage_regex: Pattern, exclude_stage_regex: Pattern +) -> Dag: + filtered_jobs: Dag = Dag({}) + for (job, data) in dag.items(): + if not job_name_regex.fullmatch(job): + continue + if not include_stage_regex.fullmatch(data["stage"]): + continue + if exclude_stage_regex.fullmatch(data["stage"]): + continue + filtered_jobs[job] = data + return filtered_jobs def print_dag(dag: Dag) -> None: - for job, data in dag.items(): - print(f"{job}:") - print(f"\t{' '.join(data['needs'])}") - print() + for job, data in sorted(dag.items()): + print(f"{job}:\n\t{' '.join(data['needs'])}\n") def fetch_merged_yaml(gl_gql: GitlabGQL, params) -> dict[str, Any]: @@ -474,8 +482,23 @@ def parse_args() -> Namespace: "--regex", type=str, required=False, + default=".*", help="Regex pattern for the job name to be considered", ) + parser.add_argument( + "--include-stage", + type=str, + required=False, + default=".*", + help="Regex pattern for the stage name to be considered", + ) + parser.add_argument( + "--exclude-stage", + type=str, + required=False, + default="^$", + help="Regex pattern for the stage name to be excluded", + ) mutex_group_print = parser.add_mutually_exclusive_group() mutex_group_print.add_argument( "--print-dag", @@ -517,8 +540,7 @@ def main(): gl_gql, {"projectPath": args.project_path, "iid": iid}, disable_cache=True ) - if args.regex: - dag = filter_dag(dag, re.compile(args.regex)) + dag = filter_dag(dag, re.compile(args.regex), re.compile(args.include_stage), re.compile(args.exclude_stage)) print_dag(dag) diff --git a/mesalib/bin/ci/nightly_compare.py b/mesalib/bin/ci/nightly_compare.py new file mode 100644 index 0000000000..76c49b9456 --- /dev/null +++ b/mesalib/bin/ci/nightly_compare.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python3 +# Copyright © 2020 - 2024 Collabora Ltd. +# Authors: +# David Heidelberg +# Sergi Blanch Torne +# SPDX-License-Identifier: MIT + +""" +Compare the two latest scheduled pipelines and provide information +about the jobs you're interested in. 
+""" + +import argparse +import csv +import re +import requests +import io +from tabulate import tabulate + +import gitlab +from colorama import Fore, Style +from gitlab_common import read_token + + +MARGE_BOT_USER_ID = 9716 + +def print_failures_csv(id): + url = 'https://gitlab.freedesktop.org/mesa/mesa/-/jobs/' + str(id) + '/artifacts/raw/results/failures.csv' + missing: int = 0 + MAX_MISS: int = 20 + try: + response = requests.get(url) + response.raise_for_status() + csv_content = io.StringIO(response.text) + csv_reader = csv.reader(csv_content) + data = list(csv_reader) + + for line in data[:]: + if line[1] == "UnexpectedImprovement(Pass)": + line[1] = Fore.GREEN + line[1] + Style.RESET_ALL + elif line[1] == "UnexpectedImprovement(Fail)": + line[1] = Fore.YELLOW + line[1] + Style.RESET_ALL + elif line[1] == "Crash" or line[1] == "Fail": + line[1] = Fore.RED + line[1] + Style.RESET_ALL + elif line[1] == "Missing": + if missing > MAX_MISS: + data.remove(line) + continue + missing += 1 + line[1] = Fore.YELLOW + line[1] + Style.RESET_ALL + elif line[1] == "Fail": + line[1] = Fore.RED + line[1] + Style.RESET_ALL + else: + line[1] = Fore.WHITE + line[1] + Style.RESET_ALL + + if missing > MAX_MISS: + data.append([Fore.RED + f"... more than {MAX_MISS} missing tests, something crashed?", "Missing" + Style.RESET_ALL]) + headers = ["Test ", "Result"] + print(tabulate(data, headers, tablefmt="plain")) + except Exception: + pass + + +def job_failed_before(old_jobs, job): + for old_job in old_jobs: + if job.name == old_job.name: + return old_job + + +def parse_args() -> None: + """Parse args""" + parser = argparse.ArgumentParser( + description="Tool to show merge requests assigned to the marge-bot", + ) + parser.add_argument( + "--target", + metavar="target-job", + help="Target job regex. For multiple targets, pass multiple values, " + "eg. `--target foo bar`.", + required=False, + nargs=argparse.ONE_OR_MORE, + ) + parser.add_argument( + "--token", + metavar="token", + help="force GitLab token, otherwise it's read from ~/.config/gitlab-token", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + token = read_token(args.token) + gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org", private_token=token) + + project = gl.projects.get("mesa/mesa") + + print( + "\u001b]8;;https://gitlab.freedesktop.org/mesa/mesa/-/pipelines?page=1&scope=all&source=schedule\u001b\\Scheduled pipelines overview\u001b]8;;\u001b\\" + ) + pipelines = project.pipelines.list( + source="schedule", ordered_by="created_at", sort="desc", page=1, per_page=2 + ) + print( + f"Old pipeline: {pipelines[1].created_at}\t\u001b]8;;{pipelines[1].web_url}\u001b\\{pipelines[1].status}\u001b]8;;\u001b\\\t{pipelines[1].sha}" + ) + print( + f"New pipeline: {pipelines[0].created_at}\t\u001b]8;;{pipelines[0].web_url}\u001b\\{pipelines[0].status}\u001b]8;;\u001b\\\t{pipelines[0].sha}" + ) + print( + f"\nWebUI visual compare: https://gitlab.freedesktop.org/mesa/mesa/-/compare/{pipelines[1].sha}...{pipelines[0].sha}\n" + ) + + # regex part + if args.target: + target = "|".join(args.target) + target = target.strip() + print("🞋 jobs: " + Fore.BLUE + target + Style.RESET_ALL) + + target = f"({target})" + r"( \d+/\d+)?" 
+ else: + target = ".*" + + target_jobs_regex: re.Pattern = re.compile(target) + + old_failed_jobs = [] + for job in pipelines[1].jobs.list(all=True): + if ( + job.status != "failed" + or target_jobs_regex + and not target_jobs_regex.fullmatch(job.name) + ): + continue + old_failed_jobs.append(job) + + job_failed = False + for job in pipelines[0].jobs.list(all=True): + if ( + job.status != "failed" + or target_jobs_regex + and not target_jobs_regex.fullmatch(job.name) + ): + continue + + job_failed = True + + previously_failed_job = job_failed_before(old_failed_jobs, job) + if previously_failed_job: + print( + Fore.YELLOW + + f":: \u001b]8;;{job.web_url}\u001b\\{job.name}\u001b]8;;\u001b\\" + + Fore.MAGENTA + + f" \u001b]8;;{previously_failed_job.web_url}\u001b\\(previous run)\u001b]8;;\u001b\\" + + Style.RESET_ALL + ) + else: + print( + Fore.RED + + f":: \u001b]8;;{job.web_url}\u001b\\{job.name}\u001b]8;;\u001b\\" + + Style.RESET_ALL + ) + print_failures_csv(job.id) + + if not job_failed: + exit(0) + + print("Commits between nightly pipelines:") + commit = project.commits.get(pipelines[0].sha) + while True: + print( + f"{commit.id} \u001b]8;;{commit.web_url}\u001b\\{commit.title}\u001b]8;;\u001b\\" + ) + if commit.id == pipelines[1].sha: + break + commit = project.commits.get(commit.parent_ids[0]) diff --git a/mesalib/bin/ci/nightly_compare.sh b/mesalib/bin/ci/nightly_compare.sh new file mode 100644 index 0000000000..d72cfdc50e --- /dev/null +++ b/mesalib/bin/ci/nightly_compare.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +set -eu + +this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")") +readonly this_dir + +exec \ + "$this_dir/../python-venv.sh" \ + "$this_dir/requirements.txt" \ + "$this_dir/nightly_compare.py" "$@" + diff --git a/mesalib/bin/ci/pipeline_message.py b/mesalib/bin/ci/pipeline_message.py new file mode 100644 index 0000000000..13527409f1 --- /dev/null +++ b/mesalib/bin/ci/pipeline_message.py @@ -0,0 +1,377 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: MIT + +# Provide a markdown-formatted message summarizing the reasons why a pipeline failed. +# Marge bot can use this script to provide more helpful comments when CI fails. 
+# Example for running locally: +# ./bin/ci/pipeline_message.sh --project-id 176 --pipeline-id 1310098 + + +import argparse +import asyncio +import logging +from typing import Any + +import aiohttp + +PER_PAGE: int = 6000 + + +async def get_pipeline_status( + session: aiohttp.ClientSession, project_id: str, pipeline_id: str +): + url = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}/pipelines/{pipeline_id}" + logging.info(f"Fetching pipeline status from {url}") + async with session.get(url) as response: + response.raise_for_status() + pipeline_details = await response.json() + return pipeline_details.get("status") + + +async def get_jobs_for_pipeline( + session: aiohttp.ClientSession, project_id: str, pipeline_id: str +): + url = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}/pipelines/{pipeline_id}/jobs" + logging.info(url) + jobs = [] + params = {"per_page": PER_PAGE} + async with session.get(url, params=params) as response: + response.raise_for_status() + jobs = await response.json() + return jobs + + +def get_problem_jobs(jobs: list[dict[str, Any]]): + ignore_stage_list = [ + "postmerge", + "performance", + ] + problem_jobs = [] + for job in jobs: + if any(ignore.lower() in job["stage"] for ignore in ignore_stage_list): + continue + if job["status"] in {"failed", "canceled"}: + problem_jobs.append(job) + return problem_jobs + + +def unexpected_improvements(failed_test_array): + if failed_test_array["unexpected_improvements"]: + unexpected_improvements_count = len( + failed_test_array["unexpected_improvements"] + ) + return f" {unexpected_improvements_count} improved test{'s' if unexpected_improvements_count != 1 else ''}" + return "" + + +def fails(failed_test_array): + if failed_test_array["fails"]: + fails_count = len(failed_test_array["fails"]) + return f" {fails_count} failed test{'s' if fails_count != 1 else ''}" + return "" + + +def crashes(failed_test_array): + if failed_test_array["crashes"]: + crash_count = len(failed_test_array["crashes"]) + return f" {crash_count} crashed test{'s' if crash_count != 1 else ''}" + return "" + + +def get_failed_test_details(failed_test_array): + message = "" + max_tests_to_display = 5 + + if failed_test_array["unexpected_improvements"]: + for i, test in enumerate(failed_test_array["unexpected_improvements"]): + if i > max_tests_to_display: + message += " \nand more...
" + break + message += f"{test}
" + + if failed_test_array["fails"]: + for i, test in enumerate(failed_test_array["fails"]): + if i > max_tests_to_display: + message += " \nand more...
" + break + message += f"{test}
" + + if failed_test_array["crashes"]: + for i, test in enumerate(failed_test_array["crashes"]): + if i > max_tests_to_display: + message += " \nand more...
" + break + message += f"{test}
" + + return message + + +def get_failed_test_summary_message(failed_test_array): + summary_msg = "" + summary_msg += unexpected_improvements(failed_test_array) + summary_msg += fails(failed_test_array) + summary_msg += crashes(failed_test_array) + summary_msg += "" + return summary_msg + + +def sort_failed_tests_by_status(failures_csv): + failed_test_array = { + "unexpected_improvements": [], + "fails": [], + "crashes": [], + "timeouts": [], + } + + for test in failures_csv.splitlines(): + if "UnexpectedImprovement" in test: + failed_test_array["unexpected_improvements"].append(test) + elif "Fail" in test: + failed_test_array["fails"].append(test) + elif "Crash" in test: + failed_test_array["crashes"].append(test) + elif "Timeout" in test: + failed_test_array["timeouts"].append(test) + + return failed_test_array + + +async def get_failures_csv(session, project_id, job): + job_id = job["id"] + url = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}/jobs/{job_id}/artifacts/results/failures.csv" + async with session.get(url) as response: + if response.status == 200: + text = await response.text() + return text + else: + logging.debug(f"No response from: {url}") + return "" + + +async def get_test_failures(session, project_id, job): + failures_csv = await get_failures_csv(session, project_id, job) + if not failures_csv: + return "" + + # If just one test failed, don't bother with more complicated sorting + lines = failures_csv.splitlines() + if len(lines) == 1: + return ": " + lines[0] + "
" + + failed_test_array = sort_failed_tests_by_status(failures_csv) + failures_msg = "
" + failures_msg += get_failed_test_summary_message(failed_test_array) + failures_msg += get_failed_test_details(failed_test_array) + failures_msg += "
" + + return failures_msg + + +async def get_trace_failures(session, project_id, job): + project_json = await get_project_json(session, project_id) + path = project_json.get("path", "") + if not path: + return "" + + job_id = job["id"] + url = f"https://mesa.pages.freedesktop.org/-/{path}/-/jobs/{job_id}/artifacts/results/summary/problems.html" + async with session.get(url) as response: + if response.status == 200: + return url + else: + logging.debug(f"No response from: {url}") + return "" + + +async def get_project_json(session, project_id): + url_project_id = f"https://gitlab.freedesktop.org/api/v4/projects/{project_id}" + async with session.get(url_project_id) as response: + if response.status == 200: + return await response.json() + else: + logging.debug(f"No response from: {url_project_id}") + return "" + + +async def get_job_log(session: aiohttp.ClientSession, project_id: str, job_id: int): + project_json = await get_project_json(session, project_id) + path_with_namespace = project_json.get("path_with_namespace", "") + if not path_with_namespace: + return "" + + url_job_log = ( + f"https://gitlab.freedesktop.org/{path_with_namespace}/-/jobs/{job_id}/raw" + ) + async with session.get(url_job_log) as response: + if response.status == 200: + return await response.text() + else: + logging.debug(f"No response from job log: {url_job_log}") + return "" + + +async def search_job_log_for_errors(session, project_id, job): + log_error_message = "" + + # Bypass these generic error messages in hopes of finding a more specific error. + # The entries are case insensitive. Keep them in alphabetical order and don't + # forget to add a comma after each entry + ignore_list = [ + "403: b", + "aborting", + "building c", + "continuing", + "error_msg : None", + "error_type", + "error generated", + "errors generated", + "exit code", + "exit status", + "exiting now", + "job failed", + "no_error", + "no files to upload", + "performing test", + "ret code", + "retry", + "retry-all-errors", + "strerror_", + "success", + "unknown-section", + ] + job_log = await get_job_log(session, project_id, job["id"]) + + for line in reversed(job_log.splitlines()): + if "fatal" in line.lower(): + # remove date and formatting before fatal message + log_error_message = line[line.lower().find("fatal") :] + break + + if "error" in line.lower(): + if any(ignore.lower() in line.lower() for ignore in ignore_list): + continue + + # remove date and formatting before error message + log_error_message = line[line.lower().find("error") :].strip() + + # if there is no further info after the word error then it's not helpful + # so reset the message and try again. + if log_error_message.lower() in {"error", "errors", "error:", "errors:"}: + log_error_message = "" + continue + break + + # timeout msg from .gitlab-ci/lava/lava_job_submitter.py + if "expected to take at least" in line.lower(): + log_error_message = line + break + + return log_error_message + + +async def process_single_job(session, project_id, job): + job_url = job.get("web_url", "") + if not job_url: + logging.info(f"Job {job['name']} is missing a web_url") + + job_name = job.get("name", "Unnamed Job") + message = f"[{job_name}]({job_url})" + + # if a job times out it's cancelled, so worth mentioning here + if job["status"] == "canceled": + return f"{message}: canceled
" + + # if it's not a script failure then all we can do is give the gitlab assigned reason + if job["failure_reason"] != "script_failure": + return f"{message}: {job['failure_reason']}
" + + test_failures = await get_test_failures(session, project_id, job) + if test_failures: + return f"{message}{test_failures}" + + trace_failures = await get_trace_failures(session, project_id, job) + if trace_failures: + return f"{message}: has a [trace failure]({trace_failures})
" + + log_error_message = await search_job_log_for_errors(session, project_id, job) + if log_error_message: + return f"{message}: {log_error_message}
" + + return f"{message}
" + + +async def process_job_with_limit(session, project_id, job): + # Use at most 10 concurrent tasks + semaphore = asyncio.Semaphore(10) + async with semaphore: + return await process_single_job(session, project_id, job) + + +async def process_problem_jobs(session, project_id, problem_jobs): + + problem_jobs_count = len(problem_jobs) + + if problem_jobs_count == 1: + message = f"
There were problems with job: " + message += await process_single_job(session, project_id, problem_jobs[0]) + return message + + message = f"
" + message += f"" + message += f"There were problems with {problem_jobs_count} jobs: " + message += "" + + tasks = [process_job_with_limit(session, project_id, job) for job in problem_jobs] + + results = await asyncio.gather(*tasks) + + for result in results: + message += result + + message += f"
" + + return message + + +async def main(pipeline_id: str, project_id: str = "176") -> str: + + message = "" + + try: + timeout = aiohttp.ClientTimeout(total=120) + logging.basicConfig(level=logging.INFO) + + async with aiohttp.ClientSession(timeout=timeout) as session: + pipeline_status = await get_pipeline_status( + session, project_id, pipeline_id + ) + logging.debug(f"Pipeline status: {pipeline_status}") + if pipeline_status != "failed": + return message + + jobs = await get_jobs_for_pipeline(session, project_id, pipeline_id) + problem_jobs = get_problem_jobs(jobs) + + if len(problem_jobs) == 0: + return message + + message = await process_problem_jobs(session, project_id, problem_jobs) + except Exception as e: + logging.error(f"An error occurred: {e}") + return "" + + return message + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Fetch GitLab pipeline details") + parser.add_argument( + "--project-id", default="176", help="Project ID (default: 176 i.e. mesa/mesa)" + ) + parser.add_argument("--pipeline-id", required=True, help="Pipeline ID") + + args = parser.parse_args() + + message = asyncio.run(main(args.pipeline_id, args.project_id)) + + print(message) diff --git a/mesalib/bin/ci/pipeline_message.sh b/mesalib/bin/ci/pipeline_message.sh new file mode 100644 index 0000000000..84c2825488 --- /dev/null +++ b/mesalib/bin/ci/pipeline_message.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -eu + +this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")") +readonly this_dir + +exec \ + "$this_dir/../python-venv.sh" \ + "$this_dir/requirements.txt" \ + "$this_dir/pipeline_message.py" "$@" diff --git a/mesalib/bin/ci/requirements.txt b/mesalib/bin/ci/requirements.txt index a1448999b1..40f4c5dd8a 100644 --- a/mesalib/bin/ci/requirements.txt +++ b/mesalib/bin/ci/requirements.txt @@ -1,11 +1,19 @@ -colorama==0.4.5 +# If you change these requirements, and you need these packages +# to be available in the debian/x86_64_pyutils container +# then bump the DEBIAN_PYUTILS_TAG +PyYAML==6.* +colorama==0.4.* filecache==0.81 -gql==3.4.0 -kaleido==0.2.1 -python-dateutil==2.8.2 -pandas==2.1.1 -plotly==5.17.0 -python-gitlab==3.5.0 -PyYAML==6.0.1 -ruamel.yaml.clib==0.2.8 -ruamel.yaml==0.17.21 +filelock==3.* +fire==0.5.0 +flake8==7.* +gql==3.* +kaleido==0.2.* +lavacli==1.5.2 +pandas==2.* +plotly==5.* +python-dateutil==2.* +python-gitlab==4.* +ruamel.yaml.clib==0.2.* +ruamel.yaml==0.17.* +tabulate==0.9.* diff --git a/mesalib/bin/ci/test/requirements.txt b/mesalib/bin/ci/test/requirements.txt index f80621af28..5060531afe 100644 --- a/mesalib/bin/ci/test/requirements.txt +++ b/mesalib/bin/ci/test/requirements.txt @@ -1,5 +1,10 @@ +-r ../requirements.txt filelock==3.12.4 fire==0.5.0 +freezegun==1.5.1 +hypothesis==6.67.1 mock==5.1.0 polars==0.19.3 pytest==7.4.2 +pytest-asyncio==0.21.0 +pytest-cov==3.0.0 diff --git a/mesalib/bin/ci/test/test_gantt_chart.py b/mesalib/bin/ci/test/test_gantt_chart.py new file mode 100644 index 0000000000..84d6da0f26 --- /dev/null +++ b/mesalib/bin/ci/test/test_gantt_chart.py @@ -0,0 +1,201 @@ +from contextlib import suppress +from datetime import datetime, timedelta +from unittest import mock +from unittest.mock import MagicMock, patch + +import ci_post_gantt +import pytest +from ci_gantt_chart import generate_gantt_chart +from ci_post_gantt import Gitlab, MockGanttExit + + +def create_mock_job( + name, id, status, created_at, queued_duration, started_at, finished_at=None +): + mock_job = MagicMock() + mock_job.name = name + mock_job.status = status + 
mock_job.id = id + mock_job.created_at = created_at + mock_job.queued_duration = queued_duration + mock_job.started_at = started_at + mock_job.finished_at = finished_at + return mock_job + + +@pytest.fixture +def fake_pipeline(): + current_time = datetime.fromisoformat("2024-12-17 23:54:13.940091+00:00") + created_at = current_time - timedelta(minutes=10) + + job1 = create_mock_job( + name="job1", + id="1", + status="success", + created_at=created_at.isoformat(), + queued_duration=1, # seconds + started_at=(created_at + timedelta(seconds=2)).isoformat(), + finished_at=(created_at + timedelta(minutes=1)).isoformat(), + ) + + mock_pipeline = MagicMock() + mock_pipeline.web_url = "https://gitlab.freedesktop.org/mesa/mesa/-/pipelines/9999" + mock_pipeline.duration = 600 # Total pipeline duration in seconds + mock_pipeline.created_at = created_at.isoformat() + mock_pipeline.yaml_errors = False + mock_pipeline.jobs.list.return_value = [job1] + return mock_pipeline + + +def test_generate_gantt_chart(fake_pipeline): + fig = generate_gantt_chart(fake_pipeline) + + fig_dict = fig.to_dict() + assert "data" in fig_dict + + # Extract all job names from the "y" axis in the Gantt chart data + all_job_names = set() + for trace in fig_dict["data"]: + if "y" in trace: + all_job_names.update(trace["y"]) + + assert any( + "job1" in job for job in all_job_names + ), "job1 should be present in the Gantt chart" + + +def test_ci_timeout(fake_pipeline): + fig = generate_gantt_chart(fake_pipeline, ci_timeout=1) + + fig_dict = fig.to_dict() + + timeout_line = None + for shape in fig_dict.get("layout", {}).get("shapes", []): + if shape.get("line", {}).get("dash") == "dash": + timeout_line = shape + break + + assert timeout_line is not None, "Timeout line should exist in the Gantt chart" + timeout_x = timeout_line["x0"] + + # Check that the timeout line is 1 minute after the pipeline creation time + pipeline_created_at = datetime.fromisoformat(fake_pipeline.created_at) + expected_timeout = pipeline_created_at + timedelta(minutes=1) + assert ( + timeout_x == expected_timeout + ), f"Timeout should be at {expected_timeout}, got {timeout_x}" + + +def test_marge_bot_user_id(): + with patch("ci_post_gantt.Gitlab") as MockGitlab: + mock_gitlab_instance = MagicMock(spec=Gitlab) + mock_gitlab_instance.users = MagicMock() + MockGitlab.return_value = mock_gitlab_instance + + marge_bot_user_id = 12345 + ci_post_gantt.main("fake_token", None, marge_bot_user_id) + mock_gitlab_instance.users.get.assert_called_once_with(marge_bot_user_id) + + +def test_project_ids(): + current_time = datetime.now() + project_id_1 = 176 + event_1 = MagicMock() + event_1.project_id = project_id_1 + event_1.created_at = (current_time - timedelta(days=1)).isoformat() + event_1.note = {"body": f"Event for project {project_id_1}"} + + project_id_2 = 166 + event_2 = MagicMock() + event_2.project_id = project_id_2 + event_2.created_at = (current_time - timedelta(days=2)).isoformat() + event_2.note = {"body": f"Event for project {project_id_2}"} + + with patch("ci_post_gantt.Gitlab") as MockGitlab: + mock_user = MagicMock() + mock_user.events = MagicMock() + mock_user.events.list.return_value = [event_1, event_2] + + mock_gitlab_instance = MagicMock(spec=Gitlab) + mock_gitlab_instance.users = MagicMock() + mock_gitlab_instance.users.get.return_value = mock_user + MockGitlab.return_value = mock_gitlab_instance + + last_event_date = (current_time - timedelta(days=3)).isoformat() + + # Test a single project id + ci_post_gantt.main("fake_token", last_event_date) 
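These tests drive ci_post_gantt.main against a stubbed GitLab client rather than a live server; the core unittest.mock pattern is roughly the following (an illustrative sketch, not the test file's code):

    from unittest.mock import MagicMock

    gl = MagicMock()                       # stands in for gitlab.Gitlab(...)
    gl.users.get.return_value = MagicMock(username="marge-bot")

    user = gl.users.get(9716)              # attribute chains are auto-mocked
    gl.users.get.assert_called_once_with(9716)
    assert user.username == "marge-bot"
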
+ marge_bot_single_project_scope = [ + event.note["body"] + for event in mock_user.events.list.return_value + if event.project_id == project_id_1 + ] + assert f"Event for project {project_id_1}" in marge_bot_single_project_scope + assert f"Event for project {project_id_2}" not in marge_bot_single_project_scope + + # Test multiple project ids + ci_post_gantt.main( + "fake_token", last_event_date, 9716, [project_id_1, project_id_2] + ) + + marge_bot_multiple_project_scope = [ + event.note["body"] for event in mock_user.events.list.return_value + ] + assert f"Event for project {project_id_1}" in marge_bot_multiple_project_scope + assert f"Event for project {project_id_2}" in marge_bot_multiple_project_scope + + +def test_add_gantt_after_pipeline_message(): + current_time = datetime.now() + + plain_url = "https://gitlab.freedesktop.org/mesa/mesa/-/pipelines/12345" + plain_message = ( + f"I couldn't merge this branch: CI failed! See pipeline {plain_url}." + ) + event_plain = MagicMock() + event_plain.project_id = 176 + event_plain.created_at = (current_time - timedelta(days=1)).isoformat() + event_plain.note = {"body": plain_message} + + summary_url = "https://gitlab.freedesktop.org/mesa/mesa/-/pipelines/99999" + summary_message = ( + "I couldn't merge this branch: " + f"CI failed! See pipeline {summary_url}.
There were problems with job:"
+        "[lavapipe](https://gitlab.freedesktop.org/mesa/mesa/-/jobs/68141218)
"
+        "3 crashed testsdEQP-VK.ray_query.builtin.instancecustomindex.frag.aabbs,Crash
dEQP" + "-VK.ray_query.builtin.objecttoworld.frag.aabbs,Crash
dEQP-VK.sparse_resources.shader_intrinsics." + "2d_array_sparse_fetch.g16_b16r16_2plane_444_unorm.11_37_3_nontemporal,Crash
" + ) + event_with_summary = MagicMock() + event_with_summary.project_id = 176 + event_with_summary.created_at = (current_time - timedelta(days=1)).isoformat() + event_with_summary.note = {"body": summary_message} + + with patch("ci_post_gantt.Gitlab") as MockGitlab, patch( + "ci_post_gantt.get_gitlab_pipeline_from_url", return_value=None + ) as mock_get_gitlab_pipeline_from_url: + + def safe_mock(*args, **kwargs): + with suppress(TypeError): + raise MockGanttExit("Exiting for test purposes") + + mock_get_gitlab_pipeline_from_url.side_effect = safe_mock + + mock_user = MagicMock() + mock_user.events = MagicMock() + mock_user.events.list.return_value = [event_plain, event_with_summary] + + mock_gitlab_instance = MagicMock(spec=Gitlab) + mock_gitlab_instance.users = MagicMock() + mock_gitlab_instance.users.get.return_value = mock_user + MockGitlab.return_value = mock_gitlab_instance + + last_event_date = (current_time - timedelta(days=3)).isoformat() + ci_post_gantt.main("fake_token", last_event_date, 12345) + mock_get_gitlab_pipeline_from_url.assert_has_calls( + [ + mock.call(mock_gitlab_instance, plain_url), + mock.call(mock_gitlab_instance, summary_url), + ], + any_order=True, + ) diff --git a/mesalib/bin/ci/test/test_pipeline_message.py b/mesalib/bin/ci/test/test_pipeline_message.py new file mode 100644 index 0000000000..688497dcf1 --- /dev/null +++ b/mesalib/bin/ci/test/test_pipeline_message.py @@ -0,0 +1,309 @@ +import logging +from unittest.mock import AsyncMock, patch + +import pytest + +from pipeline_message import ( + get_failed_test_summary_message, + get_problem_jobs, + get_trace_failures, + main, + process_problem_jobs, + search_job_log_for_errors, + sort_failed_tests_by_status, + unexpected_improvements, +) + + +def test_get_problem_jobs(): + jobs = [ + {"stage": "build", "status": "failed"}, + {"stage": "test", "status": "canceled"}, + {"stage": "postmerge", "status": "failed"}, + {"stage": "performance", "status": "failed"}, + {"stage": "deploy", "status": "failed"}, + ] + + problem_jobs = get_problem_jobs(jobs) + + assert len(problem_jobs) == 3 + assert problem_jobs[0]["stage"] == "build" + assert problem_jobs[1]["stage"] == "test" + assert problem_jobs[2]["stage"] == "deploy" + + +def test_sort_failed_tests_by_status(): + failures_csv = """\ +Test1,UnexpectedImprovement +Test2,Fail +Test3,Crash +Test4,Timeout +Test5,Fail +Test6,UnexpectedImprovement +""" + sorted_tests = sort_failed_tests_by_status(failures_csv) + + assert len(sorted_tests["unexpected_improvements"]) == 2 + assert len(sorted_tests["fails"]) == 2 + assert len(sorted_tests["crashes"]) == 1 + assert len(sorted_tests["timeouts"]) == 1 + + assert sorted_tests["unexpected_improvements"] == [ + "Test1,UnexpectedImprovement", + "Test6,UnexpectedImprovement", + ] + assert sorted_tests["fails"] == ["Test2,Fail", "Test5,Fail"] + assert sorted_tests["crashes"] == ["Test3,Crash"] + assert sorted_tests["timeouts"] == ["Test4,Timeout"] + + +def test_get_failed_test_summary_message(): + failed_test_array = { + "unexpected_improvements": [ + "test1 UnexpectedImprovement", + "test2 UnexpectedImprovement", + ], + "fails": ["test3 Fail", "test4 Fail", "test5 Fail"], + "crashes": ["test6 Crash"], + "timeouts": [], + } + + summary_message = get_failed_test_summary_message(failed_test_array) + + assert "" in summary_message + assert "2 improved tests" in summary_message + assert "3 failed tests" in summary_message + assert "1 crashed test" in summary_message + assert "" in summary_message + + +def test_unexpected_improvements(): 
+ message = "" + failed_test_array = { + "unexpected_improvements": ["test_improvement_1", "test_improvement_2"], + "fails": [], + "crashes": [], + "timeouts": [], + } + result = unexpected_improvements(failed_test_array) + assert result == " 2 improved tests", f"Unexpected result: {result}" + + +@pytest.mark.asyncio +@patch("pipeline_message.get_pipeline_status", new_callable=AsyncMock) +async def test_gitlab_api_failure(mock_get_pipeline_status): + mock_get_pipeline_status.side_effect = Exception("GitLab API not responding") + message = await main("1234567") + assert message == "" + + +@pytest.mark.asyncio +async def test_no_message_when_pipeline_not_failed(): + project_id = "176" + pipeline_id = "12345" + + with patch( + "pipeline_message.get_pipeline_status", new_callable=AsyncMock + ) as mock_get_pipeline_status: + mock_get_pipeline_status.return_value = "success" + + message = await main(pipeline_id, project_id) + assert ( + message == "" + ), f"Expected no message for successful pipeline, but got: {message}" + + +@pytest.mark.asyncio +async def test_single_problem_job_not_summarized(): + session = AsyncMock() + project_id = "176" + problem_jobs = [ + { + "id": 1234, + "name": "test-job", + "web_url": "http://example.com/job/1234", + "status": "canceled", + } + ] + + mock_response = AsyncMock() + mock_response.status = 200 + mock_response.text.return_value = "" # Empty CSV response for test + session.get.return_value = mock_response + + message = await process_problem_jobs(session, project_id, problem_jobs) + + assert "summary" not in message + assert "[test-job](http://example.com/job/1234)" in message + + +@pytest.mark.asyncio +@patch("pipeline_message.get_project_json", new_callable=AsyncMock) +@patch("pipeline_message.aiohttp.ClientSession", autospec=True) +async def test_get_trace_failures_no_response( + mock_client_session_cls, mock_get_project_json, caplog +): + caplog.set_level(logging.DEBUG) + namespace = "mesa" + mock_get_project_json.return_value = {"path": namespace} + + mock_get = AsyncMock() + mock_get.status = 404 + + mock_session_instance = mock_client_session_cls.return_value + mock_session_instance.get.return_value = mock_get + + job_id = 12345678 + job = {"id": job_id} + url = await get_trace_failures(mock_session_instance, "176", job) + + assert url == "" + + expected_log_message = f"No response from: https://mesa.pages.freedesktop.org/-/{namespace}/-/jobs/{job_id}/artifacts/results/summary/problems.html" + assert any(expected_log_message in record.message for record in caplog.records) + + +@pytest.mark.asyncio +@patch("pipeline_message.get_job_log", new_callable=AsyncMock) +async def test_search_job_log_for_errors(mock_get_job_log): + session = AsyncMock() + project_id = "176" + job = {"id": 12345} + + job_log = r""" +error_msg: something useful +[0m15:41:36.102: GL_KHR_no_error GL_KHR_texture_compression_astc_sliced_3d +1 error generated +3 errors generated. 
+-- Looking for strerror_r - found +-- Looking for strerror_s - not found +[49/176] Building CXX object lib/Support/CMakeFiles/LLVMSupport.dir/ErrorHandling.cpp.o +[127/2034] Building C object lib/Support/CMakeFiles/LLVMSupport.dir/regerror.c.o +-- Performing Test HAS_WERROR_GLOBAL_CTORS +-- Performing Test C_SUPPORTS_WERROR_UNGUARDED_AVAILABILITY_NEW - Success +-- Performing Test LLVM_LIBSTDCXX_SOFT_ERROR +error aborting +error_msg : None +error_type : Job +[0Ksection_end:1734694783:job_data +[0K +[0m11:39:43.438: [1mFinished executing LAVA job in the attempt #3 [0m +[0Ksection_end:1734694783:lava_submit +[0K +[0;31m[01:54] ERROR: lava_submit: ret code: 1 [0m + +[0;31m[01:54] ERROR: unknown-section: ret code: 1 [0m +section_end:1734694783:step_script +[0Ksection_start:1734694783:after_script +[0K[0K[36;1mRunning after_script[0;m[0;m +[32;1mRunning after script...[0;m +[32;1m$ curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://" | tar --warning=no-timestamp --zstd -x[0;m +zstd: /*stdin*\: unexpected end of file # noqa: W605 +tar: Child returned status 1 +tar: Error is not recoverable: exiting now +section_end:1734695025:after_script +[0K[0;33mWARNING: after_script failed, but job will continue unaffected: exit code 1[0;m +section_start:1734695025:upload_artifacts_on_failure +[0K[0K[36;1mUploading artifacts for failed job[0;m[0;m +[32;1mUploading artifacts...[0;m +results/: found 11 matching artifact files and directories[0;m +Uploading artifacts as "archive" to coordinator... 201 Created[0;m id[0;m=68509685 responseStatus[0;m=201 Created token[0;m=glcbt-64 +[32;1mUploading artifacts...[0;m +[0;33mWARNING: results/junit.xml: no matching files. Ensure that the artifact path is relative to the working directory (/builds/mesa/mesa)[0;m +[31;1mERROR: No files to upload [0;m +section_end:1734695027:upload_artifacts_on_failure +[0Ksection_start:1734695027:cleanup_file_variables +[0K[0K[36;1mCleaning up project directory and file based variables[0;m[0;m +section_end:1734695027:cleanup_file_variables +[0K[31;1mERROR: Job failed: exit code 1 +[0;m +[0;m + """ + + mock_get_job_log.return_value = job_log + + error_message = await search_job_log_for_errors(session, project_id, job) + assert "something useful" in error_message + + +@pytest.mark.asyncio +@patch("pipeline_message.get_job_log", new_callable=AsyncMock) +async def test_search_job_log_for_fatal_errors(mock_get_job_log): + session = AsyncMock() + project_id = "176" + job = {"id": 12345} + + job_log = r""" +[0m15:41:36.105: [15:41:31.951] fatal: something fatal +Uploading artifacts as "archive" to coordinator... 201 Created[0;m id[0;m=68509685 responseStatus[0;m=201 Created token[0;m=glcbt-64 +[32;1mUploading artifacts...[0;m +[0;33mWARNING: results/junit.xml: no matching files. 
Ensure that the artifact path is relative to the working directory (/builds/mesa/mesa)[0;m +[31;1mERROR: No files to upload [0;m +section_end:1734695027:upload_artifacts_on_failure +[0Ksection_start:1734695027:cleanup_file_variables +[0K[0K[36;1mCleaning up project directory and file based variables[0;m[0;m +section_end:1734695027:cleanup_file_variables +[0K[31;1mERROR: Job failed: exit code 1 +[0;m +[0;m + """ + + mock_get_job_log.return_value = job_log + + error_message = await search_job_log_for_errors(session, project_id, job) + assert "something fatal" in error_message + + +@pytest.mark.asyncio +@patch("pipeline_message.get_job_log", new_callable=AsyncMock) +async def test_search_job_log_for_errors_but_find_none(mock_get_job_log): + session = AsyncMock() + project_id = "176" + job = {"id": 12345} + + job_log = r""" +[0KRunning with gitlab-runner 17.4.0 (b92ee590)[0;m +[0K on fdo-equinix-m3l-30-placeholder_63 XmDXAt7xd, system ID: s_785ae19292ea[0;m +section_start:1734736110:prepare_executor +[0K[0K[36;1mPreparing the "docker" executor[0;m[0;m +[0KUsing Docker executor with image registry.freedesktop.org/mesa/mesa/debian +[0KAuthenticating with credentials from job payload (GitLab Registry)[0;m +[0KPulling docker image registry.freedesktop.org/mesa/mesa/debian/x86_64_pyuti +[0KUsing docker image sha256:ebc7b3fe89be4d390775303adddb33539c235a2663165d78d +[0Ksection_start:1734736124:prepare_script +[0K[0K[36;1mPreparing environment[0;m[0;m +Running on runner-xmdxat7xd-project-23076-concurrent-1 via fdo-equinix-m3l-30... +section_end:1734736125:prepare_script +[0Ksection_start:1734736125:get_sources +[0K[0K[36;1mGetting source from Git repository[0;m[0;m +[32;1m$ /host/bin/curl -s -L --cacert /host/ca-certificates.crt --retry 4 -f --retry-delay 60 https://gitlab. +Checking if the user of the pipeline is allowed... +Checking if the job's project is part of a well-known group... +Checking if the job is part of an official MR pipeline... +Thank you for contributing to freedesktop.org +Running pre-clone script: 'set -o xtrace +wget -q -O download-git-cache.sh https://gitlab.freedesktop.org/mesa/mesa/-/raw/0d43b4cba639b809ad0e08a065ce01846e262249/.gitlab-ci/download-git-cache.sh +bash download-git-cache.sh +rm download-git-cache.sh +[31;1m errors +[0K[31;1mERROR: +[31;1m error +[31;1m Here is a blank error: +/builds/mesa/mesa/bin/ci/test/test_pipeline_message.py:162: AssertionError +Uploading artifacts as "archive" to coordinator... 201 Created[0;m id[0;m=68509685 responseStatus[0;m=201 Created token[0;m=glcbt-64 +[32;1mUploading artifacts...[0;m +[0;33mWARNING: results/junit.xml: no matching files. 
Ensure that the artifact path is relative to the working directory (/builds/mesa/mesa)[0;m +[31;1mERROR: No files to upload [0;m +section_end:1734695027:upload_artifacts_on_failure +[0Ksection_start:1734695027:cleanup_file_variables +[0K[0K[36;1mCleaning up project directory and file based variables[0;m[0;m +section_end:1734695027:cleanup_file_variables +[0K[31;1mERROR: Job failed: exit code 1 +[0;m +[0;m + """ + + mock_get_job_log.return_value = job_log + + error_message = await search_job_log_for_errors(session, project_id, job) + assert error_message == "", f"Unexpected error message: {error_message}" diff --git a/mesalib/bin/ci/update_traces_checksum.py b/mesalib/bin/ci/update_traces_checksum.py index 064573d556..f050c8f6e5 100644 --- a/mesalib/bin/ci/update_traces_checksum.py +++ b/mesalib/bin/ci/update_traces_checksum.py @@ -20,10 +20,11 @@ import gitlab from colorama import Fore, Style -from gitlab_common import get_gitlab_project, read_token, wait_for_pipeline +from gitlab_common import (get_gitlab_project, read_token, wait_for_pipeline, + get_gitlab_pipeline_from_url, TOKEN_DIR, get_token_from_default_dir) -DESCRIPTION_FILE = "export PIGLIT_REPLAY_DESCRIPTION_FILE='.*/install/(.*)'$" +DESCRIPTION_FILE = "export PIGLIT_REPLAY_DESCRIPTION_FILE=.*/install/(.*)$" DEVICE_NAME = "export PIGLIT_REPLAY_DEVICE_NAME='(.*)'$" @@ -40,7 +41,7 @@ def gather_results( cur_job = project.jobs.get(job.id) # get variables print(f"👁 {job.name}...") - log: list[str] = cur_job.trace().decode("unicode_escape").splitlines() + log: list[str] = cur_job.trace().decode("unicode_escape", "ignore").splitlines() filename: str = '' dev_name: str = '' for logline in log: @@ -69,7 +70,7 @@ def gather_results( target = yaml.load(target_file) # parse artifact - results_json_bz2 = cur_job.artifact(path="results/results.json.bz2", streamed=False) + results_json_bz2 = cur_job.artifact("results/results.json.bz2") results_json = bz2.decompress(results_json_bz2).decode("utf-8", errors="replace") results = json.loads(results_json) @@ -96,7 +97,11 @@ def gather_results( continue if "label" in target['traces'][trace][dev_name]: - print(f'{dev_name}: {trace}: please verify that label {Fore.BLUE}{target["traces"][trace][dev_name]["label"]}{Style.RESET_ALL} is still valid') + print( + f"{dev_name}: {trace}: please verify that label " + f"{Fore.BLUE}{target['traces'][trace][dev_name]['label']}{Style.RESET_ALL} " + "is still valid" + ) print(Fore.GREEN + f'{dev_name}: {trace}: checksum updated' + Style.RESET_ALL) target['traces'][trace][dev_name]['checksum'] = checksum @@ -113,12 +118,20 @@ def parse_args() -> None: epilog="Example: update_traces_checksum.py --rev $(git rev-parse HEAD) " ) parser.add_argument( - "--rev", metavar="revision", help="repository git revision", required=True + "--rev", metavar="revision", help="repository git revision", ) parser.add_argument( "--token", metavar="token", - help="force GitLab token, otherwise it's read from ~/.config/gitlab-token", + type=str, + default=get_token_from_default_dir(), + help="Use the provided GitLab token or token file, " + f"otherwise it's read from {TOKEN_DIR / 'gitlab-token'}", + ) + parser.add_argument( + "--pipeline-url", + metavar="pipeline_url", + help="specify a pipeline url", ) return parser.parse_args() @@ -133,8 +146,15 @@ def parse_args() -> None: cur_project = get_gitlab_project(gl, "mesa") - print(f"Revision: {args.rev}") - (pipe, cur_project) = wait_for_pipeline([cur_project], args.rev) + if args.pipeline_url: + pipe, cur_project = 
get_gitlab_pipeline_from_url(gl, args.pipeline_url) + REV = pipe.sha + else: + if not args.rev: + print('error: the following arguments are required: --rev') + sys.exit(1) + print(f"Revision: {args.rev}") + (pipe, cur_project) = wait_for_pipeline([cur_project], args.rev) print(f"Pipeline: {pipe.web_url}") gather_results(cur_project, pipe) diff --git a/mesalib/bin/flamegraph_map_lp_jit.py b/mesalib/bin/flamegraph_map_lp_jit.py new file mode 100644 index 0000000000..39e7277265 --- /dev/null +++ b/mesalib/bin/flamegraph_map_lp_jit.py @@ -0,0 +1,142 @@ +# +# Copyright 2024 Autodesk, Inc. +# +# SPDX-License-Identifier: MIT +# + +import argparse +from bisect import bisect_left, bisect_right +from dataclasses import dataclass +from pathlib import Path +import re + + +@dataclass +class Instruction: + address: int + assembly: str + samples: int = 0 + + +def mapping_address_key(mapping: tuple[int, int, str]): + return mapping[0] + + +def instruction_address_key(instruction: Instruction): + return instruction.address + + +def parse_mappings(map_file_path: Path): + mappings: list[tuple[int, int, str]] = [] + with open(map_file_path) as map_file: + for mapping in map_file: + address_hex, size_hex, name = mapping.split(' ') + address = int(address_hex, base=16) + mappings.append((address, address + int(size_hex, base=16), name.strip())) + + mappings.sort(key=mapping_address_key) + return mappings + + +def parse_traces(trace_file_path: Path): + pattern = re.compile(r'((?:[^;]+;)*?[^;]+) (\d+)\n') + + traces: list[tuple[list[str], int]] = [] + with open(trace_file_path) as trace_file: + for trace in trace_file: + match = pattern.fullmatch(trace) + traces.append((match.group(1).split(';'), int(match.group(2)))) + + return traces + + +def parse_asm(asm_file_path: Path): + symbol_pattern = re.compile(r'(\w+) ([0-9a-fA-F]+):\n') + instruction_pattern = re.compile(r' *([0-9a-fA-F]+):\t(.*?)\n') + + asm: dict[tuple[int, str], list[Instruction]] = {} + with open(asm_file_path) as asm_file: + current_instructions = None + for line in asm_file: + if match := symbol_pattern.fullmatch(line): + symbol = (int(match.group(2), base=16), match.group(1)) + current_instructions = asm[symbol] = [] + elif match := instruction_pattern.fullmatch(line): + current_instructions.append(Instruction(int(match.group(1), base=16), match.group(2))) + + return asm + + +def main(): + parser = argparse.ArgumentParser(description='Map LLVMPipe JIT addresses in FlameGraph style ' + 'collapsed stack traces to their symbol name. Also optionally ' + 'annotate JIT assembly dumps with sample counts.') + parser.add_argument('jit_symbol_map', type=Path, help='JIT symbol map from LLVMPipe') + parser.add_argument('collapsed_traces', type=Path) + parser.add_argument('-a', '--asm', type=Path, nargs='?', const='', metavar='asm_path', + help='JIT assembly dump from LLVMPipe. 
Defaults to ".asm"') + parser.add_argument('-o', '--out', type=Path, metavar='out_path') + arguments = parser.parse_args() + + mappings = parse_mappings(arguments.jit_symbol_map) + traces = parse_traces(arguments.collapsed_traces) + + asm = {} + asm_file_path: Path | None = arguments.asm + if asm_file_path: + if len(asm_file_path.parts) <= 0: + asm_file_path = Path(str(arguments.jit_symbol_map) + '.asm') + if asm_file_path.exists(): + asm = parse_asm(asm_file_path) + else: + asm = parse_asm(asm_file_path) + + merged_traces: dict[str, int] = {} + for stack, count in traces: + for i, function in enumerate(stack): + if not function.startswith('0x'): + continue + + address = int(function, base=16) + mapping = mappings[bisect_right(mappings, address, key=mapping_address_key) - 1] + if address < mapping[0] or address >= mapping[1]: + continue + + stack[i] = f'lp`{mapping[2]}@{mapping[0]:x}' + + symbol = (mapping[0], mapping[2]) + if symbol in asm: + instructions = asm[symbol] + instruction_address = address - symbol[0] + index = bisect_left(instructions, instruction_address, key=instruction_address_key) + if index < len(instructions) and instructions[index].address == instruction_address: + instructions[index].samples += count + + stack_key = ';'.join(stack) + if stack_key in merged_traces: + merged_traces[stack_key] += count + else: + merged_traces[stack_key] = count + + out_file_path: Path | None = arguments.out + if not out_file_path: + out_file_path = arguments.collapsed_traces.with_stem(f'{arguments.collapsed_traces.stem}_mapped') + with open(out_file_path, 'w') as out: + for t, c in merged_traces.items(): + print(f'{t} {c}', file=out) + + if asm: + annotated_asm_file_path = asm_file_path.with_stem(f'{asm_file_path.stem}_annotated') + with open(annotated_asm_file_path, 'w') as out: + for symbol, instructions in asm.items(): + print(f'{symbol[1]}: ;{symbol[0]:x}', file=out) + for instruction in instructions: + print(f'\t{instruction.assembly}', end='', file=out) + if instruction.samples: + print(f' ;s {instruction.samples}', file=out) + else: + print(file=out) + print(file=out) + +if __name__ == '__main__': + main() diff --git a/mesalib/bin/gen_release_notes.py b/mesalib/bin/gen_release_notes.py index 0725120ae2..222ff653dc 100644 --- a/mesalib/bin/gen_release_notes.py +++ b/mesalib/bin/gen_release_notes.py @@ -40,7 +40,7 @@ import docutils.parsers.rst.states as states CURRENT_GL_VERSION = '4.6' -CURRENT_VK_VERSION = '1.3' +CURRENT_VK_VERSION = '1.4' TEMPLATE = Template(textwrap.dedent("""\ ${header} @@ -65,8 +65,8 @@ the apiVersion property of the VkPhysicalDeviceProperties struct depends on the particular driver being used. 
-    SHA256 checksum
-    ---------------
+    SHA checksums
+    -------------
 
     ::
 
diff --git a/mesalib/bin/install_megadrivers.py b/mesalib/bin/install_megadrivers.py
index 60ee4576c6..e467f0d0e7 100644
--- a/mesalib/bin/install_megadrivers.py
+++ b/mesalib/bin/install_megadrivers.py
@@ -26,23 +26,37 @@ import os
 
 
+def resolve_libdir(libdir):
+    if os.path.isabs(libdir):
+        destdir = os.environ.get('DESTDIR')
+        if destdir:
+            return os.path.join(destdir, libdir[1:])
+        else:
+            return libdir
+    return os.path.join(os.environ['MESON_INSTALL_DESTDIR_PREFIX'], libdir)
+
+
 def main():
     parser = argparse.ArgumentParser()
     parser.add_argument('megadriver')
     parser.add_argument('libdir')
     parser.add_argument('drivers', nargs='+')
+    parser.add_argument('--megadriver-libdir')
+    parser.add_argument('--libname-suffix', required=True)
     args = parser.parse_args()
 
-    if os.path.isabs(args.libdir):
-        destdir = os.environ.get('DESTDIR')
-        if destdir:
-            to = os.path.join(destdir, args.libdir[1:])
-        else:
-            to = args.libdir
+    # Not necessarily at the end, there might be a version suffix, but let's
+    # make sure that the same suffix is in the megadriver lib name.
+    assert '.' + args.libname_suffix in args.megadriver
+
+    to = resolve_libdir(args.libdir)
+    if args.megadriver_libdir:
+        md_to = resolve_libdir(args.megadriver_libdir)
     else:
-        to = os.path.join(os.environ['MESON_INSTALL_DESTDIR_PREFIX'], args.libdir)
+        md_to = to
 
-    master = os.path.join(to, os.path.basename(args.megadriver))
+    basename = os.path.basename(args.megadriver)
+    master = os.path.join(to, basename)
 
     if not os.path.exists(to):
         if os.path.lexists(to):
@@ -54,15 +68,18 @@ def main():
         if os.path.lexists(abs_driver):
             os.unlink(abs_driver)
-        print('installing {} to {}'.format(args.megadriver, abs_driver))
-        os.link(master, abs_driver)
+
+        symlink = os.path.relpath(os.path.join(md_to, basename), start=to)
+
+        print(f'Installing symlink pointing to {symlink} to {abs_driver}')
+        os.symlink(symlink, abs_driver)
 
         try:
             ret = os.getcwd()
             os.chdir(to)
 
             name, ext = os.path.splitext(driver)
-            while ext != '.so':
+            while ext != '.' + args.libname_suffix:
                 if os.path.lexists(name):
                     os.unlink(name)
                 os.symlink(driver, name)
@@ -70,10 +87,9 @@ def main():
         finally:
             os.chdir(ret)
 
-    # Remove meson-created master .so and symlinks
-    os.unlink(master)
+    # Remove meson-created symlinks
     name, ext = os.path.splitext(master)
-    while ext != '.so':
+    while ext != '.' + args.libname_suffix:
         if os.path.lexists(name):
             os.unlink(name)
         name, ext = os.path.splitext(name)
diff --git a/mesalib/bin/khronos-update.py b/mesalib/bin/khronos-update.py
index 901d27ab88..43260af357 100644
--- a/mesalib/bin/khronos-update.py
+++ b/mesalib/bin/khronos-update.py
@@ -13,9 +13,13 @@ def error(msg: str) -> None:
 
 
 class Source:
-    def __init__(self, filename: str, url: typing.Optional[str]):
+    def __init__(self, filename: str, url: typing.Optional[str],
+                 template: typing.Optional[str] = None,
+                 remove: typing.Optional[str] = None):
         self.file = pathlib.Path(filename)
         self.url = url
+        self.template = template
+        self.remove = remove
 
     def sync(self) -> None:
         if self.url is None:
@@ -35,12 +39,37 @@ def sync(self) -> None:
         else:
             content = req.content
 
-        with open(self.file, 'wb') as f:
+        content = str(content, encoding='utf-8')
+        if self.remove is not None:
+            content = content.replace(self.remove, '')
+        if self.template is not None:
+            content = self.template % content
+
+        with open(self.file, 'w') as f:
             f.write(content)
 
         print('Done')
 
 
+VK_ANDROID_NATIVE_BUFFER_TEMPLATE = """\
+/* MESA: A hack to avoid #ifdefs in driver code. */
+#ifdef __ANDROID__
+
+#include <vndk/window.h>
+#if ANDROID_API_LEVEL < 28
+/* buffer_handle_t was defined in the deprecated system/window.h */
+typedef const native_handle_t *buffer_handle_t;
+#endif
+
+#else
+
+typedef void *buffer_handle_t;
+
+#endif
+
+%s\
+"""
+
 # a URL of `None` means there is no upstream, because *we* are the upstream
 SOURCES = [
     {
@@ -176,7 +205,8 @@ def sync(self) -> None:
             Source('include/vulkan/vulkan_xcb.h', 'https://github.com/KhronosGroup/Vulkan-Headers/raw/main/include/vulkan/vulkan_xcb.h'),
             Source('include/vulkan/vulkan_xlib.h', 'https://github.com/KhronosGroup/Vulkan-Headers/raw/main/include/vulkan/vulkan_xlib.h'),
             Source('include/vulkan/vulkan_xlib_xrandr.h', 'https://github.com/KhronosGroup/Vulkan-Headers/raw/main/include/vulkan/vulkan_xlib_xrandr.h'),
-            Source('include/vulkan/vk_android_native_buffer.h', 'https://android.googlesource.com/platform/frameworks/native/+/master/vulkan/include/vulkan/vk_android_native_buffer.h?format=TEXT'),
+            Source('include/vulkan/vk_android_native_buffer.h', 'https://android.googlesource.com/platform/frameworks/native/+/master/vulkan/include/vulkan/vk_android_native_buffer.h?format=TEXT',
+                   template=VK_ANDROID_NATIVE_BUFFER_TEMPLATE, remove='#include <cutils/native_handle.h>\n'),
             Source('include/vk_video/vulkan_video_codec_av1std.h', 'https://github.com/KhronosGroup/Vulkan-Headers/raw/main/include/vk_video/vulkan_video_codec_av1std.h'),
             Source('include/vk_video/vulkan_video_codec_av1std_decode.h', 'https://github.com/KhronosGroup/Vulkan-Headers/raw/main/include/vk_video/vulkan_video_codec_av1std_decode.h'),
             Source('include/vk_video/vulkan_video_codec_h264std.h', 'https://github.com/KhronosGroup/Vulkan-Headers/raw/main/include/vk_video/vulkan_video_codec_h264std.h'),
diff --git a/mesalib/bin/meson.build b/mesalib/bin/meson.build
index ef28adf34d..e6664d7fe7 100644
--- a/mesalib/bin/meson.build
+++ b/mesalib/bin/meson.build
@@ -1,22 +1,5 @@
 # Copyright © 2017 Eric Engestrom
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
+# SPDX-License-Identifier: MIT git_sha1_gen_py = files('git_sha1_gen.py') gen_vs_module_defs_py = files('gen_vs_module_defs.py') @@ -28,3 +11,7 @@ gen_vs_module_defs_normal_command = [ ] symbols_check = find_program('symbols-check.py') install_megadrivers_py = find_program('install_megadrivers.py') +install_megadrivers = [ + install_megadrivers_py.full_path(), + '--libname-suffix', libname_suffix, +] diff --git a/mesalib/bin/nir-test-runner.py b/mesalib/bin/nir-test-runner.py new file mode 100644 index 0000000000..831c243e69 --- /dev/null +++ b/mesalib/bin/nir-test-runner.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 + +# Copyright © 2024 Valve Corporation +# SPDX-License-Identifier: MIT + +import argparse +import collections +import subprocess +import os +import re +import sys +import tempfile +import textwrap +from pathlib import Path + +class TestFileChange: + def __init__(self, line, result): + self.line = line + self.result = result + +class TestFileChanges: + def __init__(self, name): + self.name = name + self.changes = [] + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--build-dir', '-B', required=False) + parser.add_argument('--test-filter', '-f', required=False) + parser.add_argument('--update-all', '-u', action='store_true') + args = parser.parse_args() + + bin_path = 'src/compiler/nir/nir_tests' + if args.build_dir: + bin_path = args.build_dir + '/' + bin_path + + if not os.path.isfile(bin_path): + print(f'{bin_path} \033[91m does not exist!\033[0m') + exit(1) + + build_args = ['meson', 'compile'] + if args.build_dir: + build_args.append(f'-C{args.build_dir}') + subprocess.run(build_args) + + test_args = [bin_path] + if args.test_filter: + test_args.append(f'--gtest_filter={args.test_filter}') + + env = os.environ.copy() + if args.update_all: + env['NIR_TEST_DUMP_SHADERS'] = 'true' + + output = subprocess.run(test_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, env=env) + + expected_pattern = re.compile(r'Expected \(([\d\w\W/.-_]+):(\d+)\):') + + test_result = None + expectations = collections.defaultdict(list) + + # Parse the output of the test binary and gather the changed shaders. + for output_line in output.stdout.split('\n'): + if output_line.startswith('Got:'): + test_result = '' + + continue + + if output_line.startswith('Expected ('): + match = expected_pattern.match(output_line) + file = match.group(1).removeprefix('../') + line = int(match.group(2)) + + expectations[file].append(TestFileChange(line, test_result.strip())) + + test_result = None + + continue + + if test_result is not None: + test_result += output_line + '\n' + + patches = [] + + # Generate patches for the changed shaders. 
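+    # Each affected test file is rewritten in memory: the body of every
+    # expectation block (closed by a `)"` line) is replaced with the newly
+    # captured result, and `git diff --no-index` against a temporary copy
+    # turns that edit into a reviewable patch.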
+ for file in expectations: + changes = expectations[file] + + updated_test_file = '' + change_index = 0 + line_index = 1 + inside_expectation = False + + with open(file) as test_file: + for test_line in test_file: + if test_line.strip().startswith(')\"'): + inside_expectation = False + + if not inside_expectation: + updated_test_file += test_line + + if change_index < len(changes) and line_index == changes[change_index].line: + inside_expectation = True + indentation = len(test_line) - len(test_line.lstrip()) + 3 + updated_test_file += textwrap.indent(changes[change_index].result, " " * indentation) + '\n' + change_index += 1 + + line_index += 1 + + with tempfile.NamedTemporaryFile(delete_on_close=False) as tmp: + tmp.write(bytes(updated_test_file, encoding="utf-8")) + tmp.close() + + diff = subprocess.run( + ['git', 'diff', '--no-index', file, tmp.name], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + patch = diff.stdout.replace(tmp.name, '/' + file) + + print(patch) + + patches.append(patch) + + if len(patches) != 0: + sys.stdout.write('\033[96mApply the changes listed above?\033[0m [Y/n]') + response = None + try: + response = input() + except KeyboardInterrupt: + print() + sys.exit(1) + + if response in ['', 'y', 'Y']: + for patch in patches: + apply = subprocess.Popen( + ['git', 'apply', '--allow-empty'], + stdin=subprocess.PIPE, + ) + apply.communicate(input=bytes(patch, encoding="utf-8")) diff --git a/mesalib/bin/perf-annotate-jit.py b/mesalib/bin/perf-annotate-jit.py index 680a775662..d62b4e8ee9 100644 --- a/mesalib/bin/perf-annotate-jit.py +++ b/mesalib/bin/perf-annotate-jit.py @@ -104,7 +104,7 @@ def lookupMap(filename, matchSymbol): def lookupAsm(filename, desiredFunction): stream = open(filename + '.asm', 'rt') - while stream.readline() != desiredFunction + ':\n': + while not stream.readline().startswith(desiredFunction + ' '): pass asm = [] diff --git a/mesalib/bin/pick/core.py b/mesalib/bin/pick/core.py index cde058602c..987f6621f6 100644 --- a/mesalib/bin/pick/core.py +++ b/mesalib/bin/pick/core.py @@ -71,10 +71,10 @@ class PickUIException(Exception): @enum.unique class NominationType(enum.Enum): - CC = 0 - FIXES = 1 - REVERT = 2 - NONE = 3 + NONE = 0 + CC = 1 + FIXES = 2 + REVERT = 3 BACKPORT = 4 diff --git a/mesalib/bin/renderdoc-update.py b/mesalib/bin/renderdoc-update.py new file mode 100644 index 0000000000..661083f68c --- /dev/null +++ b/mesalib/bin/renderdoc-update.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 + +import base64 +import pathlib +import requests +import subprocess + +def error(msg: str) -> None: + print('\033[31m' + msg + '\033[0m') + +if __name__ == '__main__': + git_toplevel = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'], + stderr=subprocess.DEVNULL).decode("ascii").strip() + if not pathlib.Path(git_toplevel).resolve() == pathlib.Path('.').resolve(): + error('Please run this script from the root folder ({})'.format(git_toplevel)) + exit(1) + + file = 'include/renderdoc_app.h' + url = 'https://raw.githubusercontent.com/baldurk/renderdoc/v1.1/renderdoc/api/app/renderdoc_app.h' + + print('Syncing {}...'.format(file), end=' ', flush=True) + req = requests.get(url) + + if not req.ok: + error('Failed to retrieve file: {} {}'.format(req.status_code, req.reason)) + exit(1) + + with open(file, 'wb') as f: + f.write(req.content) + + print('Done') diff --git a/mesalib/bin/toml_lint.py b/mesalib/bin/toml_lint.py new file mode 100644 index 0000000000..344228477e --- /dev/null +++ b/mesalib/bin/toml_lint.py @@ 
-0,0 +1,58 @@ +#!/usr/bin/env python3 + +import argparse +import pathlib +import re + + +def detect_misleading_indentation( + toml_path: str, + toml_lines: list[str], +) -> bool: + issue_detected = False + previous_indentation = 0 + for line_number, line in enumerate(toml_lines, start=1): + if match := re.match(r'^(\s*)\S', line): + line_indentation = len(match.group(1)) + if line_indentation < previous_indentation: + # Allow de-indenting when starting a new section (`[`) or + # terminating a multi-line list (`]`) + if not re.match(r'^\s*(\[|\])', line): + print(f'{toml_path}:{line_number}: ' + f'Misleading indentation found') + issue_detected = True + else: + line_indentation = 0 + previous_indentation = line_indentation + + return issue_detected + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + 'toml_files', + type=pathlib.Path, + nargs=argparse.ZERO_OR_MORE, + help='*.toml files to lint (default: src/**/ci/*.toml)', + ) + + args = parser.parse_args() + + if not args.toml_files: + args.toml_files = pathlib.Path('src').glob('**/ci/*.toml') + + error = False + + for path in args.toml_files: + with path.open('r') as toml_file: + toml_lines = toml_file.readlines() + if detect_misleading_indentation(path.as_posix(), toml_lines): + error = True + + if error: + exit(1) + + +if __name__ == '__main__': + main() diff --git a/mesalib/docs/_static/_redirects b/mesalib/docs/_extra/_redirects similarity index 100% rename from mesalib/docs/_static/_redirects rename to mesalib/docs/_extra/_redirects diff --git a/mesalib/docs/_static/relnotes/3.1 b/mesalib/docs/_extra/relnotes/3.1 similarity index 100% rename from mesalib/docs/_static/relnotes/3.1 rename to mesalib/docs/_extra/relnotes/3.1 diff --git a/mesalib/docs/_static/relnotes/3.2 b/mesalib/docs/_extra/relnotes/3.2 similarity index 100% rename from mesalib/docs/_static/relnotes/3.2 rename to mesalib/docs/_extra/relnotes/3.2 diff --git a/mesalib/docs/_static/relnotes/3.2.1 b/mesalib/docs/_extra/relnotes/3.2.1 similarity index 100% rename from mesalib/docs/_static/relnotes/3.2.1 rename to mesalib/docs/_extra/relnotes/3.2.1 diff --git a/mesalib/docs/_static/relnotes/3.3 b/mesalib/docs/_extra/relnotes/3.3 similarity index 100% rename from mesalib/docs/_static/relnotes/3.3 rename to mesalib/docs/_extra/relnotes/3.3 diff --git a/mesalib/docs/_static/relnotes/3.4 b/mesalib/docs/_extra/relnotes/3.4 similarity index 100% rename from mesalib/docs/_static/relnotes/3.4 rename to mesalib/docs/_extra/relnotes/3.4 diff --git a/mesalib/docs/_static/relnotes/3.4.1 b/mesalib/docs/_extra/relnotes/3.4.1 similarity index 100% rename from mesalib/docs/_static/relnotes/3.4.1 rename to mesalib/docs/_extra/relnotes/3.4.1 diff --git a/mesalib/docs/_static/relnotes/3.4.2 b/mesalib/docs/_extra/relnotes/3.4.2 similarity index 100% rename from mesalib/docs/_static/relnotes/3.4.2 rename to mesalib/docs/_extra/relnotes/3.4.2 diff --git a/mesalib/docs/_static/relnotes/3.5 b/mesalib/docs/_extra/relnotes/3.5 similarity index 100% rename from mesalib/docs/_static/relnotes/3.5 rename to mesalib/docs/_extra/relnotes/3.5 diff --git a/mesalib/docs/_static/relnotes/4.0 b/mesalib/docs/_extra/relnotes/4.0 similarity index 100% rename from mesalib/docs/_static/relnotes/4.0 rename to mesalib/docs/_extra/relnotes/4.0 diff --git a/mesalib/docs/_static/relnotes/4.0.1 b/mesalib/docs/_extra/relnotes/4.0.1 similarity index 100% rename from mesalib/docs/_static/relnotes/4.0.1 rename to mesalib/docs/_extra/relnotes/4.0.1 diff --git a/mesalib/docs/_static/relnotes/4.0.2 
b/mesalib/docs/_extra/relnotes/4.0.2 similarity index 100% rename from mesalib/docs/_static/relnotes/4.0.2 rename to mesalib/docs/_extra/relnotes/4.0.2 diff --git a/mesalib/docs/_static/relnotes/4.0.3 b/mesalib/docs/_extra/relnotes/4.0.3 similarity index 100% rename from mesalib/docs/_static/relnotes/4.0.3 rename to mesalib/docs/_extra/relnotes/4.0.3 diff --git a/mesalib/docs/_static/relnotes/4.1 b/mesalib/docs/_extra/relnotes/4.1 similarity index 100% rename from mesalib/docs/_static/relnotes/4.1 rename to mesalib/docs/_extra/relnotes/4.1 diff --git a/mesalib/docs/_static/relnotes/5.0 b/mesalib/docs/_extra/relnotes/5.0 similarity index 100% rename from mesalib/docs/_static/relnotes/5.0 rename to mesalib/docs/_extra/relnotes/5.0 diff --git a/mesalib/docs/_static/relnotes/5.0.1 b/mesalib/docs/_extra/relnotes/5.0.1 similarity index 100% rename from mesalib/docs/_static/relnotes/5.0.1 rename to mesalib/docs/_extra/relnotes/5.0.1 diff --git a/mesalib/docs/_static/relnotes/5.0.2 b/mesalib/docs/_extra/relnotes/5.0.2 similarity index 100% rename from mesalib/docs/_static/relnotes/5.0.2 rename to mesalib/docs/_extra/relnotes/5.0.2 diff --git a/mesalib/docs/_static/relnotes/5.1 b/mesalib/docs/_extra/relnotes/5.1 similarity index 100% rename from mesalib/docs/_static/relnotes/5.1 rename to mesalib/docs/_extra/relnotes/5.1 diff --git a/mesalib/docs/_static/relnotes/6.0 b/mesalib/docs/_extra/relnotes/6.0 similarity index 100% rename from mesalib/docs/_static/relnotes/6.0 rename to mesalib/docs/_extra/relnotes/6.0 diff --git a/mesalib/docs/_static/relnotes/6.0.1 b/mesalib/docs/_extra/relnotes/6.0.1 similarity index 100% rename from mesalib/docs/_static/relnotes/6.0.1 rename to mesalib/docs/_extra/relnotes/6.0.1 diff --git a/mesalib/docs/_static/relnotes/6.1 b/mesalib/docs/_extra/relnotes/6.1 similarity index 100% rename from mesalib/docs/_static/relnotes/6.1 rename to mesalib/docs/_extra/relnotes/6.1 diff --git a/mesalib/docs/_static/relnotes/6.2 b/mesalib/docs/_extra/relnotes/6.2 similarity index 100% rename from mesalib/docs/_static/relnotes/6.2 rename to mesalib/docs/_extra/relnotes/6.2 diff --git a/mesalib/docs/_static/relnotes/6.2.1 b/mesalib/docs/_extra/relnotes/6.2.1 similarity index 100% rename from mesalib/docs/_static/relnotes/6.2.1 rename to mesalib/docs/_extra/relnotes/6.2.1 diff --git a/mesalib/docs/_static/relnotes/6.3 b/mesalib/docs/_extra/relnotes/6.3 similarity index 100% rename from mesalib/docs/_static/relnotes/6.3 rename to mesalib/docs/_extra/relnotes/6.3 diff --git a/mesalib/docs/_static/relnotes/6.3.1 b/mesalib/docs/_extra/relnotes/6.3.1 similarity index 100% rename from mesalib/docs/_static/relnotes/6.3.1 rename to mesalib/docs/_extra/relnotes/6.3.1 diff --git a/mesalib/docs/_static/relnotes/6.3.2 b/mesalib/docs/_extra/relnotes/6.3.2 similarity index 100% rename from mesalib/docs/_static/relnotes/6.3.2 rename to mesalib/docs/_extra/relnotes/6.3.2 diff --git a/mesalib/docs/_static/specs/EGL_MESA_device_software.txt b/mesalib/docs/_extra/specs/EGL_MESA_device_software.txt similarity index 100% rename from mesalib/docs/_static/specs/EGL_MESA_device_software.txt rename to mesalib/docs/_extra/specs/EGL_MESA_device_software.txt diff --git a/mesalib/docs/_static/specs/EGL_MESA_drm_image_formats.txt b/mesalib/docs/_extra/specs/EGL_MESA_drm_image_formats.txt similarity index 100% rename from mesalib/docs/_static/specs/EGL_MESA_drm_image_formats.txt rename to mesalib/docs/_extra/specs/EGL_MESA_drm_image_formats.txt diff --git a/mesalib/docs/_static/specs/EGL_MESA_platform_surfaceless.txt 
b/mesalib/docs/_extra/specs/EGL_MESA_platform_surfaceless.txt similarity index 100% rename from mesalib/docs/_static/specs/EGL_MESA_platform_surfaceless.txt rename to mesalib/docs/_extra/specs/EGL_MESA_platform_surfaceless.txt diff --git a/mesalib/docs/_static/specs/EGL_MESA_query_driver.txt b/mesalib/docs/_extra/specs/EGL_MESA_query_driver.txt similarity index 100% rename from mesalib/docs/_static/specs/EGL_MESA_query_driver.txt rename to mesalib/docs/_extra/specs/EGL_MESA_query_driver.txt diff --git a/mesalib/docs/_static/specs/EGL_MESA_x11_native_visual_id.txt b/mesalib/docs/_extra/specs/EGL_MESA_x11_native_visual_id.txt similarity index 100% rename from mesalib/docs/_static/specs/EGL_MESA_x11_native_visual_id.txt rename to mesalib/docs/_extra/specs/EGL_MESA_x11_native_visual_id.txt diff --git a/mesalib/docs/_static/specs/EXT_shader_integer_mix.spec b/mesalib/docs/_extra/specs/EXT_shader_integer_mix.spec similarity index 100% rename from mesalib/docs/_static/specs/EXT_shader_integer_mix.spec rename to mesalib/docs/_extra/specs/EXT_shader_integer_mix.spec diff --git a/mesalib/docs/_static/specs/EXT_shader_samples_identical.txt b/mesalib/docs/_extra/specs/EXT_shader_samples_identical.txt similarity index 100% rename from mesalib/docs/_static/specs/EXT_shader_samples_identical.txt rename to mesalib/docs/_extra/specs/EXT_shader_samples_identical.txt diff --git a/mesalib/docs/_static/specs/INTEL_shader_atomic_float_minmax.txt b/mesalib/docs/_extra/specs/INTEL_shader_atomic_float_minmax.txt similarity index 100% rename from mesalib/docs/_static/specs/INTEL_shader_atomic_float_minmax.txt rename to mesalib/docs/_extra/specs/INTEL_shader_atomic_float_minmax.txt diff --git a/mesalib/docs/_static/specs/MESA_bgra.txt b/mesalib/docs/_extra/specs/MESA_bgra.txt similarity index 100% rename from mesalib/docs/_static/specs/MESA_bgra.txt rename to mesalib/docs/_extra/specs/MESA_bgra.txt diff --git a/mesalib/docs/_static/specs/MESA_configless_context.spec b/mesalib/docs/_extra/specs/MESA_configless_context.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_configless_context.spec rename to mesalib/docs/_extra/specs/MESA_configless_context.spec diff --git a/mesalib/docs/_static/specs/MESA_copy_sub_buffer.spec b/mesalib/docs/_extra/specs/MESA_copy_sub_buffer.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_copy_sub_buffer.spec rename to mesalib/docs/_extra/specs/MESA_copy_sub_buffer.spec diff --git a/mesalib/docs/_static/specs/MESA_drm_image.spec b/mesalib/docs/_extra/specs/MESA_drm_image.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_drm_image.spec rename to mesalib/docs/_extra/specs/MESA_drm_image.spec diff --git a/mesalib/docs/_static/specs/MESA_framebuffer_flip_y.txt b/mesalib/docs/_extra/specs/MESA_framebuffer_flip_y.txt similarity index 100% rename from mesalib/docs/_static/specs/MESA_framebuffer_flip_y.txt rename to mesalib/docs/_extra/specs/MESA_framebuffer_flip_y.txt diff --git a/mesalib/docs/_static/specs/MESA_image_dma_buf_export.txt b/mesalib/docs/_extra/specs/MESA_image_dma_buf_export.txt similarity index 100% rename from mesalib/docs/_static/specs/MESA_image_dma_buf_export.txt rename to mesalib/docs/_extra/specs/MESA_image_dma_buf_export.txt diff --git a/mesalib/docs/_static/specs/MESA_pack_invert.spec b/mesalib/docs/_extra/specs/MESA_pack_invert.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_pack_invert.spec rename to mesalib/docs/_extra/specs/MESA_pack_invert.spec diff --git 
a/mesalib/docs/_static/specs/MESA_pixmap_colormap.spec b/mesalib/docs/_extra/specs/MESA_pixmap_colormap.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_pixmap_colormap.spec rename to mesalib/docs/_extra/specs/MESA_pixmap_colormap.spec diff --git a/mesalib/docs/_static/specs/MESA_query_renderer.spec b/mesalib/docs/_extra/specs/MESA_query_renderer.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_query_renderer.spec rename to mesalib/docs/_extra/specs/MESA_query_renderer.spec diff --git a/mesalib/docs/_static/specs/MESA_release_buffers.spec b/mesalib/docs/_extra/specs/MESA_release_buffers.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_release_buffers.spec rename to mesalib/docs/_extra/specs/MESA_release_buffers.spec diff --git a/mesalib/docs/_static/specs/MESA_sampler_objects.spec b/mesalib/docs/_extra/specs/MESA_sampler_objects.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_sampler_objects.spec rename to mesalib/docs/_extra/specs/MESA_sampler_objects.spec diff --git a/mesalib/docs/_static/specs/MESA_shader_integer_functions.txt b/mesalib/docs/_extra/specs/MESA_shader_integer_functions.txt similarity index 100% rename from mesalib/docs/_static/specs/MESA_shader_integer_functions.txt rename to mesalib/docs/_extra/specs/MESA_shader_integer_functions.txt diff --git a/mesalib/docs/_static/specs/MESA_swap_control.spec b/mesalib/docs/_extra/specs/MESA_swap_control.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_swap_control.spec rename to mesalib/docs/_extra/specs/MESA_swap_control.spec diff --git a/mesalib/docs/_static/specs/MESA_texture_const_bandwidth.spec b/mesalib/docs/_extra/specs/MESA_texture_const_bandwidth.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_texture_const_bandwidth.spec rename to mesalib/docs/_extra/specs/MESA_texture_const_bandwidth.spec diff --git a/mesalib/docs/_static/specs/MESA_texture_signed_rgba.spec b/mesalib/docs/_extra/specs/MESA_texture_signed_rgba.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_texture_signed_rgba.spec rename to mesalib/docs/_extra/specs/MESA_texture_signed_rgba.spec diff --git a/mesalib/docs/_static/specs/MESA_window_pos.spec b/mesalib/docs/_extra/specs/MESA_window_pos.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_window_pos.spec rename to mesalib/docs/_extra/specs/MESA_window_pos.spec diff --git a/mesalib/docs/_static/specs/MESA_ycbcr_texture.spec b/mesalib/docs/_extra/specs/MESA_ycbcr_texture.spec similarity index 100% rename from mesalib/docs/_static/specs/MESA_ycbcr_texture.spec rename to mesalib/docs/_extra/specs/MESA_ycbcr_texture.spec diff --git a/mesalib/docs/_static/specs/WL_bind_wayland_display.spec b/mesalib/docs/_extra/specs/WL_bind_wayland_display.spec similarity index 100% rename from mesalib/docs/_static/specs/WL_bind_wayland_display.spec rename to mesalib/docs/_extra/specs/WL_bind_wayland_display.spec diff --git a/mesalib/docs/_static/specs/WL_create_wayland_buffer_from_image.spec b/mesalib/docs/_extra/specs/WL_create_wayland_buffer_from_image.spec similarity index 100% rename from mesalib/docs/_static/specs/WL_create_wayland_buffer_from_image.spec rename to mesalib/docs/_extra/specs/WL_create_wayland_buffer_from_image.spec diff --git a/mesalib/docs/_static/specs/enums.txt b/mesalib/docs/_extra/specs/enums.txt similarity index 100% rename from mesalib/docs/_static/specs/enums.txt rename to mesalib/docs/_extra/specs/enums.txt diff --git 
a/mesalib/docs/_exts/bootstrap.py b/mesalib/docs/_exts/bootstrap.py
index bb0e595c4d..18e46cea16 100644
--- a/mesalib/docs/_exts/bootstrap.py
+++ b/mesalib/docs/_exts/bootstrap.py
@@ -96,7 +96,7 @@ def visit_table(self, node):
         self.body.append(tag)
 
 
 def setup_translators(app):
-    if app.builder.default_translator_class is None:
+    if app.builder.format != "html":
         return
 
     if not app.registry.translators.items():
@@ -111,10 +111,6 @@ def setup_translators(app):
         app.set_translator(app.builder.name, translator, override=True)
     else:
         for name, klass in app.registry.translators.items():
-            if app.builder.format != "html":
-                # Skip translators that are not HTML
-                continue
-
             translator = types.new_class(
                 "BootstrapHTML5Translator",
                 (
diff --git a/mesalib/docs/_exts/depfile.py b/mesalib/docs/_exts/depfile.py
new file mode 100644
index 0000000000..40838439c7
--- /dev/null
+++ b/mesalib/docs/_exts/depfile.py
@@ -0,0 +1,34 @@
+# Copyright © 2021 Collabora Ltd
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+def create_depfile(app, env):
+    if not app.config.depfile:
+        return
+
+    with open(app.config.depfile, 'w') as f:
+        for doc in env.found_docs:
+            path = env.doc2path(doc)
+            f.write('{0}: {1}\n'.format(app.outdir, path))
+
+def setup(app):
+    app.add_config_value('depfile', None, 'env')
+    app.connect('env-updated', create_depfile)
diff --git a/mesalib/docs/android.rst b/mesalib/docs/android.rst
index 0034706bb7..b46dddfaa5 100644
--- a/mesalib/docs/android.rst
+++ b/mesalib/docs/android.rst
@@ -11,6 +11,10 @@ needs a built Android tree to build against, and it has never been
 tested in CI. The Meson build system flow is frequently used by Chrome OS
 developers for building and testing Android drivers.
 
+When building llvmpipe or lavapipe for Android, the ndk-build workflow
+is also used, but there are additional steps required to add the driver
+to the Android OS image.
+
 Building using the Android NDK
 ------------------------------
 
@@ -23,14 +27,14 @@ Then, create your Meson cross file to use it, something like this
 
    [binaries]
    ar = 'NDKDIR/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android-ar'
    c = ['ccache', 'NDKDIR/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang']
-   cpp = ['ccache', 'NDKDIR/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang++', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables', '-static-libstdc++']
+   cpp = ['ccache', 'NDKDIR/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang++', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables', '--start-no-unused-arguments', '-static-libstdc++', '--end-no-unused-arguments']
    c_ld = 'lld'
    cpp_ld = 'lld'
    strip = 'NDKDIR/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android-strip'
    # Android doesn't come with a pkg-config, but we need one for Meson to be happy not
    # finding all the optional deps it looks for. Use system pkg-config pointing at a
    # directory we get to populate with any .pc files we want to add for Android
-   pkgconfig = ['env', 'PKG_CONFIG_LIBDIR=NDKDIR/pkgconfig', '/usr/bin/pkg-config']
+   pkg-config = ['env', 'PKG_CONFIG_LIBDIR=NDKDIR/pkgconfig', '/usr/bin/pkg-config']
 
    [host_machine]
    system = 'android'
@@ -175,3 +179,246 @@ container and let it restart:
 
 .. code-block:: sh
 
    kill $(cat /run/containers/android-run_oci/container.pid )
+
+Adding out-of-tree drivers to Android OS image
+----------------------------------------------
+
+When building your own Android OS images, it's possible to add
+drivers built out of tree directly into the OS image. For
+running llvmpipe and lavapipe on Android, this step is required
+to ensure Android is able to load the drivers correctly.
+
+The following steps provide an example for building
+the Android cuttlefish image, following the official Android
+documentation from https://source.android.com/docs/setup
+
+When building llvmpipe or lavapipe for Android, these steps are
+required so that the permissions for accessing the libraries
+are set correctly.
+
+Following the Android documentation, we can run the following
+commands:
+
+.. code-block:: sh
+
+   repo init -b main -u https://android.googlesource.com/platform/manifest
+   repo sync -c -j8
+
+   source build/envsetup.sh
+   lunch aosp_cf_x86_64_phone-trunk_staging-userdebug
+
+Be aware that the sync command can take a long time to run as
+it will download all of the source code. This will set up
+the ``aosp_cf_x86_64_phone-trunk_staging-userdebug`` build target
+for Android. Please note that the x86_64 cuttlefish target will require
+you to build Mesa for both 32-bit and 64-bit. Next we need to copy the
+built driver libraries into the Android source tree and patch the binary
+names.
+
+.. code-block:: sh
+
+   mkdir prebuilts/mesa
+   mkdir prebuilts/mesa/x86_64
+   mkdir prebuilts/mesa/x86
+   cp ${INSTALL_PREFIX_64}/lib/libEGL.so prebuilts/mesa/x86_64/
+   cp ${INSTALL_PREFIX_64}/lib/libglapi.so prebuilts/mesa/x86_64/
+   cp ${INSTALL_PREFIX_64}/lib/libgallium_dri.so prebuilts/mesa/x86_64/
+   cp ${INSTALL_PREFIX_64}/lib/libGLESv1_CM.so prebuilts/mesa/x86_64/
+   cp ${INSTALL_PREFIX_64}/lib/libGLESv2.so prebuilts/mesa/x86_64/
+   cp ${INSTALL_PREFIX_64}/lib/libvulkan_lvp.so prebuilts/mesa/x86_64/
+   cp ${INSTALL_PREFIX_32}/lib/libEGL.so prebuilts/mesa/x86
+   cp ${INSTALL_PREFIX_32}/lib/libglapi.so prebuilts/mesa/x86
+   cp ${INSTALL_PREFIX_32}/lib/libgallium_dri.so prebuilts/mesa/x86/
+   cp ${INSTALL_PREFIX_32}/lib/libGLESv1_CM.so prebuilts/mesa/x86
+   cp ${INSTALL_PREFIX_32}/lib/libGLESv2.so prebuilts/mesa/x86
+   cp ${INSTALL_PREFIX_32}/lib/libvulkan_lvp.so prebuilts/mesa/x86
+
+   patchelf --set-soname libEGL_lp.so prebuilts/mesa/x86_64/libEGL.so
+   patchelf --set-soname libGLESv1_CM_lp.so prebuilts/mesa/x86_64/libGLESv1_CM.so
+   patchelf --set-soname libGLESv2_lp.so prebuilts/mesa/x86_64/libGLESv2.so
+   patchelf --set-soname vulkan.lvp.so prebuilts/mesa/x86_64/libvulkan_lvp.so
+   patchelf --set-soname libEGL_lp.so prebuilts/mesa/x86/libEGL.so
+   patchelf --set-soname libGLESv1_CM_lp.so prebuilts/mesa/x86/libGLESv1_CM.so
+   patchelf --set-soname libGLESv2_lp.so prebuilts/mesa/x86/libGLESv2.so
+   patchelf --set-soname vulkan.lvp.so prebuilts/mesa/x86/libvulkan_lvp.so
+
+We then need to create a ``prebuilts/mesa/Android.bp`` build file to include
+the libraries in the build.
+
+.. code-block::
+
+   cc_prebuilt_library_shared {
+       name: "libglapi",
+       arch: {
+           x86_64: {
+               srcs: ["x86_64/libglapi.so"],
+           },
+           x86: {
+               srcs: ["x86/libglapi.so"],
+           },
+       },
+       strip: {
+           none: true,
+       },
+       relative_install_path: "egl",
+       shared_libs: ["libc", "libdl", "liblog", "libm"],
+       vendor: true
+   }
+
+   cc_prebuilt_library_shared {
+       name: "libgallium_dri",
+       arch: {
+           x86_64: {
+               srcs: ["x86_64/libgallium_dri.so"],
+           },
+           x86: {
+               srcs: ["x86/libgallium_dri.so"],
+           },
+       },
+       strip: {
+           none: true,
+       },
+       relative_install_path: "egl",
+       shared_libs: ["libc", "libdl", "liblog", "libm"],
+       check_elf_files: false,
+       vendor: true
+   }
+
+   cc_prebuilt_library_shared {
+       name: "libEGL_lp",
+       arch: {
+           x86_64: {
+               srcs: ["x86_64/libEGL.so"],
+           },
+           x86: {
+               srcs: ["x86/libEGL.so"],
+           },
+       },
+       strip: {
+           none: true,
+       },
+       relative_install_path: "egl",
+       shared_libs: ["libc", "libdl", "liblog", "libm", "libcutils", "libdrm", "libhardware", "libnativewindow", "libsync"],
+       check_elf_files: false,
+       vendor: true
+   }
+
+   cc_prebuilt_library_shared {
+       name: "libGLESv1_CM_lp",
+       arch: {
+           x86_64: {
+               srcs: ["x86_64/libGLESv1_CM.so"],
+           },
+           x86: {
+               srcs: ["x86/libGLESv1_CM.so"],
+           },
+       },
+       strip: {
+           none: true,
+       },
+       relative_install_path: "egl",
+       shared_libs: ["libc", "libdl", "liblog", "libm"],
+       check_elf_files: false,
+       vendor: true
+   }
+
+   cc_prebuilt_library_shared {
+       name: "libGLESv2_lp",
+       arch: {
+           x86_64: {
+               srcs: ["x86_64/libGLESv2.so"],
+           },
+           x86: {
+               srcs: ["x86/libGLESv2.so"],
+           },
+       },
+       strip: {
+           none: true,
+       },
+       relative_install_path: "egl",
+       shared_libs: ["libc", "libdl", "liblog", "libm"],
+       check_elf_files: false,
+       vendor: true
+   }
+
+   cc_prebuilt_library_shared {
+       name: "vulkan.lvp",
+       arch: {
+           x86_64: {
+               srcs: ["x86_64/libvulkan_lvp.so"],
+           },
+           x86: {
+               srcs: ["x86/libvulkan_lvp.so"],
+           },
+       },
+       strip: {
+           none: true,
+       },
+       relative_install_path: "hw",
+       shared_libs: ["libc", "libdl", "liblog", "libm", "libcutils", "libdrm", "libnativewindow", "libsync", "libz"],
"libdl", "liblog", "libm", "libcutils", "libdrm", "liblog", "libnativewindow", "libsync", "libz"], + vendor: true + } + + +Next we need to update the device configuration to include the libraries +in the build, as well as set the appropriate system properties. We can +create the file +``device/google/cuttlefish/shared/mesa/device_vendor.mk`` + + +.. code-block:: makefile + + PRODUCT_SOONG_NAMESPACES += prebuilts/mesa + PRODUCT_PACKAGES += libglapi \ + libGLESv1_CM_lp \ + libGLESv2_lp \ + libEGL_lp \ + libgallium_dri.so \ + vulkan.lvp + PRODUCT_VENDOR_PROPERTIES += \ + ro.hardware.egl=lp \ + ro.hardware.vulkan=lvp \ + mesa.libgl.always.software=true \ + mesa.android.no.kms.swrast=true \ + debug.hwui.renderer=opengl \ + ro.gfx.angle.supported=false \ + debug.sf.disable_hwc_vds=1 \ + ro.vendor.hwcomposer.mode=client + +Also the file ``device/google/cuttlefish/shared/mesa/BoardConfig.mk`` + +.. code-block:: makefile + + BOARD_VENDOR_SEPOLICY_DIRS += \ + device/google/cuttlefish/shared/mesa/sepolicy + +Next the file ``device/google/cuttlefish/shared/mesa/sepolicy/file_contexts`` + +.. code-block:: sh + + /vendor/lib(64)?/egl/libEGL_lp\.so u:object_r:same_process_hal_file:s0 + /vendor/lib(64)?/egl/libGLESv1_CM_lp\.so u:object_r:same_process_hal_file:s0 + /vendor/lib(64)?/egl/libGLESv2_lp\.so u:object_r:same_process_hal_file:s0 + /vendor/lib(64)?/libglapi\.so u:object_r:same_process_hal_file:s0 + /vendor/lib(64)?/libgallium_dri\.so u:object_r:same_process_hal_file:s0 + /vendor/lib(64)?/hw/vulkan\.lvp\.so u:object_r:same_process_hal_file:s0 + +After creating these files we need to modify the existing config files +to include these build files. First we modify +``device/google/cuttlefish/shared/phone/device_vendor.mk`` +to add the below code in the spot where other device_vendor +files are included. + +.. code-block:: sh + + $(call inherit-product, device/google/cuttlefish/shared/mesa/device_vendor.mk) + +Lastly we modify +``device/google/cuttlefish/vsoc_x86_64/BoardConfig.mk`` to include +the following line where the other BoardConfig files are included + +.. code-block:: sh + + -include device/google/cuttlefish/shared/mesa/BoardConfig.mk + +Then we are set to continue following the official instructions to +build the cuttlefish target and run it in the cuttlefish emulator. diff --git a/mesalib/docs/ci/bare-metal.rst b/mesalib/docs/ci/bare-metal.rst index b9d5f654f7..772f5f4c98 100644 --- a/mesalib/docs/ci/bare-metal.rst +++ b/mesalib/docs/ci/bare-metal.rst @@ -53,7 +53,7 @@ of needing more storage on the runner. Telling the board about where its TFTP and NFS should come from is done using dnsmasq on the runner host. For example, this snippet in -the dnsmasq.conf.d in the google farm, with the gitlab-runner host we +the dnsmasq.conf.d in the Google farm, with the gitlab-runner host we call "servo":: dhcp-host=1c:69:7a:0d:a3:d3,10.42.0.10,set:servo @@ -123,7 +123,7 @@ With that set up, you should be able to power on/off a port with something like: Note that the "1.3.6..." SNMP OID changes between switches. The last digit above is the interface id (port number). You can probably find the right OID by -google, that was easier than figuring it out from finding the switch's MIB +Google, that was easier than figuring it out from finding the switch's MIB database. You can query the POE status from the switch serial using the ``show power inline`` command. 
diff --git a/mesalib/docs/ci/index.rst b/mesalib/docs/ci/index.rst
index d173d74a3c..484672b00d 100644
--- a/mesalib/docs/ci/index.rst
+++ b/mesalib/docs/ci/index.rst
@@ -302,8 +302,8 @@ and cancel the rest to avoid wasting resources.
 See ``bin/ci/ci_run_n_monitor.py --help`` for all the options.
 
 The ``--target`` argument takes a regex that you can use to select the
-jobs names you want to run, eg. ``--target 'zink.*'`` will run all the
-zink jobs, leaving the other drivers' jobs free for others to use.
+job names you want to run, e.g. ``--target 'zink.*'`` will run all the
+Zink jobs, leaving the other drivers' jobs free for others to use.
 
 Note that in fork pipelines, GitLab only adds the jobs for the files that
 have changed **since the last push**, so you might not get the jobs you expect.
diff --git a/mesalib/docs/ci/local-traces.rst b/mesalib/docs/ci/local-traces.rst
index a834c8ac4f..28af27d982 100644
--- a/mesalib/docs/ci/local-traces.rst
+++ b/mesalib/docs/ci/local-traces.rst
@@ -32,7 +32,7 @@ Simulating CI trace job
 Sometimes it's useful to be able to test traces on your local machine instead
 of the Mesa CI runner. To simulate the CI environment as closely as possible.
 
-Download the YAML file from your driver's ``ci/`` directory and then change the path in the YAML file from local proxy or MinIO to the local directory (url-like format ``file://``)
+Download the YAML file from your driver's ``ci/`` directory and then change the path in the YAML file from the local proxy or MinIO to the local directory (URL-like format ``file://``)
 
 .. code-block:: sh
 
diff --git a/mesalib/docs/codingstyle.rst b/mesalib/docs/codingstyle.rst
index f595d887b0..9fc5c6c2ea 100644
--- a/mesalib/docs/codingstyle.rst
+++ b/mesalib/docs/codingstyle.rst
@@ -25,7 +25,7 @@ them by running::
 
    git config blame.ignoreRevsFile .git-blame-ignore-revs
 
 Most code editors also support automatically formatting code as you
-write it; check your editor or its pluggins to see how to enable this.
+write it; check your editor or its plug-ins to see how to enable this.
 
 Vim
 ***
@@ -83,7 +83,7 @@ Add this to your ``.emacs`` to automatically format any C & C++ file
 
 If ``/usr/share/clang/clang-format.el`` doesn't exist, look through
 the files in the package providing ``clang-format`` in your distro. If you
-can't find anything (eg. on Debian/Ubuntu), refer to `this StackOverflow
+can't find anything (e.g. on Debian/Ubuntu), refer to `this StackOverflow
 answer `__
 to install clang-format through Emacs instead.
diff --git a/mesalib/docs/conf.py b/mesalib/docs/conf.py
index 8fc8517006..a6f197a17c 100644
--- a/mesalib/docs/conf.py
+++ b/mesalib/docs/conf.py
@@ -40,6 +40,7 @@
 # ones.
 extensions = [
     'bootstrap',
+    'depfile',
     'formatting',
     'hawkmoth',
     'nir',
@@ -111,8 +112,10 @@
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = [ - '_static/', +html_static_path = [] + +html_extra_path = [ + '_extra/', 'release-maintainers-keys.asc', 'features.txt', 'libGL.txt', @@ -120,8 +123,6 @@ 'README.VCE', ] -html_extra_path = [] - html_redirects = [ ('webmaster', 'https://www.mesa3d.org/website/'), ('developers', 'https://www.mesa3d.org/developers/'), @@ -138,6 +139,8 @@ r'https://gitlab.com/.*#.*', # needs JS eval r'https://gitlab.freedesktop.org/.*#.*', # needs JS eval r'https://github.com/.*#.*', # needs JS eval + r'https://www.intel.com/.*', # intel.com is blocking the linkcheck user-agent; maybe it can be customized to look like a browser? + r'https://cgit.freedesktop.org/.*', # cgit is no more ] linkcheck_exclude_documents = [r'relnotes/.*'] @@ -145,7 +148,6 @@ # Pages that forward the front-page to a wiki or some explore-page 'https://www.freedesktop.org': 'https://www.freedesktop.org/wiki/', 'https://x.org': 'https://x.org/wiki/', - 'https://perf.wiki.kernel.org/': 'https://perf.wiki.kernel.org/index.php/Main_Page', 'https://dri.freedesktop.org/': 'https://dri.freedesktop.org/wiki/', 'https://gitlab.freedesktop.org/': 'https://gitlab.freedesktop.org/explore/groups', 'https://www.sphinx-doc.org/': 'https://www.sphinx-doc.org/en/master/', @@ -218,14 +220,15 @@ # -- Options for hawkmoth ------------------------------------------------- -hawkmoth_root = os.path.abspath('..') +hawkmoth_root = os.path.abspath(os.pardir) +mesa_root = os.path.join(os.path.dirname(__file__), os.pardir) hawkmoth_clang = [ - '-Idocs/header-stubs/', - '-Iinclude/', - '-Isrc/', - '-Isrc/gallium/include/', - '-Isrc/intel/', - '-Isrc/mesa/', + '-I{}/docs/header-stubs/'.format(mesa_root), + '-I{}/include/'.format(mesa_root), + '-I{}/src/'.format(mesa_root), + '-I{}/src/gallium/include/'.format(mesa_root), + '-I{}/src/intel/'.format(mesa_root), + '-I{}/src/mesa/'.format(mesa_root), '-DHAVE_STRUCT_TIMESPEC', '-DHAVE_PTHREAD', '-DHAVE_ENDIAN_H', diff --git a/mesalib/docs/drivers/amd/hang-debugging.rst b/mesalib/docs/drivers/amd/hang-debugging.rst index a5fd2f8a98..1edc09290a 100644 --- a/mesalib/docs/drivers/amd/hang-debugging.rst +++ b/mesalib/docs/drivers/amd/hang-debugging.rst @@ -29,7 +29,9 @@ trace markers and synchronization and check for hangs. The hang report will be saved to ``~/radv_dumps__