//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ContextVk.cpp:
// Implements the class methods for ContextVk.
//
#include "libANGLE/renderer/vulkan/ContextVk.h"
#include "common/bitset_utils.h"
#include "common/debug.h"
#include "common/system_utils.h"
#include "common/utilities.h"
#include "image_util/loadimage.h"
#include "libANGLE/Context.h"
#include "libANGLE/Display.h"
#include "libANGLE/Program.h"
#include "libANGLE/Semaphore.h"
#include "libANGLE/ShareGroup.h"
#include "libANGLE/Surface.h"
#include "libANGLE/angletypes.h"
#include "libANGLE/renderer/renderer_utils.h"
#include "libANGLE/renderer/vulkan/BufferVk.h"
#include "libANGLE/renderer/vulkan/CompilerVk.h"
#include "libANGLE/renderer/vulkan/DisplayVk.h"
#include "libANGLE/renderer/vulkan/FenceNVVk.h"
#include "libANGLE/renderer/vulkan/FramebufferVk.h"
#include "libANGLE/renderer/vulkan/MemoryObjectVk.h"
#include "libANGLE/renderer/vulkan/OverlayVk.h"
#include "libANGLE/renderer/vulkan/ProgramPipelineVk.h"
#include "libANGLE/renderer/vulkan/ProgramVk.h"
#include "libANGLE/renderer/vulkan/QueryVk.h"
#include "libANGLE/renderer/vulkan/RenderbufferVk.h"
#include "libANGLE/renderer/vulkan/SamplerVk.h"
#include "libANGLE/renderer/vulkan/SemaphoreVk.h"
#include "libANGLE/renderer/vulkan/ShaderVk.h"
#include "libANGLE/renderer/vulkan/SurfaceVk.h"
#include "libANGLE/renderer/vulkan/SyncVk.h"
#include "libANGLE/renderer/vulkan/TextureVk.h"
#include "libANGLE/renderer/vulkan/TransformFeedbackVk.h"
#include "libANGLE/renderer/vulkan/VertexArrayVk.h"
#include "libANGLE/renderer/vulkan/vk_renderer.h"
#include <fstream>
#include <iostream>
#include <sstream>
namespace rx
{
namespace
{
// If the total size of copyBufferToImage commands in the outside command buffer reaches the
// threshold below, the latter is flushed.
static constexpr VkDeviceSize kMaxBufferToImageCopySize = 64 * 1024 * 1024;
// The number of queueSerials we will reserve for outsideRenderPassCommands when we generate one for
// RenderPassCommands.
static constexpr size_t kMaxReservedOutsideRenderPassQueueSerials = 15;
// Dumping the command stream is disabled by default.
static constexpr bool kEnableCommandStreamDiagnostics = false;
// All glMemoryBarrier bits that relate to texture usage
static constexpr GLbitfield kWriteAfterAccessImageMemoryBarriers =
GL_SHADER_IMAGE_ACCESS_BARRIER_BIT;
static constexpr GLbitfield kWriteAfterAccessMemoryBarriers =
kWriteAfterAccessImageMemoryBarriers | GL_SHADER_STORAGE_BARRIER_BIT;
// For shader uniforms such as gl_DepthRange and the viewport size.
struct GraphicsDriverUniforms
{
// Contains packed 8-bit values for atomic counter buffer offsets. These offsets are within
// Vulkan's minStorageBufferOffsetAlignment limit and are used to support unaligned offsets
// allowed in GL.
std::array<uint32_t, 2> acbBufferOffsets;
// .x is near, .y is far
std::array<float, 2> depthRange;
// Used to flip gl_FragCoord. Packed uvec2
uint32_t renderArea;
// Packed vec4 of snorm8
uint32_t flipXY;
// Only the lower 16 bits used
uint32_t dither;
// Various bits of state:
// - Surface rotation
// - Advanced blend equation
// - Sample count
// - Enabled clip planes
// - Depth transformation
uint32_t misc;
};
static_assert(sizeof(GraphicsDriverUniforms) % (sizeof(uint32_t) * 4) == 0,
"GraphicsDriverUniforms should be 16 bytes aligned");
// Only used when transform feedback is emulated.
struct GraphicsDriverUniformsExtended
{
GraphicsDriverUniforms common;
// Only used with transform feedback emulation
std::array<int32_t, 4> xfbBufferOffsets;
int32_t xfbVerticesPerInstance;
int32_t padding[3];
};
static_assert(sizeof(GraphicsDriverUniformsExtended) % (sizeof(uint32_t) * 4) == 0,
"GraphicsDriverUniformsExtended should be 16 bytes aligned");
struct ComputeDriverUniforms
{
// Atomic counter buffer offsets with the same layout as in GraphicsDriverUniforms.
std::array<uint32_t, 4> acbBufferOffsets;
};
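// Packs four snorm8 flip factors into one uint32_t. For example,
// MakeFlipUniform(false, true, false) yields x = 0x7F, y = 0x81, z = 0x7F and w = 0x81
// (since flipY != invertViewport), which packs to 0x817F817F.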
uint32_t MakeFlipUniform(bool flipX, bool flipY, bool invertViewport)
{
// Create snorm values of either -1 or 1, based on whether flipping is enabled or not
// respectively.
constexpr uint8_t kSnormOne = 0x7F;
constexpr uint8_t kSnormMinusOne = 0x81;
// .xy are flips for the fragment stage.
uint32_t x = flipX ? kSnormMinusOne : kSnormOne;
uint32_t y = flipY ? kSnormMinusOne : kSnormOne;
// .zw are flips for the vertex stage.
uint32_t z = x;
uint32_t w = flipY != invertViewport ? kSnormMinusOne : kSnormOne;
return x | y << 8 | z << 16 | w << 24;
}
GLenum DefaultGLErrorCode(VkResult result)
{
switch (result)
{
case VK_ERROR_OUT_OF_HOST_MEMORY:
case VK_ERROR_OUT_OF_DEVICE_MEMORY:
case VK_ERROR_TOO_MANY_OBJECTS:
return GL_OUT_OF_MEMORY;
case VK_ERROR_DEVICE_LOST:
return GL_CONTEXT_LOST;
default:
return GL_INVALID_OPERATION;
}
}
constexpr gl::ShaderMap<vk::ImageLayout> kShaderReadOnlyImageLayouts = {
{gl::ShaderType::Vertex, vk::ImageLayout::VertexShaderReadOnly},
{gl::ShaderType::TessControl, vk::ImageLayout::PreFragmentShadersReadOnly},
{gl::ShaderType::TessEvaluation, vk::ImageLayout::PreFragmentShadersReadOnly},
{gl::ShaderType::Geometry, vk::ImageLayout::PreFragmentShadersReadOnly},
{gl::ShaderType::Fragment, vk::ImageLayout::FragmentShaderReadOnly},
{gl::ShaderType::Compute, vk::ImageLayout::ComputeShaderReadOnly}};
constexpr gl::ShaderMap<vk::ImageLayout> kShaderWriteImageLayouts = {
{gl::ShaderType::Vertex, vk::ImageLayout::VertexShaderWrite},
{gl::ShaderType::TessControl, vk::ImageLayout::PreFragmentShadersWrite},
{gl::ShaderType::TessEvaluation, vk::ImageLayout::PreFragmentShadersWrite},
{gl::ShaderType::Geometry, vk::ImageLayout::PreFragmentShadersWrite},
{gl::ShaderType::Fragment, vk::ImageLayout::FragmentShaderWrite},
{gl::ShaderType::Compute, vk::ImageLayout::ComputeShaderWrite}};
constexpr VkBufferUsageFlags kVertexBufferUsage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
constexpr size_t kDynamicVertexDataSize = 16 * 1024;
bool CanMultiDrawIndirectUseCmd(ContextVk *contextVk,
VertexArrayVk *vertexArray,
gl::PrimitiveMode mode,
GLsizei drawcount,
GLsizei stride)
{
// Use the generic implementation if multiDrawIndirect is disabled, if line loop is being used
// for multiDraw, if drawcount is greater than maxDrawIndirectCount, or if there are streaming
// vertex attributes.
ASSERT(drawcount > 1);
const bool supportsMultiDrawIndirect =
contextVk->getFeatures().supportsMultiDrawIndirect.enabled;
const bool isMultiDrawLineLoop = (mode == gl::PrimitiveMode::LineLoop);
const bool isDrawCountBeyondLimit =
(static_cast<uint32_t>(drawcount) >
contextVk->getRenderer()->getPhysicalDeviceProperties().limits.maxDrawIndirectCount);
const bool isMultiDrawWithStreamingAttribs = vertexArray->getStreamingVertexAttribsMask().any();
const bool canMultiDrawIndirectUseCmd = supportsMultiDrawIndirect && !isMultiDrawLineLoop &&
!isDrawCountBeyondLimit &&
!isMultiDrawWithStreamingAttribs;
return canMultiDrawIndirectUseCmd;
}
uint32_t GetCoverageSampleCount(const gl::State &glState, GLint samples)
{
ASSERT(glState.isSampleCoverageEnabled());
// Get a fraction of the samples based on the coverage parameters.
// There are multiple ways to obtain an integer value from a float -
// truncation, ceil and round
//
// round() provides a more even distribution of values but doesn't seem to play well
// with all vendors (AMD). A way to work around this is to increase the comparison threshold
// of deqp tests. Though this takes care of deqp tests, other apps would still have issues.
//
// Truncation provides an uneven distribution near the edges of the interval but seems to
// play well with all vendors.
//
// We are going with truncation for expediency.
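//
// For example, a coverage value of 0.6 on a 4-sample target yields
// trunc(0.6 * 4) = trunc(2.4) = 2 covered samples.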
return static_cast<uint32_t>(glState.getSampleCoverageValue() * samples);
}
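// Applies the coverage mask to *maskOut. For example, coverageSampleCount = 2 gives
// coverageMask = 0b0011; with invert enabled, the complement ~0b0011 is applied instead.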
void ApplySampleCoverage(const gl::State &glState, uint32_t coverageSampleCount, uint32_t *maskOut)
{
ASSERT(glState.isSampleCoverageEnabled());
uint32_t coverageMask = angle::BitMask<uint32_t>(coverageSampleCount);
if (glState.getSampleCoverageInvert())
{
coverageMask = ~coverageMask;
}
*maskOut &= coverageMask;
}
SurfaceRotation DetermineSurfaceRotation(const gl::Framebuffer *framebuffer,
const WindowSurfaceVk *windowSurface)
{
if (windowSurface && framebuffer->isDefault())
{
switch (windowSurface->getPreTransform())
{
case VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR:
// Do not rotate gl_Position (surface matches the device's orientation):
return SurfaceRotation::Identity;
case VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR:
// Rotate gl_Position 90 degrees:
return SurfaceRotation::Rotated90Degrees;
case VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR:
// Rotate gl_Position 180 degrees:
return SurfaceRotation::Rotated180Degrees;
case VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR:
// Rotate gl_Position 270 degrees:
return SurfaceRotation::Rotated270Degrees;
default:
UNREACHABLE();
return SurfaceRotation::Identity;
}
}
else
{
// Do not rotate gl_Position (offscreen framebuffer):
return SurfaceRotation::Identity;
}
}
// Should not generate a copy with modern C++.
EventName GetTraceEventName(const char *title, uint64_t counter)
{
EventName buf;
snprintf(buf.data(), kMaxGpuEventNameLen - 1, "%s %llu", title,
static_cast<unsigned long long>(counter));
return buf;
}
vk::ResourceAccess GetColorAccess(const gl::State &state,
const gl::FramebufferState &framebufferState,
const gl::DrawBufferMask &emulatedAlphaMask,
const gl::ProgramExecutable *executable,
size_t colorIndexGL)
{
// No access if draw buffer is disabled altogether
// Without framebuffer fetch:
// No access if color output is masked, or rasterizer discard is enabled
// With framebuffer fetch:
// Read access if color output is masked, or rasterizer discard is enabled
if (!framebufferState.getEnabledDrawBuffers().test(colorIndexGL))
{
return vk::ResourceAccess::Unused;
}
const gl::BlendStateExt &blendStateExt = state.getBlendStateExt();
uint8_t colorMask = gl::BlendStateExt::ColorMaskStorage::GetValueIndexed(
colorIndexGL, blendStateExt.getColorMaskBits());
if (emulatedAlphaMask[colorIndexGL])
{
colorMask &= ~VK_COLOR_COMPONENT_A_BIT;
}
const bool isOutputMasked = colorMask == 0 || state.isRasterizerDiscardEnabled();
if (isOutputMasked)
{
const bool hasFramebufferFetch =
executable ? executable->usesColorFramebufferFetch() : false;
return hasFramebufferFetch ? vk::ResourceAccess::ReadOnly : vk::ResourceAccess::Unused;
}
return vk::ResourceAccess::ReadWrite;
}
vk::ResourceAccess GetDepthAccess(const gl::DepthStencilState &dsState,
const gl::ProgramExecutable *executable,
UpdateDepthFeedbackLoopReason reason)
{
// Skip if depth/stencil not actually accessed.
if (reason == UpdateDepthFeedbackLoopReason::None)
{
return vk::ResourceAccess::Unused;
}
// Note that clear commands don't respect depth test enable, only the mask
// Note: other state can be considered here too in the future, such as rasterizer discard.
if (!dsState.depthTest && reason != UpdateDepthFeedbackLoopReason::Clear)
{
return vk::ResourceAccess::Unused;
}
if (dsState.isDepthMaskedOut())
{
const bool hasFramebufferFetch =
executable ? executable->usesDepthFramebufferFetch() : false;
// If depthFunc is GL_ALWAYS or GL_NEVER, we do not need to load depth value.
return (dsState.depthFunc == GL_ALWAYS || dsState.depthFunc == GL_NEVER) &&
!hasFramebufferFetch
? vk::ResourceAccess::Unused
: vk::ResourceAccess::ReadOnly;
}
return vk::ResourceAccess::ReadWrite;
}
vk::ResourceAccess GetStencilAccess(const gl::DepthStencilState &dsState,
GLuint framebufferStencilSize,
const gl::ProgramExecutable *executable,
UpdateDepthFeedbackLoopReason reason)
{
// Skip if depth/stencil not actually accessed.
if (reason == UpdateDepthFeedbackLoopReason::None)
{
return vk::ResourceAccess::Unused;
}
// Note that clear commands don't respect stencil test enable, only the mask
// Note: other state can be considered here too in the future, such as rasterizer discard.
if (!dsState.stencilTest && reason != UpdateDepthFeedbackLoopReason::Clear)
{
return vk::ResourceAccess::Unused;
}
const bool hasFramebufferFetch = executable ? executable->usesStencilFramebufferFetch() : false;
return dsState.isStencilNoOp(framebufferStencilSize) &&
dsState.isStencilBackNoOp(framebufferStencilSize) && !hasFramebufferFetch
? vk::ResourceAccess::ReadOnly
: vk::ResourceAccess::ReadWrite;
}
egl::ContextPriority GetContextPriority(const gl::State &state)
{
return egl::FromEGLenum<egl::ContextPriority>(state.getContextPriority());
}
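// gl::SamplerFormat::Unsigned is used as the signal that the stencil aspect is bound:
// stencil values are sampled as unsigned integers in GLSL, while depth is sampled as float.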
bool IsStencilSamplerBinding(const gl::ProgramExecutable &executable, size_t textureUnit)
{
const gl::SamplerFormat format = executable.getSamplerFormatForTextureUnitIndex(textureUnit);
const bool isStencilTexture = format == gl::SamplerFormat::Unsigned;
return isStencilTexture;
}
vk::ImageLayout GetDepthStencilAttachmentImageReadLayout(const vk::ImageHelper &image,
gl::ShaderType firstShader)
{
const bool isDepthTexture =
image.hasRenderPassUsageFlag(vk::RenderPassUsage::DepthTextureSampler);
const bool isStencilTexture =
image.hasRenderPassUsageFlag(vk::RenderPassUsage::StencilTextureSampler);
const bool isDepthReadOnlyAttachment =
image.hasRenderPassUsageFlag(vk::RenderPassUsage::DepthReadOnlyAttachment);
const bool isStencilReadOnlyAttachment =
image.hasRenderPassUsageFlag(vk::RenderPassUsage::StencilReadOnlyAttachment);
const bool isFS = firstShader == gl::ShaderType::Fragment;
// Only called when at least one aspect of the image is bound as texture
ASSERT(isDepthTexture || isStencilTexture);
// Check for feedback loop; this is when depth or stencil is both bound as a texture and is used
// in a non-read-only way as attachment.
if ((isDepthTexture && !isDepthReadOnlyAttachment) ||
(isStencilTexture && !isStencilReadOnlyAttachment))
{
return isFS ? vk::ImageLayout::DepthStencilFragmentShaderFeedback
: vk::ImageLayout::DepthStencilAllShadersFeedback;
}
if (isDepthReadOnlyAttachment)
{
if (isStencilReadOnlyAttachment)
{
// Depth read + stencil read
return isFS ? vk::ImageLayout::DepthReadStencilReadFragmentShaderRead
: vk::ImageLayout::DepthReadStencilReadAllShadersRead;
}
else
{
// Depth read + stencil write
return isFS ? vk::ImageLayout::DepthReadStencilWriteFragmentShaderDepthRead
: vk::ImageLayout::DepthReadStencilWriteAllShadersDepthRead;
}
}
else
{
if (isStencilReadOnlyAttachment)
{
// Depth write + stencil read
return isFS ? vk::ImageLayout::DepthWriteStencilReadFragmentShaderStencilRead
: vk::ImageLayout::DepthWriteStencilReadAllShadersStencilRead;
}
else
{
// Depth write + stencil write: This is definitely a feedback loop and is handled above.
UNREACHABLE();
return vk::ImageLayout::DepthStencilAllShadersFeedback;
}
}
}
vk::ImageLayout GetImageReadLayout(TextureVk *textureVk,
const gl::ProgramExecutable &executable,
size_t textureUnit,
PipelineType pipelineType)
{
vk::ImageHelper &image = textureVk->getImage();
// If this texture has been bound as image and the current executable program accesses images,
// we consider this image's layout as writeable.
if (textureVk->hasBeenBoundAsImage() && executable.hasImages())
{
return pipelineType == PipelineType::Compute ? vk::ImageLayout::ComputeShaderWrite
: vk::ImageLayout::AllGraphicsShadersWrite;
}
gl::ShaderBitSet remainingShaderBits =
executable.getSamplerShaderBitsForTextureUnitIndex(textureUnit);
ASSERT(remainingShaderBits.any());
gl::ShaderType firstShader = remainingShaderBits.first();
gl::ShaderType lastShader = remainingShaderBits.last();
remainingShaderBits.reset(firstShader);
remainingShaderBits.reset(lastShader);
const bool isFragmentShaderOnly = firstShader == gl::ShaderType::Fragment;
if (isFragmentShaderOnly)
{
ASSERT(remainingShaderBits.none() && lastShader == firstShader);
}
if (image.hasRenderPassUsageFlag(vk::RenderPassUsage::RenderTargetAttachment))
{
// Right now we set the *TextureSampler flag only when RenderTargetAttachment is set since
// we do not track all textures in the render pass.
if (image.isDepthOrStencil())
{
if (IsStencilSamplerBinding(executable, textureUnit))
{
image.setRenderPassUsageFlag(vk::RenderPassUsage::StencilTextureSampler);
}
else
{
image.setRenderPassUsageFlag(vk::RenderPassUsage::DepthTextureSampler);
}
return GetDepthStencilAttachmentImageReadLayout(image, firstShader);
}
image.setRenderPassUsageFlag(vk::RenderPassUsage::ColorTextureSampler);
return isFragmentShaderOnly ? vk::ImageLayout::ColorWriteFragmentShaderFeedback
: vk::ImageLayout::ColorWriteAllShadersFeedback;
}
if (image.isDepthOrStencil())
{
// We always use a depth-stencil read-only layout for any depth textures to simplify
// our implementation's handling of depth-stencil read-only mode. We don't have to
// split a RenderPass to transition a depth texture from shader-read to read-only.
// This improves performance in Manhattan. Future optimizations are likely possible
// here including using specialized barriers without breaking the RenderPass.
return isFragmentShaderOnly ? vk::ImageLayout::DepthReadStencilReadFragmentShaderRead
: vk::ImageLayout::DepthReadStencilReadAllShadersRead;
}
// We barrier against either:
// - Vertex only
// - Fragment only
// - Pre-fragment only (vertex, geometry and tessellation together)
if (remainingShaderBits.any() || firstShader != lastShader)
{
return lastShader == gl::ShaderType::Fragment ? vk::ImageLayout::AllGraphicsShadersReadOnly
: vk::ImageLayout::PreFragmentShadersReadOnly;
}
return kShaderReadOnlyImageLayouts[firstShader];
}
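// Determines both the write layout and the bound subresource range for an image unit. For
// example, a non-layered binding with layer = 3 on an 8-layer image selects layer start 3 and
// count 1, while a layered binding selects start 0 and count 8.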
vk::ImageLayout GetImageWriteLayoutAndSubresource(const gl::ImageUnit &imageUnit,
vk::ImageHelper &image,
gl::ShaderBitSet shaderStages,
gl::LevelIndex *levelOut,
uint32_t *layerStartOut,
uint32_t *layerCountOut)
{
*levelOut = gl::LevelIndex(static_cast<uint32_t>(imageUnit.level));
*layerStartOut = 0;
*layerCountOut = image.getLayerCount();
if (!imageUnit.layered)
{
*layerStartOut = imageUnit.layer;
*layerCountOut = 1;
}
gl::ShaderType firstShader = shaderStages.first();
gl::ShaderType lastShader = shaderStages.last();
shaderStages.reset(firstShader);
shaderStages.reset(lastShader);
// We barrier against either:
// - Vertex only
// - Fragment only
// - Pre-fragment only (vertex, geometry and tessellation together)
if (shaderStages.any() || firstShader != lastShader)
{
return lastShader == gl::ShaderType::Fragment ? vk::ImageLayout::AllGraphicsShadersWrite
: vk::ImageLayout::PreFragmentShadersWrite;
}
return kShaderWriteImageLayouts[firstShader];
}
template <typename CommandBufferT>
void OnTextureBufferRead(vk::BufferHelper *buffer,
gl::ShaderBitSet stages,
CommandBufferT *commandBufferHelper)
{
ASSERT(stages.any());
// TODO: accept multiple stages in bufferRead. http://anglebug.com/42262235
for (gl::ShaderType stage : stages)
{
// Note: if another range of the same buffer is simultaneously used for storage,
// such as for transform feedback output, or SSBO, unnecessary barriers can be
// generated.
commandBufferHelper->bufferRead(VK_ACCESS_SHADER_READ_BIT, vk::GetPipelineStage(stage),
buffer);
}
}
void OnImageBufferWrite(BufferVk *bufferVk,
gl::ShaderBitSet stages,
vk::CommandBufferHelperCommon *commandBufferHelper)
{
vk::BufferHelper &buffer = bufferVk->getBuffer();
// TODO: accept multiple stages in bufferWrite. http://anglebug.com/42262235
for (gl::ShaderType stage : stages)
{
commandBufferHelper->bufferWrite(VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
vk::GetPipelineStage(stage), &buffer);
}
}
constexpr angle::PackedEnumMap<RenderPassClosureReason, const char *> kRenderPassClosureReason = {{
{RenderPassClosureReason::AlreadySpecifiedElsewhere, nullptr},
{RenderPassClosureReason::ContextDestruction, "Render pass closed due to context destruction"},
{RenderPassClosureReason::ContextChange, "Render pass closed due to context change"},
{RenderPassClosureReason::GLFlush, "Render pass closed due to glFlush()"},
{RenderPassClosureReason::GLFinish, "Render pass closed due to glFinish()"},
{RenderPassClosureReason::EGLSwapBuffers, "Render pass closed due to eglSwapBuffers()"},
{RenderPassClosureReason::EGLWaitClient, "Render pass closed due to eglWaitClient()"},
{RenderPassClosureReason::SurfaceUnMakeCurrent,
"Render pass closed due to onSurfaceUnMakeCurrent()"},
{RenderPassClosureReason::FramebufferBindingChange,
"Render pass closed due to framebuffer binding change"},
{RenderPassClosureReason::FramebufferChange, "Render pass closed due to framebuffer change"},
{RenderPassClosureReason::NewRenderPass,
"Render pass closed due to starting a new render pass"},
{RenderPassClosureReason::BufferUseThenXfbWrite,
"Render pass closed due to buffer use as transform feedback output after prior use in render "
"pass"},
{RenderPassClosureReason::XfbWriteThenVertexIndexBuffer,
"Render pass closed due to transform feedback buffer use as vertex/index input"},
{RenderPassClosureReason::XfbWriteThenIndirectDrawBuffer,
"Render pass closed due to indirect draw buffer previously used as transform feedback output "
"in render pass"},
{RenderPassClosureReason::XfbResumeAfterDrawBasedClear,
"Render pass closed due to transform feedback resume after clear through draw"},
{RenderPassClosureReason::DepthStencilUseInFeedbackLoop,
"Render pass closed due to depth/stencil attachment use under feedback loop"},
{RenderPassClosureReason::DepthStencilWriteAfterFeedbackLoop,
"Render pass closed due to depth/stencil attachment write after feedback loop"},
{RenderPassClosureReason::PipelineBindWhileXfbActive,
"Render pass closed due to graphics pipeline change while transform feedback is active"},
{RenderPassClosureReason::BufferWriteThenMap,
"Render pass closed due to mapping buffer being written to by said render pass"},
{RenderPassClosureReason::BufferWriteThenOutOfRPRead,
"Render pass closed due to non-render-pass read of buffer that was written to in render pass"},
{RenderPassClosureReason::BufferUseThenOutOfRPWrite,
"Render pass closed due to non-render-pass write of buffer that was used in render pass"},
{RenderPassClosureReason::ImageUseThenOutOfRPRead,
"Render pass closed due to non-render-pass read of image that was used in render pass"},
{RenderPassClosureReason::ImageUseThenOutOfRPWrite,
"Render pass closed due to non-render-pass write of image that was used in render pass"},
{RenderPassClosureReason::XfbWriteThenComputeRead,
"Render pass closed due to compute read of buffer previously used as transform feedback "
"output in render pass"},
{RenderPassClosureReason::XfbWriteThenIndirectDispatchBuffer,
"Render pass closed due to indirect dispatch buffer previously used as transform feedback "
"output in render pass"},
{RenderPassClosureReason::ImageAttachmentThenComputeRead,
"Render pass closed due to compute read of image previously used as framebuffer attachment in "
"render pass"},
{RenderPassClosureReason::GetQueryResult, "Render pass closed due to getting query result"},
{RenderPassClosureReason::BeginNonRenderPassQuery,
"Render pass closed due to non-render-pass query begin"},
{RenderPassClosureReason::EndNonRenderPassQuery,
"Render pass closed due to non-render-pass query end"},
{RenderPassClosureReason::TimestampQuery, "Render pass closed due to timestamp query"},
{RenderPassClosureReason::EndRenderPassQuery,
"Render pass closed due to switch from query enabled draw to query disabled draw"},
{RenderPassClosureReason::GLReadPixels, "Render pass closed due to glReadPixels()"},
{RenderPassClosureReason::BufferUseThenReleaseToExternal,
"Render pass closed due to buffer (used by render pass) release to external"},
{RenderPassClosureReason::ImageUseThenReleaseToExternal,
"Render pass closed due to image (used by render pass) release to external"},
{RenderPassClosureReason::BufferInUseWhenSynchronizedMap,
"Render pass closed due to mapping buffer in use by GPU without GL_MAP_UNSYNCHRONIZED_BIT"},
{RenderPassClosureReason::GLMemoryBarrierThenStorageResource,
"Render pass closed due to glMemoryBarrier before storage output in render pass"},
{RenderPassClosureReason::StorageResourceUseThenGLMemoryBarrier,
"Render pass closed due to glMemoryBarrier after storage output in render pass"},
{RenderPassClosureReason::ExternalSemaphoreSignal,
"Render pass closed due to external semaphore signal"},
{RenderPassClosureReason::SyncObjectInit, "Render pass closed due to sync object insertion"},
{RenderPassClosureReason::SyncObjectWithFdInit,
"Render pass closed due to sync object with fd insertion"},
{RenderPassClosureReason::SyncObjectClientWait,
"Render pass closed due to sync object client wait"},
{RenderPassClosureReason::SyncObjectServerWait,
"Render pass closed due to sync object server wait"},
{RenderPassClosureReason::SyncObjectGetStatus,
"Render pass closed due to sync object get status"},
{RenderPassClosureReason::XfbPause, "Render pass closed due to transform feedback pause"},
{RenderPassClosureReason::FramebufferFetchEmulation,
"Render pass closed due to framebuffer fetch emulation"},
{RenderPassClosureReason::GenerateMipmapOnCPU,
"Render pass closed due to fallback to CPU when generating mipmaps"},
{RenderPassClosureReason::CopyTextureOnCPU,
"Render pass closed due to fallback to CPU when copying texture"},
{RenderPassClosureReason::TextureReformatToRenderable,
"Render pass closed due to reformatting texture to a renderable fallback"},
{RenderPassClosureReason::DeviceLocalBufferMap,
"Render pass closed due to mapping device local buffer"},
{RenderPassClosureReason::PrepareForBlit, "Render pass closed prior to draw-based blit"},
{RenderPassClosureReason::PrepareForImageCopy,
"Render pass closed prior to draw-based image copy"},
{RenderPassClosureReason::TemporaryForImageClear,
"Temporary render pass used for image clear closed"},
{RenderPassClosureReason::TemporaryForImageCopy,
"Temporary render pass used for image copy closed"},
{RenderPassClosureReason::TemporaryForOverlayDraw,
"Temporary render pass used for overlay draw closed"},
}};
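// Vulkan requires self-dependencies in a render pass with multiview enabled to be view-local,
// so VK_DEPENDENCY_VIEW_LOCAL_BIT is added whenever the current view count is non-zero.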
VkDependencyFlags GetLocalDependencyFlags(ContextVk *contextVk)
{
VkDependencyFlags dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
if (contextVk->getCurrentViewCount() > 0)
{
dependencyFlags |= VK_DEPENDENCY_VIEW_LOCAL_BIT;
}
return dependencyFlags;
}
bool BlendModeSupportsDither(const ContextVk *contextVk, size_t colorIndex)
{
const gl::State &state = contextVk->getState();
// Specific combinations of color blend modes are known to work with our dithering emulation.
// Note we specifically don't check alpha blend, as dither isn't applied to alpha.
// See http://b/232574868 for more discussion and reasoning.
gl::BlendFactorType srcBlendFactor = state.getBlendStateExt().getSrcColorIndexed(colorIndex);
gl::BlendFactorType dstBlendFactor = state.getBlendStateExt().getDstColorIndexed(colorIndex);
const bool ditheringCompatibleBlendFactors =
(srcBlendFactor == gl::BlendFactorType::SrcAlpha &&
dstBlendFactor == gl::BlendFactorType::OneMinusSrcAlpha);
const bool allowAdditionalBlendFactors =
contextVk->getFeatures().enableAdditionalBlendFactorsForDithering.enabled &&
(srcBlendFactor == gl::BlendFactorType::One &&
dstBlendFactor == gl::BlendFactorType::OneMinusSrcAlpha);
return ditheringCompatibleBlendFactors || allowAdditionalBlendFactors;
}
bool ShouldUseGraphicsDriverUniformsExtended(const vk::Context *context)
{
return context->getFeatures().emulateTransformFeedback.enabled;
}
bool IsAnySamplesQuery(gl::QueryType type)
{
return type == gl::QueryType::AnySamples || type == gl::QueryType::AnySamplesConservative;
}
enum class GraphicsPipelineSubsetRenderPass
{
Unused,
Required,
};
template <typename Cache>
angle::Result CreateGraphicsPipelineSubset(ContextVk *contextVk,
const vk::GraphicsPipelineDesc &desc,
vk::GraphicsPipelineTransitionBits transition,
GraphicsPipelineSubsetRenderPass renderPass,
Cache *cache,
vk::PipelineCacheAccess *pipelineCache,
vk::PipelineHelper **pipelineOut)
{
const vk::PipelineLayout unusedPipelineLayout;
const vk::ShaderModuleMap unusedShaders;
const vk::SpecializationConstants unusedSpecConsts = {};
if (*pipelineOut != nullptr && !transition.any())
{
return angle::Result::Continue;
}
if (*pipelineOut != nullptr)
{
ASSERT((*pipelineOut)->valid());
if ((*pipelineOut)->findTransition(transition, desc, pipelineOut))
{
return angle::Result::Continue;
}
}
vk::PipelineHelper *oldPipeline = *pipelineOut;
const vk::GraphicsPipelineDesc *descPtr = nullptr;
if (!cache->getPipeline(desc, &descPtr, pipelineOut))
{
const vk::RenderPass unusedRenderPass;
const vk::RenderPass *compatibleRenderPass = &unusedRenderPass;
if (renderPass == GraphicsPipelineSubsetRenderPass::Required)
{
// Pull in a compatible RenderPass if used by this subset.
ANGLE_TRY(contextVk->getCompatibleRenderPass(desc.getRenderPassDesc(),
&compatibleRenderPass));
}
ANGLE_TRY(cache->createPipeline(contextVk, pipelineCache, *compatibleRenderPass,
unusedPipelineLayout, unusedShaders, unusedSpecConsts,
PipelineSource::Draw, desc, &descPtr, pipelineOut));
}
if (oldPipeline)
{
oldPipeline->addTransition(transition, descPtr, *pipelineOut);
}
return angle::Result::Continue;
}
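// Queue serials on different queue indices cannot be ordered against each other, so callers
// treat a serial from another index as conservatively as a strictly smaller one.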
bool QueueSerialsHaveDifferentIndexOrSmaller(const QueueSerial &queueSerial1,
const QueueSerial &queueSerial2)
{
return queueSerial1.getIndex() != queueSerial2.getIndex() || queueSerial1 < queueSerial2;
}
void UpdateImagesWithSharedCacheKey(const gl::ActiveTextureArray<TextureVk *> &activeImages,
const std::vector<gl::ImageBinding> &imageBindings,
const vk::SharedDescriptorSetCacheKey &sharedCacheKey)
{
for (const gl::ImageBinding &imageBinding : imageBindings)
{
uint32_t arraySize = static_cast<uint32_t>(imageBinding.boundImageUnits.size());
for (uint32_t arrayElement = 0; arrayElement < arraySize; ++arrayElement)
{
GLuint imageUnit = imageBinding.boundImageUnits[arrayElement];
// For simplicity, we do not check if the uniform is active or a duplicate. The worst case is
// that we unnecessarily delete the cache entry when an image bound to an inactive uniform is
// destroyed.
activeImages[imageUnit]->onNewDescriptorSet(sharedCacheKey);
}
}
}
void UpdateBufferWithSharedCacheKey(const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding,
VkDescriptorType descriptorType,
const vk::SharedDescriptorSetCacheKey &sharedCacheKey)
{
if (bufferBinding.get() != nullptr)
{
// For simplicity, we do not check if the uniform is active or a duplicate. The worst case is
// that we unnecessarily delete the cache entry when a buffer bound to an inactive uniform is
// destroyed.
BufferVk *bufferVk = vk::GetImpl(bufferBinding.get());
vk::BufferHelper &bufferHelper = bufferVk->getBuffer();
if (vk::IsDynamicDescriptor(descriptorType))
{
bufferHelper.getBufferBlock()->onNewDescriptorSet(sharedCacheKey);
}
else
{
bufferHelper.onNewDescriptorSet(sharedCacheKey);
}
}
}
void GenerateTextureUnitSamplerIndexMap(
const std::vector<GLuint> &samplerBoundTextureUnits,
std::unordered_map<size_t, uint32_t> *textureUnitSamplerIndexMapOut)
{
// Create a map of textureUnit <-> samplerIndex
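// Note: std::unordered_map::insert does not overwrite an existing key, so if two samplers
// reference the same texture unit, the first sampler index encountered wins.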
for (size_t samplerIndex = 0; samplerIndex < samplerBoundTextureUnits.size(); samplerIndex++)
{
textureUnitSamplerIndexMapOut->insert(
{samplerBoundTextureUnits[samplerIndex], static_cast<uint32_t>(samplerIndex)});
}
}
} // anonymous namespace
void ContextVk::flushDescriptorSetUpdates()
{
mPerfCounters.writeDescriptorSets +=
mShareGroupVk->getUpdateDescriptorSetsBuilder()->flushDescriptorSetUpdates(getDevice());
}
ANGLE_INLINE void ContextVk::onRenderPassFinished(RenderPassClosureReason reason)
{
if (mRenderPassCommandBuffer != nullptr)
{
pauseRenderPassQueriesIfActive();
// If reason is specified, add it to the command buffer right before ending the render pass,
// so it will show up in GPU debuggers.
const char *reasonText = kRenderPassClosureReason[reason];
if (reasonText)
{
insertEventMarkerImpl(GL_DEBUG_SOURCE_API, reasonText);
}
mRenderPassCommandBuffer = nullptr;
// Restart at subpass 0.
mGraphicsPipelineDesc->resetSubpass(&mGraphicsPipelineTransition);
}
mGraphicsDirtyBits.set(DIRTY_BIT_RENDER_PASS);
}
// ContextVk implementation.
ContextVk::ContextVk(const gl::State &state, gl::ErrorSet *errorSet, vk::Renderer *renderer)
: ContextImpl(state, errorSet),
vk::Context(renderer),
mGraphicsDirtyBitHandlers{},
mComputeDirtyBitHandlers{},
mRenderPassCommandBuffer(nullptr),
mCurrentGraphicsPipeline(nullptr),
mCurrentGraphicsPipelineShaders(nullptr),
mCurrentGraphicsPipelineVertexInput(nullptr),
mCurrentGraphicsPipelineFragmentOutput(nullptr),
mCurrentComputePipeline(nullptr),
mCurrentDrawMode(gl::PrimitiveMode::InvalidEnum),
mCurrentWindowSurface(nullptr),
mCurrentRotationDrawFramebuffer(SurfaceRotation::Identity),
mCurrentRotationReadFramebuffer(SurfaceRotation::Identity),
mActiveRenderPassQueries{},
mLastIndexBufferOffset(nullptr),
mCurrentIndexBuffer(nullptr),
mCurrentIndexBufferOffset(0),
mCurrentDrawElementsType(gl::DrawElementsType::InvalidEnum),
mXfbBaseVertex(0),
mXfbVertexCountPerInstance(0),
mClearColorValue{},
mClearDepthStencilValue{},
mClearColorMasks(0),
mDeferredMemoryBarriers(0),
mFlipYForCurrentSurface(false),
mFlipViewportForDrawFramebuffer(false),
mFlipViewportForReadFramebuffer(false),
mIsAnyHostVisibleBufferWritten(false),
mCurrentQueueSerialIndex(kInvalidQueueSerialIndex),
mOutsideRenderPassCommands(nullptr),
mRenderPassCommands(nullptr),
mQueryEventType(GraphicsEventCmdBuf::NotInQueryCmd),
mGpuEventsEnabled(false),
mPrimaryBufferEventCounter(0),
mHasDeferredFlush(false),
mHasAnyCommandsPendingSubmission(false),
mIsInColorFramebufferFetchMode(false),
mAllowRenderPassToReactivate(true),
mTotalBufferToImageCopySize(0),
mEstimatedPendingImageGarbageSize(0),
mHasWaitSemaphoresPendingSubmission(false),
mGpuClockSync{std::numeric_limits<double>::max(), std::numeric_limits<double>::max()},
mGpuEventTimestampOrigin(0),
mInitialContextPriority(renderer->getDriverPriority(GetContextPriority(state))),
mContextPriority(mInitialContextPriority),
mProtectionType(vk::ConvertProtectionBoolToType(state.hasProtectedContent())),
mShareGroupVk(vk::GetImpl(state.getShareGroup()))
{
ANGLE_TRACE_EVENT0("gpu.angle", "ContextVk::ContextVk");
memset(&mClearColorValue, 0, sizeof(mClearColorValue));
memset(&mClearDepthStencilValue, 0, sizeof(mClearDepthStencilValue));
memset(&mViewport, 0, sizeof(mViewport));
memset(&mScissor, 0, sizeof(mScissor));
// Ensure viewport is within Vulkan requirements
vk::ClampViewport(&mViewport);
mNonIndexedDirtyBitsMask.set();
mNonIndexedDirtyBitsMask.reset(DIRTY_BIT_INDEX_BUFFER);
mIndexedDirtyBitsMask.set();
// Once a command buffer is ended, all bindings (through |vkCmdBind*| calls) are lost per Vulkan
// spec. Once a new command buffer is allocated, we must make sure every previously bound
// resource is bound again.
//
// Note that currently these dirty bits are set every time a new render pass command buffer is
// begun. However, using ANGLE's SecondaryCommandBuffer, the Vulkan command buffer (which is
// the primary command buffer) is not ended, so technically we don't need to rebind these.
mNewGraphicsCommandBufferDirtyBits = DirtyBits{
DIRTY_BIT_RENDER_PASS, DIRTY_BIT_COLOR_ACCESS, DIRTY_BIT_DEPTH_STENCIL_ACCESS,
DIRTY_BIT_PIPELINE_BINDING, DIRTY_BIT_TEXTURES, DIRTY_BIT_VERTEX_BUFFERS,
DIRTY_BIT_INDEX_BUFFER, DIRTY_BIT_SHADER_RESOURCES, DIRTY_BIT_DESCRIPTOR_SETS,
DIRTY_BIT_DRIVER_UNIFORMS,
};
if (getFeatures().supportsTransformFeedbackExtension.enabled ||
getFeatures().emulateTransformFeedback.enabled)
{
mNewGraphicsCommandBufferDirtyBits.set(DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS);
}
mNewComputeCommandBufferDirtyBits =
DirtyBits{DIRTY_BIT_PIPELINE_BINDING, DIRTY_BIT_TEXTURES, DIRTY_BIT_SHADER_RESOURCES,
DIRTY_BIT_DESCRIPTOR_SETS, DIRTY_BIT_DRIVER_UNIFORMS};
mDynamicStateDirtyBits = DirtyBits{
DIRTY_BIT_DYNAMIC_VIEWPORT, DIRTY_BIT_DYNAMIC_SCISSOR,
DIRTY_BIT_DYNAMIC_LINE_WIDTH, DIRTY_BIT_DYNAMIC_DEPTH_BIAS,
DIRTY_BIT_DYNAMIC_BLEND_CONSTANTS, DIRTY_BIT_DYNAMIC_STENCIL_COMPARE_MASK,
DIRTY_BIT_DYNAMIC_STENCIL_WRITE_MASK, DIRTY_BIT_DYNAMIC_STENCIL_REFERENCE,
};
if (mRenderer->getFeatures().useVertexInputBindingStrideDynamicState.enabled ||
getFeatures().supportsVertexInputDynamicState.enabled)
{
mDynamicStateDirtyBits.set(DIRTY_BIT_VERTEX_BUFFERS);
}
if (mRenderer->getFeatures().useCullModeDynamicState.enabled)
{
mDynamicStateDirtyBits.set(DIRTY_BIT_DYNAMIC_CULL_MODE);
}
if (mRenderer->getFeatures().useFrontFaceDynamicState.enabled)
{
mDynamicStateDirtyBits.set(DIRTY_BIT_DYNAMIC_FRONT_FACE);
}
if (mRenderer->getFeatures().useDepthTestEnableDynamicState.enabled)
{
mDynamicStateDirtyBits.set(DIRTY_BIT_DYNAMIC_DEPTH_TEST_ENABLE);
}
if (mRenderer->getFeatures().useDepthWriteEnableDynamicState.enabled)
{
mDynamicStateDirtyBits.set(DIRTY_BIT_DYNAMIC_DEPTH_WRITE_ENABLE);
}
if (mRenderer->getFeatures().useDepthCompareOpDynamicState.enabled)
{
mDynamicStateDirtyBits.set(DIRTY_BIT_DYNAMIC_DEPTH_COMPARE_OP);
}
if (mRenderer->getFeatures().useStencilTestEnableDynamicState.enabled)
{
mDynamicStateDirtyBits.set(DIRTY_BIT_DYNAMIC_STENCIL_TEST_ENABLE);
}
if (mRenderer->getFeatures().useStencilOpDynamicState.enabled)
{
mDynamicStateDirtyBits.set(DIRTY_BIT_DYNAMIC_STENCIL_OP);
}
if (mRenderer->getFeatures().usePrimitiveRestartEnableDynamicState.enabled)
{
mDynamicStateDirtyBits.set(DIRTY_BIT_DYNAMIC_PRIMITIVE_RESTART_ENABLE);