// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build loong64 && gc && !purego

#include "textflag.h"

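// BLAMKA_ROUND applies the BlaMka permutation to the 16 uint64 words held in
// V0-V7, two 64-bit lanes per 128-bit LSX register. BlaMka replaces BLAKE2b's
// modular additions with a + b + 2*lo32(a)*lo32(b) (the widening multiply
// followed by the three VADDVs below); the rotation amounts 32, 24, 16 and 63
// are the same as in BLAKE2b. V8-V11 are used as scratch registers.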
#define BLAMKA_ROUND \
	VMULWEVVWU V0, V2, V8; \
	VADDV V2, V0, V0; \
	VADDV V0, V8, V0; \
	VADDV V0, V8, V0; \
	VXORV V6, V0, V6; \
	VROTRV $32, V6, V6; \
	VMULWEVVWU V4, V6, V8; \
	VADDV V4, V6, V4; \
	VADDV V4, V8, V4; \
	VADDV V4, V8, V4; \
	VXORV V2, V4, V2; \
	VROTRV $24, V2, V2; \
	VMULWEVVWU V0, V2, V8; \
	VADDV V0, V2, V0; \
	VADDV V0, V8, V0; \
	VADDV V0, V8, V0; \
	VXORV V6, V0, V6; \
	VROTRV $16, V6, V6; \
	VMULWEVVWU V4, V6, V8; \
	VADDV V4, V6, V4; \
	VADDV V4, V8, V4; \
	VADDV V4, V8, V4; \
	VXORV V2, V4, V2; \
	VROTRV $63, V2, V2; \
	;\
	VMULWEVVWU V1, V3, V8; \
	VADDV V1, V3, V1; \
	VADDV V1, V8, V1; \
	VADDV V1, V8, V1; \
	VXORV V7, V1, V7; \
	VROTRV $32, V7, V7; \
	VMULWEVVWU V5, V7, V8; \
	VADDV V5, V7, V5; \
	VADDV V5, V8, V5; \
	VADDV V5, V8, V5; \
	VXORV V3, V5, V3; \
	VROTRV $24, V3, V3; \
	VMULWEVVWU V1, V3, V8; \
	VADDV V1, V3, V1; \
	VADDV V1, V8, V1; \
	VADDV V1, V8, V1; \
	VXORV V7, V1, V7; \
	VROTRV $16, V7, V7; \
	VMULWEVVWU V5, V7, V8; \
	VADDV V5, V7, V5; \
	VADDV V5, V8, V5; \
	VADDV V5, V8, V5; \
	VXORV V3, V5, V3; \
	VROTRV $63, V3, V3; \
	;\ // diagonalize
	VXORV V0, V0, V8; \ // V8 = 0
	VADDV V2, V8, V9; \ // V9 = V2
	VADDV V5, V8, V10; \ // V10 = V5
	VADDV V6, V8, V11; \ // V11 = V6
	VADDV V4, V8, V5; \ // V5 = V4
	VADDV V10, V8, V4; \ // V4 = V5
	VSHUF4IV $9, V3, V2; \
	VSHUF4IV $9, V9, V3; \
	VSHUF4IV $3, V7, V6; \
	VSHUF4IV $3, V11, V7; \
	;\
	VMULWEVVWU V0, V2, V9; \
	VADDV V0, V2, V0; \
	VADDV V0, V9, V0; \
	VADDV V0, V9, V0; \
	VXORV V6, V0, V6; \
	VROTRV $32, V6, V6; \
	VMULWEVVWU V4, V6, V9; \
	VADDV V4, V6, V4; \
	VADDV V4, V9, V4; \
	VADDV V4, V9, V4; \
	VXORV V2, V4, V2; \
	VROTRV $24, V2, V2; \
	VMULWEVVWU V0, V2, V9; \
	VADDV V0, V2, V0; \
	VADDV V0, V9, V0; \
	VADDV V0, V9, V0; \
	VXORV V6, V0, V6; \
	VROTRV $16, V6, V6; \
	VMULWEVVWU V4, V6, V9; \
	VADDV V4, V6, V4; \
	VADDV V4, V9, V4; \
	VADDV V4, V9, V4; \
	VXORV V2, V4, V2; \
	VROTRV $63, V2, V2; \
	;\
	VMULWEVVWU V1, V3, V9; \
	VADDV V1, V3, V1; \
	VADDV V1, V9, V1; \
	VADDV V1, V9, V1; \
	VXORV V7, V1, V7; \
	VROTRV $32, V7, V7; \
	VMULWEVVWU V5, V7, V9; \
	VADDV V5, V7, V5; \
	VADDV V5, V9, V5; \
	VADDV V5, V9, V5; \
	VXORV V3, V5, V3; \
	VROTRV $24, V3, V3; \
	VMULWEVVWU V1, V3, V9; \
	VADDV V1, V3, V1; \
	VADDV V1, V9, V1; \
	VADDV V1, V9, V1; \
	VXORV V7, V1, V7; \
	VROTRV $16, V7, V7; \
	VMULWEVVWU V5, V7, V9; \
	VADDV V5, V7, V5; \
	VADDV V5, V9, V5; \
	VADDV V5, V9, V5; \
	VXORV V3, V5, V3; \
	VROTRV $63, V3, V3; \
	;\ // undiagonalize
	VADDV V2, V8, V9; \ // V9 = V2
	VADDV V5, V8, V10; \ // V10 = V5
	VADDV V6, V8, V11; \ // V11 = V6
	VADDV V4, V8, V5; \ // V5 = V4
	VADDV V10, V8, V4; \ // V4 = V5
	VSHUF4IV $3, V3, V2; \
	VSHUF4IV $3, V9, V3; \
	VSHUF4IV $9, V7, V6; \
	VSHUF4IV $9, V11, V7; \

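// BLAMKA_ROUND1 runs BLAMKA_ROUND on the 128 consecutive bytes of the block
// starting at byte offset index from R4, loading and storing eight 16-byte
// vectors.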
#define BLAMKA_ROUND1(index) \
	VMOVQ (index+0)(R4), V0; \
	VMOVQ (index+16)(R4), V1; \
	VMOVQ (index+32)(R4), V2; \
	VMOVQ (index+48)(R4), V3; \
	VMOVQ (index+64)(R4), V4; \
	VMOVQ (index+80)(R4), V5; \
	VMOVQ (index+96)(R4), V6; \
	VMOVQ (index+112)(R4), V7; \
	BLAMKA_ROUND; \
	VMOVQ V0, (index+0)(R4); \
	VMOVQ V1, (index+16)(R4); \
	VMOVQ V2, (index+32)(R4); \
	VMOVQ V3, (index+48)(R4); \
	VMOVQ V4, (index+64)(R4); \
	VMOVQ V5, (index+80)(R4); \
	VMOVQ V6, (index+96)(R4); \
	VMOVQ V7, (index+112)(R4); \

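// BLAMKA_ROUND2 runs BLAMKA_ROUND on eight 16-byte vectors taken at a
// 128-byte stride starting at byte offset index from R4.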
#define BLAMKA_ROUND2(index) \
	VMOVQ (index+0)(R4), V0; \
	VMOVQ (index+128)(R4), V1; \
	VMOVQ (index+256)(R4), V2; \
	VMOVQ (index+384)(R4), V3; \
	VMOVQ (index+512)(R4), V4; \
	VMOVQ (index+640)(R4), V5; \
	VMOVQ (index+768)(R4), V6; \
	VMOVQ (index+896)(R4), V7; \
	BLAMKA_ROUND; \
	VMOVQ V0, (index+0)(R4); \
	VMOVQ V1, (index+128)(R4); \
	VMOVQ V2, (index+256)(R4); \
	VMOVQ V3, (index+384)(R4); \
	VMOVQ V4, (index+512)(R4); \
	VMOVQ V5, (index+640)(R4); \
	VMOVQ V6, (index+768)(R4); \
	VMOVQ V7, (index+896)(R4); \

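// blamkaVX applies the BlaMka permutation to the whole 1024-byte block:
// first to each of the eight 128-byte rows, then to each of the eight
// 128-byte-strided columns.
//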
// func blamkaVX(b *block)
TEXT ·blamkaVX(SB), NOSPLIT, $0-8
	MOVV b+0(FP), R4

	BLAMKA_ROUND1(0)
	BLAMKA_ROUND1(128)
	BLAMKA_ROUND1(256)
	BLAMKA_ROUND1(384)
	BLAMKA_ROUND1(512)
	BLAMKA_ROUND1(640)
	BLAMKA_ROUND1(768)
	BLAMKA_ROUND1(896)

	BLAMKA_ROUND2(0)
	BLAMKA_ROUND2(16)
	BLAMKA_ROUND2(32)
	BLAMKA_ROUND2(48)
	BLAMKA_ROUND2(64)
	BLAMKA_ROUND2(80)
	BLAMKA_ROUND2(96)
	BLAMKA_ROUND2(112)

	RET

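// mixBlocks1VX computes t = in1 ^ in2, 16 bytes per iteration.
//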
// func mixBlocks1VX(t *block, in1 *block, in2 *block)
TEXT ·mixBlocks1VX(SB), NOSPLIT, $0-24
	MOVV t+0(FP), R4
	MOVV in1+8(FP), R5
	MOVV in2+16(FP), R6
	MOVV $128, R8

loop:
	VMOVQ (R5), V0
	VMOVQ (R6), V1
	VXORV V0, V1, V2
	VMOVQ V2, (R4)
	ADDV $16, R5
	ADDV $16, R6
	ADDV $16, R4
	SUBV $2, R8
	BLT R0, R8, loop
	RET

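// mixBlocks2VX computes out = in1 ^ in2 ^ t, 16 bytes per iteration.
//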
// func mixBlocks2VX(out *block, in1 *block, in2 *block, t *block)
TEXT ·mixBlocks2VX(SB), NOSPLIT, $0-32
	MOVV out+0(FP), R4
	MOVV in1+8(FP), R5
	MOVV in2+16(FP), R6
	MOVV t+24(FP), R7
	MOVV $128, R8

loop:
	VMOVQ (R5), V0
	VMOVQ (R6), V1
	VMOVQ (R7), V2
	VXORV V0, V1, V3
	VXORV V3, V2, V4
	VMOVQ V4, (R4)
	ADDV $16, R5
	ADDV $16, R6
	ADDV $16, R7
	ADDV $16, R4
	SUBV $2, R8
	BLT R0, R8, loop
	RET

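// xorBlocksVX computes out ^= in1 ^ in2 ^ t, 16 bytes per iteration.
//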
// func xorBlocksVX(out *block, in1 *block, in2 *block, t *block)
TEXT ·xorBlocksVX(SB), NOSPLIT, $0-32
	MOVV out+0(FP), R4
	MOVV in1+8(FP), R5
	MOVV in2+16(FP), R6
	MOVV t+24(FP), R7
	MOVV $128, R8

loop:
	VMOVQ (R5), V0
	VMOVQ (R6), V1
	VMOVQ (R7), V2
	VMOVQ (R4), V3
	VXORV V0, V1, V4
	VXORV V4, V2, V5
	VXORV V5, V3, V6
	VMOVQ V6, (R4)
	ADDV $16, R5
	ADDV $16, R6
	ADDV $16, R7
	ADDV $16, R4
	SUBV $2, R8
	BLT R0, R8, loop
	RET