block8_amd64.s
// Copyright (c) 2018 Igneous Systems
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

// Copyright (c) 2020 MinIO Inc. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.

// This is the AVX2 implementation of the MD5 block function (8-way parallel)
//
// block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
TEXT ·block8(SB), 4, $0-40
    MOVQ state+0(FP), BX
    MOVQ base+8(FP), SI
    MOVQ bufs+16(FP), AX
    MOVQ cache+24(FP), CX
    MOVQ n+32(FP), DX
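
    // Load the pointer to the MD5 round constants. The 32*const(consts)
    // addressing in the round macros implies each 32-bit constant is
    // stored broadcast across a full 256-bit lane (8 copies).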
    MOVQ ·avx256md5consts+0(SB), DI

    // Align cache (which is stack allocated by the compiler)
    // to a 256 bit boundary (ymm register alignment)
    // The cache8 type is deliberately oversized to permit this.
    ADDQ $31, CX
    ANDB $-32, CL
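
// Register assignments: each YMM register carries one 32-bit MD5 word for
// all 8 lanes side by side. a-d are the working state, sa-sd the saved
// input state, and off holds the per-lane source buffer offsets.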
#define a     Y0
#define b     Y1
#define c     Y2
#define d     Y3
#define sa    Y4
#define sb    Y5
#define sc    Y6
#define sd    Y7
#define tmp   Y8
#define tmp2  Y9
#define mask  Y10
#define off   Y11
#define ones  Y12
#define rtmp1 Y13
#define rtmp2 Y14
#define mem   Y15

#define dig    BX
#define cache  CX
#define count  DX
#define base   SI
#define consts DI
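
// prepmask builds the gather mask: a lane takes part in VPGATHERDD only
// while its offset in off is positive, so inactive lanes are skipped.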
#define prepmask \
    VXORPS   mask, mask, mask \
    VPCMPGTD mask, off, mask
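
// prep gathers message word `index` for all 8 lanes with a single
// VPGATHERDD; the mask is copied to rtmp2 first because the gather
// instruction consumes (clears) its mask register.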
#define prep(index) \
    VMOVAPD    mask, rtmp2 \
    VPGATHERDD rtmp2, index*4(base)(off*1), mem
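
// load/store move one message word (8 lanes x 4 bytes = 32 bytes) between
// mem and the aligned cache, letting rounds 2-4 replay the words with
// cheap aligned loads instead of repeating the gathers.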
#define load(index) \
    VMOVAPD index*32(cache), mem

#define store(index) \
    VMOVAPD mem, index*32(cache)
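
// roll rotates every 32-bit lane of a left by shift; AVX2 has no vector
// rotate instruction, so it is synthesized from two shifts and an OR.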
#define roll(shift, a) \
    VPSLLD $shift, a, rtmp1 \
    VPSRLD $32-shift, a, a \
    VORPS  rtmp1, a, a
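
// ROUND1 is one step of round 1: F(b,c,d) = (b&c)|(~b&d), computed as
// d^(b&(c^d)) with tmp holding d on entry. The gather of the next message
// word is interleaved mid-step to hide its latency, and tmp is left
// holding c, which becomes the next step's d.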
#define ROUND1(a, b, c, d, index, const, shift) \
    VXORPS  c, tmp, tmp \
    VPADDD  32*const(consts), a, a \
    VPADDD  mem, a, a \
    VANDPS  b, tmp, tmp \
    VXORPS  d, tmp, tmp \
    prep(index) \
    VPADDD  tmp, a, a \
    roll(shift,a) \
    VMOVAPD c, tmp \
    VPADDD  b, a, a
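
// ROUND1load is ROUND1 with the next message word read from the cache
// rather than gathered; it is used for the last round-1 step, after all
// 16 words of the block have been fetched.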
#define ROUND1load(a, b, c, d, index, const, shift) \
    VXORPS  c, tmp, tmp \
    VPADDD  32*const(consts), a, a \
    VPADDD  mem, a, a \
    VANDPS  b, tmp, tmp \
    VXORPS  d, tmp, tmp \
    load(index) \
    VPADDD  tmp, a, a \
    roll(shift,a) \
    VMOVAPD c, tmp \
    VPADDD  b, a, a
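
// ROUND2 is one step of round 2: G(b,c,d) = (b&d)|(c&~d). tmp and tmp2
// hold d on entry and are left holding c for the next step.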
#define ROUND2(a, b, c, d, index, const, shift) \
    VPADDD  32*const(consts), a, a \
    VPADDD  mem, a, a \
    VANDPS  b, tmp2, tmp2 \
    VANDNPS c, tmp, tmp \
    load(index) \
    VORPS   tmp, tmp2, tmp2 \
    VMOVAPD c, tmp \
    VPADDD  tmp2, a, a \
    VMOVAPD c, tmp2 \
    roll(shift,a) \
    VPADDD  b, a, a
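
// ROUND3 is one step of round 3: H(b,c,d) = b^c^d. tmp holds c on entry
// and is left holding b, the next step's c.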
#define ROUND3(a, b, c, d, index, const, shift) \
    VPADDD  32*const(consts), a, a \
    VPADDD  mem, a, a \
    load(index) \
    VXORPS  d, tmp, tmp \
    VXORPS  b, tmp, tmp \
    VPADDD  tmp, a, a \
    roll(shift,a) \
    VMOVAPD b, tmp \
    VPADDD  b, a, a
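
// ROUND4 is one step of round 4: I(b,c,d) = c^(b|~d). tmp holds ~d on
// entry and is left holding ~c (the complement of the next step's d),
// formed by XOR with the all-ones register.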
#define ROUND4(a, b, c, d, index, const, shift) \
    VPADDD 32*const(consts), a, a \
    VPADDD mem, a, a \
    VORPS  b, tmp, tmp \
    VXORPS c, tmp, tmp \
    VPADDD tmp, a, a \
    load(index) \
    roll(shift,a) \
    VXORPS c, ones, tmp \
    VPADDD b, a, a

    // load digest into state registers
    VMOVUPD (dig), a
    VMOVUPD 32(dig), b
    VMOVUPD 64(dig), c
    VMOVUPD 96(dig), d

    // load source buffer offsets
    VMOVUPD (AX), off

    prepmask
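
    // set ones to all-1 bits (round 4 computes NOT as XOR with ones)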
    VPCMPEQD ones, ones, ones

loop:
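    // save the input state; it is added back after the 64 rounds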
    VMOVAPD a, sa
    VMOVAPD b, sb
    VMOVAPD c, sc
    VMOVAPD d, sd

    prep(0)
    VMOVAPD d, tmp
    store(0)

    ROUND1(a,b,c,d, 1,0x00, 7)
    store(1)
    ROUND1(d,a,b,c, 2,0x01,12)
    store(2)
    ROUND1(c,d,a,b, 3,0x02,17)
    store(3)
    ROUND1(b,c,d,a, 4,0x03,22)
    store(4)
    ROUND1(a,b,c,d, 5,0x04, 7)
    store(5)
    ROUND1(d,a,b,c, 6,0x05,12)
    store(6)
    ROUND1(c,d,a,b, 7,0x06,17)
    store(7)
    ROUND1(b,c,d,a, 8,0x07,22)
    store(8)
    ROUND1(a,b,c,d, 9,0x08, 7)
    store(9)
    ROUND1(d,a,b,c,10,0x09,12)
    store(10)
    ROUND1(c,d,a,b,11,0x0a,17)
    store(11)
    ROUND1(b,c,d,a,12,0x0b,22)
    store(12)
    ROUND1(a,b,c,d,13,0x0c, 7)
    store(13)
    ROUND1(d,a,b,c,14,0x0d,12)
    store(14)
    ROUND1(c,d,a,b,15,0x0e,17)
    store(15)
    ROUND1load(b,c,d,a, 1,0x0f,22)
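
    // rounds 17-32: seed tmp and tmp2 with d for G(b,c,d)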
    VMOVAPD d, tmp
    VMOVAPD d, tmp2

    ROUND2(a,b,c,d, 6,0x10, 5)
    ROUND2(d,a,b,c,11,0x11, 9)
    ROUND2(c,d,a,b, 0,0x12,14)
    ROUND2(b,c,d,a, 5,0x13,20)
    ROUND2(a,b,c,d,10,0x14, 5)
    ROUND2(d,a,b,c,15,0x15, 9)
    ROUND2(c,d,a,b, 4,0x16,14)
    ROUND2(b,c,d,a, 9,0x17,20)
    ROUND2(a,b,c,d,14,0x18, 5)
    ROUND2(d,a,b,c, 3,0x19, 9)
    ROUND2(c,d,a,b, 8,0x1a,14)
    ROUND2(b,c,d,a,13,0x1b,20)
    ROUND2(a,b,c,d, 2,0x1c, 5)
    ROUND2(d,a,b,c, 7,0x1d, 9)
    ROUND2(c,d,a,b,12,0x1e,14)
    ROUND2(b,c,d,a, 0,0x1f,20)
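
    // rounds 33-48: load the first round-3 message word (X[5]) and seed
    // tmp with c for H(b,c,d)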
    load(5)
    VMOVAPD c, tmp

    ROUND3(a,b,c,d, 8,0x20, 4)
    ROUND3(d,a,b,c,11,0x21,11)
    ROUND3(c,d,a,b,14,0x22,16)
    ROUND3(b,c,d,a, 1,0x23,23)
    ROUND3(a,b,c,d, 4,0x24, 4)
    ROUND3(d,a,b,c, 7,0x25,11)
    ROUND3(c,d,a,b,10,0x26,16)
    ROUND3(b,c,d,a,13,0x27,23)
    ROUND3(a,b,c,d, 0,0x28, 4)
    ROUND3(d,a,b,c, 3,0x29,11)
    ROUND3(c,d,a,b, 6,0x2a,16)
    ROUND3(b,c,d,a, 9,0x2b,23)
    ROUND3(a,b,c,d,12,0x2c, 4)
    ROUND3(d,a,b,c,15,0x2d,11)
    ROUND3(c,d,a,b, 2,0x2e,16)
    ROUND3(b,c,d,a, 0,0x2f,23)
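
    // rounds 49-64: load the first round-4 message word (X[0]) and seed
    // tmp with NOT d for I(b,c,d)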
    load(0)
    VXORPS d, ones, tmp

    ROUND4(a,b,c,d, 7,0x30, 6)
    ROUND4(d,a,b,c,14,0x31,10)
    ROUND4(c,d,a,b, 5,0x32,15)
    ROUND4(b,c,d,a,12,0x33,21)
    ROUND4(a,b,c,d, 3,0x34, 6)
    ROUND4(d,a,b,c,10,0x35,10)
    ROUND4(c,d,a,b, 1,0x36,15)
    ROUND4(b,c,d,a, 8,0x37,21)
    ROUND4(a,b,c,d,15,0x38, 6)
    ROUND4(d,a,b,c, 6,0x39,10)
    ROUND4(c,d,a,b,13,0x3a,15)
    ROUND4(b,c,d,a, 4,0x3b,21)
    ROUND4(a,b,c,d,11,0x3c, 6)
    ROUND4(d,a,b,c, 2,0x3d,10)
    ROUND4(c,d,a,b, 9,0x3e,15)
    ROUND4(b,c,d,a, 0,0x3f,21)
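
    // feed-forward: add the saved input state into the working state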
    VPADDD sa, a, a
    VPADDD sb, b, b
    VPADDD sc, c, c
    VPADDD sd, d, d
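
    // advance all lanes to the next 64-byte block (the per-lane offsets
    // stay fixed, only the shared base pointer moves) and loop until
    // count bytes have been processed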
    LEAQ 64(base), base
    SUBQ $64, count
    JNE  loop
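
    // write the updated state back to the digest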
    VMOVUPD a, (dig)
    VMOVUPD b, 32(dig)
    VMOVUPD c, 64(dig)
    VMOVUPD d, 96(dig)
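
    // clear the upper YMM halves to avoid AVX/SSE transition penalties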
    VZEROUPPER
    RET