ARMISelLowering.cpp (735 KB, 12,512 lines)
12513 12514 12515 12516 12517 12518 12519 12520 12521 12522 12523 12524 12525 12526 12527 12528 12529 12530 12531 12532 12533 12534 12535 12536 12537 12538 12539 12540 12541 12542 12543 12544 12545 12546 12547 12548 12549 12550 12551 12552 12553 12554 12555 12556 12557 12558 12559 12560 12561 12562 12563 12564 12565 12566 12567 12568 12569 12570 12571 12572 12573 12574 12575 12576 12577 12578 12579 12580 12581 12582 12583 12584 12585 12586 12587 12588 12589 12590 12591 12592 12593 12594 12595 12596 12597 12598 12599 12600 12601 12602 12603 12604 12605 12606 12607 12608 12609 12610 12611 12612 12613 12614 12615 12616 12617 12618 12619 12620 12621 12622 12623 12624 12625 12626 12627 12628 12629 12630 12631 12632 12633 12634 12635 12636 12637 12638 12639 12640 12641 12642 12643 12644 12645 12646 12647 12648 12649 12650 12651 12652 12653 12654 12655 12656 12657 12658 12659 12660 12661 12662 12663 12664 12665 12666 12667 12668 12669 12670 12671 12672 12673 12674 12675 12676 12677 12678 12679 12680 12681 12682 12683 12684 12685 12686 12687 12688 12689 12690 12691 12692 12693 12694 12695 12696 12697 12698 12699 12700 12701 12702 12703 12704 12705 12706 12707 12708 12709 12710 12711 12712 12713 12714 12715 12716 12717 12718 12719 12720 12721 12722 12723 12724 12725 12726 12727 12728 12729 12730 12731 12732 12733 12734 12735 12736 12737 12738 12739 12740 12741 12742 12743 12744 12745 12746 12747 12748 12749 12750 12751 12752 12753 12754 12755 12756 12757 12758 12759 12760 12761 12762 12763 12764 12765 12766 12767 12768 12769 12770 12771 12772 12773 12774 12775 12776 12777 12778 12779 12780 12781 12782 12783 12784 12785 12786 12787 12788 12789 12790 12791 12792 12793 12794 12795 12796 12797 12798 12799 12800 12801 12802 12803 12804 12805 12806 12807 12808 12809 12810 12811 12812 12813 12814 12815 12816 12817 12818 12819 12820 12821 12822 12823 12824 12825 12826 12827 12828 12829 12830 12831 12832 12833 12834 12835 12836 12837 12838 12839 12840 12841 12842 12843 12844 12845 12846 12847 12848 12849 12850 12851 12852 12853 12854 12855 12856 12857 12858 12859 12860 12861 12862 12863 12864 12865 12866 12867 12868 12869 12870 12871 12872 12873 12874 12875 12876 12877 12878 12879 12880 12881 12882 12883 12884 12885 12886 12887 12888 12889 12890 12891 12892 12893 12894 12895 12896 12897 12898 12899 12900 12901 12902 12903 12904 12905 12906 12907 12908 12909 12910 12911 12912 12913 12914 12915 12916 12917 12918 12919 12920 12921 12922 12923 12924 12925 12926 12927 12928 12929 12930 12931 12932 12933 12934 12935 12936 12937 12938 12939 12940 12941 12942 12943 12944 12945 12946 12947 12948 12949 12950 12951 12952 12953 12954 12955 12956 12957 12958 12959 12960 12961 12962 12963 12964 12965 12966 12967 12968 12969 12970 12971 12972 12973 12974 12975 12976 12977 12978 12979 12980 12981 12982 12983 12984 12985 12986 12987 12988 12989 12990 12991 12992 12993 12994 12995 12996 12997 12998 12999 13000 13001 13002 13003 13004 13005 13006 13007 13008 13009 13010 13011 13012 13013 13014 13015 13016 13017 13018 13019 13020 13021 13022 13023 13024 13025 13026 13027 13028 13029 13030 13031 13032 13033 13034 13035 13036 13037 13038 13039 13040 13041 13042 13043 13044 13045 13046 13047 13048 13049 13050 13051 13052 13053 13054 13055 13056 13057 13058 13059 13060 13061 13062 13063 13064 13065 13066 13067 13068 13069 13070 13071 13072 13073 13074 13075 13076 13077 13078 13079 13080 13081 13082 13083 13084 13085 13086 13087 13088 13089 13090 13091 13092 13093 13094 13095 13096 13097 13098 13099 13100 13101 13102 13103 13104 
13105 13106 13107 13108 13109 13110 13111 13112 13113 13114 13115 13116 13117 13118 13119 13120 13121 13122 13123 13124 13125 13126 13127 13128 13129 13130 13131 13132 13133 13134 13135 13136 13137 13138 13139 13140 13141 13142 13143 13144 13145 13146 13147 13148 13149 13150 13151 13152 13153 13154 13155 13156 13157 13158 13159 13160 13161 13162 13163 13164 13165 13166 13167 13168 13169 13170 13171 13172 13173 13174 13175 13176 13177 13178 13179 13180 13181 13182 13183 13184 13185 13186 13187 13188 13189 13190 13191 13192 13193 13194 13195 13196 13197 13198 13199 13200 13201 13202 13203 13204 13205 13206 13207 13208 13209 13210 13211 13212 13213 13214 13215 13216 13217 13218 13219 13220 13221 13222 13223 13224 13225 13226 13227 13228 13229 13230 13231 13232 13233 13234 13235 13236 13237 13238 13239 13240 13241 13242 13243 13244 13245 13246 13247 13248 13249 13250 13251 13252 13253 13254 13255 13256 13257 13258 13259 13260 13261 13262 13263 13264 13265 13266 13267 13268 13269 13270 13271 13272 13273 13274 13275 13276 13277 13278 13279 13280 13281 13282 13283 13284 13285 13286 13287 13288 13289 13290 13291 13292 13293 13294 13295 13296 13297 13298 13299 13300 13301 13302 13303 13304 13305 13306 13307 13308 13309 13310 13311 13312 13313 13314 13315 13316 13317 13318 13319 13320 13321 13322 13323 13324 13325 13326 13327 13328 13329 13330 13331 13332 13333 13334 13335 13336 13337 13338 13339 13340 13341 13342 13343 13344 13345 13346 13347 13348 13349 13350 13351 13352 13353 13354 13355 13356 13357 13358 13359 13360 13361 13362 13363 13364 13365 13366 13367 13368 13369 13370 13371 13372 13373 13374 13375 13376 13377 13378 13379 13380 13381 13382 13383 13384 13385 13386 13387 13388 13389 13390 13391 13392 13393 13394 13395 13396 13397 13398 13399 13400 13401 13402 13403 13404 13405 13406 13407 13408 13409 13410 13411 13412 13413 13414 13415 13416 13417 13418 13419 13420 13421 13422 13423 13424 13425 13426 13427 13428 13429 13430 13431 13432 13433 13434 13435 13436 13437 13438 13439 13440 13441 13442 13443 13444 13445 13446 13447 13448 13449 13450 13451 13452 13453 13454 13455 13456 13457 13458 13459 13460 13461 13462 13463 13464 13465 13466 13467 13468 13469 13470 13471 13472 13473 13474 13475 13476 13477 13478 13479 13480 13481 13482 13483 13484 13485 13486 13487 13488 13489 13490 13491 13492 13493 13494 13495 13496 13497 13498 13499 13500 13501 13502 13503 13504 13505 13506 13507 13508 13509 13510 13511 13512 13513 13514 13515 13516 13517 13518 13519 13520 13521 13522 13523 13524 13525 13526 13527 13528 13529 13530 13531 13532 13533 13534 13535 13536 13537 13538 13539 13540 13541 13542 13543 13544 13545 13546 13547 13548 13549 13550 13551 13552 13553 13554 13555 13556 13557 13558 13559 13560 13561 13562 13563 13564 13565 13566 13567 13568 13569 13570 13571 13572 13573 13574 13575 13576 13577 13578 13579 13580 13581 13582 13583 13584 13585 13586 13587 13588 13589 13590 13591 13592 13593 13594 13595 13596 13597 13598 13599 13600 13601 13602 13603 13604 13605 13606 13607 13608 13609 13610 13611 13612 13613 13614 13615 13616 13617 13618 13619 13620 13621 13622 13623 13624 13625 13626 13627 13628 13629 13630 13631 13632 13633 13634 13635 13636 13637 13638 13639 13640 13641 13642 13643 13644 13645 13646 13647 13648 13649 13650 13651 13652 13653 13654 13655 13656 13657 13658 13659 13660 13661 13662 13663 13664 13665 13666 13667 13668 13669 13670 13671 13672 13673 13674 13675 13676 13677 13678 13679 13680 13681 13682 13683 13684 13685 13686 13687 13688 13689 13690 13691 13692 13693 13694 13695 13696 
13697 13698 13699 13700 13701 13702 13703 13704 13705 13706 13707 13708 13709 13710 13711 13712 13713 13714 13715 13716 13717 13718 13719 13720 13721 13722 13723 13724 13725 13726 13727 13728 13729 13730 13731 13732 13733 13734 13735 13736 13737 13738 13739 13740 13741 13742 13743 13744 13745 13746 13747 13748 13749 13750 13751 13752 13753 13754 13755 13756 13757 13758 13759 13760 13761 13762 13763 13764 13765 13766 13767 13768 13769 13770 13771 13772 13773 13774 13775 13776 13777 13778 13779 13780 13781 13782 13783 13784 13785 13786 13787 13788 13789 13790 13791 13792 13793 13794 13795 13796 13797 13798 13799 13800 13801 13802 13803 13804 13805 13806 13807 13808 13809 13810 13811 13812 13813 13814 13815 13816 13817 13818 13819 13820 13821 13822 13823 13824 13825 13826 13827 13828 13829 13830 13831 13832 13833 13834 13835 13836 13837 13838 13839 13840 13841 13842 13843 13844 13845 13846 13847 13848 13849 13850 13851 13852 13853 13854 13855 13856 13857 13858 13859 13860 13861 13862 13863 13864 13865 13866 13867 13868 13869 13870 13871 13872 13873 13874 13875 13876 13877 13878 13879 13880 13881 13882 13883 13884 13885 13886 13887 13888 13889 13890 13891 13892 13893 13894 13895 13896 13897 13898 13899 13900 13901 13902 13903 13904 13905 13906 13907 13908 13909 13910 13911 13912 13913 13914 13915 13916 13917 13918 13919 13920 13921 13922 13923 13924 13925 13926 13927 13928 13929 13930 13931 13932 13933 13934 13935 13936 13937 13938 13939 13940 13941 13942 13943 13944 13945 13946 13947 13948 13949 13950 13951 13952 13953 13954 13955 13956 13957 13958 13959 13960 13961 13962 13963 13964 13965 13966 13967 13968 13969 13970 13971 13972 13973 13974 13975 13976 13977 13978 13979 13980 13981 13982 13983 13984 13985 13986 13987 13988 13989 13990 13991 13992 13993 13994 13995 13996 13997 13998 13999 14000 14001 14002 14003 14004 14005 14006 14007 14008 14009 14010 14011 14012 14013 14014 14015 14016 14017 14018 14019 14020 14021 14022 14023 14024 14025 14026 14027 14028 14029 14030 14031 14032 14033 14034 14035 14036 14037 14038 14039 14040 14041 14042 14043 14044 14045 14046 14047 14048 14049 14050 14051 14052 14053 14054 14055 14056 14057 14058 14059 14060 14061 14062 14063 14064 14065 14066 14067 14068 14069 14070 14071 14072 14073 14074 14075 14076 14077 14078 14079 14080 14081 14082 14083 14084 14085 14086 14087 14088 14089 14090 14091 14092 14093 14094 14095 14096 14097 14098 14099 14100 14101 14102 14103 14104 14105 14106 14107 14108 14109 14110 14111 14112 14113 14114 14115 14116 14117 14118 14119 14120 14121 14122 14123 14124 14125 14126 14127 14128 14129 14130 14131 14132 14133 14134 14135 14136 14137 14138 14139 14140 14141 14142 14143 14144 14145 14146 14147 14148 14149 14150 14151 14152 14153 14154 14155 14156 14157 14158 14159 14160 14161 14162 14163 14164 14165 14166 14167 14168 14169 14170 14171 14172 14173 14174 14175 14176 14177 14178 14179 14180 14181 14182 14183 14184 14185 14186 14187 14188 14189 14190 14191 14192 14193 14194 14195 14196 14197 14198 14199 14200 14201 14202 14203 14204 14205 14206 14207 14208 14209 14210 14211 14212 14213 14214 14215 14216 14217 14218 14219 14220 14221 14222 14223 14224 14225 14226 14227 14228 14229 14230 14231 14232 14233 14234 14235 14236 14237 14238 14239 14240 14241 14242 14243 14244 14245 14246 14247 14248 14249 14250 14251 14252 14253 14254 14255 14256 14257 14258 14259 14260 14261 14262 14263 14264 14265 14266 14267 14268 14269 14270 14271 14272 14273 14274 14275 14276 14277 14278 14279 14280 14281 14282 14283 14284 14285 14286 14287 14288 
14289 14290 14291 14292 14293 14294 14295 14296 14297 14298 14299 14300 14301 14302 14303 14304 14305 14306 14307 14308 14309 14310 14311 14312 14313 14314 14315 14316 14317 14318 14319 14320 14321 14322 14323 14324 14325 14326 14327 14328 14329 14330 14331 14332 14333 14334 14335 14336 14337 14338 14339 14340 14341 14342 14343 14344 14345 14346 14347 14348 14349 14350 14351 14352 14353 14354 14355 14356 14357 14358 14359 14360 14361 14362 14363 14364 14365 14366 14367 14368 14369 14370 14371 14372 14373 14374 14375 14376 14377 14378 14379 14380 14381 14382 14383 14384 14385 14386 14387 14388 14389 14390 14391 14392 14393 14394 14395 14396 14397 14398 14399 14400 14401 14402 14403 14404 14405 14406 14407 14408 14409 14410 14411 14412 14413 14414 14415 14416 14417 14418 14419 14420 14421 14422 14423 14424 14425 14426 14427 14428 14429 14430 14431 14432 14433 14434 14435 14436 14437 14438 14439 14440 14441 14442 14443 14444 14445 14446 14447 14448 14449 14450 14451 14452 14453 14454 14455 14456 14457 14458 14459 14460 14461 14462 14463 14464 14465 14466 14467 14468 14469 14470 14471 14472 14473 14474 14475 14476 14477 14478 14479 14480 14481 14482 14483 14484 14485 14486 14487 14488 14489 14490 14491 14492 14493 14494 14495 14496 14497 14498 14499 14500 14501 14502 14503 14504 14505 14506 14507 14508 14509 14510 14511 14512 14513 14514 14515 14516 14517 14518 14519 14520 14521 14522 14523 14524 14525 14526 14527 14528 14529 14530 14531 14532 14533 14534 14535 14536 14537 14538 14539 14540 14541 14542 14543 14544 14545 14546 14547 14548 14549 14550 14551 14552 14553 14554 14555 14556 14557 14558 14559 14560 14561 14562 14563 14564 14565 14566 14567 14568 14569 14570 14571 14572 14573 14574 14575 14576 14577 14578 14579 14580 14581 14582 14583 14584 14585 14586 14587 14588 14589 14590 14591 14592 14593 14594 14595 14596 14597 14598 14599 14600 14601 14602 14603 14604 14605 14606 14607 14608 14609 14610 14611 14612 14613 14614 14615 14616 14617 14618 14619 14620 14621 14622 14623 14624 14625 14626 14627 14628 14629 14630 14631 14632 14633 14634 14635 14636 14637 14638 14639 14640 14641 14642 14643 14644 14645 14646 14647 14648 14649 14650 14651 14652 14653 14654 14655 14656 14657 14658 14659 14660 14661 14662 14663 14664 14665 14666 14667 14668 14669 14670 14671 14672 14673 14674 14675 14676 14677 14678 14679 14680 14681 14682 14683 14684 14685 14686 14687 14688 14689 14690 14691 14692 14693 14694 14695 14696 14697 14698 14699 14700 14701 14702 14703 14704 14705 14706 14707 14708 14709 14710 14711 14712 14713 14714 14715 14716 14717 14718 14719 14720 14721 14722 14723 14724 14725 14726 14727 14728 14729 14730 14731 14732 14733 14734 14735 14736 14737 14738 14739 14740 14741 14742 14743 14744 14745 14746 14747 14748 14749 14750 14751 14752 14753 14754 14755 14756 14757 14758 14759 14760 14761 14762 14763 14764 14765 14766 14767 14768 14769 14770 14771 14772 14773 14774 14775 14776 14777 14778 14779 14780 14781 14782 14783 14784 14785 14786 14787 14788 14789 14790 14791 14792 14793 14794 14795 14796 14797 14798 14799 14800 14801 14802 14803 14804 14805 14806 14807 14808 14809 14810 14811 14812 14813 14814 14815 14816 14817 14818 14819 14820 14821 14822 14823 14824 14825 14826 14827 14828 14829 14830 14831 14832 14833 14834 14835 14836 14837 14838 14839 14840 14841 14842 14843 14844 14845 14846 14847 14848 14849 14850 14851 14852 14853 14854 14855 14856 14857 14858 14859 14860 14861 14862 14863 14864 14865 14866 14867 14868 14869 14870 14871 14872 14873 14874 14875 14876 14877 14878 14879 14880 
14881 14882 14883 14884 14885 14886 14887 14888 14889 14890 14891 14892 14893 14894 14895 14896 14897 14898 14899 14900 14901 14902 14903 14904 14905 14906 14907 14908 14909 14910 14911 14912 14913 14914 14915 14916 14917 14918 14919 14920 14921 14922 14923 14924 14925 14926 14927 14928 14929 14930 14931 14932 14933 14934 14935 14936 14937 14938 14939 14940 14941 14942 14943 14944 14945 14946 14947 14948 14949 14950 14951 14952 14953 14954 14955 14956 14957 14958 14959 14960 14961 14962 14963 14964 14965 14966 14967 14968 14969 14970 14971 14972 14973 14974 14975 14976 14977 14978 14979 14980 14981 14982 14983 14984 14985 14986 14987 14988 14989 14990 14991 14992 14993 14994 14995 14996 14997 14998 14999 15000 15001 15002 15003 15004 15005 15006 15007 15008 15009 15010 15011 15012 15013 15014 15015 15016 15017 15018 15019 15020 15021 15022 15023 15024 15025 15026 15027 15028 15029 15030 15031 15032 15033 15034 15035 15036 15037 15038 15039 15040 15041 15042 15043 15044 15045 15046 15047 15048 15049 15050 15051 15052 15053 15054 15055 15056 15057 15058 15059 15060 15061 15062 15063 15064 15065 15066 15067 15068 15069 15070 15071 15072 15073 15074 15075 15076 15077 15078 15079 15080 15081 15082 15083 15084 15085 15086 15087 15088 15089 15090 15091 15092 15093 15094 15095 15096 15097 15098 15099 15100 15101 15102 15103 15104 15105 15106 15107 15108 15109 15110 15111 15112 15113 15114 15115 15116 15117 15118 15119 15120 15121 15122 15123 15124 15125 15126 15127 15128 15129 15130 15131 15132 15133 15134 15135 15136 15137 15138 15139 15140 15141 15142 15143 15144 15145 15146 15147 15148 15149 15150 15151 15152 15153 15154 15155 15156 15157 15158 15159 15160 15161 15162 15163 15164 15165 15166 15167 15168 15169 15170 15171 15172 15173 15174 15175 15176 15177 15178 15179 15180 15181 15182 15183 15184 15185 15186 15187 15188 15189 15190 15191 15192 15193 15194 15195 15196 15197 15198 15199 15200 15201 15202 15203 15204 15205 15206 15207 15208 15209 15210 15211 15212 15213 15214 15215 15216 15217 15218 15219 15220 15221 15222 15223 15224 15225 15226 15227 15228 15229 15230 15231 15232 15233 15234 15235 15236 15237 15238 15239 15240 15241 15242 15243 15244 15245 15246 15247 15248 15249 15250 15251 15252 15253 15254 15255 15256 15257 15258 15259 15260 15261 15262 15263 15264 15265 15266 15267 15268 15269 15270 15271 15272 15273 15274 15275 15276 15277 15278 15279 15280 15281 15282 15283 15284 15285 15286 15287 15288 15289 15290 15291 15292 15293 15294 15295 15296 15297 15298 15299 15300 15301 15302 15303 15304 15305 15306 15307 15308 15309 15310 15311 15312 15313 15314 15315 15316 15317 15318 15319 15320 15321 15322 15323 15324 15325 15326 15327 15328 15329 15330 15331 15332 15333 15334 15335 15336 15337 15338 15339 15340 15341 15342 15343 15344 15345 15346 15347 15348 15349 15350 15351 15352 15353 15354 15355 15356 15357 15358 15359 15360 15361 15362 15363 15364 15365 15366 15367 15368 15369 15370 15371 15372 15373 15374 15375 15376 15377 15378 15379 15380 15381 15382 15383 15384 15385 15386 15387 15388 15389 15390 15391 15392 15393 15394 15395 15396 15397 15398 15399 15400 15401 15402 15403 15404 15405 15406 15407 15408 15409 15410 15411 15412 15413 15414 15415 15416 15417 15418 15419 15420 15421 15422 15423 15424 15425 15426 15427 15428 15429 15430 15431 15432 15433 15434 15435 15436 15437 15438 15439 15440 15441 15442 15443 15444 15445 15446 15447 15448 15449 15450 15451 15452 15453 15454 15455 15456 15457 15458 15459 15460 15461 15462 15463 15464 15465 15466 15467 15468 15469 15470 15471 15472 
15473 15474 15475 15476 15477 15478 15479 15480 15481 15482 15483 15484 15485 15486 15487 15488 15489 15490 15491 15492 15493 15494 15495 15496 15497 15498 15499 15500 15501 15502 15503 15504 15505 15506 15507 15508 15509 15510 15511 15512 15513 15514 15515 15516 15517 15518 15519 15520 15521 15522 15523 15524 15525 15526 15527 15528 15529 15530 15531 15532 15533 15534 15535 15536 15537 15538 15539 15540 15541 15542 15543 15544 15545 15546 15547 15548 15549 15550 15551 15552 15553 15554 15555 15556 15557 15558 15559 15560 15561 15562 15563 15564 15565 15566 15567 15568 15569 15570 15571 15572 15573 15574 15575 15576 15577 15578 15579 15580 15581 15582 15583 15584 15585 15586 15587 15588 15589 15590 15591 15592 15593 15594 15595 15596 15597 15598 15599 15600 15601 15602 15603 15604 15605 15606 15607 15608 15609 15610 15611 15612 15613 15614 15615 15616 15617 15618 15619 15620 15621 15622 15623 15624 15625 15626 15627 15628 15629 15630 15631 15632 15633 15634 15635 15636 15637 15638 15639 15640 15641 15642 15643 15644 15645 15646 15647 15648 15649 15650 15651 15652 15653 15654 15655 15656 15657 15658 15659 15660 15661 15662 15663 15664 15665 15666 15667 15668 15669 15670 15671 15672 15673 15674 15675 15676 15677 15678 15679 15680 15681 15682 15683 15684 15685 15686 15687 15688 15689 15690 15691 15692 15693 15694 15695 15696 15697 15698 15699 15700 15701 15702 15703 15704 15705 15706 15707 15708 15709 15710 15711 15712 15713 15714 15715 15716 15717 15718 15719 15720 15721 15722 15723 15724 15725 15726 15727 15728 15729 15730 15731 15732 15733 15734 15735 15736 15737 15738 15739 15740 15741 15742 15743 15744 15745 15746 15747 15748 15749 15750 15751 15752 15753 15754 15755 15756 15757 15758 15759 15760 15761 15762 15763 15764 15765 15766 15767 15768 15769 15770 15771 15772 15773 15774 15775 15776 15777 15778 15779 15780 15781 15782 15783 15784 15785 15786 15787 15788 15789 15790 15791 15792 15793 15794 15795 15796 15797 15798 15799 15800 15801 15802 15803 15804 15805 15806 15807 15808 15809 15810 15811 15812 15813 15814 15815 15816 15817 15818 15819 15820 15821 15822 15823 15824 15825 15826 15827 15828 15829 15830 15831 15832 15833 15834 15835 15836 15837 15838 15839 15840 15841 15842 15843 15844 15845 15846 15847 15848 15849 15850 15851 15852 15853 15854 15855 15856 15857 15858 15859 15860 15861 15862 15863 15864 15865 15866 15867 15868 15869 15870 15871 15872 15873 15874 15875 15876 15877 15878 15879 15880 15881 15882 15883 15884 15885 15886 15887 15888 15889 15890 15891 15892 15893 15894 15895 15896 15897 15898 15899 15900 15901 15902 15903 15904 15905 15906 15907 15908 15909 15910 15911 15912 15913 15914 15915 15916 15917 15918 15919 15920 15921 15922 15923 15924 15925 15926 15927 15928 15929 15930 15931 15932 15933 15934 15935 15936 15937 15938 15939 15940 15941 15942 15943 15944 15945 15946 15947 15948 15949 15950 15951 15952 15953 15954 15955 15956 15957 15958 15959 15960 15961 15962 15963 15964 15965 15966 15967 15968 15969 15970 15971 15972 15973 15974 15975 15976 15977 15978 15979 15980 15981 15982 15983 15984 15985 15986 15987 15988 15989 15990 15991 15992 15993 15994 15995 15996 15997 15998 15999 16000 16001 16002 16003 16004 16005 16006 16007 16008 16009 16010 16011 16012 16013 16014 16015 16016 16017 16018 16019 16020 16021 16022 16023 16024 16025 16026 16027 16028 16029 16030 16031 16032 16033 16034 16035 16036 16037 16038 16039 16040 16041 16042 16043 16044 16045 16046 16047 16048 16049 16050 16051 16052 16053 16054 16055 16056 16057 16058 16059 16060 16061 16062 16063 16064 
16065 16066 16067 16068 16069 16070 16071 16072 16073 16074 16075 16076 16077 16078 16079 16080 16081 16082 16083 16084 16085 16086 16087 16088 16089 16090 16091 16092 16093 16094 16095 16096 16097 16098 16099 16100 16101 16102 16103 16104 16105 16106 16107 16108 16109 16110 16111 16112 16113 16114 16115 16116 16117 16118 16119 16120 16121 16122 16123 16124 16125 16126 16127 16128 16129 16130 16131 16132 16133 16134 16135 16136 16137 16138 16139 16140 16141 16142 16143 16144 16145 16146 16147 16148 16149 16150 16151 16152 16153 16154 16155 16156 16157 16158 16159 16160 16161 16162 16163 16164 16165 16166 16167 16168 16169 16170 16171 16172 16173 16174 16175 16176 16177 16178 16179 16180 16181 16182 16183 16184 16185 16186 16187 16188 16189 16190 16191 16192 16193 16194 16195 16196 16197 16198 16199 16200 16201 16202 16203 16204 16205 16206 16207 16208 16209 16210 16211 16212 16213 16214 16215 16216 16217 16218 16219 16220 16221 16222 16223 16224 16225 16226 16227 16228 16229 16230 16231 16232 16233 16234 16235 16236 16237 16238 16239 16240 16241 16242 16243 16244 16245 16246 16247 16248 16249 16250 16251 16252 16253 16254 16255 16256 16257 16258 16259 16260 16261 16262 16263 16264 16265 16266 16267 16268 16269 16270 16271 16272 16273 16274 16275 16276 16277 16278 16279 16280 16281 16282 16283 16284 16285 16286 16287 16288 16289 16290 16291 16292 16293 16294 16295 16296 16297 16298 16299 16300 16301 16302 16303 16304 16305 16306 16307 16308 16309 16310 16311 16312 16313 16314 16315 16316 16317 16318 16319 16320 16321 16322 16323 16324 16325 16326 16327 16328 16329 16330 16331 16332 16333 16334 16335 16336 16337 16338 16339 16340 16341 16342 16343 16344 16345 16346 16347 16348 16349 16350 16351 16352 16353 16354 16355 16356 16357 16358 16359 16360 16361 16362 16363 16364 16365 16366 16367 16368 16369 16370 16371 16372 16373 16374 16375 16376 16377 16378 16379 16380 16381 16382 16383 16384 16385 16386 16387 16388 16389 16390 16391 16392 16393 16394 16395 16396 16397 16398 16399 16400 16401 16402 16403 16404 16405 16406 16407 16408 16409 16410 16411 16412 16413 16414 16415 16416 16417 16418 16419 16420 16421 16422 16423 16424 16425 16426 16427 16428 16429 16430 16431 16432 16433 16434 16435 16436 16437 16438 16439 16440 16441 16442 16443 16444 16445 16446 16447 16448 16449 16450 16451 16452 16453 16454 16455 16456 16457 16458 16459 16460 16461 16462 16463 16464 16465 16466 16467 16468 16469 16470 16471 16472 16473 16474 16475 16476 16477 16478 16479 16480 16481 16482 16483 16484 16485 16486 16487 16488 16489 16490 16491 16492 16493 16494 16495 16496 16497 16498 16499 16500 16501 16502 16503 16504 16505 16506 16507 16508 16509 16510 16511 16512 16513 16514 16515 16516 16517 16518 16519 16520 16521 16522 16523 16524 16525 16526 16527 16528 16529 16530 16531 16532 16533 16534 16535 16536 16537 16538 16539 16540 16541 16542 16543 16544 16545 16546 16547 16548 16549 16550 16551 16552 16553 16554 16555 16556 16557 16558 16559 16560 16561 16562 16563 16564 16565 16566 16567 16568 16569 16570 16571 16572 16573 16574 16575 16576 16577 16578 16579 16580 16581 16582 16583 16584 16585 16586 16587 16588 16589 16590 16591 16592 16593 16594 16595 16596 16597 16598 16599 16600 16601 16602 16603 16604 16605 16606 16607 16608 16609 16610 16611 16612 16613 16614 16615 16616 16617 16618 16619 16620 16621 16622 16623 16624 16625 16626 16627 16628 16629 16630 16631 16632 16633 16634 16635 16636 16637 16638 16639 16640 16641 16642 16643 16644 16645 16646 16647 16648 16649 16650 16651 16652 16653 16654 16655 16656 
16657 16658 16659 16660 16661 16662 16663 16664 16665 16666 16667 16668 16669 16670 16671 16672 16673 16674 16675 16676 16677 16678 16679 16680 16681 16682 16683 16684 16685 16686 16687 16688 16689 16690 16691 16692 16693 16694 16695 16696 16697 16698 16699 16700 16701 16702 16703 16704 16705 16706 16707 16708 16709 16710 16711 16712 16713 16714 16715 16716 16717 16718 16719 16720 16721 16722 16723 16724 16725 16726 16727 16728 16729 16730 16731 16732 16733 16734 16735 16736 16737 16738 16739 16740 16741 16742 16743 16744 16745 16746 16747 16748 16749 16750 16751 16752 16753 16754 16755 16756 16757 16758 16759 16760 16761 16762 16763 16764 16765 16766 16767 16768 16769 16770 16771 16772 16773 16774 16775 16776 16777 16778 16779 16780 16781 16782 16783 16784 16785 16786 16787 16788 16789 16790 16791 16792 16793 16794 16795 16796 16797 16798 16799 16800 16801 16802 16803 16804 16805 16806 16807 16808 16809 16810 16811 16812 16813 16814 16815 16816 16817 16818 16819 16820 16821 16822 16823 16824 16825 16826 16827 16828 16829 16830 16831 16832 16833 16834 16835 16836 16837 16838 16839 16840 16841 16842 16843 16844 16845 16846 16847 16848 16849 16850 16851 16852 16853 16854 16855 16856 16857 16858 16859 16860 16861 16862 16863 16864 16865 16866 16867 16868 16869 16870 16871 16872 16873 16874 16875 16876 16877 16878 16879 16880 16881 16882 16883 16884 16885 16886 16887 16888 16889 16890 16891 16892 16893 16894 16895 16896 16897 16898 16899 16900 16901 16902 16903 16904 16905 16906 16907 16908 16909 16910 16911 16912 16913 16914 16915 16916 16917 16918 16919 16920 16921 16922 16923 16924 16925 16926 16927 16928 16929 16930 16931 16932 16933 16934 16935 16936 16937 16938 16939 16940 16941 16942 16943 16944 16945 16946 16947 16948 16949 16950 16951 16952 16953 16954 16955 16956 16957 16958 16959 16960 16961 16962 16963 16964 16965 16966 16967 16968 16969 16970 16971 16972 16973 16974 16975 16976 16977 16978 16979 16980 16981 16982 16983 16984 16985 16986 16987 16988 16989 16990 16991 16992 16993 16994 16995 16996 16997 16998 16999 17000 17001 17002 17003 17004 17005 17006 17007 17008 17009 17010 17011 17012 17013 17014 17015 17016 17017 17018 17019 17020 17021 17022 17023 17024 17025 17026 17027 17028 17029 17030 17031 17032 17033 17034 17035 17036 17037 17038 17039 17040 17041 17042 17043 17044 17045 17046 17047 17048 17049 17050 17051 17052 17053 17054 17055 17056 17057 17058 17059 17060 17061 17062 17063 17064 17065 17066 17067 17068 17069 17070 17071 17072 17073 17074 17075 17076 17077 17078 17079 17080 17081 17082 17083 17084 17085 17086 17087 17088 17089 17090 17091 17092 17093 17094 17095 17096 17097 17098 17099 17100 17101 17102 17103 17104 17105 17106 17107 17108 17109 17110 17111 17112 17113 17114 17115 17116 17117 17118 17119 17120 17121 17122 17123 17124 17125 17126 17127 17128 17129 17130 17131 17132 17133 17134 17135 17136 17137 17138 17139 17140 17141 17142 17143 17144 17145 17146 17147 17148 17149 17150 17151 17152 17153 17154 17155 17156 17157 17158 17159 17160 17161 17162 17163 17164 17165 17166 17167 17168 17169 17170 17171 17172 17173 17174 17175 17176 17177 17178 17179 17180 17181 17182 17183 17184 17185 17186 17187 17188 17189 17190 17191 17192 17193 17194 17195 17196 17197 17198 17199 17200 17201 17202 17203 17204 17205 17206 17207 17208 17209 17210 17211 17212 17213 17214 17215 17216 17217 17218 17219 17220 17221 17222 17223 17224 17225 17226 17227 17228 17229 17230 17231 17232 17233 17234 17235 17236 17237 17238 17239 17240 17241 17242 17243 17244 17245 17246 17247 17248 
17249 17250 17251 17252 17253 17254 17255 17256 17257 17258 17259 17260 17261 17262 17263 17264 17265 17266 17267 17268 17269 17270 17271 17272 17273 17274 17275 17276 17277 17278 17279 17280 17281 17282 17283 17284 17285 17286 17287 17288 17289 17290 17291 17292 17293 17294 17295 17296 17297 17298 17299 17300 17301 17302 17303 17304 17305 17306 17307 17308 17309 17310 17311 17312 17313 17314 17315 17316 17317 17318 17319 17320 17321 17322 17323 17324 17325 17326 17327 17328 17329 17330 17331 17332 17333 17334 17335 17336 17337 17338 17339 17340 17341 17342 17343 17344 17345 17346 17347 17348 17349 17350 17351 17352 17353 17354 17355 17356 17357 17358 17359 17360 17361 17362 17363 17364 17365 17366 17367 17368 17369 17370 17371 17372 17373 17374 17375 17376 17377 17378 17379 17380 17381 17382 17383 17384 17385 17386 17387 17388 17389 17390 17391 17392 17393 17394 17395 17396 17397 17398 17399 17400 17401 17402 17403 17404 17405 17406 17407 17408 17409 17410 17411 17412 17413 17414 17415 17416 17417 17418 17419 17420 17421 17422 17423 17424 17425 17426 17427 17428 17429 17430 17431 17432 17433 17434 17435 17436 17437 17438 17439 17440 17441 17442 17443 17444 17445 17446 17447 17448 17449 17450 17451 17452 17453 17454 17455 17456 17457 17458 17459 17460 17461 17462 17463 17464 17465 17466 17467 17468 17469 17470 17471 17472 17473 17474 17475 17476 17477 17478 17479 17480 17481 17482 17483 17484 17485 17486 17487 17488 17489 17490 17491 17492 17493 17494 17495 17496 17497 17498 17499 17500 17501 17502 17503 17504 17505 17506 17507 17508 17509 17510 17511 17512 17513 17514 17515 17516 17517 17518 17519 17520 17521 17522 17523 17524 17525 17526 17527 17528 17529 17530 17531 17532 17533 17534 17535 17536 17537 17538 17539 17540 17541 17542 17543 17544 17545 17546 17547 17548 17549 17550 17551 17552 17553 17554 17555 17556 17557 17558 17559 17560 17561 17562 17563 17564 17565 17566 17567 17568 17569 17570 17571 17572 17573 17574 17575 17576 17577 17578 17579 17580 17581 17582 17583 17584 17585 17586 17587 17588 17589 17590 17591 17592 17593 17594 17595 17596 17597 17598 17599 17600 17601 17602 17603 17604 17605 17606 17607 17608 17609 17610 17611 17612 17613 17614 17615 17616 17617 17618 17619 17620 17621 17622 17623 17624 17625 17626 17627 17628 17629 17630 17631 17632 17633 17634 17635 17636 17637 17638 17639 17640 17641 17642 17643 17644 17645 17646 17647 17648 17649 17650 17651 17652 17653 17654 17655 17656 17657 17658 17659 17660 17661 17662 17663 17664 17665 17666 17667 17668 17669 17670 17671 17672 17673 17674 17675 17676 17677 17678 17679 17680 17681 17682 17683 17684 17685 17686 17687 17688 17689 17690 17691 17692 17693 17694 17695 17696 17697 17698 17699 17700 17701 17702 17703 17704 17705 17706 17707 17708 17709 17710 17711 17712 17713 17714 17715 17716 17717 17718 17719 17720 17721 17722 17723 17724 17725 17726 17727 17728 17729 17730 17731 17732 17733 17734 17735 17736 17737 17738 17739 17740 17741 17742 17743 17744 17745 17746 17747 17748 17749 17750 17751 17752 17753 17754 17755 17756 17757 17758 17759 17760 17761 17762 17763 17764 17765 17766 17767 17768 17769 17770 17771 17772 17773 17774 17775 17776 17777 17778 17779 17780 17781 17782 17783 17784 17785 17786 17787 17788 17789 17790 17791 17792 17793 17794 17795 17796 17797 17798 17799 17800 17801 17802 17803 17804 17805 17806 17807 17808 17809 17810 17811 17812 17813 17814 17815 17816 17817 17818 17819 17820 17821 17822 17823 17824 17825 17826 17827 17828 17829 17830 17831 17832 17833 17834 17835 17836 17837 17838 17839 17840 
17841 17842 17843 17844 17845 17846 17847 17848 17849 17850 17851 17852 17853 17854 17855 17856 17857 17858 17859 17860 17861 17862 17863 17864 17865 17866 17867 17868 17869 17870 17871 17872 17873 17874 17875 17876 17877 17878 17879 17880 17881 17882 17883 17884 17885 17886 17887 17888 17889 17890 17891 17892 17893 17894 17895 17896 17897 17898 17899 17900 17901 17902 17903 17904 17905 17906 17907 17908 17909 17910 17911 17912 17913 17914 17915 17916 17917 17918 17919 17920 17921 17922 17923 17924 17925 17926 17927 17928 17929 17930 17931 17932 17933 17934 17935 17936 17937 17938 17939 17940 17941 17942 17943 17944 17945 17946 17947 17948 17949 17950 17951 17952 17953 17954 17955 17956 17957 17958 17959 17960 17961 17962 17963 17964 17965 17966 17967 17968 17969 17970 17971 17972 17973 17974 17975 17976 17977 17978 17979 17980 17981 17982 17983 17984 17985 17986 17987 17988 17989 17990 17991 17992 17993 17994 17995 17996 17997 17998 17999 18000 18001 18002 18003 18004 18005 18006 18007 18008 18009 18010 18011 18012 18013 18014 18015 18016 18017 18018 18019 18020 18021 18022 18023 18024 18025 18026 18027 18028 18029 18030 18031 18032 18033 18034 18035 18036 18037 18038 18039 18040 18041 18042 18043 18044 18045 18046 18047 18048 18049 18050 18051 18052 18053 18054 18055 18056 18057 18058 18059 18060 18061 18062 18063 18064 18065 18066 18067 18068 18069 18070 18071 18072 18073 18074 18075 18076 18077 18078 18079 18080 18081 18082 18083 18084 18085 18086 18087 18088 18089 18090 18091 18092 18093 18094 18095 18096 18097 18098 18099 18100 18101 18102 18103 18104 18105 18106 18107 18108 18109 18110 18111 18112 18113 18114 18115 18116 18117 18118 18119 18120 18121 18122 18123 18124 18125 18126 18127 18128 18129 18130 18131 18132 18133 18134 18135 18136 18137 18138 18139 18140 18141 18142 18143 18144 18145 18146 18147 18148 18149 18150 18151 18152 18153 18154 18155 18156 18157 18158 18159 18160 18161 18162 18163 18164 18165 18166 18167 18168 18169 18170 18171 18172 18173 18174 18175 18176 18177 18178 18179 18180 18181 18182 18183 18184 18185 18186 18187 18188 18189 18190 18191 18192 18193 18194 18195 18196 18197 18198 18199 18200 18201 18202 18203 18204 18205 18206 18207 18208 18209 18210 18211 18212 18213 18214 18215 18216 18217 18218 18219 18220 18221 18222 18223 18224 18225 18226 18227 18228 18229 18230 18231 18232 18233 18234 18235 18236 18237 18238 18239 18240 18241 18242 18243 18244 18245 18246 18247 18248 18249 18250 18251 18252 18253 18254 18255 18256 18257 18258 18259 18260 18261 18262 18263 18264 18265 18266 18267 18268 18269 18270 18271 18272 18273 18274 18275 18276 18277 18278 18279 18280 18281 18282 18283 18284 18285 18286 18287 18288 18289 18290 18291 18292 18293 18294 18295 18296 18297 18298 18299 18300 18301 18302 18303 18304 18305 18306 18307 18308 18309 18310 18311 18312 18313 18314 18315 18316 18317 18318 18319 18320 18321 18322 18323 18324 18325 18326 18327 18328 18329 18330 18331 18332 18333 18334 18335 18336 18337 18338 18339 18340 18341 18342 18343 18344 18345 18346 18347 18348 18349 18350 18351 18352 18353 18354 18355 18356 18357 18358 18359 18360 18361 18362 18363 18364 18365 18366 18367 18368 18369 18370 18371 18372 18373 18374 18375 18376 18377 18378 18379 18380 18381 18382 18383 18384 18385 18386 18387 18388 18389 18390 18391 18392 18393 18394 18395 18396 18397 18398 18399 18400 18401 18402 18403 18404 18405 18406 18407 18408 18409 18410 18411 18412 18413 18414 18415 18416 18417 18418 18419 18420 18421 18422 18423 18424 18425 18426 18427 18428 18429 18430 18431 18432 
18433 18434 18435 18436 18437 18438 18439 18440 18441 18442 18443 18444 18445 18446 18447 18448 18449 18450 18451 18452 18453 18454 18455 18456 18457 18458 18459 18460 18461 18462 18463 18464 18465 18466 18467 18468 18469 18470 18471 18472 18473 18474 18475 18476 18477 18478 18479 18480 18481 18482 18483 18484 18485 18486 18487 18488 18489 18490 18491 18492 18493 18494 18495 18496 18497 18498 18499 18500 18501 18502 18503 18504 18505 18506 18507 18508 18509 18510 18511 18512 18513 18514 18515 18516 18517 18518 18519 18520 18521 18522 18523 18524 18525 18526 18527 18528 18529 18530 18531 18532 18533 18534 18535 18536 18537 18538 18539 18540 18541 18542 18543 18544 18545 18546 18547 18548 18549 18550 18551 18552 18553 18554 18555 18556 18557 18558 18559 18560 18561 18562 18563 18564 18565 18566 18567 18568 18569 18570 18571 18572 18573 18574 18575 18576 18577 18578 18579 18580 18581 18582 18583 18584 18585 18586 18587 18588 18589 18590 18591 18592 18593 18594 18595 18596 18597 18598 18599 18600 18601 18602 18603 18604 18605 18606 18607 18608 18609 18610 18611 18612 18613 18614 18615 18616 18617 18618 18619 18620 18621 18622 18623 18624 18625 18626 18627 18628 18629 18630 18631 18632 18633 18634 18635 18636 18637 18638 18639 18640 18641 18642 18643 18644 18645 18646 18647 18648 18649 18650 18651 18652 18653 18654 18655 18656 18657 18658 18659 18660 18661 18662 18663 18664 18665 18666 18667 18668 18669 18670 18671 18672 18673 18674 18675 18676 18677 18678 18679 18680 18681 18682 18683 18684 18685 18686 18687 18688 18689 18690 18691 18692 18693 18694 18695 18696 18697 18698 18699 18700 18701 18702 18703 18704 18705 18706 18707 18708 18709 18710 18711 18712 18713 18714 18715 18716 18717 18718 18719 18720 18721 18722 18723 18724 18725 18726 18727 18728 18729 18730 18731 18732 18733 18734 18735 18736 18737 18738 18739 18740 18741 18742 18743 18744 18745 18746 18747 18748 18749 18750 18751 18752 18753 18754 18755 18756 18757 18758 18759 18760 18761 18762 18763 18764 18765 18766 18767 18768 18769 18770 18771 18772 18773 18774 18775 18776 18777 18778 18779 18780 18781 18782 18783 18784 18785 18786 18787 18788 18789 18790 18791 18792 18793 18794 18795 18796 18797 18798 18799 18800 18801 18802 18803 18804 18805 18806 18807 18808 18809 18810 18811 18812 18813 18814 18815 18816 18817 18818 18819 18820 18821 18822 18823 18824 18825 18826 18827 18828 18829 18830 18831 18832 18833 18834 18835 18836 18837 18838 18839 18840 18841 18842 18843 18844 18845 18846 18847 18848 18849 18850 18851 18852 18853 18854 18855 18856 18857 18858 18859 18860 18861 18862 18863 18864 18865 18866 18867 18868 18869 18870 18871 18872 18873 18874 18875 18876 18877 18878 18879 18880 18881 18882 18883 18884 18885 18886 18887 18888 18889 18890 18891 18892 18893 18894 18895 18896 18897 18898 18899 18900 18901 18902 18903 18904 18905 18906 18907 18908 18909 18910 18911 18912 18913 18914 18915 18916 18917 18918 18919 18920 18921 18922 18923 18924 18925 18926 18927 18928 18929 18930 18931 18932 18933 18934 18935 18936 18937 18938 18939 18940 18941 18942 18943 18944 18945 18946 18947 18948 18949 18950 18951 18952 18953 18954 18955 18956 18957 18958 18959 18960 18961 18962 18963 18964 18965 18966 18967 18968 18969 18970 18971 18972 18973 18974 18975 18976 18977 18978 18979 18980 18981 18982 18983 18984 18985 18986 18987 18988 18989 18990 18991 18992 18993 18994 18995 18996 18997 18998 18999 19000 19001 19002 19003 19004 19005 19006 19007 19008 19009 19010 19011 19012 19013 19014 19015 19016 19017 19018 19019 19020 19021 19022 19023 19024 
19025 19026 19027 19028 19029 19030 19031 19032 19033 19034 19035 19036 19037 19038 19039 19040 19041 19042 19043 19044 19045 19046 19047 19048 19049 19050 19051 19052 19053 19054 19055 19056 19057 19058 19059 19060 19061 19062 19063 19064 19065 19066 19067 19068 19069 19070 19071 19072 19073 19074 19075 19076 19077 19078 19079
//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
  "Number of constants with their storage promoted into constant pools");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
  cl::desc("Maximum interleave factor for MVE VLDn to generate."),
  cl::init(2));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
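  // Loads/stores of VT are promoted to PromotedLdStVT when the types differ,
  // and integer bitwise operations are promoted to PromotedBitwiseVT below;
  // everything else is marked Custom or Expand as appropriate for NEON.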
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT,            VT, Expand);
  setOperationAction(ISD::SELECT_CC,         VT, Expand);
  setOperationAction(ISD::VSELECT,           VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR,  VT, Promote);
    AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
  setOperationAction(ISD::SDIVREM, VT, Expand);
  setOperationAction(ISD::UDIVREM, VT, Expand);

  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
  if (!VT.isFloatingPoint())
    for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
      setOperationAction(Opcode, VT, Legal);
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
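  // 64-bit NEON vector types live in the D registers.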
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
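  // 128-bit NEON vector types use the DPair register class (consecutive
  // D-register pairs viewed as a Q register).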
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

void ARMTargetLowering::setAllExpand(MVT VT) {
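  // Default every generic DAG operation to Expand for this type; the few
  // operations that remain trivially legal are re-marked below.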
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);

  // We support these really simple operations even on types where all
  // the actual arithmetic has to be broken down into simpler
  // operations or turned into library calls.
  setOperationAction(ISD::BITCAST, VT, Legal);
  setOperationAction(ISD::LOAD, VT, Legal);
  setOperationAction(ISD::STORE, VT, Legal);
  setOperationAction(ISD::UNDEF, VT, Legal);
}

void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
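  // Apply the same legalization action to all three extending-load kinds.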
  setLoadExtAction(ISD::EXTLOAD,  From, To, Action);
  setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
  setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
}

void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
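  // Register the 128-bit MVE vector types and configure their operation
  // actions. HasMVEFP indicates whether the MVE floating-point extension is
  // available, which gates the FP vector types and int<->FP conversions.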
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
    setOperationAction(ISD::ABS, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::CTLZ, VT, Legal);
    setOperationAction(ISD::CTTZ, VT, Custom);
    setOperationAction(ISD::BITREVERSE, VT, Legal);
    setOperationAction(ISD::BSWAP, VT, Legal);
    setOperationAction(ISD::SADDSAT, VT, Legal);
    setOperationAction(ISD::UADDSAT, VT, Legal);
    setOperationAction(ISD::SSUBSAT, VT, Legal);
    setOperationAction(ISD::USUBSAT, VT, Legal);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);

    // Vector reductions
    setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Custom);
    setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
    setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether we have MVE.fp or not
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }

    if (HasMVEFP) {
      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);
      setOperationAction(ISD::FROUND, VT, Legal);
      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
    }
  }

  // Custom expand vector reductions that are smaller than legal, to prevent
  // false zero elements from being added.
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom);

  // We 'support' these types up to bitcast/load/store level, regardless of
  // whether we have MVE integer-only or MVE float support. Only FP data
  // processing on the FP vector types is inhibited at the integer-only level.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  }
  // We can do bitwise operations on v2i64 vectors
  setOperationAction(ISD::AND, MVT::v2i64, Legal);
  setOperationAction(ISD::OR, MVT::v2i64, Legal);
  setOperationAction(ISD::XOR, MVT::v2i64, Legal);

  // It is legal to extload from v8i8 to v8i16, and from v4i8 or v4i16 to
  // v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8,  Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8,  Legal);

  // Pre and Post inc on these are legal, given the correct extends
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  // Predicate types. MVE predicate vectors live in the VCCR register class
  // and mostly require custom lowering.
  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1};
  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
  }
}

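// Configure the target lowering for the given subtarget: register classes,
// operation actions, libcall names and calling conventions, and the DAG
// combines this backend is interested in.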
ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ? CallingConv::ARM_AAPCS_VFP
                                       : CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp",    ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp",    ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp",    ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp",    ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp",    ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp",    ISD::SETNE },
        { RTLIB::UO_F32,  "__unordsf2vfp", ISD::SETNE },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp",    ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp",    ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp",    ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp",    ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp",    ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp",    ISD::SETNE },
        { RTLIB::UO_F64,  "__unorddf2vfp", ISD::SETNE },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp",    ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp",    ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp",  ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp",    ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp",    ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  // RTLIB: AEABI runtime helper functions (see RTABI chapter 4).
  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // Memory operations
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

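  // Windows on ARM has its own runtime helpers for 64-bit FP <-> integer
  // conversions; they use the VFP variant of the AAPCS calling convention.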
  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetMachO() &&
      !(Subtarget->isTargetIOS() &&
        Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float on
  // non-watchOS platforms, but are needed for some targets that use a
  // hard-float calling convention by default.
  if (!Subtarget->isTargetWatchABI()) {
    if (Subtarget->isAAPCS_ABI()) {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
    } else {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
    }
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
  if (Subtarget->isTargetAEABI()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);
    if (!Subtarget->hasVFP2Base())
      setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);
  }

  if (Subtarget->hasFullFP16()) {
    addRegisterClass(MVT::f16, &ARM::HPRRegClass);
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
  }

  if (Subtarget->hasBF16()) {
    addRegisterClass(MVT::bf16, &ARM::HPRRegClass);
    setAllExpand(MVT::bf16);
    if (!Subtarget->hasFullFP16())
      setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
  }

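  // By default, expand every truncating store and extending load between
  // pairs of fixed-length vector types, along with the multiply-high and
  // widening-multiply nodes; the combinations that are actually supported
  // are re-legalized further down.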
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      addAllExtLoads(VT, InnerVT, Expand);
    }

    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());

  // Combine low-overhead loop intrinsics so that we can lower i1 types.
  if (Subtarget->hasLOB()) {
    setTargetDAGCombine(ISD::BRCOND);
    setTargetDAGCombine(ISD::BR_CC);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);
    }

    if (Subtarget->hasBF16()) {
      addQRTypeForNEON(MVT::v8bf16);
      addDRTypeForNEON(MVT::v4bf16);
    }
  }

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // none of Neon, MVE or VFP supports any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create a unit test.
    // In other words, find a case where "copysign" appears in the DAG with
    // vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);
  }

  if (Subtarget->hasNEON()) {
    // The same applies to v4f32, but keep in mind that vadd, vsub and vmul are
    // natively supported for v4f32.
    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Expand the same set of floating-point intrinsics for v2f32.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have a single-instruction SINT_TO_FP or UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have an
    // FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);

    // NEON does not have a single-instruction CTPOP for vectors with element
    // types wider than 8 bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP,      MVT::v1i64, Custom);
    setOperationAction(ISD::CTPOP,      MVT::v2i64, Custom);

    setOperationAction(ISD::CTLZ,       MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ,       MVT::v2i64, Expand);

    // NEON does not have single instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4Base()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::LOAD);

    // It is legal to extload from the small integer vector types below, e.g.
    // from v4i8 to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }
  }

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::VECREDUCE_ADD);
    setTargetDAGCombine(ISD::ADD);
    setTargetDAGCombine(ISD::BITCAST);
  }
  if (Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(ISD::SMIN);
    setTargetDAGCombine(ISD::UMIN);
    setTargetDAGCombine(ISD::SMAX);
    setTargetDAGCombine(ISD::UMAX);
    setTargetDAGCombine(ISD::FP_EXTEND);
  }

  if (!Subtarget->hasFP64()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions that
    // are present. However, no double-precision operations other than moves,
    // loads and stores are provided by the hardware.
    setOperationAction(ISD::FADD,       MVT::f64, Expand);
    setOperationAction(ISD::FSUB,       MVT::f64, Expand);
    setOperationAction(ISD::FMUL,       MVT::f64, Expand);
    setOperationAction(ISD::FMA,        MVT::f64, Expand);
    setOperationAction(ISD::FDIV,       MVT::f64, Expand);
    setOperationAction(ISD::FREM,       MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
    setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
    setOperationAction(ISD::FNEG,       MVT::f64, Expand);
    setOperationAction(ISD::FABS,       MVT::f64, Expand);
    setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
    setOperationAction(ISD::FSIN,       MVT::f64, Expand);
    setOperationAction(ISD::FCOS,       MVT::f64, Expand);
    setOperationAction(ISD::FPOW,       MVT::f64, Expand);
    setOperationAction(ISD::FLOG,       MVT::f64, Expand);
    setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
    setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
    setOperationAction(ISD::FEXP,       MVT::f64, Expand);
    setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
    setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
    setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
    setOperationAction(ISD::FRINT,      MVT::f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
    setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_ROUND,   MVT::f32, Custom);
  }

  if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) {
    setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::FP_ROUND,  MVT::f16, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
    }
  }

  if (!Subtarget->hasFP16()) {
    setOperationAction(ISD::FP_EXTEND,  MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // ARM does not have floating-point extending loads.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // ... or truncating stores
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  // ARM does not have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  } else {
    // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
    setIndexedLoadAction(ISD::POST_INC, MVT::i32,  Legal);
    setIndexedStoreAction(ISD::POST_INC, MVT::i32,  Legal);
  }

  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
  if (Subtarget->hasDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i8, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i8, Custom);
    setOperationAction(ISD::SADDSAT, MVT::i16, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i16, Custom);
  }
  if (Subtarget->hasBaseDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i32, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::i32, Legal);
  }

  // i64 operation support.
  setOperationAction(ISD::MUL,     MVT::i64, Expand);
  setOperationAction(ISD::MULHU,   MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL,       MVT::i64, Custom);
  setOperationAction(ISD::SRA,       MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
  setOperationAction(ISD::LOAD, MVT::i64, Custom);
  setOperationAction(ISD::STORE, MVT::i64, Custom);

  // MVE lowers 64-bit shifts to lsll and lsrl, assuming that ISD::SRL and
  // ISD::SRA of i64 are already marked Custom above.
  if (Subtarget->hasMVEIntegerOps())
    setOperationAction(ISD::SHL, MVT::i64, Custom);

  // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  }

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
    setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall);
  }

  // @llvm.readcyclecounter requires the Performance Monitors extension.
  // Default to the 0 expansion on unsupported platforms.
  // FIXME: Technically there are older ARM CPUs that have
  // implementation-specific ways of obtaining this information.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  // Only ARMv6 and later have a native BSWAP (REV); expand it on older cores.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (!hasDivide) {
    // These are expanded into libcalls if the CPU doesn't have a hardware
    // divider.
    setOperationAction(ISD::SDIV,  MVT::i32, LibCall);
    setOperationAction(ISD::UDIV,  MVT::i32, LibCall);
  }

  if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);

    setOperationAction(ISD::SDIV, MVT::i64, Custom);
    setOperationAction(ISD::UDIV, MVT::i64, Custom);
  }

  setOperationAction(ISD::SREM,  MVT::i32, Expand);
  setOperationAction(ISD::UREM,  MVT::i32, Expand);

  // Register based DivRem for AEABI (RTABI 4.2)
  if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
      Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
      Subtarget->isTargetWindows()) {
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
    HasStandaloneRem = false;

    if (Subtarget->isTargetWindows()) {
      const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const CallingConv::ID CC;
      } LibraryCalls[] = {
        { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },

        { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
      }
    } else {
      const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const CallingConv::ID CC;
      } LibraryCalls[] = {
        { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },

        { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
      }
    }

    setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  }

  if (Subtarget->getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }

  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,            MVT::Other, Custom);
  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);

  if (Subtarget->isTargetWindows())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  InsertFencesForAtomic = false;
  if (Subtarget->hasAnyDataBarrier() &&
      (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
    // ATOMIC_FENCE needs custom lowering; the others should have been expanded
    // to ldrex/strex loops already.
    setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
    if (!Subtarget->isThumb() || !Subtarget->isMClass())
      setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Custom);

    // On v8, we have particularly efficient implementations of atomic fences
    // if they can be combined with nearby atomic loads and stores.
    if (!Subtarget->hasAcquireRelease() ||
        getTargetMachine().getOptLevel() == 0) {
      // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
      InsertFencesForAtomic = true;
    }
  } else {
    // If there's anything we can use as a barrier, go through custom lowering
    // for ATOMIC_FENCE.
    // If the target has DMB in Thumb mode, fences can be inserted.
    if (Subtarget->hasDataBarrier())
      InsertFencesForAtomic = true;

    setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
                       Subtarget->hasAnyDataBarrier() ? Custom : Expand);

    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    if (!InsertFencesForAtomic) {
      setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
      setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    }
  }

  setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64 -> i64 into VMOVRRD and i64 -> f64 into VMOVDRR,
    // iff the target supports VFP2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (Subtarget->useSjLjEH())
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  if (Subtarget->hasFullFP16()) {
    setOperationAction(ISD::SETCC,     MVT::f16, Expand);
    setOperationAction(ISD::SELECT,    MVT::f16, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
  }

  setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom);

  setOperationAction(ISD::BRCOND,    MVT::Other, Custom);
  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
  if (Subtarget->hasFullFP16())
      setOperationAction(ISD::BR_CC, MVT::f16,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
  setOperationAction(ISD::FREM,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f32, Expand);
  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&
      !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
  setOperationAction(ISD::FPOW,      MVT::f32, Expand);

  if (!Subtarget->hasVFP4Base()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  }

  // Various VFP goodness
  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
    // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
    if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) {
      setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
      setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    }

    // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
      setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    }

    // Strict floating-point comparisons need custom lowering.
    setOperationAction(ISD::STRICT_FSETCC,  MVT::f16, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
    setOperationAction(ISD::STRICT_FSETCC,  MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FSETCC,  MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
  }

  // Use __sincos_stret if available.
  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
  }

  // FP-ARMv8 implements a lot of rounding-like FP operations.
  if (Subtarget->hasFPARMv8Base()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    if (Subtarget->hasNEON()) {
      setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
      setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    }

    if (Subtarget->hasFP64()) {
      setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);
      setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    }
  }

  // FP16 operations often need to be promoted so that library functions can
  // be called.
  if (Subtarget->hasFullFP16()) {
    setOperationAction(ISD::FREM, MVT::f16, Promote);
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
    setOperationAction(ISD::FPOWI, MVT::f16, Promote);
    setOperationAction(ISD::FPOW, MVT::f16, Promote);
    setOperationAction(ISD::FEXP, MVT::f16, Promote);
    setOperationAction(ISD::FEXP2, MVT::f16, Promote);
    setOperationAction(ISD::FLOG, MVT::f16, Promote);
    setOperationAction(ISD::FLOG10, MVT::f16, Promote);
    setOperationAction(ISD::FLOG2, MVT::f16, Promote);

    setOperationAction(ISD::FROUND, MVT::f16, Legal);
  }

  if (Subtarget->hasNEON()) {
    // vmin and vmax aren't available in a scalar form, so we can use
    // a NEON instruction with an undef lane instead.  This has a performance
    // penalty on some cores, so we don't do this unless we have been
    // asked to by the core tuning model.
    if (Subtarget->useNEONForSinglePrecisionFP()) {
      setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
      setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
      setOperationAction(ISD::FMINIMUM, MVT::f16, Legal);
      setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal);
    }
    setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);

    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal);
      setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal);

      setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal);
      setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal);
      setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
      setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);

  if (Subtarget->hasMVEIntegerOps())
    setTargetDAGCombine(ISD::VSELECT);

  if (Subtarget->hasV6Ops())
    setTargetDAGCombine(ISD::SRL);
  if (Subtarget->isThumb1Only())
    setTargetDAGCombine(ISD::SHL);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
      !Subtarget->hasVFP2Base() || Subtarget->hasMinSize())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  MaxStoresPerMemset = 8;
  MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = 2;
  MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = 2;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(Align(4));

  // Prefer likely predicted branches to selects on out-of-order cores.
  PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();

  setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));

  setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));

  if (Subtarget->isThumb() || Subtarget->isThumb2())
    setTargetDAGCombine(ISD::ABS);
}

bool ARMTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass *, uint8_t>
ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  // Use DPR as the representative register class for all floating-point and
  // vector types. Since there are 32 SPR registers and 32 DPR registers, the
  // cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((ARMISD::NodeType)Opcode) {
  case ARMISD::FIRST_NUMBER:  break;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tSECALL:       return "ARMISD::tSECALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::SERET_FLAG:    return "ARMISD::SERET_FLAG";
  case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMN:           return "ARMISD::CMN";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPE:        return "ARMISD::CMPFPE";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::CMPFPEw0:      return "ARMISD::CMPFPEw0";
  case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";

  case ARMISD::CMOV:          return "ARMISD::CMOV";
  case ARMISD::SUBS:          return "ARMISD::SUBS";

  case ARMISD::SSAT:          return "ARMISD::SSAT";
  case ARMISD::USAT:          return "ARMISD::USAT";

  case ARMISD::ASRL:          return "ARMISD::ASRL";
  case ARMISD::LSRL:          return "ARMISD::LSRL";
  case ARMISD::LSLL:          return "ARMISD::LSLL";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::ADDC:          return "ARMISD::ADDC";
  case ARMISD::ADDE:          return "ARMISD::ADDE";
  case ARMISD::SUBC:          return "ARMISD::SUBC";
  case ARMISD::SUBE:          return "ARMISD::SUBE";
  case ARMISD::LSLS:          return "ARMISD::LSLS";

  case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
  case ARMISD::VMOVhr:        return "ARMISD::VMOVhr";
  case ARMISD::VMOVrh:        return "ARMISD::VMOVrh";
  case ARMISD::VMOVSR:        return "ARMISD::VMOVSR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";

  case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";

  case ARMISD::LDRD:          return "ARMISD::LDRD";
  case ARMISD::STRD:          return "ARMISD::STRD";

  case ARMISD::WIN__CHKSTK:   return "ARMISD::WIN__CHKSTK";
  case ARMISD::WIN__DBZCHK:   return "ARMISD::WIN__DBZCHK";

  case ARMISD::PREDICATE_CAST: return "ARMISD::PREDICATE_CAST";
  case ARMISD::VECTOR_REG_CAST: return "ARMISD::VECTOR_REG_CAST";
  case ARMISD::VCMP:          return "ARMISD::VCMP";
  case ARMISD::VCMPZ:         return "ARMISD::VCMPZ";
  case ARMISD::VTST:          return "ARMISD::VTST";

  case ARMISD::VSHLs:         return "ARMISD::VSHLs";
  case ARMISD::VSHLu:         return "ARMISD::VSHLu";
  case ARMISD::VSHLIMM:       return "ARMISD::VSHLIMM";
  case ARMISD::VSHRsIMM:      return "ARMISD::VSHRsIMM";
  case ARMISD::VSHRuIMM:      return "ARMISD::VSHRuIMM";
  case ARMISD::VRSHRsIMM:     return "ARMISD::VRSHRsIMM";
  case ARMISD::VRSHRuIMM:     return "ARMISD::VRSHRuIMM";
  case ARMISD::VRSHRNIMM:     return "ARMISD::VRSHRNIMM";
  case ARMISD::VQSHLsIMM:     return "ARMISD::VQSHLsIMM";
  case ARMISD::VQSHLuIMM:     return "ARMISD::VQSHLuIMM";
  case ARMISD::VQSHLsuIMM:    return "ARMISD::VQSHLsuIMM";
  case ARMISD::VQSHRNsIMM:    return "ARMISD::VQSHRNsIMM";
  case ARMISD::VQSHRNuIMM:    return "ARMISD::VQSHRNuIMM";
  case ARMISD::VQSHRNsuIMM:   return "ARMISD::VQSHRNsuIMM";
  case ARMISD::VQRSHRNsIMM:   return "ARMISD::VQRSHRNsIMM";
  case ARMISD::VQRSHRNuIMM:   return "ARMISD::VQRSHRNuIMM";
  case ARMISD::VQRSHRNsuIMM:  return "ARMISD::VQRSHRNsuIMM";
  case ARMISD::VSLIIMM:       return "ARMISD::VSLIIMM";
  case ARMISD::VSRIIMM:       return "ARMISD::VSRIIMM";
  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
  case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
  case ARMISD::VDUP:          return "ARMISD::VDUP";
  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
  case ARMISD::VEXT:          return "ARMISD::VEXT";
  case ARMISD::VREV64:        return "ARMISD::VREV64";
  case ARMISD::VREV32:        return "ARMISD::VREV32";
  case ARMISD::VREV16:        return "ARMISD::VREV16";
  case ARMISD::VZIP:          return "ARMISD::VZIP";
  case ARMISD::VUZP:          return "ARMISD::VUZP";
  case ARMISD::VTRN:          return "ARMISD::VTRN";
  case ARMISD::VTBL1:         return "ARMISD::VTBL1";
  case ARMISD::VTBL2:         return "ARMISD::VTBL2";
  case ARMISD::VMOVN:         return "ARMISD::VMOVN";
  case ARMISD::VQMOVNs:       return "ARMISD::VQMOVNs";
  case ARMISD::VQMOVNu:       return "ARMISD::VQMOVNu";
  case ARMISD::VCVTN:         return "ARMISD::VCVTN";
  case ARMISD::VCVTL:         return "ARMISD::VCVTL";
  case ARMISD::VMULLs:        return "ARMISD::VMULLs";
  case ARMISD::VMULLu:        return "ARMISD::VMULLu";
  case ARMISD::VADDVs:        return "ARMISD::VADDVs";
  case ARMISD::VADDVu:        return "ARMISD::VADDVu";
  case ARMISD::VADDVps:       return "ARMISD::VADDVps";
  case ARMISD::VADDVpu:       return "ARMISD::VADDVpu";
  case ARMISD::VADDLVs:       return "ARMISD::VADDLVs";
  case ARMISD::VADDLVu:       return "ARMISD::VADDLVu";
  case ARMISD::VADDLVAs:      return "ARMISD::VADDLVAs";
  case ARMISD::VADDLVAu:      return "ARMISD::VADDLVAu";
  case ARMISD::VADDLVps:      return "ARMISD::VADDLVps";
  case ARMISD::VADDLVpu:      return "ARMISD::VADDLVpu";
  case ARMISD::VADDLVAps:     return "ARMISD::VADDLVAps";
  case ARMISD::VADDLVApu:     return "ARMISD::VADDLVApu";
  case ARMISD::VMLAVs:        return "ARMISD::VMLAVs";
  case ARMISD::VMLAVu:        return "ARMISD::VMLAVu";
  case ARMISD::VMLAVps:       return "ARMISD::VMLAVps";
  case ARMISD::VMLAVpu:       return "ARMISD::VMLAVpu";
  case ARMISD::VMLALVs:       return "ARMISD::VMLALVs";
  case ARMISD::VMLALVu:       return "ARMISD::VMLALVu";
  case ARMISD::VMLALVps:      return "ARMISD::VMLALVps";
  case ARMISD::VMLALVpu:      return "ARMISD::VMLALVpu";
  case ARMISD::VMLALVAs:      return "ARMISD::VMLALVAs";
  case ARMISD::VMLALVAu:      return "ARMISD::VMLALVAu";
  case ARMISD::VMLALVAps:     return "ARMISD::VMLALVAps";
  case ARMISD::VMLALVApu:     return "ARMISD::VMLALVApu";
  case ARMISD::UMAAL:         return "ARMISD::UMAAL";
  case ARMISD::UMLAL:         return "ARMISD::UMLAL";
  case ARMISD::SMLAL:         return "ARMISD::SMLAL";
  case ARMISD::SMLALBB:       return "ARMISD::SMLALBB";
  case ARMISD::SMLALBT:       return "ARMISD::SMLALBT";
  case ARMISD::SMLALTB:       return "ARMISD::SMLALTB";
  case ARMISD::SMLALTT:       return "ARMISD::SMLALTT";
  case ARMISD::SMULWB:        return "ARMISD::SMULWB";
  case ARMISD::SMULWT:        return "ARMISD::SMULWT";
  case ARMISD::SMLALD:        return "ARMISD::SMLALD";
  case ARMISD::SMLALDX:       return "ARMISD::SMLALDX";
  case ARMISD::SMLSLD:        return "ARMISD::SMLSLD";
  case ARMISD::SMLSLDX:       return "ARMISD::SMLSLDX";
  case ARMISD::SMMLAR:        return "ARMISD::SMMLAR";
  case ARMISD::SMMLSR:        return "ARMISD::SMMLSR";
  case ARMISD::QADD16b:       return "ARMISD::QADD16b";
  case ARMISD::QSUB16b:       return "ARMISD::QSUB16b";
  case ARMISD::QADD8b:        return "ARMISD::QADD8b";
  case ARMISD::QSUB8b:        return "ARMISD::QSUB8b";
  case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
  case ARMISD::BFI:           return "ARMISD::BFI";
  case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
  case ARMISD::VBSP:          return "ARMISD::VBSP";
  case ARMISD::MEMCPY:        return "ARMISD::MEMCPY";
  case ARMISD::VLD1DUP:       return "ARMISD::VLD1DUP";
  case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD1DUP_UPD:   return "ARMISD::VLD1DUP_UPD";
  case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
  case ARMISD::WLS:           return "ARMISD::WLS";
  case ARMISD::LE:            return "ARMISD::LE";
  case ARMISD::LOOP_DEC:      return "ARMISD::LOOP_DEC";
  case ARMISD::CSINV:         return "ARMISD::CSINV";
  case ARMISD::CSNEG:         return "ARMISD::CSNEG";
  case ARMISD::CSINC:         return "ARMISD::CSINC";
  }
  return nullptr;
}

EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);

  // MVE has a predicate register.
  if (Subtarget->hasMVEIntegerOps() &&
      (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8))
    return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}
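// Illustrative example for getSetCCResultType (hypothetical IR, assuming MVE
// integer ops are available): a comparison such as
//   %c = icmp eq <4 x i32> %a, %b
// gets a v4i1 setcc result type, matching the MVE predicate register, while
// targets without MVE fall back to an integer vector of the same element count.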

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
const TargetRegisterClass *
ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
  (void)isDivergent;
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive
  // MVE Q registers.
  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// memcpy and other memory intrinsics typically try to use LDM/STM if the
// source/dest is aligned and the copy size is large enough. We therefore want
// to align such objects passed to memory intrinsics.
bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                               unsigned &PrefAlign) const {
  if (!isa<MemIntrinsic>(CI))
    return false;
  MinSize = 8;
  // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
  // cycle faster than 4-byte aligned LDM.
  PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
  return true;
}
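// Illustrative example for shouldAlignPointerArgs (hypothetical IR; the exact
// intrinsic signature depends on the IR version in use): on an ARMv6 A/R-class
// core, a call such as
//   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 64, i1 false)
// reports MinSize = 8 and PrefAlign = 8, so sufficiently large frame objects
// passed to it can be realigned to 8 bytes for faster aligned LDM/STM sequences.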

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static bool isSRL16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SRL)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

static bool isSRA16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SRA)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

static bool isSHL16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SHL)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

// Check for a signed 16-bit value. We special-case SRA because it keeps the
// logic simpler when also looking for SRAs that aren't sign-extending a
// smaller value. Without the check, we'd need to take extra care with
// checking order for some operations.
static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
  if (isSRA16(Op))
    return isSHL16(Op.getOperand(0));
  return DAG.ComputeNumSignBits(Op) == 17;
}
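// Illustrative i32 patterns accepted by isS16 (a sketch, not exhaustive):
//   (sra (shl X, 16), 16)  -- an explicitly sign-extended 16-bit value, and
//   any node for which ComputeNumSignBits proves exactly 17 sign bits, i.e. a
//   value known to fit in a signed 16-bit range.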

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}
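// Example for FPCCToARMCC: an ordered not-equal compare (hypothetical IR)
//   %c = fcmp one float %a, %b
// has no single ARM condition, so it yields the pair (MI, GT): the caller
// tests MI and then GT on the same FPSCR flags. Single-condition cases leave
// CondCode2 as AL.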

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// getEffectiveCallingConv - Get the effective calling convention, taking into
/// account the presence of floating-point hardware and calling-convention
/// limitations, such as support for variadic functions.
CallingConv::ID
ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
                                           bool isVarArg) const {
  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::ARM_AAPCS:
  case CallingConv::ARM_APCS:
  case CallingConv::GHC:
  case CallingConv::CFGuard_Check:
    return CC;
  case CallingConv::PreserveMost:
    return CallingConv::PreserveMost;
  case CallingConv::ARM_AAPCS_VFP:
  case CallingConv::Swift:
    return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
  case CallingConv::C:
    if (!Subtarget->isAAPCS_ABI())
      return CallingConv::ARM_APCS;
    else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() &&
             getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
             !isVarArg)
      return CallingConv::ARM_AAPCS_VFP;
    else
      return CallingConv::ARM_AAPCS;
  case CallingConv::Fast:
  case CallingConv::CXX_FAST_TLS:
    if (!Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg)
        return CallingConv::Fast;
      return CallingConv::ARM_APCS;
    } else if (Subtarget->hasVFP2Base() &&
               !Subtarget->isThumb1Only() && !isVarArg)
      return CallingConv::ARM_AAPCS_VFP;
    else
      return CallingConv::ARM_AAPCS;
  }
}
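// Illustrative mapping produced by getEffectiveCallingConv for a plain C call
// (CallingConv::C):
//   - non-AAPCS (legacy APCS) target                        -> ARM_APCS
//   - AAPCS, hard-float ABI, VFP2+, not Thumb1, non-vararg  -> ARM_AAPCS_VFP
//   - any other AAPCS configuration (soft-float ABI,
//     Thumb1-only, or variadic)                             -> ARM_AAPCS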

CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                 bool isVarArg) const {
  return CCAssignFnForNode(CC, false, isVarArg);
}

CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                   bool isVarArg) const {
  return CCAssignFnForNode(CC, true, isVarArg);
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (getEffectiveCallingConv(CC, isVarArg)) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::Fast:
    return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
  case CallingConv::GHC:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
  case CallingConv::PreserveMost:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::CFGuard_Check:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check);
  }
}

SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG,
                                     MVT LocVT, MVT ValVT, SDValue Val) const {
  Val = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocVT.getSizeInBits()),
                    Val);
  if (Subtarget->hasFullFP16()) {
    Val = DAG.getNode(ARMISD::VMOVhr, dl, ValVT, Val);
  } else {
    Val = DAG.getNode(ISD::TRUNCATE, dl,
                      MVT::getIntegerVT(ValVT.getSizeInBits()), Val);
    Val = DAG.getNode(ISD::BITCAST, dl, ValVT, Val);
  }
  return Val;
}

SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG,
                                       MVT LocVT, MVT ValVT,
                                       SDValue Val) const {
  if (Subtarget->hasFullFP16()) {
    Val = DAG.getNode(ARMISD::VMOVrh, dl,
                      MVT::getIntegerVT(LocVT.getSizeInBits()), Val);
  } else {
    Val = DAG.getNode(ISD::BITCAST, dl,
                      MVT::getIntegerVT(ValVT.getSizeInBits()), Val);
    Val = DAG.getNode(ISD::ZERO_EXTEND, dl,
                      MVT::getIntegerVT(LocVT.getSizeInBits()), Val);
  }
  return DAG.getNode(ISD::BITCAST, dl, LocVT, Val);
}
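// Worked example for the !hasFullFP16() paths above, with ValVT = f16 and
// LocVT = f32: MoveFromHPR bitcasts the f16 to i16, zero-extends it to i32 and
// bitcasts that to f32, so the half-precision value sits in the low 16 bits of
// the 32-bit location; MoveToHPR performs the inverse bitcast/truncate on the
// way back.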

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue ARMTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
    SDValue ThisVal) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Pass 'this' value directly from the argument to return value, to avoid
    // reg unit interference
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val;
    if (VA.needsCustom() &&
        (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget->isLittle())
        std::swap (Lo, Hi);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, dl, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        if (!Subtarget->isLittle())
          std::swap (Lo, Hi);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, dl, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    // f16 arguments have their size extended to 4 bytes and are passed as if
    // they had been copied to the LSBs of a 32-bit register.
    // To achieve this, they are extended to i32 (soft ABI) or to f32 (hard ABI).
    if (VA.needsCustom() &&
        (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
      Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val);

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
                                            SDValue Arg, const SDLoc &dl,
                                            SelectionDAG &DAG,
                                            const CCValAssign &VA,
                                            ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}

void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVectorImpl<SDValue> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {
  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  unsigned id = Subtarget->isLittle() ? 0 : 1;
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
  else {
    assert(NextVA.isMemLoc());
    if (!StackPtr.getNode())
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
                                    getPointerTy(DAG.getDataLayout()));

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}
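// Example for PassF64ArgInRegs (register choice is illustrative): an f64
// assigned to a GPR pair, say r2/r3, is split by VMOVRRD into two i32 halves.
// On little-endian the low word goes to VA's register (r2) and the high word
// to NextVA (r3); if NextVA is a stack slot, that half is stored to the stack
// via LowerMemOpCallTo instead.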

/// LowerCall - Lower a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool doesNotRet                       = CLI.DoesNotReturn;
  bool isVarArg                         = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  MachineFunction::CallSiteInfo CSInfo;
  bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn = false;
  bool isCmseNSCall   = false;
  bool PreferIndirect = false;

  // Determine whether this is a non-secure function call.
  if (CLI.CB && CLI.CB->getAttributes().hasFnAttribute("cmse_nonsecure_call"))
    isCmseNSCall = true;

  // Disable tail calls if they're not supported.
  if (!Subtarget->supportsTailCall())
    isTailCall = false;

  // For both non-secure calls and returns from a CMSE entry function, the
  // function needs to do some extra work after the call, or before the
  // return, respectively, so it cannot end with a tail call.
  if (isCmseNSCall || AFI->isCmseNSEntryFunction())
    isTailCall = false;

  if (isa<GlobalAddressSDNode>(Callee)) {
    // If we're optimizing for minimum size and the function is called three or
    // more times in this block, we can improve codesize by calling indirectly
    // as BLXr has a 16-bit encoding.
    auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
    if (CLI.CB) {
      auto *BB = CLI.CB->getParent();
      PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
                       count_if(GV->users(), [&BB](const User *U) {
                         return isa<Instruction>(U) &&
                                cast<Instruction>(U)->getParent() == BB;
                       }) > 2;
    }
  }
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(
        Callee, CallConv, isVarArg, isStructRet,
        MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
        PreferIndirect);
    if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall())
      report_fatal_error("failed to perform tail call elimination on a call "
                         "site marked musttail");
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall)
      ++NumTailCalls;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (isTailCall) {
    // For tail calls, memory operands are available in our caller's stack.
    NumBytes = 0;
  } else {
    // Adjust the stack pointer for the new arguments...
    // These operations are automatically eliminated by the prolog/epilog pass
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  }

  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f16 arguments have their size extended to 4 bytes and are passed as if
    // they had been copied to the LSBs of a 32-bit register.
    // To achieve this, they are extended to i32 (soft ABI) or to f32 (hard ABI).
    if (VA.needsCustom() &&
        (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
      Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
    } else {
      // f16 arguments could have been extended prior to argument lowering.
      // Mask such arguments if this is a CMSE nonsecure call.
      auto ArgVT = Outs[realArgIdx].ArgVT;
      if (isCmseNSCall && (ArgVT == MVT::f16)) {
        auto LocBits = VA.getLocVT().getSizeInBits();
        auto MaskValue = APInt::getLowBitsSet(LocBits, ArgVT.getSizeInBits());
        SDValue Mask =
            DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits));
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg);
        Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask);
        Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      }
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
      SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                DAG.getConstant(0, dl, MVT::i32));
      SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                DAG.getConstant(1, dl, MVT::i32));

      PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, Flags);

      VA = ArgLocs[++i]; // skip ahead to next loc
      if (VA.isRegLoc()) {
        PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      } else {
        assert(VA.isMemLoc());

        MemOpChains.push_back(
            LowerMemOpCallTo(Chain, StackPtr, Op1, dl, DAG, VA, Flags));
      }
    } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) {
      PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, Flags);
    } else if (VA.isRegLoc()) {
      if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
          Outs[0].VT == MVT::i32) {
        assert(VA.getLocVT() == MVT::i32 &&
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
        isThisReturn = true;
      }
      const TargetOptions &Options = DAG.getTarget().Options;
      if (Options.EmitCallSiteInfo)
        CSInfo.emplace_back(VA.getLocReg(), i);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

      if (CurByValIdx < ByValArgsCount) {

        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        EVT PtrVT =
            DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
        unsigned int i, j;
        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo(),
                          DAG.InferPtrAlign(AddArg));
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));
        }

        // If the parameter size exceeds the register area, the "offset" value
        // helps us calculate the stack slot for the remaining part properly.
        offset = RegEnd - RegBegin;

        CCInfo.nextInRegsParam();
      }

      if (Flags.getByValSize() > 4*offset) {
        auto PtrVT = getPointerTy(DAG.getDataLayout());
        unsigned LocMemOffset = VA.getLocMemOffset();
        SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
        SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
        SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
        SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
                                           MVT::i32);
        SDValue AlignNode =
            DAG.getConstant(Flags.getNonZeroByValAlign().value(), dl, MVT::i32);

        SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
        MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
                                          Ops));
      }
    } else if (!isTailCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;

  const TargetMachine &TM = getTargetMachine();
  const Module *Mod = MF.getFunction().getParent();
  const GlobalValue *GV = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    GV = G->getGlobal();
  bool isStub =
      !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();

  bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
  bool isLocalARMFunc = false;
  auto PtrVt = getPointerTy(DAG.getDataLayout());

  if (Subtarget->genLongCalls()) {
    assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
           "long-calls codegen is not position independent!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (isa<GlobalAddressSDNode>(Callee)) {
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);

      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                      ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }
  } else if (isa<GlobalAddressSDNode>(Callee)) {
    if (!PreferIndirect) {
      isDirect = true;
      bool isDef = GV->isStrongDefinitionForLinker();

      // ARM call to a local ARM function is predicable.
      isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
      // tBX takes a register source operand.
      if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
        assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
        Callee = DAG.getNode(
            ARMISD::WrapperPIC, dl, PtrVt,
            DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
        Callee = DAG.getLoad(
            PtrVt, dl, DAG.getEntryNode(), Callee,
            MachinePointerInfo::getGOT(DAG.getMachineFunction()), MaybeAlign(),
            MachineMemOperand::MODereferenceable |
                MachineMemOperand::MOInvariant);
      } else if (Subtarget->isTargetCOFF()) {
        assert(Subtarget->isTargetWindows() &&
               "Windows is the only supported COFF target");
        unsigned TargetFlags = ARMII::MO_NO_FLAG;
        if (GV->hasDLLImportStorageClass())
          TargetFlags = ARMII::MO_DLLIMPORT;
        else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
          TargetFlags = ARMII::MO_COFFSTUB;
        Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*offset=*/0,
                                            TargetFlags);
        if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
          Callee =
              DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
                          DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
      } else {
        Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
      }
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                      ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
    } else {
      Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
    }
  }

  if (isCmseNSCall) {
    assert(!isARMFunc && !isDirect &&
           "Cannot handle call to ARM function or direct call");
    if (NumBytes > 0) {
      DiagnosticInfoUnsupported Diag(DAG.getMachineFunction().getFunction(),
                                     "call to non-secure function would "
                                     "require passing arguments on stack",
                                     dl.getDebugLoc());
      DAG.getContext()->diagnose(Diag);
    }
    if (isStructRet) {
      DiagnosticInfoUnsupported Diag(
          DAG.getMachineFunction().getFunction(),
          "call to non-secure function would return value through pointer",
          dl.getDebugLoc());
      DAG.getContext()->diagnose(Diag);
    }
  }

  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  if (Subtarget->isThumb()) {
    if (isCmseNSCall)
      CallOpc = ARMISD::tSECALL;
    else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = ARMISD::CALL;
  } else {
    if (!isDirect && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
             // Emit regular call when code size is the priority
             !Subtarget->hasMinSize())
      // "mov lr, pc; b _foo" to avoid confusing the RSP
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  if (!isTailCall) {
    const uint32_t *Mask;
    const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
    if (isThisReturn) {
      // For 'this' returns, use the R0-preserving mask if applicable
      Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
      if (!Mask) {
        // Set isThisReturn to false if the calling convention is not one that
        // allows 'returned' to be modeled in this way, so LowerCallResult does
        // not try to pass 'this' straight through
        isThisReturn = false;
        Mask = ARI->getCallPreservedMask(MF, CallConv);
      }
    } else
      Mask = ARI->getCallPreservedMask(MF, CallConv);

    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall) {
    MF.getFrameInfo().setHasTailCall();
    SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
    DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
    return Ret;
  }

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
  InFlag = Chain.getValue(1);
  DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    Align Alignment) const {
  // Byval (as with any stack) slots are always at least 4 byte aligned.
  Alignment = std::max(Alignment, Align(4));

  unsigned Reg = State->AllocateReg(GPRArgRegs);
  if (!Reg)
    return;

  unsigned AlignInRegs = Alignment.value() / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)
    Reg = State->AllocateReg(GPRArgRegs);

  if (!Reg)
    return;

  unsigned Excess = 4 * (ARM::R4 - Reg);

  // Special case when NSAA != SP and the parameter size is greater than the
  // size of all remaining GPR registers. In that case we cannot split the
  // parameter; we must send it to the stack. We must also set the NCRN to R4,
  // thus wasting all remaining registers.
  const unsigned NSAAOffset = State->getNextStackOffset();
  if (NSAAOffset != 0 && Size > Excess) {
    while (State->AllocateReg(GPRArgRegs))
      ;
    return;
  }

  // The first register for the byval parameter is the first register that
  // wasn't allocated before this method call, i.e. "reg". If the parameter is
  // small enough to fit in the range [reg, r4), then the end (one-past-last)
  // register is reg + param-size-in-regs; otherwise the parameter is split
  // between registers and the stack, and the end register is r4.
  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
  State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
  // Note that the first register was already allocated earlier in this
  // method; allocate the remaining registers we need.
  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
    State->AllocateReg(GPRArgRegs);
  // A byval parameter that is split between registers and memory needs its
  // size truncated here.
  // In the case where the entire structure fits in registers, we set the
  // size in memory to zero.
  Size = std::max<int>(Size - Excess, 0);
}
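// Worked example for HandleByVal (illustrative): a 16-byte byval argument with
// 8-byte alignment when r1 is the next free register. AlignInRegs is 2, so r1
// is wasted as padding and the aggregate starts at r2, leaving Excess = 8
// bytes of register space. With nothing already on the stack, bytes 0-7 are
// passed in r2-r3 and Size is reduced to the remaining 8 bytes, which go to
// memory.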

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available at the same relative position in the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
                         const TargetInstrInfo *TII) {
  unsigned Bytes = Arg.getValueSizeInBits() / 8;
  int FI = std::numeric_limits<int>::max();
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!Register::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(*Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != std::numeric_limits<int>::max());
  if (!MFI.isFixedObjectIndex(FI))
    return false;
  return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool ARMTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    bool isCalleeStructRet, bool isCallerStructRet,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
    const bool isIndirect) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  assert(Subtarget->supportsTailCall());

  // Indirect tail calls cannot be optimized for Thumb1 if the args
  // to the call take up r0-r3. The reason is that there are no legal registers
  // left to hold the pointer to the function to be called.
  if (Subtarget->isThumb1Only() && Outs.size() >= 4 &&
      (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect))
    return false;

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Exception-handling functions need a special set of instructions to indicate
  // a return to the hardware. Tail-calling another function would probably
  // break this.
  if (CallerF.hasFnAttribute("interrupt"))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // Externally-defined functions with weak linkage should not be
  // tail-called on ARM when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    const Triple &TT = getTargetMachine().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
      return false;
  }

  // Check that the call results are passed in the same way.
  LLVMContext &C = *DAG.getContext();
  if (!CCState::resultsCompatible(
          getEffectiveCallingConv(CalleeCC, isVarArg),
          getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins,
          CCAssignFnForReturn(CalleeCC, isVarArg),
          CCAssignFnForReturn(CallerCC, CallerF.isVarArg())))
    return false;
  // The callee has to preserve all registers the caller needs to preserve.
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // If the caller's vararg or byval argument has been split between registers
  // and the stack, do not perform a tail call, since part of the argument is
  // in the caller's local frame.
  const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
  if (AFI_Caller->getArgRegsSaveSize())
    return false;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
    if (CCInfo.getNextStackOffset()) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const TargetInstrInfo *TII = Subtarget->getInstrInfo();
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        CCValAssign &VA = ArgLocs[i];
        EVT RegVT = VA.getLocVT();
        SDValue Arg = OutVals[realArgIdx];
        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) {
          // f64 and vector types are split into multiple registers or
          // register/stack-slot combinations.  The types will not match
          // the registers; give up on memory f64 refs until we figure
          // out what to do about this.
          if (!VA.isRegLoc())
            return false;
          if (!ArgLocs[++i].isRegLoc())
            return false;
          if (RegVT == MVT::v2f64) {
            if (!ArgLocs[++i].isRegLoc())
              return false;
            if (!ArgLocs[++i].isRegLoc())
              return false;
          }
        } else if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  return true;
}

bool
ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
}

static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
                                    const SDLoc &DL, SelectionDAG &DAG) {
  const MachineFunction &MF = DAG.getMachineFunction();
  const Function &F = MF.getFunction();

  StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();

  // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
  // version of the "preferred return address". These offsets affect the return
  // instruction if this is a return from PL1 without hypervisor extensions.
  //    IRQ/FIQ: +4     "subs pc, lr, #4"
  //    SWI:     0      "subs pc, lr, #0"
  //    ABORT:   +4     "subs pc, lr, #4"
  //    UNDEF:   +4/+2  "subs pc, lr, #0"
  // UNDEF varies depending on whether the exception came from ARM or Thumb
  // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
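  // The offset chosen here is inserted as the second operand of the return
  // node below and is intended to end up as the immediate of the
  // "subs pc, lr, #N" that the interrupt-return node is selected to.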

  int64_t LROffset;
  if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
      IntKind == "ABORT")
    LROffset = 4;
  else if (IntKind == "SWI" || IntKind == "UNDEF")
    LROffset = 0;
  else
    report_fatal_error("Unsupported interrupt attribute. If present, value "
                       "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");

  RetOps.insert(RetOps.begin() + 1,
                DAG.getConstant(LROffset, DL, MVT::i32, false));

  return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
}

SDValue
ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  bool isLittleEndian = Subtarget->isLittle();

  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  AFI->setReturnRegsCount(RVLocs.size());

  // Report error if cmse entry function returns structure through first ptr arg.
  if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) {
    // Note: using an empty SDLoc(), as the first line of the function is a
    // better place to report than the last line.
    DiagnosticInfoUnsupported Diag(
        DAG.getMachineFunction().getFunction(),
        "secure entry function would return value through pointer",
        SDLoc().getDebugLoc());
    DAG.getContext()->diagnose(Diag);
  }

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];
    bool ReturnF16 = false;

    if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) {
      // Half-precision return values can be returned like this:
      //
      // t11: f16 = fadd ...
      // t12: i16 = bitcast t11
      // t13: i32 = zero_extend t12
      // t14: f32 = bitcast t13  <~~~~~~~ Arg
      //
      // to avoid code generation for bitcasts, we simply set Arg to the node
      // that produces the f16 value, t11 in this case.
      //
      if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) {
        SDValue ZE = Arg.getOperand(0);
        if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) {
          SDValue BC = ZE.getOperand(0);
          if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) {
            Arg = BC.getOperand(0);
            ReturnF16 = true;
          }
        }
      }
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      if (!ReturnF16)
        Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // Mask f16 return values if this is a CMSE nonsecure entry.
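    // CMSE requires that bits of the result register that do not carry the
    // return value are cleared before returning to nonsecure state, so that
    // stale secure data cannot leak through them.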
    auto RetVT = Outs[realRVLocIdx].ArgVT;
    if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f16) {
        Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
      } else {
        auto LocBits = VA.getLocVT().getSizeInBits();
        auto MaskValue = APInt::getLowBitsSet(LocBits, RetVT.getSizeInBits());
        SDValue Mask =
            DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits));
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg);
        Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask);
        Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      }
    }

    if (VA.needsCustom() &&
        (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, dl, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        Chain =
            DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             HalfGPRs.getValue(isLittleEndian ? 0 : 1), Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain =
            DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             HalfGPRs.getValue(isLittleEndian ? 1 : 0), Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, dl, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), Arg);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 0 : 1), Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 1 : 0), Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Glue all emitted copies together so that nothing can be scheduled
    // between them.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(
        VA.getLocReg(), ReturnF16 ? Arg.getValueType() : VA.getLocVT()));
  }
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (ARM::GPRRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i32));
      else if (ARM::DPRRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  // CPUs which aren't M-class use a special sequence to return from
  // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
  // though we use "subs pc, lr, #N").
  //
  // M-class CPUs actually use a normal return sequence with a special
  // (hardware-provided) value in LR, so the normal code path works.
  if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
      !Subtarget->isMClass()) {
    if (Subtarget->isThumb1Only())
      report_fatal_error("interrupt attribute is not supported in Thumb1");
    return LowerInterruptReturn(RetOps, dl, DAG);
  }

  ARMISD::NodeType RetNode = AFI->isCmseNSEntryFunction() ? ARMISD::SERET_FLAG :
                                                            ARMISD::RET_FLAG;
  return DAG.getNode(RetNode, dl, MVT::Other, RetOps);
}

bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
    SDNode *VMov = Copy;
    // f64 returned in a pair of GPRs.
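    // Both GPR halves must feed CopyToReg nodes chained together: the copy
    // whose chain operand is the other copy is the second one, and the chain
    // operand of the first copy is the candidate tail-call chain.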
    SmallPtrSet<SDNode*, 2> Copies;
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      if (UI->getOpcode() != ISD::CopyToReg)
        return false;
      Copies.insert(*UI);
    }
    if (Copies.size() > 2)
      return false;

    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      SDValue UseChain = UI->getOperand(0);
      if (Copies.count(UseChain.getNode()))
        // Second CopyToReg
        Copy = *UI;
      else {
        // We are at the top of this chain.
        // If the copy has a glue operand, we conservatively assume it
        // isn't safe to perform a tail call.
        if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
          return false;
        // First CopyToReg
        TCChain = UseChain;
      }
    }
  } else if (Copy->getOpcode() == ISD::BITCAST) {
    // f32 returned in a single GPR.
    if (!Copy->hasOneUse())
      return false;
    Copy = *Copy->use_begin();
    if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
      return false;
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else {
    return false;
  }

  bool HasRet = false;
  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != ARMISD::RET_FLAG &&
        UI->getOpcode() != ARMISD::INTRET_FLAG)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}

bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  if (!Subtarget->supportsTailCall())
    return false;

  if (!CI->isTailCall())
    return false;

  return true;
}

// Writing a 64-bit value, so we need to split it into two 32-bit values first
// and pass the low and high parts through.
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue WriteValue = Op->getOperand(2);

  // This function is only supposed to be called for an i64-typed argument.
  assert(WriteValue.getValueType() == MVT::i64
          && "LowerWRITE_REGISTER called for non-i64 type argument.");

  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
                           DAG.getConstant(1, DL, MVT::i32));
  SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
  return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterparts wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above-mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOVi.
SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  // FIXME there is no actual debug info here
  SDLoc dl(Op);
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;

  // When generating execute-only code Constant Pools must be promoted to the
  // global data section. It's a bit ugly that we can't share them across basic
  // blocks, but this way we guarantee that execute-only behaves correctly with
  // position-independent addressing modes.
  if (Subtarget->genExecuteOnly()) {
    auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
    auto T = const_cast<Type*>(CP->getType());
    auto C = const_cast<Constant*>(CP->getConstVal());
    auto M = const_cast<Module*>(DAG.getMachineFunction().
                                 getFunction().getParent());
    auto GV = new GlobalVariable(
                    *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C,
                    Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
                    Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" +
                    Twine(AFI->createPICLabelUId())
                  );
    SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV),
                                            dl, PtrVT);
    return LowerGlobalAddress(GA, DAG);
  }

  if (CP->isMachineConstantPoolEntry())
    Res =
        DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign());
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}

unsigned ARMTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = 0;
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue CPAddr;
  bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
  if (!IsPositionIndependent) {
    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, Align(4));
  } else {
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMPCLabelIndex = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
                                      ARMCP::CPBlockAddress, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
  SDValue Result = DAG.getLoad(
      PtrVT, DL, DAG.getEntryNode(), CPAddr,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  if (!IsPositionIndependent)
    return Result;
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
}

/// Convert a TLS address reference into the correct sequence of loads
/// and calls to compute the variable's address for Darwin, and return an
/// SDValue containing the final node.

/// Darwin only has one TLS scheme which must be capable of dealing with the
/// fully general situation, in the worst case. This means:
///     + "extern __thread" declaration.
///     + Defined in a possibly unknown dynamic library.
///
/// The general system is that each __thread variable has a [3 x i32] descriptor
/// which contains information used by the runtime to calculate the address. The
/// only part of this the compiler needs to know about is the first word, which
/// contains a function pointer that must be called with the address of the
/// entire descriptor in "r0".
///
/// Since this descriptor may be in a different unit, in general access must
/// proceed along the usual ARM rules. A common sequence to produce is:
///
///     movw rT1, :lower16:_var$non_lazy_ptr
///     movt rT1, :upper16:_var$non_lazy_ptr
///     ldr r0, [rT1]
///     ldr rT2, [r0]
///     blx rT2
///     [...address now in r0...]
SDValue
ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() &&
         "This function expects a Darwin target");
  SDLoc DL(Op);

  // The first step is to get the address of the actual global symbol. This is
  // where the TLS descriptor lives.
  SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);

  // The first entry in the descriptor is a function pointer that we must call
  // to obtain the address of the variable.
  SDValue Chain = DAG.getEntryNode();
  SDValue FuncTLVGet = DAG.getLoad(
      MVT::i32, DL, Chain, DescAddr,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()), Align(4),
      MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant);
  Chain = FuncTLVGet.getValue(1);

  MachineFunction &F = DAG.getMachineFunction();
  MachineFrameInfo &MFI = F.getFrameInfo();
  MFI.setAdjustsStack(true);

  // TLS calls preserve all registers except those that absolutely must be
  // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
  // silly).
  auto TRI =
      getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo();
  auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
  const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());

  // Finally, we can make the call. This is just a degenerate version of a
  // normal ARM call node: r0 takes the address of the descriptor, and the
  // call returns the address of the variable in this thread.
  Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
  Chain =
      DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
                  Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
                  DAG.getRegisterMask(Mask), Chain.getValue(1));
  return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
}

SDValue
ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");

  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(Op);
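  // In outline, the address computed below is
  //   *(*(TEB + 0x2c) + _tls_index * 4) + SECREL(var)
  // i.e. this module's slot in the thread's TLS array (reached through the
  // TEB's ThreadLocalStoragePointer) plus the variable's offset within the
  // .tls section.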

  // Load the current TEB (thread environment block)
  SDValue Ops[] = {Chain,
                   DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
                   DAG.getTargetConstant(15, DL, MVT::i32),
                   DAG.getTargetConstant(0, DL, MVT::i32),
                   DAG.getTargetConstant(13, DL, MVT::i32),
                   DAG.getTargetConstant(0, DL, MVT::i32),
                   DAG.getTargetConstant(2, DL, MVT::i32)};
  SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
                                   DAG.getVTList(MVT::i32, MVT::Other), Ops);

  SDValue TEB = CurrentTEB.getValue(0);
  Chain = CurrentTEB.getValue(1);

  // Load the ThreadLocalStoragePointer from the TEB
  // A pointer to the TLS array is located at offset 0x2c from the TEB.
  SDValue TLSArray =
      DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
  TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());

  // The pointer to the thread's TLS data area lives in the TLS array at an
  // offset of the TLS index scaled by 4.

  // Load the TLS index from the C runtime
  SDValue TLSIndex =
      DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
  TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
  TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());

  SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
                              DAG.getConstant(2, DL, MVT::i32));
  SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
                            DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
                            MachinePointerInfo());

  // Get the offset of the start of the .tls section (section base)
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
  SDValue Offset = DAG.getLoad(
      PtrVT, DL, Chain,
      DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
                  DAG.getTargetConstantPool(CPV, PtrVT, Align(4))),
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

  return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(GA);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV =
    ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                    ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
  Argument = DAG.getLoad(
      PtrVT, dl, DAG.getEntryNode(), Argument,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  SDValue Chain = Argument.getValue(1);

  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);

  // call __tls_get_addr.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
  Args.push_back(Entry);

  // FIXME: is there useful debug info available here?
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
      CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
      DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.first;
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG,
                                        TLSModel::Model model) const {
  const GlobalValue *GV = GA->getGlobal();
  SDLoc dl(GA);
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (model == TLSModel::InitialExec) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    // Initial exec model.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                      ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
                                      true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  } else {
    // local exec model
    assert(model == TLSModel::LocalExec);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  if (Subtarget->isTargetDarwin())
    return LowerGlobalTLSAddressDarwin(Op, DAG);

  if (Subtarget->isTargetWindows())
    return LowerGlobalTLSAddressWindows(Op, DAG);

  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() && "Only ELF implemented here");
  TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());

  switch (model) {
    case TLSModel::GeneralDynamic:
    case TLSModel::LocalDynamic:
      return LowerToTLSGeneralDynamicModel(GA, DAG);
    case TLSModel::InitialExec:
    case TLSModel::LocalExec:
      return LowerToTLSExecModels(GA, DAG, model);
  }
  llvm_unreachable("bogus TLS model");
}

/// Return true if all users of V are within function F, looking through
/// ConstantExprs.
static bool allUsersAreInFunction(const Value *V, const Function *F) {
  SmallVector<const User*,4> Worklist;
  for (auto *U : V->users())
    Worklist.push_back(U);
  while (!Worklist.empty()) {
    auto *U = Worklist.pop_back_val();
    if (isa<ConstantExpr>(U)) {
      for (auto *UU : U->users())
        Worklist.push_back(UU);
      continue;
    }

    auto *I = dyn_cast<Instruction>(U);
    if (!I || I->getParent()->getParent() != F)
      return false;
  }
  return true;
}

static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
                                     const GlobalValue *GV, SelectionDAG &DAG,
                                     EVT PtrVT, const SDLoc &dl) {
  // If we're creating a pool entry for a constant global with unnamed address,
  // and the global is small enough, we can emit it inline into the constant pool
  // to save ourselves an indirection.
  //
  // This is a win if the constant is only used in one function (so it doesn't
  // need to be duplicated) or duplicating the constant wouldn't increase code
  // size (implying the constant is no larger than 4 bytes).
  const Function &F = DAG.getMachineFunction().getFunction();

  // We rely on this decision to inline being idempotent and unrelated to the
  // use-site. We know that if we inline a variable at one use site, we'll
  // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
  // doesn't know about this optimization, so bail out if it's enabled;
  // otherwise we could decide to inline here (and thus never emit the GV)
  // while fast-isel-generated code still requires the GV.
  if (!EnableConstpoolPromotion ||
      DAG.getMachineFunction().getTarget().Options.EnableFastISel)
      return SDValue();

  auto *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer() ||
      !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
      !GVar->hasLocalLinkage())
    return SDValue();

  // If we inline a value that contains relocations, we move the relocations
  // from .data to .text. This is not allowed in position-independent code.
  auto *Init = GVar->getInitializer();
  if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) &&
      Init->needsRelocation())
    return SDValue();

  // The constant islands pass can only really deal with alignment requests
  // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
  // any type wanting greater alignment requirements than 4 bytes. We also
  // can only promote constants that are multiples of 4 bytes in size or
  // are paddable to a multiple of 4. Currently we only try and pad constants
  // that are strings for simplicity.
  auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
  unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
  Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GVar);
  unsigned RequiredPadding = 4 - (Size % 4);
  bool PaddingPossible =
    RequiredPadding == 4 || (CDAInit && CDAInit->isString());
  if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize ||
      Size == 0)
    return SDValue();

  unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
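  // For example, a 6-byte string gets RequiredPadding == 2 and PaddedSize == 8,
  // while an initializer whose size is already a multiple of 4 gets
  // RequiredPadding == 4 and is left unpadded.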
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // We can't bloat the constant pool too much, else the ConstantIslands pass
  // may fail to converge. If we haven't promoted this global yet (it may have
  // multiple uses), and promoting it would increase the constant pool size (Sz
  // > 4), ensure we have space to do so up to MaxTotal.
  if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
    if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
        ConstpoolPromotionMaxTotal)
      return SDValue();

  // This is only valid if all users are in a single function; we can't clone
  // the constant in general. The LLVM IR unnamed_addr allows merging
  // constants, but not cloning them.
  //
  // We could potentially allow cloning if we could prove all uses of the
  // constant in the current function don't care about the address, like
  // printf format strings. But that isn't implemented for now.
  if (!allUsersAreInFunction(GVar, &F))
    return SDValue();

  // We're going to inline this global. Pad it out if needed.
  if (RequiredPadding != 4) {
    StringRef S = CDAInit->getAsString();

    SmallVector<uint8_t,16> V(S.size());
    std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
    while (RequiredPadding--)
      V.push_back(0);
    Init = ConstantDataArray::get(*DAG.getContext(), V);
  }

  auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
  SDValue CPAddr = DAG.getTargetConstantPool(CPVal, PtrVT, Align(4));
  if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
    AFI->markGlobalAsPromotedToConstantPool(GVar);
    AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
                                      PaddedSize - 4);
  }
  ++NumConstpoolPromoted;
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
}

bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const {
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
    if (!(GV = GA->getBaseObject()))
      return false;
  if (const auto *V = dyn_cast<GlobalVariable>(GV))
    return V->isConstant();
  return isa<Function>(GV);
}

SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  switch (Subtarget->getTargetTriple().getObjectFormat()) {
  default: llvm_unreachable("unknown object format");
  case Triple::COFF:
    return LowerGlobalAddressWindows(Op, DAG);
  case Triple::ELF:
    return LowerGlobalAddressELF(Op, DAG);
  case Triple::MachO:
    return LowerGlobalAddressDarwin(Op, DAG);
  }
}

SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  const TargetMachine &TM = getTargetMachine();
  bool IsRO = isReadOnly(GV);

  // Call promoteToConstantPool only when not generating an execute-only (XO)
  // text section.
  if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
    if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl))
      return V;

  if (isPositionIndependent()) {
    bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
    SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                           UseGOT_PREL ? ARMII::MO_GOT : 0);
    SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
    if (UseGOT_PREL)
      Result =
          DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                      MachinePointerInfo::getGOT(DAG.getMachineFunction()));
    return Result;
  } else if (Subtarget->isROPI() && IsRO) {
    // PC-relative.
    SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
    SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
    return Result;
  } else if (Subtarget->isRWPI() && !IsRO) {
    // SB-relative.
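    // Under RWPI, writable data is addressed relative to the static base
    // register (r9, read via the CopyFromReg below), so we form SB + offset.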
    SDValue RelAddr;
    if (Subtarget->useMovt()) {
      ++NumMovwMovt;
      SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
      RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
    } else { // use literal pool for address constant
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      RelAddr = DAG.getLoad(
          PtrVT, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }
    SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
    SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
    return Result;
  }

  // If we have T2 ops, we can materialize the address directly via movt/movw
  // pair. This is always cheaper.
  if (Subtarget->useMovt()) {
    ++NumMovwMovt;
    // FIXME: Once remat is capable of dealing with instructions with register
    // operands, expand this into two nodes.
    return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                       DAG.getTargetGlobalAddress(GV, dl, PtrVT));
  } else {
    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, Align(4));
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    return DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  }
}

SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) const {
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported for Darwin");
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();

  if (Subtarget->useMovt())
    ++NumMovwMovt;

  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into multiple nodes
  unsigned Wrapper =
      isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;

  SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
  SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);

  if (Subtarget->isGVIndirectSymbol(GV))
    Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  return Result;
}

SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
                                                     SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
  assert(Subtarget->useMovt() &&
         "Windows on ARM expects to use movw/movt");
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported for Windows");

  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG;
  if (GV->hasDLLImportStorageClass())
    TargetFlags = ARMII::MO_DLLIMPORT;
  else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    TargetFlags = ARMII::MO_COFFSTUB;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result;
  SDLoc DL(Op);

  ++NumMovwMovt;

  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into two nodes.
  Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
                       DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0,
                                                  TargetFlags));
  if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  return Result;
}

SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Val = DAG.getConstant(0, dl, MVT::i32);
  return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
                     Op.getOperand(1), Val);
}

SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
}

SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
                     Op.getOperand(0));
}

SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
    SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const {
  unsigned IntNo =
      cast<ConstantSDNode>(
          Op.getOperand(Op.getOperand(0).getValueType() == MVT::Other))
          ->getZExtValue();
  switch (IntNo) {
    default:
      return SDValue();  // Don't custom lower most intrinsics.
    case Intrinsic::arm_gnu_eabi_mcount: {
      MachineFunction &MF = DAG.getMachineFunction();
      EVT PtrVT = getPointerTy(DAG.getDataLayout());
      SDLoc dl(Op);
      SDValue Chain = Op.getOperand(0);
      // call "\01__gnu_mcount_nc"
      const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
      const uint32_t *Mask =
          ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
      assert(Mask && "Missing call preserved mask for calling convention");
      // Mark LR an implicit live-in.
      unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
      SDValue ReturnAddress =
          DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT);
      constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue};
      SDValue Callee =
          DAG.getTargetExternalSymbol("\01__gnu_mcount_nc", PtrVT, 0);
      SDValue RegisterMask = DAG.getRegisterMask(Mask);
      if (Subtarget->isThumb())
        return SDValue(
            DAG.getMachineNode(
                ARM::tBL_PUSHLR, dl, ResultTys,
                {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT),
                 DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}),
            0);
      return SDValue(
          DAG.getMachineNode(ARM::BL_PUSHLR, dl, ResultTys,
                             {ReturnAddress, Callee, RegisterMask, Chain}),
          0);
    }
  }
}

SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                          const ARMSubtarget *Subtarget) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::arm_cls: {
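    // cls(x) is expanded as ctlz((((x >>s 31) ^ x) << 1) | 1): xor-ing with
    // the replicated sign turns leading sign bits into leading zeros, the
    // shift drops the (now zero) top bit from the count, and or-ing in a 1
    // keeps the CTLZ input non-zero.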
    const SDValue &Operand = Op.getOperand(1);
    const EVT VTy = Op.getValueType();
    SDValue SRA =
        DAG.getNode(ISD::SRA, dl, VTy, Operand, DAG.getConstant(31, dl, VTy));
    SDValue XOR = DAG.getNode(ISD::XOR, dl, VTy, SRA, Operand);
    SDValue SHL =
        DAG.getNode(ISD::SHL, dl, VTy, XOR, DAG.getConstant(1, dl, VTy));
    SDValue OR =
        DAG.getNode(ISD::OR, dl, VTy, SHL, DAG.getConstant(1, dl, VTy));
    SDValue Result = DAG.getNode(ISD::CTLZ, dl, VTy, OR);
    return Result;
  }
  case Intrinsic::arm_cls64: {
    // cls(x) = if cls(hi(x)) != 31 then cls(hi(x))
    //          else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x)))
    const SDValue &Operand = Op.getOperand(1);
    const EVT VTy = Op.getValueType();

    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand,
                             DAG.getConstant(1, dl, VTy));
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand,
                             DAG.getConstant(0, dl, VTy));
    SDValue Constant0 = DAG.getConstant(0, dl, VTy);
    SDValue Constant1 = DAG.getConstant(1, dl, VTy);
    SDValue Constant31 = DAG.getConstant(31, dl, VTy);
    SDValue SRAHi = DAG.getNode(ISD::SRA, dl, VTy, Hi, Constant31);
    SDValue XORHi = DAG.getNode(ISD::XOR, dl, VTy, SRAHi, Hi);
    SDValue SHLHi = DAG.getNode(ISD::SHL, dl, VTy, XORHi, Constant1);
    SDValue ORHi = DAG.getNode(ISD::OR, dl, VTy, SHLHi, Constant1);
    SDValue CLSHi = DAG.getNode(ISD::CTLZ, dl, VTy, ORHi);
    SDValue CheckLo =
        DAG.getSetCC(dl, MVT::i1, CLSHi, Constant31, ISD::CondCode::SETEQ);
    SDValue HiIsZero =
        DAG.getSetCC(dl, MVT::i1, Hi, Constant0, ISD::CondCode::SETEQ);
    SDValue AdjustedLo =
        DAG.getSelect(dl, VTy, HiIsZero, Lo, DAG.getNOT(dl, Lo, VTy));
    SDValue CLZAdjustedLo = DAG.getNode(ISD::CTLZ, dl, VTy, AdjustedLo);
    SDValue Result =
        DAG.getSelect(dl, VTy, CheckLo,
                      DAG.getNode(ISD::ADD, dl, VTy, CLZAdjustedLo, Constant31),
                      CLSHi);
    return Result;
  }
  case Intrinsic::eh_sjlj_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue CPAddr;
    bool IsPositionIndependent = isPositionIndependent();
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
                                      ARMCP::CPLSDA, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    if (IsPositionIndependent) {
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  case Intrinsic::arm_neon_vabs:
    return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
                        Op.getOperand(1));
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
      ? ARMISD::VMULLs : ARMISD::VMULLu;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminnm:
  case Intrinsic::arm_neon_vmaxnm: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
      ? ISD::FMINNUM : ISD::FMAXNUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminu:
  case Intrinsic::arm_neon_vmaxu: {
    if (Op.getValueType().isFloatingPoint())
      return SDValue();
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
      ? ISD::UMIN : ISD::UMAX;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vmins:
  case Intrinsic::arm_neon_vmaxs: {
    // v{min,max}s is overloaded between signed integers and floats.
    if (!Op.getValueType().isFloatingPoint()) {
      unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
        ? ISD::SMIN : ISD::SMAX;
      return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2));
    }
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
      ? ISD::FMINIMUM : ISD::FMAXIMUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vtbl1:
    return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::arm_neon_vtbl2:
    return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::arm_mve_pred_i2v:
  case Intrinsic::arm_mve_pred_v2i:
    return DAG.getNode(ARMISD::PREDICATE_CAST, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::arm_mve_vreinterpretq:
    return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::arm_mve_lsll:
    return DAG.getNode(ARMISD::LSLL, SDLoc(Op), Op->getVTList(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::arm_mve_asrl:
    return DAG.getNode(ARMISD::ASRL, SDLoc(Op), Op->getVTList(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  }
}

static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                 const ARMSubtarget *Subtarget) {
  SDLoc dl(Op);
  ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
  auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
  if (SSID == SyncScope::SingleThread)
    return Op;

  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
           "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(0, dl, MVT::i32));
  }

  ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
  AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
  ARM_MB::MemBOpt Domain = ARM_MB::ISH;
  if (Subtarget->isMClass()) {
    // Only a full system barrier exists in the M-class architectures.
    Domain = ARM_MB::SY;
  } else if (Subtarget->preferISHSTBarriers() &&
             Ord == AtomicOrdering::Release) {
    // Swift happens to implement ISHST barriers in a way that's compatible with
    // Release semantics but weaker than ISH so we'd be fools not to use
    // it. Beware: other processors probably don't!
    Domain = ARM_MB::ISHST;
  }

  return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
                     DAG.getConstant(Domain, dl, MVT::i32));
}

static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
                             const ARMSubtarget *Subtarget) {
  // ARM pre-v5TE and Thumb1 do not have preload instructions.
  if (!(Subtarget->isThumb2() ||
        (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc dl(Op);
  unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
  if (!isRead &&
      (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
    // ARMv7 with MP extension has PLDW.
    return Op.getOperand(0);

  unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (Subtarget->isThumb()) {
    // Invert the bits.
    isRead = ~isRead & 1;
    isData = ~isData & 1;
  }

  return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
                     DAG.getConstant(isData, dl, MVT::i32));
}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc dl(Op);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
                                                CCValAssign &NextVA,
                                                SDValue &Root,
                                                SelectionDAG &DAG,
                                                const SDLoc &dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = &ARM::tGPRRegClass;
  else
    RC = &ARM::GPRRegClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    ArgValue2 = DAG.getLoad(
        MVT::i32, dl, Root, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }
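  // VMOVDRR takes (low word, high word); on big-endian targets the first
  // register/slot holds the high word, so swap the halves before combining.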
  if (!Subtarget->isLittle())
    std::swap (ArgValue, ArgValue2);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval).  Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: the frame index that the registers were stored into.
int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                                      const SDLoc &dl, SDValue &Chain,
                                      const Value *OrigArg,
                                      unsigned InRegsParamRecordIdx,
                                      int ArgOffset, unsigned ArgSize) const {
  // Currently, two use cases are possible:
  // Case #1. Non-vararg function, and we meet the first byval parameter.
  //          Set up the first unallocated register as the first byval
  //          register and eat all remaining registers
  //          (these two actions are performed by the HandleByVal method).
  //          Then, here, we initialize the stack frame with
  //          "store-reg" instructions.
  // Case #2. Vararg function that doesn't contain byval parameters.
  //          The same: eat all remaining unallocated registers and
  //          initialize the stack frame.

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned RBegin, REnd;
  if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
    CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
  } else {
    unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
    REnd = ARM::R4;
  }

  if (REnd != RBegin)
    ArgOffset = -4 * (ARM::R4 - RBegin);
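  // For example, RBegin == ARM::R2 gives ArgOffset == -8: r2 and r3 are
  // stored into the 8 bytes directly below the first stack-passed argument,
  // keeping the register-save area contiguous with the stack arguments.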

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
  SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);

  SmallVector<SDValue, 4> MemOps;
  const TargetRegisterClass *RC =
      AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;

  for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
    unsigned VReg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
    SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(OrigArg, 4 * i));
    MemOps.push_back(Store);
    FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  return FrameIndex;
}

// Set up the stack frame that the va_list pointer will start from.
void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                             const SDLoc &dl, SDValue &Chain,
                                             unsigned ArgOffset,
                                             unsigned TotalArgRegsSaveSize,
                                             bool ForceMutable) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Try to store any remaining integer argument regs
  // to their spots on the stack so that they may be loaded by dereferencing
  // the result of va_next.
  // If there are no regs to be stored, just point the address after the last
  // argument passed via the stack.
  int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
                                  CCInfo.getInRegsParamsCount(),
                                  CCInfo.getNextStackOffset(),
                                  std::max(4U, TotalArgRegsSaveSize));
  AFI->setVarArgsFrameIndex(FrameIndex);
}

bool ARMTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
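    // A half-precision value passed in an f32 register part (as in the
    // hard-float ABI) travels in the low 16 bits: bitcast the half to an i16,
    // any-extend it to i32, and reinterpret that as f32.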
    unsigned ValueBits = ValueVT.getSizeInBits();
    unsigned PartBits = PartVT.getSizeInBits();
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    Parts[0] = Val;
    return true;
  }
  return false;
}

SDValue ARMTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    unsigned ValueBits = ValueVT.getSizeInBits();
    unsigned PartBits = PartVT.getSizeInBits();
    SDValue Val = Parts[0];

    Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val);
    Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    return Val;
  }
  return SDValue();
}

SDValue ARMTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  SDValue ArgValue;
  Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;

  // Initially ArgRegsSaveSize is zero.
  // Then we increase this value each time we meet a byval parameter.
  // We also increase this value for varargs functions.
  AFI->setArgRegsSaveSize(0);

  // Calculate the amount of stack space that we need to allocate to store
  // byval and variadic arguments that are passed in registers.
  // We need to know this before we allocate the first byval or variadic
  // argument, as they will be allocated a stack slot below the CFA (Canonical
  // Frame Address, the stack pointer at entry to the function).
  unsigned ArgRegBegin = ARM::R4;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
      break;

    CCValAssign &VA = ArgLocs[i];
    unsigned Index = VA.getValNo();
    ISD::ArgFlagsTy Flags = Ins[Index].Flags;
    if (!Flags.isByVal())
      continue;

    assert(VA.isMemLoc() && "unexpected byval pointer in reg");
    unsigned RBegin, REnd;
    CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
    ArgRegBegin = std::min(ArgRegBegin, RBegin);

    CCInfo.nextInRegsParam();
  }
  CCInfo.rewindByValRegsInfo();

  int lastInsIndex = -1;
  if (isVarArg && MFI.hasVAStart()) {
    unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    if (RegIdx != array_lengthof(GPRArgRegs))
      ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
  }

  unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
  AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (Ins[VA.getValNo()].isOrigArg()) {
      std::advance(CurOrigArg,
                   Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
    }
    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
        SDValue ArgValue1 =
            GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
        VA = ArgLocs[++i]; // skip ahead to next loc
        SDValue ArgValue2;
        if (VA.isMemLoc()) {
          int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          ArgValue2 = DAG.getLoad(
              MVT::f64, dl, Chain, FIN,
              MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
        } else {
          ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
        }
        ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue,
                               ArgValue1, DAG.getIntPtrConstant(0, dl));
        ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue,
                               ArgValue2, DAG.getIntPtrConstant(1, dl));
      } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) {
        ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
      } else {
        const TargetRegisterClass *RC;

        if (RegVT == MVT::f16 || RegVT == MVT::bf16)
          RC = &ARM::HPRRegClass;
        else if (RegVT == MVT::f32)
          RC = &ARM::SPRRegClass;
        else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 ||
                 RegVT == MVT::v4bf16)
          RC = &ARM::DPRRegClass;
        else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 ||
                 RegVT == MVT::v8bf16)
          RC = &ARM::QPRRegClass;
        else if (RegVT == MVT::i32)
          RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;
        else
          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");

        // Transform the arguments in physical registers into virtual ones.
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

        // If this value is passed in r0 and has the returned attribute (e.g.
        // C++ 'structors), record this fact for later use.
        if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) {
          AFI->setPreservesR0();
        }
      }

      // If this is an 8 or 16-bit value, it is really passed promoted
      // to 32 bits.  Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      // f16 arguments have their size extended to 4 bytes and are passed as if
      // they had been copied to the LSBs of a 32-bit register.
      // For that, they are passed extended to i32 (soft ABI) or to f32
      // (hard ABI).
      if (VA.needsCustom() &&
          (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
        ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue);

      InVals.push_back(ArgValue);
    } else { // !VA.isRegLoc()
      // sanity check
      assert(VA.isMemLoc());
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

      int index = VA.getValNo();

      // Some Ins[] entries become multiple ArgLocs[] entries.
      // Process them only once.
      if (index != lastInsIndex) {
        ISD::ArgFlagsTy Flags = Ins[index].Flags;
        // FIXME: For now, all byval parameter objects are marked mutable.
        // This can be changed with more analysis.
        // In case of tail call optimization, mark all arguments mutable, since
        // they could be overwritten by the lowering of arguments in case of a
        // tail call.
        if (Flags.isByVal()) {
          assert(Ins[index].isOrigArg() &&
                 "Byval arguments cannot be implicit");
          unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();

          int FrameIndex = StoreByValRegs(
              CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
              VA.getLocMemOffset(), Flags.getByValSize());
          InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
          CCInfo.nextInRegsParam();
        } else {
          unsigned FIOffset = VA.getLocMemOffset();
          int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits() / 8,
                                         FIOffset, true);

          // Create load nodes to retrieve arguments from the stack.
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                       MachinePointerInfo::getFixedStack(
                                           DAG.getMachineFunction(), FI)));
        }
        lastInsIndex = index;
      }
    }
  }

  // varargs
  if (isVarArg && MFI.hasVAStart()) {
    VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset(),
                         TotalArgRegsSaveSize);
    if (AFI->isCmseNSEntryFunction()) {
      DiagnosticInfoUnsupported Diag(
          DAG.getMachineFunction().getFunction(),
          "secure entry function must not be variadic", dl.getDebugLoc());
      DAG.getContext()->diagnose(Diag);
    }
  }

  AFI->setArgumentStackSize(CCInfo.getNextStackOffset());

  if (CCInfo.getNextStackOffset() > 0 && AFI->isCmseNSEntryFunction()) {
    DiagnosticInfoUnsupported Diag(
        DAG.getMachineFunction().getFunction(),
        "secure entry function requires arguments on stack", dl.getDebugLoc());
    DAG.getContext()->diagnose(Diag);
  }

  return Chain;
}

/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isPosZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->getValueAPF().isPosZero();
    }
  } else if (Op->getOpcode() == ISD::BITCAST &&
             Op->getValueType(0) == MVT::f64) {
    // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
    // created by LowerConstantFP().
    SDValue BitcastOp = Op->getOperand(0);
    if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
        isNullConstant(BitcastOp->getOperand(0)))
      return true;
  }
  return false;
}

/// Returns an appropriate ARM CMP (cmp) and the corresponding condition code
/// for the given operands.
SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &ARMcc, SelectionDAG &DAG,
                                     const SDLoc &dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    unsigned C = RHSC->getZExtValue();
    if (!isLegalICmpImmediate((int32_t)C)) {
      // Constant does not fit, try adjusting it by one.
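      // For example, on ARM "x < 0x101" uses 0x101, which is not a valid
      // modified immediate, but the equivalent "x <= 0x100" uses 0x100,
      // which is.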
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C - 1, dl, MVT::i32);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (C != 0 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C - 1, dl, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C + 1, dl, MVT::i32);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C + 1, dl, MVT::i32);
        }
        break;
      }
    }
  } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) &&
             (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) {
    // In ARM and Thumb-2, the compare instructions can shift their second
    // operand.
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
  }

  // Thumb1 has very limited immediate modes, so turning an "and" into a
  // shift can save multiple instructions.
  //
  // If we have (x & C1), and C1 is an appropriate mask, we can transform it
  // into "((x << n) >> n)".  But that isn't necessarily profitable on its
  // own. If it's the operand to an unsigned comparison with an immediate,
  // we can eliminate one of the shifts: we transform
  // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)".
  //
  // We avoid transforming cases which aren't profitable due to encoding
  // details:
  //
  // 1. C2 fits into the immediate field of a cmp, and the transformed version
  // would not; in that case, we're essentially trading one immediate load for
  // another.
  // 2. C1 is 255 or 65535, so we can use uxtb or uxth.
  // 3. C2 is zero; we have other code for this special case.
  //
  // FIXME: Figure out profitability for Thumb2; we usually can't save an
  // instruction, since the AND is always one instruction anyway, but we could
  // use narrow instructions in some cases.
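  // For example, "(x & 0xfff) == 0x400" becomes "(x << 20) == (0x400 << 20)",
  // avoiding a separate constant materialization for the 0xfff mask.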
  if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND &&
      LHS->hasOneUse() && isa<ConstantSDNode>(LHS.getOperand(1)) &&
      LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(RHS) &&
      !isSignedIntSetCC(CC)) {
    unsigned Mask = cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue();
    auto *RHSC = cast<ConstantSDNode>(RHS.getNode());
    uint64_t RHSV = RHSC->getZExtValue();
    if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) {
      unsigned ShiftBits = countLeadingZeros(Mask);
      if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) {
        SDValue ShiftAmt = DAG.getConstant(ShiftBits, dl, MVT::i32);
        LHS = DAG.getNode(ISD::SHL, dl, MVT::i32, LHS.getOperand(0), ShiftAmt);
        RHS = DAG.getConstant(RHSV << ShiftBits, dl, MVT::i32);
      }
    }
  }

  // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a
  // single "lsls x, c+1".  The shift sets the "C" and "Z" flags the same
  // way a cmp would.
  // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and
  // some tweaks to the heuristics for the previous and->shift transform.
  // FIXME: Optimize cases where the LHS isn't a shift.
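  // The shifted-out bit 31 of (x << c) becomes the C flag, and Z is set only
  // when (x << c) is 0 or 0x80000000, so "C set and Z clear" (HI) is exactly
  // "(x << c) > 0x80000000U".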
  if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
      isa<ConstantSDNode>(RHS) &&
      cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
      CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
      cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) {
    unsigned ShiftAmt =
      cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1;
    SDValue Shift = DAG.getNode(ARMISD::LSLS, dl,
                                DAG.getVTList(MVT::i32, MVT::i32),
                                LHS.getOperand(0),
                                DAG.getConstant(ShiftAmt, dl, MVT::i32));
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                     Shift.getValue(1), SDValue());
    ARMcc = DAG.getConstant(ARMCC::HI, dl, MVT::i32);
    return Chain.getValue(1);
  }

  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);

  // If the RHS is a constant zero then the V (overflow) flag will never be
  // set. This can allow us to simplify GE to PL or LT to MI, which can be
  // simpler for other passes (like the peephole optimiser) to deal with.
  if (isNullConstant(RHS)) {
    switch (CondCode) {
      default: break;
      case ARMCC::GE:
        CondCode = ARMCC::PL;
        break;
      case ARMCC::LT:
        CondCode = ARMCC::MI;
        break;
    }
  }

  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
    // Uses only Z Flag
    CompareType = ARMISD::CMPZ;
    break;
  }
  ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
}

/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, const SDLoc &dl,
                                     bool Signaling) const {
  assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64);
  SDValue Cmp;
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP,
                      dl, MVT::Glue, LHS, RHS);
  else
    Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0,
                      dl, MVT::Glue, LHS);
  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
}

/// duplicateCmp - Glue values can have only one use, so this function
/// duplicates a comparison node.
SDValue
ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
  unsigned Opc = Cmp.getOpcode();
  SDLoc DL(Cmp);
  if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
    return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));

  assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
  Cmp = Cmp.getOperand(0);
  Opc = Cmp.getOpcode();
  if (Opc == ARMISD::CMPFP)
    Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
  else {
    assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
    Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
  }
  return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
}

// This function returns three things: the arithmetic computation itself
// (Value), a comparison (OverflowCmp), and a condition code (ARMcc).  The
// comparison and the condition code define the case in which the arithmetic
// computation *does not* overflow.
std::pair<SDValue, SDValue>
ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                 SDValue &ARMcc) const {
  assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");

  SDValue Value, OverflowCmp;
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDLoc dl(Op);

  // FIXME: We are currently always generating CMPs because we don't support
  // generating CMN through the backend. This is not as good as the natural
  // CMP case because it causes a register dependency and cannot be folded
  // later.

  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Unknown overflow instruction!");
  case ISD::SADDO:
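    // "cmp (a+b), a" sets the V flag exactly when the signed addition a+b
    // overflows, so VC (overflow clear) selects the no-overflow case.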
    ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
    Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
    break;
  case ISD::UADDO:
    ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
    // We use ADDC here to correspond to its use in LowerUnsignedALUO.
    // We do not use it in the USUBO case as Value may not be used.
    Value = DAG.getNode(ARMISD::ADDC, dl,
                        DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS)
                .getValue(0);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
    break;
  case ISD::SSUBO:
    ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
    Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
    break;
  case ISD::USUBO:
    ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
    Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
    break;
  case ISD::UMULO:
    // We generate a UMUL_LOHI and then check if the high word is 0.
    ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
    Value = DAG.getNode(ISD::UMUL_LOHI, dl,
                        DAG.getVTList(Op.getValueType(), Op.getValueType()),
                        LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
                              DAG.getConstant(0, dl, MVT::i32));
    Value = Value.getValue(0); // We only want the low 32 bits for the result.
    break;
  case ISD::SMULO:
    // We generate a SMUL_LOHI and then check if all the bits of the high word
    // are the same as the sign bit of the low word.
    ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
    Value = DAG.getNode(ISD::SMUL_LOHI, dl,
                        DAG.getVTList(Op.getValueType(), Op.getValueType()),
                        LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
                              DAG.getNode(ISD::SRA, dl, Op.getValueType(),
                                          Value.getValue(0),
                                          DAG.getConstant(31, dl, MVT::i32)));
    Value = Value.getValue(0); // We only want the low 32 bits for the result.
    break;
  } // switch (...)

  return std::make_pair(Value, OverflowCmp);
}

SDValue
ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const {
  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
    return SDValue();

  SDValue Value, OverflowCmp;
  SDValue ARMcc;
  std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDLoc dl(Op);
  // We use 0 and 1 as false and true values.
  SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
  SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
  EVT VT = Op.getValueType();

  SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
                                 ARMcc, CCR, OverflowCmp);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
}

static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry,
                                              SelectionDAG &DAG) {
  SDLoc DL(BoolCarry);
  EVT CarryVT = BoolCarry.getValueType();

  // This converts the boolean value carry into the carry flag by doing
  // ARMISD::SUBC Carry, 1
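  // If BoolCarry is 1, computing BoolCarry - 1 does not borrow and C ends up
  // set; if BoolCarry is 0 the subtraction borrows and C ends up clear.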
  SDValue Carry = DAG.getNode(ARMISD::SUBC, DL,
                              DAG.getVTList(CarryVT, MVT::i32),
                              BoolCarry, DAG.getConstant(1, DL, CarryVT));
  return Carry.getValue(1);
}

static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT,
                                              SelectionDAG &DAG) {
  SDLoc DL(Flags);

  // Now convert the carry flag into a boolean carry. We do this
  // using ARMISD::ADDE 0, 0, Carry.
  return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32),
                     DAG.getConstant(0, DL, MVT::i32),
                     DAG.getConstant(0, DL, MVT::i32), Flags);
}

SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op,
                                             SelectionDAG &DAG) const {
  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
    return SDValue();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDLoc dl(Op);

  EVT VT = Op.getValueType();
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  SDValue Value;
  SDValue Overflow;
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Unknown overflow instruction!");
  case ISD::UADDO:
    Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS);
    // Convert the carry flag into a boolean value.
    Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
    break;
  case ISD::USUBO: {
    Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS);
    // Convert the carry flag into a boolean value.
    Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
    // ARMISD::SUBC returns 0 when we have to borrow, so turn that into an
    // overflow value by computing 1 - C.
    Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32,
                           DAG.getConstant(1, dl, MVT::i32), Overflow);
    break;
  }
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
}

static SDValue LowerSADDSUBSAT(SDValue Op, SelectionDAG &DAG,
                               const ARMSubtarget *Subtarget) {
  EVT VT = Op.getValueType();
  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return SDValue();
  if (!VT.isSimple())
    return SDValue();

  unsigned NewOpcode;
  bool IsAdd = Op->getOpcode() == ISD::SADDSAT;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return SDValue();
  case MVT::i8:
    NewOpcode = IsAdd ? ARMISD::QADD8b : ARMISD::QSUB8b;
    break;
  case MVT::i16:
    NewOpcode = IsAdd ? ARMISD::QADD16b : ARMISD::QSUB16b;
    break;
  }

  SDLoc dl(Op);
  SDValue Add =
      DAG.getNode(NewOpcode, dl, MVT::i32,
                  DAG.getSExtOrTrunc(Op->getOperand(0), dl, MVT::i32),
                  DAG.getSExtOrTrunc(Op->getOperand(1), dl, MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Add);
}

SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond = Op.getOperand(0);
  SDValue SelectTrue = Op.getOperand(1);
  SDValue SelectFalse = Op.getOperand(2);
  SDLoc dl(Op);
  unsigned Opc = Cond.getOpcode();

  if (Cond.getResNo() == 1 &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO)) {
    if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
      return SDValue();

    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    EVT VT = Op.getValueType();

    return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
                   OverflowCmp, DAG);
  }

  // Convert:
  //
  //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
  //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
  //
  if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
    const ConstantSDNode *CMOVTrue =
      dyn_cast<ConstantSDNode>(Cond.getOperand(0));
    const ConstantSDNode *CMOVFalse =
      dyn_cast<ConstantSDNode>(Cond.getOperand(1));

    if (CMOVTrue && CMOVFalse) {
      unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
      unsigned CMOVFalseVal = CMOVFalse->getZExtValue();

      SDValue True;
      SDValue False;
      if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
        True = SelectTrue;
        False = SelectFalse;
      } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
        True = SelectFalse;
        False = SelectTrue;
      }

      if (True.getNode() && False.getNode()) {
        EVT VT = Op.getValueType();
        SDValue ARMcc = Cond.getOperand(2);
        SDValue CCR = Cond.getOperand(3);
        SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
        assert(True.getValueType() == VT);
        return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
      }
    }
  }

  // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
  // undefined bits before doing a full-word comparison with zero.
  Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
                     DAG.getConstant(1, dl, Cond.getValueType()));

  return DAG.getSelectCC(dl, Cond,
                         DAG.getConstant(0, dl, Cond.getValueType()),
                         SelectTrue, SelectFalse, ISD::SETNE);
}

static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                                 bool &swpCmpOps, bool &swpVselOps) {
  // Start by selecting the GE condition code for opcodes that return true for
  // 'equality'
  if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
      CC == ISD::SETULE || CC == ISD::SETGE  || CC == ISD::SETLE)
    CondCode = ARMCC::GE;

  // and GT for opcodes that return false for 'equality'.
  else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
           CC == ISD::SETULT || CC == ISD::SETGT  || CC == ISD::SETLT)
    CondCode = ARMCC::GT;

  // Since we are constrained to GE/GT, if the opcode contains 'less', we need
  // to swap the compare operands.
  if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
      CC == ISD::SETULT || CC == ISD::SETLE  || CC == ISD::SETLT)
    swpCmpOps = true;

  // Both GT and GE are ordered comparisons, and return false for 'unordered'.
  // If we have an unordered opcode, we need to swap the operands to the VSEL
  // instruction (effectively negating the condition).
  //
  // This also has the effect of swapping which one of 'less' or 'greater'
  // returns true, so we also swap the compare operands. It also switches
  // whether we return true for 'equality', so we compensate by picking the
  // opposite condition code to our original choice.
  if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
      CC == ISD::SETUGT) {
    swpCmpOps = !swpCmpOps;
    swpVselOps = !swpVselOps;
    CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
  }

  // 'ordered' is 'anything but unordered', so use the VS condition code and
  // swap the VSEL operands.
  if (CC == ISD::SETO) {
    CondCode = ARMCC::VS;
    swpVselOps = true;
  }

  // 'unordered or not equal' is 'anything but equal', so use the EQ condition
  // code and swap the VSEL operands. Also do this if we don't care about the
  // unordered case.
  if (CC == ISD::SETUNE || CC == ISD::SETNE) {
    CondCode = ARMCC::EQ;
    swpVselOps = true;
  }
}

SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
                                   SDValue TrueVal, SDValue ARMcc, SDValue CCR,
                                   SDValue Cmp, SelectionDAG &DAG) const {
  if (!Subtarget->hasFP64() && VT == MVT::f64) {
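    // Without double-precision FP registers, split the f64 into two i32
    // halves (VMOVRRD), select each half with an i32 CMOV, and rebuild the
    // f64 with VMOVDRR.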
    FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
    TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
                          DAG.getVTList(MVT::i32, MVT::i32), TrueVal);

    SDValue TrueLow = TrueVal.getValue(0);
    SDValue TrueHigh = TrueVal.getValue(1);
    SDValue FalseLow = FalseVal.getValue(0);
    SDValue FalseHigh = FalseVal.getValue(1);

    SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
                              ARMcc, CCR, Cmp);
    SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
                               ARMcc, CCR, duplicateCmp(Cmp, DAG));

    return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
  } else {
    return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
                       Cmp);
  }
}

static bool isGTorGE(ISD::CondCode CC) {
  return CC == ISD::SETGT || CC == ISD::SETGE;
}

static bool isLTorLE(ISD::CondCode CC) {
  return CC == ISD::SETLT || CC == ISD::SETLE;
}

// See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
// All of these conditions (and their <= and >= counterparts) will do:
//          x < k ? k : x
//          x > k ? x : k
//          k < x ? x : k
//          k > x ? k : x
static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
                            const SDValue TrueVal, const SDValue FalseVal,
                            const ISD::CondCode CC, const SDValue K) {
  return (isGTorGE(CC) &&
          ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
         (isLTorLE(CC) &&
          ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
}

// Check if two chained conditionals could be converted into SSAT or USAT.
//
// SSAT can replace a set of two conditional selectors that bound a number to
// the interval [~k, k] (i.e. [-k-1, k]) when k + 1 is a power of 2. Here are
// some examples:
//
//     x < -k ? -k : (x > k ? k : x)
//     x < -k ? -k : (x < k ? x : k)
//     x > -k ? (x > k ? k : x) : -k
//     x < k ? (x < -k ? -k : x) : k
//     etc.
//
// LLVM canonicalizes these to either a min(max()) or a max(min())
// pattern. This function tries to match one of these and will return true
// if successful.
//
// USAT works similarly to SSAT, but bounds the value to the interval [0, k],
// where k + 1 is a power of 2.
//
// It returns true if the conversion can be done, false otherwise.
// Additionally, the variable is returned in parameter V, the constant in K,
// and Usat is set to true if the conditional represents an unsigned
// saturation.
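// For example, clamping a value to [-128, 127] matches this pattern with
// k == 127 (k + 1 == 128, a power of two) as a signed saturation, while
// clamping to [0, 255] matches with k == 255 and a lower bound of 0, i.e. an
// unsigned saturation.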
static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
                                    uint64_t &K, bool &Usat) {
  SDValue V1 = Op.getOperand(0);
  SDValue K1 = Op.getOperand(1);
  SDValue TrueVal1 = Op.getOperand(2);
  SDValue FalseVal1 = Op.getOperand(3);
  ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
  if (Op2.getOpcode() != ISD::SELECT_CC)
    return false;

  SDValue V2 = Op2.getOperand(0);
  SDValue K2 = Op2.getOperand(1);
  SDValue TrueVal2 = Op2.getOperand(2);
  SDValue FalseVal2 = Op2.getOperand(3);
  ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();

  SDValue V1Tmp = V1;
  SDValue V2Tmp = V2;

  if (V1.getOpcode() == ISD::SIGN_EXTEND_INREG &&
      V2.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    V1Tmp = V1.getOperand(0);
    V2Tmp = V2.getOperand(0);
  }

  // Check that the registers and the constants match a max(min()) or min(max())
  // pattern
  if (V1Tmp == TrueVal1 && V2Tmp == TrueVal2 && K1 == FalseVal1 &&
      K2 == FalseVal2 &&
      ((isGTorGE(CC1) && isLTorLE(CC2)) || (isLTorLE(CC1) && isGTorGE(CC2)))) {

    // Check that the constant in the lower-bound check is
    // the opposite of the constant in the upper-bound check
    // in 1's complement.
    if (!isa<ConstantSDNode>(K1) || !isa<ConstantSDNode>(K2))
      return false;

    int64_t Val1 = cast<ConstantSDNode>(K1)->getSExtValue();
    int64_t Val2 = cast<ConstantSDNode>(K2)->getSExtValue();
    int64_t PosVal = std::max(Val1, Val2);
    int64_t NegVal = std::min(Val1, Val2);

    if (!((Val1 > Val2 && isLTorLE(CC1)) || (Val1 < Val2 && isLTorLE(CC2))) ||
        !isPowerOf2_64(PosVal + 1))
      return false;

    // Handle the difference between USAT (unsigned) and SSAT (signed)
    // saturation
    if (Val1 == ~Val2)
      Usat = false;
    else if (NegVal == 0)
      Usat = true;
    else
      return false;

    V = V2Tmp;
    // At this point, PosVal is guaranteed to be positive.
    K = (uint64_t)PosVal;

    return true;
  }
  return false;
}

// Check if a condition of the type x < k ? k : x can be converted into a
// bit operation instead of conditional moves.
// Currently this is allowed given:
// - The conditions and values match up
// - k is 0 or -1 (all ones)
// This function will not check the last condition; that's up to the caller.
// It returns true if the transformation can be made, and in that case
// returns x in V and k in SatK.
static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V,
                                         SDValue &SatK) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);

  SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
                                               ? &RHS
                                               : nullptr;

  // No constant operand in the comparison; early out.
  if (!K)
    return false;

  SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal;
  V = (KTmp == TrueVal) ? FalseVal : TrueVal;
  SDValue VTmp = (K && *K == LHS) ? RHS : LHS;

  // If the constant on the compare side does not match the one on the select
  // side, or the variables do not match, early out.
  if (*K != KTmp || V != VTmp)
    return false;

  if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) {
    SatK = *K;
    return true;
  }

  return false;
}

bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const {
  if (VT == MVT::f32)
    return !Subtarget->hasVFP2Base();
  if (VT == MVT::f64)
    return !Subtarget->hasFP64();
  if (VT == MVT::f16)
    return !Subtarget->hasFullFP16();
  return false;
}

SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  // Try to convert two chained saturating conditional selects into a single
  // SSAT or USAT.
  SDValue SatValue;
  uint64_t SatConstant;
  bool SatUSat;
  if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) &&
      isSaturatingConditional(Op, SatValue, SatConstant, SatUSat)) {
    if (SatUSat)
      return DAG.getNode(ARMISD::USAT, dl, VT, SatValue,
                         DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
    else
      return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
                         DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
  }

  // Try to convert expressions of the form x < k ? k : x (and similar forms)
  // into more efficient bit operations, which is possible when k is 0 or -1
  // On ARM and Thumb-2, which have a flexible second operand, this results in
  // a single instruction. On Thumb-1 the shift and the bit operation take two
  // instructions.
  // Only allow this transformation on full-width (32-bit) operations.
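  // For example, "x < 0 ? 0 : x" becomes "x & ~(x >> 31)" and
  // "x < -1 ? -1 : x" becomes "x | (x >> 31)", using an arithmetic shift.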
  SDValue LowerSatConstant;
  if (VT == MVT::i32 &&
      isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) {
    SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue,
                                 DAG.getConstant(31, dl, VT));
    if (isNullConstant(LowerSatConstant)) {
      SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV,
                                      DAG.getAllOnesConstant(dl, VT));
      return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV);
    } else if (isAllOnesConstant(LowerSatConstant))
      return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV);
  }

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FalseVal);
  ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TrueVal);

  if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal &&
      LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) {
    unsigned TVal = CTVal->getZExtValue();
    unsigned FVal = CFVal->getZExtValue();
    unsigned Opcode = 0;

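    // For example, "cond ? 0 : -1" can become a CSINV from ZR, and
    // "cond ? 1 : 0" a CSINC from ZR with the condition inverted.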
    if (TVal == ~FVal) {
      Opcode = ARMISD::CSINV;
    } else if (TVal == ~FVal + 1) {
      Opcode = ARMISD::CSNEG;
    } else if (TVal + 1 == FVal) {
      Opcode = ARMISD::CSINC;
    } else if (TVal == FVal + 1) {
      Opcode = ARMISD::CSINC;
      std::swap(TrueVal, FalseVal);
      std::swap(TVal, FVal);
      CC = ISD::getSetCCInverse(CC, LHS.getValueType());
    }

    if (Opcode) {
      // If one of the constants is cheaper than another, materialise the
      // cheaper one and let the csel generate the other.
      if (Opcode != ARMISD::CSINC &&
          HasLowerConstantMaterializationCost(FVal, TVal, Subtarget)) {
        std::swap(TrueVal, FalseVal);
        std::swap(TVal, FVal);
        CC = ISD::getSetCCInverse(CC, LHS.getValueType());
      }

      // Attempt to use ZR by checking whether TVal is 0, possibly inverting
      // the condition to get there. CSINC is not invertible like the other two
      // (~(~a) == a and -(-a) == a, but (a+1)+1 != a).
      if (FVal == 0 && Opcode != ARMISD::CSINC) {
        std::swap(TrueVal, FalseVal);
        std::swap(TVal, FVal);
        CC = ISD::getSetCCInverse(CC, LHS.getValueType());
      }
      if (TVal == 0)
        TrueVal = DAG.getRegister(ARM::ZR, MVT::i32);

      // Drop FVal because we can recover it by inverting/negating TVal.
      FalseVal = TrueVal;

      SDValue ARMcc;
      SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
      EVT VT = TrueVal.getValueType();
      return DAG.getNode(Opcode, dl, VT, TrueVal, FalseVal, ARMcc, Cmp);
    }
  }

  if (isUnsupportedFloatingType(LHS.getValueType())) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(
        DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);

    // If softenSetCCOperands only returned one value, we should compare it to
    // zero.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  if (LHS.getValueType() == MVT::i32) {
    // Try to generate VSEL on ARMv8.
    // The VSEL instruction can't use all the usual ARM condition
    // codes: it only has two bits to select the condition code, so it's
    // constrained to use only GE, GT, VS and EQ.
    //
    // To implement all the various ISD::SETXXX opcodes, we sometimes need to
    // swap the operands of the previous compare instruction (effectively
    // inverting the compare condition, swapping 'less' and 'greater') and
    // sometimes need to swap the operands to the VSEL (which inverts the
    // condition in the sense of firing whenever the previous condition didn't)
    if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 ||
                                        TrueVal.getValueType() == MVT::f32 ||
                                        TrueVal.getValueType() == MVT::f64)) {
      ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
      if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
          CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
        CC = ISD::getSetCCInverse(CC, LHS.getValueType());
        std::swap(TrueVal, FalseVal);
      }
    }

    SDValue ARMcc;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    // Choose GE over PL, which vsel does not support.
    if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
      ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
    return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  // Normalize the fp compare. If RHS is zero we prefer to keep it there so we
  // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we
  // must use VSEL (limited condition codes), due to not having conditional f16
  // moves.
  if (Subtarget->hasFPARMv8Base() &&
      !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) &&
      (TrueVal.getValueType() == MVT::f16 ||
       TrueVal.getValueType() == MVT::f32 ||
       TrueVal.getValueType() == MVT::f64)) {
    bool swpCmpOps = false;
    bool swpVselOps = false;
    checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);

    if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
        CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
      if (swpCmpOps)
        std::swap(LHS, RHS);
      if (swpVselOps)
        std::swap(TrueVal, FalseVal);
    }
  }

  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
  if (CondCode2 != ARMCC::AL) {
    SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
    // FIXME: Needs another CMP because flag can have but one use.
    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
    Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
  }
  return Result;
}

/// canChangeToInt - Given the fp compare operand, return true if it is suitable
/// to morph to an integer compare sequence.
static bool canChangeToInt(SDValue Op, bool &SeenZero,
                           const ARMSubtarget *Subtarget) {
  SDNode *N = Op.getNode();
  if (!N->hasOneUse())
    // Otherwise it requires moving the value from fp to integer registers.
    return false;
  if (!N->getNumValues())
    return false;
  EVT VT = Op.getValueType();
  if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
    // f32 case is generally profitable. f64 case only makes sense when vcmpe +
    // vmrs are very slow, e.g. cortex-a8.
    return false;

  if (isFloatingPointZero(Op)) {
    SeenZero = true;
    return true;
  }
  return ISD::isNormalLoad(N);
}

static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
  if (isFloatingPointZero(Op))
    return DAG.getConstant(0, SDLoc(Op), MVT::i32);

  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
    return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
                       Ld->getPointerInfo(), Ld->getAlignment(),
                       Ld->getMemOperand()->getFlags());

  llvm_unreachable("Unknown VFP cmp argument!");
}

static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
                           SDValue &RetVal1, SDValue &RetVal2) {
  SDLoc dl(Op);

  if (isFloatingPointZero(Op)) {
    RetVal1 = DAG.getConstant(0, dl, MVT::i32);
    RetVal2 = DAG.getConstant(0, dl, MVT::i32);
    return;
  }

  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
    SDValue Ptr = Ld->getBasePtr();
    RetVal1 =
        DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
                    Ld->getAlignment(), Ld->getMemOperand()->getFlags());

    EVT PtrType = Ptr.getValueType();
    unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
    SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
                                 PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
    RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
                          Ld->getPointerInfo().getWithOffset(4), NewAlign,
                          Ld->getMemOperand()->getFlags());
    return;
  }

  llvm_unreachable("Unknown VFP cmp argument!");
}

/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
/// f32 and even f64 comparisons to integer ones.
SDValue
ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  bool LHSSeenZero = false;
  bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
  bool RHSSeenZero = false;
  bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
  if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
    // If unsafe fp math optimization is enabled and there are no other uses of
    // the CMP operands, and the condition code is EQ or NE, we can optimize it
    // to an integer comparison.
    if (CC == ISD::SETOEQ)
      CC = ISD::SETEQ;
    else if (CC == ISD::SETUNE)
      CC = ISD::SETNE;

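    // Masking off the sign bit makes +0.0 and -0.0 compare equal in the
    // integer domain; one of the operands is known to be a floating-point
    // zero here.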
    SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
    SDValue ARMcc;
    if (LHS.getValueType() == MVT::f32) {
      LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(LHS, DAG), Mask);
      RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(RHS, DAG), Mask);
      SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
      SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
      return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                         Chain, Dest, ARMcc, CCR, Cmp);
    }

    SDValue LHS1, LHS2;
    SDValue RHS1, RHS2;
    expandf64Toi32(LHS, DAG, LHS1, LHS2);
    expandf64Toi32(RHS, DAG, RHS1, RHS2);
    LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
    RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
    ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
    ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
    SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
    return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
  }

  return SDValue();
}

SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc dl(Op);

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
  // instruction.
  unsigned Opc = Cond.getOpcode();
  bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
                      !Subtarget->isThumb1Only();
  if (Cond.getResNo() == 1 &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO || OptimizeMul)) {
    // Only lower legal XALUO ops.
    if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
      return SDValue();

    // The actual operation with overflow check.
    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);

    // Reverse the condition code.
    ARMCC::CondCodes CondCode =
        (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
    CondCode = ARMCC::getOppositeCondition(CondCode);
    ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
                       OverflowCmp);
  }

  return SDValue();
}

SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  if (isUnsupportedFloatingType(LHS.getValueType())) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(
        DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);

    // If softenSetCCOperands only returned one value, we should compare it to
    // zero.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
  // instruction.
  unsigned Opc = LHS.getOpcode();
  bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
                      !Subtarget->isThumb1Only();
  if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO || OptimizeMul) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    // Only lower legal XALUO ops.
    if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
      return SDValue();

    // The actual operation with overflow check.
    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc);

    if ((CC == ISD::SETNE) != isOneConstant(RHS)) {
      // Reverse the condition code.
      ARMCC::CondCodes CondCode =
          (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
      CondCode = ARMCC::getOppositeCondition(CondCode);
      ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
    }
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
                       OverflowCmp);
  }

  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMcc;
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMcc, CCR, Cmp);
  }

  if (getTargetMachine().Options.UnsafeFPMath &&
      (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
       CC == ISD::SETNE || CC == ISD::SETUNE)) {
    if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
      return Result;
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  if (CondCode2 != ARMCC::AL) {
    ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
    SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  }
  return Res;
}

SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PTy = getPointerTy(DAG.getDataLayout());
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
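  // Each jump table entry is 4 bytes, so scale the index accordingly.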
  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index);
  if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
    // Thumb2 and ARMv8-M use a two-level jump. That is, they jump into the
    // jump table, which does another jump to the destination. This also makes
    // it easier to translate to TBB / TBH later (Thumb2 only).
    // FIXME: This might not work if the function is extremely large.
    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
                       Addr, Op.getOperand(2), JTI);
  }
  if (isPositionIndependent() || Subtarget->isROPI()) {
    Addr =
        DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
                    MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
    Chain = Addr.getValue(1);
    Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
  } else {
    Addr =
        DAG.getLoad(PTy, dl, Chain, Addr,
                    MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
    Chain = Addr.getValue(1);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
  }
}

static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  if (Op.getValueType().getVectorElementType() == MVT::i32) {
    if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  const bool HasFullFP16 =
    static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();

  EVT NewTy;
  const EVT OpTy = Op.getOperand(0).getValueType();
  if (OpTy == MVT::v4f32)
    NewTy = MVT::v4i32;
  else if (OpTy == MVT::v4f16 && HasFullFP16)
    NewTy = MVT::v4i16;
  else if (OpTy == MVT::v8f16 && HasFullFP16)
    NewTy = MVT::v8i16;
  else
    llvm_unreachable("Invalid type for custom lowering!");

  if (VT != MVT::v4i16 && VT != MVT::v8i16)
    return DAG.UnrollVectorOp(Op.getNode());

  Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0));
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
}

SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorFP_TO_INT(Op, DAG);

  bool IsStrict = Op->isStrictFPOpcode();
  SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);

  if (isUnsupportedFloatingType(SrcVal.getValueType())) {
    RTLIB::Libcall LC;
    if (Op.getOpcode() == ISD::FP_TO_SINT ||
        Op.getOpcode() == ISD::STRICT_FP_TO_SINT)
      LC = RTLIB::getFPTOSINT(SrcVal.getValueType(),
                              Op.getValueType());
    else
      LC = RTLIB::getFPTOUINT(SrcVal.getValueType(),
                              Op.getValueType());
    SDLoc Loc(Op);
    MakeLibCallOptions CallOptions;
    SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
    SDValue Result;
    std::tie(Result, Chain) = makeLibCall(DAG, LC, Op.getValueType(), SrcVal,
                                          CallOptions, Loc, Chain);
    return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
  }

  // FIXME: Remove this when we have strict fp instruction selection patterns
  if (IsStrict) {
    SDLoc Loc(Op);
    SDValue Result =
        DAG.getNode(Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT
                                                             : ISD::FP_TO_UINT,
                    Loc, Op.getValueType(), SrcVal);
    return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
  }

  return Op;
}

static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
    if (VT.getVectorElementType() == MVT::f32)
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  assert((Op.getOperand(0).getValueType() == MVT::v4i16 ||
          Op.getOperand(0).getValueType() == MVT::v8i16) &&
         "Invalid type for custom lowering!");

  const bool HasFullFP16 =
    static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();

  EVT DestVecType;
  if (VT == MVT::v4f32)
    DestVecType = MVT::v4i32;
  else if (VT == MVT::v4f16 && HasFullFP16)
    DestVecType = MVT::v4i16;
  else if (VT == MVT::v8f16 && HasFullFP16)
    DestVecType = MVT::v8i16;
  else
    return DAG.UnrollVectorOp(Op.getNode());

  unsigned CastOpc;
  unsigned Opc;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode!");
  case ISD::SINT_TO_FP:
    CastOpc = ISD::SIGN_EXTEND;
    Opc = ISD::SINT_TO_FP;
    break;
  case ISD::UINT_TO_FP:
    CastOpc = ISD::ZERO_EXTEND;
    Opc = ISD::UINT_TO_FP;
    break;
  }

  Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0));
  return DAG.getNode(Opc, dl, VT, Op);
}

SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorINT_TO_FP(Op, DAG);
  if (isUnsupportedFloatingType(VT)) {
    RTLIB::Libcall LC;
    if (Op.getOpcode() == ISD::SINT_TO_FP)
      LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    else
      LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    MakeLibCallOptions CallOptions;
    return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
                       CallOptions, SDLoc(Op)).first;
  }

  return Op;
}

SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  // Implement fcopysign with a fabs and a conditional fneg.
  SDValue Tmp0 = Op.getOperand(0);
  SDValue Tmp1 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT SrcVT = Tmp1.getValueType();
  bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
    Tmp0.getOpcode() == ARMISD::VMOVDRR;
  bool UseNEON = !InGPR && Subtarget->hasNEON();

  if (UseNEON) {
    // Use VBSL to copy the sign bit.
    unsigned EncodedVal = ARM_AM::createVMOVModImm(0x6, 0x80);
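    // EncodedVal is the VMOV modified-immediate encoding of 0x80000000
    // (Cmode=011x, Imm=0x80), i.e. a splat of the f32 sign bit. The sign is
    // then copied below as (Tmp1 & Mask) | (Tmp0 & ~Mask).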
    SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
                               DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
    EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
    if (VT == MVT::f64)
      Mask = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
                         DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
                         DAG.getConstant(32, dl, MVT::i32));
    else /*if (VT == MVT::f32)*/
      Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
    if (SrcVT == MVT::f32) {
      Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
      if (VT == MVT::f64)
        Tmp1 = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
                           DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
                           DAG.getConstant(32, dl, MVT::i32));
    } else if (VT == MVT::f32)
      Tmp1 = DAG.getNode(ARMISD::VSHRuIMM, dl, MVT::v1i64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
                         DAG.getConstant(32, dl, MVT::i32));
    Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);

    SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff),
                                            dl, MVT::i32);
    AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
    SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
                                  DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));

    SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
    if (VT == MVT::f32) {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
                        DAG.getConstant(0, dl, MVT::i32));
    } else {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
    }

    return Res;
  }

  // Bitcast operand 1 to i32.
  if (SrcVT == MVT::f64)
    Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                       Tmp1).getValue(1);
  Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);

  // Or in the signbit with integer operations.
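  // For example, copysign(-2.0f, +1.0f) computes
  // (0xC0000000 & 0x7FFFFFFF) | (0x3F800000 & 0x80000000) = 0x40000000,
  // i.e. +2.0f.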
  SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
  SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
  Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
  if (VT == MVT::f32) {
    Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
                       DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                       DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
  }

  // f64: Or the high part with signbit and then combine two parts.
  Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                     Tmp0);
  SDValue Lo = Tmp0.getValue(0);
  SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
  Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
}

SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const ARMBaseRegisterInfo &ARI =
    *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  Register FrameReg = ARI.getFrameRegister(MF);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {
  Register Reg = StringSwitch<unsigned>(RegName)
                       .Case("sp", ARM::SP)
                       .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error(Twine("Invalid register name \""
                              + StringRef(RegName)  + "\"."));
}

// The result is a 64-bit value, so split it into two 32-bit values and return
// them as a pair of values.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG) {
  SDLoc DL(N);

  // This function is only supposed to be called for i64 type destination.
  assert(N->getValueType(0) == MVT::i64
          && "ExpandREAD_REGISTER called for non-i64 type result.");

  SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
                             DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
                             N->getOperand(0),
                             N->getOperand(1));

  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
                    Read.getValue(1)));
  Results.push_back(Read.getOperand(0));
}

/// \p BC is a bitcast that is about to be turned into a VMOVDRR.
/// When \p DstVT, the destination type of \p BC, is on the vector
/// register bank and the source of bitcast, \p Op, operates on the same bank,
/// it might be possible to combine them, such that everything stays on the
/// vector register bank.
/// \return The node that would replace \p BC, if the combine
/// is possible.
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
                                                SelectionDAG &DAG) {
  SDValue Op = BC->getOperand(0);
  EVT DstVT = BC->getValueType(0);

  // The only vector instruction that can produce a scalar (remember,
  // since the bitcast was about to be turned into VMOVDRR, the source
  // type is i64) from a vector is EXTRACT_VECTOR_ELT.
  // Moreover, we can do this combine only if there is one use.
  // Finally, if the destination type is not a vector, there is not
  // much point on forcing everything on the vector bank.
  if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !Op.hasOneUse())
    return SDValue();

  // If the index is not constant, we will introduce an additional
  // multiply that will stick.
  // Give up in that case.
  ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!Index)
    return SDValue();
  unsigned DstNumElt = DstVT.getVectorNumElements();

  // Compute the new index.
  const APInt &APIntIndex = Index->getAPIntValue();
  APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
  NewIndex *= APIntIndex;
  // Check if the new constant index fits into i32.
  if (NewIndex.getBitWidth() > 32)
    return SDValue();

  // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
  // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
  SDLoc dl(Op);
  SDValue ExtractSrc = Op.getOperand(0);
  EVT VecVT = EVT::getVectorVT(
      *DAG.getContext(), DstVT.getScalarType(),
      ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
  SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
                     DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
}

/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
                                         const ARMSubtarget *Subtarget) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);

  // This function is only supposed to be called for i16 and i64 types, either
  // as the source or destination of the bit convert.
  EVT SrcVT = Op.getValueType();
  EVT DstVT = N->getValueType(0);

  if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) &&
      (DstVT == MVT::f16 || DstVT == MVT::bf16))
    return MoveToHPR(SDLoc(N), DAG, MVT::i32, DstVT.getSimpleVT(),
                     DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), MVT::i32, Op));

  if ((DstVT == MVT::i16 || DstVT == MVT::i32) &&
      (SrcVT == MVT::f16 || SrcVT == MVT::bf16))
    return DAG.getNode(
        ISD::TRUNCATE, SDLoc(N), DstVT,
        MoveFromHPR(SDLoc(N), DAG, MVT::i32, SrcVT.getSimpleVT(), Op));

  if (!(SrcVT == MVT::i64 || DstVT == MVT::i64))
    return SDValue();

  // Turn i64->f64 into VMOVDRR.
  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
    // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
    // if we can combine the bitcast with its source.
    if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
      return Val;

    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(0, dl, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, DstVT,
                       DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
  }

  // Turn f64->i64 into VMOVRRD.
  if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
    SDValue Cvt;
    if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
        SrcVT.getVectorNumElements() > 1)
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32),
                        DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
    else
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32), Op);
    // Merge the pieces into a single i64 value.
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
  }

  return SDValue();
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
/// Zero vectors are used to represent vector negation and in those cases
/// will be implemented with the NEON VNEG instruction.  However, VNEG does
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed.  Regardless, use a canonical VMOV to create the
/// zero vector.
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
  assert(VT.isVector() && "Expected a vector type");
  // The canonical modified immediate encoding of a zero vector is....0!
  SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
  EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
  SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
  return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}

/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue ARMcc;
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
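  // Conceptually, for ShAmt < 32:
  //   Lo = (ShOpLo >> ShAmt) | (ShOpHi << (32 - ShAmt))
  //   Hi = ShOpHi >> ShAmt (arithmetic or logical)
  // and for ShAmt >= 32:
  //   Lo = ShOpHi >> (ShAmt - 32)
  //   Hi = SRA ? ShOpHi >> 31 : 0
  // Both variants are computed below and selected with CMOVs on ShAmt - 32.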

  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
  SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
  SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
                           ARMcc, CCR, CmpLo);

  SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  SDValue HiBigShift = Opc == ISD::SRA
                           ? DAG.getNode(Opc, dl, VT, ShOpHi,
                                         DAG.getConstant(VTBits - 1, dl, VT))
                           : DAG.getConstant(0, dl, VT);
  SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
                           ARMcc, CCR, CmpHi);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue ARMcc;
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

  assert(Op.getOpcode() == ISD::SHL_PARTS);
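  // Conceptually, for ShAmt < 32:
  //   Hi = (ShOpHi << ShAmt) | (ShOpLo >> (32 - ShAmt))
  //   Lo = ShOpLo << ShAmt
  // and for ShAmt >= 32:
  //   Hi = ShOpLo << (ShAmt - 32)
  //   Lo = 0
  // Both variants are computed below and selected with CMOVs on ShAmt - 32.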
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);

  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
  SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
                           ARMcc, CCR, CmpHi);

  SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                          ISD::SETGE, ARMcc, DAG, dl);
  SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
                           DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPSCR.
  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
  // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
  // so that the shift and the AND get folded into a bitfield extract.
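  // For example, FPSCR RMode = 0b11 (round toward zero) gives
  // ((0b11 << 22) + (1 << 22)) >> 22 = 0b100 and 0b100 & 3 = 0, the
  // FLT_ROUNDS value for "toward zero"; RMode = 0b00 (nearest) maps to 1.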
  SDLoc dl(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue Ops[] = {Chain,
                   DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32)};

  SDValue FPSCR =
      DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, {MVT::i32, MVT::Other}, Ops);
  Chain = FPSCR.getValue(1);
  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
                                  DAG.getConstant(1U << 22, dl, MVT::i32));
  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
                              DAG.getConstant(22, dl, MVT::i32));
  SDValue And = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
                            DAG.getConstant(3, dl, MVT::i32));
  return DAG.getMergeValues({And, Chain}, dl);
}

static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  if (VT.isVector() && ST->hasNEON()) {

    // Compute the least significant set bit: LSB = X & -X
    SDValue X = N->getOperand(0);
    SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
    SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);
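    // For example, for x = 0b01101000 (in 8 bits): -x = 0b10011000, so
    // LSB = x & -x = 0b00001000 and cttz(x) = 3.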

    EVT ElemTy = VT.getVectorElementType();

    if (ElemTy == MVT::i8) {
      // Compute with: cttz(x) = ctpop(lsb - 1)
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
      return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
    }

    if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
        (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
      // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
      unsigned NumBits = ElemTy.getSizeInBits();
      SDValue WidthMinus1 =
          DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                      DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
      SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
      return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
    }

    // Compute with: cttz(x) = ctpop(lsb - 1)

    // Compute LSB - 1.
    SDValue Bits;
    if (ElemTy == MVT::i64) {
      // Load constant 0xffff'ffff'ffff'ffff to register.
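      // 0x1eff is the encoded modified immediate (Op=1, Cmode=1110, Imm=0xff)
      // for an all-ones 64-bit splat; adding all-ones is the same as
      // subtracting one.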
      SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                               DAG.getTargetConstant(0x1eff, dl, MVT::i32));
      Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
    } else {
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
    }
    return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
  }

  if (!ST->hasV6T2Ops())
    return SDValue();

  SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
}

static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
  assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
          VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
         "Unexpected type for custom ctpop lowering");

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
  SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0));
  Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res);

  // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
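  // For example, a v4i32 ctpop becomes a v16i8 ctpop followed by vpaddl.u8 to
  // v8i16 and vpaddl.u16 to v4i32.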
  unsigned EltSize = 8;
  unsigned NumElts = VT.is64BitVector() ? 8 : 16;
  while (EltSize != VT.getScalarSizeInBits()) {
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL,
                                  TLI.getPointerTy(DAG.getDataLayout())));
    Ops.push_back(Res);

    EltSize *= 2;
    NumElts /= 2;
    MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
    Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops);
  }

  return Res;
}

/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN ||
      !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                            ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}

/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation.  That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
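/// For example, with 16-bit elements a splat of 15 is a valid ordinary left
/// shift amount, while a splat of 16 is only valid for a long left shift
/// (e.g. VSHLL).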
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
}

/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation.  For a shift opcode, the value
/// is positive, but for an intrinsic the value must be negative. The
/// absolute value must be in the range:
///   1 <= |Value| <= ElementBits for a right shift; or
///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                         int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  if (!isIntrinsic)
    return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
  if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) {
    Cnt = -Cnt;
    return true;
  }
  return false;
}

static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  int64_t Cnt;

  if (!VT.isVector())
    return SDValue();

  // We essentially have two forms here: shift by an immediate and shift by a
  // vector register (there is also a shift by a GPR, but that is just handled
  // with a tablegen pattern). We cannot easily match shifts by an immediate in
  // tablegen, so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM.
  // For shifting by a vector, we don't have VSHR, only VSHL (which can be
  // signed or unsigned, and a negative shift indicates a shift right).
  if (N->getOpcode() == ISD::SHL) {
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
      return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0),
                       N->getOperand(1));
  }

  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "unexpected vector shift opcode");

  if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
    unsigned VShiftOpc =
        (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
    return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                       DAG.getConstant(Cnt, dl, MVT::i32));
  }

  // Other right shifts we don't have operations for (we use a shift left by a
  // negative number).
  EVT ShiftVT = N->getOperand(1).getValueType();
  SDValue NegatedCount = DAG.getNode(
      ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1));
  unsigned VShiftOpc =
      (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu);
  return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount);
}

static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // We can get here for a node like i32 = ISD::SHL i32, i64
  if (VT != MVT::i64)
    return SDValue();

  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA ||
          N->getOpcode() == ISD::SHL) &&
         "Unknown shift to lower!");

  unsigned ShOpc = N->getOpcode();
  if (ST->hasMVEIntegerOps()) {
    SDValue ShAmt = N->getOperand(1);
    unsigned ShPartsOpc = ARMISD::LSLL;
    ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt);

    // If the shift amount is wider than 64 bits, or is a constant that is zero
    // or at least 32, then fall back to the default expansion.
    if (ShAmt->getValueType(0).getSizeInBits() > 64 ||
        (Con && (Con->getZExtValue() == 0 || Con->getZExtValue() >= 32)))
      return SDValue();

    // Extract the lower 32 bits of the shift amount if it's not an i32
    if (ShAmt->getValueType(0) != MVT::i32)
      ShAmt = DAG.getZExtOrTrunc(ShAmt, dl, MVT::i32);

    if (ShOpc == ISD::SRL) {
      if (!Con)
        // There is no t2LSRLr instruction so negate and perform an lsll if the
        // shift amount is in a register, emulating a right shift.
        ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                            DAG.getConstant(0, dl, MVT::i32), ShAmt);
      else
        // Else generate an lsrl on the immediate shift amount
        ShPartsOpc = ARMISD::LSRL;
    } else if (ShOpc == ISD::SRA)
      ShPartsOpc = ARMISD::ASRL;

    // Lower 32 bits of the destination/source
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
    // Upper 32 bits of the destination/source
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));

    // Generate the shift operation as computed above
    Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi,
                     ShAmt);
    // The upper 32 bits come from the second return value of lsll
    Hi = SDValue(Lo.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }

  // We only lower SRA, SRL of 1 here, all others use generic lowering.
  if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL)
    return SDValue();

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb1Only())
    return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
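  // The high half is shifted right by one with the shifted-out bit captured
  // in the carry flag; the low half is then rotated right through that carry
  // (RRX), so the old bit 0 of Hi becomes the new bit 31 of Lo.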
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(1, dl, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the shifted-out bit in the carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG,
                           const ARMSubtarget *ST) {
  bool Invert = false;
  bool Swap = false;
  unsigned Opc = ARMCC::AL;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  SDLoc dl(Op);

  EVT CmpVT;
  if (ST->hasNEON())
    CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
  else {
    assert(ST->hasMVEIntegerOps() &&
           "No hardware support for integer vector comparison!");

    if (Op.getValueType().getVectorElementType() != MVT::i1)
      return SDValue();

    // Make sure we expand floating point setcc to scalar if we do not have
    // mve.fp, so that we can handle them from there.
    if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps())
      return SDValue();

    CmpVT = VT;
  }

  if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
      (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
    // Special-case integer 64-bit equality comparisons. They aren't legal,
    // but they can be lowered with a few vector instructions.
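    // Bitcast both sides to vectors of i32 and compare those for equality;
    // then AND each 32-bit lane with its VREV64-swapped neighbour, so a lane
    // is all-ones only if both halves of the original 64-bit element matched.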
    unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
    EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
    SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
    SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
    SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
                              DAG.getCondCode(ISD::SETEQ));
    SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
    SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
    Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
    if (SetCCOpcode == ISD::SETNE)
      Merged = DAG.getNOT(dl, Merged, CmpVT);
    Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
    return Merged;
  }

  if (CmpVT.getVectorElementType() == MVT::i64)
    // 64-bit comparisons are not legal in general.
    return SDValue();

  if (Op1.getValueType().isFloatingPoint()) {
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal FP comparison");
    case ISD::SETUNE:
    case ISD::SETNE:
      if (ST->hasMVEFloatOps()) {
        Opc = ARMCC::NE; break;
      } else {
        Invert = true; LLVM_FALLTHROUGH;
      }
    case ISD::SETOEQ:
    case ISD::SETEQ:  Opc = ARMCC::EQ; break;
    case ISD::SETOLT:
    case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETOGT:
    case ISD::SETGT:  Opc = ARMCC::GT; break;
    case ISD::SETOLE:
    case ISD::SETLE:  Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETOGE:
    case ISD::SETGE: Opc = ARMCC::GE; break;
    case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break;
    case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break;
    case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETONE: {
      // Expand this to (OLT | OGT).
      SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
                                   DAG.getConstant(ARMCC::GT, dl, MVT::i32));
      SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
                                   DAG.getConstant(ARMCC::GT, dl, MVT::i32));
      SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1);
      if (Invert)
        Result = DAG.getNOT(dl, Result, VT);
      return Result;
    }
    case ISD::SETUO: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETO: {
      // Expand this to (OLT | OGE).
      SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
                                   DAG.getConstant(ARMCC::GT, dl, MVT::i32));
      SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
                                   DAG.getConstant(ARMCC::GE, dl, MVT::i32));
      SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1);
      if (Invert)
        Result = DAG.getNOT(dl, Result, VT);
      return Result;
    }
    }
  } else {
    // Integer comparisons.
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal integer comparison");
    case ISD::SETNE:
      if (ST->hasMVEIntegerOps()) {
        Opc = ARMCC::NE; break;
      } else {
        Invert = true; LLVM_FALLTHROUGH;
      }
    case ISD::SETEQ:  Opc = ARMCC::EQ; break;
    case ISD::SETLT:  Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETGT:  Opc = ARMCC::GT; break;
    case ISD::SETLE:  Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETGE:  Opc = ARMCC::GE; break;
    case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETUGT: Opc = ARMCC::HI; break;
    case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETUGE: Opc = ARMCC::HS; break;
    }

    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
    if (ST->hasNEON() && Opc == ARMCC::EQ) {
      SDValue AndOp;
      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
        AndOp = Op0;
      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
        AndOp = Op1;

      // Ignore bitconvert.
      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
        AndOp = AndOp.getOperand(0);

      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
        Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
        Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
        SDValue Result = DAG.getNode(ARMISD::VTST, dl, CmpVT, Op0, Op1);
        if (!Invert)
          Result = DAG.getNOT(dl, Result, VT);
        return Result;
      }
    }
  }

  if (Swap)
    std::swap(Op0, Op1);

  // If one of the operands is a constant vector zero, attempt to fold the
  // comparison to a specialized compare-against-zero form.
  SDValue SingleOp;
  if (ISD::isBuildVectorAllZeros(Op1.getNode()))
    SingleOp = Op0;
  else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
    if (Opc == ARMCC::GE)
      Opc = ARMCC::LE;
    else if (Opc == ARMCC::GT)
      Opc = ARMCC::LT;
    SingleOp = Op1;
  }

  SDValue Result;
  if (SingleOp.getNode()) {
    Result = DAG.getNode(ARMISD::VCMPZ, dl, CmpVT, SingleOp,
                         DAG.getConstant(Opc, dl, MVT::i32));
  } else {
    Result = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
                         DAG.getConstant(Opc, dl, MVT::i32));
  }

  Result = DAG.getSExtOrTrunc(Result, dl, VT);

  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}

static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);

  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");

  // ARMISD::SUBE expects a carry, not a borrow as ISD::SUBCARRY does, so we
  // have to invert the carry first.
  Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
                      DAG.getConstant(1, DL, MVT::i32), Carry);
  // This converts the boolean value carry into the carry flag.
  Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);

  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
  SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);

  SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
  SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
  SDValue ARMcc = DAG.getConstant(
      IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
                                   Cmp.getValue(1), SDValue());
  return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
                     CCR, Chain.getValue(1));
}

/// isVMOVModifiedImm - Check if the specified splat value corresponds to a
/// valid vector constant for a NEON or MVE instruction with a "modified
/// immediate" operand (e.g., VMOV).  If so, return the encoded value.
static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                                 unsigned SplatBitSize, SelectionDAG &DAG,
                                 const SDLoc &dl, EVT &VT, EVT VectorVT,
                                 VMOVModImmType type) {
  unsigned OpCmode, Imm;
  bool is128Bits = VectorVT.is128BitVector();

  // SplatBitSize is set to the smallest size that splats the vector, so a
  // zero vector will always have SplatBitSize == 8.  However, NEON modified
  // immediate instructions other than VMOV do not support the 8-bit encoding
  // of a zero vector, and the default encoding of zero is supposed to be the
  // 32-bit version.
  if (SplatBits == 0)
    SplatBitSize = 32;

  switch (SplatBitSize) {
  case 8:
    if (type != VMOVModImm)
      return SDValue();
    // Any 1-byte value is OK.  Op=0, Cmode=1110.
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    OpCmode = 0xe;
    Imm = SplatBits;
    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
    break;

  case 16:
    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
    VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x00nn: Op=x, Cmode=100x.
      OpCmode = 0x8;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0xnn00: Op=x, Cmode=101x.
      OpCmode = 0xa;
      Imm = SplatBits >> 8;
      break;
    }
    return SDValue();

  case 32:
    // NEON's 32-bit VMOV supports splat values where:
    // * only one byte is nonzero, or
    // * the least significant byte is 0xff and the second byte is nonzero, or
    // * the least significant 2 bytes are 0xff and the third is nonzero.
    VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x000000nn: Op=x, Cmode=000x.
      OpCmode = 0;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0x0000nn00: Op=x, Cmode=001x.
      OpCmode = 0x2;
      Imm = SplatBits >> 8;
      break;
    }
    if ((SplatBits & ~0xff0000) == 0) {
      // Value = 0x00nn0000: Op=x, Cmode=010x.
      OpCmode = 0x4;
      Imm = SplatBits >> 16;
      break;
    }
    if ((SplatBits & ~0xff000000) == 0) {
      // Value = 0xnn000000: Op=x, Cmode=011x.
      OpCmode = 0x6;
      Imm = SplatBits >> 24;
      break;
    }

    // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
    if (type == OtherModImm) return SDValue();

    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
      // Value = 0x0000nnff: Op=x, Cmode=1100.
      OpCmode = 0xc;
      Imm = SplatBits >> 8;
      break;
    }

    // cmode == 0b1101 is not supported for MVE VMVN
    if (type == MVEVMVNModImm)
      return SDValue();

    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
      // Value = 0x00nnffff: Op=x, Cmode=1101.
      OpCmode = 0xd;
      Imm = SplatBits >> 16;
      break;
    }

    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
    // VMOV.I32.  A (very) minor optimization would be to replicate the value
    // and fall through here to test for a valid 64-bit splat.  But, then the
    // caller would also need to check and handle the change in size.
    return SDValue();

  case 64: {
    if (type != VMOVModImm)
      return SDValue();
    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
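    // For example, a splat of 0x00ff00ff00ff00ff is encoded with
    // Imm = 0b01010101 (one bit per byte, least significant byte first).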
    uint64_t BitMask = 0xff;
    uint64_t Val = 0;
    unsigned ImmMask = 1;
    Imm = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
        Val |= BitMask;
        Imm |= ImmMask;
      } else if ((SplatBits & BitMask) != 0) {
        return SDValue();
      }
      BitMask <<= 8;
      ImmMask <<= 1;
    }

    if (DAG.getDataLayout().isBigEndian()) {
      // Reverse the order of elements within the vector.
      unsigned BytesPerElem = VectorVT.getScalarSizeInBits() / 8;
      unsigned Mask = (1 << BytesPerElem) - 1;
      unsigned NumElems = 8 / BytesPerElem;
      unsigned NewImm = 0;
      for (unsigned ElemNum = 0; ElemNum < NumElems; ++ElemNum) {
        unsigned Elem = ((Imm >> ElemNum * BytesPerElem) & Mask);
        NewImm |= Elem << (NumElems - ElemNum - 1) * BytesPerElem;
      }
      Imm = NewImm;
    }

    // Op=1, Cmode=1110.
    OpCmode = 0x1e;
    VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
    break;
  }

  default:
    llvm_unreachable("unexpected size for isVMOVModifiedImm");
  }

  unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Imm);
  return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
}

SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *ST) const {
  EVT VT = Op.getValueType();
  bool IsDouble = (VT == MVT::f64);
  ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
  const APFloat &FPVal = CFP->getValueAPF();

  // Prevent floating-point constants from using literal loads
  // when execute-only is enabled.
  if (ST->genExecuteOnly()) {
    // If we can represent the constant as an immediate, don't lower it
    if (isFPImmLegal(FPVal, VT))
      return Op;
    // Otherwise, construct as integer, and move to float register
    APInt INTVal = FPVal.bitcastToAPInt();
    SDLoc DL(CFP);
    switch (VT.getSimpleVT().SimpleTy) {
      default:
        llvm_unreachable("Unknown floating point type!");
        break;
      case MVT::f64: {
        SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32);
        SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
        return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi);
      }
      case MVT::f32:
          return DAG.getNode(ARMISD::VMOVSR, DL, VT,
              DAG.getConstant(INTVal, DL, MVT::i32));
    }
  }

  if (!ST->hasVFP3Base())
    return SDValue();

  // Use the default (constant pool) lowering for double constants when we have
  // an SP-only FPU
  if (IsDouble && !Subtarget->hasFP64())
    return SDValue();

  // Try splatting with a VMOV.f32...
  int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);

  if (ImmVal != -1) {
    if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
      // We have code in place to select a valid ConstantFP already, no need to
      // do any mangling.
      return Op;
    }

    // It's a float and we are trying to use NEON operations where
    // possible. Lower it to a splat followed by an extract.
    SDLoc DL(Op);
    SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
    SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
                                      NewVal);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  // The rest of our options are NEON-only; make sure that's allowed before
  // proceeding.
  if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
    return SDValue();

  EVT VMovVT;
  uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();

  // It wouldn't really be worth bothering for doubles except for one very
  // important value, which does happen to match: 0.0. So make sure we don't do
  // anything stupid.
  if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
    return SDValue();

  // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
  SDValue NewVal = isVMOVModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
                                     VMovVT, VT, VMOVModImm);
  if (NewVal != SDValue()) {
    SDLoc DL(Op);
    SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
                                      NewVal);
    if (IsDouble)
      return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);

    // It's a float: cast and extract a vector element.
    SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
                                       VecConstant);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  // Finally, try a VMVN.i32
  NewVal = isVMOVModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
                             VT, VMVNModImm);
  if (NewVal != SDValue()) {
    SDLoc DL(Op);
    SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);

    if (IsDouble)
      return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);

    // It's a float: cast and extract a vector element.
    SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
                                       VecConstant);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  return SDValue();
}

// Check if a VEXT instruction can handle the shuffle mask when the
// vector sources of the shuffle are the same.
static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();

  // Assume that the first shuffle index is not UNDEF.  Fail if it is.
  if (M[0] < 0)
    return false;

  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element.  The other shuffle indices must be the successive elements after
  // the first one.
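  // For example, for a single-source v8i8 shuffle the mask
  // <3, 4, 5, 6, 7, 0, 1, 2> corresponds to a VEXT with Imm = 3.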
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index.  If it wraps around, just follow it
    // back to index zero and keep going.
    ++ExpectedElt;
    if (ExpectedElt == NumElts)
      ExpectedElt = 0;

    if (M[i] < 0) continue; // ignore UNDEF indices
    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  return true;
}

static bool isVEXTMask(ArrayRef<int> M, EVT VT,
                       bool &ReverseVEXT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();
  ReverseVEXT = false;

  // Assume that the first shuffle index is not UNDEF.  Fail if it is.
  if (M[0] < 0)
    return false;

  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element.  The other shuffle indices must be the successive elements after
  // the first one.
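  // For example, for v4i32 the mask <6, 7, 0, 1> runs off the end of the
  // concatenated sources, so the operands are swapped and the immediate
  // becomes 6 - 4 = 2.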
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index.  If it wraps around, it may still be
    // a VEXT but the source vectors must be swapped.
    ExpectedElt += 1;
    if (ExpectedElt == NumElts * 2) {
      ExpectedElt = 0;
      ReverseVEXT = true;
    }

    if (M[i] < 0) continue; // ignore UNDEF indices
    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  // Adjust the index value if the source operands will be swapped.
  if (ReverseVEXT)
    Imm -= NumElts;

  return true;
}

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize.  (The order of the elements
/// within each block of the vector is reversed.)
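/// For example, a VREV64 on v8i16 (64-bit blocks of 16-bit elements) uses the
/// mask <3, 2, 1, 0, 7, 6, 5, 4>.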
static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BlockElts = M[0] + 1;
  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
      return false;
  }

  return true;
}

static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
  // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
  // range, then 0 is placed into the resulting vector. So pretty much any mask
  // of 8 elements can work here.
  return VT == MVT::v8i8 && M.size() == 8;
}

static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask,
                               unsigned Index) {
  if (Mask.size() == Elements * 2)
    return Index / Elements;
  return Mask[Index] == 0 ? 0 : 1;
}

// Checks whether the shuffle mask represents a vector transpose (VTRN) by
// checking that pairs of elements in the shuffle mask represent the same index
// in each vector, incrementing the expected index by 2 at each step.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
//  v2={e,f,g,h}
// WhichResult gives the offset for each element in the mask based on which
// of the two results it belongs to.
//
// The transpose can be represented either as:
// result1 = shufflevector v1, v2, result1_shuffle_mask
// result2 = shufflevector v1, v2, result2_shuffle_mask
// where v1/v2 and the shuffle masks have the same number of elements
// (here WhichResult (see below) indicates which result is being checked)
//
// or as:
// results = shufflevector v1, v2, shuffle_mask
// where both results are returned in one vector and the shuffle mask has twice
// as many elements as v1/v2 (here WhichResult will always be 0 if true); here
// we want to check the low half and high half of the shuffle mask as if it
// were the other case.
static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  // If the mask is twice as long as the input vector then we need to check the
  // upper and lower parts of the mask with a matching value for WhichResult
  // FIXME: A mask with only even values will be rejected in case the first
  // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
  // M[0] is used to determine WhichResult
  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
        return false;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  return true;
}

/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
        return false;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  return true;
}

// Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
// that the mask elements are either all even and in steps of size 2 or all odd
// and in steps of size 2.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
//  v2={e,f,g,h}
// Requires similar checks to those of isVTRNMask with
// respect to how results are returned.
static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; ++j) {
      if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
        return false;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  unsigned Half = NumElts / 2;
  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; j += Half) {
      unsigned Idx = WhichResult;
      for (unsigned k = 0; k < Half; ++k) {
        int MIdx = M[i + j + k];
        if (MIdx >= 0 && (unsigned) MIdx != Idx)
          return false;
        Idx += 2;
      }
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

// Checks whether the shuffle mask represents a vector zip (VZIP) by checking
// that pairs of elements of the shufflemask represent the same index in each
// vector incrementing sequentially through the vectors.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
//  v2={e,f,g,h}
// Requires similar checks to those of isVTRNMask with respect to how results
// are returned.
static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    unsigned Idx = WhichResult * NumElts / 2;
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
        return false;
      Idx += 1;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    unsigned Idx = WhichResult * NumElts / 2;
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
        return false;
      Idx += 1;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
/// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
                                           unsigned &WhichResult,
                                           bool &isV_UNDEF) {
  isV_UNDEF = false;
  if (isVTRNMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VTRN;
  if (isVUZPMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VUZP;
  if (isVZIPMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VZIP;

  isV_UNDEF = true;
  if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VTRN;
  if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VUZP;
  if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VZIP;

  return 0;
}

/// \return true if this is a reverse operation on a vector.
static bool isReverseMask(ArrayRef<int> M, EVT VT) {
  unsigned NumElts = VT.getVectorNumElements();
  // Make sure the mask has the right size.
  if (NumElts != M.size())
      return false;

  // Look for <15, ..., 3, -1, 1, 0>.
  for (unsigned i = 0; i != NumElts; ++i)
    if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
      return false;

  return true;
}

static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top) {
  unsigned NumElts = VT.getVectorNumElements();
  // Make sure the mask has the right size.
  if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8))
      return false;

  // If Top
  //   Look for <0, N, 2, N+2, 4, N+4, ..>.
  //   This inserts Input2 into Input1
  // else if not Top
  //   Look for <0, N+1, 2, N+3, 4, N+5, ..>
  //   This inserts Input1 into Input2
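  // e.g. for v8i16: Top matches <0, 8, 2, 10, 4, 12, 6, 14> and !Top matches
  // <0, 9, 2, 11, 4, 13, 6, 15>.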
  unsigned Offset = Top ? 0 : 1;
  for (unsigned i = 0; i < NumElts; i+=2) {
    if (M[i] >= 0 && M[i] != (int)i)
      return false;
    if (M[i+1] >= 0 && M[i+1] != (int)(NumElts + i + Offset))
      return false;
  }

  return true;
}

// Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted
// from a pair of inputs. For example:
// BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0),
//             FP_ROUND(EXTRACT_ELT(Y, 0),
//             FP_ROUND(EXTRACT_ELT(X, 1),
//             FP_ROUND(EXTRACT_ELT(Y, 1), ...)
static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG,
                                         const ARMSubtarget *ST) {
  assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
  if (!ST->hasMVEFloatOps())
    return SDValue();

  SDLoc dl(BV);
  EVT VT = BV.getValueType();
  if (VT != MVT::v8f16)
    return SDValue();

  // We are looking for a buildvector of fptrunc elements, where the elements
  // are extracted alternately from two sources. Check that the first two items
  // are valid enough and extract some info from them (they are checked
  // properly in the loop below).
  if (BV.getOperand(0).getOpcode() != ISD::FP_ROUND ||
      BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      BV.getOperand(0).getOperand(0).getConstantOperandVal(1) != 0)
    return SDValue();
  if (BV.getOperand(1).getOpcode() != ISD::FP_ROUND ||
      BV.getOperand(1).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      BV.getOperand(1).getOperand(0).getConstantOperandVal(1) != 0)
    return SDValue();
  SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0);
  SDValue Op1 = BV.getOperand(1).getOperand(0).getOperand(0);
  if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32)
    return SDValue();

  // Check all the values in the BuildVector line up with our expectations.
  for (unsigned i = 1; i < 4; i++) {
    auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) {
      return Trunc.getOpcode() == ISD::FP_ROUND &&
             Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
             Trunc.getOperand(0).getOperand(0) == Op &&
             Trunc.getOperand(0).getConstantOperandVal(1) == Idx;
    };
    if (!Check(BV.getOperand(i * 2 + 0), Op0, i))
      return SDValue();
    if (!Check(BV.getOperand(i * 2 + 1), Op1, i))
      return SDValue();
  }

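  // Emit a pair of VCVTN nodes; the trailing immediate selects which half of
  // the f16 lanes is written, so Op0 fills the even (bottom) lanes and Op1 the
  // odd (top) lanes, recreating the interleaved BuildVector matched above.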
  SDValue N1 = DAG.getNode(ARMISD::VCVTN, dl, VT, DAG.getUNDEF(VT), Op0,
                           DAG.getConstant(0, dl, MVT::i32));
  return DAG.getNode(ARMISD::VCVTN, dl, VT, N1, Op1,
                     DAG.getConstant(1, dl, MVT::i32));
}

// Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted
// from a single input on alternating lanes. For example:
// BUILDVECTOR(FP_EXTEND(EXTRACT_ELT(X, 0),
//             FP_EXTEND(EXTRACT_ELT(X, 2),
//             FP_EXTEND(EXTRACT_ELT(X, 4), ...)
static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG,
                                       const ARMSubtarget *ST) {
  assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
  if (!ST->hasMVEFloatOps())
    return SDValue();

  SDLoc dl(BV);
  EVT VT = BV.getValueType();
  if (VT != MVT::v4f32)
    return SDValue();

  // We are looking for a buildvector of fpext elements, where all the
  // elements are alternating lanes from a single source. For example <0,2,4,6>
  // or <1,3,5,7>. Check the first two items are valid enough and extract some
  // info from them (they are checked properly in the loop below).
  if (BV.getOperand(0).getOpcode() != ISD::FP_EXTEND ||
      BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0);
  int Offset = BV.getOperand(0).getOperand(0).getConstantOperandVal(1);
  if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1))
    return SDValue();

  // Check all the values in the BuildVector line up with our expectations.
  for (unsigned i = 1; i < 4; i++) {
    auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) {
      return Trunc.getOpcode() == ISD::FP_EXTEND &&
             Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
             Trunc.getOperand(0).getOperand(0) == Op &&
             Trunc.getOperand(0).getConstantOperandVal(1) == Idx;
    };
    if (!Check(BV.getOperand(i), Op0, 2 * i + Offset))
      return SDValue();
  }

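  // A single VCVTL widens either the even (Offset == 0) or the odd
  // (Offset == 1) f16 lanes of Op0 to f32, matching the pattern found above.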
  return DAG.getNode(ARMISD::VCVTL, dl, VT, Op0,
                     DAG.getConstant(Offset, dl, MVT::i32));
}

// If N is an integer constant that can be moved into a register in one
// instruction, return an SDValue of such a constant (will become a MOV
// instruction).  Otherwise return null.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
                                     const ARMSubtarget *ST, const SDLoc &dl) {
  uint64_t Val;
  if (!isa<ConstantSDNode>(N))
    return SDValue();
  Val = cast<ConstantSDNode>(N)->getZExtValue();

  if (ST->isThumb1Only()) {
    if (Val <= 255 || ~Val <= 255)
      return DAG.getConstant(Val, dl, MVT::i32);
  } else {
    if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
      return DAG.getConstant(Val, dl, MVT::i32);
  }
  return SDValue();
}

static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!");

  unsigned NumElts = VT.getVectorNumElements();
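  // MVE predicates live in the 16-bit VPR.P0 register: each v16i1 lane maps to
  // one bit, each v8i1 lane to two identical bits and each v4i1 lane to four,
  // hence the per-lane bit width and mask below.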
  unsigned BoolMask;
  unsigned BitsPerBool;
  if (NumElts == 4) {
    BitsPerBool = 4;
    BoolMask = 0xf;
  } else if (NumElts == 8) {
    BitsPerBool = 2;
    BoolMask = 0x3;
  } else if (NumElts == 16) {
    BitsPerBool = 1;
    BoolMask = 0x1;
  } else
    return SDValue();

  // If this is a single value copied into all lanes (a splat), we can just sign
  // extend that single value
  SDValue FirstOp = Op.getOperand(0);
  if (!isa<ConstantSDNode>(FirstOp) &&
      std::all_of(std::next(Op->op_begin()), Op->op_end(),
                  [&FirstOp](SDUse &U) {
                    return U.get().isUndef() || U.get() == FirstOp;
                  })) {
    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp,
                              DAG.getValueType(MVT::i1));
    return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), Ext);
  }

  // First create base with bits set where known
  unsigned Bits32 = 0;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (!isa<ConstantSDNode>(V) && !V.isUndef())
      continue;
    bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue();
    if (BitSet)
      Bits32 |= BoolMask << (i * BitsPerBool);
  }

  // Add in unknown nodes
  SDValue Base = DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT,
                             DAG.getConstant(Bits32, dl, MVT::i32));
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (isa<ConstantSDNode>(V) || V.isUndef())
      continue;
    Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V,
                       DAG.getConstant(i, dl, MVT::i32));
  }

  return Base;
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) const {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
    return LowerBUILD_VECTOR_i1(Op, DAG, ST);

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatUndef.isAllOnesValue())
      return DAG.getUNDEF(VT);

    if ((ST->hasNEON() || ST->hasMVEIntegerOps()) && SplatBitSize <= 64) {
      // Check if an immediate VMOV works.
      EVT VmovVT;
      SDValue Val =
          isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
                            SplatBitSize, DAG, dl, VmovVT, VT, VMOVModImm);

      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Try an immediate VMVN.
      uint64_t NegatedImm = (~SplatBits).getZExtValue();
      Val = isVMOVModifiedImm(
          NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VmovVT,
          VT, ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm);
      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
      if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
        int ImmVal = ARM_AM::getFP32Imm(SplatBits);
        if (ImmVal != -1) {
          SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
          return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
        }
      }
    }
  }

  // Scan through the operands to see if only one value is used.
  //
  // As an optimisation, even if more than one value is used it may be more
  // profitable to splat with one value and then change some lanes.
  //
  // Heuristically we decide to do this if the vector has a "dominant" value,
  // defined as splatted to more than half of the lanes.
  unsigned NumElts = VT.getVectorNumElements();
  bool isOnlyLowElement = true;
  bool usesOnlyOneValue = true;
  bool hasDominantValue = false;
  bool isConstant = true;

  // Map of the number of times a particular SDValue appears in the
  // element list.
  DenseMap<SDValue, unsigned> ValueCounts;
  SDValue Value;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.isUndef())
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    // Is this value dominant? (takes up more than half of the lanes)
    if (++Count > (NumElts / 2)) {
      hasDominantValue = true;
      Value = V;
    }
  }
  if (ValueCounts.size() != 1)
    usesOnlyOneValue = false;
  if (!Value.getNode() && !ValueCounts.empty())
    Value = ValueCounts.begin()->first;

  if (ValueCounts.empty())
    return DAG.getUNDEF(VT);

  // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR, so
  // keep going rather than using SCALAR_TO_VECTOR if the value is a load.
  if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);

  unsigned EltSize = VT.getScalarSizeInBits();

  // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
  // i32 and try again.
  if (hasDominantValue && EltSize <= 32) {
    if (!isConstant) {
      SDValue N;

      // If we are VDUPing a value that comes directly from a vector, that will
      // cause an unnecessary move to and from a GPR, where instead we could
      // just use VDUPLANE. We can only do this if the lane being extracted
      // is at a constant index, as the VDUP from lane instructions only have
      // constant-index forms.
      ConstantSDNode *constIndex;
      if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
        // We need to create a new undef vector to use for the VDUPLANE if the
        // size of the vector from which we get the value is different than the
        // size of the vector that we need to create. We will insert the element
        // such that the register coalescer will remove unnecessary copies.
        if (VT != Value->getOperand(0).getValueType()) {
          unsigned index = constIndex->getAPIntValue().getLimitedValue() %
                             VT.getVectorNumElements();
          N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                          DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT,
                                      DAG.getUNDEF(VT), Value,
                                      DAG.getConstant(index, dl, MVT::i32)),
                          DAG.getConstant(index, dl, MVT::i32));
        } else
          N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, Value->getOperand(0),
                          Value->getOperand(1));
      } else
        N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);

      if (!usesOnlyOneValue) {
        // The dominant value was splatted as 'N', but we now have to insert
        // all differing elements.
        for (unsigned I = 0; I < NumElts; ++I) {
          if (Op.getOperand(I) == Value)
            continue;
          SmallVector<SDValue, 3> Ops;
          Ops.push_back(N);
          Ops.push_back(Op.getOperand(I));
          Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
          N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
        }
      }
      return N;
    }
    if (VT.getVectorElementType().isFloatingPoint()) {
      SmallVector<SDValue, 8> Ops;
      MVT FVT = VT.getVectorElementType().getSimpleVT();
      assert(FVT == MVT::f32 || FVT == MVT::f16);
      MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16;
      for (unsigned i = 0; i < NumElts; ++i)
        Ops.push_back(DAG.getNode(ISD::BITCAST, dl, IVT,
                                  Op.getOperand(i)));
      EVT VecVT = EVT::getVectorVT(*DAG.getContext(), IVT, NumElts);
      SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
      Val = LowerBUILD_VECTOR(Val, DAG, ST);
      if (Val.getNode())
        return DAG.getNode(ISD::BITCAST, dl, VT, Val);
    }
    if (usesOnlyOneValue) {
      SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
      if (isConstant && Val.getNode())
        return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
    }
  }

  // If all elements are constants and the case above didn't get hit, fall back
  // to the default expansion, which will generate a load from the constant
  // pool.
  if (isConstant)
    return SDValue();

  // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and
  // vmovn). Empirical tests suggest this is rarely worth it for vectors of
  // length <= 2.
  if (NumElts >= 4)
    if (SDValue shuffle = ReconstructShuffle(Op, DAG))
      return shuffle;

  // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into
  // VCVT's
  if (SDValue VCVT = LowerBuildVectorOfFPTrunc(Op, DAG, Subtarget))
    return VCVT;
  if (SDValue VCVT = LowerBuildVectorOfFPExt(Op, DAG, Subtarget))
    return VCVT;

  if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 &&
      VT != MVT::v4f32) {
    // If we haven't found an efficient lowering, try splitting a 128-bit vector
    // into two 64-bit vectors; we might discover a better way to lower it.
    SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
    EVT ExtVT = VT.getVectorElementType();
    EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
    SDValue Lower =
        DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2));
    if (Lower.getOpcode() == ISD::BUILD_VECTOR)
      Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
    SDValue Upper = DAG.getBuildVector(
        HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
    if (Upper.getOpcode() == ISD::BUILD_VECTOR)
      Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
    if (Lower && Upper)
      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
  }

  // Vectors with 32- or 64-bit elements can be built by directly assigning
  // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
  // will be legalized.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i)
      Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target) and materialization element by element
  // on the stack followed by a load for everything else.
  if (!isConstant && !usesOnlyOneValue) {
    SDValue Vec = DAG.getUNDEF(VT);
    for (unsigned i = 0 ; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      if (V.isUndef())
        continue;
      SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
    }
    return Vec;
  }

  return SDValue();
}

// Gather data to see if the operation can be modelled as a
// shuffle in combination with VEXTs.
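// For example, a BUILD_VECTOR of <A[1], A[2], A[3], B[0]> taken from two
// v4i16 sources A and B is equivalent to a single VEXT(A, B, #1).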
SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
                                              SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned NumElts = VT.getVectorNumElements();

  struct ShuffleSourceInfo {
    SDValue Vec;
    unsigned MinElt = std::numeric_limits<unsigned>::max();
    unsigned MaxElt = 0;

    // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
    // be compatible with the shuffle we intend to construct. As a result
    // ShuffleVec will be some sliding window into the original Vec.
    SDValue ShuffleVec;

    // Code should guarantee that element i in Vec starts at element
    // "WindowBase + i * WindowScale" in ShuffleVec.
    int WindowBase = 0;
    int WindowScale = 1;

    ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}

    bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
  };

  // First gather all vectors used as an immediate source for this BUILD_VECTOR
  // node.
  SmallVector<ShuffleSourceInfo, 2> Sources;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.isUndef())
      continue;
    else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
      // A shuffle can only come from building a vector from various
      // elements of other vectors.
      return SDValue();
    } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
      // Furthermore, shuffles require a constant mask, whereas extractelts
      // accept variable indices.
      return SDValue();
    }

    // Add this element source to the list if it's not already there.
    SDValue SourceVec = V.getOperand(0);
    auto Source = llvm::find(Sources, SourceVec);
    if (Source == Sources.end())
      Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));

    // Update the minimum and maximum lane number seen.
    unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
    Source->MinElt = std::min(Source->MinElt, EltNo);
    Source->MaxElt = std::max(Source->MaxElt, EltNo);
  }

  // Currently only do something sane when at most two source vectors
  // are involved.
  if (Sources.size() > 2)
    return SDValue();

  // Find out the smallest element size among result and two sources, and use
  // it as element size to build the shuffle_vector.
  EVT SmallestEltTy = VT.getVectorElementType();
  for (auto &Source : Sources) {
    EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
    if (SrcEltTy.bitsLT(SmallestEltTy))
      SmallestEltTy = SrcEltTy;
  }
  unsigned ResMultiplier =
      VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
  NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
  EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);

  // If the source vector is too wide or too narrow, we may nevertheless be able
  // to construct a compatible shuffle either by concatenating it with UNDEF or
  // extracting a suitable range of elements.
  for (auto &Src : Sources) {
    EVT SrcVT = Src.ShuffleVec.getValueType();

    if (SrcVT.getSizeInBits() == VT.getSizeInBits())
      continue;

    // This stage of the search produces a source with the same element type as
    // the original, but with a total width matching the BUILD_VECTOR output.
    EVT EltVT = SrcVT.getVectorElementType();
    unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
    EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);

    if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
      if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
        return SDValue();
      // We can pad out the smaller vector for free, so if it's part of a
      // shuffle...
      Src.ShuffleVec =
          DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
                      DAG.getUNDEF(Src.ShuffleVec.getValueType()));
      continue;
    }

    if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
      return SDValue();

    if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
      // Span too large for a VEXT to cope
      return SDValue();
    }

    if (Src.MinElt >= NumSrcElts) {
      // The extraction can just take the second half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i32));
      Src.WindowBase = -NumSrcElts;
    } else if (Src.MaxElt < NumSrcElts) {
      // The extraction can just take the first half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i32));
    } else {
      // An actual VEXT is needed
      SDValue VEXTSrc1 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i32));
      SDValue VEXTSrc2 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i32));

      Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
                                   VEXTSrc2,
                                   DAG.getConstant(Src.MinElt, dl, MVT::i32));
      Src.WindowBase = -Src.MinElt;
    }
  }

  // Another possible incompatibility occurs from the vector element types. We
  // can fix this by bitcasting the source vectors to the same type we intend
  // for the shuffle.
  for (auto &Src : Sources) {
    EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
    if (SrcEltTy == SmallestEltTy)
      continue;
    assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
    Src.ShuffleVec = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, ShuffleVT, Src.ShuffleVec);
    Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
    Src.WindowBase *= Src.WindowScale;
  }

  // Final sanity check before we try to actually produce a shuffle.
  LLVM_DEBUG(for (auto Src
                  : Sources)
                 assert(Src.ShuffleVec.getValueType() == ShuffleVT););

  // The stars all align, our next step is to produce the mask for the shuffle.
  SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
  int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
  for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
    SDValue Entry = Op.getOperand(i);
    if (Entry.isUndef())
      continue;

    auto Src = llvm::find(Sources, Entry.getOperand(0));
    int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();

    // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
    // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
    // segment.
    EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
    int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(),
                               VT.getScalarSizeInBits());
    int LanesDefined = BitsDefined / BitsPerShuffleLane;

    // This source is expected to fill ResMultiplier lanes of the final shuffle,
    // starting at the appropriate offset.
    int *LaneMask = &Mask[i * ResMultiplier];

    int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
    ExtractBase += NumElts * (Src - Sources.begin());
    for (int j = 0; j < LanesDefined; ++j)
      LaneMask[j] = ExtractBase + j;
  }


  // We can't handle more than two sources. This should have already
  // been checked before this point.
  assert(Sources.size() <= 2 && "Too many sources!");

  SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
  for (unsigned i = 0; i < Sources.size(); ++i)
    ShuffleOps[i] = Sources[i].ShuffleVec;

  SDValue Shuffle = buildLegalVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
                                            ShuffleOps[1], Mask, DAG);
  if (!Shuffle)
    return SDValue();
  return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Shuffle);
}

enum ShuffleOpCodes {
  OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
  OP_VREV,
  OP_VDUP0,
  OP_VDUP1,
  OP_VDUP2,
  OP_VDUP3,
  OP_VEXT1,
  OP_VEXT2,
  OP_VEXT3,
  OP_VUZPL, // VUZP, left result
  OP_VUZPR, // VUZP, right result
  OP_VZIPL, // VZIP, left result
  OP_VZIPR, // VZIP, right result
  OP_VTRNL, // VTRN, left result
  OP_VTRNR  // VTRN, right result
};

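// Each entry of the perfect shuffle table packs its data as: bits [31:30] hold
// the cost, bits [29:26] the ShuffleOpCodes operation, bits [25:13] the LHS
// operand index and bits [12:0] the RHS operand index (see
// GeneratePerfectShuffle below).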
static bool isLegalMVEShuffleOp(unsigned PFEntry) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  switch (OpNum) {
  case OP_COPY:
  case OP_VREV:
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return true;
  }
  return false;
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (M[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = M[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry)))
      return true;
  }

  bool ReverseVEXT, isV_UNDEF;
  unsigned Imm, WhichResult;

  unsigned EltSize = VT.getScalarSizeInBits();
  if (EltSize >= 32 ||
      ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
      ShuffleVectorInst::isIdentityMask(M) ||
      isVREVMask(M, VT, 64) ||
      isVREVMask(M, VT, 32) ||
      isVREVMask(M, VT, 16))
    return true;
  else if (Subtarget->hasNEON() &&
           (isVEXTMask(M, VT, ReverseVEXT, Imm) ||
            isVTBLMask(M, VT) ||
            isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF)))
    return true;
  else if (Subtarget->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) &&
           isReverseMask(M, VT))
    return true;
  else if (Subtarget->hasMVEIntegerOps() &&
           (isVMOVNMask(M, VT, 0) || isVMOVNMask(M, VT, 1)))
    return true;
  else
    return false;
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

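  // LHSID/RHSID encode a 4-element mask in base 9 (with 8 meaning undef), so
  // <0,1,2,3> is the LHS unchanged and <4,5,6,7> is the RHS unchanged.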
  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
  EVT VT = OpLHS.getValueType();

  switch (OpNum) {
  default: llvm_unreachable("Unknown shuffle opcode!");
  case OP_VREV:
    // VREV divides the vector in half and swaps within the half.
    if (VT.getVectorElementType() == MVT::i32 ||
        VT.getVectorElementType() == MVT::f32)
      return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
    // vrev <4 x i16> -> VREV32
    if (VT.getVectorElementType() == MVT::i16)
      return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
    // vrev <4 x i8> -> VREV16
    assert(VT.getVectorElementType() == MVT::i8);
    return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
  case OP_VEXT1:
  case OP_VEXT2:
  case OP_VEXT3:
    return DAG.getNode(ARMISD::VEXT, dl, VT,
                       OpLHS, OpRHS,
                       DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
  case OP_VUZPL:
  case OP_VUZPR:
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
  case OP_VZIPL:
  case OP_VZIPR:
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
  case OP_VTRNL:
  case OP_VTRNR:
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}

static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
                                       ArrayRef<int> ShuffleMask,
                                       SelectionDAG &DAG) {
  // Check to see if we can use the VTBL instruction.
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc DL(Op);

  SmallVector<SDValue, 8> VTBLMask;
  for (ArrayRef<int>::iterator
         I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
    VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));

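  // A single-register table lookup (VTBL1) is enough when V2 is undef;
  // otherwise use both registers as the table with VTBL2.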
  if (V2.getNode()->isUndef())
    return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
                       DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));

  return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
                     DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
}

static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
                                                      SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue OpLHS = Op.getOperand(0);
  EVT VT = OpLHS.getValueType();

  assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
         "Expect an v8i16/v16i8 type");
  OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
  // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now,
  // extract the first 8 bytes into the top double word and the last 8 bytes
  // into the bottom double word. The v8i16 case is similar.
  unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
  return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
                     DAG.getConstant(ExtractNum, DL, MVT::i32));
}

static EVT getVectorTyFromPredicateVector(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::v4i1:
    return MVT::v4i32;
  case MVT::v8i1:
    return MVT::v8i16;
  case MVT::v16i1:
    return MVT::v16i8;
  default:
    llvm_unreachable("Unexpected vector predicate type");
  }
}

static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT,
                                    SelectionDAG &DAG) {
  // Converting from boolean predicates to integers involves creating a vector
  // of all ones or all zeroes and selecting the lanes based upon the real
  // predicate.
  SDValue AllOnes =
      DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), dl, MVT::i32);
  AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllOnes);

  SDValue AllZeroes =
      DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0x0), dl, MVT::i32);
  AllZeroes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllZeroes);

  // Get full vector type from predicate type
  EVT NewVT = getVectorTyFromPredicateVector(VT);

  SDValue RecastV1;
  // If the real predicate is a v8i1 or v4i1 (not v16i1) then we need to recast
  // this to a v16i1. This cannot be done with an ordinary bitcast because the
  // sizes are not the same. We have to use an MVE-specific PREDICATE_CAST
  // node, since we know in hardware the sizes are really the same.
  if (VT != MVT::v16i1)
    RecastV1 = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Pred);
  else
    RecastV1 = Pred;

  // Select either all ones or zeroes depending upon the real predicate bits.
  SDValue PredAsVector =
      DAG.getNode(ISD::VSELECT, dl, MVT::v16i8, RecastV1, AllOnes, AllZeroes);

  // Recast our new predicate-as-integer v16i8 vector into something
  // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate.
  return DAG.getNode(ISD::BITCAST, dl, NewVT, PredAsVector);
}

static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  ArrayRef<int> ShuffleMask = SVN->getMask();

  assert(ST->hasMVEIntegerOps() &&
         "No support for vector shuffle of boolean predicates");

  SDValue V1 = Op.getOperand(0);
  SDLoc dl(Op);
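  // Reversing the lanes of a predicate is just a bit-reversal of its 16
  // predicate bits: cast to i32, BITREVERSE the value and shift the reversed
  // bits back down from the top half of the register.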
  if (isReverseMask(ShuffleMask, VT)) {
    SDValue cast = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, V1);
    SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, cast);
    SDValue srl = DAG.getNode(ISD::SRL, dl, MVT::i32, rbit,
                              DAG.getConstant(16, dl, MVT::i32));
    return DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, srl);
  }

  // Until we can come up with optimised cases for every single vector
  // shuffle in existence we have chosen the least painful strategy. This is
  // to essentially promote the boolean predicate to an 8-bit integer, where
  // each predicate represents a byte. Then we fall back on a normal integer
  // vector shuffle and convert the result back into a predicate vector. In
  // many cases the generated code might be even better than scalar code
  // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit
  // fields in a register into 8 other arbitrary 2-bit fields!
  SDValue PredAsVector = PromoteMVEPredVector(dl, V1, VT, DAG);
  EVT NewVT = PredAsVector.getValueType();

  // Do the shuffle!
  SDValue Shuffled = DAG.getVectorShuffle(NewVT, dl, PredAsVector,
                                          DAG.getUNDEF(NewVT), ShuffleMask);

  // Now return the result of comparing the shuffled vector with zero,
  // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
  return DAG.getNode(ARMISD::VCMPZ, dl, VT, Shuffled,
                     DAG.getConstant(ARMCC::NE, dl, MVT::i32));
}

static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op,
                                            ArrayRef<int> ShuffleMask,
                                            SelectionDAG &DAG) {
  // Attempt to lower the vector shuffle using as many whole register movs as
  // possible. This is useful for types smaller than 32 bits, which would
  // often otherwise become a series of GPR movs.
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  if (VT.getScalarSizeInBits() >= 32)
    return SDValue();

  assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
         "Unexpected vector type");
  int NumElts = VT.getVectorNumElements();
  int QuarterSize = NumElts / 4;
  // The four final parts of the vector, as i32's
  SDValue Parts[4];

  // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc, (but not
  // <u,u,u,u>), returning the vmov lane index
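  // e.g. for a v8i16 shuffle, the mask quarter <6,7> is 32-bit lane 3 of the
  // first input and <10,11> is lane 1 of the second.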
  auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) {
    // Detect which mov lane this would be from the first non-undef element.
    int MovIdx = -1;
    for (int i = 0; i < Length; i++) {
      if (ShuffleMask[Start + i] >= 0) {
        if (ShuffleMask[Start + i] % Length != i)
          return -1;
        MovIdx = ShuffleMask[Start + i] / Length;
        break;
      }
    }
    // If all items are undef, leave this for other combines
    if (MovIdx == -1)
      return -1;
    // Check the remaining values are the correct part of the same mov
    for (int i = 1; i < Length; i++) {
      if (ShuffleMask[Start + i] >= 0 &&
          (ShuffleMask[Start + i] / Length != MovIdx ||
           ShuffleMask[Start + i] % Length != i))
        return -1;
    }
    return MovIdx;
  };

  for (int Part = 0; Part < 4; ++Part) {
    // Does this part look like a mov
    int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize);
    if (Elt != -1) {
      SDValue Input = Op->getOperand(0);
      if (Elt >= 4) {
        Input = Op->getOperand(1);
        Elt -= 4;
      }
      SDValue BitCast = DAG.getBitcast(MVT::v4i32, Input);
      Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, BitCast,
                                DAG.getConstant(Elt, dl, MVT::i32));
    }
  }

  // Nothing interesting found, just return
  if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3])
    return SDValue();

  // The other parts need to be built with the old shuffle vector, cast to a
  // v4i32 and extract_vector_elts
  if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) {
    SmallVector<int, 16> NewShuffleMask;
    for (int Part = 0; Part < 4; ++Part)
      for (int i = 0; i < QuarterSize; i++)
        NewShuffleMask.push_back(
            Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]);
    SDValue NewShuffle = DAG.getVectorShuffle(
        VT, dl, Op->getOperand(0), Op->getOperand(1), NewShuffleMask);
    SDValue BitCast = DAG.getBitcast(MVT::v4i32, NewShuffle);

    for (int Part = 0; Part < 4; ++Part)
      if (!Parts[Part])
        Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  BitCast, DAG.getConstant(Part, dl, MVT::i32));
  }
  // Build a vector out of the various parts and bitcast it back to the original
  // type.
  SDValue NewVec = DAG.getBuildVector(MVT::v4i32, dl, Parts);
  return DAG.getBitcast(VT, NewVec);
}

static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  unsigned EltSize = VT.getScalarSizeInBits();

  if (ST->hasMVEIntegerOps() && EltSize == 1)
    return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST);

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection.  This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same size so that they get CSEd properly.
  ArrayRef<int> ShuffleMask = SVN->getMask();

  if (EltSize <= 32) {
    if (SVN->isSplat()) {
      int Lane = SVN->getSplatIndex();
      // If this is undef splat, generate it via "just" vdup, if possible.
      if (Lane == -1) Lane = 0;

      // Test if V1 is a SCALAR_TO_VECTOR.
      if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
      // (and probably will turn into a SCALAR_TO_VECTOR once legalization
      // reaches it).
      if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
          !isa<ConstantSDNode>(V1.getOperand(0))) {
        bool IsScalarToVector = true;
        for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
          if (!V1.getOperand(i).isUndef()) {
            IsScalarToVector = false;
            break;
          }
        if (IsScalarToVector)
          return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
                         DAG.getConstant(Lane, dl, MVT::i32));
    }

    bool ReverseVEXT = false;
    unsigned Imm = 0;
    if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
      if (ReverseVEXT)
        std::swap(V1, V2);
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
                         DAG.getConstant(Imm, dl, MVT::i32));
    }

    if (isVREVMask(ShuffleMask, VT, 64))
      return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 32))
      return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 16))
      return DAG.getNode(ARMISD::VREV16, dl, VT, V1);

    if (ST->hasNEON() && V2->isUndef() &&
        isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
                         DAG.getConstant(Imm, dl, MVT::i32));
    }

    // Check for Neon shuffles that modify both input vectors in place.
    // If both results are used, i.e., if there are two shuffles with the same
    // source operands and with masks corresponding to both results of one of
    // these operations, DAG memoization will ensure that a single node is
    // used for both shuffles.
    unsigned WhichResult = 0;
    bool isV_UNDEF = false;
    if (ST->hasNEON()) {
      if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
              ShuffleMask, VT, WhichResult, isV_UNDEF)) {
        if (isV_UNDEF)
          V2 = V1;
        return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
            .getValue(WhichResult);
      }
    }
    if (ST->hasMVEIntegerOps()) {
      if (isVMOVNMask(ShuffleMask, VT, 0))
        return DAG.getNode(ARMISD::VMOVN, dl, VT, V2, V1,
                           DAG.getConstant(0, dl, MVT::i32));
      if (isVMOVNMask(ShuffleMask, VT, 1))
        return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V2,
                           DAG.getConstant(1, dl, MVT::i32));
    }

    // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
    // shuffles that produce a result larger than their operands with:
    //   shuffle(concat(v1, undef), concat(v2, undef))
    // ->
    //   shuffle(concat(v1, v2), undef)
    // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
    //
    // This is useful in the general case, but there are special cases where
    // native shuffles produce larger results: the two-result ops.
    //
    // Look through the concat when lowering them:
    //   shuffle(concat(v1, v2), undef)
    // ->
    //   concat(VZIP(v1, v2):0, :1)
    //
    if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS &&
        V2->isUndef()) {
      SDValue SubV1 = V1->getOperand(0);
      SDValue SubV2 = V1->getOperand(1);
      EVT SubVT = SubV1.getValueType();

      // We expect these to have been canonicalized to -1.
      assert(llvm::all_of(ShuffleMask, [&](int i) {
        return i < (int)VT.getVectorNumElements();
      }) && "Unexpected shuffle index into UNDEF operand!");

      if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
              ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
        if (isV_UNDEF)
          SubV2 = SubV1;
        assert((WhichResult == 0) &&
               "In-place shuffle of concat can only have one result!");
        SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
                                  SubV1, SubV2);
        return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
                           Res.getValue(1));
      }
    }
  }

  // If the shuffle is not directly supported and it has 4 elements, use
  // the PerfectShuffle-generated table to synthesize it from other shuffles.
  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 4) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (ShuffleMask[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = ShuffleMask[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4) {
      if (ST->hasNEON())
        return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
      else if (isLegalMVEShuffleOp(PFEntry)) {
        unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
        unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
        unsigned PFEntryLHS = PerfectShuffleTable[LHSID];
        unsigned PFEntryRHS = PerfectShuffleTable[RHSID];
        if (isLegalMVEShuffleOp(PFEntryLHS) && isLegalMVEShuffleOp(PFEntryRHS))
          return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
      }
    }
  }

  // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i) {
      if (ShuffleMask[i] < 0)
        Ops.push_back(DAG.getUNDEF(EltVT));
      else
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  ShuffleMask[i] < (int)NumElts ? V1 : V2,
                                  DAG.getConstant(ShuffleMask[i] & (NumElts-1),
                                                  dl, MVT::i32)));
    }
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  if (ST->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) &&
      isReverseMask(ShuffleMask, VT))
    return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);

  if (ST->hasNEON() && VT == MVT::v8i8)
    if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
      return NewOp;

  if (ST->hasMVEIntegerOps())
    if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG))
      return NewOp;

  return SDValue();
}

static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
                                         const ARMSubtarget *ST) {
  EVT VecVT = Op.getOperand(0).getValueType();
  SDLoc dl(Op);

  assert(ST->hasMVEIntegerOps() &&
         "LowerINSERT_VECTOR_ELT_i1 called without MVE!");

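  // View the predicate as its underlying i32 of predicate bits, sign extend
  // the incoming boolean so it fills its lane's bit-field, BFI it into place
  // and cast the result back to a predicate.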
  SDValue Conv =
      DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
  unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned LaneWidth =
      getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
  unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth;
  SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32,
                            Op.getOperand(1), DAG.getValueType(MVT::i1));
  SDValue BFI = DAG.getNode(ARMISD::BFI, dl, MVT::i32, Conv, Ext,
                            DAG.getConstant(~Mask, dl, MVT::i32));
  return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), BFI);
}

SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // INSERT_VECTOR_ELT is legal only for immediate indexes.
  SDValue Lane = Op.getOperand(2);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  SDValue Elt = Op.getOperand(1);
  EVT EltVT = Elt.getValueType();

  if (Subtarget->hasMVEIntegerOps() &&
      Op.getValueType().getScalarSizeInBits() == 1)
    return LowerINSERT_VECTOR_ELT_i1(Op, DAG, Subtarget);

  if (getTypeAction(*DAG.getContext(), EltVT) ==
      TargetLowering::TypePromoteFloat) {
    // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32,
    // but the type system will try to do that if we don't intervene.
    // Reinterpret any such vector-element insertion as one with the
    // corresponding integer types.

    SDLoc dl(Op);

    EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits());
    assert(getTypeAction(*DAG.getContext(), IEltVT) !=
           TargetLowering::TypePromoteFloat);

    SDValue VecIn = Op.getOperand(0);
    EVT VecVT = VecIn.getValueType();
    EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT,
                                  VecVT.getVectorNumElements());

    SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt);
    SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn);
    SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT,
                                  IVecIn, IElt, Lane);
    return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut);
  }

  return Op;
}

static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
                                          const ARMSubtarget *ST) {
  EVT VecVT = Op.getOperand(0).getValueType();
  SDLoc dl(Op);

  assert(ST->hasMVEIntegerOps() &&
         "LowerINSERT_VECTOR_ELT_i1 called without MVE!");

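  // View the predicate as its underlying i32 of predicate bits and shift the
  // requested lane's bits down to bit 0.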
  SDValue Conv =
      DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
  unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  unsigned LaneWidth =
      getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
  SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv,
                              DAG.getConstant(Lane * LaneWidth, dl, MVT::i32));
  return Shift;
}

static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
                                       const ARMSubtarget *ST) {
  // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
  SDValue Lane = Op.getOperand(1);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  SDValue Vec = Op.getOperand(0);
  EVT VT = Vec.getValueType();

  if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
    return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST);

  if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
    SDLoc dl(Op);
    return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
  }

  return Op;
}

static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT Op1VT = V1.getValueType();
  EVT Op2VT = V2.getValueType();
  unsigned NumElts = VT.getVectorNumElements();

  assert(Op1VT == Op2VT && "Operand types don't match!");
  assert(VT.getScalarSizeInBits() == 1 &&
         "Unexpected custom CONCAT_VECTORS lowering");
  assert(ST->hasMVEIntegerOps() &&
         "CONCAT_VECTORS lowering only supported for MVE");

  SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);
  SDValue NewV2 = PromoteMVEPredVector(dl, V2, Op2VT, DAG);

  // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets
  // promoted to v8i16, etc.

  MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();

  // Extract the vector elements from Op1 and Op2 one by one and truncate them
  // to be the right size for the destination. For example, if Op1 is v4i1 then
  // the promoted vector is v4i32. The result of concatenation gives a v8i1,
  // which when promoted is v8i16. That means each i32 element from Op1 needs
  // truncating to i16 and inserting in the result.
  EVT ConcatVT = MVT::getVectorVT(ElType, NumElts);
  SDValue ConVec = DAG.getNode(ISD::UNDEF, dl, ConcatVT);
  auto ExtractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) {
    EVT NewVT = NewV.getValueType();
    EVT ConcatVT = ConVec.getValueType();
    for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) {
      SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV,
                                DAG.getIntPtrConstant(i, dl));
      ConVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ConcatVT, ConVec, Elt,
                           DAG.getConstant(j, dl, MVT::i32));
    }
    return ConVec;
  };
  unsigned j = 0;
  ConVec = ExtractInto(NewV1, ConVec, j);
  ConVec = ExtractInto(NewV2, ConVec, j);

  // Now return the result of comparing the subvector with zero,
  // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
  return DAG.getNode(ARMISD::VCMPZ, dl, VT, ConVec,
                     DAG.getConstant(ARMCC::NE, dl, MVT::i32));
}

static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {
  EVT VT = Op->getValueType(0);
  if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
    return LowerCONCAT_VECTORS_i1(Op, DAG, ST);

  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
  assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
         "unexpected CONCAT_VECTORS");
  SDLoc dl(Op);
  SDValue Val = DAG.getUNDEF(MVT::v2f64);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  if (!Op0.isUndef())
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
                      DAG.getIntPtrConstant(0, dl));
  if (!Op1.isUndef())
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
                      DAG.getIntPtrConstant(1, dl));
  return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
}

static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT Op1VT = V1.getValueType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue();

  assert(VT.getScalarSizeInBits() == 1 &&
         "Unexpected custom EXTRACT_SUBVECTOR lowering");
  assert(ST->hasMVEIntegerOps() &&
         "EXTRACT_SUBVECTOR lowering only supported for MVE");

  SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);

  // We now have Op1 promoted to a vector of integers, where v8i1 gets
  // promoted to v8i16, etc.
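  // For example (illustrative): extracting a v4i1 from a v8i1 at index 4
  // promotes the v8i1 to v8i16, copies lanes 4..7 into a v4i32, and compares
  // that against zero with VCMPZ to form the v4i1 result.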

  MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();

  EVT SubVT = MVT::getVectorVT(ElType, NumElts);
  SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT);
  for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1,
                              DAG.getIntPtrConstant(i, dl));
    SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
                         DAG.getConstant(j, dl, MVT::i32));
  }

  // Now return the result of comparing the subvector with zero,
  // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
  return DAG.getNode(ARMISD::VCMPZ, dl, VT, SubVec,
                     DAG.getConstant(ARMCC::NE, dl, MVT::i32));
}

/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
/// element has been zero/sign-extended, depending on the isSigned parameter,
/// from an integer type half its size.
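/// For example (illustrative): a v4i16 BUILD_VECTOR whose constants all fit
/// in a signed i8 counts as sign-extended; one whose constants all fit in an
/// unsigned i8 counts as zero-extended.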
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
                                   bool isSigned) {
  // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
  EVT VT = N->getValueType(0);
  if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    if (BVN->getValueType(0) != MVT::v4i32 ||
        BVN->getOpcode() != ISD::BUILD_VECTOR)
      return false;
    unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
    unsigned HiElt = 1 - LoElt;
    ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
    ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
    ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
    ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
    if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
      return false;
    if (isSigned) {
      if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
          Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
        return true;
    } else {
      if (Hi0->isNullValue() && Hi1->isNullValue())
        return true;
    }
    return false;
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDNode *Elt = N->getOperand(i).getNode();
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
      unsigned EltSize = VT.getScalarSizeInBits();
      unsigned HalfSize = EltSize / 2;
      if (isSigned) {
        if (!isIntN(HalfSize, C->getSExtValue()))
          return false;
      } else {
        if (!isUIntN(HalfSize, C->getZExtValue()))
          return false;
      }
      continue;
    }
    return false;
  }

  return true;
}

/// isSignExtended - Check if a node is a vector value that is sign-extended
/// or a constant BUILD_VECTOR with sign-extended elements.
static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
    return true;
  if (isExtendedBUILD_VECTOR(N, DAG, true))
    return true;
  return false;
}

/// isZeroExtended - Check if a node is a vector value that is zero-extended
/// or a constant BUILD_VECTOR with zero-extended elements.
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
    return true;
  if (isExtendedBUILD_VECTOR(N, DAG, false))
    return true;
  return false;
}

static EVT getExtensionTo64Bits(const EVT &OrigVT) {
  if (OrigVT.getSizeInBits() >= 64)
    return OrigVT;

  assert(OrigVT.isSimple() && "Expecting a simple value type");

  MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
  switch (OrigSimpleTy) {
  default: llvm_unreachable("Unexpected Vector Type");
  case MVT::v2i8:
  case MVT::v2i16:
    return MVT::v2i32;
  case MVT::v4i8:
    return MVT::v4i16;
  }
}

/// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
/// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
/// We insert the required extension here to get the vector to fill a D register.
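/// For example (illustrative): if a v4i8 was extended to v4i32, the v4i8 is
/// re-extended here to v4i16 (via getExtensionTo64Bits) so that it occupies a
/// full 64-bit D register before the VMULL.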
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
                                            const EVT &OrigTy,
                                            const EVT &ExtTy,
                                            unsigned ExtOpcode) {
  // The vector originally had a size of OrigTy. It was then extended to ExtTy.
  // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
  // 64-bits we need to insert a new extension so that it will be 64-bits.
  assert(ExtTy.is128BitVector() && "Unexpected extension size");
  if (OrigTy.getSizeInBits() >= 64)
    return N;

  // Must extend size to at least 64 bits to be used as an operand for VMULL.
  EVT NewVT = getExtensionTo64Bits(OrigTy);

  return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
}

/// SkipLoadExtensionForVMULL - return a load of the original vector size that
/// does not do any sign/zero extension. If the original vector is less
/// than 64 bits, an appropriate extension will be added after the load to
/// reach a total size of 64 bits. We have to add the extension separately
/// because ARM does not have a sign/zero extending load for vectors.
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
  EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());

  // The load already has the right type.
  if (ExtendedTy == LD->getMemoryVT())
    return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
                       LD->getBasePtr(), LD->getPointerInfo(),
                       LD->getAlignment(), LD->getMemOperand()->getFlags());

  // We need to create a zextload/sextload. We cannot just create a load
  // followed by a sext/zext node because LowerMUL is also run during normal
  // operation legalization where we can't create illegal types.
  return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
                        LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
                        LD->getMemoryVT(), LD->getAlignment(),
                        LD->getMemOperand()->getFlags());
}

/// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
/// extending load, or BUILD_VECTOR with extended elements, return the
/// unextended value. The unextended vector should be 64 bits so that it can
/// be used as an operand to a VMULL instruction. If the original vector size
/// before extension is less than 64 bits we add an extension to resize
/// the vector to 64 bits.
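/// For example (illustrative): for (v8i16 (zero_extend (v8i8 X))) this simply
/// returns X, since a v8i8 already fills a D register; for a sign-extending
/// v4i8 load it instead returns a v4i16 extending load.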
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
    return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
                                        N->getOperand(0)->getValueType(0),
                                        N->getValueType(0),
                                        N->getOpcode());

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
           "Expected extending load");

    SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
    unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue extLoad =
        DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);

    return newLoad;
  }

  // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
  // have been legalized as a BITCAST from v4i32.
  if (N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
           BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
    unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
    return DAG.getBuildVector(
        MVT::v2i32, SDLoc(N),
        {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
  }
  // Construct a new BUILD_VECTOR with elements truncated to half the size.
  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
  EVT VT = N->getValueType(0);
  unsigned EltSize = VT.getScalarSizeInBits() / 2;
  unsigned NumElts = VT.getVectorNumElements();
  MVT TruncVT = MVT::getIntegerVT(EltSize);
  SmallVector<SDValue, 8> Ops;
  SDLoc dl(N);
  for (unsigned i = 0; i != NumElts; ++i) {
    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
    const APInt &CInt = C->getAPIntValue();
    // Element types smaller than 32 bits are not legal, so use i32 elements.
    // The values are implicitly truncated so sext vs. zext doesn't matter.
    Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
  }
  return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}

static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
      isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
  }
  return false;
}

static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
      isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
  }
  return false;
}

static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
  // Multiplications are only custom-lowered for 128-bit vectors so that
  // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
  EVT VT = Op.getValueType();
  assert(VT.is128BitVector() && VT.isInteger() &&
         "unexpected type for custom-lowering ISD::MUL");
  SDNode *N0 = Op.getOperand(0).getNode();
  SDNode *N1 = Op.getOperand(1).getNode();
  unsigned NewOpc = 0;
  bool isMLA = false;
  bool isN0SExt = isSignExtended(N0, DAG);
  bool isN1SExt = isSignExtended(N1, DAG);
  if (isN0SExt && isN1SExt)
    NewOpc = ARMISD::VMULLs;
  else {
    bool isN0ZExt = isZeroExtended(N0, DAG);
    bool isN1ZExt = isZeroExtended(N1, DAG);
    if (isN0ZExt && isN1ZExt)
      NewOpc = ARMISD::VMULLu;
    else if (isN1SExt || isN1ZExt) {
      // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
      // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
      if (isN1SExt && isAddSubSExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLs;
        isMLA = true;
      } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
        std::swap(N0, N1);
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      }
    }

    if (!NewOpc) {
      if (VT == MVT::v2i64)
        // Fall through to expand this.  It is not legal.
        return SDValue();
      else
        // Other vector multiplications are legal.
        return Op;
    }
  }

  // Legalize to a VMULL instruction.
  SDLoc DL(Op);
  SDValue Op0;
  SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
  if (!isMLA) {
    Op0 = SkipExtensionForVMULL(N0, DAG);
    assert(Op0.getValueType().is64BitVector() &&
           Op1.getValueType().is64BitVector() &&
           "unexpected types for extended operands to VMULL");
    return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
  }

  // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
  // isel lowering to take advantage of no-stall back to back vmul + vmla.
  //   vmull q0, d4, d6
  //   vmlal q0, d5, d6
  // is faster than
  //   vaddl q0, d4, d5
  //   vmovl q1, d6
  //   vmul  q0, q0, q1
  SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
  SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
  EVT Op1VT = Op1.getValueType();
  return DAG.getNode(N0->getOpcode(), DL, VT,
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}

static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
                              SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?

  // Convert to float
  // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
  // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
  X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
  Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
  X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
  Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
  // Get reciprocal estimate.
  // float4 recip = vrecpeq_f32(yf);
  Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   Y);
  // Because char has a smaller range than uchar, we can actually get away
  // without any Newton steps.  This requires that we use a weird bias
  // of 0xb000, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
  X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
  Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
  X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
  // Convert back to short.
  X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
  X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
  return X;
}

static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
                               SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?

  SDValue N2;
  // Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_s16(y));
  // float4 xf = vcvt_f32_s32(vmovl_s16(x));
  N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and one refinement step.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   N1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   N1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Because short has a smaller range than ushort, we can actually get away
  // with only a single Newton step.  This requires that we use a weird bias
  // of 0x89, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0x89);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_s32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::SDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);

    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4, dl));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4, dl));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0, dl));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0, dl));

    N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG, ST);

    N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
    return N0;
  }
  return LowerSDIV_v4i16(N0, N1, dl, DAG);
}

static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  // TODO: Should this propagate fast-math-flags?
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::UDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);

    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4, dl));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4, dl));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0, dl));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0, dl));

    N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG, ST);

    N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
                     DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
                                     MVT::i32),
                     N0);
    return N0;
  }

  // v4i16 udiv ... Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_u16(y));
  // float4 xf = vcvt_f32_s32(vmovl_u16(x));
  N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and two refinement steps.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   BN1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Simply multiplying by the reciprocal estimate can leave us a few ulps
  // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
  // and that it will never cause us to return an answer too large).
  // float4 result = as_float4(as_int4(xf*recip) + 2);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(2, dl, MVT::v4i32);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_u32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
  SDNode *N = Op.getNode();
  EVT VT = N->getValueType(0);
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  SDValue Carry = Op.getOperand(2);

  SDLoc DL(Op);

  SDValue Result;
  if (Op.getOpcode() == ISD::ADDCARRY) {
    // This converts the boolean value carry into the carry flag.
    Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);

    // Do the addition proper using the carry flag we wanted.
    Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0),
                         Op.getOperand(1), Carry);

    // Now convert the carry flag into a boolean value.
    Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
  } else {
    // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we
    // have to invert the carry first.
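    // For example (illustrative): an incoming borrow of 0 (no borrow) becomes
    // a carry of 1 before it feeds ARMISD::SUBE.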
    Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
                        DAG.getConstant(1, DL, MVT::i32), Carry);
    // This converts the boolean value carry into the carry flag.
    Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);

    // Do the subtraction proper using the carry flag we wanted.
    Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0),
                         Op.getOperand(1), Carry);

    // Now convert the carry flag into a boolean value.
    Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
    // But the carry returned by ARMISD::SUBE is not a borrow as expected
    // by ISD::SUBCARRY, so compute 1 - C.
    Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
                        DAG.getConstant(1, DL, MVT::i32), Carry);
  }

  // Return both values.
  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry);
}

SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin());

  // For iOS, we want to call an alternative entry point: __sincos_stret,
  // whose return values are passed via sret.
  SDLoc dl(Op);
  SDValue Arg = Op.getOperand(0);
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Pair of floats / doubles used to pass the result.
  Type *RetTy = StructType::get(ArgTy, ArgTy);
  auto &DL = DAG.getDataLayout();

  ArgListTy Args;
  bool ShouldUseSRet = Subtarget->isAPCS_ABI();
  SDValue SRet;
  if (ShouldUseSRet) {
    // Create stack object for sret.
    const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
    const Align StackAlign = DL.getPrefTypeAlign(RetTy);
    int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
    SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));

    ArgListEntry Entry;
    Entry.Node = SRet;
    Entry.Ty = RetTy->getPointerTo();
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Entry.IsSRet = true;
    Args.push_back(Entry);
    RetTy = Type::getVoidTy(*DAG.getContext());
  }

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = ArgTy;
  Entry.IsSExt = false;
  Entry.IsZExt = false;
  Args.push_back(Entry);

  RTLIB::Libcall LC =
      (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
  const char *LibcallName = getLibcallName(LC);
  CallingConv::ID CC = getLibcallCallingConv(LC);
  SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(DAG.getEntryNode())
      .setCallee(CC, RetTy, Callee, std::move(Args))
      .setDiscardResult(ShouldUseSRet);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  if (!ShouldUseSRet)
    return CallResult.first;

  SDValue LoadSin =
      DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());

  // Address of cos field.
  SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
                            DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
  SDValue LoadCos =
      DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());

  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
                     LoadSin.getValue(0), LoadCos.getValue(0));
}

SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
                                                  bool Signed,
                                                  SDValue &Chain) const {
  EVT VT = Op.getValueType();
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  const auto &DL = DAG.getDataLayout();
  const auto &TLI = DAG.getTargetLoweringInfo();

  const char *Name = nullptr;
  if (Signed)
    Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
  else
    Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";

  SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));

  ARMTargetLowering::ArgListTy Args;

  for (auto AI : {1, 0}) {
    ArgListEntry Arg;
    Arg.Node = Op.getOperand(AI);
    Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Args.push_back(Arg);
  }

  CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
    .setChain(Chain)
    .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
               ES, std::move(Args));

  return LowerCallTo(CLI).first;
}

// This is a code size optimisation: return the original SDIV node to
// DAGCombiner when we don't want to expand SDIV into a sequence of
// instructions, and an empty node otherwise, which will cause the
// SDIV to be expanded in DAGCombiner.
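// For example (illustrative): at minsize on a Thumb2 target with hardware
// divide, 'x / 8' keeps the SDIV (MOVS #8 plus SDIV), while 'x / 256' returns
// an empty SDValue so DAGCombiner emits the usual shift-based expansion.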
SDValue
ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // TODO: Support SREM
  if (N->getOpcode() != ISD::SDIV)
    return SDValue();

  const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget());
  const bool MinSize = ST.hasMinSize();
  const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
                                      : ST.hasDivideInARMMode();

  // Don't touch vector types; rewriting this may lead to scalarizing
  // the int divs.
  if (N->getOperand(0).getValueType().isVector())
    return SDValue();

  // Bail if MinSize is not set, and also for both ARM and Thumb mode we need
  // hwdiv support for this to be really profitable.
  if (!(MinSize && HasDivide))
    return SDValue();

  // ARM mode is a bit simpler than Thumb: we can handle large power
  // of 2 immediates with 1 mov instruction; no further checks required,
  // just return the sdiv node.
  if (!ST.isThumb())
    return SDValue(N, 0);

  // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV,
  // and thus lose the code size benefits of a MOVS that requires only 2 bytes.
  // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here,
  // but as it's doing exactly this, it's not worth the trouble to get TTI.
  if (Divisor.sgt(128))
    return SDValue();

  return SDValue(N, 0);
}

SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
                                            bool Signed) const {
  assert(Op.getValueType() == MVT::i32 &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
                               DAG.getEntryNode(), Op.getOperand(1));

  return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
}

static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) {
  SDLoc DL(N);
  SDValue Op = N->getOperand(1);
  if (N->getValueType(0) == MVT::i32)
    return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
                           DAG.getConstant(1, DL, MVT::i32));
  return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
                     DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
}

void ARMTargetLowering::ExpandDIV_Windows(
    SDValue Op, SelectionDAG &DAG, bool Signed,
    SmallVectorImpl<SDValue> &Results) const {
  const auto &DL = DAG.getDataLayout();
  const auto &TLI = DAG.getTargetLoweringInfo();

  assert(Op.getValueType() == MVT::i64 &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());

  SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);

  SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
  SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
                              DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
  Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);

  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lower, Upper));
}

static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) {
  LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
  EVT MemVT = LD->getMemoryVT();
  assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
         "Expected a predicate type!");
  assert(MemVT == Op.getValueType());
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Expected a non-extending load");
  assert(LD->isUnindexed() && "Expected an unindexed load");

  // The basic MVE VLDR on a v4i1/v8i1 actually loads the entire 16-bit
  // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
  // need to make sure that 8/4 bits are actually loaded into the correct
  // place, which means loading the value and then shuffling the values into
  // the bottom bits of the predicate.
  // Equally, VLDR for a v16i1 will actually load 32 bits (so will be incorrect
  // for BE).
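  // For example (illustrative), a v4i1 load becomes:
  //   i32 Val = extload i4 from the address
  //   v16i1 P = PREDICATE_CAST Val
  //   result  = EXTRACT_SUBVECTOR v4i1 P, 0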

  SDLoc dl(Op);
  SDValue Load = DAG.getExtLoad(
      ISD::EXTLOAD, dl, MVT::i32, LD->getChain(), LD->getBasePtr(),
      EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
      LD->getMemOperand());
  SDValue Pred = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Load);
  if (MemVT != MVT::v16i1)
    Pred = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MemVT, Pred,
                       DAG.getConstant(0, dl, MVT::i32));
  return DAG.getMergeValues({Pred, Load.getValue(1)}, dl);
}

void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                  SelectionDAG &DAG) const {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  EVT MemVT = LD->getMemoryVT();
  assert(LD->isUnindexed() && "Loads should be unindexed at this point.");

  if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
      !Subtarget->isThumb1Only() && LD->isVolatile()) {
    SDLoc dl(N);
    SDValue Result = DAG.getMemIntrinsicNode(
        ARMISD::LDRD, dl, DAG.getVTList({MVT::i32, MVT::i32, MVT::Other}),
        {LD->getChain(), LD->getBasePtr()}, MemVT, LD->getMemOperand());
    SDValue Lo = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 0 : 1);
    SDValue Hi = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 1 : 0);
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    Results.append({Pair, Result.getValue(2)});
  }
}

static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
  EVT MemVT = ST->getMemoryVT();
  assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
         "Expected a predicate type!");
  assert(MemVT == ST->getValue().getValueType());
  assert(!ST->isTruncatingStore() && "Expected a non-extending store");
  assert(ST->isUnindexed() && "Expected an unindexed store");

  // Only store the v4i1 or v8i1 worth of bits, via a buildvector whose top
  // lanes are undef, followed by a scalar (truncating) store.
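  // For example (illustrative), a v4i1 store becomes: build a v16i1 from the
  // four lanes (upper lanes undef), PREDICATE_CAST it to an i32, then emit a
  // truncating i4 store of that value.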
  SDLoc dl(Op);
  SDValue Build = ST->getValue();
  if (MemVT != MVT::v16i1) {
    SmallVector<SDValue, 16> Ops;
    for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++)
      Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Build,
                                DAG.getConstant(I, dl, MVT::i32)));
    for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++)
      Ops.push_back(DAG.getUNDEF(MVT::i32));
    Build = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i1, Ops);
  }
  SDValue GRP = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Build);
  return DAG.getTruncStore(
      ST->getChain(), dl, GRP, ST->getBasePtr(),
      EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
      ST->getMemOperand());
}

static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *Subtarget) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
  EVT MemVT = ST->getMemoryVT();
  assert(ST->isUnindexed() && "Stores should be unindexed at this point.");

  if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
      !Subtarget->isThumb1Only() && ST->isVolatile()) {
    SDNode *N = Op.getNode();
    SDLoc dl(N);

    SDValue Lo = DAG.getNode(
        ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
        DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 0 : 1, dl,
                              MVT::i32));
    SDValue Hi = DAG.getNode(
        ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
        DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 1 : 0, dl,
                              MVT::i32));

    return DAG.getMemIntrinsicNode(ARMISD::STRD, dl, DAG.getVTList(MVT::Other),
                                   {ST->getChain(), Lo, Hi, ST->getBasePtr()},
                                   MemVT, ST->getMemOperand());
  } else if (Subtarget->hasMVEIntegerOps() &&
             ((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 ||
               MemVT == MVT::v16i1))) {
    return LowerPredicateStore(Op, DAG);
  }

  return SDValue();
}

static bool isZeroVector(SDValue N) {
  return (ISD::isBuildVectorAllZeros(N.getNode()) ||
          (N->getOpcode() == ARMISD::VMOVIMM &&
           isNullConstant(N->getOperand(0))));
}

static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {
  MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
  MVT VT = Op.getSimpleValueType();
  SDValue Mask = N->getMask();
  SDValue PassThru = N->getPassThru();
  SDLoc dl(Op);

  if (isZeroVector(PassThru))
    return Op;

  // MVE Masked loads use zero as the passthru value. Here we convert undef to
  // zero too, and other values are lowered to a select.
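  // For example (illustrative), a masked load whose passthru is a non-zero
  // splat C becomes:
  //   NewLoad = masked.load(..., passthru = 0)
  //   Result  = vselect(Mask, NewLoad, splat C)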
  SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(0, dl, MVT::i32));
  SDValue NewLoad = DAG.getMaskedLoad(
      VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask, ZeroVec,
      N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
      N->getExtensionType(), N->isExpandingLoad());
  SDValue Combo = NewLoad;
  bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST ||
                             PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) &&
                            isZeroVector(PassThru->getOperand(0));
  if (!PassThru.isUndef() && !PassThruIsCastZero)
    Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
  return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl);
}

static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) {
  if (!ST->hasMVEIntegerOps())
    return SDValue();

  SDLoc dl(Op);
  unsigned BaseOpcode = 0;
  switch (Op->getOpcode()) {
  default: llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break;
  case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break;
  case ISD::VECREDUCE_MUL:  BaseOpcode = ISD::MUL; break;
  case ISD::VECREDUCE_AND:  BaseOpcode = ISD::AND; break;
  case ISD::VECREDUCE_OR:   BaseOpcode = ISD::OR; break;
  case ISD::VECREDUCE_XOR:  BaseOpcode = ISD::XOR; break;
  case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break;
  case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break;
  }

  SDValue Op0 = Op->getOperand(0);
  EVT VT = Op0.getValueType();
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumActiveLanes = NumElts;

  assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 ||
          NumActiveLanes == 2) &&
         "Only expected a power 2 vector size");

  // Use Mul(X, Rev(X)) until 4 items remain. Going down to 4 vector elements
  // allows us to easily extract vector elements from the lanes.
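  // For example (illustrative), VECREDUCE_MUL of a v8i16 X:
  //   X = mul(X, VREV32(X))   ; four lanes of interest remain (0, 2, 4, 6)
  //   extract those four lanes and combine them pairwise with MUL.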
  while (NumActiveLanes > 4) {
    unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32;
    SDValue Rev = DAG.getNode(RevOpcode, dl, VT, Op0);
    Op0 = DAG.getNode(BaseOpcode, dl, VT, Op0, Rev);
    NumActiveLanes /= 2;
  }

  SDValue Res;
  if (NumActiveLanes == 4) {
    // The remaining 4 elements are combined pairwise using BaseOpcode.
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                              DAG.getConstant(0 * NumElts / 4, dl, MVT::i32));
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                              DAG.getConstant(1 * NumElts / 4, dl, MVT::i32));
    SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                              DAG.getConstant(2 * NumElts / 4, dl, MVT::i32));
    SDValue Ext3 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                              DAG.getConstant(3 * NumElts / 4, dl, MVT::i32));
    SDValue Res0 = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags());
    SDValue Res1 = DAG.getNode(BaseOpcode, dl, EltVT, Ext2, Ext3, Op->getFlags());
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res0, Res1, Op->getFlags());
  } else {
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                              DAG.getConstant(0, dl, MVT::i32));
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                              DAG.getConstant(1, dl, MVT::i32));
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags());
  }

  // Result type may be wider than element type.
  if (EltVT != Op->getValueType(0))
    Res = DAG.getNode(ISD::ANY_EXTEND, dl, Op->getValueType(0), Res);
  return Res;
}

static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG,
                               const ARMSubtarget *ST) {
  if (!ST->hasMVEFloatOps())
    return SDValue();
  return LowerVecReduce(Op, DAG, ST);
}

static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
    // Acquire/Release load/store is not legal for targets without a dmb or
    // equivalent available.
    return SDValue();

  // Monotonic load/store is legal for all targets.
  return Op;
}

static void ReplaceREADCYCLECOUNTER(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) {
  SDLoc DL(N);
  // Under Power Management extensions, the cycle-count is:
  //    mrc p15, #0, <Rt>, c9, c13, #0
  SDValue Ops[] = { N->getOperand(0), // Chain
                    DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
                    DAG.getTargetConstant(15, DL, MVT::i32),
                    DAG.getTargetConstant(0, DL, MVT::i32),
                    DAG.getTargetConstant(9, DL, MVT::i32),
                    DAG.getTargetConstant(13, DL, MVT::i32),
                    DAG.getTargetConstant(0, DL, MVT::i32)
  };

  SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
                                 DAG.getVTList(MVT::i32, MVT::Other), Ops);
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
                                DAG.getConstant(0, DL, MVT::i32)));
  Results.push_back(Cycles32.getValue(1));
}

static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
  SDLoc dl(V.getNode());
  SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
  SDValue VHi = DAG.getAnyExtOrTrunc(
      DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
      dl, MVT::i32);
  bool isBigEndian = DAG.getDataLayout().isBigEndian();
  if (isBigEndian)
    std::swap(VLo, VHi);
  SDValue RegClass =
      DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
  SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
  SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
  return SDValue(
      DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
}

static void ReplaceCMP_SWAP_64Results(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) {
  assert(N->getValueType(0) == MVT::i64 &&
         "AtomicCmpSwap on types narrower than 64 bits should be legal");
  SDValue Ops[] = {N->getOperand(1),
                   createGPRPairNode(DAG, N->getOperand(2)),
                   createGPRPairNode(DAG, N->getOperand(3)),
                   N->getOperand(0)};
  SDNode *CmpSwap = DAG.getMachineNode(
      ARM::CMP_SWAP_64, SDLoc(N),
      DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);

  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
  DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});

  bool isBigEndian = DAG.getDataLayout().isBigEndian();

  SDValue Lo =
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0));
  SDValue Hi =
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0));
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi));
  Results.push_back(SDValue(CmpSwap, 2));
}

SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  SDValue Chain = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
  bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;

  // If we don't have instructions of this float type then soften to a libcall
  // and use SETCC instead.
  if (isUnsupportedFloatingType(LHS.getValueType())) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(
      DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling);
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
    SDValue Result = DAG.getNode(ISD::SETCC, dl, VT, LHS, RHS,
                                 DAG.getCondCode(CC));
    return DAG.getMergeValues({Result, Chain}, dl);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  // FIXME: Chain is not handled correctly here. Currently the FPSCR is implicit
  // in CMPFP and CMPFPE, but instead it should be made explicit by these
  // instructions using a chain instead of glue. This would also fix the problem
  // here (and also in LowerSELECT_CC) where we generate two comparisons when
  // CondCode2 != AL.
  SDValue True = DAG.getConstant(1, dl, VT);
  SDValue False =  DAG.getConstant(0, dl, VT);
  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
  SDValue Result = getCMOV(dl, VT, False, True, ARMcc, CCR, Cmp, DAG);
  if (CondCode2 != ARMCC::AL) {
    ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
    Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
    Result = getCMOV(dl, VT, Result, True, ARMcc, CCR, Cmp, DAG);
  }
  return DAG.getMergeValues({Result, Chain}, dl);
}

SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:        return LowerSELECT(Op, DAG);
  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
  case ISD::BRCOND:        return LowerBRCOND(Op, DAG);
  case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
  case ISD::VASTART:       return LowerVASTART(Op, DAG);
  case ISD::ATOMIC_FENCE:  return LowerATOMIC_FENCE(Op, DAG, Subtarget);
  case ISD::PREFETCH:      return LowerPREFETCH(Op, DAG, Subtarget);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
  case ISD::RETURNADDR:    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
                                                               Subtarget);
  case ISD::BITCAST:       return ExpandBITCAST(Op.getNode(), DAG, Subtarget);
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
  case ISD::SREM:          return LowerREM(Op.getNode(), DAG);
  case ISD::UREM:          return LowerREM(Op.getNode(), DAG);
  case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
  case ISD::SRL_PARTS:
  case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
  case ISD::CTPOP:         return LowerCTPOP(Op.getNode(), DAG, Subtarget);
  case ISD::SETCC:         return LowerVSETCC(Op, DAG, Subtarget);
  case ISD::SETCCCARRY:    return LowerSETCCCARRY(Op, DAG);
  case ISD::ConstantFP:    return LowerConstantFP(Op, DAG, Subtarget);
  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG, Subtarget);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, Subtarget);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, Subtarget);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, Subtarget);
  case ISD::FLT_ROUNDS_:   return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::MUL:           return LowerMUL(Op, DAG);
  case ISD::SDIV:
    if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
      return LowerDIV_Windows(Op, DAG, /* Signed */ true);
    return LowerSDIV(Op, DAG, Subtarget);
  case ISD::UDIV:
    if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
      return LowerDIV_Windows(Op, DAG, /* Signed */ false);
    return LowerUDIV(Op, DAG, Subtarget);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:      return LowerADDSUBCARRY(Op, DAG);
  case ISD::SADDO:
  case ISD::SSUBO:
    return LowerSignedALUO(Op, DAG);
  case ISD::UADDO:
  case ISD::USUBO:
    return LowerUnsignedALUO(Op, DAG);
  case ISD::SADDSAT:
  case ISD::SSUBSAT:
    return LowerSADDSUBSAT(Op, DAG, Subtarget);
  case ISD::LOAD:
    return LowerPredicateLoad(Op, DAG);
  case ISD::STORE:
    return LowerSTORE(Op, DAG, Subtarget);
  case ISD::MLOAD:
    return LowerMLOAD(Op, DAG);
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
    return LowerVecReduce(Op, DAG, Subtarget);
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_FMIN:
  case ISD::VECREDUCE_FMAX:
    return LowerVecReduceF(Op, DAG, Subtarget);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:  return LowerAtomicLoadStore(Op, DAG);
  case ISD::FSINCOS:       return LowerFSINCOS(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM:       return LowerDivRem(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    if (Subtarget->isTargetWindows())
      return LowerDYNAMIC_STACKALLOC(Op, DAG);
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::STRICT_FP_ROUND:
  case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
  case ISD::STRICT_FP_EXTEND:
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG);
  case ARMISD::WIN__DBZCHK: return SDValue();
  }
}

static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  unsigned Opc = 0;
  if (IntNo == Intrinsic::arm_smlald)
    Opc = ARMISD::SMLALD;
  else if (IntNo == Intrinsic::arm_smlaldx)
    Opc = ARMISD::SMLALDX;
  else if (IntNo == Intrinsic::arm_smlsld)
    Opc = ARMISD::SMLSLD;
  else if (IntNo == Intrinsic::arm_smlsldx)
    Opc = ARMISD::SMLSLDX;
  else
    return;

  SDLoc dl(N);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                           N->getOperand(3),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                           N->getOperand(3),
                           DAG.getConstant(1, dl, MVT::i32));

  SDValue LongMul = DAG.getNode(Opc, dl,
                                DAG.getVTList(MVT::i32, MVT::i32),
                                N->getOperand(1), N->getOperand(2),
                                Lo, Hi);
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64,
                                LongMul.getValue(0), LongMul.getValue(1)));
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDValue Res;
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::READ_REGISTER:
    ExpandREAD_REGISTER(N, Results, DAG);
    break;
  case ISD::BITCAST:
    Res = ExpandBITCAST(N, DAG, Subtarget);
    break;
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SHL:
    Res = Expand64BitShift(N, DAG, Subtarget);
    break;
  case ISD::SREM:
  case ISD::UREM:
    Res = LowerREM(N, DAG);
    break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    Res = LowerDivRem(SDValue(N, 0), DAG);
    assert(Res.getNumOperands() == 2 && "DivRem needs two values");
    Results.push_back(Res.getValue(0));
    Results.push_back(Res.getValue(1));
    return;
  case ISD::SADDSAT:
  case ISD::SSUBSAT:
    Res = LowerSADDSUBSAT(SDValue(N, 0), DAG, Subtarget);
    break;
  case ISD::READCYCLECOUNTER:
    ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
    return;
  case ISD::UDIV:
  case ISD::SDIV:
    assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
    return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
                             Results);
  case ISD::ATOMIC_CMP_SWAP:
    ReplaceCMP_SWAP_64Results(N, Results, DAG);
    return;
  case ISD::INTRINSIC_WO_CHAIN:
    return ReplaceLongIntrinsic(N, Results, DAG);
  case ISD::ABS:
    lowerABS(N, Results, DAG);
    return;
  case ISD::LOAD:
    LowerLOAD(N, Results, DAG);
    break;
  }
  if (Res.getNode())
    Results.push_back(Res);
}

//===----------------------------------------------------------------------===//
//                           ARM Scheduler Hooks
//===----------------------------------------------------------------------===//

/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
/// registers the function context.
void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
                                               MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported with SjLj");
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineConstantPool *MCP = MF->getConstantPool();
  ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
  const Function &F = MF->getFunction();

  bool isThumb = Subtarget->isThumb();
  bool isThumb2 = Subtarget->isThumb2();

  unsigned PCLabelId = AFI->createPICLabelUId();
  unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
  ARMConstantPoolValue *CPV =
    ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj);
  unsigned CPI = MCP->getConstantPoolIndex(CPV, Align(4));

  const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;

  // Grab constant pool and fixed stack memory operands.
  MachineMemOperand *CPMMO =
      MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                               MachineMemOperand::MOLoad, 4, Align(4));

  MachineMemOperand *FIMMOSt =
      MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                               MachineMemOperand::MOStore, 4, Align(4));

  // Load the address of the dispatch MBB into the jump buffer.
  if (isThumb2) {
    // Incoming value: jbuf
    //   ldr.n  r5, LCPI1_1
    //   orr    r5, r5, #1
    //   add    r5, pc
    //   str    r5, [$jbuf, #+4] ; &jbuf[1]
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    // Set the low bit because of thumb mode.
    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(0x01)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
      .addReg(NewVReg2, RegState::Kill)
      .addImm(PCLabelId);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
        .addReg(NewVReg3, RegState::Kill)
        .addFrameIndex(FI)
        .addImm(36) // &jbuf[1] :: pc
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  } else if (isThumb) {
    // Incoming value: jbuf
    //   ldr.n  r1, LCPI1_4
    //   add    r1, pc
    //   mov    r2, #1
    //   orrs   r1, r2
    //   add    r2, $jbuf, #+4 ; &jbuf[1]
    //   str    r1, [r2]
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
      .addReg(NewVReg1, RegState::Kill)
      .addImm(PCLabelId);
    // Set the low bit because of thumb mode.
    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
        .addReg(ARM::CPSR, RegState::Define)
        .addImm(1)
        .add(predOps(ARMCC::AL));
    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg2, RegState::Kill)
        .addReg(NewVReg3, RegState::Kill)
        .add(predOps(ARMCC::AL));
    Register NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
            .addFrameIndex(FI)
            .addImm(36); // &jbuf[1] :: pc
    BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
        .addReg(NewVReg4, RegState::Kill)
        .addReg(NewVReg5, RegState::Kill)
        .addImm(0)
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  } else {
    // Incoming value: jbuf
    //   ldr  r1, LCPI1_1
    //   add  r1, pc, r1
    //   str  r1, [$jbuf, #+4] ; &jbuf[1]
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addImm(0)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(PCLabelId)
        .add(predOps(ARMCC::AL));
    BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
        .addReg(NewVReg2, RegState::Kill)
        .addFrameIndex(FI)
        .addImm(36) // &jbuf[1] :: pc
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  }
}

void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
                                              MachineBasicBlock *MBB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineFrameInfo &MFI = MF->getFrameInfo();
  int FI = MFI.getFunctionContextIndex();

  const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
                                                        : &ARM::GPRnopcRegClass;

  // Get a mapping of the call site numbers to all of the landing pads they're
  // associated with.
  DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad;
  unsigned MaxCSNum = 0;
  for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
       ++BB) {
    if (!BB->isEHPad()) continue;

    // FIXME: We should assert that the EH_LABEL is the first MI in the landing
    // pad.
    for (MachineBasicBlock::iterator
           II = BB->begin(), IE = BB->end(); II != IE; ++II) {
      if (!II->isEHLabel()) continue;

      MCSymbol *Sym = II->getOperand(0).getMCSymbol();
      if (!MF->hasCallSiteLandingPad(Sym)) continue;

      SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
      for (SmallVectorImpl<unsigned>::iterator
             CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
           CSI != CSE; ++CSI) {
        CallSiteNumToLPad[*CSI].push_back(&*BB);
        MaxCSNum = std::max(MaxCSNum, *CSI);
      }
      break;
    }
  }

  // Get an ordered list of the machine basic blocks for the jump table.
  std::vector<MachineBasicBlock*> LPadList;
  SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
  LPadList.reserve(CallSiteNumToLPad.size());
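  // Call-site indices are 1-based; walk them in order so the jump-table slots
  // line up with the call-site values stored in the function context.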
  for (unsigned I = 1; I <= MaxCSNum; ++I) {
    SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
    for (SmallVectorImpl<MachineBasicBlock*>::iterator
           II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
      LPadList.push_back(*II);
      InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
    }
  }

  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");

  // Create the jump table and associated information.
  MachineJumpTableInfo *JTI =
    MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
  unsigned MJTI = JTI->createJumpTableIndex(LPadList);

  // Create the MBBs for the dispatch code.

  // Shove the dispatch's address into the return slot in the function context.
  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
  DispatchBB->setIsEHPad();

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  unsigned trap_opcode;
  if (Subtarget->isThumb())
    trap_opcode = ARM::tTRAP;
  else
    trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;

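  // Call-site values larger than the number of landing pads branch to the
  // trap block (see the HI-predicated branches below).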
  BuildMI(TrapBB, dl, TII->get(trap_opcode));
  DispatchBB->addSuccessor(TrapBB);

  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
  DispatchBB->addSuccessor(DispContBB);

  // Insert the new MBBs at the end of the function.
  MF->insert(MF->end(), DispatchBB);
  MF->insert(MF->end(), DispContBB);
  MF->insert(MF->end(), TrapBB);

  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);

  MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI),
      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, Align(4));

  MachineInstrBuilder MIB;
  MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));

  const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
  const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();

  // Add a register mask with no preserved registers.  This results in all
  // registers being marked as clobbered. This can't work if the dispatch block
  // is in a Thumb1 function and is linked with ARM code which uses the FP
  // registers, as there is no way to preserve the FP registers in Thumb1 mode.
  MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));

  bool IsPositionIndependent = isPositionIndependent();
  unsigned NumLPads = LPadList.size();
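  // In each ISA variant below, the range check uses an immediate compare when
  // NumLPads fits in 8 bits; otherwise the constant is first materialized in
  // a register.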
  if (Subtarget->isThumb2()) {
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
        .addFrameIndex(FI)
        .addImm(4)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
          .addReg(NewVReg1)
          .addImm(LPadList.size())
          .add(predOps(ARMCC::AL));
    } else {
      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
          .addImm(NumLPads & 0xFFFF)
          .add(predOps(ARMCC::AL));

      unsigned VReg2 = VReg1;
      if ((NumLPads & 0xFFFF0000) != 0) {
        VReg2 = MRI->createVirtualRegister(TRC);
        BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
            .addReg(VReg1)
            .addImm(NumLPads >> 16)
            .add(predOps(ARMCC::AL));
      }

      BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg2)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::HI)
      .addReg(ARM::CPSR);

    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
        .addReg(NewVReg3, RegState::Kill)
        .addReg(NewVReg1)
        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
      .addReg(NewVReg4, RegState::Kill)
      .addReg(NewVReg1)
      .addJumpTableIndex(MJTI);
  } else if (Subtarget->isThumb()) {
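    // tLDRspi scales its immediate by 4, so #1 loads the same FI+4 slot that
    // the other variants address with #4.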
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
        .addFrameIndex(FI)
        .addImm(1)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
          .addReg(NewVReg1)
          .addImm(NumLPads)
          .add(predOps(ARMCC::AL));
    } else {
      MachineConstantPool *ConstantPool = MF->getConstantPool();
      Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);

      // MachineConstantPool wants an explicit alignment.
      Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);

      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
          .addReg(VReg1, RegState::Define)
          .addConstantPoolIndex(Idx)
          .add(predOps(ARMCC::AL));
      BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
          .addReg(NewVReg1)
          .addReg(VReg1)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::HI)
      .addReg(ARM::CPSR);

    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg1)
        .addImm(2)
        .add(predOps(ARMCC::AL));

    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg2, RegState::Kill)
        .addReg(NewVReg3)
        .add(predOps(ARMCC::AL));

    MachineMemOperand *JTMMOLd =
        MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF),
                                 MachineMemOperand::MOLoad, 4, Align(4));

    Register NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
        .addReg(NewVReg4, RegState::Kill)
        .addImm(0)
        .addMemOperand(JTMMOLd)
        .add(predOps(ARMCC::AL));

    unsigned NewVReg6 = NewVReg5;
    if (IsPositionIndependent) {
      NewVReg6 = MRI->createVirtualRegister(TRC);
      BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
          .addReg(ARM::CPSR, RegState::Define)
          .addReg(NewVReg5, RegState::Kill)
          .addReg(NewVReg3)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
      .addReg(NewVReg6, RegState::Kill)
      .addJumpTableIndex(MJTI);
  } else {
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
        .addFrameIndex(FI)
        .addImm(4)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
          .addReg(NewVReg1)
          .addImm(NumLPads)
          .add(predOps(ARMCC::AL));
    } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
          .addImm(NumLPads & 0xFFFF)
          .add(predOps(ARMCC::AL));

      unsigned VReg2 = VReg1;
      if ((NumLPads & 0xFFFF0000) != 0) {
        VReg2 = MRI->createVirtualRegister(TRC);
        BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
            .addReg(VReg1)
            .addImm(NumLPads >> 16)
            .add(predOps(ARMCC::AL));
      }

      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg2)
          .add(predOps(ARMCC::AL));
    } else {
      MachineConstantPool *ConstantPool = MF->getConstantPool();
      Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);

      // MachineConstantPool wants an explicit alignment.
      Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);

      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
          .addReg(VReg1, RegState::Define)
          .addConstantPoolIndex(Idx)
          .addImm(0)
          .add(predOps(ARMCC::AL));
      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg1, RegState::Kill)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::HI)
      .addReg(ARM::CPSR);

    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
        .addReg(NewVReg1)
        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    MachineMemOperand *JTMMOLd =
        MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF),
                                 MachineMemOperand::MOLoad, 4, Align(4));
    Register NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
        .addReg(NewVReg3, RegState::Kill)
        .addReg(NewVReg4)
        .addImm(0)
        .addMemOperand(JTMMOLd)
        .add(predOps(ARMCC::AL));

    if (IsPositionIndependent) {
      BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
        .addReg(NewVReg5, RegState::Kill)
        .addReg(NewVReg4)
        .addJumpTableIndex(MJTI);
    } else {
      BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
        .addReg(NewVReg5, RegState::Kill)
        .addJumpTableIndex(MJTI);
    }
  }

  // Add the jump table entries as successors to the MBB.
  SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
  for (std::vector<MachineBasicBlock*>::iterator
         I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
    MachineBasicBlock *CurMBB = *I;
    if (SeenMBBs.insert(CurMBB).second)
      DispContBB->addSuccessor(CurMBB);
  }

  // N.B. the order the invoke BBs are processed in doesn't matter here.
  const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
  SmallVector<MachineBasicBlock*, 64> MBBLPads;
  for (MachineBasicBlock *BB : InvokeBBs) {

    // Remove the landing pad successor from the invoke block and replace it
    // with the new dispatch block.
    SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
                                                  BB->succ_end());
    while (!Successors.empty()) {
      MachineBasicBlock *SMBB = Successors.pop_back_val();
      if (SMBB->isEHPad()) {
        BB->removeSuccessor(SMBB);
        MBBLPads.push_back(SMBB);
      }
    }

    BB->addSuccessor(DispatchBB, BranchProbability::getZero());
    BB->normalizeSuccProbs();

    // Find the invoke call and mark all of the callee-saved registers as
    // 'implicit defined' so that they're spilled. This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
    for (MachineBasicBlock::reverse_iterator
           II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
      if (!II->isCall()) continue;

      DenseMap<unsigned, bool> DefRegs;
      for (MachineInstr::mop_iterator
             OI = II->operands_begin(), OE = II->operands_end();
           OI != OE; ++OI) {
        if (!OI->isReg()) continue;
        DefRegs[OI->getReg()] = true;
      }

      MachineInstrBuilder MIB(*MF, &*II);

      for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
        unsigned Reg = SavedRegs[i];
        if (Subtarget->isThumb2() &&
            !ARM::tGPRRegClass.contains(Reg) &&
            !ARM::hGPRRegClass.contains(Reg))
          continue;
        if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
          continue;
        if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
          continue;
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      break;
    }
  }

  // Mark all former landing pads as non-landing pads. The dispatch is the only
  // landing pad now.
  for (SmallVectorImpl<MachineBasicBlock*>::iterator
         I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
    (*I)->setIsEHPad(false);

  // The instruction is gone now.
  MI.eraseFromParent();
}

static
MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
  for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
       E = MBB->succ_end(); I != E; ++I)
    if (*I != Succ)
      return *I;
  llvm_unreachable("Expecting a BB with two successors!");
}

/// Return the load opcode for a given load size. If the load size is 8 or
/// more, a NEON opcode is returned.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
  if (LdSize >= 8)
    return LdSize == 16 ? ARM::VLD1q32wb_fixed
                        : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
  if (IsThumb1)
    return LdSize == 4 ? ARM::tLDRi
                       : LdSize == 2 ? ARM::tLDRHi
                                     : LdSize == 1 ? ARM::tLDRBi : 0;
  if (IsThumb2)
    return LdSize == 4 ? ARM::t2LDR_POST
                       : LdSize == 2 ? ARM::t2LDRH_POST
                                     : LdSize == 1 ? ARM::t2LDRB_POST : 0;
  return LdSize == 4 ? ARM::LDR_POST_IMM
                     : LdSize == 2 ? ARM::LDRH_POST
                                   : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
}

/// Return the store opcode for a given store size. If the store size is 8 or
/// more, a NEON opcode is returned.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
  if (StSize >= 8)
    return StSize == 16 ? ARM::VST1q32wb_fixed
                        : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
  if (IsThumb1)
    return StSize == 4 ? ARM::tSTRi
                       : StSize == 2 ? ARM::tSTRHi
                                     : StSize == 1 ? ARM::tSTRBi : 0;
  if (IsThumb2)
    return StSize == 4 ? ARM::t2STR_POST
                       : StSize == 2 ? ARM::t2STRH_POST
                                     : StSize == 1 ? ARM::t2STRB_POST : 0;
  return StSize == 4 ? ARM::STR_POST_IMM
                     : StSize == 2 ? ARM::STRH_POST
                                   : StSize == 1 ? ARM::STRB_POST_IMM : 0;
}

/// Emit a post-increment load operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
                       const TargetInstrInfo *TII, const DebugLoc &dl,
                       unsigned LdSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
  assert(LdOpc != 0 && "Should have a load opcode");
  if (LdSize >= 8) {
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb1) {
    // load + update AddrIn
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
    BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
        .add(t1CondCodeOp())
        .addReg(AddrIn)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb2) {
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  } else { // arm
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addReg(0)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  }
}

/// Emit a post-increment store operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
                       const TargetInstrInfo *TII, const DebugLoc &dl,
                       unsigned StSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
  assert(StOpc != 0 && "Should have a store opcode");
  if (StSize >= 8) {
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(AddrIn)
        .addImm(0)
        .addReg(Data)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb1) {
    // store + update AddrIn
    BuildMI(*BB, Pos, dl, TII->get(StOpc))
        .addReg(Data)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
    BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
        .add(t1CondCodeOp())
        .addReg(AddrIn)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb2) {
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(Data)
        .addReg(AddrIn)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  } else { // arm
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(Data)
        .addReg(AddrIn)
        .addReg(0)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  }
}

MachineBasicBlock *
ARMTargetLowering::EmitStructByval(MachineInstr &MI,
                                   MachineBasicBlock *BB) const {
  // This pseudo instruction has 4 operands: dst, src, size, alignment.
  // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
  // Otherwise, we generate unrolled scalar copies.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register src = MI.getOperand(1).getReg();
  unsigned SizeVal = MI.getOperand(2).getImm();
  unsigned Alignment = MI.getOperand(3).getImm();
  DebugLoc dl = MI.getDebugLoc();

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned UnitSize = 0;
  const TargetRegisterClass *TRC = nullptr;
  const TargetRegisterClass *VecTRC = nullptr;

  bool IsThumb1 = Subtarget->isThumb1Only();
  bool IsThumb2 = Subtarget->isThumb2();
  bool IsThumb = Subtarget->isThumb();

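  // Derive the copy unit from the alignment: byte or halfword copies for
  // under-aligned buffers, otherwise prefer 16- or 8-byte NEON units when
  // available and fall back to word copies.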
  if (Alignment & 1) {
    UnitSize = 1;
  } else if (Alignment & 2) {
    UnitSize = 2;
  } else {
    // Check whether we can use NEON instructions.
    if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
        Subtarget->hasNEON()) {
      if ((Alignment % 16 == 0) && SizeVal >= 16)
        UnitSize = 16;
      else if ((Alignment % 8 == 0) && SizeVal >= 8)
        UnitSize = 8;
    }
    // Can't use NEON instructions.
    if (UnitSize == 0)
      UnitSize = 4;
  }

  // Select the correct opcode and register class for unit size load/store
  bool IsNeon = UnitSize >= 8;
  TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
  if (IsNeon)
    VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
                            : UnitSize == 8 ? &ARM::DPRRegClass
                                            : nullptr;

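  // LoopSize bytes are copied UnitSize at a time; the BytesLeft tail is
  // copied byte by byte.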
  unsigned BytesLeft = SizeVal % UnitSize;
  unsigned LoopSize = SizeVal - BytesLeft;

  if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
    // Use LDR and STR to copy.
    // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
    // [destOut] = STR_POST(scratch, destIn, UnitSize)
    unsigned srcIn = src;
    unsigned destIn = dest;
    for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
      Register srcOut = MRI.createVirtualRegister(TRC);
      Register destOut = MRI.createVirtualRegister(TRC);
      Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
      emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
                 IsThumb1, IsThumb2);
      emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
                 IsThumb1, IsThumb2);
      srcIn = srcOut;
      destIn = destOut;
    }

    // Handle the leftover bytes with LDRB and STRB.
    // [scratch, srcOut] = LDRB_POST(srcIn, 1)
    // [destOut] = STRB_POST(scratch, destIn, 1)
    for (unsigned i = 0; i < BytesLeft; i++) {
      Register srcOut = MRI.createVirtualRegister(TRC);
      Register destOut = MRI.createVirtualRegister(TRC);
      Register scratch = MRI.createVirtualRegister(TRC);
      emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
                 IsThumb1, IsThumb2);
      emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
                 IsThumb1, IsThumb2);
      srcIn = srcOut;
      destIn = destOut;
    }
    MI.eraseFromParent(); // The instruction is gone now.
    return BB;
  }

  // Expand the pseudo op to a loop.
  // thisMBB:
  //   ...
  //   movw varEnd, # --> when movt is available
  //   movt varEnd, #
  //   ldrcp varEnd, idx --> when movt is unavailable
  //   fallthrough --> loopMBB
  // loopMBB:
  //   PHI varPhi, varEnd, varLoop
  //   PHI srcPhi, src, srcLoop
  //   PHI destPhi, dst, destLoop
  //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  //   subs varLoop, varPhi, #UnitSize
  //   bne loopMBB
  //   fallthrough --> exitMBB
  // exitMBB:
  //   epilogue to handle left-over bytes
  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Load an immediate to varEnd.
  Register varEnd = MRI.createVirtualRegister(TRC);
  if (Subtarget->useMovt()) {
    unsigned Vtmp = varEnd;
    if ((LoopSize & 0xFFFF0000) != 0)
      Vtmp = MRI.createVirtualRegister(TRC);
    BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
        .addImm(LoopSize & 0xFFFF)
        .add(predOps(ARMCC::AL));

    if ((LoopSize & 0xFFFF0000) != 0)
      BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
          .addReg(Vtmp)
          .addImm(LoopSize >> 16)
          .add(predOps(ARMCC::AL));
  } else {
    MachineConstantPool *ConstantPool = MF->getConstantPool();
    Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
    const Constant *C = ConstantInt::get(Int32Ty, LoopSize);

    // MachineConstantPool wants an explicit alignment.
    Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
    unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
    MachineMemOperand *CPMMO =
        MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                                 MachineMemOperand::MOLoad, 4, Align(4));

    if (IsThumb)
      BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
          .addReg(varEnd, RegState::Define)
          .addConstantPoolIndex(Idx)
          .add(predOps(ARMCC::AL))
          .addMemOperand(CPMMO);
    else
      BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
          .addReg(varEnd, RegState::Define)
          .addConstantPoolIndex(Idx)
          .addImm(0)
          .add(predOps(ARMCC::AL))
          .addMemOperand(CPMMO);
  }
  BB->addSuccessor(loopMBB);

  // Generate the loop body:
  //   varPhi = PHI(varLoop, varEnd)
  //   srcPhi = PHI(srcLoop, src)
  //   destPhi = PHI(destLoop, dst)
  MachineBasicBlock *entryBB = BB;
  BB = loopMBB;
  Register varLoop = MRI.createVirtualRegister(TRC);
  Register varPhi = MRI.createVirtualRegister(TRC);
  Register srcLoop = MRI.createVirtualRegister(TRC);
  Register srcPhi = MRI.createVirtualRegister(TRC);
  Register destLoop = MRI.createVirtualRegister(TRC);
  Register destPhi = MRI.createVirtualRegister(TRC);

  BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
    .addReg(varLoop).addMBB(loopMBB)
    .addReg(varEnd).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
    .addReg(srcLoop).addMBB(loopMBB)
    .addReg(src).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
    .addReg(destLoop).addMBB(loopMBB)
    .addReg(dest).addMBB(entryBB);

  //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
  emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
             IsThumb1, IsThumb2);
  emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
             IsThumb1, IsThumb2);

  // Decrement loop variable by UnitSize.
  if (IsThumb1) {
    BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
        .add(t1CondCodeOp())
        .addReg(varPhi)
        .addImm(UnitSize)
        .add(predOps(ARMCC::AL));
  } else {
    MachineInstrBuilder MIB =
        BuildMI(*BB, BB->end(), dl,
                TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
    MIB.addReg(varPhi)
        .addImm(UnitSize)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
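    // condCodeOp() left the optional cc_out operand (operand 5) unset; point
    // it at CPSR and mark it as a def so the conditional branch below can
    // read the flags.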
    MIB->getOperand(5).setReg(ARM::CPSR);
    MIB->getOperand(5).setIsDef(true);
  }
  BuildMI(*BB, BB->end(), dl,
          TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
      .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);

  // loopMBB can loop back to loopMBB or fall through to exitMBB.
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // Add epilogue to handle BytesLeft.
  BB = exitMBB;
  auto StartOfExit = exitMBB->begin();

  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  unsigned srcIn = srcLoop;
  unsigned destIn = destLoop;
  for (unsigned i = 0; i < BytesLeft; i++) {
    Register srcOut = MRI.createVirtualRegister(TRC);
    Register destOut = MRI.createVirtualRegister(TRC);
    Register scratch = MRI.createVirtualRegister(TRC);
    emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
               IsThumb1, IsThumb2);
    emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
               IsThumb1, IsThumb2);
    srcIn = srcOut;
    destIn = destOut;
  }

  MI.eraseFromParent(); // The instruction is gone now.
  return BB;
}

MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  const TargetMachine &TM = getTargetMachine();
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(Subtarget->isTargetWindows() &&
         "__chkstk is only supported on Windows");
  assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");

  // __chkstk takes the number of words to allocate on the stack in R4, and
  // returns the stack adjustment in number of bytes in R4.  This will not
  // clobber any other registers (other than the obvious lr).
  //
  // Although, technically, IP should be considered a register which may be
  // clobbered, the call itself will not touch it.  Windows on ARM is a pure
  // thumb-2 environment, so there is no interworking required.  As a result, we
  // do not expect a veneer to be emitted by the linker, clobbering IP.
  //
  // Each module receives its own copy of __chkstk, so no import thunk is
  // required, again, ensuring that IP is not clobbered.
  //
  // Finally, although some linkers may theoretically provide a trampoline for
  // out of range calls (which is quite common due to a 32M range limitation of
  // branches for Thumb), we can generate the long-call version via
  // -mcmodel=large, alleviating the need for the trampoline which may clobber
  // IP.

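  // For the small/medium/kernel code models a direct BL reaches __chkstk; the
  // large code model materializes the symbol's address and calls through BLX.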
  switch (TM.getCodeModel()) {
  case CodeModel::Tiny:
    llvm_unreachable("Tiny code model not available on ARM.");
  case CodeModel::Small:
  case CodeModel::Medium:
  case CodeModel::Kernel:
    BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
        .add(predOps(ARMCC::AL))
        .addExternalSymbol("__chkstk")
        .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
        .addReg(ARM::R4, RegState::Implicit | RegState::Define)
        .addReg(ARM::R12,
                RegState::Implicit | RegState::Define | RegState::Dead)
        .addReg(ARM::CPSR,
                RegState::Implicit | RegState::Define | RegState::Dead);
    break;
  case CodeModel::Large: {
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    Register Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);

    BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
      .addExternalSymbol("__chkstk");
    BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
        .add(predOps(ARMCC::AL))
        .addReg(Reg, RegState::Kill)
        .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
        .addReg(ARM::R4, RegState::Implicit | RegState::Define)
        .addReg(ARM::R12,
                RegState::Implicit | RegState::Define | RegState::Dead)
        .addReg(ARM::CPSR,
                RegState::Implicit | RegState::Define | RegState::Dead);
    break;
  }
  }

  BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
      .addReg(ARM::SP, RegState::Kill)
      .addReg(ARM::R4, RegState::Kill)
      .setMIFlags(MachineInstr::FrameSetup)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
  MF->insert(++MBB->getIterator(), ContBB);
  ContBB->splice(ContBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  ContBB->transferSuccessorsAndUpdatePHIs(MBB);
  MBB->addSuccessor(ContBB);

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0));
  MF->push_back(TrapBB);
  MBB->addSuccessor(TrapBB);

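  // Compare the divisor (operand 0) against zero and branch to the trap block
  // if it is zero; otherwise execution falls through into ContBB.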
  BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
      .addReg(MI.getOperand(0).getReg())
      .addImm(0)
      .add(predOps(ARMCC::AL));
  BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::EQ)
      .addReg(ARM::CPSR);

  MI.eraseFromParent();
  return ContBB;
}

// The CPSR operand of SelectItr might be missing a kill marker
// because there were multiple uses of CPSR, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr,
                                   MachineBasicBlock* BB,
                                   const TargetRegisterInfo* TRI) {
  // Scan forward through BB for a use/def of CPSR.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(ARM::CPSR))
      return false;
    if (mi.definesRegister(ARM::CPSR))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CPSR is live into a
  // successor.
  if (miI == BB->end()) {
    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
                                          sEnd = BB->succ_end();
         sItr != sEnd; ++sItr) {
      MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(ARM::CPSR))
        return false;
    }
  }

  // We found a def, or hit the end of the basic block and CPSR wasn't live
  // out. SelectMI should have a kill flag on CPSR.
  SelectItr->addRegisterKilled(ARM::CPSR, TRI);
  return true;
}

MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  bool isThumb2 = Subtarget->isThumb2();
  switch (MI.getOpcode()) {
  default: {
    MI.print(errs());
    llvm_unreachable("Unexpected instr type to insert");
  }

  // Thumb1 post-indexed loads are really just single-register LDMs.
  case ARM::tLDR_postidx: {
    MachineOperand Def(MI.getOperand(1));
    BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
        .add(Def)  // Rn_wb
        .add(MI.getOperand(2))  // Rn
        .add(MI.getOperand(3))  // PredImm
        .add(MI.getOperand(4))  // PredReg
        .add(MI.getOperand(0))  // Rt
        .cloneMemRefs(MI);
    MI.eraseFromParent();
    return BB;
  }

  // The Thumb2 pre-indexed stores have the same MI operands, they just
  // define them differently in the .td files from the isel patterns, so
  // they need pseudos.
  case ARM::t2STR_preidx:
    MI.setDesc(TII->get(ARM::t2STR_PRE));
    return BB;
  case ARM::t2STRB_preidx:
    MI.setDesc(TII->get(ARM::t2STRB_PRE));
    return BB;
  case ARM::t2STRH_preidx:
    MI.setDesc(TII->get(ARM::t2STRH_PRE));
    return BB;

  case ARM::STRi_preidx:
  case ARM::STRBi_preidx: {
    unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
                                                         : ARM::STRB_PRE_IMM;
    // Decode the offset.
    unsigned Offset = MI.getOperand(4).getImm();
    bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
    Offset = ARM_AM::getAM2Offset(Offset);
    if (isSub)
      Offset = -Offset;

    MachineMemOperand *MMO = *MI.memoperands_begin();
    BuildMI(*BB, MI, dl, TII->get(NewOpc))
        .add(MI.getOperand(0)) // Rn_wb
        .add(MI.getOperand(1)) // Rt
        .add(MI.getOperand(2)) // Rn
        .addImm(Offset)        // offset (skip GPR==zero_reg)
        .add(MI.getOperand(5)) // pred
        .add(MI.getOperand(6))
        .addMemOperand(MMO);
    MI.eraseFromParent();
    return BB;
  }
  case ARM::STRr_preidx:
  case ARM::STRBr_preidx:
  case ARM::STRH_preidx: {
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
    case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
    case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
    }
    MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
    for (unsigned i = 0; i < MI.getNumOperands(); ++i)
      MIB.add(MI.getOperand(i));
    MI.eraseFromParent();
    return BB;
  }

  case ARM::tMOVCCr_pseudo: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator It = ++BB->getIterator();

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB  = BB;
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Check whether CPSR is live past the tMOVCCr_pseudo.
    const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
    if (!MI.killsRegister(ARM::CPSR) &&
        !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) {
      copy0MBB->addLiveIn(ARM::CPSR);
      sinkMBB->addLiveIn(ARM::CPSR);
    }

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    BuildMI(BB, dl, TII->get(ARM::tBcc))
        .addMBB(sinkMBB)
        .addImm(MI.getOperand(3).getImm())
        .addReg(MI.getOperand(4).getReg());

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
        .addReg(MI.getOperand(1).getReg())
        .addMBB(copy0MBB)
        .addReg(MI.getOperand(2).getReg())
        .addMBB(thisMBB);

    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::BCCi64:
  case ARM::BCCZi64: {
    // If there is an unconditional branch to the other successor, remove it.
    BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());

    // Compare both parts that make up the double comparison separately for
    // equality.
    bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;

    Register LHS1 = MI.getOperand(1).getReg();
    Register LHS2 = MI.getOperand(2).getReg();
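    // The second CMP below is predicated on EQ from the first, so CPSR reads
    // EQ only when both 32-bit halves compare equal.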
    if (RHSisZero) {
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
          .addReg(LHS1)
          .addImm(0)
          .add(predOps(ARMCC::AL));
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
        .addReg(LHS2).addImm(0)
        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
    } else {
      Register RHS1 = MI.getOperand(3).getReg();
      Register RHS2 = MI.getOperand(4).getReg();
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
          .addReg(LHS1)
          .addReg(RHS1)
          .add(predOps(ARMCC::AL));
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
        .addReg(LHS2).addReg(RHS2)
        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
    }

    MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
    MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
    if (MI.getOperand(0).getImm() == ARMCC::NE)
      std::swap(destMBB, exitMBB);

    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
      .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
    if (isThumb2)
      BuildMI(BB, dl, TII->get(ARM::t2B))
          .addMBB(exitMBB)
          .add(predOps(ARMCC::AL));
    else
      BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);

    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return BB;

  case ARM::Int_eh_sjlj_setup_dispatch:
    EmitSjLjDispatchBlock(MI, BB);
    return BB;

  case ARM::ABS:
  case ARM::t2ABS: {
    // To insert an ABS instruction, we have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // source vreg to test against 0, the destination vreg to set,
    // the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    // It transforms
    //     V1 = ABS V0
    // into
    //     V2 = MOVS V0
    //     BCC                      (branch to SinkBB if V0 >= 0)
    //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
    //     SinkBB: V1 = PHI(V2, V3)
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator BBI = ++BB->getIterator();
    MachineFunction *Fn = BB->getParent();
    MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *SinkBB  = Fn->CreateMachineBasicBlock(LLVM_BB);
    Fn->insert(BBI, RSBBB);
    Fn->insert(BBI, SinkBB);

    Register ABSSrcReg = MI.getOperand(1).getReg();
    Register ABSDstReg = MI.getOperand(0).getReg();
    bool ABSSrcKill = MI.getOperand(1).isKill();
    bool isThumb2 = Subtarget->isThumb2();
    MachineRegisterInfo &MRI = Fn->getRegInfo();
    // In Thumb mode S must not be specified if source register is the SP or
    // PC and if destination register is the SP, so restrict register class
    Register NewRsbDstReg = MRI.createVirtualRegister(
        isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    SinkBB->splice(SinkBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
    SinkBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(RSBBB);
    BB->addSuccessor(SinkBB);

    // fall through to SinkMBB
    RSBBB->addSuccessor(SinkBB);

    // insert a cmp at the end of BB
    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
        .addReg(ABSSrcReg)
        .addImm(0)
        .add(predOps(ARMCC::AL));

    // insert a bcc with opposite CC to ARMCC::MI at the end of BB
    BuildMI(BB, dl,
      TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
      .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);

    // insert rsbri in RSBBB
    // Note: BCC and rsbri will be converted into predicated rsbmi
    // by if-conversion pass
    BuildMI(*RSBBB, RSBBB->begin(), dl,
            TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
        .addReg(ABSSrcReg, ABSSrcKill ? RegState::Kill : 0)
        .addImm(0)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    // insert PHI in SinkBB,
    // reuse ABSDstReg to not change uses of ABS instruction
    BuildMI(*SinkBB, SinkBB->begin(), dl,
      TII->get(ARM::PHI), ABSDstReg)
      .addReg(NewRsbDstReg).addMBB(RSBBB)
      .addReg(ABSSrcReg).addMBB(BB);

    // remove ABS instruction
    MI.eraseFromParent();

    // return last added BB
    return SinkBB;
  }
  case ARM::COPY_STRUCT_BYVAL_I32:
    ++NumLoopByVals;
    return EmitStructByval(MI, BB);
  case ARM::WIN__CHKSTK:
    return EmitLowered__chkstk(MI, BB);
  case ARM::WIN__DBZCHK:
    return EmitLowered__dbzchk(MI, BB);
  }
}

/// Attaches vregs to MEMCPY that it will use as scratch registers
/// when it is expanded into LDM/STM. This is done as a post-isel lowering
/// instead of as a custom inserter because we need the use list from the SDNode.
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
                                    MachineInstr &MI, const SDNode *Node) {
  bool isThumb1 = Subtarget->isThumb1Only();

  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);

  // If the new dst/src is unused mark it as dead.
  if (!Node->hasAnyUseOfValue(0)) {
    MI.getOperand(0).setIsDead(true);
  }
  if (!Node->hasAnyUseOfValue(1)) {
    MI.getOperand(1).setIsDead(true);
  }

  // The MEMCPY both defines and kills the scratch registers.
  for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
    Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
                                                         : &ARM::GPRRegClass);
    MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
  }
}

void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                      SDNode *Node) const {
  if (MI.getOpcode() == ARM::MEMCPY) {
    attachMEMCPYScratchRegs(Subtarget, MI, Node);
    return;
  }

  const MCInstrDesc *MCID = &MI.getDesc();
  // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
  // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
  // operand is still set to noreg. If needed, set the optional operand's
  // register to CPSR, and remove the redundant implicit def.
  //
  // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).

  // Rename pseudo opcodes.
  unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
  unsigned ccOutIdx;
  if (NewOpc) {
    const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
    MCID = &TII->get(NewOpc);

    assert(MCID->getNumOperands() ==
           MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
        && "converted opcode should be the same except for cc_out"
           " (and, on Thumb1, pred)");

    MI.setDesc(*MCID);

    // Add the optional cc_out operand
    MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));

    // On Thumb1, move all input operands to the end, then add the predicate
    if (Subtarget->isThumb1Only()) {
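      // Thumb1 encodings place cc_out immediately after the destination, so
      // rotate the source operands behind the cc_out we just appended.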
      for (unsigned c = MCID->getNumOperands() - 4; c--;) {
        MI.addOperand(MI.getOperand(1));
        MI.RemoveOperand(1);
      }

      // Restore the ties
      for (unsigned i = MI.getNumOperands(); i--;) {
        const MachineOperand& op = MI.getOperand(i);
        if (op.isReg() && op.isUse()) {
          int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
          if (DefIdx != -1)
            MI.tieOperands(DefIdx, i);
        }
      }

      MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
      MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
      ccOutIdx = 1;
    } else
      ccOutIdx = MCID->getNumOperands() - 1;
  } else
    ccOutIdx = MCID->getNumOperands() - 1;

  // Any ARM instruction that sets the 's' bit should specify an optional
  // "cc_out" operand in the last operand position.
  if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
  // since we already have an optional CPSR def.
  bool definesCPSR = false;
  bool deadCPSR = false;
  for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
       ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      definesCPSR = true;
      if (MO.isDead())
        deadCPSR = true;
      MI.RemoveOperand(i);
      break;
    }
  }
  if (!definesCPSR) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
  if (deadCPSR) {
    assert(!MI.getOperand(ccOutIdx).getReg() &&
           "expect uninitialized optional cc_out operand");
    // Thumb1 instructions must have the S bit even if the CPSR is dead.
    if (!Subtarget->isThumb1Only())
      return;
  }

  // If this instruction was defined with an optional CPSR def and its dag node
  // had a live implicit CPSR def, then activate the optional CPSR def.
  MachineOperand &MO = MI.getOperand(ccOutIdx);
  MO.setReg(ARM::CPSR);
  MO.setIsDef(true);
}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

// Helper function that checks if N is a null or all ones constant.
static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
  return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
}

// Return true if N is conditionally 0 or all ones.
// Detects these expressions where cc is an i1 value:
//
//   (select cc 0, y)   [AllOnes=0]
//   (select cc y, 0)   [AllOnes=0]
//   (zext cc)          [AllOnes=0]
//   (sext cc)          [AllOnes=0/1]
//   (select cc -1, y)  [AllOnes=1]
//   (select cc y, -1)  [AllOnes=1]
//
// Invert is set when N is the null/all-ones constant for the CC == false case.
// OtherOp is set to the alternative value of N.
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
                                       SDValue &CC, bool &Invert,
                                       SDValue &OtherOp,
                                       SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: return false;
  case ISD::SELECT: {
    CC = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    if (isZeroOrAllOnes(N1, AllOnes)) {
      Invert = false;
      OtherOp = N2;
      return true;
    }
    if (isZeroOrAllOnes(N2, AllOnes)) {
      Invert = true;
      OtherOp = N1;
      return true;
    }
    return false;
  }
  case ISD::ZERO_EXTEND:
    // (zext cc) can never be the all ones value.
    if (AllOnes)
      return false;
    LLVM_FALLTHROUGH;
  case ISD::SIGN_EXTEND: {
    SDLoc dl(N);
    EVT VT = N->getValueType(0);
    CC = N->getOperand(0);
    if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC)
      return false;
    Invert = !AllOnes;
    if (AllOnes)
      // When looking for an AllOnes constant, N is an sext, and the 'other'
      // value is 0.
      OtherOp = DAG.getConstant(0, dl, VT);
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      // When looking for a 0 constant, N can be zext or sext.
      OtherOp = DAG.getConstant(1, dl, VT);
    else
      OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
                                VT);
    return true;
  }
  }
}

// Combine a constant select operand into its use:
//
//   (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
//   (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
//   (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
//   (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
//   (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
//
// The transform is rejected if the select doesn't have a constant operand that
// is null, or all ones when AllOnes is set.
//
// Also recognize sext/zext from i1:
//
//   (add (zext cc), x) -> (select cc (add x, 1), x)
//   (add (sext cc), x) -> (select cc (add x, -1), x)
//
// These transformations eventually create predicated instructions.
//
// @param N       The node to transform.
// @param Slct    The N operand that is a select.
// @param OtherOp The other N operand (x above).
// @param DCI     Context.
// @param AllOnes Require the select constant to be all ones instead of null.
// @returns The new node, or SDValue() on failure.
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI,
                            bool AllOnes = false) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue NonConstantVal;
  SDValue CCOp;
  bool SwapSelectOps;
  if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
                                  NonConstantVal, DAG))
    return SDValue();

  // Slct is now known to be the desired identity constant when CC is true.
  SDValue TrueVal = OtherOp;
  SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
                                 OtherOp, NonConstantVal);
  // Unless SwapSelectOps says CC should be false.
  if (SwapSelectOps)
    std::swap(TrueVal, FalseVal);

  return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
                     CCOp, TrueVal, FalseVal);
}

// Attempt combineSelectAndUse on each operand of a commutative operator N.
static
SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (N0.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
      return Result;
  if (N1.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
      return Result;
  return SDValue();
}

static bool IsVUZPShuffleNode(SDNode *N) {
  // VUZP shuffle node.
  if (N->getOpcode() == ARMISD::VUZP)
    return true;

  // "VUZP" on i32 is an alias for VTRN.
  if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
    return true;

  return false;
}

static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  // Look for ADD(VUZP.0, VUZP.1).
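  // The two results of a VUZP hold the even and odd lanes of the concatenation
  // of its inputs, so adding them element-wise is a pairwise add (vpadd) of
  // that concatenation.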
  if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
      N0 == N1)
    return SDValue();

  // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
  if (!N->getValueType(0).is64BitVector())
    return SDValue();

  // Generate vpadd.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDNode *Unzip = N0.getNode();
  EVT VT = N->getValueType(0);

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));
  Ops.push_back(Unzip->getOperand(0));
  Ops.push_back(Unzip->getOperand(1));

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
}

static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // Check for two extended operands.
  if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
        N1.getOpcode() == ISD::SIGN_EXTEND) &&
      !(N0.getOpcode() == ISD::ZERO_EXTEND &&
        N1.getOpcode() == ISD::ZERO_EXTEND))
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N10 = N1.getOperand(0);

  // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
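  // Extending and adding the even and odd lanes of the concatenated VUZP
  // inputs is a widening pairwise add, i.e. vpaddl of the concatenation.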
  if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
      N00 == N10)
    return SDValue();

  // We only recognize Q register paddl here; this can't be reached until
  // after type legalization.
  if (!N00.getValueType().is64BitVector() ||
      !N0.getValueType().is128BitVector())
    return SDValue();

  // Generate vpaddl.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  SmallVector<SDValue, 8> Ops;
  // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
  unsigned Opcode;
  if (N0.getOpcode() == ISD::SIGN_EXTEND)
    Opcode = Intrinsic::arm_neon_vpaddls;
  else
    Opcode = Intrinsic::arm_neon_vpaddlu;
  Ops.push_back(DAG.getConstant(Opcode, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));
  EVT ElemTy = N00.getValueType().getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
                               N00.getOperand(0), N00.getOperand(1));
  Ops.push_back(Concat);

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
}

// FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
// an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
// much easier to match.
static SDValue
AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {
  // Only perform this optimization after legalization and when NEON is
  // available. We also expect both operands to be BUILD_VECTORs.
  if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
      || N0.getOpcode() != ISD::BUILD_VECTOR
      || N1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // Check output type since VPADDL operand elements can only be 8, 16, or 32.
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Check that the vector operands are of the right form.
  // N0 and N1 are BUILD_VECTOR nodes whose operands are all EXTRACT_VECTOR_ELT
  // nodes, one per element of the formed vector.
  // Each EXTRACT_VECTOR_ELT should read the same input vector and use an even
  // or odd index such that we have a pairwise add pattern.
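  // For example (ignoring intermediate extends or truncates), for a v4i16
  // result taken pairwise from a v8i8 input vector v we expect roughly:
  //   N0 = (build_vector (extract_elt v, 0), (extract_elt v, 2),
  //                      (extract_elt v, 4), (extract_elt v, 6))
  //   N1 = (build_vector (extract_elt v, 1), (extract_elt v, 3),
  //                      (extract_elt v, 5), (extract_elt v, 7))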

  // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
  if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Vec = N0->getOperand(0)->getOperand(0);
  SDNode *V = Vec.getNode();
  unsigned nextIndex = 0;

  // For each operand of the ADD (both are BUILD_VECTORs), check that each of
  // its operands is an EXTRACT_VECTOR_ELT of the same vector with the
  // appropriate index.
  for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
    if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
        && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

      SDValue ExtVec0 = N0->getOperand(i);
      SDValue ExtVec1 = N1->getOperand(i);

      // The first operand is the vector; verify it's the same one.
      if (V != ExtVec0->getOperand(0).getNode() ||
          V != ExtVec1->getOperand(0).getNode())
        return SDValue();

      // The second operand is the index constant; verify it's correct.
      ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
      ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));

      // The indices should be adjacent: N0 takes the even index and N1 the
      // odd one.
      if (!C0 || !C1 || C0->getZExtValue() != nextIndex
          || C1->getZExtValue() != nextIndex+1)
        return SDValue();

      // Increment index.
      nextIndex+=2;
    } else
      return SDValue();
  }

  // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
  // we're using the entire input vector, otherwise there's a size/legality
  // mismatch somewhere.
  if (nextIndex != Vec.getValueType().getVectorNumElements() ||
      Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
    return SDValue();

  // Create VPADDL node.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  SDLoc dl(N);

  // Build operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));

  // Input is the vector.
  Ops.push_back(Vec);

  // Get widened type and narrowed type.
  MVT widenType;
  unsigned numElem = VT.getVectorNumElements();

  EVT inputLaneType = Vec.getValueType().getVectorElementType();
  switch (inputLaneType.getSimpleVT().SimpleTy) {
    case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
    case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
    case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
    default:
      llvm_unreachable("Invalid vector element type for padd optimization.");
  }

  SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
  unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
  return DAG.getNode(ExtOp, dl, VT, tmp);
}

static SDValue findMUL_LOHI(SDValue V) {
  if (V->getOpcode() == ISD::UMUL_LOHI ||
      V->getOpcode() == ISD::SMUL_LOHI)
    return V;
  return SDValue();
}

static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasBaseDSP())
    return SDValue();

  // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
  // accumulate the product into a 64-bit value. The 16-bit values will be
  // sign-extended somehow or SRA'd into 32-bit values:
  // (addc (adde (mul 16bit, 16bit), lo), hi)
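  // More precisely, we match (addc (mul a, b), lo) for the low half and
  // (adde (sra (mul a, b), 31), hi) for the high half, where each of a and b
  // is either a sign-extended 16-bit value or an sra by 16.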
  SDValue Mul = AddcNode->getOperand(0);
  SDValue Lo = AddcNode->getOperand(1);
  if (Mul.getOpcode() != ISD::MUL) {
    Lo = AddcNode->getOperand(0);
    Mul = AddcNode->getOperand(1);
    if (Mul.getOpcode() != ISD::MUL)
      return SDValue();
  }

  SDValue SRA = AddeNode->getOperand(0);
  SDValue Hi = AddeNode->getOperand(1);
  if (SRA.getOpcode() != ISD::SRA) {
    SRA = AddeNode->getOperand(1);
    Hi = AddeNode->getOperand(0);
    if (SRA.getOpcode() != ISD::SRA)
      return SDValue();
  }
  if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
    if (Const->getZExtValue() != 31)
      return SDValue();
  } else
    return SDValue();

  if (SRA.getOperand(0) != Mul)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(AddcNode);
  unsigned Opcode = 0;
  SDValue Op0;
  SDValue Op1;

  if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
    Opcode = ARMISD::SMLALBB;
    Op0 = Mul.getOperand(0);
    Op1 = Mul.getOperand(1);
  } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
    Opcode = ARMISD::SMLALBT;
    Op0 = Mul.getOperand(0);
    Op1 = Mul.getOperand(1).getOperand(0);
  } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
    Opcode = ARMISD::SMLALTB;
    Op0 = Mul.getOperand(0).getOperand(0);
    Op1 = Mul.getOperand(1);
  } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
    Opcode = ARMISD::SMLALTT;
    Op0 = Mul->getOperand(0).getOperand(0);
    Op1 = Mul->getOperand(1).getOperand(0);
  }

  if (!Op0 || !Op1)
    return SDValue();

  SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              Op0, Op1, Lo, Hi);
  // Replace the ADD nodes' uses with the SMLAL node's values.
  SDValue HiMLALResult(SMLAL.getNode(), 1);
  SDValue LoMLALResult(SMLAL.getNode(), 0);

  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);

  // Return original node to notify the driver to stop replacing.
  SDValue resNode(AddcNode, 0);
  return resNode;
}

static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // Look for multiply add opportunities.
  // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
  // each add nodes consumes a value from ISD::UMUL_LOHI and there is
  // a glue link from the first add to the second add.
  // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
  // a S/UMLAL instruction.
  //                  UMUL_LOHI
  //                 / :lo    \ :hi
  //                V          \          [no multiline comment]
  //    loAdd ->  ADDC         |
  //                 \ :carry /
  //                  V      V
  //                    ADDE   <- hiAdd
  //
  // In the special case where only the higher part of a signed result is used
  // and the add to the low part of the result of ISD::SMUL_LOHI adds or
  // subtracts a constant with the exact value of 0x80000000, we recognize we
  // are dealing with a "rounded multiply and add" (or subtract) and transform
  // it into either an ARMISD::SMMLAR or ARMISD::SMMLSR respectively.
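  //
  // In the generic case the ADDC/ADDE pair is replaced by a single
  // (UMLAL/SMLAL a, b, loAdd, hiAdd) node whose lo and hi results feed the
  // users of the ADDC and ADDE respectively.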

  assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
          AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
         "Expect an ADDE or SUBE");

  assert(AddeSubeNode->getNumOperands() == 3 &&
         AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
         "ADDE node has the wrong inputs");

  // Check that we are chained to the right ADDC or SUBC node.
  SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode();
  if ((AddeSubeNode->getOpcode() == ARMISD::ADDE &&
       AddcSubcNode->getOpcode() != ARMISD::ADDC) ||
      (AddeSubeNode->getOpcode() == ARMISD::SUBE &&
       AddcSubcNode->getOpcode() != ARMISD::SUBC))
    return SDValue();

  SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0);
  SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1);

  // Check if the two operands are from the same mul_lohi node.
  if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode())
    return SDValue();

  assert(AddcSubcNode->getNumValues() == 2 &&
         AddcSubcNode->getValueType(0) == MVT::i32 &&
         "Expect ADDC with two result values. First: i32");

  // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
  // may be an SMLAL which multiplies two 16-bit values.
  if (AddeSubeNode->getOpcode() == ARMISD::ADDE &&
      AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI &&
      AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI &&
      AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI &&
      AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI)
    return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget);

  // Check for the triangle shape.
  SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0);
  SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1);

  // Make sure that the ADDE/SUBE operands are not coming from the same node.
  if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode())
    return SDValue();

  // Find the MUL_LOHI node walking up ADDE/SUBE's operands.
  bool IsLeftOperandMUL = false;
  SDValue MULOp = findMUL_LOHI(AddeSubeOp0);
  if (MULOp == SDValue())
    MULOp = findMUL_LOHI(AddeSubeOp1);
  else
    IsLeftOperandMUL = true;
  if (MULOp == SDValue())
    return SDValue();

  // Figure out the right opcode.
  unsigned Opc = MULOp->getOpcode();
  unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;

  // Figure out the high and low input values to the MLAL node.
  SDValue *HiAddSub = nullptr;
  SDValue *LoMul = nullptr;
  SDValue *LowAddSub = nullptr;

  // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI.
  if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1)))
    return SDValue();

  if (IsLeftOperandMUL)
    HiAddSub = &AddeSubeOp1;
  else
    HiAddSub = &AddeSubeOp0;

  // Ensure that LoMul and LowAddSub are taken from correct ISD::SMUL_LOHI node
  // whose low result is fed to the ADDC/SUBC we are checking.

  if (AddcSubcOp0 == MULOp.getValue(0)) {
    LoMul = &AddcSubcOp0;
    LowAddSub = &AddcSubcOp1;
  }
  if (AddcSubcOp1 == MULOp.getValue(0)) {
    LoMul = &AddcSubcOp1;
    LowAddSub = &AddcSubcOp0;
  }

  if (!LoMul)
    return SDValue();

  // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC
  // the replacement below will create a cycle.
  if (AddcSubcNode == HiAddSub->getNode() ||
      AddcSubcNode->isPredecessorOf(HiAddSub->getNode()))
    return SDValue();

  // Create the merged node.
  SelectionDAG &DAG = DCI.DAG;

  // Start building operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(LoMul->getOperand(0));
  Ops.push_back(LoMul->getOperand(1));

  // Check whether we can use SMMLAR, SMMLSR or SMMULR instead.  For this to be
  // the case, we must be doing signed multiplication and only use the higher
  // part of the result of the MLAL; furthermore, LowAddSub must be a constant
  // addition or subtraction with the value 0x80000000.
  if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
      FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) &&
      LowAddSub->getNode()->getOpcode() == ISD::Constant &&
      static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() ==
          0x80000000) {
    Ops.push_back(*HiAddSub);
    if (AddcSubcNode->getOpcode() == ARMISD::SUBC) {
      FinalOpc = ARMISD::SMMLSR;
    } else {
      FinalOpc = ARMISD::SMMLAR;
    }
    SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops);
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode);

    return SDValue(AddeSubeNode, 0);
  } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC)
    // SMMLS is generated during instruction selection and the rest of this
    // function can not handle the case where AddcSubcNode is a SUBC.
    return SDValue();

  // Finish building the operand list for {U/S}MLAL
  Ops.push_back(*LowAddSub);
  Ops.push_back(*HiAddSub);

  SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

  // Replace the ADD nodes' uses with the MLAL node's values.
  SDValue HiMLALResult(MLALNode.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult);

  SDValue LoMLALResult(MLALNode.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult);

  // Return original node to notify the driver to stop replacing.
  return SDValue(AddeSubeNode, 0);
}

static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // UMAAL is similar to UMLAL except that it adds two unsigned values.
  // While trying to combine for the other MLAL nodes, first search for the
  // chance to use UMAAL. Check if Addc uses a node which has already
  // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
  // as the addend, and it's handled in PerformUMLALCombine.
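  //
  // Roughly, the ADDC here adds another 32-bit value to the low result of a
  // (UMLAL a, b, c, 0) and the ADDE propagates the carry into the high result,
  // giving a*b + c + AddHi, which is exactly what (UMAAL a, b, c, AddHi)
  // computes.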

  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);

  // Check that we have a glued ADDC node.
  SDNode* AddcNode = AddeNode->getOperand(2).getNode();
  if (AddcNode->getOpcode() != ARMISD::ADDC)
    return SDValue();

  // Find the converted UMAAL or quit if it doesn't exist.
  SDNode *UmlalNode = nullptr;
  SDValue AddHi;
  if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
    UmlalNode = AddcNode->getOperand(0).getNode();
    AddHi = AddcNode->getOperand(1);
  } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
    UmlalNode = AddcNode->getOperand(1).getNode();
    AddHi = AddcNode->getOperand(0);
  } else {
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
  }

  // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
  // the ADDC as well as Zero.
  if (!isNullConstant(UmlalNode->getOperand(3)))
    return SDValue();

  if ((isNullConstant(AddeNode->getOperand(0)) &&
       AddeNode->getOperand(1).getNode() == UmlalNode) ||
      (AddeNode->getOperand(0).getNode() == UmlalNode &&
       isNullConstant(AddeNode->getOperand(1)))) {
    SelectionDAG &DAG = DCI.DAG;
    SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
                      UmlalNode->getOperand(2), AddHi };
    SDValue UMAAL =  DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

    // Replace the ADD nodes' uses with the UMAAL node's values.
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));

    // Return original node to notify the driver to stop replacing.
    return SDValue(AddeNode, 0);
  }
  return SDValue();
}

static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return SDValue();

  // Check that we have a pair of ADDC and ADDE as operands.
  // Both addends of the ADDE must be zero.
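  // In that pattern the UMLAL's 64-bit addend is the zero-extended sum of two
  // 32-bit values: the ADDC provides the low word of x + y and the ADDE turns
  // its carry into the high word, so UMAAL can compute a*b + x + y directly.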
  SDNode* AddcNode = N->getOperand(2).getNode();
  SDNode* AddeNode = N->getOperand(3).getNode();
  if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
      (AddeNode->getOpcode() == ARMISD::ADDE) &&
      isNullConstant(AddeNode->getOperand(0)) &&
      isNullConstant(AddeNode->getOperand(1)) &&
      (AddeNode->getOperand(2).getNode() == AddcNode))
    return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
                       DAG.getVTList(MVT::i32, MVT::i32),
                       {N->getOperand(0), N->getOperand(1),
                        AddcNode->getOperand(0), AddcNode->getOperand(1)});
  else
    return SDValue();
}

static SDValue PerformAddcSubcCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG(DCI.DAG);

  if (N->getOpcode() == ARMISD::SUBC) {
    // (SUBC (ADDE 0, 0, C), 1) -> C
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    if (LHS->getOpcode() == ARMISD::ADDE &&
        isNullConstant(LHS->getOperand(0)) &&
        isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) {
      return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2));
    }
  }

  if (Subtarget->isThumb1Only()) {
    SDValue RHS = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      int32_t imm = C->getSExtValue();
      if (imm < 0 && imm > std::numeric_limits<int>::min()) {
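        // Negate the immediate and flip ADDC<->SUBC so that the constant can
        // be encoded as a small positive value, e.g. (ADDC x, -4) becomes
        // (SUBC x, 4).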
        SDLoc DL(N);
        RHS = DAG.getConstant(-imm, DL, MVT::i32);
        unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
                                                           : ARMISD::ADDC;
        return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
      }
    }
  }

  return SDValue();
}

static SDValue PerformAddeSubeCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  if (Subtarget->isThumb1Only()) {
    SelectionDAG &DAG = DCI.DAG;
    SDValue RHS = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      int64_t imm = C->getSExtValue();
      if (imm < 0) {
        SDLoc DL(N);

        // The with-carry-in form matches bitwise not instead of the negation.
        // Effectively, the inverse interpretation of the carry flag already
        // accounts for part of the negation.
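        // For example, (ADDE x, -1, carry) becomes (SUBE x, ~(-1), carry) =
        // (SUBE x, 0, carry): x + (-1) + carry == x - 0 - (1 - carry).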
        RHS = DAG.getConstant(~imm, DL, MVT::i32);

        unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
                                                           : ARMISD::ADDE;
        return DAG.getNode(Opcode, DL, N->getVTList(),
                           N->getOperand(0), RHS, N->getOperand(2));
      }
    }
  } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) {
    return AddCombineTo64bitMLAL(N, DCI, Subtarget);
  }
  return SDValue();
}

static SDValue PerformVSELECTCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs).
  //
  // We need to re-implement this optimization here as the implementation in the
  // Target-Independent DAGCombiner does not handle the kind of constant we make
  // (it calls isConstOrConstSplat with AllowTruncation set to false - and for
  // good reason, allowing truncation there would break other targets).
  //
  // Currently, this is only done for MVE, as it's the only target that benefits
  // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL).
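  //
  // Note that the "not" is matched below as (xor cond, 1), where the 1 may be
  // a splat BUILD_VECTOR whose elements were truncated.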
  if (!Subtarget->hasMVEIntegerOps())
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::XOR)
    return SDValue();
  SDValue XOR = N->getOperand(0);

  // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s.
  // It is important to check with truncation allowed as the BUILD_VECTORs we
  // generate in those situations will truncate their operands.
  ConstantSDNode *Const =
      isConstOrConstSplat(XOR->getOperand(1), /*AllowUndefs*/ false,
                          /*AllowTruncation*/ true);
  if (!Const || !Const->isOne())
    return SDValue();

  // Rewrite into vselect(cond, rhs, lhs).
  SDValue Cond = XOR->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  EVT Type = N->getValueType(0);
  return DCI.DAG.getNode(ISD::VSELECT, SDLoc(N), Type, Cond, RHS, LHS);
}

static SDValue PerformABSCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  SDValue res;
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0)))
    return SDValue();

  if (!TLI.expandABS(N, res, DAG))
      return SDValue();

  return res;
}

/// PerformADDECombine - Target-specific dag combine transform from
/// ARMISD::ADDC, ARMISD::ADDE, and ISD::[SU]MUL_LOHI to S/UMLAL, or from
/// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL.
static SDValue PerformADDECombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  // Only ARM and Thumb2 support UMLAL/SMLAL.
  if (Subtarget->isThumb1Only())
    return PerformAddeSubeCombine(N, DCI, Subtarget);

  // Only perform the checks after legalize when the pattern is available.
  if (DCI.isBeforeLegalize()) return SDValue();

  return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
}

/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1.  This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
/// operands.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const ARMSubtarget *Subtarget){
  // Attempt to create vpadd for this add.
  if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
    return Result;

  // Attempt to create vpaddl for this add.
  if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
    return Result;
  if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
                                                      Subtarget))
    return Result;

  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
  if (N0.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
      return Result;
  return SDValue();
}

static SDValue PerformADDVecReduce(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasMVEIntegerOps() || N->getValueType(0) != MVT::i64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // We are looking for an i64 add of a VADDLVx. Due to these being i64's, this
  // will look like:
  //   t1: i32,i32 = ARMISD::VADDLVs x
  //   t2: i64 = build_pair t1, t1:1
  //   t3: i64 = add t2, y
  // We also need to check for sext / zext and commutative adds.
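  // When the pattern matches, the add is folded into the accumulating form of
  // the reduction, e.g. (add (build_pair (VADDLVs x)), y) becomes
  // (build_pair (VADDLVAs y.lo, y.hi, x)).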
  auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA,
                           SDValue NB) {
    if (NB->getOpcode() != ISD::BUILD_PAIR)
      return SDValue();
    SDValue VecRed = NB->getOperand(0);
    if (VecRed->getOpcode() != Opcode || VecRed.getResNo() != 0 ||
        NB->getOperand(1) != SDValue(VecRed.getNode(), 1))
      return SDValue();

    SDLoc dl(N);
    SmallVector<SDValue, 4> Ops;
    Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA,
                                  DCI.DAG.getConstant(0, dl, MVT::i32)));
    Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA,
                                  DCI.DAG.getConstant(1, dl, MVT::i32)));
    for (unsigned i = 0, e = VecRed.getNumOperands(); i < e; i++)
      Ops.push_back(VecRed->getOperand(i));
    SDValue Red = DCI.DAG.getNode(OpcodeA, dl,
                                  DCI.DAG.getVTList({MVT::i32, MVT::i32}), Ops);
    return DCI.DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Red,
                           SDValue(Red.getNode(), 1));
  };

  if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N1, N0))
    return M;
  return SDValue();
}

bool
ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
                                                 CombineLevel Level) const {
  if (Level == BeforeLegalizeTypes)
    return true;

  if (N->getOpcode() != ISD::SHL)
    return true;

  if (Subtarget->isThumb1Only()) {
    // Avoid making expensive immediates by commuting shifts. (This logic
    // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted
    // for free.)
    if (N->getOpcode() != ISD::SHL)
      return true;
    SDValue N1 = N->getOperand(0);
    if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND &&
        N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR)
      return true;
    if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) {
      if (Const->getAPIntValue().ult(256))
        return false;
      if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) &&
          Const->getAPIntValue().sgt(-256))
        return false;
    }
    return true;
  }

  // Turn off commute-with-shift transform after legalization, so it doesn't
  // conflict with PerformSHLSimplify.  (We could try to detect when
  // PerformSHLSimplify would trigger more precisely, but it isn't
  // really necessary.)
  return false;
}

bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {
  if (!Subtarget->isThumb1Only())
    return true;

  if (Level == BeforeLegalizeTypes)
    return true;

  return false;
}

bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  if (!Subtarget->hasNEON()) {
    if (Subtarget->isThumb1Only())
      return VT.getScalarSizeInBits() <= 32;
    return true;
  }
  return VT.isScalarInteger();
}

static SDValue PerformSHLSimplify(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *ST) {
  // Allow the generic combiner to identify potential bswaps.
  if (DCI.isBeforeLegalize())
    return SDValue();

  // DAG combiner will fold:
  // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  // Other code patterns that can also be modified have the following form:
  // b + ((a << 1) | 510)
  // b + ((a << 1) & 510)
  // b + ((a << 1) ^ 510)
  // b + ((a << 1) + 510)

  // Many instructions can perform the shift for free, but it requires both
  // operands to be registers. If c1 << c2 is too large, a mov immediate
  // instruction will be needed. So, unfold back to the original pattern if:
  // - c1 and c2 are small enough that they don't require mov imms.
  // - the user(s) of the node can perform a shl.
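  //
  // For example, (add (shl x, 1), 510) is unfolded back to
  // (shl (add x, 255), 1): 255 is encodable as an immediate and every user of
  // the add can perform the shift itself.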

  // No shifted operands for 16-bit instructions.
  if (ST->isThumb() && ST->isThumb1Only())
    return SDValue();

  // Check that all the users could perform the shl themselves.
  for (auto U : N->uses()) {
    switch(U->getOpcode()) {
    default:
      return SDValue();
    case ISD::SUB:
    case ISD::ADD:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SETCC:
    case ARMISD::CMP:
      // Check that the user isn't already using a constant because there
      // aren't any instructions that support an immediate operand and a
      // shifted operand.
      if (isa<ConstantSDNode>(U->getOperand(0)) ||
          isa<ConstantSDNode>(U->getOperand(1)))
        return SDValue();

      // Check that it's not already using a shift.
      if (U->getOperand(0).getOpcode() == ISD::SHL ||
          U->getOperand(1).getOpcode() == ISD::SHL)
        return SDValue();
      break;
    }
  }

  if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR &&
      N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND)
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::SHL)
    return SDValue();

  SDValue SHL = N->getOperand(0);

  auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
  if (!C1ShlC2 || !C2)
    return SDValue();

  APInt C2Int = C2->getAPIntValue();
  APInt C1Int = C1ShlC2->getAPIntValue();

  // Check that performing a lshr will not lose any information.
  APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(),
                                     C2Int.getBitWidth() - C2->getZExtValue());
  if ((C1Int & Mask) != C1Int)
    return SDValue();

  // Shift the first constant.
  C1Int.lshrInPlace(C2Int);

  // The immediates are encoded as an 8-bit value that can be rotated.
  auto LargeImm = [](const APInt &Imm) {
    unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
    return Imm.getBitWidth() - Zeros > 8;
  };

  if (LargeImm(C1Int) || LargeImm(C2Int))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue X = SHL.getOperand(0);
  SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X,
                              DAG.getConstant(C1Int, dl, MVT::i32));
  // Shift left to compensate for the lshr of C1Int.
  SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1));

  LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
             SHL.dump(); N->dump());
  LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
  return Res;
}


/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
///
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Only works one way, because it needs an immediate operand.
  if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
    return Result;

  if (SDValue Result = PerformADDVecReduce(N, DCI, Subtarget))
    return Result;

  // First try with the default operand order.
  if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
    return Result;

  // If that didn't work, try again with the operands commuted.
  return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
}

/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
///
static SDValue PerformSUBCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
  if (N1.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
      return Result;

  if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(0).isVector())
    return SDValue();

  // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x))
  // so that we can readily pattern match more mve instructions which can use
  // a scalar operand.
  SDValue VDup = N->getOperand(1);
  if (VDup->getOpcode() != ARMISD::VDUP)
    return SDValue();

  SDValue VMov = N->getOperand(0);
  if (VMov->getOpcode() == ISD::BITCAST)
    VMov = VMov->getOperand(0);

  if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(VMov))
    return SDValue();

  SDLoc dl(N);
  SDValue Negate = DCI.DAG.getNode(ISD::SUB, dl, MVT::i32,
                                   DCI.DAG.getConstant(0, dl, MVT::i32),
                                   VDup->getOperand(0));
  return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), Negate);
}

/// PerformVMULCombine
/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
/// special multiplier accumulator forwarding.
///   vmul d3, d0, d2
///   vmla d3, d1, d2
/// is faster than
///   vadd d3, d0, d1
///   vmul d3, d3, d2
//  However, for (A + B) * (A + B),
//    vadd d2, d0, d1
//    vmul d3, d0, d2
//    vmla d3, d1, d2
//  is slower than
//    vadd d2, d0, d1
//    vmul d3, d2, d2
static SDValue PerformVMULCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasVMLxForwarding())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned Opcode = N0.getOpcode();
  if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
      Opcode != ISD::FADD && Opcode != ISD::FSUB) {
    Opcode = N1.getOpcode();
    if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
        Opcode != ISD::FADD && Opcode != ISD::FSUB)
      return SDValue();
    std::swap(N0, N1);
  }

  if (N0 == N1)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  SDValue N00 = N0->getOperand(0);
  SDValue N01 = N0->getOperand(1);
  return DAG.getNode(Opcode, DL, VT,
                     DAG.getNode(ISD::MUL, DL, VT, N00, N1),
                     DAG.getNode(ISD::MUL, DL, VT, N01, N1));
}

static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG,
                                      const ARMSubtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v2i64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  auto IsSignExt = [&](SDValue Op) {
    if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG)
      return SDValue();
    EVT VT = cast<VTSDNode>(Op->getOperand(1))->getVT();
    if (VT.getScalarSizeInBits() == 32)
      return Op->getOperand(0);
    return SDValue();
  };
  auto IsZeroExt = [&](SDValue Op) {
    // Zero extends are a little more awkward. At the point we are matching
    // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask.
    // That might be before or after a bitcast depending on how the and is
    // placed. Because this has to look through bitcasts, it is currently only
    // supported on little-endian subtargets.
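    //
    // For example, on little-endian a zero extend of the even i32 lanes can
    // appear as (v2i64 (bitcast (and x, (v4i32 build_vector -1, 0, -1, 0)))),
    // keeping only the low 32 bits of each i64 lane.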
    if (!Subtarget->isLittle())
      return SDValue();

    SDValue And = Op;
    if (And->getOpcode() == ISD::BITCAST)
      And = And->getOperand(0);
    if (And->getOpcode() != ISD::AND)
      return SDValue();
    SDValue Mask = And->getOperand(1);
    if (Mask->getOpcode() == ISD::BITCAST)
      Mask = Mask->getOperand(0);

    if (Mask->getOpcode() != ISD::BUILD_VECTOR ||
        Mask.getValueType() != MVT::v4i32)
      return SDValue();
    if (isAllOnesConstant(Mask->getOperand(0)) &&
        isNullConstant(Mask->getOperand(1)) &&
        isAllOnesConstant(Mask->getOperand(2)) &&
        isNullConstant(Mask->getOperand(3)))
      return And->getOperand(0);
    return SDValue();
  };

  SDLoc dl(N);
  if (SDValue Op0 = IsSignExt(N0)) {
    if (SDValue Op1 = IsSignExt(N1)) {
      SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0);
      SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1);
      return DAG.getNode(ARMISD::VMULLs, dl, VT, New0a, New1a);
    }
  }
  if (SDValue Op0 = IsZeroExt(N0)) {
    if (SDValue Op1 = IsZeroExt(N1)) {
      SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0);
      SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1);
      return DAG.getNode(ARMISD::VMULLu, dl, VT, New0a, New1a);
    }
  }

  return SDValue();
}

static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;

  EVT VT = N->getValueType(0);
  if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64)
    return PerformMVEVMULLCombine(N, DAG, Subtarget);

  if (Subtarget->isThumb1Only())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (VT.is64BitVector() || VT.is128BitVector())
    return PerformVMULCombine(N, DCI, Subtarget);
  if (VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();

  int64_t MulAmt = C->getSExtValue();
  unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);

  ShiftAmt = ShiftAmt & (32 - 1);
  SDValue V = N->getOperand(0);
  SDLoc DL(N);

  SDValue Res;
  MulAmt >>= ShiftAmt;
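  // For example, MulAmt == 20: ShiftAmt == 2 strips the factor of 4, the
  // remaining 5 == 2^2 + 1 is handled below as (add (shl x, 2), x), and the
  // final shift left by 2 restores the factor of 4.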

  if (MulAmt >= 0) {
    if (isPowerOf2_32(MulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt - 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt + 1), DL,
                                                    MVT::i32)),
                        V);
    } else
      return SDValue();
  } else {
    uint64_t MulAmtAbs = -MulAmt;
    if (isPowerOf2_32(MulAmtAbs + 1)) {
      // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmtAbs - 1)) {
      // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
                                                    MVT::i32)));
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getConstant(0, DL, MVT::i32), Res);
    } else
      return SDValue();
  }

  if (ShiftAmt != 0)
    Res = DAG.getNode(ISD::SHL, DL, VT,
                      Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));

  // Do not add new nodes to DAG combiner worklist.
  DCI.CombineTo(N, Res, false);
  return SDValue();
}

static SDValue CombineANDShift(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {
  // Allow DAGCombine to pattern-match before we touch the canonical form.
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!N1C)
    return SDValue();

  uint32_t C1 = (uint32_t)N1C->getZExtValue();
  // Don't transform uxtb/uxth.
  if (C1 == 255 || C1 == 65535)
    return SDValue();

  SDNode *N0 = N->getOperand(0).getNode();
  if (!N0->hasOneUse())
    return SDValue();

  if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL)
    return SDValue();

  bool LeftShift = N0->getOpcode() == ISD::SHL;

  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  if (!N01C)
    return SDValue();

  uint32_t C2 = (uint32_t)N01C->getZExtValue();
  if (!C2 || C2 >= 32)
    return SDValue();

  // Clear irrelevant bits in the mask.
  if (LeftShift)
    C1 &= (-1U << C2);
  else
    C1 &= (-1U >> C2);

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // We have a pattern of the form "(and (shl x, c2) c1)" or
  // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to
  // transform to a pair of shifts, to save materializing c1.
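  //
  // For example, (and (srl x, 1), 0x3fffffff) becomes (srl (shl x, 1), 2),
  // avoiding the need to materialize the mask constant.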

  // First pattern: right shift, then mask off leading bits.
  // FIXME: Use demanded bits?
  if (!LeftShift && isMask_32(C1)) {
    uint32_t C3 = countLeadingZeros(C1);
    if (C2 < C3) {
      SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C3 - C2, DL, MVT::i32));
      return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // First pattern, reversed: left shift, then mask off trailing bits.
  if (LeftShift && isMask_32(~C1)) {
    uint32_t C3 = countTrailingZeros(C1);
    if (C2 < C3) {
      SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C3 - C2, DL, MVT::i32));
      return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // Second pattern: left shift, then mask off leading bits.
  // FIXME: Use demanded bits?
  if (LeftShift && isShiftedMask_32(C1)) {
    uint32_t Trailing = countTrailingZeros(C1);
    uint32_t C3 = countLeadingZeros(C1);
    if (Trailing == C2 && C2 + C3 < 32) {
      SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C2 + C3, DL, MVT::i32));
      return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                        DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // Second pattern, reversed: right shift, then mask off trailing bits.
  // FIXME: Handle other patterns of known/demanded bits.
  if (!LeftShift && isShiftedMask_32(C1)) {
    uint32_t Leading = countLeadingZeros(C1);
    uint32_t C3 = countTrailingZeros(C1);
    if (Leading == C2 && C2 + C3 < 32) {
      SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C2 + C3, DL, MVT::i32));
      return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // FIXME: Transform "(and (shl x, c2) c1)" ->
  // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than
  // c1.
  return SDValue();
}

static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VBIC
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v4i1 ||
      VT == MVT::v8i1 || VT == MVT::v16i1)
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
        SplatBitSize == 64) {
      EVT VbicVT;
      SDValue Val = isVMOVModifiedImm((~SplatBits).getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VbicVT, VT, OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
        SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
      return Result;

    if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
      return Result;
  }

  if (Subtarget->isThumb1Only())
    if (SDValue Result = CombineANDShift(N, DCI, Subtarget))
      return Result;

  return SDValue();
}

// Try combining OR nodes to SMULWB, SMULWT.
static SDValue PerformORCombineToSMULWBT(SDNode *OR,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasV6Ops() ||
      (Subtarget->isThumb() &&
       (!Subtarget->hasThumb2() || !Subtarget->hasDSP())))
    return SDValue();

  SDValue SRL = OR->getOperand(0);
  SDValue SHL = OR->getOperand(1);

  if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
    SRL = OR->getOperand(1);
    SHL = OR->getOperand(0);
  }
  if (!isSRL16(SRL) || !isSHL16(SHL))
    return SDValue();

  // The first operands to the shifts need to be the two results from the
  // same smul_lohi node.
  if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
       SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
    return SDValue();

  SDNode *SMULLOHI = SRL.getOperand(0).getNode();
  if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
      SHL.getOperand(0) != SDValue(SMULLOHI, 1))
    return SDValue();

  // Now we have:
  // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))
  // For SMULW[B|T], smul_lohi will take a 32-bit and a 16-bit argument.
  // For SMULWB the 16-bit value will be sign-extended somehow.
  // For SMULWT only the SRA is required.
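  // The OR reassembles bits [47:16] of the 48-bit product, which is the
  // 32-bit result that SMULWB/SMULWT produce.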
  // Check both sides of SMUL_LOHI
  SDValue OpS16 = SMULLOHI->getOperand(0);
  SDValue OpS32 = SMULLOHI->getOperand(1);

  SelectionDAG &DAG = DCI.DAG;
  if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
    OpS16 = OpS32;
    OpS32 = SMULLOHI->getOperand(0);
  }

  SDLoc dl(OR);
  unsigned Opcode = 0;
  if (isS16(OpS16, DAG))
    Opcode = ARMISD::SMULWB;
  else if (isSRA16(OpS16)) {
    Opcode = ARMISD::SMULWT;
    OpS16 = OpS16->getOperand(0);
  }
  else
    return SDValue();

  SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
  DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
  return SDValue(OR, 0);
}

static SDValue PerformORCombineToBFI(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // BFI is only available on V6T2+
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  // 1) or (and A, mask), val => ARMbfi A, val, mask
  //      iff (val & mask) == val
  //
  // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
  //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
  //          && mask == ~mask2
  //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
  //          && ~mask == mask2
  //  (i.e., copy a bitfield value into another bitfield of the same width)
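  //
  // For example, case (1): (or (and A, 0xffff00ff), 0x00005500) becomes
  // (ARMbfi A, 0x55, 0xffff00ff), i.e. insert the value 0x55 into bits [15:8]
  // of A.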

  if (VT != MVT::i32)
    return SDValue();

  SDValue N00 = N0.getOperand(0);

  // The value and the mask need to be constants so we can verify this is
  // actually a bitfield set. If the mask is 0xffff, we can do better
  // via a movt instruction, so don't use BFI in that case.
  SDValue MaskOp = N0.getOperand(1);
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
  if (!MaskC)
    return SDValue();
  unsigned Mask = MaskC->getZExtValue();
  if (Mask == 0xffff)
    return SDValue();
  SDValue Res;
  // Case (1): or (and A, mask), val => ARMbfi A, val, mask
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N1C) {
    unsigned Val = N1C->getZExtValue();
    if ((Val & ~Mask) != Val)
      return SDValue();

    if (ARM::isBitFieldInvertedMask(Mask)) {
      Val >>= countTrailingZeros(~Mask);

      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
                        DAG.getConstant(Val, DL, MVT::i32),
                        DAG.getConstant(Mask, DL, MVT::i32));

      DCI.CombineTo(N, Res, false);
      // Return a value from the original node to inform the combiner that N
      // is now dead.
      return SDValue(N, 0);
    }
  } else if (N1.getOpcode() == ISD::AND) {
    // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned Mask2 = N11C->getZExtValue();

    // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
    // as is to match.
    if (ARM::isBitFieldInvertedMask(Mask) &&
        (Mask == ~Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask == 0xffff || Mask == 0xffff0000))
        return SDValue();
      // 2a
      unsigned amt = countTrailingZeros(Mask2);
      Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
                        DAG.getConstant(amt, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
                        DAG.getConstant(Mask, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return a value from the original node to inform the combiner that N
      // is now dead.
      return SDValue(N, 0);
    } else if (ARM::isBitFieldInvertedMask(~Mask) &&
               (~Mask == Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask2 == 0xffff || Mask2 == 0xffff0000))
        return SDValue();
      // 2b
      unsigned lsb = countTrailingZeros(Mask);
      Res = DAG.getNode(ISD::SRL, DL, VT, N00,
                        DAG.getConstant(lsb, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
                        DAG.getConstant(Mask2, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return a value from the original node to inform the combiner that N
      // is now dead.
      return SDValue(N, 0);
    }
  }

  if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
      N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
      ARM::isBitFieldInvertedMask(~Mask)) {
    // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
    // where lsb(mask) == #shamt and masked bits of B are known zero.
    SDValue ShAmt = N00.getOperand(1);
    unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
    unsigned LSB = countTrailingZeros(Mask);
    if (ShAmtC != LSB)
      return SDValue();

    Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
                      DAG.getConstant(~Mask, DL, MVT::i32));

    DCI.CombineTo(N, Res, false);
    // Return a value from the original node to inform the combiner that N
    // is now dead.
    return SDValue(N, 0);
  }

  return SDValue();
}

static bool isValidMVECond(unsigned CC, bool IsFloat) {
  switch (CC) {
  case ARMCC::EQ:
  case ARMCC::NE:
  case ARMCC::LE:
  case ARMCC::GT:
  case ARMCC::GE:
  case ARMCC::LT:
    return true;
  case ARMCC::HS:
  case ARMCC::HI:
    return !IsFloat;
  default:
    return false;
  }
}

static ARMCC::CondCodes getVCMPCondCode(SDValue N) {
  if (N->getOpcode() == ARMISD::VCMP)
    return (ARMCC::CondCodes)N->getConstantOperandVal(2);
  else if (N->getOpcode() == ARMISD::VCMPZ)
    return (ARMCC::CondCodes)N->getConstantOperandVal(1);
  else
    llvm_unreachable("Not a VCMP/VCMPZ!");
}

static bool CanInvertMVEVCMP(SDValue N) {
  ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N));
  return isValidMVECond(CC, N->getOperand(0).getValueType().isFloatingPoint());
}

static SDValue PerformORCombine_i1(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) {
  // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain
  // together with predicates.
  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  auto IsFreelyInvertable = [&](SDValue V) {
    if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ)
      return CanInvertMVEVCMP(V);
    return false;
  };

  // At least one operand must be freely invertable.
  if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1)))
    return SDValue();

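  // Apply De Morgan: or(A, B) == not(and(not(A), not(B))). The NOT of an
  // invertible VCMP/VCMPZ is expected to fold into a compare with the
  // opposite condition (see the xor(vcmp, 1) fold in PerformXORCombine),
  // leaving a plain AND of predicates.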
  SDValue NewN0 = DCI.DAG.getLogicalNOT(DL, N0, VT);
  SDValue NewN1 = DCI.DAG.getLogicalNOT(DL, N1, VT);
  SDValue And = DCI.DAG.getNode(ISD::AND, DL, VT, NewN0, NewN1);
  return DCI.DAG.getLogicalNOT(DL, And, VT);
}

/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VORR
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  if (Subtarget->hasMVEIntegerOps() &&
      (VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1))
    return PerformORCombine_i1(N, DCI, Subtarget);

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
        SplatBitSize == 64) {
      EVT VorrVT;
      SDValue Val =
          isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
                            SplatBitSize, DAG, dl, VorrVT, VT, OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
        SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;
    if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
      return Result;
  }

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
  if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
      DAG.getTargetLoweringInfo().isTypeLegal(VT)) {

    // The code below optimizes (or (and X, Y), Z).
    // The AND operand needs to have a single user to make these optimizations
    // profitable.
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
      return SDValue();

    APInt SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;

    APInt SplatBits0, SplatBits1;
    BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
    BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
    // Ensure that the second operand of both ands are constants
    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
                                      HasAnyUndefs) && !HasAnyUndefs) {
        if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
                                          HasAnyUndefs) && !HasAnyUndefs) {
            // Ensure that the bit width of the constants are the same and that
            // the splat arguments are logical inverses as per the pattern we
            // are trying to simplify.
            if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
                SplatBits0 == ~SplatBits1) {
                // Canonicalize the vector type to make instruction selection
                // simpler.
                EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
                SDValue Result = DAG.getNode(ARMISD::VBSP, dl, CanonicalVT,
                                             N0->getOperand(1),
                                             N0->getOperand(0),
                                             N1->getOperand(0));
                return DAG.getNode(ISD::BITCAST, dl, VT, Result);
            }
        }
    }
  }

  // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
  // reasonable.
  if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
    if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget))
      return Res;
  }

  if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
    return Result;

  return SDValue();
}

static SDValue PerformXORCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  if (!Subtarget->isThumb1Only()) {
    // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;

    if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
      return Result;
  }

  if (Subtarget->hasMVEIntegerOps()) {
    // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition.
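    // E.g. (xor (vcmp a, b, eq), true-mask) becomes (vcmp a, b, ne), provided
    // the inverted condition is still representable as an MVE compare.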
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    const TargetLowering *TLI = Subtarget->getTargetLowering();
    if (TLI->isConstTrueVal(N1.getNode()) &&
        (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) {
      if (CanInvertMVEVCMP(N0)) {
        SDLoc DL(N0);
        ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N0));

        SmallVector<SDValue, 4> Ops;
        Ops.push_back(N0->getOperand(0));
        if (N0->getOpcode() == ARMISD::VCMP)
          Ops.push_back(N0->getOperand(1));
        Ops.push_back(DCI.DAG.getConstant(CC, DL, MVT::i32));
        return DCI.DAG.getNode(N0->getOpcode(), DL, N0->getValueType(0), Ops);
      }
    }
  }

  return SDValue();
}

// ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and
// return it, and fill in FromMask and ToMask with (consecutive) bits in
// "from" to be extracted and their position in "to" (Rd).
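//
// For example (illustrative values only): a BFI whose mask operand is
// 0xffff00ff writes bits [8,15] of Rd, so ToMask = 0x0000ff00 and
// FromMask = 0x000000ff (the low 8 bits of "from"). If "from" is (srl X, 8),
// FromMask is shifted up to 0x0000ff00 and X is returned instead.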
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
  assert(N->getOpcode() == ARMISD::BFI);

  SDValue From = N->getOperand(1);
  ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
  FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(),
                                  ToMask.countPopulation());

  // If the Base came from a SHR #C, we can deduce that it is really testing bit
  // #C in the base of the SHR.
  if (From->getOpcode() == ISD::SRL &&
      isa<ConstantSDNode>(From->getOperand(1))) {
    APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
    assert(Shift.getLimitedValue() < 32 && "Shift too large!");
    FromMask <<= Shift.getLimitedValue(31);
    From = From->getOperand(0);
  }

  return From;
}

// If A and B each contain one contiguous run of set bits, does A | B form a
// single contiguous run, i.e. the concatenation of A's bits directly above
// B's?
//
// A and B must both be non-zero.
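//
// E.g. A = 0b1100 and B = 0b0011: the lowest set bit of A (bit 2) sits
// directly above the highest set bit of B (bit 1), so A | B == 0b1111 is one
// contiguous run and this returns true.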
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
  unsigned LastActiveBitInA = A.countTrailingZeros();
  unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
  return LastActiveBitInA - 1 == FirstActiveBitInB;
}

static SDValue FindBFIToCombineWith(SDNode *N) {
  // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can
  // combine with, if one exists.
  APInt ToMask, FromMask;
  SDValue From = ParseBFI(N, ToMask, FromMask);
  SDValue To = N->getOperand(0);

  // Now check for a compatible BFI to merge with. We can pass through BFIs that
  // aren't compatible, but not if they set the same bit in their destination as
  // we do (or that of any BFI we're going to combine with).
  SDValue V = To;
  APInt CombinedToMask = ToMask;
  while (V.getOpcode() == ARMISD::BFI) {
    APInt NewToMask, NewFromMask;
    SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
    if (NewFrom != From) {
      // This BFI has a different base. Keep going.
      CombinedToMask |= NewToMask;
      V = V.getOperand(0);
      continue;
    }

    // Do the written bits conflict with any we've seen so far?
    if ((NewToMask & CombinedToMask).getBoolValue())
      // Conflicting bits - bail out because going further is unsafe.
      return SDValue();

    // Are the new bits contiguous when combined with the old bits?
    if (BitsProperlyConcatenate(ToMask, NewToMask) &&
        BitsProperlyConcatenate(FromMask, NewFromMask))
      return V;
    if (BitsProperlyConcatenate(NewToMask, ToMask) &&
        BitsProperlyConcatenate(NewFromMask, FromMask))
      return V;

    // We've seen a write to some bits, so track it.
    CombinedToMask |= NewToMask;
    // Keep going...
    V = V.getOperand(0);
  }

  return SDValue();
}

static SDValue PerformBFICombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() == ISD::AND) {
    // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
    // the bits being cleared by the AND are not demanded by the BFI.
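    // E.g. with Mask2 = 0xffff00ff the BFI inserts an 8-bit field, so only
    // the low 8 bits of B are demanded; if Mask1 preserves those bits (i.e.
    // Mask1 covers 0xff), the AND can be dropped.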
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned LSB = countTrailingZeros(~InvMask);
    unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
    assert(Width <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
           "undefined behavior");
    unsigned Mask = (1u << Width) - 1;
    unsigned Mask2 = N11C->getZExtValue();
    if ((Mask & (~Mask2)) == 0)
      return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
                             N->getOperand(0), N1.getOperand(0),
                             N->getOperand(2));
  } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
    // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
    // Keep track of any consecutive bits set that all come from the same base
    // value. We can combine these together into a single BFI.
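    // E.g. (bfi (bfi X, From, 0xffffff00), (srl From, 8), 0xffff00ff) inserts
    // From[0..7] into bits [0,7] and From[8..15] into bits [8,15], which is
    // equivalent to the single (bfi X, From, 0xffff0000).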
    SDValue CombineBFI = FindBFIToCombineWith(N);
    if (CombineBFI == SDValue())
      return SDValue();

    // We've found a BFI.
    APInt ToMask1, FromMask1;
    SDValue From1 = ParseBFI(N, ToMask1, FromMask1);

    APInt ToMask2, FromMask2;
    SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
    assert(From1 == From2);
    (void)From2;

    // First, unlink CombineBFI.
    DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
    // Then create a new BFI, combining the two together.
    APInt NewFromMask = FromMask1 | FromMask2;
    APInt NewToMask = ToMask1 | ToMask2;

    EVT VT = N->getValueType(0);
    SDLoc dl(N);

    if (NewFromMask[0] == 0)
      From1 = DCI.DAG.getNode(
        ISD::SRL, dl, VT, From1,
        DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
    return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
                           DCI.DAG.getConstant(~NewToMask, dl, VT));
  }
  return SDValue();
}

/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // vmovrrd(vmovdrr x, y) -> x,y
  SDValue InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64())
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));

  // vmovrrd(load f64) -> (load i32), (load i32)
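  // (the second i32 load reads from BasePtr+4; on big-endian targets the two
  // results are swapped so each still maps to the correct half of the f64)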
  SDNode *InNode = InDouble.getNode();
  if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
      InNode->getValueType(0) == MVT::f64 &&
      InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
      !cast<LoadSDNode>(InNode)->isVolatile()) {
    // TODO: Should this be done for non-FrameIndex operands?
    LoadSDNode *LD = cast<LoadSDNode>(InNode);

    SelectionDAG &DAG = DCI.DAG;
    SDLoc DL(LD);
    SDValue BasePtr = LD->getBasePtr();
    SDValue NewLD1 =
        DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
                    LD->getAlignment(), LD->getMemOperand()->getFlags());

    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));

    SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr,
                                 LD->getPointerInfo().getWithOffset(4),
                                 std::min(4U, LD->getAlignment()),
                                 LD->getMemOperand()->getFlags());

    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
    if (DCI.DAG.getDataLayout().isBigEndian())
      std::swap(NewLD1, NewLD2);
    SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
    return Result;
  }

  return SDValue();
}

/// PerformVMOVDRRCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
  // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::BITCAST)
    Op1 = Op1.getOperand(0);
  if (Op0.getOpcode() == ARMISD::VMOVRRD &&
      Op0.getNode() == Op1.getNode() &&
      Op0.getResNo() == 0 && Op1.getResNo() == 1)
    return DAG.getNode(ISD::BITCAST, SDLoc(N),
                       N->getValueType(0), Op0.getOperand(0));
  return SDValue();
}

static SDValue PerformVMOVhrCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  SDValue Op0 = N->getOperand(0);

  // VMOVhr (VMOVrh (X)) -> X
  if (Op0->getOpcode() == ARMISD::VMOVrh)
    return Op0->getOperand(0);

  // FullFP16: half values are passed in S-registers, and we don't
  // need any of the bitcast and moves:
  //
  //     t2: f32,ch = CopyFromReg t0, Register:f32 %0
  //   t5: i32 = bitcast t2
  // t18: f16 = ARMISD::VMOVhr t5
  if (Op0->getOpcode() == ISD::BITCAST) {
    SDValue Copy = Op0->getOperand(0);
    if (Copy.getValueType() == MVT::f32 &&
        Copy->getOpcode() == ISD::CopyFromReg) {
      SDValue Ops[] = {Copy->getOperand(0), Copy->getOperand(1)};
      SDValue NewCopy =
          DCI.DAG.getNode(ISD::CopyFromReg, SDLoc(N), N->getValueType(0), Ops);
      return NewCopy;
    }
  }

  // fold (VMOVhr (load x)) -> (load (f16*)x)
  if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Op0)) {
    if (LN0->hasOneUse() && LN0->isUnindexed() &&
        LN0->getMemoryVT() == MVT::i16) {
      SDValue Load =
          DCI.DAG.getLoad(N->getValueType(0), SDLoc(N), LN0->getChain(),
                          LN0->getBasePtr(), LN0->getMemOperand());
      DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
      DCI.DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Load.getValue(1));
      return Load;
    }
  }

  // Only the bottom 16 bits of the source register are used.
  APInt DemandedMask = APInt::getLowBitsSet(32, 16);
  const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(Op0, DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}

static SDValue PerformVMOVrhCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (VMOVrh (fpconst x)) -> const x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N0)) {
    APFloat V = C->getValueAPF();
    return DCI.DAG.getConstant(V.bitcastToAPInt().getZExtValue(), SDLoc(N), VT);
  }

  // fold (VMOVrh (load x)) -> (zextload (i16*)x)
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);

    SDValue Load =
        DCI.DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN0->getChain(),
                           LN0->getBasePtr(), MVT::i16, LN0->getMemOperand());
    DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
    DCI.DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
    return Load;
  }

  // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n)
  if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      isa<ConstantSDNode>(N0->getOperand(1)))
    return DCI.DAG.getNode(ARMISD::VGETLANEu, SDLoc(N), VT, N0->getOperand(0),
                           N0->getOperand(1));

  return SDValue();
}

/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
/// are normal, non-volatile loads.  If so, it is profitable to bitcast an
/// i64 vector to have f64 elements, since the value can then be loaded
/// directly into a VFP register.
static bool hasNormalLoadOperand(SDNode *N) {
  unsigned NumElts = N->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    SDNode *Elt = N->getOperand(i).getNode();
    if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
      return true;
  }
  return false;
}

/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
/// ISD::BUILD_VECTOR.
static SDValue PerformBUILD_VECTORCombine(SDNode *N,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const ARMSubtarget *Subtarget) {
  // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
  // VMOVRRD is introduced when legalizing i64 types.  It forces the i64 value
  // into a pair of GPRs, which is fine when the value is used as a scalar,
  // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
  SelectionDAG &DAG = DCI.DAG;
  if (N->getNumOperands() == 2)
    if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
      return RV;

  // Load i64 elements as f64 values so that type legalization does not split
  // them up into i32 values.
  EVT VT = N->getValueType(0);
  if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
    return SDValue();
  SDLoc dl(N);
  SmallVector<SDValue, 8> Ops;
  unsigned NumElts = VT.getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
    Ops.push_back(V);
    // Make the DAGCombiner fold the bitcast.
    DCI.AddToWorklist(V.getNode());
  }
  EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
  SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
  return DAG.getNode(ISD::BITCAST, dl, VT, BV);
}

/// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
static SDValue
PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
  // At that time, we may have inserted bitcasts from integer to float.
  // If these bitcasts have survived DAGCombine, change the lowering of this
  // BUILD_VECTOR in something more vector friendly, i.e., that does not
  // force to use floating point types.

  // Make sure we can change the type of the vector.
  // This is possible iff:
  // 1. The vector is only used in a bitcast to an integer type. I.e.,
  //    1.1. Vector is used only once.
  //    1.2. Use is a bit convert to an integer type.
  // 2. The size of its operands are 32-bits (64-bits are not legal).
  EVT VT = N->getValueType(0);
  EVT EltVT = VT.getVectorElementType();

  // Check 1.1. and 2.
  if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
    return SDValue();

  // By construction, the input type must be float.
  assert(EltVT == MVT::f32 && "Unexpected type!");

  // Check 1.2.
  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() != ISD::BITCAST ||
      Use->getValueType(0).isFloatingPoint())
    return SDValue();

  // Check profitability.
  // Model is, if more than half of the relevant operands are bitcast from
  // i32, turn the build_vector into a sequence of insert_vector_elt.
  // Relevant operands are everything that is not statically
  // (i.e., at compile time) bitcasted.
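  // E.g. for (ARMISD::BUILD_VECTOR (bitcast i32 a), (bitcast i32 b), c,
  // undef), two of the three relevant operands come from i32 bitcasts, so
  // rebuilding the vector with v4i32 insert_vector_elts (and a single bitcast
  // of the whole result) is considered profitable.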
  unsigned NumOfBitCastedElts = 0;
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumOfRelevantElts = NumElts;
  for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
    SDValue Elt = N->getOperand(Idx);
    if (Elt->getOpcode() == ISD::BITCAST) {
      // Assume only bit cast to i32 will go away.
      if (Elt->getOperand(0).getValueType() == MVT::i32)
        ++NumOfBitCastedElts;
    } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
      // Constants are statically cast, so do not count them as relevant
      // operands.
      --NumOfRelevantElts;
  }

  // Check if more than half of the elements require a non-free bitcast.
  if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  // Create the new vector type.
  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
  // Check if the type is legal.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(VecVT))
    return SDValue();

  // Combine:
  // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
  // => BITCAST INSERT_VECTOR_ELT
  //                      (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
  //                      (BITCAST EN), N.
  SDValue Vec = DAG.getUNDEF(VecVT);
  SDLoc dl(N);
  for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
    SDValue V = N->getOperand(Idx);
    if (V.isUndef())
      continue;
    if (V.getOpcode() == ISD::BITCAST &&
        V->getOperand(0).getValueType() == MVT::i32)
      // Fold obvious case.
      V = V.getOperand(0);
    else {
      V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
      // Make the DAGCombiner fold the bitcasts.
      DCI.AddToWorklist(V.getNode());
    }
    SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
  }
  Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
  // Make the DAGCombiner fold the bitcasts.
  DCI.AddToWorklist(Vec.getNode());
  return Vec;
}

static SDValue
PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);
  SDValue Op = N->getOperand(0);
  SDLoc dl(N);

  // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x)
  if (Op->getOpcode() == ARMISD::PREDICATE_CAST) {
    // If the valuetypes are the same, we can remove the cast entirely.
    if (Op->getOperand(0).getValueType() == VT)
      return Op->getOperand(0);
    return DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0));
  }

  return SDValue();
}

static SDValue
PerformVECTOR_REG_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                              const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDValue Op = N->getOperand(0);
  SDLoc dl(N);

  // Under Little endian, a VECTOR_REG_CAST is equivalent to a BITCAST
  if (ST->isLittle())
    return DCI.DAG.getNode(ISD::BITCAST, dl, VT, Op);

  // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x)
  if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) {
    // If the valuetypes are the same, we can remove the cast entirely.
    if (Op->getOperand(0).getValueType() == VT)
      return Op->getOperand(0);
    return DCI.DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Op->getOperand(0));
  }

  return SDValue();
}

static SDValue PerformVCMPCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasMVEIntegerOps())
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  ARMCC::CondCodes Cond =
      (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
  SDLoc dl(N);

  // vcmp X, 0, cc -> vcmpz X, cc
  if (isZeroVector(Op1))
    return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op0,
                           N->getOperand(2));

  unsigned SwappedCond = getSwappedCondition(Cond);
  if (isValidMVECond(SwappedCond, VT.isFloatingPoint())) {
    // vcmp 0, X, cc -> vcmpz X, reversed(cc)
    if (isZeroVector(Op0))
      return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op1,
                             DCI.DAG.getConstant(SwappedCond, dl, MVT::i32));
    // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc)
    if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP)
      return DCI.DAG.getNode(ARMISD::VCMP, dl, VT, Op1, Op0,
                             DCI.DAG.getConstant(SwappedCond, dl, MVT::i32));
  }

  return SDValue();
}

/// PerformInsertEltCombine - Target-specific dag combine xforms for
/// ISD::INSERT_VECTOR_ELT.
static SDValue PerformInsertEltCombine(SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  // Bitcast an i64 load inserted into a vector to f64.
  // Otherwise, the i64 value will be legalized to a pair of i32 values.
  EVT VT = N->getValueType(0);
  SDNode *Elt = N->getOperand(1).getNode();
  if (VT.getVectorElementType() != MVT::i64 ||
      !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
                                 VT.getVectorNumElements());
  SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
  SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
  // Make the DAGCombiner fold the bitcasts.
  DCI.AddToWorklist(Vec.getNode());
  DCI.AddToWorklist(V.getNode());
  SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
                               Vec, V, N->getOperand(2));
  return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
}

static SDValue PerformExtractEltCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI) {
  SDValue Op0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // extract (vdup x) -> x
  if (Op0->getOpcode() == ARMISD::VDUP) {
    SDValue X = Op0->getOperand(0);
    if (VT == MVT::f16 && X.getValueType() == MVT::i32)
      return DCI.DAG.getNode(ARMISD::VMOVhr, dl, VT, X);
    if (VT == MVT::i32 && X.getValueType() == MVT::f16)
      return DCI.DAG.getNode(ARMISD::VMOVrh, dl, VT, X);

    while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST)
      X = X->getOperand(0);
    if (X.getValueType() == VT)
      return X;
  }

  return SDValue();
}

/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
/// ISD::VECTOR_SHUFFLE.
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
  // The LLVM shufflevector instruction does not require the shuffle mask
  // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
  // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
  // operands do not match the mask length, they are extended by concatenating
  // them with undef vectors.  That is probably the right thing for other
  // targets, but for NEON it is better to concatenate two double-register
  // size vector operands into a single quad-register size vector.  Do that
  // transformation here:
  //   shuffle(concat(v1, undef), concat(v2, undef)) ->
  //   shuffle(concat(v1, v2), undef)
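  // The original mask indexed into two quad-sized inputs and must be rewritten
  // for the single concatenated input: e.g. with v8i16 results (HalfElts = 4),
  // old mask element 9 (lane 1 of the second concat) becomes 5, and elements
  // that referenced an undef upper half become -1.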
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
      Op1.getOpcode() != ISD::CONCAT_VECTORS ||
      Op0.getNumOperands() != 2 ||
      Op1.getNumOperands() != 2)
    return SDValue();
  SDValue Concat0Op1 = Op0.getOperand(1);
  SDValue Concat1Op1 = Op1.getOperand(1);
  if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
    return SDValue();
  // Skip the transformation if any of the types are illegal.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = N->getValueType(0);
  if (!TLI.isTypeLegal(VT) ||
      !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
      !TLI.isTypeLegal(Concat1Op1.getValueType()))
    return SDValue();

  SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
                                  Op0.getOperand(0), Op1.getOperand(0));
  // Translate the shuffle mask.
  SmallVector<int, 16> NewMask;
  unsigned NumElts = VT.getVectorNumElements();
  unsigned HalfElts = NumElts/2;
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
  for (unsigned n = 0; n < NumElts; ++n) {
    int MaskElt = SVN->getMaskElt(n);
    int NewElt = -1;
    if (MaskElt < (int)HalfElts)
      NewElt = MaskElt;
    else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
      NewElt = HalfElts + MaskElt - NumElts;
    NewMask.push_back(NewElt);
  }
  return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
                              DAG.getUNDEF(VT), NewMask);
}

/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
/// NEON load/store intrinsics, and generic vector load/stores, to merge
/// base address updates.
/// For generic load/stores, the memory type is assumed to be a vector.
/// The caller is assumed to have checked legality.
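/// For example, a (vld1 addr) followed by (add addr, <access size>) can be
/// rewritten as a single post-incremented VLD1_UPD that also produces the
/// updated address, making the separate ADD redundant.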
static SDValue CombineBaseUpdate(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
                            N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
  const bool isStore = N->getOpcode() == ISD::STORE;
  const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
  SDValue Addr = N->getOperand(AddrOpIdx);
  MemSDNode *MemN = cast<MemSDNode>(N);
  SDLoc dl(N);

  // Search for a use of the address operand that is an increment.
  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
         UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User->getOpcode() != ISD::ADD ||
        UI.getUse().getResNo() != Addr.getResNo())
      continue;

    // Check that the add is independent of the load/store.  Otherwise, folding
    // it would create a cycle. We can avoid searching through Addr as it's a
    // predecessor to both.
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallVector<const SDNode *, 16> Worklist;
    Visited.insert(Addr.getNode());
    Worklist.push_back(N);
    Worklist.push_back(User);
    if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
        SDNode::hasPredecessorHelper(User, Visited, Worklist))
      continue;

    // Find the new opcode for the updating load/store.
    bool isLoadOp = true;
    bool isLaneOp = false;
    unsigned NewOpc = 0;
    unsigned NumVecs = 0;
    if (isIntrinsic) {
      unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
      switch (IntNo) {
      default: llvm_unreachable("unexpected intrinsic for Neon base update");
      case Intrinsic::arm_neon_vld1:     NewOpc = ARMISD::VLD1_UPD;
        NumVecs = 1; break;
      case Intrinsic::arm_neon_vld2:     NewOpc = ARMISD::VLD2_UPD;
        NumVecs = 2; break;
      case Intrinsic::arm_neon_vld3:     NewOpc = ARMISD::VLD3_UPD;
        NumVecs = 3; break;
      case Intrinsic::arm_neon_vld4:     NewOpc = ARMISD::VLD4_UPD;
        NumVecs = 4; break;
      case Intrinsic::arm_neon_vld1x2:
      case Intrinsic::arm_neon_vld1x3:
      case Intrinsic::arm_neon_vld1x4:
      case Intrinsic::arm_neon_vld2dup:
      case Intrinsic::arm_neon_vld3dup:
      case Intrinsic::arm_neon_vld4dup:
        // TODO: Support updating VLD1x and VLDxDUP nodes. For now, we just skip
        // combining base updates for such intrinsics.
        continue;
      case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
        NumVecs = 2; isLaneOp = true; break;
      case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
        NumVecs = 3; isLaneOp = true; break;
      case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
        NumVecs = 4; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst1:     NewOpc = ARMISD::VST1_UPD;
        NumVecs = 1; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst2:     NewOpc = ARMISD::VST2_UPD;
        NumVecs = 2; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst3:     NewOpc = ARMISD::VST3_UPD;
        NumVecs = 3; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst4:     NewOpc = ARMISD::VST4_UPD;
        NumVecs = 4; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
        NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
        NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
        NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
      }
    } else {
      isLaneOp = true;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode for Neon base update");
      case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
      case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
      case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
      case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
      case ISD::LOAD:       NewOpc = ARMISD::VLD1_UPD;
        NumVecs = 1; isLaneOp = false; break;
      case ISD::STORE:      NewOpc = ARMISD::VST1_UPD;
        NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
      }
    }

    // Find the size of memory referenced by the load/store.
    EVT VecTy;
    if (isLoadOp) {
      VecTy = N->getValueType(0);
    } else if (isIntrinsic) {
      VecTy = N->getOperand(AddrOpIdx+1).getValueType();
    } else {
      assert(isStore && "Node has to be a load, a store, or an intrinsic!");
      VecTy = N->getOperand(1).getValueType();
    }

    unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
    if (isLaneOp)
      NumBytes /= VecTy.getVectorNumElements();

    // If the increment is a constant, it must match the memory ref size.
    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
    ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
    if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) {
      // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
      // separate instructions that make it harder to use a non-constant update.
      continue;
    }

    // OK, we found an ADD we can fold into the base update.
    // Now, create a _UPD node, taking care of not breaking alignment.

    EVT AlignedVecTy = VecTy;
    unsigned Alignment = MemN->getAlignment();

    // If this is a less-than-standard-aligned load/store, change the type to
    // match the standard alignment.
    // The alignment is overlooked when selecting _UPD variants, and it's
    // easier to introduce bitcasts here than fix that.
    // There are 3 ways to get to this base-update combine:
    // - intrinsics: they are assumed to be properly aligned (to the standard
    //   alignment of the memory type), so we don't need to do anything.
    // - ARMISD::VLDx nodes: they are only generated from the aforementioned
    //   intrinsics, so, likewise, there's nothing to do.
    // - generic load/store instructions: the alignment is specified as an
    //   explicit operand, rather than implicitly as the standard alignment
    //   of the memory type (like the intrinsics).  We need to change the
    //   memory type to match the explicit alignment.  That way, we don't
    //   generate non-standard-aligned ARMISD::VLDx nodes.
    if (isa<LSBaseSDNode>(N)) {
      if (Alignment == 0)
        Alignment = 1;
      if (Alignment < VecTy.getScalarSizeInBits() / 8) {
        MVT EltTy = MVT::getIntegerVT(Alignment * 8);
        assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
        assert(!isLaneOp && "Unexpected generic load/store lane.");
        unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
        AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
      }
      // Don't set an explicit alignment on regular load/stores that we want
      // to transform to VLD/VST 1_UPD nodes.
      // This matches the behavior of regular load/stores, which only get an
      // explicit alignment if the MMO alignment is larger than the standard
      // alignment of the memory type.
      // Intrinsics, however, always get an explicit alignment, set to the
      // alignment of the MMO.
      Alignment = 1;
    }

    // Create the new updating load/store node.
    // First, create an SDVTList for the new updating node's results.
    EVT Tys[6];
    unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
    unsigned n;
    for (n = 0; n < NumResultVecs; ++n)
      Tys[n] = AlignedVecTy;
    Tys[n++] = MVT::i32;
    Tys[n] = MVT::Other;
    SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));

    // Then, gather the new node's operands.
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(N->getOperand(0)); // incoming chain
    Ops.push_back(N->getOperand(AddrOpIdx));
    Ops.push_back(Inc);

    if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
      // Try to match the intrinsic's signature
      Ops.push_back(StN->getValue());
    } else {
      // Loads (and of course intrinsics) match the intrinsics' signature,
      // so just add all but the alignment operand.
      for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
        Ops.push_back(N->getOperand(i));
    }

    // For all node types, the alignment operand is always the last one.
    Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));

    // If this is a non-standard-aligned STORE, the penultimate operand is the
    // stored value.  Bitcast it to the aligned type.
    if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
      SDValue &StVal = Ops[Ops.size()-2];
      StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
    }

    EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy;
    SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT,
                                           MemN->getMemOperand());

    // Update the uses.
    SmallVector<SDValue, 5> NewResults;
    for (unsigned i = 0; i < NumResultVecs; ++i)
      NewResults.push_back(SDValue(UpdN.getNode(), i));

    // If this is a non-standard-aligned LOAD, the first result is the loaded
    // value.  Bitcast it to the expected result type.
    if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
      SDValue &LdVal = NewResults[0];
      LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
    }

    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
    DCI.CombineTo(N, NewResults);
    DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));

    break;
  }
  return SDValue();
}

static SDValue PerformVLDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  return CombineBaseUpdate(N, DCI);
}

static SDValue PerformMVEVLDCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue Addr = N->getOperand(2);
  MemSDNode *MemN = cast<MemSDNode>(N);
  SDLoc dl(N);

  // For stores, where there are multiple intrinsics, we only actually want
  // to post-inc the last of them.
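  // E.g. an arm_mve_vst4q is emitted as four intrinsic calls carrying a stage
  // operand of 0..3; only the stage-3 call finishes the store, so only that
  // one is turned into a post-incrementing VST4_UPD.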
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  if (IntNo == Intrinsic::arm_mve_vst2q &&
      cast<ConstantSDNode>(N->getOperand(5))->getZExtValue() != 1)
    return SDValue();
  if (IntNo == Intrinsic::arm_mve_vst4q &&
      cast<ConstantSDNode>(N->getOperand(7))->getZExtValue() != 3)
    return SDValue();

  // Search for a use of the address operand that is an increment.
  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
                            UE = Addr.getNode()->use_end();
       UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User->getOpcode() != ISD::ADD ||
        UI.getUse().getResNo() != Addr.getResNo())
      continue;

    // Check that the add is independent of the load/store.  Otherwise, folding
    // it would create a cycle. We can avoid searching through Addr as it's a
    // predecessor to both.
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallVector<const SDNode *, 16> Worklist;
    Visited.insert(Addr.getNode());
    Worklist.push_back(N);
    Worklist.push_back(User);
    if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
        SDNode::hasPredecessorHelper(User, Visited, Worklist))
      continue;

    // Find the new opcode for the updating load/store.
    bool isLoadOp = true;
    unsigned NewOpc = 0;
    unsigned NumVecs = 0;
    switch (IntNo) {
    default:
      llvm_unreachable("unexpected intrinsic for MVE VLDn combine");
    case Intrinsic::arm_mve_vld2q:
      NewOpc = ARMISD::VLD2_UPD;
      NumVecs = 2;
      break;
    case Intrinsic::arm_mve_vld4q:
      NewOpc = ARMISD::VLD4_UPD;
      NumVecs = 4;
      break;
    case Intrinsic::arm_mve_vst2q:
      NewOpc = ARMISD::VST2_UPD;
      NumVecs = 2;
      isLoadOp = false;
      break;
    case Intrinsic::arm_mve_vst4q:
      NewOpc = ARMISD::VST4_UPD;
      NumVecs = 4;
      isLoadOp = false;
      break;
    }

    // Find the size of memory referenced by the load/store.
    EVT VecTy;
    if (isLoadOp) {
      VecTy = N->getValueType(0);
    } else {
      VecTy = N->getOperand(3).getValueType();
    }

    unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;

    // If the increment is a constant, it must match the memory ref size.
    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
    ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
    if (!CInc || CInc->getZExtValue() != NumBytes)
      continue;

    // Create the new updating load/store node.
    // First, create an SDVTList for the new updating node's results.
    EVT Tys[6];
    unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
    unsigned n;
    for (n = 0; n < NumResultVecs; ++n)
      Tys[n] = VecTy;
    Tys[n++] = MVT::i32;
    Tys[n] = MVT::Other;
    SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2));

    // Then, gather the new node's operands.
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(N->getOperand(0)); // incoming chain
    Ops.push_back(N->getOperand(2)); // ptr
    Ops.push_back(Inc);

    for (unsigned i = 3; i < N->getNumOperands(); ++i)
      Ops.push_back(N->getOperand(i));

    SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, VecTy,
                                           MemN->getMemOperand());

    // Update the uses.
    SmallVector<SDValue, 5> NewResults;
    for (unsigned i = 0; i < NumResultVecs; ++i)
      NewResults.push_back(SDValue(UpdN.getNode(), i));

    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
    DCI.CombineTo(N, NewResults);
    DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));

    break;
  }

  return SDValue();
}

/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
/// are also VDUPLANEs.  If so, combine them to a vldN-dup operation and
/// return true.
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  // vldN-dup instructions only support 64-bit vectors for N > 1.
  if (!VT.is64BitVector())
    return false;

  // Check if the VDUPLANE operand is a vldN-dup intrinsic.
  SDNode *VLD = N->getOperand(0).getNode();
  if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
    return false;
  unsigned NumVecs = 0;
  unsigned NewOpc = 0;
  unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
  if (IntNo == Intrinsic::arm_neon_vld2lane) {
    NumVecs = 2;
    NewOpc = ARMISD::VLD2DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
    NumVecs = 3;
    NewOpc = ARMISD::VLD3DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
    NumVecs = 4;
    NewOpc = ARMISD::VLD4DUP;
  } else {
    return false;
  }

  // First check that all the vldN-lane uses are VDUPLANEs and that the lane
  // numbers match the load.
  unsigned VLDLaneNo =
    cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
       UI != UE; ++UI) {
    // Ignore uses of the chain result.
    if (UI.getUse().getResNo() == NumVecs)
      continue;
    SDNode *User = *UI;
    if (User->getOpcode() != ARMISD::VDUPLANE ||
        VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
      return false;
  }

  // Create the vldN-dup node.
  EVT Tys[5];
  unsigned n;
  for (n = 0; n < NumVecs; ++n)
    Tys[n] = VT;
  Tys[n] = MVT::Other;
  SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1));
  SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
  MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
  SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
                                           Ops, VLDMemInt->getMemoryVT(),
                                           VLDMemInt->getMemOperand());

  // Update the uses.
  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
       UI != UE; ++UI) {
    unsigned ResNo = UI.getUse().getResNo();
    // Ignore uses of the chain result.
    if (ResNo == NumVecs)
      continue;
    SDNode *User = *UI;
    DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
  }

  // Now the vldN-lane intrinsic is dead except for its chain result.
  // Update uses of the chain.
  std::vector<SDValue> VLDDupResults;
  for (unsigned n = 0; n < NumVecs; ++n)
    VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
  VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
  DCI.CombineTo(VLD, VLDDupResults);

  return true;
}

/// PerformVDUPLANECombine - Target-specific dag combine xforms for
/// ARMISD::VDUPLANE.
static SDValue PerformVDUPLANECombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  SDValue Op = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // On MVE, we just convert the VDUPLANE to a VDUP with an extract.
  if (Subtarget->hasMVEIntegerOps()) {
    EVT ExtractVT = VT.getVectorElementType();
    // We need to ensure we are creating a legal type.
    if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(ExtractVT))
      ExtractVT = MVT::i32;
    SDValue Extract = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), ExtractVT,
                              N->getOperand(0), N->getOperand(1));
    return DCI.DAG.getNode(ARMISD::VDUP, SDLoc(N), VT, Extract);
  }

  // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
  // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
  if (CombineVLDDUP(N, DCI))
    return SDValue(N, 0);

  // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
  // redundant.  Ignore bit_converts for now; element sizes are checked below.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
    return SDValue();

  // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
  unsigned EltSize = Op.getScalarValueSizeInBits();
  // The canonical VMOV for a zero vector uses a 32-bit element size.
  unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned EltBits;
  if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0)
    EltSize = 8;
  if (EltSize > VT.getScalarSizeInBits())
    return SDValue();

  return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}

/// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
static SDValue PerformVDUPCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op = N->getOperand(0);
  SDLoc dl(N);

  if (Subtarget->hasMVEIntegerOps()) {
    // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will
    // need to come from a GPR.
    if (Op.getValueType() == MVT::f32)
      return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0),
                             DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op));
    else if (Op.getValueType() == MVT::f16)
      return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0),
                             DAG.getNode(ARMISD::VMOVrh, dl, MVT::i32, Op));
  }

  if (!Subtarget->hasNEON())
    return SDValue();

  // Match VDUP(LOAD) -> VLD1DUP.
  // We match this pattern here rather than waiting for isel because the
  // transform is only legal for unindexed loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
  if (LD && Op.hasOneUse() && LD->isUnindexed() &&
      LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
    SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
                      DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
    SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
    SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
                                             Ops, LD->getMemoryVT(),
                                             LD->getMemOperand());
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
    return VLDDup;
  }

  return SDValue();
}

static SDValue PerformLOADCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);

  // If this is a legal vector load, try to combine it into a VLD1_UPD.
  if (ISD::isNormalLoad(N) && VT.isVector() &&
      DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return CombineBaseUpdate(N, DCI);

  return SDValue();
}

// Optimize trunc store (of multiple scalars) to shuffle and store.  First,
// pack all of the elements in one place.  Next, store to memory in fewer
// chunks.
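// E.g. a truncating store of v4i32 to v4i8 becomes a v16i8 shuffle that packs
// the four result bytes into the low lanes, followed by a single i32 store of
// those packed bytes.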
static SDValue PerformTruncatingStoreCombine(StoreSDNode *St,
                                             SelectionDAG &DAG) {
  SDValue StVal = St->getValue();
  EVT VT = StVal.getValueType();
  if (!St->isTruncatingStore() || !VT.isVector())
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT StVT = St->getMemoryVT();
  unsigned NumElems = VT.getVectorNumElements();
  assert(StVT != VT && "Cannot truncate to the same type");
  unsigned FromEltSz = VT.getScalarSizeInBits();
  unsigned ToEltSz = StVT.getScalarSizeInBits();

  // From/To element sizes and the element count must be powers of two.
  if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz))
    return SDValue();

  // We are going to use the original vector elt for storing.
  // Accumulated smaller vector elements must be a multiple of the store size.
  if (0 != (NumElems * FromEltSz) % ToEltSz)
    return SDValue();

  unsigned SizeRatio = FromEltSz / ToEltSz;
  assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());

  // Create a type on which we perform the shuffle.
  EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
                                   NumElems * SizeRatio);
  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  SDLoc DL(St);
  SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
  SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
  for (unsigned i = 0; i < NumElems; ++i)
    ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1
                                                      : i * SizeRatio;

  // Can't shuffle using an illegal type.
  if (!TLI.isTypeLegal(WideVecVT))
    return SDValue();

  SDValue Shuff = DAG.getVectorShuffle(
      WideVecVT, DL, WideVec, DAG.getUNDEF(WideVec.getValueType()), ShuffleVec);
  // At this point all of the data is stored at the bottom of the
  // register. We now need to save it to mem.

  // Find the largest store unit
  MVT StoreType = MVT::i8;
  for (MVT Tp : MVT::integer_valuetypes()) {
    if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
      StoreType = Tp;
  }
  // Didn't find a legal store type.
  if (!TLI.isTypeLegal(StoreType))
    return SDValue();

  // Bitcast the original vector into a vector of store-size units
  EVT StoreVecVT =
      EVT::getVectorVT(*DAG.getContext(), StoreType,
                       VT.getSizeInBits() / EVT(StoreType).getSizeInBits());
  assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
  SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
  SmallVector<SDValue, 8> Chains;
  SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
                                      TLI.getPointerTy(DAG.getDataLayout()));
  SDValue BasePtr = St->getBasePtr();

  // Perform one or more big stores into memory.
  unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits();
  for (unsigned I = 0; I < E; I++) {
    SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreType,
                                 ShuffWide, DAG.getIntPtrConstant(I, DL));
    SDValue Ch =
        DAG.getStore(St->getChain(), DL, SubVec, BasePtr, St->getPointerInfo(),
                     St->getAlignment(), St->getMemOperand()->getFlags());
    BasePtr =
        DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment);
    Chains.push_back(Ch);
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

// Try taking a single vector store from a truncate (which would otherwise turn
// into an expensive buildvector) and splitting it into a series of narrowing
// stores.
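// E.g. a store of (trunc v8i32 X to v8i16) is split into two truncating
// stores, each writing a v4i32 subvector of X as v4i16 (8 bytes) at offsets
// 0 and 8 from the original base pointer.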
static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
                                                 SelectionDAG &DAG) {
  if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed())
    return SDValue();
  SDValue Trunc = St->getValue();
  if (Trunc->getOpcode() != ISD::TRUNCATE && Trunc->getOpcode() != ISD::FP_ROUND)
    return SDValue();
  EVT FromVT = Trunc->getOperand(0).getValueType();
  EVT ToVT = Trunc.getValueType();
  if (!ToVT.isVector())
    return SDValue();
  assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
  EVT ToEltVT = ToVT.getVectorElementType();
  EVT FromEltVT = FromVT.getVectorElementType();

  unsigned NumElements = 0;
  if (FromEltVT == MVT::i32 && (ToEltVT == MVT::i16 || ToEltVT == MVT::i8))
    NumElements = 4;
  if (FromEltVT == MVT::i16 && ToEltVT == MVT::i8)
    NumElements = 8;
  if (FromEltVT == MVT::f32 && ToEltVT == MVT::f16)
    NumElements = 4;
  if (NumElements == 0 ||
      (FromEltVT != MVT::f32 && FromVT.getVectorNumElements() == NumElements) ||
      FromVT.getVectorNumElements() % NumElements != 0)
    return SDValue();

  // Test if the Trunc will be convertible to a VMOVN with a shuffle, and if so
  // use the VMOVN over splitting the store. We are looking for patterns of:
  // !rev: 0 N 1 N+1 2 N+2 ...
  //  rev: N 0 N+1 1 N+2 2 ...
  auto isVMOVNOriginalMask = [&](ArrayRef<int> M, bool rev) {
    unsigned NumElts = ToVT.getVectorNumElements();
    if (NumElts != M.size())
      return false;

    unsigned Off0 = rev ? NumElts : 0;
    unsigned Off1 = rev ? 0 : NumElts;

    for (unsigned i = 0; i < NumElts; i += 2) {
      if (M[i] >= 0 && M[i] != (int)(Off0 + i / 2))
        return false;
      if (M[i + 1] >= 0 && M[i + 1] != (int)(Off1 + i / 2))
        return false;
    }

    return true;
  };

  if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Trunc->getOperand(0)))
    if (isVMOVNOriginalMask(Shuffle->getMask(), false) ||
        isVMOVNOriginalMask(Shuffle->getMask(), true))
      return SDValue();

  LLVMContext &C = *DAG.getContext();
  SDLoc DL(St);
  // Details about the old store
  SDValue Ch = St->getChain();
  SDValue BasePtr = St->getBasePtr();
  Align Alignment = St->getOriginalAlign();
  MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags();
  AAMDNodes AAInfo = St->getAAInfo();

  // We split the store into slices of NumElements. fp16 trunc stores are
  // converted with a vcvt and then stored as truncating integer stores.
  EVT NewFromVT = EVT::getVectorVT(C, FromEltVT, NumElements);
  EVT NewToVT = EVT::getVectorVT(
      C, EVT::getIntegerVT(C, ToEltVT.getSizeInBits()), NumElements);

  SmallVector<SDValue, 4> Stores;
  for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
    unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8;
    SDValue NewPtr =
        DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset));

    SDValue Extract =
        DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewFromVT, Trunc.getOperand(0),
                    DAG.getConstant(i * NumElements, DL, MVT::i32));

    if (ToEltVT == MVT::f16) {
      SDValue FPTrunc =
          DAG.getNode(ARMISD::VCVTN, DL, MVT::v8f16, DAG.getUNDEF(MVT::v8f16),
                      Extract, DAG.getConstant(0, DL, MVT::i32));
      Extract = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v4i32, FPTrunc);
    }

    SDValue Store = DAG.getTruncStore(
        Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset),
        NewToVT, Alignment.value(), MMOFlags, AAInfo);
    Stores.push_back(Store);
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
}

/// PerformSTORECombine - Target-specific dag combine xforms for
/// ISD::STORE.
static SDValue PerformSTORECombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  if (St->isVolatile())
    return SDValue();
  SDValue StVal = St->getValue();
  EVT VT = StVal.getValueType();

  if (Subtarget->hasNEON())
    if (SDValue Store = PerformTruncatingStoreCombine(St, DCI.DAG))
      return Store;

  if (Subtarget->hasMVEIntegerOps())
    if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DCI.DAG))
      return NewToken;

  if (!ISD::isNormalStore(St))
    return SDValue();

  // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
  // ARM stores of arguments in the same cache line.
  if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
      StVal.getNode()->hasOneUse()) {
    SelectionDAG  &DAG = DCI.DAG;
    bool isBigEndian = DAG.getDataLayout().isBigEndian();
    SDLoc DL(St);
    SDValue BasePtr = St->getBasePtr();
    SDValue NewST1 = DAG.getStore(
        St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
        BasePtr, St->getPointerInfo(), St->getOriginalAlign(),
        St->getMemOperand()->getFlags());

    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));
    return DAG.getStore(NewST1.getValue(0), DL,
                        StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
                        OffsetPtr, St->getPointerInfo().getWithOffset(4),
                        St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }

  if (StVal.getValueType() == MVT::i64 &&
      StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

    // Bitcast an i64 store extracted from a vector to f64.
    // Otherwise, the i64 value will be legalized to a pair of i32 values.
    SelectionDAG &DAG = DCI.DAG;
    SDLoc dl(StVal);
    SDValue IntVec = StVal.getOperand(0);
    EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
                                   IntVec.getValueType().getVectorNumElements());
    SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
    SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                 Vec, StVal.getOperand(1));
    dl = SDLoc(N);
    SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
    // Make the DAGCombiner fold the bitcasts.
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(ExtElt.getNode());
    DCI.AddToWorklist(V.getNode());
    return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags(), St->getAAInfo());
  }

  // If this is a legal vector store, try to combine it into a VST1_UPD.
  if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() &&
      DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return CombineBaseUpdate(N, DCI);

  return SDValue();
}

/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
/// can replace combinations of VMUL and VCVT (floating-point to integer)
/// when the VMUL has a constant operand that is a power of 2.
///
/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
///  vmul.f32        d16, d17, d16
///  vcvt.s32.f32    d16, d16
/// becomes:
///  vcvt.s32.f32    d16, d16, #3
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  SDValue Op = N->getOperand(0);
  if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
      Op.getOpcode() != ISD::FMUL)
    return SDValue();

  SDValue ConstVec = Op->getOperand(1);
  if (!isa<BuildVectorSDNode>(ConstVec))
    return SDValue();

  MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
    // These instructions only exist for converting from f32 to i32. We can
    // handle smaller integers by generating an extra truncate, but larger ones
    // would be lossy. We also can't handle anything other than 2 or 4 lanes,
    // since these instructions only support v2i32/v4i32 types.
    return SDValue();
  }

  BitVector UndefElements;
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
  bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
    Intrinsic::arm_neon_vcvtfp2fxu;
  SDValue FixConv = DAG.getNode(
      ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
      DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
      DAG.getConstant(C, dl, MVT::i32));

  if (IntBits < FloatBits)
    FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);

  return FixConv;
}

/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
/// can replace combinations of VCVT (integer to floating-point) and VDIV
/// when the VDIV has a constant operand that is a power of 2.
///
/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
///  vcvt.f32.s32    d16, d16
///  vdiv.f32        d16, d17, d16
/// becomes:
///  vcvt.f32.s32    d16, d16, #3
static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  SDValue Op = N->getOperand(0);
  unsigned OpOpcode = Op.getNode()->getOpcode();
  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
      (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
    return SDValue();

  SDValue ConstVec = N->getOperand(1);
  if (!isa<BuildVectorSDNode>(ConstVec))
    return SDValue();

  MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
    // These instructions only exist for converting from i32 to f32. We can
    // handle smaller integers by generating an extra extend, but larger ones
    // would be lossy. We also can't handle anything other than 2 or 4 lanes,
    // since these instructions only support v2i32/v4i32 types.
    return SDValue();
  }

  BitVector UndefElements;
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
  bool isSigned = OpOpcode == ISD::SINT_TO_FP;
  SDValue ConvInput = Op.getOperand(0);
  if (IntBits < FloatBits)
    ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                            dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
                            ConvInput);

  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
    Intrinsic::arm_neon_vcvtfxu2fp;
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
                     Op.getValueType(),
                     DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
                     ConvInput, DAG.getConstant(C, dl, MVT::i32));
}

static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
                                           const ARMSubtarget *ST) {
  if (!ST->hasMVEIntegerOps())
    return SDValue();

  assert(N->getOpcode() == ISD::VECREDUCE_ADD);
  EVT ResVT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDLoc dl(N);

  // We are looking for something that will have illegal types if left alone,
  // but that we can convert to a single instruction under MVE. For example
  // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A
  // or
  // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B

  // Cases:
  //   VADDV u/s 8/16/32
  //   VMLAV u/s 8/16/32
  //   VADDLV u/s 32
  //   VMLALV u/s 16/32

  auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) {
    if (ResVT != RetTy || N0->getOpcode() != ExtendCode)
      return SDValue();
    SDValue A = N0->getOperand(0);
    if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
      return A;
    return SDValue();
  };
  auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode,
                         ArrayRef<MVT> ExtTypes, SDValue &Mask) {
    if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT ||
        !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode()))
      return SDValue();
    Mask = N0->getOperand(0);
    SDValue Ext = N0->getOperand(1);
    if (Ext->getOpcode() != ExtendCode)
      return SDValue();
    SDValue A = Ext->getOperand(0);
    if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
      return A;
    return SDValue();
  };
  auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes,
                     SDValue &A, SDValue &B) {
    // For a vmla we are trying to match a larger pattern:
    // ExtA = sext/zext A
    // ExtB = sext/zext B
    // Mul = mul ExtA, ExtB
    // vecreduce.add Mul
    // There might also be an extra extend between the mul and the addreduce, so
    // long as the bitwidth is high enough to make them equivalent (for example
    // original v8i16 might be mul at v8i32 and the reduce happens at v8i64).
    if (ResVT != RetTy)
      return false;
    SDValue Mul = N0;
    if (Mul->getOpcode() == ExtendCode &&
        Mul->getOperand(0).getScalarValueSizeInBits() * 2 >=
            ResVT.getScalarSizeInBits())
      Mul = Mul->getOperand(0);
    if (Mul->getOpcode() != ISD::MUL)
      return false;
    SDValue ExtA = Mul->getOperand(0);
    SDValue ExtB = Mul->getOperand(1);
    if (ExtA->getOpcode() != ExtendCode && ExtB->getOpcode() != ExtendCode)
      return false;
    A = ExtA->getOperand(0);
    B = ExtB->getOperand(0);
    if (A.getValueType() == B.getValueType() &&
        llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
      return true;
    return false;
  };
  auto IsPredVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes,
                     SDValue &A, SDValue &B, SDValue &Mask) {
    // Same as the pattern above with a select for the zero predicated lanes
    // ExtA = sext/zext A
    // ExtB = sext/zext B
    // Mul = mul ExtA, ExtB
    // N0 = select Mask, Mul, 0
    // vecreduce.add N0
    if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT ||
        !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode()))
      return false;
    Mask = N0->getOperand(0);
    SDValue Mul = N0->getOperand(1);
    if (Mul->getOpcode() == ExtendCode &&
        Mul->getOperand(0).getScalarValueSizeInBits() * 2 >=
            ResVT.getScalarSizeInBits())
      Mul = Mul->getOperand(0);
    if (Mul->getOpcode() != ISD::MUL)
      return false;
    SDValue ExtA = Mul->getOperand(0);
    SDValue ExtB = Mul->getOperand(1);
    if (ExtA->getOpcode() != ExtendCode && ExtB->getOpcode() != ExtendCode)
      return false;
    A = ExtA->getOperand(0);
    B = ExtB->getOperand(0);
    if (A.getValueType() == B.getValueType() &&
        llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
      return true;
    return false;
  };
  auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) {
    SDValue Node = DAG.getNode(Opcode, dl, {MVT::i32, MVT::i32}, Ops);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Node,
                       SDValue(Node.getNode(), 1));
  };

  if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}))
    return DAG.getNode(ARMISD::VADDVs, dl, ResVT, A);
  if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}))
    return DAG.getNode(ARMISD::VADDVu, dl, ResVT, A);
  if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}))
    return Create64bitNode(ARMISD::VADDLVs, {A});
  if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}))
    return Create64bitNode(ARMISD::VADDLVu, {A});
  if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}))
    return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
                       DAG.getNode(ARMISD::VADDVs, dl, MVT::i32, A));
  if (SDValue A = IsVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}))
    return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
                       DAG.getNode(ARMISD::VADDVu, dl, MVT::i32, A));

  SDValue Mask;
  if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask))
    return DAG.getNode(ARMISD::VADDVps, dl, ResVT, A, Mask);
  if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask))
    return DAG.getNode(ARMISD::VADDVpu, dl, ResVT, A, Mask);
  if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}, Mask))
    return Create64bitNode(ARMISD::VADDLVps, {A, Mask});
  if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask))
    return Create64bitNode(ARMISD::VADDLVpu, {A, Mask});
  if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask))
    return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
                       DAG.getNode(ARMISD::VADDVps, dl, MVT::i32, A, Mask));
  if (SDValue A = IsPredVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, Mask))
    return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
                       DAG.getNode(ARMISD::VADDVpu, dl, MVT::i32, A, Mask));

  SDValue A, B;
  if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B))
    return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B);
  if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B))
    return DAG.getNode(ARMISD::VMLAVu, dl, ResVT, A, B);
  if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B))
    return Create64bitNode(ARMISD::VMLALVs, {A, B});
  if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B))
    return Create64bitNode(ARMISD::VMLALVu, {A, B});
  if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B))
    return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
                       DAG.getNode(ARMISD::VMLAVs, dl, MVT::i32, A, B));
  if (IsVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B))
    return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
                       DAG.getNode(ARMISD::VMLAVu, dl, MVT::i32, A, B));

  if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, Mask))
    return DAG.getNode(ARMISD::VMLAVps, dl, ResVT, A, B, Mask);
  if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, Mask))
    return DAG.getNode(ARMISD::VMLAVpu, dl, ResVT, A, B, Mask);
  if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, Mask))
    return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask});
  if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, Mask))
    return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask});
  if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, Mask))
    return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
                       DAG.getNode(ARMISD::VMLAVps, dl, MVT::i32, A, B, Mask));
  if (IsPredVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B, Mask))
    return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
                       DAG.getNode(ARMISD::VMLAVpu, dl, MVT::i32, A, B, Mask));

  // Some complications: we can get a case where the two inputs of the mul are
  // the same, in which case the output sext will have been helpfully converted
  // to a zext. Turn it back.
  SDValue Op = N0;
  if (Op->getOpcode() == ISD::VSELECT)
    Op = Op->getOperand(1);
  if (Op->getOpcode() == ISD::ZERO_EXTEND &&
      Op->getOperand(0)->getOpcode() == ISD::MUL) {
    SDValue Mul = Op->getOperand(0);
    if (Mul->getOperand(0) == Mul->getOperand(1) &&
        Mul->getOperand(0)->getOpcode() == ISD::SIGN_EXTEND) {
      SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, N0->getValueType(0), Mul);
      if (Op != N0)
        Ext = DAG.getNode(ISD::VSELECT, dl, N0->getValueType(0),
                          N0->getOperand(0), Ext, N0->getOperand(2));
      return DAG.getNode(ISD::VECREDUCE_ADD, dl, ResVT, Ext);
    }
  }

  return SDValue();
}

static SDValue PerformVMOVNCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  unsigned IsTop = N->getConstantOperandVal(2);

  // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b)
  // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b)
  if ((Op1->getOpcode() == ARMISD::VQMOVNs ||
       Op1->getOpcode() == ARMISD::VQMOVNu) &&
      Op1->getConstantOperandVal(2) == 0)
    return DCI.DAG.getNode(Op1->getOpcode(), SDLoc(Op1), N->getValueType(0),
                           Op0, Op1->getOperand(1), N->getOperand(2));

  // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from
  // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting
  // into the top or bottom lanes.
  unsigned NumElts = N->getValueType(0).getVectorNumElements();
  APInt Op1DemandedElts = APInt::getSplat(NumElts, APInt::getLowBitsSet(2, 1));
  APInt Op0DemandedElts =
      IsTop ? Op1DemandedElts
            : APInt::getSplat(NumElts, APInt::getHighBitsSet(2, 1));

  APInt KnownUndef, KnownZero;
  const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);
  if (TLI.SimplifyDemandedVectorElts(Op1, Op1DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);

  return SDValue();
}

static SDValue PerformVQMOVNCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  SDValue Op0 = N->getOperand(0);
  unsigned IsTop = N->getConstantOperandVal(2);

  unsigned NumElts = N->getValueType(0).getVectorNumElements();
  APInt Op0DemandedElts =
      APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
                                     : APInt::getHighBitsSet(2, 1));

  APInt KnownUndef, KnownZero;
  const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);
  return SDValue();
}

static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Turn X << -C -> X >> C and vice versa. The negative shifts can come up from
  // uses of the intrinsics.
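  // For example (illustrative): an LSLL of the i64 pair by -8 is rewritten as
  // an LSRL by 8, and a shift by 0 simply forwards both halves unchanged.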
  if (auto C = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
    int ShiftAmt = C->getSExtValue();
    if (ShiftAmt == 0) {
      SDValue Merge = DAG.getMergeValues({Op0, Op1}, DL);
      DAG.ReplaceAllUsesWith(N, Merge.getNode());
      return SDValue();
    }

    if (ShiftAmt >= -32 && ShiftAmt < 0) {
      unsigned NewOpcode =
          N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL;
      SDValue NewShift = DAG.getNode(NewOpcode, DL, N->getVTList(), Op0, Op1,
                                     DAG.getConstant(-ShiftAmt, DL, MVT::i32));
      DAG.ReplaceAllUsesWith(N, NewShift.getNode());
      return NewShift;
    }
  }

  return SDValue();
}

/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  // Vector shifts: check for immediate versions and lower them.
  // Note: This is done during DAG combining instead of DAG legalizing because
  // the build_vectors for 64-bit vector element shift counts are generally
  // not legal, and it is hard to see their values after they get legalized to
  // loads from a constant pool.
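  // For example (illustrative): arm.neon.vshifts(x, <splat of 3>) becomes
  // (VSHLIMM x, 3), while a splat of -3 becomes (VSHRsIMM x, 3).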
  case Intrinsic::arm_neon_vshifts:
  case Intrinsic::arm_neon_vshiftu:
  case Intrinsic::arm_neon_vrshifts:
  case Intrinsic::arm_neon_vrshiftu:
  case Intrinsic::arm_neon_vrshiftn:
  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu:
  case Intrinsic::arm_neon_vqshiftsu:
  case Intrinsic::arm_neon_vqshiftns:
  case Intrinsic::arm_neon_vqshiftnu:
  case Intrinsic::arm_neon_vqshiftnsu:
  case Intrinsic::arm_neon_vqrshiftns:
  case Intrinsic::arm_neon_vqrshiftnu:
  case Intrinsic::arm_neon_vqrshiftnsu: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
        VShiftOpc = ARMISD::VSHLIMM;
        break;
      }
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM
                                                          : ARMISD::VSHRuIMM);
        break;
      }
      return SDValue();

    case Intrinsic::arm_neon_vrshifts:
    case Intrinsic::arm_neon_vrshiftu:
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshifts:
    case Intrinsic::arm_neon_vqshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshiftsu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      llvm_unreachable("invalid shift count for vqshlu intrinsic");

    case Intrinsic::arm_neon_vrshiftn:
    case Intrinsic::arm_neon_vqshiftns:
    case Intrinsic::arm_neon_vqshiftnu:
    case Intrinsic::arm_neon_vqshiftnsu:
    case Intrinsic::arm_neon_vqrshiftns:
    case Intrinsic::arm_neon_vqrshiftnu:
    case Intrinsic::arm_neon_vqrshiftnsu:
      // Narrowing shifts require an immediate right shift.
      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for narrowing vector shift "
                       "intrinsic");

    default:
      llvm_unreachable("unhandled vector shift");
    }

    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      // Opcode already set above.
      break;
    case Intrinsic::arm_neon_vrshifts:
      VShiftOpc = ARMISD::VRSHRsIMM;
      break;
    case Intrinsic::arm_neon_vrshiftu:
      VShiftOpc = ARMISD::VRSHRuIMM;
      break;
    case Intrinsic::arm_neon_vrshiftn:
      VShiftOpc = ARMISD::VRSHRNIMM;
      break;
    case Intrinsic::arm_neon_vqshifts:
      VShiftOpc = ARMISD::VQSHLsIMM;
      break;
    case Intrinsic::arm_neon_vqshiftu:
      VShiftOpc = ARMISD::VQSHLuIMM;
      break;
    case Intrinsic::arm_neon_vqshiftsu:
      VShiftOpc = ARMISD::VQSHLsuIMM;
      break;
    case Intrinsic::arm_neon_vqshiftns:
      VShiftOpc = ARMISD::VQSHRNsIMM;
      break;
    case Intrinsic::arm_neon_vqshiftnu:
      VShiftOpc = ARMISD::VQSHRNuIMM;
      break;
    case Intrinsic::arm_neon_vqshiftnsu:
      VShiftOpc = ARMISD::VQSHRNsuIMM;
      break;
    case Intrinsic::arm_neon_vqrshiftns:
      VShiftOpc = ARMISD::VQRSHRNsIMM;
      break;
    case Intrinsic::arm_neon_vqrshiftnu:
      VShiftOpc = ARMISD::VQRSHRNuIMM;
      break;
    case Intrinsic::arm_neon_vqrshiftnsu:
      VShiftOpc = ARMISD::VQRSHRNsuIMM;
      break;
    }

    SDLoc dl(N);
    return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
  }

  case Intrinsic::arm_neon_vshiftins: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
      VShiftOpc = ARMISD::VSLIIMM;
    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
      VShiftOpc = ARMISD::VSRIIMM;
    else {
      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
    }

    SDLoc dl(N);
    return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       DAG.getConstant(Cnt, dl, MVT::i32));
  }

  case Intrinsic::arm_neon_vqrshifts:
  case Intrinsic::arm_neon_vqrshiftu:
    // No immediate versions of these to check for.
    break;

  case Intrinsic::arm_mve_vqdmlah:
  case Intrinsic::arm_mve_vqdmlash:
  case Intrinsic::arm_mve_vqrdmlah:
  case Intrinsic::arm_mve_vqrdmlash:
  case Intrinsic::arm_mve_vmla_n_predicated:
  case Intrinsic::arm_mve_vmlas_n_predicated:
  case Intrinsic::arm_mve_vqdmlah_predicated:
  case Intrinsic::arm_mve_vqdmlash_predicated:
  case Intrinsic::arm_mve_vqrdmlah_predicated:
  case Intrinsic::arm_mve_vqrdmlash_predicated: {
    // These intrinsics all take an i32 scalar operand which is narrowed to the
    // size of a single lane of the vector type they return. So we don't need
    // any bits of that operand above that point, which allows us to eliminate
    // uxth/sxth.
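    // For example (illustrative): for a v8i16 result only the low 16 bits of
    // the scalar are used, so a preceding uxth/sxth of that operand can be
    // removed.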
    unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth);
    if (SimplifyDemandedBits(N->getOperand(3), DemandedMask, DCI))
      return SDValue();
    break;
  }

  case Intrinsic::arm_mve_minv:
  case Intrinsic::arm_mve_maxv:
  case Intrinsic::arm_mve_minav:
  case Intrinsic::arm_mve_maxav:
  case Intrinsic::arm_mve_minv_predicated:
  case Intrinsic::arm_mve_maxv_predicated:
  case Intrinsic::arm_mve_minav_predicated:
  case Intrinsic::arm_mve_maxav_predicated: {
    // These intrinsics all take an i32 scalar operand which is narrowed to the
    // size of a single lane of the vector type they take as the other input.
    unsigned BitWidth = N->getOperand(2)->getValueType(0).getScalarSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth);
    if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
      return SDValue();
    break;
  }

  case Intrinsic::arm_mve_addv: {
    // Turn this intrinsic straight into the appropriate ARMISD::VADDV node,
    // which allows PerformADDVecReduce to turn it into a VADDLV when possible.
    bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs;
    return DAG.getNode(Opc, SDLoc(N), N->getVTList(), N->getOperand(1));
  }

  case Intrinsic::arm_mve_addlv:
  case Intrinsic::arm_mve_addlv_predicated: {
    // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR
    // which recombines the two i32 outputs into an i64.
    bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ?
                    (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) :
                    (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps);

    SmallVector<SDValue, 4> Ops;
    for (unsigned i = 1, e = N->getNumOperands(); i < e; i++)
      if (i != 2)                      // skip the unsigned flag
        Ops.push_back(N->getOperand(i));

    SDLoc dl(N);
    SDValue val = DAG.getNode(Opc, dl, {MVT::i32, MVT::i32}, Ops);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, val.getValue(0),
                       val.getValue(1));
  }
  }

  return SDValue();
}

/// PerformShiftCombine - Checks for immediate versions of vector shifts and
/// lowers them.  As with the vector shift intrinsics, this is done during DAG
/// combining instead of DAG legalizing because the build_vectors for 64-bit
/// vector element shift counts are generally not legal, and it is hard to see
/// their values after they get legalized to loads from a constant pool.
static SDValue PerformShiftCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *ST) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
    // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
    // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
    SDValue N1 = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      SDValue N0 = N->getOperand(0);
      if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
          DAG.MaskedValueIsZero(N0.getOperand(0),
                                APInt::getHighBitsSet(32, 16)))
        return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
    }
  }

  if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 &&
      N->getOperand(0)->getOpcode() == ISD::AND &&
      N->getOperand(0)->hasOneUse()) {
    if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
      return SDValue();
    // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't
    // usually show up because instcombine prefers to canonicalize it to
    // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come
    // out of GEP lowering in some cases.
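    // For example (illustrative, assuming the checks below pass):
    // (shl (and x, 0x3ff), 2) can be rewritten as (srl (shl x, 22), 20),
    // avoiding the need to materialize the mask constant.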
    SDValue N0 = N->getOperand(0);
    ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!ShiftAmtNode)
      return SDValue();
    uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue());
    ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!AndMaskNode)
      return SDValue();
    uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue());
    // Don't transform uxtb/uxth.
    if (AndMask == 255 || AndMask == 65535)
      return SDValue();
    if (isMask_32(AndMask)) {
      uint32_t MaskedBits = countLeadingZeros(AndMask);
      if (MaskedBits > ShiftAmt) {
        SDLoc DL(N);
        SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                  DAG.getConstant(MaskedBits, DL, MVT::i32));
        return DAG.getNode(
            ISD::SRL, DL, MVT::i32, SHL,
            DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32));
      }
    }
  }

  // Nothing to be done for scalar shifts.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!VT.isVector() || !TLI.isTypeLegal(VT))
    return SDValue();
  if (ST->hasMVEIntegerOps() && VT == MVT::v2i64)
    return SDValue();

  int64_t Cnt;

  switch (N->getOpcode()) {
  default: llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
      SDLoc dl(N);
      return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
      unsigned VShiftOpc =
          (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
      SDLoc dl(N);
      return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
  }
  return SDValue();
}

// Look for a sign/zero/fp extend of a larger than legal load. This can be
// split into multiple extending loads, which are simpler to deal with than an
// arbitrary extend. For fp extends we use an integer extending load and a VCVTL
// to convert the type to an f32.
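// For example (illustrative): (v8i32 sext (v8i16 load)) can be split into two
// v4i16->v4i32 extending loads at consecutive offsets, with the results
// concatenated back into a v8i32.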
static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  if (N0.getOpcode() != ISD::LOAD)
    return SDValue();
  LoadSDNode *LD = cast<LoadSDNode>(N0.getNode());
  if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return SDValue();
  EVT FromVT = LD->getValueType(0);
  EVT ToVT = N->getValueType(0);
  if (!ToVT.isVector())
    return SDValue();
  assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
  EVT ToEltVT = ToVT.getVectorElementType();
  EVT FromEltVT = FromVT.getVectorElementType();

  unsigned NumElements = 0;
  if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8))
    NumElements = 4;
  if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8)
    NumElements = 8;
  if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16)
    NumElements = 4;
  if (NumElements == 0 ||
      (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) ||
      FromVT.getVectorNumElements() % NumElements != 0 ||
      !isPowerOf2_32(NumElements))
    return SDValue();

  LLVMContext &C = *DAG.getContext();
  SDLoc DL(LD);
  // Details about the old load
  SDValue Ch = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  Align Alignment = LD->getOriginalAlign();
  MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
  AAMDNodes AAInfo = LD->getAAInfo();

  ISD::LoadExtType NewExtType =
      N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
  SDValue Offset = DAG.getUNDEF(BasePtr.getValueType());
  EVT NewFromVT = EVT::getVectorVT(
      C, EVT::getIntegerVT(C, FromEltVT.getScalarSizeInBits()), NumElements);
  EVT NewToVT = EVT::getVectorVT(
      C, EVT::getIntegerVT(C, ToEltVT.getScalarSizeInBits()), NumElements);

  SmallVector<SDValue, 4> Loads;
  SmallVector<SDValue, 4> Chains;
  for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
    unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8;
    SDValue NewPtr =
        DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset));

    SDValue NewLoad =
        DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset,
                    LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT,
                    Alignment, MMOFlags, AAInfo);
    Loads.push_back(NewLoad);
    Chains.push_back(SDValue(NewLoad.getNode(), 1));
  }

  // The f16 loads need to be extended with VCVTLs into their floating point type.
  if (FromEltVT == MVT::f16) {
    SmallVector<SDValue, 4> Extends;

    for (unsigned i = 0; i < Loads.size(); i++) {
      SDValue LoadBC =
          DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v8f16, Loads[i]);
      SDValue FPExt = DAG.getNode(ARMISD::VCVTL, DL, MVT::v4f32, LoadBC,
                                  DAG.getConstant(0, DL, MVT::i32));
      Extends.push_back(FPExt);
    }

    Loads = Extends;
  }

  SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, Loads);
}

/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDValue N0 = N->getOperand(0);

  // Check for sign- and zero-extensions of vector extract operations of 8- and
  // 16-bit vector elements. NEON and MVE support these directly. They are
  // handled during DAG combining because type legalization will promote them
  // to 32-bit types and it is messy to recognize the operations after that.
  if ((ST->hasNEON() || ST->hasMVEIntegerOps()) &&
      N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue Vec = N0.getOperand(0);
    SDValue Lane = N0.getOperand(1);
    EVT VT = N->getValueType(0);
    EVT EltVT = N0.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();

    if (VT == MVT::i32 &&
        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
        TLI.isTypeLegal(Vec.getValueType()) &&
        isa<ConstantSDNode>(Lane)) {

      unsigned Opc = 0;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ISD::SIGN_EXTEND:
        Opc = ARMISD::VGETLANEs;
        break;
      case ISD::ZERO_EXTEND:
      case ISD::ANY_EXTEND:
        Opc = ARMISD::VGETLANEu;
        break;
      }
      return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
    }
  }

  if (ST->hasMVEIntegerOps())
    if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
      return NewLoad;

  return SDValue();
}

static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {
  if (ST->hasMVEFloatOps())
    if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
      return NewLoad;

  return SDValue();
}

/// PerformMinMaxCombine - Target-specific DAG combining for creating truncating
/// saturates.
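/// For example (an illustrative case): a v4i32 smin(smax(x, -32768), 32767)
/// clamp can be performed with a VQMOVNs writing the bottom lanes of a v8i16,
/// followed by a sign_extend_inreg to recreate the original v4i32 value.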
static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  if (!ST->hasMVEIntegerOps())
    return SDValue();

  if (VT != MVT::v4i32 && VT != MVT::v8i16)
    return SDValue();

  auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) {
    // Check one is a smin and the other is a smax
    if (Min->getOpcode() != ISD::SMIN)
      std::swap(Min, Max);
    if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX)
      return false;

    APInt SaturateC;
    if (VT == MVT::v4i32)
      SaturateC = APInt(32, (1 << 15) - 1, true);
    else //if (VT == MVT::v8i16)
      SaturateC = APInt(16, (1 << 7) - 1, true);

    APInt MinC, MaxC;
    if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) ||
        MinC != SaturateC)
      return false;
    if (!ISD::isConstantSplatVector(Max->getOperand(1).getNode(), MaxC) ||
        MaxC != ~SaturateC)
      return false;
    return true;
  };

  if (IsSignedSaturate(N, N0.getNode())) {
    SDLoc DL(N);
    MVT ExtVT, HalfVT;
    if (VT == MVT::v4i32) {
      HalfVT = MVT::v8i16;
      ExtVT = MVT::v4i16;
    } else { // if (VT == MVT::v8i16)
      HalfVT = MVT::v16i8;
      ExtVT = MVT::v8i8;
    }

    // Create a VQMOVNB with undef top lanes, then sign extend into the top
    // half. That extend will hopefully be removed if only the bottom bits are
    // demanded (through a truncating store, for example).
    SDValue VQMOVN =
        DAG.getNode(ARMISD::VQMOVNs, DL, HalfVT, DAG.getUNDEF(HalfVT),
                    N0->getOperand(0), DAG.getConstant(0, DL, MVT::i32));
    SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Bitcast,
                       DAG.getValueType(ExtVT));
  }

  auto IsUnsignedSaturate = [&](SDNode *Min) {
    // For unsigned, we just need to check for <= 0xffff
    if (Min->getOpcode() != ISD::UMIN)
      return false;

    APInt SaturateC;
    if (VT == MVT::v4i32)
      SaturateC = APInt(32, (1 << 16) - 1, true);
    else //if (VT == MVT::v8i16)
      SaturateC = APInt(16, (1 << 8) - 1, true);

    APInt MinC;
    if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) ||
        MinC != SaturateC)
      return false;
    return true;
  };

  if (IsUnsignedSaturate(N)) {
    SDLoc DL(N);
    MVT HalfVT;
    unsigned ExtConst;
    if (VT == MVT::v4i32) {
      HalfVT = MVT::v8i16;
      ExtConst = 0x0000FFFF;
    } else { //if (VT == MVT::v8i16)
      HalfVT = MVT::v16i8;
      ExtConst = 0x00FF;
    }

    // Create a VQMOVNB with undef top lanes, then ZExt into the top half with
    // an AND. That extend will hopefully be removed if only the bottom bits are
    // demanded (through a truncating store, for example).
    SDValue VQMOVN =
        DAG.getNode(ARMISD::VQMOVNu, DL, HalfVT, DAG.getUNDEF(HalfVT), N0,
                    DAG.getConstant(0, DL, MVT::i32));
    SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN);
    return DAG.getNode(ISD::AND, DL, VT, Bitcast,
                       DAG.getConstant(ExtConst, DL, VT));
  }

  return SDValue();
}

static const APInt *isPowerOf2Constant(SDValue V) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
  if (!C)
    return nullptr;
  const APInt *CV = &C->getAPIntValue();
  return CV->isPowerOf2() ? CV : nullptr;
}

SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
  // If we have a CMOV, OR and AND combination such as:
  //   if (x & CN)
  //     y |= CM;
  //
  // And:
  //   * CN is a single bit;
  //   * All bits covered by CM are known zero in y
  //
  // Then we can convert this into a sequence of BFI instructions. This will
  // always be a win if CM is a single bit, will always be no worse than the
  // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
  // three bits (due to the extra IT instruction).
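  //
  // For example (illustrative): with CN = 0x4 and CM = 0x2, and bit 1 of y
  // known zero, the combine shifts x right by 2 and uses a single BFI to copy
  // that bit of x into bit 1 of y.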

  SDValue Op0 = CMOV->getOperand(0);
  SDValue Op1 = CMOV->getOperand(1);
  auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
  auto CC = CCNode->getAPIntValue().getLimitedValue();
  SDValue CmpZ = CMOV->getOperand(4);

  // The compare must be against zero.
  if (!isNullConstant(CmpZ->getOperand(1)))
    return SDValue();

  assert(CmpZ->getOpcode() == ARMISD::CMPZ);
  SDValue And = CmpZ->getOperand(0);
  if (And->getOpcode() != ISD::AND)
    return SDValue();
  const APInt *AndC = isPowerOf2Constant(And->getOperand(1));
  if (!AndC)
    return SDValue();
  SDValue X = And->getOperand(0);

  if (CC == ARMCC::EQ) {
    // We're performing an "equal to zero" compare. Swap the operands so we
    // canonicalize on a "not equal to zero" compare.
    std::swap(Op0, Op1);
  } else {
    assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
  }

  if (Op1->getOpcode() != ISD::OR)
    return SDValue();

  ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
  if (!OrC)
    return SDValue();
  SDValue Y = Op1->getOperand(0);

  if (Op0 != Y)
    return SDValue();

  // Now, is it profitable to continue?
  APInt OrCI = OrC->getAPIntValue();
  unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
  if (OrCI.countPopulation() > Heuristic)
    return SDValue();

  // Lastly, can we determine that the bits defined by OrCI
  // are zero in Y?
  KnownBits Known = DAG.computeKnownBits(Y);
  if ((OrCI & Known.Zero) != OrCI)
    return SDValue();

  // OK, we can do the combine.
  SDValue V = Y;
  SDLoc dl(X);
  EVT VT = X.getValueType();
  unsigned BitInX = AndC->logBase2();

  if (BitInX != 0) {
    // We must shift X first.
    X = DAG.getNode(ISD::SRL, dl, VT, X,
                    DAG.getConstant(BitInX, dl, VT));
  }

  for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
       BitInY < NumActiveBits; ++BitInY) {
    if (OrCI[BitInY] == 0)
      continue;
    APInt Mask(VT.getSizeInBits(), 0);
    Mask.setBit(BitInY);
    V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
                    // Confusingly, the operand is an *inverted* mask.
                    DAG.getConstant(~Mask, dl, VT));
  }

  return V;
}

// Given N, the value controlling the conditional branch, search for the loop
// intrinsic, returning it, along with how the value is used. We need to handle
// patterns such as the following:
// (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit)
// (brcond (setcc (loop.decrement), 0, eq), exit)
// (brcond (setcc (loop.decrement), 0, ne), header)
static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm,
                                   bool &Negate) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::XOR: {
    if (!isa<ConstantSDNode>(N.getOperand(1)))
      return SDValue();
    if (!cast<ConstantSDNode>(N.getOperand(1))->isOne())
      return SDValue();
    Negate = !Negate;
    return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate);
  }
  case ISD::SETCC: {
    auto *Const = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!Const)
      return SDValue();
    if (Const->isNullValue())
      Imm = 0;
    else if (Const->isOne())
      Imm = 1;
    else
      return SDValue();
    CC = cast<CondCodeSDNode>(N.getOperand(2))->get();
    return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate);
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue();
    if (IntOp != Intrinsic::test_set_loop_iterations &&
        IntOp != Intrinsic::loop_decrement_reg)
      return SDValue();
    return N;
  }
  }
  return SDValue();
}

static SDValue PerformHWLoopCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const ARMSubtarget *ST) {

  // The hwloop intrinsics that we're interested in are used for control flow,
  // either for entering or exiting the loop:
  // - test.set.loop.iterations will test whether its operand is zero. If it
  //   is zero, the following branch should not enter the loop.
  // - loop.decrement.reg also tests whether its operand is zero. If it is
  //   zero, the following branch should not branch back to the beginning of
  //   the loop.
  // So here, we need to check how the brcond is using the result of each of
  // the intrinsics to ensure that we're branching to the right place at the
  // right time.

  ISD::CondCode CC;
  SDValue Cond;
  int Imm = 1;
  bool Negate = false;
  SDValue Chain = N->getOperand(0);
  SDValue Dest;

  if (N->getOpcode() == ISD::BRCOND) {
    CC = ISD::SETEQ;
    Cond = N->getOperand(1);
    Dest = N->getOperand(2);
  } else {
    assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
    CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    Cond = N->getOperand(2);
    Dest = N->getOperand(4);
    if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(3))) {
      if (!Const->isOne() && !Const->isNullValue())
        return SDValue();
      Imm = Const->getZExtValue();
    } else
      return SDValue();
  }

  SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate);
  if (!Int)
    return SDValue();

  if (Negate)
    CC = ISD::getSetCCInverse(CC, /* Integer inverse */ MVT::i32);

  auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) {
    return (CC == ISD::SETEQ && Imm == 0) ||
           (CC == ISD::SETNE && Imm == 1) ||
           (CC == ISD::SETLT && Imm == 1) ||
           (CC == ISD::SETULT && Imm == 1);
  };

  auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) {
    return (CC == ISD::SETEQ && Imm == 1) ||
           (CC == ISD::SETNE && Imm == 0) ||
           (CC == ISD::SETGT && Imm == 0) ||
           (CC == ISD::SETUGT && Imm == 0) ||
           (CC == ISD::SETGE && Imm == 1) ||
           (CC == ISD::SETUGE && Imm == 1);
  };

  assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) &&
         "unsupported condition");

  SDLoc dl(Int);
  SelectionDAG &DAG = DCI.DAG;
  SDValue Elements = Int.getOperand(2);
  unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
  assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR)
          && "expected single br user");
  SDNode *Br = *N->use_begin();
  SDValue OtherTarget = Br->getOperand(1);

  // Update the unconditional branch to branch to the given Dest.
  auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) {
    SDValue NewBrOps[] = { Br->getOperand(0), Dest };
    SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps);
    DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr);
  };

  if (IntOp == Intrinsic::test_set_loop_iterations) {
    SDValue Res;
    // We expect this 'instruction' to branch when the counter is zero.
    if (IsTrueIfZero(CC, Imm)) {
      SDValue Ops[] = { Chain, Elements, Dest };
      Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
    } else {
      // The logic is the reverse of what we need for WLS, so find the other
      // basic block target: the target of the following unconditional br.
      UpdateUncondBr(Br, Dest, DAG);

      SDValue Ops[] = { Chain, Elements, OtherTarget };
      Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
    }
    DAG.ReplaceAllUsesOfValueWith(Int.getValue(1), Int.getOperand(0));
    return Res;
  } else {
    SDValue Size = DAG.getTargetConstant(
      cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32);
    SDValue Args[] = { Int.getOperand(0), Elements, Size, };
    SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl,
                                  DAG.getVTList(MVT::i32, MVT::Other), Args);
    DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode());

    // We expect this instruction to branch when the count is not zero.
    SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget;

    // Update the unconditional branch to target the loop preheader if we've
    // found the condition has been reversed.
    if (Target == OtherTarget)
      UpdateUncondBr(Br, Dest, DAG);

    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        SDValue(LoopDec.getNode(), 1), Chain);

    SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target };
    return DAG.getNode(ARMISD::LE, dl, MVT::Other, EndArgs);
  }
  return SDValue();
}

/// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
SDValue
ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue Chain = N->getOperand(0);
  SDValue BB = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
  // -> (brcond Chain BB CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
      LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
      LHS->getOperand(0)->hasOneUse()) {
    auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
    auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS00C && LHS00C->getZExtValue() == 0) &&
        (LHS01C && LHS01C->getZExtValue() == 1) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(
          ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
          LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
    }
  }

  return SDValue();
}

/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
SDValue
ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at EQ and NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue FalseVal = N->getOperand(0);
  SDValue TrueVal = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // BFI is only available on V6T2+.
  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
    SDValue R = PerformCMOVToBFICombine(N, DAG);
    if (R)
      return R;
  }

  // Simplify
  //   mov     r1, r0
  //   cmp     r1, x
  //   mov     r0, y
  //   moveq   r0, x
  // to
  //   cmp     r0, x
  //   movne   r0, y
  //
  //   mov     r1, r0
  //   cmp     r1, x
  //   mov     r0, x
  //   movne   r0, y
  // to
  //   cmp     r0, x
  //   movne   r0, y
  /// FIXME: Turn this into a target neutral optimization?
  SDValue Res;
  if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
                      N->getOperand(3), Cmp);
  } else if (CC == ARMCC::EQ && TrueVal == RHS) {
    SDValue ARMcc;
    SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
                      N->getOperand(3), NewCmp);
  }

  // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
  // -> (cmov F T CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
    auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS0C && LHS0C->getZExtValue() == 0) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                         LHS->getOperand(2), LHS->getOperand(3),
                         LHS->getOperand(4));
    }
  }

  if (!VT.isInteger())
      return SDValue();

  // Materialize a boolean comparison for integers so we can avoid branching.
  if (isNullConstant(FalseVal)) {
    if (CC == ARMCC::EQ && isOneConstant(TrueVal)) {
      if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) {
        // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it
        // right 5 bits will make that 32 be 1, otherwise it will be 0.
        // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5
        SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
        Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub),
                          DAG.getConstant(5, dl, MVT::i32));
      } else {
        // CMOV 0, 1, ==, (CMPZ x, y) ->
        //     (ADDCARRY (SUB x, y), t:0, t:1)
        // where t = (SUBCARRY 0, (SUB x, y), 0)
        //
        // The SUBCARRY computes 0 - (x - y) and this will give a borrow when
        // x != y. In other words, a carry C == 1 when x == y, C == 0
        // otherwise.
        // The final ADDCARRY computes
        //     x - y + (0 - (x - y)) + C == C
        SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
        SDVTList VTs = DAG.getVTList(VT, MVT::i32);
        SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub);
        // The unsigned subtraction returns a borrow, but we actually want the
        // carry here.
        SDValue Carry =
            DAG.getNode(ISD::SUB, dl, MVT::i32,
                        DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1));
        Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry);
      }
    } else if (CC == ARMCC::NE && !isNullConstant(RHS) &&
               (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) {
      // This seems pointless but will allow us to combine it further below.
      // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
      SDValue Sub =
          DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
      SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                          Sub.getValue(1), SDValue());
      Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc,
                        N->getOperand(3), CPSRGlue.getValue(1));
      FalseVal = Sub;
    }
  } else if (isNullConstant(TrueVal)) {
    if (CC == ARMCC::EQ && !isNullConstant(RHS) &&
        (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) {
      // This seems pointless but will allow us to combine it further below.
      // Note that we change == to != as this is the dual of the case above.
      // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
      SDValue Sub =
          DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
      SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                          Sub.getValue(1), SDValue());
      Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal,
                        DAG.getConstant(ARMCC::NE, dl, MVT::i32),
                        N->getOperand(3), CPSRGlue.getValue(1));
      FalseVal = Sub;
    }
  }

  // On Thumb1, the DAG above may be further combined if z is a power of 2
  // (z == 1 << K).
  // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 ->
  // t1 = (USUBO (SUB x, y), 1)
  // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1)
  // Result = if K != 0 then (SHL t2:0, K) else t2:0
  //
  // This also handles the special case of comparing against zero; it's
  // essentially the same pattern, except there's no SUBS:
  // CMOV x, z, !=, (CMPZ x, 0) ->
  // t1 = (USUBO x, 1)
  // t2 = (SUBCARRY x, t1:0, t1:1)
  // Result = if K != 0 then (SHL t2:0, K) else t2:0
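  // Worked example (illustrative): take z == 4 (K == 2). If x != y and
  // SUBS x, y gives, say, 5, then t1 == 5 - 1 == 4 with no borrow,
  // t2 == 5 - 4 - 0 == 1, and SHL by 2 gives 4 == z. If x == y the SUBS gives
  // 0, t1 wraps to 0xffffffff with a borrow, t2 wraps back to 0, and the
  // result stays 0, matching the original CMOV.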
  const APInt *TrueConst;
  if (Subtarget->isThumb1Only() && CC == ARMCC::NE &&
      ((FalseVal.getOpcode() == ARMISD::SUBS &&
        FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) ||
       (FalseVal == LHS && isNullConstant(RHS))) &&
      (TrueConst = isPowerOf2Constant(TrueVal))) {
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    unsigned ShiftAmount = TrueConst->logBase2();
    if (ShiftAmount)
      TrueVal = DAG.getConstant(1, dl, VT);
    SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal);
    Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1));

    if (ShiftAmount)
      Res = DAG.getNode(ISD::SHL, dl, VT, Res,
                        DAG.getConstant(ShiftAmount, dl, MVT::i32));
  }

  if (Res.getNode()) {
    KnownBits Known = DAG.computeKnownBits(SDValue(N, 0));
    // Capture demanded bits information that would otherwise be lost.
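    // A Known.Zero of 0xfffffffe, 0xffffff00 or 0xffff0000 means only the low
    // 1, 8 or 16 bits can be non-zero, so the result can be asserted to be a
    // zero-extended i1, i8 or i16 respectively.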
    if (Known.Zero == 0xfffffffe)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i1));
    else if (Known.Zero == 0xffffff00)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i8));
    else if (Known.Zero == 0xffff0000)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i16));
  }

  return Res;
}

static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDValue Src = N->getOperand(0);
  EVT DstVT = N->getValueType(0);

  // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE.
  if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) {
    EVT SrcVT = Src.getValueType();
    if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits())
      return DAG.getNode(ARMISD::VDUP, SDLoc(N), DstVT, Src.getOperand(0));
  }

  // We may have a bitcast of something that has already had this bitcast
  // combine performed on it, so skip past any VECTOR_REG_CASTs.
  while (Src.getOpcode() == ARMISD::VECTOR_REG_CAST)
    Src = Src.getOperand(0);

  // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that
  // would be generated is at least the width of the element type.
  EVT SrcVT = Src.getValueType();
  if ((Src.getOpcode() == ARMISD::VMOVIMM ||
       Src.getOpcode() == ARMISD::VMVNIMM ||
       Src.getOpcode() == ARMISD::VMOVFPIMM) &&
      SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() &&
      DAG.getDataLayout().isBigEndian())
    return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(N), DstVT, Src);

  return SDValue();
}

SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::VSELECT:    return PerformVSELECTCombine(N, DCI, Subtarget);
  case ISD::ABS:        return PerformABSCombine(N, DCI, Subtarget);
  case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
  case ARMISD::UMLAL:   return PerformUMLALCombine(N, DCI.DAG, Subtarget);
  case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
  case ISD::SUB:        return PerformSUBCombine(N, DCI, Subtarget);
  case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
  case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
  case ISD::XOR:        return PerformXORCombine(N, DCI, Subtarget);
  case ISD::AND:        return PerformANDCombine(N, DCI, Subtarget);
  case ISD::BRCOND:
  case ISD::BR_CC:      return PerformHWLoopCombine(N, DCI, Subtarget);
  case ARMISD::ADDC:
  case ARMISD::SUBC:    return PerformAddcSubcCombine(N, DCI, Subtarget);
  case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI, Subtarget);
  case ARMISD::BFI:     return PerformBFICombine(N, DCI);
  case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
  case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
  case ARMISD::VMOVhr:  return PerformVMOVhrCombine(N, DCI);
  case ARMISD::VMOVrh:  return PerformVMOVrhCombine(N, DCI);
  case ISD::STORE:      return PerformSTORECombine(N, DCI, Subtarget);
  case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
  case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
  case ISD::EXTRACT_VECTOR_ELT: return PerformExtractEltCombine(N, DCI);
  case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
  case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget);
  case ARMISD::VDUP: return PerformVDUPCombine(N, DCI, Subtarget);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return PerformVCVTCombine(N, DCI.DAG, Subtarget);
  case ISD::FDIV:
    return PerformVDIVCombine(N, DCI.DAG, Subtarget);
  case ISD::INTRINSIC_WO_CHAIN:
    return PerformIntrinsicCombine(N, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return PerformShiftCombine(N, DCI, Subtarget);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return PerformExtendCombine(N, DCI.DAG, Subtarget);
  case ISD::FP_EXTEND:
    return PerformFPExtendCombine(N, DCI.DAG, Subtarget);
  case ISD::SMIN:
  case ISD::UMIN:
  case ISD::SMAX:
  case ISD::UMAX:
    return PerformMinMaxCombine(N, DCI.DAG, Subtarget);
  case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
  case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
  case ISD::LOAD:       return PerformLOADCombine(N, DCI);
  case ARMISD::VLD1DUP:
  case ARMISD::VLD2DUP:
  case ARMISD::VLD3DUP:
  case ARMISD::VLD4DUP:
    return PerformVLDCombine(N, DCI);
  case ARMISD::BUILD_VECTOR:
    return PerformARMBUILD_VECTORCombine(N, DCI);
  case ISD::BITCAST:
    return PerformBITCASTCombine(N, DCI.DAG, Subtarget);
  case ARMISD::PREDICATE_CAST:
    return PerformPREDICATE_CASTCombine(N, DCI);
  case ARMISD::VECTOR_REG_CAST:
    return PerformVECTOR_REG_CASTCombine(N, DCI, Subtarget);
  case ARMISD::VCMP:
    return PerformVCMPCombine(N, DCI, Subtarget);
  case ISD::VECREDUCE_ADD:
    return PerformVECREDUCE_ADDCombine(N, DCI.DAG, Subtarget);
  case ARMISD::VMOVN:
    return PerformVMOVNCombine(N, DCI);
  case ARMISD::VQMOVNs:
  case ARMISD::VQMOVNu:
    return PerformVQMOVNCombine(N, DCI);
  case ARMISD::ASRL:
  case ARMISD::LSRL:
  case ARMISD::LSLL:
    return PerformLongShiftCombine(N, DCI.DAG);
  case ARMISD::SMULWB: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
    if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
      return SDValue();
    break;
  }
  case ARMISD::SMULWT: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
    if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
      return SDValue();
    break;
  }
  case ARMISD::SMLALBB:
  case ARMISD::QADD16b:
  case ARMISD::QSUB16b: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALBT: {
    unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits();
    APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
    unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits();
    APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALTB: {
    unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits();
    APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
    unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits();
    APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALTT: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::QADD8b:
  case ARMISD::QSUB8b: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
    if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::arm_neon_vld1:
    case Intrinsic::arm_neon_vld1x2:
    case Intrinsic::arm_neon_vld1x3:
    case Intrinsic::arm_neon_vld1x4:
    case Intrinsic::arm_neon_vld2:
    case Intrinsic::arm_neon_vld3:
    case Intrinsic::arm_neon_vld4:
    case Intrinsic::arm_neon_vld2lane:
    case Intrinsic::arm_neon_vld3lane:
    case Intrinsic::arm_neon_vld4lane:
    case Intrinsic::arm_neon_vld2dup:
    case Intrinsic::arm_neon_vld3dup:
    case Intrinsic::arm_neon_vld4dup:
    case Intrinsic::arm_neon_vst1:
    case Intrinsic::arm_neon_vst1x2:
    case Intrinsic::arm_neon_vst1x3:
    case Intrinsic::arm_neon_vst1x4:
    case Intrinsic::arm_neon_vst2:
    case Intrinsic::arm_neon_vst3:
    case Intrinsic::arm_neon_vst4:
    case Intrinsic::arm_neon_vst2lane:
    case Intrinsic::arm_neon_vst3lane:
    case Intrinsic::arm_neon_vst4lane:
      return PerformVLDCombine(N, DCI);
    case Intrinsic::arm_mve_vld2q:
    case Intrinsic::arm_mve_vld4q:
    case Intrinsic::arm_mve_vst2q:
    case Intrinsic::arm_mve_vst4q:
      return PerformMVEVLDCombine(N, DCI);
    default: break;
    }
    break;
  }
  return SDValue();
}

bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
                                                          EVT VT) const {
  return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
}

bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
                                                       unsigned Alignment,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  // Depends on what it gets converted into if the type is weird.
  if (!VT.isSimple())
    return false;

  // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
  bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
  auto Ty = VT.getSimpleVT().SimpleTy;

  if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) {
    // Unaligned accesses can use (for example) LDRB, LDRH, LDR.
    if (AllowsUnaligned) {
      if (Fast)
        *Fast = Subtarget->hasV7Ops();
      return true;
    }
  }

  if (Ty == MVT::f64 || Ty == MVT::v2f64) {
    // For any little-endian targets with NEON, we can support unaligned ld/st
    // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
    // A big-endian target may also explicitly support unaligned accesses.
    if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
      if (Fast)
        *Fast = true;
      return true;
    }
  }

  if (!Subtarget->hasMVEIntegerOps())
    return false;

  // These are for predicates
  if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1)) {
    if (Fast)
      *Fast = true;
    return true;
  }

  // These are for truncated stores/narrowing loads. They are fine so long as
  // the alignment is at least the size of the item being loaded
  if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) &&
      Alignment >= VT.getScalarSizeInBits() / 8) {
    if (Fast)
      *Fast = true;
    return true;
  }

  // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and
  // VSTRW.U32 all store the vector register in exactly the same format, and
  // differ only in the range of their immediate offset field and the required
  // alignment. So there is always a store that can be used, regardless of
  // actual type.
  //
  // For big endian, that is not the case, but we can still emit a (VSTRB.U8;
  // VREV64.8) pair and get the same effect. This will likely be better than
  // aligning the vector through the stack.
  if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 ||
      Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 ||
      Ty == MVT::v2f64) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}


EVT ARMTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  // See if we can use NEON instructions for this...
  if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() &&
      !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
    bool Fast;
    if (Op.size() >= 16 &&
        (Op.isAligned(Align(16)) ||
         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
                                         MachineMemOperand::MONone, &Fast) &&
          Fast))) {
      return MVT::v2f64;
    } else if (Op.size() >= 8 &&
               (Op.isAligned(Align(8)) ||
                (allowsMisalignedMemoryAccesses(
                     MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
                 Fast))) {
      return MVT::f64;
    }
  }

  // Let the target-independent logic figure it out.
  return MVT::Other;
}

// 64-bit integers are split into their high and low parts and held in two
// different registers, so the trunc is free since the low register can just
// be used.
bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
      !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
    return true;
  }

  return false;
}

bool ARMTargetLowering::isFNegFree(EVT VT) const {
  if (!VT.isSimple())
    return false;

  // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that
  // negate values directly (fneg is free), so we don't want to let the DAG
  // combiner rewrite fneg into xors and other instructions. For f16 and
  // FullFP16 argument passing, some bitcast nodes may be introduced,
  // triggering that rewrite, which is what this hook avoids.
  switch (VT.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::f16:
    return Subtarget->hasFullFP16();
  }

  return false;
}

/// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
/// of the vector elements.
static bool areExtractExts(Value *Ext1, Value *Ext2) {
  auto areExtDoubled = [](Instruction *Ext) {
    return Ext->getType()->getScalarSizeInBits() ==
           2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
  };

  if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
      !match(Ext2, m_ZExtOrSExt(m_Value())) ||
      !areExtDoubled(cast<Instruction>(Ext1)) ||
      !areExtDoubled(cast<Instruction>(Ext2)))
    return false;

  return true;
}

/// Check if sinking \p I's operands to I's basic block is profitable, because
/// the operands can be folded into a target instruction, e.g.
/// sext/zext can be folded into vsubl.
bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
                                           SmallVectorImpl<Use *> &Ops) const {
  if (!I->getType()->isVectorTy())
    return false;

  if (Subtarget->hasNEON()) {
    switch (I->getOpcode()) {
    case Instruction::Sub:
    case Instruction::Add: {
      if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
        return false;
      Ops.push_back(&I->getOperandUse(0));
      Ops.push_back(&I->getOperandUse(1));
      return true;
    }
    default:
      return false;
    }
  }

  if (!Subtarget->hasMVEIntegerOps())
    return false;

  auto IsFMSMul = [&](Instruction *I) {
    if (!I->hasOneUse())
      return false;
    auto *Sub = cast<Instruction>(*I->users().begin());
    return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(1) == I;
  };
  auto IsFMS = [&](Instruction *I) {
    if (match(I->getOperand(0), m_FNeg(m_Value())) ||
        match(I->getOperand(1), m_FNeg(m_Value())))
      return true;
    return false;
  };

  auto IsSinker = [&](Instruction *I, int Operand) {
    switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::Mul:
    case Instruction::FAdd:
    case Instruction::ICmp:
    case Instruction::FCmp:
      return true;
    case Instruction::FMul:
      return !IsFMSMul(I);
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
      return Operand == 1;
    case Instruction::Call:
      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::fma:
          return !IsFMS(I);
        case Intrinsic::arm_mve_add_predicated:
        case Intrinsic::arm_mve_mul_predicated:
        case Intrinsic::arm_mve_qadd_predicated:
        case Intrinsic::arm_mve_hadd_predicated:
        case Intrinsic::arm_mve_vqdmull_predicated:
        case Intrinsic::arm_mve_qdmulh_predicated:
        case Intrinsic::arm_mve_qrdmulh_predicated:
        case Intrinsic::arm_mve_fma_predicated:
          return true;
        case Intrinsic::arm_mve_sub_predicated:
        case Intrinsic::arm_mve_qsub_predicated:
        case Intrinsic::arm_mve_hsub_predicated:
          return Operand == 1;
        default:
          return false;
        }
      }
      return false;
    default:
      return false;
    }
  };

  for (auto OpIdx : enumerate(I->operands())) {
    Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
    // Make sure we are not already sinking this operand
    if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
      continue;

    Instruction *Shuffle = Op;
    if (Shuffle->getOpcode() == Instruction::BitCast)
      Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0));
    // We are looking for a splat that can be sunk.
    if (!Shuffle ||
        !match(Shuffle, m_Shuffle(
                            m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
                            m_Undef(), m_ZeroMask())))
      continue;
    if (!IsSinker(I, OpIdx.index()))
      continue;

    // All uses of the shuffle should be sunk to avoid duplicating it across
    // GPR and vector registers.
    for (Use &U : Op->uses()) {
      Instruction *Insn = cast<Instruction>(U.getUser());
      if (!IsSinker(Insn, U.getOperandNo()))
        return false;
    }

    Ops.push_back(&Shuffle->getOperandUse(0));
    if (Shuffle != Op)
      Ops.push_back(&Op->getOperandUse(0));
    Ops.push_back(&OpIdx.value());
  }
  return true;
}

Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const {
  if (!Subtarget->hasMVEIntegerOps())
    return nullptr;
  Type *SVIType = SVI->getType();
  Type *ScalarType = SVIType->getScalarType();

  if (ScalarType->isFloatTy())
    return Type::getInt32Ty(SVIType->getContext());
  if (ScalarType->isHalfTy())
    return Type::getInt16Ty(SVIType->getContext());
  return nullptr;
}

bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT VT = ExtVal.getValueType();

  if (!isTypeLegal(VT))
    return false;

  if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal.getOperand(0))) {
    if (Ld->isExpandingLoad())
      return false;
  }

  if (Subtarget->hasMVEIntegerOps())
    return true;

  // Don't create a loadext if we can fold the extension into a wide/long
  // instruction.
  // If there's more than one user instruction, the loadext is desirable no
  // matter what.  There can be two uses by the same instruction.
  if (ExtVal->use_empty() ||
      !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
    return true;

  SDNode *U = *ExtVal->use_begin();
  if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM))
    return false;

  return true;
}

bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}

int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS) const {
  if (isLegalAddressingMode(DL, AM, Ty, AS)) {
    if (Subtarget->hasFPAO())
      return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
    return 0;
  }
  return -1;
}

/// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
/// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
/// expanded to FMAs when this method returns true, otherwise fmuladd is
/// expanded to fmul + fadd.
///
/// ARM supports both fused and unfused multiply-add operations; we already
/// lower a pair of fmul and fadd to the latter so it's not clear that there
/// would be a gain or that the gain would be worthwhile enough to risk
/// correctness bugs.
///
/// For MVE, we set this to true as it helps simplify the need for some
/// patterns (and we don't have the non-fused floating point instruction).
bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::v4f32:
  case MVT::v8f16:
    return Subtarget->hasMVEFloatOps();
  case MVT::f16:
    return Subtarget->useFPVFMx16();
  case MVT::f32:
    return Subtarget->useFPVFMx();
  case MVT::f64:
    return Subtarget->useFPVFMx64();
  default:
    break;
  }

  return false;
}

static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
  if (V < 0)
    return false;

  unsigned Scale = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i1:
  case MVT::i8:
    // Scale == 1;
    break;
  case MVT::i16:
    // Scale == 2;
    Scale = 2;
    break;
  default:
    // On Thumb1 we load most things (i32, i64, floats, etc.) with an LDR.
    // Scale == 4;
    Scale = 4;
    break;
  }
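  // Taken together, the checks below accept an unsigned 5-bit immediate scaled
  // by the access size, e.g. offsets 0, 4, ..., 124 for a word-sized Thumb1
  // LDR.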

  if ((V & (Scale - 1)) != 0)
    return false;
  return isUInt<5>(V / Scale);
}

static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
                                      const ARMSubtarget *Subtarget) {
  if (!VT.isInteger() && !VT.isFloatingPoint())
    return false;
  if (VT.isVector() && Subtarget->hasNEON())
    return false;
  if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() &&
      !Subtarget->hasMVEFloatOps())
    return false;

  bool IsNeg = false;
  if (V < 0) {
    IsNeg = true;
    V = -V;
  }

  unsigned NumBytes = std::max((unsigned)VT.getSizeInBits() / 8, 1U);

  // MVE: size * imm7
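  // For example, for 32-bit elements the encodable offsets are multiples of 4
  // with a magnitude of up to 127 * 4 == 508, i.e. a 7-bit immediate scaled by
  // the element size.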
  if (VT.isVector() && Subtarget->hasMVEIntegerOps()) {
    switch (VT.getSimpleVT().getVectorElementType().SimpleTy) {
    case MVT::i32:
    case MVT::f32:
      return isShiftedUInt<7,2>(V);
    case MVT::i16:
    case MVT::f16:
      return isShiftedUInt<7,1>(V);
    case MVT::i8:
      return isUInt<7>(V);
    default:
      return false;
    }
  }

  // half VLDR: 2 * imm8
  if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16())
    return isShiftedUInt<8, 1>(V);
  // VLDR and LDRD: 4 * imm8
  if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8)
    return isShiftedUInt<8, 2>(V);

  if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
    // + imm12 or - imm8
    if (IsNeg)
      return isUInt<8>(V);
    return isUInt<12>(V);
  }

  return false;
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                    const ARMSubtarget *Subtarget) {
  if (V == 0)
    return true;

  if (!VT.isSimple())
    return false;

  if (Subtarget->isThumb1Only())
    return isLegalT1AddressImmediate(V, VT);
  else if (Subtarget->isThumb2())
    return isLegalT2AddressImmediate(V, VT, Subtarget);

  // ARM mode.
  if (V < 0)
    V = - V;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i32:
    // +- imm12
    return isUInt<12>(V);
  case MVT::i16:
    // +- imm8
    return isUInt<8>(V);
  case MVT::f32:
  case MVT::f64:
    if (!Subtarget->hasVFP2Base()) // FIXME: NEON?
      return false;
    return isShiftedUInt<8, 2>(V);
  }
}

bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  int Scale = AM.Scale;
  if (Scale < 0)
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    if (Scale == 1)
      return true;
    // r + r << imm
    Scale = Scale & ~1;
    return Scale == 2 || Scale == 4 || Scale == 8;
  case MVT::i64:
    // FIXME: What are we trying to model here? ldrd doesn't have an r + r
    // version in Thumb mode.
    // r + r
    if (Scale == 1)
      return true;
    // r * 2 (this can be lowered to r + r).
    if (!AM.HasBaseReg && Scale == 2)
      return true;
    return false;
  case MVT::isVoid:
    // Note, we allow "void" uses (basically, uses that aren't loads or
    // stores), because ARM allows folding a scale into many arithmetic
    // operations.  This should be made more precise and revisited later.

    // Allow r << imm, but the imm has to be a multiple of two.
    if (Scale & 1) return false;
    return isPowerOf2_32(Scale);
  }
}

bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  const int Scale = AM.Scale;

  // Negative scales are not supported in Thumb1.
  if (Scale < 0)
    return false;

  // Thumb1 addressing modes do not support register scaling, except in the
  // following cases:
  // 1. Scale == 1 means no scaling.
  // 2. Scale == 2 this can be lowered to r + r if there is no base register.
  return (Scale == 1) || (!AM.HasBaseReg && Scale == 2);
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS, Instruction *I) const {
  EVT VT = getValueType(DL, Ty, true);
  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
    return false;

  // Can never fold addr of global into load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:  // no scale reg, must be "r+i" or "r", or "i".
    break;
  default:
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    if (!VT.isSimple())
      return false;

    if (Subtarget->isThumb1Only())
      return isLegalT1ScaledAddressingMode(AM, VT);

    if (Subtarget->isThumb2())
      return isLegalT2ScaledAddressingMode(AM, VT);

    int Scale = AM.Scale;
    switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
    case MVT::i64:
      // r +/- r
      if (Scale == 1 || (AM.HasBaseReg && Scale == -1))
        return true;
      // r * 2 (this can be lowered to r + r).
      if (!AM.HasBaseReg && Scale == 2)
        return true;
      return false;

    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because ARM allows folding a scale into many arithmetic
      // operations.  This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (Scale & 1) return false;
      return isPowerOf2_32(Scale);
    }
  }
  return true;
}

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is, the target has icmp instructions which can
/// compare a register against the immediate without having to materialize
/// the immediate into a register.
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // Thumb2 and ARM modes can use cmn for negative immediates.
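  // For example, a compare against -42 has no cmp immediate encoding, but the
  // same comparison can be done as cmn rN, #42, which is why the negated
  // immediate is checked as well.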
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 ||
           ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 ||
           ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1;
  // Thumb1 doesn't have cmn, and only has 8-bit unsigned immediates.
  return Imm >= 0 && Imm <= 255;
}

/// isLegalAddImmediate - Return true if the specified immediate is a legal add
/// *or sub* immediate, that is the target has add or sub instructions which can
/// add a register with the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Same encoding for add/sub, just flip the sign.
  int64_t AbsImm = std::abs(Imm);
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(AbsImm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(AbsImm) != -1;
  // Thumb1 only has an 8-bit unsigned immediate.
  return AbsImm >= 0 && AbsImm <= 255;
}

static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
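    // Addressing mode 3 (LDRH/LDRSH/LDRSB/STRH) takes a register base plus or
    // minus an 8-bit immediate or a register offset, hence the -256 bound
    // below.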
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -256) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
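    // Addressing mode 2 (LDR/LDRB/STR/STRB) takes a register base plus or
    // minus a 12-bit immediate or a (possibly shifted) register, hence the
    // -0x1000 bound and the shift handling below.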
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      ARM_AM::ShiftOpc ShOpcVal=
        ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
  return false;
}

static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    }
  }

  return false;
}

static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment,
                                      bool isSEXTLoad, bool IsMasked, bool isLE,
                                      SDValue &Base, SDValue &Offset,
                                      bool &isInc, SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;
  if (!isa<ConstantSDNode>(Ptr->getOperand(1)))
    return false;

  // We allow LE non-masked loads to change the type (for example use a vldrb.8
  // as opposed to a vldrw.32). This can allow extra addressing modes or
  // alignments for what is otherwise an equivalent instruction.
  bool CanChangeType = isLE && !IsMasked;

  ConstantSDNode *RHS = cast<ConstantSDNode>(Ptr->getOperand(1));
  int RHSC = (int)RHS->getZExtValue();

  auto IsInRange = [&](int RHSC, int Limit, int Scale) {
    if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) {
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) {
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    }
    return false;
  };

  // Try to find a matching instruction based on s/zext, Alignment, Offset and
  // (in BE/masked) type.
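  // For example, a v8i16 load with alignment >= 2 accepts offsets that are
  // multiples of 2 with a magnitude below 0x80 * 2, i.e. a 7-bit immediate
  // scaled by the element size.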
  Base = Ptr->getOperand(0);
  if (VT == MVT::v4i16) {
    if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2))
      return true;
  } else if (VT == MVT::v4i8 || VT == MVT::v8i8) {
    if (IsInRange(RHSC, 0x80, 1))
      return true;
  } else if (Alignment >= 4 &&
             (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) &&
             IsInRange(RHSC, 0x80, 4))
    return true;
  else if (Alignment >= 2 &&
           (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) &&
           IsInRange(RHSC, 0x80, 2))
    return true;
  else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1))
    return true;
  return false;
}

/// getPreIndexedAddressParts - returns true by value, and the base pointer,
/// offset pointer and addressing mode by reference, if the node's address
/// can be legally represented as a pre-indexed load / store address.
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  Align Alignment;
  bool isSEXTLoad = false;
  bool IsMasked = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlign();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlign();
  } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlign();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
    IsMasked = true;
  } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlign();
    IsMasked = true;
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (VT.isVector())
    isLegal = Subtarget->hasMVEIntegerOps() &&
              getMVEIndexedAddressParts(
                  Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked,
                  Subtarget->isLittle(), Base, Offset, isInc, DAG);
  else {
    if (Subtarget->isThumb2())
      isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                         Offset, isInc, DAG);
    else
      isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                          Offset, isInc, DAG);
  }
  if (!isLegal)
    return false;

  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
  return true;
}

/// getPostIndexedAddressParts - returns true by value, and the base pointer,
/// offset pointer and addressing mode by reference, if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  EVT VT;
  SDValue Ptr;
  Align Alignment;
  bool isSEXTLoad = false, isNonExt;
  bool IsMasked = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    Alignment = LD->getAlign();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
    isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    Alignment = ST->getAlign();
    isNonExt = !ST->isTruncatingStore();
  } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    Alignment = LD->getAlign();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
    isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
    IsMasked = true;
  } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    Alignment = ST->getAlign();
    isNonExt = !ST->isTruncatingStore();
    IsMasked = true;
  } else
    return false;

  if (Subtarget->isThumb1Only()) {
    // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
    // must be non-extending/truncating, i32, with an offset of 4.
    assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
    if (Op->getOpcode() != ISD::ADD || !isNonExt)
      return false;
    auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
    if (!RHS || RHS->getZExtValue() != 4)
      return false;

    Offset = Op->getOperand(1);
    Base = Op->getOperand(0);
    AM = ISD::POST_INC;
    return true;
  }

  bool isInc;
  bool isLegal = false;
  if (VT.isVector())
    isLegal = Subtarget->hasMVEIntegerOps() &&
              getMVEIndexedAddressParts(Op, VT, Alignment, isSEXTLoad, IsMasked,
                                        Subtarget->isLittle(), Base, Offset,
                                        isInc, DAG);
  else {
    if (Subtarget->isThumb2())
      isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                         isInc, DAG);
    else
      isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                          isInc, DAG);
  }
  if (!isLegal)
    return false;

  if (Ptr != Base) {
    // Swap base ptr and offset to catch more post-index load / store when
    // it's legal. In Thumb2 mode, offset must be an immediate.
    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
        !Subtarget->isThumb2())
      std::swap(Base, Offset);

    // Post-indexed load / store update the base pointer.
    if (Ptr != Base)
      return false;
  }

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}

void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  unsigned BitWidth = Known.getBitWidth();
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::ADDC:
  case ARMISD::ADDE:
  case ARMISD::SUBC:
  case ARMISD::SUBE:
    // Special cases when we convert a carry to a boolean.
    if (Op.getResNo() == 0) {
      SDValue LHS = Op.getOperand(0);
      SDValue RHS = Op.getOperand(1);
      // (ADDE 0, 0, C) will give us a single bit.
      if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) &&
          isNullConstant(RHS)) {
        Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
        return;
      }
    }
    break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
    Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
    if (Known.isUnknown())
      return;

    KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
    Known.Zero &= KnownRHS.Zero;
    Known.One  &= KnownRHS.One;
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
    Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
    switch (IntID) {
    default: return;
    case Intrinsic::arm_ldaex:
    case Intrinsic::arm_ldrex: {
      EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
      return;
    }
    }
  }
  case ARMISD::BFI: {
    // Conservatively, we can recurse down the first operand
    // and just mask out all affected bits.
    Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // Operand 2 of the BFI is already a mask suitable for removing the bits
    // it sets.
    ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
    const APInt &Mask = CI->getAPIntValue();
    Known.Zero &= Mask;
    Known.One &= Mask;
    return;
  }
  case ARMISD::VGETLANEs:
  case ARMISD::VGETLANEu: {
    const SDValue &SrcSV = Op.getOperand(0);
    EVT VecVT = SrcSV.getValueType();
    assert(VecVT.isVector() && "VGETLANE expected a vector type");
    const unsigned NumSrcElts = VecVT.getVectorNumElements();
    ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode());
    assert(Pos->getAPIntValue().ult(NumSrcElts) &&
           "VGETLANE index out of bounds");
    unsigned Idx = Pos->getZExtValue();
    APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
    Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1);

    EVT VT = Op.getValueType();
    const unsigned DstSz = VT.getScalarSizeInBits();
    const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits();
    (void)SrcSz;
    assert(SrcSz == Known.getBitWidth());
    assert(DstSz > SrcSz);
    if (Op.getOpcode() == ARMISD::VGETLANEs)
      Known = Known.sext(DstSz);
    else {
      Known = Known.zext(DstSz);
    }
    assert(DstSz == Known.getBitWidth());
    break;
  }
  case ARMISD::VMOVrh: {
    KnownBits KnownOp = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
    assert(KnownOp.getBitWidth() == 16);
    Known = KnownOp.zext(32);
    break;
  }
  }
}

bool ARMTargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    TargetLoweringOpt &TLO) const {
  // Delay optimization, so we don't have to deal with illegal types, or block
  // optimizations.
  if (!TLO.LegalOps)
    return false;

  // Only optimize AND for now.
  if (Op.getOpcode() != ISD::AND)
    return false;

  EVT VT = Op.getValueType();

  // Ignore vectors.
  if (VT.isVector())
    return false;

  assert(VT == MVT::i32 && "Unexpected integer type");

  // Make sure the RHS really is a constant.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;

  unsigned Mask = C->getZExtValue();

  unsigned Demanded = DemandedBits.getZExtValue();
  unsigned ShrunkMask = Mask & Demanded;
  unsigned ExpandedMask = Mask | ~Demanded;
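  // For example (illustrative): with Mask == 0x10ff and DemandedBits == 0x1ff,
  // ShrunkMask == 0xff and ExpandedMask == 0xfffffeff, so the uxtb mask 0xff
  // is legal below and the AND constant is shrunk from 0x10ff to 0xff.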

  // If the mask is all zeros, let the target-independent code replace the
  // result with zero.
  if (ShrunkMask == 0)
    return false;

  // If the mask is all ones, erase the AND. (Currently, the target-independent
  // code won't do this, so we have to do it explicitly to avoid an infinite
  // loop in obscure cases.)
  if (ExpandedMask == ~0U)
    return TLO.CombineTo(Op, Op.getOperand(0));

  auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
    return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
  };
  auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool {
    if (NewMask == Mask)
      return true;
    SDLoc DL(Op);
    SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
    SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
    return TLO.CombineTo(Op, NewOp);
  };

  // Prefer uxtb mask.
  if (IsLegalMask(0xFF))
    return UseMask(0xFF);

  // Prefer uxth mask.
  if (IsLegalMask(0xFFFF))
    return UseMask(0xFFFF);

  // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
  if (ShrunkMask < 256)
    return UseMask(ShrunkMask);

  // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
  if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
    return UseMask(ExpandedMask);

  // Potential improvements:
  //
  // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
  // We could try to prefer Thumb1 immediates which can be lowered to a
  // two-instruction sequence.
  // We could try to recognize more legal ARM/Thumb2 immediates here.

  return false;
}

bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const {
  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  case ARMISD::ASRL:
  case ARMISD::LSRL: {
    // If this is result 0 and the other result is unused, see if the demanded
    // bits allow us to shrink this long shift into a standard small shift in
    // the opposite direction.
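    // For example (illustrative): with ShAmt == 8, the top 8 bits of result 0
    // come entirely from the high input half (operand 1), so if only those
    // bits are demanded the long shift can become (shl Op1, 24).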
    if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) &&
        isa<ConstantSDNode>(Op->getOperand(2))) {
      unsigned ShAmt = Op->getConstantOperandVal(2);
      if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(
                            APInt::getAllOnesValue(32) << (32 - ShAmt)))
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(
                    ISD::SHL, SDLoc(Op), MVT::i32, Op.getOperand(1),
                    TLO.DAG.getConstant(32 - ShAmt, SDLoc(Op), MVT::i32)));
    }
    break;
  }
  }

  return TargetLowering::SimplifyDemandedBitsForTargetNode(
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}

//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
  // Looking for "rev" which is V6+.
  if (!Subtarget->hasV6Ops())
    return false;
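  // For example (illustrative), inline asm such as
  //   __asm__("rev %0, %1" : "=l"(out) : "l"(in));
  // matches the pattern below and is lowered to a byte-swap intrinsic.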

  InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
  std::string AsmStr = IA->getAsmString();
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = std::string(AsmPieces[0]);
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t,");

    // rev $0, $1
    if (AsmPieces.size() == 3 &&
        AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
      IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
      if (Ty && Ty->getBitWidth() == 32)
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  }

  return false;
}

const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
  // At this point, we have to lower this constraint to something else, so we
  // lower it to an "r" or "w". However, by doing this we will force the result
  // to be in a register, while the X constraint is much more permissive.
  //
  // Although we are correct (we are free to emit anything, without
  // constraints), we might break use cases that would expect us to be more
  // efficient and emit something else.
  if (!Subtarget->hasVFP2Base())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "w";
  if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
     (ConstraintVT.getSizeInBits() == 64 ||
      ConstraintVT.getSizeInBits() == 128))
    return "w";

  return "r";
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(StringRef Constraint) const {
  unsigned S = Constraint.size();
  if (S == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    case 'h': return C_RegisterClass;
    case 'x': return C_RegisterClass;
    case 't': return C_RegisterClass;
    case 'j': return C_Immediate; // Constant for movw.
    // An address with a single base register. Due to the way we
    // currently handle addresses it is the same as an 'r' memory constraint.
    case 'Q': return C_Memory;
    }
  } else if (S == 2) {
    switch (Constraint[0]) {
    default: break;
    case 'T': return C_RegisterClass;
    // All 'U+' constraints are addresses.
    case 'U': return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'l':
    if (type->isIntegerTy()) {
      if (Subtarget->isThumb())
        weight = CW_SpecificReg;
      else
        weight = CW_Register;
    }
    break;
  case 'w':
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;
  }
  return weight;
}

using RCPair = std::pair<unsigned, const TargetRegisterClass *>;

RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  switch (Constraint.size()) {
  case 1:
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l': // Low regs or general regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'h': // High regs or no regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::hGPRRegClass);
      break;
    case 'r':
      if (Subtarget->isThumb1Only())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'w':
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPRRegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPRRegClass);
      break;
    case 'x':
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPR_8RegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_8RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_8RegClass);
      break;
    case 't':
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32 || VT == MVT::i32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_VFP2RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_VFP2RegClass);
      break;
    }
    break;

  case 2:
    if (Constraint[0] == 'T') {
      switch (Constraint[1]) {
      default:
        break;
      case 'e':
        return RCPair(0U, &ARM::tGPREvenRegClass);
      case 'o':
        return RCPair(0U, &ARM::tGPROddRegClass);
      }
    }
    break;

  default:
    break;
  }

  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'j':
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    switch (ConstraintLetter) {
      case 'j':
        // Constant suitable for movw, must be between 0 and
        // 65535.
        if (Subtarget->hasV6T2Ops() || Subtarget->hasV8MBaselineOps())
          if (CVal >= 0 && CVal <= 65535)
            break;
        return;
      case 'I':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between 0 and 255, for ADD
          // immediates.
          if (CVal >= 0 && CVal <= 255)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getT2SOImmVal(CVal) != -1)
            break;
        } else {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getSOImmVal(CVal) != -1)
            break;
        }
        return;

      case 'J':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -255 and -1, for negated ADD
          // immediates. This can be used in GCC with an "n" modifier that
          // prints the negated value, for use with SUB instructions. It is
          // not useful otherwise but is implemented for compatibility.
          if (CVal >= -255 && CVal <= -1)
            break;
        } else {
          // This must be a constant between -4095 and 4095. It is not clear
          // what this constraint is intended for. Implemented for
          // compatibility with GCC.
          if (CVal >= -4095 && CVal <= 4095)
            break;
        }
        return;

      case 'K':
        if (Subtarget->isThumb1Only()) {
          // A 32-bit value where only one byte has a nonzero value. Exclude
          // zero to match GCC. This constraint is used by GCC internally for
          // constants that can be loaded with a move/shift combination.
          // It is not useful otherwise but is implemented for compatibility.
          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
            break;
        } else {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getSOImmVal(~CVal) != -1)
            break;
        }
        return;

      case 'L':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -7 and 7,
          // for 3-operand ADD/SUB immediate instructions.
          if (CVal >= -7 && CVal < 7)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
            break;
        } else {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getSOImmVal(-CVal) != -1)
            break;
        }
        return;

      case 'M':
        if (Subtarget->isThumb1Only()) {
          // This must be a multiple of 4 between 0 and 1020, for
          // ADD sp + immediate.
          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
            break;
        } else {
          // A power of two or a constant between 0 and 32.  This is used in
          // GCC for the shift amount on shifted register operands, but it is
          // useful in general for any shift amounts.
          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
            break;
        }
        return;

      case 'N':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb1Only()) {
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
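    // The constant passed the constraint check; emit it as a target constant.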
    Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

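/// Return the RTLIB divrem libcall corresponding to the opcode of N
/// ([SU]DIVREM or [SU]REM) and the given simple value type.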
static RTLIB::Libcall getDivRemLibcall(
    const SDNode *N, MVT::SimpleValueType SVT) {
  assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
          N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
         "Unhandled Opcode in getDivRemLibcall");
  bool isSigned = N->getOpcode() == ISD::SDIVREM ||
                  N->getOpcode() == ISD::SREM;
  RTLIB::Libcall LC;
  switch (SVT) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
  case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
  case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
  case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
  }
  return LC;
}

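/// Build the argument list for a divrem/rem libcall from the operands of N.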
static TargetLowering::ArgListTy getDivRemArgList(
    const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
  assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
          N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
         "Unhandled Opcode in getDivRemArgList");
  bool isSigned = N->getOpcode() == ISD::SDIVREM ||
                  N->getOpcode() == ISD::SREM;
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    EVT ArgVT = N->getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*Context);
    Entry.Node = N->getOperand(i);
    Entry.Ty = ArgTy;
    Entry.IsSExt = isSigned;
    Entry.IsZExt = !isSigned;
    Args.push_back(Entry);
  }
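  // The Windows RT division helpers expect their operands in the opposite
  // order (divisor first), so swap the first two arguments.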
  if (Subtarget->isTargetWindows() && Args.size() >= 2)
    std::swap(Args[0], Args[1]);
  return Args;
}

SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
          Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
          Subtarget->isTargetWindows()) &&
         "Register-based DivRem lowering only");
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool isSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  SDLoc dl(Op);

  // If the target has hardware divide, use divide + multiply + subtract:
  //     div = a / b
  //     rem = a - b * div
  //     return {div, rem}
  // This should be lowered into UDIV/SDIV + MLS later on.
  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (hasDivide && Op->getValueType(0).isSimple() &&
      Op->getSimpleValueType(0) == MVT::i32) {
    unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
    const SDValue Dividend = Op->getOperand(0);
    const SDValue Divisor = Op->getOperand(1);
    SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
    SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);

    SDValue Values[2] = {Div, Rem};
    return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
  }

  RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
                                       VT.getSimpleVT().SimpleTy);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
                                                    DAG.getContext(),
                                                    Subtarget);

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

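  // The divrem libcall returns both results in a {quotient, remainder} struct.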
  Type *RetTy = StructType::get(Ty, Ty);

  if (Subtarget->isTargetWindows())
    InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(InChain)
    .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
    .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}

// Lowers REM using divmod helpers
// see RTABI section 4.2/4.3
SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
  // Build return types (div and rem)
  std::vector<Type*> RetTyParams;
  Type *RetTyElement;

  switch (N->getValueType(0).getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:   RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
  case MVT::i16:  RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
  case MVT::i32:  RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
  case MVT::i64:  RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
  }

  RetTyParams.push_back(RetTyElement);
  RetTyParams.push_back(RetTyElement);
  ArrayRef<Type*> ret(RetTyParams);
  Type *RetTy = StructType::get(*DAG.getContext(), ret);

  RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
                                                             SimpleTy);
  SDValue InChain = DAG.getEntryNode();
  TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
                                                    Subtarget);
  bool isSigned = N->getOpcode() == ISD::SREM;
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  if (Subtarget->isTargetWindows())
    InChain = WinDBZCheckDenominator(DAG, N, InChain);

  // Lower call
  CallLoweringInfo CLI(DAG);
  CLI.setChain(InChain)
     .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
     .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // Return second (rem) result operand (first contains div)
  SDNode *ResNode = CallResult.first.getNode();
  assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
  return ResNode->getOperand(1);
}

SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "unsupported target platform");
  SDLoc DL(Op);

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);

  if (DAG.getMachineFunction().getFunction().hasFnAttribute(
          "no-stack-arg-probe")) {
    MaybeAlign Align =
        cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
    SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
    Chain = SP.getValue(1);
    SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
    if (Align)
      SP =
          DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
                      DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32));
    Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
    SDValue Ops[2] = { SP, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

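  // Pass the allocation size in 4-byte words in R4; the WIN__CHKSTK sequence
  // probes and adjusts SP, so the updated stack pointer is read back below.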
  SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
                              DAG.getConstant(2, DL, MVT::i32));

  SDValue Flag;
  Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);

  SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
  Chain = NewSP.getValue(1);

  SDValue Ops[2] = { NewSP, Chain };
  return DAG.getMergeValues(Ops, DL);
}

SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
  const unsigned DstSz = Op.getValueType().getSizeInBits();
  const unsigned SrcSz = SrcVal.getValueType().getSizeInBits();
  assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
         "Unexpected type for custom-lowering FP_EXTEND");

  assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
         "With both FP DP and 16, any FP conversion is legal!");

  assert(!(DstSz == 32 && Subtarget->hasFP16()) &&
         "With FP16, 16 to 32 conversion is legal!");

  // Converting from 32 -> 64 is valid if we have FP64.
  if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) {
    // FIXME: Remove this when we have strict fp instruction selection patterns
    if (IsStrict) {
      SDLoc Loc(Op);
      SDValue Result = DAG.getNode(ISD::FP_EXTEND,
                                   Loc, Op.getValueType(), SrcVal);
      return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
    }
    return Op;
  }

  // Otherwise we are converting 16 -> 64, which always takes two steps, or
  // 32 -> 64 without double-precision FP, or 16 -> 32 without FP16, which
  // each need a libcall.
  SDLoc Loc(Op);
  RTLIB::Libcall LC;
  MakeLibCallOptions CallOptions;
  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
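  // Widen one step at a time (f16 -> f32, then f32 -> f64), using a native
  // FP_EXTEND when that step is supported and a libcall otherwise.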
  for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) {
    bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64());
    MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32);
    MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64);
    if (Supported) {
      if (IsStrict) {
        SrcVal = DAG.getNode(ISD::STRICT_FP_EXTEND, Loc,
                             {DstVT, MVT::Other}, {Chain, SrcVal});
        Chain = SrcVal.getValue(1);
      } else {
        SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, DstVT, SrcVal);
      }
    } else {
      LC = RTLIB::getFPEXT(SrcVT, DstVT);
      assert(LC != RTLIB::UNKNOWN_LIBCALL &&
             "Unexpected type for custom-lowering FP_EXTEND");
      std::tie(SrcVal, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
                                            Loc, Chain);
    }
  }

  return IsStrict ? DAG.getMergeValues({SrcVal, Chain}, Loc) : SrcVal;
}

SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();

  SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
  EVT SrcVT = SrcVal.getValueType();
  EVT DstVT = Op.getValueType();
  const unsigned DstSz = Op.getValueType().getSizeInBits();
  const unsigned SrcSz = SrcVT.getSizeInBits();
  (void)DstSz;
  assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 &&
         "Unexpected type for custom-lowering FP_ROUND");

  assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
         "With both FP DP and 16, any FP conversion is legal!");

  SDLoc Loc(Op);

  // A 32 -> 16 round is a single instruction when the subtarget has FP16.
  if (SrcSz == 32 && Subtarget->hasFP16())
    return Op;

  // Otherwise use a libcall: 32 -> 16, 64 -> 32, or 64 -> 16.
  RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT);
  assert(LC != RTLIB::UNKNOWN_LIBCALL &&
         "Unexpected type for custom-lowering FP_ROUND");
  MakeLibCallOptions CallOptions;
  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
  SDValue Result;
  std::tie(Result, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
                                        Loc, Chain);
  return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
}

void ARMTargetLowering::lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG) const {
  assert(N->getValueType(0) == MVT::i64 && "Unexpected type (!= i64) on ABS.");
  MVT HalfT = MVT::i32;
  SDLoc dl(N);
  SDValue Hi, Lo, Tmp;

  if (!isOperationLegalOrCustom(ISD::ADDCARRY, HalfT) ||
      !isOperationLegalOrCustom(ISD::UADDO, HalfT))
    return;

  unsigned OpTypeBits = HalfT.getScalarSizeInBits();
  SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);

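  // Compute |x| on the i32 halves as (x + sign) ^ sign, where sign is the
  // 64-bit sign bit (Hi >> 31) replicated across both words.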
  Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                   DAG.getConstant(0, dl, HalfT));
  Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                   DAG.getConstant(1, dl, HalfT));

  Tmp = DAG.getNode(ISD::SRA, dl, HalfT, Hi,
                    DAG.getConstant(OpTypeBits - 1, dl,
                    getShiftAmountTy(HalfT, DAG.getDataLayout())));
  Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
  Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
                   SDValue(Lo.getNode(), 1));
  Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
  Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);

  Results.push_back(Lo);
  Results.push_back(Hi);
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return false;

  // There can be 1's on either or both "outsides"; all the "inside"
  // bits must be 0's.
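  // E.g. v = 0xff0000ff is accepted, since ~v = 0x00ffff00 is a shifted mask.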
  return isShiftedMask_32(~v);
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!Subtarget->hasVFP3Base())
    return false;
  if (VT == MVT::f16 && Subtarget->hasFullFP16())
    return ARM_AM::getFP16Imm(Imm) != -1;
  if (VT == MVT::f32 && Subtarget->hasFullFP16() &&
      ARM_AM::getFP32FP16Imm(Imm) != -1)
    return true;
  if (VT == MVT::f32)
    return ARM_AM::getFP32Imm(Imm) != -1;
  if (VT == MVT::f64 && Subtarget->hasFP64())
    return ARM_AM::getFP64Imm(Imm) != -1;
  return false;
}

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vld2dup:
  case Intrinsic::arm_neon_vld3dup:
  case Intrinsic::arm_neon_vld4dup: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_neon_vld1x2:
  case Intrinsic::arm_neon_vld1x3:
  case Intrinsic::arm_neon_vld1x4: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.offset = 0;
    Info.align.reset();
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_neon_vst1x2:
  case Intrinsic::arm_neon_vst1x3:
  case Intrinsic::arm_neon_vst1x4: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align.reset();
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_mve_vld2q:
  case Intrinsic::arm_mve_vld4q: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    Type *VecTy = cast<StructType>(I.getType())->getElementType(1);
    unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4;
    Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(VecTy->getScalarSizeInBits() / 8);
    // volatile loads with MVE intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_mve_vst2q:
  case Intrinsic::arm_mve_vst4q: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    Type *VecTy = I.getArgOperand(1)->getType();
    unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4;
    Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(VecTy->getScalarSizeInBits() / 8);
    // volatile stores with MVE intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_ldaex:
  case Intrinsic::arm_ldrex: {
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = DL.getABITypeAlign(PtrTy->getElementType());
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::arm_stlex:
  case Intrinsic::arm_strex: {
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = DL.getABITypeAlign(PtrTy->getElementType());
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::arm_stlexd:
  case Intrinsic::arm_strexd:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(2);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;

  case Intrinsic::arm_ldaexd:
  case Intrinsic::arm_ldrexd:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;

  default:
    break;
  }

  return false;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Bits > 32)
    return false;
  return true;
}

bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                unsigned Index) const {
  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
    return false;

  return (Index == 0 || Index == ResVT.getVectorNumElements());
}

Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
                                        ARM_MB::MemBOpt Domain) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  // First, if the target has no DMB, see what fallback we can use.
  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
      Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
      Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
                        Builder.getInt32(0), Builder.getInt32(7),
                        Builder.getInt32(10), Builder.getInt32(5)};
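      // This is "mcr p15, #0, <Rt>, c7, c10, #5", the CP15 data memory
      // barrier operation.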
      return Builder.CreateCall(MCR, args);
    } else {
      // Instead of using barriers, atomic accesses on these subtargets use
      // libcalls.
      llvm_unreachable("makeDMB on a target so old that it has no barriers");
    }
  } else {
    Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
    // Only a full system barrier exists in the M-class architectures.
    Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
    Constant *CDomain = Builder.getInt32(Domain);
    return Builder.CreateCall(DMB, CDomain);
  }
}

// Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/non-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Acquire:
    return nullptr; // Nothing to do
  case AtomicOrdering::SequentiallyConsistent:
    if (!Inst->hasAtomicStore())
      return nullptr; // Nothing to do
    LLVM_FALLTHROUGH;
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->preferISHSTBarriers())
      return makeDMB(Builder, ARM_MB::ISHST);
    // FIXME: add a comment with a link to documentation justifying this.
    return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitLeadingFence");
}

Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/not-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Release:
    return nullptr; // Nothing to do
  case AtomicOrdering::Acquire:
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitTrailingFence");
}

// Loads and stores of less than 64 bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
  return (Size == 64) && !Subtarget->isMClass();
}

// Loads and stores of less than 64 bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
// FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
// guarantee, see DDI0406C ARM architecture reference manual,
// sections A8.8.72-74 LDRD)
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  unsigned Size = LI->getType()->getPrimitiveSizeInBits();
  return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
                                                  : AtomicExpansionKind::None;
}

// For the real atomic operations, we have ldrex/strex up to 32 bits,
// and up to 64 bits on the non-M profiles
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
             ? AtomicExpansionKind::LLSC
             : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
  // At -O0, fast-regalloc cannot cope with the live vregs necessary to
  // implement cmpxchg without spilling. If the address being exchanged is also
  // on the stack and close enough to the spill slot, this can lead to a
  // situation where the monitor always gets cleared and the atomic operation
  // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
  bool HasAtomicCmpXchg =
      !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg)
    return AtomicExpansionKind::LLSC;
  return AtomicExpansionKind::None;
}

bool ARMTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {
  return InsertFencesForAtomic;
}

// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
  return Subtarget->isTargetMachO();
}

void ARMTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return TargetLowering::insertSSPDeclarations(M);

  // MSVC CRT has a global variable holding security cookie.
  M.getOrInsertGlobal("__security_cookie",
                      Type::getInt8PtrTy(M.getContext()));

  // MSVC CRT has a function to validate security cookie.
  FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
      "__security_check_cookie", Type::getVoidTy(M.getContext()),
      Type::getInt8PtrTy(M.getContext()));
  if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
    F->addAttribute(1, Attribute::AttrKind::InReg);
}

Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const {
  // MSVC CRT has a global variable holding security cookie.
  if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return M.getGlobalVariable("__security_cookie");
  return TargetLowering::getSDagStackGuard(M);
}

Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const {
  // MSVC CRT has a function to validate security cookie.
  if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return M.getFunction("__security_check_cookie");
  return TargetLowering::getSSPStackGuardCheck(M);
}

bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                                  unsigned &Cost) const {
  // If we do not have NEON, vector types are not natively supported.
  if (!Subtarget->hasNEON())
    return false;

  // Floating point values and vector values map to the same register file.
  // Therefore, although we could do a store + extract on a vector type, it is
  // better to leave those values as floating point, where we have more freedom
  // in the addressing modes.
  if (VectorTy->isFPOrFPVectorTy())
    return false;

  // If the index is unknown at compile time, this is very expensive to lower
  // and it is not possible to combine the store with the extract.
  if (!isa<ConstantInt>(Idx))
    return false;

  assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
  unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedSize();
  // We can do a store + vector extract on any vector that fits perfectly in a D
  // or Q register.
  if (BitWidth == 64 || BitWidth == 128) {
    Cost = 0;
    return true;
  }
  return false;
}

bool ARMTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget->hasV6T2Ops();
}

bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget->hasV6T2Ops();
}

bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
  return !Subtarget->hasMinSize() || Subtarget->isTargetWindows();
}

Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                         AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  bool IsAcquire = isAcquireOrStronger(Ord);

  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
  // intrinsic must return {i32, i32} and we have to recombine them into a
  // single i64 here.
  if (ValTy->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
    Function *Ldrex = Intrinsic::getDeclaration(M, Int);

    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    if (!Subtarget->isLittle())
      std::swap(Lo, Hi);
    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
  }

  Type *Tys[] = { Addr->getType() };
  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateTruncOrBitCast(
      Builder.CreateCall(Ldrex, Addr),
      cast<PointerType>(Addr->getType())->getElementType());
}

void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilder<> &Builder) const {
  if (!Subtarget->hasV7Ops())
    return;
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
}

Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                               Value *Addr,
                                               AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  bool IsRelease = isReleaseOrStronger(Ord);

  // Since the intrinsics must have legal type, the i64 intrinsics take two
  // parameters: "i32, i32". We must marshal Val into the appropriate form
  // before the call.
  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
    Function *Strex = Intrinsic::getDeclaration(M, Int);
    Type *Int32Ty = Type::getInt32Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
    if (!Subtarget->isLittle())
      std::swap(Lo, Hi);
    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    return Builder.CreateCall(Strex, {Lo, Hi, Addr});
  }

  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
  Type *Tys[] = { Addr->getType() };
  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateCall(
      Strex, {Builder.CreateZExtOrBitCast(
                  Val, Strex->getFunctionType()->getParamType(0)),
              Addr});
}

bool ARMTargetLowering::alignLoopsWithOptSize() const {
  return Subtarget->isMClass();
}

/// A helper function for determining the number of interleaved accesses we
/// will generate when lowering accesses of the given type.
unsigned
ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
                                             const DataLayout &DL) const {
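  // E.g. a 512-bit vector type is lowered as four 128-bit accesses.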
  return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
}

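/// Return true if an interleaved access with the given factor and vector type
/// can be lowered to native vldN/vstN (NEON) or MVE equivalents, possibly by
/// splitting it into multiple 128-bit wide accesses.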
bool ARMTargetLowering::isLegalInterleavedAccessType(
    unsigned Factor, FixedVectorType *VecTy, const DataLayout &DL) const {

  unsigned VecSize = DL.getTypeSizeInBits(VecTy);
  unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());

  if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps())
    return false;

  // Ensure the vector doesn't have f16 elements. Even though we could do an
  // i16 vldN, we can't hold the f16 vectors and will end up converting via
  // f32.
  if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy())
    return false;
  if (Subtarget->hasMVEIntegerOps() && Factor == 3)
    return false;

  // Ensure the number of vector elements is greater than 1.
  if (VecTy->getNumElements() < 2)
    return false;

  // Ensure the element type is legal.
  if (ElSize != 8 && ElSize != 16 && ElSize != 32)
    return false;

  // Ensure the total vector size is 64 or a multiple of 128. Types larger than
  // 128 will be split into multiple interleaved accesses.
  if (Subtarget->hasNEON() && VecSize == 64)
    return true;
  return VecSize % 128 == 0;
}

unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const {
  if (Subtarget->hasNEON())
    return 4;
  if (Subtarget->hasMVEIntegerOps())
    return MVEMaxSupportedInterleaveFactor;
  return TargetLoweringBase::getMaxSupportedInterleaveFactor();
}

/// Lower an interleaved load into a vldN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
///
///      Into:
///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
bool ARMTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");

  auto *VecTy = cast<FixedVectorType>(Shuffles[0]->getType());
  Type *EltTy = VecTy->getElementType();

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Skip if we do not have NEON and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector types are divisible by 128.
  if (!isLegalInterleavedAccessType(Factor, VecTy, DL))
    return false;

  unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);

  // A pointer vector cannot be the return type of the ldN intrinsics. Need to
  // load integer vectors first and then convert to pointer vectors.
  if (EltTy->isPointerTy())
    VecTy = FixedVectorType::get(DL.getIntPtrType(EltTy), VecTy);

  IRBuilder<> Builder(LI);

  // The base address of the load.
  Value *BaseAddr = LI->getPointerOperand();

  if (NumLoads > 1) {
    // If we're going to generate more than one load, reset the sub-vector type
    // to something legal.
    VecTy = FixedVectorType::get(VecTy->getElementType(),
                                 VecTy->getNumElements() / NumLoads);

    // We will compute the pointer operand of each load from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
    BaseAddr = Builder.CreateBitCast(
        BaseAddr,
        VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");

  auto createLoadIntrinsic = [&](Value *BaseAddr) {
    if (Subtarget->hasNEON()) {
      Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
      Type *Tys[] = {VecTy, Int8Ptr};
      static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                                Intrinsic::arm_neon_vld3,
                                                Intrinsic::arm_neon_vld4};
      Function *VldnFunc =
          Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);

      SmallVector<Value *, 2> Ops;
      Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
      Ops.push_back(Builder.getInt32(LI->getAlignment()));

      return Builder.CreateCall(VldnFunc, Ops, "vldN");
    } else {
      assert((Factor == 2 || Factor == 4) &&
             "expected interleave factor of 2 or 4 for MVE");
      Intrinsic::ID LoadInts =
          Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
      Type *VecEltTy =
          VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace());
      Type *Tys[] = {VecTy, VecEltTy};
      Function *VldnFunc =
          Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys);

      SmallVector<Value *, 2> Ops;
      Ops.push_back(Builder.CreateBitCast(BaseAddr, VecEltTy));
      return Builder.CreateCall(VldnFunc, Ops, "vldN");
    }
  };

  // Holds sub-vectors extracted from the load intrinsic return values. The
  // sub-vectors are associated with the shufflevector instructions they will
  // replace.
  DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;

  for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
    // If we're generating more than one load, compute the base address of
    // subsequent loads as an offset from the previous.
    if (LoadCount > 0)
      BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
                                            VecTy->getNumElements() * Factor);

    CallInst *VldN = createLoadIntrinsic(BaseAddr);

    // Replace uses of each shufflevector with the corresponding vector loaded
    // by ldN.
    for (unsigned i = 0; i < Shuffles.size(); i++) {
      ShuffleVectorInst *SV = Shuffles[i];
      unsigned Index = Indices[i];

      Value *SubVec = Builder.CreateExtractValue(VldN, Index);

      // Convert the integer vector to pointer vector if the element is pointer.
      if (EltTy->isPointerTy())
        SubVec = Builder.CreateIntToPtr(
            SubVec,
            FixedVectorType::get(SV->getType()->getElementType(), VecTy));

      SubVecs[SV].push_back(SubVec);
    }
  }

  // Replace uses of the shufflevector instructions with the sub-vectors
  // returned by the load intrinsic. If a shufflevector instruction is
  // associated with more than one sub-vector, those sub-vectors will be
  // concatenated into a single wide vector.
  for (ShuffleVectorInst *SVI : Shuffles) {
    auto &SubVec = SubVecs[SVI];
    auto *WideVec =
        SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
    SVI->replaceAllUsesWith(WideVec);
  }

  return true;
}

/// Lower an interleaved store into a vstN intrinsic.
///
/// E.g. Lower an interleaved store (Factor = 3):
///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
///                                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
///
///      Into:
///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
///
/// Note that the new shufflevectors will be removed and we'll only generate one
/// vst3 instruction in CodeGen.
///
/// Example for a more general valid mask (Factor 3). Lower:
///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
///        store <12 x i32> %i.vec, <12 x i32>* %ptr
///
///      Into:
///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");

  auto *VecTy = cast<FixedVectorType>(SVI->getType());
  assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");

  unsigned LaneLen = VecTy->getNumElements() / Factor;
  Type *EltTy = VecTy->getElementType();
  auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Skip if we do not have NEON and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector types are divisible by 128.
  if (!isLegalInterleavedAccessType(Factor, SubVecTy, DL))
    return false;

  unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);

  Value *Op0 = SVI->getOperand(0);
  Value *Op1 = SVI->getOperand(1);
  IRBuilder<> Builder(SI);

  // StN intrinsics don't support pointer vectors as arguments. Convert pointer
  // vectors to integer vectors.
  if (EltTy->isPointerTy()) {
    Type *IntTy = DL.getIntPtrType(EltTy);

    // Convert to the corresponding integer vector.
    auto *IntVecTy =
        FixedVectorType::get(IntTy, cast<FixedVectorType>(Op0->getType()));
    Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
    Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);

    SubVecTy = FixedVectorType::get(IntTy, LaneLen);
  }

  // The base address of the store.
  Value *BaseAddr = SI->getPointerOperand();

  if (NumStores > 1) {
    // If we're going to generate more than one store, reset the lane length
    // and sub-vector type to something legal.
    LaneLen /= NumStores;
    SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);

    // We will compute the pointer operand of each store from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
    BaseAddr = Builder.CreateBitCast(
        BaseAddr,
        SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");

  auto Mask = SVI->getShuffleMask();

  auto createStoreIntrinsic = [&](Value *BaseAddr,
                                  SmallVectorImpl<Value *> &Shuffles) {
    if (Subtarget->hasNEON()) {
      static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
                                                 Intrinsic::arm_neon_vst3,
                                                 Intrinsic::arm_neon_vst4};
      Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
      Type *Tys[] = {Int8Ptr, SubVecTy};

      Function *VstNFunc = Intrinsic::getDeclaration(
          SI->getModule(), StoreInts[Factor - 2], Tys);

      SmallVector<Value *, 6> Ops;
      Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
      for (auto S : Shuffles)
        Ops.push_back(S);
      Ops.push_back(Builder.getInt32(SI->getAlignment()));
      Builder.CreateCall(VstNFunc, Ops);
    } else {
      assert((Factor == 2 || Factor == 4) &&
             "expected interleave factor of 2 or 4 for MVE");
      Intrinsic::ID StoreInts =
          Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
      Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo(
          SI->getPointerAddressSpace());
      Type *Tys[] = {EltPtrTy, SubVecTy};
      Function *VstNFunc =
          Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys);

      SmallVector<Value *, 6> Ops;
      Ops.push_back(Builder.CreateBitCast(BaseAddr, EltPtrTy));
      for (auto S : Shuffles)
        Ops.push_back(S);
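      // The MVE vstNq intrinsics take a stage index as their final operand;
      // one call is emitted per stage of the interleaving store.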
      for (unsigned F = 0; F < Factor; F++) {
        Ops.push_back(Builder.getInt32(F));
        Builder.CreateCall(VstNFunc, Ops);
        Ops.pop_back();
      }
    }
  };

  for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
    // If we're generating more than one store, compute the base address of
    // subsequent stores as an offset from the previous.
    if (StoreCount > 0)
      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
                                            BaseAddr, LaneLen * Factor);

    SmallVector<Value *, 4> Shuffles;

    // Split the shufflevector operands into sub vectors for the new vstN call.
    for (unsigned i = 0; i < Factor; i++) {
      unsigned IdxI = StoreCount * LaneLen * Factor + i;
      if (Mask[IdxI] >= 0) {
        Shuffles.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0)));
      } else {
        unsigned StartMask = 0;
        for (unsigned j = 1; j < LaneLen; j++) {
          unsigned IdxJ = StoreCount * LaneLen * Factor + j;
          if (Mask[IdxJ * Factor + IdxI] >= 0) {
            StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
            break;
          }
        }
        // Note: If all elements in a chunk are undefs, StartMask=0!
        // Note: Filling undef gaps with random elements is ok, since
        // those elements were being written anyway (with undefs).
        // In the case of all undefs we're defaulting to using elems from 0
        // Note: StartMask cannot be negative, it's checked in
        // isReInterleaveMask
        Shuffles.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(StartMask, LaneLen, 0)));
      }
    }

    createStoreIntrinsic(BaseAddr, Shuffles);
  }
  return true;
}

enum HABaseType {
  HA_UNKNOWN = 0,
  HA_FLOAT,
  HA_DOUBLE,
  HA_VECT64,
  HA_VECT128
};

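/// Recursively check whether Ty is an AAPCS-VFP homogeneous aggregate: at most
/// four members that all share one base type of float, double, or a 64-bit or
/// 128-bit vector.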
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {
  if (auto *ST = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0; i < ST->getNumElements(); ++i) {
      uint64_t SubMembers = 0;
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
        return false;
      Members += SubMembers;
    }
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    uint64_t SubMembers = 0;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
      return false;
    Members += SubMembers * AT->getNumElements();
  } else if (Ty->isFloatTy()) {
    if (Base != HA_UNKNOWN && Base != HA_FLOAT)
      return false;
    Members = 1;
    Base = HA_FLOAT;
  } else if (Ty->isDoubleTy()) {
    if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
      return false;
    Members = 1;
    Base = HA_DOUBLE;
  } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
    Members = 1;
    switch (Base) {
    case HA_FLOAT:
    case HA_DOUBLE:
      return false;
    case HA_VECT64:
      return VT->getPrimitiveSizeInBits().getFixedSize() == 64;
    case HA_VECT128:
      return VT->getPrimitiveSizeInBits().getFixedSize() == 128;
    case HA_UNKNOWN:
      switch (VT->getPrimitiveSizeInBits().getFixedSize()) {
      case 64:
        Base = HA_VECT64;
        return true;
      case 128:
        Base = HA_VECT128;
        return true;
      default:
        return false;
      }
    }
  }

  return (Members > 0 && Members <= 4);
}

/// Return the correct alignment for the current calling convention.
Align ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy,
                                                       DataLayout DL) const {
  const Align ABITypeAlign = DL.getABITypeAlign(ArgTy);
  if (!ArgTy->isVectorTy())
    return ABITypeAlign;

  // Avoid over-aligning vector parameters. It would require realigning the
  // stack and waste space for no real benefit.
  return std::min(ABITypeAlign, DL.getStackAlignment());
}

/// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
/// passing according to AAPCS rules.
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
      CallingConv::ARM_AAPCS_VFP)
    return false;

  HABaseType Base = HA_UNKNOWN;
  uint64_t Members = 0;
  bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
  LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());

  bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
  return IsHA || IsIntArray;
}

Register ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? Register() : ARM::R0;
}

Register ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? Register() : ARM::R1;
}

void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in ARMFunctionInfo.
  ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
  MF.getFrameInfo().computeMaxCallFrameSize(MF);
  TargetLoweringBase::finalizeLowering(MF);
}