/*
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2014      INRIA Rocquencourt
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 * and Inria Paris - Rocquencourt, Domaine de Voluceau - Rocquencourt,
 * B.P. 105 - 78153 Le Chesnay, France
 */

#include <limits.h>
#include <isl/id.h>
#include <isl/val.h>
#include <isl/space.h>
#include <isl/aff.h>
#include <isl/constraint.h>
#include <isl/set.h>
#include <isl/ilp.h>
#include <isl/union_set.h>
#include <isl/union_map.h>
#include <isl/schedule_node.h>
#include <isl/options.h>
#include <isl_sort.h>
#include <isl_tarjan.h>
#include <isl_ast_private.h>
#include <isl_ast_build_expr.h>
#include <isl_ast_build_private.h>
#include <isl_ast_graft_private.h>

/* Try and reduce the number of disjuncts in the representation of "set",
 * without dropping explicit representations of local variables.
 */
static __isl_give isl_set *isl_set_coalesce_preserve(__isl_take isl_set *set)
{
	isl_ctx *ctx;
	int save_preserve;

	if (!set)
		return NULL;

	ctx = isl_set_get_ctx(set);
	save_preserve = isl_options_get_coalesce_preserve_locals(ctx);
	isl_options_set_coalesce_preserve_locals(ctx, 1);
	set = isl_set_coalesce(set);
	isl_options_set_coalesce_preserve_locals(ctx, save_preserve);
	return set;
}
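
/* Illustrative usage sketch (an added example, not from the original
 * source; the set description is made up):
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_set *set = isl_set_read_from_str(ctx,
 *		"{ [i] : 0 <= i < 10 or 10 <= i < 20 }");
 *	set = isl_set_coalesce_preserve(set);
 *	isl_set_free(set);
 *	isl_ctx_free(ctx);
 *
 * After the call, "set" should consist of the single disjunct
 * { [i] : 0 <= i < 20 }, while any explicit representations
 * of local variables would have been preserved.
 */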

/* Data used in generate_domain.
 *
 * "build" is the input build.
 * "list" collects the results.
 */
struct isl_generate_domain_data {
	isl_ast_build *build;

	isl_ast_graft_list *list;
};

static __isl_give isl_ast_graft_list *generate_next_level(
	__isl_take isl_union_map *executed,
	__isl_take isl_ast_build *build);
static __isl_give isl_ast_graft_list *generate_code(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int internal);

/* Generate an AST for a single domain based on
 * the (non single valued) inverse schedule "executed".
 *
 * We extend the schedule with the iteration domain
 * and continue generating through a call to generate_code.
 *
 * In particular, if executed has the form
 *
 *	S -> D
 *
 * then we continue generating code on
 *
 *	[S -> D] -> D
 *
 * The extended inverse schedule is clearly single valued
 * ensuring that the nested generate_code will not reach this function,
 * but will instead create calls to all elements of D that need
 * to be executed from the current schedule domain.
 */
static isl_stat generate_non_single_valued(__isl_take isl_map *executed,
	struct isl_generate_domain_data *data)
{
	isl_map *identity;
	isl_ast_build *build;
	isl_ast_graft_list *list;

	build = isl_ast_build_copy(data->build);

	identity = isl_set_identity(isl_map_range(isl_map_copy(executed)));
	executed = isl_map_domain_product(executed, identity);
	build = isl_ast_build_set_single_valued(build, 1);

	list = generate_code(isl_union_map_from_map(executed), build, 1);

	data->list = isl_ast_graft_list_concat(data->list, list);

	return isl_stat_ok;
}

/* Call the at_each_domain callback, if requested by the user,
 * after recording the current inverse schedule in the build.
 */
static __isl_give isl_ast_graft *at_each_domain(__isl_take isl_ast_graft *graft,
	__isl_keep isl_map *executed, __isl_keep isl_ast_build *build)
{
	if (!graft || !build)
		return isl_ast_graft_free(graft);
	if (!build->at_each_domain)
		return graft;

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_executed(build,
			isl_union_map_from_map(isl_map_copy(executed)));
	if (!build)
		return isl_ast_graft_free(graft);

	graft->node = build->at_each_domain(graft->node,
					build, build->at_each_domain_user);
	isl_ast_build_free(build);

	if (!graft->node)
		graft = isl_ast_graft_free(graft);

	return graft;
}

/* Generate a call expression for the single executed
 * domain element "map" and put a guard around it based on its (simplified)
 * domain.  "executed" is the original inverse schedule from which "map"
 * has been derived.  In particular, "map" is either identical to "executed"
 * or it is the result of gisting "executed" with respect to the build domain.
 * "executed" is only used if there is an at_each_domain callback.
 *
 * At this stage, any pending constraints in the build can no longer
 * be simplified with respect to any enforced constraints since
 * the call node does not have any enforced constraints.
 * Since all pending constraints not covered by any enforced constraints
 * will be added as a guard to the graft in create_node_scaled,
 * even in the eliminated case, the pending constraints
 * can be considered to have been generated by outer constructs.
 *
 * If the user has set an at_each_domain callback, it is called
 * on the constructed call expression node.
 */
static isl_stat add_domain(__isl_take isl_map *executed,
	__isl_take isl_map *map, struct isl_generate_domain_data *data)
{
	isl_ast_build *build;
	isl_ast_graft *graft;
	isl_ast_graft_list *list;
	isl_set *guard, *pending;

	build = isl_ast_build_copy(data->build);
	pending = isl_ast_build_get_pending(build);
	build = isl_ast_build_replace_pending_by_guard(build, pending);

	guard = isl_map_domain(isl_map_copy(map));
	guard = isl_set_compute_divs(guard);
	guard = isl_set_coalesce_preserve(guard);
	guard = isl_set_gist(guard, isl_ast_build_get_generated(build));
	guard = isl_ast_build_specialize(build, guard);

	graft = isl_ast_graft_alloc_domain(map, build);
	graft = at_each_domain(graft, executed, build);
	isl_ast_build_free(build);
	isl_map_free(executed);
	graft = isl_ast_graft_add_guard(graft, guard, data->build);

	list = isl_ast_graft_list_from_ast_graft(graft);
	data->list = isl_ast_graft_list_concat(data->list, list);

	return isl_stat_ok;
}

/* Generate an AST for a single domain based on
 * the inverse schedule "executed" and add it to data->list.
 *
 * If there is more than one domain element associated to the current
 * schedule "time", then we need to continue the generation process
 * in generate_non_single_valued.
 * Note that the inverse schedule being single-valued may depend
 * on constraints that are only available in the original context
 * domain specified by the user.  We therefore first introduce
 * some of the constraints of data->build->domain.  In particular,
 * we intersect with a single-disjunct approximation of this set.
 * We perform this approximation to avoid further splitting up
 * the executed relation, possibly introducing a disjunctive guard
 * on the statement.
 *
 * On the other hand, we only perform the test after having taken the gist
 * of the domain as the resulting map is the one from which the call
 * expression is constructed.  Using this map to construct the call
 * expression usually yields simpler results in cases where the original
 * map is not obviously single-valued.
 * If the original map is obviously single-valued, then the gist
 * operation is skipped.
 *
 * Because we perform the single-valuedness test on the gisted map,
 * we may in rare cases fail to recognize that the inverse schedule
 * is single-valued.  This becomes problematic if this happens
 * from the recursive call through generate_non_single_valued
 * as we would then end up in an infinite recursion.
 * We therefore check if we are inside a call to generate_non_single_valued
 * and revert to the ungisted map if the gisted map turns out not to be
 * single-valued.
 *
 * Otherwise, call add_domain to generate a call expression (with guard) and
 * to call the at_each_domain callback, if any.
 */
static isl_stat generate_domain(__isl_take isl_map *executed, void *user)
{
	struct isl_generate_domain_data *data = user;
	isl_set *domain;
	isl_map *map = NULL;
	int empty, sv;

	domain = isl_ast_build_get_domain(data->build);
	domain = isl_set_from_basic_set(isl_set_simple_hull(domain));
	executed = isl_map_intersect_domain(executed, domain);
	empty = isl_map_is_empty(executed);
	if (empty < 0)
		goto error;
	if (empty) {
		isl_map_free(executed);
		return isl_stat_ok;
	}

	sv = isl_map_plain_is_single_valued(executed);
	if (sv < 0)
		goto error;
	if (sv)
		return add_domain(executed, isl_map_copy(executed), data);

	executed = isl_map_coalesce(executed);
	map = isl_map_copy(executed);
	map = isl_ast_build_compute_gist_map_domain(data->build, map);
	sv = isl_map_is_single_valued(map);
	if (sv < 0)
		goto error;
	if (!sv) {
		isl_map_free(map);
		if (data->build->single_valued)
			map = isl_map_copy(executed);
		else
			return generate_non_single_valued(executed, data);
	}

	return add_domain(executed, map, data);
error:
	isl_map_free(map);
	isl_map_free(executed);
	return isl_stat_error;
}

/* Call build->create_leaf to create a "leaf" node in the AST,
 * encapsulate the result in an isl_ast_graft and return the result
 * as a 1-element list.
 *
 * Note that the node returned by the user may be an entire tree.
 *
 * Since the node itself cannot enforce any constraints, we turn
 * all pending constraints into guards and add them to the resulting
 * graft to ensure that they will be generated.
 *
 * Before we pass control to the user, we first clear some information
 * from the build that is (presumably) only meaningful
 * for the current code generation.
 * This includes the create_leaf callback itself, so we make a copy
 * of the build first.
 */
static __isl_give isl_ast_graft_list *call_create_leaf(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_set *guard;
	isl_ast_node *node;
	isl_ast_graft *graft;
	isl_ast_build *user_build;

	guard = isl_ast_build_get_pending(build);
	user_build = isl_ast_build_copy(build);
	user_build = isl_ast_build_replace_pending_by_guard(user_build,
							isl_set_copy(guard));
	user_build = isl_ast_build_set_executed(user_build, executed);
	user_build = isl_ast_build_clear_local_info(user_build);
	if (!user_build)
		node = NULL;
	else
		node = build->create_leaf(user_build, build->create_leaf_user);
	graft = isl_ast_graft_alloc(node, build);
	graft = isl_ast_graft_add_guard(graft, guard, build);
	isl_ast_build_free(build);
	return isl_ast_graft_list_from_ast_graft(graft);
}
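
/* Hedged usage sketch (an added example; "my_leaf" is hypothetical):
 * a user would install the callback on the build before generating
 * the AST, e.g.,
 *
 *	static __isl_give isl_ast_node *my_leaf(
 *		__isl_take isl_ast_build *build, void *user)
 *	{
 *		isl_ast_node *node;
 *
 *		node = ...construct a node, e.g., based on
 *			isl_ast_build_get_schedule(build)...;
 *		isl_ast_build_free(build);
 *		return node;
 *	}
 *
 *	build = isl_ast_build_set_create_leaf(build, &my_leaf, NULL);
 *
 * The build passed to the callback corresponds to the "user_build"
 * constructed above, with the pending constraints already turned
 * into a guard.
 */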

static __isl_give isl_ast_graft_list *build_ast_from_child(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed);

/* Generate an AST after having handled the complete schedule
 * of this call to the code generator or the complete band
 * if we are generating an AST from a schedule tree.
 *
 * If we are inside a band node, then move on to the child of the band.
 *
 * If the user has specified a create_leaf callback, control
 * is passed to the user in call_create_leaf.
 *
 * Otherwise, we generate one or more calls for each individual
 * domain in generate_domain.
 */
static __isl_give isl_ast_graft_list *generate_inner_level(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_ctx *ctx;
	struct isl_generate_domain_data data = { build };

	if (!build || !executed)
		goto error;

	if (isl_ast_build_has_schedule_node(build)) {
		isl_schedule_node *node;
		node = isl_ast_build_get_schedule_node(build);
		build = isl_ast_build_reset_schedule_node(build);
		return build_ast_from_child(build, node, executed);
	}

	if (build->create_leaf)
		return call_create_leaf(executed, build);

	ctx = isl_union_map_get_ctx(executed);
	data.list = isl_ast_graft_list_alloc(ctx, 0);
	if (isl_union_map_foreach_map(executed, &generate_domain, &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	if (0)
error:		data.list = NULL;
	isl_ast_build_free(build);
	isl_union_map_free(executed);
	return data.list;
}

/* Call the before_each_for callback, if requested by the user.
 */
static __isl_give isl_ast_node *before_each_for(__isl_take isl_ast_node *node,
	__isl_keep isl_ast_build *build)
{
	isl_id *id;

	if (!node || !build)
		return isl_ast_node_free(node);
	if (!build->before_each_for)
		return node;
	id = build->before_each_for(build, build->before_each_for_user);
	node = isl_ast_node_set_annotation(node, id);
	return node;
}

/* Call the after_each_for callback, if requested by the user.
 */
static __isl_give isl_ast_graft *after_each_for(__isl_take isl_ast_graft *graft,
	__isl_keep isl_ast_build *build)
{
	if (!graft || !build)
		return isl_ast_graft_free(graft);
	if (!build->after_each_for)
		return graft;
	graft->node = build->after_each_for(graft->node, build,
						build->after_each_for_user);
	if (!graft->node)
		return isl_ast_graft_free(graft);
	return graft;
}

/* Plug in all the known values of the current and outer dimensions
 * in the domain of "executed".  In principle, we only need to plug
 * in the known value of the current dimension since the values of
 * outer dimensions have been plugged in already.
 * However, it turns out to be easier to just plug in all known values.
 */
static __isl_give isl_union_map *plug_in_values(
	__isl_take isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	return isl_ast_build_substitute_values_union_map_domain(build,
								    executed);
}

/* Check if the constraint "c" is a lower bound on dimension "pos",
 * an upper bound, or independent of dimension "pos".
 */
static int constraint_type(isl_constraint *c, int pos)
{
	if (isl_constraint_is_lower_bound(c, isl_dim_set, pos))
		return 1;
	if (isl_constraint_is_upper_bound(c, isl_dim_set, pos))
		return 2;
	return 0;
}

/* Compare the types of the constraints "a" and "b",
 * such that constraints that are independent of "depth"
 * are sorted before the lower bounds on "depth", which in
 * turn are sorted before the upper bounds on "depth".
 */
static int cmp_constraint(__isl_keep isl_constraint *a,
	__isl_keep isl_constraint *b, void *user)
{
	int *depth = user;
	int t1 = constraint_type(a, *depth);
	int t2 = constraint_type(b, *depth);

	return t1 - t2;
}
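
/* For example (an added illustration): sorting the constraints of
 *
 *	{ [i, j] : 0 <= i <= 9 and 0 <= j <= n }
 *
 * with "depth" equal to 1 places the two constraints on i first,
 * since they are independent of j, followed by the lower bound
 * 0 <= j and finally the upper bound j <= n.
 */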

/* Extract a lower bound on dimension "pos" from constraint "c".
 *
 * If the constraint is of the form
 *
 *	a x + f(...) >= 0
 *
 * then we essentially return
 *
 *	l = ceil(-f(...)/a)
 *
 * However, if the current dimension is strided, then we need to make
 * sure that the lower bound we construct is of the form
 *
 *	f + s a
 *
 * with f the offset and s the stride.
 * We therefore compute
 *
 *	f + s * ceil((l - f)/s)
 */
static __isl_give isl_aff *lower_bound(__isl_keep isl_constraint *c,
	int pos, __isl_keep isl_ast_build *build)
{
	isl_aff *aff;

	aff = isl_constraint_get_bound(c, isl_dim_set, pos);
	aff = isl_aff_ceil(aff);

	if (isl_ast_build_has_stride(build, pos)) {
		isl_aff *offset;
		isl_val *stride;

		offset = isl_ast_build_get_offset(build, pos);
		stride = isl_ast_build_get_stride(build, pos);

		aff = isl_aff_sub(aff, isl_aff_copy(offset));
		aff = isl_aff_scale_down_val(aff, isl_val_copy(stride));
		aff = isl_aff_ceil(aff);
		aff = isl_aff_scale_val(aff, stride);
		aff = isl_aff_add(aff, offset);
	}

	aff = isl_ast_build_compute_gist_aff(build, aff);

	return aff;
}
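
/* Worked example (an added illustration): with stride s = 4,
 * offset f = 1 and an unadjusted lower bound l = 10,
 * the computation above yields
 *
 *	1 + 4 * ceil((10 - 1)/4) = 1 + 4 * 3 = 13
 *
 * i.e., the smallest value of the form f + s a that is greater than
 * or equal to the original lower bound.
 */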

/* Return the exact lower bound (or upper bound if "upper" is set)
 * of "domain" as a piecewise affine expression.
 *
 * If we are computing a lower bound (of a strided dimension), then
 * we need to make sure it is of the form
 *
 *	f + s a
 *
 * where f is the offset and s is the stride.
 * We therefore need to include the stride constraint before computing
 * the minimum.
 */
static __isl_give isl_pw_aff *exact_bound(__isl_keep isl_set *domain,
	__isl_keep isl_ast_build *build, int upper)
{
	isl_set *stride;
	isl_map *it_map;
	isl_pw_aff *pa;
	isl_pw_multi_aff *pma;

	domain = isl_set_copy(domain);
	if (!upper) {
		stride = isl_ast_build_get_stride_constraint(build);
		domain = isl_set_intersect(domain, stride);
	}
	it_map = isl_ast_build_map_to_iterator(build, domain);
	if (upper)
		pma = isl_map_lexmax_pw_multi_aff(it_map);
	else
		pma = isl_map_lexmin_pw_multi_aff(it_map);
	pa = isl_pw_multi_aff_get_pw_aff(pma, 0);
	isl_pw_multi_aff_free(pma);
	pa = isl_ast_build_compute_gist_pw_aff(build, pa);
	pa = isl_pw_aff_coalesce(pa);

	return pa;
}

/* Callback for sorting the isl_pw_aff_list passed to reduce_list and
 * remove_redundant_lower_bounds.
 */
static int reduce_list_cmp(__isl_keep isl_pw_aff *a, __isl_keep isl_pw_aff *b,
	void *user)
{
	return isl_pw_aff_plain_cmp(a, b);
}

/* Given a list of lower bounds "list", remove those that are redundant
 * with respect to the other bounds in "list" and the domain of "build".
 *
 * We first sort the bounds in the same way as they would be sorted
 * by set_for_node_expressions so that we can try and remove the last
 * bounds first.
 *
 * For a lower bound to be effective, there needs to be at least
 * one domain element for which it is larger than all other lower bounds.
 * For each lower bound we therefore intersect the domain with
 * the conditions that it is larger than all other bounds and
 * check whether the result is empty.  If so, the bound can be removed.
 */
static __isl_give isl_pw_aff_list *remove_redundant_lower_bounds(
	__isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
{
	int i, j;
	isl_size n;
	isl_set *domain;

	list = isl_pw_aff_list_sort(list, &reduce_list_cmp, NULL);

	n = isl_pw_aff_list_n_pw_aff(list);
	if (n < 0)
		return isl_pw_aff_list_free(list);
	if (n <= 1)
		return list;

	domain = isl_ast_build_get_domain(build);

	for (i = n - 1; i >= 0; --i) {
		isl_pw_aff *pa_i;
		isl_set *domain_i;
		int empty;

		domain_i = isl_set_copy(domain);
		pa_i = isl_pw_aff_list_get_pw_aff(list, i);

		for (j = 0; j < n; ++j) {
			isl_pw_aff *pa_j;
			isl_set *better;

			if (j == i)
				continue;

			pa_j = isl_pw_aff_list_get_pw_aff(list, j);
			better = isl_pw_aff_gt_set(isl_pw_aff_copy(pa_i), pa_j);
			domain_i = isl_set_intersect(domain_i, better);
		}

		empty = isl_set_is_empty(domain_i);

		isl_set_free(domain_i);
		isl_pw_aff_free(pa_i);

		if (empty < 0)
			goto error;
		if (!empty)
			continue;
		list = isl_pw_aff_list_drop(list, i, 1);
		n--;
	}

	isl_set_free(domain);

	return list;
error:
	isl_set_free(domain);
	return isl_pw_aff_list_free(list);
}
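
/* For example (an added illustration): given the lower bounds
 * { 0, n } over a domain where n >= 0 holds, the bound 0 is
 * redundant since the set of domain elements where 0 > n is empty,
 * so the intersection computed above is empty and the bound
 * is dropped from the list.
 */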

/* Extract a lower bound on dimension "pos" from each constraint
 * in "constraints" and return the list of lower bounds.
 * If "constraints" has zero elements, then we extract a lower bound
 * from "domain" instead.
 *
 * If the current dimension is strided, then the lower bound
 * is adjusted by lower_bound to match the stride information.
 * This modification may make one or more lower bounds redundant
 * with respect to the other lower bounds.  We therefore check
 * for this condition and remove the redundant lower bounds.
 */
static __isl_give isl_pw_aff_list *lower_bounds(
	__isl_keep isl_constraint_list *constraints, int pos,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	isl_pw_aff_list *list;
	int i;
	isl_size n;

	if (!build)
		return NULL;

	n = isl_constraint_list_n_constraint(constraints);
	if (n < 0)
		return NULL;
	if (n == 0) {
		isl_pw_aff *pa;
		pa = exact_bound(domain, build, 0);
		return isl_pw_aff_list_from_pw_aff(pa);
	}

	ctx = isl_ast_build_get_ctx(build);
	list = isl_pw_aff_list_alloc(ctx, n);

	for (i = 0; i < n; ++i) {
		isl_aff *aff;
		isl_constraint *c;

		c = isl_constraint_list_get_constraint(constraints, i);
		aff = lower_bound(c, pos, build);
		isl_constraint_free(c);
		list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
	}

	if (isl_ast_build_has_stride(build, pos))
		list = remove_redundant_lower_bounds(list, build);

	return list;
}

/* Extract an upper bound on dimension "pos" from each constraint
 * in "constraints" and return the list of upper bounds.
 * If "constraints" has zero elements, then we extract an upper bound
 * from "domain" instead.
 */
static __isl_give isl_pw_aff_list *upper_bounds(
	__isl_keep isl_constraint_list *constraints, int pos,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	isl_pw_aff_list *list;
	int i;
	isl_size n;

	n = isl_constraint_list_n_constraint(constraints);
	if (n < 0)
		return NULL;
	if (n == 0) {
		isl_pw_aff *pa;
		pa = exact_bound(domain, build, 1);
		return isl_pw_aff_list_from_pw_aff(pa);
	}

	ctx = isl_ast_build_get_ctx(build);
	list = isl_pw_aff_list_alloc(ctx, n);

	for (i = 0; i < n; ++i) {
		isl_aff *aff;
		isl_constraint *c;

		c = isl_constraint_list_get_constraint(constraints, i);
		aff = isl_constraint_get_bound(c, isl_dim_set, pos);
		isl_constraint_free(c);
		aff = isl_aff_floor(aff);
		list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
	}

	return list;
}

/* Return an isl_ast_expr that performs the reduction of type "type"
 * on AST expressions corresponding to the elements in "list".
 *
 * The list is assumed to contain at least one element.
 * If the list contains exactly one element, then the returned isl_ast_expr
 * simply computes that affine expression.
 * If the list contains more than one element, then we sort it
 * using a fairly arbitrary but hopefully reasonably stable order.
 */
static __isl_give isl_ast_expr *reduce_list(enum isl_ast_expr_op_type type,
	__isl_keep isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
{
	int i;
	isl_size n;
	isl_ctx *ctx;
	isl_ast_expr *expr;

	n = isl_pw_aff_list_n_pw_aff(list);
	if (n < 0)
		return NULL;

	if (n == 1)
		return isl_ast_build_expr_from_pw_aff_internal(build,
				isl_pw_aff_list_get_pw_aff(list, 0));

	ctx = isl_pw_aff_list_get_ctx(list);
	expr = isl_ast_expr_alloc_op(ctx, type, n);
	if (!expr)
		return NULL;

	list = isl_pw_aff_list_copy(list);
	list = isl_pw_aff_list_sort(list, &reduce_list_cmp, NULL);
	if (!list)
		return isl_ast_expr_free(expr);

	for (i = 0; i < n; ++i) {
		isl_ast_expr *expr_i;

		expr_i = isl_ast_build_expr_from_pw_aff_internal(build,
				isl_pw_aff_list_get_pw_aff(list, i));
		if (!expr_i)
			goto error;
		expr->u.op.args[i] = expr_i;
	}

	isl_pw_aff_list_free(list);
	return expr;
error:
	isl_pw_aff_list_free(list);
	isl_ast_expr_free(expr);
	return NULL;
}
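
/* For example (an added illustration): for a list containing the
 * piecewise affine expressions n and 10 and type isl_ast_expr_op_min,
 * the result is a single n-ary min operation corresponding to
 *
 *	min(n, 10)
 *
 * with the argument order determined by the sort above.
 */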

/* Add guards implied by the "generated constraints",
 * but not (necessarily) enforced by the generated AST to "guard".
 * In particular, if there are any stride constraints,
 * then add the guard implied by those constraints.
 * If we have generated a degenerate loop, then add the guard
 * implied by "bounds" on the outer dimensions, i.e., the guard
 * that ensures that the single value actually exists.
 * Since there may also be guards implied by a combination
 * of these constraints, we first combine them before
 * deriving the implied constraints.
 */
static __isl_give isl_set *add_implied_guards(__isl_take isl_set *guard,
	int degenerate, __isl_keep isl_basic_set *bounds,
	__isl_keep isl_ast_build *build)
{
	int depth, has_stride;
	isl_space *space;
	isl_set *dom, *set;

	depth = isl_ast_build_get_depth(build);
	has_stride = isl_ast_build_has_stride(build, depth);
	if (!has_stride && !degenerate)
		return guard;

	space = isl_basic_set_get_space(bounds);
	dom = isl_set_universe(space);

	if (degenerate) {
		bounds = isl_basic_set_copy(bounds);
		bounds = isl_basic_set_drop_constraints_not_involving_dims(
					bounds, isl_dim_set, depth, 1);
		set = isl_set_from_basic_set(bounds);
		dom = isl_set_intersect(dom, set);
	}

	if (has_stride) {
		set = isl_ast_build_get_stride_constraint(build);
		dom = isl_set_intersect(dom, set);
	}

	dom = isl_set_eliminate(dom, isl_dim_set, depth, 1);
	dom = isl_ast_build_compute_gist(build, dom);
	guard = isl_set_intersect(guard, dom);

	return guard;
}

/* Update "graft" based on "sub_build" for the degenerate case.
 *
 * "build" is the build in which graft->node was created
 * "sub_build" contains information about the current level itself,
 * including the single value attained.
 *
 * We set the initialization part of the for loop to the single
 * value attained by the current dimension.
 * The increment and condition are not strictly needed as they are known
 * to be "1" and "iterator <= value" respectively.
 */
static __isl_give isl_ast_graft *refine_degenerate(
	__isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build,
	__isl_keep isl_ast_build *sub_build)
{
	isl_pw_aff *value;

	if (!graft || !sub_build)
		return isl_ast_graft_free(graft);

	value = isl_pw_aff_copy(sub_build->value);

	graft->node->u.f.init = isl_ast_build_expr_from_pw_aff_internal(build,
						value);
	if (!graft->node->u.f.init)
		return isl_ast_graft_free(graft);

	return graft;
}

/* Return the intersection of constraints in "list" as a set.
 */
static __isl_give isl_set *intersect_constraints(
	__isl_keep isl_constraint_list *list)
{
	int i;
	isl_size n;
	isl_basic_set *bset;

	n = isl_constraint_list_n_constraint(list);
	if (n < 0)
		return NULL;
	if (n < 1)
		isl_die(isl_constraint_list_get_ctx(list), isl_error_internal,
			"expecting at least one constraint", return NULL);

	bset = isl_basic_set_from_constraint(
				isl_constraint_list_get_constraint(list, 0));
	for (i = 1; i < n; ++i) {
		isl_basic_set *bset_i;

		bset_i = isl_basic_set_from_constraint(
				isl_constraint_list_get_constraint(list, i));
		bset = isl_basic_set_intersect(bset, bset_i);
	}

	return isl_set_from_basic_set(bset);
}

/* Compute the constraints on the outer dimensions enforced by
 * graft->node and add those constraints to graft->enforced,
 * in case the upper bound is expressed as a set "upper".
 *
 * In particular, if l(...) is a lower bound in "lower", and
 *
 *	-a i + f(...) >= 0		or	a i <= f(...)
 *
 * is an upper bound constraint on the current dimension i,
 * then the for loop enforces the constraint
 *
 *	-a l(...) + f(...) >= 0		or	a l(...) <= f(...)
 *
 * We therefore simply take each lower bound in turn, plug it into
 * the upper bounds and compute the intersection over all lower bounds.
 *
 * If a lower bound is a rational expression, then
 * isl_basic_set_preimage_multi_aff will force this rational
 * expression to have only integer values.  However, the loop
 * itself does not enforce this integrality constraint.  We therefore
 * use the ceil of the lower bounds instead of the lower bounds themselves.
 * Other constraints will make sure that the for loop is only executed
 * when each of the lower bounds attains an integral value.
 * In particular, potentially rational values only occur in
 * lower_bound if the offset is a (seemingly) rational expression,
 * but then outer conditions will make sure that this rational expression
 * only attains integer values.
 */
static __isl_give isl_ast_graft *set_enforced_from_set(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_pw_aff_list *lower, int pos, __isl_keep isl_set *upper)
{
	isl_space *space;
	isl_basic_set *enforced;
	isl_pw_multi_aff *pma;
	int i;
	isl_size n;

	n = isl_pw_aff_list_n_pw_aff(lower);
	if (!graft || n < 0)
		return isl_ast_graft_free(graft);

	space = isl_set_get_space(upper);
	enforced = isl_basic_set_universe(isl_space_copy(space));

	space = isl_space_map_from_set(space);
	pma = isl_pw_multi_aff_identity(space);

	for (i = 0; i < n; ++i) {
		isl_pw_aff *pa;
		isl_set *enforced_i;
		isl_basic_set *hull;
		isl_pw_multi_aff *pma_i;

		pa = isl_pw_aff_list_get_pw_aff(lower, i);
		pa = isl_pw_aff_ceil(pa);
		pma_i = isl_pw_multi_aff_copy(pma);
		pma_i = isl_pw_multi_aff_set_pw_aff(pma_i, pos, pa);
		enforced_i = isl_set_copy(upper);
		enforced_i = isl_set_preimage_pw_multi_aff(enforced_i, pma_i);
		hull = isl_set_simple_hull(enforced_i);
		enforced = isl_basic_set_intersect(enforced, hull);
	}

	isl_pw_multi_aff_free(pma);

	graft = isl_ast_graft_enforce(graft, enforced);

	return graft;
}
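
/* For example (an added illustration): with a single lower bound 0
 * and the upper bound constraint i <= n, plugging the lower bound
 * into the upper bound yields the enforced constraint
 *
 *	0 <= n
 *
 * on the outer dimensions, reflecting that the loop body can only
 * execute when the lower bound does not exceed the upper bound.
 */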

/* Compute the constraints on the outer dimensions enforced by
 * graft->node and add those constraints to graft->enforced,
 * in case the upper bound is expressed as
 * a list of affine expressions "upper".
 *
 * The enforced condition is that each lower bound expression is less
 * than or equal to each upper bound expression.
 */
static __isl_give isl_ast_graft *set_enforced_from_list(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_pw_aff_list *lower, __isl_keep isl_pw_aff_list *upper)
{
	isl_set *cond;
	isl_basic_set *enforced;

	lower = isl_pw_aff_list_copy(lower);
	upper = isl_pw_aff_list_copy(upper);
	cond = isl_pw_aff_list_le_set(lower, upper);
	enforced = isl_set_simple_hull(cond);
	graft = isl_ast_graft_enforce(graft, enforced);

	return graft;
}

/* Does "aff" have a negative constant term?
 */
static isl_bool aff_constant_is_negative(__isl_keep isl_set *set,
	__isl_keep isl_aff *aff, void *user)
{
	isl_bool is_neg;
	isl_val *v;

	v = isl_aff_get_constant_val(aff);
	is_neg = isl_val_is_neg(v);
	isl_val_free(v);

	return is_neg;
}

/* Does "pa" have a negative constant term over its entire domain?
 */
static isl_bool pw_aff_constant_is_negative(__isl_keep isl_pw_aff *pa,
	void *user)
{
	return isl_pw_aff_every_piece(pa, &aff_constant_is_negative, NULL);
}

/* Does each element in "list" have a negative constant term?
 */
static int list_constant_is_negative(__isl_keep isl_pw_aff_list *list)
{
	return isl_pw_aff_list_every(list, &pw_aff_constant_is_negative, NULL);
}

/* Add 1 to each of the elements in "list", where each of these elements
 * is defined over the internal schedule space of "build".
 */
static __isl_give isl_pw_aff_list *list_add_one(
	__isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
{
	int i;
	isl_size n;
	isl_space *space;
	isl_aff *aff;
	isl_pw_aff *one;

	n = isl_pw_aff_list_n_pw_aff(list);
	if (n < 0)
		return isl_pw_aff_list_free(list);

	space = isl_ast_build_get_space(build, 1);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_constant_si(aff, 1);
	one = isl_pw_aff_from_aff(aff);

	for (i = 0; i < n; ++i) {
		isl_pw_aff *pa;
		pa = isl_pw_aff_list_get_pw_aff(list, i);
		pa = isl_pw_aff_add(pa, isl_pw_aff_copy(one));
		list = isl_pw_aff_list_set_pw_aff(list, i, pa);
	}

	isl_pw_aff_free(one);

	return list;
}

/* Set the condition part of the for node graft->node in case
 * the upper bound is represented as a list of piecewise affine expressions.
 *
 * In particular, set the condition to
 *
 *	iterator <= min(list of upper bounds)
 *
 * If each of the upper bounds has a negative constant term, then
 * set the condition to
 *
 *	iterator < min(list of (upper bound + 1)s)
 *
 */
static __isl_give isl_ast_graft *set_for_cond_from_list(
	__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *list,
	__isl_keep isl_ast_build *build)
{
	int neg;
	isl_ast_expr *bound, *iterator, *cond;
	enum isl_ast_expr_op_type type = isl_ast_expr_op_le;

	if (!graft || !list)
		return isl_ast_graft_free(graft);

	neg = list_constant_is_negative(list);
	if (neg < 0)
		return isl_ast_graft_free(graft);
	list = isl_pw_aff_list_copy(list);
	if (neg) {
		list = list_add_one(list, build);
		type = isl_ast_expr_op_lt;
	}

	bound = reduce_list(isl_ast_expr_op_min, list, build);
	iterator = isl_ast_expr_copy(graft->node->u.f.iterator);
	cond = isl_ast_expr_alloc_binary(type, iterator, bound);
	graft->node->u.f.cond = cond;

	isl_pw_aff_list_free(list);
	if (!graft->node->u.f.cond)
		return isl_ast_graft_free(graft);
	return graft;
}
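
/* For illustration (an example only, not code used by isl): if the
 * upper bounds are n - 1 and m - 1, then both constant terms are
 * negative and the condition is generated as
 *
 *	i < min(n, m)
 *
 * instead of i <= min(n - 1, m - 1), avoiding the subtractions in
 * the generated expression.
 */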

/* Set the condition part of the for node graft->node in case
 * the upper bound is represented as a set.
 */
static __isl_give isl_ast_graft *set_for_cond_from_set(
	__isl_take isl_ast_graft *graft, __isl_keep isl_set *set,
	__isl_keep isl_ast_build *build)
{
	isl_ast_expr *cond;

	if (!graft)
		return NULL;

	cond = isl_ast_build_expr_from_set_internal(build, isl_set_copy(set));
	graft->node->u.f.cond = cond;
	if (!graft->node->u.f.cond)
		return isl_ast_graft_free(graft);
	return graft;
}

/* Construct an isl_ast_expr for the increment (i.e., stride) of
 * the current dimension.
 */
static __isl_give isl_ast_expr *for_inc(__isl_keep isl_ast_build *build)
{
	int depth;
	isl_val *v;
	isl_ctx *ctx;

	if (!build)
		return NULL;
	ctx = isl_ast_build_get_ctx(build);
	depth = isl_ast_build_get_depth(build);

	if (!isl_ast_build_has_stride(build, depth))
		return isl_ast_expr_alloc_int_si(ctx, 1);

	v = isl_ast_build_get_stride(build, depth);
	return isl_ast_expr_from_val(v);
}

/* Should we express the loop condition as
 *
 *	iterator <= min(list of upper bounds)
 *
 * or as a conjunction of constraints?
 *
 * The first is constructed from a list of upper bounds.
 * The second is constructed from a set.
 *
 * If there are no upper bounds in "constraints", then this could mean
 * that "domain" simply doesn't have an upper bound or that we didn't
 * pick any upper bound.  In the first case, we want to generate the
 * loop condition as a(n empty) conjunction of constraints.
 * In the second case, we will compute
 * a single upper bound from "domain" and so we use the list form.
 *
 * If there are upper bounds in "constraints",
 * then we use the list form iff the atomic_upper_bound option is set.
 */
static int use_upper_bound_list(isl_ctx *ctx, int n_upper,
	__isl_keep isl_set *domain, int depth)
{
	if (n_upper > 0)
		return isl_options_get_ast_build_atomic_upper_bound(ctx);
	else
		return isl_set_dim_has_upper_bound(domain, isl_dim_set, depth);
}
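
/* For illustration (a hypothetical use, not code used by isl):
 * after setting
 *
 *	isl_options_set_ast_build_atomic_upper_bound(ctx, 1);
 *
 * a dimension with two upper bounds is generated in the list form
 *
 *	for (int i = 0; i <= min(n, m); i += 1)
 *
 * whereas without the option the condition is kept as a conjunction
 *
 *	for (int i = 0; i <= n && i <= m; i += 1)
 */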

/* Fill in the expressions of the for node in graft->node.
 *
 * In particular,
 * - set the initialization part of the loop to the maximum of the lower bounds
 * - extract the increment from the stride of the current dimension
 * - construct the for condition either based on a list of upper bounds
 *	or on a set of upper bound constraints.
 */
static __isl_give isl_ast_graft *set_for_node_expressions(
	__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *lower,
	int use_list, __isl_keep isl_pw_aff_list *upper_list,
	__isl_keep isl_set *upper_set, __isl_keep isl_ast_build *build)
{
	isl_ast_node *node;

	if (!graft)
		return NULL;

	build = isl_ast_build_copy(build);

	node = graft->node;
	node->u.f.init = reduce_list(isl_ast_expr_op_max, lower, build);
	node->u.f.inc = for_inc(build);

	if (!node->u.f.init || !node->u.f.inc)
		graft = isl_ast_graft_free(graft);

	if (use_list)
		graft = set_for_cond_from_list(graft, upper_list, build);
	else
		graft = set_for_cond_from_set(graft, upper_set, build);

	isl_ast_build_free(build);

	return graft;
}

/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "c_lower" and "c_upper" contain the lower and upper bounds
 * that the loop node should express.
 * "domain" is the subset of the intersection of the constraints
 * for which some code is executed.
 *
 * There may be zero lower bounds or zero upper bounds in "c_lower" or
 * "c_upper" in case the corresponding list of constraints was created
 * based on the atomic option or based on separation with explicit bounds.
 * In that case, we use "domain" to derive lower and/or upper bounds.
 *
 * We first compute a list of one or more lower bounds.
 *
 * Then we decide if we want to express the condition as
 *
 *	iterator <= min(list of upper bounds)
 *
 * or as a conjunction of constraints.
 *
 * The set of enforced constraints is then computed either based on
 * a list of upper bounds or on a set of upper bound constraints.
 * We do not compute any enforced constraints if we were forced
 * to compute a lower or upper bound using exact_bound.  The domains
 * of the resulting expressions may imply some bounds on outer dimensions
 * that we do not want to appear in the enforced constraints since
 * they are not actually enforced by the corresponding code.
 *
 * Finally, we fill in the expressions of the for node.
 */
static __isl_give isl_ast_graft *refine_generic_bounds(
	__isl_take isl_ast_graft *graft,
	__isl_take isl_constraint_list *c_lower,
	__isl_take isl_constraint_list *c_upper,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	int depth;
	isl_ctx *ctx;
	isl_pw_aff_list *lower;
	int use_list;
	isl_set *upper_set = NULL;
	isl_pw_aff_list *upper_list = NULL;
	isl_size n_lower, n_upper;

	if (!graft || !c_lower || !c_upper || !build)
		goto error;

	depth = isl_ast_build_get_depth(build);
	ctx = isl_ast_graft_get_ctx(graft);

	n_lower = isl_constraint_list_n_constraint(c_lower);
	n_upper = isl_constraint_list_n_constraint(c_upper);
	if (n_lower < 0 || n_upper < 0)
		goto error;

	use_list = use_upper_bound_list(ctx, n_upper, domain, depth);

	lower = lower_bounds(c_lower, depth, domain, build);

	if (use_list)
		upper_list = upper_bounds(c_upper, depth, domain, build);
	else if (n_upper > 0)
		upper_set = intersect_constraints(c_upper);
	else
		upper_set = isl_set_universe(isl_set_get_space(domain));

	if (n_lower == 0 || n_upper == 0)
		;
	else if (use_list)
		graft = set_enforced_from_list(graft, lower, upper_list);
	else
		graft = set_enforced_from_set(graft, lower, depth, upper_set);

	graft = set_for_node_expressions(graft, lower, use_list, upper_list,
					upper_set, build);

	isl_pw_aff_list_free(lower);
	isl_pw_aff_list_free(upper_list);
	isl_set_free(upper_set);
	isl_constraint_list_free(c_lower);
	isl_constraint_list_free(c_upper);

	return graft;
error:
	isl_constraint_list_free(c_lower);
	isl_constraint_list_free(c_upper);
	return isl_ast_graft_free(graft);
}

/* Internal data structure used inside count_constraints to keep
 * track of the number of constraints that are independent of dimension "pos",
 * the lower bounds in "pos" and the upper bounds in "pos".
 */
struct isl_ast_count_constraints_data {
	int pos;

	int n_indep;
	int n_lower;
	int n_upper;
};

/* Increment data->n_indep, data->n_lower or data->n_upper depending
 * on whether "c" is independent of dimension data->pos,
 * a lower bound or an upper bound.
 */
static isl_stat count_constraints(__isl_take isl_constraint *c, void *user)
{
	struct isl_ast_count_constraints_data *data = user;

	if (isl_constraint_is_lower_bound(c, isl_dim_set, data->pos))
		data->n_lower++;
	else if (isl_constraint_is_upper_bound(c, isl_dim_set, data->pos))
		data->n_upper++;
	else
		data->n_indep++;

	isl_constraint_free(c);

	return isl_stat_ok;
}

/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "list" respresent the list of bounds that need to be encoded by
 * the for loop.  Only the constraints that involve the iterator
 * are relevant here.  The other constraints are taken care of by
 * the caller and are included in the generated constraints of "build".
 * "domain" is the subset of the intersection of the constraints
 * for which some code is executed.
 * "build" is the build in which graft->node was created.
 *
 * We separate lower bounds, upper bounds and constraints that
 * are independent of the loop iterator.
 *
 * The actual for loop bounds are generated in refine_generic_bounds.
 */
static __isl_give isl_ast_graft *refine_generic_split(
	__isl_take isl_ast_graft *graft, __isl_take isl_constraint_list *list,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	struct isl_ast_count_constraints_data data;
	isl_constraint_list *lower;
	isl_constraint_list *upper;

	if (!list)
		return isl_ast_graft_free(graft);

	data.pos = isl_ast_build_get_depth(build);

	list = isl_constraint_list_sort(list, &cmp_constraint, &data.pos);
	if (!list)
		return isl_ast_graft_free(graft);

	data.n_indep = data.n_lower = data.n_upper = 0;
	if (isl_constraint_list_foreach(list, &count_constraints, &data) < 0) {
		isl_constraint_list_free(list);
		return isl_ast_graft_free(graft);
	}

	lower = isl_constraint_list_drop(list, 0, data.n_indep);
	upper = isl_constraint_list_copy(lower);
	lower = isl_constraint_list_drop(lower, data.n_lower, data.n_upper);
	upper = isl_constraint_list_drop(upper, 0, data.n_lower);

	return refine_generic_bounds(graft, lower, upper, domain, build);
}
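
/* For illustration (an example only, not code used by isl): if the
 * sorted "list" is
 *
 *	[ indep0, lower0, lower1, upper0 ]
 *
 * then data.n_indep = 1, data.n_lower = 2 and data.n_upper = 1.
 * Dropping the first n_indep element yields [ lower0, lower1, upper0 ],
 * "lower" then drops the trailing n_upper element, leaving
 * [ lower0, lower1 ], and "upper" drops the leading n_lower elements,
 * leaving [ upper0 ].
 */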

/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "bounds" respresent the bounds that need to be encoded by
 * the for loop (or a guard around the for loop).
 * "domain" is the subset of "bounds" for which some code is executed.
 * "build" is the build in which graft->node was created.
 *
 * We break up "bounds" into a list of constraints and continue with
 * refine_generic_split.
 */
static __isl_give isl_ast_graft *refine_generic(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_basic_set *bounds, __isl_keep isl_set *domain,
	__isl_keep isl_ast_build *build)
{
	isl_constraint_list *list;

	if (!build || !graft)
		return isl_ast_graft_free(graft);

	list = isl_basic_set_get_constraint_list(bounds);

	graft = refine_generic_split(graft, list, domain, build);

	return graft;
}

/* Create a for node for the current level.
 *
 * Mark the for node degenerate if "degenerate" is set.
 */
static __isl_give isl_ast_node *create_for(__isl_keep isl_ast_build *build,
	int degenerate)
{
	int depth;
	isl_id *id;
	isl_ast_node *node;

	if (!build)
		return NULL;

	depth = isl_ast_build_get_depth(build);
	id = isl_ast_build_get_iterator_id(build, depth);
	node = isl_ast_node_alloc_for(id);
	if (degenerate)
		node = isl_ast_node_for_mark_degenerate(node);

	return node;
}

/* If the ast_build_exploit_nested_bounds option is set, then return
 * the constraints enforced by all elements in "list".
 * Otherwise, return the universe.
 */
static __isl_give isl_basic_set *extract_shared_enforced(
	__isl_keep isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	isl_space *space;

	if (!list)
		return NULL;

	ctx = isl_ast_graft_list_get_ctx(list);
	if (isl_options_get_ast_build_exploit_nested_bounds(ctx))
		return isl_ast_graft_list_extract_shared_enforced(list, build);

	space = isl_ast_build_get_space(build, 1);
	return isl_basic_set_universe(space);
}

/* Return the pending constraints of "build" that are not already taken
 * care of (by a combination of "enforced" and the generated constraints
 * of "build").
 */
static __isl_give isl_set *extract_pending(__isl_keep isl_ast_build *build,
	__isl_keep isl_basic_set *enforced)
{
	isl_set *guard, *context;

	guard = isl_ast_build_get_pending(build);
	context = isl_set_from_basic_set(isl_basic_set_copy(enforced));
	context = isl_set_intersect(context,
					isl_ast_build_get_generated(build));
	return isl_set_gist(guard, context);
}

/* Create an AST node for the current dimension based on
 * the schedule domain "bounds" and return the node encapsulated
 * in an isl_ast_graft.
 *
 * "executed" is the current inverse schedule, taking into account
 * the bounds in "bounds"
 * "domain" is the domain of "executed", with inner dimensions projected out.
 * It may be a strict subset of "bounds" in case "bounds" was created
 * based on the atomic option or based on separation with explicit bounds.
 *
 * "domain" may satisfy additional equalities that result
 * from intersecting "executed" with "bounds" in add_node.
 * It may also satisfy some global constraints that were dropped out because
 * we performed separation with explicit bounds.
 * The very first step is then to copy these constraints to "bounds".
 *
 * Since we may be calling before_each_for and after_each_for
 * callbacks, we record the current inverse schedule in the build.
 *
 * We consider three builds,
 * "build" is the one in which the current level is created,
 * "body_build" is the build in which the next level is created,
 * "sub_build" is essentially the same as "body_build", except that
 * the depth has not been increased yet.
 *
 * "build" already contains information (in strides and offsets)
 * about the strides at the current level, but this information is not
 * reflected in the build->domain.
 * We first add this information and the "bounds" to the sub_build->domain.
 * isl_ast_build_set_loop_bounds adds the stride information and
 * checks whether the current dimension attains
 * only a single value and whether this single value can be represented using
 * a single affine expression.
 * In the first case, the current level is considered "degenerate".
 * In the second, sub-case, the current level is considered "eliminated".
 * Eliminated levels don't need to be reflected in the AST since we can
 * simply plug in the affine expression.  For degenerate, but non-eliminated,
 * levels, we do introduce a for node, but mark it as degenerate so that
 * it can be printed as an assignment of the single value to the loop
 * "iterator".
 *
 * If the current level is eliminated, we explicitly plug in the value
 * for the current level found by isl_ast_build_set_loop_bounds in the
 * inverse schedule.  This ensures that, if we are working on a slice
 * of the domain based on information available in the inverse schedule
 * and the build domain, this information is also reflected
 * in the inverse schedule.  This operation also eliminates the current
 * dimension from the inverse schedule making sure no inner dimensions depend
 * on the current dimension.  Otherwise, we create a for node, marking
 * it degenerate if appropriate.  The initial for node is still incomplete
 * and will be completed in either refine_degenerate or refine_generic.
 *
 * We then generate a sequence of grafts for the next level,
 * create a surrounding graft for the current level and insert
 * the for node we created (if the current level is not eliminated).
 * Before creating a graft for the current level, we first extract
 * hoistable constraints from the child guards and combine them
 * with the pending constraints in the build.  These constraints
 * are used to simplify the child guards and then added to the guard
 * of the current graft to ensure that they will be generated.
 * If the hoisted guard is a disjunction, then we use it directly
 * to gist the guards on the children before intersecting it with the
 * pending constraints.  We do so because this disjunction is typically
 * identical to the guards on the children such that these guards
 * can be effectively removed completely.  After the intersection,
 * the gist operation would have a harder time figuring this out.
 *
 * Finally, we set the bounds of the for loop in either
 * refine_degenerate or refine_generic.
 * We do so in a context where the pending constraints of the build
 * have been replaced by the guard of the current graft.
 */
static __isl_give isl_ast_graft *create_node_scaled(
	__isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_set *domain,
	__isl_take isl_ast_build *build)
{
	int depth;
	int degenerate;
	isl_bool eliminated;
	isl_size n;
	isl_basic_set *hull;
	isl_basic_set *enforced;
	isl_set *guard, *hoisted;
	isl_ast_node *node = NULL;
	isl_ast_graft *graft;
	isl_ast_graft_list *children;
	isl_ast_build *sub_build;
	isl_ast_build *body_build;

	domain = isl_ast_build_eliminate_divs(build, domain);
	domain = isl_set_detect_equalities(domain);
	hull = isl_set_unshifted_simple_hull(isl_set_copy(domain));
	bounds = isl_basic_set_intersect(bounds, hull);
	build = isl_ast_build_set_executed(build, isl_union_map_copy(executed));

	depth = isl_ast_build_get_depth(build);
	sub_build = isl_ast_build_copy(build);
	bounds = isl_basic_set_remove_redundancies(bounds);
	bounds = isl_ast_build_specialize_basic_set(sub_build, bounds);
	sub_build = isl_ast_build_set_loop_bounds(sub_build,
						isl_basic_set_copy(bounds));
	degenerate = isl_ast_build_has_value(sub_build);
	eliminated = isl_ast_build_has_affine_value(sub_build, depth);
	if (degenerate < 0 || eliminated < 0)
		executed = isl_union_map_free(executed);
	if (!degenerate)
		bounds = isl_ast_build_compute_gist_basic_set(build, bounds);
	sub_build = isl_ast_build_set_pending_generated(sub_build,
						isl_basic_set_copy(bounds));
	if (eliminated)
		executed = plug_in_values(executed, sub_build);
	else
		node = create_for(build, degenerate);

	body_build = isl_ast_build_copy(sub_build);
	body_build = isl_ast_build_increase_depth(body_build);
	if (!eliminated)
		node = before_each_for(node, body_build);
	children = generate_next_level(executed,
				    isl_ast_build_copy(body_build));

	enforced = extract_shared_enforced(children, build);
	guard = extract_pending(sub_build, enforced);
	hoisted = isl_ast_graft_list_extract_hoistable_guard(children, build);
	n = isl_set_n_basic_set(hoisted);
	if (n < 0)
		children = isl_ast_graft_list_free(children);
	if (n > 1)
		children = isl_ast_graft_list_gist_guards(children,
						    isl_set_copy(hoisted));
	guard = isl_set_intersect(guard, hoisted);
	if (!eliminated)
		guard = add_implied_guards(guard, degenerate, bounds, build);

	graft = isl_ast_graft_alloc_from_children(children,
			    isl_set_copy(guard), enforced, build, sub_build);

	if (!eliminated) {
		isl_ast_build *for_build;

		graft = isl_ast_graft_insert_for(graft, node);
		for_build = isl_ast_build_copy(build);
		for_build = isl_ast_build_replace_pending_by_guard(for_build,
							isl_set_copy(guard));
		if (degenerate)
			graft = refine_degenerate(graft, for_build, sub_build);
		else
			graft = refine_generic(graft, bounds,
					domain, for_build);
		isl_ast_build_free(for_build);
	}
	isl_set_free(guard);
	if (!eliminated)
		graft = after_each_for(graft, body_build);

	isl_ast_build_free(body_build);
	isl_ast_build_free(sub_build);
	isl_ast_build_free(build);
	isl_basic_set_free(bounds);
	isl_set_free(domain);

	return graft;
}
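
/* For illustration (an example only, not code used by isl): if the
 * current dimension attains only the value n, then this single value
 * is expressible as one affine expression, the level is eliminated and
 * n is plugged into the inverse schedule without creating a for node.
 * If the single value is instead, say, n for n >= 0 and 0 for n < 0,
 * then it is not a single affine expression, so the level is
 * degenerate but not eliminated, and the for node is marked degenerate
 * so that it can be printed as an assignment to the loop iterator.
 */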

/* Internal data structure for checking if all constraints involving
 * the input dimension "depth" are such that the other coefficients
 * are multiples of "m", reducing "m" if they are not.
 * If "m" is reduced all the way down to "1", then the check has failed
 * and we break out of the iteration.
 */
struct isl_check_scaled_data {
	int depth;
	isl_val *m;
};

/* If constraint "c" involves the input dimension data->depth,
 * then make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static isl_stat constraint_check_scaled(__isl_take isl_constraint *c,
	void *user)
{
	struct isl_check_scaled_data *data = user;
	int i, j;
	isl_size n;
	enum isl_dim_type t[] = { isl_dim_param, isl_dim_in, isl_dim_out,
				    isl_dim_div };

	if (!isl_constraint_involves_dims(c, isl_dim_in, data->depth, 1)) {
		isl_constraint_free(c);
		return isl_stat_ok;
	}

	for (i = 0; i < 4; ++i) {
		n = isl_constraint_dim(c, t[i]);
		if (n < 0)
			break;
		for (j = 0; j < n; ++j) {
			isl_val *d;

			if (t[i] == isl_dim_in && j == data->depth)
				continue;
			if (!isl_constraint_involves_dims(c, t[i], j, 1))
				continue;
			d = isl_constraint_get_coefficient_val(c, t[i], j);
			data->m = isl_val_gcd(data->m, d);
			if (isl_val_is_one(data->m))
				break;
		}
		if (j < n)
			break;
	}

	isl_constraint_free(c);

	return i < 4 ? isl_stat_error : isl_stat_ok;
}
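
/* For illustration (an example only, not code used by isl): with
 * data->m = 4 and data->depth referring to i, the constraint
 *
 *	4j - i >= 0
 *
 * leaves data->m at gcd(4, 4) = 4, while the constraint
 *
 *	2j - i >= 0
 *
 * reduces it to gcd(4, 2) = 2.
 */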

/* For each constraint of "bmap" that involves the input dimension data->depth,
 * make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static isl_stat basic_map_check_scaled(__isl_take isl_basic_map *bmap,
	void *user)
{
	isl_stat r;

	r = isl_basic_map_foreach_constraint(bmap,
						&constraint_check_scaled, user);
	isl_basic_map_free(bmap);

	return r;
}

/* For each constraint of "map" that involves the input dimension data->depth,
 * make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static isl_stat map_check_scaled(__isl_take isl_map *map, void *user)
{
	isl_stat r;

	r = isl_map_foreach_basic_map(map, &basic_map_check_scaled, user);
	isl_map_free(map);

	return r;
}

/* Create an AST node for the current dimension based on
 * the schedule domain "bounds" and return the node encapsulated
 * in an isl_ast_graft.
 *
 * "executed" is the current inverse schedule, taking into account
 * the bounds in "bounds"
 * "domain" is the domain of "executed", with inner dimensions projected out.
 *
 *
 * Before moving on to the actual AST node construction in create_node_scaled,
 * we first check if the current dimension is strided and if we can scale
 * down this stride.  Note that we only do this if the ast_build_scale_strides
 * option is set.
 *
 * In particular, let the current dimension take on values
 *
 *	f + s a
 *
 * with a an integer.  We check if we can find an integer m that (obviously)
 * divides both f and s.
 *
 * If so, we check if the current dimension only appears in constraints
 * where the coefficients of the other variables are multiples of m.
 * We perform this extra check to avoid the risk of introducing
 * divisions by scaling down the current dimension.
 *
 * If so, we scale the current dimension down by a factor of m.
 * That is, we plug in
 *
 *	i = m i'							(1)
 *
 * Note that in principle we could always scale down strided loops
 * by plugging in
 *
 *	i = f + s i'
 *
 * but this may result in i' taking on larger values than the original i,
 * due to the shift by "f".
 * By contrast, the scaling in (1) can only reduce the (absolute) value of "i".
 */
static __isl_give isl_ast_graft *create_node(__isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_set *domain,
	__isl_take isl_ast_build *build)
{
	struct isl_check_scaled_data data;
	isl_ctx *ctx;
	isl_aff *offset;
	isl_val *d;

	ctx = isl_ast_build_get_ctx(build);
	if (!isl_options_get_ast_build_scale_strides(ctx))
		return create_node_scaled(executed, bounds, domain, build);

	data.depth = isl_ast_build_get_depth(build);
	if (!isl_ast_build_has_stride(build, data.depth))
		return create_node_scaled(executed, bounds, domain, build);

	offset = isl_ast_build_get_offset(build, data.depth);
	data.m = isl_ast_build_get_stride(build, data.depth);
	if (!data.m)
		offset = isl_aff_free(offset);
	offset = isl_aff_scale_down_val(offset, isl_val_copy(data.m));
	d = isl_aff_get_denominator_val(offset);
	if (!d)
		executed = isl_union_map_free(executed);

	if (executed && isl_val_is_divisible_by(data.m, d))
		data.m = isl_val_div(data.m, d);
	else {
		data.m = isl_val_set_si(data.m, 1);
		isl_val_free(d);
	}

	if (!isl_val_is_one(data.m)) {
		if (isl_union_map_foreach_map(executed, &map_check_scaled,
						&data) < 0 &&
		    !isl_val_is_one(data.m))
			executed = isl_union_map_free(executed);
	}

	if (!isl_val_is_one(data.m)) {
		isl_space *space;
		isl_multi_aff *ma;
		isl_aff *aff;
		isl_map *map;
		isl_union_map *umap;

		space = isl_ast_build_get_space(build, 1);
		space = isl_space_map_from_set(space);
		ma = isl_multi_aff_identity(space);
		aff = isl_multi_aff_get_aff(ma, data.depth);
		aff = isl_aff_scale_val(aff, isl_val_copy(data.m));
		ma = isl_multi_aff_set_aff(ma, data.depth, aff);

		bounds = isl_basic_set_preimage_multi_aff(bounds,
						isl_multi_aff_copy(ma));
		domain = isl_set_preimage_multi_aff(domain,
						isl_multi_aff_copy(ma));
		map = isl_map_reverse(isl_map_from_multi_aff(ma));
		umap = isl_union_map_from_map(map);
		executed = isl_union_map_apply_domain(executed,
						isl_union_map_copy(umap));
		build = isl_ast_build_scale_down(build, isl_val_copy(data.m),
						umap);
	}
	isl_aff_free(offset);
	isl_val_free(data.m);

	return create_node_scaled(executed, bounds, domain, build);
}
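
/* For illustration (an example only, not code used by isl): if the
 * current dimension takes on the values
 *
 *	8 + 12 a = 4 (2 + 3 a)
 *
 * then the offset is 8, the stride is 12 and m = 4, so plugging in
 * i = 4 i' makes i' take on the values 2 + 3 a, which are smaller
 * in absolute value than the original values of i.
 */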

/* Add the basic set to the list that "user" points to.
 */
static isl_stat collect_basic_set(__isl_take isl_basic_set *bset, void *user)
{
	isl_basic_set_list **list = user;

	*list = isl_basic_set_list_add(*list, bset);

	return isl_stat_ok;
}

/* Extract the basic sets of "set" and collect them in an isl_basic_set_list.
 */
static __isl_give isl_basic_set_list *isl_basic_set_list_from_set(
	__isl_take isl_set *set)
{
	isl_size n;
	isl_ctx *ctx;
	isl_basic_set_list *list;

	n = isl_set_n_basic_set(set);
	if (n < 0)
		set = isl_set_free(set);
	if (!set)
		return NULL;

	ctx = isl_set_get_ctx(set);

	list = isl_basic_set_list_alloc(ctx, n);
	if (isl_set_foreach_basic_set(set, &collect_basic_set, &list) < 0)
		list = isl_basic_set_list_free(list);

	isl_set_free(set);
	return list;
}
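
/* For illustration, a hypothetical internal use (the set description
 * is made up):
 *
 *	set = isl_set_read_from_str(ctx,
 *		"{ [i] : 0 <= i <= 1 or 4 <= i <= 5 }");
 *	list = isl_basic_set_list_from_set(set);
 *
 * results in a list containing the two basic sets { [i] : 0 <= i <= 1 }
 * and { [i] : 4 <= i <= 5 }.
 */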

/* Generate code for the schedule domain "bounds"
 * and add the result to "list".
 *
 * We mainly detect strides here and check if the bounds do not
 * conflict with the current build domain
 * and then pass over control to create_node.
 *
 * "bounds" reflects the bounds on the current dimension and possibly
 * some extra conditions on outer dimensions.
 * It does not, however, include any divs involving the current dimension,
 * so it does not capture any stride constraints.
 * We therefore need to compute that part of the schedule domain that
 * intersects with "bounds" and derive the strides from the result.
 */
static __isl_give isl_ast_graft_list *add_node(
	__isl_take isl_ast_graft_list *list, __isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_ast_build *build)
{
	isl_ast_graft *graft;
	isl_set *domain = NULL;
	isl_union_set *uset;
	int empty, disjoint;

	uset = isl_union_set_from_basic_set(isl_basic_set_copy(bounds));
	executed = isl_union_map_intersect_domain(executed, uset);
	empty = isl_union_map_is_empty(executed);
	if (empty < 0)
		goto error;
	if (empty)
		goto done;

	uset = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(uset);
	domain = isl_ast_build_specialize(build, domain);

	domain = isl_set_compute_divs(domain);
	domain = isl_ast_build_eliminate_inner(build, domain);
	disjoint = isl_set_is_disjoint(domain, build->domain);
	if (disjoint < 0)
		goto error;
	if (disjoint)
		goto done;

	build = isl_ast_build_detect_strides(build, isl_set_copy(domain));

	graft = create_node(executed, bounds, domain,
				isl_ast_build_copy(build));
	list = isl_ast_graft_list_add(list, graft);
	isl_ast_build_free(build);
	return list;
error:
	list = isl_ast_graft_list_free(list);
done:
	isl_set_free(domain);
	isl_basic_set_free(bounds);
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return list;
}

/* Does any element of i follow or coincide with any element of j
 * at the current depth for equal values of the outer dimensions?
 */
static isl_bool domain_follows_at_depth(__isl_keep isl_basic_set *i,
	__isl_keep isl_basic_set *j, void *user)
{
	int depth = *(int *) user;
	isl_basic_map *test;
	isl_bool empty;
	int l;

	test = isl_basic_map_from_domain_and_range(isl_basic_set_copy(i),
						    isl_basic_set_copy(j));
	for (l = 0; l < depth; ++l)
		test = isl_basic_map_equate(test, isl_dim_in, l,
						isl_dim_out, l);
	test = isl_basic_map_order_ge(test, isl_dim_in, depth,
					isl_dim_out, depth);
	empty = isl_basic_map_is_empty(test);
	isl_basic_map_free(test);

	return isl_bool_not(empty);
}
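
/* For illustration (an example only, not code used by isl): with
 * depth = 1, i = { [0, 1] } and j = { [0, 0] }, the two elements agree
 * on the outer dimension and 1 >= 0 holds at depth 1, so the test map
 * is non-empty and i is considered to follow j.
 */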

/* Split up each element of "list" into a part that is related to "bset"
 * according to "gt" and a part that is not.
 * Return a list that consists of "bset" and all the pieces.
 */
static __isl_give isl_basic_set_list *add_split_on(
	__isl_take isl_basic_set_list *list, __isl_take isl_basic_set *bset,
	__isl_keep isl_basic_map *gt)
{
	int i;
	isl_size n;
	isl_basic_set_list *res;

	n = isl_basic_set_list_n_basic_set(list);
	if (n < 0)
		bset = isl_basic_set_free(bset);

	gt = isl_basic_map_copy(gt);
	gt = isl_basic_map_intersect_domain(gt, isl_basic_set_copy(bset));
	res = isl_basic_set_list_from_basic_set(bset);
	for (i = 0; res && i < n; ++i) {
		isl_basic_set *bset;
		isl_set *set1, *set2;
		isl_basic_map *bmap;
		int empty;

		bset = isl_basic_set_list_get_basic_set(list, i);
		bmap = isl_basic_map_copy(gt);
		bmap = isl_basic_map_intersect_range(bmap, bset);
		bset = isl_basic_map_range(bmap);
		empty = isl_basic_set_is_empty(bset);
		if (empty < 0)
			res = isl_basic_set_list_free(res);
		if (empty) {
			isl_basic_set_free(bset);
			bset = isl_basic_set_list_get_basic_set(list, i);
			res = isl_basic_set_list_add(res, bset);
			continue;
		}

		res = isl_basic_set_list_add(res, isl_basic_set_copy(bset));
		set1 = isl_set_from_basic_set(bset);
		bset = isl_basic_set_list_get_basic_set(list, i);
		set2 = isl_set_from_basic_set(bset);
		set1 = isl_set_subtract(set2, set1);
		set1 = isl_set_make_disjoint(set1);

		res = isl_basic_set_list_concat(res,
					    isl_basic_set_list_from_set(set1));
	}
	isl_basic_map_free(gt);
	isl_basic_set_list_free(list);
	return res;
}

static __isl_give isl_ast_graft_list *generate_sorted_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed,
	__isl_keep isl_ast_build *build);

/* Internal data structure for add_nodes.
 *
 * "executed" and "build" are extra arguments to be passed to add_node.
 * "list" collects the results.
 */
struct isl_add_nodes_data {
	isl_union_map *executed;
	isl_ast_build *build;

	isl_ast_graft_list *list;
};

/* Generate code for the schedule domains in "scc"
 * and add the results to "list".
 *
 * The domains in "scc" form a strongly connected component in the ordering.
 * If the number of domains in "scc" is larger than 1, then this means
 * that we cannot determine a valid ordering for the domains in the component.
 * This should be fairly rare because the individual domains
 * have been made disjoint first.
 * The problem is that the domains may be integrally disjoint but not
 * rationally disjoint.  For example, we may have domains
 *
 *	{ [i,i] : 0 <= i <= 1 }		and	{ [i,1-i] : 0 <= i <= 1 }
 *
 * These two domains have an empty intersection, but their rational
 * relaxations do intersect.  It is impossible to order these domains
 * in the second dimension because the first should be ordered before
 * the second for outer dimension equal to 0, while it should be ordered
 * after for outer dimension equal to 1.
 *
 * This may happen in particular in case of unrolling since the domain
 * of each slice is replaced by its simple hull.
 *
 * For each basic set i in "scc" and for each of the following basic sets j,
 * we split off that part of the basic set i that shares the outer dimensions
 * with j and lies before j in the current dimension.
 * We collect all the pieces in a new list that replaces "scc".
 *
 * While the elements in "scc" should be disjoint, we double-check
 * this property to avoid running into an infinite recursion in case
 * they intersect due to some internal error.
 */
static isl_stat add_nodes(__isl_take isl_basic_set_list *scc, void *user)
{
	struct isl_add_nodes_data *data = user;
	int i, depth;
	isl_size n;
	isl_basic_set *bset, *first;
	isl_basic_set_list *list;
	isl_space *space;
	isl_basic_map *gt;

	n = isl_basic_set_list_n_basic_set(scc);
	if (n < 0)
		goto error;
	bset = isl_basic_set_list_get_basic_set(scc, 0);
	if (n == 1) {
		isl_basic_set_list_free(scc);
		data->list = add_node(data->list,
				isl_union_map_copy(data->executed), bset,
				isl_ast_build_copy(data->build));
		return data->list ? isl_stat_ok : isl_stat_error;
	}

	depth = isl_ast_build_get_depth(data->build);
	space = isl_basic_set_get_space(bset);
	space = isl_space_map_from_set(space);
	gt = isl_basic_map_universe(space);
	for (i = 0; i < depth; ++i)
		gt = isl_basic_map_equate(gt, isl_dim_in, i, isl_dim_out, i);
	gt = isl_basic_map_order_gt(gt, isl_dim_in, depth, isl_dim_out, depth);

	first = isl_basic_set_copy(bset);
	list = isl_basic_set_list_from_basic_set(bset);
	for (i = 1; i < n; ++i) {
		int disjoint;

		bset = isl_basic_set_list_get_basic_set(scc, i);

		disjoint = isl_basic_set_is_disjoint(bset, first);
		if (disjoint < 0)
			list = isl_basic_set_list_free(list);
		else if (!disjoint)
			isl_die(isl_basic_set_list_get_ctx(scc),
				isl_error_internal,
				"basic sets in scc are assumed to be disjoint",
				list = isl_basic_set_list_free(list));

		list = add_split_on(list, bset, gt);
	}
	isl_basic_set_free(first);
	isl_basic_map_free(gt);
	isl_basic_set_list_free(scc);
	scc = list;
	data->list = isl_ast_graft_list_concat(data->list,
		    generate_sorted_domains(scc, data->executed, data->build));
	isl_basic_set_list_free(scc);

	return data->list ? isl_stat_ok : isl_stat_error;
error:
	isl_basic_set_list_free(scc);
	return isl_stat_error;
}

/* Sort the domains in "domain_list" according to the execution order
 * at the current depth (for equal values of the outer dimensions),
 * generate code for each of them, collecting the results in a list.
 * If no code is generated (because the intersection of the inverse schedule
 * with the domains turns out to be empty), then an empty list is returned.
 *
 * The caller is responsible for ensuring that the basic sets in "domain_list"
 * are pair-wise disjoint.  It can, however, in principle happen that
 * two basic sets should be ordered one way for one value of the outer
 * dimensions and the other way for some other value of the outer dimensions.
 * We therefore play safe and look for strongly connected components.
 * The function add_nodes takes care of handling non-trivial components.
 */
static __isl_give isl_ast_graft_list *generate_sorted_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	struct isl_add_nodes_data data;
	int depth;
	isl_size n;

	n = isl_basic_set_list_n_basic_set(domain_list);
	if (n < 0)
		return NULL;

	ctx = isl_basic_set_list_get_ctx(domain_list);
	data.list = isl_ast_graft_list_alloc(ctx, n);
	if (n == 0)
		return data.list;
	if (n == 1)
		return add_node(data.list, isl_union_map_copy(executed),
			isl_basic_set_list_get_basic_set(domain_list, 0),
			isl_ast_build_copy(build));

	depth = isl_ast_build_get_depth(build);
	data.executed = executed;
	data.build = build;
	if (isl_basic_set_list_foreach_scc(domain_list,
					&domain_follows_at_depth, &depth,
					&add_nodes, &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	return data.list;
}

/* Do i and j share any values for the outer dimensions?
 */
static isl_bool shared_outer(__isl_keep isl_basic_set *i,
	__isl_keep isl_basic_set *j, void *user)
{
	int depth = *(int *) user;
	isl_basic_map *test;
	isl_bool empty;
	int l;

	test = isl_basic_map_from_domain_and_range(isl_basic_set_copy(i),
						    isl_basic_set_copy(j));
	for (l = 0; l < depth; ++l)
		test = isl_basic_map_equate(test, isl_dim_in, l,
						isl_dim_out, l);
	empty = isl_basic_map_is_empty(test);
	isl_basic_map_free(test);

	return isl_bool_not(empty);
}

/* Internal data structure for generate_sorted_domains_wrap.
 *
 * "n" is the total number of basic sets
 * "executed" and "build" are extra arguments to be passed
 *	to generate_sorted_domains.
 *
 * "single" is set to 1 by generate_sorted_domains_wrap if there
 * is only a single component.
 * "list" collects the results.
 */
struct isl_ast_generate_parallel_domains_data {
	isl_size n;
	isl_union_map *executed;
	isl_ast_build *build;

	int single;
	isl_ast_graft_list *list;
};

/* Call generate_sorted_domains on "scc", fuse the result into a list
 * with either zero or one graft and collect these single element
 * lists into data->list.
 *
 * If there is only one component, i.e., if the number of basic sets
 * in the current component is equal to the total number of basic sets,
 * then data->single is set to 1 and the result of generate_sorted_domains
 * is not fused.
 */
static isl_stat generate_sorted_domains_wrap(__isl_take isl_basic_set_list *scc,
	void *user)
{
	struct isl_ast_generate_parallel_domains_data *data = user;
	isl_ast_graft_list *list;
	isl_size n;

	n = isl_basic_set_list_n_basic_set(scc);
	if (n < 0)
		scc = isl_basic_set_list_free(scc);
	list = generate_sorted_domains(scc, data->executed, data->build);
	data->single = n == data->n;
	if (!data->single)
		list = isl_ast_graft_list_fuse(list, data->build);
	if (!data->list)
		data->list = list;
	else
		data->list = isl_ast_graft_list_concat(data->list, list);

	isl_basic_set_list_free(scc);
	if (!data->list)
		return isl_stat_error;

	return isl_stat_ok;
}

/* Look for any (weakly connected) components in the "domain_list"
 * of domains that share some values of the outer dimensions.
 * That is, domains in different components do not share any values
 * of the outer dimensions.  This means that these components
 * can be freely reordered.
 * Within each of the components, we sort the domains according
 * to the execution order at the current depth.
 *
 * If there is more than one component, then generate_sorted_domains_wrap
 * fuses the result of each call to generate_sorted_domains
 * into a list with either zero or one graft and collects these (at most)
 * single element lists into a bigger list. This means that the elements of the
 * final list can be freely reordered.  In particular, we sort them
 * according to an arbitrary but fixed ordering to ease merging of
 * graft lists from different components.
 */
static __isl_give isl_ast_graft_list *generate_parallel_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	int depth;
	struct isl_ast_generate_parallel_domains_data data;

	data.n = isl_basic_set_list_n_basic_set(domain_list);
	if (data.n < 0)
		return NULL;

	if (data.n <= 1)
		return generate_sorted_domains(domain_list, executed, build);

	depth = isl_ast_build_get_depth(build);
	data.list = NULL;
	data.executed = executed;
	data.build = build;
	data.single = 0;
	if (isl_basic_set_list_foreach_scc(domain_list, &shared_outer, &depth,
					    &generate_sorted_domains_wrap,
					    &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	if (!data.single)
		data.list = isl_ast_graft_list_sort_guard(data.list);

	return data.list;
}

/* Internal data for separate_domain.
 *
 * "explicit" is set if we only want to use explicit bounds.
 *
 * "domain" collects the separated domains.
 */
struct isl_separate_domain_data {
	isl_ast_build *build;
	int explicit;
	isl_set *domain;
};

/* Extract implicit bounds on the current dimension for the executed "map".
 *
 * The domain of "map" may involve inner dimensions, so we
 * need to eliminate them.
 */
static __isl_give isl_set *implicit_bounds(__isl_take isl_map *map,
	__isl_keep isl_ast_build *build)
{
	isl_set *domain;

	domain = isl_map_domain(map);
	domain = isl_ast_build_eliminate(build, domain);

	return domain;
}

/* Extract explicit bounds on the current dimension for the executed "map".
 *
 * Rather than eliminating the inner dimensions as in implicit_bounds,
 * we simply drop any constraints involving those inner dimensions.
 * The idea is that most bounds that are implied by constraints on the
 * inner dimensions will be enforced by for loops and not by explicit guards.
 * There is then no need to separate along those bounds.
 */
static __isl_give isl_set *explicit_bounds(__isl_take isl_map *map,
	__isl_keep isl_ast_build *build)
{
	isl_set *domain;
	int depth;
	isl_size dim;

	dim = isl_map_dim(map, isl_dim_out);
	if (dim < 0)
		return isl_map_domain(isl_map_free(map));
	map = isl_map_drop_constraints_involving_dims(map, isl_dim_out, 0, dim);

	domain = isl_map_domain(map);
	depth = isl_ast_build_get_depth(build);
	dim = isl_set_dim(domain, isl_dim_set);
	domain = isl_set_detect_equalities(domain);
	domain = isl_set_drop_constraints_involving_dims(domain,
				isl_dim_set, depth + 1, dim - (depth + 1));
	domain = isl_set_remove_divs_involving_dims(domain,
				isl_dim_set, depth, 1);
	domain = isl_set_remove_unknown_divs(domain);

	return domain;
}

/* Split data->domain into pieces that intersect with the range of "map"
 * and pieces that do not intersect with the range of "map"
 * and then add that part of the range of "map" that does not intersect
 * with data->domain.
 */
static isl_stat separate_domain(__isl_take isl_map *map, void *user)
{
	struct isl_separate_domain_data *data = user;
	isl_set *domain;
	isl_set *d1, *d2;

	if (data->explicit)
		domain = explicit_bounds(map, data->build);
	else
		domain = implicit_bounds(map, data->build);

	domain = isl_set_coalesce(domain);
	domain = isl_set_make_disjoint(domain);
	d1 = isl_set_subtract(isl_set_copy(domain), isl_set_copy(data->domain));
	d2 = isl_set_subtract(isl_set_copy(data->domain), isl_set_copy(domain));
	data->domain = isl_set_intersect(data->domain, domain);
	data->domain = isl_set_union(data->domain, d1);
	data->domain = isl_set_union(data->domain, d2);

	return isl_stat_ok;
}
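
/* For illustration (an example only, not code used by isl): if
 * data->domain is { [i] : 0 <= i <= 10 } and the bounds extracted
 * from "map" are { [i] : 5 <= i <= 15 }, then the result collects
 * the three disjoint pieces
 *
 *	{ [i] : 5 <= i <= 10 }, { [i] : 0 <= i <= 4 } and
 *	{ [i] : 11 <= i <= 15 }
 */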

/* Separate the schedule domains of "executed".
 *
 * That is, break up the domain of "executed" into basic sets,
 * such that for each basic set S, every element in S is associated with
 * the same domain spaces.
 *
 * "space" is the (single) domain space of "executed".
 */
static __isl_give isl_set *separate_schedule_domains(
	__isl_take isl_space *space, __isl_take isl_union_map *executed,
	__isl_keep isl_ast_build *build)
{
	struct isl_separate_domain_data data = { build };
	isl_ctx *ctx;

	ctx = isl_ast_build_get_ctx(build);
	data.explicit = isl_options_get_ast_build_separation_bounds(ctx) ==
				    ISL_AST_BUILD_SEPARATION_BOUNDS_EXPLICIT;
	data.domain = isl_set_empty(space);
	if (isl_union_map_foreach_map(executed, &separate_domain, &data) < 0)
		data.domain = isl_set_free(data.domain);

	isl_union_map_free(executed);
	return data.domain;
}

/* Temporary data used during the search for a lower bound for unrolling.
 *
 * "build" is the build in which the unrolling will be performed
 * "domain" is the original set for which to find a lower bound
 * "depth" is the dimension for which to find a lower boudn
 * "expansion" is the expansion that needs to be applied to "domain"
 * in the unrolling that will be performed
 *
 * "lower" is the best lower bound found so far.  It is NULL if we have not
 * found any yet.
 * "n" is the corresponding size.  If lower is NULL, then the value of n
 * is undefined.
 * "n_div" is the maximal number of integer divisions in the first
 * unrolled iteration (after expansion).  It is set to -1 if it hasn't
 * been computed yet.
 */
struct isl_find_unroll_data {
	isl_ast_build *build;
	isl_set *domain;
	int depth;
	isl_basic_map *expansion;

	isl_aff *lower;
	int *n;
	int n_div;
};

/* Return the constraint
 *
 *	i_"depth" = aff + offset
 */
static __isl_give isl_constraint *at_offset(int depth, __isl_keep isl_aff *aff,
	int offset)
{
	aff = isl_aff_copy(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, depth, -1);
	aff = isl_aff_add_constant_si(aff, offset);
	return isl_equality_from_aff(aff);
}

/* Update *user to the number of integer divisions in the first element
 * of "ma", if it is larger than the current value.
 */
static isl_stat update_n_div(__isl_take isl_set *set,
	__isl_take isl_multi_aff *ma, void *user)
{
	isl_aff *aff;
	int *n = user;
	isl_size n_div;

	aff = isl_multi_aff_get_aff(ma, 0);
	n_div = isl_aff_dim(aff, isl_dim_div);
	isl_aff_free(aff);
	isl_multi_aff_free(ma);
	isl_set_free(set);

	if (n_div > *n)
		*n = n_div;

	return n_div >= 0 ? isl_stat_ok : isl_stat_error;
}

/* Get the number of integer divisions in the expression for the iterator
 * value at the first slice in the unrolling based on lower bound "lower",
 * taking into account the expansion that needs to be performed on this slice.
 */
static int get_expanded_n_div(struct isl_find_unroll_data *data,
	__isl_keep isl_aff *lower)
{
	isl_constraint *c;
	isl_set *set;
	isl_map *it_map, *expansion;
	isl_pw_multi_aff *pma;
	int n;

	c = at_offset(data->depth, lower, 0);
	set = isl_set_copy(data->domain);
	set = isl_set_add_constraint(set, c);
	expansion = isl_map_from_basic_map(isl_basic_map_copy(data->expansion));
	set = isl_set_apply(set, expansion);
	it_map = isl_ast_build_map_to_iterator(data->build, set);
	pma = isl_pw_multi_aff_from_map(it_map);
	n = 0;
	if (isl_pw_multi_aff_foreach_piece(pma, &update_n_div, &n) < 0)
		n = -1;
	isl_pw_multi_aff_free(pma);

	return n;
}

/* Is the lower bound "lower" with corresponding iteration count "n"
 * better than the one stored in "data"?
 * If there is no upper bound on the iteration count ("n" is infinity) or
 * if the count is too large, then we cannot use this lower bound.
 * Otherwise, if there was no previous lower bound or
 * if the iteration count of the new lower bound is smaller than
 * the iteration count of the previous lower bound, then we consider
 * the new lower bound to be better.
 * If the iteration count is the same, then compare the number
 * of integer divisions that would be needed to express
 * the iterator value at the first slice in the unrolling
 * according to the lower bound.  If we end up computing this
 * number, then store the lowest value in data->n_div.
 */
static int is_better_lower_bound(struct isl_find_unroll_data *data,
	__isl_keep isl_aff *lower, __isl_keep isl_val *n)
{
	int cmp;
	int n_div;

	if (!n)
		return -1;
	if (isl_val_is_infty(n))
		return 0;
	if (isl_val_cmp_si(n, INT_MAX) > 0)
		return 0;
	if (!data->lower)
		return 1;
	cmp = isl_val_cmp_si(n, *data->n);
	if (cmp < 0)
		return 1;
	if (cmp > 0)
		return 0;
	if (data->n_div < 0)
		data->n_div = get_expanded_n_div(data, data->lower);
	if (data->n_div < 0)
		return -1;
	if (data->n_div == 0)
		return 0;
	n_div = get_expanded_n_div(data, lower);
	if (n_div < 0)
		return -1;
	if (n_div >= data->n_div)
		return 0;
	data->n_div = n_div;

	return 1;
}

/* Check if we can use "c" as a lower bound and if it is better than
 * any previously found lower bound.
 *
 * If "c" does not involve the dimension at the current depth,
 * then we cannot use it.
 * Otherwise, let "c" be of the form
 *
 *	i >= f(j)/a
 *
 * We compute the maximal value of
 *
 *	-ceil(f(j)/a) + i + 1
 *
 * over the domain.  If there is such a value "n", then we know
 *
 *	-ceil(f(j)/a) + i + 1 <= n
 *
 * or
 *
 *	i < ceil(f(j)/a) + n
 *
 * meaning that we can use ceil(f(j)/a) as a lower bound for unrolling.
 * We just need to check if we have found any lower bound before and
 * if the new lower bound is better (smaller n or fewer integer divisions)
 * than the previously found lower bounds.
 */
static isl_stat update_unrolling_lower_bound(struct isl_find_unroll_data *data,
	__isl_keep isl_constraint *c)
{
	isl_aff *aff, *lower;
	isl_val *max;
	int better;

	if (!isl_constraint_is_lower_bound(c, isl_dim_set, data->depth))
		return isl_stat_ok;

	lower = isl_constraint_get_bound(c, isl_dim_set, data->depth);
	lower = isl_aff_ceil(lower);
	aff = isl_aff_copy(lower);
	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, data->depth, 1);
	aff = isl_aff_add_constant_si(aff, 1);
	max = isl_set_max_val(data->domain, aff);
	isl_aff_free(aff);

	better = is_better_lower_bound(data, lower, max);
	if (better < 0 || !better) {
		isl_val_free(max);
		isl_aff_free(lower);
		return better < 0 ? isl_stat_error : isl_stat_ok;
	}

	isl_aff_free(data->lower);
	data->lower = lower;
	*data->n = isl_val_get_num_si(max);
	isl_val_free(max);

	return isl_stat_ok;
}
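
/* For illustration (an example only, not code used by isl): for the
 * constraint i >= 0 over the domain { [i] : 0 <= i <= 3 }, the lower
 * bound is 0 and the maximum of
 *
 *	-0 + i + 1
 *
 * over the domain is 4, so the domain is covered by the four slices
 * i = 0, 1, 2 and 3, with n = 4.
 */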

/* Check if we can use "c" as a lower bound and if it is better than
 * any previously found lower bound.
 */
static isl_stat constraint_find_unroll(__isl_take isl_constraint *c, void *user)
{
	struct isl_find_unroll_data *data;
	isl_stat r;

	data = (struct isl_find_unroll_data *) user;
	r = update_unrolling_lower_bound(data, c);
	isl_constraint_free(c);

	return r;
}

/* Look for a lower bound l(i) on the dimension at "depth"
 * and a size n such that "domain" is a subset of
 *
 *	{ [i] : l(i) <= i_d < l(i) + n }
 *
 * where d is "depth" and l(i) depends only on earlier dimensions.
 * Furthermore, try and find a lower bound such that n is as small as possible.
 * In particular, "n" needs to be finite.
 * "build" is the build in which the unrolling will be performed.
 * "expansion" is the expansion that needs to be applied to "domain"
 * in the unrolling that will be performed.
 *
 * Inner dimensions have been eliminated from "domain" by the caller.
 *
 * We first construct a collection of lower bounds on the input set
 * by computing its simple hull.  We then iterate through them,
 * discarding those that we cannot use (either because they do not
 * involve the dimension at "depth" or because they have no corresponding
 * upper bound, meaning that "n" would be unbounded) and pick out the
 * best from the remaining ones.
 *
 * If we cannot find a suitable lower bound, then we consider that
 * to be an error.
 */
static __isl_give isl_aff *find_unroll_lower_bound(
	__isl_keep isl_ast_build *build, __isl_keep isl_set *domain,
	int depth, __isl_keep isl_basic_map *expansion, int *n)
{
	struct isl_find_unroll_data data =
			{ build, domain, depth, expansion, NULL, n, -1 };
	isl_basic_set *hull;

	hull = isl_set_simple_hull(isl_set_copy(domain));

	if (isl_basic_set_foreach_constraint(hull,
					    &constraint_find_unroll, &data) < 0)
		goto error;

	isl_basic_set_free(hull);

	if (!data.lower)
		isl_die(isl_set_get_ctx(domain), isl_error_invalid,
			"cannot find lower bound for unrolling", return NULL);

	return data.lower;
error:
	isl_basic_set_free(hull);
	return isl_aff_free(data.lower);
}

/* Call "fn" on each iteration of the current dimension of "domain".
 * If "init" is not NULL, then it is called with the number of
 * iterations before any call to "fn".
 * Return -1 on failure.
 *
 * Since we are going to be iterating over the individual values,
 * we first check if there are any strides on the current dimension.
 * If there is, we rewrite the current dimension i as
 *
 *		i = stride i' + offset
 *
 * and then iterate over individual values of i' instead.
 *
 * We then look for a lower bound on i' and a size such that the domain
 * is a subset of
 *
 *	{ [j,i'] : l(j) <= i' < l(j) + n }
 *
 * and then take slices of the domain at values of i'
 * between l(j) and l(j) + n - 1.
 *
 * We compute the unshifted simple hull of each slice to ensure that
 * we have a single basic set per offset.  The slicing constraint
 * may get simplified away before the unshifted simple hull is taken
 * and may therefore in some rare cases disappear from the result.
 * We therefore explicitly add the constraint back after computing
 * the unshifted simple hull to ensure that the basic sets
 * remain disjoint.  The constraints that are dropped by taking the hull
 * will be taken into account at the next level, as in the case of the
 * atomic option.
 *
 * Finally, we map i' back to i and call "fn".
 */
static int foreach_iteration(__isl_take isl_set *domain,
	__isl_keep isl_ast_build *build, int (*init)(int n, void *user),
	int (*fn)(__isl_take isl_basic_set *bset, void *user), void *user)
{
	int i, n;
	int empty;
	int depth;
	isl_multi_aff *expansion;
	isl_basic_map *bmap;
	isl_aff *lower = NULL;
	isl_ast_build *stride_build;

	depth = isl_ast_build_get_depth(build);

	domain = isl_ast_build_eliminate_inner(build, domain);
	domain = isl_set_intersect(domain, isl_ast_build_get_domain(build));
	stride_build = isl_ast_build_copy(build);
	stride_build = isl_ast_build_detect_strides(stride_build,
							isl_set_copy(domain));
	expansion = isl_ast_build_get_stride_expansion(stride_build);

	domain = isl_set_preimage_multi_aff(domain,
					    isl_multi_aff_copy(expansion));
	domain = isl_ast_build_eliminate_divs(stride_build, domain);
	isl_ast_build_free(stride_build);

	bmap = isl_basic_map_from_multi_aff(expansion);

	empty = isl_set_is_empty(domain);
	if (empty < 0) {
		n = -1;
	} else if (empty) {
		n = 0;
	} else {
		lower = find_unroll_lower_bound(build, domain, depth, bmap, &n);
		if (!lower)
			n = -1;
	}
	if (n >= 0 && init && init(n, user) < 0)
		n = -1;
	for (i = 0; i < n; ++i) {
		isl_set *set;
		isl_basic_set *bset;
		isl_constraint *slice;

		slice = at_offset(depth, lower, i);
		set = isl_set_copy(domain);
		set = isl_set_add_constraint(set, isl_constraint_copy(slice));
		bset = isl_set_unshifted_simple_hull(set);
		bset = isl_basic_set_add_constraint(bset, slice);
		bset = isl_basic_set_apply(bset, isl_basic_map_copy(bmap));

		if (fn(bset, user) < 0)
			break;
	}

	isl_aff_free(lower);
	isl_set_free(domain);
	isl_basic_map_free(bmap);

	return n < 0 || i < n ? -1 : 0;
}
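
/* For illustration (an example only, not code used by isl): for the
 * domain { [i] : 0 <= i <= 6 and i mod 2 = 0 }, a stride of 2 is
 * detected, the dimension is rewritten as i = 2 i' and "fn" is called
 * on the four slices i' = 0, 1, 2 and 3, mapped back to the original
 * values i = 0, 2, 4 and 6.
 */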

/* Data structure for storing the results and the intermediate objects
 * of compute_domains.
 *
 * "list" is the main result of the function and contains a list
 * of disjoint basic sets for which code should be generated.
 *
 * "executed" and "build" are inputs to compute_domains.
 * "schedule_domain" is the domain of "executed".
 *
 * "option" contains the domains at the current depth that should by
 * atomic, separated or unrolled.  These domains are as specified by
 * the user, except that inner dimensions have been eliminated and
 * that they have been made pair-wise disjoint.
 *
 * "sep_class" contains the user-specified split into separation classes
 * specialized to the current depth.
 * "done" contains the union of the separation domains that have already
 * been handled.
 */
struct isl_codegen_domains {
	isl_basic_set_list *list;

	isl_union_map *executed;
	isl_ast_build *build;
	isl_set *schedule_domain;

	isl_set *option[4];

	isl_map *sep_class;
	isl_set *done;
};

/* Internal data structure for do_unroll.
 *
 * "domains" stores the results of compute_domains.
 * "class_domain" is the original class domain passed to do_unroll.
 * "unroll_domain" collects the unrolled iterations.
 */
struct isl_ast_unroll_data {
	struct isl_codegen_domains *domains;
	isl_set *class_domain;
	isl_set *unroll_domain;
};

/* Given an iteration of an unrolled domain represented by "bset",
 * add it to data->domains->list.
 * Since we may have dropped some constraints, we intersect with
 * the class domain again to ensure that each element in the list
 * is disjoint from the other class domains.
 */
static int do_unroll_iteration(__isl_take isl_basic_set *bset, void *user)
{
	struct isl_ast_unroll_data *data = user;
	isl_set *set;
	isl_basic_set_list *list;

	set = isl_set_from_basic_set(bset);
	data->unroll_domain = isl_set_union(data->unroll_domain,
					    isl_set_copy(set));
	set = isl_set_intersect(set, isl_set_copy(data->class_domain));
	set = isl_set_make_disjoint(set);
	list = isl_basic_set_list_from_set(set);
	data->domains->list = isl_basic_set_list_concat(data->domains->list,
							list);

	return 0;
}

/* Extend domains->list with a list of basic sets, one for each value
 * of the current dimension in "domain" and remove the corresponding
 * sets from the class domain.  Return the updated class domain.
 * The divs that involve the current dimension have not been projected out
 * from this domain.
 *
 * We call foreach_iteration to iterate over the individual values and
 * in do_unroll_iteration we collect the individual basic sets in
 * domains->list and their union in data->unroll_domain, which is then
 * used to update the class domain.
 */
static __isl_give isl_set *do_unroll(struct isl_codegen_domains *domains,
	__isl_take isl_set *domain, __isl_take isl_set *class_domain)
{
	struct isl_ast_unroll_data data;

	if (!domain)
		return isl_set_free(class_domain);
	if (!class_domain)
		return isl_set_free(domain);

	data.domains = domains;
	data.class_domain = class_domain;
	data.unroll_domain = isl_set_empty(isl_set_get_space(domain));

	if (foreach_iteration(domain, domains->build, NULL,
				&do_unroll_iteration, &data) < 0)
		data.unroll_domain = isl_set_free(data.unroll_domain);

	class_domain = isl_set_subtract(class_domain, data.unroll_domain);

	return class_domain;
}

/* Add domains to domains->list for each individual value of the current
 * dimension, for that part of the schedule domain that lies in the
 * intersection of the option domain and the class domain.
 * Remove the corresponding sets from the class domain and
 * return the updated class domain.
 *
 * We first break up the unroll option domain into individual pieces
 * and then handle each of them separately.  The unroll option domain
 * has been made disjoint in compute_domains_init_options.
 *
 * Note that we actively want to combine different pieces of the
 * schedule domain that have the same value at the current dimension.
 * We therefore need to break up the unroll option domain before
 * intersecting with class and schedule domain, hoping that the
 * unroll option domain specified by the user is relatively simple.
 */
static __isl_give isl_set *compute_unroll_domains(
	struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
{
	isl_set *unroll_domain;
	isl_basic_set_list *unroll_list;
	int i;
	isl_size n;
	isl_bool empty;

	empty = isl_set_is_empty(domains->option[isl_ast_loop_unroll]);
	if (empty < 0)
		return isl_set_free(class_domain);
	if (empty)
		return class_domain;

	unroll_domain = isl_set_copy(domains->option[isl_ast_loop_unroll]);
	unroll_list = isl_basic_set_list_from_set(unroll_domain);

	n = isl_basic_set_list_n_basic_set(unroll_list);
	if (n < 0)
		class_domain = isl_set_free(class_domain);
	for (i = 0; i < n; ++i) {
		isl_basic_set *bset;

		bset = isl_basic_set_list_get_basic_set(unroll_list, i);
		unroll_domain = isl_set_from_basic_set(bset);
		unroll_domain = isl_set_intersect(unroll_domain,
						    isl_set_copy(class_domain));
		unroll_domain = isl_set_intersect(unroll_domain,
					isl_set_copy(domains->schedule_domain));

		empty = isl_set_is_empty(unroll_domain);
		if (empty >= 0 && empty) {
			isl_set_free(unroll_domain);
			continue;
		}

		class_domain = do_unroll(domains, unroll_domain, class_domain);
	}

	isl_basic_set_list_free(unroll_list);

	return class_domain;
}
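
/* A hypothetical example of the above: an unroll option domain
 *
 *	{ [i] : 0 <= i < 2 or 8 <= i < 10 }
 *
 * is first broken up into the pieces { [i] : 0 <= i < 2 } and
 * { [i] : 8 <= i < 10 }.  Each piece is then intersected with the class
 * and schedule domains and passed to do_unroll, so that within a piece
 * all parts of the schedule domain with the same value of the current
 * dimension end up in a single basic set.
 */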

/* Try and construct a single basic set that includes the intersection of
 * the schedule domain, the atomic option domain and the class domain.
 * Add the resulting basic set(s) to domains->list and remove them
 * from class_domain.  Return the updated class domain.
 *
 * We construct a single domain rather than trying to combine
 * the schedule domains of individual domains because we are working
 * within a single component so that non-overlapping schedule domains
 * should already have been separated.
 * We do however need to make sure that this single domain is a subset
 * of the class domain so that it would not intersect with any other
 * class domains.  This means that we may end up splitting up the atomic
 * domain in case separation classes are being used.
 *
 * "domain" is the intersection of the schedule domain and the class domain,
 * with inner dimensions projected out.
 */
static __isl_give isl_set *compute_atomic_domain(
	struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
{
	isl_basic_set *bset;
	isl_basic_set_list *list;
	isl_set *domain, *atomic_domain;
	int empty;

	domain = isl_set_copy(domains->option[isl_ast_loop_atomic]);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));
	domain = isl_set_intersect(domain,
				isl_set_copy(domains->schedule_domain));
	empty = isl_set_is_empty(domain);
	if (empty < 0)
		class_domain = isl_set_free(class_domain);
	if (empty) {
		isl_set_free(domain);
		return class_domain;
	}

	domain = isl_ast_build_eliminate(domains->build, domain);
	domain = isl_set_coalesce_preserve(domain);
	bset = isl_set_unshifted_simple_hull(domain);
	domain = isl_set_from_basic_set(bset);
	atomic_domain = isl_set_copy(domain);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));
	class_domain = isl_set_subtract(class_domain, atomic_domain);
	domain = isl_set_make_disjoint(domain);
	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	return class_domain;
}
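
/* A hypothetical illustration of how the atomic domain may grow:
 * if the intersection computed above is
 *
 *	{ [i] : 0 <= i < 5 or 7 <= i < 10 }
 *
 * then its unshifted simple hull is the single basic set
 *
 *	{ [i] : 0 <= i < 10 }
 *
 * which also covers i = 5 and i = 6.  It is this larger set that is
 * subtracted from the class domain, so that no other option can generate
 * the extra elements a second time.
 */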

/* Split up the schedule domain into uniform basic sets,
 * in the sense that each element in a basic set is associated to
 * elements of the same domains, and add the result to domains->list.
 * Do this for that part of the schedule domain that lies in the
 * intersection of "class_domain" and the separate option domain.
 *
 * "class_domain" may or may not include the constraints
 * of the schedule domain, but this does not make a difference
 * since we are going to intersect it with the domain of the inverse schedule.
 * If it includes schedule domain constraints, then they may involve
 * inner dimensions, but we will eliminate them in separation_domain.
 */
static int compute_separate_domain(struct isl_codegen_domains *domains,
	__isl_keep isl_set *class_domain)
{
	isl_space *space;
	isl_set *domain;
	isl_union_map *executed;
	isl_basic_set_list *list;
	int empty;

	domain = isl_set_copy(domains->option[isl_ast_loop_separate]);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));
	executed = isl_union_map_copy(domains->executed);
	executed = isl_union_map_intersect_domain(executed,
				    isl_union_set_from_set(domain));
	empty = isl_union_map_is_empty(executed);
	if (empty < 0 || empty) {
		isl_union_map_free(executed);
		return empty < 0 ? -1 : 0;
	}

	space = isl_set_get_space(class_domain);
	domain = separate_schedule_domains(space, executed, domains->build);

	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	return 0;
}

/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately
 * for the given separation class domain.
 *
 * If any separation classes have been defined, then "class_domain"
 * is the domain of the current class and does not refer to inner dimensions.
 * Otherwise, "class_domain" is the universe domain.
 *
 * We first make sure that the class domain is disjoint from
 * previously considered class domains.
 *
 * The separate domains can be computed directly from the "class_domain".
 *
 * The unroll, atomic and remainder domains need the constraints
 * from the schedule domain.
 *
 * For unrolling, the actual schedule domain is needed (with divs that
 * may refer to the current dimension) so that stride detection can be
 * performed.
 *
 * For atomic and remainder domains, inner dimensions and divs involving
 * the current dimension should be eliminated.
 * In case we are working within a separation class, we need to intersect
 * the result with the current "class_domain" to ensure that the domains
 * are disjoint from those generated from other class domains.
 *
 * The domain that has been made atomic may be larger than specified
 * by the user since it needs to be representable as a single basic set.
 * This possibly larger domain is removed from class_domain by
 * compute_atomic_domain.  It is computed first so that the extended domain
 * would not overlap with any domains computed before.
 * Similarly, the unrolled domains may have some constraints removed and
 * may therefore also be larger than specified by the user.
 *
 * If anything is left after handling separate, unroll and atomic,
 * we split it up into basic sets and append the basic sets to domains->list.
 */
static isl_stat compute_partial_domains(struct isl_codegen_domains *domains,
	__isl_take isl_set *class_domain)
{
	isl_basic_set_list *list;
	isl_set *domain;

	class_domain = isl_set_subtract(class_domain,
					isl_set_copy(domains->done));
	domains->done = isl_set_union(domains->done,
					isl_set_copy(class_domain));

	class_domain = compute_atomic_domain(domains, class_domain);
	class_domain = compute_unroll_domains(domains, class_domain);

	domain = isl_set_copy(class_domain);

	if (compute_separate_domain(domains, domain) < 0)
		goto error;
	domain = isl_set_subtract(domain,
			isl_set_copy(domains->option[isl_ast_loop_separate]));

	domain = isl_set_intersect(domain,
				isl_set_copy(domains->schedule_domain));

	domain = isl_ast_build_eliminate(domains->build, domain);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));

	domain = isl_set_coalesce_preserve(domain);
	domain = isl_set_make_disjoint(domain);

	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	isl_set_free(class_domain);

	return isl_stat_ok;
error:
	isl_set_free(domain);
	isl_set_free(class_domain);
	return isl_stat_error;
}

/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately
 * for the separation class identified by "pnt".
 *
 * We extract the corresponding class domain from domains->sep_class,
 * eliminate inner dimensions and pass control to compute_partial_domains.
 */
static isl_stat compute_class_domains(__isl_take isl_point *pnt, void *user)
{
	struct isl_codegen_domains *domains = user;
	isl_set *class_set;
	isl_set *domain;
	int disjoint;

	class_set = isl_set_from_point(pnt);
	domain = isl_map_domain(isl_map_intersect_range(
				isl_map_copy(domains->sep_class), class_set));
	domain = isl_ast_build_compute_gist(domains->build, domain);
	domain = isl_ast_build_eliminate(domains->build, domain);

	disjoint = isl_set_plain_is_disjoint(domain, domains->schedule_domain);
	if (disjoint < 0)
		return isl_stat_error;
	if (disjoint) {
		isl_set_free(domain);
		return isl_stat_ok;
	}

	return compute_partial_domains(domains, domain);
}

/* Extract the domains at the current depth that should be atomic,
 * separated or unrolled and store them in option.
 *
 * The domains specified by the user might overlap, so we make
 * them disjoint by subtracting earlier domains from later domains.
 */
static void compute_domains_init_options(isl_set *option[4],
	__isl_keep isl_ast_build *build)
{
	enum isl_ast_loop_type type, type2;
	isl_set *unroll;

	for (type = isl_ast_loop_atomic;
	    type <= isl_ast_loop_separate; ++type) {
		option[type] = isl_ast_build_get_option_domain(build, type);
		for (type2 = isl_ast_loop_atomic; type2 < type; ++type2)
			option[type] = isl_set_subtract(option[type],
						isl_set_copy(option[type2]));
	}

	unroll = option[isl_ast_loop_unroll];
	unroll = isl_set_coalesce(unroll);
	unroll = isl_set_make_disjoint(unroll);
	option[isl_ast_loop_unroll] = unroll;
}
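
/* A hypothetical example of the above: if the user specified
 *
 *	atomic: { [i] : 0 <= i < 10 }
 *	unroll: { [i] : 5 <= i < 15 }
 *
 * then the unroll option domain is reduced to { [i] : 10 <= i < 15 }
 * since the atomic option comes first and earlier option domains
 * are subtracted from the later ones.
 */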

/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately,
 * based on the user-specified options.
 * Return the list of disjoint basic sets.
 *
 * There are three kinds of domains that we need to keep track of.
 * - the "schedule domain" is the domain of "executed"
 * - the "class domain" is the domain corresponding to the currrent
 *	separation class
 * - the "option domain" is the domain corresponding to one of the options
 *	atomic, unroll or separate
 *
 * We first consider the individual values of the separation classes
 * and split up the domain for each of them separately.
 * Finally, we consider the remainder.  If no separation classes were
 * specified, then we call compute_partial_domains with the universe
 * "class_domain".  Otherwise, we take the "schedule_domain" as "class_domain",
 * with inner dimensions removed.  We do this because we want to
 * avoid computing the complement of the class domains (i.e., the difference
 * between the universe and domains->done).
 */
static __isl_give isl_basic_set_list *compute_domains(
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	struct isl_codegen_domains domains;
	isl_ctx *ctx;
	isl_set *domain;
	isl_union_set *schedule_domain;
	isl_set *classes;
	isl_space *space;
	int n_param;
	enum isl_ast_loop_type type;
	isl_bool empty;

	if (!executed)
		return NULL;

	ctx = isl_union_map_get_ctx(executed);
	domains.list = isl_basic_set_list_alloc(ctx, 0);

	schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(schedule_domain);

	compute_domains_init_options(domains.option, build);

	domains.sep_class = isl_ast_build_get_separation_class(build);
	classes = isl_map_range(isl_map_copy(domains.sep_class));
	n_param = isl_set_dim(classes, isl_dim_param);
	if (n_param < 0)
		classes = isl_set_free(classes);
	classes = isl_set_project_out(classes, isl_dim_param, 0, n_param);

	space = isl_set_get_space(domain);
	domains.build = build;
	domains.schedule_domain = isl_set_copy(domain);
	domains.executed = executed;
	domains.done = isl_set_empty(space);

	if (isl_set_foreach_point(classes, &compute_class_domains, &domains) < 0)
		domains.list = isl_basic_set_list_free(domains.list);
	isl_set_free(classes);

	empty = isl_set_is_empty(domains.done);
	if (empty < 0) {
		domains.list = isl_basic_set_list_free(domains.list);
		domain = isl_set_free(domain);
	} else if (empty) {
		isl_set_free(domain);
		domain = isl_set_universe(isl_set_get_space(domains.done));
	} else {
		domain = isl_ast_build_eliminate(build, domain);
	}
	if (compute_partial_domains(&domains, domain) < 0)
		domains.list = isl_basic_set_list_free(domains.list);

	isl_set_free(domains.schedule_domain);
	isl_set_free(domains.done);
	isl_map_free(domains.sep_class);
	for (type = isl_ast_loop_atomic; type <= isl_ast_loop_separate; ++type)
		isl_set_free(domains.option[type]);

	return domains.list;
}

/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a union map.
 *
 * We first split up the domain at the current depth into disjoint
 * basic sets based on the user-specified options.
 * Then we generate code for each of them and concatenate the results.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_flat(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_basic_set_list *domain_list;
	isl_ast_graft_list *list = NULL;

	domain_list = compute_domains(executed, build);
	list = generate_parallel_domains(domain_list, executed, build);

	isl_basic_set_list_free(domain_list);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}

/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree
 * and the separate option was specified.
 *
 * We perform separation on the domain of "executed" and then generate
 * an AST for each of the resulting disjoint basic sets.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_separate(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_space *space;
	isl_set *domain;
	isl_basic_set_list *domain_list;
	isl_ast_graft_list *list;

	space = isl_ast_build_get_space(build, 1);
	domain = separate_schedule_domains(space,
					isl_union_map_copy(executed), build);
	domain_list = isl_basic_set_list_from_set(domain);

	list = generate_parallel_domains(domain_list, executed, build);

	isl_basic_set_list_free(domain_list);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}

/* Internal data structure for generate_shifted_component_tree_unroll.
 *
 * "executed" and "build" are inputs to generate_shifted_component_tree_unroll.
 * "list" collects the constructs grafts.
 */
struct isl_ast_unroll_tree_data {
	isl_union_map *executed;
	isl_ast_build *build;
	isl_ast_graft_list *list;
};

/* Initialize data->list to a list of "n" elements.
 */
static int init_unroll_tree(int n, void *user)
{
	struct isl_ast_unroll_tree_data *data = user;
	isl_ctx *ctx;

	ctx = isl_ast_build_get_ctx(data->build);
	data->list = isl_ast_graft_list_alloc(ctx, n);

	return 0;
}

/* Given an iteration of an unrolled domain represented by "bset",
 * generate the corresponding AST and add the result to data->list.
 */
static int do_unroll_tree_iteration(__isl_take isl_basic_set *bset, void *user)
{
	struct isl_ast_unroll_tree_data *data = user;

	data->list = add_node(data->list, isl_union_map_copy(data->executed),
				bset, isl_ast_build_copy(data->build));

	return 0;
}

/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree
 * and the unroll option was specified.
 *
 * We call foreach_iteration to iterate over the individual values and
 * construct and collect the corresponding grafts in do_unroll_tree_iteration.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_unroll(
	__isl_take isl_union_map *executed, __isl_take isl_set *domain,
	__isl_take isl_ast_build *build)
{
	struct isl_ast_unroll_tree_data data = { executed, build, NULL };

	if (foreach_iteration(domain, build, &init_unroll_tree,
				&do_unroll_tree_iteration, &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return data.list;
}

/* Does "domain" involve a disjunction that is purely based on
 * constraints involving only outer dimensions?
 *
 * In particular, is there a disjunction such that the constraints
 * involving the current and later dimensions are the same over
 * all the disjuncts?
 */
static isl_bool has_pure_outer_disjunction(__isl_keep isl_set *domain,
	__isl_keep isl_ast_build *build)
{
	isl_basic_set *hull;
	isl_set *shared, *inner;
	isl_bool equal;
	int depth;
	isl_size n;
	isl_size dim;

	n = isl_set_n_basic_set(domain);
	if (n < 0)
		return isl_bool_error;
	if (n <= 1)
		return isl_bool_false;
	dim = isl_set_dim(domain, isl_dim_set);
	if (dim < 0)
		return isl_bool_error;

	inner = isl_set_copy(domain);
	depth = isl_ast_build_get_depth(build);
	inner = isl_set_drop_constraints_not_involving_dims(inner,
					    isl_dim_set, depth, dim - depth);
	hull = isl_set_plain_unshifted_simple_hull(isl_set_copy(inner));
	shared = isl_set_from_basic_set(hull);
	equal = isl_set_plain_is_equal(inner, shared);
	isl_set_free(inner);
	isl_set_free(shared);

	return equal;
}
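
/* A hypothetical example of the above, at depth 1: the domain
 *
 *	{ [i,j] : 0 <= j < 10 and (i < 0 or i > 10) }
 *
 * involves a pure outer disjunction since, after dropping the constraints
 * that do not involve j, both disjuncts reduce to 0 <= j < 10,
 * which plainly coincides with the simple hull,
 * so isl_bool_true is returned.
 */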

/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, handle the base case where either there is no isolated
 * set, or we are within the isolated set (in which case "isolated" is set),
 * or we are handling the iterations that precede or follow the isolated set.
 *
 * The schedule domain is broken up or combined into basic sets
 * according to the AST generation option specified in the current
 * schedule node, which may be either atomic, separate, unroll or
 * unspecified.  If the option is unspecified, then we currently simply
 * split the schedule domain into disjoint basic sets.
 *
 * In case the separate option is specified, the AST generation is
 * handled by generate_shifted_component_tree_separate.
 * In the other cases, we need the global schedule domain.
 * In the unroll case, the AST generation is then handled by
 * generate_shifted_component_tree_unroll which needs the actual
 * schedule domain (with divs that may refer to the current dimension)
 * so that stride detection can be performed.
 * In the atomic or unspecified case, inner dimensions and divs involving
 * the current dimension should be eliminated.
 * The result is then either combined into a single basic set or
 * split up into disjoint basic sets.
 * Finally an AST is generated for each basic set and the results are
 * concatenated.
 *
 * If the schedule domain involves a disjunction that is purely based on
 * constraints involving only outer dimensions, then it is treated as
 * if atomic was specified.  This ensures that only a single loop
 * is generated instead of a sequence of identical loops with
 * different guards.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_base(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int isolated)
{
	isl_bool outer_disjunction;
	isl_union_set *schedule_domain;
	isl_set *domain;
	isl_basic_set_list *domain_list;
	isl_ast_graft_list *list;
	enum isl_ast_loop_type type;

	type = isl_ast_build_get_loop_type(build, isolated);
	if (type < 0)
		goto error;

	if (type == isl_ast_loop_separate)
		return generate_shifted_component_tree_separate(executed,
								build);

	schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(schedule_domain);

	if (type == isl_ast_loop_unroll)
		return generate_shifted_component_tree_unroll(executed, domain,
								build);

	domain = isl_ast_build_eliminate(build, domain);
	domain = isl_set_coalesce_preserve(domain);

	outer_disjunction = has_pure_outer_disjunction(domain, build);
	if (outer_disjunction < 0)
		domain = isl_set_free(domain);

	if (outer_disjunction || type == isl_ast_loop_atomic) {
		isl_basic_set *hull;
		hull = isl_set_unshifted_simple_hull(domain);
		domain_list = isl_basic_set_list_from_basic_set(hull);
	} else {
		domain = isl_set_make_disjoint(domain);
		domain_list = isl_basic_set_list_from_set(domain);
	}

	list = generate_parallel_domains(domain_list, executed, build);

	isl_basic_set_list_free(domain_list);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}

/* Extract out the disjunction imposed by "domain" on the outer
 * schedule dimensions.
 *
 * In particular, remove all inner dimensions from "domain" (including
 * the current dimension) and then remove the constraints that are shared
 * by all disjuncts in the result.
 */
static __isl_give isl_set *extract_disjunction(__isl_take isl_set *domain,
	__isl_keep isl_ast_build *build)
{
	isl_set *hull;
	int depth;
	isl_size dim;

	domain = isl_ast_build_specialize(build, domain);
	depth = isl_ast_build_get_depth(build);
	dim = isl_set_dim(domain, isl_dim_set);
	if (dim < 0)
		return isl_set_free(domain);
	domain = isl_set_eliminate(domain, isl_dim_set, depth, dim - depth);
	domain = isl_set_remove_unknown_divs(domain);
	hull = isl_set_copy(domain);
	hull = isl_set_from_basic_set(isl_set_unshifted_simple_hull(hull));
	domain = isl_set_gist(domain, hull);

	return domain;
}
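
/* A hypothetical example of the above, at depth 1 and ignoring
 * the specialization step: the domain
 *
 *	{ [i,j] : 0 <= j < i and (i < 5 or i >= 10) }
 *
 * first has dimension j eliminated, giving
 *
 *	{ [i,j] : 1 <= i < 5 or i >= 10 }
 *
 * and is then gisted with respect to its simple hull { [i,j] : i >= 1 },
 * leaving only the disjunction { [i,j] : i < 5 or i >= 10 }.
 */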

/* Add "guard" to the grafts in "list".
 * "build" is the outer AST build, while "sub_build" includes "guard"
 * in its generated domain.
 *
 * First combine the grafts into a single graft and then add the guard.
 * If the list is empty, or if some error occurred, then simply return
 * the list.
 */
static __isl_give isl_ast_graft_list *list_add_guard(
	__isl_take isl_ast_graft_list *list, __isl_keep isl_set *guard,
	__isl_keep isl_ast_build *build, __isl_keep isl_ast_build *sub_build)
{
	isl_ast_graft *graft;
	isl_size n;

	list = isl_ast_graft_list_fuse(list, sub_build);

	n = isl_ast_graft_list_n_ast_graft(list);
	if (n < 0)
		return isl_ast_graft_list_free(list);
	if (n != 1)
		return list;

	graft = isl_ast_graft_list_get_ast_graft(list, 0);
	graft = isl_ast_graft_add_guard(graft, isl_set_copy(guard), build);
	list = isl_ast_graft_list_set_ast_graft(list, 0, graft);

	return list;
}

/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, do so for the specified subset of the schedule domain.
 *
 * If we are outside of the isolated part, then "domain" may include
 * a disjunction.  Explicitly generate this disjunction at this point
 * instead of relying on the disjunction getting hoisted back up
 * to this level.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_part(
	__isl_keep isl_union_map *executed, __isl_take isl_set *domain,
	__isl_keep isl_ast_build *build, int isolated)
{
	isl_union_set *uset;
	isl_ast_graft_list *list;
	isl_ast_build *sub_build;
	int empty;

	uset = isl_union_set_from_set(isl_set_copy(domain));
	executed = isl_union_map_copy(executed);
	executed = isl_union_map_intersect_domain(executed, uset);
	empty = isl_union_map_is_empty(executed);
	if (empty < 0)
		goto error;
	if (empty) {
		isl_ctx *ctx;
		isl_union_map_free(executed);
		isl_set_free(domain);
		ctx = isl_ast_build_get_ctx(build);
		return isl_ast_graft_list_alloc(ctx, 0);
	}

	sub_build = isl_ast_build_copy(build);
	if (!isolated) {
		domain = extract_disjunction(domain, build);
		sub_build = isl_ast_build_restrict_generated(sub_build,
							isl_set_copy(domain));
	}
	list = generate_shifted_component_tree_base(executed,
				isl_ast_build_copy(sub_build), isolated);
	if (!isolated)
		list = list_add_guard(list, domain, build, sub_build);
	isl_ast_build_free(sub_build);
	isl_set_free(domain);
	return list;
error:
	isl_union_map_free(executed);
	isl_set_free(domain);
	return NULL;
}

/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, do so for the specified sequence of subsets
 * of the schedule domain, "before", "isolated", "after" and "other",
 * where only the "isolated" part is considered to be isolated.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_parts(
	__isl_take isl_union_map *executed, __isl_take isl_set *before,
	__isl_take isl_set *isolated, __isl_take isl_set *after,
	__isl_take isl_set *other, __isl_take isl_ast_build *build)
{
	isl_ast_graft_list *list, *res;

	res = generate_shifted_component_tree_part(executed, before, build, 0);
	list = generate_shifted_component_tree_part(executed, isolated,
						    build, 1);
	res = isl_ast_graft_list_concat(res, list);
	list = generate_shifted_component_tree_part(executed, after, build, 0);
	res = isl_ast_graft_list_concat(res, list);
	list = generate_shifted_component_tree_part(executed, other, build, 0);
	res = isl_ast_graft_list_concat(res, list);

	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return res;
}

/* Does "set" intersect "first", but not "second"?
 */
static isl_bool only_intersects_first(__isl_keep isl_set *set,
	__isl_keep isl_set *first, __isl_keep isl_set *second)
{
	isl_bool disjoint;

	disjoint = isl_set_is_disjoint(set, first);
	if (disjoint < 0)
		return isl_bool_error;
	if (disjoint)
		return isl_bool_false;

	return isl_set_is_disjoint(set, second);
}

/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, do so in case of isolation where there is
 * only an "isolated" part and an "after" part.
 * "dead1" and "dead2" are freed by this function in order to simplify
 * the caller.
 *
 * The "before" and "other" parts are set to empty sets.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_only_after(
	__isl_take isl_union_map *executed, __isl_take isl_set *isolated,
	__isl_take isl_set *after, __isl_take isl_ast_build *build,
	__isl_take isl_set *dead1, __isl_take isl_set *dead2)
{
	isl_set *empty;

	empty = isl_set_empty(isl_set_get_space(after));
	isl_set_free(dead1);
	isl_set_free(dead2);
	return generate_shifted_component_parts(executed, isl_set_copy(empty),
						isolated, after, empty, build);
}

/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 *
 * We first check if the user has specified an isolated schedule domain
 * and that we are not already outside of this isolated schedule domain.
 * If so, we break up the schedule domain into iterations that
 * precede the isolated domain, the isolated domain itself,
 * the iterations that follow the isolated domain and
 * the remaining iterations (those that are incomparable
 * to the isolated domain).
 * We generate an AST for each piece and concatenate the results.
 *
 * If the isolated domain is not convex, then it is replaced
 * by a convex superset to ensure that the sets of preceding and
 * following iterations are properly defined and, in particular,
 * that there are no intermediate iterations that do not belong
 * to the isolated domain.
 *
 * In the special case where at least one element of the schedule
 * domain that does not belong to the isolated domain needs
 * to be scheduled after this isolated domain, but none of those
 * elements need to be scheduled before, break up the schedule domain
 * in only two parts, the isolated domain, and a part that will be
 * scheduled after the isolated domain.
 *
 * If no isolated set has been specified, then we generate an
 * AST for the entire inverse schedule.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int i, depth;
	int empty, has_isolate;
	isl_space *space;
	isl_union_set *schedule_domain;
	isl_set *domain;
	isl_basic_set *hull;
	isl_set *isolated, *before, *after, *test;
	isl_map *gt, *lt;
	isl_bool pure;

	build = isl_ast_build_extract_isolated(build);
	has_isolate = isl_ast_build_has_isolated(build);
	if (has_isolate < 0)
		executed = isl_union_map_free(executed);
	else if (!has_isolate)
		return generate_shifted_component_tree_base(executed, build, 0);

	schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(schedule_domain);

	isolated = isl_ast_build_get_isolated(build);
	isolated = isl_set_intersect(isolated, isl_set_copy(domain));
	test = isl_ast_build_specialize(build, isl_set_copy(isolated));
	empty = isl_set_is_empty(test);
	isl_set_free(test);
	if (empty < 0)
		goto error;
	if (empty) {
		isl_set_free(isolated);
		isl_set_free(domain);
		return generate_shifted_component_tree_base(executed, build, 0);
	}
	isolated = isl_ast_build_eliminate(build, isolated);
	hull = isl_set_unshifted_simple_hull(isolated);
	isolated = isl_set_from_basic_set(hull);

	depth = isl_ast_build_get_depth(build);
	space = isl_space_map_from_set(isl_set_get_space(isolated));
	gt = isl_map_universe(space);
	for (i = 0; i < depth; ++i)
		gt = isl_map_equate(gt, isl_dim_in, i, isl_dim_out, i);
	gt = isl_map_order_gt(gt, isl_dim_in, depth, isl_dim_out, depth);
	lt = isl_map_reverse(isl_map_copy(gt));
	before = isl_set_apply(isl_set_copy(isolated), gt);
	after = isl_set_apply(isl_set_copy(isolated), lt);

	domain = isl_set_subtract(domain, isl_set_copy(isolated));
	pure = only_intersects_first(domain, after, before);
	if (pure < 0)
		executed = isl_union_map_free(executed);
	else if (pure)
		return generate_shifted_component_only_after(executed, isolated,
						domain, build, before, after);
	domain = isl_set_subtract(domain, isl_set_copy(before));
	domain = isl_set_subtract(domain, isl_set_copy(after));
	after = isl_set_subtract(after, isl_set_copy(isolated));
	after = isl_set_subtract(after, isl_set_copy(before));
	before = isl_set_subtract(before, isl_set_copy(isolated));

	return generate_shifted_component_parts(executed, before, isolated,
						after, domain, build);
error:
	isl_set_free(domain);
	isl_set_free(isolated);
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
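
/* A hypothetical illustration of the decomposition above: for a schedule
 * domain { [i] : 0 <= i < 100 } at depth 0 with isolated set
 * { [i] : 10 <= i < 90 }, the preceding iterations within the schedule
 * domain form { [i] : 0 <= i < 10 }, the following iterations form
 * { [i] : 90 <= i < 100 } and the incomparable part is empty.
 * An AST is generated for each of these pieces and the results
 * are concatenated.
 */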

/* Generate code for a single component, after shifting (if any)
 * has been applied.
 *
 * Call generate_shifted_component_tree or generate_shifted_component_flat
 * depending on whether the schedule was specified as a schedule tree.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	if (isl_ast_build_has_schedule_node(build))
		return generate_shifted_component_tree(executed, build);
	else
		return generate_shifted_component_flat(executed, build);
}

struct isl_set_map_pair {
	isl_set *set;
	isl_map *map;
};

/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * return the union of the "map" fields of the elements
 * indexed by the first "n" elements of "order".
 */
static __isl_give isl_union_map *construct_component_executed(
	struct isl_set_map_pair *domain, int *order, int n)
{
	int i;
	isl_map *map;
	isl_union_map *executed;

	map = isl_map_copy(domain[order[0]].map);
	executed = isl_union_map_from_map(map);
	for (i = 1; i < n; ++i) {
		map = isl_map_copy(domain[order[i]].map);
		executed = isl_union_map_add_map(executed, map);
	}

	return executed;
}

/* Generate code for a single component, after shifting (if any)
 * has been applied.
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_from_list(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_take isl_ast_build *build)
{
	isl_union_map *executed;

	executed = construct_component_executed(domain, order, n);
	return generate_shifted_component(executed, build);
}

/* Does set dimension "pos" of "set" have an obviously fixed value?
 */
static int dim_is_fixed(__isl_keep isl_set *set, int pos)
{
	int fixed;
	isl_val *v;

	v = isl_set_plain_get_val_if_fixed(set, isl_dim_set, pos);
	if (!v)
		return -1;
	fixed = !isl_val_is_nan(v);
	isl_val_free(v);

	return fixed;
}
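
/* For instance, dimension 1 of { [i, 5] : 0 <= i < 10 } is obviously
 * fixed (to the value 5), while dimension 0 is not.
 */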

/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * do all (except for at most one) of the "set" fields of the elements
 * indexed by the first "n" elements of "order" have a fixed value
 * at position "depth"?
 */
static int at_most_one_non_fixed(struct isl_set_map_pair *domain,
	int *order, int n, int depth)
{
	int i;
	int non_fixed = -1;

	for (i = 0; i < n; ++i) {
		int f;

		f = dim_is_fixed(domain[order[i]].set, depth);
		if (f < 0)
			return -1;
		if (f)
			continue;
		if (non_fixed >= 0)
			return 0;
		non_fixed = i;
	}

	return 1;
}

/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * eliminate the inner dimensions from the "set" field of the elements
 * indexed by the first "n" elements of "order", provided the current
 * dimension does not have a fixed value.
 *
 * Return the index of the first element in "order" with a corresponding
 * "set" field that does not have an (obviously) fixed value.
 */
static int eliminate_non_fixed(struct isl_set_map_pair *domain,
	int *order, int n, int depth, __isl_keep isl_ast_build *build)
{
	int i;
	int base = -1;

	for (i = n - 1; i >= 0; --i) {
		int f;
		f = dim_is_fixed(domain[order[i]].set, depth);
		if (f < 0)
			return -1;
		if (f)
			continue;
		domain[order[i]].set = isl_ast_build_eliminate_inner(build,
							domain[order[i]].set);
		base = i;
	}

	return base;
}

/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * find the element of "domain" (amongst those indexed by the first "n"
 * elements of "order") with the "set" field that has the smallest
 * value for the current iterator.
 *
 * Note that the domain with the smallest value may depend on the parameters
 * and/or outer loop dimension.  Since the result of this function is only
 * used as a heuristic, we only make a reasonable attempt at finding the best
 * domain, one that should work in case a single domain provides the smallest
 * value for the current dimension over all values of the parameters
 * and outer dimensions.
 *
 * In particular, we compute the smallest value of the first domain
 * and replace it by that of any later domain if that later domain
 * has a smallest value that is smaller for at least some value
 * of the parameters and outer dimensions.
 */
static int first_offset(struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_ast_build *build)
{
	int i;
	isl_map *min_first;
	int first = 0;

	min_first = isl_ast_build_map_to_iterator(build,
					isl_set_copy(domain[order[0]].set));
	min_first = isl_map_lexmin(min_first);

	for (i = 1; i < n; ++i) {
		isl_map *min, *test;
		int empty;

		min = isl_ast_build_map_to_iterator(build,
					isl_set_copy(domain[order[i]].set));
		min = isl_map_lexmin(min);
		test = isl_map_copy(min);
		test = isl_map_apply_domain(isl_map_copy(min_first), test);
		test = isl_map_order_lt(test, isl_dim_in, 0, isl_dim_out, 0);
		empty = isl_map_is_empty(test);
		isl_map_free(test);
		if (empty >= 0 && !empty) {
			isl_map_free(min_first);
			first = i;
			min_first = min;
		} else
			isl_map_free(min);

		if (empty < 0)
			break;
	}

	isl_map_free(min_first);

	return i < n ? -1 : first;
}
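
/* A hypothetical example of the above, at depth 0 and without any
 * parameters: if the smallest value of the current iterator is 4
 * in the first domain and 0 in the second domain, then "test" relates
 * 0 to 4, the "<" condition is satisfiable and the second domain
 * becomes the reference domain, so first_offset returns 1.
 */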

/* Construct a shifted inverse schedule based on the original inverse schedule,
 * the stride and the offset.
 *
 * The original inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * "stride" and "offset" are such that the difference
 * between the values of the current dimension of domain "i"
 * and the values of the current dimension for some reference domain are
 * equal to
 *
 *	stride * integer + offset[i]
 *
 * Moreover, 0 <= offset[i] < stride.
 *
 * For each domain, we create a map
 *
 *	{ [..., j, ...] -> [..., j - offset[i], offset[i], ....] }
 *
 * where j refers to the current dimension and the other dimensions are
 * unchanged, and apply this map to the original schedule domain.
 *
 * For example, for the original schedule
 *
 *	{ A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * and assuming the offset is 0 for the A domain and 1 for the B domain,
 * we apply the mapping
 *
 *	{ [j] -> [j, 0] }
 *
 * to the schedule of the "A" domain and the mapping
 *
 *	{ [j] -> [j - 1, 1] }
 *
 * to the schedule of the "B" domain.
 *
 *
 * Note that after the transformation, the differences between pairs
 * of values of the current dimension over all domains are multiples
 * of stride and that we have therefore exposed the stride.
 *
 *
 * To see that the mapping preserves the lexicographic order,
 * first note that each of the individual maps above preserves the order.
 * If the value of the current iterator is j1 in one domain and j2 in another,
 * then if j1 = j2, we know that the same map is applied to both domains
 * and the order is preserved.
 * Otherwise, let us assume, without loss of generality, that j1 < j2.
 * If c1 >= c2 (with c1 and c2 the corresponding offsets), then
 *
 *	j1 - c1 < j2 - c2
 *
 * and the order is preserved.
 * If c1 < c2, then we know
 *
 *	0 <= c2 - c1 < s
 *
 * We also have
 *
 *	j2 - j1 = n * s + r
 *
 * with n >= 0 and 0 <= r < s.
 * In other words, r = c2 - c1.
 * If n > 0, then
 *
 *	j1 - c1 < j2 - c2
 *
 * If n = 0, then
 *
 *	j1 - c1 = j2 - c2
 *
 * and so
 *
 *	(j1 - c1, c1) << (j2 - c2, c2)
 *
 * with "<<" the lexicographic order, proving that the order is preserved
 * in all cases.
 */
static __isl_give isl_union_map *construct_shifted_executed(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_val *stride, __isl_keep isl_multi_val *offset,
	__isl_take isl_ast_build *build)
{
	int i;
	isl_union_map *executed;
	isl_space *space;
	isl_map *map;
	int depth;
	isl_constraint *c;

	depth = isl_ast_build_get_depth(build);
	space = isl_ast_build_get_space(build, 1);
	executed = isl_union_map_empty(isl_space_copy(space));
	space = isl_space_map_from_set(space);
	map = isl_map_identity(isl_space_copy(space));
	map = isl_map_eliminate(map, isl_dim_out, depth, 1);
	map = isl_map_insert_dims(map, isl_dim_out, depth + 1, 1);
	space = isl_space_insert_dims(space, isl_dim_out, depth + 1, 1);

	c = isl_constraint_alloc_equality(isl_local_space_from_space(space));
	c = isl_constraint_set_coefficient_si(c, isl_dim_in, depth, 1);
	c = isl_constraint_set_coefficient_si(c, isl_dim_out, depth, -1);

	for (i = 0; i < n; ++i) {
		isl_map *map_i;
		isl_val *v;

		v = isl_multi_val_get_val(offset, i);
		if (!v)
			break;
		map_i = isl_map_copy(map);
		map_i = isl_map_fix_val(map_i, isl_dim_out, depth + 1,
					isl_val_copy(v));
		v = isl_val_neg(v);
		c = isl_constraint_set_constant_val(c, v);
		map_i = isl_map_add_constraint(map_i, isl_constraint_copy(c));

		map_i = isl_map_apply_domain(isl_map_copy(domain[order[i]].map),
						map_i);
		executed = isl_union_map_add_map(executed, map_i);
	}

	isl_constraint_free(c);
	isl_map_free(map);

	if (i < n)
		executed = isl_union_map_free(executed);

	return executed;
}

/* Generate code for a single component, after exposing the stride,
 * given that the schedule domain is "shifted strided".
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * The schedule domain being "shifted strided" means that the differences
 * between the values of the current dimension of domain "i"
 * and the values of the current dimension for some reference domain are
 * equal to
 *
 *	stride * integer + offset[i]
 *
 * We first look for the domain with the "smallest" value for the current
 * dimension and adjust the offsets such that the offset of the "smallest"
 * domain is equal to zero.  The other offsets are reduced modulo stride.
 *
 * Based on this information, we construct a new inverse schedule in
 * construct_shifted_executed that exposes the stride.
 * Since this involves the introduction of a new schedule dimension,
 * the build needs to be changed accordingly.
 * After computing the AST, the newly introduced dimension needs
 * to be removed again from the list of grafts.  We do this by plugging
 * in a mapping that represents the new schedule domain in terms of the
 * old schedule domain.
 */
static __isl_give isl_ast_graft_list *generate_shift_component(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_val *stride, __isl_keep isl_multi_val *offset,
	__isl_take isl_ast_build *build)
{
	isl_ast_graft_list *list;
	int first;
	int depth;
	isl_val *val;
	isl_multi_val *mv;
	isl_space *space;
	isl_multi_aff *ma, *zero;
	isl_union_map *executed;

	depth = isl_ast_build_get_depth(build);

	first = first_offset(domain, order, n, build);
	if (first < 0)
		goto error;

	mv = isl_multi_val_copy(offset);
	val = isl_multi_val_get_val(offset, first);
	val = isl_val_neg(val);
	mv = isl_multi_val_add_val(mv, val);
	mv = isl_multi_val_mod_val(mv, isl_val_copy(stride));

	executed = construct_shifted_executed(domain, order, n, stride, mv,
						build);
	space = isl_ast_build_get_space(build, 1);
	space = isl_space_map_from_set(space);
	ma = isl_multi_aff_identity(isl_space_copy(space));
	space = isl_space_from_domain(isl_space_domain(space));
	space = isl_space_add_dims(space, isl_dim_out, 1);
	zero = isl_multi_aff_zero(space);
	ma = isl_multi_aff_range_splice(ma, depth + 1, zero);
	build = isl_ast_build_insert_dim(build, depth + 1);
	list = generate_shifted_component(executed, build);

	list = isl_ast_graft_list_preimage_multi_aff(list, ma);

	isl_multi_val_free(mv);

	return list;
error:
	isl_ast_build_free(build);
	return NULL;
}
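
/* A hypothetical example of the normalization above: for stride 3 and
 * offsets (0, 1, 2), with first_offset selecting the second domain,
 * the offset 1 of that domain is subtracted from all offsets and
 * the results are reduced modulo the stride, yielding the
 * adjusted offsets (2, 0, 1).
 */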

/* Does any node in the schedule tree rooted at the current schedule node
 * of "build" depend on outer schedule nodes?
 */
static int has_anchored_subtree(__isl_keep isl_ast_build *build)
{
	isl_schedule_node *node;
	int dependent = 0;

	node = isl_ast_build_get_schedule_node(build);
	dependent = isl_schedule_node_is_subtree_anchored(node);
	isl_schedule_node_free(node);

	return dependent;
}

/* Generate code for a single component.
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * This function may modify the "set" fields of "domain".
 *
 * Before proceeding with the actual code generation for the component,
 * we first check if there are any "shifted" strides, meaning that
 * the schedule domains of the individual domains are all strided,
 * but that they have different offsets, resulting in the union
 * of schedule domains not being strided anymore.
 *
 * The simplest example is the schedule
 *
 *	{ A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * Both schedule domains are strided, but their union is not.
 * This function detects such cases and then rewrites the schedule to
 *
 *	{ A[i] -> [2i, 0]: 0 <= i < 10; B[i] -> [2i, 1] : 0 <= i < 10 }
 *
 * In the new schedule, the schedule domains have the same offset (modulo
 * the stride), ensuring that the union of schedule domains is also strided.
 *
 *
 * If there is only a single domain in the component, then there is
 * nothing to do.  Similarly, if the current schedule dimension has
 * a fixed value for almost all domains then there is nothing to be done.
 * In particular, we need at least two domains where the current schedule
 * dimension does not have a fixed value.
 * Finally, in case of a schedule map input,
 * if any of the options refer to the current schedule dimension,
 * then we bail out as well.  It would be possible to reformulate the options
 * in terms of the new schedule domain, but that would introduce constraints
 * that separate the domains in the options and that is something we would
 * like to avoid.
 * In the case of a schedule tree input, we bail out if any of
 * the descendants of the current schedule node refer to outer
 * schedule nodes in any way.
 *
 *
 * To see if there is any shifted stride, we look at the differences
 * between the values of the current dimension in pairs of domains
 * for equal values of outer dimensions.  These differences should be
 * of the form
 *
 *	m x + r
 *
 * with "m" the stride and "r" a constant.  Note that we cannot perform
 * this analysis on individual domains as the lower bound in each domain
 * may depend on parameters or outer dimensions and so the current dimension
 * itself may not have a fixed remainder on division by the stride.
 *
 * In particular, we compare the first domain that does not have an
 * obviously fixed value for the current dimension to itself and all
 * other domains and collect the offsets and the gcd of the strides.
 * If the gcd becomes one, then we failed to find shifted strides.
 * If the gcd is zero, then the differences were all fixed, meaning
 * that some domains had non-obviously fixed values for the current dimension.
 * If all the offsets are the same (for those domains that do not have
 * an obviously fixed value for the current dimension), then we do not
 * apply the transformation.
 * If none of the domains were skipped, then there is nothing to do.
 * If some of them were skipped, then if we apply separation, the schedule
 * domain should get split in pieces with a (non-shifted) stride.
 *
 * Otherwise, we apply a shift to expose the stride in
 * generate_shift_component.
 */
static __isl_give isl_ast_graft_list *generate_component(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_take isl_ast_build *build)
{
	int i, d;
	int depth;
	isl_ctx *ctx;
	isl_map *map;
	isl_set *deltas;
	isl_val *gcd = NULL;
	isl_multi_val *mv;
	int fixed, skip;
	int base;
	isl_ast_graft_list *list;
	int res = 0;

	depth = isl_ast_build_get_depth(build);

	skip = n == 1;
	if (skip >= 0 && !skip)
		skip = at_most_one_non_fixed(domain, order, n, depth);
	if (skip >= 0 && !skip) {
		if (isl_ast_build_has_schedule_node(build))
			skip = has_anchored_subtree(build);
		else
			skip = isl_ast_build_options_involve_depth(build);
	}
	if (skip < 0)
		goto error;
	if (skip)
		return generate_shifted_component_from_list(domain,
							    order, n, build);

	base = eliminate_non_fixed(domain, order, n, depth, build);
	if (base < 0)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	mv = isl_multi_val_zero(isl_space_set_alloc(ctx, 0, n));

	fixed = 1;
	for (i = 0; i < n; ++i) {
		isl_val *r, *m;

		map = isl_map_from_domain_and_range(
					isl_set_copy(domain[order[base]].set),
					isl_set_copy(domain[order[i]].set));
		for (d = 0; d < depth; ++d)
			map = isl_map_equate(map, isl_dim_in, d,
						    isl_dim_out, d);
		deltas = isl_map_deltas(map);
		res = isl_set_dim_residue_class_val(deltas, depth, &m, &r);
		isl_set_free(deltas);
		if (res < 0)
			break;

		if (i == 0)
			gcd = m;
		else
			gcd = isl_val_gcd(gcd, m);
		if (isl_val_is_one(gcd)) {
			isl_val_free(r);
			break;
		}
		mv = isl_multi_val_set_val(mv, i, r);

		res = dim_is_fixed(domain[order[i]].set, depth);
		if (res < 0)
			break;
		if (res)
			continue;

		if (fixed && i > base) {
			isl_val *a, *b;
			a = isl_multi_val_get_val(mv, i);
			b = isl_multi_val_get_val(mv, base);
			if (isl_val_ne(a, b))
				fixed = 0;
			isl_val_free(a);
			isl_val_free(b);
		}
	}

	if (res < 0 || !gcd) {
		isl_ast_build_free(build);
		list = NULL;
	} else if (i < n || fixed || isl_val_is_zero(gcd)) {
		list = generate_shifted_component_from_list(domain,
							    order, n, build);
	} else {
		list = generate_shift_component(domain, order, n, gcd, mv,
						build);
	}

	isl_val_free(gcd);
	isl_multi_val_free(mv);

	return list;
error:
	isl_ast_build_free(build);
	return NULL;
}
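
/* A hypothetical illustration of the stride detection above:
 * for the example schedule
 *
 *	{ A[i] -> [2i] : 0 <= i < 10; B[i] -> [2i + 1] : 0 <= i < 10 }
 *
 * the differences at the current dimension between the B domain and
 * the (reference) A domain are of the form 2 (i' - i) + 1, so
 * isl_set_dim_residue_class_val returns modulo m = 2 and residue r = 1.
 * The gcd of the strides is 2 and the offsets (0, 1) are not all equal,
 * so the stride is exposed by generate_shift_component.
 */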

/* Store both "map" itself and its domain in the
 * structure pointed to by *next and advance to the next array element.
 */
static isl_stat extract_domain(__isl_take isl_map *map, void *user)
{
	struct isl_set_map_pair **next = user;

	(*next)->map = isl_map_copy(map);
	(*next)->set = isl_map_domain(map);
	(*next)++;

	return isl_stat_ok;
}

static isl_bool after_in_tree(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node);

/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the child of "node"?
 */
static isl_bool after_in_child(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_schedule_node *child;
	isl_bool after;

	child = isl_schedule_node_get_child(node, 0);
	after = after_in_tree(umap, child);
	isl_schedule_node_free(child);

	return after;
}

/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the band node "node"?
 *
 * We first check if any domain element is scheduled after any
 * of the corresponding image elements by the band node itself.
 * If not, we restrict "map" to those pairs of element that
 * are scheduled together by the band node and continue with
 * the child of the band node.
 * If there are no such pairs then the map passed to after_in_child
 * will be empty causing it to return 0.
 */
static isl_bool after_in_band(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_multi_union_pw_aff *mupa;
	isl_union_map *partial, *test, *gt, *universe, *umap1, *umap2;
	isl_union_set *domain, *range;
	isl_space *space;
	isl_bool empty;
	isl_bool after;
	isl_size n;

	n = isl_schedule_node_band_n_member(node);
	if (n < 0)
		return isl_bool_error;
	if (n == 0)
		return after_in_child(umap, node);

	mupa = isl_schedule_node_band_get_partial_schedule(node);
	space = isl_multi_union_pw_aff_get_space(mupa);
	partial = isl_union_map_from_multi_union_pw_aff(mupa);
	test = isl_union_map_copy(umap);
	test = isl_union_map_apply_domain(test, isl_union_map_copy(partial));
	test = isl_union_map_apply_range(test, isl_union_map_copy(partial));
	gt = isl_union_map_from_map(isl_map_lex_gt(space));
	test = isl_union_map_intersect(test, gt);
	empty = isl_union_map_is_empty(test);
	isl_union_map_free(test);

	if (empty < 0 || !empty) {
		isl_union_map_free(partial);
		return isl_bool_not(empty);
	}

	universe = isl_union_map_universe(isl_union_map_copy(umap));
	domain = isl_union_map_domain(isl_union_map_copy(universe));
	range = isl_union_map_range(universe);
	umap1 = isl_union_map_copy(partial);
	umap1 = isl_union_map_intersect_domain(umap1, domain);
	umap2 = isl_union_map_intersect_domain(partial, range);
	test = isl_union_map_apply_range(umap1, isl_union_map_reverse(umap2));
	test = isl_union_map_intersect(test, isl_union_map_copy(umap));
	after = after_in_child(test, node);
	isl_union_map_free(test);
	return after;
}
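
/* A hypothetical example of the above: for a band with partial schedule
 * { A[i] -> [i]; B[i] -> [i] } and umap = { A[i] -> B[i - 1] },
 * the band schedules A[i] at time i and its image B[i - 1] at
 * time i - 1, so the intersection with the lexicographic order is
 * non-empty and isl_bool_true is returned without considering the child.
 */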

/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the context node "node"?
 *
 * The context constraints apply to the schedule domain,
 * so we cannot apply them directly to "umap", which contains
 * pairs of statement instances.  Instead, we add them
 * to the range of the prefix schedule for both domain and
 * range of "umap".
 */
static isl_bool after_in_context(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *prefix, *universe, *umap1, *umap2;
	isl_union_set *domain, *range;
	isl_set *context;
	isl_bool after;

	umap = isl_union_map_copy(umap);
	context = isl_schedule_node_context_get_context(node);
	prefix = isl_schedule_node_get_prefix_schedule_union_map(node);
	universe = isl_union_map_universe(isl_union_map_copy(umap));
	domain = isl_union_map_domain(isl_union_map_copy(universe));
	range = isl_union_map_range(universe);
	umap1 = isl_union_map_copy(prefix);
	umap1 = isl_union_map_intersect_domain(umap1, domain);
	umap2 = isl_union_map_intersect_domain(prefix, range);
	umap1 = isl_union_map_intersect_range(umap1,
					    isl_union_set_from_set(context));
	umap1 = isl_union_map_apply_range(umap1, isl_union_map_reverse(umap2));
	umap = isl_union_map_intersect(umap, umap1);

	after = after_in_child(umap, node);

	isl_union_map_free(umap);

	return after;
}

/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the expansion node "node"?
 *
 * We apply the expansion to domain and range of "umap" and
 * continue with its child.
 */
static isl_bool after_in_expansion(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *expansion;
	isl_bool after;

	expansion = isl_schedule_node_expansion_get_expansion(node);
	umap = isl_union_map_copy(umap);
	umap = isl_union_map_apply_domain(umap, isl_union_map_copy(expansion));
	umap = isl_union_map_apply_range(umap, expansion);

	after = after_in_child(umap, node);

	isl_union_map_free(umap);

	return after;
}

/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the extension node "node"?
 *
 * Since the extension node may add statement instances before or
 * after the pairs of statement instances in "umap", we return isl_bool_true
 * to ensure that these pairs are not broken up.
 */
static isl_bool after_in_extension(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	return isl_bool_true;
}

/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the filter node "node"?
 *
 * We intersect domain and range of "umap" with the filter and
 * continue with its child.
 */
static isl_bool after_in_filter(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_union_set *filter;
	isl_bool after;

	umap = isl_union_map_copy(umap);
	filter = isl_schedule_node_filter_get_filter(node);
	umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(filter));
	umap = isl_union_map_intersect_range(umap, filter);

	after = after_in_child(umap, node);

	isl_union_map_free(umap);

	return after;
}

/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the set node "node"?
 *
 * This is only the case if this condition holds in any
 * of the (filter) children of the set node.
 * In particular, if the domain and the range of "umap"
 * are contained in different children, then the condition
 * does not hold.
 */
static isl_bool after_in_set(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	int i;
	isl_size n;

	n = isl_schedule_node_n_children(node);
	if (n < 0)
		return isl_bool_error;
	for (i = 0; i < n; ++i) {
		isl_schedule_node *child;
		isl_bool after;

		child = isl_schedule_node_get_child(node, i);
		after = after_in_tree(umap, child);
		isl_schedule_node_free(child);

		if (after < 0 || after)
			return after;
	}

	return isl_bool_false;
}

/* Return the filter of child "i" of "node".
 */
static __isl_give isl_union_set *child_filter(
	__isl_keep isl_schedule_node *node, int i)
{
	isl_schedule_node *child;
	isl_union_set *filter;

	child = isl_schedule_node_get_child(node, i);
	filter = isl_schedule_node_filter_get_filter(child);
	isl_schedule_node_free(child);

	return filter;
}

/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the sequence node "node"?
 *
 * This happens in particular if any domain element is
 * contained in a later child than one containing a range element or
 * if the condition holds within a given child in the sequence.
 * The latter part of the condition is checked by after_in_set.
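 *
 * For example (hypothetical), in a sequence with filters { S[i] }
 * (child 0) and { T[i] } (child 1), a pair T[0] -> S[0] in "umap"
 * has its domain in the later child and its range in the earlier one,
 * so the result is isl_bool_true.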
 */
static isl_bool after_in_sequence(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	int i, j;
	isl_size n;
	isl_union_map *umap_i;
	isl_bool empty;
	isl_bool after = isl_bool_false;

	n = isl_schedule_node_n_children(node);
	if (n < 0)
		return isl_bool_error;
	for (i = 1; i < n; ++i) {
		isl_union_set *filter_i;

		umap_i = isl_union_map_copy(umap);
		filter_i = child_filter(node, i);
		umap_i = isl_union_map_intersect_domain(umap_i, filter_i);
		empty = isl_union_map_is_empty(umap_i);
		if (empty < 0)
			goto error;
		if (empty) {
			isl_union_map_free(umap_i);
			continue;
		}

		for (j = 0; j < i; ++j) {
			isl_union_set *filter_j;
			isl_union_map *umap_ij;

			umap_ij = isl_union_map_copy(umap_i);
			filter_j = child_filter(node, j);
			umap_ij = isl_union_map_intersect_range(umap_ij,
								filter_j);
			empty = isl_union_map_is_empty(umap_ij);
			isl_union_map_free(umap_ij);

			if (empty < 0)
				goto error;
			if (!empty)
				after = isl_bool_true;
			if (after)
				break;
		}

		isl_union_map_free(umap_i);
		if (after)
			break;
	}

	if (after < 0 || after)
		return after;

	return after_in_set(umap, node);
error:
	isl_union_map_free(umap_i);
	return isl_bool_error;
}

/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at "node"?
 *
 * If "umap" is empty, then clearly there is no such element.
 * Otherwise, consider the different types of nodes separately.
 */
static isl_bool after_in_tree(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_bool empty;
	enum isl_schedule_node_type type;

	empty = isl_union_map_is_empty(umap);
	if (empty < 0)
		return isl_bool_error;
	if (empty)
		return isl_bool_false;
	if (!node)
		return isl_bool_error;

	type = isl_schedule_node_get_type(node);
	switch (type) {
	case isl_schedule_node_error:
		return isl_bool_error;
	case isl_schedule_node_leaf:
		return isl_bool_false;
	case isl_schedule_node_band:
		return after_in_band(umap, node);
	case isl_schedule_node_domain:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
			"unexpected internal domain node",
			return isl_bool_error);
	case isl_schedule_node_context:
		return after_in_context(umap, node);
	case isl_schedule_node_expansion:
		return after_in_expansion(umap, node);
	case isl_schedule_node_extension:
		return after_in_extension(umap, node);
	case isl_schedule_node_filter:
		return after_in_filter(umap, node);
	case isl_schedule_node_guard:
	case isl_schedule_node_mark:
		return after_in_child(umap, node);
	case isl_schedule_node_set:
		return after_in_set(umap, node);
	case isl_schedule_node_sequence:
		return after_in_sequence(umap, node);
	}

	return isl_bool_true;
}

/* Is any domain element of "map1" scheduled after any domain
 * element of "map2" by the subtree underneath the current band node,
 * while at the same time being scheduled together by the current
 * band node, i.e., by "map1" and "map2"?
 *
 * If the child of the current band node is a leaf, then
 * no element can be scheduled after any other element.
 *
 * Otherwise, we construct a relation between domain elements
 * of "map1" and domain elements of "map2" that are scheduled
 * together and then check if the subtree underneath the current
 * band node determines their relative order.
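 *
 * For example (hypothetical), with
 *
 *	"map1" = { [o] -> A[i] : o = i },
 *	"map2" = { [o] -> B[j] : o = j },
 *
 * the constructed relation is { A[i] -> B[j] : i = j }, relating
 * exactly the elements that are scheduled together by the current band.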
 */
static isl_bool after_in_subtree(__isl_keep isl_ast_build *build,
	__isl_keep isl_map *map1, __isl_keep isl_map *map2)
{
	isl_schedule_node *node;
	isl_map *map;
	isl_union_map *umap;
	isl_bool after;

	node = isl_ast_build_get_schedule_node(build);
	if (!node)
		return isl_bool_error;
	node = isl_schedule_node_child(node, 0);
	if (isl_schedule_node_get_type(node) == isl_schedule_node_leaf) {
		isl_schedule_node_free(node);
		return isl_bool_false;
	}
	map = isl_map_copy(map2);
	map = isl_map_apply_domain(map, isl_map_copy(map1));
	umap = isl_union_map_from_map(map);
	after = after_in_tree(umap, node);
	isl_union_map_free(umap);
	isl_schedule_node_free(node);
	return after;
}

/* Internal data for any_scheduled_after.
 *
 * "build" is the build in which the AST is constructed.
 * "depth" is the number of loops that have already been generated
 * "group_coscheduled" is a local copy of options->ast_build_group_coscheduled
 * "domain" is an array of set-map pairs corresponding to the different
 * iteration domains.  The set is the schedule domain, i.e., the domain
 * of the inverse schedule, while the map is the inverse schedule itself.
 */
struct isl_any_scheduled_after_data {
	isl_ast_build *build;
	int depth;
	int group_coscheduled;
	struct isl_set_map_pair *domain;
};

/* Is any element of domain "i" scheduled after any element of domain "j"
 * (for a common iteration of the first data->depth loops)?
 *
 * data->domain[i].set contains the domain of the inverse schedule
 * for domain "i", i.e., elements in the schedule domain.
 *
 * If we are inside a band of a schedule tree and there is a pair
 * of elements in the two domains that is scheduled together by
 * the current band, then we check if any element of "i" may be
 * scheduled after any element of "j" by the descendants of the band node.
 *
 * If data->group_coscheduled is set, then we also return isl_bool_true
 * if there is any pair of elements in the two domains that are
 * scheduled together.
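 *
 * For example (hypothetical), with data->depth equal to 1 and
 * schedule-domain sets { [o, 1] } for "i" and { [o, 0] } for "j",
 * isl_set_follows_at detects that "i" follows "j" at position 1,
 * so the result is isl_bool_true.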
 */
static isl_bool any_scheduled_after(int i, int j, void *user)
{
	struct isl_any_scheduled_after_data *data = user;
	isl_size dim = isl_set_dim(data->domain[i].set, isl_dim_set);
	int pos;

	if (dim < 0)
		return isl_bool_error;

	for (pos = data->depth; pos < dim; ++pos) {
		int follows;

		follows = isl_set_follows_at(data->domain[i].set,
						data->domain[j].set, pos);

		if (follows < -1)
			return isl_bool_error;
		if (follows > 0)
			return isl_bool_true;
		if (follows < 0)
			return isl_bool_false;
	}

	if (isl_ast_build_has_schedule_node(data->build)) {
		isl_bool after;

		after = after_in_subtree(data->build, data->domain[i].map,
					    data->domain[j].map);
		if (after < 0 || after)
			return after;
	}

	return isl_bool_ok(data->group_coscheduled);
}

/* Look for independent components at the current depth and generate code
 * for each component separately.  The resulting lists of grafts are
 * merged in an attempt to combine grafts with identical guards.
 *
 * Code for two domains can be generated separately if all the elements
 * of one domain are scheduled before (or together with) all the elements
 * of the other domain.  We therefore consider the graph with as nodes
 * the domains and an edge between two nodes if any element of the first
 * node is scheduled after any element of the second node.
 * If the ast_build_group_coscheduled option is set, then we also add
 * an edge if there is any pair of elements in the two domains that
 * are scheduled together.
 * Code is then generated (by generate_component)
 * for each of the strongly connected components in this graph
 * in their topological order.
 *
 * Since the test is performed on the domain of the inverse schedules of
 * the different domains, we precompute these domains and store
 * them in data.domain.
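 *
 * For example (hypothetical), with inverse schedules
 *
 *	{ [0, i] -> S[i] } and { [1, j] -> T[j] }
 *
 * at depth 0, no element of S is scheduled after any element of T,
 * while every element of T is scheduled after every element of S,
 * so S and T end up in separate components and code is generated
 * for S before T.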
 */
static __isl_give isl_ast_graft_list *generate_components(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int i;
	isl_ctx *ctx = isl_ast_build_get_ctx(build);
	isl_size n = isl_union_map_n_map(executed);
	struct isl_any_scheduled_after_data data;
	struct isl_set_map_pair *next;
	struct isl_tarjan_graph *g = NULL;
	isl_ast_graft_list *list = NULL;
	int n_domain = 0;

	data.domain = NULL;
	if (n < 0)
		goto error;
	data.domain = isl_calloc_array(ctx, struct isl_set_map_pair, n);
	if (!data.domain)
		goto error;
	n_domain = n;

	next = data.domain;
	if (isl_union_map_foreach_map(executed, &extract_domain, &next) < 0)
		goto error;

	if (!build)
		goto error;
	data.build = build;
	data.depth = isl_ast_build_get_depth(build);
	data.group_coscheduled =
		isl_options_get_ast_build_group_coscheduled(ctx);
	g = isl_tarjan_graph_init(ctx, n, &any_scheduled_after, &data);
	if (!g)
		goto error;

	list = isl_ast_graft_list_alloc(ctx, 0);

	i = 0;
	while (list && n) {
		isl_ast_graft_list *list_c;
		int first = i;

		if (g->order[i] == -1)
			isl_die(ctx, isl_error_internal, "cannot happen",
				goto error);
		++i; --n;
		while (g->order[i] != -1) {
			++i; --n;
		}

		list_c = generate_component(data.domain,
					    g->order + first, i - first,
					    isl_ast_build_copy(build));
		list = isl_ast_graft_list_merge(list, list_c, build);

		++i;
	}

	if (0)
error:		list = isl_ast_graft_list_free(list);
	isl_tarjan_graph_free(g);
	for (i = 0; i < n_domain; ++i) {
		isl_map_free(data.domain[i].map);
		isl_set_free(data.domain[i].set);
	}
	free(data.domain);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}

/* Generate code for the next level (and all inner levels).
 *
 * If "executed" is empty, i.e., no code needs to be generated,
 * then we return an empty list.
 *
 * If we have already generated code for all loop levels, then we pass
 * control to generate_inner_level.
 *
 * If "executed" lives in a single space, i.e., if code needs to be
 * generated for a single domain, then there can only be a single
 * component and we go directly to generate_shifted_component.
 * Otherwise, we call generate_components to detect the components
 * and to call generate_component on each of them separately.
 */
static __isl_give isl_ast_graft_list *generate_next_level(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int depth;
	isl_size dim;
	isl_size n;

	if (!build || !executed)
		goto error;

	if (isl_union_map_is_empty(executed)) {
		isl_ctx *ctx = isl_ast_build_get_ctx(build);
		isl_union_map_free(executed);
		isl_ast_build_free(build);
		return isl_ast_graft_list_alloc(ctx, 0);
	}

	depth = isl_ast_build_get_depth(build);
	dim = isl_ast_build_dim(build, isl_dim_set);
	if (dim < 0)
		goto error;
	if (depth >= dim)
		return generate_inner_level(executed, build);

	n = isl_union_map_n_map(executed);
	if (n < 0)
		goto error;
	if (n == 1)
		return generate_shifted_component(executed, build);

	return generate_components(executed, build);
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}

/* Internal data structure used by isl_ast_build_node_from_schedule_map.
 * internal, executed and build are the inputs to generate_code.
 * list collects the output.
 */
struct isl_generate_code_data {
	int internal;
	isl_union_map *executed;
	isl_ast_build *build;

	isl_ast_graft_list *list;
};

/* Given an inverse schedule in terms of the external build schedule, i.e.,
 *
 *	[E -> S] -> D
 *
 * with E the external build schedule and S the additional schedule "space",
 * reformulate the inverse schedule in terms of the internal schedule domain,
 * i.e., return
 *
 *	[I -> S] -> D
 *
 * We first obtain a mapping
 *
 *	I -> E
 *
 * take the inverse and the product with S -> S, resulting in
 *
 *	[E -> S] -> [I -> S]
 *
 * Applying this map to the domain of the input produces the desired result.
 */
static __isl_give isl_union_map *internal_executed(
	__isl_take isl_union_map *executed, __isl_keep isl_space *space,
	__isl_keep isl_ast_build *build)
{
	isl_map *id, *proj;

	proj = isl_ast_build_get_schedule_map(build);
	proj = isl_map_reverse(proj);
	space = isl_space_map_from_set(isl_space_copy(space));
	id = isl_map_identity(space);
	proj = isl_map_product(proj, id);
	executed = isl_union_map_apply_domain(executed,
						isl_union_map_from_map(proj));
	return executed;
}

/* Generate an AST that visits the elements in the range of data->executed
 * in the relative order specified by the corresponding domain element(s)
 * for those domain elements that belong to "set".
 * Add the result to data->list.
 *
 * The caller ensures that "set" is a universe domain.
 * "space" is the space of the additional part of the schedule.
 * It is equal to the space of "set" if build->domain is parametric.
 * Otherwise, it is equal to the range of the wrapped space of "set".
 *
 * If the build space is not parametric and
 * if isl_ast_build_node_from_schedule_map
 * was called from an outside user (data->internal not set), then
 * the (inverse) schedule refers to the external build domain and needs to
 * be transformed to refer to the internal build domain.
 *
 * If the build space is parametric, then we add some of the parameter
 * constraints to the executed relation.  Adding these constraints
 * allows for an earlier detection of conflicts in some cases.
 * However, we do not want to divide the executed relation into
 * more disjuncts than necessary.  We therefore approximate
 * the constraints on the parameters by a single disjunct set.
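 *
 * For example (hypothetical), a parametric build domain
 * { [n] : 0 <= n <= 10 or 20 <= n <= 30 } is replaced by
 * a single-disjunct over-approximation such as its simple hull
 * { [n] : 0 <= n <= 30 } before being added to the parameter
 * constraints of "executed".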
 *
 * The build is extended to include the additional part of the schedule.
 * If the original build space was not parametric, then the options
 * in data->build refer only to the additional part of the schedule
 * and they need to be adjusted to refer to the complete AST build
 * domain.
 *
 * After having adjusted inverse schedule and build, we start generating
 * code with the outer loop of the current code generation
 * in generate_next_level.
 *
 * If the original build space was not parametric, we undo the embedding
 * on the resulting isl_ast_graft_list so that it can be used within
 * the outer AST build.
 */
static isl_stat generate_code_in_space(struct isl_generate_code_data *data,
	__isl_take isl_set *set, __isl_take isl_space *space)
{
	isl_union_map *executed;
	isl_ast_build *build;
	isl_ast_graft_list *list;
	int embed;

	executed = isl_union_map_copy(data->executed);
	executed = isl_union_map_intersect_domain(executed,
						 isl_union_set_from_set(set));

	embed = !isl_set_is_params(data->build->domain);
	if (embed && !data->internal)
		executed = internal_executed(executed, space, data->build);
	if (!embed) {
		isl_set *domain;
		domain = isl_ast_build_get_domain(data->build);
		domain = isl_set_from_basic_set(isl_set_simple_hull(domain));
		executed = isl_union_map_intersect_params(executed, domain);
	}

	build = isl_ast_build_copy(data->build);
	build = isl_ast_build_product(build, space);

	list = generate_next_level(executed, build);

	list = isl_ast_graft_list_unembed(list, embed);

	data->list = isl_ast_graft_list_concat(data->list, list);

	return isl_stat_ok;
}

/* Generate an AST that visits the elements in the range of data->executed
 * in the relative order specified by the corresponding domain element(s)
 * for those domain elements that belong to "set".
 * Add the result to data->list.
 *
 * The caller ensures that "set" is a universe domain.
 *
 * If the build space S is not parametric, then the space of "set"
 * needs to be a wrapped relation with S as domain.  That is, it needs
 * to be of the form
 *
 *	[S -> T]
 *
 * Check this property and pass control to generate_code_in_space
 * passing along T.
 * If the build space is parametric, then T is the space of "set".
 */
static isl_stat generate_code_set(__isl_take isl_set *set, void *user)
{
	struct isl_generate_code_data *data = user;
	isl_space *space, *build_space;
	int is_domain;

	space = isl_set_get_space(set);

	if (isl_set_is_params(data->build->domain))
		return generate_code_in_space(data, set, space);

	build_space = isl_ast_build_get_space(data->build, data->internal);
	space = isl_space_unwrap(space);
	is_domain = isl_space_is_domain(build_space, space);
	isl_space_free(build_space);
	space = isl_space_range(space);

	if (is_domain < 0)
		goto error;
	if (!is_domain)
		isl_die(isl_set_get_ctx(set), isl_error_invalid,
			"invalid nested schedule space", goto error);

	return generate_code_in_space(data, set, space);
error:
	isl_set_free(set);
	isl_space_free(space);
	return isl_stat_error;
}

/* Generate an AST that visits the elements in the range of "executed"
 * in the relative order specified by the corresponding domain element(s).
 *
 * "build" is an isl_ast_build that has either been constructed by
 * isl_ast_build_from_context or passed to a callback set by
 * isl_ast_build_set_create_leaf.
 * In the first case, the space of the isl_ast_build is typically
 * a parametric space, although this is currently not enforced.
 * In the second case, the space is never a parametric space.
 * If the space S is not parametric, then the domain space(s) of "executed"
 * need to be wrapped relations with S as domain.
 *
 * If the domain of "executed" consists of several spaces, then an AST
 * is generated for each of them (in arbitrary order) and the results
 * are concatenated.
 *
 * If "internal" is set, then the domain "S" above refers to the internal
 * schedule domain representation.  Otherwise, it refers to the external
 * representation, as returned by isl_ast_build_get_schedule_space.
 *
 * We essentially run over all the spaces in the domain of "executed"
 * and call generate_code_set on each of them.
 */
static __isl_give isl_ast_graft_list *generate_code(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int internal)
{
	isl_ctx *ctx;
	struct isl_generate_code_data data = { 0 };
	isl_space *space;
	isl_union_set *schedule_domain;
	isl_union_map *universe;

	if (!build)
		goto error;
	space = isl_ast_build_get_space(build, 1);
	space = isl_space_align_params(space,
				    isl_union_map_get_space(executed));
	space = isl_space_align_params(space,
				    isl_union_map_get_space(build->options));
	build = isl_ast_build_align_params(build, isl_space_copy(space));
	executed = isl_union_map_align_params(executed, space);
	if (!executed || !build)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	data.internal = internal;
	data.executed = executed;
	data.build = build;
	data.list = isl_ast_graft_list_alloc(ctx, 0);

	universe = isl_union_map_universe(isl_union_map_copy(executed));
	schedule_domain = isl_union_map_domain(universe);
	if (isl_union_set_foreach_set(schedule_domain, &generate_code_set,
					&data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	isl_union_set_free(schedule_domain);
	isl_union_map_free(executed);

	isl_ast_build_free(build);
	return data.list;
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}

/* Generate an AST that visits the elements in the domain of "schedule"
 * in the relative order specified by the corresponding image element(s).
 *
 * "build" is an isl_ast_build that has either been constructed by
 * isl_ast_build_from_context or passed to a callback set by
 * isl_ast_build_set_create_leaf.
 * In the first case, the space of the isl_ast_build is typically
 * a parametric space, although this is currently not enforced.
 * In the second case, the space is never a parametric space.
 * If the space S is not parametric, then the range space(s) of "schedule"
 * need to be wrapped relations with S as domain.
 *
 * If the range of "schedule" consists of several spaces, then an AST
 * is generated for each of them (in arbitrary order) and the results
 * are concatenated.
 *
 * We first initialize the local copies of the relevant options.
 * We do this here rather than when the isl_ast_build is created
 * because the options may have changed between the construction
 * of the isl_ast_build and the call to isl_generate_code.
 *
 * The main computation is performed on an inverse schedule (with
 * the schedule domain in the domain and the elements to be executed
 * in the range) called "executed".
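 *
 * A minimal usage sketch (hypothetical driver code, not part of isl,
 * relying only on public entry points):
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_set *context = isl_set_read_from_str(ctx, "[n] -> { : n > 0 }");
 *	isl_union_map *schedule = isl_union_map_read_from_str(ctx,
 *		"[n] -> { S[i] -> [i] : 0 <= i < n }");
 *	isl_ast_build *build = isl_ast_build_from_context(context);
 *	isl_ast_node *tree =
 *		isl_ast_build_node_from_schedule_map(build, schedule);
 *	char *str = isl_ast_node_to_C_str(tree);
 *	// ... print "str", then free str, tree, build and ctx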
 */
__isl_give isl_ast_node *isl_ast_build_node_from_schedule_map(
	__isl_keep isl_ast_build *build, __isl_take isl_union_map *schedule)
{
	isl_ast_graft_list *list;
	isl_ast_node *node;
	isl_union_map *executed;

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_single_valued(build, 0);
	schedule = isl_union_map_coalesce(schedule);
	schedule = isl_union_map_remove_redundancies(schedule);
	executed = isl_union_map_reverse(schedule);
	list = generate_code(executed, isl_ast_build_copy(build), 0);
	node = isl_ast_node_from_graft_list(list, build);
	isl_ast_build_free(build);

	return node;
}

/* The old name for isl_ast_build_node_from_schedule_map.
 * It is being kept for backward compatibility, but
 * it will be removed in the future.
 */
__isl_give isl_ast_node *isl_ast_build_ast_from_schedule(
	__isl_keep isl_ast_build *build, __isl_take isl_union_map *schedule)
{
	return isl_ast_build_node_from_schedule_map(build, schedule);
}

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the leaf node "node".
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * Simply pass control to generate_inner_level.
 * Note that the current build does not refer to any band node, so
 * that generate_inner_level will not try to visit the child of
 * the leaf node.
 *
 * If multiple statement instances reach a leaf,
 * then they can be executed in any order.
 * Group the list of grafts based on shared guards
 * such that identical guards are only generated once
 * when the list is eventually passed on to isl_ast_graft_list_fuse.
 */
static __isl_give isl_ast_graft_list *build_ast_from_leaf(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_ast_graft_list *list;

	isl_schedule_node_free(node);
	list = generate_inner_level(executed, isl_ast_build_copy(build));
	list = isl_ast_graft_list_group_on_guard(list, build);
	isl_ast_build_free(build);

	return list;
}

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the band node "node" and its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * If the band is empty, we continue with its descendants.
 * Otherwise, we extend the build and the inverse schedule with
 * the additional space/partial schedule and continue generating
 * an AST in generate_next_level.
 * As soon as we have extended the inverse schedule with the additional
 * partial schedule, we look for equalities that may exist between
 * the old and the new part.
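 *
 * For example (hypothetical), a one-member band with partial schedule
 * { S[i] -> [(i)] } extends an inverse schedule { [o] -> S[i] } through
 * the domain product to
 *
 *	{ [[o] -> [i]] -> S[i] }
 *
 * after which isl_union_map_detect_equalities may expose equalities
 * between the outer schedule value o and the band value i.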
 */
static __isl_give isl_ast_graft_list *build_ast_from_band(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_space *space;
	isl_multi_union_pw_aff *extra;
	isl_union_map *extra_umap;
	isl_ast_graft_list *list;
	isl_size n1, n2;
	isl_size n;

	n = isl_schedule_node_band_n_member(node);
	if (!build || n < 0 || !executed)
		goto error;

	if (n == 0)
		return build_ast_from_child(build, node, executed);

	extra = isl_schedule_node_band_get_partial_schedule(node);
	extra = isl_multi_union_pw_aff_align_params(extra,
				isl_ast_build_get_space(build, 1));
	space = isl_multi_union_pw_aff_get_space(extra);

	extra_umap = isl_union_map_from_multi_union_pw_aff(extra);
	extra_umap = isl_union_map_reverse(extra_umap);

	executed = isl_union_map_domain_product(executed, extra_umap);
	executed = isl_union_map_detect_equalities(executed);

	n1 = isl_ast_build_dim(build, isl_dim_param);
	build = isl_ast_build_product(build, space);
	n2 = isl_ast_build_dim(build, isl_dim_param);
	if (n1 < 0 || n2 < 0)
		build = isl_ast_build_free(build);
	else if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"band node is not allowed to introduce new parameters",
			build = isl_ast_build_free(build));
	build = isl_ast_build_set_schedule_node(build, node);

	list = generate_next_level(executed, build);

	list = isl_ast_graft_list_unembed(list, 1);

	return list;
error:
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}

/* Hoist a list of grafts (in practice containing a single graft)
 * from "sub_build" (which includes extra context information)
 * to "build".
 *
 * In particular, project out all additional parameters introduced
 * by the context node from the enforced constraints and the guard
 * of the single graft.
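 *
 * For example (hypothetical), if the context node introduced an extra
 * parameter m, a guard [n, m] -> { : n >= m and m >= 1 } would be
 * projected onto the original parameters, yielding [n] -> { : n >= 1 }.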
 */
static __isl_give isl_ast_graft_list *hoist_out_of_context(
	__isl_take isl_ast_graft_list *list, __isl_keep isl_ast_build *build,
	__isl_keep isl_ast_build *sub_build)
{
	isl_ast_graft *graft;
	isl_basic_set *enforced;
	isl_set *guard;
	isl_size n_param, extra_param;

	n_param = isl_ast_build_dim(build, isl_dim_param);
	extra_param = isl_ast_build_dim(sub_build, isl_dim_param);
	if (n_param < 0 || extra_param < 0)
		return isl_ast_graft_list_free(list);

	if (extra_param == n_param)
		return list;

	extra_param -= n_param;
	enforced = isl_ast_graft_list_extract_shared_enforced(list, sub_build);
	enforced = isl_basic_set_project_out(enforced, isl_dim_param,
							n_param, extra_param);
	enforced = isl_basic_set_remove_unknown_divs(enforced);
	guard = isl_ast_graft_list_extract_hoistable_guard(list, sub_build);
	guard = isl_set_remove_divs_involving_dims(guard, isl_dim_param,
							n_param, extra_param);
	guard = isl_set_project_out(guard, isl_dim_param, n_param, extra_param);
	guard = isl_set_compute_divs(guard);
	graft = isl_ast_graft_alloc_from_children(list, guard, enforced,
							build, sub_build);
	list = isl_ast_graft_list_from_ast_graft(graft);

	return list;
}

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the context node "node"
 * and its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * The context node may introduce additional parameters as well as
 * constraints on the outer schedule dimensions or original parameters.
 *
 * We add the extra parameters to a new build and the context
 * constraints to both the build and (as a single disjunct)
 * to the domain of "executed".  Since the context constraints
 * are specified in terms of the input schedule, we first need
 * to map them to the internal schedule domain.
 *
 * After constructing the AST from the descendants of "node",
 * we combine the list of grafts into a single graft within
 * the new build, in order to be able to exploit the additional
 * context constraints during this combination.
 *
 * Additionally, if the current node is the outermost node in
 * the schedule tree (apart from the root domain node), we generate
 * all pending guards, again to be able to exploit the additional
 * context constraints.  We currently do not do this for internal
 * context nodes since we may still want to hoist conditions
 * to outer AST nodes.
 *
 * If the context node introduced any new parameters, then they
 * are removed from the set of enforced constraints and guard
 * in hoist_out_of_context.
 */
static __isl_give isl_ast_graft_list *build_ast_from_context(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_set *context;
	isl_space *space;
	isl_multi_aff *internal2input;
	isl_ast_build *sub_build;
	isl_ast_graft_list *list;
	isl_size n;
	isl_size depth;

	depth = isl_schedule_node_get_tree_depth(node);
	if (depth < 0)
		build = isl_ast_build_free(build);
	space = isl_ast_build_get_space(build, 1);
	context = isl_schedule_node_context_get_context(node);
	context = isl_set_align_params(context, space);
	sub_build = isl_ast_build_copy(build);
	space = isl_set_get_space(context);
	sub_build = isl_ast_build_align_params(sub_build, space);
	internal2input = isl_ast_build_get_internal2input(sub_build);
	context = isl_set_preimage_multi_aff(context, internal2input);
	sub_build = isl_ast_build_restrict_generated(sub_build,
					isl_set_copy(context));
	context = isl_set_from_basic_set(isl_set_simple_hull(context));
	executed = isl_union_map_intersect_domain(executed,
					isl_union_set_from_set(context));

	list = build_ast_from_child(isl_ast_build_copy(sub_build),
						node, executed);
	n = isl_ast_graft_list_n_ast_graft(list);
	if (n < 0)
		list = isl_ast_graft_list_free(list);

	list = isl_ast_graft_list_fuse(list, sub_build);
	if (depth == 1)
		list = isl_ast_graft_list_insert_pending_guard_nodes(list,
								sub_build);
	if (n >= 1)
		list = hoist_out_of_context(list, build, sub_build);

	isl_ast_build_free(build);
	isl_ast_build_free(sub_build);

	return list;
}

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the expansion node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * We expand the domain elements by the expansion and
 * continue with the descendants of the node.
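 *
 * For example (hypothetical), an expansion
 * { G[i] -> S1[i]; G[i] -> S2[i] } turns an inverse schedule
 * { [o] -> G[i] } into { [o] -> S1[i]; [o] -> S2[i] }.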
 */
static __isl_give isl_ast_graft_list *build_ast_from_expansion(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_union_map *expansion;
	isl_size n1, n2;

	expansion = isl_schedule_node_expansion_get_expansion(node);
	expansion = isl_union_map_align_params(expansion,
				isl_union_map_get_space(executed));

	n1 = isl_union_map_dim(executed, isl_dim_param);
	executed = isl_union_map_apply_range(executed, expansion);
	n2 = isl_union_map_dim(executed, isl_dim_param);
	if (n1 < 0 || n2 < 0)
		goto error;
	if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"expansion node is not allowed to introduce "
			"new parameters", goto error);

	return build_ast_from_child(build, node, executed);
error:
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	return NULL;
}

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the extension node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * Extend the inverse schedule with the extension applied to the current
 * set of generated constraints.  Since the extension is formulated
 * in terms of the input schedule, it first needs to be transformed
 * to refer to the internal schedule.
 */
static __isl_give isl_ast_graft_list *build_ast_from_extension(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_union_set *schedule_domain;
	isl_union_map *extension;
	isl_set *set;

	set = isl_ast_build_get_generated(build);
	set = isl_set_from_basic_set(isl_set_simple_hull(set));
	schedule_domain = isl_union_set_from_set(set);

	extension = isl_schedule_node_extension_get_extension(node);

	extension = isl_union_map_preimage_domain_multi_aff(extension,
			isl_multi_aff_copy(build->internal2input));
	extension = isl_union_map_intersect_domain(extension, schedule_domain);
	extension = isl_ast_build_substitute_values_union_map_domain(build,
								    extension);
	executed = isl_union_map_union(executed, extension);

	return build_ast_from_child(build, node, executed);
}

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the filter node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * We simply intersect the iteration domain (i.e., the range of "executed")
 * with the filter and continue with the descendants of the node,
 * unless the resulting inverse schedule is empty, in which
 * case we return an empty list.
 *
 * If the result of the intersection is equal to the original "executed"
 * relation, then keep the original representation since the intersection
 * may have unnecessarily broken up the relation into a greater number
 * of disjuncts.
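 *
 * For example (hypothetical), intersecting the range of
 * { [o] -> S[i]; [o] -> T[i] } with a filter { S[i] } leaves
 * { [o] -> S[i] }, while a filter containing all statement instances
 * leaves the relation untouched, in which case "orig" is reused.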
 */
static __isl_give isl_ast_graft_list *build_ast_from_filter(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_ctx *ctx;
	isl_union_set *filter;
	isl_union_map *orig;
	isl_ast_graft_list *list;
	int empty;
	isl_bool unchanged;
	isl_size n1, n2;

	orig = isl_union_map_copy(executed);
	if (!build || !node || !executed)
		goto error;

	filter = isl_schedule_node_filter_get_filter(node);
	filter = isl_union_set_align_params(filter,
				isl_union_map_get_space(executed));
	n1 = isl_union_map_dim(executed, isl_dim_param);
	executed = isl_union_map_intersect_range(executed, filter);
	n2 = isl_union_map_dim(executed, isl_dim_param);
	if (n1 < 0 || n2 < 0)
		goto error;
	if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"filter node is not allowed to introduce "
			"new parameters", goto error);

	unchanged = isl_union_map_is_subset(orig, executed);
	empty = isl_union_map_is_empty(executed);
	if (unchanged < 0 || empty < 0)
		goto error;
	if (unchanged) {
		isl_union_map_free(executed);
		return build_ast_from_child(build, node, orig);
	}
	isl_union_map_free(orig);
	if (!empty)
		return build_ast_from_child(build, node, executed);

	ctx = isl_ast_build_get_ctx(build);
	list = isl_ast_graft_list_alloc(ctx, 0);
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	return list;
error:
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	isl_union_map_free(orig);
	return NULL;
}

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the guard node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * Ensure that the associated guard is enforced by the outer AST
 * constructs by adding it to the guard of the graft.
 * Since we know that we will enforce the guard, we can also include it
 * in the generated constraints used to construct an AST for
 * the descendant nodes.
 */
static __isl_give isl_ast_graft_list *build_ast_from_guard(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_space *space;
	isl_set *guard, *hoisted;
	isl_basic_set *enforced;
	isl_ast_build *sub_build;
	isl_ast_graft *graft;
	isl_ast_graft_list *list;
	isl_size n1, n2, n;

	space = isl_ast_build_get_space(build, 1);
	guard = isl_schedule_node_guard_get_guard(node);
	n1 = isl_space_dim(space, isl_dim_param);
	guard = isl_set_align_params(guard, space);
	n2 = isl_set_dim(guard, isl_dim_param);
	if (n1 < 0 || n2 < 0)
		guard = isl_set_free(guard);
	else if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"guard node is not allowed to introduce "
			"new parameters", guard = isl_set_free(guard));
	guard = isl_set_preimage_multi_aff(guard,
			isl_multi_aff_copy(build->internal2input));
	guard = isl_ast_build_specialize(build, guard);
	guard = isl_set_gist(guard, isl_set_copy(build->generated));

	sub_build = isl_ast_build_copy(build);
	sub_build = isl_ast_build_restrict_generated(sub_build,
							isl_set_copy(guard));

	list = build_ast_from_child(isl_ast_build_copy(sub_build),
							node, executed);

	hoisted = isl_ast_graft_list_extract_hoistable_guard(list, sub_build);
	n = isl_set_n_basic_set(hoisted);
	if (n < 0)
		list = isl_ast_graft_list_free(list);
	if (n > 1)
		list = isl_ast_graft_list_gist_guards(list,
						    isl_set_copy(hoisted));
	guard = isl_set_intersect(guard, hoisted);
	enforced = isl_ast_graft_list_extract_shared_enforced(list, build);
	graft = isl_ast_graft_alloc_from_children(list, guard, enforced,
						    build, sub_build);

	isl_ast_build_free(sub_build);
	isl_ast_build_free(build);
	return isl_ast_graft_list_from_ast_graft(graft);
}

/* Call the before_each_mark callback, if requested by the user.
 *
 * Return 0 on success and -1 on error.
 *
 * The caller is responsible for recording the current inverse schedule
 * in "build".
 */
static isl_stat before_each_mark(__isl_keep isl_id *mark,
	__isl_keep isl_ast_build *build)
{
	if (!build)
		return isl_stat_error;
	if (!build->before_each_mark)
		return isl_stat_ok;
	return build->before_each_mark(mark, build,
					build->before_each_mark_user);
}

/* Call the after_each_mark callback, if requested by the user.
 *
 * The caller is responsible for recording the current inverse schedule
 * in "build".
 */
static __isl_give isl_ast_graft *after_each_mark(
	__isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build)
{
	if (!graft || !build)
		return isl_ast_graft_free(graft);
	if (!build->after_each_mark)
		return graft;
	graft->node = build->after_each_mark(graft->node, build,
						build->after_each_mark_user);
	if (!graft->node)
		return isl_ast_graft_free(graft);
	return graft;
}

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the mark node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * Since we may be calling before_each_mark and after_each_mark
 * callbacks, we record the current inverse schedule in the build.
 *
 * We generate an AST for the child of the mark node, combine
 * the graft list into a single graft and then insert the mark
 * in the AST of that single graft.
 */
static __isl_give isl_ast_graft_list *build_ast_from_mark(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_id *mark;
	isl_ast_graft *graft;
	isl_ast_graft_list *list;
	isl_size n;

	build = isl_ast_build_set_executed(build, isl_union_map_copy(executed));

	mark = isl_schedule_node_mark_get_id(node);
	if (before_each_mark(mark, build) < 0)
		node = isl_schedule_node_free(node);

	list = build_ast_from_child(isl_ast_build_copy(build), node, executed);
	list = isl_ast_graft_list_fuse(list, build);
	n = isl_ast_graft_list_n_ast_graft(list);
	if (n < 0)
		list = isl_ast_graft_list_free(list);
	if (n == 0) {
		isl_id_free(mark);
	} else {
		graft = isl_ast_graft_list_get_ast_graft(list, 0);
		graft = isl_ast_graft_insert_mark(graft, mark);
		graft = after_each_mark(graft, build);
		list = isl_ast_graft_list_set_ast_graft(list, 0, graft);
	}
	isl_ast_build_free(build);

	return list;
}

static __isl_give isl_ast_graft_list *build_ast_from_schedule_node(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed);

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the sequence (or set) node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * We simply generate an AST for each of the children and concatenate
 * the results.
 */
static __isl_give isl_ast_graft_list *build_ast_from_sequence(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	int i;
	isl_size n;
	isl_ctx *ctx;
	isl_ast_graft_list *list;

	ctx = isl_ast_build_get_ctx(build);
	list = isl_ast_graft_list_alloc(ctx, 0);

	n = isl_schedule_node_n_children(node);
	if (n < 0)
		list = isl_ast_graft_list_free(list);
	for (i = 0; i < n; ++i) {
		isl_schedule_node *child;
		isl_ast_graft_list *list_i;

		child = isl_schedule_node_get_child(node, i);
		list_i = build_ast_from_schedule_node(isl_ast_build_copy(build),
					child, isl_union_map_copy(executed));
		list = isl_ast_graft_list_concat(list, list_i);
	}
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);

	return list;
}

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the node "node" and its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * The node types are handled in separate functions.
 * Set nodes are currently treated in the same way as sequence nodes.
 * The children of a set node may be executed in any order,
 * including the order of the children.
 */
static __isl_give isl_ast_graft_list *build_ast_from_schedule_node(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	enum isl_schedule_node_type type;

	type = isl_schedule_node_get_type(node);

	switch (type) {
	case isl_schedule_node_error:
		goto error;
	case isl_schedule_node_leaf:
		return build_ast_from_leaf(build, node, executed);
	case isl_schedule_node_band:
		return build_ast_from_band(build, node, executed);
	case isl_schedule_node_context:
		return build_ast_from_context(build, node, executed);
	case isl_schedule_node_domain:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
			"unexpected internal domain node", goto error);
	case isl_schedule_node_expansion:
		return build_ast_from_expansion(build, node, executed);
	case isl_schedule_node_extension:
		return build_ast_from_extension(build, node, executed);
	case isl_schedule_node_filter:
		return build_ast_from_filter(build, node, executed);
	case isl_schedule_node_guard:
		return build_ast_from_guard(build, node, executed);
	case isl_schedule_node_mark:
		return build_ast_from_mark(build, node, executed);
	case isl_schedule_node_sequence:
	case isl_schedule_node_set:
		return build_ast_from_sequence(build, node, executed);
	}

	isl_die(isl_ast_build_get_ctx(build), isl_error_internal,
		"unhandled type", goto error);
error:
	isl_union_map_free(executed);
	isl_schedule_node_free(node);
	isl_ast_build_free(build);

	return NULL;
}

/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the (single) child of "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * This function is never called on a leaf, set or sequence node,
 * so the node always has exactly one child.
 */
static __isl_give isl_ast_graft_list *build_ast_from_child(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	node = isl_schedule_node_child(node, 0);
	return build_ast_from_schedule_node(build, node, executed);
}

/* Generate an AST that visits the elements in the domain of the domain
 * node "node" in the relative order specified by its descendants.
 *
 * An initial inverse schedule is created that maps a zero-dimensional
 * schedule space to the node domain.
 * The input "build" is assumed to have a parametric domain and
 * is extended with this zero-dimensional schedule space.
 *
 * We also add some of the parameter constraints in the build domain
 * to the executed relation.  Adding these constraints
 * allows for an earlier detection of conflicts in some cases.
 * However, we do not want to divide the executed relation into
 * more disjuncts than necessary.  We therefore approximate
 * the constraints on the parameters by a single disjunct set.
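 *
 * For example (hypothetical), a domain node with domain
 * [n] -> { A[i] : 0 <= i < n } gives rise to the initial
 * inverse schedule
 *
 *	[n] -> { [] -> A[i] : 0 <= i < n }
 *
 * whose domain is further restricted by a single-disjunct
 * approximation of the parameter constraints of the build.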
 */
static __isl_give isl_ast_node *build_ast_from_domain(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_union_set *domain, *schedule_domain;
	isl_union_map *executed;
	isl_space *space;
	isl_set *set;
	isl_ast_graft_list *list;
	isl_ast_node *ast;
	int is_params;

	if (!build)
		goto error;

	ctx = isl_ast_build_get_ctx(build);
	space = isl_ast_build_get_space(build, 1);
	is_params = isl_space_is_params(space);
	isl_space_free(space);
	if (is_params < 0)
		goto error;
	if (!is_params)
		isl_die(ctx, isl_error_unsupported,
			"expecting parametric initial context", goto error);

	domain = isl_schedule_node_domain_get_domain(node);
	domain = isl_union_set_coalesce(domain);

	space = isl_union_set_get_space(domain);
	space = isl_space_set_from_params(space);
	build = isl_ast_build_product(build, space);

	set = isl_ast_build_get_domain(build);
	set = isl_set_from_basic_set(isl_set_simple_hull(set));
	schedule_domain = isl_union_set_from_set(set);

	executed = isl_union_map_from_domain_and_range(schedule_domain, domain);
	list = build_ast_from_child(isl_ast_build_copy(build), node, executed);
	ast = isl_ast_node_from_graft_list(list, build);
	isl_ast_build_free(build);

	return ast;
error:
	isl_schedule_node_free(node);
	isl_ast_build_free(build);
	return NULL;
}

/* Generate an AST that visits the elements in the domain of "schedule"
 * in the relative order specified by the schedule tree.
 *
 * "build" is an isl_ast_build that has been created using
 * isl_ast_build_alloc or isl_ast_build_from_context based
 * on a parametric set.
 *
 * The construction starts at the root node of the schedule,
 * which is assumed to be a domain node.
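 *
 * A minimal usage sketch (hypothetical driver code, not part of isl,
 * assuming the textual schedule tree representation):
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_schedule *schedule = isl_schedule_read_from_str(ctx,
 *		"{ domain: \"[n] -> { S[i] : 0 <= i < n }\", "
 *		"child: { schedule: \"[n] -> [{ S[i] -> [(i)] }]\" } }");
 *	isl_ast_build *build = isl_ast_build_from_context(
 *		isl_set_read_from_str(ctx, "[n] -> { : }"));
 *	isl_ast_node *tree =
 *		isl_ast_build_node_from_schedule(build, schedule);
 *	// ... print the AST, then free tree, build and ctx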
 */
__isl_give isl_ast_node *isl_ast_build_node_from_schedule(
	__isl_keep isl_ast_build *build, __isl_take isl_schedule *schedule)
{
	isl_ctx *ctx;
	isl_schedule_node *node;

	if (!build || !schedule)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	node = isl_schedule_get_root(schedule);
	if (!node)
		goto error;
	isl_schedule_free(schedule);

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_single_valued(build, 0);
	if (isl_schedule_node_get_type(node) != isl_schedule_node_domain)
		isl_die(ctx, isl_error_unsupported,
			"expecting root domain node",
			build = isl_ast_build_free(build));
	return build_ast_from_domain(build, node);
error:
	isl_schedule_free(schedule);
	return NULL;
}