//===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SampleProfileLoader transformation. This pass
// reads a profile file generated by a sampling profiler (e.g. Linux Perf -
// http://perf.wiki.kernel.org/) and generates IR metadata to reflect the
// profile information in the given profile.
//
// This pass generates branch weight annotations on the IR:
//
// - prof: Represents branch weights. This annotation is added to branches
//      to indicate the weight of each edge coming out of the branch.
//      The weight of each edge is the weight of the target block for
//      that edge. The weight of a block B is computed as the maximum
//      number of samples found in B.
//
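//      For example (illustrative only; the block names and counts below are
//      made up), a conditional branch whose successors were weighted 100 and
//      10 could be annotated roughly as:
//
//        br i1 %cmp, label %if.then, label %if.else, !prof !1
//        ...
//        !1 = !{!"branch_weights", i32 100, i32 10}
//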
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/SampleProfile.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ReplayInlineAdvisor.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/ProfileData/SampleProfReader.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/MisExpect.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <system_error>
#include <utility>
#include <vector>

using namespace llvm;
using namespace sampleprof;
using ProfileCount = Function::ProfileCount;
#define DEBUG_TYPE "sample-profile"
#define CSINLINE_DEBUG DEBUG_TYPE "-inline"

STATISTIC(NumCSInlined,
          "Number of functions inlined with context sensitive profile");
STATISTIC(NumCSNotInlined,
          "Number of functions not inlined with context sensitive profile");

// Command line option to specify the file to read samples from. This is
// mainly used for debugging.
static cl::opt<std::string> SampleProfileFile(
    "sample-profile-file", cl::init(""), cl::value_desc("filename"),
    cl::desc("Profile file loaded by -sample-profile"), cl::Hidden);

// The named file contains a set of transformations that may have been applied
// to the symbol names between the program from which the sample data was
// collected and the current program's symbols.
static cl::opt<std::string> SampleProfileRemappingFile(
    "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"),
    cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden);

static cl::opt<unsigned> SampleProfileMaxPropagateIterations(
    "sample-profile-max-propagate-iterations", cl::init(100),
    cl::desc("Maximum number of iterations to go through when propagating "
             "sample block/edge weights through the CFG."));

static cl::opt<unsigned> SampleProfileRecordCoverage(
    "sample-profile-check-record-coverage", cl::init(0), cl::value_desc("N"),
    cl::desc("Emit a warning if less than N% of records in the input profile "
             "are matched to the IR."));

static cl::opt<unsigned> SampleProfileSampleCoverage(
    "sample-profile-check-sample-coverage", cl::init(0), cl::value_desc("N"),
    cl::desc("Emit a warning if less than N% of samples in the input profile "
             "are matched to the IR."));

static cl::opt<bool> NoWarnSampleUnused(
    "no-warn-sample-unused", cl::init(false), cl::Hidden,
    cl::desc("Use this option to turn off/on warnings about functions that "
             "have samples but lack the debug information needed to use "
             "those samples. "));

static cl::opt<bool> ProfileSampleAccurate(
    "profile-sample-accurate", cl::Hidden, cl::init(false),
    cl::desc("If the sample profile is accurate, we will mark all un-sampled "
             "callsite and function as having 0 samples. Otherwise, treat "
             "un-sampled callsites and functions conservatively as unknown. "));

static cl::opt<bool> ProfileAccurateForSymsInList(
    "profile-accurate-for-symsinlist", cl::Hidden, cl::ZeroOrMore,
    cl::init(true),
    cl::desc("For symbols in profile symbol list, regard their profiles to "
             "be accurate. It may be overriden by profile-sample-accurate. "));

static cl::opt<bool> ProfileMergeInlinee(
    "sample-profile-merge-inlinee", cl::Hidden, cl::init(true),
    cl::desc("Merge past inlinee's profile to outline version if sample "
             "profile loader decided not to inline a call site. It will "
             "only be enabled when top-down order of profile loading is "
             "enabled. "));

static cl::opt<bool> ProfileTopDownLoad(
    "sample-profile-top-down-load", cl::Hidden, cl::init(true),
    cl::desc("Do profile annotation and inlining for functions in top-down "
             "order of call graph during sample profile loading. It only "
             "works for new pass manager. "));

static cl::opt<bool> ProfileSizeInline(
    "sample-profile-inline-size", cl::Hidden, cl::init(false),
    cl::desc("Inline cold call sites in profile loader if it's beneficial "
             "for code size."));

static cl::opt<int> SampleColdCallSiteThreshold(
    "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45),
    cl::desc("Threshold for inlining cold callsites"));

static cl::opt<std::string> ProfileInlineReplayFile(
    "sample-profile-inline-replay", cl::init(""), cl::value_desc("filename"),
    cl::desc(
        "Optimization remarks file containing inline remarks to be replayed "
        "by inlining from sample profile loader."),
    cl::Hidden);

namespace {

using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>;
using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>;
using Edge = std::pair<const BasicBlock *, const BasicBlock *>;
using EdgeWeightMap = DenseMap<Edge, uint64_t>;
using BlockEdgeMap =
    DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>;

class SampleProfileLoader;

class SampleCoverageTracker {
public:
  SampleCoverageTracker(SampleProfileLoader &SPL) : SPLoader(SPL) {}

  bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset,
                       uint32_t Discriminator, uint64_t Samples);
  unsigned computeCoverage(unsigned Used, unsigned Total) const;
  unsigned countUsedRecords(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;
  unsigned countBodyRecords(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;
  uint64_t getTotalUsedSamples() const { return TotalUsedSamples; }
  uint64_t countBodySamples(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;

  void clear() {
    SampleCoverage.clear();
    TotalUsedSamples = 0;
  }

private:
  using BodySampleCoverageMap = std::map<LineLocation, unsigned>;
  using FunctionSamplesCoverageMap =
      DenseMap<const FunctionSamples *, BodySampleCoverageMap>;

  /// Coverage map for sampling records.
  ///
  /// This map keeps a record of sampling records that have been matched to
  /// an IR instruction. This is used to detect some form of staleness in
  /// profiles (see flags -sample-profile-check-record-coverage and
  /// -sample-profile-check-sample-coverage).
  ///
  /// Each entry in the map corresponds to a FunctionSamples instance and maps
  /// it to another map that counts how many times the sample record at the
  /// given location has been used.
  FunctionSamplesCoverageMap SampleCoverage;

  /// Number of samples used from the profile.
  ///
  /// When a sampling record is used for the first time, the samples from
  /// that record are added to this accumulator.  Coverage is later computed
  /// based on the total number of samples available in this function and
  /// its callsites.
  ///
  /// Note that this accumulator tracks samples used from a single function
  /// and all the inlined callsites. Strictly, we should have a map of counters
  /// keyed by FunctionSamples pointers, but these stats are cleared after
  /// every function, so we just need to keep a single counter.
  uint64_t TotalUsedSamples = 0;

  SampleProfileLoader &SPLoader;
};

class GUIDToFuncNameMapper {
public:
  GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader,
                        DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap)
      : CurrentReader(Reader), CurrentModule(M),
      CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) {
    if (!CurrentReader.useMD5())
      return;

    for (const auto &F : CurrentModule) {
      StringRef OrigName = F.getName();
      CurrentGUIDToFuncNameMap.insert(
          {Function::getGUID(OrigName), OrigName});

      // Local-to-global variable promotion used by optimizations like
      // ThinLTO renames the variable and adds a suffix like ".llvm.xxx" to
      // the original local name. In the sample profile, the suffixes of
      // function names are all stripped. Since the mapper may be built in
      // the post-thin-link phase after variable promotion has been done, we
      // also need to add the function name without the suffix into the
      // GUIDToFuncNameMap.
      StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
      if (CanonName != OrigName)
        CurrentGUIDToFuncNameMap.insert(
            {Function::getGUID(CanonName), CanonName});
    }

    // Update GUIDToFuncNameMap for each function including inlinees.
    SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap);
  }

  ~GUIDToFuncNameMapper() {
    if (!CurrentReader.useMD5())
      return;

    CurrentGUIDToFuncNameMap.clear();

    // Reset the GUIDToFuncNameMap of each function, as the entries are no
    // longer valid at this point.
    SetGUIDToFuncNameMapForAll(nullptr);
  }

private:
  void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) {
    std::queue<FunctionSamples *> FSToUpdate;
    for (auto &IFS : CurrentReader.getProfiles()) {
      FSToUpdate.push(&IFS.second);
    }

    while (!FSToUpdate.empty()) {
      FunctionSamples *FS = FSToUpdate.front();
      FSToUpdate.pop();
      FS->GUIDToFuncNameMap = Map;
      for (const auto &ICS : FS->getCallsiteSamples()) {
        const FunctionSamplesMap &FSMap = ICS.second;
        for (auto &IFS : FSMap) {
          FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second);
          FSToUpdate.push(&FS);
        }
      }
    }
  }

  SampleProfileReader &CurrentReader;
  Module &CurrentModule;
  DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap;
};

/// Sample profile pass.
///
/// This pass reads profile data from the file specified by
/// -sample-profile-file and annotates every affected function with the
/// profile information found in that file.
class SampleProfileLoader {
public:
  SampleProfileLoader(
      StringRef Name, StringRef RemapName, bool IsThinLTOPreLink,
      std::function<AssumptionCache &(Function &)> GetAssumptionCache,
      std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo,
      std::function<const TargetLibraryInfo &(Function &)> GetTLI)
      : GetAC(std::move(GetAssumptionCache)),
        GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)),
        CoverageTracker(*this), Filename(std::string(Name)),
        RemappingFilename(std::string(RemapName)),
        IsThinLTOPreLink(IsThinLTOPreLink) {}

  bool doInitialization(Module &M, FunctionAnalysisManager *FAM = nullptr);
  bool runOnModule(Module &M, ModuleAnalysisManager *AM,
                   ProfileSummaryInfo *_PSI, CallGraph *CG);

  void dump() { Reader->dump(); }

protected:
  friend class SampleCoverageTracker;

  bool runOnFunction(Function &F, ModuleAnalysisManager *AM);
  unsigned getFunctionLoc(Function &F);
  bool emitAnnotations(Function &F);
  ErrorOr<uint64_t> getInstWeight(const Instruction &I);
  ErrorOr<uint64_t> getBlockWeight(const BasicBlock *BB);
  const FunctionSamples *findCalleeFunctionSamples(const CallBase &I) const;
  std::vector<const FunctionSamples *>
  findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const;
  mutable DenseMap<const DILocation *, const FunctionSamples *> DILocation2SampleMap;
  const FunctionSamples *findFunctionSamples(const Instruction &I) const;
  bool inlineCallInstruction(CallBase &CB);
  bool inlineHotFunctions(Function &F,
                          DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  // Inline cold/small functions in addition to hot ones
  bool shouldInlineColdCallee(CallBase &CallInst);
  void emitOptimizationRemarksForInlineCandidates(
      const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
      bool Hot);
  void printEdgeWeight(raw_ostream &OS, Edge E);
  void printBlockWeight(raw_ostream &OS, const BasicBlock *BB) const;
  void printBlockEquivalence(raw_ostream &OS, const BasicBlock *BB);
  bool computeBlockWeights(Function &F);
  void findEquivalenceClasses(Function &F);
  template <bool IsPostDom>
  void findEquivalencesFor(BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
                           DominatorTreeBase<BasicBlock, IsPostDom> *DomTree);

  void propagateWeights(Function &F);
  uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge);
  void buildEdges(Function &F);
  std::vector<Function *> buildFunctionOrder(Module &M, CallGraph *CG);
  bool propagateThroughEdges(Function &F, bool UpdateBlockCount);
  void computeDominanceAndLoopInfo(Function &F);
  void clearFunctionData();
  bool callsiteIsHot(const FunctionSamples *CallsiteFS,
                     ProfileSummaryInfo *PSI);

  /// Map basic blocks to their computed weights.
  ///
  /// The weight of a basic block is defined to be the maximum
  /// of all the instruction weights in that block.
  BlockWeightMap BlockWeights;

  /// Map edges to their computed weights.
  ///
  /// Edge weights are computed by propagating basic block weights in
  /// SampleProfile::propagateWeights.
  EdgeWeightMap EdgeWeights;

  /// Set of visited blocks during propagation.
  SmallPtrSet<const BasicBlock *, 32> VisitedBlocks;

  /// Set of visited edges during propagation.
  SmallSet<Edge, 32> VisitedEdges;

  /// Equivalence classes for block weights.
  ///
  /// Two blocks BB1 and BB2 are in the same equivalence class if they
  /// dominate and post-dominate each other, and they are in the same loop
  /// nest. When this happens, the two blocks are guaranteed to execute
  /// the same number of times.
  EquivalenceClassMap EquivalenceClass;

  /// Map from function name to Function *. Used to find the function from
  /// the function name. If the function name contains a suffix, an
  /// additional entry is added to map the stripped name to the function if
  /// there is a one-to-one mapping.
  StringMap<Function *> SymbolMap;

  /// Dominance, post-dominance and loop information.
  std::unique_ptr<DominatorTree> DT;
  std::unique_ptr<PostDominatorTree> PDT;
  std::unique_ptr<LoopInfo> LI;

  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<const TargetLibraryInfo &(Function &)> GetTLI;

  /// Predecessors for each basic block in the CFG.
  BlockEdgeMap Predecessors;

  /// Successors for each basic block in the CFG.
  BlockEdgeMap Successors;

  SampleCoverageTracker CoverageTracker;

  /// Profile reader object.
  std::unique_ptr<SampleProfileReader> Reader;

  /// Samples collected for the body of this function.
  FunctionSamples *Samples = nullptr;

  /// Name of the profile file to load.
  std::string Filename;

  /// Name of the profile remapping file to load.
  std::string RemappingFilename;

  /// Flag indicating whether the profile input loaded successfully.
  bool ProfileIsValid = false;

  /// Flag indicating if the pass is invoked in the ThinLTO compile phase.
  ///
  /// In this phase, during annotation, we should not promote indirect calls.
  /// Instead, we mark the GUIDs that need to be annotated on the function.
  bool IsThinLTOPreLink;

  /// Profile Summary Info computed from sample profile.
  ProfileSummaryInfo *PSI = nullptr;

  /// Profile symbol list tells whether a function name appears in the binary
  /// used to generate the current profile.
  std::unique_ptr<ProfileSymbolList> PSL;

  /// Total number of samples collected in this profile.
  ///
  /// This is the sum of all the samples collected in all the functions executed
  /// at runtime.
  uint64_t TotalCollectedSamples = 0;

  /// Optimization Remark Emitter used to emit diagnostic remarks.
  OptimizationRemarkEmitter *ORE = nullptr;

  // Information recorded for each callee function when we decline to inline
  // a call site because we have determined it is too cold. Initially this is
  // just the entry count.
  struct NotInlinedProfileInfo {
    uint64_t entryCount;
  };
  DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo;

  // GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
  // all the function symbols defined or declared in current module.
  DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;

  // All the Names used in FunctionSamples including outline function
  // names, inline instance names and call target names.
  StringSet<> NamesInProfile;

  // For symbols in the profile symbol list, whether to regard their profiles
  // as accurate. This is mainly decided by the existence of the profile
  // symbol list and the -profile-accurate-for-symsinlist flag, but it can be
  // overridden by -profile-sample-accurate or the profile-sample-accurate
  // attribute.
  bool ProfAccForSymsInList;

  // External inline advisor used to replay inline decision from remarks.
  std::unique_ptr<ReplayInlineAdvisor> ExternalInlineAdvisor;
};

class SampleProfileLoaderLegacyPass : public ModulePass {
public:
  // Class identification, replacement for typeinfo
  static char ID;

  SampleProfileLoaderLegacyPass(StringRef Name = SampleProfileFile,
                                bool IsThinLTOPreLink = false)
      : ModulePass(ID), SampleLoader(
                            Name, SampleProfileRemappingFile, IsThinLTOPreLink,
                            [&](Function &F) -> AssumptionCache & {
                              return ACT->getAssumptionCache(F);
                            },
                            [&](Function &F) -> TargetTransformInfo & {
                              return TTIWP->getTTI(F);
                            },
                            [&](Function &F) -> TargetLibraryInfo & {
                              return TLIWP->getTLI(F);
                            }) {
    initializeSampleProfileLoaderLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void dump() { SampleLoader.dump(); }

  bool doInitialization(Module &M) override {
    return SampleLoader.doInitialization(M);
  }

  StringRef getPassName() const override { return "Sample profile pass"; }
  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }

private:
  SampleProfileLoader SampleLoader;
  AssumptionCacheTracker *ACT = nullptr;
  TargetTransformInfoWrapperPass *TTIWP = nullptr;
  TargetLibraryInfoWrapperPass *TLIWP = nullptr;
};

} // end anonymous namespace

/// Return true if the given callsite is hot with respect to the hot cutoff
/// threshold.
///
/// Functions that were inlined in the original binary will be represented
/// in the inline stack in the sample profile. If the profile shows that
/// the original inline decision was "good" (i.e., the callsite is executed
/// frequently), then we will recreate the inline decision and apply the
/// profile from the inlined callsite.
///
/// To decide whether an inlined callsite is hot, we compare the callsite
/// sample count with the hot cutoff computed by ProfileSummaryInfo; it is
/// regarded as hot if the count is above the cutoff value.
///
/// When ProfileAccurateForSymsInList is enabled and a profile symbol list is
/// present, functions that are in the profile symbol list but have no
/// profile will be regarded as cold, and much less inlining will happen in
/// the CGSCC inlining pass. We therefore lower the hot criterion here to
/// allow more early inlining of warm callsites, which helps performance.
bool SampleProfileLoader::callsiteIsHot(const FunctionSamples *CallsiteFS,
                                        ProfileSummaryInfo *PSI) {
  if (!CallsiteFS)
    return false; // The callsite was not inlined in the original binary.

  assert(PSI && "PSI is expected to be non null");
  uint64_t CallsiteTotalSamples = CallsiteFS->getTotalSamples();
  if (ProfAccForSymsInList)
    return !PSI->isColdCount(CallsiteTotalSamples);
  else
    return PSI->isHotCount(CallsiteTotalSamples);
}

/// Mark as used the sample record for the given function samples at
/// (LineOffset, Discriminator).
///
/// \returns true if this is the first time we mark the given record.
bool SampleCoverageTracker::markSamplesUsed(const FunctionSamples *FS,
                                            uint32_t LineOffset,
                                            uint32_t Discriminator,
                                            uint64_t Samples) {
  LineLocation Loc(LineOffset, Discriminator);
  unsigned &Count = SampleCoverage[FS][Loc];
  bool FirstTime = (++Count == 1);
  if (FirstTime)
    TotalUsedSamples += Samples;
  return FirstTime;
}

/// Return the number of sample records that were applied from this profile.
///
/// This count does not include records from cold inlined callsites.
unsigned
SampleCoverageTracker::countUsedRecords(const FunctionSamples *FS,
                                        ProfileSummaryInfo *PSI) const {
  auto I = SampleCoverage.find(FS);

  // The size of the coverage map for FS represents the number of records
  // that were marked used at least once.
  unsigned Count = (I != SampleCoverage.end()) ? I->second.size() : 0;

  // If there are inlined callsites in this function, count the records found
  // in the respective bodies. However, do not bother counting callees with 0
  // total samples; these are callees that were never invoked at runtime.
  for (const auto &I : FS->getCallsiteSamples())
    for (const auto &J : I.second) {
      const FunctionSamples *CalleeSamples = &J.second;
      if (SPLoader.callsiteIsHot(CalleeSamples, PSI))
        Count += countUsedRecords(CalleeSamples, PSI);
    }

  return Count;
}

/// Return the number of sample records in the body of this profile.
///
/// This count does not include records from cold inlined callsites.
unsigned
SampleCoverageTracker::countBodyRecords(const FunctionSamples *FS,
                                        ProfileSummaryInfo *PSI) const {
  unsigned Count = FS->getBodySamples().size();

  // Only count records in hot callsites.
  for (const auto &I : FS->getCallsiteSamples())
    for (const auto &J : I.second) {
      const FunctionSamples *CalleeSamples = &J.second;
      if (SPLoader.callsiteIsHot(CalleeSamples, PSI))
        Count += countBodyRecords(CalleeSamples, PSI);
    }

  return Count;
}

/// Return the number of samples collected in the body of this profile.
///
/// This count does not include samples from cold inlined callsites.
uint64_t
SampleCoverageTracker::countBodySamples(const FunctionSamples *FS,
                                        ProfileSummaryInfo *PSI) const {
  uint64_t Total = 0;
  for (const auto &I : FS->getBodySamples())
    Total += I.second.getSamples();

  // Only count samples in hot callsites.
  for (const auto &I : FS->getCallsiteSamples())
    for (const auto &J : I.second) {
      const FunctionSamples *CalleeSamples = &J.second;
      if (SPLoader.callsiteIsHot(CalleeSamples, PSI))
        Total += countBodySamples(CalleeSamples, PSI);
    }

  return Total;
}

/// Return the fraction of sample records used in this profile.
///
/// The returned value is an unsigned integer in the range 0-100 indicating
/// the percentage of sample records that were used while applying this
/// profile to the associated function.
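///
/// For example (illustrative numbers), 30 used records out of 120 total
/// records yields a coverage of 25.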
unsigned SampleCoverageTracker::computeCoverage(unsigned Used,
                                                unsigned Total) const {
  assert(Used <= Total &&
         "number of used records cannot exceed the total number of records");
  return Total > 0 ? Used * 100 / Total : 100;
}

/// Clear all the per-function data used to load samples and propagate weights.
void SampleProfileLoader::clearFunctionData() {
  BlockWeights.clear();
  EdgeWeights.clear();
  VisitedBlocks.clear();
  VisitedEdges.clear();
  EquivalenceClass.clear();
  DT = nullptr;
  PDT = nullptr;
  LI = nullptr;
  Predecessors.clear();
  Successors.clear();
  CoverageTracker.clear();
}

#ifndef NDEBUG
/// Print the weight of edge \p E on stream \p OS.
///
/// \param OS  Stream to emit the output to.
/// \param E  Edge to print.
void SampleProfileLoader::printEdgeWeight(raw_ostream &OS, Edge E) {
  OS << "weight[" << E.first->getName() << "->" << E.second->getName()
     << "]: " << EdgeWeights[E] << "\n";
}

/// Print the equivalence class of block \p BB on stream \p OS.
///
/// \param OS  Stream to emit the output to.
/// \param BB  Block to print.
void SampleProfileLoader::printBlockEquivalence(raw_ostream &OS,
                                                const BasicBlock *BB) {
  const BasicBlock *Equiv = EquivalenceClass[BB];
  OS << "equivalence[" << BB->getName()
     << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n";
}

/// Print the weight of block \p BB on stream \p OS.
///
/// \param OS  Stream to emit the output to.
/// \param BB  Block to print.
void SampleProfileLoader::printBlockWeight(raw_ostream &OS,
                                           const BasicBlock *BB) const {
  const auto &I = BlockWeights.find(BB);
  uint64_t W = (I == BlockWeights.end() ? 0 : I->second);
  OS << "weight[" << BB->getName() << "]: " << W << "\n";
}
#endif

/// Get the weight for an instruction.
///
/// The "weight" of an instruction \p Inst is the number of samples
/// collected on that instruction at runtime. To retrieve it, we
/// need to compute the line number of \p Inst relative to the start of its
/// function. We use HeaderLineno to compute the offset. We then
/// look up the samples collected for \p Inst using BodySamples.
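///
/// For example (illustrative numbers), if the function's DISubprogram starts
/// at source line 10 and \p Inst is attributed to line 13, the lookup uses
/// line offset 3 together with the instruction's discriminator.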
///
/// \param Inst Instruction to query.
///
/// \returns the weight of \p Inst.
ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
  const DebugLoc &DLoc = Inst.getDebugLoc();
  if (!DLoc)
    return std::error_code();

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (!FS)
    return std::error_code();

  // Ignore all intrinsics, phi nodes and branch instructions.
  // Branch and phi instructions usually contain debug info from sources
  // outside of their residing basic block, so we ignore them during
  // annotation.
  if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst))
    return std::error_code();

  // If a direct call/invoke instruction is inlined in the profile
  // (findCalleeFunctionSamples returns a non-empty result) but is not inlined
  // here, it means that the inlined callsite has no samples, so the call
  // instruction should have a 0 count.
  if (auto *CB = dyn_cast<CallBase>(&Inst))
    if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
      return 0;

  const DILocation *DIL = DLoc;
  uint32_t LineOffset = FunctionSamples::getOffset(DIL);
  uint32_t Discriminator = DIL->getBaseDiscriminator();
  ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator);
  if (R) {
    bool FirstMark =
        CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get());
    if (FirstMark) {
      ORE->emit([&]() {
        OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
        Remark << "Applied " << ore::NV("NumSamples", *R);
        Remark << " samples from profile (offset: ";
        Remark << ore::NV("LineOffset", LineOffset);
        if (Discriminator) {
          Remark << ".";
          Remark << ore::NV("Discriminator", Discriminator);
        }
        Remark << ")";
        return Remark;
      });
    }
    LLVM_DEBUG(dbgs() << "    " << DLoc.getLine() << "."
                      << DIL->getBaseDiscriminator() << ":" << Inst
                      << " (line offset: " << LineOffset << "."
                      << DIL->getBaseDiscriminator() << " - weight: " << R.get()
                      << ")\n");
  }
  return R;
}

/// Compute the weight of a basic block.
///
/// The weight of basic block \p BB is the maximum weight of all the
/// instructions in BB.
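///
/// For example (illustrative numbers), a block whose instructions carry
/// weights {0, 7, 3} gets weight 7.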
///
/// \param BB The basic block to query.
///
/// \returns the weight for \p BB.
ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) {
  uint64_t Max = 0;
  bool HasWeight = false;
  for (auto &I : BB->getInstList()) {
    const ErrorOr<uint64_t> &R = getInstWeight(I);
    if (R) {
      Max = std::max(Max, R.get());
      HasWeight = true;
    }
  }
  return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code();
}

/// Compute and store the weights of every basic block.
///
/// This populates the BlockWeights map by computing
/// the weights of every basic block in the CFG.
///
/// \param F The function to query.
bool SampleProfileLoader::computeBlockWeights(Function &F) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Block weights\n");
  for (const auto &BB : F) {
    ErrorOr<uint64_t> Weight = getBlockWeight(&BB);
    if (Weight) {
      BlockWeights[&BB] = Weight.get();
      VisitedBlocks.insert(&BB);
      Changed = true;
    }
    LLVM_DEBUG(printBlockWeight(dbgs(), &BB));
  }

  return Changed;
}

/// Get the FunctionSamples for a call instruction.
///
/// The FunctionSamples of a call/invoke instruction \p Inst is the inlined
/// instance that the call instruction is calling into. It contains
/// all the samples that reside in that inlined instance. We first find the
/// inlined instance that the call instruction comes from, then we
/// traverse its children to find the callsite with the matching
/// location.
///
/// \param Inst Call/Invoke instruction to query.
///
/// \returns The FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findCalleeFunctionSamples(const CallBase &Inst) const {
  const DILocation *DIL = Inst.getDebugLoc();
  if (!DIL) {
    return nullptr;
  }

  StringRef CalleeName;
  if (Function *Callee = Inst.getCalledFunction())
    CalleeName = Callee->getName();

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return nullptr;

  return FS->findFunctionSamplesAt(LineLocation(FunctionSamples::getOffset(DIL),
                                                DIL->getBaseDiscriminator()),
                                   CalleeName, Reader->getRemapper());
}

/// Returns a vector of FunctionSamples that are the indirect call targets
/// of \p Inst. The vector is sorted in descending order of entry sample
/// counts. Stores the total call count of the indirect call in \p Sum.
std::vector<const FunctionSamples *>
SampleProfileLoader::findIndirectCallFunctionSamples(
    const Instruction &Inst, uint64_t &Sum) const {
  const DILocation *DIL = Inst.getDebugLoc();
  std::vector<const FunctionSamples *> R;

  if (!DIL) {
    return R;
  }

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return R;

  uint32_t LineOffset = FunctionSamples::getOffset(DIL);
  uint32_t Discriminator = DIL->getBaseDiscriminator();

  auto T = FS->findCallTargetMapAt(LineOffset, Discriminator);
  Sum = 0;
  if (T)
    for (const auto &T_C : T.get())
      Sum += T_C.second;
  if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(LineLocation(
          FunctionSamples::getOffset(DIL), DIL->getBaseDiscriminator()))) {
    if (M->empty())
      return R;
    for (const auto &NameFS : *M) {
      Sum += NameFS.second.getEntrySamples();
      R.push_back(&NameFS.second);
    }
    llvm::sort(R, [](const FunctionSamples *L, const FunctionSamples *R) {
      if (L->getEntrySamples() != R->getEntrySamples())
        return L->getEntrySamples() > R->getEntrySamples();
      return FunctionSamples::getGUID(L->getName()) <
             FunctionSamples::getGUID(R->getName());
    });
  }
  return R;
}

/// Get the FunctionSamples for an instruction.
///
/// The FunctionSamples of an instruction \p Inst is the inlined instance
/// from which that instruction comes. We traverse the inline stack
/// of that instruction, and match it with the tree nodes in the profile.
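///
/// For example (illustrative names), if \p Inst's debug location shows it
/// was inlined from bar() into foo(), we look it up in foo's profile under
/// the callsite record that corresponds to the inlined bar() instance.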
///
/// \param Inst Instruction to query.
///
/// \returns the FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
  const DILocation *DIL = Inst.getDebugLoc();
  if (!DIL)
    return Samples;

  auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
  if (it.second)
    it.first->second = Samples->findFunctionSamples(DIL, Reader->getRemapper());
  return it.first->second;
}

bool SampleProfileLoader::inlineCallInstruction(CallBase &CB) {
  if (ExternalInlineAdvisor) {
    auto Advice = ExternalInlineAdvisor->getAdvice(CB);
    if (!Advice->isInliningRecommended()) {
      Advice->recordUnattemptedInlining();
      return false;
    }
    // Dummy record, we don't use it for replay.
    Advice->recordInlining();
  }

  Function *CalledFunction = CB.getCalledFunction();
  assert(CalledFunction);
  DebugLoc DLoc = CB.getDebugLoc();
  BasicBlock *BB = CB.getParent();
  InlineParams Params = getInlineParams();
  Params.ComputeFullInlineCost = true;
  // Check if there is anything in the reachable portion of the callee at
  // this callsite that makes this inlining potentially illegal. We need to
  // set ComputeFullInlineCost, otherwise getInlineCost may return early
  // when the cost exceeds the threshold without checking all of the IR in
  // the callee. The actual cost does not matter because we only check
  // isNever() to see if it is legal to inline the callsite.
  InlineCost Cost =
      getInlineCost(CB, Params, GetTTI(*CalledFunction), GetAC, GetTLI);
  if (Cost.isNever()) {
    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineFail", DLoc, BB)
              << "incompatible inlining");
    return false;
  }
  InlineFunctionInfo IFI(nullptr, GetAC);
  if (InlineFunction(CB, IFI).isSuccess()) {
    // The call to InlineFunction erases I, so we can't pass it here.
    emitInlinedInto(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(), Cost,
                    true, CSINLINE_DEBUG);
    return true;
  }
  return false;
}

bool SampleProfileLoader::shouldInlineColdCallee(CallBase &CallInst) {
  if (!ProfileSizeInline)
    return false;

  Function *Callee = CallInst.getCalledFunction();
  if (Callee == nullptr)
    return false;

  InlineCost Cost = getInlineCost(CallInst, getInlineParams(), GetTTI(*Callee),
                                  GetAC, GetTLI);

  return Cost.getCost() <= SampleColdCallSiteThreshold;
}

void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates(
    const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
    bool Hot) {
  for (auto I : Candidates) {
    Function *CalledFunction = I->getCalledFunction();
    if (CalledFunction) {
      ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineAttempt",
                                           I->getDebugLoc(), I->getParent())
                << "previous inlining reattempted for "
                << (Hot ? "hotness: '" : "size: '")
                << ore::NV("Callee", CalledFunction) << "' into '"
                << ore::NV("Caller", &F) << "'");
    }
  }
}

/// Iteratively inline hot callsites of a function.
///
/// Iteratively traverse all callsites of the function \p F, and check whether
/// the corresponding inlined instance exists and is hot in the profile. If
/// it is hot enough, inline the callsite and add the new callsites of the
/// callee into the caller. If the call is an indirect call, first promote
/// it to a direct call. Each indirect call is limited to a single target.
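///
/// For example (illustrative names), if a hot callsite to foo() is inlined
/// and foo's inlined body itself contains a hot callsite to bar(), that new
/// callsite is considered in the next iteration of the outer loop below.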
///
/// \param F function to perform iterative inlining.
/// \param InlinedGUIDs a set to be updated to include all GUIDs that are
///     inlined in the profiled binary.
///
/// \returns True if any inlining happened.
bool SampleProfileLoader::inlineHotFunctions(
    Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  DenseSet<Instruction *> PromotedInsns;

  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // Profile symbol list is ignored when profile-sample-accurate is on.
  assert((!ProfAccForSymsInList ||
          (!ProfileSampleAccurate &&
           !F.hasFnAttribute("profile-sample-accurate"))) &&
         "ProfAccForSymsInList should be false when profile-sample-accurate "
         "is enabled");

  DenseMap<CallBase *, const FunctionSamples *> localNotInlinedCallSites;
  bool Changed = false;
  while (true) {
    bool LocalChanged = false;
    SmallVector<CallBase *, 10> CIS;
    for (auto &BB : F) {
      bool Hot = false;
      SmallVector<CallBase *, 10> AllCandidates;
      SmallVector<CallBase *, 10> ColdCandidates;
      for (auto &I : BB.getInstList()) {
        const FunctionSamples *FS = nullptr;
        if (auto *CB = dyn_cast<CallBase>(&I)) {
          if (!isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(*CB))) {
            assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) &&
                   "GUIDToFuncNameMap has to be populated");
            AllCandidates.push_back(CB);
            if (FS->getEntrySamples() > 0)
              localNotInlinedCallSites.try_emplace(CB, FS);
            if (callsiteIsHot(FS, PSI))
              Hot = true;
            else if (shouldInlineColdCallee(*CB))
              ColdCandidates.push_back(CB);
          }
        }
      }
      if (Hot || ExternalInlineAdvisor) {
        CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end());
        emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true);
      } else {
        CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end());
        emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false);
      }
    }
    for (CallBase *I : CIS) {
      Function *CalledFunction = I->getCalledFunction();
      // Do not inline recursive calls.
      if (CalledFunction == &F)
        continue;
      if (I->isIndirectCall()) {
        if (PromotedInsns.count(I))
          continue;
        uint64_t Sum;
        for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) {
          if (IsThinLTOPreLink) {
            FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
                                     PSI->getOrCompHotCountThreshold());
            continue;
          }
          if (!callsiteIsHot(FS, PSI))
            continue;

          const char *Reason = "Callee function not available";
          // R->getValue() != &F is to prevent promoting a recursive call.
          // If it is a recursive call, we do not inline it as it could bloat
          // the code exponentially. There are ways to handle this better,
          // e.g. clone the caller first, and inline the cloned caller if it
          // is recursive. As LLVM does not inline recursive calls, we will
          // simply ignore it instead of handling it explicitly.
          auto CalleeFunctionName = FS->getFuncName();
          auto R = SymbolMap.find(CalleeFunctionName);
          if (R != SymbolMap.end() && R->getValue() &&
              !R->getValue()->isDeclaration() &&
              R->getValue()->getSubprogram() &&
              R->getValue()->hasFnAttribute("use-sample-profile") &&
              R->getValue() != &F &&
              isLegalToPromote(*I, R->getValue(), &Reason)) {
            uint64_t C = FS->getEntrySamples();
            auto &DI =
                pgo::promoteIndirectCall(*I, R->getValue(), C, Sum, false, ORE);
            Sum -= C;
            PromotedInsns.insert(I);
            // If profile mismatches, we should not attempt to inline DI.
            if ((isa<CallInst>(DI) || isa<InvokeInst>(DI)) &&
                inlineCallInstruction(cast<CallBase>(DI))) {
              localNotInlinedCallSites.erase(I);
              LocalChanged = true;
              ++NumCSInlined;
            }
          } else {
            LLVM_DEBUG(dbgs()
                       << "\nFailed to promote indirect call to "
                       << CalleeFunctionName << " because " << Reason << "\n");
          }
        }
      } else if (CalledFunction && CalledFunction->getSubprogram() &&
                 !CalledFunction->isDeclaration()) {
        if (inlineCallInstruction(*I)) {
          localNotInlinedCallSites.erase(I);
          LocalChanged = true;
          ++NumCSInlined;
        }
      } else if (IsThinLTOPreLink) {
        findCalleeFunctionSamples(*I)->findInlinedFunctions(
            InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
      }
    }
    if (LocalChanged) {
      Changed = true;
    } else {
      break;
    }
  }

  // Accumulate information about the callsites that were not inlined.
  for (const auto &Pair : localNotInlinedCallSites) {
    CallBase *I = Pair.getFirst();
    Function *Callee = I->getCalledFunction();
    if (!Callee || Callee->isDeclaration())
      continue;

    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "NotInline",
                                         I->getDebugLoc(), I->getParent())
              << "previous inlining not repeated: '"
              << ore::NV("Callee", Callee) << "' into '"
              << ore::NV("Caller", &F) << "'");

    ++NumCSNotInlined;
    const FunctionSamples *FS = Pair.getSecond();
    if (FS->getTotalSamples() == 0 && FS->getEntrySamples() == 0) {
      continue;
    }

    if (ProfileMergeInlinee) {
      // A function call can be replicated by optimizations like callsite
      // splitting or jump threading, and the replicas end up sharing the
      // same nested callee profile instead of slicing the original inlinee's
      // profile. We want to do the merge exactly once, so we filter out
      // callee profiles with a non-zero head sample count.
      if (FS->getHeadSamples() == 0) {
        // Use entry samples as head samples during the merge, as inlinees
        // don't have head samples.
        const_cast<FunctionSamples *>(FS)->addHeadSamples(
            FS->getEntrySamples());

        // Note that we have to do the merge right after processing function.
        // This allows OutlineFS's profile to be used for annotation during
        // top-down processing of functions' annotation.
        FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee);
        OutlineFS->merge(*FS);
      } else
        assert(FS->getHeadSamples() == FS->getEntrySamples() &&
               "Expect same head and entry sample counts for profiles already "
               "merged.");
    } else {
      auto pair =
          notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
      pair.first->second.entryCount += FS->getEntrySamples();
    }
  }
  return Changed;
}

/// Find equivalence classes for the given block.
///
/// This finds all the blocks that are guaranteed to execute the same
/// number of times as \p BB1. To do this, it traverses all the
/// descendants of \p BB1 in the dominator or post-dominator tree.
///
/// A block BB2 will be in the same equivalence class as \p BB1 if
/// the following holds:
///
/// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2
///    is a descendant of \p BB1 in the dominator tree, then BB2 should
///    dominate BB1 in the post-dominator tree.
///
/// 2- Both BB2 and \p BB1 must be in the same loop.
///
/// For every block BB2 that meets those two requirements, we set BB2's
/// equivalence class to \p BB1.
///
/// \param BB1  Block to check.
/// \param Descendants  Descendants of \p BB1 in either the dom or pdom tree.
/// \param DomTree  Opposite dominator tree. If \p Descendants is filled
///                 with blocks from \p BB1's dominator tree, then
///                 this is the post-dominator tree, and vice versa.
template <bool IsPostDom>
void SampleProfileLoader::findEquivalencesFor(
    BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
    DominatorTreeBase<BasicBlock, IsPostDom> *DomTree) {
  const BasicBlock *EC = EquivalenceClass[BB1];
  uint64_t Weight = BlockWeights[EC];
  for (const auto *BB2 : Descendants) {
    bool IsDomParent = DomTree->dominates(BB2, BB1);
    bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2);
    if (BB1 != BB2 && IsDomParent && IsInSameLoop) {
      EquivalenceClass[BB2] = EC;
      // If BB2 is visited, then the entire EC should be marked as visited.
      if (VisitedBlocks.count(BB2)) {
        VisitedBlocks.insert(EC);
      }

      // If BB2 is heavier than BB1, make BB2 have the same weight
      // as BB1.
      //
      // Note that we don't worry about the opposite situation here
      // (when BB2 is lighter than BB1). We will deal with this
      // during the propagation phase. Right now, we just want to
      // make sure that BB1 has the largest weight of all the
      // members of its equivalence set.
      Weight = std::max(Weight, BlockWeights[BB2]);
    }
  }
  if (EC == &EC->getParent()->getEntryBlock()) {
    BlockWeights[EC] = Samples->getHeadSamples() + 1;
  } else {
    BlockWeights[EC] = Weight;
  }
}

/// Find equivalence classes.
///
/// Since samples may be missing from blocks, we can fill in the gaps by setting
/// the weights of all the blocks in the same equivalence class to the same
/// weight. To compute the concept of equivalence, we use dominance and loop
/// information. Two blocks B1 and B2 are in the same equivalence class if B1
/// dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// \param F The function to query.
void SampleProfileLoader::findEquivalenceClasses(Function &F) {
  SmallVector<BasicBlock *, 8> DominatedBBs;
  LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n");
  // Find equivalence sets based on dominance and post-dominance information.
  for (auto &BB : F) {
    BasicBlock *BB1 = &BB;

    // Compute BB1's equivalence class once.
    if (EquivalenceClass.count(BB1)) {
      LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
      continue;
    }

    // By default, blocks are in their own equivalence class.
    EquivalenceClass[BB1] = BB1;

    // Traverse all the blocks dominated by BB1. We are looking for
    // every basic block BB2 such that:
    //
    // 1- BB1 dominates BB2.
    // 2- BB2 post-dominates BB1.
    // 3- BB1 and BB2 are in the same loop nest.
    //
    // If all those conditions hold, it means that BB2 is executed
    // as many times as BB1, so they are placed in the same equivalence
    // class by making BB2's equivalence class be BB1.
    DominatedBBs.clear();
    DT->getDescendants(BB1, DominatedBBs);
    findEquivalencesFor(BB1, DominatedBBs, PDT.get());

    LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
  }

  // Assign weights to equivalence classes.
  //
  // All the basic blocks in the same equivalence class will execute
  // the same number of times. Since we know that the head block in
  // each equivalence class has the largest weight, assign that weight
  // to all the blocks in that equivalence class.
  LLVM_DEBUG(
      dbgs() << "\nAssign the same weight to all blocks in the same class\n");
  for (auto &BI : F) {
    const BasicBlock *BB = &BI;
    const BasicBlock *EquivBB = EquivalenceClass[BB];
    if (BB != EquivBB)
      BlockWeights[BB] = BlockWeights[EquivBB];
    LLVM_DEBUG(printBlockWeight(dbgs(), BB));
  }
}

/// Visit the given edge to decide if it has a valid weight.
///
/// If \p E has not been visited before, we copy it to \p UnknownEdge
/// and increment the count of unknown edges.
///
/// \param E  Edge to visit.
/// \param NumUnknownEdges  Current number of unknown edges.
/// \param UnknownEdge  Set if E has not been visited before.
///
/// \returns E's weight, if known. Otherwise, return 0.
uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges,
                                        Edge *UnknownEdge) {
  if (!VisitedEdges.count(E)) {
    (*NumUnknownEdges)++;
    *UnknownEdge = E;
    return 0;
  }

  return EdgeWeights[E];
}

/// Propagate weights through incoming/outgoing edges.
///
/// If the weight of a basic block is known, and there is only one edge
/// with an unknown weight, we can calculate the weight of that edge.
///
/// Similarly, if all the edges have a known count, we can calculate the
/// count of the basic block, if needed.
///
/// \param F  Function to process.
/// \param UpdateBlockCount  Whether we should update basic block counts that
///                          have already been annotated.
///
/// \returns  True if new weights were assigned to edges or blocks.
bool SampleProfileLoader::propagateThroughEdges(Function &F,
                                                bool UpdateBlockCount) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
  for (const auto &BI : F) {
    const BasicBlock *BB = &BI;
    const BasicBlock *EC = EquivalenceClass[BB];

    // Visit all the predecessor and successor edges to determine
    // which ones have a weight assigned already. Note that it doesn't
    // matter that we only keep track of a single unknown edge. The
    // only case we are interested in handling is when only a single
    // edge is unknown (see setEdgeOrBlockWeight).
    for (unsigned i = 0; i < 2; i++) {
      uint64_t TotalWeight = 0;
      unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
      Edge UnknownEdge, SelfReferentialEdge, SingleEdge;

      if (i == 0) {
        // First, visit all predecessor edges.
        NumTotalEdges = Predecessors[BB].size();
        for (auto *Pred : Predecessors[BB]) {
          Edge E = std::make_pair(Pred, BB);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
          if (E.first == E.second)
            SelfReferentialEdge = E;
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(Predecessors[BB][0], BB);
        }
      } else {
        // On the second round, visit all successor edges.
        NumTotalEdges = Successors[BB].size();
        for (auto *Succ : Successors[BB]) {
          Edge E = std::make_pair(BB, Succ);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(BB, Successors[BB][0]);
        }
      }

      // After visiting all the edges, there are three cases that we
      // can handle immediately:
      //
      // - All the edge weights are known (i.e., NumUnknownEdges == 0).
      //   In this case, we simply check that the sum of all the edges
      //   is the same as BB's weight. If not, we change BB's weight
      //   to match. Additionally, if BB had not been visited before,
      //   we mark it visited.
      //
      // - Only one edge is unknown and BB has already been visited.
      //   In this case, we can compute the weight of the edge by
      //   subtracting the sum of all the known edge weights from the
      //   block weight. If the known edges weigh more than BB, then
      //   the weight of the last remaining edge is set to zero.
      //
      // - There exists a self-referential edge and the weight of BB is
      //   known. In this case, the weight of that edge can be derived from
      //   BB's weight: we add up all the other known edges and set the
      //   weight of the self-referential edge as we did in the previous case.
      //
      // In any other case, we must continue iterating. Eventually,
      // all edges will get a weight, or iteration will stop when
      // it reaches SampleProfileMaxPropagateIterations.
      if (NumUnknownEdges <= 1) {
        uint64_t &BBWeight = BlockWeights[EC];
        if (NumUnknownEdges == 0) {
          if (!VisitedBlocks.count(EC)) {
            // If we already know the weight of all edges, the weight of the
            // basic block can be computed. It should be no larger than the sum
            // of all edge weights.
            if (TotalWeight > BBWeight) {
              BBWeight = TotalWeight;
              Changed = true;
              LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName()
                                << " known. Set weight for block: ";
                         printBlockWeight(dbgs(), BB););
            }
          } else if (NumTotalEdges == 1 &&
                     EdgeWeights[SingleEdge] < BlockWeights[EC]) {
            // If there is only one edge for the visited basic block, use the
            // block weight to adjust edge weight if edge weight is smaller.
            EdgeWeights[SingleEdge] = BlockWeights[EC];
            Changed = true;
          }
        } else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) {
          // If there is a single unknown edge and the block has been
          // visited, then we can compute E's weight.
          if (BBWeight >= TotalWeight)
            EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
          else
            EdgeWeights[UnknownEdge] = 0;
          const BasicBlock *OtherEC;
          if (i == 0)
            OtherEC = EquivalenceClass[UnknownEdge.first];
          else
            OtherEC = EquivalenceClass[UnknownEdge.second];
          // Edge weights should never exceed the weights of the blocks they
          // connect.
          if (VisitedBlocks.count(OtherEC) &&
              EdgeWeights[UnknownEdge] > BlockWeights[OtherEC])
            EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
          VisitedEdges.insert(UnknownEdge);
          Changed = true;
          LLVM_DEBUG(dbgs() << "Set weight for edge: ";
                     printEdgeWeight(dbgs(), UnknownEdge));
        }
      } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
        // If a block's weight is 0, all its incoming/outgoing edges should
        // have weight 0.
        if (i == 0) {
          for (auto *Pred : Predecessors[BB]) {
            Edge E = std::make_pair(Pred, BB);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        } else {
          for (auto *Succ : Successors[BB]) {
            Edge E = std::make_pair(BB, Succ);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        }
      } else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) {
        uint64_t &BBWeight = BlockWeights[BB];
        // We have a self-referential edge and the weight of BB is known.
        if (BBWeight >= TotalWeight)
          EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
        else
          EdgeWeights[SelfReferentialEdge] = 0;
        VisitedEdges.insert(SelfReferentialEdge);
        Changed = true;
        LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: ";
                   printEdgeWeight(dbgs(), SelfReferentialEdge));
      }
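      // On the final propagation pass (UpdateBlockCount == true), a block
      // whose weight is still unknown takes the sum of its known edge weights
      // computed in this round.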
      if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
        BlockWeights[EC] = TotalWeight;
        VisitedBlocks.insert(EC);
        Changed = true;
      }
    }
  }

  return Changed;
}

/// Build in/out edge lists for each basic block in the CFG.
///
/// We are interested in unique edges. If a block B1 has multiple
/// edges to another block B2, we only add a single B1->B2 edge.
void SampleProfileLoader::buildEdges(Function &F) {
  for (auto &BI : F) {
    BasicBlock *B1 = &BI;

    // Add predecessors for B1.
    SmallPtrSet<BasicBlock *, 16> Visited;
    if (!Predecessors[B1].empty())
      llvm_unreachable("Found a stale predecessors list in a basic block.");
    for (pred_iterator PI = pred_begin(B1), PE = pred_end(B1); PI != PE; ++PI) {
      BasicBlock *B2 = *PI;
      if (Visited.insert(B2).second)
        Predecessors[B1].push_back(B2);
    }

    // Add successors for B1.
    Visited.clear();
    if (!Successors[B1].empty())
      llvm_unreachable("Found a stale successors list in a basic block.");
    for (succ_iterator SI = succ_begin(B1), SE = succ_end(B1); SI != SE; ++SI) {
      BasicBlock *B2 = *SI;
      if (Visited.insert(B2).second)
        Successors[B1].push_back(B2);
    }
  }
}

/// Returns the entries of CallTargetMap \p M sorted by count in descending
/// order.
static SmallVector<InstrProfValueData, 2> GetSortedValueDataFromCallTargets(
    const SampleRecord::CallTargetMap &M) {
  SmallVector<InstrProfValueData, 2> R;
  for (const auto &I : SampleRecord::SortCallTargets(M)) {
    R.emplace_back(
        InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
  }
  return R;
}

/// Propagate weights into edges
///
/// The following rules are applied to every block BB in the CFG:
///
/// - If BB has a single predecessor/successor, then the weight
///   of that edge is the weight of the block.
///
/// - If all incoming or outgoing edges are known except one, and the
///   weight of the block is already known, the weight of the unknown
///   edge will be the weight of the block minus the sum of all the known
///   edges. If the sum of all the known edges is larger than BB's weight,
///   we set the unknown edge weight to zero.
///
/// - If there is a self-referential edge, and the weight of the block is
///   known, the weight for that edge is set to the weight of the block
///   minus the weight of the other incoming edges to that block (if
///   known).
void SampleProfileLoader::propagateWeights(Function &F) {
  bool Changed = true;
  unsigned I = 0;

  // If BB weight is larger than its corresponding loop's header BB weight,
  // use the BB weight to replace the loop header BB weight.
  for (auto &BI : F) {
    BasicBlock *BB = &BI;
    Loop *L = LI->getLoopFor(BB);
    if (!L) {
      continue;
    }
    BasicBlock *Header = L->getHeader();
    if (Header && BlockWeights[BB] > BlockWeights[Header]) {
      BlockWeights[Header] = BlockWeights[BB];
    }
  }

  // Before propagation starts, build, for each block, a list of
  // unique predecessors and successors. This is necessary to handle
  // identical edges in multiway branches. Since we visit all blocks and all
  // edges of the CFG, it is cleaner to build these lists once at the start
  // of the pass.
  buildEdges(F);

  // Propagate until we converge or we go past the iteration limit.
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, false);
  }

  // The first propagation pass propagates BB counts from annotated BBs to
  // unknown BBs. The 2nd propagation pass resets edge weights and uses all BB
  // weights to propagate edge weights.
  VisitedEdges.clear();
  Changed = true;
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, false);
  }

  // The 3rd propagation pass allows adjusting annotated BB weights that are
  // obviously wrong.
  Changed = true;
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, true);
  }

  // Generate MD_prof metadata for every branch instruction using the
  // edge weights computed during propagation.
  LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
  LLVMContext &Ctx = F.getContext();
  MDBuilder MDB(Ctx);
  for (auto &BI : F) {
    BasicBlock *BB = &BI;

    if (BlockWeights[BB]) {
      for (auto &I : BB->getInstList()) {
        if (!isa<CallInst>(I) && !isa<InvokeInst>(I))
          continue;
        if (!cast<CallBase>(I).getCalledFunction()) {
          const DebugLoc &DLoc = I.getDebugLoc();
          if (!DLoc)
            continue;
          const DILocation *DIL = DLoc;
          uint32_t LineOffset = FunctionSamples::getOffset(DIL);
          uint32_t Discriminator = DIL->getBaseDiscriminator();

          const FunctionSamples *FS = findFunctionSamples(I);
          if (!FS)
            continue;
          auto T = FS->findCallTargetMapAt(LineOffset, Discriminator);
          if (!T || T.get().empty())
            continue;
          SmallVector<InstrProfValueData, 2> SortedCallTargets =
              GetSortedValueDataFromCallTargets(T.get());
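          // findIndirectCallFunctionSamples computes Sum, the total number of
          // samples attributed to the targets of this indirect call; Sum is
          // then recorded as the total count of the value-profile site.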
          uint64_t Sum;
          findIndirectCallFunctionSamples(I, Sum);
          annotateValueSite(*I.getParent()->getParent()->getParent(), I,
                            SortedCallTargets, Sum, IPVK_IndirectCallTarget,
                            SortedCallTargets.size());
        } else if (!isa<IntrinsicInst>(&I)) {
          I.setMetadata(LLVMContext::MD_prof,
                        MDB.createBranchWeights(
                            {static_cast<uint32_t>(BlockWeights[BB])}));
        }
      }
    }
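    // Annotate the terminator with branch weights. Only conditional branches
    // and switches (terminators with more than one successor) are considered.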
    Instruction *TI = BB->getTerminator();
    if (TI->getNumSuccessors() == 1)
      continue;
    if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
      continue;

    DebugLoc BranchLoc = TI->getDebugLoc();
    LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line "
                      << ((BranchLoc) ? Twine(BranchLoc.getLine())
                                      : Twine("<UNKNOWN LOCATION>"))
                      << ".\n");
    SmallVector<uint32_t, 4> Weights;
    uint32_t MaxWeight = 0;
    Instruction *MaxDestInst;
    for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
      BasicBlock *Succ = TI->getSuccessor(I);
      Edge E = std::make_pair(BB, Succ);
      uint64_t Weight = EdgeWeights[E];
      LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
      // Use uint32_t saturated arithmetic to adjust the incoming weights,
      // if needed. Sample counts in profiles are 64-bit unsigned values,
      // but internally branch weights are expressed as 32-bit values.
      if (Weight > std::numeric_limits<uint32_t>::max()) {
        LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
        Weight = std::numeric_limits<uint32_t>::max();
      }
      // Weight is incremented by one to avoid propagation errors introduced
      // by 0 weights.
      Weights.push_back(static_cast<uint32_t>(Weight + 1));
      if (Weight != 0) {
        if (Weight > MaxWeight) {
          MaxWeight = Weight;
          MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime();
        }
      }
    }

    misexpect::verifyMisExpect(TI, Weights, TI->getContext());

    uint64_t TempWeight;
    // Only set weights if there is at least one non-zero weight.
    // In any other case, let the analyzer set weights.
    // Do not set weights if the weights are present. In ThinLTO, the profile
    // annotation is done twice. If the first annotation already set the
    // weights, the second pass does not need to set it.
    if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) {
      LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
      TI->setMetadata(LLVMContext::MD_prof,
                      MDB.createBranchWeights(Weights));
      ORE->emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
               << "most popular destination for conditional branches at "
               << ore::NV("CondBranchesLoc", BranchLoc);
      });
    } else {
      LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
    }
  }
}

/// Get the line number for the function header.
///
/// This looks up function \p F in the current compilation unit and
/// retrieves the line number where the function is defined. This is
/// line 0 for all the samples read from the profile file. Every line
/// number is relative to this line.
///
/// \param F  Function object to query.
///
/// \returns the line number where \p F is defined. If it returns 0,
///          it means that there is no debug information available for \p F.
unsigned SampleProfileLoader::getFunctionLoc(Function &F) {
  if (DISubprogram *S = F.getSubprogram())
    return S->getLine();

  if (NoWarnSampleUnused)
    return 0;

  // If the start of \p F is missing, emit a diagnostic to inform the user
  // about the missed opportunity.
  F.getContext().diagnose(DiagnosticInfoSampleProfile(
      "No debug information found in function " + F.getName() +
          ": Function profile not used",
      DS_Warning));
  return 0;
}

void SampleProfileLoader::computeDominanceAndLoopInfo(Function &F) {
  DT.reset(new DominatorTree);
  DT->recalculate(F);

  PDT.reset(new PostDominatorTree(F));

  LI.reset(new LoopInfo);
  LI->analyze(*DT);
}

/// Generate branch weight metadata for all branches in \p F.
///
/// Branch weights are computed out of instruction samples using a
/// propagation heuristic. Propagation proceeds in 3 phases:
///
/// 1- Assignment of block weights. All the basic blocks in the function
///    are initially assigned the same weight as their most frequently
///    executed instruction.
///
/// 2- Creation of equivalence classes. Since samples may be missing from
///    blocks, we can fill in the gaps by setting the weights of all the
///    blocks in the same equivalence class to the same weight. To compute
///    the concept of equivalence, we use dominance and loop information.
///    Two blocks B1 and B2 are in the same equivalence class if B1
///    dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// 3- Propagation of block weights into edges. This uses a simple
///    propagation heuristic. The following rules are applied to every
///    block BB in the CFG:
///
///    - If BB has a single predecessor/successor, then the weight
///      of that edge is the weight of the block.
///
///    - If all the edges are known except one, and the weight of the
///      block is already known, the weight of the unknown edge will
///      be the weight of the block minus the sum of all the known
///      edges. If the sum of all the known edges is larger than BB's weight,
///      we set the unknown edge weight to zero.
///
///    - If there is a self-referential edge, and the weight of the block is
///      known, the weight for that edge is set to the weight of the block
///      minus the weight of the other incoming edges to that block (if
///      known).
///
/// Since this propagation is not guaranteed to finalize for every CFG, we
/// only allow it to proceed for a limited number of iterations (controlled
/// by -sample-profile-max-propagate-iterations).
///
/// FIXME: Try to replace this propagation heuristic with a scheme
/// that is guaranteed to finalize. A work-list approach similar to
/// the standard value propagation algorithm used by SSA-CCP might
/// work here.
///
/// Once all the branch weights are computed, we emit the MD_prof
/// metadata on BB using the computed values for each of its branches.
///
/// \param F The function to query.
///
/// \returns true if \p F was modified. Returns false, otherwise.
bool SampleProfileLoader::emitAnnotations(Function &F) {
  bool Changed = false;

  if (getFunctionLoc(F) == 0)
    return false;

  LLVM_DEBUG(dbgs() << "Line number for the first instruction in "
                    << F.getName() << ": " << getFunctionLoc(F) << "\n");

  DenseSet<GlobalValue::GUID> InlinedGUIDs;
  Changed |= inlineHotFunctions(F, InlinedGUIDs);

  // Compute basic block weights.
  Changed |= computeBlockWeights(F);

  if (Changed) {
    // Add an entry count to the function using the samples gathered at the
    // function entry. Also record the GUIDs of functions inlined in the
    // profiled binary. ThinLink uses them to perform correct liveness
    // analysis and to make the IR match the profiled binary before
    // annotation.
    F.setEntryCount(
        ProfileCount(Samples->getHeadSamples() + 1, Function::PCT_Real),
        &InlinedGUIDs);

    // Compute dominance and loop info needed for propagation.
    computeDominanceAndLoopInfo(F);

    // Find equivalence classes.
    findEquivalenceClasses(F);

    // Propagate weights to all edges.
    propagateWeights(F);
  }

  // If coverage checking was requested, compute it now.
  if (SampleProfileRecordCoverage) {
    unsigned Used = CoverageTracker.countUsedRecords(Samples, PSI);
    unsigned Total = CoverageTracker.countBodyRecords(Samples, PSI);
    unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
    if (Coverage < SampleProfileRecordCoverage) {
      F.getContext().diagnose(DiagnosticInfoSampleProfile(
          F.getSubprogram()->getFilename(), getFunctionLoc(F),
          Twine(Used) + " of " + Twine(Total) + " available profile records (" +
              Twine(Coverage) + "%) were applied",
          DS_Warning));
    }
  }

  if (SampleProfileSampleCoverage) {
    uint64_t Used = CoverageTracker.getTotalUsedSamples();
    uint64_t Total = CoverageTracker.countBodySamples(Samples, PSI);
    unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
    if (Coverage < SampleProfileSampleCoverage) {
      F.getContext().diagnose(DiagnosticInfoSampleProfile(
          F.getSubprogram()->getFilename(), getFunctionLoc(F),
          Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
              Twine(Coverage) + "%) were applied",
          DS_Warning));
    }
  }
  return Changed;
}

char SampleProfileLoaderLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
                      "Sample Profile loader", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
                    "Sample Profile loader", false, false)

std::vector<Function *>
SampleProfileLoader::buildFunctionOrder(Module &M, CallGraph *CG) {
  std::vector<Function *> FunctionOrderList;
  FunctionOrderList.reserve(M.size());

  if (!ProfileTopDownLoad || CG == nullptr) {
    if (ProfileMergeInlinee) {
      // Disable ProfileMergeInlinee if the profile is not loaded in top-down
      // order, because the profile for a function may then be used to annotate
      // its outline copy before the profiles of its non-inlined inline
      // instances have been merged into it, which is not how
      // ProfileMergeInlinee is supposed to work.
      ProfileMergeInlinee = false;
    }

    for (Function &F : M)
      if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile"))
        FunctionOrderList.push_back(&F);
    return FunctionOrderList;
  }

  assert(&CG->getModule() == &M);
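  // scc_iterator enumerates SCCs bottom-up (callees before callers); the list
  // is reversed below so that callers are processed before their callees.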
  scc_iterator<CallGraph *> CGI = scc_begin(CG);
  while (!CGI.isAtEnd()) {
    for (CallGraphNode *node : *CGI) {
      auto F = node->getFunction();
      if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile"))
        FunctionOrderList.push_back(F);
    }
    ++CGI;
  }

  std::reverse(FunctionOrderList.begin(), FunctionOrderList.end());
  return FunctionOrderList;
}

bool SampleProfileLoader::doInitialization(Module &M,
                                           FunctionAnalysisManager *FAM) {
  auto &Ctx = M.getContext();

  auto ReaderOrErr =
      SampleProfileReader::create(Filename, Ctx, RemappingFilename);
  if (std::error_code EC = ReaderOrErr.getError()) {
    std::string Msg = "Could not open profile: " + EC.message();
    Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
    return false;
  }
  Reader = std::move(ReaderOrErr.get());
  Reader->collectFuncsFrom(M);
  ProfileIsValid = (Reader->read() == sampleprof_error::success);
  PSL = Reader->getProfileSymbolList();

  // When profile-sample-accurate is on, ignore the symbol list.
  ProfAccForSymsInList =
      ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate;
  if (ProfAccForSymsInList) {
    NamesInProfile.clear();
    if (auto NameTable = Reader->getNameTable())
      NamesInProfile.insert(NameTable->begin(), NameTable->end());
  }

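  // If an inline replay file was provided, create a ReplayInlineAdvisor to
  // replay the recorded inlining decisions; drop it if its remarks cannot be
  // loaded.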
  if (FAM && !ProfileInlineReplayFile.empty()) {
    ExternalInlineAdvisor = std::make_unique<ReplayInlineAdvisor>(
        *FAM, Ctx, ProfileInlineReplayFile);
    if (!ExternalInlineAdvisor->areReplayRemarksLoaded())
      ExternalInlineAdvisor.reset();
  }

  return true;
}

ModulePass *llvm::createSampleProfileLoaderPass() {
  return new SampleProfileLoaderLegacyPass();
}

ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
  return new SampleProfileLoaderLegacyPass(Name);
}

bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
                                      ProfileSummaryInfo *_PSI, CallGraph *CG) {
  if (!ProfileIsValid)
    return false;
  GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);

  PSI = _PSI;
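  // Attach the reader's (non-CS) profile summary to the module if one is not
  // already present, and refresh PSI so it picks up the new summary.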
  if (M.getProfileSummary(/* IsCS */ false) == nullptr) {
    M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
                        ProfileSummary::PSK_Sample);
    PSI->refresh();
  }
  // Compute the total number of samples collected in this profile.
  for (const auto &I : Reader->getProfiles())
    TotalCollectedSamples += I.second.getTotalSamples();

  auto Remapper = Reader->getRemapper();
  // Populate the symbol map.
  for (const auto &N_F : M.getValueSymbolTable()) {
    StringRef OrigName = N_F.getKey();
    Function *F = dyn_cast<Function>(N_F.getValue());
    if (F == nullptr)
      continue;
    SymbolMap[OrigName] = F;
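    // Also map the stripped name (everything before the first '.') to F, so
    // profile lookups using the un-suffixed name still resolve.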
    auto pos = OrigName.find('.');
    if (pos != StringRef::npos) {
      StringRef NewName = OrigName.substr(0, pos);
      auto r = SymbolMap.insert(std::make_pair(NewName, F));
      // Failing to insert means there is already an entry in SymbolMap,
      // thus there are multiple functions that are mapped to the same
      // stripped name. In this case of conflicting names, set the value
      // to nullptr to avoid confusion.
      if (!r.second)
        r.first->second = nullptr;
      OrigName = NewName;
    }
    // Insert the remapped names into SymbolMap.
    if (Remapper) {
      if (auto MapName = Remapper->lookUpNameInProfile(OrigName)) {
        if (*MapName == OrigName)
          continue;
        SymbolMap.insert(std::make_pair(*MapName, F));
      }
    }
  }

  bool retval = false;
  for (auto F : buildFunctionOrder(M, CG)) {
    assert(!F->isDeclaration());
    clearFunctionData();
    retval |= runOnFunction(*F, AM);
  }

  // Account for cold calls that were not inlined.
  for (const std::pair<Function *, NotInlinedProfileInfo> &pair :
       notInlinedCallInfo)
    updateProfileCallee(pair.first, pair.second.entryCount);

  return retval;
}

bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) {
  ACT = &getAnalysis<AssumptionCacheTracker>();
  TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
  TLIWP = &getAnalysis<TargetLibraryInfoWrapperPass>();
  ProfileSummaryInfo *PSI =
      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  return SampleLoader.runOnModule(M, nullptr, PSI, nullptr);
}

bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {

  DILocation2SampleMap.clear();
  // By default the entry count is initialized to -1, which will be treated
  // conservatively by getEntryCount as the same as unknown (None). This is
  // to avoid newly added code being treated as cold. If we have samples,
  // this will be overwritten in emitAnnotations.
  uint64_t initialEntryCount = -1;

  ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL;
  if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) {
    // Initialize all the function entry counts to 0. This means all the
    // functions without a profile will be regarded as cold.
    initialEntryCount = 0;
    // profile-sample-accurate is a user assertion which takes precedence over
    // the symbol list. When profile-sample-accurate is on, ignore the symbol
    // list.
    ProfAccForSymsInList = false;
  }

  // PSL -- the profile symbol list includes all the symbols in the sampled
  // binary. If ProfileAccurateForSymsInList is enabled, PSL is used to treat
  // old functions without samples as cold, without having to worry about new
  // and hot functions being mistakenly treated as cold.
  if (ProfAccForSymsInList) {
    // Initialize the entry count to 0 for functions in the list.
    if (PSL->contains(F.getName()))
      initialEntryCount = 0;

    // A function that is in the symbol list but has no samples will be
    // regarded as cold. To minimize the potential negative performance impact
    // of that, we are a little conservative here: if a function shows up in
    // the profile at all -- as an outline function, an inline instance, or a
    // call target -- we treat it as not cold. This handles cases where most
    // callsites of a function are inlined in the sampled binary but not
    // inlined in the current build (because of source code drift, imprecise
    // debug information, or callsites that are individually cold but not cold
    // in aggregate), so an outline function that appears cold in the sampled
    // binary may not actually be cold after the current build.
    StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
    if (NamesInProfile.count(CanonName))
      initialEntryCount = -1;
  }

  F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real));
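  // Obtain an OptimizationRemarkEmitter for F: use the one cached in the new
  // pass manager's analysis results when available, otherwise create a
  // standalone emitter.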
  std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
  if (AM) {
    auto &FAM =
        AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent())
            .getManager();
    ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  } else {
    OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F);
    ORE = OwnedORE.get();
  }
  Samples = Reader->getSamplesFor(F);
  if (Samples && !Samples->empty())
    return emitAnnotations(F);
  return false;
}

PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };
  auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };

  SampleProfileLoader SampleLoader(
      ProfileFileName.empty() ? SampleProfileFile : ProfileFileName,
      ProfileRemappingFileName.empty() ? SampleProfileRemappingFile
                                       : ProfileRemappingFileName,
      IsThinLTOPreLink, GetAssumptionCache, GetTTI, GetTLI);

  if (!SampleLoader.doInitialization(M, &FAM))
    return PreservedAnalyses::all();

  ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M);
  CallGraph &CG = AM.getResult<CallGraphAnalysis>(M);
  if (!SampleLoader.runOnModule(M, &AM, PSI, &CG))
    return PreservedAnalyses::all();

  return PreservedAnalyses::none();
}