// Long-form article — AI Field Guide for Foundation Leaders

const TOC_SECTIONS = [
  { id: 'intro',         num: '—',  label: 'A note to the reader' },
  { id: 'honest',        num: '01', label: 'Where we actually are' },
  { id: 'does-well',     num: '02', label: 'What AI does — and doesn\'t' },
  { id: 'sector',        num: '03', label: 'The foundation sector today' },
  { id: 'grantmaking',   num: '04', label: 'AI for grantmaking' },
  { id: 'management',    num: '05', label: 'AI for management' },
  { id: 'strategy',      num: '06', label: 'AI for philanthropic strategy' },
  { id: 'role',          num: '07', label: 'Rethinking the foundation\'s role' },
  { id: 'governance',    num: '08', label: 'Governance and risk' },
  { id: 'getting-started', num: '09', label: 'Getting started' },
];

function ArticleTOC({ activeId }) {
  const scrollTo = (id) => {
    const el = document.getElementById(id);
    if (el) el.scrollIntoView({ behavior: 'smooth', block: 'start' });
  };
  return (
    <nav className="q-art-toc" aria-label="Table of contents">
      <div className="q-art-toc-inner">
        <div className="q-art-toc-label">In this guide</div>
        <ol className="q-art-toc-list">
          {TOC_SECTIONS.map(s => (
            <li key={s.id} className={'q-art-toc-item' + (activeId === s.id ? ' is-active' : '')}>
              <a href={'#' + s.id} onClick={(e) => { e.preventDefault(); scrollTo(s.id); }}>
                <span className="num">{s.num}</span>
                <span className="text">{s.label}</span>
              </a>
            </li>
          ))}
        </ol>
      </div>
    </nav>
  );
}

function ArticlePage({ setRoute }) {
  const [activeSection, setActiveSection] = React.useState(TOC_SECTIONS[0].id);

  // Scroll-spy: keep the TOC highlight in sync with the section in view.
  React.useEffect(() => {
    const observer = new IntersectionObserver(
      (entries) => {
        entries.forEach(entry => {
          if (entry.isIntersecting) setActiveSection(entry.target.id);
        });
      },
      // Negative margins shrink the observation area to a band near the top
      // of the viewport, so roughly one section "intersects" at a time.
      { rootMargin: '-15% 0px -75% 0px' }
    );
    TOC_SECTIONS.forEach(s => {
      const el = document.getElementById(s.id);
      if (el) observer.observe(el);
    });
    return () => observer.disconnect();
  }, []);

  return (
    <div data-screen-label="12 Article">

      {/* ── Header ── */}
      <section className="q-art-header">
        <SiteNav route="insights" setRoute={setRoute} theme="dark" />
        <div className="q-wrap">
          <div className="q-art-header-inner">
            <div className="q-art-crumb">
              <a onClick={() => setRoute('insights')} className="q-art-crumb-link">Insights</a>
              <span>·</span>
              <span>Field Guide</span>
              <span>·</span>
              <span>Foundations</span>
            </div>
            <h1 className="q-art-h1">
              AI Field Guide for Foundation Leaders —{' '}
              <em>CEO &amp; Executive Director Edition</em>
            </h1>
            <div className="q-art-meta">
              <div className="q-art-avatars-inline">
                <img className="q-art-avatar-sm" src="assets/team/jonas.png" alt="Jonas Clark" />
                <img className="q-art-avatar-sm" src="assets/team/amanda.jpg" alt="Amanda Rinderle" />
              </div>
              <span className="author">Jonas Clark &amp; Amanda Rinderle</span>
              <span className="sep">·</span>
              <span>May 2026</span>
              <span className="sep">·</span>
              <span>35 min read</span>
            </div>
          </div>
        </div>
      </section>

      {/* ── Body ── */}
      <article className="q-art-body">
        <div className="q-wrap">
          <div className="q-art-layout">
          <ArticleTOC activeId={activeSection} />
          <div className="q-art-content">

            <p className="q-art-lede">
              A practical guide for foundation CEOs and executive directors navigating AI adoption across grantmaking, operations, strategy, and mission delivery.
            </p>

            {/* ── Intro ── */}
            <div id="intro" className="q-art-section">
              <h2>A note to the foundation leader who already has an opinion about AI</h2>
              <p>
                You have probably already formed a view. Maybe you are using AI tools regularly and quietly frustrated that your organization is not moving as fast as you are. Maybe you are genuinely curious but skeptical that the tools are ready for the specific complexities of foundation work. Maybe you are leading a lean operation and wondering whether this is one more thing you are supposed to care about.
              </p>
              <p>
                Whatever your starting point, this guide is not going to try to convince you that AI is revolutionary. That argument has been made at sufficient volume. What we are going to do is give you an honest account of where the technology actually is, what it does well and poorly in foundation-specific contexts, and where the genuine opportunities are for organizations like yours.
              </p>
              <p>
                Foundation leaders are a different audience than most AI writing assumes. You do not need to be convinced that technology matters, and you do not need a primer on what a language model is. What you need is an honest, peer-level conversation about what AI means for the specific work of running a foundation: the grantmaking, the strategy, the relationship with grantees, and the increasingly unavoidable question of whether your organization's role in the sector needs to evolve.
              </p>
              <p>
                That is what we have tried to write. We have been honest about the limitations. We have been specific about foundation workflows rather than recycling generic AI advice. And we have tried to address the question that matters most for leaders at your level: not whether to engage with AI, but how to do it in a way that is genuinely worth your organization's time.
              </p>
            </div>

            {/* ── Part I label ── */}
            <div className="q-art-part-label">Part I — What's actually happening</div>

            {/* ── Section 01 ── */}
            <div id="honest" className="q-art-section">
              <div className="q-art-section-label">01</div>
              <h2>Where we actually are: an honest account</h2>
              <p>
                The foundation sector has watched several waves of technology-driven transformation promises come and go. So when AI arrived with the usual fanfare, the appropriate response was skepticism. The question worth asking now is whether this wave is different, and the honest answer is: in some ways that matter, yes.
              </p>
              <h3>The capability shifts that matter</h3>
              <p>
                In late 2022, ChatGPT launched publicly and crossed 100 million users in two months. For the first time, a non-technical person could sit down, open a browser, and have a substantive working conversation with an AI system — drafting a document, synthesizing a report, thinking through a problem. The barrier to access effectively disappeared overnight.
              </p>
              <p>
                2023 brought substantially more capable models from multiple developers, with better reasoning, longer context windows, and meaningfully improved reliability. By mid-2023, the question was no longer whether AI could be useful for knowledge work. It clearly could. The question shifted to how to integrate it responsibly.
              </p>
              <p>
                2024 introduced multimodal capability. A program officer could upload a stack of grantee reports and ask AI to synthesize themes across the portfolio. A development director at a community foundation could paste in donor meeting notes and ask for a relationship summary and suggested next steps. The practical surface area for foundation work expanded significantly.
              </p>
              <p>
                2025 brought the early stages of agentic AI — systems that do not just answer questions but take sequences of actions: monitoring databases, drafting communications, updating records, executing multi-step workflows without step-by-step human direction. This is the frontier. It carries both the most operational promise and the most significant governance complexity.
              </p>
              <p>
                2026 has so far been a story of pushing this frontier forward, partly through better models but also through vastly improved model harnesses: the connectors, integrations, and context-management tools that make models genuinely useful inside complex organizations.
              </p>
              <h3>Why this wave is different</h3>
              <p>
                The access barrier is genuinely gone. Your program officers, your grants manager, and your communications staff can use these tools today without any technical support. They probably already are, regardless of whether your organization has a policy.
              </p>
              <p>
                The capability improvement curve has been steep and sustained. The capability gained between 2022 and today exceeds everything the previous decade of AI development produced. The tools are meeting professional workflows where they are — you do not need to restructure your grantmaking process to get value from AI. You can start with what you are already doing.
              </p>
              <p>
                The gap between what AI can do in a demonstration and what it reliably does in organizational practice remains real. Managing that gap is most of what thoughtful adoption actually involves.
              </p>
            </div>

            {/* ── Section 02 ── */}
            <div id="does-well" className="q-art-section">
              <div className="q-art-section-label">02</div>
              <h2>What AI does well, and what it genuinely doesn't</h2>
              <p>
                Most AI content leads with the capabilities and buries the limitations. We are going to do it differently, because foundation leaders making real decisions need the full picture — and because the limitations are often more consequential for governance than the upsides.
              </p>
              <h3>What AI does well</h3>
              <p>
                <strong>Synthesis and summarization at volume.</strong> AI is exceptionally strong at processing large volumes of text and producing coherent summaries. Grant applications, grantee reports, board meeting records, research literature, 990 filings — AI can synthesize these faster and more consistently than a human reader. For foundations dealing with application volume that outstrips program staff capacity, this is probably the highest-value, lowest-risk starting point.
              </p>
              <p>
                <strong>First-pass drafting.</strong> AI substantially lowers the cost of a serviceable first draft. Grant review memos, board materials, grantee communications, impact reports — AI removes the blank-page friction and produces drafts that humans can edit rather than originate. The voice and judgment that makes those documents effective still comes from your staff. The drudgery of getting started does not.
              </p>
              <p>
                <strong>Research and intelligence gathering.</strong> AI can rapidly surface relevant information, synthesize sector research, and identify patterns across large data sets. For foundations that need to track policy developments, understand emerging issue areas, or benchmark grantmaking against sector trends, this is a genuine force multiplier.
              </p>
              <p>
                <strong>Consistency at scale.</strong> For tasks that need to be done repeatedly and to a consistent standard — reviewing applications against criteria, assessing grantee reports against grant objectives, checking documents against a rubric — AI performs with a steadiness that human readers, who tire and drift, do not. This matters significantly in grant review contexts.
              </p>
              <h3>What AI genuinely does not do well</h3>
              <p>
                <strong>It fabricates with confidence.</strong> This is the most important limitation to internalize. AI systems regularly produce information that is factually wrong — specific statistics, citations, organization names, dates — and present it with the same confident tone as accurate information. This is not a bug that will be fully fixed; it is a feature of how these systems work. Any AI output containing specific factual claims requires human verification before it goes anywhere consequential.
              </p>
              <p>
                <strong>It cannot exercise judgment or carry accountability.</strong> AI can analyze a situation and generate perspectives. It cannot make decisions that carry moral weight, institutional accountability, or genuine stakes. Deciding whether a grantee's theory of change is credible, whether an organization's leadership is trustworthy, whether a grant is genuinely aligned with community values — these require human judgment. AI can inform that judgment. It cannot substitute for it.
              </p>
              <p>
                <strong>It does not know your foundation.</strong> Without significant effort to provide context, AI outputs will be generic at best and tone-deaf at worst. AI does not know your grantmaking history, your relationships with grantees, your board's risk tolerance, or the informal dynamics that shape every significant decision. Closing that gap requires ongoing investment in building the context that makes AI outputs genuinely useful for your specific organization.
              </p>

              <blockquote className="q-art-pull">
                "Your foundation remains accountable for every output that AI contributes to, whether or not humans reviewed it carefully. The governance question is not whether AI can do this — it is whether you have built the human oversight to ensure it is done right."
              </blockquote>
            </div>

            {/* ── Section 03 ── */}
            <div id="sector" className="q-art-section">
              <div className="q-art-section-label">03</div>
              <h2>The foundation sector: where things actually stand</h2>
              <p>
                General AI adoption data tells you something. What is happening in philanthropy specifically tells you more. The picture is uneven, and it is moving faster than most foundation leaders realize.
              </p>
              <p>
                Philanthropy has been slower than the corporate and higher-education sectors to adopt AI in any coordinated way. The reasons are not surprising: smaller staff sizes, risk aversion embedded in governance culture, genuine concern about bias in grantmaking, and the absence of competitive pressure that drives faster adoption elsewhere. Foundations do not lose market share by moving slowly on technology.
              </p>
              <p>
                But the picture is shifting. Larger foundations with significant program staff and data infrastructure are moving from individual experimentation to organizational pilots. A growing number of community foundations are actively exploring AI for prospect research and donor stewardship. And across the sector, staff are using consumer AI tools informally, regardless of whether their organizations have policies.
              </p>

              <div className="q-art-stat">
                <div className="q-art-stat-inner">
                  <div className="q-art-stat-num">Most</div>
                  <div className="q-art-stat-body">
                    <strong>foundations are earlier in this process than they appear from the outside — and earlier than they feel on the inside.</strong> The most consistent finding across conversations with foundation leaders right now is that individual usage is running well ahead of organizational strategy. Program officers are already using Claude or ChatGPT to summarize applications, usually without a policy that tells them what data they may share. This is not a crisis, but it is a signal that the organizational conversation is overdue.
                  </div>
                </div>
              </div>

              <p>
                The foundations making the most meaningful progress share traits that have nothing to do with budget or staff size: leadership that is actively curious rather than delegating AI to IT, at least one staff member with both interest and some technical aptitude, and a culture that tolerates learning in public. None of those require significant resources. All of them require intentional leadership.
              </p>
            </div>

            {/* ── Part II label ── */}
            <div className="q-art-part-label">Part II — What it means for your foundation</div>

            {/* ── Section 04 ── */}
            <div id="grantmaking" className="q-art-section">
              <div className="q-art-section-label">04</div>
              <h2>AI for grantmaking operations</h2>
              <p>
                Grantmaking is where most foundation leaders will see the earliest and most tangible returns on AI investment. The volume of reading, synthesizing, and pattern-matching involved in grant review is substantial, and it is largely mechanical. AI does not replace program judgment. It removes the work that gets in the way of it.
              </p>

              <h3>Application screening and triage</h3>
              <p>
                Foundations routinely receive more applications than program staff can read carefully. The result is triage by gut, not analysis. AI handles the mechanical parts well: summarizing applications against stated criteria in standardized formats, flagging alignment gaps between stated mission and application content, surfacing strong applications that might otherwise get buried in volume, generating first-pass scoring across dimensions the foundation defines.
              </p>
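              {/*
                A hedged sketch of what "first-pass scoring across dimensions the
                foundation defines" can look like in practice. Illustrative only:
                it assumes a server-side context and the Anthropic Messages API;
                the criteria, model name, and scoring schema are placeholder
                assumptions, not recommendations.

                const SCREENING_CRITERIA = [
                  'Alignment with stated program priorities',
                  'Clarity and plausibility of the theory of change',
                  'Organizational capacity relative to the proposed budget',
                ];

                async function screenApplication(applicationText) {
                  const res = await fetch('https://api.anthropic.com/v1/messages', {
                    method: 'POST',
                    headers: {
                      'x-api-key': process.env.ANTHROPIC_API_KEY, // keep keys server-side
                      'anthropic-version': '2023-06-01',
                      'content-type': 'application/json',
                    },
                    body: JSON.stringify({
                      model: 'claude-sonnet-4-5', // placeholder model name
                      max_tokens: 1024,
                      system:
                        'You are screening grant applications for a foundation. ' +
                        'Score each criterion 1-5, citing the passage that supports ' +
                        'each score. Return only JSON: [{criterion, score, evidence}].',
                      messages: [{
                        role: 'user',
                        content: 'Criteria:\n' + SCREENING_CRITERIA.join('\n') +
                                 '\n\nApplication:\n' + applicationText,
                      }],
                    }),
                  });
                  const data = await res.json();
                  // First-pass scores only; a human reader makes the actual call.
                  return JSON.parse(data.content[0].text);
                }
              */}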
              <p>
                What AI cannot read: organizational trust, community relationships, or the intangible qualities that make a grantee credible. Use AI to improve the quality of your reading, not to replace it.
              </p>

              <h3>Due diligence</h3>
              <p>
                Due diligence on grantees is time-consuming and inconsistent across program officers. AI can synthesize publicly available 990 data, audited financials, and news coverage; identify governance red flags or leadership instability; generate standardized due diligence memos from gathered inputs; and cross-reference grantee claims against public records.
              </p>
              <p>
                Where it falls short: AI will miss context that lives in relationships and local knowledge. Public data is often lagged or incomplete, and AI reflects those gaps. The due diligence memo is a starting point for a conversation, not a conclusion.
              </p>

              <h3>Portfolio monitoring and reporting</h3>
              <p>
                Grantee reports pile up. Reading them carefully is hard; extracting patterns across the portfolio is harder. AI can summarize progress reports against grant objectives, identify common themes and challenges across a portfolio, generate board-ready portfolio narratives from report data, and flag grants at risk based on reported indicators. The judgment about what to do with those signals still sits with your program team.
              </p>
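              {/*
                A hedged sketch of one workable shape for portfolio synthesis: a
                two-pass "summarize, then synthesize" loop that condenses each
                report before asking for cross-portfolio themes. callClaude() is a
                stand-in for any chat call like the one sketched under
                "Application screening and triage" above.

                async function portfolioThemes(reports) {
                  // Pass 1: one bounded summary per report, keyed to grant objectives.
                  const summaries = [];
                  for (const report of reports) {
                    summaries.push(await callClaude(
                      'Summarize this grantee report against its grant objectives ' +
                      'in five bullets. Mark any objective that appears off-track.' +
                      '\n\n' + report.text
                    ));
                  }
                  // Pass 2: themes, shared challenges, and at-risk flags across the set.
                  return callClaude(
                    'Across these report summaries, identify common themes, shared ' +
                    'challenges, and grants that look at risk. Cite which summaries ' +
                    'support each claim.\n\n' + summaries.join('\n---\n')
                  );
                }
              */}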
            </div>

            {/* ── Section 05 ── */}
            <div id="management" className="q-art-section">
              <div className="q-art-section-label">05</div>
              <h2>AI for foundation management</h2>
              <p>
                Internal operations are often the lowest-hanging fruit for AI adoption. The work is high-volume, time-consuming, and largely rule-bound — exactly the conditions where AI performs well.
              </p>

              <h3>Board materials and governance</h3>
              <p>
                Board preparation is one of the most consistent time sinks for foundation staff. AI changes the economics of that work: drafting board meeting materials from staff inputs and prior records, synthesizing strategy documents into executive summaries, generating grant approval memos and consent agenda items, producing board-level portfolio updates from program staff reports. The preparation time drops. The quality of the prepared materials — and the conversation they enable — can go up.
              </p>

              <h3>Financial reporting and compliance</h3>
              <p>
                Financial reporting and compliance documentation are essential and time-consuming. AI reduces the grind without replacing the review — synthesizing financial data into narrative form for board and public reporting, assisting with 990 preparation, flagging compliance gaps or policy inconsistencies in grant documentation. Two important caveats: AI does not audit, and regulatory requirements change faster than AI training data.
              </p>

              <h3>Fundraising and donor relations (community foundations)</h3>
              <p>
                Community foundations operate a dual mandate: they are simultaneously grantmakers and fundraisers. AI has meaningful applications across both sides. For prospect research, it can synthesize publicly available wealth signals, philanthropic history, and community connections. For donor stewardship, it can draft personalized impact reports, gift acknowledgments, and cultivation materials at a scale that small fundraising teams could not previously sustain. For gift officer preparation, it can generate pre-meeting briefs that surface relevant donor interests and giving history.
              </p>
              <p>
                Where the relationship still lives: major gift relationships are built on trust and genuine connection. AI can prepare you for a conversation. It cannot have one for you.
              </p>
            </div>

            {/* ── Section 06 ── */}
            <div id="strategy" className="q-art-section">
              <div className="q-art-section-label">06</div>
              <h2>AI for philanthropic strategy</h2>
              <p>
                This is the area where AI is most underused and most misunderstood. Foundation leaders tend to think of AI as a tool for efficiency. It is also a tool for intelligence. The capacity to synthesize large volumes of public data, research, and sector reporting into actionable strategic insight is genuinely new.
              </p>

              <h3>Trend spotting and sector intelligence</h3>
              <p>
                Most foundation leaders are operating with incomplete sector pictures — not because the information does not exist, but because there is too much of it to process with the staff time available. AI can monitor academic research, policy developments, and sector news across your focus areas; synthesize Giving USA data, 990 trends, and funder collaborative reports; surface emerging grantee needs before they become crisis-level; and map funding gaps by comparing your portfolio against sector need.
              </p>

              <blockquote className="q-art-pull q-art-pull--right">
                "Build a recurring AI workflow that synthesizes your sector's latest research into a monthly briefing. This replaces hours of staff reading time with a structured, actionable document."
              </blockquote>
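              {/*
                The recurring briefing from the pull quote, sketched as a scheduled
                job. Everything here is an assumption about your own setup: the
                focus areas are placeholders, fetchRecentItems() stands in for
                whatever news and research feeds you already monitor, and
                callClaude() is the same stand-in used in earlier sketches.

                const FOCUS_AREAS = ['youth mental health', 'rural broadband'];

                async function monthlyBriefing() {
                  const sections = [];
                  for (const area of FOCUS_AREAS) {
                    const items = await fetchRecentItems(area);
                    sections.push(await callClaude(
                      'Synthesize these items into a briefing section for a ' +
                      'foundation working on ' + area + ': three developments, why ' +
                      'each matters, one open question. List a source per claim.' +
                      '\n\n' + items.map(i => i.title + ': ' + i.summary).join('\n')
                    ));
                  }
                  // A human reviews and edits before this circulates to staff or board.
                  return sections.join('\n\n');
                }
              */}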

              <h3>Funding AI and supporting grantee adoption</h3>
              <p>
                Every foundation will soon face a strategic question: do you fund AI, fund the adoption of AI, or stay neutral? This is not a technology question. It is a theory-of-change question.
              </p>
              <p>
                Some foundations are beginning to require or incentivize AI literacy among grantees as a condition of operational capacity grants. Others are funding AI equity initiatives — ensuring that grantees in under-resourced communities have access to the same tools as well-resourced peers. A growing number are adding AI capacity questions to their due diligence process.
              </p>
              <p>
                A caution: requiring AI adoption can be paternalistic if not handled thoughtfully. Foundations should offer resources and learning, not mandates, unless they are prepared to provide meaningful implementation support alongside the expectation.
              </p>
            </div>

            {/* ── Section 07 ── */}
            <div id="role" className="q-art-section">
              <div className="q-art-section-label">07</div>
              <h2>Rethinking the foundation's role</h2>
              <p>
                This is the most speculative section of this guide, and deliberately so. It is also, we think, the most important one for foundation leaders to sit with.
              </p>

              <h3>The pass-through problem</h3>
              <p>
                Most foundations know they are more than a check. They have sector knowledge, networks, peer relationships, and convening capacity that grantees value. But operationalizing that value — turning it into something grantees reliably receive — has always been expensive and inconsistent.
              </p>
              <p>
                The honest reason this has not changed is not will. It is cost structure. Providing genuine technical assistance at scale has historically required program staff time that most foundations do not have, producing something better than mediocre has required expertise they do not want to maintain internally, and doing it consistently has required infrastructure they could not justify building. So foundations gestured at capacity building, offered the occasional convening, and wrote the check.
              </p>
              <p>
                AI changes that calculus in a specific and meaningful way. The cost of synthesizing sector research, producing tailored materials, and maintaining consistent communication at scale has dropped to the point where a small program team with well-designed workflows can deliver genuine value to dozens of grantees simultaneously. That was not true three years ago. It is true now.
              </p>

              <h3>What this could look like</h3>
              <p>
                A small number of larger foundations are beginning to experiment with AI-assisted technical assistance, shared sector intelligence, and capacity-building workflows delivered at scale. They are early — most are in design and pilot stage rather than full deployment. The design space includes:
              </p>

              <div className="q-art-callout">
                <div className="q-art-callout-items">
                  <div className="q-art-callout-item">
                    <div className="n">→</div>
                    <div>
                      <strong>AI-generated grantee learning briefs</strong>
                      <p>Synthesized sector research tailored to each grantee's focus area, delivered as a funder benefit rather than an expectation.</p>
                    </div>
                  </div>
                  <div className="q-art-callout-item">
                    <div className="n">→</div>
                    <div>
                      <strong>Grant writing support</strong>
                      <p>AI-assisted narrative development for grantees applying to other funders, offered selectively where it would be genuinely useful rather than as a standard service.</p>
                    </div>
                  </div>
                  <div className="q-art-callout-item">
                    <div className="n">→</div>
                    <div>
                      <strong>Shared sector intelligence</strong>
                      <p>Publishing AI-synthesized briefings that grantees can access alongside the foundation, building a common information base rather than hoarding analytical capacity.</p>
                    </div>
                  </div>
                  <div className="q-art-callout-item">
                    <div className="n">→</div>
                    <div>
                      <strong>Explicit AI pilots</strong>
                      <p>Small grants for grantees to experiment with AI in their own operations, with foundation support for evaluation and peer learning.</p>
                    </div>
                  </div>
                </div>
              </div>

              <h3>The governance question</h3>
              <p>
                Moving from pass-through to active partner changes the power dynamic in the funder-grantee relationship, and that deserves honest examination before any foundation commits to this direction. The foundations best positioned to explore this are those with genuine sector depth, a track record of being trusted advisors rather than just funders, and the organizational discipline to offer support without creating dependency. That is not every foundation.
              </p>
              <p>
                For boards thinking through whether to pursue this: Does deeper operational involvement compromise your ability to evaluate grantees objectively? Are you equipped to be a reliable technical assistance provider, or are you setting grantees up to depend on something you cannot sustain? What does it mean for your identity as a funder if you become known as a capacity-building partner? These are not objections to the direction. They are the questions that responsible exploration requires.
              </p>
            </div>

            {/* ── Section 08 ── */}
            <div id="governance" className="q-art-section">
              <div className="q-art-section-label">08</div>
              <h2>Governance, risk, and responsible adoption</h2>
              <p>
                Foundation boards care about risk. AI introduces new categories of it: bias in grantmaking, data security, vendor dependency, and reputational exposure. Getting governance right is not optional.
              </p>

              <h3>Bias and fairness in AI-assisted grantmaking</h3>
              <p>
                AI models trained on historical data can perpetuate historical patterns. In grantmaking, this can mean AI that consistently surfaces established organizations over emerging ones, or that reads certain types of applications more favorably than others. Mitigation: be explicit about what criteria you are asking AI to apply, and review those criteria for embedded bias. Use AI to expand your review, not narrow it, before human readers make final decisions. Audit AI outputs periodically against your equity goals.
              </p>
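              {/*
                "Audit AI outputs periodically" can start very simply: compare
                first-pass AI scores across applicant segments you care about and
                flag gaps for human review. The segments below are illustrative
                assumptions; use whatever dimensions your equity goals name.

                function auditScoresBySegment(rows) {
                  // rows: [{ segment: 'first-time applicant' | 'returning grantee', aiScore }]
                  const bySegment = {};
                  for (const row of rows) {
                    (bySegment[row.segment] = bySegment[row.segment] || []).push(row.aiScore);
                  }
                  const mean = xs => xs.reduce((a, b) => a + b, 0) / xs.length;
                  // Large gaps between segment averages warrant a closer human look.
                  return Object.fromEntries(
                    Object.entries(bySegment).map(([seg, xs]) => [seg, mean(xs)])
                  );
                }
              */}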

              <h3>Data security and confidentiality</h3>
              <p>
                Grantee application data is often sensitive: applicant financials, strategic plans, and organizational details should not be pasted into consumer AI tools without understanding how that data will be handled and retained. Use enterprise AI tools with clear data retention and privacy policies. Establish a policy about which data categories can be used with which tools. Train staff on the difference between public AI tools and enterprise-grade deployments.
              </p>
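              {/*
                "Establish a policy about which data categories can be used with
                which tools" can be made concrete as a simple allowlist that staff
                and internal tooling check before data leaves your systems. The
                categories and tool tiers below are illustrative assumptions.

                const DATA_POLICY = {
                  'public':               ['consumer-ai', 'enterprise-ai'], // 990s, published reports
                  'internal':             ['enterprise-ai'],                // drafts, board memos
                  'grantee-confidential': ['enterprise-ai'],                // applications, financials
                  'donor-pii':            [],                               // never leaves core systems
                };

                function toolAllowed(category, tool) {
                  return (DATA_POLICY[category] || []).includes(tool);
                }

                // toolAllowed('grantee-confidential', 'consumer-ai') -> false
              */}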

              <h3>Audit trails and accountability</h3>
              <p>
                Good governance looks like this: document AI use in grant review processes so decisions can be reconstructed and explained. Maintain human sign-off requirements for all grant decisions regardless of AI involvement. Build periodic AI use reviews into your annual governance calendar.
              </p>

              <div className="q-art-aside">
                <div className="q-art-aside-label">Questions to ask before deploying any AI tool</div>
                <p>What data does this tool retain, and for how long? Is this tool subject to regular security audits? What happens to our workflow if this vendor changes its pricing or discontinues the product? Does this tool have an enterprise agreement option with appropriate data protection?</p>
              </div>
            </div>

            {/* ── Section 09 ── */}
            <div id="getting-started" className="q-art-section">
              <div className="q-art-section-label">09</div>
              <h2>Getting started</h2>
              <p>
                The most common mistake in AI adoption is trying to do too much at once. The second most common mistake is waiting until you have a perfect plan. The right approach is sequenced, low-risk pilots with clear success criteria.
              </p>

              <h3>Where to start</h3>
              <p>
                The most productive entry points depend on where your organization's pain is greatest. For most foundation leaders, that means choosing one of three starting places: board materials and governance reporting, grant application review, or grantee portfolio monitoring. All three are high-volume, time-consuming, and largely mechanical — which makes them well-suited to AI assistance with low risk to your core grantmaking judgment.
              </p>
              <p>
                Run your first AI workflow in parallel with your existing process rather than replacing it. Compare the outputs. Evaluate honestly. Only scale what demonstrably works.
              </p>
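              {/*
                "Run your first AI workflow in parallel" can be as literal as this:
                keep the human process untouched, log both sets of outputs for the
                same grant cycle, and measure agreement before trusting anything.
                A hedged sketch; the score fields are whatever your pilot measures.

                function compareRuns(humanLog, aiLog) {
                  // humanLog / aiLog: [{ id, score, notes }] for the same applications
                  const aiById = new Map(aiLog.map(r => [r.id, r]));
                  const rows = humanLog.map(h => {
                    const ai = aiById.get(h.id) || {};
                    return {
                      id: h.id,
                      humanScore: h.score,
                      aiScore: ai.score,
                      agrees: h.score === ai.score, // divergences are the interesting data
                    };
                  });
                  const agreement = rows.filter(r => r.agrees).length / rows.length;
                  return { rows, agreement }; // scale only what demonstrably works
                }
              */}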

              <h3>Measuring success</h3>

              <div className="q-art-callout">
                <div className="q-art-callout-items">
                  <div className="q-art-callout-item">
                    <div className="n">01</div>
                    <div>
                      <strong>Efficiency metrics</strong>
                      <p>Staff time saved per grant cycle. Time from application close to decision. Volume of grantee reports reviewed per program officer.</p>
                    </div>
                  </div>
                  <div className="q-art-callout-item">
                    <div className="n">02</div>
                    <div>
                      <strong>Quality metrics</strong>
                      <p>Consistency of grant review criteria application across staff. Grantee experience of foundation responsiveness. Board confidence in portfolio reporting.</p>
                    </div>
                  </div>
                  <div className="q-art-callout-item">
                    <div className="n">03</div>
                    <div>
                      <strong>Strategic metrics</strong>
                      <p>New sector intelligence that informed funding decisions. Grantee capacity outcomes where foundation provided technical assistance.</p>
                    </div>
                  </div>
                </div>
              </div>

              <h3>What not to do</h3>
              <p>
                Do not automate your grantmaking without a robust human review layer. Do not use AI tools for sensitive applicant data without understanding the privacy policy. Do not announce an AI strategy to your board before you have tested anything. Do not let perfect be the enemy of started — a small, well-designed pilot teaches more than a large strategy document.
              </p>
            </div>

            {/* ── Closing ── */}
            <div className="q-art-section q-art-closing">
              <h3>A note on what this guide is not</h3>
              <p>
                This guide is a scaffold, not a finished document. It is designed to be iterated on. The AI landscape is moving fast enough that specific tool recommendations would be outdated by the time this reaches your desk.
              </p>
              <p>
                What does not change: the judgment required to deploy these tools responsibly, the organizational work required to build staff confidence, and the strategic clarity required to use AI in service of mission rather than in place of it. Those are leadership questions. They were before AI, and they still are.
              </p>
            </div>

            {/* ── Author block ── */}
            <div className="q-art-authors">
              <div className="q-art-author-entry">
                <img className="q-art-avatar" src="assets/team/jonas.png" alt="Jonas Clark" />
                <div>
                  <div className="q-art-author-name">Jonas Clark</div>
                  <p>Jonas leads Quarterdeck's AI and Technology practice. He works with foundations, universities, and established businesses on AI adoption, data infrastructure, and the systems that hold complex organizations together.</p>
                </div>
              </div>
              <div className="q-art-author-entry">
                <img className="q-art-avatar" src="assets/team/amanda.jpg" alt="Amanda Rinderle" />
                <div>
                  <div className="q-art-author-name">Amanda Rinderle</div>
                  <p>Amanda is Managing Partner at Quarterdeck. She leads strategy and governance engagements with foundations and nonprofits, with a focus on helping organizations navigate complex decisions with the rigor and clarity they deserve.</p>
                </div>
              </div>
            </div>

          </div>{/* /.q-art-content */}
          </div>{/* /.q-art-layout */}
        </div>
      </article>

      {/* ── Footer nav ── */}
      <section className="q-art-footer-nav">
        <div className="q-wrap">
          <div className="q-art-footer-nav-inner">
            <a className="q-art-back" onClick={() => setRoute('insights')}>
              <svg width="16" height="16" viewBox="0 0 16 16" fill="none" aria-hidden="true"><path d="M14 8H2M6 4L2 8l4 4" stroke="currentColor" strokeWidth="1.4" strokeLinecap="round" strokeLinejoin="round"/></svg>
              Back to Insights
            </a>
            <div className="q-art-footer-cta">
              <div className="label">Questions about this? Start a conversation.</div>
              <a className="qbtn qbtn--primary" href="mailto:info@quarterdeck.io" style={{ marginTop: 12 }}>Email info@quarterdeck.io <Arrow /></a>
            </div>
          </div>
        </div>
      </section>

      <SiteFooter setRoute={setRoute} />
    </div>
  );
}

Object.assign(window, { ArticlePage });
