<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="billres.xsl"?>
<!DOCTYPE bill PUBLIC "-//US Congress//DTDs/bill.dtd//EN" "bill.dtd">
<bill bill-stage="Introduced-in-Senate" dms-id="A1" public-private="public" slc-id="S1-ALL23A15-XFH-7W-8LC"><metadata xmlns:dc="http://purl.org/dc/elements/1.1/">
<dublinCore>
<dc:title>118 S3312 IS: Artificial Intelligence Research, Innovation, and Accountability Act of 2023</dc:title>
<dc:publisher>U.S. Senate</dc:publisher>
<dc:date>2023-11-15</dc:date>
<dc:format>text/xml</dc:format>
<dc:language>EN</dc:language>
<dc:rights>Pursuant to Title 17 Section 105 of the United States Code, this file is not subject to copyright protection and is in the public domain.</dc:rights>
</dublinCore>
</metadata>
<form>
<distribution-code display="yes">II</distribution-code><congress>118th CONGRESS</congress><session>1st Session</session><legis-num>S. 3312</legis-num><current-chamber>IN THE SENATE OF THE UNITED STATES</current-chamber><action><action-date date="20231115">November 15, 2023</action-date><action-desc><sponsor name-id="S303">Mr. Thune</sponsor> (for himself, <cosponsor name-id="S311">Ms. Klobuchar</cosponsor>, <cosponsor name-id="S318">Mr. Wicker</cosponsor>, <cosponsor name-id="S408">Mr. Hickenlooper</cosponsor>, <cosponsor name-id="S409">Mr. Luján</cosponsor>, and <cosponsor name-id="S372">Mrs. Capito</cosponsor>) introduced the following bill; which was read twice and referred to the <committee-name committee-id="SSCM00">Committee on Commerce, Science, and Transportation</committee-name></action-desc></action><legis-type>A BILL</legis-type><official-title>To provide a framework for artificial intelligence innovation and accountability, and for other purposes.</official-title></form><legis-body><section id="S1" section-type="section-one"><enum>1.</enum><header>Short title</header><text display-inline="no-display-inline">This Act may be cited as the <quote><short-title>Artificial Intelligence Research, Innovation, and Accountability Act of 2023</short-title></quote>.</text></section><section id="id267c0c21a5bf4e9f9d86a40ab7643305"><enum>2.</enum><header>Table of contents</header><text display-inline="no-display-inline">The table of contents for this Act is as follows:</text><toc><toc-entry level="section" idref="S1">Sec. 1. Short title.</toc-entry><toc-entry level="section" idref="id267c0c21a5bf4e9f9d86a40ab7643305">Sec. 2. Table of contents.</toc-entry><toc-entry level="title" idref="idcbe44313f12947a1a25f2335a11bdd50">TITLE I—Artificial intelligence research and innovation</toc-entry><toc-entry level="section" idref="idb4ed39ac6c4b44499b9574354ddde13a">Sec. 101. 
Open data policy amendments.</toc-entry><toc-entry level="section" idref="idf42cea67bc50443db938998b13e6eedb">Sec. 102. Online content authenticity and provenance standards research and development.</toc-entry><toc-entry level="section" idref="id7994ad94064b4822a4c9ea652822ab3b">Sec. 103. Standards for detection of emergent and anomalous behavior and AI-generated media.</toc-entry><toc-entry level="section" idref="id6f8d12be32184fa190fcf9e957ac9733">Sec. 104. Comptroller General study on barriers and best practices to usage of AI in government.</toc-entry><toc-entry level="title" idref="id74d2e87ba3994ca8a53ff761e6a23bbb">TITLE II—Artificial intelligence accountability</toc-entry><toc-entry level="section" idref="ide953f7856e0c45bbaacfacad316b9e56">Sec. 201. Definitions.</toc-entry><toc-entry level="section" idref="id831e30897eed4d408a857be5a384edf0">Sec. 202. Generative artificial intelligence transparency.</toc-entry><toc-entry level="section" idref="id80074546495e4fa5b76faeb153dd11c1">Sec. 203. Transparency reports for high-impact artificial intelligence systems.</toc-entry><toc-entry level="section" idref="idd422be3f98794c4e8a679ed677bab61f">Sec. 204. Recommendations to Federal agencies for risk management of high-impact artificial intelligence systems.</toc-entry><toc-entry level="section" idref="id937581d398aa437589d2c564dc9fd97e">Sec. 205. Office of Management and Budget oversight of recommendations to agencies.</toc-entry><toc-entry level="section" idref="idc9c834caca524c31953d74e9215ee8d8">Sec. 206. Risk management assessment for critical-impact artificial intelligence systems.</toc-entry><toc-entry level="section" idref="idde181c44934440b38a92be72f1aa83bd">Sec. 207. Certification of critical-impact artificial intelligence systems.</toc-entry><toc-entry level="section" idref="id2622ebcc0f82420da66bc7f25df639cb">Sec. 208. Enforcement.</toc-entry><toc-entry level="section" idref="id3b889107277248ddb6e0a390c25daa07">Sec. 209. 
Artificial intelligence consumer education.</toc-entry></toc></section><title id="idcbe44313f12947a1a25f2335a11bdd50" style="OLC"><enum>I</enum><header>Artificial intelligence research and innovation</header><section id="idb4ed39ac6c4b44499b9574354ddde13a"><enum>101.</enum><header>Open data policy amendments</header><text display-inline="no-display-inline">Section 3502 of title 44, United States Code, is amended—</text><paragraph id="idbc7d6a0434ed4fc5b18d8dfe5815b1ba"><enum>(1)</enum><text>in paragraph (22)—</text><subparagraph commented="no" display-inline="no-display-inline" id="idb087ff0c96784e7c9910e69f4fa5401f"><enum>(A)</enum><text display-inline="yes-display-inline">by inserting <quote>or data model</quote> after <quote>a data asset</quote>; and</text></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="id5920d13f45364e8a8b38eb608782f9e9"><enum>(B)</enum><text display-inline="yes-display-inline">by striking <quote>and</quote> at the end;</text></subparagraph></paragraph><paragraph commented="no" display-inline="no-display-inline" id="ida35bf1063bac4943a7d607309baf4903"><enum>(2)</enum><text display-inline="yes-display-inline">in paragraph (23), by striking the period at the end and inserting a semicolon; and</text></paragraph><paragraph id="id6cf571a5e03d4e4b95ee7631f7db9bbe"><enum>(3)</enum><text>by adding at the end the following:</text><quoted-block style="OLC" display-inline="no-display-inline" id="ide8530f1b1a514641bf859b5104b1e2c6"><paragraph id="id00a563e9c84346c8bebd2752965075aa"><enum>(24)</enum><text>the term <term>data model</term> means a mathematical, economic, or statistical representation of a system or process used to assist in making calculations and predictions, including through the use of algorithms, computer programs, or artificial intelligence systems; and</text></paragraph><paragraph id="id27c1209586fd4c849489c16586db85de"><enum>(25)</enum><text>the term <term>artificial intelligence system</term> means 
an engineered system that—</text><subparagraph id="ide53ad6028aba4feea88d610f7ea81735"><enum>(A)</enum><text>generates outputs, such as content, predictions, recommendations, or decisions for a given set of objectives; and</text></subparagraph><subparagraph id="idf686a708c0c443699e87b9915b7c87af"><enum>(B)</enum><text>is designed to operate with varying levels of adaptability and autonomy using machine and human-based inputs.</text></subparagraph></paragraph><after-quoted-block>.</after-quoted-block></quoted-block></paragraph></section><section id="idf42cea67bc50443db938998b13e6eedb"><enum>102.</enum><header>Online content authenticity and provenance standards research and development</header><subsection id="id7663da718c3643bb8c6c80d05309970b"><enum>(a)</enum><header>Research</header><paragraph commented="no" display-inline="no-display-inline" id="id0dda387bf0fd4a8b92bc79479e23b310"><enum>(1)</enum><header>In general</header><text display-inline="yes-display-inline">Not later than 180 days after the date of the enactment of this Act, the Under Secretary of Commerce for Standards and Technology shall carry out research to facilitate the development and standardization of means to provide authenticity and provenance information for content generated by human authors and artificial intelligence systems.</text></paragraph><paragraph commented="no" display-inline="no-display-inline" id="id3ba0e60c9c34474b8f930e792025cf6b"><enum>(2)</enum><header>Elements</header><text display-inline="yes-display-inline">The research carried out pursuant to paragraph (1) shall cover the following:</text><subparagraph id="id6e062aabdfbf464c90f921c35e952568"><enum>(A)</enum><text>Secure and binding methods for human authors of content to append statements of provenance through the use of unique credentials, watermarking, or other data or metadata-based approaches.</text></subparagraph><subparagraph id="id2e2f0afaf11b42afb028b072479d4cbf"><enum>(B)</enum><text>Methods for the verification 
of statements of content provenance to ensure authenticity such as watermarking or classifiers, which are trained models that distinguish artificial intelligence-generated media.</text></subparagraph><subparagraph id="id50f1651c6ddf4cd0bca7254cf59bef61"><enum>(C)</enum><text>Methods for displaying clear and conspicuous statements of content provenance to the end user. </text></subparagraph><subparagraph id="id83fc1490848d4c229c3f2e3965cbb76d"><enum>(D)</enum><text>Technologies or applications needed to facilitate the creation and verification of content provenance information.</text></subparagraph><subparagraph id="iddb418da71e614a738edfbd8d5e1331dd"><enum>(E)</enum><text>Mechanisms to ensure that any technologies and methods developed under this section are minimally burdensome on content producers.</text></subparagraph><subparagraph id="id3ed5cf3dbe284b48bba52c9fcd38ceaf"><enum>(F)</enum><text>Such other related processes, technologies, or applications as the Under Secretary considers appropriate.</text></subparagraph><subparagraph id="idf9015820bba148739b6da63ea868e648"><enum>(G)</enum><text>Use of provenance technology to enable attribution for content creators. </text></subparagraph></paragraph><paragraph id="id7168c18cba4d4969b00aba2308cd215d"><enum>(3)</enum><header>Implementation</header><text>The Under Secretary shall carry out the research required by paragraph (1) as part of the research directives pursuant to section 22A(b)(1) of the National Institute of Standards and Technology Act (<external-xref legal-doc="usc" parsable-cite="usc/15/278h-1">15 U.S.C. 
278h–1(b)(1)</external-xref>).</text></paragraph></subsection><subsection id="id88a7ea1362a44f43b5052e8548f0eef8"><enum>(b)</enum><header>Development of standards</header><paragraph commented="no" display-inline="no-display-inline" id="id6aabddf8bccb48a9b6ccb598a88b8e7a"><enum>(1)</enum><header>In general</header><text display-inline="yes-display-inline">For methodologies and applications related to content provenance and authenticity deemed by the Under Secretary to be at a readiness level sufficient for standardization, the Under Secretary shall provide technical review and assistance to such other Federal agencies and nongovernmental standards organizations as the Under Secretary considers appropriate.</text></paragraph><paragraph id="idff33968cb3474dabbc6f22fb91106aa8" commented="no"><enum>(2)</enum><header>Considerations</header><text>In providing any technical review and assistance related to the development of content provenance and authenticity standards under this subsection, the Under Secretary may—</text><subparagraph id="idd13a7c856aa14bb1a7981fb9a022688e" commented="no"><enum>(A)</enum><text>consider whether a proposed standard is reasonable, practicable, and appropriate for the particular type of media and media environment for which the standard is proposed;</text></subparagraph><subparagraph id="idd1d3683cd192496ca7253960b3a4c2c0" commented="no"><enum>(B)</enum><text>consult with relevant stakeholders; and</text></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="id0f9d933b581346468e09344d2026ae90"><enum>(C)</enum><text display-inline="yes-display-inline">review industry standards issued by nongovernmental standards organizations.</text></subparagraph></paragraph></subsection><subsection id="id85d5a17fc8204018ae0993f4b8f819c7"><enum>(c)</enum><header>Pilot program</header><paragraph id="idb4e7c67ac5774a058c930feeedafb527"><enum>(1)</enum><header>In general</header><text>The Under Secretary shall carry out a pilot 
program to assess the feasibility and advisability of using available technologies and creating open standards to facilitate the creation and verification of content provenance information for digital content.</text></paragraph><paragraph commented="no" display-inline="no-display-inline" id="id9d9b6491f99a4819b22f6f0e190dbcfa"><enum>(2)</enum><header>Locations</header><text>The pilot program required by paragraph (1) shall be carried out at not more than 2 Federal agencies the Under Secretary shall select for purposes of the pilot program required by paragraph (1).</text></paragraph><paragraph id="id9df5bdfb2d27446f8ab87a1a43486d20"><enum>(3)</enum><header>Requirements</header><text>In carrying out the pilot program required by paragraph (1), the Under Secretary shall—</text><subparagraph id="id86d012a30195449d8ecb4d9dccc0e4db"><enum>(A)</enum><text>apply and evaluate methods for authenticating the origin of and modifications to government-produced digital content using technology and open standards described in paragraph (1); and</text></subparagraph><subparagraph id="id59ca6ab0a01e4b678257c9f2b8bb86d7"><enum>(B)</enum><text>make available to the public digital content embedded with provenance or other authentication provided by the heads of the Federal agencies selected pursuant to paragraph (2) for the purposes of the pilot program. 
</text></subparagraph></paragraph><paragraph id="id6c9f29b5b4ff43f6accbd360c7eff623"><enum>(4)</enum><header>Briefing required</header><text>Not later than 1 year after the date of the enactment of this Act, and annually thereafter until the date described in paragraph (5), the Under Secretary shall brief the Committee on Commerce, Science, and Transportation of the Senate and the Committee on Science, Space, and Technology of the House of Representatives on the findings of the Under Secretary with respect to the pilot program carried out under this subsection.</text></paragraph><paragraph id="idA1D2D210AB40426A8A540D3EAFAD5A53" commented="no" display-inline="no-display-inline"><enum>(5)</enum><header>Termination</header><text>The pilot program shall terminate on the date that is 10 years after the date of the enactment of this Act.</text></paragraph></subsection><subsection id="id36b71fe5404a414d9bb6767a2e4f07f8" commented="no" display-inline="no-display-inline"><enum>(d)</enum><header>Report to Congress</header><text display-inline="yes-display-inline">Not later than 1 year after the date of the enactment of this Act, the Under Secretary shall submit to the Committee on Commerce, Science, and Transportation of the Senate and the Committee on Science, Space, and Technology of the House of Representatives a report outlining the progress of standardization initiatives relating to requirements under this section, as well as recommendations for legislative or administrative action to encourage or require the widespread adoption of such initiatives in the United States.</text></subsection></section><section id="id7994ad94064b4822a4c9ea652822ab3b"><enum>103.</enum><header>Standards for detection of emergent and anomalous behavior and AI-generated media</header><text display-inline="no-display-inline">Section 22A(b)(1) of the National Institute of Standards and Technology Act (<external-xref legal-doc="usc" parsable-cite="usc/15/278h-1">15 U.S.C. 
278h–1(b)(1)</external-xref>) is amended—</text><paragraph id="id739e5ea49ee242feb6d09588ea972134"><enum>(1)</enum><text>by redesignating subparagraph (I) as subparagraph (K);</text></paragraph><paragraph id="idaf61cd716dc54b1fb13adfcbd9779090"><enum>(2)</enum><text>in subparagraph (H), by striking <quote>; and</quote> and inserting a semicolon; and</text></paragraph><paragraph id="id006dd2f35b81415db2d0eff8a7bc61df"><enum>(3)</enum><text>by inserting after subparagraph (H) the following:</text><quoted-block id="id2A0178A8CC1941BEA47B185440ECE219" display-inline="no-display-inline" style="OLC"><subparagraph id="id866bc749bfd24633b38e556ed2368129"><enum>(I)</enum><text>best practices for detecting outputs generated by artificial intelligence systems, including content such as text, audio, images, and videos;</text></subparagraph><subparagraph id="id282f6d303a4c41a28f66e534abd719df" commented="no" display-inline="no-display-inline"><enum>(J)</enum><text>methods to detect and understand anomalous behavior of artificial intelligence systems and safeguards to mitigate potentially adversarial or compromising anomalous behavior; and</text></subparagraph><after-quoted-block>.</after-quoted-block></quoted-block></paragraph></section><section id="id6f8d12be32184fa190fcf9e957ac9733"><enum>104.</enum><header>Comptroller General study on barriers and best practices to usage of AI in government</header><subsection id="id6f354e40a1124c3f986f1f783c5a7305"><enum>(a)</enum><header>In general</header><text>Not later than 1 year after the date of enactment of this Act, the Comptroller General of the United States shall—</text><paragraph id="idaf07bef461fd411684b351ca6bca8755"><enum>(1)</enum><text>conduct a review of statutory, regulatory, and other policy barriers to the use of artificial intelligence systems to improve the functionality of the Federal Government; and </text></paragraph><paragraph id="id9674a9b7b7db4781a99ae39309c9c1c1"><enum>(2)</enum><text>identify best practices 
for the adoption and use of artificial intelligence systems by the Federal Government, including—</text><subparagraph id="id3877cf2c0d0645b38cd42c97a5707be8"><enum>(A)</enum><text>ensuring that an artificial intelligence system is proportional to the need of the Federal Government;</text></subparagraph><subparagraph id="id9def724027de4920884a67429b0aadf6"><enum>(B)</enum><text>restrictions on access to and use of an artificial intelligence system based on the capabilities and risks of the artificial intelligence system; and</text></subparagraph><subparagraph id="id127e4833f0194d569c018d1f3d28fe35"><enum>(C)</enum><text>safety measures that ensure that an artificial intelligence system is appropriately limited to necessary data and compartmentalized from other assets of the Federal Government.</text></subparagraph></paragraph></subsection><subsection id="ida3e4de6670a8453fb64b18f8cc53b90f"><enum>(b)</enum><header>Report</header><text>Not later than 2 years after the date of enactment of this Act, the Comptroller General of the United States shall submit to the Committee on Commerce, Science, and Transportation of the Senate and the Committee on Science, Space, and Technology of the House of Representatives a report that—</text><paragraph commented="no" display-inline="no-display-inline" id="id8e4d286907a0426ebe2769af15076156"><enum>(1)</enum><text display-inline="yes-display-inline">summarizes the results of the review conducted under subsection (a)(1) and the best practices identified under subsection (a)(2), including recommendations, as the Comptroller General of the United States considers appropriate;</text></paragraph><paragraph id="iddfd49d9eff5e45dcb63e530abea1d327"><enum>(2)</enum><text>describes any laws, regulations, guidance documents, or other policies that may prevent the adoption of artificial intelligence systems by the Federal Government to improve certain functions of the Federal Government, including—</text><subparagraph 
id="idd948e43b8d1a41b7b8cbe55d6833ee95"><enum>(A)</enum><text>data analysis and processing;</text></subparagraph><subparagraph id="id87e9bb33fc1143be954abbbf24f37dd8"><enum>(B)</enum><text>paperwork reduction;</text></subparagraph><subparagraph id="idbce5e5faf07e4a64a3662ea67142ae2a"><enum>(C)</enum><text>contracting and procurement practices; and</text></subparagraph><subparagraph id="id1b8c27b47c1d4f88b4eb5422e5e77822"><enum>(D)</enum><text>other Federal Government services; and</text></subparagraph></paragraph><paragraph id="id3fee4c2c0f2b423fbe14e8bb23b6dafc"><enum>(3)</enum><text>includes, as the Comptroller General of the United States considers appropriate, recommendations to modify or eliminate barriers to the use of artificial intelligence systems by the Federal Government.</text></paragraph></subsection></section></title><title id="id74d2e87ba3994ca8a53ff761e6a23bbb" style="OLC"><enum>II</enum><header>Artificial intelligence accountability</header><section id="ide953f7856e0c45bbaacfacad316b9e56"><enum>201.</enum><header>Definitions</header><text display-inline="no-display-inline">In this title:</text><paragraph id="id8392e288d0154c7ca933f88d1d21af82"><enum>(1)</enum><header>Appropriate congressional committees</header><text>The term <term>appropriate congressional committees</term> means—</text><subparagraph id="id8d56c253e7e342b5abba6db8532da8aa"><enum>(A)</enum><text>the Committee on Energy and Natural Resources and the Committee on Commerce, Science, and Transportation of the Senate;</text></subparagraph><subparagraph id="id0028218338f14625b3bf49304dfddd4b"><enum>(B)</enum><text>the Committee on Energy and Commerce of the House of Representatives; and</text></subparagraph><subparagraph id="id0a2a11f941124c709b4b11bae8b199c3" commented="no" display-inline="no-display-inline"><enum>(C)</enum><text>each congressional committee with jurisdiction over an applicable covered agency. 
</text></subparagraph></paragraph><paragraph id="id58bf3983691f4dccb8a3febc48600ce6"><enum>(2)</enum><header>Artificial intelligence system</header><text>The term <term>artificial intelligence system</term> means an engineered system that—</text><subparagraph id="idc3766f1f4f08473ba9251a8cb514ab00"><enum>(A)</enum><text>generates outputs, such as content, predictions, recommendations, or decisions for a given set of human-defined objectives; and</text></subparagraph><subparagraph id="id15ff03f17bca409b829ee34a5f7c942a"><enum>(B)</enum><text>is designed to operate with varying levels of adaptability and autonomy using machine and human-based inputs.</text></subparagraph></paragraph><paragraph id="idccffebdcaa6248be920aa9fc5092700f"><enum>(3)</enum><header>Covered agency</header><text>The term <term>covered agency</term> means an agency for which the Under Secretary develops an NIST recommendation. </text></paragraph><paragraph id="idABBC7249AD38426BADB7162DE87886ED"><enum>(4)</enum><header>Covered internet platform</header><subparagraph commented="no" display-inline="no-display-inline" id="id6C30B7D7A3C14EE693F210E714673ADC"><enum>(A)</enum><header>In general</header><text display-inline="yes-display-inline">The term <term>covered internet platform</term>—</text><clause id="ida235fe79bbcc4d96901fb5238c788b82"><enum>(i)</enum><text>means any public-facing website, consumer-facing internet application, or mobile application available to consumers in the United States; and</text></clause><clause commented="no" display-inline="no-display-inline" id="id766BDE142ED847448218838C4788100C"><enum>(ii)</enum><text display-inline="yes-display-inline">includes a social network site, video sharing service, search engine, and content aggregation service.</text></clause></subparagraph><subparagraph id="id9D956A4E95E94B1DBA5203A752BBF66F"><enum>(B)</enum><header>Exclusions</header><text>The term <term>covered internet platform</term> does not include a platform that—</text><clause 
id="idDCF43C84F9EE46969924445B1D93818D"><enum>(i)</enum><text>is wholly owned, controlled, and operated by a person that—</text><subclause id="id77B632A6A994491BB8265C047DD47EBF"><enum>(I)</enum><text>during the most recent 180-day period, did not employ more than 500 employees;</text></subclause><subclause id="id0FF13271AA464268B7E81F8CA01C2807"><enum>(II)</enum><text>during the most recent 3-year period, averaged less than $50,000,000 in annual gross receipts; and</text></subclause><subclause id="idD50907399A854B1187A4C333ADC53FF4"><enum>(III)</enum><text>on an annual basis, collects or processes the personal data of less than 1,000,000 individuals; or</text></subclause></clause><clause id="id0511D33015544535932970DB6D185ED7" commented="no" display-inline="no-display-inline"><enum>(ii)</enum><text>is operated for the sole purpose of conducting research that is not directly or indirectly made for profit.</text></clause></subparagraph></paragraph><paragraph id="id641bcdc0764644829d1f549123124add"><enum>(5)</enum><header>Critical-impact AI organization</header><text>The term <term>critical-impact AI organization</term> means a non-government organization that serves as the deployer of a critical-impact artificial intelligence system. </text></paragraph><paragraph id="id151a1c2f9b584828b9fee94863cd7276"><enum>(6)</enum><header>Critical-impact artificial intelligence system</header><text>The term <term>critical-impact artificial intelligence system</term> means an artificial intelligence system that—</text><subparagraph id="idab9549ca05ba44ef9f350645d85a8eac"><enum>(A)</enum><text>is deployed for a purpose other than solely for use by the Department of Defense or an intelligence agency (as defined in section 3094(e) of the National Security Act of 1947 (<external-xref legal-doc="usc" parsable-cite="usc/50/3094">50 U.S.C. 
3094(3)</external-xref>)); and </text></subparagraph><subparagraph id="idd336dd8937b64fb29e888c51151965f2"><enum>(B)</enum><text>is used or intended to be used—</text><clause id="idd43723d9b570415199a93359f127ceb4"><enum>(i)</enum><text>to make decisions that have a legal or similarly significant effect on—</text><subclause id="id4cfe8e40a27a4e85a4a89ff626d0a750"><enum>(I)</enum><text>the real-time or ex post facto collection of biometric data of natural persons by biometric identification systems without their consent;</text></subclause><subclause id="id9625298199394d09b840dd1715b9801a"><enum>(II)</enum><text>the direct management and operation of critical infrastructure (as defined in section 1016(e) of the USA PATRIOT Act (<external-xref legal-doc="usc" parsable-cite="usc/42/5195c">42 U.S.C. 5195c(e)</external-xref>)) and space-based infrastructure; or </text></subclause><subclause id="idae2fdc2bf2bd43feb615d302bd7f4914"><enum>(III)</enum><text>criminal justice (as defined in section 901 of title I of the Omnibus Crime Control and Safe Streets Act of 1968 (<external-xref legal-doc="usc" parsable-cite="usc/34/10251">34 U.S.C. 
10251</external-xref>)); and</text></subclause></clause><clause id="iddf97a30e78914daf82b494f3ea0450b7"><enum>(ii)</enum><text>in a manner that poses a significant risk to rights afforded under the Constitution of the United States or safety.</text></clause></subparagraph></paragraph><paragraph id="idce54b1cc2f4d4d168d1a64683f5210fd"><enum>(7)</enum><header>Deployer</header><text>The term <term>deployer</term>—</text><subparagraph commented="no" display-inline="no-display-inline" id="idbc40f10f6dd94bbdb84d9782d51c317b"><enum>(A)</enum><text display-inline="yes-display-inline">means an entity that uses or operates an artificial intelligence system for internal use or for use by third parties; and</text></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="idfb6965173a6144fcbc978d3f575e1cb4"><enum>(B)</enum><text display-inline="yes-display-inline">does not include an entity that is solely an end user of a system.</text></subparagraph></paragraph><paragraph id="id30a8883db29248a781030a562be58ff7"><enum>(8)</enum><header>Developer</header><text>The term <term>developer</term> means an entity that—</text><subparagraph commented="no" display-inline="no-display-inline" id="id48c786805c544197810b910fb7fcbb44"><enum>(A)</enum><text display-inline="yes-display-inline">designs, codes, produces, or owns an artificial intelligence system for internal use or for use by a third party as a baseline model; and</text></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="id73afd08111764f55ae703aed313cdbe1"><enum>(B)</enum><text display-inline="yes-display-inline">does not act as a deployer of the artificial intelligence system described in subparagraph (A).</text></subparagraph></paragraph><paragraph id="id5f3a2eaf9cc749d895df372f4bb4a75c"><enum>(9)</enum><header>Generative artificial intelligence system</header><text>The term <term>generative artificial intelligence system</term> means an artificial intelligence system that 
generates novel data or content in a written, audio, or visual format.</text></paragraph><paragraph id="id6ce16cc9ebc242f3a353868cbfd8c5fb"><enum>(10)</enum><header>High-impact artificial intelligence system</header><text>The term <term>high-impact artificial intelligence system</term> means an artificial intelligence system—</text><subparagraph commented="no" display-inline="no-display-inline" id="idbb1604a7ad964972b34663dfbe0d6ffe"><enum>(A)</enum><text display-inline="yes-display-inline">deployed for a purpose other than solely for use by the Department of Defense or an intelligence agency (as defined in section 3094(e) of the National Security Act of 1947 (<external-xref legal-doc="usc" parsable-cite="usc/50/3094">50 U.S.C. 3094(3)</external-xref>)); and</text></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="id1ab5a37df52144dd9dcc6981a5ea7827"><enum>(B)</enum><text display-inline="yes-display-inline">that is specifically developed with the intended purpose of making decisions that have a legal or similarly significant effect on the access of an individual to housing, employment, credit, education, healthcare, or insurance in a manner that poses a significant risk to rights afforded under the Constitution of the United States or safety. </text></subparagraph></paragraph><paragraph id="idb5938dab4ded4130b5725a74594832fa"><enum>(11)</enum><header>NIST recommendation</header><text>The term <term>NIST recommendation</term> means a sector-specific recommendation developed under section 22B(b)(1) of the National Institute of Standards and Technology Act, as added by section 204 of this Act. 
</text></paragraph><paragraph id="ide23e0f2ad36c49da8323e36dd2dd639d"><enum>(12)</enum><header>Secretary</header><text>The term <term>Secretary</term> means the Secretary of Commerce.</text></paragraph><paragraph id="id2f75efa0e65648a2a963c3ee1c214a9f"><enum>(13)</enum><header>Significant risk</header><text>The term <term>significant risk</term> means a combination of severe, high-intensity, high-probability, and long-duration risk of harm to individuals.</text></paragraph><paragraph id="id9454449e445d4a909efc0f45c39ca682"><enum>(14)</enum><header>TEVV</header><text>The term <term>TEVV</term> means the testing, evaluation, validation, and verification of any artificial intelligence system that includes—</text><subparagraph id="id13b24e42ae714f858dc3898f2e97cf56"><enum>(A)</enum><text>open, transparent, testable, and verifiable specifications that characterize realistic operational performance, such as precision and accuracy for relevant tasks;</text></subparagraph><subparagraph id="id82bc19f89a8d437988edc836c5841118"><enum>(B)</enum><text>testing methodologies and metrics that enable the evaluation of system trustworthiness, including robustness and resilience;</text></subparagraph><subparagraph id="idd6c8da2296af43a495bc35a8e3166ce2"><enum>(C)</enum><text>data quality standards for training and testing datasets;</text></subparagraph><subparagraph id="idbf1780f5d2644dcbab3b98b1e544e6b1"><enum>(D)</enum><text>requirements for system validation and integration into production environments, automated testing, and compliance with existing legal and regulatory specifications;</text></subparagraph><subparagraph id="id0361d080047040c18b80be412e345ffe"><enum>(E)</enum><text>methods and tools for—</text><clause commented="no" display-inline="no-display-inline" id="id318e1c232af941d5aa55a2bde6d7beec"><enum>(i)</enum><text display-inline="yes-display-inline">the monitoring of system behavior;</text></clause><clause commented="no" display-inline="no-display-inline" 
id="idbc8cfb3163b144449134c21b4fa7ac06"><enum>(ii)</enum><text display-inline="yes-display-inline">the tracking of incidents or errors reported and their management; and</text></clause><clause commented="no" display-inline="no-display-inline" id="idbbb1151a32ce4ca48dd511cf97a30ad2"><enum>(iii)</enum><text display-inline="yes-display-inline">the detection of emergent properties and related impacts; and</text></clause></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="id183b8855379040ed8888a5512083ae80"><enum>(F)</enum><text display-inline="yes-display-inline">processes for redress and response.</text></subparagraph></paragraph><paragraph commented="no" display-inline="no-display-inline" id="id6eea8f227bb944e3803f6946135ac43a"><enum>(15)</enum><header>Under Secretary</header><text>The term <term>Under Secretary</term> means the Director of the National Institute of Standards and Technology.</text></paragraph></section><section id="id831e30897eed4d408a857be5a384edf0"><enum>202.</enum><header>Generative artificial intelligence transparency</header><subsection id="idfef1145f85ad4b11843fd2f21e4c7c07"><enum>(a)</enum><header>Prohibition</header><paragraph commented="no" display-inline="no-display-inline" id="id4f2f7f83a3bf48e996f8443034fa4d07"><enum>(1)</enum><header>In general</header><text display-inline="yes-display-inline">Subject to paragraph (2), it shall be unlawful for a person to operate a covered internet platform that uses a generative artificial intelligence system.</text></paragraph><paragraph id="idff8220574289413e89904954bfce4940" commented="no"><enum>(2)</enum><header>Disclosure of use of generative artificial intelligence systems</header><subparagraph commented="no" display-inline="no-display-inline" id="id655aeeca58c94a5fa63d936df47c4c4f"><enum>(A)</enum><header>In general</header><text display-inline="yes-display-inline">A person may operate a covered internet platform that uses a generative artificial intelligence 
system if the person provides notice to each user of the covered internet platform that the covered internet platform uses a generative artificial intelligence system to generate content the user sees.</text></subparagraph><subparagraph id="id00dd0ae675694989bed90528147183b4"><enum>(B)</enum><header>Requirements</header><text>A person providing the notice described in subparagraph (A) to a user—</text><clause commented="no" display-inline="no-display-inline" id="id34fbc4b897604cacb42ca95eb7b8111b"><enum>(i)</enum><text display-inline="yes-display-inline">subject to clause (ii), shall provide the notice in a clear and conspicuous manner on the covered internet platform before the user interacts with content produced by a generative artificial intelligence system; and</text></clause><clause commented="no" display-inline="no-display-inline" id="id67dc109cd89645b7bc34bc704ee1eac5"><enum>(ii)</enum><text display-inline="yes-display-inline">may provide an option for the user to choose to see the notice described in clause (i) only upon the first interaction of the user with content produced by a generative artificial intelligence system.</text></clause></subparagraph></paragraph></subsection><subsection id="id88409107bff744d1893ab29a0b019ba2"><enum>(b)</enum><header>Enforcement action</header><text>Upon learning that a covered internet platform does not comply with the requirements under this section, the Secretary—</text><paragraph commented="no" display-inline="no-display-inline" id="id3d57990d88b6403cb8eff64bba34eb76"><enum>(1)</enum><text display-inline="yes-display-inline">shall immediately—</text><subparagraph id="id349f1241fc0a4e72978de87a0f7bb344"><enum>(A)</enum><text>notify the covered internet platform of the finding; and</text></subparagraph><subparagraph id="id871978550d444137ae3ea69b688b9e58"><enum>(B)</enum><text>order the covered internet platform to take remedial action to address the noncompliance of the generative artificial intelligence system 
operated by the covered internet platform; and</text></subparagraph></paragraph><paragraph id="idcc88b17a453b4445b6dda05a827f0447"><enum>(2)</enum><text>may, as determined appropriate or necessary by the Secretary, take enforcement action under section 208 if the covered internet platform does not take sufficient action to remedy the noncompliance within 15 days of the notification under paragraph (1)(A).</text></paragraph></subsection><subsection id="id08c20940382248c686f727bfb62c7aa5"><enum>(c)</enum><header>Effective date</header><text>This section shall take effect on the date that is 180 days after the date of enactment of this Act.</text></subsection></section><section id="id80074546495e4fa5b76faeb153dd11c1"><enum>203.</enum><header>Transparency reports for high-impact artificial intelligence systems</header><subsection id="id46b47245ae1d441788e1d25db9109f3b"><enum>(a)</enum><header>Transparency reporting</header><paragraph id="id0fb8ac48c200420ba5188f8537b287dd"><enum>(1)</enum><header>In general</header><text>Each deployer of a high-impact artificial intelligence system shall—</text><subparagraph id="idf292db6d2c70441aa6f7a08bf4b8d48a"><enum>(A)</enum><text>before deploying the high-impact artificial intelligence system, and annually thereafter, submit to the Secretary a report describing the design and safety plans for the artificial intelligence system; and</text></subparagraph><subparagraph id="id5d6216ab713a420e874bb0cfa6a52b1a"><enum>(B)</enum><text>submit to the Secretary an updated report on the high-impact artificial intelligence system if the deployer makes a material change to—</text><clause id="idbd592ec96b614b258fdf5308e9718f7d"><enum>(i)</enum><text>the purpose for which the high-impact artificial intelligence system is used; or</text></clause><clause id="id028802c7ac364215b6615f9b8b5aa40d"><enum>(ii)</enum><text>the type of data the high-impact artificial intelligence system processes or uses for training 
purposes.</text></clause></subparagraph></paragraph><paragraph id="id95dbe868c75c4feba6a16ae02e787d7e"><enum>(2)</enum><header>Contents</header><text>Each transparency report submitted under paragraph (1) shall include, with respect to the high-impact artificial intelligence system—</text><subparagraph id="id228689ffe9534563b8ce28c344358201"><enum>(A)</enum><text>the purpose;</text></subparagraph><subparagraph id="ida665e44f7c3043f3a151a227614cdd20"><enum>(B)</enum><text>the intended use cases;</text></subparagraph><subparagraph id="id07706866948d4dc59792b34859a3afdf"><enum>(C)</enum><text>deployment context;</text></subparagraph><subparagraph id="id18d5fa12eea943a799b7e1075a62e1bf"><enum>(D)</enum><text>benefits;</text></subparagraph><subparagraph id="id65765f43ed1a40f098e248cb6c7ceb61"><enum>(E)</enum><text>a description of data that the high-impact artificial intelligence system, once deployed, processes as inputs;</text></subparagraph><subparagraph id="id64b553999c6e44edb23ff1cb05eb6a1d"><enum>(F)</enum><text>if available—</text><clause id="id6deb9fb3a15f43df9df2e82336dcdf36"><enum>(i)</enum><text>a list of data categories and formats the deployer used to retrain or continue training the high-impact artificial intelligence system;</text></clause><clause id="id68afe9c0b24f4341a7b91ad4445af12d"><enum>(ii)</enum><text>metrics for evaluating the high-impact artificial intelligence system performance and known limitations; and</text></clause><clause id="id3d8339283ecf4d548f115c171d004f08"><enum>(iii)</enum><text>transparency measures, including information identifying to individuals when a high-impact artificial intelligence system is in use;</text></clause></subparagraph><subparagraph id="idce5092db9e11406d90bd0662b51bd79c"><enum>(G)</enum><text>processes and testing performed before each deployment to ensure the high-impact artificial intelligence system is safe, reliable, and effective;</text></subparagraph><subparagraph 
id="id2b62d80d8dbf4b66b12583153cde30db"><enum>(H)</enum><text>if applicable, an identification of any third-party artificial intelligence systems or datasets the deployer relies on to train or operate the high-impact artificial intelligence system; and</text></subparagraph><subparagraph id="id716bff0249b04e03bc35acaf75d5c3c4"><enum>(I)</enum><text>post-deployment monitoring and user safeguards, including a description of the oversight process in place to address issues as issues arise.</text></subparagraph></paragraph></subsection><subsection id="id84af25c261ca499faf205e9885d6d59f"><enum>(b)</enum><header>Developer obligations</header><text>The developer of a high-impact artificial intelligence system shall be subject to the same obligations as a developer of a critical impact artificial intelligence system under section 206(c).</text></subsection><subsection id="ide6ae213e84ed41a2a508374e587ad17c"><enum>(c)</enum><header>Considerations</header><text>In carrying out subsections (a) and (b), a deployer or developer of a high-impact artificial intelligence system shall consider the best practices outlined in the most recent version of the risk management framework developed pursuant to section 22A(c) of the National Institute of Standards and Technology Act (<external-xref legal-doc="usc" parsable-cite="usc/15/278h-1">15 U.S.C. 
278h–1(c)</external-xref>).</text></subsection><subsection id="id74aca9accb004f5a8eb1700f1ac964ed"><enum>(d)</enum><header>Noncompliance and enforcement action</header><text>Upon learning that a deployer of a high-impact artificial intelligence system is not in compliance with the requirements under this section with respect to a high-impact artificial intelligence system, the Secretary—</text><paragraph id="id7ea7d1b7f4f84e108d1c69213a4ad191"><enum>(1)</enum><text>shall immediately—</text><subparagraph id="id4442000165bc40cf8206df371e25d519"><enum>(A)</enum><text>notify the deployer of the finding; and</text></subparagraph><subparagraph id="id3d7a608a7e5140d0a1851a79772da772"><enum>(B)</enum><text>order the deployer to immediately submit to the Secretary the report required under subsection (a)(1); and</text></subparagraph></paragraph><paragraph id="id9ea48fe6a0bb4510ad1c0f64fb71ee89"><enum>(2)</enum><text>if the deployer fails to submit the report by the date that is 15 days after the date of the notification under paragraph (1)(A), may take enforcement action under section 208.</text></paragraph></subsection><subsection id="idd960c3727a044d0cb3a9c2b5e7f62fd9"><enum>(e)</enum><header>Avoidance of duplication</header><paragraph id="id722cfef617f440dd8cc6f4143ff75fb3"><enum>(1)</enum><header>In general</header><text>Pursuant to the deconfliction of duplicative requirements under paragraph (2), the Secretary shall ensure that the requirements under this section are not unnecessarily burdensome or duplicative of requirements made or oversight conducted by a covered agency regarding the non-Federal use of high-impact artificial intelligence systems.</text></paragraph><paragraph id="id000da51bd77549d1b35eddf0ef4f4fb5"><enum>(2)</enum><header>Deconfliction of duplicative requirements</header><text>Not later than 90 days after the date of the enactment of this Act, and annually thereafter, the Secretary, in coordination with the head of any relevant covered agency, shall 
complete the deconfliction of duplicative requirements relating to the submission of a transparency report for a high-impact artificial intelligence system under this section. </text></paragraph></subsection><subsection id="id9d52665b8ef04c69814f5c2e21d7a1e5"><enum>(f)</enum><header>Rule of construction</header><text>Nothing in this section shall be construed to require a deployer of a high-impact artificial intelligence system to disclose any information, including data or algorithms—</text><paragraph id="id8cc2a46efeb6453694e317fdd8bda2be"><enum>(1)</enum><text>relating to a trade secret or other protected intellectual property right;</text></paragraph><paragraph id="id67c4370916344ade8cd6545898c8fd87"><enum>(2)</enum><text>that is confidential business information; or</text></paragraph><paragraph id="iddaf5f9eefbc54f24ba9107be14b08753" commented="no" display-inline="no-display-inline"><enum>(3)</enum><text>that is privileged. </text></paragraph></subsection></section><section id="idd422be3f98794c4e8a679ed677bab61f"><enum>204.</enum><header>Recommendations to Federal agencies for risk management of high-impact artificial intelligence systems</header><text display-inline="no-display-inline">The National Institute of Standards and Technology Act (<external-xref legal-doc="usc" parsable-cite="usc/15/278h-1">15 U.S.C. 
278h–1</external-xref>) is amended by inserting after section 22A the following:</text><quoted-block id="id98a07b608f2441da9055293c1ea38a2b" display-inline="no-display-inline" style="OLC"><section id="idbba363a1cf6b4ad8bf1f70e7180efe33"><enum>22B.</enum><header>Recommendations to Federal agencies for sector-specific oversight of artificial intelligence</header><subsection id="iddd4007ebf5854f40a1cd255a8b18d8b7"><enum>(a)</enum><header>Definition of high-Impact artificial intelligence system</header><text>In this section, the term <term>high-impact artificial intelligence system</term> means an artificial intelligence system—</text><paragraph commented="no" display-inline="no-display-inline" id="id3168fae65de4450e8532a773145d20e7"><enum>(1)</enum><text display-inline="yes-display-inline">deployed for purposes other than those solely for use by the Department of Defense or an element of the intelligence community (as defined in section 3 of the National Security Act of 1947 (<external-xref legal-doc="usc" parsable-cite="usc/50/3003">50 U.S.C. 
3003</external-xref>)); and</text></paragraph><paragraph commented="no" display-inline="no-display-inline" id="id4f4f44c7c1254ab8bd27b9d4912f2870"><enum>(2)</enum><text display-inline="yes-display-inline">that is specifically developed with the intended purpose of making decisions that have a legal or similarly significant effect on the access of an individual to housing, employment, credit, education, health care, or insurance in a manner that poses a significant risk to rights afforded under the Constitution of the United States or to safety.</text></paragraph></subsection><subsection id="idce248e84d2354656b0da770dd702cfbb"><enum>(b)</enum><header>Sector-Specific recommendations</header><text>Not later than 1 year after the date of the enactment of the <short-title>Artificial Intelligence Research, Innovation, and Accountability Act of 2023</short-title>, the Director shall—</text><paragraph id="id3511e0a79b464eee9b46798a5c309af1"><enum>(1)</enum><text>develop sector-specific recommendations for individual Federal agencies to conduct oversight of the non-Federal, and, as appropriate, Federal use of high-impact artificial intelligence systems to improve the safe and responsible use of such systems; and</text></paragraph><paragraph id="id4d2ddee23bde435ab2858dc53ab857f1"><enum>(2)</enum><text>not less frequently than biennially, update the sector-specific recommendations to account for changes in technological capabilities or artificial intelligence use cases.</text></paragraph></subsection><subsection id="id8f03df49bc1c42bba923828fd0c563f3"><enum>(c)</enum><header>Requirements</header><text>In developing recommendations under subsection (b), the Director shall use the voluntary risk management framework required by section 22A(c) to identify and provide recommendations to a Federal agency—</text><paragraph id="id85bf5394584e40248d12287199c1c231"><enum>(1)</enum><text>to establish regulations, standards, guidelines, best practices, methodologies, procedures, or 
processes to facilitate oversight of non-Federal use of high-impact artificial intelligence systems; and</text></paragraph><paragraph id="id97515c8134fb4ff9aaf38b02aff55f20"><enum>(2)</enum><text>to mitigate risks from such high-impact artificial intelligence systems.</text></paragraph></subsection><subsection id="id24a722eee3aa4eca8696a5df38d93cd1"><enum>(d)</enum><header>Recommendations</header><text>In developing recommendations under subsection (b), the Director may include the following:</text><paragraph id="idf37013bc91e94660b56a4146ee81f685"><enum>(1)</enum><text>Key design choices made during high-impact artificial intelligence model development, including rationale and assumptions made.</text></paragraph><paragraph id="id00b87127686f4333ab9565ca5e3e51f0"><enum>(2)</enum><text>Intended use and users, other possible use cases, including any anticipated undesirable or potentially harmful use cases, and what good faith efforts model developers can take to mitigate the use of the system in harmful ways.</text></paragraph><paragraph id="id7d6eba9aa61f441f8f4f600f85745f45"><enum>(3)</enum><text>Methods for evaluating the safety of high-impact artificial intelligence systems and approaches for responsible use.</text></paragraph><paragraph id="id6ca82d99fbc145c8bd6d5616ccc1fd4c"><enum>(4)</enum><text>Sector-specific differences in what constitutes acceptable high-impact artificial intelligence model functionality and trustworthiness, metrics used to determine high-impact artificial intelligence model performance, and any test results reflecting application of these metrics to evaluate high-impact artificial intelligence model performance across different sectors.</text></paragraph><paragraph id="id95e5903efbfe43d18c82848f6bd160cc" commented="no" display-inline="no-display-inline"><enum>(5)</enum><text>Recommendations to support iterative development of subsequent recommendations under subsection (b).</text></paragraph></subsection><subsection 
id="idd110d76996764fc198a5eb72c88d4028" commented="no" display-inline="no-display-inline"><enum>(e)</enum><header>Consultation</header><text>In developing recommendations under subsection (b), the Director shall, as the Director considers applicable and practicable, consult with relevant covered agencies and stakeholders representing perspectives from civil society, academia, technologists, engineers, and creators.</text></subsection></section><after-quoted-block>.</after-quoted-block></quoted-block></section><section id="id937581d398aa437589d2c564dc9fd97e"><enum>205.</enum><header>Office of Management and Budget oversight of recommendations to agencies</header><subsection id="id0f4c9c77bae646e492047f07147fe941"><enum>(a)</enum><header>Recommendations</header><paragraph id="id667ff0a9109b4ef1b63fc40150f89104"><enum>(1)</enum><header>In general</header><text>Not later than 1 year after the date of enactment of this Act, the Under Secretary shall submit to the Director, the head of each covered agency, and the appropriate congressional committees each NIST recommendation.</text></paragraph><paragraph id="id6221cd7a19dc4a2b8fc29a9b3ccf985d"><enum>(2)</enum><header>Agency responses to recommendations</header><text>Not later than 90 days after the date on which the Under Secretary submits a NIST recommendation to the head of a covered agency under paragraph (1), the head of the covered agency shall transmit to the Director a formal written response to the NIST recommendation that—</text><subparagraph id="idc6e7e862dac44f27b408c3abb90567c2"><enum>(A)</enum><text>indicates whether the head of the covered agency intends to—</text><clause id="ida0adbdee0b9449ea8b139e5ad3065ad3"><enum>(i)</enum><text>carry out procedures to adopt the complete NIST recommendation;</text></clause><clause id="idc521933994874ecd84d29acce0d6f528"><enum>(ii)</enum><text>carry out procedures to adopt a part of the NIST recommendation; or</text></clause><clause 
id="id385a3a2972934a74983748102a4a5816"><enum>(iii)</enum><text>refuse to carry out procedures to adopt the NIST recommendation; and</text></clause></subparagraph><subparagraph id="idfef0b90862824c0f93af1b1ddfc1b9a6"><enum>(B)</enum><text>includes—</text><clause id="id4ae4c221a4dc433694c04d5730b86bed"><enum>(i)</enum><text>with respect to a formal written response described in clause (i) or (ii) of subparagraph (A), a copy of a proposed timetable for completing the procedures described in that clause;</text></clause><clause id="id5a80663a4e674759889ceddcba149f1d"><enum>(ii)</enum><text>with respect to a formal written response described in subparagraph (A)(ii), the reasons for the refusal to carry out procedures with respect to the remainder of the NIST recommendation described in that subparagraph; and</text></clause><clause id="ida438b53badae4ac88f868a9e0fcdd7bc"><enum>(iii)</enum><text>with respect to a formal written response described in subparagraph (A)(iii), the reasons for the refusal to carry out procedures.</text></clause></subparagraph></paragraph></subsection><subsection id="ide510c1438e3d4b37bdba368af407d2d2"><enum>(b)</enum><header>Public availability</header><text>The Director shall make a copy of each NIST recommendation and each written formal response of a covered agency required under subsection (a)(2) available to the public at reasonable cost.</text></subsection><subsection id="id64ee16f2a4d7434ba848cef9050c2170"><enum>(c)</enum><header>Reporting requirements</header><paragraph id="idc8b0f5fcf1b442d3a631a0538a6b5bbd"><enum>(1)</enum><header>Annual secretarial regulatory status reports</header><subparagraph id="id9b95578b0add4ad58f3700a0846a469b"><enum>(A)</enum><header>In general</header><text>On the first February 1 occurring after the date of enactment of this Act, and annually thereafter until the date described in subparagraph (B), the head of each covered agency shall submit to the Director a report containing the regulatory status of each 
NIST recommendation.</text></subparagraph><subparagraph id="id5f6d31cc873b4b09a1538d9d1d1faff0"><enum>(B)</enum><header>Continued reporting</header><text>The date described in this subparagraph is the date on which the head of a covered agency—</text><clause id="id0cb024e5f256414c972f0af63d8b7a2d"><enum>(i)</enum><text>takes final regulatory action with respect to a NIST recommendation; or</text></clause><clause id="idba95909e4d5548dfab0e89b37444ddd3"><enum>(ii)</enum><text>determines and states in a report required under subparagraph (A) that no regulatory action should be taken with respect to a NIST recommendation.</text></clause></subparagraph></paragraph><paragraph id="ida499dcceba714b589e28f59a72fd4153"><enum>(2)</enum><header>Compliance report to Congress</header><text>On April 1 of each year, the Director shall—</text><subparagraph id="idb4e7346b54ea4cb087f670802b727499"><enum>(A)</enum><text>review the reports received under paragraph (1)(A); and</text></subparagraph><subparagraph id="id417caa9bb97d4791979d0aacaad82188"><enum>(B)</enum><text>transmit comments on the reports to the heads of covered agencies and the appropriate congressional committees.</text></subparagraph></paragraph><paragraph id="id1630b0b9bd574d90831d61d0cdfac304"><enum>(3)</enum><header>Failure to report</header><text>If, on March 1 of each year, the Director has not received a report required under paragraph (1)(A) from the head of a covered agency, the Director shall notify the appropriate congressional committees of the failure.</text></paragraph></subsection><subsection id="iddd8bb85642894de8a0137221c8d59ab8"><enum>(d)</enum><header>Technical assistance in carrying out recommendations</header><text>The Under Secretary shall provide assistance to the heads of covered agencies relating to the implementation of the NIST recommendations the heads of covered agencies intend to carry out.</text></subsection><subsection 
id="idd40dfa92486c4b66b49aee671e42ba8b"><enum>(e)</enum><header>Regulation review and improvement</header><text>The Administrator of the Office of Information and Regulatory Affairs of the Office of Management and Budget, in consultation with the Under Secretary, shall develop and periodically revise performance indicators and measures for sector-specific regulation of artificial intelligence. </text></subsection></section><section id="idc9c834caca524c31953d74e9215ee8d8"><enum>206.</enum><header>Risk management assessment for critical-impact artificial intelligence systems</header><subsection id="id07df5405530d466294ae9932b6186bc5"><enum>(a)</enum><header>Requirement</header><paragraph id="id52c8d52dd1254869ba9ca6cd21fd1da2"><enum>(1)</enum><header>In general</header><text>Each critical-impact AI organization shall perform a risk management assessment in accordance with this section.</text></paragraph><paragraph id="id38d065898c8f496b9b6eac0a10b26228"><enum>(2)</enum><header>Assessment</header><text>Each critical-impact AI organization shall—</text><subparagraph id="id191915e4e4854477aef2297e08218c16"><enum>(A)</enum><text>not later than 30 days before the date on which a critical-impact artificial intelligence system is made publicly available by the critical-impact AI organization, perform a risk management assessment; and</text></subparagraph><subparagraph id="id8e282437c87b4adeb1b03b8570307f1d"><enum>(B)</enum><text>not less frequently than biennially during the period beginning on the date of enactment of this Act and ending on the date on which the applicable critical-impact artificial intelligence system is no longer being made publicly available by the critical-impact AI organization, as applicable, conduct an updated risk management assessment that—</text><clause id="idbd3bc02328e24bc499d8f93ba94bcf1b"><enum>(i)</enum><text>may find that no significant changes were made to the critical-impact artificial intelligence system; and</text></clause><clause 
id="id0eb8d78037bf4cad8a246da16c5ec4b2"><enum>(ii)</enum><text>provides, to the extent practicable, aggregate results of any significant deviation from expected performance detailed in the assessment performed under subparagraph (A) or the most recent assessment performed under this subparagraph.</text></clause></subparagraph></paragraph><paragraph id="id7dece76d1f314a7e83a7a45b5d5061bc"><enum>(3)</enum><header>Review</header><subparagraph id="id26bd006ae2fe4411a6cc862642f2529a"><enum>(A)</enum><header>In general</header><text>Not later than 90 days after the date of completion of a risk management assessment by a critical-impact AI organization under this section, the critical-impact AI organization shall submit to the Secretary a report—</text><clause commented="no" display-inline="no-display-inline" id="id42f97412a10f402d8fc28039dac8062a"><enum>(i)</enum><text display-inline="yes-display-inline">outlining the assessment performed under this section; and</text></clause><clause commented="no" display-inline="no-display-inline" id="id8bb0e5a8ed7243729c93d9bd6f1025f8"><enum>(ii)</enum><text>that is in a consistent format, as determined by the Secretary.</text></clause></subparagraph><subparagraph id="id97bd236947d147359b9eb40e959b25fd"><enum>(B)</enum><header>Additional information</header><text>Subject to subsection (d), the Secretary may request that a critical-impact AI organization submit to the Secretary any related additional or clarifying information with respect to a risk management assessment performed under this section.</text></subparagraph></paragraph><paragraph id="id196195b0f0a64b92a88d311fd034c655"><enum>(4)</enum><header>Limitation</header><text>The Secretary may not prohibit a critical-impact AI organization from making a critical-impact artificial intelligence system available to the public based on the review by the Secretary of a report submitted under paragraph (3)(A) or additional or clarifying information submitted under paragraph 
(3)(B).</text></paragraph></subsection><subsection id="id8d0d4978ca4b493cbe47af5195600f69"><enum>(b)</enum><header>Assessment subject areas</header><text>Each assessment performed by a critical-impact AI organization under subsection (a) shall describe the means by which the critical-impact AI organization is addressing, through a documented TEVV process, the following categories:</text><paragraph id="idb0a9567edff64202912e8c60e05336d2"><enum>(1)</enum><text>Policies, processes, procedures, and practices across the organization relating to transparent and effective mapping, measuring, and managing of artificial intelligence risks, including—</text><subparagraph id="id7f3fbea907d5435cb4888e43b576efae"><enum>(A)</enum><text>how the organization understands, manages, and documents legal and regulatory requirements involving artificial intelligence;</text></subparagraph><subparagraph id="id84eb6ccbcc4a4a129791e7f09eff486d"><enum>(B)</enum><text>how the organization integrates characteristics of trustworthy artificial intelligence, which include valid, reliable, safe, secure, resilient, accountable, transparent, globally and locally explainable, interpretable, privacy-enhanced, and fair with harmful bias managed, into organizational policies, processes, procedures, and practices; </text></subparagraph><subparagraph id="id29d1b1023f344b6ca407e3634539b571"><enum>(C)</enum><text>a methodology to determine the needed level of risk management activities based on the organization’s risk tolerance; and</text></subparagraph><subparagraph id="idcd5bb6e687194f8ab4756b377ab97375"><enum>(D)</enum><text>how the organization establishes risk management processes and outcomes through transparent policies, procedures, and other controls based on organizational risk priorities.</text></subparagraph></paragraph><paragraph id="id00bcab1267eb4fb2b4efa0840e1c0769"><enum>(2)</enum><text>The structure, context, and capabilities of the critical-impact artificial intelligence system or 
critical-impact foundation model, including—</text><subparagraph id="id31c3ad70856a4f968f3f64ca67febc14"><enum>(A)</enum><text>how the context was established and understood;</text></subparagraph><subparagraph id="id132578ee5e724d9bb2c57573da399cf0"><enum>(B)</enum><text>capabilities, targeted uses, goals, and expected costs and benefits; and</text></subparagraph><subparagraph id="idba5f2485161e4587b94e15e6aeb2c1ea"><enum>(C)</enum><text>how risks and benefits are mapped for each system component.</text></subparagraph></paragraph><paragraph id="id25f2181a0661498da69f21cf4b4dd986"><enum>(3)</enum><text>A description of how the organization employs quantitative, qualitative, or mixed-method tools, techniques, and methodologies to analyze, assess, benchmark, and monitor artificial intelligence risk, including—</text><subparagraph id="idbd13c974cc6244708d3d168c517b780f"><enum>(A)</enum><text>identification of appropriate methods and metrics;</text></subparagraph><subparagraph id="id83a1722a46274d19a3ad5d171dee4983"><enum>(B)</enum><text>how artificial intelligence systems are evaluated for trustworthy characteristics;</text></subparagraph><subparagraph id="id608314b005684cf6bc50804c111de2d8"><enum>(C)</enum><text>mechanisms for tracking artificial intelligence system risks over time; and</text></subparagraph><subparagraph id="idf36ae736af1c4d29be291f4004bc9a17"><enum>(D)</enum><text>processes for gathering and assessing feedback relating to the efficacy of measurement.</text></subparagraph></paragraph><paragraph id="id0146829ec49f4757957306aba0725bdd"><enum>(4)</enum><text>A description of allocation of risk resources to map and measure risks on a regular basis as described in paragraph (1), including—</text><subparagraph id="id3d04b07b5f62451f9b6e61bcdc109f49"><enum>(A)</enum><text>how artificial intelligence risks based on assessments and other analytical outputs described in paragraphs (2) and (3) are prioritized, responded to, and 
managed;</text></subparagraph><subparagraph id="ide1c72f506ab0446e93694787b60259df"><enum>(B)</enum><text>how strategies to maximize artificial intelligence benefits and minimize negative impacts were planned, prepared, implemented, documented, and informed by input from relevant artificial intelligence deployers;</text></subparagraph><subparagraph id="id8dd899ccf1bd432c8595e08b4bfcf18c"><enum>(C)</enum><text>management of artificial intelligence system risks and benefits; and</text></subparagraph><subparagraph id="idf57b980144ed4a3fab961ff03a5ff3fd"><enum>(D)</enum><text>regular monitoring of risk treatments, including response and recovery, and communication plans for the identified and measured artificial intelligence risks, as applicable.</text></subparagraph></paragraph></subsection><subsection id="id1c8f8abae9b34f40ae6cd1b9d74ae248"><enum>(c)</enum><header>Developer obligations</header><text>The developer of a critical-impact artificial intelligence system that agrees through a contract or license to provide technology or services to a deployer of the critical-impact artificial intelligence system shall provide to the deployer of the critical-impact artificial intelligence system the information reasonably necessary for the deployer to comply with the requirements under subsection (a), including—</text><paragraph id="idf39cebe4ed744d1fa411d079c0b33f47"><enum>(1)</enum><text>an overview of the data used in training the baseline artificial intelligence system provided by the developer, including—</text><subparagraph id="idef1ebe7a62a54fddada2beb7bd6b9f12"><enum>(A)</enum><text>data size;</text></subparagraph><subparagraph id="id14772732ce214bf0a282e2f3567eb07a"><enum>(B)</enum><text>data sources;</text></subparagraph><subparagraph id="id81bfe802a07d462b92573a8fa157970b"><enum>(C)</enum><text>copyrighted data; and</text></subparagraph><subparagraph id="id9f382e4be84049afb5198d086727114b"><enum>(D)</enum><text>personal identifiable 
information;</text></subparagraph></paragraph><paragraph id="id02eb924d38a64b48b1a5299ad018cdb5"><enum>(2)</enum><text>documentation outlining the structure and context of the baseline artificial intelligence system of the developer, including—</text><subparagraph id="id8b7821747cb64231a42e941b0740eb50"><enum>(A)</enum><text>input modality;</text></subparagraph><subparagraph id="id21818196b2fb4af59071015b39f14c84"><enum>(B)</enum><text>output modality;</text></subparagraph><subparagraph id="id215d7b0691c94b218092cf104f04c1a8"><enum>(C)</enum><text>model size; and</text></subparagraph><subparagraph id="ide70a7ac95de44dcd90f77dd8310373c5"><enum>(D)</enum><text>model architecture;</text></subparagraph></paragraph><paragraph id="idbf76217a8b1146c5b51d12b8fadbe771"><enum>(3)</enum><text>known capabilities, limitations, and risks of the baseline artificial intelligence system of the developer at the time of the development of the artificial intelligence system; and</text></paragraph><paragraph id="id3e5a45e6d75247958142e407deb1c87b"><enum>(4)</enum><text>documentation for downstream use, including—</text><subparagraph id="idd2a157872f2d40c9ae1548d76c98f6a7"><enum>(A)</enum><text>a statement of intended purpose;</text></subparagraph><subparagraph id="idfad60f75f0564dba9dff6457b98db898"><enum>(B)</enum><text>guidelines for the intended use of the artificial intelligence system, including a list of permitted, restricted, and prohibited uses and users; and</text></subparagraph><subparagraph id="iddf3ae5ecfaa4478c9c9b6809008e4f36"><enum>(C)</enum><text>a statement of the potential for deviation from the intended purpose of the baseline artificial intelligence system.</text></subparagraph></paragraph></subsection><subsection id="id0e3e322e9f5f4dac8979e18a645f76a3"><enum>(d)</enum><header>Termination of obligation To disclose information</header><paragraph id="id747462d2d74e4eefa6ebdd5c0ec731f6"><enum>(1)</enum><header>In general</header><text>The obligation of a 
critical-impact AI organization to provide information, upon request of the Secretary, relating to a specific assessment category under subsection (b) shall end on the date of issuance of a relevant standard applicable to the same category of a critical-impact artificial intelligence system by—</text><subparagraph id="ide3a16f566ae54e458e880f68586c80ab"><enum>(A)</enum><text>the Secretary under section 207(c) with respect to a critical-impact artificial intelligence system;</text></subparagraph><subparagraph id="id0bc79cb160bd4a94afd74d49bf9c532d"><enum>(B)</enum><text>another department or agency of the Federal Government, as determined applicable by the Secretary; or</text></subparagraph><subparagraph id="id7053dcb180e4497c9262202d67476800"><enum>(C)</enum><text>a non-governmental standards organization, as determined appropriate by the Secretary.</text></subparagraph></paragraph><paragraph id="idc7c7e09dfa57445dad5e7ad74c596131"><enum>(2)</enum><header>Effect of new standard</header><text>In adopting any standard applicable to critical-impact artificial intelligence systems under section 207(c), the Secretary shall—</text><subparagraph id="id142624e19f7f43e2853e73fbd37a2c95"><enum>(A)</enum><text>identify the category under subsection (b) to which the standard relates, if any; and</text></subparagraph><subparagraph id="idd89432e360ba4db898e65176dabdf203"><enum>(B)</enum><text>specify the information that is no longer required to be included in a report required under subsection (a) as a result of the new standard.</text></subparagraph></paragraph></subsection><subsection id="idabdfda09870e48ba9f477dfeeba5a063"><enum>(e)</enum><header>Rule of construction</header><text>Nothing in this section shall be construed to require a critical-impact AI organization, or permit the Secretary, to disclose any information, including data or algorithms—</text><paragraph id="id5f7a98c6153242629dbcdaa1445965a9"><enum>(1)</enum><text>relating to a trade secret or other protected 
intellectual property right; </text></paragraph><paragraph id="idc881905aa0ba4270b488aa2f798b6d91"><enum>(2)</enum><text>that is confidential business information; or</text></paragraph><paragraph id="idb2279c2789b64fffa70b86f25049e656"><enum>(3)</enum><text>that is privileged. </text></paragraph></subsection></section><section commented="no" display-inline="no-display-inline" id="idde181c44934440b38a92be72f1aa83bd"><enum>207.</enum><header>Certification of critical-impact artificial intelligence systems</header><subsection id="id152837b9e1b44c049368af9b7aa54cc1"><enum>(a)</enum><header>Establishment of Artificial Intelligence Certification Advisory Committee</header><paragraph id="id72d13ede6a864358adb6c78871547834"><enum>(1)</enum><header>In general</header><text>Not later than 180 days after the date of enactment of this Act, the Secretary shall establish an advisory committee to provide advice and recommendations on TEVV standards and the certification of critical-impact artificial intelligence systems.</text></paragraph><paragraph id="id5509d4f9dd5f4142802ca9a9b47b8a14"><enum>(2)</enum><header>Duties</header><text>The advisory committee established under this section shall advise the Secretary on matters relating to the testing and certification of critical-impact artificial intelligence systems, including by—</text><subparagraph id="id389b6fd8f9f74a4f8509096a36825059"><enum>(A)</enum><text>providing recommendations to the Secretary on proposed TEVV standards to ensure such standards—</text><clause id="ide37a7e05e0154a0baf3ec4b6ea55b8cf"><enum>(i)</enum><text>maximize alignment and interoperability with standards issued by nongovernmental standards organizations and international standards bodies;</text></clause><clause id="id4cb9475f9e234a7f9681c46ebb71bb7c"><enum>(ii)</enum><text>are performance-based and impact-based; and</text></clause><clause id="id95369281178542e5a1bd9d67bd7e6828"><enum>(iii)</enum><text>are applicable or necessary to facilitate the 
deployment of critical-impact artificial intelligence systems in a transparent, secure, and safe manner;</text></clause></subparagraph><subparagraph id="id357b14afeb334456b501b4e0376ef6b7"><enum>(B)</enum><text>reviewing prospective TEVV standards submitted by the Secretary to ensure such standards align with recommendations under subparagraph (A);</text></subparagraph><subparagraph id="idddce95241fa640ffbc8be33ee6a27644"><enum>(C)</enum><text>upon completion of the review under subparagraph (B), providing consensus recommendations to the Secretary on—</text><clause id="id6640a662b4e04fddaac63d8bfc1e7a95"><enum>(i)</enum><text>whether a TEVV standard should be issued, modified, revoked, or added; and</text></clause><clause id="id2e4da0199f2b412a9a0040536e8a5410"><enum>(ii)</enum><text>if such a standard should be issued, how best to align the standard with the considerations described in subsection (c)(2) and recommendations described in subparagraph (A); and</text></clause></subparagraph><subparagraph id="idb156c8ecc8d84e2eaf192ac1dddf4977"><enum>(D)</enum><text>reviewing and providing advice and recommendations on the plan and subsequent updates to the plan submitted under subsection (b).</text></subparagraph></paragraph><paragraph id="id5ffdd420669b441594a5d53fff09f56d"><enum>(3)</enum><header>Composition</header><text>The advisory committee established under this subsection shall be composed of not more than 15 members with a balanced composition of representatives of the private sector, institutions of higher education, and non-profit organizations, including—</text><subparagraph id="id4b5230a5ac794c36a8ad95ce1a206700"><enum>(A)</enum><text>representatives of—</text><clause id="iddc41a4e063614223ad1ad2db781c8a42"><enum>(i)</enum><text>institutions of higher education;</text></clause><clause id="id6af4a7d574904f83b12a8e42bea7dc84"><enum>(ii)</enum><text>companies developing or operating artificial intelligence systems;</text></clause><clause 
id="ide7cdb7ae39434a5fbe63e39dc5891c77"><enum>(iii)</enum><text>consumers or consumer advocacy groups; and</text></clause><clause id="id5f5aeae4c4c44721b70c09a808cda819"><enum>(iv)</enum><text>enabling technology companies; and</text></clause></subparagraph><subparagraph id="id0ecbe3ec244d47e0a381fce11ad61f0a"><enum>(B)</enum><text>any other members the Secretary considers to be appropriate.</text></subparagraph></paragraph></subsection><subsection id="idc834a3cc6d9343648aee85fd067a14d8"><enum>(b)</enum><header>Artificial intelligence certification plan</header><paragraph id="id876472d7dfea466d8bff7d3bcf2e7380"><enum>(1)</enum><header>In general</header><text>Not later than 1 year after the date of enactment of this Act, the Secretary shall establish a 3-year implementation plan for the certification of critical-impact artificial intelligence systems.</text></paragraph><paragraph id="id6c34d663ea314ed8a60365e84f4e661b"><enum>(2)</enum><header>Periodic update</header><text>The Secretary shall periodically update the plan established under paragraph (1).</text></paragraph><paragraph id="id0d2d1640ec3b43d394476f1871a01738"><enum>(3)</enum><header>Contents</header><text>The plan established under paragraph (1) shall include—</text><subparagraph id="idb5c0a51e2d3147bda3217592847dd7e9"><enum>(A)</enum><text>a methodology for gathering and using relevant, objective, and available information relating to TEVV;</text></subparagraph><subparagraph id="id1a8f1d242b294a4c9fbca1cde7484bd5"><enum>(B)</enum><text>a process for considering whether prescribing certain TEVV standards under subsection (c) for critical-impact artificial intelligence systems is appropriate, necessary, or duplicative of existing international standards;</text></subparagraph><subparagraph id="id4d8df22a3c804d428519388aec0bcfdc"><enum>(C)</enum><text>if TEVV standards are considered appropriate, a process for prescribing such standards for critical-impact artificial intelligence systems; 
and</text></subparagraph><subparagraph id="iddcefd6d3f1aa4623b412f8bd003986b1"><enum>(D)</enum><text>an outline of standards proposed to be issued, including an estimation of the timeline and sequencing of such standards.</text></subparagraph></paragraph><paragraph id="id9527d631206e406d8760c04cee03f4a7"><enum>(4)</enum><header>Consultation</header><text>In developing the plan required under paragraph (1), the Secretary shall consult the following:</text><subparagraph id="ide88cb7446b104ce2891e82bc05037a62"><enum>(A)</enum><text>The National Artificial Intelligence Initiative Office.</text></subparagraph><subparagraph id="idc097fb5ce4c4455da3877b37a8e9a570"><enum>(B)</enum><text>The interagency committee established under section 5103 of the National Artificial Intelligence Initiative Act of 2020 (<external-xref legal-doc="usc" parsable-cite="usc/15/9413">15 U.S.C. 9413</external-xref>).</text></subparagraph><subparagraph id="ide23d899bbe1a4efeb926dc1272e08b22"><enum>(C)</enum><text>The National Artificial Intelligence Advisory Committee.</text></subparagraph><subparagraph id="id8919f4f7acf7488aa70f49321adc0a27"><enum>(D)</enum><text>Industry consensus standards issued by non-governmental standards organizations.</text></subparagraph><subparagraph id="id5f3f708b499f4dd99a9ca9699760b15b"><enum>(E)</enum><text>Other departments, agencies, and instrumentalities of the Federal Government, as considered appropriate by the Secretary.</text></subparagraph></paragraph><paragraph id="id822669fe799144be972f5bcd384af40a"><enum>(5)</enum><header>Submission to certification advisory committee</header><text>Upon completing the initial plan required under this subsection and upon completing periodic updates to the plan under paragraph (2), the Secretary shall submit the plan to the advisory committee established under subsection (a) for review.</text></paragraph><paragraph id="idb79d2013f39a43ce9aaff8750852d7a2"><enum>(6)</enum><header>Submission to committees of 
congress</header><text>Upon completing the plan required under this subsection, the Secretary shall submit to the relevant committees of Congress a report containing the plan.</text></paragraph><paragraph id="id5e2fd35d1347456dad3763dfc17368d3"><enum>(7)</enum><header>Limitation</header><text>The Secretary may not issue TEVV standards under subsection (c) until the date of the submission of the plan under paragraphs (5) and (6).</text></paragraph></subsection><subsection id="id2068d6b32ee345d8aec683cfa01757b1"><enum>(c)</enum><header>Standards</header><paragraph id="id32c845ce6fb34394a5088bff7e889f3f"><enum>(1)</enum><header>Standards</header><subparagraph id="id0292112833534b54973b2326b4326fb3"><enum>(A)</enum><header>In general</header><text>The Secretary shall issue TEVV standards for critical-impact artificial intelligence systems.</text></subparagraph><subparagraph id="idace8b3fbe13648f397a0ed293466ff23"><enum>(B)</enum><header>Requirements</header><text>Each standard issued under this subsection shall—</text><clause id="ideb82681287784001a5348ed1e38de89a"><enum>(i)</enum><text>be practicable;</text></clause><clause id="id2dc732a4d98d4d03b9a0e2b9d42c0d1b"><enum>(ii)</enum><text>meet the need for safe, secure, and transparent operations of critical-impact artificial intelligence systems;</text></clause><clause id="idcbce5bd5942e437aa7beab3281042ac1"><enum>(iii)</enum><text>with respect to a relevant standard issued by a non-governmental standards organization that is already in place, align with and be interoperable with that standard;</text></clause><clause id="id64cfad5935a14ff2a205900ac8cc2605"><enum>(iv)</enum><text>provide for a mechanism to, not less frequently than once every 2 years, solicit public comment and update the standard to reflect advancements in technology and system architecture; and</text></clause><clause id="idea722585444f47f4bf00ac8202e199b4"><enum>(v)</enum><text>be stated in objective 
terms.</text></clause></subparagraph></paragraph><paragraph id="id411ce22fdab449c8ae8ce5cd2480872d"><enum>(2)</enum><header>Considerations</header><text>In issuing TEVV standards for critical-impact artificial intelligence systems under this subsection, the Secretary shall—</text><subparagraph id="idb0cd9a3fdec749118611ef8a60101c47"><enum>(A)</enum><text>consider relevant available information concerning critical-impact artificial intelligence systems, including—</text><clause id="iddf8527f9547246869bbb14423f703f8c"><enum>(i)</enum><text>transparency reports submitted under section 203(a);</text></clause><clause id="id5824df92c3984dfbb351cdeebfd7b8fe"><enum>(ii)</enum><text>risk management assessments conducted under section 206(a); and</text></clause><clause id="id84a88bfec4ca49c1a42a34017569e474"><enum>(iii)</enum><text>any additional information provided to the Secretary pursuant to section 203(a)(1)(B); </text></clause></subparagraph><subparagraph id="id7be849e0d1fa4cf2b909d943c139770c"><enum>(B)</enum><text>consider whether a proposed standard is reasonable, practicable, and appropriate for the particular type of critical-impact artificial intelligence system for which the standard is proposed;</text></subparagraph><subparagraph id="id01902f78ed2a4b1883b87e83d4fb8714"><enum>(C)</enum><text>consult with relevant artificial intelligence stakeholders and review industry standards issued by nongovernmental standards organizations;</text></subparagraph><subparagraph id="id8dba1b46a350468288e4a4de4e077a54"><enum>(D)</enum><text>pursuant to paragraph (1)(B)(iii), consider whether adoption of a relevant standard issued by a nongovernmental standards organization as a TEVV standard is the most appropriate action; and</text></subparagraph><subparagraph id="id069695c992644bdcb2f26c532fb7438e"><enum>(E)</enum><text>consider whether the standard takes into account—</text><clause id="id8ef20d1912874ac0b52367d83e57f3a0"><enum>(i)</enum><text>transparent, replicable, and 
objective assessments of critical-impact artificial intelligence system risk, structure, capabilities, and design;</text></clause><clause id="id8602ef6060fc4a53b198d5dbe498faa4"><enum>(ii)</enum><text>the risk posed to the public by an applicable critical-impact artificial intelligence system; and</text></clause><clause id="ideb70c61c83e440318b8a952291ae72d9"><enum>(iii)</enum><text>the diversity of methodologies and innovative technologies and approaches available to meet the objectives of the standard. </text></clause></subparagraph></paragraph><paragraph id="ide571ac879e1b47a0bb6a02116968d73a"><enum>(3)</enum><header>Consultation</header><text>Before finalizing a TEVV standard issued under this subsection, the Secretary shall submit the TEVV standard to the advisory committee established under subsection (a) for review. </text></paragraph><paragraph id="id655c9ab95a2f481f8047abcbe29508b0"><enum>(4)</enum><header>Public comment</header><text>Before issuing any TEVV standard under this subsection, the Secretary shall provide an opportunity for public comment. 
</text></paragraph><paragraph id="idbe0de1e7190e4e5caa1c18ec92fa6b7e"><enum>(5)</enum><header>Cooperation</header><text>In developing a TEVV standard under this subsection, the Secretary may, as determined appropriate, advise, assist, and cooperate with departments, agencies, and instrumentalities of the Federal Government, States, and other public and private agencies.</text></paragraph><paragraph id="id73b7a34dea494d45b90e7e3797014d4d"><enum>(6)</enum><header>Effective date of standards</header><subparagraph commented="no" display-inline="no-display-inline" id="id548fd04b080043208a4f6ed22f51e49e"><enum>(A)</enum><header>In general</header><text display-inline="yes-display-inline">The Secretary shall specify the effective date of a TEVV standard issued under this subsection in the order issuing the standard.</text></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="id9ec0e9c930f144debd74c86f55fe55de"><enum>(B)</enum><header>Limitation</header><text display-inline="yes-display-inline">Subject to subparagraph (C), a TEVV standard issued under this subsection may not become effective—</text><clause commented="no" display-inline="no-display-inline" id="id42c032b33c72442c98a308ee40a5fa46"><enum>(i)</enum><text display-inline="yes-display-inline">during the 180-day period following the date on which the TEVV standard is issued; and</text></clause><clause commented="no" display-inline="no-display-inline" id="ida3688d5de3774794810955262d02bd34"><enum>(ii)</enum><text display-inline="yes-display-inline">more than 1 year after the date on which the TEVV standard is issued.</text></clause></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="id91428870f26044c78c29cbb10c52d292"><enum>(C)</enum><header>Exception</header><text display-inline="yes-display-inline">Subparagraph (B) shall not apply to the effective date of a TEVV standard issued under this section if the Secretary—</text><clause commented="no" 
display-inline="no-display-inline" id="id02c2819e4dfe4fe9b3130434ba00bd9f"><enum>(i)</enum><text display-inline="yes-display-inline">finds, for good cause shown, that a different effective date is in the public interest; and</text></clause><clause commented="no" display-inline="no-display-inline" id="idbc0b22c768fa49669b9748d59400ee78"><enum>(ii)</enum><text display-inline="yes-display-inline">publishes the reasons for the finding under clause (i).</text></clause></subparagraph></paragraph><paragraph id="id1b71234bb5bb48e3a0667c3fc67ea689"><enum>(7)</enum><header>Rule of construction</header><text>Nothing in this subsection shall be construed to authorize the Secretary to impose any requirements on or take any enforcement actions under this section or section 208 relating to a critical-impact AI organization before a TEVV standard relating to those requirements is prescribed.</text></paragraph></subsection><subsection id="idc21041ff0a5b4fa9addc3a87945ffaa4"><enum>(d)</enum><header>Exemptions</header><paragraph id="id47ac03781fed45bc9350fb7509fcb71e"><enum>(1)</enum><header>Authority to exempt and procedures</header><subparagraph id="id9e98b9fea2874eb68a124f87d40986a9"><enum>(A)</enum><header>In general</header><text>The Secretary may exempt, on a temporary basis, a critical-impact artificial intelligence system from a TEVV standard issued under subsection (c) on terms the Secretary considers appropriate.</text></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="id87cfa75a096f482c93b0bb4cc5d79233"><enum>(B)</enum><header>Renewal</header><text display-inline="yes-display-inline">An exemption under subparagraph (A)—</text><clause commented="no" display-inline="no-display-inline" id="id493a28b76b04475690117bfba2fa0e80"><enum>(i)</enum><text display-inline="yes-display-inline">may be renewed only on reapplication; and</text></clause><clause commented="no" display-inline="no-display-inline" 
id="idcd818033275845438e67ff31a2799643"><enum>(ii)</enum><text display-inline="yes-display-inline">shall conform to the requirements of this paragraph.</text></clause></subparagraph><subparagraph id="id7eb88f62d9a94d71a875f2810ad0db81" commented="no"><enum>(C)</enum><header>Proceedings</header><clause id="idcd77a46ff4d746c19de78ff552e1a0df"><enum>(i)</enum><header>In general</header><text>The Secretary may begin a proceeding to grant an exemption to a critical-impact artificial intelligence system under this paragraph if the critical-impact AI organization that deployed the critical-impact artificial intelligence systems applies for an exemption or a renewal of an exemption.</text></clause><clause commented="no" display-inline="no-display-inline" id="id8458563648854941a1529e83fa6f3f5f"><enum>(ii)</enum><header>Notice and comment</header><text display-inline="yes-display-inline">The Secretary shall publish notice of the application under clause (i) and provide an opportunity to comment.</text></clause><clause commented="no" display-inline="no-display-inline" id="idadd6aea5d7424f4bbb0e48ed2f074d99"><enum>(iii)</enum><header>Filing</header><text display-inline="yes-display-inline">An application for an exemption or for a renewal of an exemption under this paragraph shall be filed at such time and in such manner and contain such information as the Secretary may require.</text></clause></subparagraph><subparagraph id="id7519849472624c41be14057f7fa01ef1" commented="no"><enum>(D)</enum><header>Actions</header><text>The Secretary may grant an exemption under this paragraph upon finding that—</text><clause id="id61731c8c5fbe432289e69e9cd102189f" commented="no"><enum>(i)</enum><text>the exemption is consistent with the public interest and this section; and</text></clause><clause id="idfe7cbf3268bf44369090dd91c5771df1" commented="no"><enum>(ii)</enum><text>the exemption would facilitate the development or evaluation of a feature or characteristic of a critical-impact 
artificial intelligence system providing a safety and security level that is not less than the TEVV standard level. </text></clause></subparagraph></paragraph><paragraph id="id27d58aafda5c4d85abd5f156782337fd"><enum>(2)</enum><header>Disclosure</header><text>Not later than 30 days after the date on which an application is filed under this subsection, the Secretary may make public information contained in the application or relevant to the application, unless the information concerns or is related to a trade secret or other confidential information not relevant to the application.</text></paragraph><paragraph id="id42128c39de844d5e999182511ecd77c7"><enum>(3)</enum><header>Notice of decision</header><text>The Secretary shall publish in the Federal Register a notice of each decision granting or denying an exemption under this subsection and the reasons for granting or denying that exemption, including a justification with supporting information for the selected approach.</text></paragraph></subsection><subsection id="idcea23be3e85147e6b19f734e13390b52"><enum>(e)</enum><header>Self-Certification of compliance</header><paragraph id="id731c268760bb42a0acb91c3f99dbd150"><enum>(1)</enum><header>In general</header><text>Subject to paragraph (2), with respect to each critical-impact artificial intelligence system of a critical-impact AI organization, the critical-impact AI organization shall certify to the Secretary that the critical-impact artificial intelligence system complies with applicable TEVV standards issued under this section.</text></paragraph><paragraph id="id2aaedff44aa542faaf30d366c664842a"><enum>(2)</enum><header>Exception</header><text>A critical-impact AI organization may not issue a certificate under paragraph (1) if, in exercising reasonable care, the critical-impact AI organization has constructive knowledge that the certificate is false or misleading in a material respect.</text></paragraph></subsection><subsection 
id="idb44b528750584de0947a52a45685b13f"><enum>(f)</enum><header>Noncompliance findings and enforcement action</header><paragraph id="id961375c0548e43c6bfe3754acd03ef97"><enum>(1)</enum><header>Finding of noncompliance by secretary</header><text>Upon learning that a critical-impact artificial intelligence system deployed by a critical-impact AI organization does not comply with the requirements under this section, the Secretary shall—</text><subparagraph commented="no" display-inline="no-display-inline" id="id4bcf13ddb18a4db6be358c56b2917388"><enum>(A)</enum><text display-inline="yes-display-inline">immediately—</text><clause id="ida72cd70df8474e13896a5060be07ea10"><enum>(i)</enum><text>notify the critical-impact AI organization of the finding; and</text></clause><clause id="idea9f272c485c4b5dbd07d4190445091f"><enum>(ii)</enum><text>order the critical-impact AI organization to take remedial action to address the noncompliance of the artificial intelligence system; and</text></clause></subparagraph><subparagraph id="id213ef29be8a343638eff10830f3424c2"><enum>(B)</enum><text>may, as determined appropriate or necessary by the Secretary, and if the Secretary determines that actions taken by a critical-impact AI organization are insufficient to remedy the noncompliance of the critical-impact AI organization with this section, take enforcement action under section 208.</text></subparagraph></paragraph><paragraph id="id18ede82ca5e94510a127dab09fc87a39"><enum>(2)</enum><header>Actions by critical-impact AI organization</header><text>If a critical-impact AI organization finds that a critical-impact artificial intelligence system deployed by the critical-impact AI organization is noncompliant with an applicable TEVV standard issued under this section or the critical-impact AI organization is notified of noncompliance by the Secretary under paragraph (1)(A)(i), the critical-impact AI organization shall—</text><subparagraph 
id="id5c1464ff0a7a4cd79e007b7b82f097aa"><enum>(A)</enum><text>without undue delay, notify the Secretary by certified mail or electronic mail of the noncompliance or receipt of the notification of noncompliance;</text></subparagraph><subparagraph id="id67e5817e7b5f45a3845294d6a89eba38"><enum>(B)</enum><text>take remedial action to address the noncompliance; and</text></subparagraph><subparagraph id="id9854272c8ea043728dbf46cbe2d31898"><enum>(C)</enum><text>not later than 10 days after the date of the notification or receipt under subparagraph (A), submit to the Secretary a report containing information on—</text><clause id="id9c64d887bbb044cabe13f71a920ff3cb"><enum>(i)</enum><text>the nature and discovery of the noncompliant aspect of the critical-impact artificial intelligence system;</text></clause><clause id="id0c0a6273e6f44972a5aab69bdcf6922c"><enum>(ii)</enum><text>measures taken to remedy such noncompliance; and</text></clause><clause id="idf8ef3100081f45a88901dc976eb77eb3"><enum>(iii)</enum><text>actions taken by the critical-impact AI organization to address stakeholders affected by such noncompliance.</text></clause></subparagraph></paragraph></subsection></section><section commented="no" display-inline="no-display-inline" id="id2622ebcc0f82420da66bc7f25df639cb"><enum>208.</enum><header>Enforcement</header><subsection id="idb26803d89c214e7c9a5d60dace1068ab"><enum>(a)</enum><header>In general</header><text>Upon discovering noncompliance with a provision of this Act by a deployer of a high-impact artificial intelligence system or a critical-impact AI organization if the Secretary determines that actions taken by the critical-impact AI organization are insufficient to remedy the noncompliance, the Secretary shall take an action described in this section.</text></subsection><subsection id="id7102eb868c06447a82fb6e25a68b940c"><enum>(b)</enum><header>Civil penalties</header><paragraph id="idb4a607d0699542c7be90b8089cf46922"><enum>(1)</enum><header>In 
general</header><text>The Secretary may impose a penalty described in paragraph (2) on a deployer of a high-impact artificial intelligence system or a critical-impact AI organization for each violation by that entity of this Act or any regulation or order issued under this Act.</text></paragraph><paragraph id="ida4eec6286b784804ac9fae32c83d9dbe"><enum>(2)</enum><header>Penalty described</header><text>The penalty described in this paragraph is the greater of—</text><subparagraph commented="no" display-inline="no-display-inline" id="id9b9a67de8e8747239189ad9298b45caf"><enum>(A)</enum><text display-inline="yes-display-inline">an amount not to exceed $300,000; or</text></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="id0a28063c53334d5da66ae8c7f13501a9"><enum>(B)</enum><text display-inline="yes-display-inline">an amount that is twice the value of the transaction that is the basis of the violation with respect to which the penalty is imposed.</text></subparagraph></paragraph></subsection><subsection id="id9a45f57f5a5d4330abb28c1d24d94ca1"><enum>(c)</enum><header>Violation with intent</header><paragraph commented="no" display-inline="no-display-inline" id="id011f9c9fc3bf44b585fd9258061c233c"><enum>(1)</enum><header>In general</header><text display-inline="yes-display-inline">If the Secretary determines that a deployer of a high-impact artificial intelligence system or a critical-impact AI organization intentionally violates this Act or any regulation or order issued under this Act, the Secretary may prohibit the critical-impact AI organization from deploying a critical-impact artificial intelligence system.</text></paragraph><paragraph commented="no" display-inline="no-display-inline" id="id79a9b47c04b942008beb352cb29c73f5"><enum>(2)</enum><header>In addition</header><text display-inline="yes-display-inline">A prohibition imposed under paragraph (1) shall be in addition to any other civil penalties provided under this 
Act.</text></paragraph></subsection><subsection id="id9c09f87cc1404c31adf0bbc60605d5b8"><enum>(d)</enum><header>Factors</header><text>The Secretary may by regulation provide standards for establishing levels of civil penalty under this section based upon factors such as the seriousness of the violation, the culpability of the violator, and such mitigating factors as the violator’s record of cooperation with the Secretary in disclosing the violation.</text></subsection><subsection id="id7e5cbb763c964bbbab0dc642592256d7"><enum>(e)</enum><header>Civil action</header><paragraph commented="no" display-inline="no-display-inline" id="id0ebe8b43541b430f963cff28ffb29662"><enum>(1)</enum><header>In general</header><text display-inline="yes-display-inline">Upon referral by the Secretary, the Attorney General may bring a civil action in a United States district court to—</text><subparagraph commented="no" display-inline="no-display-inline" id="idb320d2d5b97e402abf4f857f5a2c288d"><enum>(A)</enum><text display-inline="yes-display-inline">enjoin a violation of section 207; or</text></subparagraph><subparagraph commented="no" display-inline="no-display-inline" id="id56ed76192b3e491fa87f85556e0aebb4"><enum>(B)</enum><text display-inline="yes-display-inline">collect a civil penalty upon a finding of noncompliance with this Act.</text></subparagraph></paragraph><paragraph commented="no" display-inline="no-display-inline" id="idc95bf0d141b344cbaf65497a84a5c5fe"><enum>(2)</enum><header>Venue</header><text display-inline="yes-display-inline">A civil action may be brought under paragraph (1) in the judicial district in which the violation occurred or the defendant is found, resides, or does business.</text></paragraph><paragraph commented="no" display-inline="no-display-inline" id="id5356fd243acc4fd39f3d8e11ab989926"><enum>(3)</enum><header>Process</header><text display-inline="yes-display-inline">Process in a civil action under paragraph (1) may be served in any judicial district in 
which the defendant resides or is found.</text></paragraph></subsection><subsection id="idba2206b597904743afdd72094fb22b67"><enum>(f)</enum><header>Rule of construction</header><text>Nothing in this section shall be construed to require a developer of a critical-impact artificial intelligence system to disclose any information, including data or algorithms—</text><paragraph id="id3458e6d405b94d18ae5d465146e3b6f1"><enum>(1)</enum><text>relating to a trade secret or other protected intellectual property right;</text></paragraph><paragraph id="id70e06b4000cb4b29b931e86d4bda0d1d"><enum>(2)</enum><text>that is confidential business information; or</text></paragraph><paragraph id="iddbc7c63e392b4d20b23f8ba1d54e0a3d"><enum>(3)</enum><text>that is privileged.</text></paragraph></subsection></section><section id="id3b889107277248ddb6e0a390c25daa07"><enum>209.</enum><header>Artificial intelligence consumer education</header><subsection id="id5a46c1489dc746db9fb22f5167f7a4f7"><enum>(a)</enum><header>Establishment</header><text>Not later than 180 days after the date of enactment of this Act, the Secretary shall establish a working group relating to responsible education efforts for artificial intelligence systems.</text></subsection><subsection id="id9e0f76a5e63945b78e66074b9c2ec986"><enum>(b)</enum><header>Membership</header><paragraph id="id6079de1969714bfbbb5c715aab2c050c"><enum>(1)</enum><header>In general</header><text>The Secretary shall appoint to serve as members of the working group established under this section not more than 15 individuals with expertise relating to artificial intelligence systems, including—</text><subparagraph id="idac72d7aa0c6444d29041fe9c64712dac"><enum>(A)</enum><text>representatives of—</text><clause id="idaa681600722545849fbffd501dd5be98"><enum>(i)</enum><text>institutions of higher education;</text></clause><clause id="idad649e61fb3c4e14951abd7b8a5323c3"><enum>(ii)</enum><text>companies developing or operating artificial intelligence 
systems;</text></clause><clause id="id426e45df007f432fbef20fbf4794820b"><enum>(iii)</enum><text>consumers or consumer advocacy groups;</text></clause><clause id="id0d8bfcc887fd4017bd3947d0c4cab900"><enum>(iv)</enum><text>public health organizations;</text></clause><clause id="id83d85c6ab8614461a1f3b15eed71b951"><enum>(v)</enum><text>marketing professionals;</text></clause><clause id="id8b996e830cf04ed599977d74ba53f239"><enum>(vi)</enum><text>entities with national experience relating to consumer education, including technology education;</text></clause><clause id="id53a70145330f4087ac97788cf936988c"><enum>(vii)</enum><text>public safety organizations;</text></clause><clause commented="no" display-inline="no-display-inline" id="id61d100a536184e0193144f2dd5839b91"><enum>(viii)</enum><text>rural workforce development advocates;</text></clause><clause id="id1b9e8f1823144aed82c0725269eab88d"><enum>(ix)</enum><text>enabling technology companies; and</text></clause><clause commented="no" display-inline="no-display-inline" id="id10b4ff75048f495f9eb25b22625c2b75"><enum>(x)</enum><text>nonprofit technology industry trade associations; and</text></clause></subparagraph><subparagraph id="id362fdff91ef745b28f2c09cfbfd3ece3"><enum>(B)</enum><text>any other members the Secretary considers to be appropriate.</text></subparagraph></paragraph><paragraph id="id05eae745ef754d03aa1787fef19d155b"><enum>(2)</enum><header>Compensation</header><text>A member of the working group established under this section shall serve without compensation.</text></paragraph></subsection><subsection id="id3461518a5f6b40358e65f3d93cdea8bc"><enum>(c)</enum><header>Duties</header><paragraph id="id4c3fa182da604ca089db9a9f80be368c"><enum>(1)</enum><header>In general</header><text>The working group established under this section shall—</text><subparagraph id="id8179d9e2a43048ae85c97e05481c59c2"><enum>(A)</enum><text>identify recommended education and programs that may be voluntarily employed by industry to 
inform—</text><clause id="id32e4591395ea4897be14c98681701948"><enum>(i)</enum><text>consumers and other stakeholders with respect to artificial intelligence systems as those systems—</text><subclause id="id33b1c0f9328f41fab2d2c4a13b844990"><enum>(I)</enum><text>become available; or</text></subclause><subclause id="id466e3679584941e9adb2e09c3b6a8f9b"><enum>(II)</enum><text>are soon to be made widely available for public use or consumption; and</text></subclause></clause></subparagraph><subparagraph id="idea5003cbb7e4461d9f3aedd9975637a2"><enum>(B)</enum><text>submit to Congress, and make available to the public, a report containing the findings and recommendations under subparagraph (A).</text></subparagraph></paragraph><paragraph id="idf900bd1e0512449c8f13091dbac4ea26"><enum>(2)</enum><header>Factors for consideration</header><text>The working group established under this section shall take into consideration topics relating to—</text><subparagraph id="id5af7f31722224ab08ee5145c59030175"><enum>(A)</enum><text>the intent, capabilities, and limitations of artificial intelligence systems;</text></subparagraph><subparagraph id="idf4b73c0b0c794bf48fdf304312568744"><enum>(B)</enum><text>use cases of artificial intelligence applications that improve lives of the people of the United States, such as improving government efficiency, filling critical roles, and reducing mundane work tasks;</text></subparagraph><subparagraph id="id206f4d77a69949a7894bcd831b237096"><enum>(C)</enum><text>artificial intelligence research breakthroughs;</text></subparagraph><subparagraph id="idfdcc20087e2044aa883bdb0f10969cf8"><enum>(D)</enum><text>engagement and interaction methods, including how to adequately inform consumers of interaction with an artificial intelligence system;</text></subparagraph><subparagraph id="id19fa42c1e116417f94bc5886b1a4441d"><enum>(E)</enum><text>human-machine interfaces;</text></subparagraph><subparagraph 
id="idcc1dd3c6ff4c4ad191d9d3ce35d7fa11"><enum>(F)</enum><text>emergency fallback scenarios;</text></subparagraph><subparagraph id="id686f4b50dc8a48acbdef562b0605e684"><enum>(G)</enum><text>operational boundary responsibilities;</text></subparagraph><subparagraph id="id2b72eb43cdb94008b05ba792c441e2bd"><enum>(H)</enum><text>potential mechanisms that could change function behavior in service; and</text></subparagraph><subparagraph id="id7218188eb1fd47ab8505374d44ec80a4"><enum>(I)</enum><text>consistent nomenclature and taxonomy for safety features and systems.</text></subparagraph></paragraph><paragraph id="idfe754c90d04345508ca24a79bd2021fa"><enum>(3)</enum><header>Consultation</header><text>The Secretary shall consult with the Chair of the Federal Trade Commission with respect to the recommendations of the working group established under this section, as appropriate.</text></paragraph></subsection><subsection id="ide1ec64823faa41f0a674b5e8c82e9d31" commented="no" display-inline="no-display-inline"><enum>(d)</enum><header>Termination</header><text>The working group established under this section shall terminate on the date that is 2 years after the date of enactment of this Act.</text></subsection></section></title></legis-body></bill> 

